neutron-8.4.0/0000775000567000056710000000000013044373210014353 5ustar jenkinsjenkins00000000000000
neutron-8.4.0/HACKING.rst0000664000567000056710000000367313044372760016173 0ustar jenkinsjenkins00000000000000
Neutron Style Commandments
==========================

- Step 1: Read the OpenStack Style Commandments
  http://docs.openstack.org/developer/hacking/
- Step 2: Read on

Neutron Specific Commandments
-----------------------------

- [N319] Validate that debug level logs are not translated
- [N320] Validate that LOG messages, except debug ones, have translations
- [N321] Validate that jsonutils module is used instead of json
- [N322] Detect common errors with assert_called_once_with
- [N324] Prevent use of deprecated contextlib.nested.
- [N325] Python 3: Do not use xrange.
- [N326] Python 3: do not use basestring.
- [N327] Python 3: do not use dict.iteritems.
- [N328] Detect wrong usage with assertEqual
- [N329] Method's default argument shouldn't be mutable
- [N330] Use assertEqual(*empty*, observed) instead of
  assertEqual(observed, *empty*)
- [N331] Detect wrong usage with assertTrue(isinstance()).
- [N332] Use assertEqual(expected_http_code, observed_http_code) instead of
  assertEqual(observed_http_code, expected_http_code).
- [N333] Validate that LOG.warning is used instead of LOG.warn. The latter
  is deprecated.

Creating Unit Tests
-------------------
For every new feature, unit tests should be created that both test and
(implicitly) document the usage of said feature. When submitting a patch for
a bug that had no unit test, a new passing unit test should be added. If a
submitted bug fix does have a unit test, be sure to add a new one that fails
without the patch and passes with the patch.

All unittest classes must ultimately inherit from testtools.TestCase. In the
Neutron test suite, this should be done by inheriting from
neutron.tests.base.BaseTestCase.

All setUp and tearDown methods must upcall using the super() method.
tearDown methods should be avoided and addCleanup calls should be preferred.
Never manually create tempfiles. Always use the tempfile fixtures from
the fixture library to ensure that they are cleaned up.
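For instance, a minimal test observing these rules might look like the
sketch below. The patched target ('neutron.agent.utils.execute') is only a
stand-in for whatever you are actually testing; the structure — inheriting
from BaseTestCase, upcalling via super(), and preferring addCleanup — is the
point:

.. code-block:: python

    # A minimal sketch, not taken from the tree; the patched target is a
    # stand-in for the code under test.
    import mock

    from neutron.tests import base


    class ExampleTestCase(base.BaseTestCase):

        def setUp(self):
            # Always upcall so BaseTestCase can reset config, mocks, etc.
            super(ExampleTestCase, self).setUp()
            patcher = mock.patch('neutron.agent.utils.execute')
            self.execute = patcher.start()
            # Prefer addCleanup over tearDown; cleanups run even when
            # setUp fails half way through.
            self.addCleanup(patcher.stop)

        def test_execute_called_once(self):
            self.execute.return_value = 'fake-output'
            observed = self.execute('fake', 'args')
            # assertEqual(expected, observed) ordering, per [N330].
            self.assertEqual('fake-output', observed)
            self.execute.assert_called_once_with('fake', 'args')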
echo " -h, --help Print this usage message" echo " --virtual-env-path Location of the virtualenv directory" echo " Default: \$(pwd)" echo " --virtual-env-name Name of the virtualenv directory" echo " Default: .venv" echo " --tools-path Location of the tools directory" echo " Default: \$(pwd)" echo "" echo "Note: with no options specified, the script will try to run the tests in a virtual environment," echo " If no virtualenv is found, the script will ask if you would like to create one. If you " echo " prefer to run tests NOT in a virtual environment, simply pass the -N option." exit } function process_options { i=1 while [ $i -le $# ]; do case "${!i}" in -h|--help) usage;; -V|--virtual-env) always_venv=1; never_venv=0;; -N|--no-virtual-env) always_venv=0; never_venv=1;; -s|--no-site-packages) no_site_packages=1;; -r|--recreate-db) recreate_db=1;; -n|--no-recreate-db) recreate_db=0;; -f|--force) force=1;; -u|--update) update=1;; -p|--pep8) just_pep8=1;; -8|--pep8-only-changed) just_pep8_changed=1;; -P|--no-pep8) no_pep8=1;; -c|--coverage) coverage=1;; -d|--debug) debug=1;; --virtual-env-path) (( i++ )) venv_path=${!i} ;; --virtual-env-name) (( i++ )) venv_dir=${!i} ;; --tools-path) (( i++ )) tools_path=${!i} ;; -*) testopts="$testopts ${!i}";; *) testargs="$testargs ${!i}" esac (( i++ )) done } tool_path=${tools_path:-$(pwd)} venv_path=${venv_path:-$(pwd)} venv_dir=${venv_name:-.venv} with_venv=tools/with_venv.sh always_venv=0 never_venv=0 force=0 no_site_packages=0 installvenvopts= testargs= testopts= wrapper="" just_pep8=0 just_pep8_changed=0 no_pep8=0 coverage=0 debug=0 recreate_db=1 update=0 LANG=en_US.UTF-8 LANGUAGE=en_US:en LC_ALL=C process_options $@ # Make our paths available to other scripts we call export venv_path export venv_dir export venv_name export tools_dir export venv=${venv_path}/${venv_dir} if [ $no_site_packages -eq 1 ]; then installvenvopts="--no-site-packages" fi function run_tests { # Cleanup *pyc ${wrapper} find . -type f -name "*.pyc" -delete if [ $debug -eq 1 ]; then if [ "$testopts" = "" ] && [ "$testargs" = "" ]; then # Default to running all tests if specific test is not # provided. testargs="discover ./neutron/tests" fi ${wrapper} python -m testtools.run $testopts $testargs # Short circuit because all of the testr and coverage stuff # below does not make sense when running testtools.run for # debugging purposes. return $? fi if [ $coverage -eq 1 ]; then TESTRTESTS="$TESTRTESTS --coverage" else TESTRTESTS="$TESTRTESTS --slowest" fi # Just run the test suites in current environment set +e testargs=`echo "$testargs" | sed -e's/^\s*\(.*\)\s*$/\1/'` TESTRTESTS="$TESTRTESTS --testr-args='--subunit $testopts $testargs'" OS_TEST_PATH=`echo $testargs|grep -o 'neutron\.tests[^[:space:]:]\+'|tr . /` if [ -n "$OS_TEST_PATH" ]; then os_test_dir=$(dirname "$OS_TEST_PATH") else os_test_dir='' fi if [ -d "$OS_TEST_PATH" ]; then wrapper="OS_TEST_PATH=$OS_TEST_PATH $wrapper" elif [ -d "$os_test_dir" ]; then wrapper="OS_TEST_PATH=$os_test_dir $wrapper" fi echo "Running \`${wrapper} $TESTRTESTS\`" bash -c "${wrapper} $TESTRTESTS | ${wrapper} subunit2pyunit" RESULT=$? 
  set -e
  copy_subunit_log

  if [ $coverage -eq 1 ]; then
    echo "Generating coverage report in covhtml/"
    # Don't compute coverage for common code, which is tested elsewhere
    ${wrapper} coverage combine
    ${wrapper} coverage html --include='neutron/*' --omit='neutron/openstack/common/*' -d covhtml -i
  fi

  return $RESULT
}

function copy_subunit_log {
  LOGNAME=`cat .testrepository/next-stream`
  LOGNAME=$(($LOGNAME - 1))
  LOGNAME=".testrepository/${LOGNAME}"
  cp $LOGNAME subunit.log
}

function warn_on_flake8_without_venv {
  if [ $never_venv -eq 1 ]; then
    echo "**WARNING**:"
    echo "Running flake8 without virtual env may miss OpenStack HACKING detection"
  fi
}

function run_pep8 {
  echo "Running flake8 ..."
  warn_on_flake8_without_venv
  ${wrapper} flake8
}

function run_pep8_changed {
    # NOTE(gilliard) We want to use flake8 to check the entirety of every file that has
    # a change in it. Unfortunately the --filenames argument to flake8 only accepts
    # file *names* and there are no files named (eg) "nova/compute/manager.py".  The
    # --diff argument behaves surprisingly as well, because although you feed it a
    # diff, it actually checks the file on disk anyway.
    local target=${testargs:-HEAD~1}
    local files=$(git diff --name-only $target | tr '\n' ' ')
    echo "Running flake8 on ${files}"
    warn_on_flake8_without_venv
    diff -u --from-file /dev/null ${files} | ${wrapper} flake8 --diff
}

TESTRTESTS="python setup.py testr"

if [ $never_venv -eq 0 ]
then
  # Remove the virtual environment if --force used
  if [ $force -eq 1 ]; then
    echo "Cleaning virtualenv..."
    rm -rf ${venv}
  fi
  if [ $update -eq 1 ]; then
      echo "Updating virtualenv..."
      python tools/install_venv.py $installvenvopts
  fi
  if [ -e ${venv} ]; then
    wrapper="${with_venv}"
  else
    if [ $always_venv -eq 1 ]; then
      # Automatically install the virtualenv
      python tools/install_venv.py $installvenvopts
      wrapper="${with_venv}"
    else
      echo -e "No virtual environment found...create one? (Y/n) \c"
      read use_ve
      if [ "x$use_ve" = "xY" -o "x$use_ve" = "x" -o "x$use_ve" = "xy" ]; then
        # Install the virtualenv and run the test suite in it
        python tools/install_venv.py $installvenvopts
        wrapper=${with_venv}
      fi
    fi
  fi
fi

# Delete old coverage data from previous runs
if [ $coverage -eq 1 ]; then
    ${wrapper} coverage erase
fi

if [ $just_pep8 -eq 1 ]; then
    run_pep8
    exit
fi

if [ $just_pep8_changed -eq 1 ]; then
    run_pep8_changed
    exit
fi

if [ $recreate_db -eq 1 ]; then
    rm -f tests.sqlite
fi

run_tests

# NOTE(sirp): we only want to run pep8 when we're running the full-test suite,
# not when we're running tests individually. To handle this, we need to
# distinguish between options (testopts), which begin with a '-', and
# arguments (testargs).
if [ -z "$testargs" ]; then
  if [ $no_pep8 -eq 0 ]; then
    run_pep8
  fi
fi
neutron-8.4.0/AUTHORS0000664000567000056710000007152513044373206015432 0ustar jenkinsjenkins00000000000000AKamyshnikova Aaron Rosen Aaron Rosen Aaron-Zhang231 Abhishek Chanda Abhishek Raut Abhishek Raut Abhishek Talwar Abishek Subramanian Adam Gandelman Adam Harwell Adelina Tuvenie Adin Scannell Adolfo Duarte Adrien Vergé Ailing Zhang Akash Gangil Akihiro MOTOKI Akihiro Motoki Akihiro Motoki Aleks Chirko Alessandro Pilotti Alessandro Pilotti Alessio Ababilov Alessio Ababilov Alex Holden Alex Oughton Alexander Ignatov Alexander Maretskiy Alexei Kornienko Alexey I.
Froloff Aliaksandr Dziarkach Aman Kumar Amir Sadoughi Amit Saha Anand Shanmugam Andre Pech Andreas Jaeger Andreas Jaeger Andreas Scheuring Andrew Boik Andrew Boik Andrey Epifanov Andrey Kurilin Andy Hill Angela Smith Angus Lees Ann Kamyshnikova Ante Karamatic Anthony Chow Anthony Veiga Anton Frolov Aparupa Arata Notsu Arie Bregman Armando Migliaccio Armando Migliaccio Armando Migliaccio Artur Korzeniewski Arun Sriraman Arvind Somy Arvind Somya Assaf Muller Attila Fazekas Aviram Bar-Haim Avishay Balderman Baodong (Robert) Li Baodong Li Baohua Yang Ben Nemec Ben Nemec Benedikt Trefzer Bernard Cafarelli Bernhard M. Wiedemann Bertrand Lallau Bhagyashri Shewale Bhuvan Arumugam Billy Olsen Bo Chi Bo Wang Bob Kukura Bob Melander Bogdan Tabor Boris Pavlovic Brad Hall Brad Hall Bradley Jones Brandon Logan Brandon Palm Brant Knudson Brent Eagles Brian Bowen Brian Haley Brian Haley Brian Waldon Britt Houser Carl Baldwin Carl Baldwin Carol Bouchard Cedric Brandily Chang Bo Guo ChangBo Guo(gcb) Chengli XU Chirag Shahani Christian Berendt Christoph Arnold Christoph Thiel Chuck Chuck Carlino Chuck Short ChuckC Clark Boylan Claudiu Belu Clayton O'Neill Clint Byrum Cyril Roelandt Cyril Roelandt Cédric Ollivier Dan Florea Dan Prince Dan Wendlandt Dane LeBlanc Daniel Gollub Dariusz Smigiel (dasm) Dariusz Smigiel (dasm) Darragh O'Reilly Darragh O'Reilly Darren Birkett Davanum Srinivas Davanum Srinivas Dave Cahill Dave Lapsley Dave Tucker David Edery David Ripton David Shaughnessy Dazhao Debo Deepak N Derek Higgins Dermot Tynan Dhanashree Gosavi Dipa Thakkar Dirk Mueller Divya ChanneGowda Dmitry Ratushnyy Dmitry Sutyagin Dongcan Ye Doug Hellmann Doug Hellmann Doug Wiegley Doug Wiegley DuYaHong Duarte Nunes Dustin Lundquist Ed Bak Edan David Edgar Magana Edgar Magana Einst Crazy Elena Ezhova Emilien Macchi EmilienM Emma Foley Eoghan Glynn Eran Gampel Eric Brown Eric Windisch Erik Colnick Erik Colnick Eugene Nikanorov Evgeny Fedoruk Fawad Khaliq Federico Ressi Fei Long Wang Flavio Percoco Francisco Souza Franck Yelles Francois Deppierraz Francois Eleouet Frode Nordahl Gabriel Wainer Gal Sagie Gandharva Gary Kotton Gary Kotton Gauvain Pocentek Gerard Braad Ghe Rivero Gong Zhang Gordon Chung Gordon Chung Guilherme Salgado Haim Daniel Haiwei Xu Han Zhou Hareesh Puthalath Harsh Prasad Harshada Mangesh Kakad He Jie Xu He Qing He Yongli Hemanth Ravi Henry Gessau Henry Gessau Henry Gessau HenryGessau HenryVIII Herman Ge Hiroaki KAWAI Hirofumi Ichihara Hironori Shiina Hisaharu Ishii Hong Hui Xiao Huan Xie Hui HX Xiang Hui Xiang Hynek Mlnarik IWAMOTO Toshihiro Ian Wienand Ignacio Scopetta Ihar Hrachyshka Ilya Chukhnakov Ilya Pekelny Ilya Shakhat Ilya Sokolov Inessa Vasilevskaya Ionuț Arțăriși Irena Berezovsky Irena Berezovsky Iryoung Jeong Isaku Yamahata Isaku Yamahata Itsuro Oda Itzik Brown Itzik Brown Ivan Kolodyazhny Ivar Lazzaro Ivar Lazzaro JJ Asghar JUN JIE NAN Jacek Swiderski Jack McCann Jakub Libosvar James Anziano James Arendt James E. Blair James E. Blair James Page Jamie Lennox Jamie Lennox Jas Jason Dillaman Jason Kölker Jason Zhang Jaume Devesa Jay Pipes Jay S. 
Bryant Jens Rosenboom Jeremy Hanmer Jeremy McDermond Jeremy Stanley Jesse Andrews Jiajun Liu Jian Wen Jian Wen Jianing Yang Joe Gordon Joe Harrison Joe Heck Joe Mills John Belamaric John Davidge John Dewey John Dunning John Jason Brzozowski John Kasperski John Nielsen John Perkins John Schwarz Jon Grimm Jonathan LaCour Jordan Tardif Jorge Miramontes JuPing Juergen Brendel Julia Varlamova Juliano Martinez Juliano Martinez Julien Danjou Jun Park Justin Hammond Justin Lund KAWAI Hiroaki KIYOHIRO ADACHI Kahou Lei Kaiwei Fan Kanzhe Jiang Kawaguchi Ken'ichi Ohmichi Kenji Yasui Keshava Bharadwaj Kevin Benton Kevin Benton Kevin Benton Kevin Benton Kevin Fox Kevin L. Mitchell Kiall Mac Innes Kobi Samoray Koert van der Veer Koteswara Rao Kelam Koteswara Rao Kelam Kris Lindgren Kui Shi Kun Huang Kyle Mestery Kyle Mestery LIU Yulong Lajos Katona Lars Kellogg-Stedman Leon Cui Li Ma Li Ma Li Xipeng Li Zhixin Liang Bo Lianghwa Jou Liping Mao LipingMao LiuNanke Livnat Peer Lorin Hochstein Louis Taylor Lubosz Kosnik Lucas Alvares Gomes Lucian Petrut Luis A. Garcia Luiz H Ozaki Luke Gorrie Ly Loi Madhav Puri Madhu Mohan Nelemane Major Hayden Mandeep Dhami Manish Godara Manjeet Singh Bhatia Marga Millet Marga Millet Margaret Frances Mark McClain Mark McClain Mark McClain Mark McLoughlin Mark T. Voelker Martin Hickey Martin Kletzander Martin Roy Martins Jakubovics Maru Newby Maru Newby Maruti Mate Lakat Mathieu Gagné Mathieu Rohon Matt Dietz Matt Odden Matt Riedemann Matt Thompson Matthew Booth Matthew Thode Matthew Treinish Matthew Treinish Matthew Weeks Meenakshi Kaushik Mehdi Abaakouk Michael J Fork Michael Johnson Michael Krotscheck Michael Smith Michael Still Miguel Angel Ajo Miguel Angel Ajo Miguel Lavalle Miguel Lavalle Miguel Ángel Ajo Mike Bayer Mike Dorman Mike King Mike Kolesnik Ming Shuang Xian Mithil Arun Mitsuhiro SHIGEMATSU Mohammad Banikazemi Monty Taylor Morgan Fainberg Moshe Levi Motohiro OTSUKA Mr. 
Bojangles Mukul Murali Birru NGUYEN TUONG THANH Nachi Ueno Nachi Ueno Nader Lahouti Nate Johnston Nate Johnston Neil Jerram Nick Nick Bartos Nikola Dipanov Nikolay Fedotov Nikolay Sobolevskiy Nir Magnezi Numan Siddique Numan Siddique Oleg Bondarev Omer Anson Ondřej Nový Paul Carver Paul Michali Paul Michali Paul Ward Pavel Bondar Peng Xiao Peng Yong Pepijn Oomen Perry Zou Peter Feiner Petrut Lucian Pierre RAMBAUD Pierre Rognant Piotr Siwczak Piotr Siwczak Pradeep Kilambi Praneet Bachheti Prasoon Telang Praveen Kumar SM Praveen Yalagandula Preeti Mirji Pritesh Kothari Przemyslaw Czesnowicz Qiaowei Ren Qin Zhao Quan Tian Rahul Priyadarshi Raildo Mascena Rajaram Mallya Rajeev Grover Rajesh Mohan Rajesh Mohan Ralf Haferkamp Ramanjaneya Ramu Ramamurthy Ravi Shekhar Jethani Rawlin Peters Rawlin Peters Ray Chen Rich Curran Rick Clark Ritesh Anand Robert Collins Robert Collins Robert Kukura Robert Li Robert Mizielski Robert Pothier RobinWang Rodolfo Alonso Hernandez Roey Chen Roey Chen Rohit Agarwalla Rohit Agarwalla Roman Bogorodskiy Roman Podoliaka Roman Podolyaka Roman Prykhodchenko Roman Sokolkov Romil Gupta RongzeZhu Rosario Di Somma Rossella Sblendido Rossella Sblendido Rudrajit Tapadar Rui Zang Russell Bryant Ryan Moats Ryan Moe Ryan O'Hara Ryan Petrello Ryan Rossiter Ryan Tidwell Ryan Tidwell Ryota MIBU Ryu Ishimoto Sachi King Sachi King Saggi Mizrahi Sahid Orentino Ferdjaoui Saisrikiran Mudigonda Saju Madhavan Saksham Varma Salvatore Salvatore Orlando Salvatore Orlando Salvatore Orlando Sam Betts Sam Hague Sam Morrison Samer Deeb Sandhya Dasu Sanjeev Rampal Santhosh Santhosh Kumar Sascha Peilicke Sascha Peilicke Sascha Peilicke Saverio Proto Sayaji Sean Dague Sean Dague Sean M. Collins Sean M. Collins Sean McCully Sean Mooney Senhua Huang Serge Maskalik Sergey Belous Sergey Kolekonov Sergey Lukjanov Sergey Skripnick Sergey Vilgelm Sergey Vilgelm Sergio Cazzolato Shane Wang Shang Yong Shashank Hegde Shashank Hegde Shih-Hao Li Shiv Haris Shivakumar M Shivakumar M Shuangtai Tian Shweta P Shweta P Shweta Patil Siming Yin Simon Pasquier Sitaram Dontu Slawek Kaplonski Soheil Hassas Yeganeh Somik Behera Somik Behera Sourabh Patwardhan Sphoorti Joglekar Sreekumar S Sridar Kandaswamy Sridhar Ramaswamy Sridhar S Sridhar Venkat Sripriya Stanislav Kudriashev Stephen Eilert Stephen Gordon Stephen Gran Stephen Ma Steven Gonzales Steven Hillman Steven Ren Sudhakar Sudhakar Babu Gariganti Sudheendra Murthy Sudipta Biswas Sukhdev Sukhdev Sukhdev Kapur Sumit Naiksatam Sumit Naiksatam Sushil Kumar Swaminathan Vasudevan Swaminathan Vasudevan Swapnil Kulkarni (coolsvap) Sylvain Afchain Sławek Kapłoński Takaaki Suzuki Takashi NATSUME Takuma Watanabe Tan Lin Tatyana Leontovich Terry Wilson Thierry Carrez Thomas Bechtold Thomas Herve Thomas Morin Tim Miller Tim Swanson Tom Cammann Tom Fifield Tom Holtzen Tomasz Paszkowski Tomoaki Sato Tomoe Sugihara Tomoko Inoue Toni Ylenius Trinath Somanchi TrinathSomanchi Tu Hong Jun Tyler Smith Vadivel Poonathan Vasiliy Khomenko Victor Laza Victor Morales Victor Stinner Vincent Legoll Vincent Untz Vishal Agarwal Vishvananda Ishaya Vivekanandan Narasimhan Vlad Gridin Vladimir Eremin Vladislav Belogrudov Waldemar Znoinski Wanlong Gao Wei Hu Wei Wang WeiHu Weidong Shao Wenxin Wang Wlodzimierz Borkowski Wu Wenxiang Xiaolin Zhang Xu Chen Xu Han Peng Xuhan Peng YAMAMOTO Takashi YAMAMOTO Takashi Yaguang Tang Yalei Wang YanXingan Yang Yu Yang Yu YangLei Yatin Kumbhare Yi Zhao Ying Liu Yong Sheng Gong Yong Sheng Gong Yoni Shafrir Yoshihiro Kaneko Youcef Laribi Yu Fukuyama Yuanchao Sun 
Yuji Yuriy Taraday Yushiro FURUKAWA Yusuke Ide Yusuke Muraoka Yuuichi Fujioka Yves-Gwenael Bourhis ZHU ZHU Zang MingJie Zhao Lei ZhaoBo Zhenguo Niu Zhenguo Niu Zhesen ZhiQiang Fan ZhiQiang Fan Zhongyue Luo aaronorosen aaronzhang231 abhishek.talwar abhishek60014726 abhishekkekane adolfo duarte adreznec ajmiller alexpilotti ankitagrawal armando-migliaccio armando-migliaccio berlin caoyue cedric.brandily changzhi changzhi1990 chen-li chen-li chnm-kulkarni dekehn dql e0ne eperdomo eperdomo@cisco.com <> fujioka yuuichi fumihiko kakuma garyduan gessau gh159m gong yong sheng gongysh gongysh gordon chung guiyanxing hgangwx houming-wang huangpengtao hyunsun imran malik ivan-zhu jasonrad jingliuqing joe@midokura.com john_a_joyce johndavidge jun xie jun xie justin Lund kedar kulkarni lawrancejing leejian0612 lei zhang lijianlj linb liu-sheng liudong liuqing lizheming lizhixin3016 llg8212 lzklibj malos mamtap marios mark mcclain mat mathieu-rohon mohankumar_n mouad benchchaoui ncode nfedotov niusmallnan nmagnezi openstack panxia6679sina.com rajeev rajeev reedip rohitagarwalla rohitagarwalla roagarwa@cisco.com <> ronak root root root rossella sadasu salvatore <> salvatore sanuptpm shihanzhang shmcfarl shu,xinxin singhannie siyingchun snaiksat sonu sonu.kumar sridhargaddam sridhargaddam stanzgy steve.ruan sukhdev sushma_korati sysnet tianquan ting.wang trinaths venkata anil venkata anil venkatamahesh vijaychundury vikas vikram.choudhary vinkesh banka wangbo watanabe isao watanabe.isao whitekid xchenum xiexs yan.haifeng yangxurong yaowei yuyangbj zengfagao zhhuabj zhiyuan_cai zoukeke@cmss.chinamobile.com Édouard Thuleau neutron-8.4.0/rally-jobs/0000775000567000056710000000000013044373210016431 5ustar jenkinsjenkins00000000000000neutron-8.4.0/rally-jobs/plugins/0000775000567000056710000000000013044373210020112 5ustar jenkinsjenkins00000000000000neutron-8.4.0/rally-jobs/plugins/__init__.py0000664000567000056710000000000013044372736022225 0ustar jenkinsjenkins00000000000000neutron-8.4.0/rally-jobs/plugins/README.rst0000664000567000056710000000060613044372736021617 0ustar jenkinsjenkins00000000000000Rally plugins ============= All *.py modules from this directory will be auto-loaded by Rally and all plugins will be discoverable. There is no need of any extra configuration and there is no difference between writing them here and in rally code base. Note that it is better to push all interesting and useful benchmarks to Rally code base, this simplifies administration for Operators. neutron-8.4.0/rally-jobs/extra/0000775000567000056710000000000013044373210017554 5ustar jenkinsjenkins00000000000000neutron-8.4.0/rally-jobs/extra/README.rst0000664000567000056710000000025513044372736021261 0ustar jenkinsjenkins00000000000000Extra files =========== All files from this directory will be copy pasted to gates, so you are able to use absolute path in rally tasks. 
Files will be in ~/.rally/extra/*
neutron-8.4.0/rally-jobs/neutron-neutron.yaml0000664000567000056710000001407113044372760022513 0ustar jenkinsjenkins00000000000000---
  NeutronNetworks.create_and_list_networks:
    -
      runner:
        type: "constant"
        times: 40
        concurrency: 20
      context:
        users:
          tenants: 1
          users_per_tenant: 1
        quotas:
          neutron:
            network: -1
      sla:
        failure_rate:
          max: 0
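  # Editor's note (an illustrative annotation, not part of the original
  # file): every scenario entry below follows the same shape as the one
  # above. A hedged guide to the keys, based on Rally's task format:
  #
  #   args:    keyword arguments passed to the benchmark scenario itself
  #            (e.g. subnets_per_network); absent when defaults suffice.
  #   runner:  how the scenario is driven; "constant" runs it 'times'
  #            times in total, with 'concurrency' parallel iterations.
  #   context: the environment Rally prepares first; here, tenants/users
  #            plus Neutron quotas raised to unlimited (-1) so the load
  #            is not throttled by quota errors.
  #   sla:     pass/fail criteria for the run as a whole; a failure_rate
  #            max of 0 means a single failed iteration fails the job.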
  NeutronNetworks.create_and_list_subnets:
    -
      args:
        subnets_per_network: 2
      runner:
        type: "constant"
        times: 40
        concurrency: 20
      context:
        users:
          tenants: 1
          users_per_tenant: 1
        quotas:
          neutron:
            subnet: -1
            network: -1
      sla:
        failure_rate:
          max: 0

  NeutronNetworks.create_and_list_routers:
    -
      args:
        network_create_args:
        subnet_create_args:
        subnet_cidr_start: "1.1.0.0/30"
        subnets_per_network: 2
        router_create_args:
      runner:
        type: "constant"
        times: 40
        concurrency: 20
      context:
        users:
          tenants: 1
          users_per_tenant: 1
        quotas:
          neutron:
            network: -1
            subnet: -1
            router: -1
      sla:
        failure_rate:
          max: 0

  NeutronNetworks.create_and_list_ports:
    -
      args:
        network_create_args:
        port_create_args:
        ports_per_network: 2
      runner:
        type: "constant"
        times: 40
        concurrency: 20
      context:
        users:
          tenants: 1
          users_per_tenant: 1
        quotas:
          neutron:
            network: -1
            subnet: -1
            router: -1
            port: -1
      sla:
        failure_rate:
          max: 0

  NeutronNetworks.create_and_update_networks:
    -
      args:
        network_create_args: {}
        network_update_args:
          admin_state_up: False
          name: "_updated"
      runner:
        type: "constant"
        times: 40
        concurrency: 20
      context:
        users:
          tenants: 1
          users_per_tenant: 1
        quotas:
          neutron:
            network: -1
      sla:
        failure_rate:
          max: 0

  NeutronNetworks.create_and_update_subnets:
    -
      args:
        network_create_args: {}
        subnet_create_args: {}
        subnet_cidr_start: "1.4.0.0/16"
        subnets_per_network: 2
        subnet_update_args:
          enable_dhcp: False
          name: "_subnet_updated"
      runner:
        type: "constant"
        times: 40
        concurrency: 20
      context:
        users:
          tenants: 5
          users_per_tenant: 5
        quotas:
          neutron:
            network: -1
            subnet: -1
      sla:
        failure_rate:
          max: 0

  NeutronNetworks.create_and_update_routers:
    -
      args:
        network_create_args: {}
        subnet_create_args: {}
        subnet_cidr_start: "1.1.0.0/30"
        subnets_per_network: 2
        router_create_args: {}
        router_update_args:
          admin_state_up: False
          name: "_router_updated"
      runner:
        type: "constant"
        times: 40
        concurrency: 20
      context:
        users:
          tenants: 1
          users_per_tenant: 1
        quotas:
          neutron:
            network: -1
            subnet: -1
            router: -1
      sla:
        failure_rate:
          max: 0

  NeutronNetworks.create_and_update_ports:
    -
      args:
        network_create_args: {}
        port_create_args: {}
        ports_per_network: 5
        port_update_args:
          admin_state_up: False
          device_id: "dummy_id"
          device_owner: "dummy_owner"
          name: "_port_updated"
      runner:
        type: "constant"
        times: 40
        concurrency: 20
      context:
        users:
          tenants: 1
          users_per_tenant: 1
        quotas:
          neutron:
            network: -1
            port: -1
      sla:
        failure_rate:
          max: 0

  NeutronNetworks.create_and_delete_networks:
    -
      args:
        network_create_args: {}
      runner:
        type: "constant"
        times: 40
        concurrency: 20
      context:
        users:
          tenants: 1
          users_per_tenant: 1
        quotas:
          neutron:
            network: -1
            subnet: -1
      sla:
        failure_rate:
          max: 0

  NeutronNetworks.create_and_delete_subnets:
    -
      args:
        network_create_args: {}
        subnet_create_args: {}
        subnet_cidr_start: "1.1.0.0/30"
        subnets_per_network: 2
      runner:
        type: "constant"
        times: 40
        concurrency: 20
      context:
        users:
          tenants: 1
          users_per_tenant: 1
        quotas:
          neutron:
            network: -1
            subnet: -1
      sla:
        failure_rate:
          max: 0

  NeutronNetworks.create_and_delete_routers:
    -
      args:
        network_create_args: {}
        subnet_create_args: {}
        subnet_cidr_start: "1.1.0.0/30"
        subnets_per_network: 2
        router_create_args: {}
      runner:
        type: "constant"
        times: 40
        concurrency: 20
      context:
        users:
          tenants: 1
          users_per_tenant: 1
        quotas:
          neutron:
            network: -1
            subnet: -1
            router: -1
      sla:
        failure_rate:
          max: 0

  NeutronNetworks.create_and_delete_ports:
    -
      args:
        network_create_args: {}
        port_create_args: {}
        ports_per_network: 5
      runner:
        type: "constant"
        times: 40
        concurrency: 20
      context:
        users:
          tenants: 1
          users_per_tenant: 1
        quotas:
          neutron:
            network: -1
            port: -1
      sla:
        failure_rate:
          max: 0

  Quotas.neutron_update:
    -
      args:
        max_quota: 1024
      runner:
        type: "constant"
        times: 40
        concurrency: 20
      context:
        users:
          tenants: 20
          users_per_tenant: 1
      sla:
        failure_rate:
          max: 0
neutron-8.4.0/rally-jobs/README.rst0000664000567000056710000000177713044372736020140 0ustar jenkinsjenkins00000000000000
Rally job related files
=======================

This directory contains rally tasks and plugins that are run by OpenStack CI.

Structure
---------

* plugins - directory where you can add rally plugins. Almost everything in
  Rally is a plugin: benchmark contexts, benchmark scenarios, SLA checks,
  generic cleanup resources, ....

* extra - all files from this directory will be copy pasted to gates, so you
  are able to use absolute paths in rally tasks. Files will be located in
  ~/.rally/extra/*

* neutron-neutron.yaml is a task that is run in gates against OpenStack with
  the Neutron service deployed by DevStack

Useful links
------------

* More about Rally: https://rally.readthedocs.org/en/latest/

* Rally release notes: https://rally.readthedocs.org/en/latest/release_notes.html

* How to add rally-gates: https://rally.readthedocs.org/en/latest/gates.html

* About plugins: https://rally.readthedocs.org/en/latest/plugins.html

* Plugin samples: https://github.com/openstack/rally/tree/master/samples/plugins
neutron-8.4.0/TESTING.rst0000664000567000056710000006053713044372760016236 0ustar jenkinsjenkins00000000000000
.. Licensed under the Apache License, Version 2.0 (the "License"); you may
   not use this file except in compliance with the License. You may obtain
   a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
   WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
   License for the specific language governing permissions and limitations
   under the License.

   Convention for heading levels in Neutron devref:
   =======  Heading 0 (reserved for the title in a document)
   -------  Heading 1
   ~~~~~~~  Heading 2
   +++++++  Heading 3
   '''''''  Heading 4
   (Avoid deeper levels because they do not render well.)

Testing Neutron
===============

Why Should You Care
-------------------
There are two ways to approach testing:

1) Write unit tests because they're required to get your patch merged.
   This typically involves mock heavy tests that assert that your code is as
   written.
2) Putting as much thought into your testing strategy as you do to the rest
   of your code. Use different layers of testing as appropriate to provide
   high *quality* coverage. Are you touching an agent? Test it against an
   actual system! Are you adding a new API? Test it for race conditions
   against a real database! Are you adding a new cross-cutting feature?
   Test that it does what it's supposed to do when run on a real cloud!

Do you feel the need to verify your change manually? If so, the next few
sections attempt to guide you through Neutron's different test infrastructures
to help you make intelligent decisions and best exploit Neutron's test
offerings.

Definitions
-----------
We will talk about three classes of tests: unit, functional and integration.
Each respective category typically targets a larger scope of code. Other than
that broad categorization, here are a few more characteristics:

* Unit tests - Should be able to run on your laptop, directly following a
  'git clone' of the project. The underlying system must not be mutated;
  mocks can be used to achieve this. A unit test typically targets a function
  or class.
* Functional tests - Run against a pre-configured environment
  (tools/configure_for_func_testing.sh). Typically test a component such as
  an agent using no mocks.
* Integration tests - Run against a running cloud, often target the API level,
  but also 'scenarios' or 'user stories'. You may find such tests under
  tests/api, tests/fullstack and in the Tempest and Rally projects.

Tests in the Neutron tree are typically organized by the testing
infrastructure used, and not by the scope of the test. For example, many
tests under the 'unit' directory invoke an API call and assert that the
expected output was received. The scope of such a test is the entire Neutron
server stack, and clearly not a specific function such as in a typical unit
test.

Testing Frameworks
------------------

The different frameworks are listed below. The intent is to list the
capabilities of each testing framework to help the reader understand when
each tool should be used. Remember that when adding code that touches many
areas of Neutron, each area should be tested with the appropriate framework.
Overlap between different test layers is often desirable and encouraged.

Unit Tests
~~~~~~~~~~

Unit tests (neutron/tests/unit/) are meant to cover as much code as possible.
They are designed to test the various pieces of the Neutron tree to make sure
any new changes don't break existing functionality. Unit tests have no
requirements nor make changes to the system they are running on. They use an
in-memory sqlite database to test DB interaction.

At the start of each test run:

* RPC listeners are mocked away.
* The fake Oslo messaging driver is used.

At the end of each test run:

* Mocks are automatically reverted.
* The in-memory database is cleared of content, but its schema is maintained.
* The global Oslo configuration object is reset.

The unit testing framework can be used to effectively test database
interaction. For example, distributed routers allocate a MAC address for
every host running an OVS agent. One of DVR's DB mixins implements a method
that lists all host MAC addresses. Its test looks like this:

.. code-block:: python

    def test_get_dvr_mac_address_list(self):
        self._create_dvr_mac_entry('host_1', 'mac_1')
        self._create_dvr_mac_entry('host_2', 'mac_2')
        mac_list = self.mixin.get_dvr_mac_address_list(self.ctx)
        self.assertEqual(2, len(mac_list))

It inserts two new host MAC addresses, invokes the method under test and
asserts its output. The test has many things going for it:

* It targets the method under test correctly, not taking on a larger scope
  than is necessary.
* It does not use mocks to assert that methods were called; it simply
  invokes the method and asserts its output (in this case, that the list
  method returns two records). This is allowed by the fact that the method
  was built to be testable - the method has clear input and output with no
  side effects.

Functional Tests
~~~~~~~~~~~~~~~~

Functional tests (neutron/tests/functional/) are intended to validate actual
system interaction. Mocks should be used sparingly, if at all.
Care should be taken to ensure that existing system resources are not
modified and that resources created in tests are properly cleaned up both on
test success and failure.

Note that when run at the gate, the functional tests compile OVS from source.
Check out neutron/tests/contrib/gate_hook.sh. Other jobs presently use OVS
from packages.

Let's examine the benefits of the functional testing framework. Neutron
offers a library called 'ip_lib' that wraps around the 'ip' binary. One of
its methods is called 'device_exists' which accepts a device name and a
namespace and returns True if the device exists in the given namespace. It's
easy to build a test that targets the method directly, and such a test would
be considered a 'unit' test. However, what framework should such a test use?
A test using the unit tests framework could not mutate state on the system,
and so could not actually create a device and assert that it now exists. Such
a test would look roughly like this:

* It would mock 'execute', a method that executes shell commands against the
  system to return an IP device named 'foo'.
* It would then assert that when 'device_exists' is called with 'foo', it
  returns True, but when called with a different device name it returns
  False.
* It would most likely assert that 'execute' was called using something like:
  'ip link show foo'.

The value of such a test is arguable. Remember that new tests are not free,
they need to be maintained. Code is often refactored, reimplemented and
optimized.

* There are other ways to find out if a device exists (such as by looking at
  '/sys/class/net'), and in such a case the test would have to be updated.
* Methods are mocked using their name. When methods are renamed, moved or
  removed, their mocks must be updated. This slows down development for
  avoidable reasons.
* Most importantly, the test does not assert the behavior of the method. It
  merely asserts that the code is as written.

When adding a functional test for 'device_exists', several framework level
methods were added. These methods may now be used by other tests as well. One
such method creates a virtual device in a namespace, and ensures that both
the namespace and the device are cleaned up at the end of the test run
regardless of success or failure using the 'addCleanup' method. The test
generates details for a temporary device, asserts that a device by that name
does not exist, creates that device, asserts that it now exists, deletes it,
and asserts that it no longer exists. Such a test avoids all three of the
issues mentioned above, which would apply had it been written using the unit
testing framework.
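The flow just described maps to roughly the following test. This is a sketch
only; the fixture and wrapper calls (NamespaceFixture, IPWrapper) are close
to, but not copied verbatim from, the tree:

.. code-block:: python

    # A sketch of the functional flow described above; names are
    # illustrative rather than verbatim.
    from neutron.agent.linux import ip_lib
    from neutron.tests.common import net_helpers
    from neutron.tests.functional import base


    class TestDeviceExists(base.BaseSudoTestCase):

        def test_device_exists(self):
            # The fixture registers its own cleanup, so the namespace
            # (and any devices inside it) is removed on success and on
            # failure alike.
            namespace = self.useFixture(
                net_helpers.NamespaceFixture()).name
            device = 'test-veth0'

            self.assertFalse(
                ip_lib.device_exists(device, namespace=namespace))

            # Create a veth pair inside the namespace, then assert the
            # device is now visible there.
            ip_wrapper = ip_lib.IPWrapper(namespace=namespace)
            ip_wrapper.add_veth(device, 'test-veth1')
            self.assertTrue(
                ip_lib.device_exists(device, namespace=namespace))

            # Delete it and assert it is gone again.
            ip_wrapper.del_veth(device)
            self.assertFalse(
                ip_lib.device_exists(device, namespace=namespace))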
Functional tests are also used to target larger scope, such as agents. Many
good examples exist: see the OVS, L3 and DHCP agents functional tests. Such
tests target a top level agent method and assert that the expected system
interaction was indeed performed. For example, to test the DHCP agent's top
level method that accepts network attributes and configures dnsmasq for that
network, the test:

* Instantiates an instance of the DHCP agent class (but does not start its
  process).
* Calls its top level function with prepared data.
* Creates a temporary namespace and device, and calls 'dhclient' from that
  namespace.
* Asserts that the device successfully obtained the expected IP address.

Fullstack Tests
~~~~~~~~~~~~~~~

Why?
++++

The idea behind "fullstack" testing is to fill a gap between unit + functional
tests and Tempest. Tempest tests are expensive to run, and target black box
API tests exclusively. Tempest requires an OpenStack deployment to be run
against, which can be difficult to configure and set up. Full stack testing
addresses these issues by taking care of the deployment itself, according to
the topology that the test requires. Developers further benefit from full
stack testing as it can sufficiently simulate a real environment and provide
a rapidly reproducible way to verify code while you're still writing it.

How?
++++

Full stack tests set up their own Neutron processes (server & agents). They
assume a working Rabbit and MySQL server before the run starts. Instructions
on how to run fullstack tests on a VM are available below.

Each test defines its own topology (what and how many servers and agents
should be running).

Since the test runs on the machine itself, full stack testing enables
"white box" testing. This means that you can, for example, create a router
through the API and then assert that a namespace was created for it.

Full stack tests run in the Neutron tree with Neutron resources alone. You
may use the Neutron API (the Neutron server is set to NOAUTH so that Keystone
is out of the picture). VMs may be simulated with a container-like class:
neutron.tests.fullstack.resources.machine.FakeFullstackMachine. An example of
its usage may be found at: neutron/tests/fullstack/test_connectivity.py.
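For a feel of the framework, here is a condensed paraphrase of that
connectivity test. Environment setup (hosts, agents) is elided, and the
names are reconstructed from memory rather than copied verbatim:

.. code-block:: python

    # Condensed paraphrase of test_connectivity.py; not verbatim.
    from oslo_utils import uuidutils

    from neutron.tests.fullstack import base
    from neutron.tests.fullstack.resources import machine


    class TestConnectivitySameNetwork(base.BaseFullStackTestCase):

        def test_connectivity(self):
            tenant_uuid = uuidutils.generate_uuid()

            network = self.safe_client.create_network(tenant_uuid)
            self.safe_client.create_subnet(
                tenant_uuid, network['id'], '20.0.0.0/24')

            # Two fake VMs on two simulated hosts, same network.
            vms = [
                self.useFixture(
                    machine.FakeFullstackMachine(
                        self.environment.hosts[i],
                        network['id'],
                        tenant_uuid,
                        self.safe_client))
                for i in range(2)]

            for vm in vms:
                vm.block_until_boot()

            # White-box assertion: actual dataplane connectivity.
            vms[0].block_until_ping(vms[1].ip)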
Full stack testing can simulate multi node testing by starting an agent
multiple times. Specifically, each node would have its own copy of the
OVS/LinuxBridge/DHCP/L3 agents, all configured with the same "host" value.
Each OVS agent is connected to its own pair of br-int/br-ex, and those
bridges are then interconnected. For the LinuxBridge agent, each agent is
started in its own namespace, called "host-<index>". Such namespaces are
connected to each other with a "central" OVS bridge.

.. image:: images/fullstack_multinode_simulation.png

Segmentation at the database layer is guaranteed by creating a database per
test. The messaging layer achieves segmentation by utilizing a RabbitMQ
feature called 'vhosts'. In short, just like a MySQL server serves multiple
databases, so can a RabbitMQ server serve multiple messaging domains.
Exchanges and queues in one 'vhost' are segmented from those in another
'vhost'.

When?
+++++

1) You'd like to test the interaction between Neutron components (server
   and agents) and have already tested each component in isolation via unit
   or functional tests. You should have many unit tests, fewer tests to test
   a component and even fewer to test their interaction. Edge cases should
   not be tested with full stack testing.
2) You'd like to increase coverage by testing features that require multi
   node testing such as l2pop, L3 HA and DVR.
3) You'd like to test agent restarts. We've found bugs in the OVS, DHCP and
   L3 agents and haven't found an effective way to test these scenarios. Full
   stack testing can help here as the full stack infrastructure can restart
   an agent during the test.

Example
+++++++

Neutron offers a Quality of Service API, initially offering bandwidth
capping at the port level. In the reference implementation, it does this by
utilizing an OVS feature.
neutron.tests.fullstack.test_qos.TestQoSWithOvsAgent.test_qos_policy_rule_lifecycle
is a positive example of how the fullstack testing infrastructure should be
used. It creates a network, subnet, QoS policy & rule and a port utilizing
that policy. It then asserts that the expected bandwidth limitation is
present on the OVS bridge connected to that port. The test is a true
integration test, in the sense that it invokes the API and then asserts that
Neutron interacted with the hypervisor appropriately.

API Tests
~~~~~~~~~

API tests (neutron/tests/api/) are intended to ensure the function and
stability of the Neutron API. As much as possible, changes to this path
should not be made at the same time as changes to the code to limit the
potential for introducing backwards-incompatible changes, although the same
patch that introduces a new API should include an API test.

Since API tests target a deployed Neutron daemon that is not test-managed,
they should not depend on controlling the runtime configuration of the
target daemon. API tests should be black-box - no assumptions should be made
about implementation. Only the contract defined by Neutron's REST API should
be validated, and all interaction with the daemon should be via a REST
client.

neutron/tests/api was copied from the Tempest project. At the time, there was
an overlap of tests between the Tempest and Neutron repositories. This
overlap was then eliminated by carving out a subset of resources that belong
to Tempest, with the rest in Neutron.

API tests that belong to Tempest deal with a subset of Neutron's resources:

* Port
* Network
* Subnet
* Security Group
* Router
* Floating IP

These resources were chosen for their ubiquity. They are found in most
Neutron deployments regardless of plugin, and are directly involved in the
networking and security of an instance. Together, they form the bare minimum
needed by Neutron. This excludes extensions to these resources (for example:
extra DHCP options to subnets, or snat_gateway mode to routers) that are not
mandatory in the majority of cases.

Tests for other resources should be contributed to the Neutron repository.
Scenario tests should be similarly split up between Tempest and Neutron
according to the API they're targeting.
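To make "black-box, REST-only" concrete, here is a hedged sketch that
exercises the networks contract with nothing but an HTTP client. The
endpoint and token values are assumptions for a local DevStack; the real
tests in neutron/tests/api use a tempest-style REST client rather than raw
'requests', but the contract being asserted is the same:

.. code-block:: python

    # Illustrative only; endpoint and token are assumptions.
    import requests

    NEUTRON_ENDPOINT = 'http://127.0.0.1:9696'   # assumed local devstack
    TOKEN = 'replace-with-a-real-token'          # assumed


    def list_networks():
        # Only the REST contract is exercised; no Neutron internals
        # are touched or assumed.
        resp = requests.get(
            NEUTRON_ENDPOINT + '/v2.0/networks',
            headers={'X-Auth-Token': TOKEN})
        assert resp.status_code == 200
        return resp.json()['networks']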
Development Process
-------------------

It is expected that any new changes that are proposed for merge come with
tests for that feature or code area. Any bug fixes that are submitted must
also have tests to prove that they stay fixed! In addition, before proposing
for merge, all of the current tests should be passing.

Structure of the Unit Test Tree
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

The structure of the unit test tree should match the structure of the code
tree, e.g. ::

 - target module: neutron.agent.utils

 - test module: neutron.tests.unit.agent.test_utils

Unit test modules should have the same path under neutron/tests/unit/ as the
module they target has under neutron/, and their name should be the name of
the target module prefixed by `test_`. This requirement is intended to make
it easier for developers to find the unit tests for a given module.

Similarly, when a test module targets a package, that module's name should be
the name of the package prefixed by `test_` with the same path as when a test
targets a module, e.g. ::

 - target package: neutron.ipam

 - test module: neutron.tests.unit.test_ipam

The following command can be used to validate whether the unit test tree is
structured according to the above requirements: ::

    ./tools/check_unit_test_structure.sh

Where appropriate, exceptions can be added to the above script. If code is
not part of the Neutron namespace, for example, it's probably reasonable to
exclude its unit tests from the check.

Running Tests
-------------

There are three mechanisms for running tests: run_tests.sh, tox, and nose2.
Before submitting a patch for review you should always ensure all tests pass;
a tox run is triggered by the Jenkins gate executed on Gerrit for each patch
pushed for review.

With these mechanisms you can either run the tests in the standard
environment or create a virtual environment to run them in.

By default after running all of the tests, any pep8 errors found in the tree
will be reported.

With `run_tests.sh`
~~~~~~~~~~~~~~~~~~~

You can use the `run_tests.sh` script in the root source directory to execute
tests in a virtualenv::

    ./run_tests.sh -V

With `nose2`
~~~~~~~~~~~~

You can use `nose2`_ to run individual tests, as well as use it for debugging
portions of your code::

    source .venv/bin/activate
    pip install nose2
    nose2

There are disadvantages to running nose2 - the tests are run sequentially, so
race condition bugs will not be triggered, and the full test suite will take
significantly longer than tox & testr. The upside is that testr has some
rough edges when it comes to diagnosing errors and failures, and there is no
easy way to set a breakpoint in the Neutron code, and enter an interactive
debugging session while using testr.

Note that nose2's predecessor, `nose`_, does not understand
`load_tests protocol`_ introduced in Python 2.7. This limitation will result
in errors being reported for modules that depend on load_tests (usually due
to use of `testscenarios`_). nose, therefore, is not supported, while nose2
is.

.. _nose2: http://nose2.readthedocs.org/en/latest/index.html
.. _nose: https://nose.readthedocs.org/en/latest/index.html
.. _load_tests protocol: https://docs.python.org/2/library/unittest.html#load-tests-protocol
.. _testscenarios: https://pypi.python.org/pypi/testscenarios/

With `tox`
~~~~~~~~~~

Neutron, like other OpenStack projects, uses `tox`_ for managing the virtual
environments for running test cases. It uses `Testr`_ for managing the
running of the test cases.

Tox handles the creation of a series of `virtualenvs`_ that target specific
versions of Python.

Testr handles the parallel execution of series of test cases as well as the
tracking of long-running tests and other things.

For more information on the standard Tox-based test infrastructure used by
OpenStack and how to do some common test/debugging procedures with Testr,
see this wiki page: https://wiki.openstack.org/wiki/Testr

.. _Testr: https://wiki.openstack.org/wiki/Testr
.. _tox: http://tox.readthedocs.org/en/latest/
.. _virtualenvs: https://pypi.python.org/pypi/virtualenv

PEP8 and Unit Tests
+++++++++++++++++++

Running pep8 and unit tests is as easy as executing this in the root
directory of the Neutron source code::

    tox

To run only pep8::

    tox -e pep8

Since pep8 includes running pylint on all files, it can take quite some time
to run. To restrict the pylint check to only the files altered by the latest
patch changes::

    tox -e pep8 HEAD~1

To run only the unit tests::

    tox -e py27

Functional Tests
++++++++++++++++

To run functional tests that do not require sudo privileges or
system-specific dependencies::

    tox -e functional

To run all the functional tests, including those requiring sudo privileges
and system-specific dependencies, the procedure defined by
tools/configure_for_func_testing.sh should be followed.

IMPORTANT: configure_for_func_testing.sh relies on DevStack to perform
extensive modification to the underlying host. Execution of the script
requires sudo privileges and it is recommended that the following commands
be invoked only on a clean and disposable VM.
A VM that has had DevStack previously installed on it is also fine. ::

    git clone https://git.openstack.org/openstack-dev/devstack ../devstack
    ./tools/configure_for_func_testing.sh ../devstack -i
    tox -e dsvm-functional

The '-i' option is optional and instructs the script to use DevStack to
install and configure all of Neutron's package dependencies. It is not
necessary to provide this option if DevStack has already been used to deploy
Neutron to the target host.

Fullstack Tests
+++++++++++++++

To run all the full-stack tests, you may use: ::

    tox -e dsvm-fullstack

Since full-stack tests often require the same resources and dependencies as
the functional tests, using the configuration script
tools/configure_for_func_testing.sh is advised (as described above). When
running full-stack tests on a clean VM for the first time, we advise running
./stack.sh successfully first to make sure all of Neutron's dependencies are
met. Full-stack based Neutron daemons produce logs to a sub-folder in
/tmp/dsvm-fullstack-logs (for example, a test named "test_example" will
produce logs to /tmp/dsvm-fullstack-logs/test_example/), so that will be a
good place to look if your test is failing. The fullstack test suite assumes
that the 240.0.0.0/4 (Class E) range in the root namespace of the test
machine is available for its usage.

API Tests
+++++++++

To run the api tests, deploy Tempest and Neutron with DevStack and then run
the following command: ::

    tox -e api

If tempest.conf cannot be found at the default location used by DevStack
(/opt/stack/tempest/etc) it may be necessary to set TEMPEST_CONFIG_DIR before
invoking tox: ::

    export TEMPEST_CONFIG_DIR=[path to dir containing tempest.conf]
    tox -e api

Running Individual Tests
~~~~~~~~~~~~~~~~~~~~~~~~

For running individual test modules, cases or tests, you just need to pass
the dot-separated path you want as an argument to it.

For example, the following would run only a single test or test case::

    $ ./run_tests.sh neutron.tests.unit.test_manager
    $ ./run_tests.sh neutron.tests.unit.test_manager.NeutronManagerTestCase
    $ ./run_tests.sh neutron.tests.unit.test_manager.NeutronManagerTestCase.test_service_plugin_is_loaded

or::

    $ tox -e py27 neutron.tests.unit.test_manager
    $ tox -e py27 neutron.tests.unit.test_manager.NeutronManagerTestCase
    $ tox -e py27 neutron.tests.unit.test_manager.NeutronManagerTestCase.test_service_plugin_is_loaded

If you want to pass other arguments to ostestr, you can do the following::

    $ tox -e py27 -- --regex neutron.tests.unit.test_manager --serial

Coverage
--------

Neutron has a fast growing code base and there are plenty of areas that need
better coverage.

To get a grasp of the areas where tests are needed, you can check current
unit tests coverage by running::

    $ ./run_tests.sh -c

or by running::

    $ tox -ecover

Since the coverage command can only show unit test coverage, a coverage
document is maintained that shows test coverage per area of code in:
doc/source/devref/testing_coverage.rst. You could also rely on Zuul logs,
that are generated post-merge (not every project builds coverage results).
To access them, do the following:

* Check out the latest `merge commit `_
* Go to: http://logs.openstack.org///post/neutron-coverage/.
* `Spec `_ is a work in progress to provide a better landing page.

Debugging
---------

By default, calls to pdb.set_trace() will be ignored when tests are run.
For pdb statements to work, invoke run_tests as follows::

    $ ./run_tests.sh -d [test module path]

It's possible to debug tests in a tox environment::

    $ tox -e venv -- python -m testtools.run [test module path]

Tox-created virtual environments (venv's) can also be activated after a tox
run and reused for debugging::

    $ tox -e venv
    $ . .tox/venv/bin/activate
    $ python -m testtools.run [test module path]

Tox packages and installs the Neutron source tree in a given venv on every
invocation, but if modifications need to be made between invocations (e.g.
adding more pdb statements), it is recommended that the source tree be
installed in the venv in editable mode::

    # run this only after activating the venv
    $ pip install --editable .

Editable mode ensures that changes made to the source tree are automatically
reflected in the venv, and that such changes are not overwritten during the
next tox run.

Post-mortem Debugging
~~~~~~~~~~~~~~~~~~~~~

Setting OS_POST_MORTEM_DEBUGGER in the shell environment will ensure that the
debugger .post_mortem() method will be invoked on test failure::

    $ OS_POST_MORTEM_DEBUGGER=pdb ./run_tests.sh -d [test module path]

Supported debuggers are pdb and pudb. Pudb is a full-screen, console-based
visual debugger for Python which lets you inspect variables, the stack, and
breakpoints in a very visual way, keeping a high degree of compatibility with
pdb::

    $ ./.venv/bin/pip install pudb
    $ OS_POST_MORTEM_DEBUGGER=pudb ./run_tests.sh -d [test module path]

References
~~~~~~~~~~

.. [#pudb] PUDB debugger: https://pypi.python.org/pypi/pudb
neutron-8.4.0/MANIFEST.in0000664000567000056710000000056613044372760016125 0ustar jenkinsjenkins00000000000000
include AUTHORS
include README.rst
include ChangeLog
include LICENSE
include neutron/db/migration/README
include neutron/db/migration/alembic.ini
include neutron/db/migration/alembic_migrations/script.py.mako
recursive-include neutron/db/migration/alembic_migrations/versions *
recursive-include neutron/locale *

exclude .gitignore
exclude .gitreview

global-exclude *.pyc
neutron-8.4.0/setup.cfg0000664000567000056710000002230613044373210016177 0ustar jenkinsjenkins00000000000000
[metadata]
name = neutron
summary = OpenStack Networking
description-file =
    README.rst
author = OpenStack
author-email = openstack-dev@lists.openstack.org
home-page = http://www.openstack.org/
classifier =
    Environment :: OpenStack
    Intended Audience :: Information Technology
    Intended Audience :: System Administrators
    License :: OSI Approved :: Apache Software License
    Operating System :: POSIX :: Linux
    Programming Language :: Python
    Programming Language :: Python :: 2
    Programming Language :: Python :: 2.7
    Programming Language :: Python :: 3
    Programming Language :: Python :: 3.4

[files]
packages =
    neutron
data_files =
    etc/neutron =
        etc/api-paste.ini
        etc/policy.json
        etc/rootwrap.conf
    etc/neutron/rootwrap.d =
        etc/neutron/rootwrap.d/debug.filters
        etc/neutron/rootwrap.d/dhcp.filters
        etc/neutron/rootwrap.d/dibbler.filters
        etc/neutron/rootwrap.d/iptables-firewall.filters
        etc/neutron/rootwrap.d/ebtables.filters
        etc/neutron/rootwrap.d/ipset-firewall.filters
        etc/neutron/rootwrap.d/l3.filters
        etc/neutron/rootwrap.d/linuxbridge-plugin.filters
        etc/neutron/rootwrap.d/openvswitch-plugin.filters
scripts =
    bin/neutron-rootwrap-xen-dom0

[entry_points]
console_scripts =
    neutron-bgp-dragent = neutron.cmd.eventlet.agents.bgp_dragent:main
    neutron-db-manage = neutron.db.migration.cli:main
    neutron-debug = neutron.debug.shell:main
    neutron-dhcp-agent = neutron.cmd.eventlet.agents.dhcp:main
    neutron-keepalived-state-change = neutron.cmd.keepalived_state_change:main
    neutron-ipset-cleanup = neutron.cmd.ipset_cleanup:main
    neutron-l3-agent = neutron.cmd.eventlet.agents.l3:main
    neutron-linuxbridge-agent = neutron.cmd.eventlet.plugins.linuxbridge_neutron_agent:main
    neutron-linuxbridge-cleanup = neutron.cmd.linuxbridge_cleanup:main
    neutron-macvtap-agent = neutron.cmd.eventlet.plugins.macvtap_neutron_agent:main
    neutron-metadata-agent = neutron.cmd.eventlet.agents.metadata:main
    neutron-netns-cleanup = neutron.cmd.netns_cleanup:main
    neutron-ns-metadata-proxy = neutron.cmd.eventlet.agents.metadata_proxy:main
    neutron-openvswitch-agent = neutron.cmd.eventlet.plugins.ovs_neutron_agent:main
    neutron-ovs-cleanup = neutron.cmd.ovs_cleanup:main
    neutron-pd-notify = neutron.cmd.pd_notify:main
    neutron-server = neutron.cmd.eventlet.server:main
    neutron-rpc-server = neutron.cmd.eventlet.server:main_rpc_eventlet
    neutron-rootwrap = oslo_rootwrap.cmd:main
    neutron-rootwrap-daemon = oslo_rootwrap.cmd:daemon
    neutron-usage-audit = neutron.cmd.eventlet.usage_audit:main
    neutron-metering-agent = neutron.cmd.eventlet.services.metering_agent:main
    neutron-sriov-nic-agent = neutron.cmd.eventlet.plugins.sriov_nic_neutron_agent:main
    neutron-sanity-check = neutron.cmd.sanity_check:main
neutron.core_plugins =
    ml2 = neutron.plugins.ml2.plugin:Ml2Plugin
neutron.service_plugins =
    dummy = neutron.tests.unit.dummy_plugin:DummyServicePlugin
    router = neutron.services.l3_router.l3_router_plugin:L3RouterPlugin
    firewall = neutron_fwaas.services.firewall.fwaas_plugin:FirewallPlugin
    lbaas = neutron_lbaas.services.loadbalancer.plugin:LoadBalancerPlugin
    vpnaas = neutron_vpnaas.services.vpn.plugin:VPNDriverPlugin
    metering = neutron.services.metering.metering_plugin:MeteringPlugin
    neutron.services.firewall.fwaas_plugin.FirewallPlugin = neutron_fwaas.services.firewall.fwaas_plugin:FirewallPlugin
    neutron.services.loadbalancer.plugin.LoadBalancerPlugin = neutron_lbaas.services.loadbalancer.plugin:LoadBalancerPlugin
    neutron.services.vpn.plugin.VPNDriverPlugin = neutron_vpnaas.services.vpn.plugin:VPNDriverPlugin
    qos = neutron.services.qos.qos_plugin:QoSPlugin
    bgp = neutron.services.bgp.bgp_plugin:BgpPlugin
    tag = neutron.services.tag.tag_plugin:TagPlugin
    flavors = neutron.services.flavors.flavors_plugin:FlavorsPlugin
    auto_allocate = neutron.services.auto_allocate.plugin:Plugin
    network_ip_availability = neutron.services.network_ip_availability.plugin:NetworkIPAvailabilityPlugin
    timestamp_core = neutron.services.timestamp.timestamp_plugin:TimeStampPlugin
neutron.qos.notification_drivers =
    message_queue = neutron.services.qos.notification_drivers.message_queue:RpcQosServiceNotificationDriver
neutron.ml2.type_drivers =
    flat = neutron.plugins.ml2.drivers.type_flat:FlatTypeDriver
    local = neutron.plugins.ml2.drivers.type_local:LocalTypeDriver
    vlan = neutron.plugins.ml2.drivers.type_vlan:VlanTypeDriver
    geneve = neutron.plugins.ml2.drivers.type_geneve:GeneveTypeDriver
    gre = neutron.plugins.ml2.drivers.type_gre:GreTypeDriver
    vxlan = neutron.plugins.ml2.drivers.type_vxlan:VxlanTypeDriver
neutron.ml2.mechanism_drivers =
    logger = neutron.tests.unit.plugins.ml2.drivers.mechanism_logger:LoggerMechanismDriver
    test = neutron.tests.unit.plugins.ml2.drivers.mechanism_test:TestMechanismDriver
    linuxbridge = neutron.plugins.ml2.drivers.linuxbridge.mech_driver.mech_linuxbridge:LinuxbridgeMechanismDriver
    macvtap = neutron.plugins.ml2.drivers.macvtap.mech_driver.mech_macvtap:MacvtapMechanismDriver
    openvswitch = neutron.plugins.ml2.drivers.openvswitch.mech_driver.mech_openvswitch:OpenvswitchMechanismDriver
    l2population = neutron.plugins.ml2.drivers.l2pop.mech_driver:L2populationMechanismDriver
    sriovnicswitch = neutron.plugins.ml2.drivers.mech_sriov.mech_driver.mech_driver:SriovNicSwitchMechanismDriver
    fake_agent = neutron.tests.unit.plugins.ml2.drivers.mech_fake_agent:FakeAgentMechanismDriver
neutron.ml2.extension_drivers =
    test = neutron.tests.unit.plugins.ml2.drivers.ext_test:TestExtensionDriver
    testdb = neutron.tests.unit.plugins.ml2.drivers.ext_test:TestDBExtensionDriver
    port_security = neutron.plugins.ml2.extensions.port_security:PortSecurityExtensionDriver
    qos = neutron.plugins.ml2.extensions.qos:QosExtensionDriver
    dns = neutron.plugins.ml2.extensions.dns_integration:DNSExtensionDriverML2
neutron.openstack.common.cache.backends =
    memory = neutron.openstack.common.cache._backends.memory:MemoryBackend
neutron.ipam_drivers =
    fake = neutron.tests.unit.ipam.fake_driver:FakeDriver
    internal = neutron.ipam.drivers.neutrondb_ipam.driver:NeutronDbPool
neutron.agent.l2.extensions =
    qos = neutron.agent.l2.extensions.qos:QosAgentExtension
neutron.qos.agent_drivers =
    ovs = neutron.plugins.ml2.drivers.openvswitch.agent.extension_drivers.qos_driver:QosOVSAgentDriver
    sriov = neutron.plugins.ml2.drivers.mech_sriov.agent.extension_drivers.qos_driver:QosSRIOVAgentDriver
    linuxbridge = neutron.plugins.ml2.drivers.linuxbridge.agent.extension_drivers.qos_driver:QosLinuxbridgeAgentDriver
neutron.agent.linux.pd_drivers =
    dibbler = neutron.agent.linux.dibbler:PDDibbler
neutron.services.external_dns_drivers =
    designate = neutron.services.externaldns.drivers.designate.driver:Designate
oslo.messaging.notify.drivers =
    neutron.openstack.common.notifier.log_notifier = oslo_messaging.notify._impl_log:LogDriver
    neutron.openstack.common.notifier.no_op_notifier = oslo_messaging.notify._impl_noop:NoOpDriver
    neutron.openstack.common.notifier.test_notifier = oslo_messaging.notify._impl_test:TestDriver
    neutron.openstack.common.notifier.rpc_notifier2 = oslo_messaging.notify.messaging:MessagingV2Driver
    neutron.openstack.common.notifier.rpc_notifier = oslo_messaging.notify.messaging:MessagingDriver
oslo.config.opts =
    neutron = neutron.opts:list_opts
    neutron.agent = neutron.opts:list_agent_opts
    neutron.base.agent = neutron.opts:list_base_agent_opts
    neutron.bgp.agent = neutron.services.bgp.common.opts:list_bgp_agent_opts
    neutron.db = neutron.opts:list_db_opts
    neutron.dhcp.agent = neutron.opts:list_dhcp_agent_opts
    neutron.extensions = neutron.opts:list_extension_opts
    neutron.l3.agent = neutron.opts:list_l3_agent_opts
    neutron.metadata.agent = neutron.opts:list_metadata_agent_opts
    neutron.metering.agent = neutron.opts:list_metering_agent_opts
    neutron.ml2 = neutron.opts:list_ml2_conf_opts
    neutron.ml2.linuxbridge.agent = neutron.opts:list_linux_bridge_opts
    neutron.ml2.macvtap.agent = neutron.opts:list_macvtap_opts
    neutron.ml2.ovs.agent = neutron.opts:list_ovs_opts
    neutron.ml2.sriov = neutron.opts:list_ml2_conf_sriov_opts
    neutron.ml2.sriov.agent = neutron.opts:list_sriov_agent_opts
    neutron.qos = neutron.opts:list_qos_opts
    nova.auth = neutron.opts:list_auth_opts
oslo.config.opts.defaults =
    neutron = neutron.common.config:set_cors_middleware_defaults
neutron.db.alembic_migrations =
    neutron = neutron.db.migration:alembic_migrations
neutron.interface_drivers =
    ivs = neutron.agent.linux.interface:IVSInterfaceDriver
    linuxbridge = neutron.agent.linux.interface:BridgeInterfaceDriver
    null = neutron.agent.linux.interface:NullDriver
neutron.agent.linux.interface:OVSInterfaceDriver neutron.agent.firewall_drivers = noop = neutron.agent.firewall:NoopFirewallDriver iptables = neutron.agent.linux.iptables_firewall:IptablesFirewallDriver iptables_hybrid = neutron.agent.linux.iptables_firewall:OVSHybridIptablesFirewallDriver openvswitch = neutron.agent.linux.openvswitch_firewall:OVSFirewallDriver [build_sphinx] all_files = 1 build-dir = doc/build source-dir = doc/source [extract_messages] keywords = _ gettext ngettext l_ lazy_gettext mapping_file = babel.cfg output_file = neutron/locale/neutron.pot [compile_catalog] directory = neutron/locale domain = neutron [update_catalog] domain = neutron output_dir = neutron/locale input_file = neutron/locale/neutron.pot [wheel] universal = 1 [pbr] warnerrors = true [egg_info] tag_build = tag_date = 0 neutron-8.4.0/.mailmap0000664000567000056710000000111613044372736016007 0ustar jenkinsjenkins00000000000000# Format is: # # lawrancejing Jiajun Liu Zhongyue Luo Kun Huang Zhenguo Niu Isaku Yamahata Isaku Yamahata Morgan Fainberg neutron-8.4.0/.testr.conf0000664000567000056710000000062713044372736016462 0ustar jenkinsjenkins00000000000000[DEFAULT] test_command=OS_STDOUT_CAPTURE=${OS_STDOUT_CAPTURE:-1} \ OS_STDERR_CAPTURE=${OS_STDERR_CAPTURE:-1} \ OS_LOG_CAPTURE=${OS_LOG_CAPTURE:-1} \ OS_TEST_TIMEOUT=${OS_TEST_TIMEOUT:-160} \ ${PYTHON:-python} -m subunit.run discover -t ./ ${OS_TEST_PATH:-./neutron/tests/unit} $LISTOPT $IDOPTION test_id_option=--load-list $IDFILE test_list_option=--list neutron-8.4.0/PKG-INFO0000664000567000056710000000407013044373210015451 0ustar jenkinsjenkins00000000000000Metadata-Version: 1.1 Name: neutron Version: 8.4.0 Summary: OpenStack Networking Home-page: http://www.openstack.org/ Author: OpenStack Author-email: openstack-dev@lists.openstack.org License: UNKNOWN Description: Welcome! ======== You have come across a cloud computing network fabric controller. It has identified itself as "Neutron." It aims to tame your (cloud) networking! External Resources: =================== The homepage for Neutron is: http://launchpad.net/neutron. Use this site for asking for help, and filing bugs. Code is available on git.openstack.org at . The latest and most in-depth documentation on how to use Neutron is available at: . This includes: Neutron Administrator Guide http://docs.openstack.org/admin-guide-cloud/networking.html Networking Guide http://docs.openstack.org/networking-guide/ Neutron API Reference: http://docs.openstack.org/api/openstack-network/2.0/content/ Current Neutron developer documentation is available at: http://wiki.openstack.org/NeutronDevelopment For help on usage and hacking of Neutron, please send mail to . For information on how to contribute to Neutron, please see the contents of the CONTRIBUTING.rst file. 
Platform: UNKNOWN Classifier: Environment :: OpenStack Classifier: Intended Audience :: Information Technology Classifier: Intended Audience :: System Administrators Classifier: License :: OSI Approved :: Apache Software License Classifier: Operating System :: POSIX :: Linux Classifier: Programming Language :: Python Classifier: Programming Language :: Python :: 2 Classifier: Programming Language :: Python :: 2.7 Classifier: Programming Language :: Python :: 3 Classifier: Programming Language :: Python :: 3.4 neutron-8.4.0/etc/0000775000567000056710000000000013044373210015126 5ustar jenkinsjenkins00000000000000neutron-8.4.0/etc/rootwrap.conf0000664000567000056710000000225313044372736017660 0ustar jenkinsjenkins00000000000000# Configuration for neutron-rootwrap # This file should be owned by (and only-writeable by) the root user [DEFAULT] # List of directories to load filter definitions from (separated by ','). # These directories MUST all be only writeable by root ! filters_path=/etc/neutron/rootwrap.d,/usr/share/neutron/rootwrap # List of directories to search executables in, in case filters do not # explicitly specify a full path (separated by ',') # If not specified, defaults to system PATH environment variable. # These directories MUST all be only writeable by root ! exec_dirs=/sbin,/usr/sbin,/bin,/usr/bin,/usr/local/bin,/usr/local/sbin # Enable logging to syslog # Default value is False use_syslog=False # Which syslog facility to use. # Valid values include auth, authpriv, syslog, local0, local1... # Default value is 'syslog' syslog_log_facility=syslog # Which messages to log. # INFO means log all usage # ERROR means only log unsuccessful attempts syslog_log_level=ERROR [xenapi] # XenAPI configuration is only required by the L2 agent if it is to # target a XenServer/XCP compute host's dom0.
xenapi_connection_url= xenapi_connection_username=root xenapi_connection_password= neutron-8.4.0/etc/neutron/0000775000567000056710000000000013044373210016620 5ustar jenkinsjenkins00000000000000neutron-8.4.0/etc/neutron/plugins/0000775000567000056710000000000013044373210020301 5ustar jenkinsjenkins00000000000000neutron-8.4.0/etc/neutron/plugins/ml2/0000775000567000056710000000000013044373210020773 5ustar jenkinsjenkins00000000000000neutron-8.4.0/etc/neutron/plugins/ml2/.placeholder0000664000567000056710000000000013044372736023260 0ustar jenkinsjenkins00000000000000neutron-8.4.0/etc/neutron/rootwrap.d/0000775000567000056710000000000013044373210020717 5ustar jenkinsjenkins00000000000000neutron-8.4.0/etc/neutron/rootwrap.d/linuxbridge-plugin.filters0000664000567000056710000000224613044372760026136 0ustar jenkinsjenkins00000000000000# neutron-rootwrap command filters for nodes on which neutron is # expected to control network # # This file should be owned by (and only-writeable by) the root user # format seems to be # cmd-name: filter-name, raw-command, user, args [Filters] # linuxbridge-agent # unclear whether both variants are necessary, but I'm transliterating # from the old mechanism brctl: CommandFilter, brctl, root bridge: CommandFilter, bridge, root # ip_lib ip: IpFilter, ip, root find: RegExpFilter, find, root, find, /sys/class/net, -maxdepth, 1, -type, l, -printf, %.* ip_exec: IpNetnsExecFilter, ip, root # tc commands needed for QoS support tc_replace_tbf: RegExpFilter, tc, root, tc, qdisc, replace, dev, .+, root, tbf, rate, .+, latency, .+, burst, .+ tc_add_ingress: RegExpFilter, tc, root, tc, qdisc, add, dev, .+, ingress, handle, .+ tc_delete: RegExpFilter, tc, root, tc, qdisc, del, dev, .+, .+ tc_show_qdisc: RegExpFilter, tc, root, tc, qdisc, show, dev, .+ tc_show_filters: RegExpFilter, tc, root, tc, filter, show, dev, .+, parent, .+ tc_add_filter: RegExpFilter, tc, root, tc, filter, add, dev, .+, parent, .+, protocol, all, prio, .+, basic, police, rate, .+, burst, .+, mtu, .+, drop neutron-8.4.0/etc/neutron/rootwrap.d/l3.filters0000664000567000056710000000315013044372760022637 0ustar jenkinsjenkins00000000000000# neutron-rootwrap command filters for nodes on which neutron is # expected to control network # # This file should be owned by (and only-writeable by) the root user # format seems to be # cmd-name: filter-name, raw-command, user, args [Filters] # arping arping: CommandFilter, arping, root # l3_agent sysctl: CommandFilter, sysctl, root route: CommandFilter, route, root radvd: CommandFilter, radvd, root # metadata proxy metadata_proxy: CommandFilter, neutron-ns-metadata-proxy, root # RHEL invocation of the metadata proxy will report /usr/bin/python kill_metadata: KillFilter, root, python, -9 kill_metadata7: KillFilter, root, python2.7, -9 kill_radvd_usr: KillFilter, root, /usr/sbin/radvd, -9, -HUP kill_radvd: KillFilter, root, /sbin/radvd, -9, -HUP # ip_lib ip: IpFilter, ip, root find: RegExpFilter, find, root, find, /sys/class/net, -maxdepth, 1, -type, l, -printf, %.* ip_exec: IpNetnsExecFilter, ip, root # For ip monitor kill_ip_monitor: KillFilter, root, ip, -9 # ovs_lib (if OVSInterfaceDriver is used) ovs-vsctl: CommandFilter, ovs-vsctl, root # iptables_manager iptables-save: CommandFilter, iptables-save, root iptables-restore: CommandFilter, iptables-restore, root ip6tables-save: CommandFilter, ip6tables-save, root ip6tables-restore: CommandFilter, ip6tables-restore, root # Keepalived keepalived: CommandFilter, keepalived, root kill_keepalived: KillFilter, root, 
/usr/sbin/keepalived, -HUP, -15, -9 # l3 agent to delete floatingip's conntrack state conntrack: CommandFilter, conntrack, root # keepalived state change monitor keepalived_state_change: CommandFilter, neutron-keepalived-state-change, root neutron-8.4.0/etc/neutron/rootwrap.d/dibbler.filters0000664000567000056710000000102113044372736023722 0ustar jenkinsjenkins00000000000000# neutron-rootwrap command filters for nodes on which neutron is # expected to control network # # This file should be owned by (and only-writeable by) the root user # format seems to be # cmd-name: filter-name, raw-command, user, args [Filters] # Filters for the dibbler-based reference implementation of the pluggable # Prefix Delegation driver. Other implementations using an alternative agent # should include a similar filter in this folder. # prefix_delegation_agent dibbler-client: CommandFilter, dibbler-client, root neutron-8.4.0/etc/neutron/rootwrap.d/debug.filters0000664000567000056710000000131113044372736023407 0ustar jenkinsjenkins00000000000000# neutron-rootwrap command filters for nodes on which neutron is # expected to control network # # This file should be owned by (and only-writeable by) the root user # format seems to be # cmd-name: filter-name, raw-command, user, args [Filters] # This is needed because we should ping # from inside a namespace which requires root # _alt variants allow to match -c and -w in any order # (used by NeutronDebugAgent.ping_all) ping: RegExpFilter, ping, root, ping, -w, \d+, -c, \d+, [0-9\.]+ ping_alt: RegExpFilter, ping, root, ping, -c, \d+, -w, \d+, [0-9\.]+ ping6: RegExpFilter, ping6, root, ping6, -w, \d+, -c, \d+, [0-9A-Fa-f:]+ ping6_alt: RegExpFilter, ping6, root, ping6, -c, \d+, -w, \d+, [0-9A-Fa-f:]+neutron-8.4.0/etc/neutron/rootwrap.d/ipset-firewall.filters0000664000567000056710000000053413044372736025256 0ustar jenkinsjenkins00000000000000# neutron-rootwrap command filters for nodes on which neutron is # expected to control network # # This file should be owned by (and only-writeable by) the root user # format seems to be # cmd-name: filter-name, raw-command, user, args [Filters] # neutron/agent/linux/iptables_firewall.py # "ipset", "-A", ... 
ipset: CommandFilter, ipset, root neutron-8.4.0/etc/neutron/rootwrap.d/dhcp.filters0000664000567000056710000000217113044372760023241 0ustar jenkinsjenkins00000000000000# neutron-rootwrap command filters for nodes on which neutron is # expected to control network # # This file should be owned by (and only-writeable by) the root user # format seems to be # cmd-name: filter-name, raw-command, user, args [Filters] # dhcp-agent dnsmasq: CommandFilter, dnsmasq, root # dhcp-agent uses kill as well, that's handled by the generic KillFilter # it looks like these are the only signals needed, per # neutron/agent/linux/dhcp.py kill_dnsmasq: KillFilter, root, /sbin/dnsmasq, -9, -HUP kill_dnsmasq_usr: KillFilter, root, /usr/sbin/dnsmasq, -9, -HUP ovs-vsctl: CommandFilter, ovs-vsctl, root ivs-ctl: CommandFilter, ivs-ctl, root mm-ctl: CommandFilter, mm-ctl, root dhcp_release: CommandFilter, dhcp_release, root # metadata proxy metadata_proxy: CommandFilter, neutron-ns-metadata-proxy, root # RHEL invocation of the metadata proxy will report /usr/bin/python kill_metadata: KillFilter, root, python, -9 kill_metadata7: KillFilter, root, python2.7, -9 # ip_lib ip: IpFilter, ip, root find: RegExpFilter, find, root, find, /sys/class/net, -maxdepth, 1, -type, l, -printf, %.* ip_exec: IpNetnsExecFilter, ip, root neutron-8.4.0/etc/neutron/rootwrap.d/iptables-firewall.filters0000664000567000056710000000154213044372760025732 0ustar jenkinsjenkins00000000000000# neutron-rootwrap command filters for nodes on which neutron is # expected to control network # # This file should be owned by (and only-writeable by) the root user # format seems to be # cmd-name: filter-name, raw-command, user, args [Filters] # neutron/agent/linux/iptables_manager.py # "iptables-save", ... iptables-save: CommandFilter, iptables-save, root iptables-restore: CommandFilter, iptables-restore, root ip6tables-save: CommandFilter, ip6tables-save, root ip6tables-restore: CommandFilter, ip6tables-restore, root # neutron/agent/linux/iptables_manager.py # "iptables", "-A", ... iptables: CommandFilter, iptables, root ip6tables: CommandFilter, ip6tables, root # neutron/agent/linux/iptables_manager.py # "sysctl", "-w", ... 
sysctl: CommandFilter, sysctl, root # neutron/agent/linux/ip_conntrack.py conntrack: CommandFilter, conntrack, rootneutron-8.4.0/etc/neutron/rootwrap.d/openvswitch-plugin.filters0000664000567000056710000000145113044372736026173 0ustar jenkinsjenkins00000000000000# neutron-rootwrap command filters for nodes on which neutron is # expected to control network # # This file should be owned by (and only-writeable by) the root user # format seems to be # cmd-name: filter-name, raw-command, user, args [Filters] # openvswitch-agent # unclear whether both variants are necessary, but I'm transliterating # from the old mechanism ovs-vsctl: CommandFilter, ovs-vsctl, root # NOTE(yamamoto): of_interface=native doesn't use ovs-ofctl ovs-ofctl: CommandFilter, ovs-ofctl, root kill_ovsdb_client: KillFilter, root, /usr/bin/ovsdb-client, -9 ovsdb-client: CommandFilter, ovsdb-client, root xe: CommandFilter, xe, root # ip_lib ip: IpFilter, ip, root find: RegExpFilter, find, root, find, /sys/class/net, -maxdepth, 1, -type, l, -printf, %.* ip_exec: IpNetnsExecFilter, ip, root neutron-8.4.0/etc/neutron/rootwrap.d/ebtables.filters0000664000567000056710000000044113044372736024105 0ustar jenkinsjenkins00000000000000# neutron-rootwrap command filters for nodes on which neutron is # expected to control network # # This file should be owned by (and only-writeable by) the root user # format seems to be # cmd-name: filter-name, raw-command, user, args [Filters] ebtables: CommandFilter, ebtables, root neutron-8.4.0/etc/README.txt0000664000567000056710000000046113044372736016641 0ustar jenkinsjenkins00000000000000To generate the sample neutron configuration files, run the following command from the top level of the neutron directory: tox -e genconfig If a 'tox' environment is unavailable, then you can run the following script instead to generate the configuration files: ./tools/generate_config_file_samples.sh neutron-8.4.0/etc/api-paste.ini0000664000567000056710000000202013044372760017515 0ustar jenkinsjenkins00000000000000[composite:neutron] use = egg:Paste#urlmap /: neutronversions /v2.0: neutronapi_v2_0 [composite:neutronapi_v2_0] use = call:neutron.auth:pipeline_factory noauth = cors request_id catch_errors extensions neutronapiapp_v2_0 keystone = cors request_id catch_errors authtoken keystonecontext extensions neutronapiapp_v2_0 [filter:request_id] paste.filter_factory = oslo_middleware:RequestId.factory [filter:catch_errors] paste.filter_factory = oslo_middleware:CatchErrors.factory [filter:cors] paste.filter_factory = oslo_middleware.cors:filter_factory oslo_config_project = neutron [filter:keystonecontext] paste.filter_factory = neutron.auth:NeutronKeystoneContext.factory [filter:authtoken] paste.filter_factory = keystonemiddleware.auth_token:filter_factory [filter:extensions] paste.filter_factory = neutron.api.extensions:plugin_aware_extension_middleware_factory [app:neutronversions] paste.app_factory = neutron.api.versions:Versions.factory [app:neutronapiapp_v2_0] paste.app_factory = neutron.api.v2.router:APIRouter.factory neutron-8.4.0/etc/policy.json0000664000567000056710000002503313044372760017334 0ustar jenkinsjenkins00000000000000{ "context_is_admin": "role:admin", "owner": "tenant_id:%(tenant_id)s", "admin_or_owner": "rule:context_is_admin or rule:owner", "context_is_advsvc": "role:advsvc", "admin_or_network_owner": "rule:context_is_admin or tenant_id:%(network:tenant_id)s", "admin_owner_or_network_owner": "rule:owner or rule:admin_or_network_owner", "admin_only": "rule:context_is_admin", "regular_user": "", 
"shared": "field:networks:shared=True", "shared_firewalls": "field:firewalls:shared=True", "shared_firewall_policies": "field:firewall_policies:shared=True", "shared_subnetpools": "field:subnetpools:shared=True", "shared_address_scopes": "field:address_scopes:shared=True", "external": "field:networks:router:external=True", "default": "rule:admin_or_owner", "create_subnet": "rule:admin_or_network_owner", "get_subnet": "rule:admin_or_owner or rule:shared", "update_subnet": "rule:admin_or_network_owner", "delete_subnet": "rule:admin_or_network_owner", "create_subnetpool": "", "create_subnetpool:shared": "rule:admin_only", "create_subnetpool:is_default": "rule:admin_only", "get_subnetpool": "rule:admin_or_owner or rule:shared_subnetpools", "update_subnetpool": "rule:admin_or_owner", "update_subnetpool:is_default": "rule:admin_only", "delete_subnetpool": "rule:admin_or_owner", "create_address_scope": "", "create_address_scope:shared": "rule:admin_only", "get_address_scope": "rule:admin_or_owner or rule:shared_address_scopes", "update_address_scope": "rule:admin_or_owner", "update_address_scope:shared": "rule:admin_only", "delete_address_scope": "rule:admin_or_owner", "create_network": "", "get_network": "rule:admin_or_owner or rule:shared or rule:external or rule:context_is_advsvc", "get_network:router:external": "rule:regular_user", "get_network:segments": "rule:admin_only", "get_network:provider:network_type": "rule:admin_only", "get_network:provider:physical_network": "rule:admin_only", "get_network:provider:segmentation_id": "rule:admin_only", "get_network:queue_id": "rule:admin_only", "get_network_ip_availabilities": "rule:admin_only", "get_network_ip_availability": "rule:admin_only", "create_network:shared": "rule:admin_only", "create_network:router:external": "rule:admin_only", "create_network:is_default": "rule:admin_only", "create_network:segments": "rule:admin_only", "create_network:provider:network_type": "rule:admin_only", "create_network:provider:physical_network": "rule:admin_only", "create_network:provider:segmentation_id": "rule:admin_only", "update_network": "rule:admin_or_owner", "update_network:segments": "rule:admin_only", "update_network:shared": "rule:admin_only", "update_network:provider:network_type": "rule:admin_only", "update_network:provider:physical_network": "rule:admin_only", "update_network:provider:segmentation_id": "rule:admin_only", "update_network:router:external": "rule:admin_only", "delete_network": "rule:admin_or_owner", "network_device": "field:port:device_owner=~^network:", "create_port": "", "create_port:device_owner": "not rule:network_device or rule:context_is_advsvc or rule:admin_or_network_owner", "create_port:mac_address": "rule:context_is_advsvc or rule:admin_or_network_owner", "create_port:fixed_ips": "rule:context_is_advsvc or rule:admin_or_network_owner", "create_port:port_security_enabled": "rule:context_is_advsvc or rule:admin_or_network_owner", "create_port:binding:host_id": "rule:admin_only", "create_port:binding:profile": "rule:admin_only", "create_port:mac_learning_enabled": "rule:context_is_advsvc or rule:admin_or_network_owner", "create_port:allowed_address_pairs": "rule:admin_or_network_owner", "get_port": "rule:context_is_advsvc or rule:admin_owner_or_network_owner", "get_port:queue_id": "rule:admin_only", "get_port:binding:vif_type": "rule:admin_only", "get_port:binding:vif_details": "rule:admin_only", "get_port:binding:host_id": "rule:admin_only", "get_port:binding:profile": "rule:admin_only", "update_port": "rule:admin_or_owner or 
rule:context_is_advsvc", "update_port:device_owner": "not rule:network_device or rule:context_is_advsvc or rule:admin_or_network_owner", "update_port:mac_address": "rule:admin_only or rule:context_is_advsvc", "update_port:fixed_ips": "rule:context_is_advsvc or rule:admin_or_network_owner", "update_port:port_security_enabled": "rule:context_is_advsvc or rule:admin_or_network_owner", "update_port:binding:host_id": "rule:admin_only", "update_port:binding:profile": "rule:admin_only", "update_port:mac_learning_enabled": "rule:context_is_advsvc or rule:admin_or_network_owner", "update_port:allowed_address_pairs": "rule:admin_or_network_owner", "delete_port": "rule:context_is_advsvc or rule:admin_owner_or_network_owner", "get_router:ha": "rule:admin_only", "create_router": "rule:regular_user", "create_router:external_gateway_info:enable_snat": "rule:admin_only", "create_router:distributed": "rule:admin_only", "create_router:ha": "rule:admin_only", "get_router": "rule:admin_or_owner", "get_router:distributed": "rule:admin_only", "update_router:external_gateway_info:enable_snat": "rule:admin_only", "update_router:distributed": "rule:admin_only", "update_router:ha": "rule:admin_only", "delete_router": "rule:admin_or_owner", "add_router_interface": "rule:admin_or_owner", "remove_router_interface": "rule:admin_or_owner", "create_router:external_gateway_info:external_fixed_ips": "rule:admin_only", "update_router:external_gateway_info:external_fixed_ips": "rule:admin_only", "create_firewall": "", "get_firewall": "rule:admin_or_owner", "create_firewall:shared": "rule:admin_only", "get_firewall:shared": "rule:admin_only", "update_firewall": "rule:admin_or_owner", "update_firewall:shared": "rule:admin_only", "delete_firewall": "rule:admin_or_owner", "create_firewall_policy": "", "get_firewall_policy": "rule:admin_or_owner or rule:shared_firewall_policies", "create_firewall_policy:shared": "rule:admin_or_owner", "update_firewall_policy": "rule:admin_or_owner", "delete_firewall_policy": "rule:admin_or_owner", "insert_rule": "rule:admin_or_owner", "remove_rule": "rule:admin_or_owner", "create_firewall_rule": "", "get_firewall_rule": "rule:admin_or_owner or rule:shared_firewalls", "update_firewall_rule": "rule:admin_or_owner", "delete_firewall_rule": "rule:admin_or_owner", "create_qos_queue": "rule:admin_only", "get_qos_queue": "rule:admin_only", "update_agent": "rule:admin_only", "delete_agent": "rule:admin_only", "get_agent": "rule:admin_only", "create_dhcp-network": "rule:admin_only", "delete_dhcp-network": "rule:admin_only", "get_dhcp-networks": "rule:admin_only", "create_l3-router": "rule:admin_only", "delete_l3-router": "rule:admin_only", "get_l3-routers": "rule:admin_only", "get_dhcp-agents": "rule:admin_only", "get_l3-agents": "rule:admin_only", "get_loadbalancer-agent": "rule:admin_only", "get_loadbalancer-pools": "rule:admin_only", "get_agent-loadbalancers": "rule:admin_only", "get_loadbalancer-hosting-agent": "rule:admin_only", "create_floatingip": "rule:regular_user", "create_floatingip:floating_ip_address": "rule:admin_only", "update_floatingip": "rule:admin_or_owner", "delete_floatingip": "rule:admin_or_owner", "get_floatingip": "rule:admin_or_owner", "create_network_profile": "rule:admin_only", "update_network_profile": "rule:admin_only", "delete_network_profile": "rule:admin_only", "get_network_profiles": "", "get_network_profile": "", "update_policy_profiles": "rule:admin_only", "get_policy_profiles": "", "get_policy_profile": "", "create_metering_label": "rule:admin_only", 
"delete_metering_label": "rule:admin_only", "get_metering_label": "rule:admin_only", "create_metering_label_rule": "rule:admin_only", "delete_metering_label_rule": "rule:admin_only", "get_metering_label_rule": "rule:admin_only", "get_service_provider": "rule:regular_user", "get_lsn": "rule:admin_only", "create_lsn": "rule:admin_only", "create_flavor": "rule:admin_only", "update_flavor": "rule:admin_only", "delete_flavor": "rule:admin_only", "get_flavors": "rule:regular_user", "get_flavor": "rule:regular_user", "create_service_profile": "rule:admin_only", "update_service_profile": "rule:admin_only", "delete_service_profile": "rule:admin_only", "get_service_profiles": "rule:admin_only", "get_service_profile": "rule:admin_only", "get_policy": "rule:regular_user", "create_policy": "rule:admin_only", "update_policy": "rule:admin_only", "delete_policy": "rule:admin_only", "get_policy_bandwidth_limit_rule": "rule:regular_user", "create_policy_bandwidth_limit_rule": "rule:admin_only", "delete_policy_bandwidth_limit_rule": "rule:admin_only", "update_policy_bandwidth_limit_rule": "rule:admin_only", "get_rule_type": "rule:regular_user", "restrict_wildcard": "(not field:rbac_policy:target_tenant=*) or rule:admin_only", "create_rbac_policy": "", "create_rbac_policy:target_tenant": "rule:restrict_wildcard", "update_rbac_policy": "rule:admin_or_owner", "update_rbac_policy:target_tenant": "rule:restrict_wildcard and rule:admin_or_owner", "get_rbac_policy": "rule:admin_or_owner", "delete_rbac_policy": "rule:admin_or_owner", "create_flavor_service_profile": "rule:admin_only", "delete_flavor_service_profile": "rule:admin_only", "get_flavor_service_profile": "rule:regular_user", "get_auto_allocated_topology": "rule:admin_or_owner", "get_bgp_speaker": "rule:admin_only", "create_bgp_speaker": "rule:admin_only", "update_bgp_speaker": "rule:admin_only", "delete_bgp_speaker": "rule:admin_only", "get_bgp_peer": "rule:admin_only", "create_bgp_peer": "rule:admin_only", "update_bgp_peer": "rule:admin_only", "delete_bgp_peer": "rule:admin_only", "add_bgp_peer": "rule:admin_only", "remove_bgp_peer": "rule:admin_only", "add_gateway_network": "rule:admin_only", "remove_gateway_network": "rule:admin_only", "get_advertised_routes":"rule:admin_only", "add_bgp_speaker_to_dragent": "rule:admin_only", "remove_bgp_speaker_from_dragent": "rule:admin_only", "list_bgp_speaker_on_dragent": "rule:admin_only", "list_dragent_hosting_bgp_speaker": "rule:admin_only" } neutron-8.4.0/etc/oslo-config-generator/0000775000567000056710000000000013044373210021331 5ustar jenkinsjenkins00000000000000neutron-8.4.0/etc/oslo-config-generator/ml2_conf_sriov.ini0000664000567000056710000000021613044372760024763 0ustar jenkinsjenkins00000000000000[DEFAULT] output_file = etc/neutron/plugins/ml2/ml2_conf_sriov.ini.sample wrap_width = 79 namespace = neutron.ml2.sriov namespace = oslo.log neutron-8.4.0/etc/oslo-config-generator/metadata_agent.ini0000664000567000056710000000017713044372760025006 0ustar jenkinsjenkins00000000000000[DEFAULT] output_file = etc/metadata_agent.ini.sample wrap_width = 79 namespace = neutron.metadata.agent namespace = oslo.log neutron-8.4.0/etc/oslo-config-generator/openvswitch_agent.ini0000664000567000056710000000022513044372760025571 0ustar jenkinsjenkins00000000000000[DEFAULT] output_file = etc/neutron/plugins/ml2/openvswitch_agent.ini.sample wrap_width = 79 namespace = neutron.ml2.ovs.agent namespace = oslo.log neutron-8.4.0/etc/oslo-config-generator/macvtap_agent.ini0000664000567000056710000000022513044372736024656 0ustar 
jenkinsjenkins00000000000000[DEFAULT] output_file = etc/neutron/plugins/ml2/macvtap_agent.ini.sample wrap_width = 79 namespace = neutron.ml2.macvtap.agent namespace = oslo.log neutron-8.4.0/etc/oslo-config-generator/sriov_agent.ini0000664000567000056710000000022113044372736024361 0ustar jenkinsjenkins00000000000000[DEFAULT] output_file = etc/neutron/plugins/ml2/sriov_agent.ini.sample wrap_width = 79 namespace = neutron.ml2.sriov.agent namespace = oslo.log neutron-8.4.0/etc/oslo-config-generator/ml2_conf.ini0000664000567000056710000000020213044372736023537 0ustar jenkinsjenkins00000000000000[DEFAULT] output_file = etc/neutron/plugins/ml2/ml2_conf.ini.sample wrap_width = 79 namespace = neutron.ml2 namespace = oslo.log neutron-8.4.0/etc/oslo-config-generator/bgp_dragent.ini0000664000567000056710000000022613044372760024317 0ustar jenkinsjenkins00000000000000[DEFAULT] output_file = etc/bgp_dragent.ini.sample wrap_width = 79 namespace = neutron.base.agent namespace = neutron.bgp.agent namespace = oslo.log neutron-8.4.0/etc/oslo-config-generator/metering_agent.ini0000664000567000056710000000017713044372760025040 0ustar jenkinsjenkins00000000000000[DEFAULT] output_file = etc/metering_agent.ini.sample wrap_width = 79 namespace = neutron.metering.agent namespace = oslo.log neutron-8.4.0/etc/oslo-config-generator/dhcp_agent.ini0000664000567000056710000000022613044372760024137 0ustar jenkinsjenkins00000000000000[DEFAULT] output_file = etc/dhcp_agent.ini.sample wrap_width = 79 namespace = neutron.base.agent namespace = neutron.dhcp.agent namespace = oslo.log neutron-8.4.0/etc/oslo-config-generator/l3_agent.ini0000664000567000056710000000022213044372760023533 0ustar jenkinsjenkins00000000000000[DEFAULT] output_file = etc/l3_agent.ini.sample wrap_width = 79 namespace = neutron.base.agent namespace = neutron.l3.agent namespace = oslo.log neutron-8.4.0/etc/oslo-config-generator/linuxbridge_agent.ini0000664000567000056710000000023513044372736025540 0ustar jenkinsjenkins00000000000000[DEFAULT] output_file = etc/neutron/plugins/ml2/linuxbridge_agent.ini.sample wrap_width = 79 namespace = neutron.ml2.linuxbridge.agent namespace = oslo.log neutron-8.4.0/etc/oslo-config-generator/neutron.conf0000664000567000056710000000072713044372760023711 0ustar jenkinsjenkins00000000000000[DEFAULT] output_file = etc/neutron.conf.sample wrap_width = 79 namespace = neutron namespace = neutron.agent namespace = neutron.db namespace = neutron.extensions namespace = neutron.qos namespace = nova.auth namespace = oslo.log namespace = oslo.db namespace = oslo.policy namespace = oslo.concurrency namespace = oslo.messaging namespace = oslo.middleware.cors namespace = oslo.service.sslutils namespace = oslo.service.wsgi namespace = keystonemiddleware.auth_token neutron-8.4.0/doc/0000775000567000056710000000000013044373210015120 5ustar jenkinsjenkins00000000000000neutron-8.4.0/doc/pom.xml0000664000567000056710000001474713044372760016463 0ustar jenkinsjenkins00000000000000 4.0.0 org.openstack.docs openstack-guide 1.0.0-SNAPSHOT jar OpenStack Guides Rackspace Research Repositories true rackspace-research Rackspace Research Repository http://maven.research.rackspacecloud.com/content/groups/public/ rackspace-research Rackspace Research Repository http://maven.research.rackspacecloud.com/content/groups/public/ target/docbkx/pdf **/*.fo com.rackspace.cloud.api clouddocs-maven-plugin 1.0.5-SNAPSHOT goal1 generate-pdf generate-sources false goal2 generate-webhelp generate-sources 0 openstackdocs 1 UA-17511903-6 appendix toc,title article/appendix 
nop article toc,title book title,figure,table,example,equation chapter toc,title part toc,title preface toc,title qandadiv toc qandaset toc reference toc,title set toc,title 0 0 true source/docbkx neutron-api-1.0/neutron-api-guide.xml reviewer openstack neutron-8.4.0/doc/source/0000775000567000056710000000000013044373210016420 5ustar jenkinsjenkins00000000000000neutron-8.4.0/doc/source/dashboards/0000775000567000056710000000000013044373210020532 5ustar jenkinsjenkins00000000000000neutron-8.4.0/doc/source/dashboards/index.rst0000664000567000056710000001175013044372736022413 0ustar jenkinsjenkins00000000000000Gerrit Dashboards ================= - `Neutron master branch reviews `_ - `Neutron subproject reviews (master branch) `_ - `Neutron stable branch reviews `_ These dashboard links can be generated by `Gerrit Dashboard Creator`_. Useful dashboard definitions are found in ``dashboards`` directory. .. _Gerrit Dashboard Creator: https://github.com/openstack/gerrit-dash-creator .. _Neutron Failure Rate: http://grafana.openstack.org/dashboard/db/neutron-failure-rate .. _Tempest Failure Rate: http://grafana.openstack.org/dashboard/db/tempest-failure-rate neutron-8.4.0/doc/source/index.rst0000664000567000056710000000442613044372736020303 0ustar jenkinsjenkins00000000000000.. Copyright 2011-2013 OpenStack Foundation All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. Welcome to Neutron's developer documentation! ============================================= Neutron is an OpenStack project to provide "network connectivity as a service" between interface devices (e.g., vNICs) managed by other OpenStack services (e.g., nova). It implements the `Neutron API`_. .. _`Neutron API`: http://docs.openstack.org/api/openstack-network/2.0/content/ This document describes Neutron for contributors of the project, and assumes that you are already familiar with Neutron from an `end-user perspective`_. .. _`end-user perspective`: http://docs.openstack.org/trunk/openstack-network/admin/content/index.html This documentation is generated by the Sphinx toolkit and lives in the source tree. Additional documentation on Neutron and other components of OpenStack can be found on the `OpenStack wiki`_ and the `Neutron section of the wiki`. The `Neutron Development wiki`_ is also a good resource for new contributors. .. _`OpenStack wiki`: http://wiki.openstack.org .. _`Neutron section of the wiki`: http://wiki.openstack.org/Neutron .. _`Neutron Development wiki`: http://wiki.openstack.org/NeutronDevelopment Enjoy! Neutron Policies ================ .. toctree:: :maxdepth: 2 policies/index Neutron Stadium =============== .. toctree:: :maxdepth: 2 stadium/index Developer Docs ============== .. toctree:: :maxdepth: 3 devref/index Dashboards ========== There is a collection of dashboards to help developers and reviewers located here. .. toctree:: :maxdepth: 2 dashboards/index API Extensions ============== Go to http://api.openstack.org for information about OpenStack Network API extensions. 
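
Each of the oslo-config-generator files above pairs a sample configuration
file with a list of option namespaces, and every namespace resolves to an
``oslo.config.opts`` entry point such as ``neutron.opts:list_opts`` from
setup.cfg. A minimal sketch of what such a hook looks like follows; the
option names here are illustrative, not actual neutron options::

    # Sketch of an oslo.config.opts entry point consumed by
    # oslo-config-generator; option names are illustrative.
    from oslo_config import cfg

    _example_opts = [
        cfg.StrOpt('interface_driver',
                   help='Driver used to manage virtual interfaces.'),
        cfg.IntOpt('polling_interval', default=2,
                   help='Seconds between polling cycles.'),
    ]


    def list_opts():
        # The generator expects an iterable of (group, options) tuples.
        return [('DEFAULT', _example_opts)]

Running, for example, ``oslo-config-generator --config-file
etc/oslo-config-generator/neutron.conf`` walks the hooks named by each
namespace and renders the commented sample file given by output_file.
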
neutron-8.4.0/doc/source/conf.py0000664000567000056710000001711313044372760017733 0ustar jenkinsjenkins00000000000000# -*- coding: utf-8 -*-
# Copyright (c) 2010 OpenStack Foundation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Neutron documentation build configuration file, created by
# sphinx-quickstart on Tue May 18 13:50:15 2010.
#
# This file is execfile()'d with the current directory set to its containing
# dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.

import os
import subprocess
import sys

# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
NEUTRON_DIR = os.path.abspath(os.path.join(BASE_DIR, "..", ".."))
sys.path.insert(0, NEUTRON_DIR)

# -- General configuration ---------------------------------------------------

# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc',
              'sphinx.ext.coverage',
              'sphinx.ext.ifconfig',
              'sphinx.ext.graphviz',
              'sphinx.ext.todo',
              'oslosphinx']

todo_include_todos = True

# Add any paths that contain templates here, relative to this directory.
templates_path = []
if os.getenv('HUDSON_PUBLISH_DOCS'):
    templates_path = ['_ga', '_templates']
else:
    templates_path = ['_templates']

# The suffix of source filenames.
source_suffix = '.rst'

# The encoding of source files.
#source_encoding = 'utf-8'

# The master toctree document.
master_doc = 'index'

# General information about the project.
project = u'Neutron'
copyright = u'2011-present, OpenStack Foundation.'

# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# Version info
from neutron.version import version_info as neutron_version
release = neutron_version.release_string()
# The short X.Y version.
version = neutron_version.version_string()

# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None

# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'

# List of documents that shouldn't be included in the build.
# unused_docs = []

# List of directories, relative to source directory, that shouldn't be searched
# for source files.
exclude_trees = []

# The reST default role (for this markup: `text`) to use for all documents.
#default_role = None

# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True

# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True

# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
show_authors = True

# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'

# A list of ignored prefixes for module index sorting.
modindex_common_prefix = ['neutron.']

# -- Options for HTML output -------------------------------------------------

# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
# html_theme_path = ["."]
# html_theme = '_theme'

# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}

# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = ['_theme']

# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None

# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None

# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None

# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None

# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
# html_static_path = ['_static']

# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
git_cmd = ["git", "log", "--pretty=format:'%ad, commit %h'", "--date=local",
           "-n1"]
html_last_updated_fmt = subprocess.Popen(git_cmd, stdout=subprocess.PIPE).\
    communicate()[0]

# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True

# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}

# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}

# If false, no module index is generated.
#html_use_modindex = True

# If false, no index is generated.
#html_use_index = True

# If true, the index is split into individual pages for each letter.
#html_split_index = False

# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True

# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''

# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = ''

# Output file base name for HTML help builder.
htmlhelp_basename = 'neutrondoc'

# -- Options for LaTeX output ------------------------------------------------

# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'

# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'

# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author,
# documentclass [howto/manual]).
latex_documents = [
    ('index', 'Neutron.tex', u'Neutron Documentation',
     u'Neutron development team', 'manual'),
]

# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None

# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False

# Additional stuff for the LaTeX preamble.
#latex_preamble = ''

# Documents to append as an appendix to all manuals.
#latex_appendices = []

# If false, no module index is generated.
#latex_use_modindex = True
neutron-8.4.0/doc/source/stadium/0000775000567000056710000000000013044373210020066 5ustar jenkinsjenkins00000000000000neutron-8.4.0/doc/source/stadium/sub_projects.rst0000664000567000056710000005431313044372760023337 0ustar jenkinsjenkins00000000000000..
      Licensed under the Apache License, Version 2.0 (the "License"); you may
      not use this file except in compliance with the License. You may obtain
      a copy of the License at

          http://www.apache.org/licenses/LICENSE-2.0

      Unless required by applicable law or agreed to in writing, software
      distributed under the License is distributed on an "AS IS" BASIS,
      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
      implied. See the License for the specific language governing
      permissions and limitations under the License.


      Convention for heading levels in Neutron devref:
      =======  Heading 0 (reserved for the title in a document)
      -------  Heading 1
      ~~~~~~~  Heading 2
      +++++++  Heading 3
      '''''''  Heading 4
      (Avoid deeper levels because they do not render well.)


Neutron Stadium
===============

Introduction
------------

Neutron has grown to be a complex project made of many moving parts. The
codebase is the aggregation of smaller projects that, once assembled in a
specific configuration, implement one of the many deployment architectures
to deliver networking services.

This document explains the inclusion process, and the criteria chosen to
select a project for inclusion. It also outlines the lists of projects that
are either managed by the `Neutron teams `_, or that are affiliated with
Neutron via an integration point made available by the core pluggable
framework.

Demystifying the mission
------------------------

The Neutron `mission `_ states that Neutron is all about delivering network
services and libraries. Although this has been true throughout the existence
of the project, the project itself has evolved over the years to meet the
demands of a growing community of users and developers who have an interest
in adopting, building new, and leveraging existing network functionality. To
continue to stay true to its mission, and yet reduce the management burden,
the project transformed itself into a pluggable framework, and a community
where interested parties come together to discuss and define APIs and
respective implementations that ultimately are delivered on top of the
aforementioned pluggable framework.

Some of these APIs and implementations are considered to be a part of the
Neutron project. For the ones that are not, there is no connotation of
*poor* quality associated with them. Their association, or lack thereof, is
simply a reflection of the fact that a good portion of the Neutron team
feels favorably towards developing and supporting the project in the wider
OpenStack ecosystem.
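
To make the pluggable framework concrete, the following is a minimal sketch
of the kind of integration point a sub-project typically ships: a service
plugin class that Neutron can load through a ``neutron.service_plugins``
entry point. The class name and extension alias are hypothetical, and a real
plugin would implement actual API logic::

    # Hypothetical skeleton of a sub-project's service plugin; only the
    # hooks Neutron's plugin loader expects are shown.
    from neutron.services import service_base


    class FooServicePlugin(service_base.ServicePluginBase):

        # API extensions this plugin implements (alias is illustrative).
        supported_extension_aliases = ['foo']

        def get_plugin_type(self):
            # Unique service type label referenced from the service_plugins
            # configuration option.
            return 'FOO'

        def get_plugin_description(self):
            return 'Illustrative service plugin for a Neutron sub-project.'

The sub-project would then register the class in its own setup.cfg under
``neutron.service_plugins``, the same entry point group used by the in-tree
plugins listed in this archive's setup.cfg.
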
Inclusion Process
-----------------

The process for proposing a repo into openstack/ and under the Neutron
project is to propose a patch to the openstack/governance repository. For
example, to propose networking-foo, one would add the following entry under
Neutron in reference/projects.yaml::

    - repo: openstack/networking-foo
      tags:
        - name: release:independent

For more information about the release:independent tag (and other currently
defined tags) see:

    http://governance.openstack.org/reference/tags/

The Neutron PTL must approve the change. The TC clarified that once a
project has been approved (Neutron in this case), the project can add
additional repos without needing TC approval as long as the added
repositories are within the existing approved scope of the project.

    http://git.openstack.org/cgit/openstack/governance/commit/?id=321a020cbcaada01976478ea9f677ebb4df7bd6d

In order to create a project, in case it does not exist, follow the steps
explained in:

    http://docs.openstack.org/infra/manual/creators.html

Responsibilities
----------------

All affected repositories already have their own review teams. The
sub-team working on the sub-project is entirely responsible for day-to-day
development. That includes reviews, bug tracking, and working on testing.

By being included, the project accepts oversight by the TC as a part of
being in OpenStack, and also accepts oversight by the Neutron PTL.

It is also assumed the respective review teams will make sure their projects
stay in line with `current best practices `_.

Inclusion Criteria
------------------

As mentioned before, the Neutron PTL must approve the inclusion of each
additional repository under the Neutron project. When in doubt, the PTL
should consider erring on the side of caution, and keep the project out of
the list until more consensus amongst the team can be built or a more
favorable assessment can be determined. That evaluation will be initially
based on the new project requirements used for all new OpenStack projects,
for the criteria that are applicable. If there is any question about this,
the review should be deferred to the TC as a new OpenStack project team.

    http://governance.openstack.org/reference/new-projects-requirements.html

Including *everything* related to Neutron under the Neutron project team has
not scaled well, so some Neutron related projects are encouraged to form a
new OpenStack project team.

The following list of guidelines is not a set of hard rules. There may be
exceptions. Instead, the guidelines serve as criteria that may influence the
decision one way or the other. Sub-projects will be reviewed regularly to
see how they meet these criteria.

These criteria are designed around how easy it would be for members of the
loosely defined "Neutron team" to jump in and help fix or even take over a
given repository if needed.

* Neutron stays quite busy developing and maintaining open source
  implementations for features. Any sub-project that serves as an interface
  to proprietary technology should most likely be a separate project team.
  This imposes a barrier on access to the technology for dev/test and CI
  integration.
* If the project only interacts with Neutron on REST API boundaries (client
  of Neutron's API, or Neutron is a client of its API), it should probably
  be a separate project. python-neutronclient is an obvious exception here.
* The area of functionality of a sub-project should be taken into
  consideration.
  The closer the functionality is to the base functionality implemented in
  openstack/neutron, the more likely it makes sense under the Neutron
  project team. Conversely, something "higher" in the stack considered an
  optional advanced service is more likely to make sense as an independent
  project. This is subject to change as the Neutron project evolves and
  continues to explore the boundaries that work best for the project.
* OpenStack project teams are based around both technology and groups of
  people. If a sub-project is directly driven by a subset of members of the
  Neutron team, with the wider approval of the Neutron team, then it makes
  sense to retain it under the Neutron project team. Conversely, a project
  that was developed without oversight or engagement of any of the Neutron
  members cannot qualify. For the sake of this criterion, a member of the
  team is a known (core or not) contributor with a substantial track record
  of Neutron development.

Official Sub-Project List
-------------------------

The official source of all repositories that are a part of Neutron or
another official OpenStack project team is here:

    http://governance.openstack.org/reference/projects/neutron.html

We list the Neutron repositories, as well as other Neutron affiliated
projects, here to provide references and note the functionality they
provide.

Functionality legend
~~~~~~~~~~~~~~~~~~~~

- base: the base Neutron platform;
- client: API client implementation;
- core: a monolithic plugin that can implement API at multiple layers L3-L7;
- dashboard: Horizon dashboard integration;
- docker: a Docker network plugin that uses Neutron to provide networking
  services to Docker containers;
- fw: a Firewall service plugin;
- intent: a service plugin that provides a declarative API to realize
  networking;
- ipam: an IP address management driver;
- l2: a Layer 2 service;
- l3: a Layer 3 service plugin;
- lb: a Load Balancer service plugin;
- ml2: an ML2 mechanism driver;
- pd: prefix delegation;
- sfc: traffic steering based on traffic classification;
- vpn: a VPN service plugin.

Neutron projects
~~~~~~~~~~~~~~~~

This table shows the list of official Neutron repositories and their
functionality.

+-------------------------------+-----------------------+
| Name                          | Functionality         |
+===============================+=======================+
| dragonflow_                   | core                  |
+-------------------------------+-----------------------+
| networking-bagpipe_           | ml2                   |
+-------------------------------+-----------------------+
| networking-bgpvpn_            | vpn                   |
+-------------------------------+-----------------------+
| networking-calico_            | ml2                   |
+-------------------------------+-----------------------+
| networking-l2gw_              | l2                    |
+-------------------------------+-----------------------+
| networking-midonet_           | core,ml2,l3,lb,fw     |
+-------------------------------+-----------------------+
| networking-odl_               | ml2,l3,lb,fw          |
+-------------------------------+-----------------------+
| networking-ofagent_           | ml2                   |
+-------------------------------+-----------------------+
| networking-onos_              | ml2,l3                |
+-------------------------------+-----------------------+
| networking-ovn_               | core                  |
+-------------------------------+-----------------------+
| networking-sfc_               | sfc                   |
+-------------------------------+-----------------------+
| neutron_                      | base,l2,ml2,core,l3   |
+-------------------------------+-----------------------+
| neutron-lbaas_                | lb,dashboard          |
| neutron-lbaas-dashboard_      |                       |
| octavia_                      |                       |
+-------------------------------+-----------------------+
| neutron-fwaas_                | fw                    |
+-------------------------------+-----------------------+
| neutron-lib_                  | base                  |
+-------------------------------+-----------------------+
| neutron-vpnaas_               | vpn                   |
+-------------------------------+-----------------------+
| python-neutronclient_         | client                |
+-------------------------------+-----------------------+
| python-neutron-pd-driver_     | pd                    |
+-------------------------------+-----------------------+

Affiliated projects
~~~~~~~~~~~~~~~~~~~

This table shows the affiliated projects that integrate with Neutron, in one
form or another. These projects typically leverage the pluggable
capabilities of Neutron, the Neutron API, or a combination of both.

+-------------------------------+-----------------------+
| Name                          | Functionality         |
+===============================+=======================+
| kuryr_                        | docker                |
+-------------------------------+-----------------------+
| networking-ale-omniswitch_    | ml2                   |
+-------------------------------+-----------------------+
| networking-arista_            | ml2,l3                |
+-------------------------------+-----------------------+
| networking-bigswitch_         | ml2,core,l3           |
+-------------------------------+-----------------------+
| networking-brocade_           | ml2,l3                |
+-------------------------------+-----------------------+
| networking-cisco_             | core,ml2,l3,fw,vpn    |
+-------------------------------+-----------------------+
| networking-edge-vpn_          | vpn                   |
+-------------------------------+-----------------------+
| networking-fujitsu_           | ml2                   |
+-------------------------------+-----------------------+
| networking-hyperv_            | ml2                   |
+-------------------------------+-----------------------+
| networking-infoblox_          | ipam                  |
+-------------------------------+-----------------------+
| networking-mlnx_              | ml2                   |
+-------------------------------+-----------------------+
| networking-nec_               | core                  |
+-------------------------------+-----------------------+
| networking-plumgrid_          | core                  |
+-------------------------------+-----------------------+
| networking-powervm_           | ml2                   |
+-------------------------------+-----------------------+
| nuage-openstack-neutron_      | core                  |
+-------------------------------+-----------------------+
| networking-ovs-dpdk_          | ml2                   |
+-------------------------------+-----------------------+
| networking-vsphere_           | ml2                   |
+-------------------------------+-----------------------+
| vmware-nsx_                   | core                  |
+-------------------------------+-----------------------+

Project Teams FAQ
~~~~~~~~~~~~~~~~~

**Q: When talking about contributor overlap, what is a contributor?**

A Neutron contributor is someone who spends some portion of their time
helping with all of the things needed to run the Neutron project: bug
triage, writing and reviewing blueprints, writing and reviewing code,
writing and reviewing documentation, helping debug issues found by users or
CI, and more.

**Q: Why choose contributor overlap over technical overlap?**

Technical overlap, or software qualities, is more difficult to pinpoint and
requires a more extensive assessment from the PTL and the Neutron team,
which in turn has the danger of translating itself into a nearly full-time
policing/enforcement job. Wrongdoing will always be spotted, regardless of
which criteria are applied, and trusting known members of the team to do the
right thing should be an adequate safety net to preserve the sanity of
Neutron as a whole.

**Q: What does a sub-project gain as a part of the Neutron project team?**

A project under Neutron is no more an official part of OpenStack than
another OpenStack project team. Projects under Neutron share some resources.
In particular, they get managed backports, managed releases, managed CVEs,
RFEs, bugs, docs, and everything else that pertains to the SDLC of the
Neutron end-to-end project.

**Q: Why is kuryr a separate project?**

Kuryr was started and incubated within the Neutron team. However, it
interfaces with Neutron as a client of the Neutron API, so it makes sense
to stand as an independent project.

**Q: Why are several "advanced service" projects still included under Neutron?**

neutron-lbaas, neutron-fwaas, and neutron-vpnaas are all included under the
Neutron project team largely for historical reasons.
They were originally a part of neutron itself and are still a part of the
neutron deliverable in terms of OpenStack governance. Because of the
deliverable inclusion, they should really only be considered for a move on
a release boundary.

**Q: Why is Octavia included under Neutron?**

neutron-lbaas, neutron-lbaas-dashboard, and Octavia are all considered a
unit. If we split one, we need to split them together. We can't split these
yet, as they are a part of the official "neutron" deliverable. This needs
to be done on a release boundary when the lbaas team is ready to do so.

.. _networking-ale-omniswitch:

ALE Omniswitch
++++++++++++++

* Git: https://git.openstack.org/cgit/openstack/networking-ale-omniswitch
* Launchpad: https://launchpad.net/networking-ale-omniswitch
* Pypi: https://pypi.python.org/pypi/networking-ale-omniswitch

.. _networking-arista:

Arista
++++++

* Git: https://git.openstack.org/cgit/openstack/networking-arista
* Launchpad: https://launchpad.net/networking-arista
* Pypi: https://pypi.python.org/pypi/networking-arista

.. _networking-bagpipe:

BaGPipe
+++++++

* Git: https://git.openstack.org/cgit/openstack/networking-bagpipe

.. _networking-bgpvpn:

BGPVPN
++++++

* Git: https://git.openstack.org/cgit/openstack/networking-bgpvpn

.. _networking-bigswitch:

Big Switch Networks
+++++++++++++++++++

* Git: https://git.openstack.org/cgit/openstack/networking-bigswitch
* Pypi: https://pypi.python.org/pypi/bsnstacklib

.. _networking-brocade:

Brocade
+++++++

* Git: https://git.openstack.org/cgit/openstack/networking-brocade
* Launchpad: https://launchpad.net/networking-brocade
* PyPI: https://pypi.python.org/pypi/networking-brocade

.. _networking-calico:

Calico
++++++

* Git: https://git.openstack.org/cgit/openstack/networking-calico
* Launchpad: https://launchpad.net/networking-calico
* PyPI: https://pypi.python.org/pypi/networking-calico

.. _networking-cisco:

Cisco
+++++

* Git: https://git.openstack.org/cgit/openstack/networking-cisco
* Launchpad: https://launchpad.net/networking-cisco
* PyPI: https://pypi.python.org/pypi/networking-cisco

.. _dragonflow:

DragonFlow
++++++++++

* Git: https://git.openstack.org/cgit/openstack/dragonflow
* Launchpad: https://launchpad.net/dragonflow
* PyPI: https://pypi.python.org/pypi/DragonFlow

.. _networking-edge-vpn:

Edge VPN
++++++++

* Git: https://git.openstack.org/cgit/openstack/networking-edge-vpn
* Launchpad: https://launchpad.net/edge-vpn

.. _networking-fujitsu:

FUJITSU
+++++++

* Git: https://git.openstack.org/cgit/openstack/networking-fujitsu
* Launchpad: https://launchpad.net/networking-fujitsu
* PyPI: https://pypi.python.org/pypi/networking-fujitsu

.. _networking-hyperv:

Hyper-V
+++++++

* Git: https://git.openstack.org/cgit/openstack/networking-hyperv
* Launchpad: https://launchpad.net/networking-hyperv
* PyPI: https://pypi.python.org/pypi/networking-hyperv

.. _networking-infoblox:

Infoblox
++++++++

* Git: https://git.openstack.org/cgit/openstack/networking-infoblox
* Launchpad: https://launchpad.net/networking-infoblox
* PyPI: https://pypi.python.org/pypi/networking-infoblox

.. _kuryr:

Kuryr
+++++

* Git: https://git.openstack.org/cgit/openstack/kuryr/
* Launchpad: https://launchpad.net/kuryr
* PyPI: https://pypi.python.org/pypi/kuryr/

.. _networking-l2gw:

L2 Gateway
++++++++++

* Git: https://git.openstack.org/cgit/openstack/networking-l2gw
* Launchpad: https://launchpad.net/networking-l2gw
MidoNet
+++++++

* Git: https://git.openstack.org/cgit/openstack/networking-midonet
* Launchpad: https://launchpad.net/networking-midonet
* PyPI: https://pypi.python.org/pypi/networking-midonet

.. _networking-mlnx:

Mellanox
++++++++

* Git: https://git.openstack.org/cgit/openstack/networking-mlnx
* Launchpad: https://launchpad.net/networking-mlnx

.. _networking-nec:

NEC
+++

* Git: https://git.openstack.org/cgit/openstack/networking-nec
* Launchpad: https://launchpad.net/networking-nec
* PyPI: https://pypi.python.org/pypi/networking-nec

.. _neutron:

Neutron
+++++++

* Git: https://git.openstack.org/cgit/openstack/neutron
* Launchpad: https://launchpad.net/neutron

.. _python-neutronclient:

Neutron Client
++++++++++++++

* Git: https://git.openstack.org/cgit/openstack/python-neutronclient
* Launchpad: https://launchpad.net/python-neutronclient

.. _python-neutron-pd-driver:

Neutron Prefix Delegation
+++++++++++++++++++++++++

* Git: https://git.openstack.org/cgit/openstack/python-neutron-pd-driver

.. _neutron-fwaas:

Neutron FWaaS
+++++++++++++

* Git: https://git.openstack.org/cgit/openstack/neutron-fwaas
* Launchpad: https://launchpad.net/neutron

.. _neutron-lbaas:

Neutron LBaaS
+++++++++++++

* Git: https://git.openstack.org/cgit/openstack/neutron-lbaas
* Launchpad: https://launchpad.net/neutron

.. _neutron-lbaas-dashboard:

Neutron LBaaS Dashboard
+++++++++++++++++++++++

* Git: https://git.openstack.org/cgit/openstack/neutron-lbaas-dashboard
* Launchpad: https://launchpad.net/neutron

.. _neutron-lib:

Neutron Library
+++++++++++++++

* Git: https://git.openstack.org/cgit/openstack/neutron-lib
* Launchpad: https://launchpad.net/neutron

.. _neutron-vpnaas:

Neutron VPNaaS
++++++++++++++

* Git: https://git.openstack.org/cgit/openstack/neutron-vpnaas
* Launchpad: https://launchpad.net/neutron

.. _nuage-openstack-neutron:

Nuage
+++++

* Git: https://github.com/nuagenetworks/nuage-openstack-neutron

.. _networking-odl:

OpenDaylight
++++++++++++

* Git: https://git.openstack.org/cgit/openstack/networking-odl
* Launchpad: https://launchpad.net/networking-odl

.. _networking-ofagent:

OpenFlow Agent (ofagent)
++++++++++++++++++++++++

* Git: https://git.openstack.org/cgit/openstack/networking-ofagent
* Launchpad: https://launchpad.net/networking-ofagent
* PyPI: https://pypi.python.org/pypi/networking-ofagent

.. _networking-onos:

Open Network Operating System (onos)
++++++++++++++++++++++++++++++++++++

* Git: https://git.openstack.org/cgit/openstack/networking-onos
* Launchpad: https://launchpad.net/networking-onos
* PyPI: https://pypi.python.org/pypi/networking-onos

.. _networking-ovn:

Open Virtual Network
++++++++++++++++++++

* Git: https://git.openstack.org/cgit/openstack/networking-ovn
* Launchpad: https://launchpad.net/networking-ovn
* PyPI: https://pypi.python.org/pypi/networking-ovn

.. _networking-ovs-dpdk:

Open DPDK
+++++++++

* Git: https://git.openstack.org/cgit/openstack/networking-ovs-dpdk
* Launchpad: https://launchpad.net/networking-ovs-dpdk

.. _networking-plumgrid:

PLUMgrid
++++++++

* Git: https://git.openstack.org/cgit/openstack/networking-plumgrid
* Launchpad: https://launchpad.net/networking-plumgrid
* PyPI: https://pypi.python.org/pypi/networking-plumgrid

.. _networking-powervm:

PowerVM
+++++++

* Git: https://git.openstack.org/cgit/openstack/networking-powervm
* Launchpad: https://launchpad.net/networking-powervm
* PyPI: https://pypi.python.org/pypi/networking-powervm

.. _networking-sfc:

SFC
+++

* Git: https://git.openstack.org/cgit/openstack/networking-sfc

.. _networking-vsphere:
vSphere
+++++++

* Git: https://git.openstack.org/cgit/openstack/networking-vsphere
* Launchpad: https://launchpad.net/networking-vsphere

.. _vmware-nsx:

VMware NSX
++++++++++

* Git: https://git.openstack.org/cgit/openstack/vmware-nsx
* Launchpad: https://launchpad.net/vmware-nsx
* PyPI: https://pypi.python.org/pypi/vmware-nsx

.. _octavia:

Octavia
+++++++

* Git: https://git.openstack.org/cgit/openstack/octavia
* Launchpad: https://launchpad.net/octavia

neutron-8.4.0/doc/source/stadium/sub_project_guidelines.rst

..
    Licensed under the Apache License, Version 2.0 (the "License"); you may
    not use this file except in compliance with the License. You may obtain
    a copy of the License at

        http://www.apache.org/licenses/LICENSE-2.0

    Unless required by applicable law or agreed to in writing, software
    distributed under the License is distributed on an "AS IS" BASIS,
    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
    implied. See the License for the specific language governing
    permissions and limitations under the License.


    Convention for heading levels in Neutron devref:
    =======  Heading 0 (reserved for the title in a document)
    -------  Heading 1
    ~~~~~~~  Heading 2
    +++++++  Heading 3
    '''''''  Heading 4
    (Avoid deeper levels because they do not render well.)


Sub-Project Guidelines
======================

This document provides guidance for those who maintain projects that
consume the main neutron repository or the neutron advanced services
repositories as a dependency. It is not meant to describe projects that
are not tightly coupled with Neutron code.

Code Reuse
----------

At all times, avoid using any Neutron symbols that are explicitly marked
as private (those have an underscore at the start of their names). Try to
avoid copy-pasting code from Neutron in order to extend it. Instead, rely
on the enormous number of plugin entry points provided by Neutron (L2
agent extensions, API extensions, service plugins, core plugins, ML2
mechanism drivers, etc.).

Oslo Incubator
~~~~~~~~~~~~~~

Don't ever reuse neutron code that comes from oslo-incubator in your
subprojects. In the neutron repository, that code is usually located under
the following path: neutron.openstack.common.*

If you need any oslo-incubator code in your repository, copy it into your
repository from oslo-incubator and then use it from there. The Neutron
team does not maintain any backwards compatibility strategy for that code
subtree and can break anyone who relies on it at any time.

Requirements
------------

Neutron dependency
~~~~~~~~~~~~~~~~~~

Subprojects usually depend on neutron repositories, using the -e git://...
schema to define such a dependency. The dependency *must not* be present
in requirements lists though, and instead belongs in the tox.ini deps
section. This is because future pbr library releases do not guarantee that
-e git://... dependencies will work. You may still put some versioned
neutron dependency in your requirements list to indicate the dependency
for anyone who packages your subproject.

Explicit dependencies
~~~~~~~~~~~~~~~~~~~~~

Each neutron project maintains its own lists of requirements. Subprojects
that depend on neutron while directly using some of the libraries that
neutron maintains as its dependencies must not rely on the fact that
neutron will pull the needed dependencies for them. Direct library usage
requires that the library be mentioned in the requirements lists of the
subproject.
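For example, a subproject's configuration might look like the following
sketch. The subproject layout, version pins, and branch are hypothetical
and only illustrate the two rules above (the -e dependency lives in
tox.ini, while directly-used libraries are listed in requirements.txt)::

    # tox.ini (sketch)
    [testenv]
    deps = -r{toxinidir}/requirements.txt
           -e git+https://git.openstack.org/openstack/neutron@master#egg=neutron

    # requirements.txt (sketch)
    # Libraries the subproject imports directly are listed explicitly,
    # even though neutron would pull them in anyway; versions are made up.
    oslo.config>=3.7.0
    oslo.log>=3.11.0

On a stable branch, the -e dependency should point at the corresponding
neutron stable branch (e.g. @stable/mitaka) rather than master, as
described under "Stable branches" below.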
The reason to duplicate those dependencies is that the neutron team does
not stick to any backwards compatibility strategy with regard to its
requirements lists, and is free to drop any of those dependencies at any
time, breaking anyone who relies on those libraries being pulled in by
neutron itself.

Automated requirements updates
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

At all times, subprojects that use neutron as a dependency should make
sure their dependencies do not conflict with neutron's. Core neutron
projects maintain their requirements lists by utilizing a so-called
proposal bot. To keep your subproject in sync with neutron, it is highly
recommended that you register your project in the
openstack/requirements:projects.txt file to enable the bot to update
requirements for you.

Once a subproject opts in to global requirements synchronization, it
should enable check-requirements jobs in project-config. For example, see
`this patch `_.

Stable branches
---------------

Stable branches for subprojects should be created at the same time as the
corresponding neutron stable branches. This is to avoid situations where a
postponed cut-off results in a stable branch that contains patches that
belong to the next release. That would require reverting patches, which is
something you should avoid.

Make sure your neutron dependency uses the corresponding stable branch for
neutron, not master.

Note that to keep requirements in sync with core neutron repositories in
stable branches, you should make sure that your project is registered in
openstack/requirements:projects.txt *for the branch in question*.

Subproject stable branches are supervised by the horizontal
`neutron-stable-maint team `_.

More info on the stable branch process can be found on `the following page
`_.

Stable merge requirements
-------------------------

Merges into stable branches are handled by members of the
`neutron-stable-maint gerrit group `_. The reason for this is to ensure
consistency among stable branches, and compliance with policies for stable
backports.

For sub-projects that participate in the Neutron Stadium effort and that
also create and utilize stable branches, there is an expectation around
what is allowed to be merged in these stable branches. The Stadium
projects should be following the stable branch policies as defined on the
`Stable Branch wiki `_. This means that, among other things, no features
are allowed to be backported into stable branches.

Releases
--------

It is suggested that sub-projects release new tarballs on PyPI from time
to time, especially for stable branches. It will make the life of
packagers and other consumers of your code easier.

It is highly suggested that you do not strip pieces of the source tree
(tests, executables, tools) before releasing on PyPI: those missing pieces
may be needed to validate the package, or to make the packaging easier or
more complete. As a rule of thumb, don't strip anything from the source
tree unless absolutely necessary.

Sub-Project Release Process
~~~~~~~~~~~~~~~~~~~~~~~~~~~

Only members of the `neutron-release `_ gerrit group can do the following
release related tasks:

* Make releases
* Create stable branches
* Make stable branches end of life

Make sure you talk to a member of neutron-release to perform these tasks.
Follow the process found `here `_ for creating a bug for your request.

To release a sub-project, follow these steps:

* For projects which have not moved to post-versioning, we need to push an
  alpha tag to avoid pbr complaining.
  A member of the neutron-release group will handle this.
* A sub-project owner should modify setup.cfg to remove the version (if
  you have one), which moves your project to post-versioning, similar to
  all the other Neutron projects. You can skip this step if you don't have
  a version in setup.cfg.
* A member of neutron-release will then `tag the release `_, which will
  release the code to PyPI.
* The releases will now be on PyPI. A sub-project owner should verify this
  by going to a URL similar to `this `_.
* A sub-project owner should next go to Launchpad and release this version
  using the "Release Now" button for the release itself.
* If a sub-project uses the "delay-release" option, a sub-project owner
  should update any bugs that were fixed with this release to "Fix
  Released" in Launchpad. This step is not necessary if the sub-project
  uses the "direct-release" option, which is the
  default. [#jeepyb_release_options]_
* The new release will be available on `OpenStack Releases `_.
* A sub-project owner should add the next milestone to the Launchpad
  series, or if a new series is required, create the new series and a new
  milestone.
* Finally, a sub-project owner should send an email to the
  openstack-announce mailing list announcing the new release.

To make a branch end of life, follow these steps:

* A member of neutron-release will abandon all open change reviews on the
  branch.
* A member of neutron-release will push an EOL tag on the branch
  (e.g. "icehouse-eol").
* A sub-project owner should request the infrastructure team to delete the
  branch by sending an email to the infrastructure mailing list, not by
  bothering the infrastructure team on IRC.
* A sub-project owner should tweak the Jenkins jobs in project-config, if
  any.

References
~~~~~~~~~~

.. [#jeepyb_release_options] http://lists.openstack.org/pipermail/openstack-dev/2015-December/081724.html

neutron-8.4.0/doc/source/stadium/index.rst

..
    Copyright 2014 Hewlett-Packard Development Company, L.P.

    Licensed under the Apache License, Version 2.0 (the "License"); you may
    not use this file except in compliance with the License. You may obtain
    a copy of the License at

        http://www.apache.org/licenses/LICENSE-2.0

    Unless required by applicable law or agreed to in writing, software
    distributed under the License is distributed on an "AS IS" BASIS,
    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
    implied. See the License for the specific language governing
    permissions and limitations under the License.

Neutron Stadium
================

The Stadium Guide contains information on policies and procedures for the
Neutron Stadium.

.. toctree::
   :maxdepth: 3

   sub_projects
   sub_project_guidelines

neutron-8.4.0/doc/source/policies/blueprints.rst

Blueprints and Specs
====================

The Neutron team uses the `neutron-specs `_ repository for its
specification reviews. Detailed information can be found on the `wiki `_.
Please also find additional information in the reviews.rst file.

The Neutron team does not enforce deadlines for specs. These can be
submitted throughout the release cycle.
The drivers team will review these on a regular basis throughout the
release and, based on the load for the milestones, will assign them to
milestones or move them to the backlog for selection into a future
release.

Please note that we use a `template `_ for spec submissions. It is not
required to fill out all sections in the template. Review of the spec may
require filling in information left out by the submitter.

Sub-Projects and Specs
----------------------

The `neutron-specs `_ repository is only meant for specs from Neutron
itself, as well as the advanced services repositories. This includes
FWaaS, LBaaS, and VPNaaS. Other sub-projects are encouraged to fold their
specs into their own devref code in their sub-project gerrit repositories.
Please see the additional comments in the Neutron teams `section `_ for
reviewer requirements of the neutron-specs repository.

Neutron Request for Feature Enhancements
----------------------------------------

In Liberty the team introduced the concept of feature requests. Feature
requests are tracked as Launchpad bugs, tagged with the new 'rfe' tag, and
allow for the submission and review of these feature requests before code
is submitted. This allows the team to verify the validity of a feature
request before the process of submitting a neutron-spec is undertaken, or
code is written. It also allows the community to express interest in a
feature by subscribing to the bug and posting a comment in Launchpad.

The 'rfe' tag should not be used for work that is already well-defined and
has an assignee. If you are intending to submit code immediately, a simple
bug report will suffice. Note that the temptation to game the system
exists, but given the history in Neutron for this type of activity, it
will not be tolerated and will be called out as such in public on the
mailing list.

RFEs can be submitted by anyone, and by having the community vote on them
in Launchpad, we can gauge interest in features. The drivers team will
evaluate these on a weekly basis along with the specs. RFEs will be
evaluated in the current cycle against existing project priorities and
available resources.

The process for moving work from RFEs into the code involves someone
assigning themselves the RFE bug and filing a matching spec using the
slimmed down template in the neutron-specs repository. The spec will then
be reviewed by the community and approved by the drivers team before
landing in a release. This is the same process as before RFEs existed in
Neutron.

The workflow for the life of an RFE in Launchpad is as follows:

* The bug is submitted and will by default land in the "New" state.
* As soon as a member of the neutron-drivers team acknowledges the bug, it
  will be moved into the "Confirmed" state. No assignee or milestone is
  set at this time. The importance will be set to 'Wishlist' to signal the
  fact that the report is indeed a feature or enhancement and there is no
  severity associated with it.
* The bug goes into the "Triaged" state while the discussion is ongoing.
* The neutron-drivers team will evaluate the RFE and may advise the
  submitter to file a spec in neutron-specs to elaborate on the feature
  request, in case the RFE requires extra scrutiny, more design
  discussion, etc.
* The PTL will work with the Lieutenant for the area identified by the RFE
  to evaluate resources against the current workload.
* If a spec is necessary, a member of the Neutron release team will
  register a matching Launchpad blueprint to be used for milestone
  tracking purposes, and as a landing page for the spec document, as
  available on `specs.o.o. `_. The blueprint will then be linked to the
  original RFE bug report. This step will ensure higher visibility of the
  RFE over the other RFEs, and consistency across the various fields
  required during the blueprint registration process (Approver, Drafter,
  etc.). More precisely, the blueprint submitter will work with the RFE
  submitter to identify the following:

  * Priority: there will be only two priorities to choose from, High and
    Low. It is worth noting that priority is not to be confused with
    `importance `_, which is a property of Launchpad Bugs. Priority gives
    an indication of how promptly a work item should be tackled to allow
    it to complete. High priority is to be chosen for work items that must
    make substantial progress in the span of the targeted release, and
    that deal with the following aspects:

    * OpenStack cross-project interaction and interoperability issues;
    * Issues that affect the existing system's usability;
    * Stability and testability of the platform;
    * Risky implementations that may require complex and/or pervasive
      changes to the API and the logical model;

    Low priority is to be chosen for everything else. RFEs without an
    associated blueprint are effectively equivalent to low priority items.
    Bear in mind that, even though staffing should take priorities into
    account (i.e. by giving more resources to high priority items over low
    priority ones), the open source reality is that they can both proceed
    at their own pace, and low priority items can indeed complete faster
    than high priority ones, even though they are given fewer resources.

  * Drafter: who is going to submit and iterate on the spec proposal;
    he/she may be the RFE submitter.
  * Assignee: who is going to develop the bulk of the code, or the go-to
    contributor, if more people are involved. Typically this is the RFE
    submitter, but not necessarily.
  * Approver: a member of the Neutron team who can commit enough time
    during the ongoing release cycle to ensure that code posted for review
    does not languish, and that all aspects of the feature development are
    taken care of (client and server changes, and/or support from other
    projects if needed - tempest, nova, openstack-infra, devstack, etc.),
    as well as comprehensive testing. This is typically a core member who
    has enough experience with what it takes to get code merged, but other
    resources amongst the wider team can also be identified. Approvers are
    volunteers who show a specific interest in the blueprint
    specification, and have enough insight into the area of work so that
    they can make effective code reviews and provide design feedback. An
    approver will not work in isolation, as he/she can and will reach out
    for help to get the job done; however he/she is the main point of
    contact with the following responsibilities:

    * Pair up with the drafter/assignee in order to help clear development
      blockers.
    * Review patches associated with the blueprint: approver and assignee
      should touch base regularly and ping each other when new code is
      available for review, or if review feedback goes unaddressed.
    * Reach out to other reviewers for feedback in areas that may fall
      outside his/her zone of confidence.
    * Escalate issues, and raise warnings to the release team/PTL if the
      effort shows slow progress.
      Approver and assignee are both key to landing a blueprint: should
      the approver and/or assignee be unable to continue the commitment
      during the release cycle, it is the approver's responsibility to
      reach out to the release team/PTL so that replacements can be
      identified.
    * Provide a status update during the Neutron IRC meeting, if required.

    Approver `assignments `_ must be carefully identified to ensure that
    no-one overcommits. A Neutron contributor also develops code
    himself/herself, and if he/she is an approver of more than a couple of
    blueprints in a single cycle/milestone (depending on the complexity of
    the specs), it may mean that he/she is clearly oversubscribed.

    The Neutron team will review the status of blueprints targeted for the
    milestone during their weekly meeting to ensure a smooth progression
    of the work planned. Blueprints for which resources cannot be
    identified will have to be deferred.

* In either case (a spec being required or not), once the discussion has
  happened and there is positive consensus on the RFE, the report is
  'approved', and its tag will move from 'rfe' to 'rfe-approved'.
* At this point, the RFE needs resources, and if none are identified for
  some time, the report will be marked incomplete.
* As for setting the milestone (both for RFE bugs and blueprints), the
  current milestone is always chosen, assuming that work will start as
  soon as the feature is approved. Work that fails to complete by the
  defined milestone will roll over automatically until it gets completed
  or abandoned.
* If the code fails to merge, the bug report may be marked as incomplete,
  unassigned and untargeted, and it will be garbage collected by the
  Launchpad Janitor if no-one takes over in time. Renewed interest in the
  feature will have to go through the RFE submission process once again.

In summary:

+------------+-----------------------------------------------------------------------------+
| State      | Meaning                                                                     |
+============+=============================================================================+
| New        | This is where all RFE's start, as filed by the community.                  |
+------------+-----------------------------------------------------------------------------+
| Incomplete | Drivers/LTs - Move to this state to mean, "more needed before proceeding"  |
+------------+-----------------------------------------------------------------------------+
| Confirmed  | Drivers/LTs - Move to this state to mean, "yeah, I see that you filed it"  |
+------------+-----------------------------------------------------------------------------+
| Triaged    | Drivers/LTs - Move to this state to mean, "discussion is ongoing"          |
+------------+-----------------------------------------------------------------------------+
| Won't Fix  | Drivers/LTs - Move to this state to reject an RFE.                         |
+------------+-----------------------------------------------------------------------------+

Once triaging is complete (i.e. the discussion has concluded) and the RFE
is approved, the tag goes from 'rfe' to 'rfe-approved', and at this point
the bug report goes through the usual state transitions. Note that the
importance will be set to 'wishlist', to reflect the fact that the bug
report is indeed not a bug, but a new feature or enhancement. This will
also help RFEs that are not followed up by a blueprint stand out in the
Launchpad `milestone dashboards `_.
The drivers team will be discussing the following bug reports during their
IRC meeting:

* `New RFE's `_
* `Incomplete RFE's `_
* `Confirmed RFE's `_
* `Triaged RFE's `_

RFE Submission Guidelines
-------------------------

Before we dive into the guidelines for writing a good RFE, it is worth
mentioning that, depending on your level of engagement with the Neutron
project and your role (user, developer, deployer, operator, etc.), you are
more than welcome to have a preliminary discussion of a potential RFE by
reaching out to other people involved in the project. This usually happens
by posting mails on the relevant mailing lists (e.g. `openstack-dev `_ -
include [neutron] in the subject) or on the #openstack-neutron IRC channel
on Freenode. If current ongoing code reviews are related to your feature,
posting comments/questions on gerrit may also be a way to engage. Some
amount of interaction with Neutron developers will give you an idea of the
plausibility and form of your RFE before you submit it. That said, this is
not mandatory.

When you submit a bug report on
https://bugs.launchpad.net/neutron/+filebug, there are two fields that
must be filled: 'summary' and 'further information'. The 'summary' must be
brief enough to fit in one line: if you can't describe it in a few words,
it may mean that you are either trying to capture more than one RFE at
once, or that you are having a hard time defining what you are trying to
solve at all.

The 'further information' section must be a description of what you would
like to see implemented in Neutron. The description should provide enough
details for a knowledgeable developer to understand what the existing
problem in the current platform is that needs to be addressed, or what the
enhancement is that would make the platform more capable, from both a
functional and a non-functional standpoint. To this aim it is important to
describe 'why' you believe the RFE should be accepted, and to explain why
Neutron would be a poorer platform without it. The description should be
self-contained, and no external references should be necessary to further
explain the RFE.

In other words, when you write an RFE you should ask yourself the
following questions:

* What is it that I (specify what user - a user can be a human or another
  system) cannot do today when interacting with Neutron? On the other
  hand, is there a Neutron component X that is unable to accomplish
  something?
* Is there something that you would like Neutron to handle better,
  i.e. in a more scalable, or in a more reliable way?
* What is it that I would like to see happen after the RFE is accepted and
  implemented?
* Why do you think it is important?

Once you are happy with what you wrote, add 'rfe' as a tag, and submit. Do
not worry, we are here to help you get it right! Happy hacking.

Missing your target
-------------------

There are occasions when a spec will be approved but the code will not
land in the cycle it was targeted for. For these cases, the workflow to
get the spec into the next release is as follows:

* During the RC window, the PTL will create a directory named '' under the
  'backlog' directory in the neutron-specs repo, and he/she will move all
  specs that did not make the release to this directory.
* Anyone can propose a patch to neutron-specs which moves a spec from the
  previous release into the new release directory; a sketch of such a
  re-proposal is shown after this list.

The specs which are moved in this way can be fast-tracked into the next
release. Please note that it is required to re-propose the spec for the
new release.
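A re-proposal is just an ordinary gerrit change against the neutron-specs
repository; the sketch below assumes hypothetical release names and a
hypothetical spec filename::

    git clone https://git.openstack.org/openstack/neutron-specs
    cd neutron-specs
    # move the spec from the previous release's backlog into the
    # directory for the new release
    git mv specs/backlog/mitaka/my-feature.rst specs/newton/my-feature.rst
    git commit -m "Re-propose my-feature spec for Newton"
    git review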
Documentation
-------------

The above process involves two places where any given feature can start to
be documented - namely in the RFE bug and in the spec - and in addition to
those, Neutron has a substantial `developer reference guide `_ (aka
'devref'), and user-facing docs such as the `networking guide `_. So it
might be asked:

* What is the relationship between all of those?
* What is the point of devref documentation, if everything has already
  been described in the spec?

The answers have been beautifully expressed in an `openstack-dev post `_:

1. RFE: "I want X"
2. Spec: "I plan to implement X like this"
3. devref: "How X is implemented and how to extend it"
4. OS docs: "API and guide for using X"

Once a feature X has been implemented, we shouldn't have to go back to its
RFE bug or spec to find information on it. The devref may reuse a lot of
content from the spec, but the spec is not maintained and the
implementation may differ in some ways from what was intended when the
spec was agreed. The devref should be kept current with refactorings,
etc., of the implementation.

Devref content should be added as part of the implementation of a new
feature. Since the spec is not maintained after the feature is
implemented, the devref should include a maintained version of the
information from the spec.

If a feature requires OS docs (4), the commit message for the feature
patch shall include 'DocImpact'. If the feature is purely a
developer-facing thing, (4) is not needed.

neutron-8.4.0/doc/source/policies/thirdparty-ci.rst

Neutron Third-party CI
======================

What Is Expected of Third Party CI System for Neutron
-----------------------------------------------------

As of the Liberty summit, Neutron no longer *requires* a third-party CI,
but it is strongly encouraged, as internal neutron refactoring can break
external plugins and drivers at any time.

Neutron expects any Third Party CI system that interacts with gerrit to
follow the requirements set by the Infrastructure team [1]_ as well as the
Neutron Third Party CI guidelines below. Please ping the PTL in
#openstack-neutron or send an email to the openstack-dev ML (with subject
[neutron]) with any questions. Be aware that the Infrastructure
documentation as well as this document are living documents and undergo
changes. Track changes to the infrastructure documentation using this URL
[2]_ (and please review the patches), and check this doc on a regular
basis for updates.

What Changes to Run Against
---------------------------

If your code is a neutron plugin or driver, you should run against every
neutron change submitted, except for docs, tests, tools, and top-level
setup files. You can skip your CI runs for such exceptions by using
``skip-if`` and ``all-files-match-any`` directives in Zuul, as in the
sketch below. You can see a programmatic example of the exceptions here
[3]_.
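A hypothetical Zuul v2 layout entry for a third-party job might look like
the following sketch. The job name and the file patterns are made up for
illustration and are not taken from the actual project-config layout::

    jobs:
      - name: dsvm-networking-foo
        skip-if:
          - project: ^openstack/neutron$
            all-files-match-any:
              - ^.*\.rst$
              - ^doc/.*$
              - ^tools/.*$
              - ^setup\.(cfg|py)$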
If your code is in a neutron-\*aas repo, you should run against the tests
for that repo. You may also run against every neutron change, if your
service driver is using neutron interfaces that are not provided by your
service plugin (e.g. loadbalancer/plugin.py). If you are using only plugin
interfaces, it should be safe to test against only the service repo tests.

What Tests To Run
-----------------

Network API tests (git link). Network scenario tests (the test_network_*
tests here). Any tests written specifically for your setup.
http://git.openstack.org/cgit/openstack/tempest/tree/tempest/api/network

Run with the test filter: 'network'. This will include all neutron
specific tests as well as any other tests that are tagged as requiring
networking. An example tempest setup for devstack-gate::

    export DEVSTACK_GATE_NEUTRON=1
    export DEVSTACK_GATE_TEMPEST_REGEX='(?!.*\[.*\bslow\b.*\])((network)|(neutron))'

An example setup for LBaaS::

    export DEVSTACK_GATE_NEUTRON=1
    export DEVSTACK_GATE_TEMPEST_REGEX='(?!.*\[.*\bslow\b.*\])(alancer|SimpleReadOnlyNeutron|tempest.api.network)'

Third Party CI Voting
---------------------

The Neutron team encourages you to NOT vote -1 with a third-party CI.
False negatives are noisy to the community, and have given -1 votes from
third-party CIs a bad reputation. Really bad, to the point of people
ignoring them all. Failure messages are useful to those doing refactors,
and provide you feedback on the state of your plugin.

If you insist on voting, note that by default the infra team will not
allow voting by new 3rd party CI systems. The way to get your 3rd party CI
system to vote is to talk with the Neutron PTL, who will let infra know
the system is ready to vote. The requirements for a new system to be given
voting rights are as follows:

* A new system must be up and running for a month, with a track record of
  voting on the sandbox system.
* A new system must correctly run and pass tests on patches for the third
  party driver/plugin for a month.
* A new system must have a logfile setup and retention setup similar to
  the below.

Once the system has been running for a month, the owner of the third party
CI system can contact the Neutron PTL to have a conversation about getting
voting rights upstream. The general process to get these voting rights is
outlined here. Please follow that, taking note of the guidelines Neutron
also places on voting for its CI systems.

A third party system can have its voting rights removed as well. If the
system becomes unstable (stops running, stops voting, or starts providing
inaccurate results), the Neutron PTL or any core reviewer will make an
attempt to contact the owner and copy the openstack-dev mailing list. If
no response is received within 2 days, the Neutron PTL will remove voting
rights for the third party CI system. If a response is received, the owner
will work to correct the issue. If the issue cannot be addressed in a
reasonable amount of time, the voting rights will be temporarily removed.

Log & Test Results Filesystem Layout
------------------------------------

Third-Party CI systems MUST provide logs and configuration data to help
developers troubleshoot test failures. A third-party CI that DOES NOT post
logs should be a candidate for removal, and new CI systems MUST post logs
before they can be awarded voting privileges.

Third party CI systems should follow the filesystem layout convention of
the OpenStack CI system. Please store your logs as viewable in a web
browser, in a directory structure. Requiring the user to download a giant
tarball is not acceptable, and will be reason to not allow your system to
vote from the start, or to cancel its voting rights if this changes while
the system is running.
At the root of the results, there should be the following:

* console.html.gz - contains the stdout output of the test run
* local.conf / localrc - contains the setup used for this run
* logs - contains the detailed test logs of the test run

The above "logs" must be a directory, which contains the following:

* Log files for each screen session that DevStack creates and launches an
  OpenStack component in
* Test result files
* testr_results.html.gz
* tempest.txt.gz

List of existing plugins and drivers
------------------------------------

https://wiki.openstack.org/wiki/Neutron_Plugins_and_Drivers#Existing_Plugin_and_Drivers

References
----------

.. [1] http://ci.openstack.org/third_party.html
.. [2] https://review.openstack.org/#/q/status:open+project:openstack-infra/system-config+branch:master+topic:third-party,n,z
.. [3] https://github.com/openstack-infra/project-config/blob/master/zuul/layout.yaml

neutron-8.4.0/doc/source/policies/neutron-teams.rst

Neutron Core Reviewers
======================

The `Neutron Core Reviewer Team `_ is responsible for many things related
to Neutron. A lot of these things include mundane tasks such as the
following:

* Ensuring the bug count is low
* Curating the gate and triaging failures
* Working on integrating shared code from projects such as Oslo
* Ensuring documentation is up to date and remains relevant
* Ensuring the level of testing for Neutron is adequate and remains
  relevant as features are added
* Helping new contributors with questions as they peel back the covers of
  Neutron
* Answering questions and participating in mailing list discussions
* Interfacing with other OpenStack teams and ensuring they are going in
  the same parallel direction
* Reviewing and merging code into the neutron tree

In essence, core reviewers share the following common ideals:

1. They share responsibility for the project's success.
2. They have made a long-term, recurring time investment to improve the
   project.
3. They spend their time doing what needs to be done to ensure the
   project's success, not necessarily what is the most interesting or fun.

A core reviewer's responsibility doesn't end with merging code; the lists
above add context around these responsibilities.

Core Review Hierarchy
---------------------

As Neutron has grown in complexity, it has become impossible for any one
person to know enough to merge changes across the entire codebase. Areas
of expertise have developed organically, and it is not uncommon for
existing cores to defer to these experts when changes are proposed.
Existing cores should be aware of the implications when they do merge
changes outside the scope of their knowledge. It is with this in mind that
we propose a new system built around Lieutenants and a model of trust.

In order to scale development and responsibility in Neutron, we have
adopted a Lieutenant system. The PTL is the leader of the Neutron project,
and ultimately responsible for decisions made in the project. The PTL has
designated Lieutenants in place to help run portions of the Neutron
project. The Lieutenants are in charge of their own areas, and they can
propose core reviewers for their areas as well. The core reviewer addition
and removal policies are in place below. The Lieutenants for each system,
while responsible for their area, ultimately report to the PTL. The PTL
may opt to have regular one-on-one meetings with the lieutenants.
The PTL will resolve disputes in the project that arise between areas of
focus, core reviewers, and other projects. Please note that Lieutenants
should be leading their own area of focus, not doing all the work
themselves.

As was mentioned in the previous section, a core's responsibilities do not
end with merging code. They are responsible for bug triage and gate issues
among other things. Lieutenants have an increased responsibility to ensure
gate and bug triage for their area of focus is under control.

The following are the current Neutron Lieutenants.

+------------------------+---------------------------+----------------------+
| Area                   | Lieutenant                | IRC nick             |
+========================+===========================+======================+
| API and DB             | Akihiro Motoki            | amotoki              |
|                        +---------------------------+----------------------+
|                        | Henry Gessau              | HenryG               |
+------------------------+---------------------------+----------------------+
| Built-In Control Plane | Kevin Benton              | kevinbenton          |
+------------------------+---------------------------+----------------------+
| Client                 | Akihiro Motoki            | amotoki              |
+------------------------+---------------------------+----------------------+
| Docs                   | Edgar Magana              | emagana              |
+------------------------+---------------------------+----------------------+
| Infra                  | Armando Migliaccio        | armax                |
|                        +---------------------------+----------------------+
|                        | Doug Wiegley              | dougwig              |
+------------------------+---------------------------+----------------------+
| L3                     | Carl Baldwin              | carl_baldwin         |
+------------------------+---------------------------+----------------------+
| Services               | Doug Wiegley              | dougwig              |
+------------------------+---------------------------+----------------------+
| Testing                | Assaf Muller              | amuller              |
+------------------------+---------------------------+----------------------+

Some notes on the above:

* "Built-In Control Plane" means the L2 agents, DHCP agents, SGs, metadata
  agents and ML2.
* The client includes commands installed server side.
* L3 includes the L3 agent, DVR, and IPAM.
* Services includes FWaaS, LBaaS, and VPNaaS.
* Note these areas may change as the project evolves due to code
  refactoring, new feature areas, and libification of certain pieces of
  code.
* Infra means interactions with infra from a neutron perspective.

Neutron also consists of several plugins, drivers, and agents that are
developed effectively as sub-projects within Neutron in their own git
repositories. Lieutenants are also named for these sub-projects to
identify a clear point of contact and leader for that area. The Lieutenant
is also responsible for updating the core review team for the
sub-project's repositories.
+------------------------+---------------------------+----------------------+
| Area                   | Lieutenant                | IRC nick             |
+========================+===========================+======================+
| dragonflow             | Eran Gampel               | gampel               |
|                        +---------------------------+----------------------+
|                        | Gal Sagie                 | gsagie               |
+------------------------+---------------------------+----------------------+
| networking-bgpvpn      | Mathieu Rohon             | matrohon             |
|                        +---------------------------+----------------------+
|                        | Thomas Morin              | tmorin               |
+------------------------+---------------------------+----------------------+
| networking-calico      | Neil Jerram               | neiljerram           |
+------------------------+---------------------------+----------------------+
| networking-l2gw        | Sukhdev Kapur             | sukhdev              |
+------------------------+---------------------------+----------------------+
| networking-midonet     | Ryu Ishimoto              | ryu25                |
|                        +---------------------------+----------------------+
|                        | Jaume Devesa              | devvesa              |
|                        +---------------------------+----------------------+
|                        | YAMAMOTO Takashi          | yamamoto             |
+------------------------+---------------------------+----------------------+
| networking-odl         | Flavio Fernandes          | flaviof              |
|                        +---------------------------+----------------------+
|                        | Kyle Mestery              | mestery              |
+------------------------+---------------------------+----------------------+
| networking-ofagent     | YAMAMOTO Takashi          | yamamoto             |
+------------------------+---------------------------+----------------------+
| networking-onos        | Vikram Choudhary          | vikram               |
|                        +---------------------------+----------------------+
|                        | Albert Dongfeng           | albert_dongfeng      |
+------------------------+---------------------------+----------------------+
| networking-ovn         | Russell Bryant            | russellb             |
+------------------------+---------------------------+----------------------+
| networking-sfc         | Cathy Zhang               | cathy                |
+------------------------+---------------------------+----------------------+
| octavia                | German Eichberger         | xgerman              |
+------------------------+---------------------------+----------------------+

Existing Core Reviewers
-----------------------

Existing core reviewers have been reviewing code for a varying number of
cycles. With the new plan of Lieutenants and ownership, it's fair to try
to understand how they fit into the new model. Existing core reviewers
seem to mostly focus on particular areas and are cognizant of their own
strengths and weaknesses. These members may not be experts in all areas,
but they know their limits, and will not exceed those limits when
reviewing changes outside their area of expertise. The model is built on
trust, and when that trust is broken, responsibilities will be taken away.

Lieutenant Responsibilities
---------------------------

In the hierarchy of Neutron responsibilities, Lieutenants are expected to
partake in the following additional activities compared to other core
reviewers:

* Ensuring feature requests for their areas have adequate testing and
  documentation coverage.
* Gate triage and resolution. Lieutenants are expected to work to keep the
  Neutron gate running smoothly by triaging issues, filing elastic recheck
  queries, and closing gate bugs.
* Triaging bugs for their specific areas.
Neutron Teams
=============

Given all of the above, Neutron has the following core reviewer teams with
responsibility over the areas of code listed below:

Neutron Core Reviewer Team
--------------------------

`Neutron core reviewers `_ have merge rights to the following git
repositories:

* `openstack/neutron `_
* `openstack/python-neutronclient `_

Please note that as we adopt the system above, with core specialties in
particular areas, we expect this broad core team to shrink as people
naturally evolve into an area of specialization.

Neutron FWaaS Core Reviewer Team
--------------------------------

Neutron `FWaaS core reviewers `_ have merge rights to the following git
repositories:

* `openstack/neutron-fwaas `_

Neutron LBaaS Core Reviewer Team
--------------------------------

Neutron `LBaaS core reviewers `_ have merge rights to the following git
repositories:

* `openstack/neutron-lbaas `_

Neutron VPNaaS Core Reviewer Team
---------------------------------

Neutron `VPNaaS core reviewers `_ have merge rights to the following git
repositories:

* `openstack/neutron-vpnaas `_

Neutron Core Reviewer Teams for Plugins and Drivers
---------------------------------------------------

The plugin decomposition effort has led to having many drivers with code
in separate repositories with their own core reviewer teams. For each one
of these repositories in the following repository list, there is a core
team associated with it:

* `Neutron project team `_

These teams are also responsible for handling their own
specs/RFEs/features if they choose to use them. However, by choosing to be
a part of the Neutron project, they submit to oversight and veto by the
Neutron PTL if any issues arise.

Neutron Specs Core Reviewer Team
--------------------------------

Neutron `specs core reviewers `_ have +2 rights to the following git
repositories:

* `openstack/neutron-specs `_

The Neutron specs core reviewer team is responsible for reviewing specs
targeted to all Neutron git repositories (Neutron + Advanced Services). It
is worth noting that specs reviewers have the following attributes, which
are potentially different from those of code reviewers:

* Broad understanding of cloud and networking technologies
* Broad understanding of core OpenStack projects and technologies
* An understanding of the effect approved specs have on the team's
  development capacity for each cycle

Specs core reviewers may match core members of the above-mentioned groups,
but the group can be extended to other individuals, if required.

Drivers Team
------------

The `drivers team `_ is the group of people who have full rights to the
specs repo. This team, which matches the `Launchpad Neutron Drivers team
`_, is instituted to ensure a consistent architectural vision for the
Neutron project, and to continue to disaggregate and share the
responsibilities of the Neutron PTL. The team is in charge of reviewing
and commenting on `RFEs `_, and working with specification contributors to
provide guidance on the processes that govern contributions to the Neutron
project as a whole. The team `meets regularly `_ to go over RFEs and
discuss the project roadmap. Anyone is welcome to join and/or read the
meeting notes.

Release Team
------------

The `release team `_ is a group of people with some additional gerrit
permissions, primarily aimed at allowing release management of Neutron
sub-projects. These permissions include:

* Ability to push signed tags to sub-projects whose releases are managed
  by the Neutron release team as opposed to the OpenStack release team.
* Ability to push merge commits for Neutron or other sub-projects.
* Ability to approve changes in all Neutron git repositories. This is
  required as the team needs to be able to quickly unblock things if
  needed, especially at release time.

Code Merge Responsibilities
===========================

While everyone is encouraged to review changes for these repositories,
members of the Neutron core reviewer group have the ability to +2/-2 and
+A changes to these repositories. This is an extra level of responsibility
not to be taken lightly. Correctly merging code requires not only
understanding the code itself, but also how the code affects things like
documentation, testing, and interactions with other projects. It also
means you pay attention to release milestones and understand whether a
patch you're merging is marked for the release, which is especially
critical during the feature freeze. The bottom line here is that merging
code is a responsibility Neutron core reviewers have.

Adding or Removing Core Reviewers
---------------------------------

A new Neutron core reviewer may be proposed at any time on the
openstack-dev mailing list. Typically, the Lieutenant for a given area
will propose a new core reviewer for their specific area of coverage,
though the Neutron PTL may propose new core reviewers as well. The
proposal is typically made after discussions with existing core reviewers.
Once a proposal has been made, three existing Neutron core reviewers from
the Lieutenant's area of focus must respond to the email with a +1. If the
member is being added by a Lieutenant from an area of focus with fewer
than three members, a simple majority will be used to determine if the
vote is successful. Another Neutron core reviewer from the same area of
focus can vote -1 to veto the proposed new core reviewer. The PTL will
mediate all disputes for core reviewer additions.

The PTL may remove a Neutron core reviewer at any time. Typically, when a
member has decreased their involvement with the project through a drop in
reviews and participation in general project development, the PTL will
propose their removal and remove them. Please note there is no voting or
vetoing of core reviewer removal. Members who have previously been core
reviewers may be fast-tracked back into a core reviewer role if their
involvement picks back up and the existing core reviewers support their
re-instatement.

Neutron Core Reviewer Membership Expectations
---------------------------------------------

Neutron core reviewers have the following expectations:

* Reasonable attendance at the weekly Neutron IRC meetings.
* Participation in Neutron discussions on the mailing list, as well as
  in-channel in #openstack-neutron.
* Participation in Neutron-related design summit sessions at the OpenStack
  Summits.

Please note that in-person attendance at design summits, mid-cycles, and
other code sprints is not a requirement to be a Neutron core reviewer. The
Neutron team will do its best to facilitate virtual attendance at all
events. Travel is not to be taken lightly, and we realize the costs
involved for those who partake in attending these events.

In addition to the above, code reviews are the most important requirement
of Neutron core reviewers. Neutron follows the documented OpenStack `code
review guidelines `_. We encourage all people to review Neutron patches,
but core reviewers are required to maintain a level of review numbers
relatively close to other core reviewers.
There are no hard statistics around code review numbers, but in general we
use 30, 60, 90 and 180 day stats when examining review stats.

* `30 day review stats `_
* `60 day review stats `_
* `90 day review stats `_
* `180 day review stats `_

There are soft-touch items around being a Neutron core reviewer as well.
Gaining trust with the existing Neutron core reviewers is important. Being
able to work together with the existing Neutron core reviewer team is
critical as well. Being a Neutron core reviewer means spending a
significant amount of time with the existing Neutron core reviewer team on
IRC, the mailing list, at Summits, and in reviews. Ensuring you
participate and engage here is critical to becoming and remaining a core
reviewer.

neutron-8.4.0/doc/source/policies/contributor-onboarding.rst

Contributor Onboarding
======================

For new contributors, the following is useful onboarding information.

Contributing to Neutron
-----------------------

Work within Neutron is discussed on the openstack-dev mailing list, as
well as in the #openstack-neutron IRC channel. While these are great
channels for engaging with Neutron, the bulk of discussion of patches and
code happens in gerrit itself.

With regards to gerrit, code reviews are a great way to learn about the
project. There is also a list of `low or wishlist `_ priority bugs which
are ideal for a new contributor to take on. If you haven't done so
already, you should set up a Neutron development environment so you can
actually run the code. Devstack is the usual convenient environment to set
this up. See `devstack.org `_ or `NeutronDevstack `_ for more information
on using Neutron with devstack.
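As a quick start, a minimal devstack ``local.conf`` that enables Neutron
might look like the sketch below. The passwords are placeholders, and the
service names are the standard devstack ones; adjust both to your setup::

    [[local|localrc]]
    ADMIN_PASSWORD=secret
    DATABASE_PASSWORD=$ADMIN_PASSWORD
    RABBIT_PASSWORD=$ADMIN_PASSWORD
    SERVICE_PASSWORD=$ADMIN_PASSWORD

    # swap nova-network out for Neutron and its agents
    disable_service n-net
    enable_service q-svc q-agt q-dhcp q-l3 q-meta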
Helping with documentation can also be a useful first step for a newcomer.
`Here `_ is a list of documentation bugs that are tagged with 'neutron';
bug reports are created here for neutron reviews with a 'DocImpact' in the
commit message.

IRC Information and Etiquette
-----------------------------

The main IRC channel for Neutron is #openstack-neutron. We also utilize
#openstack-lbaas for LBaaS-specific discussions. The weekly meeting is
documented in the `list of meetings `_ wiki page.

neutron-8.4.0/doc/source/policies/index.rst

..
    Copyright 2014 Hewlett-Packard Development Company, L.P.

    Licensed under the Apache License, Version 2.0 (the "License"); you may
    not use this file except in compliance with the License. You may obtain
    a copy of the License at

        http://www.apache.org/licenses/LICENSE-2.0

    Unless required by applicable law or agreed to in writing, software
    distributed under the License is distributed on an "AS IS" BASIS,
    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
    implied. See the License for the specific language governing
    permissions and limitations under the License.

Neutron Policies
================

In the Policies Guide, you will find documented policies for developing
with Neutron. This includes the processes we use for blueprints and specs,
bugs, contributor onboarding, core reviewer memberships, and other
procedural items.

.. toctree::
   :maxdepth: 3

   blueprints
   bugs
   contributor-onboarding
   neutron-teams
   gate-failure-triage
   code-reviews
   thirdparty-ci

neutron-8.4.0/doc/source/policies/code-reviews.rst

Neutron Code Reviews
====================

Code reviews are a critical component of all OpenStack projects. Neutron
accepts patches from many diverse people with diverse backgrounds,
employers, and experience levels. Code reviews provide a way to enforce a
level of consistency across the project, and also allow for the careful
onboarding of contributions from new contributors.

Neutron Code Review Practices
-----------------------------

Neutron follows the `code review guidelines `_ as set forth for all
OpenStack projects. It is expected that all reviewers are following the
guidelines set forth on that page.

Neutron Spec Review Practices
-----------------------------

In addition to code reviews, Neutron also maintains a BP specification git
repository. Detailed instructions for the use of this repository are
provided `here `_. It is expected that Neutron core team members are
actively reviewing specifications which are pushed out for review to the
specification repository. In addition, there is a neutron-drivers team,
composed of a handful of Neutron core reviewers, who can approve and merge
Neutron specs. Some guidelines around this process are provided below:

* Once a specification has been pushed, it is expected that it will not be
  approved for at least 3 days after the first Neutron core reviewer has
  reviewed it. This allows additional cores to review the specification.
* For blueprints which the core team deems of High or Critical importance,
  core reviewers may be assigned based on their subject matter expertise.
* Specification priority will be set by the PTL, with review by the core
  team, once the specification is approved.

Tracking Review Statistics
--------------------------

Stackalytics provides some nice interfaces to track review statistics. The
links are provided below. These statistics are used to track not only
Neutron core reviewer statistics, but also to track review statistics for
potential future core members.

* `30 day review stats `_
* `60 day review stats `_
* `90 day review stats `_
* `180 day review stats `_

neutron-8.4.0/doc/source/policies/bugs.rst

Neutron Bugs
============

Neutron (client, core, FwaaS, LBaaS, VPNaaS) maintains all of its bugs in
the following Launchpad projects:

* `Launchpad Neutron `_
* `Launchpad python-neutronclient `_

Neutron Bugs Team In Launchpad
------------------------------

The `Neutron Bugs `_ team in Launchpad is used to allow access to the
projects above. Members of the above group have the ability to set bug
priorities, target bugs to releases, and perform other administrative
tasks around bugs. The administrators of this group are the members of the
`neutron-drivers-core `_ gerrit group. Non-administrators of this group
include anyone who is involved with the Neutron project and has a desire
to assist with bug triage.

If you would like to join this Launchpad group, it's best to reach out to
a member of the above-mentioned neutron-drivers-core team in
#openstack-neutron on Freenode and let them know why you would like to be
a member. The team is more than happy to add additional bug triage
capability, but it helps to know who is requesting access, and IRC is a
quick way to make the connection.
As outlined below, the bug deputy is a volunteer who wants to help with defect management. Permissions will have to be granted as people sign up for the deputy role. The permission won't be given freely; a person must show some degree of prior involvement.

Neutron Bug Deputy
------------------

Neutron maintains the notion of a "bug deputy". The bug deputy plays an important role in the Neutron community. As a large project, Neutron is routinely fielding many bug reports. The bug deputy is responsible for acting as a "first contact" for these bug reports and performing initial screening/triaging. The bug deputy is expected to communicate with the various Neutron teams when a bug has been triaged. In addition, the bug deputy should be reporting "High" and "Critical" priority bugs.

To avoid burnout, and to give everyone a chance to gain experience in defect management, the Neutron bug deputy is a rotating role. The rotation will be set on a period (typically one or two weeks) determined by the team during the weekly Neutron IRC meeting and/or according to holidays. During the Neutron IRC meeting we will expect a volunteer to step up for the period. Members of the Neutron core team are invited to fill the role; however, non-core Neutron contributors who are interested are also encouraged to take it up. This contributor is going to be the bug deputy for the period, and he/she will be asked to report to the team during the subsequent IRC meeting. The PTL will also work with the team to assess that everyone gets his/her fair share of fulfilling this duty. It is reasonable to expect some imbalance from time to time, and the team will work together to resolve it to ensure that everyone is 100% effective and well rounded in their role as *custodian* of Neutron quality. Should the duty load be too much in busy times of the release, the PTL and the team will work together to assess whether more than one deputy is necessary in a given period.

The presence of a bug deputy does not mean the rest of the team is simply off the hook for the period; in fact, the bug deputy will have to actively work with the Lieutenants/Drivers, who should help in getting the bug reports moving down the resolution pipeline.

During the period a member acts as bug deputy, he/she is expected to watch bugs filed against the Neutron projects (as listed above) and do a first screening to determine potential severity, tagging, logstash queries, other affected projects, affected releases, etc. From time to time bugs will be filed and auto-assigned by members of the core team to get them to a swift resolution. Obviously, the deputy is exempt from screening these. Finally, the PTL will work with the deputy to produce a brief summary of the issues of the week to be shared with the larger team during the weekly IRC meeting and tracked in the meeting notes.

Getting Ready to Serve as the Neutron Bug Deputy
------------------------------------------------

If you are interested in serving as the Neutron bug deputy, there are several steps you will need to follow in order to be prepared.

* Request to be added to the `neutron-bugs team in Launchpad `_. This request will be approved when you are assigned a bug deputy slot.
* Read this page in full. Keep this document in mind at all times as it describes the duties of the bug deputy and how to triage bugs, particularly around setting the importance and tags of bugs.
* Sign up for neutron bug emails from LaunchPad.

  * Navigate to the `LaunchPad Neutron bug list `_.
  * On the right hand side, click on "Subscribe to bug mail".
  * In the pop-up that is displayed, keep the recipient as "Yourself", and name your subscription something useful like "Neutron Bugs". You can choose either option for how much mail you get, but keep in mind that getting mail for all changes - while informative - will result in several dozen emails per day at least.
  * Do the same for the `LaunchPad python-neutronclient bug list `_.

* Configure the information you get from `LaunchPad `_ to make additional information visible, especially the 'age' of the bugs. You accomplish that by clicking the little gear on the left hand side of the screen at the top of the bugs list. This provides an overview of information for each bug on a single page.
* Optional: Set up your mail client to highlight bug email that indicates a new bug has been filed, since those are the ones you will want to triage. Filter based on email from "@bugs.launchpad.net" with "[NEW]" in the subject line.
* Volunteer during the course of the Neutron team meeting, when volunteers to be bug deputy are requested (usually towards the beginning of the meeting).
* View your scheduled week on the `Neutron Meetings page `_.
* During your shift, if it is feasible for your timezone, plan on attending the Neutron Drivers meeting. That way, if you have tagged any bugs as RFE, you can be present to discuss them.

Plugin and Driver Repositories
------------------------------

Many plugins and drivers have backend code that exists in another repository. These repositories may have their own Launchpad projects for bugs. The teams working on the code in these repos assume full responsibility for bug handling in those projects. For this reason, bugs whose solution would exist solely in the plugin/driver repo should not have Neutron in the affected projects section. However, you should add Neutron (or any other project) to that list only if you expect that a patch is needed to that repo in order to solve the bug.

It's also worth adding that some of these projects are part of the so-called Neutron `stadium `_. Because of that, their release is managed centrally by the Neutron release team; requests for releases need to be funnelled and screened properly before they can happen. To this aim, the process to request a release is as follows:

* Create a bug report in your Launchpad project: provide details as to what you would like to release;
* If you provide an exact commit in the bug report then you need to be a bit careful. In most cases, you'll want to tag the *merge* commit that merges your last commit into the branch. `This bug`__ shows an instance where this mistake was caught. Notice the difference between the `incorrect commit`__ and the `correct one`__, which is the merge commit. ``git log 6191994..22dd683 --oneline`` shows that the first one misses a handful of important commits that the second one catches. This is the nature of merging to master.

.. __: https://bugs.launchpad.net/neutron/+bug/1540633
.. __: https://github.com/openstack/networking-infoblox/commit/6191994515
.. __: https://github.com/openstack/networking-infoblox/commit/22dd683e1a

* Add Neutron to the list of affected projects.
* Add the 'release-subproject' tag to the list of tags for the bug report.
* The Neutron release management team will watch these bugs, and work with you to have the request fulfilled by following the instructions found `here `_.
.. _guidelines:

Bug Screening Best Practices
----------------------------

When screening bug reports, the first step for the bug deputy is to assess how well written the bug report is, and whether there is enough information for anyone else besides the bug submitter to reproduce the bug and come up with a fix. There is plenty of information on the `OpenStack wiki `_ on how to write a good bug `report `_ and to learn how to tell a good bug report from a bad one. Should the bug report not adhere to these best practices, the bug deputy's first step would be to redirect the submitter to this section, invite him/her to supply the missing information, and mark the bug report as 'Incomplete'. For future submissions, the reporter can then use the template provided below to ensure speedy triaging. Done often enough, this practice should (ideally) ensure that in the long run, only 'good' bug reports are going to be filed.

Bug Report Template
~~~~~~~~~~~~~~~~~~~

The more information you provide, the higher the chance of speedy triaging and resolution: identifying the problem is half the solution. To this aim, when writing a bug report, please consider supplying the following details and following these suggestions:

* Summary (Bug title): keep it small, possibly one line. If you cannot describe the issue in less than 100 characters, you are probably submitting more than one bug at once.
* Further information (Bug description): unlike other bug trackers, Launchpad does not provide a structured way of submitting bug-related information; everything goes in this section. Therefore, you are invited to break down the description into the following fields:

  * High level description: provide a brief sentence (a couple of lines) of what you are trying to accomplish, or would like to accomplish differently; the 'why' is important, but can be omitted if obvious (not to you of course).
  * Pre-conditions: what is the initial state of your system? Please consider enumerating resources available in the system, if useful in diagnosing the problem. Who are you? A regular tenant or a super-user? Are you describing service-to-service interaction?
  * Step-by-step reproduction steps: these can be actual neutron client commands or raw API requests; grab the output if you think it is useful. Please consider using `paste.o.o `_ for long outputs, as Launchpad formats the description field poorly, making the reading experience somewhat painful.
  * Expected output: what did you hope to see? How would you have expected the system to behave? A specific error/success code? The output in a specific format? Or more than a user was supposed to see, or less?
  * Actual output: did the system silently fail (in this case log traces are useful)? Did you get a different response from what you expected?
  * Version:

    * OpenStack version (specific stable branch, or git hash if from trunk);
    * Linux distro, kernel. For a distro, it's also worth knowing specific versions of client and server, not just the major release;
    * Relevant underlying processes such as openvswitch, iproute, etc.;
    * DevStack or other *deployment* mechanism?

  * Environment: what services are you running (core services like DB and AMQP broker, as well as Nova/hypervisor if it matters), and which type of deployment (clustered servers)? If you are running DevStack, is it a single node? Is it multi-node? Are you reporting an issue in your own environment or something you encountered in the OpenStack CI Infrastructure, aka the Gate?
  * Perceived severity: what would you consider the `importance `_ to be?

* Tags (Affected component): try to use the existing tags by relying on auto-completion. Please refrain from creating new ones; if you need new "official" tags_, please reach out to the PTL. If you would like a fix to be backported, please add a backport-potential tag. This does not mean you are going to get the backport, as the stable team needs to follow the `stable branch policy `_ for merging fixes to stable branches.
* Attachments: consider attaching logs; truncated log snippets are rarely useful. Be proactive, and consider attaching redacted configuration files if you can, as that will speed up the resolution process greatly.

Bug Triage Process
~~~~~~~~~~~~~~~~~~

The process of bug triaging consists of the following steps:

* Check if a bug was filed for the correct component (project). If not, either change the project or mark it as "Invalid".
* For bugs that affect documentation (including autogenerated DocImpact bugs), proceed as follows. If the documentation affects

  * the ReST API, add "openstack-api-site" to the affected projects.
  * the OpenStack manuals, like the Networking Guide or the Configuration Reference, add "openstack-manuals" to the affected projects.
  * developer documentation (devref), set the bug to "Confirmed" for the project Neutron; otherwise set it to "Invalid".

* Check if a similar bug was filed before. Rely on your memory if Launchpad is not clever enough to spot a duplicate upon submission. You may also check already verified bugs for `Neutron `_ and `python-neutronclient `_ to see if the bug has been reported. If so, mark it as a duplicate of the previous bug.
* Check if the bug meets the requirements of a good bug report, by checking that the guidelines_ are being followed. Omitted information is still acceptable if the issue is clear nonetheless; use your good judgement and your experience. Consult another core member/PTL if in doubt. If the bug report needs some love, mark the bug as 'Incomplete', point the submitter to this document and hope he/she turns around quickly with the missing information.

If the bug report is sound, move on to the next steps:

* Revise tags as recommended by the submitter. Ensure they are 'official' tags. If the bug report talks about deprecating features or config variables, add a deprecation tag to the list.
* As deputy, one is usually excused from processing RFE bugs, which are the responsibility of the drivers team members.
* Depending on ease of reproduction (or if the issue can be spotted in the code), mark it as 'Confirmed'. If you are unable to assess/triage the issue because you do not have access to a repro environment, consider reaching out to the `Lieutenant `_, the go-to person for the affected component; he/she may be able to help: assign the bug to him/her for further screening. If the bug already has an assignee, check that a patch is in progress. Sometimes more than one patch is required to address an issue; make sure that there is at least one patch that 'Closes' the bug, or document/question what it takes to mark the bug as fixed.
* If the bug indicates a test or gate failure, look at the failures for that test over time using `OpenStack Health `_ or `OpenStack Logstash `_. This can help to validate whether the bug identifies an issue that is occurring all of the time, some of the time, or only for the bug submitter.
* If the bug is the result of a misuse of the system, mark the bug either as 'Won't fix', or 'Opinion' if you are still on the fence and need other people's input.
* Assign the importance after reviewing the proposed severity. Bugs that obviously break core and widely used functionality should be assigned "High" or "Critical" importance. The same applies to bugs that were filed for gate failures.
* Choose a milestone, if you can. Targeted bugs are especially important close to the end of the release.
* (Optional) Add comments explaining the issue and a possible strategy for fixing/working around the bug. Also, as good as some are at adding all their thoughts to bugs, it is still helpful to share the in-progress items that might not be captured in a bug description or during our weekly meeting. In order to provide some guidance and reduce ramp-up time as we rotate, tagging bugs with 'needs-attention' can be useful to quickly identify which reports need further screening/eyes on.

You are done! Iterate.

Bug Expiration Policy and Bug Squashing
---------------------------------------

More can be found at this `Launchpad page `_. In a nutshell, in order to make a bug report expire automatically, it needs to be unassigned, untargeted, and marked as Incomplete.

The OpenStack community has had `Bug Days `_ but they have not been wildly successful. In order to keep the list of open bugs at a manageable number (closer to 100 than to 1000), at the end of each release (in feature freeze and/or during less busy times), the PTL, with the help of the team, will go through the list of open (namely new, opinion, in progress, confirmed, triaged) bugs, and do a major sweep to have the Launchpad Janitor pick them up. This gives a 60 day grace period to reporters/assignees to come back and revive the bug. Assuming that, at steady state, bugs are properly reported, acknowledged and fix-proposed, losing unaddressed reports is not going to be a major problem, but brief stats will be collected to assess how the team is doing over time.

.. _tags:

Tagging Bugs
------------

Launchpad's Bug Tracker allows you to create ad-hoc groups of bugs with tagging. In the Neutron team, we have a list of agreed tags that we may apply to bugs reported against various aspects of Neutron itself. The list of approved tags used to be available on the `wiki `_; however, the section has been moved here, to improve collaborative editing and keep the information more current. By using a standard set of tags, each explained on this page, we can avoid confusion. A bug report can have more than one tag at any given time.

Proposing New Tags
~~~~~~~~~~~~~~~~~~

New tags, or changes in the meaning of existing tags (or deletion), are to be proposed via a patch to this section. After discussion and approval, a member of the bug team will create/delete the tag in Launchpad. Each tag covers an area with an identified go-to contact or `Lieutenant `_, who can provide further insight. Bug queries are provided below for convenience; more will be added over time if needed.
+-------------------------------+----------------------------------------+----------------------+
| Tag                           | Description                            | Contact              |
+===============================+========================================+======================+
| access-control_               | A bug affecting RBAC and policy.json   | Kevin Benton         |
+-------------------------------+----------------------------------------+----------------------+
| api_                          | A bug affecting the API layer          | Salvatore Orlando    |
+-------------------------------+----------------------------------------+----------------------+
| auto-allocated-topology_      | A bug affecting get-me-a-network       | Henry Gessau         |
+-------------------------------+----------------------------------------+----------------------+
| baremetal_                    | A bug affecting Ironic support         | Sukhdev Kapur        |
+-------------------------------+----------------------------------------+----------------------+
| db_                           | A bug affecting the DB layer           | Henry Gessau         |
+-------------------------------+----------------------------------------+----------------------+
| deprecation_                  | To track config/feature deprecations   | Neutron PTL/drivers  |
+-------------------------------+----------------------------------------+----------------------+
| dns_                          | A bug affecting DNS integration        | Miguel Lavalle       |
+-------------------------------+----------------------------------------+----------------------+
| doc_                          | A bug affecting in-tree doc            | Edgar Magana         |
+-------------------------------+----------------------------------------+----------------------+
| fullstack_                    | A bug in the fullstack subtree         | Assaf Muller         |
+-------------------------------+----------------------------------------+----------------------+
| functional-tests_             | A bug in the functional tests subtree  | Assaf Muller         |
+-------------------------------+----------------------------------------+----------------------+
| fwaas_                        | A bug affecting neutron-fwaas          | Sean Collins         |
+-------------------------------+----------------------------------------+----------------------+
| gate-failure_                 | A bug affecting gate stability         | Armando Migliaccio   |
+-------------------------------+----------------------------------------+----------------------+
| ipv6_                         | A bug affecting IPv6 support           | Henry Gessau         |
+-------------------------------+----------------------------------------+----------------------+
| l2-pop_                       | A bug in L2 Population mech driver     | Kevin Benton         |
+-------------------------------+----------------------------------------+----------------------+
| l3-bgp_                       | A bug affecting BGP service plugin     | Vikram Choudhary     |
+-------------------------------+----------------------------------------+----------------------+
| l3-dvr-backlog_               | A bug affecting distributed routing    | Ryan Moats           |
+-------------------------------+----------------------------------------+----------------------+
| l3-ha_                        | A bug affecting L3 HA (vrrp)           | Assaf Muller         |
+-------------------------------+----------------------------------------+----------------------+
| l3-ipam-dhcp_                 | A bug affecting L3/DHCP/metadata       | Miguel Lavalle       |
+-------------------------------+----------------------------------------+----------------------+
| lbaas_                        | A bug affecting neutron-lbaas          | Brandon Logan        |
+-------------------------------+----------------------------------------+----------------------+
| linuxbridge_                  | A bug affecting ML2/linuxbridge        | Sean Collins         |
+-------------------------------+----------------------------------------+----------------------+
| loadimpact_                   | Performance penalty/improvements       | Ryan Moats           |
+-------------------------------+----------------------------------------+----------------------+
| logging_                      | An issue with logging guidelines       | Matt Riedemann       |
+-------------------------------+----------------------------------------+----------------------+
| low-hanging-fruit_            | Starter bugs for new contributors      | N/A                  |
+-------------------------------+----------------------------------------+----------------------+
| metering_                     | A bug affecting the metering layer     | ?                    |
+-------------------------------+----------------------------------------+----------------------+
| needs-attention_              | A bug that needs further screening     | PTL/Bug Deputy       |
+-------------------------------+----------------------------------------+----------------------+
| opnfv_                        | Reported by/affecting OPNFV initiative | Drivers team         |
+-------------------------------+----------------------------------------+----------------------+
| ops_                          | Reported by or affecting operators     | Drivers Team         |
+-------------------------------+----------------------------------------+----------------------+
| oslo_                         | An interop/cross-project issue         | Ihar Hrachyshka      |
+-------------------------------+----------------------------------------+----------------------+
| ovs_                          | A bug affecting ML2/OVS                | Kevin Benton         |
+-------------------------------+----------------------------------------+----------------------+
| ovs-lib_                      | A bug affecting OVS Lib                | Terry Wilson         |
+-------------------------------+----------------------------------------+----------------------+
| py34_                         | Issues affecting the Python 3 porting  | Cedric Brandily      |
+-------------------------------+----------------------------------------+----------------------+
| qos_                          | A bug affecting ML2/QoS                | Miguel Ajo           |
+-------------------------------+----------------------------------------+----------------------+
| released-neutronclient_       | A bug affecting released clients       | Kyle Mestery         |
+-------------------------------+----------------------------------------+----------------------+
| release-subproject_           | A request to release a subproject      | Kyle Mestery         |
+-------------------------------+----------------------------------------+----------------------+
| rfe_                          | Feature enhancements being screened    | Drivers Team         |
+-------------------------------+----------------------------------------+----------------------+
| rfe-approved_                 | Approved feature enhancements          | Drivers Team         |
+-------------------------------+----------------------------------------+----------------------+
| sg-fw_                        | A bug affecting security groups        | Kevin Benton         |
+-------------------------------+----------------------------------------+----------------------+
| sriov-pci-pt_                 | A bug affecting Sriov/PCI PassThrough  | Moshe Levi           |
+-------------------------------+----------------------------------------+----------------------+
| troubleshooting_              | An issue affecting ease of debugging   | Assaf Muller         |
+-------------------------------+----------------------------------------+----------------------+
| unittest_                     | A bug affecting the unit test subtree  | Cedric Brandily      |
+-------------------------------+----------------------------------------+----------------------+
| usability_                    | UX, interoperability, feature parity   | PTL/Drivers Team     |
+-------------------------------+----------------------------------------+----------------------+
| vpnaas_                       | A bug affecting neutron-vpnaas         | Paul Michali         |
+-------------------------------+----------------------------------------+----------------------+
| xxx-backport-potential_       | Cherry-pick request for stable team    | Ihar Hrachyshka      |
+-------------------------------+----------------------------------------+----------------------+
.. _access-control:

Access Control
++++++++++++++

* `Access Control - All bugs `_
* `Access Control - In progress `_

.. _api:

API
+++

* `API - All bugs `_
* `API - In progress `_

.. _auto-allocated-topology:

Auto Allocated Topology
+++++++++++++++++++++++

* `Auto Allocated Topology - All bugs `_
* `Auto Allocated Topology - In progress `_

.. _baremetal:

Baremetal
+++++++++

* `Baremetal - All bugs `_
* `Baremetal - In progress `_

.. _db:

DB
++

* `DB - All bugs `_
* `DB - In progress `_

.. _deprecation:

Deprecation
+++++++++++

* `Deprecation - All bugs `_
* `Deprecation - In progress `_

.. _dns:

DNS
+++

* `DNS - All bugs `_
* `DNS - In progress `_

.. _doc:

DOC
+++

* `DOC - All bugs `_
* `DOC - In progress `_

.. _fullstack:

Fullstack
+++++++++

* `Fullstack - All bugs `_
* `Fullstack - In progress `_

.. _functional-tests:

Functional Tests
++++++++++++++++

* `Functional tests - All bugs `_
* `Functional tests - In progress `_

.. _fwaas:

FWAAS
+++++

* `FWaaS - All bugs `_
* `FWaaS - In progress `_

.. _gate-failure:

Gate Failure
++++++++++++

* `Gate failure - All bugs `_
* `Gate failure - In progress `_

.. _ipv6:

IPV6
++++

* `IPv6 - All bugs `_
* `IPv6 - In progress `_

.. _l2-pop:

L2 Population
+++++++++++++

* `L2 Pop - All bugs `_
* `L2 Pop - In progress `_

.. _l3-bgp:

L3 BGP
++++++

* `L3 BGP - All bugs `_
* `L3 BGP - In progress `_

.. _l3-dvr-backlog:

L3 DVR Backlog
++++++++++++++

* `L3 DVR - All bugs `_
* `L3 DVR - In progress `_

.. _l3-ha:

L3 HA
+++++

* `L3 HA - All bugs `_
* `L3 HA - In progress `_

.. _l3-ipam-dhcp:

L3 IPAM DHCP
++++++++++++

* `L3 IPAM DHCP - All bugs `_
* `L3 IPAM DHCP - In progress `_

.. _lbaas:

LBAAS
+++++

* `LBaaS - All bugs `_
* `LBaaS - In progress `_

.. _linuxbridge:

LinuxBridge
+++++++++++

* `LinuxBridge - All bugs `_
* `LinuxBridge - In progress `_

.. _loadimpact:

Load Impact
+++++++++++

* `Load Impact - All bugs `_
* `Load Impact - In progress `_

.. _logging:

Logging
+++++++

* `Logging - All bugs `_
* `Logging - In progress `_

.. _low-hanging-fruit:

Low hanging fruit
+++++++++++++++++

* `Low hanging fruit - All bugs `_
* `Low hanging fruit - In progress `_

.. _metering:

Metering
++++++++

* `Metering - All bugs `_
* `Metering - In progress `_

.. _needs-attention:

Needs Attention
+++++++++++++++

* `Needs Attention - All bugs `_

.. _opnfv:

OPNFV
+++++

* `OPNFV - All bugs `_

.. _ops:

Operators/Operations (ops)
++++++++++++++++++++++++++

* `Ops - All bugs `_

.. _oslo:

OSLO
++++

* `Oslo - All bugs `_
* `Oslo - In progress `_

.. _ovs:

OVS
+++

* `OVS - All bugs `_
* `OVS - In progress `_

.. _ovs-lib:

OVS Lib
+++++++

* `OVS Lib - All bugs `_
* `OVS Lib - In progress `_

.. _py34:

PY34
++++

* `Py34 - All bugs `_
* `Py34 - In progress `_

.. _qos:

QoS
+++

* `QoS - All bugs `_
* `QoS - In progress `_

.. _released-neutronclient:

Released Neutron Client
+++++++++++++++++++++++

* `Released Neutron Client - All bugs `_
* `Released Neutron Client - In progress `_

.. _release-subproject:

Release Subproject
++++++++++++++++++

* `Release Subproject - All bugs `_
* `Release Subproject - In progress `_

.. _rfe:

RFE
+++

* `RFE - All bugs `_
* `RFE - In progress `_

.. _rfe-approved:

RFE-Approved
++++++++++++

* `RFE-Approved - All bugs `_
* `RFE-Approved - In progress `_

.. _sriov-pci-pt:

SRIOV-PCI PASSTHROUGH
+++++++++++++++++++++

* `SRIOV/PCI-PT - All bugs `_
* `SRIOV/PCI-PT - In progress `_
.. _sg-fw:

SG-FW
+++++

* `Security groups - All bugs `_
* `Security groups - In progress `_

.. _troubleshooting:

Troubleshooting
+++++++++++++++

* `Troubleshooting - All bugs `_
* `Troubleshooting - In progress `_

.. _unittest:

Unit test
+++++++++

* `Unit test - All bugs `_
* `Unit test - In progress `_

.. _usability:

Usability
+++++++++

* `UX - All bugs `_
* `UX - In progress `_

.. _vpnaas:

VPNAAS
++++++

* `VPNaaS - All bugs `_
* `VPNaaS - In progress `_

.. _xxx-backport-potential:

Backport/RC potential
+++++++++++++++++++++

* `All Liberty bugs `_
* `All Kilo bugs `_
* `All Juno bugs `_

neutron-8.4.0/doc/source/policies/gate-failure-triage.rst0000664000567000056710000001040713044372760024612 0ustar jenkinsjenkins00000000000000Neutron Gate Failure Triage
===========================

This page provides guidelines for spotting and assessing neutron gate failures. Some hints for triaging failures are also provided.

Spotting Gate Failures
----------------------

This can be achieved using several tools:

* `Joe Gordon's github.io pages `_
* `logstash `_

Even though Joe's script is not an "official" OpenStack page, it provides a quick snapshot of the current status for the most important jobs. This page is built using data available at graphite.openstack.org. If you want to check how that is done, go `here `_ (caveat: the color of the neutron job is very similar to that of the full job with nova-network).

For checking gate failures with logstash, the following query will return failures for a specific job:

> build_status:FAILURE AND message:Finished AND build_name:"check-tempest-dsvm-neutron" AND build_queue:"gate"

And divided by the total number of jobs executed:

> message:Finished AND build_name:"check-tempest-dsvm-neutron" AND build_queue:"gate"

It will return the failure rate in the selected period for a given job. It is important to remark that failures in the check queue might be misleading, as the problem causing the failure is most of the time in the patch being checked. Therefore it is always advisable to work on failures that occurred in the gate queue. However, check-queue failures are a precious resource for assessing frequency and determining the root cause of failures which manifest in the gate queue.

The steps above will provide a quick outlook of where things stand. When the failure rate rises above 10% for a job in 24 hours, it's time to be on alert. 25% is amber alert. 33% is red alert. Anything above 50% means that probably somebody from the infra team already has a contract out on you.
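As a rough illustration of these thresholds, a small helper could classify a job's failure rate; this is a sketch only, with the counts assumed to come from the two logstash queries above:

::

    # Illustrative sketch: classify a job's failure rate using the
    # thresholds described above. 'failures' and 'total' are the hit
    # counts returned by the two logstash queries.
    def alert_level(failures, total):
        rate = failures / float(total)
        if rate > 0.50:
            return 'infra contract'
        elif rate > 0.33:
            return 'red alert'
        elif rate > 0.25:
            return 'amber alert'
        elif rate > 0.10:
            return 'on alert'
        return 'ok'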
Whether you are relaxed, in alert mode, or freaking out because you see a red dot on your chest, it is always a good idea to check the elastic-recheck pages on a daily basis. Under the `gate pipeline `_ tab, you can see gate failure rates for already known bugs. The bugs in this page are ordered by decreasing failure rates (for the past 24 hours). If one of the bugs affecting Neutron is among those on top of that list, you should check that the corresponding bug is already assigned and somebody is working on it. If not, and there is not a good reason for that, it should be ensured somebody gets a crack at it as soon as possible. The other part of the story is to check for `uncategorized `_ failures. This is where failures for new (unknown) gate-breaking bugs end up; on the other hand, infra errors causing job failures also end up here. It should be the duty of the diligent Neutron developer to ensure the classification rate for neutron jobs is as close as possible to 100%.

To this aim, the diligent Neutron developer should adopt the following procedure:

1. Open logs for failed jobs and look for logs/testr_results.html.gz.
2. If that file is missing, check console.html and see where the job failed.

   1. If there is a failure in devstack-gate-cleanup-host.txt it's likely to be an infra issue.
   2. If the failure is in devstacklog.txt it could be a devstack, neutron, or infra issue.

3. However, most of the time the failure is in one of the tempest tests. Take note of the error message and go to logstash.
4. On logstash, search for occurrences of this error message, and try to identify the root cause for the failure (see below).
5. File a bug for this failure, and push an elastic-recheck query for it (see below).
6. If you are confident with the area of this bug, and you have time, assign it to yourself; otherwise look for an assignee or talk to Neutron's bug czar to find an assignee.

Root Causing a Gate Failure
---------------------------

Time-based identification, i.e. find the naughty patch by log scavenging.

Filing An Elastic Recheck Query
-------------------------------

The `elastic recheck `_ page has all the currently open ER queries. To file one, please see the `ER Wiki `_.

neutron-8.4.0/doc/source/devref/0000775000567000056710000000000013044373210017673 5ustar jenkinsjenkins00000000000000neutron-8.4.0/doc/source/devref/ovs_vhostuser.rst0000664000567000056710000000467413044372760023372 0ustar jenkinsjenkins00000000000000..
      Licensed under the Apache License, Version 2.0 (the "License"); you may
      not use this file except in compliance with the License. You may obtain
      a copy of the License at

          http://www.apache.org/licenses/LICENSE-2.0

      Unless required by applicable law or agreed to in writing, software
      distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
      WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
      License for the specific language governing permissions and limitations
      under the License.

   Convention for heading levels in Neutron devref:
   =======  Heading 0 (reserved for the title in a document)
   -------  Heading 1
   ~~~~~~~  Heading 2
   +++++++  Heading 3
   '''''''  Heading 4
   (Avoid deeper levels because they do not render well.)

Neutron Open vSwitch vhost-user support
=======================================

Neutron supports using Open vSwitch + DPDK vhost-user interfaces directly in the OVS ML2 driver and agent. The current implementation relies on multiple configuration values and includes runtime verification of Open vSwitch's capability to provide these interfaces.

The OVS agent detects the capability of the underlying Open vSwitch installation and passes that information over RPC via the agent 'configurations' dictionary. The ML2 driver uses this information to select the proper VIF type and binding details.

Neutron+OVS+DPDK platform requirements
--------------------------------------

* OVS 2.4.0+
* DPDK 2.0+

Neutron OVS+DPDK vhost-user config
----------------------------------

::

    [OVS]
    datapath_type=netdev
    vhostuser_socket_dir=/var/run/openvswitch

When OVS is running with DPDK support enabled, and the datapath_type is set to "netdev", then the OVS ML2 driver will use the vhost-user VIF type and pass the necessary binding details to use OVS+DPDK and vhost-user sockets. This includes the vhostuser_socket_dir setting, which must match the directory passed to ovs-vswitchd on startup.
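The selection logic can be sketched as follows; the function and constant names here are illustrative assumptions, not the driver's actual identifiers:

::

    # Illustrative sketch of capability-based VIF type selection. The
    # 'configurations' dict is what the agent reports over RPC; the constant
    # values are assumptions for the example only.
    VIF_TYPE_VHOST_USER = 'vhostuser'
    VIF_TYPE_OVS = 'ovs'

    def choose_vif_type(agent_configurations):
        if agent_configurations.get('datapath_type') == 'netdev':
            # Pass the socket directory along in the binding details so Nova
            # can plug the vhost-user socket in the right place.
            details = {'vhostuser_socket_dir':
                       agent_configurations.get('vhostuser_socket_dir')}
            return VIF_TYPE_VHOST_USER, details
        return VIF_TYPE_OVS, {}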
What about the networking-ovs-dpdk repo?
----------------------------------------

The networking-ovs-dpdk repo will continue to exist and undergo active development. This feature just removes the necessity for a separate ML2 driver and OVS agent in the networking-ovs-dpdk repo. The networking-ovs-dpdk project also provides a devstack plugin which allows automated CI, a puppet module, and an OpenFlow-based security group implementation.

neutron-8.4.0/doc/source/devref/db_layer.rst0000664000567000056710000001027713044372760022226 0ustar jenkinsjenkins00000000000000..
      Licensed under the Apache License, Version 2.0 (the "License"); you may
      not use this file except in compliance with the License. You may obtain
      a copy of the License at

          http://www.apache.org/licenses/LICENSE-2.0

      Unless required by applicable law or agreed to in writing, software
      distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
      WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
      License for the specific language governing permissions and limitations
      under the License.

   Convention for heading levels in Neutron devref:
   =======  Heading 0 (reserved for the title in a document)
   -------  Heading 1
   ~~~~~~~  Heading 2
   +++++++  Heading 3
   '''''''  Heading 4
   (Avoid deeper levels because they do not render well.)

Neutron Database Layer
======================

This section contains some common information that will be useful for developers that need to make db changes.

Difference between 'default' and 'server_default' parameters for columns
------------------------------------------------------------------------

For columns it is possible to set 'default' or 'server_default'. What is the difference between them and why should they be used? The explanation is quite simple:

* `default `_ - the default value that SQLAlchemy will specify in queries for creating instances of a given model;
* `server_default `_ - the default value for a column that SQLAlchemy will specify in DDL.

Summarizing, 'default' is useless in migrations and only 'server_default' should be used. For synchronizing migrations with models, the server_default parameter should also be added to the model. If a default value in the database is not needed, 'server_default' should not be used. The declarative approach can be bypassed (i.e. 'default' may be omitted in the model) if the default is enforced through business logic.
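A minimal sketch of the distinction (the model and table names are invented for illustration):

::

    import sqlalchemy as sa
    from sqlalchemy.ext.declarative import declarative_base

    Base = declarative_base()

    class Example(Base):
        """Hypothetical model showing 'default' vs 'server_default'."""
        __tablename__ = 'example'

        id = sa.Column(sa.Integer, primary_key=True)
        # 'default' is applied by SQLAlchemy itself at INSERT time; it never
        # appears in the emitted CREATE TABLE, which is why it is useless in
        # migrations.
        client_side = sa.Column(sa.Boolean, default=False)
        # 'server_default' becomes part of the DDL ("... DEFAULT false"),
        # which is why it is what both the migration and the model should
        # declare.
        server_side = sa.Column(sa.Boolean, server_default=sa.sql.false())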
Database migrations
-------------------

For details on the neutron-db-manage wrapper and alembic migrations, see `Alembic Migrations `_.

Tests to verify that database migrations and models are in sync
---------------------------------------------------------------

.. automodule:: neutron.tests.functional.db.test_migrations

.. autoclass:: _TestModelsMigrations
   :members:

The Standard Attribute Table
----------------------------

There are many attributes that we would like to store in the database which are common across many Neutron objects (e.g. tags, timestamps, rbac entries). We have previously been handling this by duplicating the schema to every table via model mixins. This means that a DB migration is required for each object that wants to adopt one of these common attributes. This becomes even more cumbersome when the relationship between the attribute and the object is many-to-one because each object then needs its own table for the attributes (assuming referential integrity is a concern).

To address this issue, the 'standardattribute' table is available. Any model can add support for this table by inheriting the 'HasStandardAttributes' mixin in neutron.db.model_base. This mixin will add a standard_attr_id BigInteger column to the model with a foreign key relationship to the 'standardattribute' table. The model will then be able to access any columns of the 'standardattribute' table and any tables related to it.

The introduction of a new standard attribute only requires one column addition to the 'standardattribute' table for one-to-one relationships or a new table for one-to-many or one-to-zero relationships. Then all of the models using the 'HasStandardAttributes' mixin will automatically gain access to the new attribute. Any attributes that will apply to every neutron resource (e.g. timestamps) can be added directly to the 'standardattribute' table. For things that will frequently be NULL for most entries (e.g. a column to store an error reason), a new table should be added and joined to in a query to prevent a bunch of NULL entries in the database.
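For illustration, a hypothetical model adopting the mixin might look like this (the model itself is made up; the mixin and its module are the ones named above, and the declarative base is assumed to be the BASEV2 class from the same module):

::

    import sqlalchemy as sa

    from neutron.db import model_base

    class Widget(model_base.BASEV2, model_base.HasStandardAttributes):
        """Hypothetical model: by inheriting the mixin it gains a
        standard_attr_id column and, through it, access to everything
        attached to the 'standardattribute' table.
        """
        __tablename__ = 'widgets'

        id = sa.Column(sa.String(36), primary_key=True)
        name = sa.Column(sa.String(255))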
neutron-8.4.0/doc/source/devref/plugin-api.rst0000664000567000056710000000227413044372736022503 0ustar jenkinsjenkins00000000000000..
      Licensed under the Apache License, Version 2.0 (the "License"); you may
      not use this file except in compliance with the License. You may obtain
      a copy of the License at

          http://www.apache.org/licenses/LICENSE-2.0

      Unless required by applicable law or agreed to in writing, software
      distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
      WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
      License for the specific language governing permissions and limitations
      under the License.

   Convention for heading levels in Neutron devref:
   =======  Heading 0 (reserved for the title in a document)
   -------  Heading 1
   ~~~~~~~  Heading 2
   +++++++  Heading 3
   '''''''  Heading 4
   (Avoid deeper levels because they do not render well.)

Neutron Plugin Architecture
===========================

`Salvatore Orlando: How to write a Neutron Plugin (if you really need to) `_

Plugin API
----------

.. automodule:: neutron.neutron_plugin_base_v2

.. autoclass:: NeutronPluginBaseV2
   :members:

neutron-8.4.0/doc/source/devref/rpc_api.rst0000664000567000056710000001447313044372736022057 0ustar jenkinsjenkins00000000000000..
      Licensed under the Apache License, Version 2.0 (the "License"); you may
      not use this file except in compliance with the License. You may obtain
      a copy of the License at

          http://www.apache.org/licenses/LICENSE-2.0

      Unless required by applicable law or agreed to in writing, software
      distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
      WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
      License for the specific language governing permissions and limitations
      under the License.

   Convention for heading levels in Neutron devref:
   =======  Heading 0 (reserved for the title in a document)
   -------  Heading 1
   ~~~~~~~  Heading 2
   +++++++  Heading 3
   '''''''  Heading 4
   (Avoid deeper levels because they do not render well.)

Neutron RPC API Layer
=====================

Neutron uses the oslo.messaging library to provide an internal communication channel between Neutron services. This communication is typically done via AMQP, but those details are mostly hidden by the use of oslo.messaging and it could be some other protocol in the future.

RPC APIs are defined in Neutron in two parts: client side and server side.

Client Side
-----------

Here is an example of an rpc client definition:

::

    import oslo_messaging

    from neutron.common import rpc as n_rpc


    class ClientAPI(object):
        """Client side RPC interface definition.

        API version history:
            1.0 - Initial version
            1.1 - Added my_remote_method_2
        """

        def __init__(self, topic):
            target = oslo_messaging.Target(topic=topic, version='1.0')
            self.client = n_rpc.get_client(target)

        def my_remote_method(self, context, arg1, arg2):
            cctxt = self.client.prepare()
            return cctxt.call(context, 'my_remote_method', arg1=arg1, arg2=arg2)

        def my_remote_method_2(self, context, arg1):
            cctxt = self.client.prepare(version='1.1')
            return cctxt.call(context, 'my_remote_method_2', arg1=arg1)


This class defines the client side interface for an rpc API. The interface has 2 methods. The first method existed in version 1.0 of the interface. The second method was added in version 1.1. When the newer method is called, it specifies that the remote side must implement at least version 1.1 to handle this request.

Server Side
-----------

The server side of an rpc interface looks like this:

::

    import oslo_messaging


    class ServerAPI(object):

        target = oslo_messaging.Target(version='1.1')

        def my_remote_method(self, context, arg1, arg2):
            return 'foo'

        def my_remote_method_2(self, context, arg1):
            return 'bar'


This class implements the server side of the interface. The oslo_messaging.Target() defined says that this class currently implements version 1.1 of the interface.

.. _rpc_versioning:

Versioning
----------

Note that changes to rpc interfaces must always be done in a backwards compatible way. The server side should always be able to handle older clients (within the same major version series, such as 1.X).

It is possible to bump the major version number and drop some code only needed for backwards compatibility. For more information about how to do that, see https://wiki.openstack.org/wiki/RpcMajorVersionUpdates.

Example Change
~~~~~~~~~~~~~~

As an example minor API change, let's assume we want to add a new parameter to my_remote_method_2. First, we add the argument on the server side. To be backwards compatible, the new argument must have a default value set so that the interface will still work even if the argument is not supplied. Also, the interface's minor version number must be incremented. So, the new server side code would look like this:

::

    import oslo_messaging


    class ServerAPI(object):

        target = oslo_messaging.Target(version='1.2')

        def my_remote_method(self, context, arg1, arg2):
            return 'foo'

        def my_remote_method_2(self, context, arg1, arg2=None):
            if not arg2:
                # Deal with the fact that arg2 was not specified if needed.
                pass
            return 'bar'


We can now update the client side to pass the new argument. The client must also specify that version '1.2' is required for this method call to be successful. The updated client side would look like this:

::

    import oslo_messaging

    from neutron.common import rpc as n_rpc


    class ClientAPI(object):
        """Client side RPC interface definition.

        API version history:
            1.0 - Initial version
            1.1 - Added my_remote_method_2
            1.2 - Added arg2 to my_remote_method_2
        """

        def __init__(self, topic):
            target = oslo_messaging.Target(topic=topic, version='1.0')
            self.client = n_rpc.get_client(target)

        def my_remote_method(self, context, arg1, arg2):
            cctxt = self.client.prepare()
            return cctxt.call(context, 'my_remote_method', arg1=arg1, arg2=arg2)

        def my_remote_method_2(self, context, arg1, arg2):
            cctxt = self.client.prepare(version='1.2')
            return cctxt.call(context, 'my_remote_method_2', arg1=arg1, arg2=arg2)


Neutron RPC APIs
----------------

As discussed before, RPC APIs are defined in two parts: a client side and a server side. Several of these pairs exist in the Neutron code base. The code base is being updated with documentation on every rpc interface implementation that indicates where the corresponding server or client code is located.

Example: DHCP
~~~~~~~~~~~~~

The DHCP agent includes a client API, neutron.agent.dhcp.agent.DhcpPluginAPI. The DHCP agent uses this class to call remote methods back in the Neutron server. The server side is defined in neutron.api.rpc.handlers.dhcp_rpc.DhcpRpcCallback. It is up to the Neutron plugin in use to decide whether the DhcpRpcCallback interface should be exposed.

Similarly, there is an RPC interface defined that allows the Neutron plugin to remotely invoke methods in the DHCP agent. The client side is defined in neutron.api.rpc.agentnotifiers.dhcp_rpc_agent_api.DhcpAgentNotifyApi. The server side of this interface that runs in the DHCP agent is neutron.agent.dhcp.agent.DhcpAgent.

More Info
---------

For more information, see the oslo.messaging documentation: http://docs.openstack.org/developer/oslo.messaging/.

neutron-8.4.0/doc/source/devref/instrumentation.rst0000664000567000056710000003431513044372760023703 0ustar jenkinsjenkins00000000000000Neutron Instrumentation
=======================

OpenStack operators require information about the status and health of the Neutron system. While it is possible for an operator to pull all of the interface counters from compute and network nodes, today there is no capability to aggregate that information to provide comprehensive counters for each project within Neutron. Neutron instrumentation sets out to meet this need.

Neutron instrumentation can be broken down into three major pieces:

#. Data Collection (i.e. what data should be collected and how),
#. Data Aggregation (i.e. how and where raw data should be aggregated into project information)
#. Data Consumption (i.e. how is aggregated data consumed)

While instrumentation might also be considered to include asynchronous event notifications, like fault detection, this is considered out of scope for the following two reasons:

#. In Kilo, Neutron added the ProcessManager class to allow agents to spawn a monitor thread that would either respawn or exit the agent. While this is a useful feature for ensuring that the agent gets restarted, the only notification of this event is an error log entry. To ensure that this event is asynchronously passed up to an upstream consumer, the Neutron logger object should have its publish_errors option set to True and the transport URL set to point at the upstream consumer. As the particular URL is consumer specific, further discussion is outside the scope of this section.

#. For the data plane, it is necessary to have visibility into the hardware status of the compute and networking nodes.
   As some upstream consumers already support this (even incompletely), it is considered to be within the scope of the upstream consumer and not Neutron itself.

How does Instrumentation differ from Metering Labels and Rules
--------------------------------------------------------------

The existing metering label and rule extension provides the ability to collect traffic information on a per CIDR basis. Therefore, a possible implementation of instrumentation would be to use per-instance metering rules for all IP addresses in both directions. However, the information collected by metering rules is focused more on billing and so does not have the desired granularity (i.e. it counts transmitted packets without keeping track of what caused packets to fail).

What Data to Collect
--------------------

The first step is to consider what data to collect. In the absence of a standard, it is proposed to use the information set defined in [RFC2863]_ and [RFC4293]_. This proposal should not be read as implying that Neutron instrumentation data will be browsable via a MIB browser, as that would be a potential Data Consumption model.

.. [RFC2863] https://tools.ietf.org/html/rfc2863
.. [RFC4293] https://tools.ietf.org/html/rfc4293

For the reference implementation (Nova/VIF, OVS, and Linux Bridge), this section identifies what data is already available and how it can be mapped into the structures defined by the RFCs. Other plugins are welcome to define either their own data sets and/or their own mappings to the data sets defined in the referenced RFCs. The focus here is on what is available from "stock" Linux and OpenStack. Additional statistics may become available if other items like NetFlow or sFlow are added to the mix, but those should be covered as an addition to the basic information discussed here.

What is Available from Nova
~~~~~~~~~~~~~~~~~~~~~~~~~~~

Within Nova, the libvirt driver makes the following host traffic statistics available under the get_diagnostics() and get_instance_diagnostics() calls on a per-virtual NIC basis:

* Receive bytes, packets, errors and drops
* Transmit bytes, packets, errors and drops

There continues to be a long-running effort to get these counters into Ceilometer (the wiki page at [#]_ attempted to do this via a direct call, while [#]_ is trying to accomplish this via notifications from Nova). Rather than propose another way of collecting these statistics from Nova, this devref takes the approach of declaring them out of scope until there is an agreed-upon method for getting the counters from Nova to Ceilometer, and then seeing if Neutron can/should piggy-back off of that.

.. [#] https://wiki.openstack.org/wiki/EfficientMetering/FutureNovaInteractionModel
.. [#] http://lists.openstack.org/pipermail/openstack-dev/2015-June/067589.html

What is Available from Linux Bridge
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

For the Linux bridge, a check of [#]_ shows that IEEE 802.1d-mandated statistics are only a "wishlist" item. The alternative is to use NETLINK/shell to list the interfaces attached to a particular bridge and then to collect statistics for each interface attached to the bridge. These statistics could then be mapped to appropriate places, as discussed below. Note: the examples below talk in terms of mapping the counters available from the Linux operating system:

* Receive bytes, packets, errors, dropped, overrun and multicast
* Transmit bytes, packets, errors, dropped, carrier and collisions

Available counters for interfaces on other operating systems can be mapped in a similar fashion.

.. [#] http://git.kernel.org/cgit/linux/kernel/git/shemminger/bridge-utils.git/tree/doc/WISHLIST
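As a rough sketch of this kind of per-interface collection (the interface name is an example, and production code would go through rootwrap or a netlink library rather than reading sysfs directly):

::

    import os

    def read_counters(dev):
        # Each file under /sys/class/net/<dev>/statistics holds one counter,
        # e.g. rx_bytes, tx_packets, rx_dropped.
        path = '/sys/class/net/%s/statistics' % dev
        counters = {}
        for name in os.listdir(path):
            with open(os.path.join(path, name)) as f:
                counters[name] = int(f.read())
        return counters

    # Example usage: read_counters('tap0')['rx_bytes']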
Of interest are counters from each of the following (as of this writing, Linux Bridge only supports legacy routers, so the DVR case need not be considered):

* Compute node

  * Instance tap interface

* Network node

  * DHCP namespace tap interface (if defined)
  * Router namespace qr interface
  * Router namespace qg interface

What is Available from Openvswitch
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

Like Linux bridge, the openvswitch implementation has interface counters that will be collected. Of interest are the receive and transmit counters from the following:

Legacy Routing
++++++++++++++

* Compute node

  * Instance tap interface

* Network node

  * DHCP namespace tap interface (if defined)
  * Router namespace qr interface
  * Router namespace qg interface

Distributed Routing (DVR)
+++++++++++++++++++++++++

* Compute node

  * Instance tap interface
  * Router namespace qr interface
  * FIP namespace fg interface

* Network node

  * DHCP tap interface (if defined)
  * Router namespace qr interface
  * SNAT namespace qg interface

Mapping from Available Information to MIB Data Set
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

The following table summarizes how the interface counters are mapped into each MIB Data Set. Specific details are covered in the sections below:

+---------+--------------+----------------------+
| Node    | Interface    | Included in Data Set |
|         |              +-----------+----------+
|         |              | RFC2863   | RFC4293  |
+=========+==============+===========+==========+
| Compute | Instance tap | Yes       | No       |
|         +--------------+-----------+----------+
|         | Router qr    | Yes       | Yes      |
|         +--------------+-----------+----------+
|         | FIP fg       | No        | Yes      |
+---------+--------------+-----------+----------+
| Network | DHCP tap     | Yes       | No       |
|         +--------------+-----------+----------+
|         | Router qr    | Yes       | Yes      |
|         +--------------+-----------+----------+
|         | Router qg    | No        | Yes      |
|         +--------------+-----------+----------+
|         | SNAT sg      | No        | Yes      |
+---------+--------------+-----------+----------+

Note: because of the replication of the router qg interface when running distributed routing, aggregation of the individual counter information will be necessary to fill in the appropriate data set entries. This is covered in the Data Aggregation section below.

RFC 2863 Structures
+++++++++++++++++++

For each compute host, each network will be represented with a "switch", modeled by instances of ifTable and ifXTable. This mapping has the advantage that for a particular network, the view to the project or the operator is identical - the only difference is that the operator can see all networks, while a project will only see the networks under their project id.

The current reference implementation identifies tap interface names with the Neutron port they are associated with. In turn, the Neutron port identifies the Neutron network.
Therefore, it is possible to take counters from each tap interface and map them into entries in the appropriate tables, using the following proposed assignments:

* ifTable

  * ifInOctets = low 32 bits of interface received byte count
  * ifInUcastPkts = low 32 bits of interface received packet count
  * ifInDiscards = interface received dropped count
  * ifInErrors = interface received errors count
  * ifOutOctets = low 32 bits of interface transmit byte count
  * ifOutUcastPkts = low 32 bits of interface transmit packet count
  * ifOutDiscards = interface transmit dropped count
  * ifOutErrors = interface transmit errors count

* ifXTable

  * ifHCInOctets = 64 bits of interface received byte count
  * ifHCInUcastPkts = 64 bits of interface received packet count
  * ifHCOutOctets = 64 bits of interface transmit byte count
  * ifHCOutUcastPkts = 64 bits of interface transmit packet count

Section 3.1.6 of [RFC2863]_ provides the details of why 64-bit sized counters need to be supported. The summary is that with increasing transmission bandwidth, use of 32-bit counters would require a problematic increase in counter polling frequency (a 1 Gb/s stream of full-sized packets will cause a 32-bit counter to wrap in 34 seconds).
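The wrap-time arithmetic is easy to verify:

::

    # Time for a 32-bit octet counter to wrap at 1 Gb/s line rate:
    # 2**32 bytes / (10**9 bits/s / 8 bits per byte) ~= 34.4 seconds
    wrap_seconds = 2 ** 32 / (10 ** 9 / 8.0)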
These summations are covered in more detail in the Data Aggregation section
below:

* ipSystemStatsInReceives :== sum of all ipIfStatsInReceives for the router
* ipSystemStatsHCInReceives :== sum of all ipIfStatsHCInReceives for the
  router
* ipSystemStatsInOctets :== sum of all ipIfStatsInOctets for the router
* ipSystemStatsHCInOctets :== sum of all ipIfStatsHCInOctets for the router
* ipSystemStatsInDiscards :== sum of all ipIfStatsInDiscards for the router
* ipSystemStatsOutDiscards :== sum of all ipIfStatsOutDiscards for the
  router
* ipSystemStatsOutTransmits :== sum of all ipIfStatsOutTransmits for the
  router
* ipSystemStatsHCOutTransmits :== sum of all ipIfStatsHCOutTransmits for
  the router
* ipSystemStatsOutOctets :== sum of all ipIfStatsOutOctets for the router
* ipSystemStatsHCOutOctets :== sum of all ipIfStatsHCOutOctets for the
  router

Data Collection
---------------

There are two options for how data can be collected:

#. The Neutron L3 and ML2 agents could collect the counters themselves.
#. A separate collection agent could be started on each compute/network
   node to collect counters.

A large number of counters needs to be collected; for example, a cloud
running legacy routing would need to collect, for each project, three
counters from the network node plus a tap counter for each running
instance. So, while it would be desirable to reuse the existing L3 and ML2
agents, the initial proof of concept will run a separate agent that uses
separate threads to isolate the effects of counter collection from
reporting. Once the performance of the collection agent is understood,
merging the functionality into the L3 or ML2 agents can be considered.

The collection thread will initially use shell commands via rootwrap, with
the plan of moving to native Python libraries when support for them is
available.

In addition, there are two options for how to report counters back to the
Neutron server: push or pull (or asynchronous notification vs polling). On
the one hand, pull/polling eases the Neutron server's task in that it only
needs to store/aggregate the results from the current polling cycle.
However, this comes at the cost of dealing with the stale data issues that
scaling a polling cycle will entail. On the other hand, asynchronous
notification requires that the Neutron server has the capability to hold
the current results from each collector. As the L3 and ML2 agents already
use asynchronous notification to report status back to the Neutron server,
the proof of concept will follow the same model to ease a future merging of
functionality.

Data Aggregation
----------------

Will be covered in a follow-on patch set.

Data Consumption
----------------

Will be covered in a follow-on patch set.
neutron-8.4.0/doc/source/devref/external_dns_integration.rst0000664000567000056710000000346613044372736025533 0ustar jenkinsjenkins00000000000000..
      Licensed under the Apache License, Version 2.0 (the "License"); you
      may not use this file except in compliance with the License. You may
      obtain a copy of the License at

          http://www.apache.org/licenses/LICENSE-2.0

      Unless required by applicable law or agreed to in writing, software
      distributed under the License is distributed on an "AS IS" BASIS,
      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
      implied. See the License for the specific language governing
      permissions and limitations under the License.
Integration with external DNS services
======================================

Since the Mitaka release, neutron has an interface defined to interact with
an external DNS service. This interface is based on an abstract driver that
can be used as the base class to implement concrete drivers to interact
with various DNS services. The reference implementation of such a driver
integrates neutron with `OpenStack Designate `_.

This integration allows users to publish *dns_name* and *dns_domain*
attributes associated with floating IP addresses, ports, and networks in an
external DNS service.

Changes to the neutron API
--------------------------

To support integration with an external DNS service, the *dns_name* and
*dns_domain* attributes were added to floating ips, ports and networks. The
*dns_name* specifies the name to be associated with a corresponding IP
address, both of which will be published to an existing domain with the
name *dns_domain* in the external DNS service.

Specifically, floating ips, ports and networks are extended as follows:

* Floating ips have a *dns_name* and a *dns_domain* attribute.
* Ports have a *dns_name* attribute.
* Networks have a *dns_domain* attribute.
neutron-8.4.0/doc/source/devref/api_extensions.rst0000664000567000056710000000240413044372760023466 0ustar jenkinsjenkins00000000000000..
      Licensed under the Apache License, Version 2.0 (the "License"); you
      may not use this file except in compliance with the License. You may
      obtain a copy of the License at

          http://www.apache.org/licenses/LICENSE-2.0

      Unless required by applicable law or agreed to in writing, software
      distributed under the License is distributed on an "AS IS" BASIS,
      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
      implied. See the License for the specific language governing
      permissions and limitations under the License.

      Convention for heading levels in Neutron devref:
      =======  Heading 0 (reserved for the title in a document)
      -------  Heading 1
      ~~~~~~~  Heading 2
      +++++++  Heading 3
      '''''''  Heading 4
      (Avoid deeper levels because they do not render well.)

API Extensions
==============

API extensions are the standard way of introducing new functionality to the
Neutron project; they allow plugins to determine whether they wish to
support the functionality or not.

Examples
--------

The easiest way to demonstrate how an API extension is written is by
studying an existing API extension and explaining the different layers.

.. toctree::
   :maxdepth: 1

   security_group_api
neutron-8.4.0/doc/source/devref/oslo-incubator.rst0000664000567000056710000000224313044372760023377 0ustar jenkinsjenkins00000000000000..
      Licensed under the Apache License, Version 2.0 (the "License"); you
      may not use this file except in compliance with the License. You may
      obtain a copy of the License at

          http://www.apache.org/licenses/LICENSE-2.0

      Unless required by applicable law or agreed to in writing, software
      distributed under the License is distributed on an "AS IS" BASIS,
      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
      implied. See the License for the specific language governing
      permissions and limitations under the License.

      Convention for heading levels in Neutron devref:
      =======  Heading 0 (reserved for the title in a document)
      -------  Heading 1
      ~~~~~~~  Heading 2
      +++++++  Heading 3
      '''''''  Heading 4
      (Avoid deeper levels because they do not render well.)

oslo-incubator
==============

A number of modules used are from the oslo-incubator project.
Imported modules that are directly used by Neutron are listed in openstack-common.conf. More information can be found in `the corresponding policy `_. neutron-8.4.0/doc/source/devref/quality_of_service.rst0000664000567000056710000003616713044372760024347 0ustar jenkinsjenkins00000000000000.. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. Convention for heading levels in Neutron devref: ======= Heading 0 (reserved for the title in a document) ------- Heading 1 ~~~~~~~ Heading 2 +++++++ Heading 3 ''''''' Heading 4 (Avoid deeper levels because they do not render well.) Quality of Service ================== Quality of Service advanced service is designed as a service plugin. The service is decoupled from the rest of Neutron code on multiple levels (see below). QoS extends core resources (ports, networks) without using mixins inherited from plugins but through an ml2 extension driver. Details about the DB models, API extension, and use cases can be found here: `qos spec `_ . Service side design ------------------- * neutron.extensions.qos: base extension + API controller definition. Note that rules are subattributes of policies and hence embedded into their URIs. * neutron.services.qos.qos_plugin: QoSPlugin, service plugin that implements 'qos' extension, receiving and handling API calls to create/modify policies and rules. * neutron.services.qos.notification_drivers.manager: the manager that passes object notifications down to every enabled notification driver. * neutron.services.qos.notification_drivers.qos_base: the interface class for pluggable notification drivers that are used to update backends about new {create, update, delete} events on any rule or policy change. * neutron.services.qos.notification_drivers.message_queue: MQ-based reference notification driver which updates agents via messaging bus, using `RPC callbacks `_. * neutron.core_extensions.base: Contains an interface class to implement core resource (port/network) extensions. Core resource extensions are then easily integrated into interested plugins. We may need to have a core resource extension manager that would utilize those extensions, to avoid plugin modifications for every new core resource extension. * neutron.core_extensions.qos: Contains QoS core resource extension that conforms to the interface described above. * neutron.plugins.ml2.extensions.qos: Contains ml2 extension driver that handles core resource updates by reusing the core_extensions.qos module mentioned above. In the future, we would like to see a plugin-agnostic core resource extension manager that could be integrated into other plugins with ease. Supported QoS rule types ~~~~~~~~~~~~~~~~~~~~~~~~ Any plugin or Ml2 mechanism driver can claim support for some QoS rule types by providing a plugin/driver class property called 'supported_qos_rule_types' that should return a list of strings that correspond to QoS rule types (for the list of all rule types, see: neutron.services.qos.qos_consts.VALID_RULE_TYPES). 
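For example, a mechanism driver might advertise its supported rule types as
follows (a minimal sketch; the class name is invented, and
RULE_TYPE_BANDWIDTH_LIMIT is the constant behind the only rule type
mentioned in this document)::

    from neutron.services.qos import qos_consts

    class HypotheticalMechanismDriver(object):
        # Advertise the QoS rule types this driver is able to enforce.
        supported_qos_rule_types = [qos_consts.RULE_TYPE_BANDWIDTH_LIMIT]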
In the simplest case, the property can be represented by a simple Python
list defined on the class, as in the sketch above.

For the ML2 plugin, the list of supported QoS rule types is defined as a
common subset of the rules supported by all active mechanism drivers.

Note: the list of supported rule types reported by the core plugin is not
enforced when accessing QoS rule resources. This is mostly because,
otherwise, we would not be able to create any rules while at least one ML2
driver in the gate lacks support for QoS (at the moment of writing,
linuxbridge is such a driver).

Database models
~~~~~~~~~~~~~~~

The QoS design defines the following two conceptual resources to apply QoS
rules to a port or a network:

* QoS policy
* QoS rule (type specific)

Each QoS policy contains zero or more QoS rules. A policy is then applied
to a network or a port, making all rules of the policy applied to the
corresponding Neutron resource.

When applied through a network association, policy rules may or may not
apply to neutron internal ports (like router, dhcp, load balancer, etc.).
The QosRule base object provides a default should_apply_to_port method
which could be overridden. In the future we may want to have a flag in
QosNetworkPolicyBinding or QosRule to enforce this type of application (for
example, when limiting all the ingress of router devices on an external
network automatically).

From the database point of view, the following objects are defined in the
schema:

* QosPolicy: directly maps to the conceptual policy resource.
* QosNetworkPolicyBinding, QosPortPolicyBinding: defines attachment between
  a Neutron resource and a QoS policy.
* QosBandwidthLimitRule: defines the only rule type available at the
  moment.

All database models are defined under:

* neutron.db.qos.models

QoS versioned objects
~~~~~~~~~~~~~~~~~~~~~

There is a long history of passing database dictionaries directly into the
business logic of Neutron. This path is not the one we wanted to take for
the QoS effort, so we've also introduced a new objects middleware to
encapsulate the database logic from the rest of the Neutron code that works
with QoS resources. For this, we've adopted the oslo.versionedobjects
library and introduced a new NeutronObject class that is a base for all
other objects that will belong to the middle layer. There is an expectation
that Neutron will evolve into using objects for all resources it handles,
though that part was obviously out of scope for the QoS effort.

Every NeutronObject supports the following operations:

* get_object: returns the specific object that is represented by the id
  passed as an argument.
* get_objects: returns all objects of the type, potentially with a filter
  applied.
* create/update/delete: usual persistence operations.

The base object class is defined in:

* neutron.objects.base

For QoS, new neutron objects were implemented:

* QosPolicy: directly maps to the conceptual policy resource, as defined
  above.
* QosBandwidthLimitRule: class that represents the only rule type supported
  by the initial QoS design.

Those are defined in:

* neutron.objects.qos.policy
* neutron.objects.qos.rule

For the QosPolicy neutron object, the following public methods were
implemented:

* get_network_policy/get_port_policy: returns a policy object that is
  attached to the corresponding Neutron resource.
* attach_network/attach_port: attach a policy to the corresponding Neutron
  resource.
* detach_network/detach_port: detach a policy from the corresponding
  Neutron resource.
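Putting these operations together, usage looks roughly like the following
hedged sketch (method names as described above; the identifiers are
invented and an admin context is assumed)::

    from neutron import context as n_context
    from neutron.objects.qos import policy as policy_object

    POLICY_ID = 'invented-policy-uuid'
    PORT_ID = 'invented-port-uuid'

    ctx = n_context.get_admin_context()

    # Fetch a policy by its id, then attach it to a port.
    policy = policy_object.QosPolicy.get_object(ctx, id=POLICY_ID)
    policy.attach_port(PORT_ID)

    # The policy attached to that port can later be retrieved with:
    policy = policy_object.QosPolicy.get_port_policy(ctx, PORT_ID)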
In addition to the fields that belong to the QoS policy database object
itself, synthetic fields were added to the object that represent lists of
rules that belong to the policy. To get a list of all rules for a specific
policy, a consumer of the object can just access the corresponding
attribute via:

* policy.rules

The implementation is done in a way that will allow adding a new rule list
field with little or no modification to the policy object itself. This is
achieved by smart introspection of existing available rule object
definitions and automatic definition of those fields on the policy class.

Note that rules are loaded in a non-lazy way, meaning they are all fetched
from the database on policy fetch.

For QosRule objects, an extendable approach was taken to allow easy
addition of objects for new rule types. To accommodate this, fields common
to all types are put into a base class called QosRule that is then
inherited into type-specific rule implementations that, ideally, only
define additional fields and some other minor things.

Note that the QosRule base class is not registered with the
oslo.versionedobjects registry, because it's not expected that 'generic'
rules should be instantiated (and to suggest just that, the base rule class
is marked as ABC).

QoS objects rely on some primitive database API functions that are added
in:

* neutron.db.api: those can be reused to fetch other models that do not
  have corresponding versioned objects yet, if needed.
* neutron.db.qos.api: contains database functions that are specific to QoS
  models.

RPC communication
~~~~~~~~~~~~~~~~~

Details on the RPC communication implemented in the reference backend
driver are discussed in `a separate page `_.

One thing that should be mentioned here explicitly is that RPC callback
endpoints communicate using real versioned objects (as defined by
serialization for the oslo.versionedobjects library), not vague json
dictionaries. Meaning, oslo.versionedobjects are on the wire and not just
used internally inside a component.

One more thing to note is that though the RPC interface relies on versioned
objects, it does not yet rely on the versioning features the
oslo.versionedobjects library provides. This is because Liberty is the
first release where we start using the RPC interface, so we have no way to
get different versions in a cluster. That said, the versioning strategy for
QoS is thought through and described in `the separate page `_.

There is an expectation that after RPC callbacks are introduced in Neutron,
we will be able to migrate propagation from server to agents for other
resources (e.g. security groups) to the new mechanism. This will need to
wait until those resources get proper NeutronObject implementations.

The flow of updates is as follows:

* if a port that is bound to the agent is attached to a QoS policy, then
  the ML2 plugin detects the change by relying on the ML2 QoS extension
  driver, and notifies the agent about a port change. The agent proceeds
  with the notification by calling get_device_details() and getting the new
  port dict that contains a new qos_policy_id. Each device details dict is
  passed into the l2 agent extension manager that passes it down into every
  enabled extension, including QoS. The QoS extension sees that there is a
  new, unknown QoS policy for a port, so it uses ResourcesPullRpcApi to
  fetch the current state of the policy (with all the rules included) from
  the server. After that, the QoS extension applies the rules by calling
  into the QoS driver that corresponds to the agent.
* on an existing QoS policy update (which includes any policy or rule
  change), the server pushes the new policy object state through the
  ResourcesPushRpcApi interface. The interface fans out the serialized
  (dehydrated) object to any agent that is listening for QoS policy
  updates. If an agent has seen the policy before (it is attached to one of
  the ports it maintains), then it proceeds with applying the updates to
  the port. Otherwise, the agent silently ignores the update.

Agent side design
-----------------

Reference agents implement QoS functionality using an `L2 agent extension
`_.

* neutron.agent.l2.extensions.qos defines the QoS L2 agent extension. It
  receives handle_port and delete_port events and passes them down into the
  QoS agent backend driver (see below). The file also defines the
  QosAgentDriver interface. Note: each backend implements its own driver.
  The driver handles low level interaction with the underlying networking
  technology, while the QoS extension handles operations that are common to
  all agents.

Agent backends
~~~~~~~~~~~~~~

At the moment, QoS is supported by the Open vSwitch, SR-IOV and Linux
bridge ml2 drivers.

Each agent backend defines a QoS driver that implements the QosAgentDriver
interface:

* Open vSwitch (QosOVSAgentDriver);
* SR-IOV (QosSRIOVAgentDriver);
* Linux bridge (QosLinuxbridgeAgentDriver).

Open vSwitch
++++++++++++

The Open vSwitch implementation relies on the new ovs_lib OVSBridge
functions:

* get_egress_bw_limit_for_port
* create_egress_bw_limit_for_port
* delete_egress_bw_limit_for_port

An egress bandwidth limit is effectively configured on the port by setting
the port Interface parameters ingress_policing_rate and
ingress_policing_burst. That approach is less flexible than linux-htb,
Queues and OvS QoS profiles, which we may explore in the future, but which
will need to be used in combination with openflow rules.

SR-IOV
++++++

The SR-IOV bandwidth limit implementation relies on the new pci_lib
function:

* set_vf_max_rate

As the name of the function suggests, the limit is applied on a Virtual
Function (VF).

The ip link interface has the following limitation for bandwidth limits: it
uses Mbps as the unit of bandwidth measurement, not kbps, and does not
support float numbers. So in case the limit is set to something less than
1000 kbps, it is set to 1 Mbps only. If the limit is set to something that
does not divide evenly into 1000 kbps chunks, then the effective limit is
rounded to the nearest integer Mbps value.

Linux bridge
++++++++++++

The Linux bridge implementation relies on the new tc_lib functions:

* set_bw_limit
* update_bw_limit
* delete_bw_limit

The ingress bandwidth limit is configured on the tap port by setting a
simple `tc-tbf `_ queueing discipline (qdisc) on the port. It
requires the value of the HZ parameter configured in the kernel on the
host. This value is necessary to calculate the minimal burst value which is
set in tc. Details about how it is calculated can be found `here `_. This
solution is similar to the Open vSwitch implementation.

Configuration
-------------

To enable the service, the following steps should be followed:

On the server side:

* enable the qos service in service_plugins;
* set the needed notification_drivers in the [qos] section (message_queue
  is the default);
* for ml2, add 'qos' to extension_drivers in the [ml2] section.

On the agent side (OVS):

* add 'qos' to extensions in the [agent] section.

Testing strategy
----------------

All the code added or extended as part of the effort got reasonable unit
test coverage.
Neutron objects
~~~~~~~~~~~~~~~

Base unit test classes to validate neutron objects were implemented in a
way that allows code reuse when introducing a new object type. There are
two test classes that are utilized for that:

* BaseObjectIfaceTestCase: class to validate basic object operations
  (mostly CRUD) with the database layer isolated.
* BaseDbObjectTestCase: class to validate the same operations with models
  in place and the database layer unmocked.

Every new object implemented on top of one of those classes is expected to
either inherit existing test cases as is, or reimplement them, if it makes
sense in terms of how those objects are implemented. Specific test classes
can obviously extend the set of test cases as they see fit (e.g. you may
need to define new test cases for those additional methods that you add to
your object implementations on top of the base semantics common to all
neutron objects).

Functional tests
~~~~~~~~~~~~~~~~

Additions to ovs_lib to set bandwidth limits on ports are covered in:

* neutron.tests.functional.agent.test_ovs_lib

New functional tests for tc_lib to set bandwidth limits on ports are in:

* neutron.tests.functional.agent.linux.test_tc_lib

API tests
~~~~~~~~~

API tests for basic CRUD operations for ports, networks, policies, and
rules were added in:

* neutron.tests.api.test_qos
neutron-8.4.0/doc/source/devref/sriov_nic_agent.rst0000664000567000056710000000452713044372736023612 0ustar jenkinsjenkins00000000000000..
      Licensed under the Apache License, Version 2.0 (the "License"); you
      may not use this file except in compliance with the License. You may
      obtain a copy of the License at

          http://www.apache.org/licenses/LICENSE-2.0

      Unless required by applicable law or agreed to in writing, software
      distributed under the License is distributed on an "AS IS" BASIS,
      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
      implied. See the License for the specific language governing
      permissions and limitations under the License.

      Convention for heading levels in Neutron devref:
      =======  Heading 0 (reserved for the title in a document)
      -------  Heading 1
      ~~~~~~~  Heading 2
      +++++++  Heading 3
      '''''''  Heading 4
      (Avoid deeper levels because they do not render well.)

L2 Networking with SR-IOV enabled NICs
======================================

SR-IOV (Single Root I/O Virtualization) is a specification that allows a
PCIe device to appear to be multiple separate physical PCIe devices. SR-IOV
works by introducing the idea of physical functions (PFs) and virtual
functions (VFs). Physical functions (PFs) are full-featured PCIe functions.
Virtual functions (VFs) are “lightweight” functions that lack configuration
resources.

SR-IOV supports VLANs for L2 network isolation; other networking
technologies such as VXLAN/GRE may be supported in the future.

The SR-IOV NIC agent manages the configuration of the SR-IOV Virtual
Functions that connect VM instances running on the compute node to the
public network.

In the most common deployments, there are compute nodes and a network node.
A compute node can support VM connectivity via an SR-IOV enabled NIC. The
SR-IOV NIC agent manages the Virtual Functions admin state. In the future
it will manage additional settings, such as quality of service, rate limit
settings, spoofcheck and more. The network node will usually be deployed
with either Open vSwitch or Linux Bridge to support network node
functionality.
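As an illustration only (not the agent's actual code), VF administrative
state can be toggled with the standard ``ip link`` command that the agent
wraps; the device name and VF index below are invented::

    import subprocess

    def set_vf_state(pf_device, vf_index, enable):
        # Wraps: ip link set <pf> vf <index> state enable|disable
        state = 'enable' if enable else 'disable'
        subprocess.check_call(['ip', 'link', 'set', pf_device,
                               'vf', str(vf_index), 'state', state])

    set_vf_state('enp4s0f0', 0, True)  # invented PF device name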
Further Reading
---------------

* `Nir Yechiel - SR-IOV Networking – Part I: Understanding the Basics `_
* `SR-IOV Passthrough For Networking `_
neutron-8.4.0/doc/source/devref/i18n.rst0000664000567000056710000000256013044372760021220 0ustar jenkinsjenkins00000000000000..
      Licensed under the Apache License, Version 2.0 (the "License"); you
      may not use this file except in compliance with the License. You may
      obtain a copy of the License at

          http://www.apache.org/licenses/LICENSE-2.0

      Unless required by applicable law or agreed to in writing, software
      distributed under the License is distributed on an "AS IS" BASIS,
      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
      implied. See the License for the specific language governing
      permissions and limitations under the License.

      Convention for heading levels in Neutron devref:
      =======  Heading 0 (reserved for the title in a document)
      -------  Heading 1
      ~~~~~~~  Heading 2
      +++++++  Heading 3
      '''''''  Heading 4
      (Avoid deeper levels because they do not render well.)

Neutron Stadium i18n
====================

* Refer to the oslo_i18n documentation for the general mechanisms that
  should be used: http://docs.openstack.org/developer/oslo.i18n/usage.html
* Do NOT use the _i18n module in neutron-lib or neutron.
* It is recommended that you create a {package_name}/_i18n.py file in your
  repo, and use that. Your localization strings will also live in your
  repo.
* The neutron.i18n module will be around for a release or two, with shared
  localization strings, but migration is encouraged.
neutron-8.4.0/doc/source/devref/quota.rst0000664000567000056710000004347713044372760021576 0ustar jenkinsjenkins00000000000000..
      Licensed under the Apache License, Version 2.0 (the "License"); you
      may not use this file except in compliance with the License. You may
      obtain a copy of the License at

          http://www.apache.org/licenses/LICENSE-2.0

      Unless required by applicable law or agreed to in writing, software
      distributed under the License is distributed on an "AS IS" BASIS,
      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
      implied. See the License for the specific language governing
      permissions and limitations under the License.

      Convention for heading levels in Neutron devref:
      =======  Heading 0 (reserved for the title in a document)
      -------  Heading 1
      ~~~~~~~  Heading 2
      +++++++  Heading 3
      '''''''  Heading 4
      (Avoid deeper levels because they do not render well.)

Quota Management and Enforcement
================================

Most resources exposed by the Neutron API are subject to quota limits. The
Neutron API exposes an extension for managing such quotas. Quota limits are
enforced at the API layer, before the request is dispatched to the plugin.

Default values for quota limits are specified in neutron.conf. Admin users
can override those default values on a per-tenant basis. Limits are stored
in the Neutron database; if no limit is found for a given resource and
tenant, then the default value for such resource is used.
Configuration-based quota management, where every tenant gets the same
quota limit specified in the configuration file, has been deprecated as of
the Liberty release.

Please note that Neutron supports neither specification of quota limits per
user nor quota management for hierarchical multitenancy (as a matter of
fact, Neutron does not support hierarchical multitenancy at all). Also,
quota limits are currently not enforced on RPC interfaces listening on the
AMQP bus.

Plugin and ML2 drivers are not supposed to enforce quotas for resources
they manage.
However, the subnet_allocation [#]_ extension is an exception and will be
discussed below.

The quota management and enforcement mechanisms discussed here apply to
every resource which has been registered with the Quota engine, regardless
of whether such resource belongs to the core Neutron API or one of its
extensions.

High Level View
---------------

There are two main components in the Neutron quota system:

* The Quota API extension;
* The Quota Engine.

Both components rely on a quota driver. The neutron codebase currently
defines two quota drivers:

* neutron.db.quota.driver.DbQuotaDriver
* neutron.quota.ConfDriver

The latter driver is however deprecated.

The Quota API extension handles quota management, whereas the Quota Engine
component handles quota enforcement. This API extension is loaded like any
other extension. For this reason plugins must explicitly support it by
including "quotas" in the supported_extension_aliases attribute.

In the Quota API, simple CRUD operations are used for managing tenant
quotas. Please note that the current behaviour when deleting a tenant quota
is to reset quota limits for that tenant to configuration defaults. The API
extension does not validate the tenant identifier with the identity
service.

Performing quota enforcement is the responsibility of the Quota Engine.
RESTful API controllers, before sending a request to the plugin, try to
obtain a reservation from the quota engine for the resources specified in
the client request. If the reservation is successful, then it proceeds to
dispatch the operation to the plugin.

For a reservation to be successful, the total amount of resources
requested, plus the total amount of resources reserved, plus the total
amount of resources already stored in the database should not exceed the
tenant's quota limit.

Finally, both quota management and enforcement rely on a "quota driver"
[#]_, whose task is basically to perform database operations.

Quota Management
----------------

The quota management component is fairly straightforward.

However, unlike the vast majority of Neutron extensions, it uses its own
controller class [#]_. This class does not implement the POST operation.
List, get, update, and delete operations are implemented by the usual
index, show, update and delete methods. These methods simply call into the
quota driver for either fetching tenant quotas or updating them.

The _update_attributes method is called only once in the controller
lifetime. This method dynamically updates Neutron's resource attribute map
[#]_ so that an attribute is added for every resource managed by the quota
engine. Request authorisation is performed in this controller, and only
'admin' users are allowed to modify quotas for tenants. As the neutron
policy engine is not used, it is not possible to configure which users
should be allowed to manage quotas using policy.json.

The driver operations dealing with quota management are:

* delete_tenant_quota, which simply removes all entries from the 'quotas'
  table for a given tenant identifier;
* update_quota_limit, which adds or updates an entry in the 'quotas' table
  for a given tenant identifier and a given resource name;
* _get_quotas, which fetches limits for a set of resources and a given
  tenant identifier;
* _get_all_quotas, which behaves like _get_quotas, but for all tenants.
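For instance, the management operations above reduce to simple driver
calls; a hedged sketch (the tenant identifier is invented, and an admin
context is assumed)::

    from neutron import context as n_context
    from neutron.db.quota.driver import DbQuotaDriver

    ctx = n_context.get_admin_context()
    driver = DbQuotaDriver()

    # Give tenant 'abc123' a limit of 20 ports.
    driver.update_quota_limit(ctx, 'abc123', 'port', 20)

    # Remove all overrides, reverting the tenant to configuration defaults.
    driver.delete_tenant_quota(ctx, 'abc123')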
Resource Usage Info
-------------------

Neutron has two ways of tracking resource usage info:

* CountableResource, where resource usage is calculated every time quota
  limits are enforced by counting rows in the resource table and
  reservations for that resource.
* TrackedResource, which instead relies on a specific table tracking usage
  data, and performs explicit counting only when the data in this table are
  not in sync with actual used and reserved resources.

Another difference between CountableResource and TrackedResource is that
the former invokes a plugin method to count resources. CountableResource
should therefore be employed for plugins which do not leverage the Neutron
database.

The actual class that the Neutron quota engine will use is determined by
the track_quota_usage variable in the quota configuration section. If True,
TrackedResource instances will be created, otherwise the quota engine will
use CountableResource instances. Resource creation is performed by the
create_resource_instance factory method in the neutron.quota.resource
module.

From a performance perspective, having a table tracking resource usage has
some advantages, albeit not fundamental ones. Indeed the time required for
executing queries to explicitly count objects will increase with the number
of records in the table. On the other hand, using TrackedResource will
fetch a single record, but has the drawback of having to execute an UPDATE
statement once the operation is completed. Nevertheless, CountableResource
instances do not simply perform a SELECT query on the relevant table for a
resource, but invoke a plugin method, which might execute several
statements and sometimes even interact with the backend before returning.

Resource usage tracking also becomes important for operational correctness
when coupled with the concept of resource reservation, discussed in another
section of this chapter.

Tracking quota usage is not as simple as updating a counter every time
resources are created or deleted. Indeed a quota-limited resource in
Neutron can be created in several ways. While a RESTful API request is the
most common one, resources can be created by RPC handlers listening on the
AMQP bus, such as those which create DHCP ports, or by plugin operations,
such as those which create router ports.

To this aim, TrackedResource instances are initialised with a reference to
the model class for the resource for which they track usage data. During
object initialisation, SqlAlchemy event handlers are installed for this
class. The event handler is executed after a record is inserted or deleted.
As a result, usage data for that resource will be marked as 'dirty' once
the operation completes, so that the next time usage data is requested, it
will be synchronised by counting resource usage from the database.

Even if this solution has some drawbacks, listed in the 'exceptions and
caveats' section, it is more reliable than solutions such as:

* Updating the usage counters with the new 'correct' value every time an
  operation completes.
* Having a periodic task synchronising quota usage data with actual data in
  the Neutron DB.

Finally, regardless of whether CountableResource or TrackedResource is
used, the quota engine always invokes its count() method to retrieve
resource usage. Therefore, from the perspective of the Quota engine there
is absolutely no difference between CountableResource and TrackedResource.
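To make the event-handler mechanism concrete, here is a simplified sketch
(not Neutron's actual code; the dirty-marking helper is invented)::

    from sqlalchemy import event

    def _mark_dirty(mapper, connection, target):
        # Flag the (resource, tenant) usage tracker for resynchronisation
        # the next time usage data is read. set_usage_dirty is an invented
        # helper standing in for the real dirty-marking logic.
        set_usage_dirty(target.__tablename__, target.tenant_id)

    def install_usage_events(model_class):
        # Fire after a row for the tracked model is inserted or deleted.
        event.listen(model_class, 'after_insert', _mark_dirty)
        event.listen(model_class, 'after_delete', _mark_dirty)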
Quota Enforcement
-----------------

Before dispatching a request to the plugin, the Neutron 'base' controller
[#]_ attempts to make a reservation for the requested resource(s).
Reservations are made by calling the make_reservation method in
neutron.quota.QuotaEngine. The process of making a reservation is fairly
straightforward:

* Get current resource usages. This is achieved by invoking the count
  method on every requested resource, and then retrieving the amount of
  reserved resources.
* Fetch current quota limits for requested resources, by invoking the
  _get_tenant_quotas method.
* Fetch expired reservations for selected resources. This amount will be
  subtracted from resource usage. As in most cases there won't be any
  expired reservations, this approach actually requires fewer DB operations
  than doing a sum of non-expired, reserved resources for each request.
* For each resource, calculate its headroom, and verify the requested
  amount of the resource is less than the headroom.
* If the above is true for all resources, the reservation is saved in the
  DB, otherwise an OverQuotaLimit exception is raised.

The quota engine is able to make a reservation for multiple resources.
However, it is worth noting that because of the current structure of the
Neutron API layer, there will not be any practical case in which a
reservation for multiple resources is made. For this reason, performance
optimisations avoiding repeated queries for every resource are not part of
the current implementation.

In order to ensure correct operations, a row-level lock is acquired in the
transaction which creates the reservation. The lock is acquired when
reading usage data. In case of write-set certification failures, which can
occur in active/active clusters such as MySQL galera, the decorator
oslo_db.api.wrap_db_retry will retry the transaction if a DBDeadLock
exception is raised. While non-locking approaches are possible, it has been
found that, since a non-locking algorithm increases the chances of
collision, the cost of handling a DBDeadlock is still lower than the cost
of retrying the operation when a collision is detected. A study in this
direction was conducted for IP allocation operations, but the same
principles apply here as well [#]_. Nevertheless, moving away from DB-level
locks is something that must happen for quota enforcement in the future.

Committing and cancelling a reservation is as simple as deleting the
reservation itself. When a reservation is committed, the resources which
were reserved are now stored in the database, so the reservation itself
should be deleted. The Neutron quota engine simply removes the record when
cancelling a reservation (ie: the request failed to complete), and also
marks quota usage info as dirty when the reservation is committed (ie: the
request completed correctly). Reservations are committed or cancelled by
respectively calling the commit_reservation and cancel_reservation methods
in neutron.quota.QuotaEngine.

Reservations are not perennial. Eternal reservations would eventually
exhaust tenants' quotas because they would never be removed when an API
worker crashes whilst in the middle of an operation. Reservation expiration
is currently set to 120 seconds, and is not configurable, not yet at least.
Expired reservations are not counted when calculating resource usage. While
creating a reservation, if any expired reservation is found, all expired
reservations for that tenant and resource will be removed from the
database, thus avoiding build-up of expired reservations.
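The lifecycle just described can be sketched as follows (a hedged
illustration of the QuotaEngine calls named above, not the controller's
actual code; the dispatch helper and the reservation_id attribute access
are assumptions)::

    from neutron import quota

    def create_with_reservation(context, plugin, tenant_id, deltas):
        # deltas maps resource names to requested amounts, e.g. {'port': 2}
        reservation = quota.QUOTAS.make_reservation(
            context, tenant_id, deltas, plugin)
        try:
            result = dispatch_to_plugin(context, deltas)  # invented helper
        except Exception:
            # The request failed: cancel, i.e. delete, the reservation.
            quota.QUOTAS.cancel_reservation(
                context, reservation.reservation_id)
            raise
        # The request succeeded: commit, marking usage data as dirty.
        quota.QUOTAS.commit_reservation(
            context, reservation.reservation_id)
        return result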
Setting up Resource Tracking for a Plugin
-----------------------------------------

By default, plugins do not leverage resource tracking. Having the plugin
explicitly declare which resources should be tracked is a precise design
choice aimed at limiting as much as possible the chance of introducing
errors in existing plugins.

For this reason, a plugin must declare which resources it intends to track.
This can be achieved using the tracked_resources decorator available in the
neutron.quota.resource_registry module. The decorator should ideally be
applied to the plugin's __init__ method.

The decorator accepts as input a list of keyword arguments. The name of
each argument must be a resource name, and its value must be a DB model
class. For example:

::

    @resource_registry.tracked_resources(network=models_v2.Network,
                                         port=models_v2.Port,
                                         subnet=models_v2.Subnet,
                                         subnetpool=models_v2.SubnetPool)

will ensure network, port, subnet and subnetpool resources are tracked. In
theory, it is possible to use this decorator multiple times, and not
exclusively on __init__ methods. However, this would eventually lead to
code readability and maintainability problems, so developers are strongly
encouraged to apply this decorator exclusively to the plugin's __init__
method (or any other method which is called by the plugin only once during
its initialization).

Notes for Implementors of RPC Interfaces and RESTful Controllers
-----------------------------------------------------------------

Neutron unfortunately does not have a layer which is called before
dispatching the operation to the plugin and which can be leveraged both
from RESTful and RPC over AMQP APIs. In particular, the RPC handlers call
straight into the plugin, without doing any request authorisation or quota
enforcement.

Therefore RPC handlers must explicitly indicate if they are going to call
the plugin to create or delete any sort of resources. This is achieved in a
simple way, by ensuring modified resources are marked as dirty after the
RPC handler execution terminates. To this aim, developers can use the
mark_resources_dirty decorator available in the module
neutron.quota.resource_registry.

The decorator will scan the whole list of registered resources, and store
the dirty status for their usage trackers in the database for those
resources for which items have been created or destroyed during the plugin
operation.

Exceptions and Caveats
----------------------

Please be aware of the following limitations of the quota enforcement
engine:

* Subnet allocation from subnet pools, in particular shared pools, is also
  subject to quota limit checks. However, these checks are not enforced by
  the quota engine, but through a mechanism implemented in the
  neutron.ipam.subnetalloc module. This is because the Quota engine is not
  able to satisfy the requirements for quotas on subnet allocation.
* The quota engine also provides a limit_check routine which enforces quota
  checks without creating reservations. This way of doing quota enforcement
  is extremely unreliable and is superseded by the reservation mechanism.
  It has not been removed to ensure off-tree plugins and extensions which
  leverage it are not broken.
* SqlAlchemy events might not be the most reliable way of detecting changes
  in resource usage. Since the event mechanism monitors the data model
  class, it is paramount for correct quota enforcement that resources are
  always created and deleted using object relational mappings.
  For instance, deleting a resource with a query.delete call will not
  trigger the event. SQLAlchemy events should be considered a temporary
  measure, adopted because Neutron lacks persistent API objects.
* As CountableResource instances do not track usage data, when making a
  reservation no write-intent lock is acquired. Therefore the quota engine
  with CountableResource is not concurrency-safe.
* The mechanism for specifying for which resources usage tracking is
  enabled relies on the fact that the plugin is loaded before quota-limited
  resources are registered. For this reason it is not possible to validate
  whether a resource actually exists or not when enabling tracking for it.
  Developers should pay particular attention to ensuring resource names are
  correctly specified.
* The code assumes usage trackers are a trusted source of truth: if they
  report a usage counter and the dirty bit is not set, that counter is
  correct. If it is dirty, then surely that counter is out of sync. This is
  not very robust, as there might be issues upon restart when toggling the
  use_tracked_resources configuration variable, as stale counters might be
  relied upon for making reservations. Also, the same situation might occur
  if a server crashes after the API operation is completed but before the
  reservation is committed, as the actual resource usage is changed but the
  corresponding usage tracker is not marked as dirty.

References
----------

.. [#] Subnet allocation extension: http://git.openstack.org/cgit/openstack/neutron/tree/neutron/extensions/subnetallocation.py
.. [#] DB Quota driver class: http://git.openstack.org/cgit/openstack/neutron/tree/neutron/db/quota_db.py#n33
.. [#] Quota API extension controller: http://git.openstack.org/cgit/openstack/neutron/tree/neutron/extensions/quotasv2.py#n40
.. [#] Neutron resource attribute map: http://git.openstack.org/cgit/openstack/neutron/tree/neutron/api/v2/attributes.py#n639
.. [#] Base controller class: http://git.openstack.org/cgit/openstack/neutron/tree/neutron/api/v2/base.py#n50
.. [#] http://lists.openstack.org/pipermail/openstack-dev/2015-February/057534.html
neutron-8.4.0/doc/source/devref/alembic_migrations.rst0000664000567000056710000004577713044372760024276 0ustar jenkinsjenkins00000000000000..
      Licensed under the Apache License, Version 2.0 (the "License"); you
      may not use this file except in compliance with the License. You may
      obtain a copy of the License at

          http://www.apache.org/licenses/LICENSE-2.0

      Unless required by applicable law or agreed to in writing, software
      distributed under the License is distributed on an "AS IS" BASIS,
      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
      implied. See the License for the specific language governing
      permissions and limitations under the License.

      Convention for heading levels in Neutron devref:
      =======  Heading 0 (reserved for the title in a document)
      -------  Heading 1
      ~~~~~~~  Heading 2
      +++++++  Heading 3
      '''''''  Heading 4
      (Avoid deeper levels because they do not render well.)

.. _alembic_migrations:

Alembic Migrations
==================

Introduction
------------

The migrations in the alembic/versions directory contain the changes needed
to migrate from older Neutron releases to newer versions. A migration
occurs by executing a script that details the changes needed to upgrade the
database. The migration scripts are ordered so that multiple scripts can
run sequentially to update the database.
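Each script declares where it sits in that order through revision
identifiers; a skeletal, hypothetical example (the file name and IDs are
invented)::

    # 2b4c2465d44b_example.py (file name and IDs invented)
    revision = '2b4c2465d44b'       # this script's identifier
    down_revision = '8a6d8bdae39'   # the script that must run before it

    def upgrade():
        pass  # schema operations go here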
The Migration Wrapper
---------------------

The scripts are executed by Neutron's migration wrapper
``neutron-db-manage`` which uses the Alembic library to manage the
migration. Pass the ``--help`` option to the wrapper for usage information.

The wrapper takes some options followed by some commands::

  neutron-db-manage <options> <commands>

The wrapper needs to be provided with the database connection string, which
is usually provided in the ``neutron.conf`` configuration file in an
installation. The wrapper automatically reads from
``/etc/neutron/neutron.conf`` if it is present. If the configuration is in
a different location::

  neutron-db-manage --config-file /path/to/neutron.conf <commands>

Multiple ``--config-file`` options can be passed if needed.

Instead of reading the DB connection from the configuration file(s), the
``--database-connection`` option can be used::

  neutron-db-manage --database-connection mysql+pymysql://root:secret@127.0.0.1/neutron?charset=utf8 <commands>

The ``branches``, ``current``, and ``history`` commands all accept a
``--verbose`` option, which, when passed, will instruct
``neutron-db-manage`` to display more verbose output for the specified
command::

  neutron-db-manage current --verbose

For some commands the wrapper needs to know the entrypoint of the core
plugin for the installation. This can be read from the configuration
file(s) or specified using the ``--core_plugin`` option::

  neutron-db-manage --core_plugin neutron.plugins.ml2.plugin.Ml2Plugin <commands>

When giving examples below of using the wrapper, the options will not be
shown. It is assumed you will use the options that you need for your
environment.

For new deployments you will start with an empty database. You then upgrade
to the latest database version via::

  neutron-db-manage upgrade heads

For existing deployments the database will already be at some version. To
check the current database version::

  neutron-db-manage current

After installing a new version of the Neutron server, upgrading the
database is the same command::

  neutron-db-manage upgrade heads

To create a script to run the migration offline::

  neutron-db-manage upgrade heads --sql

To run the offline migration between specific migration versions::

  neutron-db-manage upgrade <start version>:<end version> --sql

Upgrade the database incrementally::

  neutron-db-manage upgrade --delta <# of revs>

**NOTE:** Database downgrade is not supported.

Migration Branches
------------------

Neutron makes use of alembic branches for two purposes.

1. Independent Sub-Project Tables
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

Various `sub-projects `_ can be installed with Neutron. Each sub-project
registers its own alembic branch which is responsible for migrating the
schemas of the tables owned by the sub-project.

The neutron-db-manage script detects which sub-projects have been installed
by enumerating the ``neutron.db.alembic_migrations`` entrypoints. For more
details see the `Entry Points section of Contributing extensions to Neutron
`_.

The neutron-db-manage script runs the given alembic command against all
installed sub-projects. (An exception is the ``revision`` command, which is
discussed in the `Developers`_ section below.)

2. Offline/Online Migrations
~~~~~~~~~~~~~~~~~~~~~~~~~~~~

Since Liberty, Neutron maintains two parallel alembic migration branches.

The first one, called 'expand', is used to store expansion-only migration
rules. These rules are strictly additive and can be applied while
neutron-server is running. Examples of additive database schema changes
are: creating a new table, adding a new table column, adding a new index,
etc.
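A minimal expand-only migration might look like the following sketch (the
table and column names are invented)::

    import sqlalchemy as sa
    from alembic import op

    def upgrade():
        # Purely additive, so safe to apply while neutron-server runs.
        op.add_column('example_table',
                      sa.Column('new_attribute', sa.String(length=255),
                                nullable=True))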
The second branch, called 'contract', is used to store those migration
rules that are not safe to apply while neutron-server is running. Those
include: column or table removal, moving data from one part of the database
into another (renaming a column, transforming a single table into multiple,
etc.), introducing or modifying constraints, etc.

The intent of the split is to allow invoking those safe migrations from the
'expand' branch while neutron-server is running, reducing the downtime
needed to upgrade the service.

For more details, see the `Expand and Contract Scripts`_ section below.

Developers
----------

A database migration script is required when you submit a change to Neutron
or a sub-project that alters the database model definition. The migration
script is a special Python file that includes code to upgrade the database
to match the changes in the model definition. Alembic will execute these
scripts in order to provide a linear migration path between revisions. The
neutron-db-manage command can be used to generate migration scripts for you
to complete. The operations in the template are those supported by the
Alembic migration library.

.. _neutron-db-manage-without-devstack:

Running neutron-db-manage without devstack
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

When, as a developer, you want to work with the Neutron DB schema and
alembic migrations only, it can be rather tedious to rely on devstack just
to get an up-to-date neutron-db-manage installed. This section describes
how to work on the schema and migration scripts with just the unit test
virtualenv and mysql. You can also operate on a separate test database so
you don't mess up the installed Neutron database.

Setting up the environment
++++++++++++++++++++++++++

Install mysql service
'''''''''''''''''''''

This only needs to be done once since it is a system install. If you have
run devstack on your system before, then the mysql service is already
installed and you can skip this step.

MySQL must be configured as installed by devstack, and the following script
accomplishes this without actually running devstack::

  INSTALL_MYSQL_ONLY=True ./tools/configure_for_func_testing.sh ../devstack

Run this from the root of the neutron repo. It assumes an up-to-date clone
of the devstack repo is in ``../devstack``.

Note that you must know the MySQL root password. It is derived from (in
order of precedence):

- ``$MYSQL_PASSWORD`` in your environment
- ``$MYSQL_PASSWORD`` in ``../devstack/local.conf``
- ``$MYSQL_PASSWORD`` in ``../devstack/localrc``
- default of 'secretmysql' from ``tools/configure_for_func_testing.sh``

Work on a test database
'''''''''''''''''''''''

Rather than using the neutron database when working on schema and alembic
migration script changes, we can work on a test database. In the examples
below, we use a database named ``testdb``.
To create the database::

  mysql -e "create database testdb;"

You will often need to clear it to re-run operations from a blank
database::

  mysql -e "drop database testdb; create database testdb;"

To work on the test database instead of the neutron database, point to it
with the ``--database-connection`` option::

  neutron-db-manage --database-connection mysql+pymysql://root:secretmysql@127.0.0.1/testdb?charset=utf8

You may find it convenient to set up an alias (in your .bashrc) for this::

  alias test-db-manage='neutron-db-manage --database-connection mysql+pymysql://root:secretmysql@127.0.0.1/testdb?charset=utf8'

Create and activate the virtualenv
''''''''''''''''''''''''''''''''''

From the root of the neutron (or sub-project) repo directory, run::

  tox --notest -r -e py27
  source .tox/py27/bin/activate

Now you can use the ``test-db-manage`` alias in place of
``neutron-db-manage`` in the script auto-generation instructions below.
When you are done, exit the virtualenv::

  deactivate

Script Auto-generation
~~~~~~~~~~~~~~~~~~~~~~

This section describes how to auto-generate an alembic migration script for
a model change. You may either use the system installed devstack
environment, or a virtualenv + testdb environment as described in
:ref:`neutron-db-manage-without-devstack`.

Stop the neutron service. Work from the base directory of the neutron (or
sub-project) repo. Check out the master branch and do ``git pull`` to
ensure it is fully up to date. Check out your development branch and rebase
to master.

**NOTE:** Make sure you have not updated the ``CONTRACT_HEAD`` or
``EXPAND_HEAD`` yet at this point.

Start with an empty database and upgrade to heads::

  mysql -e "drop database neutron; create database neutron;"
  neutron-db-manage upgrade heads

The database schema is now created without your model changes. The alembic
``revision --autogenerate`` command will look for differences between the
schema generated by the upgrade command and the schema defined by the
models, including your model updates::

  neutron-db-manage revision -m "description of revision" --autogenerate

This generates a prepopulated template with the changes needed to match the
database state with the models. You should inspect the autogenerated
template to ensure that the proper models have been altered.

When running the above command you will probably get the following error
message::

  Multiple heads are present; please specify the head revision on which the
  new revision should be based, or perform a merge.

This is alembic telling you that it does not know which branch (contract or
expand) to generate the revision for. You must decide, based on whether you
are doing contracting or expanding changes to the schema, and provide
either the ``--contract`` or ``--expand`` option. If you have both types of
changes, you must run the command twice, once with each option, and then
manually edit the generated revision scripts to separate the migration
operations.

In rare circumstances, you may want to start with an empty migration
template and manually author the changes necessary for an upgrade. You can
create a blank file for a branch via::

  neutron-db-manage revision -m "description of revision" --expand
  neutron-db-manage revision -m "description of revision" --contract

**NOTE:** If you use the above command, you should check that the migration
is created in a directory named after the current release. If not, please
raise the issue with the development team (IRC, mailing list, launchpad
bug).
**NOTE:** The "description of revision" text should be a simple English
sentence. The first 30 characters of the description will be used in the
file name for the script, with underscores substituted for spaces. If the
truncation occurs at an awkward point in the description, you can modify
the script file name manually before committing.

The timeline on each alembic branch should remain linear and not interleave
with other branches, so that there is a clear path when upgrading. To
verify that alembic branches maintain linear timelines, you can run this
command::

  neutron-db-manage check_migration

If this command reports an error, you can troubleshoot by showing the
migration timelines using the ``history`` command::

  neutron-db-manage history

Expand and Contract Scripts
~~~~~~~~~~~~~~~~~~~~~~~~~~~

Under the obsolete "branchless" design, a migration script indicates a
specific "version" of the schema and includes directives that apply all
necessary changes to the database at once. If we look for example at the
script ``2d2a8a565438_hierarchical_binding.py``, we will see::

  # .../alembic_migrations/versions/2d2a8a565438_hierarchical_binding.py

  def upgrade():

      # .. inspection code ...

      op.create_table(
          'ml2_port_binding_levels',
          sa.Column('port_id', sa.String(length=36), nullable=False),
          sa.Column('host', sa.String(length=255), nullable=False),
          # ... more columns ...
      )

      for table in port_binding_tables:
          op.execute((
              "INSERT INTO ml2_port_binding_levels "
              "SELECT port_id, host, 0 AS level, driver, segment AS segment_id "
              "FROM %s "
              "WHERE host <> '' "
              "AND driver <> '';"
          ) % table)

      op.drop_constraint(fk_name_dvr[0], 'ml2_dvr_port_bindings', 'foreignkey')
      op.drop_column('ml2_dvr_port_bindings', 'cap_port_filter')
      op.drop_column('ml2_dvr_port_bindings', 'segment')
      op.drop_column('ml2_dvr_port_bindings', 'driver')

      # ... more DROP instructions ...

The above script contains directives that are both under the "expand" and
"contract" categories, as well as some data migrations. The
``op.create_table`` directive is an "expand"; it may be run safely while
the old version of the application still runs, as the old code simply
doesn't look for this table. The ``op.drop_constraint`` and
``op.drop_column`` directives are "contract" directives (the drop column
more so than the drop constraint); running at least the ``op.drop_column``
directives means that the old version of the application will fail, as it
will attempt to access these columns which no longer exist.

The data migrations in this script are adding new rows to the newly added
``ml2_port_binding_levels`` table.

Under the new migration script directory structure, the above script would
be stated as two scripts; an "expand" and a "contract" script::

  # expansion operations
  # .../alembic_migrations/versions/liberty/expand/2bde560fc638_hierarchical_binding.py

  def upgrade():

      op.create_table(
          'ml2_port_binding_levels',
          sa.Column('port_id', sa.String(length=36), nullable=False),
          sa.Column('host', sa.String(length=255), nullable=False),
          # ... more columns ...
        )

    # contraction operations
    # .../alembic_migrations/versions/liberty/contract/4405aedc050e_hierarchical_binding.py

    def upgrade():

        for table in port_binding_tables:
            op.execute((
                "INSERT INTO ml2_port_binding_levels "
                "SELECT port_id, host, 0 AS level, driver, segment AS segment_id "
                "FROM %s "
                "WHERE host <> '' "
                "AND driver <> '';"
            ) % table)

        op.drop_constraint(fk_name_dvr[0], 'ml2_dvr_port_bindings', 'foreignkey')
        op.drop_column('ml2_dvr_port_bindings', 'cap_port_filter')
        op.drop_column('ml2_dvr_port_bindings', 'segment')
        op.drop_column('ml2_dvr_port_bindings', 'driver')

        # ... more DROP instructions ...

The two scripts would be present in different subdirectories and also part of entirely separate versioning streams. The "expand" operations are in the "expand" script, and the "contract" operations are in the "contract" script.

For the time being, data migration rules also belong to the contract branch. The expectation is that live data migrations will eventually move into middleware that is aware of the different database schema elements to converge on, but Neutron is not there yet.

Scripts that contain only expansion or contraction rules do not require a split into two parts.

If a contraction script depends on a script from the expansion stream, the following directive should be added to the contraction script::

    depends_on = ('',)

HEAD files for conflict management
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

In the directory ``neutron/db/migration/alembic_migrations/versions`` there are two files, ``CONTRACT_HEAD`` and ``EXPAND_HEAD``. These files contain the ID of the head revision in each branch. The purpose of these files is to validate the revision timelines and prevent non-linear changes from entering the merge queue. When you create a new migration script with neutron-db-manage, these files will be updated automatically. But if another migration script is merged while your change is under review, you will need to resolve the conflict manually by changing the ``down_revision`` in your migration script.

Applying database migration rules
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

To apply just expansion rules, execute::

    neutron-db-manage upgrade --expand

After the first step is done, you can stop neutron-server and apply the remaining non-expansive migration rules, if any::

    neutron-db-manage upgrade --contract

and finally, start your neutron-server again.

If you have multiple neutron-server instances in your cloud, and there are pending contract scripts not applied to the database, a full shutdown of all those services is required before 'upgrade --contract' is executed. You can determine whether there are any pending contract scripts by checking the message returned from the following command::

    neutron-db-manage has_offline_migrations

If you are not interested in applying safe migration rules while the service is running, you can still upgrade the database the old way, by stopping the service, and then applying all available rules::

    neutron-db-manage upgrade head[s]

It will apply all the rules from both the expand and the contract branches, in proper order.

Tagging milestone revisions
~~~~~~~~~~~~~~~~~~~~~~~~~~~

When a named release (liberty, mitaka, etc.) is done for neutron or a sub-project, the alembic revision scripts at the head of each branch for that release must be tagged. This is referred to as a milestone revision tag. For example, `here `_ is a patch that tags the liberty milestone revisions for the neutron-fwaas sub-project. Note that each branch (expand and contract) is tagged.
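As a hedged sketch of what such a tagging patch adds (the revision identifiers below are hypothetical, and details may differ between releases), the head script of each branch gains a milestone marker, which in the Neutron tree is expressed with a ``neutron_milestone`` attribute::

    # hypothetical excerpt from the head revision script of one branch
    from neutron.db import migration

    # revision identifiers, used by alembic (hypothetical values)
    revision = '5abc1234de67'
    down_revision = '4def5678ab12'

    # milestone identifier, used by neutron-db-manage
    neutron_milestone = [migration.LIBERTY]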
Tagging milestones allows neutron-db-manage to upgrade the schema to a milestone release, e.g.::

    neutron-db-manage upgrade liberty

Generation of comparable metadata with current database schema
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

The directory ``neutron/db/migration/models`` contains the module ``head.py``, which provides all database models at the current HEAD. Its purpose is to provide model metadata that can be compared with the current database schema, which is generated by the alembic migration scripts. The models must match the schema, and this is verified by a model-migration sync test in Neutron's functional test suite. That test requires all modules containing DB models to be imported by head.py in order to make a complete comparison. When adding new database models, developers must update this module, otherwise the change will fail to merge.

neutron-8.4.0/doc/source/devref/openvswitch_agent.rst0000664000567000056710000001005113044372760024162 0ustar jenkinsjenkins00000000000000.. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. Convention for heading levels in Neutron devref: ======= Heading 0 (reserved for the title in a document) ------- Heading 1 ~~~~~~~ Heading 2 +++++++ Heading 3 ''''''' Heading 4 (Avoid deeper levels because they do not render well.)

OpenVSwitch L2 Agent
====================

This agent uses the `OpenVSwitch`_ virtual switch to create L2 connectivity for instances, along with bridges created in conjunction with OpenStack Nova for filtering. ovs-neutron-agent can be configured to use different networking technologies to create tenant isolation. These technologies are implemented as ML2 type drivers which are used in conjunction with the OpenVSwitch mechanism driver.

VLAN Tags
---------

.. image:: images/under-the-hood-scenario-1-ovs-compute.png

.. _OpenVSwitch: http://openvswitch.org

GRE Tunnels
-----------

GRE Tunneling is documented in depth in the `Networking in too much detail `_ article by Red Hat.

VXLAN Tunnels
-------------

VXLAN is an overlay technology which encapsulates layer 2 MAC frames in UDP packets. More information can be found in `The VXLAN wiki page. `_

Geneve Tunnels
--------------

Geneve uses UDP as its transport protocol and is dynamic in size thanks to extensible option headers. Note that it is currently supported only by newer kernels (kernel >= 3.18, OVS version >= 2.4). More information can be found in the `Geneve RFC document. `_

Bridge Management
-----------------

In order to make the agent capable of handling more than one tunneling technology, to decouple the requirements of segmentation technology from tenant isolation, and to preserve backward compatibility for OVS agents working without tunneling, the agent relies on a tunneling bridge, or br-tun, and the well-known integration bridge, or br-int.

All VM VIFs are plugged into the integration bridge. VM VIFs on a given virtual network share a common "local" VLAN (i.e. not propagated externally). The VLAN id of this local VLAN is mapped to the physical networking details realizing that virtual network.
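As a quick, hedged way to observe this layout on a running compute node (the bridge names below are the agent defaults and may differ per deployment), the standard Open vSwitch tools can be used::

    # list the bridges on this node; br-int and br-tun are the
    # integration and tunneling bridges described above
    ovs-vsctl list-br

    # dump the flow rules on the integration bridge to see how local
    # VLAN tags are added, modified, or stripped
    ovs-ofctl dump-flows br-int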
For virtual networks realized as VXLAN/GRE tunnels, a Logical Switch (LS) identifier is used to differentiate tenant traffic on inter-HV tunnels. A mesh of tunnels is created to other hypervisors in the cloud. These tunnels originate and terminate on the tunneling bridge of each hypervisor, leaving br-int unaffected. Port patching is done to connect local VLANs on the integration bridge to inter-hypervisor tunnels on the tunnel bridge.

For each virtual network realized as a VLAN or flat network, a veth or a pair of patch ports is used to connect the local VLAN on the integration bridge with the physical network bridge, with flow rules adding, modifying, or stripping VLAN tags as necessary, thus preserving backward compatibility with the way the OVS agent used to work prior to the tunneling capability (for more details, please look at https://review.openstack.org/#/c/4367). Bear in mind that this design decision may be overhauled in the future to support existing VLAN-tagged traffic (coming from NFV VMs for instance) and/or to deal with potential QinQ support natively available in Open vSwitch.

Further Reading
---------------

* `Darragh O'Reilly - The Open vSwitch plugin with VLANs `_

neutron-8.4.0/doc/source/devref/testing_coverage.rst0000664000567000056710000001542313044372760023773 0ustar jenkinsjenkins00000000000000.. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. Convention for heading levels in Neutron devref: ======= Heading 0 (reserved for the title in a document) ------- Heading 1 ~~~~~~~ Heading 2 +++++++ Heading 3 ''''''' Heading 4 (Avoid deeper levels because they do not render well.)

Test Coverage
=============

The intention is to track merged features or areas of code that lack certain types of tests. This document may be used both by developers that want to contribute tests, and operators that are considering adopting a feature.

Coverage
--------

Note that while both API and scenario tests target a deployed OpenStack cloud, API tests are under the Neutron tree and scenario tests are under the Tempest tree. It is the expectation that API changes involve API tests, agent features or modifications involve functional tests, and Neutron-wide features involve fullstack or scenario tests as appropriate.

The table references tests that explicitly target a feature, and not a job that is configured to run against a specific backend (thereby testing it implicitly). So, for example, while the Linux bridge agent has a job that runs the API and scenario tests with the Linux bridge agent configured, it does not have functional tests that target the agent explicitly. The 'gate' column is about running API/scenario tests with Neutron configured in a certain way, such as what L2 agent to use or what type of routers to create.
* V - Merged * Blank - Not applicable * X - Absent or lacking * Patch number - Currently in review * A name - That person has committed to work on an item +------------------------+------------+------------+------------+------------+------------+------------+ | Area | Unit | Functional | API | Fullstack | Scenario | Gate | +========================+============+============+============+============+============+============+ | DVR | Partial* | L3-V OVS-X | V | amuller | X | V | +------------------------+------------+------------+------------+------------+------------+------------+ | L3 HA | V | V | X | 196393 | X | X | +------------------------+------------+------------+------------+------------+------------+------------+ | L2pop | V | X | | X | | | +------------------------+------------+------------+------------+------------+------------+------------+ | DHCP HA | V | | | amuller | | | +------------------------+------------+------------+------------+------------+------------+------------+ | OVS ARP responder | V | X* | | X* | | | +------------------------+------------+------------+------------+------------+------------+------------+ | OVS agent | V | Partial | | V | | V | +------------------------+------------+------------+------------+------------+------------+------------+ | Linux Bridge agent | V | X | | X | | Non-voting | +------------------------+------------+------------+------------+------------+------------+------------+ | Metering | V | X | V | X | | | +------------------------+------------+------------+------------+------------+------------+------------+ | DHCP agent | V | 136834 | | amuller | | V | +------------------------+------------+------------+------------+------------+------------+------------+ | rpc_workers | | | | | | X | +------------------------+------------+------------+------------+------------+------------+------------+ | Reference ipam driver | V | | | | | X (?) | +------------------------+------------+------------+------------+------------+------------+------------+ | MTU advertisement | V | | | X | | | +------------------------+------------+------------+------------+------------+------------+------------+ | VLAN transparency | V | | X | X | | | +------------------------+------------+------------+------------+------------+------------+------------+ | Prefix delegation | V | X | | X | | | +------------------------+------------+------------+------------+------------+------------+------------+ * DVR DB unit tests often assert that internal methods were called instead of testing functionality. A lot of our unit tests are flawed in this way, and DVR unit tests especially so. An attempt to remedy this was made in patch 178880. * OVS ARP responder cannot be tested at the gate because the gate uses Ubuntu 14.04 that only packages OVS 2.0. OVS added ARP manipulation support in version 2.1. * Prefix delegation doesn't have functional tests for the dibbler and pd layers, nor for the L3 agent changes. Missing Infrastructure ---------------------- The following section details missing test *types*. If you want to pick up an action item, please contact amuller for more context and guidance. * The Neutron team would like Rally to persist results over a window of time, graph and visualize this data, so that reviewers could compare average runs against a proposed patch. * It's possible to test RPC methods via the unit tests infrastructure. This was proposed in patch 162811. 
The goal is to provide developers a lightweight way to rapidly run tests that target the RPC layer, so that a patch that modifies an RPC method's signature could be verified quickly and locally.

* Neutron currently does not test an in-place upgrade (upgrading the server first, followed by agents, one machine at a time). We make sure that the RPC layer remains backwards compatible manually via the review process but have no CI that verifies this.

neutron-8.4.0/doc/source/devref/effective_neutron.rst0000664000567000056710000005674013044372760024164 0ustar jenkinsjenkins00000000000000.. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. Convention for heading levels in Neutron devref: ======= Heading 0 (reserved for the title in a document) ------- Heading 1 ~~~~~~~ Heading 2 +++++++ Heading 3 ''''''' Heading 4 (Avoid deeper levels because they do not render well.)

Effective Neutron: 100 specific ways to improve your Neutron contributions
==========================================================================

There are a number of skills that make a great Neutron developer: writing good code, reviewing effectively, listening to peer feedback, etc. The objective of this document is to describe, by means of examples, the pitfalls, the good and bad practices that 'we' as a project encounter on a daily basis and that make us either go slower or accelerate while contributing to Neutron. By reading and collaboratively contributing to such a knowledge base, your development and review cycle becomes shorter, because you will learn (and teach to others after you) what to watch out for, and how to be proactive in order to prevent negative feedback, minimize programming errors, write better tests, and so on and so forth...in a nutshell, how to become an effective Neutron developer.

The notes below are meant to be free-form and brief by design. They are not meant to replace or duplicate `OpenStack documentation `_, or any project-wide documentation initiative like `peer-review notes `_ or the `team guide `_. For this reason, references are acceptable and should be favored, if the shortcut is deemed useful to expand on the distilled information. We will try to keep these notes tidy by breaking them down into sections if it makes sense. Feel free to add, adjust, remove as you see fit. Please do so, taking into consideration yourself and other Neutron developers as readers. Capture your experience during development and review and add any comment that you believe will make your life and others' easier. Happy hacking!

Developing better software
--------------------------

Plugin development
~~~~~~~~~~~~~~~~~~

Document common pitfalls as well as good practices done during plugin development.

* Use mixin classes as a last resort. They can be a powerful tool to add behavior but their strength is also a weakness, as they can introduce `unpredictable `_ behavior to the `MRO `_, amongst other issues.

* In lieu of mixins, if you need to add behavior that is relevant for ML2, consider using the `extension manager `_.
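* As a hedged illustration of the MRO pitfall above (the class names are hypothetical and deliberately minimal):

  .. code:: python

      class MixinA(object):
          def create_port(self, context, port):
              port = super(MixinA, self).create_port(context, port)
              # ... A's extra behavior ...
              return port


      class MixinB(object):
          def create_port(self, context, port):
              # B does not call super(), silently cutting later classes
              # out of the MRO chain
              return port


      class PluginAB(MixinA, MixinB):
          pass  # A's create_port runs, then B's


      class PluginBA(MixinB, MixinA):
          pass  # only B's create_port runs; A is skipped entirely

  Which implementations actually run depends solely on base class ordering and each mixin's ``super()`` discipline, which becomes very hard to reason about as mixins accumulate.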
* If you make changes to the DB class methods, like calling methods that can be inherited, think about what effect that may have on plugins that have controller `backends `_.

* If you make changes to the ML2 plugin or components used by the ML2 plugin, think about the `effect `_ that may have on other plugins.

* When adding behavior to the L2 and L3 db base classes, do not assume that there is an agent on the other side of the message broker that interacts with the server. Plugins may not rely on `agents `_ at all.

* Be mindful of required capabilities when you develop plugin extensions. The `Extension description `_ provides the ability to specify the list of required capabilities for the extension you are developing. By declaring this list, the server will not start up if the requirements are not met, thus avoiding leading the system to experience undetermined behavior at runtime.

Database interaction
~~~~~~~~~~~~~~~~~~~~

Document common pitfalls as well as good practices done during database development.

* `first() `_ does not raise an exception.

* Do not get an object just to delete it. If you can, use `delete() `_ on the query object. Read the warnings for more details about in-python cascades.

* For PostgreSQL, if you're using GROUP BY, everything in the SELECT list must be either an aggregate (SUM(...), COUNT(...), etc.) or used in the GROUP BY clause.

  The incorrect variant:

  .. code:: python

      q = query(Object.id, Object.name, func.count(Object.number)).group_by(Object.name)

  The correct variant:

  .. code:: python

      q = query(Object.id, Object.name, func.count(Object.number)).group_by(Object.id, Object.name)

* Beware of the `InvalidRequestError `_ exception. There is even a `Neutron bug `_ registered for it. Bear in mind that this error may also occur when nesting transaction blocks, and the innermost block raises an error without proper rollback. Consider if `savepoints `_ can fit your use case.

* When designing data models that are related to each other, be careful about how you model the relationships' loading `strategy `_. For instance, a joined relationship can be much more efficient than others (some examples include `router gateways `_ or `network availability zones `_).

* If you add a relationship to a Neutron object that will be referenced in the majority of cases where the object is retrieved, be sure to use the lazy='joined' parameter to the relationship so the related objects are loaded as part of the same query. Otherwise, the default method is 'select', which emits a new DB query to retrieve each related object, adversely impacting performance. For example, see `patch 88665 `_ which resulted in a significant improvement since router retrieval functions always include the gateway interface.

* Conversely, do not use lazy='joined' if the relationship is only used in corner cases, because the JOIN statement comes at a cost that may be significant if the relationship contains many objects. For example, see `patch 168214 `_ which reduced a subnet retrieval by ~90% by avoiding a join to the IP allocation table.

* When writing extensions to existing objects (e.g. Networks), ensure that they are written in a way that the data on the object can be calculated without an additional DB lookup. If that's not possible, ensure the DB lookup is performed once in bulk during a list operation. Otherwise a list call for 1000 objects will change from a constant small number of DB queries to 1000 DB queries. For example, see `patch 257086 `_ which changed the availability zone code from the incorrect style to a database friendly one.
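* As a hedged sketch of the two loading strategies discussed above (the models are hypothetical and reduced to the essentials):

  .. code:: python

      import sqlalchemy as sa
      from sqlalchemy import orm
      from sqlalchemy.ext.declarative import declarative_base

      Base = declarative_base()


      class Port(Base):
          __tablename__ = 'ports'
          id = sa.Column(sa.String(36), primary_key=True)
          router_id = sa.Column(sa.String(36), sa.ForeignKey('routers.id'))


      class Router(Base):
          __tablename__ = 'routers'
          id = sa.Column(sa.String(36), primary_key=True)
          # 'joined' pulls the gateway port in the same SELECT as the
          # router, which pays off because nearly every router retrieval
          # needs it; with the default 'select' strategy, listing 1000
          # routers and touching gw_port on each would emit 1000 extra
          # queries
          gw_port = orm.relationship(Port, lazy='joined', uselist=False)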
* Sometimes in code we use the following structures:

  .. code:: python

      def create():
          with context.session.begin(subtransactions=True):
              create_something()
              try:
                  _do_other_thing_with_created_object()
              except Exception:
                  with excutils.save_and_reraise_exception():
                      delete_something()

      def _do_other_thing_with_created_object():
          with context.session.begin(subtransactions=True):
              ....

  The problem is that when an exception is raised in ``_do_other_thing_with_created_object``, it is caught in the except block, but the object cannot be deleted in the except section because the internal transaction from ``_do_other_thing_with_created_object`` has been rolled back. To avoid this, nested transactions should be used. For such cases, the helper function ``_safe_creation`` has been created in ``neutron/db/common_db_mixin.py``. So, the example above should be replaced with:

  .. code:: python

      _safe_creation(context, create_something, delete_something,
                     _do_other_thing_with_created_object)

  where a nested transaction is used in the ``_do_other_thing_with_created_object`` function.

  The ``_safe_creation`` function can also be passed the ``transaction=False`` argument to prevent any transaction from being created, just to leverage the automatic deletion on exception logic.

* Beware of ResultProxy.inserted_primary_key, which returns a list of the last inserted primary keys, not a single primary key:

  .. code:: python

      result = session.execute(mymodel.insert().values(**values))
      # result.inserted_primary_key is a list even if we inserted a unique row!

* Beware of pymysql, which can silently unwrap a single-element list (and hide a wrong use of ResultProxy.inserted_primary_key, for example):

  .. code:: python

      e.execute("create table if not exists foo (bar integer)")
      e.execute(foo.insert().values(bar=1))
      e.execute(foo.insert().values(bar=[2]))

  The 2nd insert should crash (list provided, integer expected). It crashes at least with the mysql and postgresql backends, but succeeds with pymysql because it transforms the statements into:

  .. code:: sql

      INSERT INTO foo (bar) VALUES (1)
      INSERT INTO foo (bar) VALUES ((2))

System development
~~~~~~~~~~~~~~~~~~

Document common pitfalls as well as good practices done when invoking system commands and interacting with linux utils.

Eventlet concurrent model
~~~~~~~~~~~~~~~~~~~~~~~~~

Document common pitfalls as well as good practices done when using eventlet and monkey patching.

* Do not use with_lockmode('update') on SQL queries without protecting the operation with a lockutils semaphore. For some SQLAlchemy database drivers that operators may choose (e.g. MySQLdb) it may result in a temporary deadlock by yielding to another coroutine while holding the DB lock. The following wiki provides more details: https://wiki.openstack.org/wiki/OpenStack_and_SQLAlchemy#MySQLdb_.2B_eventlet_.3D_sad

Mocking and testing
~~~~~~~~~~~~~~~~~~~

Document common pitfalls as well as good practices done when writing tests, any test. For anything more elaborate, please visit the testing section.

* Prefer low-level testing versus full-path testing (e.g. not testing the database via client calls). The former is to be favored in unit testing, whereas the latter is to be favored in functional testing.

* Prefer specific assertions (assert(Not)In, assert(Not)IsInstance, assert(Not)IsNone, etc) over generic ones (assertTrue/False, assertEqual) because they raise more meaningful errors:
  .. code:: python

      def test_specific(self):
          self.assertIn(3, [1, 2])
          # raises a meaningful error: "MismatchError: 3 not in [1, 2]"

      def test_generic(self):
          self.assertTrue(3 in [1, 2])
          # raises a meaningless error: "AssertionError: False is not true"

* Use the pattern "self.assertEqual(expected, observed)" not the opposite; it helps reviewers to understand which one is the expected/observed value in non-trivial assertions. The expected and observed values are also labeled in the output when the assertion fails.

* Prefer specific assertions (assertTrue, assertFalse) over assertEqual(True/False, observed).

* Don't write tests that don't test the intended code. This might seem silly but it's easy to do with a lot of mocks in place. Ensure that your tests break as expected before your code change.

* Avoid heavy use of the mock library to test your code. If your code requires more than one mock to ensure that it does the correct thing, it needs to be refactored into smaller, testable units. Otherwise we depend on fullstack/tempest/api tests to test all of the real behavior and we end up with code containing way too many hidden dependencies and side effects.

* All behavior changes to fix bugs should include a test that prevents a regression. If you made a change and it didn't break a test, it means the code was not adequately tested in the first place; it's not an excuse to leave it untested.

* Test the failure cases. Use a mock side effect to throw the necessary exceptions to test your 'except' clauses.

* Don't mimic existing tests that violate these guidelines. We are attempting to replace all of these, so more tests like them create more work. If you need help writing a test, reach out to the testing lieutenants and the team on IRC.

* Mocking open() is a dangerous practice because it can lead to unexpected bugs like `bug 1503847 `_. In fact, when the built-in open method is mocked during tests, some utilities (like debtcollector) may still rely on the real thing, and may end up using the mock rather than what they are really looking for. If you must, consider using `OpenFixture `_, but it is better not to mock open() at all.

Backward compatibility
~~~~~~~~~~~~~~~~~~~~~~

Document common pitfalls as well as good practices done when extending the RPC Interfaces.

* Make yourself familiar with :ref:`Upgrade review guidelines `.

Scalability issues
~~~~~~~~~~~~~~~~~~

Document common pitfalls as well as good practices done when writing code that needs to process a lot of data.

Translation and logging
~~~~~~~~~~~~~~~~~~~~~~~

Document common pitfalls as well as good practices done when instrumenting your code.

* Make yourself familiar with `OpenStack logging guidelines `_ to avoid littering the logs with traces logged at inappropriate levels.

* The logger should only be passed unicode values. For example, do not pass it exceptions or other objects directly (LOG.error(exc), LOG.error(port), etc.). See http://docs.openstack.org/developer/oslo.log/usage.html#no-more-implicit-conversion-to-unicode-str for more details.

* Don't pass exceptions into LOG.exception: the exception is already implicitly included in the log message by the Python logging module.

* Don't use LOG.exception when there is no exception registered in the current thread context: Python 3.x versions before 3.5 are known to fail on it.

Project interfaces
~~~~~~~~~~~~~~~~~~

Document common pitfalls as well as good practices done when writing code that is used to interface with other projects, like Keystone or Nova.
Documenting your code
~~~~~~~~~~~~~~~~~~~~~

Document common pitfalls as well as good practices done when writing docstrings.

Landing patches more rapidly
----------------------------

Scoping your patch appropriately
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

* Do not make multiple changes in one patch unless absolutely necessary. Cleaning up nearby functions or fixing a small bug you noticed while working on something else makes the patch very difficult to review. It also makes cherry-picking and reverting very difficult. Even apparently minor changes such as reformatting whitespace around your change can burden reviewers and cause merge conflicts.

* If a fix or feature requires code refactoring, submit the refactoring as a separate patch from the one that changes the logic. Otherwise it's difficult for a reviewer to tell the difference between mistakes in the refactor and changes required for the fix/feature. If it's a bug fix, try to implement the fix before the refactor to avoid making cherry-picks to stable branches difficult.

* Consider your reviewers' time before submitting your patch. A patch that requires many hours or days to review will sit in the "todo" list until someone has many hours or days free (which may never happen). If you can deliver your patch in small but incrementally understandable and testable pieces you will be more likely to attract reviewers.

Nits and pedantic comments
~~~~~~~~~~~~~~~~~~~~~~~~~~

Document common nits and pedantic comments to watch out for.

* Make sure you spell correctly, the best you can; no-one wants rebase generators at the end of the release cycle!

* The odd pep8 error may cause an entire CI run to be wasted. Consider running validation (pep8 and/or tests) before submitting your patch. If you keep forgetting, consider installing a git `hook `_ so that Git will do it for you.

* Sometimes, new contributors want to dip their toes with trivial patches, but we at OpenStack *love* bike shedding and their patches may sometimes stall. In some extreme cases, the more trivial the patch, the higher the chances it fails to merge. To ensure we as a team provide/have a frustration-free experience, new contributors should be redirected to fixing `low-hanging-fruit bugs `_ that have a tangible positive impact to the codebase. Spelling mistakes and docstrings are fine, but there is a lot more that is relatively easy to fix and has a direct impact to Neutron users.

Reviewer comments
~~~~~~~~~~~~~~~~~

* Acknowledge them one by one by either clicking 'Done' or by replying extensively. If you do not, the reviewer won't know whether you thought it was not important, or you simply forgot. If the reply satisfies the reviewer, consider capturing the input in the code/document itself so that it's there for reviewers of newer patchsets to see (and other developers when the patch merges).

* Watch for the feedback on your patches. Acknowledge it promptly and act on it quickly, so that the reviewer remains engaged. If you disappear for a week after you posted a patchset, it is very likely that the patch will end up being neglected.

* Do not take negative feedback personally. Neutron is a large project with lots of contributors with different opinions on how things should be done. Many come from widely varying cultures and languages, so the English, text-only feedback can unintentionally come across as harsh. Getting a -1 means reviewers are trying to help get the patch into a state that can be merged; it doesn't just mean they are trying to block it.
It's very rare to get a patch merged on the first iteration that makes everyone happy.

Code Review
~~~~~~~~~~~

* You should visit the `OpenStack How To Review wiki `_.

IRC
~~~~

* IRC is a place where you can speak with many of the Neutron developers and core reviewers. For more information you should visit the `OpenStack IRC wiki `_. The Neutron IRC channel is #openstack-neutron.

* There are weekly IRC meetings related to many different projects/teams in Neutron. A full list of these meetings and their date/time can be found in `OpenStack IRC Meetings `_. It is important to attend these meetings in the area of your contribution and possibly mention your work and patches.

* When you have questions regarding an idea or a specific patch of yours, it can be helpful to find a relevant person in IRC and speak with them about it. You can find a user's IRC nickname in their launchpad account.

* Being available on IRC is useful, since reviewers can contact you directly to quickly clarify a review issue. This speeds up the feedback loop.

* Each area of Neutron or sub-project of Neutron has a specific lieutenant in charge of it. You can most likely find these lieutenants on IRC; it is advised, however, to try and send public questions to the channel rather than to a specific person if possible. (This increases the chances of getting faster answers to your questions.) A list of the areas and lieutenants' nicknames can be found at `Core Reviewers `_.

Commit messages
~~~~~~~~~~~~~~~

Document common pitfalls as well as good practices done when writing commit messages. For more details see `Git commit message best practices `_. This is the TL;DR version with the important points for committing to Neutron.

* One liners are bad, unless the change is trivial.

* Remember to use DocImpact, APIImpact, UpgradeImpact appropriately.

* Make sure the commit message doesn't have any spelling/grammar errors. This is the first thing reviewers read and they can be distracting enough to invite -1's.

* Describe what the change accomplishes. If it's a bug fix, explain how this code will fix the problem. If it's part of a feature implementation, explain what component of the feature the patch implements. Do not just describe the bug, that's what launchpad is for.

* Use the "Closes-Bug: #BUG-NUMBER" tag if the patch addresses a bug. Submitting a bugfix without a launchpad bug reference is unacceptable, even if it's trivial. Launchpad is how bugs are tracked, so fixes without a launchpad bug are a nightmare when users report the bug from an older version and the Neutron team can't tell if/why/how it's been fixed. Launchpad is also how backports are identified and tracked, so patches without a bug report cannot be picked to stable branches.

* Use the "Implements: blueprint NAME-OF-BLUEPRINT" or "Partially-Implements: blueprint NAME-OF-BLUEPRINT" for features so reviewers can determine if the code matches the spec that was agreed upon. This also updates the blueprint on launchpad so it's easy to see all patches that are related to a feature.

* If it's not immediately obvious, explain what the previous code was doing that was incorrect. (e.g. code assumed it would never get 'None' from a function call)

* Be specific in your commit message about what the patch does and why it does this. For example, "Fixes incorrect logic in security groups" is not helpful because the code diff already shows that you are modifying security groups.
The message should be specific enough that a reviewer looking at the code can tell if the patch does what the commit says in the most appropriate manner. If the reviewer has to guess why you did something, lots of your time will be wasted explaining why certain changes were made.

Dealing with Zuul
~~~~~~~~~~~~~~~~~

Document common pitfalls as well as good practices done when dealing with OpenStack CI.

* When you submit a patch, consider checking its `status `_ in the queue. If you see job failures, you might as well save time and try to figure out in advance why they are failing.

* Excessive use of 'recheck' to get tests to pass is discouraged. Please examine the logs for the failing test(s) and make sure your change has not tickled anything that might be causing a new failure or race condition. Getting your change in could make it even harder to debug what is actually broken later on.

neutron-8.4.0/doc/source/devref/client_command_extensions.rst0000664000567000056710000000227213044372736025677 0ustar jenkinsjenkins00000000000000.. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. Convention for heading levels in Neutron devref: ======= Heading 0 (reserved for the title in a document) ------- Heading 1 ~~~~~~~ Heading 2 +++++++ Heading 3 ''''''' Heading 4 (Avoid deeper levels because they do not render well.)

Client command extension support
================================

The client command extension adds support for extending the neutron client while considering ease of creation. The full document can be found in the python-neutronclient repository: http://docs.openstack.org/developer/python-neutronclient/devref/client_command_extensions.html

neutron-8.4.0/doc/source/devref/rpc_callbacks.rst0000664000567000056710000002723113044372760023226 0ustar jenkinsjenkins00000000000000.. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. Convention for heading levels in Neutron devref: ======= Heading 0 (reserved for the title in a document) ------- Heading 1 ~~~~~~~ Heading 2 +++++++ Heading 3 ''''''' Heading 4 (Avoid deeper levels because they do not render well.)

.. _rpc_callbacks:

Neutron Messaging Callback System
=================================

Neutron already has a :doc:`callback system ` for in-process resource callbacks where publishers and subscribers are able to publish and subscribe for resource events. This system is different, and is intended to be used for inter-process callbacks, via the messaging fanout mechanisms. In Neutron, agents may need to subscribe to specific resource details which may change over time.
The purpose of this messaging callback system is to allow agents to subscribe to those resources without the need to extend or modify existing RPC calls, or to create new RPC messages. A few resources which can benefit from this system:

* QoS policies;
* Security Groups.

Using a remote publisher/subscriber pattern, the information about such resources could be published using fanout messages to all interested nodes, minimizing messaging requests from agents to the server since the agents get subscribed for their whole lifecycle (unless they unsubscribe).

Within an agent, there could be multiple subscriber callbacks to the same resource events; the resource updates would be dispatched to the subscriber callbacks from a single message. Any update would come in a single message, doing only a single oslo versioned objects deserialization on each receiving agent.

This publishing/subscription mechanism is highly dependent on the format of the resources passed around. This is why the library only allows versioned objects to be published and subscribed. Oslo versioned objects allow object version down/up conversion. [#vo_mkcompat]_ [#vo_mkcptests]_

For the VO's versioning schema, look here: [#vo_versioning]_. Versioned object serialization/deserialization with the obj_to_primitive(target_version=..) and primitive_to_obj() [#ov_serdes]_ methods is used internally to convert/retrieve objects before/after messaging.

Serialized versioned objects look like::

    {'versioned_object.version': '1.0',
     'versioned_object.name': 'QoSPolicy',
     'versioned_object.data': {'rules': [
                                   {'versioned_object.version': '1.0',
                                    'versioned_object.name': 'QoSBandwidthLimitRule',
                                    'versioned_object.data': {'name': u'a'},
                                    'versioned_object.namespace': 'versionedobjects'}
                               ],
                               'uuid': u'abcde',
                               'name': u'aaa'},
     'versioned_object.namespace': 'versionedobjects'}

Rolling upgrades strategy
-------------------------

In this section we assume the standard Neutron upgrade process, which means upgrade the server first and then upgrade the agents: :doc:`More information about the upgrade strategy `.

The plan is to provide a semi-automatic method which avoids manual pinning and unpinning of versions by the administrator, which could be prone to error.

Resource pull requests
~~~~~~~~~~~~~~~~~~~~~~

Resource pull requests will always be OK because the underlying resource RPC does provide the version of the requested resource id / ids. The server will be upgraded first, so it will always be able to satisfy any version the agents request.

Resource push notifications
~~~~~~~~~~~~~~~~~~~~~~~~~~~

Agents will subscribe to the neutron-vo-- fanout queue which carries updated objects for the version they know about. The versions they know about depend on the runtime Neutron versioned objects they started with.

When the server upgrades, it should be able to instantly calculate a census of agent versions per object (we will define a mechanism for this in a later section). It will use the census to send fanout messages spanning all the versions a resource type has. For example, if neutron-server knew it has rpc-callback aware agents with versions 1.0 and 1.2 of resource type "A", any update would be sent to neutron-vo-A_1.0 and neutron-vo-A_1.2.

TODO(mangelajo): Verify that after the upgrade is finished any unused messaging resources (queues, exchanges, and so on) are released as older agents go away and neutron-server stops producing new message casts.
Otherwise, document the need for a neutron-server restart after the rolling upgrade has finished if we want the queues cleaned up.

Leveraging agent state reports for object version discovery
+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++

We would add a row to the agent db for tracking agent known objects and version numbers. This would resemble the implementation of the configuration column. At startup, agents would report not only their configuration, but also their subscribed object type / version pairs, which would be stored in the database and would be available to any neutron-server requesting it::

    'subscribed_versions': {'QoSPolicy': '1.1',
                            'SecurityGroup': '1.0',
                            'Port': '1.0'}

There's a subset of Liberty agents depending on QoSPolicy that will require 'QoSPolicy': '1.0' if the qos plugin is installed. We will be able to identify those by the binary name (included in the report):

* 'neutron-openvswitch-agent'
* 'neutron-sriov-nic-agent'

Version discovery
+++++++++++++++++

With the above mechanism in place, and considering the exception of neutron-openvswitch-agent and neutron-sriov-agent requiring QoSPolicy 1.0, we could discover the subset of versions to be sent on every push notification.

Agents that are in the down state would be excluded from this calculation. We would use an extended timeout for agents in this calculation to make sure we're on the safe side, especially if the deployer marked agents with low timeouts.

Starting at Mitaka, any agent interested in versioned objects via this API should report their resource/version tuples of interest (the resource type/version pairs they're subscribed to).

The plugins interested in this RPC mechanism must inherit AgentDbMixin, since this mechanism is only intended to be used from agents at the moment, while it could be extended to be consumed from other components if necessary.

The AgentDbMixin provides::

    def get_agents_resource_versions(self, tracker):
        ...

Caching mechanism
'''''''''''''''''

The version subset per object will be cached to avoid DB requests on every push, given that we assume that all old agents are already registered at the time of upgrade. The cached subset will be re-evaluated (to cut down the version sets as agents upgrade) after a configured TTL.

As a fast path to update this cache on all neutron-servers when upgraded agents come up (or old agents revive after a long timeout or even a downgrade), the server registering the new status update will notify the other servers about the new consumer resource versions via cast.

All notifications for all calculated version sets must be sent, as non-upgraded agents would otherwise not receive them. It is safe to send notifications to any fanout queue, as they will be discarded if no agent is listening.

Topic names for every resource type RPC endpoint
------------------------------------------------

neutron-vo--

In the future, we may want to get oslo messaging to support subscribing topics dynamically; then we may want to use:

neutron-vo---

instead, or something equivalent which would allow fine granularity for the receivers to only get information interesting to them.

Subscribing to resources
------------------------

Imagine that you have agent A, which just got to handle a new port, which has an associated security group, and QoS policy.
The agent code processing port updates may look like::

    from neutron.api.rpc.callbacks.consumer import registry
    from neutron.api.rpc.callbacks import events
    from neutron.api.rpc.callbacks import resources


    def process_resource_updates(resource_type, resource, event_type):

        # send to the right handler which will update any control plane
        # details related to the updated resource...


    def subscribe_resources():
        registry.subscribe(process_resource_updates, resources.SEC_GROUP)

        registry.subscribe(process_resource_updates, resources.QOS_POLICY)

    def port_update(port):

        # here we extract sg_id and qos_policy_id from port..

        sec_group = registry.pull(resources.SEC_GROUP, sg_id)
        qos_policy = registry.pull(resources.QOS_POLICY, qos_policy_id)

The relevant function is:

* subscribe(callback, resource_type): subscribes callback to a resource type.

The callback function will receive the following arguments:

* resource_type: the type of resource which is receiving the update.
* resource: the resource (a supported versioned object).
* event_type: will be one of CREATED, UPDATED, or DELETED, see neutron.api.rpc.callbacks.events for details.

Without underlying oslo_messaging support for dynamic topics on the receiver, we cannot implement a per "resource type + resource id" topic: rabbitmq seems to handle tens of thousands of topics without suffering, but creating hundreds of oslo_messaging receivers on different topics seems to crash. We may want to look into that later, to avoid agents receiving resource updates which are uninteresting to them.

Unsubscribing from resources
----------------------------

To unsubscribe registered callbacks:

* unsubscribe(callback, resource_type): unsubscribe from a specific resource type.
* unsubscribe_all(): unsubscribe from all resources.

Sending resource events
-----------------------

On the server side, resource updates could come from anywhere: a service plugin, an extension, anything that updates, creates, or destroys the resource and that is of any interest to subscribed agents.

The server/publisher side may look like::

    from neutron.api.rpc.callbacks.producer import registry
    from neutron.api.rpc.callbacks import events


    def create_qos_policy(...):
        policy = fetch_policy(...)
        update_the_db(...)
        registry.push(policy, events.CREATED)


    def update_qos_policy(...):
        policy = fetch_policy(...)
        update_the_db(...)
        registry.push(policy, events.UPDATED)


    def delete_qos_policy(...):
        policy = fetch_policy(...)
        update_the_db(...)
        registry.push(policy, events.DELETED)


References
----------

.. [#ov_serdes] https://github.com/openstack/oslo.versionedobjects/blob/ce00f18f7e9143b5175e889970564813189e3e6d/oslo_versionedobjects/tests/test_objects.py#L410

.. [#vo_mkcompat] https://github.com/openstack/oslo.versionedobjects/blob/ce00f18f7e9143b5175e889970564813189e3e6d/oslo_versionedobjects/base.py#L474

.. [#vo_mkcptests] https://github.com/openstack/oslo.versionedobjects/blob/ce00f18f7e9143b5175e889970564813189e3e6d/oslo_versionedobjects/tests/test_objects.py#L114

.. [#vo_versioning] https://github.com/openstack/oslo.versionedobjects/blob/ce00f18f7e9143b5175e889970564813189e3e6d/oslo_versionedobjects/base.py#L248

neutron-8.4.0/doc/source/devref/tag.rst0000664000567000056710000000747713044372760021220 0ustar jenkinsjenkins00000000000000.. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License.
You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. Convention for heading levels in Neutron devref: ======= Heading 0 (reserved for the title in a document) ------- Heading 1 ~~~~~~~ Heading 2 +++++++ Heading 3 ''''''' Heading 4 (Avoid deeper levels because they do not render well.)

Add Tags to Neutron Resources
=============================

The tag service plugin allows users to set tags on their resources. Tagging resources can be used by external systems or any other clients of the Neutron REST API (and NOT backend drivers). The following use cases refer to adding tags to networks, but the same can be applicable to any other Neutron resource:

1) Ability to map different networks in different OpenStack locations to one logically same network (for multi-site OpenStack)

2) Ability to map IDs from different management/orchestration systems to OpenStack networks in mixed environments; for example, for project Kuryr, map a Docker network id to a Neutron network id

3) Leverage tags by deployment tools

4) Allow operators to tag information about provider networks (e.g. high-bandwidth, low-latency, etc)

5) New features like get-me-a-network or a similar port scheduler could choose a network for a port based on tags

Which Resources
---------------

The tag system uses the standardattr mechanism, so it targets resources that have that mechanism. In Mitaka, those are networks, ports, routers, floating IPs, security groups, security group rules and subnet pools, but for now the tag system supports networks only.

Model
-----

A tag is not a standalone resource; a tag is always related to an existing resource. The following shows the tag model::

    +------------------+        +------------------+
    |     Network      |        |       Tag        |
    +------------------+        +------------------+
    | standard_attr_id +------> | standard_attr_id |
    |                  |        |       tag        |
    |                  |        |                  |
    +------------------+        +------------------+

The tag model has only two columns, and the tag column is just a string. These tags are defined per resource. A tag is unique within a resource, but the same tag can be used across resources.

API
---

The following shows the basic API for tags. A tag is regarded as a subresource of a resource, so the API always includes the id of the resource related to the tag.

Add a single tag on a network ::

    PUT /v2.0/networks/{network_id}/tags/{tag}

Returns `201 Created`. If the tag already exists, no error is raised; it just returns `201 Created`, because the `OpenStack Development Mailing List `_ discussion concluded that a PUT updating an existing tag should not be an issue.

Replace set of tags on a network ::

    PUT /v2.0/networks/{network_id}/tags

with request payload ::

    {
        'tags': ['foo', 'bar', 'baz']
    }

Response ::

    {
        'tags': ['foo', 'bar', 'baz']
    }

Check if a tag exists or not on a network ::

    GET /v2.0/networks/{network_id}/tags/{tag}

Remove a single tag on a network ::

    DELETE /v2.0/networks/{network_id}/tags/{tag}

Remove all tags on a network ::

    DELETE /v2.0/networks/{network_id}/tags

PUT and DELETE for collections are the motivation of `extending the API framework `_.

neutron-8.4.0/doc/source/devref/security_group_api.rst0000664000567000056710000000626513044372760024357 0ustar jenkinsjenkins00000000000000..
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. Convention for heading levels in Neutron devref: ======= Heading 0 (reserved for the title in a document) ------- Heading 1 ~~~~~~~ Heading 2 +++++++ Heading 3 ''''''' Heading 4 (Avoid deeper levels because they do not render well.) Guided Tour: The Neutron Security Group API =========================================== https://wiki.openstack.org/wiki/Neutron/SecurityGroups API Extension ------------- The API extension is the 'front' end portion of the code, which handles defining a `REST-ful API`_, which is used by tenants. .. _`REST-ful API`: https://git.openstack.org/cgit/openstack/neutron/tree/neutron/extensions/securitygroup.py Database API ------------ The Security Group API extension adds a number of `methods to the database layer`_ of Neutron .. _`methods to the database layer`: https://git.openstack.org/cgit/openstack/neutron/tree/neutron/db/securitygroups_db.py Agent RPC --------- This portion of the code handles processing requests from tenants, after they have been stored in the database. It involves messaging all the L2 agents running on the compute nodes, and modifying the IPTables rules on each hypervisor. * `Plugin RPC classes `_ * `SecurityGroupServerRpcMixin `_ - defines the RPC API that the plugin uses to communicate with the agents running on the compute nodes * SecurityGroupServerRpcMixin - Defines the API methods used to fetch data from the database, in order to return responses to agents via the RPC API * `Agent RPC classes `_ * The SecurityGroupServerRpcApi defines the API methods that can be called by agents, back to the plugin that runs on the Neutron controller * The SecurityGroupAgentRpcCallbackMixin defines methods that a plugin uses to call back to an agent after performing an action called by an agent. IPTables Driver --------------- * ``prepare_port_filter`` takes a ``port`` argument, which is a ``dictionary`` object that contains information about the port - including the ``security_group_rules`` * ``prepare_port_filter`` appends the port to an internal dictionary, ``filtered_ports`` which is used to track the internal state. * Each security group has a `chain `_ in Iptables. * The ``IptablesFirewallDriver`` has a method to convert security group rules into iptables statements. neutron-8.4.0/doc/source/devref/service_extensions.rst0000664000567000056710000000676213044372760024370 0ustar jenkinsjenkins00000000000000.. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
Convention for heading levels in Neutron devref: ======= Heading 0 (reserved for the title in a document) ------- Heading 1 ~~~~~~~ Heading 2 +++++++ Heading 3 ''''''' Heading 4 (Avoid deeper levels because they do not render well.) Service Extensions ================== Historically, Neutron supported the following advanced services: #. **FWaaS** (*Firewall-as-a-Service*): runs as part of the L3 agent. #. **LBaaS** (*Load-Balancer-as-a-Service*): implemented purely inside neutron-server, does not interact directly with agents. #. **VPNaaS** (*VPN-as-a-Service*): derives from L3 agent to add VPNaaS functionality. Starting with the Kilo release, these services are split into separate repositories, and more extensions are being developed as well. Service plugins are a clean way of adding functionality in a cohesive manner and yet, keeping them decoupled from the guts of the framework. The aforementioned features are developed as extensions (also known as service plugins), and more capabilities are being added to Neutron following the same pattern. For those that are deemed 'orthogonal' to any network service (e.g. tags, timestamps, auto_allocate, etc), there is an informal `mechanism `_ to have these loaded automatically at server startup. If you consider adding an entry to the dictionary, please be kind and reach out to your PTL or a member of the drivers team for approval. #. http://git.openstack.org/cgit/openstack/neutron-fwaas/ #. http://git.openstack.org/cgit/openstack/neutron-lbaas/ #. http://git.openstack.org/cgit/openstack/neutron-vpnaas/ Calling the Core Plugin from Services ------------------------------------- There are many cases where a service may want to create a resource managed by the core plugin (e.g. ports, networks, subnets). This can be achieved by importing the Neutron Manager and getting a direct reference to the core plugin: .. code:: python from neutron import manager plugin = manager.NeutronManager.get_plugin() plugin.create_port(context, port_dict) However, there is an important caveat. Calls to the core plugin in almost every case should not be made inside of an ongoing transaction. This is because many plugins (including ML2), can be configured to make calls to a backend after creating or modifying an object. If the call is made inside of a transaction and the transaction is rolled back after the core plugin call, the backend will not be notified that the change was undone. This will lead to consistency errors between the core plugin and its configured backend(s). ML2 has a guard against certain methods being called with an active DB transaction to help prevent developers from accidentally making this mistake. It will raise an error that says explicitly that the method should not be called within a transaction. neutron-8.4.0/doc/source/devref/development.environment.rst0000664000567000056710000000443713044372736025336 0ustar jenkinsjenkins00000000000000.. Copyright 2010-2013 United States Government as represented by the Administrator of the National Aeronautics and Space Administration. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
    See the License for the specific language governing permissions and limitations under the License.

    Convention for heading levels in Neutron devref:
    =======  Heading 0 (reserved for the title in a document)
    -------  Heading 1
    ~~~~~~~  Heading 2
    +++++++  Heading 3
    '''''''  Heading 4
    (Avoid deeper levels because they do not render well.)

Setting Up a Development Environment
====================================

This page describes how to set up a working Python development environment that can be used in developing Neutron on Ubuntu, Fedora or Mac OS X. These instructions assume you're already familiar with Git and Gerrit (a code repository mirror and code review toolset); if you aren't, please see `this Git tutorial`_ for an introduction to using Git and `this guide`_ for a tutorial on using Gerrit and Git for code contribution to OpenStack projects.

.. _this Git tutorial: http://git-scm.com/book/en/Getting-Started
.. _this guide: http://docs.openstack.org/infra/manual/developers.html#development-workflow

Following these instructions will allow you to run the Neutron unit tests. If you want to be able to run Neutron in a full OpenStack environment, you can use the excellent `DevStack`_ project to do so. There is a wiki page that describes `setting up Neutron using DevStack`_.

.. _DevStack: https://git.openstack.org/cgit/openstack-dev/devstack
.. _setting up Neutron using Devstack: https://wiki.openstack.org/wiki/NeutronDevstack

Getting the code
----------------

Grab the code::

    git clone git://git.openstack.org/openstack/neutron.git
    cd neutron

.. include:: ../../../TESTING.rst

neutron-8.4.0/doc/source/devref/ml2_ext_manager.rst0000664000567000056710000000267513044372736023513 0ustar jenkinsjenkins00000000000000..
    Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.

    Convention for heading levels in Neutron devref:
    =======  Heading 0 (reserved for the title in a document)
    -------  Heading 1
    ~~~~~~~  Heading 2
    +++++++  Heading 3
    '''''''  Heading 4
    (Avoid deeper levels because they do not render well.)

ML2 Extension Manager
=====================

The extension manager for ML2 was introduced in Juno (more details can be found in the approved `spec `_). The feature allows for extending ML2 resources without actually having to introduce cross-cutting concerns to ML2. The mechanism has been applied for a number of use cases, and extensions that currently use this framework are available under `ml2/extensions `_.

neutron-8.4.0/doc/source/devref/upgrade.rst0000664000567000056710000002622313044372760022072 0ustar jenkinsjenkins00000000000000..
    Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    See the License for the specific language governing permissions and limitations under the License.

    Convention for heading levels in Neutron devref:
    =======  Heading 0 (reserved for the title in a document)
    -------  Heading 1
    ~~~~~~~  Heading 2
    +++++++  Heading 3
    '''''''  Heading 4
    (Avoid deeper levels because they do not render well.)

.. note:: Much of this document discusses upgrade considerations for the Neutron reference implementation using Neutron's agents. It's expected that each Neutron plugin provides its own documentation that discusses upgrade considerations specific to that choice of backend. For example, OVN does not use Neutron agents, but does have a local controller that runs on each compute node. OVN supports rolling upgrades, but information about how that works should be covered in the documentation for networking-ovn, the OVN Neutron plugin.

Upgrade strategy
================

There are two general upgrade scenarios supported by Neutron:

#. All services are shut down, code upgraded, then all services are started again.
#. Services are upgraded gradually, based on operator service windows.

The latter is the preferred way to upgrade an OpenStack cloud, since it allows for more granularity and less service downtime. This scenario is usually called 'rolling upgrade'.

Rolling upgrade
---------------

Rolling upgrades imply that during some interval of time there will be services of different code versions running and interacting in the same cloud. This puts multiple constraints onto the software.

#. Older services should be able to talk with newer services.
#. Older services should not require the database to have an older schema (otherwise newer services that require the newer schema would not work).

`More info on rolling upgrades in OpenStack `_.

Those requirements are achieved in Neutron by:

#. having the Neutron server carry backwards compatibility code to deal with older messaging payloads, if the Neutron backend makes use of Neutron agents;
#. isolating a single service that accesses the database (neutron-server).

To simplify the matter, it's always assumed that the order of service upgrades is as follows:

#. first, all neutron-servers are upgraded.
#. then, if applicable, neutron agents are upgraded.

This approach allows us to avoid backwards compatibility code on the agent side, and is in line with other OpenStack projects that support rolling upgrades (specifically, nova).

Server upgrade
~~~~~~~~~~~~~~

Neutron-server is the very first component that should be upgraded to the new code. It's also the only component that relies on the new database schema being present; other components communicate with the cloud through AMQP and hence do not depend on a particular database state. Database upgrades are implemented with alembic migration chains.

The database upgrade is split into two parts:

#. neutron-db-manage upgrade --expand
#. neutron-db-manage upgrade --contract

Each part represents a separate alembic branch. The former step can be executed while old neutron-server code is running. The latter step requires *all* neutron-server instances to be shut down. Once it's complete, neutron-servers can be started again.

.. note:: Full shutdown of neutron-server instances can be skipped depending on whether there are pending contract scripts not applied to the database::

    $ neutron-db-manage has_offline_migrations

The command will return a message if there are pending contract scripts.

:ref:`More info on alembic scripts `.

Agents upgrade
~~~~~~~~~~~~~~
.. note:: This section does not apply when the cloud does not use AMQP agents to provide networking services to instances. In that case, other backend specific upgrade instructions may also apply.

Once neutron-server services are restarted with the new database schema and the new code, it's time to upgrade Neutron agents. Note that in the meantime, neutron-server should be able to serve AMQP messages sent by older versions of agents which are part of the cloud.

The recommended order of agent upgrade (per node) is:

#. first, L2 agents (openvswitch, linuxbridge, sr-iov).
#. then, all other agents (L3, DHCP, Metadata, ...).

The rationale for the agent upgrade order is that the L2 agent is usually responsible for wiring ports for other agents to use, so it's better to allow it to do its job first and then proceed with other agents that will use the already configured ports for their needs. Each network/compute node can have its own upgrade schedule that is independent of other nodes.

AMQP considerations
+++++++++++++++++++

Since it's always assumed that the neutron-server component is upgraded before agents, only the former should handle both old and new RPC versions. The implication of that is that no code that handles UnsupportedVersion oslo.messaging exceptions belongs in agent code.

Notifications
'''''''''''''

For notifications that are issued by neutron-server to listening agents, special consideration is needed to support rolling upgrades. In this case, a newer controller sends a newer payload to older agents. Until we have a proper RPC version pinning feature to enforce the older payload format during upgrade (as is implemented in other projects like nova), we keep our agents resilient to unknown arguments sent as part of server notifications. This is achieved by consistently capturing those unknown arguments with keyword arguments and ignoring them on the agent side, and by not enforcing newer RPC entry point versions on the server side. This approach is not ideal, because it makes the RPC API less strict. That's why other approaches should be considered for notifications in the future.

:ref:`More information about RPC versioning `.
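To make that pattern concrete, here is a minimal sketch of a kwargs-tolerant agent-side endpoint; the class, method and argument names are illustrative only, not the actual Neutron agent code:

.. code:: python

    class SecurityGroupsCallback(object):
        # The endpoint version is deliberately not bumped when the server
        # starts sending optional arguments, so older entry points still
        # match newer payloads.

        def security_groups_rule_updated(self, context,
                                         security_groups=None, **kwargs):
            # Older payloads only carry 'security_groups'; anything a newer
            # neutron-server adds to the notification is captured by
            # **kwargs and ignored, instead of raising a TypeError.
            for sg_id in security_groups or []:
                self._refresh_firewall(sg_id)

        def _refresh_firewall(self, sg_id):
            pass  # placeholder for the actual rule refresh logic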
Interface signature
'''''''''''''''''''

An RPC interface is defined by its name, version, and (named) arguments that it accepts. There are no strict guarantees that arguments will have expected types or meaning, as long as they are serializable.

Message content versioning
''''''''''''''''''''''''''

To provide better compatibility guarantees for rolling upgrades, RPC interfaces could also define a specific format for the arguments they accept. In the OpenStack world, this is usually implemented using the oslo.versionedobjects library, relying on the library to define the serialized form for arguments that are passed over the AMQP wire. Note that Neutron has *not* adopted the oslo.versionedobjects library for its RPC interfaces yet (except for the QoS feature).

:ref:`More information about RPC callbacks used for QoS `.

Networking backends
~~~~~~~~~~~~~~~~~~~

A backend software upgrade should not result in any data plane disruptions. Meaning that, e.g., the Open vSwitch L2 agent should not reset flows or rewire ports; the Neutron L3 agent should not delete namespaces left by an older version of the agent; the Neutron DHCP agent should not require immediate DHCP lease renewal; etc.

The same considerations apply to setups that do not rely on agents. Meaning that, e.g., an OpenDaylight or OVN controller should not break data plane connectivity during its upgrade process.

Upgrade testing
---------------

`Grenade `_ is the OpenStack project that is designed to validate upgrade scenarios.

Currently, only the offline (non-rolling) upgrade scenario is validated in the Neutron gate. The upgrade scenario consists of the following steps:

#. the 'old' cloud is set up using latest stable release code
#. all services are stopped
#. code is updated to the patch under review
#. new database migration scripts are applied, if needed
#. all services are started
#. the 'new' cloud is validated with a subset of tempest tests

The scenario validates that no configuration option names are changed in one cycle. More generally, it validates that the 'new' cloud is capable of running using the 'old' configuration files. It also validates that database migration scripts can be executed.

The scenario does *not* validate AMQP versioning compatibility.

Other projects (for example Nova) have so-called 'partial' grenade jobs where some services are left running using the old version of code. Such a job would be needed in the Neutron gate to validate rolling upgrades for the project. Until then, it's up to reviewers to catch compatibility issues in patches on review.

Another gap in testing concerns the split migration script branches. It's assumed that an 'old' cloud can successfully run after 'expand' migration scripts from the 'new' cloud are applied to its database, but this is not validated in the gate.

.. _upgrade_review_guidelines:

Review guidelines
-----------------

There are several upgrade related gotchas that should be tracked by reviewers.

First things first, some general advice to reviewers: make sure new code does not violate requirements set by the `global OpenStack deprecation policy `_.

Now to specifics:

#. Configuration options:

   * options should not be dropped from the tree without waiting out the deprecation period (currently one development cycle long), with a deprecation message issued if the deprecated option is used.
   * option values should not change their meaning between releases.

#. Data plane:

   * agent restart should not result in data plane disruption (no Open vSwitch ports reset; no network namespaces deleted; no device names changed).

#. RPC versioning:

   * no RPC version major number should be bumped before all agents have had a chance to upgrade (meaning, at least one release cycle is needed before compatibility code to handle old clients is stripped from the tree).
   * no compatibility code should be added to the agent side of AMQP interfaces.
   * server code should be able to handle all previous versions of agents, unless the major version of an interface is bumped.
   * no RPC interface arguments should change their meaning, or names.
   * new arguments added to RPC interfaces should not be mandatory. This means that the server should be able to handle old requests without the new argument specified. Also, if the argument is not passed, the old behaviour before the addition of the argument should be retained.
   * the minimal client version must not be bumped for server-initiated notification changes for at least one cycle.

#. Database migrations:

   * migration code should be split into two branches (contract, expand) as needed. No code that is unsafe to execute while neutron-server is running should be added to the expand branch.
   * if possible, contract migrations should be minimized or avoided to reduce the time when API endpoints must be down during database upgrade.

neutron-8.4.0/doc/source/devref/services_and_agents.rst0000664000567000056710000000670313044372760024452 0ustar jenkinsjenkins00000000000000..
    Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.

    Convention for heading levels in Neutron devref:
    =======  Heading 0 (reserved for the title in a document)
    -------  Heading 1
    ~~~~~~~  Heading 2
    +++++++  Heading 3
    '''''''  Heading 4
    (Avoid deeper levels because they do not render well.)

Services and agents
===================

A typical Neutron setup consists of multiple services and agents running on one or more nodes (though some exotic setups may not need any agents). Each of those services provides some of the networking or API services. Among those, of special interest are:

#. neutron-server, which provides API endpoints and serves as a single point of access to the database. It usually runs on nodes called Controllers.
#. the Layer2 agent, which can utilize Open vSwitch, Linuxbridge or other vendor specific technology to provide network segmentation and isolation for tenant networks. The L2 agent should run on every node where it is deemed responsible for wiring and securing virtual interfaces (usually both Compute and Network nodes).
#. the Layer3 agent, which runs on the Network node and provides East-West and North-South routing plus some advanced services such as FWaaS or VPNaaS.

For the purpose of this document, we refer to all services, servers and agents that run on any node simply as "services".

Entry points
------------

Entry points for services are defined in setup.cfg under the "console_scripts" section. Those entry points should generally point to main() functions located under the neutron/cmd/... path.

Note: some existing vendor/plugin agents still maintain their entry points in other locations. Developers responsible for those agents are welcome to apply the guideline above.

Interacting with Eventlet
-------------------------

Neutron extensively utilizes the eventlet library to provide an asynchronous concurrency model for its services. To utilize it correctly, the following should be kept in mind.

If a service utilizes the eventlet library, then it should not call eventlet.monkey_patch() directly but instead maintain its entry point main() function under neutron/cmd/eventlet/... If that is the case, the standard Python library will be automatically patched for the service on entry point import (monkey patching is done inside the `python package file `_).

Note: an entry point 'main()' function may just be an indirection to a real callable located elsewhere, as is done for reference services such as DHCP, L3 and the neutron-server.

For more info on the rationale behind the code tree setup, see `the corresponding cross-project spec `_.
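As an illustration of the layout described above, here is a minimal, self-contained sketch of the entry point indirection (the function names are hypothetical; in Neutron itself the ``eventlet.monkey_patch()`` call lives in the ``neutron/cmd/eventlet`` package ``__init__``, so every entry point module below it is patched on import):

.. code:: python

    import eventlet

    # In the real tree this call is made in neutron/cmd/eventlet/__init__.py;
    # it is shown inline here only to keep the sketch self-contained.
    eventlet.monkey_patch()


    def _real_agent_loop():
        # stand-in for the actual service loop (e.g. the DHCP or L3 agent)
        pass


    def main():
        # the console_scripts entry point is just an indirection to the
        # real callable
        _real_agent_loop()


    if __name__ == '__main__':
        main()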
Connecting to the Database
--------------------------

Only the neutron-server connects to the neutron database. Agents may never connect directly to the database, as this would break the ability to do rolling upgrades.

neutron-8.4.0/doc/source/devref/address_scopes.rst0000664000567000056710000002351213044372760023442 0ustar jenkinsjenkins00000000000000Subnet Pools and Address Scopes
===============================

This page discusses subnet pools and address scopes.

Subnet Pools
------------

Learn about subnet pools by watching the summit talk given in Vancouver [#]_.

.. [#] http://www.youtube.com/watch?v=QqP8yBUUXBM&t=6m12s

Subnet pools were added in Kilo. They are relatively simple. A SubnetPool has any number of SubnetPoolPrefix objects associated with it. These prefixes are in CIDR format. Each CIDR is a piece of the address space that is available for allocation. Subnet pools support IPv6 just as well as IPv4.

The Subnet model object now has a subnetpool_id attribute whose default is null for backward compatibility. The subnetpool_id attribute stores the UUID of the subnet pool that acted as the source for the address range of a particular subnet.

When creating a subnet, the subnetpool_id can be optionally specified. If it is, the 'cidr' field is not required. If 'cidr' is specified, it will be allocated from the pool assuming the pool includes it and hasn't already allocated any part of it. If 'cidr' is left out, then the prefixlen attribute can be specified. If it is not, the default prefix length will be taken from the subnet pool. Think of it this way: the allocation logic always needs to know the size of the subnet desired. It can pull it from a specific CIDR, prefixlen, or default. A specific CIDR is optional and the allocation will try to honor it if provided. The request will fail if it can't honor it.

Subnet pools do not allow overlap of subnets.

Subnet Pool Quotas
~~~~~~~~~~~~~~~~~~

A quota mechanism was provided for subnet pools. It is different from other quota mechanisms in Neutron because it doesn't count instances of first class objects. Instead it counts how much of the address space is used.

For IPv4, it made reasonable sense to count quota in terms of individual addresses. So, if you're allowed exactly one /24, your quota should be set to 256. Three /26s would be 192. This mechanism encourages more efficient use of the IPv4 space, which will be increasingly important when working with globally routable addresses.

For IPv6, the smallest viable subnet in Neutron is a /64. There is no reason to allocate a subnet of any other size for use on a Neutron network. It would look pretty funny to set a quota of 4611686018427387904 to allow one /64 subnet. To avoid this, we count IPv6 quota in terms of /64s. So, a quota of 3 allows three /64 subnets. When we need to allocate something smaller in the future, we will need to ensure that the code can handle non-integer quota consumption.

Allocation
~~~~~~~~~~

Allocation is done in a way that aims to minimize fragmentation of the pool. The relevant code is here [#]_. First, the available prefixes are computed using a set difference: pool - allocations. The result is compacted [#]_ and then sorted by size. The subnet is then allocated from the smallest available prefix that is large enough to accommodate the request.

.. [#] neutron/ipam/subnet_alloc.py (_allocate_any_subnet)
.. [#] http://pythonhosted.org/netaddr/api.html#netaddr.IPSet.compact
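The strategy can be sketched in a few lines of netaddr, the library the allocation code relies on. This is a simplified illustration of the algorithm just described, not the actual ``_allocate_any_subnet`` implementation; quota and database handling are omitted:

.. code:: python

    import netaddr


    def allocate_any_subnet(pool_cidrs, allocated_cidrs, prefixlen):
        available = netaddr.IPSet(pool_cidrs) - netaddr.IPSet(allocated_cidrs)
        available.compact()  # merge adjacent prefixes
        # higher prefixlen == smaller block, so this orders candidates from
        # the smallest available prefix to the largest
        candidates = sorted(available.iter_cidrs(),
                            key=lambda cidr: cidr.prefixlen, reverse=True)
        for cidr in candidates:
            if cidr.prefixlen <= prefixlen:
                return next(cidr.subnet(prefixlen))
        raise ValueError('pool exhausted for /%d request' % prefixlen)


    # carves the /26 out of the smallest leftover prefix (10.0.1.0/24)
    print(allocate_any_subnet(['10.0.0.0/16'], ['10.0.0.0/24'], 26))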
Address Scopes
--------------

Before subnet pools or address scopes, it was impossible to tell if a network address was routable in a certain context because the address was given explicitly on subnet create and wasn't validated against any other addresses. Address scopes are meant to solve this by putting control over the address space in the hands of an authority: the address scope owner. They make use of the already existing SubnetPool concept for allocation.

Address scopes are "the thing within which address overlap is not allowed" and thus provide more flexible control as well as decoupling of address overlap from tenancy.

Prior to the Mitaka release, there was implicitly only a single 'shared' address scope. Arbitrary address overlap was allowed, making it pretty much a "free for all". To make things seem somewhat sane, normal tenants were not able to use routers to cross-plug networks from different tenants, and NAT was used between internal networks and external networks. It was almost as if each tenant had a private address scope.

The problem is that this model cannot support use cases where NAT is not desired or supported (e.g. IPv6) or where we want to allow different tenants to cross-plug their networks.

An AddressScope covers only one address family, but address scopes work equally well for IPv4 and IPv6.

Routing
~~~~~~~

The reference implementation honors address scopes. Within an address scope, addresses route freely (barring any FW rules or other external restrictions). Between scopes, routing is prevented unless address translation is used. For now, floating IPs are the only place where traffic crosses scope boundaries. The 1-1 NAT allows this to happen.

.. TODO (Carl) Implement NAT for floating ips crossing scopes
.. TODO (Carl) Implement SNAT for crossing scopes

RPC
~~~

The L3 agent in the reference implementation needs to know the address scope for each port on each router in order to map ingress traffic correctly.

Each subnet from the same address family on a network is required to be from the same subnet pool. Therefore, the address scope will also be the same. If this were not the case, it would be more difficult to match ingress traffic on a port with the appropriate scope. It may be counter-intuitive, but L3 address scopes need to be anchored to some sort of non-L3 thing (e.g. an L2 interface) in the topology in order to determine the scope of ingress traffic. For now, we use ports/networks. In the future, we may be able to distinguish by something else like the remote MAC address or something.

The address scope id is set on each port in a dict under the 'address_scopes' attribute. The scope is distinct per address family. If the attribute does not appear, it is assumed to be null for both families. A value of null means that the addresses are in the "implicit" address scope which holds all addresses that don't have an explicit one. All subnets that existed in Neutron before address scopes existed fall here.

Here is an example of how the json will look in the context of a router port::

    "address_scopes": {
        "4": "d010a0ea-660e-4df4-86ca-ae2ed96da5c1",
        "6": null
    },

To implement floating IPs crossing scope boundaries, the L3 agent needs to know the target scope of the floating ip. The fixed address is not enough to disambiguate because, theoretically, there could be overlapping addresses from different scopes. The scope is computed [#]_ from the floating ip fixed port and attached to the floating ip dict under the 'fixed_ip_address_scope' attribute. Here's what the json looks like (trimmed)::

    {
        ...
        "floating_ip_address": "172.24.4.4",
        "fixed_ip_address": "172.16.0.3",
        "fixed_ip_address_scope": "d010a0ea-660e-4df4-86ca-ae2ed96da5c1",
        ...
    }
.. [#] neutron/db/l3_db.py (_get_sync_floating_ips)

Model
~~~~~

The model for subnet pools and address scopes can be found in neutron/db/models_v2.py and neutron/db/address_scope_db.py. This document won't go over all of the details. It is worth noting how they relate to existing Neutron objects. The existing Neutron subnet now optionally references a single subnet pool::

    +----------------+        +------------------+        +--------------+
    | Subnet         |        | SubnetPool       |        | AddressScope |
    +----------------+        +------------------+        +--------------+
    | subnet_pool_id +------> | address_scope_id +------> |              |
    |                |        |                  |        |              |
    |                |        |                  |        |              |
    |                |        |                  |        |              |
    +----------------+        +------------------+        +--------------+

L3 Agent
~~~~~~~~

The L3 agent is limited in its support for multiple address scopes. Within a router in the reference implementation, traffic is marked on ingress with the address scope corresponding to the network it is coming from. If that traffic would route to an interface in a different address scope, the traffic is blocked unless an exception is made.

One exception is made for floating IP traffic. When traffic is headed to a floating IP, DNAT is applied and the traffic is allowed to route to the private IP address, potentially crossing the address scope boundary. When traffic flows from an internal port to the external network and a floating IP is assigned, that traffic is also allowed.

Another exception is made for traffic from an internal network to the external network when SNAT is enabled. In this case, SNAT to the router's fixed IP address is applied to the traffic. However, SNAT is not used if the external network has an explicit address scope assigned and it matches the internal network's. In that case, traffic routes straight through without NAT. The internal network's addresses are viable on the external network in this case.

The reference implementation has limitations. Even with multiple address scopes, a router implementation is unable to connect to two networks with overlapping IP addresses. There are two reasons for this.

First, a single routing table is used inside the namespace. An implementation using multiple routing tables has been in the works, but there are some unresolved issues with it.

Second, the default SNAT feature cannot be supported with the current Linux conntrack implementation unless a double NAT is used (one NAT to get from the address scope to an intermediate address specific to the scope, and a second NAT to get from that intermediate address to an external address). A single NAT won't work if there are duplicate addresses across the scopes.

Due to these complications, the router will still refuse to connect to overlapping subnets. We can look into an implementation that overcomes these limitations in the future.

neutron-8.4.0/doc/source/devref/openvswitch_firewall.rst0000664000567000056710000004304313044372760024700 0ustar jenkinsjenkins00000000000000..
    Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
    Convention for heading levels in Neutron devref:
    =======  Heading 0 (reserved for the title in a document)
    -------  Heading 1
    ~~~~~~~  Heading 2
    +++++++  Heading 3
    '''''''  Heading 4
    (Avoid deeper levels because they do not render well.)

Open vSwitch Firewall Driver
============================

The OVS driver has the same API as the current iptables firewall driver, keeping the state of security groups and ports inside of the firewall. The class ``SGPortMap`` was created to keep state consistent; it maps from ports to security groups and vice-versa. Every port and security group is represented by its own object encapsulating the necessary information.

Note: the Open vSwitch firewall driver uses register 5 for marking a flow related to a port, and register 6, which identifies the network and is used for conntrack zones.

Firewall API calls
------------------

There are two main calls performed by the firewall driver in order to either create or update a port with security groups - ``prepare_port_filter`` and ``update_port_filter``. Both methods rely on the security group objects that are already defined in the driver and work similarly to their iptables counterparts. The definition of the objects will be described later in this document. ``prepare_port_filter`` must be called only once during port creation, and it defines the initial rules for the port. When the port is updated, all filtering rules are removed, and new rules are generated based on the available information about security groups in the driver.

Security group rules can be defined in the firewall driver by calling ``update_security_group_rules``, which rewrites all the rules for a given security group. If a remote security group is changed, then ``update_security_group_members`` is called to determine the set of IP addresses that should be allowed for this remote security group. Calling these methods will not have any effect on existing instance ports. In other words, if the port is using security groups and its rules are changed by calling one of the above methods, then no new rules are generated for this port. ``update_port_filter`` must be called for the changes to take effect.

All the machinery above is controlled by security group RPC methods, which means the firewall driver doesn't contain any logic about which port should be updated based on the provided changes; it only performs actions when called from the controller.

OpenFlow rules
--------------

At first, every connection is split into ingress and egress processes based on the input or output port respectively. Each port contains the initial hardcoded flows for ARP, DHCP and established connections, which are accepted by default. To detect established connections, a flow must be marked by conntrack first with an ``action=ct()`` rule. An accepted flow means that ingress packets for the connection are directly sent to the port, and egress packets are left to be normally switched by the integration bridge.

Connections that are not matched by the above rules are sent to either the ingress or egress filtering table, depending on their direction. The reason the rules are based on security group rules in separate tables is to make it easy to detect these rules during removal.

The firewall driver method ``create_rules_generator_for_port`` creates a generator that builds a single security group rule either from rules belonging to a given group, or from rules allowing connections to remote groups. Every rule is then expanded into several OpenFlow rules by the method ``create_flows_from_rule_and_port``.
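The expansion can be pictured with the following simplified sketch; this is illustrative only, not the driver's actual code. It mirrors the table numbers used in the example below (72 for egress, 82 for ingress), producing one flow for established connections and one for new ones:

.. code:: python

    EGRESS_TABLE = 72
    INGRESS_TABLE = 82


    def create_flows_from_rule_and_port(rule, port_reg5):
        """Expand one security group rule into OVS flow dictionaries."""
        flow = {
            'table': (EGRESS_TABLE if rule['direction'] == 'egress'
                      else INGRESS_TABLE),
            'priority': 70,
            'reg5': port_reg5,  # value identifying the port
        }
        if rule.get('protocol'):
            flow['proto'] = rule['protocol']
        if rule.get('source_ip_prefix'):
            flow['nw_src'] = rule['source_ip_prefix']
        # established traffic is matched separately from new connections
        return [dict(flow, ct_state='+est-rel-rpl'),
                dict(flow, ct_state='+new-est')]


    print(create_flows_from_rule_and_port(
        {'direction': 'ingress', 'protocol': 'icmp',
         'source_ip_prefix': '192.168.0.1/32'}, 0x2))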
Rules example with explanation:
-------------------------------

The following example presents two ports on the same host. They have different security groups, and icmp traffic is allowed from the first security group to the second security group. The ports have the following attributes::

    Port 1
      - plugged to the port 1 in OVS bridge
      - ip address: 192.168.0.1
      - mac address: fa:16:3e:a4:22:10
      - security group 1: can send icmp packets out

    Port 2
      - plugged to the port 2 in OVS bridge
      - ip address: 192.168.0.2
      - mac address: fa:16:3e:24:57:c7
      - security group 2: can receive icmp packets from security group 1

The first table, ``table 0``, separates traffic into ingress and egress and loads into ``register 5`` a value identifying the port. The ingress flow is determined by the switch port number and the egress flow is determined by the destination mac address. ``register 6`` contains a value identifying the network, which is used for conntrack zones. ::

    table=0, priority=100,in_port=1 actions=load:0x1->NXM_NX_REG5[],load:0x284->NXM_NX_REG6[],resubmit(,71)
    table=0, priority=100,in_port=2 actions=load:0x2->NXM_NX_REG5[],load:0x284->NXM_NX_REG6[],resubmit(,71)
    table=0, priority=90,dl_dst=fa:16:3e:a4:22:10 actions=load:0x1->NXM_NX_REG5[],load:0x284->NXM_NX_REG6[],resubmit(,81)
    table=0, priority=90,dl_dst=fa:16:3e:24:57:c7 actions=load:0x2->NXM_NX_REG5[],load:0x284->NXM_NX_REG6[],resubmit(,81)
    table=0, priority=0 actions=NORMAL

The following ``table 71`` implements arp spoofing protection and ip spoofing protection, allows egress traffic used for obtaining ip addresses (dhcp, dhcpv6, slaac, ndp) and allows arp replies. It also identifies untracked connections, which are processed later with information obtained from conntrack. Notice the ``zone=NXM_NX_REG6[0..15]`` in ``actions`` when obtaining information from conntrack. It means every port has its own conntrack zone, defined by the value in ``register 6``. It's there to avoid accepting established traffic that belongs to a different port with the same conntrack parameters.

The rules below allow ICMPv6 traffic for multicast listeners, neighbour solicitation and neighbour advertisement. ::

    table=71, priority=95,icmp6,reg5=0x1,in_port=1,icmp_type=130 actions=NORMAL
    table=71, priority=95,icmp6,reg5=0x1,in_port=1,icmp_type=131 actions=NORMAL
    table=71, priority=95,icmp6,reg5=0x1,in_port=1,icmp_type=132 actions=NORMAL
    table=71, priority=95,icmp6,reg5=0x1,in_port=1,icmp_type=135 actions=NORMAL
    table=71, priority=95,icmp6,reg5=0x1,in_port=1,icmp_type=136 actions=NORMAL
    table=71, priority=95,icmp6,reg5=0x2,in_port=2,icmp_type=130 actions=NORMAL
    table=71, priority=95,icmp6,reg5=0x2,in_port=2,icmp_type=131 actions=NORMAL
    table=71, priority=95,icmp6,reg5=0x2,in_port=2,icmp_type=132 actions=NORMAL
    table=71, priority=95,icmp6,reg5=0x2,in_port=2,icmp_type=135 actions=NORMAL
    table=71, priority=95,icmp6,reg5=0x2,in_port=2,icmp_type=136 actions=NORMAL

The following rules implement arp spoofing protection ::

    table=71, priority=95,arp,reg5=0x1,in_port=1,dl_src=fa:16:3e:a4:22:10,arp_spa=192.168.0.1 actions=NORMAL
    table=71, priority=95,arp,reg5=0x2,in_port=2,dl_src=fa:16:3e:24:57:c7,arp_spa=192.168.0.2 actions=NORMAL

DHCP and DHCPv6 traffic is allowed to reach instances, but DHCP servers running on instances are blocked.
::

    table=71, priority=80,udp,reg5=0x1,in_port=1,tp_src=68,tp_dst=67 actions=resubmit(,73)
    table=71, priority=80,udp6,reg5=0x1,in_port=1,tp_src=546,tp_dst=547 actions=resubmit(,73)
    table=71, priority=70,udp,reg5=0x1,in_port=1,tp_src=67,tp_dst=68 actions=drop
    table=71, priority=70,udp6,reg5=0x1,in_port=1,tp_src=547,tp_dst=546 actions=drop
    table=71, priority=80,udp,reg5=0x2,in_port=2,tp_src=68,tp_dst=67 actions=resubmit(,73)
    table=71, priority=80,udp6,reg5=0x2,in_port=2,tp_src=546,tp_dst=547 actions=resubmit(,73)
    table=71, priority=70,udp,reg5=0x2,in_port=2,tp_src=67,tp_dst=68 actions=drop
    table=71, priority=70,udp6,reg5=0x2,in_port=2,tp_src=547,tp_dst=546 actions=drop

The following rules obtain conntrack information for valid ip and mac address combinations. All other packets are dropped. ::

    table=71, priority=65,ct_state=-trk,ip,reg5=0x1,in_port=1,dl_src=fa:16:3e:a4:22:10,nw_src=192.168.0.1 actions=ct(table=72,zone=NXM_NX_REG6[0..15])
    table=71, priority=65,ct_state=-trk,ip,reg5=0x2,in_port=2,dl_src=fa:16:3e:24:57:c7,nw_src=192.168.0.2 actions=ct(table=72,zone=NXM_NX_REG6[0..15])
    table=71, priority=65,ct_state=-trk,ipv6,reg5=0x1,in_port=1,dl_src=fa:16:3e:a4:22:10,ipv6_src=fe80::f816:3eff:fea4:2210 actions=ct(table=72,zone=NXM_NX_REG6[0..15])
    table=71, priority=65,ct_state=-trk,ipv6,reg5=0x2,in_port=2,dl_src=fa:16:3e:24:57:c7,ipv6_src=fe80::f816:3eff:fe24:57c7 actions=ct(table=72,zone=NXM_NX_REG6[0..15])
    table=71, priority=10,ct_state=-trk,reg5=0x1,in_port=1 actions=drop
    table=71, priority=10,ct_state=-trk,reg5=0x2,in_port=2 actions=drop
    table=71, priority=0 actions=drop

``table 72`` accepts only established or related connections, and implements rules defined by the security group. As this egress connection might also be an ingress connection for some other port, it's not switched yet but eventually processed by the ingress pipeline. All established or new connections defined by a security group rule are ``accepted``, which will be explained later. All invalid packets are dropped. In the case below, we allow all icmp egress traffic. ::

    table=72, priority=70,ct_state=+est-rel-rpl,icmp,reg5=0x1,dl_src=fa:16:3e:a4:22:10 actions=resubmit(,73)
    table=72, priority=70,ct_state=+new-est,icmp,reg5=0x1,dl_src=fa:16:3e:a4:22:10 actions=resubmit(,73)
    table=72, priority=50,ct_state=+inv+trk actions=drop

The important part of the flows below is ``ct_mark=0x1``. Flows carry this value when the connection was marked as no longer valid by a rule introduced later. Those are typically connections that were allowed by some security group rule that has since been removed. ::

    table=72, priority=50,ct_mark=0x1,reg5=0x1 actions=drop
    table=72, priority=50,ct_mark=0x1,reg5=0x2 actions=drop

All other connections that are not marked and are established or related are allowed. ::

    table=72, priority=50,ct_state=+est-rel+rpl,ct_zone=644,ct_mark=0,reg5=0x1 actions=NORMAL
    table=72, priority=50,ct_state=+est-rel+rpl,ct_zone=644,ct_mark=0,reg5=0x2 actions=NORMAL
    table=72, priority=50,ct_state=-new-est+rel-inv,ct_zone=644,ct_mark=0,reg5=0x1 actions=NORMAL
    table=72, priority=50,ct_state=-new-est+rel-inv,ct_zone=644,ct_mark=0,reg5=0x2 actions=NORMAL

The following flows mark established connections that weren't matched in the previous flows, which means they no longer have an accepting security group rule.
::

    table=72, priority=40,ct_state=-est,reg5=0x1 actions=drop
    table=72, priority=40,ct_state=+est,reg5=0x1 actions=ct(commit,zone=NXM_NX_REG6[0..15],exec(load:0x1->NXM_NX_CT_MARK[]))
    table=72, priority=40,ct_state=-est,reg5=0x2 actions=drop
    table=72, priority=40,ct_state=+est,reg5=0x2 actions=ct(commit,zone=NXM_NX_REG6[0..15],exec(load:0x1->NXM_NX_CT_MARK[]))
    table=72, priority=0 actions=drop

In the following ``table 73``, all detected ingress connections are sent to the ingress pipeline. Since the connection was already accepted by the egress pipeline, all remaining egress connections are sent to normal switching. ::

    table=73, priority=100,dl_dst=fa:16:3e:a4:22:10 actions=load:0x1->NXM_NX_REG5[],resubmit(,81)
    table=73, priority=100,dl_dst=fa:16:3e:24:57:c7 actions=load:0x2->NXM_NX_REG5[],resubmit(,81)
    table=73, priority=90,ct_state=+new-est,reg5=0x1 actions=ct(commit,zone=NXM_NX_REG6[0..15]),NORMAL
    table=73, priority=90,ct_state=+new-est,reg5=0x2 actions=ct(commit,zone=NXM_NX_REG6[0..15]),NORMAL
    table=73, priority=80,reg5=0x1 actions=NORMAL
    table=73, priority=80,reg5=0x2 actions=NORMAL
    table=73, priority=0 actions=drop

``table 81`` is similar to ``table 71``; it allows basic ingress traffic for obtaining an ip address, and arp queries. Note that the vlan tag must be removed by adding ``strip_vlan`` to the actions list prior to injecting the packet directly into the port. Untracked packets are sent on to obtain conntrack information. ::

    table=81, priority=100,arp,reg5=0x1,dl_dst=fa:16:3e:a4:22:10 actions=strip_vlan,output:1
    table=81, priority=100,arp,reg5=0x2,dl_dst=fa:16:3e:24:57:c7 actions=strip_vlan,output:2
    table=81, priority=100,icmp6,reg5=0x1,dl_dst=fa:16:3e:a4:22:10,icmp_type=130 actions=strip_vlan,output:1
    table=81, priority=100,icmp6,reg5=0x1,dl_dst=fa:16:3e:a4:22:10,icmp_type=131 actions=strip_vlan,output:1
    table=81, priority=100,icmp6,reg5=0x1,dl_dst=fa:16:3e:a4:22:10,icmp_type=132 actions=strip_vlan,output:1
    table=81, priority=100,icmp6,reg5=0x1,dl_dst=fa:16:3e:a4:22:10,icmp_type=135 actions=strip_vlan,output:1
    table=81, priority=100,icmp6,reg5=0x1,dl_dst=fa:16:3e:a4:22:10,icmp_type=136 actions=strip_vlan,output:1
    table=81, priority=100,icmp6,reg5=0x2,dl_dst=fa:16:3e:24:57:c7,icmp_type=130 actions=strip_vlan,output:2
    table=81, priority=100,icmp6,reg5=0x2,dl_dst=fa:16:3e:24:57:c7,icmp_type=131 actions=strip_vlan,output:2
    table=81, priority=100,icmp6,reg5=0x2,dl_dst=fa:16:3e:24:57:c7,icmp_type=132 actions=strip_vlan,output:2
    table=81, priority=100,icmp6,reg5=0x2,dl_dst=fa:16:3e:24:57:c7,icmp_type=135 actions=strip_vlan,output:2
    table=81, priority=100,icmp6,reg5=0x2,dl_dst=fa:16:3e:24:57:c7,icmp_type=136 actions=strip_vlan,output:2
    table=81, priority=95,udp,reg5=0x1,tp_src=67,tp_dst=68 actions=strip_vlan,output:1
    table=81, priority=95,udp6,reg5=0x1,tp_src=547,tp_dst=546 actions=strip_vlan,output:1
    table=81, priority=95,udp,reg5=0x2,tp_src=67,tp_dst=68 actions=strip_vlan,output:2
    table=81, priority=95,udp6,reg5=0x2,tp_src=547,tp_dst=546 actions=strip_vlan,output:2
    table=81, priority=90,ct_state=-trk,ip,reg5=0x1 actions=ct(table=82,zone=NXM_NX_REG6[0..15])
    table=81, priority=90,ct_state=-trk,ipv6,reg5=0x1 actions=ct(table=82,zone=NXM_NX_REG6[0..15])
    table=81, priority=90,ct_state=-trk,ip,reg5=0x2 actions=ct(table=82,zone=NXM_NX_REG6[0..15])
    table=81, priority=90,ct_state=-trk,ipv6,reg5=0x2 actions=ct(table=82,zone=NXM_NX_REG6[0..15])
    table=81, priority=80,ct_state=+trk,reg5=0x1,dl_dst=fa:16:3e:a4:22:10 actions=resubmit(,82)
    table=81, priority=80,ct_state=+trk,reg5=0x2,dl_dst=fa:16:3e:24:57:c7 actions=resubmit(,82)
    table=81, priority=0 actions=drop

Similarly to ``table 72``, ``table 82`` accepts established and related connections. In this case we allow all icmp traffic coming from ``security group 1``, which in this case contains only ``port 1`` with ip address ``192.168.0.1``. ::

    table=82, priority=70,ct_state=+est-rel-rpl,icmp,reg5=0x2,dl_dst=fa:16:3e:24:57:c7,nw_src=192.168.0.1 actions=strip_vlan,output:2
    table=82, priority=70,ct_state=+new-est,icmp,reg5=0x2,dl_dst=fa:16:3e:24:57:c7,nw_src=192.168.0.1 actions=ct(commit,zone=NXM_NX_REG6[0..15]),strip_vlan,output:2
    table=82, priority=50,ct_state=+inv+trk actions=drop

The mechanism for dropping connections that are not allowed anymore is the same as in ``table 72``. ::

    table=82, priority=50,ct_mark=0x1,reg5=0x1 actions=drop
    table=82, priority=50,ct_mark=0x1,reg5=0x2 actions=drop
    table=82, priority=50,ct_state=+est-rel+rpl,ct_zone=644,ct_mark=0,reg5=0x1,dl_dst=fa:16:3e:a4:22:10 actions=strip_vlan,output:1
    table=82, priority=50,ct_state=+est-rel+rpl,ct_zone=644,ct_mark=0,reg5=0x2,dl_dst=fa:16:3e:24:57:c7 actions=strip_vlan,output:2
    table=82, priority=50,ct_state=-new-est+rel-inv,ct_zone=644,ct_mark=0,reg5=0x1,dl_dst=fa:16:3e:a4:22:10 actions=strip_vlan,output:1
    table=82, priority=50,ct_state=-new-est+rel-inv,ct_zone=644,ct_mark=0,reg5=0x2,dl_dst=fa:16:3e:24:57:c7 actions=strip_vlan,output:2
    table=82, priority=40,ct_state=-est,reg5=0x1 actions=drop
    table=82, priority=40,ct_state=+est,reg5=0x1 actions=ct(commit,zone=NXM_NX_REG6[0..15],exec(load:0x1->NXM_NX_CT_MARK[]))
    table=82, priority=40,ct_state=-est,reg5=0x2 actions=drop
    table=82, priority=40,ct_state=+est,reg5=0x2 actions=ct(commit,zone=NXM_NX_REG6[0..15],exec(load:0x1->NXM_NX_CT_MARK[]))
    table=82, priority=0 actions=drop

Future work
-----------

- Create fullstack tests with tunneling enabled
- Conjunctions in OpenFlow rules can be created to decrease the number of rules needed for remote security groups
- Masking the port range can be used to avoid generating a single rule per port number being filtered. For example, a rule with a mask such as tcp,tcp_src=0x03e8/0xfff8 can cover a whole block of ports (see the sketch below)
- During the update of firewall rules, we can use bundles to make the changes atomic
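The port range masking item can be illustrated with a short sketch; this is an assumption about how such masking could work, not code from the driver. A contiguous range is covered by value/mask pairs so that a single flow can match many ports; the ``0x03e8/0xfff8`` mask from the list above covers ports 1000-1007:

.. code:: python

    def port_range_to_masks(start, end):
        """Cover [start, end] with the fewest (port, mask) pairs."""
        pairs = []
        while start <= end:
            # largest power-of-two block aligned at 'start'...
            block = start & -start or 1 << 16
            # ...shrunk until it fits inside the remaining range
            while block > end - start + 1:
                block >>= 1
            pairs.append((start, 0xffff & ~(block - 1)))
            start += block
        return pairs


    for port, mask in port_range_to_masks(1000, 1007):
        print('tcp,tcp_src=0x%04x/0x%04x' % (port, mask))
    # -> tcp,tcp_src=0x03e8/0xfff8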
Upgrade path from iptables hybrid driver
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

During an upgrade, the agent will need to re-plug each instance's tap device into the integration bridge while trying not to break existing connections. One of the following approaches can be taken:

1) Pause the running instance in order to prevent a short period of time where its network interface does not have firewall rules. This can happen due to the firewall driver calling OVS to obtain information about the OVS port. Once the instance is paused and no traffic is flowing, we can delete the qvo interface from the integration bridge, detach the tap device from the qbr bridge and plug the tap device back into the integration bridge. Once this is done, the firewall rules are applied for the OVS tap interface and the instance is started from its paused state.

2) Set drop rules for the instance's tap interface, delete the qbr bridge and related veths, plug the tap device into the integration bridge, apply the OVS firewall rules and finally remove the drop rules for the instance.

3) Compute nodes can be upgraded one at a time. A free node can be switched to use the OVS firewall, and instances from other nodes can be live-migrated to it. Once the first node is evacuated, its firewall driver can then be switched to the OVS driver.

neutron-8.4.0/doc/source/devref/api_layer.rst0000664000567000056710000000571213044372736022403 0ustar jenkinsjenkins00000000000000..
    Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.

    Convention for heading levels in Neutron devref:
    =======  Heading 0 (reserved for the title in a document)
    -------  Heading 1
    ~~~~~~~  Heading 2
    +++++++  Heading 3
    '''''''  Heading 4
    (Avoid deeper levels because they do not render well.)

Neutron WSGI/HTTP API layer
===========================

This section will cover the internals of Neutron's HTTP API, and the classes in Neutron that can be used to create Extensions to the Neutron API.

Python web applications interface with webservers through the Python Web Server Gateway Interface (WSGI) - defined in `PEP 333 `_.

Startup
-------

Neutron's WSGI server is started from the `server module `_, where the entry point `serve_wsgi` is called to build an instance of the `NeutronApiService`_; the server module then spawns an `Eventlet`_ `GreenPool`_ that runs the WSGI application and responds to requests from clients.

.. _NeutronApiService: http://git.openstack.org/cgit/openstack/neutron/tree/neutron/service.py
.. _Eventlet: http://eventlet.net/
.. _GreenPool: http://eventlet.net/doc/modules/greenpool.html

WSGI Application
----------------

During the building of the NeutronApiService, the `_run_wsgi` function builds the WSGI application via the `load_paste_app` function inside `config.py`_, which parses `api-paste.ini`_ and creates the WSGI app using `Paste`_'s `deploy`_.

The api-paste.ini file defines the WSGI applications and routes - using the `Paste INI file format`_. The INI file directs paste to instantiate the `APIRouter`_ class of Neutron, which contains several methods that map Neutron resources (such as Ports, Networks, Subnets) to URLs, and the controller for each resource.

.. _config.py: http://git.openstack.org/cgit/openstack/neutron/tree/neutron/common/config.py
.. _api-paste.ini: http://git.openstack.org/cgit/openstack/neutron/tree/etc/api-paste.ini
.. _APIRouter: http://git.openstack.org/cgit/openstack/neutron/tree/neutron/api/v2/router.py
.. _Paste: http://pythonpaste.org/
.. _Deploy: http://pythonpaste.org/deploy/
.. _Paste INI file format: http://pythonpaste.org/deploy/#applications

Further reading
---------------

`Yong Sheng Gong: Deep Dive into Neutron `_

neutron-8.4.0/doc/source/devref/policy.rst0000664000567000056710000003577713044372760021750 0ustar jenkinsjenkins00000000000000..
    Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
    Convention for heading levels in Neutron devref:
    =======  Heading 0 (reserved for the title in a document)
    -------  Heading 1
    ~~~~~~~  Heading 2
    +++++++  Heading 3
    '''''''  Heading 4
    (Avoid deeper levels because they do not render well.)

Authorization Policy Enforcement
==================================

Like most OpenStack projects, Neutron leverages oslo_policy [#]_. However, since Neutron loves to be special and complicate every developer's life, it also "augments" oslo_policy capabilities by:

* A wrapper module with its own API: neutron.policy;
* The ability to add fine-grained checks on attributes for resources in request bodies;
* The ability to use the policy engine to filter out attributes in responses;
* Adding some custom rule checks beyond those defined in oslo_policy;

This document discusses Neutron-specific aspects of policy enforcement, and in particular how the enforcement logic is wired into API processing. For any other information please refer to the developer documentation for oslo_policy [#]_.

Authorization workflow
-----------------------

The Neutron API controllers perform policy checks in two phases during the processing of an API request:

* Request authorization, immediately before dispatching the request to the plugin layer for ``POST``, ``PUT``, and ``DELETE``, and immediately after returning from the plugin layer for ``GET`` requests;
* Response filtering, when building the response to be returned to the API consumer.

Request authorization
~~~~~~~~~~~~~~~~~~~~~~

The aim of this step is to authorize processing for a request or reject it with an error status code. This step uses the ``neutron.policy.enforce`` routine. This routine raises ``oslo_policy.PolicyNotAuthorized`` when policy enforcement fails (a sketch of a typical enforcement call follows the parameter list below). The Neutron REST API controllers catch this exception and return:

* A 403 response code on a ``POST`` request or a ``PUT`` request for an object owned by the tenant submitting the request;
* A 403 response for failures while authorizing API actions such as ``add_router_interface``;
* A 404 response for ``DELETE``, ``GET`` and all other ``PUT`` requests.

For ``DELETE`` operations the resource must first be fetched. This is done by invoking the same ``_item`` [#]_ method used for processing ``GET`` requests. This is also true for ``PUT`` operations, since the Neutron API implements ``PATCH`` semantics for ``PUTs``.

The criteria to evaluate are built in the ``_build_match_rule`` [#]_ routine. This routine takes the following input parameters:

* The action to be performed, in the ``<operation>_<resource>`` form, e.g. ``create_network``
* The data to use for performing checks. For ``POST`` operations this could be a partial specification of the object, whereas it is always a full specification for ``GET``, ``PUT``, and ``DELETE`` requests, as resource data are retrieved before dispatching the call to the plugin layer.
* The collection name for the resource specified in the previous parameter; for instance, for a network it would be "networks".
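For illustration, a request authorization step driven by these parameters might look like the following sketch (simplified and hypothetical; the actual invocation lives in ``neutron.api.v2.base.Controller``):

.. code:: python

    from oslo_policy import policy as oslo_policy

    from neutron import policy


    def authorize_create_network(context, network_body):
        try:
            # the action is in the <operation>_<resource> form; the request
            # body supplies the attribute values that fine-grained rules
            # may check
            policy.enforce(context, 'create_network', network_body)
        except oslo_policy.PolicyNotAuthorized:
            # the REST controller maps this to a 403 for POST requests
            raise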
The ``_build_match_rule`` routine returns an ``oslo_policy.RuleCheck`` instance built in the following way:

* Always add a check for the action being performed. This will match a policy like create_network in ``policy.json``;
* Return for ``GET`` operations; more detailed checks will be performed anyway when building the response;
* For each attribute which has been explicitly specified in the request, create a rule matching policy names in the form ``<operation>_<resource>:<attribute_name>``, and link it with the previous rule with an 'And' relationship (using ``oslo_policy.AndCheck``); this step will be performed only if the enforce_policy flag is set to True in the resource attribute descriptor (usually found in a data structure called ``RESOURCE_ATTRIBUTE_MAP``);
* If the attribute is a composite one, then further rules will be created; these will match policy names in the form ``<operation>_<resource>:<attribute_name>:<sub_attribute_name>``. An 'And' relationship will be used in this case too.

As all the rules to verify are linked by 'And' relationships, all the policy checks should succeed in order for a request to be authorized. Rule verification is performed by ``oslo_policy`` with no "customization" from the Neutron side.

Response Filtering
~~~~~~~~~~~~~~~~~~~

Some Neutron extensions, like the provider networks one, add attributes to resources which are not meant to be consumed by all clients. This might be because these attributes contain implementation details, or are meant only to be used when exchanging information between services, such as Nova and Neutron.

For this reason the policy engine is invoked again when building API responses. This is achieved by the ``_exclude_attributes_by_policy`` [#]_ method in ``neutron.api.v2.base.Controller``. This method, for each attribute in the response returned by the plugin layer, first checks if the ``is_visible`` flag is True. In that case it proceeds to checking policies for the attribute; if the policy check fails, the attribute is added to a list of attributes that should be removed from the response before returning it to the API client.

The neutron.policy API
------------------------

The ``neutron.policy`` module exposes a simple API whose main goal is to allow the REST API controllers to implement the authorization workflow discussed in this document. It is a bad practice to call the policy engine from within the plugin layer, as this would make request authorization dependent on configured plugins, and therefore make API behaviour dependent on the plugin itself, which defies the Neutron tenet of being backend agnostic.

The neutron.policy API exposes the following routines:

* ``init`` Initializes the policy engine, loading rules from the json policy file(s). This method can safely be called several times.
* ``reset`` Clears all the rules currently configured in the policy engine. It is called in unit tests and at the end of the initialization of the core API router [#]_ in order to ensure rules are loaded after all the extensions are loaded.
* ``refresh`` Combines init and reset. Called when a SIGHUP signal is sent to an API worker.
* ``set_rules`` Explicitly set policy engine's rules. Used only in unit tests.
* ``check`` Perform a check using the policy engine. Builds match rules as described in this document, and then evaluates the resulting rule using oslo_policy's policy engine. Returns True if the check succeeds, False otherwise.
* ``enforce`` Operates like the check routine but raises an exception if the check in oslo_policy fails.
* ``check_is_admin`` Enforce the predefined context_is_admin rule; used to determine the is_admin property for a neutron context.
* ``check_is_advsvc`` Enforce the predefined context_is_advsvc rule; used to determine the is_advsvc property for a neutron context.

Neutron specific policy rules
------------------------------

Neutron provides two additional policy rule classes in order to support the "augmented" authorization capabilities it provides. They both extend ``oslo_policy.RuleCheck`` and are registered using the ``oslo_policy.register`` decorator.

OwnerCheck: Extended Checks for Resource Ownership
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

This class is registered for rules matching the ``tenant_id`` keyword and overrides the generic check performed by oslo_policy in this case. It is used for those cases where neutron needs to check whether the tenant submitting a request for a new resource owns the parent resource of the one being created. Current usages of ``OwnerCheck`` include, for instance, creating and updating a subnet.

The check, performed in the ``__call__`` method, works as follows:

* verify if the target field is already in the target data. If yes, then simply verify whether the value for the target field in target data is equal to the value for the same field in credentials, just like ``oslo_policy.GenericCheck`` would do. This is also the most frequent case, as the target field is usually ``tenant_id``;
* if the previous check failed, extract a parent resource type and a parent field name from the target field. For instance ``networks:tenant_id`` identifies the ``tenant_id`` attribute of the ``network`` resource;
* if no parent resource or target field could be identified, raise a ``PolicyCheckError`` exception;
* Retrieve a 'parent foreign key' from the ``RESOURCE_FOREIGN_KEYS`` data structure in ``neutron.api.v2.attributes``. This foreign key is simply the attribute acting as a primary key in the parent resource. A ``PolicyCheckError`` exception will be raised if such 'parent foreign key' cannot be retrieved;
* Using the core plugin, retrieve an instance of the resource having 'parent foreign key' as an identifier;
* Finally, verify whether the target field in this resource matches the one in the initial request data. For instance, for a port create request, verify whether the ``tenant_id`` of the port data structure matches the ``tenant_id`` of the network where this port is being created.

FieldCheck: Verify Resource Attributes
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

This class is registered with the policy engine for rules matching the 'field' keyword, and provides a way to perform fine grained checks on resource attributes. For instance, using this class of rules it is possible to specify a rule for granting every tenant read access to shared resources.

In policy.json, a FieldCheck rule is specified in the following way::

    field: <resource>:<field>=<value>

This will result in the initialization of a FieldCheck that will check for ``<field>`` in the target resource data, and return ``True`` if it is equal to ``<value>``, or return ``False`` if the ``<field>`` either is not equal to ``<value>`` or does not exist at all.

Guidance for API developers
----------------------------

When developing REST APIs for Neutron it is important to be aware of how the policy engine will authorize these requests. This is true both for APIs served by Neutron "core" and for the APIs served by the various Neutron "stadium" services.

* If an attribute of a resource might be subject to authorization checks then the ``enforce_policy`` attribute should be set to ``True``.
Neutron specific policy rules
-----------------------------

Neutron provides two additional policy rule classes to support its
"augmented" authorization capabilities. They both extend
``oslo_policy.RuleCheck`` and are registered using the
``oslo_policy.register`` decorator.

OwnerCheck: Extended Checks for Resource Ownership
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

This class is registered for rules matching the ``tenant_id`` keyword and
overrides the generic check performed by oslo_policy in this case. It is
used for those cases where Neutron needs to check whether the tenant
submitting a request for a new resource owns the parent resource of the one
being created. Current usages of ``OwnerCheck`` include, for instance,
creating and updating a subnet.

The check, performed in the ``__call__`` method, works as follows:

* Verify whether the target field is already in the target data. If so,
  simply verify whether the value for the target field in the target data is
  equal to the value for the same field in the credentials, just like
  ``oslo_policy.GenericCheck`` would do. This is also the most frequent
  case, as the target field is usually ``tenant_id``;
* If the previous check failed, extract a parent resource type and a parent
  field name from the target field. For instance ``networks:tenant_id``
  identifies the ``tenant_id`` attribute of the ``network`` resource;
* If no parent resource or target field could be identified, raise a
  ``PolicyCheckError`` exception;
* Retrieve a 'parent foreign key' from the ``RESOURCE_FOREIGN_KEYS`` data
  structure in ``neutron.api.v2.attributes``. This foreign key is simply the
  attribute acting as a primary key in the parent resource. A
  ``PolicyCheckError`` exception will be raised if such a 'parent foreign
  key' cannot be retrieved;
* Using the core plugin, retrieve an instance of the parent resource having
  the 'parent foreign key' as an identifier;
* Finally, verify whether the target field in this resource matches the one
  in the initial request data. For instance, for a port create request,
  verify whether the ``tenant_id`` of the port data structure matches the
  ``tenant_id`` of the network where this port is being created.

FieldCheck: Verify Resource Attributes
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

This class is registered with the policy engine for rules matching the
``field`` keyword, and provides a way to perform fine-grained checks on
resource attributes. For instance, using this class of rules it is possible
to specify a rule for granting every tenant read access to shared resources.
In policy.json, a FieldCheck rule is specified in the following way::

    <resource>: field:<field>=<value>

This will result in the initialization of a FieldCheck that will check for
``<field>`` in the target resource data, and return ``True`` if it is equal
to ``<value>``, or return ``False`` if ``<field>`` either is not equal to
``<value>`` or does not exist at all.
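For example, the ``shared`` rule in Neutron's default ``policy.json`` uses a
FieldCheck to grant every tenant read access to shared networks (quoted here
for illustration; the exact wording may vary across releases)::

    "shared": "field:networks:shared=True",
    "get_network": "rule:admin_or_owner or rule:shared or rule:external or rule:context_is_advsvc",

With these definitions any tenant passes the ``get_network`` policy for a
network whose ``shared`` attribute is ``True``, regardless of ownership.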
Guidance for API developers
---------------------------

When developing REST APIs for Neutron it is important to be aware of how the
policy engine will authorize these requests. This is true both for APIs
served by Neutron "core" and for the APIs served by the various Neutron
"stadium" services.

* If an attribute of a resource might be subject to authorization checks,
  then the ``enforce_policy`` attribute should be set to ``True``. While
  setting this flag to ``True`` for each attribute is a viable strategy, it
  is worth noting that this will require a call to the policy engine for
  each attribute, thus considerably increasing the time required to complete
  policy checks for a resource. This could result in a scalability issue,
  especially in the case of list operations retrieving a large number of
  resources;
* Some resource attributes, even if not directly used in policy checks,
  might still be required by the policy engine. This is, for instance, the
  case of the ``tenant_id`` attribute. For these attributes the
  ``required_by_policy`` attribute should always be set to ``True``. This
  will ensure that the attribute is included in the resource data sent to
  the policy engine for evaluation;
* The ``tenant_id`` attribute is a fundamental one in Neutron API request
  authorization. The default policy, ``admin_or_owner``, uses it to validate
  whether a tenant owns the resource it is trying to operate on. To this
  aim, if a resource is created without a ``tenant_id``, it is important to
  ensure that ad-hoc authZ policies are specified for this resource.
* There is still only one check hardcoded in Neutron's API layer: the check
  verifying that a tenant owns the network on which it is creating a port.
  This check is always executed when creating a port, unless the network is
  shared. Unfortunately, a solution for performing this check in an
  efficient way through the policy engine has not yet been found, and due to
  its nature there is no way to override this check using the policy engine.
* It is strongly advised not to perform policy checks in the plugin or in
  the database management classes. This might lead to divergent API
  behaviours across plugins. Also, it might leave the Neutron DB in an
  inconsistent state if a request is not authorized after it has already
  been dispatched to the backend.

Notes
-----

* No authorization checks are performed for requests coming from the RPC
  over AMQP channel. For all these requests a Neutron admin context is
  built, and the plugins will process them as such.
* For ``PUT`` and ``DELETE`` requests a 404 error is returned on request
  authorization failures rather than a 403, unless the tenant submitting the
  request owns the resource to update or delete. This is to avoid conditions
  in which an API client might try to find out other tenants' resource
  identifiers by sending out ``PUT`` and ``DELETE`` requests for random
  resource identifiers.
* There is no way at the moment to specify an ``OR`` relationship between
  two attributes of a given resource (e.g. ``port.name == 'meh' or
  port.status == 'DOWN'``), unless a rule with the OR condition is
  explicitly added to the policy.json file.
* ``OwnerCheck`` performs a plugin access; this will likely require a
  database access, but since the behaviour is implementation specific it
  might also imply a round-trip to the backend. This class of checks, when
  it involves retrieving attributes for 'parent' resources, should therefore
  be used very sparingly.
* In order for ``OwnerCheck`` rules to work, parent resources should have an
  entry in ``neutron.api.v2.attributes.RESOURCE_FOREIGN_KEYS``; moreover,
  the resource must be managed by the 'core' plugin (i.e. the one defined in
  the ``core_plugin`` configuration variable).

References
----------

.. [#] `Oslo policy module `_
.. [#] `Oslo policy developer `_
.. [#] API controller item_ method

.. _item: http://git.openstack.org/cgit/openstack/neutron/tree/neutron/api/v2/base.py?id=2015.1.1#n282

.. [#] Policy engine's build_match_rule_ method

.. _build_match_rule: http://git.openstack.org/cgit/openstack/neutron/tree/neutron/policy.py?id=2015.1.1#n187

.. [#] exclude_attributes_by_policy_ method

.. _exclude_attributes_by_policy: http://git.openstack.org/cgit/openstack/neutron/tree/neutron/api/v2/base.py?id=2015.1.1#n132

.. [#] Policy reset_ in neutron.api.v2.router

.. _reset: http://git.openstack.org/cgit/openstack/neutron/tree/neutron/api/v2/router.py?id=2015.1.1#n122
>'IO$O>'|'DO >'|O >'|'D> OI'$O >'|O >'|'D> OI'$O >'|O >'|'Diu+fF70o(oT4mIsm(yiѰ +ittث;S9q.+:˪5_q?GSnzV >i|C/OBm^j00^}z |O>so+Mӗ !s\L5Ɔ\ >*|nz͂ /;|ѣ/~㮻2y˂;'D3OoQo|г-Sn4" |O%lm־6oo;XR.[a|4jgvеEIćσǿtWwK;wyQ=X{+5|>FݪWز]MwCXQƯ6Uv y۽-w,oosO'D?-s`0M-݇}˭{ 峪b9h ={^+.{;=H99o#^g/s s{7ϝ#v2~^س(0{P%R[SvͤNڢ.~5EبEZe%e<>nv`}^r|S->-Z-§;8⪫u, >7qdMtu%C!|ɱ3O*A}9rܿGuCI^^Ï#@MuA)x =0͞owM?Y-%$=[#ƌe<>aKmu4)<(pK󮃱Jꠎ:]%kZ@g37޳-م6D+Wy63ns|bm?$d b>Aר/UuY>O]aXE =:|] >'x| WvZ7@fQ v31bN_rq(>3η{'_<)1S *Cڿށ^UUzY|Ѿh.]TAoH){ W9;8_o&O bJWy)/8| + [ʿ H+)c-4y;],/qd ډ&w̤kza#u%.y y_7M:DK} >QsFg6q384{aϞrF}4iDۃs1V;DRc-(9xe", {|܇ϠKg"=g[,tŧKJHn^_Fʹ?5yy"3!ѧ`* )ts"~hq~k1vsEh;Ku* A9 yY1V8v['D(kn yN|0NS!N*˭{ly߲` Ay2>=,Ro8n)WGCR띸頎5n|g/T]27Ig)5Fk>'13A3^PUiZtO!>DvXrR2 /;dK:M{Ns?Z޵˄W >Q&3#蠲xXf$4q@W]-׸|[.I67o)U|F![Ka):>؉d:c ŧ)~3tr`z ~[*(}28':>c:ԫwH0ZAqSދbxد_3J7yrxԲ anH"˯y:3͈՗'^# >Q3ʪuW,|F:47|:obyQwV 5н+$  '#/%S/v7s?& >'Jk|zFpA {D|Fg"G:́ thOksԾil_<`uchoq5A|*UbLtU&'Dنo,#Oxzu>ML">5-8#3|*t:7|ƁY"D|uzGݓ >7iG{/~: >Qfs90zAtŚggή-O;5'?ֹ ުө}vO7I[!icSm'V{p)޸;IǧAǽcyƾjN^1z_o,K" >QfSL]/fK">uP{vo Ι->c- iAa5^lINnw@oAު[|"'#n_}X$(J1uUW O/+|[?Y:oHyAe{LpqD3[Н]z>) `+o\+pP{.}+W^ƂO ɧ.^["DZ-@88d-(S-T'8d>S|LYpoeԲ]'D;V2J3ioN='3翟P >XmmO>gN'7 Qw=sCE3)S*ږJQ'|q2(+L|>yIg_(>) 峪.-͝I +$1;+g٦խwߣG- >^}Fe д~D> >3,O'|'D> OI'|O >'|O'|'D> OIgֹ DPB:ݎҟ"9|'D͛/@n'$ s,#٥RfeeeWsss| q|RNrO >s1+#2|RNrPO >1I9O"O'f|RN'>)'OY >'Of|O |b'|O>1r>EI9|O"O>'f|O v|O"O'f|RN'D>)'OY',O |O >1r>'EI9O"O>'f|RN'>)'|O "|RN'D>)'|"Y:϶mȣt >FTVV4kkk/'33|guI'7"sKH*I9Ʌr>3vGW&]?4)Fdn I%>-;"s=ab|f>}z=KP}F|r#2wX;LH O >'|'> OI'.O >'|O >'I > >I|O|O >'|OI  > > OI'$O >'|O >'|''d59~*|aǻbͳo jgxԶ4:h[,XYi2c᙭ލi>SIW<ҐvVB܂O O*_&e8q]tۋ.yr5eo(he4t4i~n?+ qmYUEy#a".^=kr/XűomhwĘ:j={w,o(ETi[bi6|>|Jrs3Y>Jk;ʘj`lng㋯0Q{'ZСv ݱnjQe|q!۾enYpn@n(94>XݴͶ\5f\Ydŧzo8ݷ_oꏽ{YUȻQ9L9 oV6g3!n|v|ٍA'O|rzmܦWLR?zϖ5Zz>8Zlmvtmз2-YϷʪǧ<`k̤kC#4=`m >Qri&Ct{/M-T/'uX^|[ۙ0zgpt򙗦ϗ[.[ō4GUw}p6fU #G K-/>r#w^wT}+l@!+6|vO1cٓOE +н6yVzZj[: :[vNi*媻+1 Sֻ1eF3bX:O9m5'wς}='m }$ŷ6[2 O#sAѣǴ+PHb7xs l7FO[ݪա~W1I=/)"K3x'$ɶz_}[#MyQ"f>‹O~[]o(>+~>GrLvS5n9⪫u_EsM>|οA~rS0+ݡ]` DTa(UmI&OK5 Ju_í/f;(ޫO߾ X1)0B-JC Zd#Ali$|C^>ۖ;* ,Oc۴lLçj3[c]튑3n<_6ۊ0bBms䖸'8|:Pn0hPE:eKV>QJ)h^ Dr)|tM[šo*+̙6fb$>s >{Ƨ$sX3s|ʕtpp4l5|JA o璊]E +? 
>gZu]6"uMNSfoV5+]J">}-nx߄& 6UJ|_ ># [Mob@Ik \ɖ ӭk1ngWS#1-+?f]wuX"§# SKo]UwF\uuWr gKGt| " \|z9e,ڇΘ5ٹQ-A|=4]Lwt(5[GBC QoG_Dʃw,ɢN#o(K34Ra;nZ#Y(" S)GF9.NN >nrV8P|ovꠉ|v Qk)n,wnOT3\2nB\R KA|VtW# 6ކӽ95ROE%z@dE9))dֻEïp+3$M|&܂V>F Wom%۾{gQ%aCb\cC'Cּm{C3 v"%E'L>3HZ\F rަ&Pqg3o\ܲrܿv'J|*̿Ai]ѧG§w 3 Bh[rOߒೋM}`V>%=l[1cv;'L(wb_3qrl1ӵߚHy_]Ro^44uc  .-m[nԒP>&C0zɭXjuY.bL|ӻD_,Sӵ&<~}σ9viOSi>X"R޲ҟjzyo§[S+O))%3ho6].3tʺۭӫC/5}>k޳5IaEd1 .V| {biէ/DσǿTEb }M_ͻ_0>EMw h$M/xM ً#]˼H[6Auc/N=}q\çШ6!MSrouY_|ԑOlc\AT!Q >s.d:> /!OB GI >'$OD|'|" >|O >'|O >'|'jO$O>'|'>'|O >'|RB'|O"$OD|'|" > |O >'|όsZx^^эFdb I=>KrKLH},͛/@܈-!'$ |򨹹S8)QZZӬJ|B9Ol29r)'CAy<>1|" >'O >D|O'|O>)|OO >|O'|O>|"'|b(|O>'|" >O>gPO>zɓ'ϝ;xj:u2|;v,;W_}uΞ=5e>7o}֊h^QQd-r}}}aaKxƍErlٳhii… s]-]T%#F_ٝ;wjcMMM'Md?O8QYY-.}Tt Q*zd>'Z|UUU%+e 8`і,Y----͘1~)\nR[rȸ!>EZcuxD'gy"BաΪVaz)>2d 7mӎ"JsjDDaU744ʨj (( i\|ekklLո=hr)Y߅ cdLshXnDǧ*!&>U6TRyؾ}Va姺ڶf2e#E|T!Fi$|-:M >QvS@k-ʕ$`?@թWcǎjUB&{*jUm%WGĉc {^[ٳG>d9Awr J0iZەsO!V2tE ]R}wM^{g0 (md%AWn]1PRSNnR_.~ԧ"[;wujʹ4}ңz'DىO*a~uVoJbeSFPS |iOcwq\{;nGTUUeM&Nx,)xAM|ZA$yˀ[lwPUY[[7nl TRH|Tn۪r"X1pKY/q)} >Qvz=C+ԮMl(URR`*kFvZ<;e˜mQӁ `)#kR:;|jL\3b֒K§2[:?al7Y|FoSU>QiT(:࢑ۢ AҭŗCa}*j֡nʕ+-R>u:ee*vʤՄ* Vt ]i_eNr~[ޢ7O2(Ckjjȑ#Ӛ܀)qKW_餼#oe:eM"GJJNS ^+c4Zϫʀb:C;X>g6[* ][R'FFɭG;iMVtBh6͊Y+o}d]mR&O2VwΥYFi#"DM%d&RÚ\ GT̤ZopgmmȨoV=< w3YOsϳJXh\E֩i]1+'y/Z b^uh1Pɉ֛"ש >Q0ZMQ!q4"RJE";8Uiʮwl_'3^#43鎸.lXZZz,PT-]O---;|zDm&TJ-Ytɤ>'J/|ڠ{lVտnɪ6 ^\++uyד_2ld"5ʙdl%LUlvoݺUz-ֿ\S[[[aeUU7jmv|ATY I6NS*Il)>yvZn}ET˶NG-m 0()Idu`3vN賛cN3Lu"vH5Q >Q,Y/dljGƩo%CbJ lQatZZZ\ۚ Țv٘eMQEEEnP|Z7jV/Wz/&GZskp ӶFA{[)]`jb4J͋O\WD@B"wylnuLjHȭ$J(KiÍ;Vu/UTTv#eBY lHHhleP4}wާ<(K.j.tЎ(No)sȶk"2>uV :_c"׮= sC0l3OBSQWEu.&d^S4"[Wt,n:C|kuTNۼIb'Ʃ*nLn,E*eJM#%5畕t"͛Mk444()㷻ъ~z+ʼ2 JERoQn S/ >'6|핱e|+i|fEbœ5|\gIIIAARoj>Mk˄'N|*I畟[gKW;6)|όħׯ=/}>+մх .YDgW̿|L'|b('D|O>'|"'O>'|R'O?mY-[V-GN0G{9|Fի/@٥r|OAv]wuL֟3<^~rb,'|\=\D|rA'a'D|" >'|O >D3Դ6UVV+3gΜ?^{ $wc9|F3<lR^^^,f9|vTVV>*--͝Ub/'|\ X6)\2H>y*' >O'|" >'DOΞ=M,Yׯr^iI&w%//|&gϞfZHMEWRkjjyUt"fP͛'Eܳg\2 Y|^S >Oի|ӖcǎSR=EPo`ĈLO2 2ޖ.Yŗ1ڸq\n݁M9w Qnvi)"4661X_"YyA3-gܬKQ=uUm_! "XVkΝ*,?ǏBQeOYDTv͙r >Q&3(1rɽ}#hN^eeڪƏ?vX2jjz[5kx/s-ޥA#Nii`>Ϡ\xQx^_nv Ba>JYG bn̘1CO> >ƧNUU E+))4 W};e8FSsUGeVUkʔ)ӪJvš`JӜ%nv 6`>?\P$9r~~FOqD+fVUt ]b ^ ʌ%Ȯ5zmPe_e|v?>;*K.OG;w\R߽͹4 ~V2 ~zZG'0|ݔ PУ&8UZyʔ)AM֭֔;-]Զب .+z7ZORVnn2ލ7ntYªwyf{k7̚5f&ϴŧ*F¡JyoV}k-}8bi7z}sKTD >-{%%% ()['э:r [p9Pç,"OqB`fNi'ጝ6DZj5ٖecY|&ޡ=&Ow&UڄIWYwohn&<т1~ǂϥKe~g >QF3Tfn}=EMe-u&NJelnN<|eN3Q^1m>#]UUBTlL(5*"b*YVF%RSS*f޵SY>W X~|" >vZAv/+#"k('2E?nMSM{Pmܺu*qEÎ>?Û+-n?O:%Nˣu '+˭TG7N۷o -YÝ;w8¡ W?yo@[WWg)()o}%hu >u;|'3Ւq6TQߕ}fVPO >sܨiM"M|Z[V-ڞ5oܹsekg6 T6Lb('DOӞ={},_h>s_oW9͛7 >'\D|" >'O >D|O'|O%|O >'O >|O'|O>|b.'|TO >1|"AgϞ=qD{{ٳu:ӶGfڧOg<ϷnZYY9b{ K644z_ >1 >y*r18vXcc)S|ջO~շ]0k<817f^^^YYЛ^)D|"ZZZFGنm-w=sG^_WWwqPOSA{{Iy^&r+o>q::2'7]W2ݑĉsCtܹ+W8lqÞ>;k-#F8r\O@^`EEr+q߲}]3骂O%(D|"?^PPοwP|"Iy3M3gXgngWwVܕ+WO>|cp99fnl vkZ'|R}}m;a~7y+,g}z5;'(>ϝ;gm~rS:¤)O>gwJ9Q~OKTtQ\嶠 MP'DOC Q~mޞ!J|" >Q.sϞ=߼ 쪫3,J~OrK.UfzqlG_(ֽ80vKK DO *..Vf%$Ңa-Nʚ ʖ|" >SG*'^ztF}%>G^^~~~ |" >QᳱQ9Y|_qOo y7pE7tX|" >Qsʔ)ɦWL >']?Øzȉ/oTt,Y>D|TI+ ŎOt՘q oA'M|" >S3gXOdo>l{^qӧDO:?~:,3 fOr XD|.[xo&_V'>D|H9R7?(8Ʊdn_<|" >QnSyV6oYy8m--vn/-EnOr Ľ.rCt߭sO>)j爫^I|#w*% >D|HIhXݿ -mz̆m-]l!'LUeוLҡC?G^E\|^˙'3:vXRGoUi`޷~O>'-|>}ZxL\Ydq_^ 3O ()RσǿhXrg4:Ə>D|TcǎUNtc\6;@w=&χ֪'|ϔ!+:d4 ՝u%ӕ'|ϔ?b%ݚD^"KgϞ|" >Ss*385KƯۼ5>G>nU eeeT'|O%jjjO)|*eo0N/P [n|" >A .fSћl;78_^tQ})'tyyͻ&}Q. 
.`iiife>K*?+2oݾ-D|\S ' [XYȸ>1|§;tcxk6_O>g7̙3怾tg͕dmmm&e>u)WWY'7lzMO>0|" >F_}Ո#oЫwoeO>1|,*Nyo:f^pe&s PORKK-r4a~fӔ)SE|" >Q=|U'D|"O:w\YYrX|ŕmֽ~g{lБ#G2O ()>}ޤ]xZKaG_=[ZZO (pջ3[S?V>dȐ?; PO_oŭDѬ߽mN4)=;'SsN-INasΚ3O᪫gS >QYlI?z=+z:?-W'|O[K,;3^bͳ 6WN'O (i:~3s0ֹ qW) h 'D3QٳgClm,o ؚƎښ|b('D9O666z9z~Ka']?m֜ywܷlUyPԖKYצU ǎˑ>1|":qNĆEWaab_ɓV'|O>W_۷oU][4\|"'L+# >|O'D|O>|"'|b(|O>'|" >O>'|" >'\b('|" >O'|" >'DO >'|R'OC>'|" >O>'|" >'D>'C>'OO'|O>'DO >'|O >'>y*x 'D|" >'O >D|O'|O%>y*'|O>'D|" >'|O >D|OS>'PO'|O>'DO >'|f3>ڶe*++uYv^F|feQJ]N'>Q^^2\('|XKazyEݿ'!äLX*i?퇷y! .'|ѧ7߳4"h^ DP >$O|'|" > > >'|O >'D|O|'|" > > >'|O >'D|O|'|" > > >'|O >'|O(/9_9'N]7zpPy.Y-]Ԕ^^ܫO_WL D3iGwA&]? |z aD&>1f4y [v|" >s7<{O ?`M?yҟraϞ}̤k_w|" >s1^{Ӆ۪ܯoB_j_|" >s.X|zÖ0W>v̰WO>gw}gz'WډO"RY{@'D}}>|ig/Վqv}߾HzEv^2gXtS㮻3ꮺ_n^o4R4؏CW7N~";%V4=yZ;bTm=1F|01c7['DO1ljM S\)2t GKS߫cszyPکF`ygC=+b5gwj\eSn >է"x K1WwC^E[:#pԗTݪN;:z! >QFֹ Ķ~Kt1,^oy0L#gYwnm->N|7yS/|ߎC|_|b]9߾-*=z\:1_+F҈nl\O['2wʏ/-E B|'k*?n`}|">6ۢ˭{TGPA|3]5fqYqo# :*e+֌P~BiN߷X%nws FPj~cٓO y~O+_C//5}X}+<-ū'9OˇGW]ħim Vn|߿6oEe_וLE\kn:3u{ HV*FS-bV9/Cˉ(Md&l9mwuWhݟ +lw24*UK|Gy>.ŧ|+DVaQ G6_y9|=o^opk)>/_ Fhqq@K&_lb 9ߴ+ɚ>;:), 3%, {ǼLA](-S־7=~iŧo o0Hw_ݹfd[>\GM]rֻ3J:n|-aQ ku*18l .*r`tHوe$|Oqh.-gp/Gt,qmrdCx{g;L|Ǝ(%GO:5M7t) :9\"X =_qc^^T O"|* ӛyy32dL >+[ zzw<@'d|ol`XYsEju~o< z!5|Ǝ;7Dc4`g(>`X>˳*gX\5  >'x|ܺ74뻿~pXaЁlqdXT)0u.EDgW E0f(ef r:]R1N9ή[|OK_&σǿt35#-Qdoي5rݤ%5xeK.g)K>HCiK6?ۿLVoe[g1Kh&O7EgvU͢le7W dpq/PQ/_~M2)A| OZe̖'{'̸5O-=J ̀O2&Pa*)|aǻnܬoa֠knbe彤t?k ŧu"Z袔s >.yyٍ:jt蘠 ] ۄQgq{Ã&Oy끟{# }M: >eV>Ż%nۡ (`{m{ܛ#nYo|" >/(Ba {/إEdCJͻw}ߧrܺw]HїӱD|}3t@T}So,]tMѶ'DHN`):\$O>'|'>'|O >'|OIO$O>'|'>'|O >'|OIO$O'|'> O >'|O >'O$O] fY_F!hzyE'؋#Vwy6|Oܼy(-B9|v1Y|$5eʔ<3%+--]lYTsss| q|fGQ'MԳgO+( |OcN>nݺQF*3ZZZΝ;k )蒰~cǺڕٳ <^SSosȐ! 'OY϶jﭯ?~8|O >cՙ3g6nuAJKK+e?ٍ˫Ȧ > .tIaaaz$'ՄT1c!ES |&GgϞݼyIΨL9رcK.UMnF555s >*9rO>fy ,Y"s >ӿ@q3gP'Dgmm6ϒe+!;S6ϕ]t.' %%%^xݬ^z=裔D 3V^͌,SSSSI9r>'쀠t}ɛ˜4h*QҴk׮E}5}E͞={ǎuq3}wѣ\_W g>'X?ԩS3:rUV9r|vWƮZ衇?R'D3w^3$gt̙ Lx㍹sˍ 3gNƹOB׿uÆ ӦMs7J|ҭXB.ǏWގ=JylӾ}dY uF☃Eӥs@-Zh׮]. >~g矟>}z=]vY}}g5]:]@ |\tFEM7ņݎ {[UC>&9dϝ;w.X;h֬Y? O >QuAUÇOv3ڍsY2A_%3X7of!l(?(Yό'#(3p74ș⪪+Wv᚛,YĽ0;W_-zժUQhA}݅j۶m1A֭;}4!PtY&Mr42e$J9&">6=zTgAAI_Sɭ\~w ^!>J YӨX%Ti:??߭Q__qnB4ձcǖ.]^֧OԴ~H?~޽$??…rF}ioo#>;w 5*I#AE-B|":ydCC_TL9nL=z(!PN8;ws̮]ԩS]aufYYYsss1E|"\UUUn esʕratO:.oC駦OBOӅ !>D >DOn.D<]O>OB|".'8D!,l>oK"ϫ[?|">S+(K|c؄ #'d{0L}Y|"9|eO >BKJJZfرCnaÔ¤Iϟ饗jѣ~a3Ң߯ϋ.hٟ|kSO=e=7:Ѝ78p@;hРn|"LS|Oݻ0_ݻBcI\4qKф7Tb6Μ9SpݵkKpJH,>[য়~*kb/%xkA|"TMQaHPNy<&orqTw&OAQqnFРkѹ&|iҥhRmtيCM2%4ޚ>}&[ϋ79zhw8o>~Sv[q*n]Ϙ;[׺5_Q(T"QQQqEaDDE#DD2QQq70bD`D WT**FF_?pYksvNI;prXos}}Whڟ30'ӧ|~f @`]QI# >4Hy^OOٯO~7nugg'3 >28pԡu7oϟ__o޼|Fuhv܊c*7̜n[kuPo߾Us O 򢚰)"2sx{[ųS-}_맊Opp[ŋh)] ^jn-ӧ@zZܻ&ZznO>vQnݺEmQ!yS=uK|;wKGkEԱ6WȆy֜zL-bPޝ5_|*D|򮭭-DvZ/D,4ȹ}KIFY?Yܨ1* Pc]cͣ_~klwQgwhTZܑZW~jIU_>iI X/_:`~~^7;S@V }~[Z +1<ƖGp.v,ī)NR/y>)u\O2CWku(QMU;͛_O/$=/gt~kƇ= +Mu8xb$tó! 
iЋpA8O72mv(U|ʇ\ۣ0&yS"vO?U<]|ItaJ-ήp5Jߓ Y9`s6Ww ֘jvj&IMa 0vdTy d/Kgħ!s8^&sH)Y(Ƈ)}9V-XЖol؆yu+Ҵkǀկ54ul.B;4<8#> Nп TU{ا6+#wLB߅ z?Ј~>4gi$q_xកE_#E7C4-3 >ϩ8j <=m^ wod+ߌdgz=biif`6c=1߆NM'_d`ׂ3> |$~>yDu5]31Laok҄|.>P}I S!K4S}g8 5Mdp2I,ﳸNvo]g= >i `E%7G^~$ >r}ɍYk<ӲmN)>Pyij}}}Lkn^ l Gmyr^35c~p)KYåsFGjoyJ}g}x|N=6sPYlqa|6Pz ,^޼yNH\g9E/^ ac<ֈKU7,$YϹ==>q5q qLsZ>1`ύT>3mV1cwvvWXt;'j0u…\X >x-uk]&Rs9V|Sc(V[܍yZ?ym=|޹sfsccxZ6}s">aïԇӃܿi.>羽.[>K,W6 -F3 >O^2=2N٨e_>=%c,espt}V}:(b+ph|\rb}ؠήC|Fgealɋ@dͭf|'|gϞّ:g|cPZ3GVÐcl_@B0H~]d(XA{%'N qpp@s'Y:>]e}AZ">505g̼ >3ZBzꕍO;uK/ sƧ{ƺvm (ߗ E |ʕfXr c`X:>#c%3 >.j}EuS r*jƧլğ}" ӓoXߡa^EuS~ciccqs-o;&\D6kw$AX{s|ݻ4H(ʡZ$֖F(Tcfĵ2̯*4=z¦jFEA*Q׮]S+ݸqHx=Ŏ{ >%.MqE*w:ak&S {šP)$nfұO~7Ov/ z Z4ov>k]>jGLjhMy#a B/WRU=.hōBg(j=3QBjG⽽=}D 54m¬g8>] ~XtԄ[͙J^:yNj 'ҵlݶU5UKQ*AreޯfG׼b>,zzFapg}^Lr%ĪFsP7QApP)hY)TL?P#GG y.fcc{qz*8I?7ZޟWB]N5!쵯;zH[ Ւj&bY=w Ƕڙ-xB5cviY낺7R>k ѨX _}eiE֌ U)u' ellYk¾Sֱ2-+\`"0SN|ҩ/Jӊ!Vyhj9 +-̈́5+o\|\<ѿ*u;t=]祏.ϱک][[ gQ|7>k'GJyj9={ǧScRsUOgE_o,^c… uC]:0u@4,oiJc䁗.]jt3Q[Oc'QE W\y1={E=mrZV^@Bj|MJ~q䧵59?N o۷=Wh*)$ ()2Y@Ԯ( >3J튢33baS((LQjWQvEQ |F]Q|QEg sssQ-Oox A27?#~Kl37,}E? 9gnEQEVEQEgEQQEQ|FQEQEQEgEQEgEQQEQ|FQEQEQEgEQEgEQ-K'I`IENDB`neutron-8.4.0/doc/source/devref/images/under-the-hood-scenario-1-ovs-network.png0000664000567000056710000050465713044372736030760 0ustar jenkinsjenkins00000000000000PNG  IHDR9 pHYs  iTXtXML:com.adobe.xmp 5 2 1 m @IDATx`EדK!JTTTD~~g^;*JgII +޻Ap{S~77ogT HB@! K%B@! Dʅ B@!P +B@! +׀B@! @& ^7TN! B@\B@! zݼR9! B@r ! B@k"xuJ儀B@! D5 B@!P +B@! +׀B@! @& ^7TN! B@\B@! zݼR9! B@r ! . :R}9iH:MmHMBRJ&mPQj5(uJP+ eB(^ւ hUVo Lf3ki=q3ƳlX=C*66) 7PtvߒQ0Nxh;DPN xf@+=[w]{buR3}1a֧`\\}}] Fv?t|dZͩE=Ikq%`j\RSG@{~B@G@ ;9G +ળbܹHz%lxUSq Q ZVLܳaL| va}EYQ -bXԞ15ۙDZ2c|vnEwDdao".fTEᑟlXW-r|J]<[shS J@) !p) h1PQ@"m|/d]T* oĮ <ѮcO֪Vk"/DCfG'σK!CA{۠Tyf$^YN}`@Ǧv4*;,Sڸ 6lEN Q-;`pӝJTW^8j&? >rUA 'RvDz^h?M=c v LQ#,z4,Xl1R\0_$9+:`7X1K7lGE8 8a~.(؇Eco,]jj2Y R0gjhC[`p6GO|B@/vi LiyMI.n 4.ʠ*=Z+0(?\TMu+ |r?M@\Y]|~k[EYْbG*ܩ Solh]]?JS^1F*3'q2x$$KW&[JQvVzukFe2JY=*+:P:QotKŧtx0(f5(1^-N}P%P%$v%DY Jt_ҭOwEo0*]Ƽ''΍sP 3T4VaP+m\+R\`?4*STVL]n@,ʣSZ뿬WTO6GUrB2p0|6/}. ]+7⟥% 6ɕf_|B讆.}^Udtu'is l-SaYX8/,]+LGڴn[b`pq+\aDiXa9-݀i_d2/he/xMP >< r:k., G๏m1.hf,Z o} Anv!.llߵRdJD":exz />]ƴoAej9G0o,+Yyup⽧[Wk:ϵ! ' 3,lYM{BGC(\?;* 2H?Jz"\7l @n ѷ |(&?('?Ϋf){T?:B:D׾qݖ"r^8!Jgn}*~5hw?% BɄf۰w[apu8tNiEl_ 7 #A0o>Z*yjF o8X];W#5 xU 4swfY!,ށ5鮘x w5؅lF#:ކpoؽXQWOOOVl ! .*yx%q! .'b{Oq'5$< N*CxTcxifC\|ou5kںu'|pyBm$Wbպ}U)Ud5Vӝ:zL.aOF0 W~dHeg^2𖠈uɞ jF?۷y7ķo;Bi< w-`Z5xWH-/î!H¸+B]2pR 6+ZC|Ki'w=!>EXf)Mo_)$D^̒ ^LD F*Q\ S(6r> ~bA kIsШHd%Q K*(/RN/\O3BRB(0viVXG7O x(%7 耋/<? PYLjHy`PFh\;Ӕa%[u@`D݋OU$FBIJhAl,D9Ӕp̩2(T ;ȟWϧc2Քk8_~ҭGW.Ǎ׍K?<ՆB(* 3 s>+`5Cpf guhM+ϑM\[)f})mu߾ph?Q6{F$nzݝ ޱTOdgS!p! ཐ4%-! .+MR vj {C>w5X ϼ:"+;Q`id%3 ӣ-+MCw1Iwr=H2vÓ&Kaf\CGW-1>xtN+P9nG^BװSg饅5*K'邛}~5vR}0[d[pQqccuɯ7u$Z67"1)4'qv fwmLs~}W۞mK,4 .< [A~!'>=_Wg3| ! .YibP4,mYYZ 4-ki6/u[Aˆע]t`5S}an0:}pSuհ0rh()[ZceHx!ƽ ^;Nd%k#0kl[ Jlc8e&=w1}bXQ[m<_O'B5p A\@[~P9|hM_qW^3f,Y A_*Kg{rѵwoDp*NܶsDF:ai~`8c?<._7WP: kC^țS-څ ^ޟ~#8"xϟ B@!P KC-n)B@! { %! B@ZL@o-n)B@! { %! B@ZL@o-n)B@! { %! B@ZL@o-n)B@! p.s)I u@~~, 䘜N^ +j fX̴L.V-ͦ菖8Z.NV7tcZN)x%$$$?yTZ~n#U[*5JJC'E«H{{yÓnբxkucI:u*@yvE6Jw劕0"`b 2*_$ -f-8- [;$(z @--hIC\LjGRrxrlݺUń}{@֥dj4`a۷-b%!pT(.)G^o@ > =@C259+'JNq~ pHB< l6]{|OP H_aZ"pmJV5ytx~~~9o򝙭b=grb'~n(ew "m)"rAV]},ku~N􅬑hRBъ9㆓O~ɇk!' ! BenxL,WxBiέ0SYf1ka@'jpo  @#P#˥t slFEjL|4!P/ 54I uӻ-ARPV\@#m:A_kEl(u@/;**bW,,xmF>~[ruyPluC$j a[1̙VL&<O־,WMMdB3%G. ! bsaUX3a|0ޓi 1ﭛ1{ }CУd|-j9)"hD6 !phu-8̷4 D?j j+<k@bh?b"{3 r! B@!P=sB@ F@U4GD^A 4ݒ ~Y9Ũ kpu!Ѫ8Gwpei>Ha>7, L@LX| #! 8 &cNǗhJL B@J@E`ҡVo kγX:{׸ %49(Qu]?meTFpwۭOBg,.k=%s! 
@m&p^nIz"N MeF oJCvӮiREcM!%e(0bCCa`^л5ɕ.]E>4+ٯW : Ŗ8 rN%5b! j 3)&Ϭ> EPH4O6l:f]6}!†ݻPBzNɁPy H6xO#iZjgޜ3/,~29ٙ8$!а 4f" Uaۚ99rXL&hbKy)L/Xh/YҪ+܏aqWط`&($D:lؗ^!pA4lO|n5U9EP7X>|z6 ߄@.ѹ}Sw q%f X @=KF8`++ +|Ĵ Z2x0"I;-K7Tѿ C9Rnu3ª#kZ;Aѡs{eI$_nCnz3_}O7%oU`Ɍgծ,i`JOM⚌R'ACw~>_O|(Ol  יJE%x{nmx[s3EdjrCbkO!÷E<8OIe{ս [rtĎ(=ޱh~V| 9+D,9NвeWh"X9PVίIB!u6 zO3)=X*p_"7XJz!j- W584-o8jZ.-fcC ~(, 2$! @.Uh;4 9#6yYj9͗d'~- ,. ǐšÿ/~D` Gds-,$m؄"exP>/聰ytzD(& 3 +"Eǭ܎VVŴHz%v Jb'!ۆ}r/ {-j)N EF!&0-BYXjH^ND yx[nZ CNnJ2 q4;oD:ɿB@8okhBu i{QJu,TzxB/iJBGֆt-O=v4;lB'\}kīң;֓iZ)O~Ҥ|(>BTW ^ FrP~$UT SB޼47^p岔ӴF74Ο%AMVsGO ^騯߫g?(! q-xM%yHڲYG{XnInBNnrs"a.4-s ^h2n;܉^a>0F۔ I. ^)ʅUN!Ĩٮ7dT8ͤ}a]qe@?Np+ n I%m B~d肝 rޢMMOO}mS{; $;h'f09D@!p^׎:OgM*w7|̹TCbٵq]M۔6p9_Qs8á2{Ǘ3r2gBl޼YS߂ѦEr@7 y)>~=JY:@} /K-^u`CεyL6(: ~~];jS'JoxY0P_;zq^繺0~|74ṇr55Wr KX؋Z`))6 qbb+ G@S/>?WD۹MX}ü8+(ۋy3^:}ZpDEw?~^Į=Bod\.&=1uO!P_lIi<2H[_.m"LŊ^U#/V6u*rQF8Gjke7Wrr5ܮG Y|xmU}OF6G} \?h}KTOzB@ԐAh_Ʈ聹NZvqR;h˄ C=4:Gˋ<,[8>QRd@mkD0{6"KC&5ԟۭT:ֵ̍bi)ȹOoJu?2k>lXiRdɁ7߬ǷWߞvSAUwmט Hh@j,xqͰw9^ i%}sLFNB@s!@7et/{d 4jD!HկSK5,Y-ی2*X\jgKI.& ^^+#5<Z͌浭opմYmH޿ O#Q5 B@s&R@ptvԥ;sZuD/VceB33Lu@ѣQRR#E^&v]&I! 0ɲK3/1@C&P#{wkiȜ.x]]]B@! .. ^54]ܒHB@J*(/)A! .)v̄hxT˰7(6dL+Ix}fV*:6"`$_ZZYTK\rB!ʋ2aȳSixw{W-XqHC/$[Q @-&@E`B| $|ӭmu୰k:/-nQx4 ӿXiΔBVRB@4 j |<}АmwX*c,I8iiE\6\{s8]^*-7lo4A@T9:3l4դ}Inf>ζ@m;B& rh)}5q~ EUr~t9 ir5~݀ϭ,[l&s k$xmyZ-ﲡPz! ?^+-'o^ܜ4pO^ Dnn*uMBBO>rJFʆ&y(݅d3-d@gЦM=E8OL6NKv`G1W#$'#zˠB>إX8;fwLlM˃L̓ zԓ*zhmXv+(M F-Ro4W4A>q>UD/H1aʽH* $<\n1:O23sasqGq <~*-990c; {۝T_ Hؾ Ik<"baapw! ;=< axs< ߛeGH'݄=JʡPPLvbĠݿ 7=[So 2+^䍷nCD gOLiIvbd]K'u^S̓ؼlVdr?<5 㪱סgFduTl ʨƧ~هnĔؠD\}O 8-|?L`L~XQt oýC8L5AMLeefL `Tëv9ڤcH|hȋ*-5S^|  6F~;R3G#EKhA+H ڇ:0ghUß>B v[l4j> ol65!=1G . zE  GCx nijݎѝ#`) [Y .ߌ2Z[5n"5n |]'x 1/P¹&P1dpKRuS8 w~'b}H! ~FdQ?qUߊ7H!&<?y3to\YNL(+wU&@b2)X!JBh"z9g.&+mH*hJCq!y# R\+C艮4uSL@O0QC.`A'RDnn@)s^_t#nZ h!z9?oEt".M;Иt2 :54 HU)IS*68UHC鰟$͝$sCqGQX hJU</˅j*F^ZvEYq= w)8 ʡ-kpHMk %(G]0-Չ8?l֭iOFb^ Fk&=|5zEqn?2wmAVI?P?tBoP 69ؚpڄ `5 *,~)N܂;۟}B.+!*bH$_. 7$1ԅVvn'*Ǹ,`b\sO"X$C "n$ZXH-xlAыDeٽس)/C%$.{1݌cHh6i1 $Y(ȏ]K>g333 Il7J%!Xoa;SILyzn`@i{`adCE@cB5 HH=^6^i3,jY5+ܦAWu. '$aby1QSnmwfٱ`/#V/قLt:LLX< iס7=;\+ ܁&$LY.[F:?Nn9?@)2ө~Nl lS,*!f[FlEE, D7}q)?(^MΉnHrҨ-a{-YM!(q{*r\qtHek;bCP]A܌:?4umނNdioL/ ()]DT.-u,3[ KĢgck0n -gikݾ7ScQWA*B~*~ PߋPIMok/o%G3(Gqyo {(.[}[|mQ~UƱ8ludQ[ݺeq8e:P8'{e!6؈_fE1z)<,`Ea;\tY3WśK70DK 9xwpt#Vg.+A'ϑč̍-vn793}N8D+<?5tj8. aj#(XӜ/lM/1}鲦ȲCMvbCx1C0.C+؁DlLS0_#-+uy| LaxHZB\#@e j_FI9{ zXi8~ݫХqEJ"xR u3$ kW"kgkS3s H#T#oЧ>|* Si8, W]=SD \1?|Z!|xGnX)ItOk'Σ({&kX6{V ˉR3w6qߙFBQC{ UY,Ղw<,>S q5xm_]؎ZބZqW}Eijo=|聁N2]ɝLVl7#߽i!{|4=SϔB@T;#B< _\hRԞgrXNz>4N˂O¹`7a eVx{ZUmjFCV[gSVMc臨ksn;˧'hFw -!]b[:KM]xlՏ$A4d{:J}"_*y+ G*2Gejv%#扁]"FO<&"$ B a(.{$YVuaUj5xvU]~9\kN>8t#PV@r@Pc[M-h_;#>YmT*Wc.9 7]7 -|v Ov M"?uBg& OLL4;@ iY Woĕet|\^^M6ϝ|ow^:ӡ4mڟiMB8?!A}t}|( _~J %@{!-?.t9O"C ZѼv;◠JfϜFojN!P \밖&@1M;hB]$(=q83M6C]x>\3'U|ls)EӘH4 ibj+E rl^JthdDn#e]@J6ʭjaa+Bc$X܍j;M#FD\_ux3;. 
Tp 0' )wkWϽr۷Aa(EE&%nhFm ʣKYsȋDn\px:sHMNuX-?)C4x!\EZ|ɷ+1f0-_YPJh_9jbhޕ;K/A&3yݗˊ C:%&1^X"|rϬ<)x`հ2нWh*<1s% 4fdh&M؃޼>5l]ww[aBUPXFYi *hx:쪚.V2YC#ii}NMS[ 2mU3lZjmrsOdqP,cvg|CEyIdؐogw%@uO=u_nZo;%E ^FGOq:4 7"7Z\?VggܲZADTc #oEfYx ~ͦ+!(9芩 A{.4.ӧ @V>|:htOx m6 FqXx<"vόGYg#![b3RSriؽ;ȦQ2PG+H' sU]0fsB-ُ0o4iphBk+ʿ4=tqhF,ܴþ Qδ~rǢ~bTN_ ~nedؽ{vn^Ln r+ۼxQvW{O.|V}׫FF$R#p/#NZ_c9ڵkBfh5 iOrеIߕB¦aZz&񂿇yY*ꐵv?޳i ^DFA^FG;6FB Kh- \-uaAhv*ۏ^iD8qnM $ ] rh}Æ#ؼ) ˗%SWBLO/xx; ޴Wvv+:}`; Fב ;V~q̄0r،rڧ#̓ѳW,5nxs4֬'G[~PS3gv[CǏo%s^Er10tpS ކڶ }t1h1rT[_ыj+S7E@ݤtB@Ԍ%4PMBEֈn!X?%jB_?BOtWUGc_2**nE((-+=A]R0<Ϥ̤4K-DmZp7mǟ^ȥu\w>-2kBYXl+{"~K-pPY8O>Y9& - i٘\]FU kɁtȐ%/z'έrbY/&zan$Ķ;d:zG.]16*t($+Ý{-x|׾ClwbV=G{ОZsB,}ic~6 @!pi/"KqFDQ8}(kz1q8c>^d6ZƄj@=ՈoUYƝi(-E h10sᑩt6e.s$Js Y6ϗ{BFHOLJSFeg`w7fJhi)@NC:劎 K.fx\pu3ݩTa)ܡw<^7#DR-du亲.⥫G;*-@'p/dhod ߐj`Dx<͈Wy&^T@KԪȏs(dթj.vӠQ:j[Ǔo=0< eh⨥tM&^o- ikЧ;K/۹LV/k+NϿ07=~=6p!f@N,' {/-'g/utEΤ\lYOd8@IDAT >H^XO&1!Ů7uUB \R˥huQfYjZձ,WO 99M93 }Rc2 4GŮ:V c/.UV^?]|s[}d7vakCzH(1KFv2x ~x wڈcM*ŭ/-!&) !P TUr2ՉTw¯*nװ⛧_:LSZ@ʽ@&Lmi?<}=1ʹ ܳX+r;Cх\45[_ xxH;!q"~61ĉnIU>5VM)W4'ӕXvP(wtm6AI!eEMMKh֓rnO[v97=reŷ凒"+[KQ:NKk`z"fz= ѥ+ [v'N}9ʿB@=5ef$O}>NΞ)rZ1HzciܳOa(- bpnOHM+DrZe&:bFq,;lBG5z-45L%ط#/-rL#hF~*%! bk~&AMnj,WY1}Px y= I'yTg)_EϢ-:>Jwh7NoyԸ٧Iui clbz\;iMr+F]ъd`YJOQ w?n5ґ_~JW]1͡uK>6RňR:N"9=lbQC0Ӓo :< #w7/c/ࡱMh :dk/-֊/c`MR 6a|tҊb18ۭ%uj4,-?'bIXOᔂc;#g\gOrR3wv-`x߫j1x< mCp 1NbFs<W=D4A q`z݁s'vASEz . 1q6E!}S;͈#Z&7S9K+`1E|VӼЮtTAYQ>lx;?[—gF)5;4F,}9{Y-6.9i %q_vF:J%;#eeV}}jK*Rh1#'+~)tmi]zmamP^38ڦ)PZjf2QPSwǮm0xy<+a}MT0Xۏ… jCtliwOpf3.=7 n@[{,?;vc]W_ߟ=+WoCIv1~$z 0޿zkm(@ǻ;#wVÇ>;G !P Hn'A\ZduUԈmJ~|xqI]H :li}أk wbO0;U .1ATH/h[]MFr|>Ml;ꁧm{}CQW^Y`~NX:m:V=\b>AC:o:|r\֓Vv;1@#RPFev7"?fU_ļ7>łٻQ\lGh(^ВH q L ǒswN{]c6D-`ӱp^".M0!J@^>-狮W݈E2/Tnlce& R;GJS]cN˝i0/1a7S'6cZn:_E=wI8K_xkL[@f v;Ƭq`ގ-~i\1[bpi~PԿ, 5%q@!P#[w#%o*h9[m0Nxo𦮸g aW>قGamX,[-n'd$^?pw|@[Dl.Z{b;Y׶"%niرhz<~ @O 1s7qku4-ZTl} l3UčI %Ku^m9bZbf+9b߬Cב{8dn]˾X z1&EM-IYB<F9|0[4y&Ocbb(pwvq2k,VkS))س7II9.FaA HhXih%977?4 19#UJ!"/-6=,sBny74rUo< ?~=zGΡșVs1Y7 ʀ={]Fn¢)w0#'ؾ#% 5/%cơ1͸hc| ߂жW!\enS@iiaj_%I_W<2o.5!n͸e!ؗyѯtG ?7PwhI EX+GG9J 9~d+q-j ƻk;xaCwg(FAд4m>)G޾7I^~=:lB>G`g }O1=M۽WA)2!Rq]Eo MF^5b#+>5ryB-H/(7 'u^ xZ-/&QŮ_VVse{6oN޽Y(**j4 ,bj4Փ_79M((̧ɍ&(sEiԁ: Lвfn#]QnQȜb&JXKi1Ou oov掱hm&odsq-7 +_<6t-%Ќ 4hxy{LzF͢ ƶ}6JGͭfDST1_m"Cq +$ .$sE&F\9 Ya9y3HUЍB@'y|.@{n#ώmu-0l Hw߭W_Ƒ#yA\\+}wW$pqq^o #l+^ůknn6֓p^-W^/NA}ps߾"hum{Gݢ>hfnp/FCQr 4~u?bbWy<> ~It8e4rW!s gA =u:߄ _ҳS3ig Yg 3McfЕ;/^v?% ݺimz?5qM}g?a,\]H @’-jI0~v|N>rKl!(YQ^JK/{i$_طz#2}UO!$7/6-diG~wUTKM{^7AzGAQAT|'*v}ᏽ!XH/{/!$\I S93%Z&hQ[J9ЮJ$lJ2. ǧD97i} MA*6|.AGmo5lH 3g b3j g-"(_q"(B{o9Rܬ= DFRcz}w SC}&B<1l(oɳf to<]s_ęvQ-]'rCH\w@)]1TZN#pσGѡkOa2&عctkِHs lO<! }u% ଥzsOV+75*z.OgOWZb tq/.݆"?_x!c4Ԗ_٣̜-8s'Z~WapIž3N&>0:c~x 4%PTXȩi2X#Yr RΊgQQ1śwVx +`8ƌv9`w`YzbNakޏ_改?ڱE8j=dM|^h/Bk60c'n_^)M3xu|4>I5x")6o9G^cR1d(q FZ|FaS͕(NMqnj9o@h1hQ ?+*dk@xF.3<{7#{Tood@iR"(S3W6 VxلP6&axeؿ>{ގJP~OAIY`n]o˷[`c9;G"Zu.|*>~xKflC -~u/5 h^>*VqY$*5A:$?wߋ⣏:/ݝ*Xع}Rӟ-طk1#iה)nl(lyf4U].xcZK_ǪT 40 0.q[+ ,r!fxKNE_:ׯUQdl5~[g^-# DbT>RiVA>$[E?+zKct^aDK 2jϖJ V7h wNs1i'4! 
%+ړ4ỬDیDq117"( }eLES S3pIό6ZwpKp=;\ TE A'ix]rXDzJ血, ZX$ٲO{s8=ؗi._wErB1p{j ||0zLg#f ys-RMsl:?̙%S~~{<~ӏ0iIVJ25 GAwŴF|2YO$OG0g/MϷ =:Hі]%Jȭ\7˲xMH8KyK/9oBk@Br0Ȅ4=Ï!@b`s/bL:ş~\ghj~Ybf;inїkEˇh~ǎG,̜9 KNMCoq񩓧`]Q }KDz[Z*U@ٳgw^jVF% C^gEU*][U=uF0/OSU)Wfc#ҍKY1Ӱs[f5Xl iqɕ/RiW0\bcf ["f ͻbp5X?Ge/_f2}vk īwϯ@Y5tb{]bݷ1t}˳ŭ, ]s@?7R?=TE2*U Ko~{8Ue5nT b…\`7sU!^ L}rzw_$}*8.j}R3/+jש//ڵGAFWKk\D3 ̨-5 S33CX ;bϣ/5.ߢ嵷awaotEzUysTaY>z[OYUR%J3}zص+liK@ZJ`wMt40YH6KK8[!YFʆWWz6L- aC]"{N(MR(u334c#X墮ZUy)2k¼T:%cЬN&o74mN MWi\*ٰq3MbnWP%J~IZW:@cleWn(b}fҩ}tB?Ds@6x~`0j vNv4TeSI@zr pP4~|g 6-݊aC[bQd퀥/ܛ((LcSB== ֬ H[XT@"lg?K;c`J٦$]@NVb\7ԔoVI*F!j^iDq* VT[/.J*"tպ+Ywx0/v0EHM A엿5&׀C- *O՛ y$I񽧰yEgc,kprZstW!:$Bi$vkBz &[Y! an1ƈ;5ikD{^%Bۑmqx뺫H&޺7BHhS u\ԃ#pB5|ԿT C TCUлw$n-{v`ɒ}Xl~-}ZrE#4O'B#.tdiJ0+A5[юT=ջ;!`Vv1ctѾZGcR\8q=yĞN@j| ҳ`5m]CgpF=`$*T7kFW@o? ^$?7l* KӻwӘX+\ҋ/Nbgюxc$%]M*5DD%W>oF˓3_`gC! u BJK@}=l@-(t oġCش$sܴۖlDvv̤q\Tdh+j͸HKfXʠk@UNk4!/֦_hCB7C ,`WGo1?R#;-~c+t&pr0[`p6)Ci@ы;3M\J*e`_FgڶST\`J ڷlEBXnܐlvF9/}"|sMKKœOO0""}XmLt~ʨ5+`#3)%ծl%{50 uW3KcI&m ޓX՟xexwF;Z` q q;&$>ܣ2sk̛w/u-C0Ѣek^S{ۂ,(Q"99+V >Ê7ߜf|XCmäd@XtϏ>Gei9-?v⣏&`Pyf=!+Ćl sQI*F!xE;RAVvA!D\x7rN˳P [on jiUDʓ<=͙>px6o:߱xFh@D!>a;`sAk$(a7G텤dli vٍO؎oA{;]sG)PO"vޟЪUG 0 [epvWKW,*BMOnn)9R+%pz*u ċ/LPR5%3yNJR2{Jr.tVg{`BLp 8AJjO%Ez=qmjUnn .x後 }|2>QaGs&ί7W"SZBzmއRj@*䙑M+ED~<1YHH '۪ 'zpoh mo;g;ؘڰTa%;`rl w7]ğLG Y3r`7-ΎY׿ pJ ]7f y ||ы(*ή M5B x{ťBd*#BJJbccp).!**Gx)33'q@t)scHk(yaf5*UU[ϧç{QN8yVޡ Z1iU*C) dQ;0%>bt%80C+^PDi Y1D^)5' sx Lpv/ϦBɂ`7e= .;bpyj`M({'6CNB`5Add B@G ]''`gbw}tpܛW,*44!sQQ yv)\7 eqaUyɚ}}79ïf 3JHvDzF4m9[Ŕ֡ =AWo|j5] GG0g>Zh +prN_qaz9_5U˕" $Ŗ"])>c[e'ct i27lvHe,'O\K xA&n\ ̣DX2)g[j^U%<]p-yҽIa.úu'׶A[es;o@~['[8pCPO+o~z-yqCxFj#Z6:K%KztxЛ y3C>ݰ!5%aJ߳2N oFtE|b^ =醍?kIa Ы#P=xxkr.0Еk+de_:_owd[#p}uY&'~&SӪ%Ԗ⇭qog M;+GLkߜC~d*p35;qmF͒(vfwu#ڧf$P>6eH.,CkSxSU=Y\dr%z(<_@/_ Զ@#6 š~yא-`Ѿ>M1֚rg`h8lMB:̴[$3\Hl_GGy9*:`i_M:R3鍻w95X+aM:--0OfzM|F?ji#?çmسn?^zu9^~y -НɢE|T ?o hJݝv5y_rFu, 44GXhNHV^o`RNeě WFagfG3Y H@YWoMRͪJ@@KW:ڪnHO{hbƎs 6$KņT4H rR&tOظ<3Zx2KSM00 N'&9=>h4(c}xo1li-ݟQIֆŒvQ(L[7!٢ x#a Xr,ǥ;J0S;;{l?l\HIVǧR1e+daK+g ̥p:֭=Mkb_c)5e x`VAhӫ%ZR:8֖`CXP4o O l"vڍ5㉧%wd3O {A4> TLn%i;l ww ua.;CPwpW?/Z8r4]h}J&p$cS"۪Ò[jI@:_ERsTI* ފVk9ׯK115Zk@Z)#N׭۴lbi_74eHAiySL 쀃ȣvVW,Ъ=ԮF8;#, EQ9Z:=k/ī}{T&Zå4ke9p m!ueH` P.OF6=p#Y^1;fhp e{ :-QӨ \TZ2\,smjr8Nuk4= 9cV Tl= dag ~r BطZO=δ#=<׌I/vIZ"A#I[H)`766%X0{TC9seP7{hQYwO ڴ `Buc[AvcܪU jxYY3E#%|iue-ALQКT 6J|7;-4#03gЁ[j4pyua\~{`mbna!Xz~wpz >v8~&ܽ?_ sJ0+6EәcXvM1Sq:O%~lI#d3y2̭9 raI VSBRcV:WC5nK@^Xلj;-: G2Ĩ_9HcPA[swEcgG-,pp{[`ÕmHId Vh.ڣ Ah5[)B'>G|}po20Os.M :lē o.Sl')Ck [jhԈ8d1SBx,اlxDU6–`7 R3Q))7.600,=zsӎ&.,(A6iNY%Hˣ<.lL-0gV "J[6ԠjRU7(>eSû ,jIl1"*ᩙ+3`s۶8Q gp`sVe1!L}?m?[a15X.М!i6cA!k~&3?ңņ '}#~\;'n ^W^/z`ר͑Ǎ}JT 4 J9/j](S9GYJ@| 5iؓ\e'`WH& piCF` IZI?5Qj,h;r*[g[5G-L9H(Oq8^n svbC s`mau9op[{}^r")mPqޜ)cXM QӫP%P$PGrU.Ihca7^/)__]\lfü=3gR;MN;iCߦs@F+aXޥ;qۀQtL~J{dIDG{Г }ndf]C}GmP"}]uPFMjڼĩ6X@">WzbGz`jccλ:R \iLbq>9(+P wi-^KYH7`@3C˖Z _7w5~ ~i/voDQ%Y3 t >p=a5=Q:bK-y0^a^o9US%JUݴT_jJU FtGϭp?rw˵6-$u& 11)8uJ$l|(Nܣdt̟Yܹ+Dֿ$[-~Vj~˱hXnI iCԭ0ym8B-QJI@gB{P3ԪT \[kpNdSʋj^J!Am"p oC3=o{05Bd'n\O`X,3E3_cKwь SϛA8OeьɴDq_qjͩ}_Y_ c rMD0]8R*$@N66Ԕ <ʇE,!>mr*[H^`zKp`ޛ:"?r˴峧Pe(ٙ,^Ӈ+_U^Gre"Srꩯ#*+j^G,(#kp U_ۢOGLTW k{EGPy lj=ICe}|^}lA`S)A }{_DK+ -z@(Ϛ@8U=J> LOm~_OkN`K Y+Ӳ\Kk}>xo@IDATdņ@68fC( /4f nE/)vnZ|6VA֊ ,Dk˲ȋ^ h6P\j0u68 媐 5j-Z& xط/k}v"}3k^I9V|ob„ \)[= @L&SnW=T s h5WeԩSؽgٽn⫖^ H)'SuA#Wo -r!fxKNE߾@XFBJXr+ǥ}j{I:Y0%M_ֿ??OﯝC$`w/ <؂ y#G{Xj[-קȦ*K`1Kjd&]`- #|*k|` IZqa-dž<2*#P+(ez0z*TQ3R!-uP] <Hkěs0WQveZ.BD̋r"`W,||!דEE}FWs?j |ޫ7ihDr? 
n|i;Mx7mʕo1O\ {+nu,ssh6𛩲Jx/%W;V7vʀ_y0b"9]9Iښ'i몾ʭI}5I+2Z},/hF"ڎx$H>0vl{ *URnt f@Q- .TҨ6F?YTIJ.%@P!-de;y%1yPٍu< J055[ɖ҄G…,Z4tcShM%UThne}mz}c,0;Twxxi+&pVR[̰^9yG ~@`#c.NౘVtU s0&T/P%А$ކt,2HekL^/\|Te^Ҙ[m ;+ g#O#~9 !!/]{o=5Z2c/k7LmMj{q- CAՈL\_ ᜴GinW#eeɹDU"Gr,n)M!DS{ hGGG!V58؏!66sM,ޒ@A~7"C_&&96R<927ҧPhPoCK GvP@iw [cܑ~WˋDTy.Dl PH^iϚOUsEFTB+eZ!jM`ffJ]98Zʂf1[ YA/\m͎>DJ4 &Vxj}17WkJY1C,򔅊=s}Cӻ[ mssСhFy;O)\PYB(-$VF@[. *J0טc2(r4 R-o2p0a$9W% 5}Z Zؘ[3Yƚ97*7X`fMG+| XLEQ_~A,C uKk9ن4p+_u r ]R=Q-XK <.h_)T<9n|'7J1LK ه JcbRy;KJ@Zz>rQ[W*7\ZG.}aJ_F oNJ>K0YX߅ @:G<&ҏiܒ%oၖX);V MM/2cN`o= xig NfBmm&LmǮpphjEM5¬Wj*nؕ@+[PQYE3}1‹5iw!/]NFڝA&HMAs?hΰ®Md:\j1L!(9o"lICmʕ@x'жmWL EVYr)_ͣJzlII1N:+>o1X@5f.$@C|lYPϰ[aBuE8 >]boobΥGLǬn6BS[r97~͊3L̜R.LǶ&>󐚗cҗ]a?SI]pו`NO۶/:v`?抈p7q7鿌7=/_UJ*USeUc?Y98+`͚c8z:i(/aM]q_hZtG bx^hWCO,w9s` m[)]&&UK@'V_: +jE2n¾۶'$8>hCAS2 Xڕfsr8@`v>>BI]LIyOxiox%hÆk0v[ ef ,ÃQǾz ۶gmg0lKJV qw`֬% OS[jܙiD^J_A\l| W##3^h C"|aghEP+ P{SɸJbHPH^ q 1 K'o@Sw  wBnA/&҇=їU%Pk ֢S3VOܳ|| ^iC4 )1Kǂ[qH A__x‘Mot P,vv|}Od1&Ps=6ŜVu˕h1cT 2P p=il;Y ;ί?|hO<䋊o1oUՕ-a ϿbÆ4fk/>\sխ03޽1s1$#[aXx(;ËÊЏ]ϸz}u|: ..ۺmp>'gcXq,r'h#rm^>xs@Ҥ@'#ͳb~A-JD&Զ-Ҕcx_cuDŽxDd!xuQBwU3~xoɋx g94k.7]qn̩[@M% Txxu6.Bi]US~aGɵ0 !bAl7䧀 ,stApϠ ^Ncu^|pƌniSM![R$60Y%. dZzX(_lf/GF|.GEВ 2&>jpoM2 ~eRc2WO<ک#;곿)]9]+ k 9QFEEU٢6Lks)U Խ52/jY Y5T+WDn{b#[w.d` >eyJR?fIp?*9F5oX-=v . Kه4z!xQJA=T%P!BK I\@bËe)Ӣ%53zzKе|ݒ :ж+?<%?= p7Zc 5* x\SjZ^H@뒼gB_7S(=-wŘf裛Sҏ5ͮαQ}m֭z+{ ?ُƌTQ% s*s 0#yi#oB'ѣ1ys:ScZAiB/mp#BcFV^7|(e+1x44[uDB EkbQ|)[MJJp<)}/6'I># tb`NgV33Џw*8X5]PSY*,&Ku!N^N`UZlx c G|;nvU1e2ux;<fx}845.&.cĕب(=? OQ:WEɵ>Ju[d w@}[WkU<6s)gxpjH r$αaݚ7t&ͰE/,5&WV۩{838D"Ѫs pvƷGvo<Jt՛QI՘~6F$Z6ZzkL/ L&6qz+jhr 1;3'O`uLeN/KDF ǍE3?_`3MR&p|`?Lm(a&kXegTc$3bo||YE=%XFxjBj~4ˢ G.Yg%k[U9iwdqYL۫Q |&k*6$6];[c8I;F'p#PeD.;`~q:m$#g%PѰօU TOV!d >> >e22 ; a4U$3X@+z}䚤 ]ƤWQGe~UwY))ix:gd.RW/wNL!= 2-/]b_UH`TSz8Faӱj<9m4 ݍu]˶<O)ycDkĭÔIclQ;&nV;a*-CqG×>lF]|mݤ,ßw+WtuqJEtGjDZ8vo'vMn>+*V$ to/T1W7Lke2kƛ>ǥOb9H%e8<4B$vkڲ7N>Ht~a0#0ArR:8C@ 54UPWc)iȤS@uKV͔ Ċ`C'p.+ҁsewZV01Y(,+CKR7HE5EFT D. UśAN2WJ;-WniY~xi9{EйgCif1og7v)ҒML@tFGP,0Kq>םgƤ =fasp޴gRXLf2=,Ў\joar>lp}閔M5AFV:'"D:+xN=I q6# VF)73㹞ݑO0&V%`,>*K c6-+޸X%@|0;~_\gg<<~:b1quCQYS>OFAmavְԔ#.fn;ϧgg"jgA`Q]@[?EVL~9nic|{.| G vjG?sPbO#)-6{cw.AN [sĞ>b-pNNARj=ݔbZt .= Z,R ib` :v~¢<::AV>sEkʂ@D&Ņ(&hux󍗑m3-䓧[r)yWrJv8c)|Oo{FEy;=`n[+S=%J;~\6J:l8썅qY{qVs&^e8o'Z.$X{þ}au*4C9߼4&ץT9UՖ |U5?]8DUP)%Ĉ E߱SWS=zRbqa?Ԏ1O[h|{6;O-U pxM6Xqp? lՄ  Ct^9J 1e&xkwôpW썋!vߍ2[<ٯF"WW%^\:ݫ#p*.nIy;e %񢜖<ߕ^5y߲m{ F=>͚C;[ exг_< y+N*v`OڋaKyjwػƌvR|+rMmVa2t9<8?6?`gWI;SǵBq7 $L#^Xb{}Wg;1-XvnrO?7 }JUMŶ,xc">22^rkp$33WZ):]b"-1oI<xë=cV43NO?F21⧑CrA =CAM9)u_mclSZ$Ĵ 1ke}*;-.+-Š;QfCK5.-,f=^ݛ}YODYd#X|VlƦ[@M$p3ךԧItV|ȕÎ_8X~qf2$`8m9ۣW[s 2u3:{&+PɦA9"4]@NN.^.nɨAN:^8=LpAH:9P Y6~aH>{vsn,4CrˁFz7wOU~SgcW?@xtW`pvU6i-›\^<,qK B A~)&NOKUCQs"kD|)[S0w.Lx|6lxqo?{;!$Cj~q(&<%z6<7Enp2I? Nw~qeֵMXعDŽp!݊6}KM1O|yؑ슻İGǼ>]ZlwE% ^njW_O&?Mb7+>g971uʃh }U<gwRo˛q#ٓMGWQHpd"nڵ]y}ٹgLj9ڻޔ3O@wb_O򰆮Z2S @f d9D=Z!3OopHAog Y _@sƄF-mp4fVbďЧY+47|-FF*+Y3lٴ R|Q$$P}]RI(e׷'89\F:2,m]󡣸l.K{RE PZZ\.*਌Oz^ <҇}̉rIgyc}%^3>*Qi4ܩUd&\Mdz˸HKX+̦&Hࡃ[0=uxq7xX"\U><%g9"/y.'٥k"||IiRD2~FbĚ+c&E^^GGTx$LzCK9bs3jM]ggg綸cTtx Bm[~tM$ CMy?OlÙ\k<>9SuPO4XD;j I ǿYރeeaZ mD@cvme6{ ױFTj2޳bS4ꁎ xlUFulѳw%BH $hpww+b-RRNo)U@q-wQB n,` 7vgޝ3w60z`N8{-̢d5  &йy]e],F85d.H̩Ѓ?܎FG(6CʑG)'?_~AӺ74tƆ_":SUYv'xݲ@|m P.D04Vn8g:P:Cra݊uȧrI)? 
&팶bĠ~0tPk nІfHp2 ׍Q|]+n;!e'à3~sI\FzӑO-OCFE`eԲ渄* z^&,VsV}ux{b9Hm>ueǚ;+PG3 L Xg^1uvɔxF15i6  ̜Ng0r>,_a7ri9Ф}rgs30R 'Y ?_tJϧsl|m' PL ̰hNeRBWOc !g (:pQp";`Y?8*Dalؿ (.q,쏏LJwbP4n\EVbbjK YQ}I2c*G."YvLAz{=Z xyM.\ճ3l܅+V"$QSzȹ%/ ԳfJXq&\ݐk ^L?VNq-F ہG"xI0÷?锄߻BX[Ę>Hw2r0&uI=5P=.y.z_ߏwĴiݸP4DI\)N;#hi]'ahW#.TG\t6lZr#Ľ~/ ҜOE)R/uI="7JLQ"/XJ@Z$g}n)EE`-.lߵdҌbF]K3LM/fwB׹"0!PYV &YiۥG_~~!Ơ.5hKqa Sd嫒~k](ػu 1y@'>V +BZJ_kԃk.^j9,'LH ="RОcԃh%Rw/a>٬Jǥ)&dy\̟X +()e=^}>ã˗㗡CQMM쯃s> #٭mB/_a1֌b*AQ;’fRZsR\鎁W텇yk=/.*, DCg3I{LO;ǫQfH³f;6|mT swdJźϟOl:o.iI8[-;zs *ro/u] KJx-/CjZ.fd. PmY0oq]G;ڹ"P 7J:MSߑ/A9>?f\i[bU]sԤ}~3*JM1dN1*rgA V e(UNM|Z%٧>!~u\ AATV][aY1th庩*ApD1X&6;d,?_yսOGʖi@h ) /4e֜Ϥj5N|bJu+{ROHJ|KlGAbœsqE7(·=6wPRFWVAhlo~Y/\D]1!+$!2u#R މ%n!7 6ߍǔ'~b6t'Q++ 7u[tF:MV,s{Ky֩4OK!&<?o9&fV7<&tķY 4LW:hyUK~o@K*4&KebA 5oAn +UdM*<ڕŜLf^xz{H vѝWw.¶o#,ȖEԯT*\WUtK#TWK~ J$w)EEYZx'Λ mbE|ltʝK}X-T|6mێsBO̜ٗ{"W$6nM y*"e_yUvko/gwEP.zFX> ȝa)=$L-Y/_t, h?*3G+/nbazQߌ=h5i{-b{gÑgCU'A(*èdXjTxhDMJקЛD;޿ >ѳ:8o051ۜa;꘿:m .OkiJ]W)?}=jF&JCv`{aKPZ$L駡44';5DmK6c$P.55Z47ۢŶéBg3/ΠkN@ԯ|/tFcf J`qN}f```{!VosAъf1`&.}QXB~Wm4&w*[HtѡsK*AލWkLy5kVaPHȰX*EyEݮ>MX%9VV# ??͖t7[k2xU5%WB2 U^f\k~4{]3c e%lHee&alQP7'(mWD7hcO|o޽N4߯!|<lj;pCj1,(Fm̽*K)GlH*G Ip8piSw2 K>fUx6/[]~zjW|F[;o.+W5زr"͹VuQhG&o9ƜwT]+;JJ1~#~ ,Y: ݻ %6=hw4keYwHAW?<Ԡ!PZH#Z+_ޱ^΂e)Q H*1ޗ_b(^,vxdщ_:7- :‘H/ )8ʍEdbxY*:'TG. -FU4Rڔ>Fjl u0J HYH@+:+O *]܄H|Ԥ(4+w"KEа"s:E*z~!!QxҒ yrFi)iF uЧ*9EXSSB:Pbh1[|I &j"A[*qѧ%6VlJ*j(Iê|yd*Xય0fLhPhQH+OE!~̥ )_?RBB9j()h К>9rc&If+iSi[a|PtUwKUBRLMU!޳ OVCu);55΢X{ qΎQ) )*e !"ÈYXugq"5&2 ƏmMה i&EF{;i,*$ەV^+/rVtݸnOa_uW4.;N=N[tC??S[lmhAc.0+ 2WBTće]b,l?-qg>Nhݷ6}nv,2ctu^Oҍ~^ xUG_/A׿P9csc1k;p`{ls//4aC阵STI1HP wrDVû"L噽 yHNft=| rQH2OtR5@oJvEZ"ѽ]ˑ 5ϖo\"cjDig`AlD(SUus`,QTL{IoKƎa"IIHeP %[(B7W&*Z !||ݏG8u^4c02QQSW"xܡk_;G^N^)?Kf}q}OGg2&Qnh^ǓChZSf=^>U+>,?/ZP`e]/?HJ%zsG'rKe!ʅﭥ%wt`yHzifn]ömt_:Ib{EX3:#>1C++EJ1Q1F+[{qQT>KKIlcޣvY t<{ͧZ:ǧ1hZQ&IMCxz2O'%t-dN~?oO㽚4.lHB;۞XvPS $-gЅ>oyJUy1 fQ۝W[-: I" rsiggKpIn!zFpǦy/J/R8I,FCIEU" lҾ ΜB /G5r!AmKvhl)zz +od@hUBPDKvv6 #dܹsHO@zz:]ó4 EF{)#^3VocúӤ0LZd[P..Df#uк܎>kšaHK {@v-}fKKsiWK"=- YHI±uLF +Ѡ«zPݬƃb]/'>>uO2:77ԪNFgBÒRI8pۗv׭F*0S%K*t&!iFǤ!ﲘHGJpܺ8R=5I??g,rR$!2zpkҏ; nf(f;&owyGhߎ7ޘ>A #RiCxH;C}b>Ehٲ6+*tIz[AW-ZZz~*^F&&&&K`tl^'Y|Vq/^@lX,bNösU Ō{o@Z6wZ4$CW~` Rm/J$CUӀi!dc3 3j哊˒ ^Aq8^K_4ї[l܊~5mL~ BCkwaExx\!KV{xIfcVUNQuh?dG;H$7xބ}A~zܙe"^VY넯:ys'LER+1 Q1G…{ӏ @ZB6+*lū Oz^F1OԌC |p?UBRRN]{6;ƀ@u!U\]XTd?CNVAޏM*Kw= uHݔ tSWuUzlz%> \ , OEdT u2L]s8q2gOOd)0f._JVTI-輦0-}'ĞMIilCݗrUOl+%6AZUvGZlX<&]1>hַ!nlEjkx WF.)MLH޽gqH"ϜRA]PL|X:8XgcfeE+F]4֕6jrT6;SHJz^^~t1:t{i9)+mpĜMcԤj9XΒPt667Ӡ {~ ٺ+Z;StJS%jD/A5JjTyw>x">$48qP{SNSH ,՜Qz=_4{-N٣r |AMgo I@ cjZ>cГT~u}0o'kY[u gk5^d$ƌnMCrw$%81 ڵ'B!%*Q%~&' d #x3}7S߭ѣw=Z6mZnLl decy%mJ;tȝ>[gExI˾(pOEuVUUvhR{O+4 #ţ9z4^R7_"vc\&>^h쏚|a?Qg0.徨[Iet7 yQ\ W 'xzGQA:Jw*8+^E$ڜq\"u7'NJ<-RT$gևOna}HI .j*|"kEk4<ѫ$Oξ6*pr69Q$֎6K1KX(*"M0~2{VA&>lٓ̈Nb|3JDƣ3I/Gwtz`O`qca = b̿ݣ_5 ~ߐ_l0ШyM'FtNtToqj;jLI͚5PUՋ7U[^)pO 3D@KOcK[u_gR.Z:Cyl.ȿ#Vdje0vd|)&Idr͚wUk>erTu 5kRH "zԇWMwGc&Mj2t]W%y_c'4U 2JL¾=ضaCO_RẸ.H~ ϳ\Sr6Yg/}~WF%eʕWnP.ۦ/ c٩ȣ%^,mo%9X~}'M^}BYi((L>Vh]媤~Y*#Goގs9Dӂj  77lWօL,O?r/ Kz=UY/t\I&q1c5'MnPplLώ^=btJ&8e>**Y>X%v uR1|zÓѝH;!B{پ~0 &RƃMѤL9p G@cj Р]' Ь;o؍D nznP34nܱ8;&lơ~ Hk8.¥v-vlG{J2ٞv:ލBNA%S`jzޏ Sp^h2VTոU(G .hֻ%jzA'm<*mB-tCFYgl-<I`X9ϔcuMC23 C]rAy1Pߖ$28bڰ~N+ /d'kcǷc2VjKlQ/~O:nqc)1Lsg8&ѭ' ۧlLInU ^#jaxt @g{inj6R&(P%0\^΁۸ {OUJ4p(>p=,Fz6hR7NJ$O?0L,-Քxi|^X}.Vq`2:ܘS ^5LuXwƧ_]N8^<$%:m\[[>Y2[c-H`X\P {)]F}Oj.9`o"бv&H=}ϭ-p5,pE u%0)#>sֽh"Ӌ9 tYi3> m$;q'qA^YF 7Y<o>Gnev1)@~4 ,܇1^aѕZ@/U D>C/g:How:ob68t0j V2dhMEqC!Ĉ`Os]5o@JN -APpή&ZP~YYyD^Nj-n~ƳBѺWs4튳K5PmA;[9 HsvZypz+.̉VEg`m]S/ph7Zj [td&2(ϳ 5V~hEEW9zmQ'h?;D~X=?Eyuƶ1k 
#YR]?bFhͼmzB.0InMV뇞Îmn@KɻH3K:A@~pU?v5Ka65psfF&syجfd<'D# 5]i]E8Ip[6Dbz?@N@D=jh 8~}7:EgJgvV/[ZĪ@mf;a..`x8(Nm=Gc&a)Ѱ}z}`ظn@z""(-o^xkqMcv͗K0}ػ"7ĺFgY0nlBE.H:X IeGOSBRR+y]ұl+(n7a0jYa`[r5*T.&%ÄQbdLмpbleܝ-Q-`E7GzEH}O4^D J0Ő垿sDÿч3- {mX[\cr'E`B>+ 2uzTC ݸ鄩쐩cG$,ߨ/fmLF ) "*T*sYib4PHH2-v**Wj]._#4~ fǁgy%dV\B>иl2A2VXJ{b#KoQdib, |QOl݈+Ρax` n?\]i,eE)*/i0]Vbb^y&ut$ٓ۰|Iߛ3;!id攰d[S/[!r$f"+16#Wu%MLrhBtGGVGVEpyjd='|Y7}fmF _?ƨMn4LБ`Wv uq);:/qj+6ʡ3ӯX`?;Q~V~Uv U3}3xCuKd򪖉^q(yٷ7ZՓR$cJ,BfJ%uee/Ȕ_SsH0-4l24Ap|lDaJ8Kay3s`fn-3Er%)-lm?/f+/>x!nj,ίRv/jX t$jw`d-Otf,aХD.*k!"WSGBvb;K[' NvvTĂ./3d; VZ\j,ENGkM`sWBh knaoQoZU Y; LpJ6 dШ{noaM3ڷķڎԁ|exulTiHcxV_MRz >Ey jsqFaGbxoDZE39,ZRuMWYu'Y-ûݶQslBzZZ4cr͘n?t4؀F[o{1mZOV!x5Jr0k"ȇ II8|$ͩ LFs*IˠYUi?dxxvT=-iyb2g6ѯ48#qVfmU q*L$ZBD |r荀7O#4F|u;5F}?5v> &8'm> h6Z'5xHi^j6 @ҐRlqGh6Nm Gf C7:@XOBgEd[.yߦ=4:|9Lmj.!m;Cip 3MM;XcoMD6Wp"~ :-ͧgBE@M%`8攀a]p Ct&+jq8+Zrmfn郴Jz{/t|q<ꙜWtVkSxnU(b蓁FwWҟvSD~)<>1KLoF~j?֭M:uH.mj%aEB Ә~JZ?jܼ,~"g"܉$V+]^1x7i8Cc;p˸SPxٜxXaZ]fHC~sZbա@ɣzߴsS\٩| TV\RyeR0xխznW,e4@+˺tE]r,,[Zxa +ݎ,;d(y aO*jEƘgO ||+neLdl$GJ3j(MЮ] ٰUUj)Pou#==IPvx-VZP㕗п$.L -=O5F=vEWD l9)x;h4nTQ>++O< ͱvT3w`L*Fq̥A|lOPdQmmKFNlͯѯ}Ti;5=$1)`r} M{]TPuUq꿩rECLE֔ʯ@Y(Wl1Ƀ;GUVAnAeˎGiU|VDA{Wsi4\"g&Sط/ǎH`E{`#g95'MGM޳/W.&8EӨW%YF  TasQn&s`q8OkYс}IARLO#; %G!=0.HZ)v!1 6%x֜%q`s4E.S IIz%%@ vRSLҨ&; XfIGKڢjp[ [ry .j&\ܨľh7:Æ`tZՌ׈jb?YXt,G"D',阒5gIn$$H`oX9Ñ4A-ұfM7W4)Ő;b~]FDáy3g.Y,*)I>"/( 437q)8-PB:B{7V3.Jz/7LJ@vv6*Pwwg΁-34"<#Ax 6 \qCgeeBa5@?y<`݈v#gNAV1!K+ӼՁ z#/1}dX&'ٛ ۷8?LMBĬ)?0vBx Z8`4k,{"V̘[21IМ_65Z + @Oa|\?kn!۲")* XG؛oOF^A| E#c)3igg?hOR0VL3汻ዟþ~CHŚ/@c1ꕱl{w1wuŲux0p >4.߲.\mֲZuVř϶xA)lҖ\i9,傕\.dJ-E܁ S"N8e @LDF)^+%\ h5Fݺnprr(7i0|xs|z,]z;׻+_)Mːr82DScGbq:C:6 hƣ'''88,-L 5[ J! '*dv*xMUf߯!B{Å!.Be\Vxgx~xٞl+,ฬEGQQ>ΝKi_M^" P5ɑ2g"I[M̬.,@rjt‰+@US)IZ2v [ú# ]1t~ Qh2,utcqh4xh:(y3 B1L=~nVP5jЖyJ㏀g B~ KQ~^{; 9fIX~.0,.^W/mڄ#թL)cm. п 1ᕞJ?g(%;Ipa'˯ŗР'F E^ XǃD5-îJ?8BX:Zs ys 7H?Ise n "9EU*))2^gpއonku *d"%]rMK.g~ǹTe#DnBB$=K3D{OV=?uև_MnlIkuȨ8EEGtv s>MC=1bdsU3d_v7CqGYo/w幼ffc@ՠ} xZYDη 0qWS/T;!,udyR؎fc"Xs[" k#76ZVPS˄Ƿ5]X6ۚQ`X_*2"-J!?x)/}/zpk?^Zp2Ȏ{B\2h$Gɢ

&jRXFZ_l@` ε@"ߖ'}Mu]:v|T)Gaaj2uwn x *^^~nH&eNX_͍O7t^ D,]@0sl¸qmyRБ._kme z+q?t篒 }QGXBWRԩ "x !`gX톗j,]]\ֆ*mbaaŇnW_#駺Rĝ4a%J&e:ՍnOOPM;"e]c+UGչYxUa!O '?|:9/y/cuyp$HksCSUOJJxbu^17Zm!؄uNѣ?35yi,hhH7Na]vxFN Ch:!!ò+Ȅxlb{uxF}}b?8t(\ nVg\ƯF go2NdPrUt*JbKbuubAn;U4L*.Aߙ0fdb4u! vdJ;r˨jo4JRBqЌ ,}/RrK0F6HC_ Kxh! > z5nbĤ).B҉sn\8Ub 1wPXWa#ؾ)xF'iCcy.Jbp$ }믪;O@JE6THh4=5k% І F=4YpnBh-A^'Ғ:Xy 뀋xq"{KcIDcXbbL%{(*vAEQ@78=;+[f|s.]?܄4'Ҋ Ÿˇ^Kq(D fJnXkkn8TժUIeS^{8ODQL-Q46Э[_jJ(BT@HebSb:p@ʧy~4Yl=0aB 4iRtLNk*^4J;Wg4lT\jχkZ">ɻ0oұ8׆Qx;EВ嫗}_ymCVU^E^wnbd9~|k ْ1IA3*![/`S `Νd ̯`Y7SZļnA}Y,ʧ} @s@)}eL *V*@5 m3Z֥96+F\W:yE

ioFOqlѿ8Ds#mƥ{"dXX#rǼv,ٝPa }-N6?bJ/B<(\;u[pwYCzHd_K8=7'U ѨQ\j#DE(hreLIlEuA- u,@a@IDAT݇x~%_ɳZ9s -ZCh~BƙҜŵkرK sN|8-|Ԯm_gxYگDGБZ [>pKGʕ-3cAΏ]6Q%S GB9|yaW}ypt NH6ןv/\.b/bRIŶ#GO{ *hߜbOX/5(=d~OCޜwEƌ(|:tCʍbN/+cMͩэGYt_3HO\3t]^MpuKiγ{M\Lolp<͉LVě=t%;Jż\'HCo& YPg=nj=>s"F_OC)]p8Fc쀞a5WӫQmTZfyQ{b,'lrO}=~YBaZѬnN9XlѢU\ Xr+PkoZ|Y^G,л55.c/~'\Q3m PV9, GEWDgE*(?ٜP F6h@<FNjMBU7Ynn Ս6PG 3)n ${)0 O`IՉ4p-Q&ъpjhD&: ,< V + 󢏔"$ԯ_ZO$uYDV4}<pֵDb.vԘ!3n3GD|%<\1Ԉ؈`&hBx•(8R* 52 Vr!_<(&0%,RrU q;}JAֽ:5v[Rll$nC7\[ݏ-I`.V{ ~mi;)N7%k|HFX'g0B0cfȝ(]Ӑk*Vfwʆ,K" ϒݖ#tͬ &+h.K_ge5&1dm))uVY. TSayB49Q4: 5ne"t%})gqd۲ټhGZ8Q( wXKw3֮x&:Y1c/])H䤛aWH.(QzK/EUѨiMt]kރ`⨞p߿imD$9;5N|L:qg0s8ƲFIN3\峯qf(&DCx^cʰ`!:!rpܼAؕ*Q7!שּׂka:"^n [tD3B8FlW'3\\w?vf&Ex11{E䕨R ];a&}>z5VQH[Dqf)]ާM{Vռ<'ɟ6%=//~ƺkW#9,SWzY=V3z],V&6hܹ]#*&@JjQ[GM\ =/6)[+W&dYbzR&^A2 o/zۿXH#"{vR~$Ûj+ըڮw -QPF"C)-BbʡDaE VJS󊋭h%Pf8edH ?}76-{hcKz q_U;`*^ S{OWIzk |epH_VNkncëo?2|qNƙ7Igq;^S4;4*c"P-Ta(ʿJb7o^8^Nъл`a78# 9J(G[@ )~#~g6FO<ۢ@KQ8%ک>M`$>d5*d/MT$rD/nۀi?*b.Hq1g<)v/y>Y! yQ7&i8%-ϛq@}ca0w94RC;qj˚r# 8OS(MkDk㘦Ը__OO)!edB<?',έV~zq .!ln'&^UHkxk%K1X8g!v/ۋ ʡFj(Z(Ez _%HW'11t mgpxq>~qt¤ֽ ZAG Rx@jdCp&)4MCW,bb'0 ݻ~ JBd|Ӫ8mݺc].+*ޤ;9Z+;^c4KIRy)Pixu \T),{=Uo#߁`aKTt |Nb-* [Pڕn) ['aS8X':>+ZI{}ܿ>I}| ƢwAShР#,姍adO[MZ%c)0Ö-0ǡGhKld5ܘ91NU2ǎIn @w(jkRȨV`6'oP1%9!r%P>xB0Yɽ $ULhfn*aFwDV4yqp,o0%"#Ъ[Syg'T(/LQ,N9(XߡB\hذ8i# GUS@g~,ՂP| 74fl5kp5ԾC)jcP~j\lSOR~LG̜5 [&_'Q_n@?e^0}s VO #)Y)ApxFpڃ=yaOCI' _<:!q9xWƚ`0g+#+58<`L$x V =C RN%C1)L.ԭ[%JOҒhDu-sۻIFط'yt(:ʖ-IpmɍlODd8f0`HhfddD-bГThS$Uara҄PByGݐpt176EGX BGc.::b1a\_PLhp F (S <)ÿU|,|VH +iQΠd,gw4yEtT8∨aLH jU4g?i Jtbϛ'f9kQ Nz ?# ƍiֿ!Lwt2~+0*_Ԙ3!lF~Ax\E'7bsWQƅ(*aXbdlΨ)OlڼcFNsTYVKxJ̡ãi>ew-uϾFdNGKd*Kz4-?!ޤhͨNA?NkB[("iؓK } %;SjI-RŁ  vpvaT+;d65S[2%/'@:D~iS'{.Ͱz ;f)L2uv%yC04xw>CN5K y STFM/fJ%HӘgΜ9N (8"*ouW,_x0~Bkn옌2VxQ;" +s]ԫWaMsbYĶ$QcѴi%Ya-1kͫЭk_1ÅoCD.(L J_i'Ў :s!7+cF(MA: q|uFy.$(j4m#"~=i3K)T9"eTHF̈́bibHsJDo(NX$ұm5ӆ=[6{]&~.2f)f#ƴi y] oĭ[^Dqi> _]f={I SK7f|~חGOtJ)!]+SF40cԩSb ǥKp9zoFg)dK]<lj_E{_B@^< )9ʅʢm!p+Tn%J" db՝3&X!u0Pz!T^vx@NMn"R$Hs!;,DВZDe(qFKLFpr͉˅[ 4Z:~(pLa:U,qWT%⚗/qgb mu)FEkK-mlh5۷C`AeS}/%jJթF<Ԍ& dYF% ?/GqCo!*62f M8f1BxQ3"`dDͲ UNU!wW4l,-^z3)(x3UweR7FqIC2lL4$o͔D:''{£g7 -;O@MV15ݹN(ӋSPTE~r,mCy|J~/Mz~SD0率5::%_> MΛoC?NAM;fpGW{Dr.f1Ë3KCPz [[.y ]\kv 3蓞z7uX.^qV[|FWޗ~H# $|QvzI-aT{hշ*K42~w!jK$Is[{,r]K} 賔S jc"h5728&C~.^ 2uj4on{)uoݏ\KФ6oK{N)-u9%/Όpw%JܙFW C;ykֺӆ -. ^i3t۞ܱKOyIXӗ%Dx#xY+ɇJ] 9C**I%U21DC.V[b/,0^^%!Z.'cR tܮ+Tqÿ_ryQ$ЩOh >:GI"t#1P&Jr"u-qRui3I_o~{~I4ה"Z(ZO\J]rfL9kϤӷ[$38FDiZwc[HͲ¯¿hGal1A'2N5~?]!Ӟ2)&'UY,Ĺ1ȪVOI!ÍW~k j'O/Ʃ{NIN+(Q|.ã' k ;S@mA@BR 1>q/bմ5FLxKpV NVp?8Ht',(DDA@kȠ@x:p˩ MV:Ux|ۃNUyDRӣޒUao=p#0t 71φ87=.y&6Pj f> b`OPy53HE4ؐP0F`| %׀Q%IXSwuaq +&Fَ5m1A>Ch. V;t+07QD $_;8SF͍ ᖓmN7Ǔ8tA  #wBVXV]׍六*n AG.;ʹ$ K,t}#Zƥe`&uJ*GOLDT9fh P˃nr{e*UI |~o57W?]/pI,4WnOf*Q3_b^Oz[!flr3ɭN3q#[cPT|Qpn*@B|.}%BI/is% ;4g`B("C(sFM`<4#hB7U?pi$^[wLOZE_ς/2>%M~JL%kڧ87<`C;t4|fr7ܯ⑏?e` ߣx n_G 2ㅅ9#%~0\)4.moXۘ+YaTDg''VϾ]P\W(a_pU ,{f5&FciXB^ĺqjl9 c:G nYODpt\U}Sc^1fFm }S-M2iRsm<5G>jQTJقTl\<5R *r]<9h4"ԁ*.<]{勖1 _оm4Mq'h+*V`$bİ:&2A4QHC  :DcbzVzCQ)dz,ʸ۲'PYe|rƌ+5 /SB#ęYV9Z-G=Qbe܂:kn4 v&0ssa`t*"껦 !T;q2o_akcS`r>VQu9?%ZL(AaWҖD;|`htb6KrMqjf̀߭G:p appˉps4Vު5';qs0O>R9:Gk4ğSFahLhp~0}Y5K8zh<.^sT(`u2+ ".jeo@8GNpk1,.7hIS8e{.@[MڰyV1PhP%g|Q3)L9L=%>fkrFZ8܁+׼ sHH8\ӮN͚~(7;6wI43KV윿jV}dNs4ڻ;, G>@2v&3sQ{.0Fqll?-;|!+y~S\ &ήK}R^=z d& Pf"EN8 KBK} dݣ̑cBt7هQp͗&~mQR" 6̏͋!Opd;E5Y V°qpr<䯂./29i>}~@E8EaA8~­Uwv?xuSib}"U05B#z&bwpHPWh#tp)ᓆsHLZMv|oac{a6:9䗴>P@;b` K 7-ϘA87̝6kfuϛ2!w61Ɂjl ? {`(ף=GM`bBo=Ccg] ᇕ"ݑAh'e{|I+6V{^<1kvcn8kh֣ ؂ >Ԩhs \፼Ŋ+_ dú mZVę!}ٗE^[BI; G:Qx9Hto*詿UOwI]RSW)&棤3p*UTD`6Ԧ  lx\ ȡ8­zyݿ g>ɫ!pP;dw Oxx|7V- ٰe);m. 
6Xv#:}R֜PauE mלr~W}jKfVYf(W_p\ZÍ[,OIS@nuhܸ5eР&:fFzrgwm^P1p&0f:Yx*LJap˶&+i51 x"~u$ap%YYF?Qda-*;vߢPҩe{+ EctF+Xu. 5WfpZdLɭ^M6,c4L"E#<,fF$ ghѠgeD1zKV^U`dKi Ɔ~Ub,n¤siQ@UgNEƵhX$'tuC>:+Vy⛍1G||xM(zZI#1Tm-JhI;GoS*$9`biI4@ٞxڎ/85s*B_ބd2EnciJ&_rNٕ늱u0}* N!v&"_D(g+y..D?b~[Dwcj>z4N. }\ԕta2^{5s46 {Ǻq~_(͈|4/ r=w:6N~GNjg!{6ϲ+Ҩ)\r<# _oij{*Ż#RͿicҽh:v.a부rkn=2oLZYeĐ*Lmk /8wٲcDl޺KfčqrR{~&]~^ظx* k jR.ѵgu\\݌;79?q37oDaF ߹-m _Rύ xxnё]¹i/Rl8^9 Ou3,싫D[f+sgd8۝pwAAqO͉C~hѱ9Xݒk `q@'PE{:L;M@AM S+0{B9pѝmS&./zp4~i yP}wXEWQnڵL# -݀"(G&V^\EмO?d7e(^dSq+h"[E%pހ+Z&:%*_Bʹ*Z<#N]b~4[ *X%u*po\~1sTfI 3jC E9Nhx#ThQG.|IR;jn/õkkץ]Qv *KO1xD, l"4^եթD-|Ώ\N䭩=IW |]tK"Nk> B/uPeoj4Gᇟ:Ȼ߼q8z5Tv2YbL41*+'Rʅ BO|÷EQ7xط =k sr( >)Og ~ o+'1i.|+с8B"cyx*S^>#J& ɳ楽&i֞SʑyMiIaQ4>L5uA$eTq[T%i֛YKäg_\O"#9Ie?u| xWv3M ^~) 6iT{N=7]͉+4&]b$ViPM#W!,m=‰d#壛Y-X9N|MK{uz |kx?֞0*:o3L\E2C-RF-I^i.'A_"%0h^[I9j}LyӞѷ3C:#Qۛtzğtghzu庌$E<&)}䩨0̙U̓D__"wM~yܥccqBMu>IN{JZJP@/9!)aהjm+Ty\^;I a@M(IꥦÑL"Hi.DfI?D%PR ]K9ԓl)ϦWT|&<5PxGZ >#@jbKzו#ԝdj-(9$niZg{mfʻWM'1 KM>)]TDSPP^n.$UhXnxxٓ'vs@x[ز!x-^DL&IJk0ǞW8ӫX@ACSfO d7ᡡ0-7"_;sFѣCx;N:jʢObp|vyeM._}:w{ՖތyK}V;ӥ^t=P`2xf,MhJo)&Ӷ2S=s' oބZw)@IDATʥ4_fZ'2iR+"ό8Śxghh)ҸXK g'ؤKiLҦK E  Z1IcE1IJL1sqjyOi_S M9{cCc[׮=ﭲA_m߾013KӨH`z d  }DPe<ȸChlt4b_Vz6W'W -]yJ&s2iK#C4e@j|{BEt. !y%dr\l$9PkcǼMMnѲFRBxxʴ)#e)hzj[XpE5Ѭo+XZesVI!!ψ Sj\{Q'}S 3S@/f,uɰpvbz1JSLMvs A2|'9stl/S \]'L:exX^oP;}FLPTQ&>GgW1}"ߺs+KN)x8-qU}QDW3O{$ ǯ_}Ccv|j@z0-кgښ+L@WQ :hr1ގ|?j5bGEdv2$ۡNMg5h&'DDÔsg]P@/ *Hxè2e;\SR:0%6GART*B)ZnS{ާR4`%Pb/-MTObTDW-`I>-TMm"lKR6eq>NaT+˰|OWS|y9AgkJTEd^!L܌~nA:y'z7sS殥L5ny뾿" )ԆfpSΎYdNE#WD*{s nX*!ܔ'`/. . CTCW8=Qx98S+/A ?%K9h%8XRkSØ:OCtH.]<sג(橴}?#|{9\ hBdEVjs^w+FT0\u:۟nb٤g^.DtleL_'3nf̚Y`~h^!lHpxM*D-Ԅ O #|Mu A}^ OzTχ>8/uǞ3qfXl&ˆ60B4Xի9lGw>cp >ORa#iXg414&$M-m)\Diڬ <:QЖc lSGN[bbJ)CS@Ɵ4|?S%7M_᝘/0Ɠ_;0|-8._kgbG1>^05z﷞8@;u$>4G` 8y3|O$cd|CZ Mk0{@W}8hkS@OPS>)kNom"n4gHO h$AJm׸o`nH:'ht'U!8y`+Nyކ*lMCp  ݸjب-MB5#8l]]kO'Lu<Ȭ]nyE"*^"E&="%I3l;wE®ѷc3Q1?%"_# jBEpNȌ٪)^? y c'"ݙp =mn|:ocC5tDSwp/nT  "Q҆jH6$I1sp"o`=0Qhd'o+w`b* |6&qp1F-Y5|_1ܬƖ й/~Uή'>UIn7c(Rv[a +|r3~J+g!!Px6(:3/Ch'8XwF>8>R:x=}t:0c'T*S|7`_! wѠ9e+<8][ Qϻ(J§;jɮS ɤz ܂>'D83'Z-!o2ZT1> _ߏ?I: {t[to#=pf(Q9!:I`rFʝ8 b܏`kz1(ylMFL[Nvv/6RE1<~w#s O`yv, Wn~1Ʀ3mŖ?`Pڙy a7P;}YTJX9V|Lto?['#Z !=x||wã/ڍ|_)Xz g? w!ƈW^VՈϓNxHd؊+7ˆ:Yihe 'vls`.@^Ӱ^s wpf!#w\bPsW`]Ć',ѧ[kBmMQd!lM/AR9;zaF-|?hhW:'L]Jcw3@~Dڇ3wmg6G{`WFkFt) *K ”C WL})\ 듞IN Q^D/zeHς3\FƦzd5V>Qfj[#ǎMvj0bԶw Y~4lXAٍ/=#e9ˢ}Jt%w}KdS'Ijҽ9o ]`aooj-ʋQp<#ШH4ٰ?_WW /*gvu|4踖;o\ڶzΆRslϐөM7,(͛CvƖ(Sg /NML H~%x|Ǖ]uiC 5/ T{3ۂ^  -, '=1 '߿G*-*b֡uڡZWSi{ 47f丿 "PB'4u1Ոx49 Mo䩳hH |/qQ7E<}~>>9r9 ߛYQ<_nKgL:"E4f5hTmi}6*K׻Tyr %RS3o[QM/}t)x?ݾ-('ky2{efbhCXg2?uAU&ޖ DyTdX |17gFBYkO tM QI, n }QOi'e!-Zup jkL>/w `ރhTf_z~72#m{UMv{!!!{ * RD ߊņQgWPX@T IH%}KwfCI @fΝr̹w9sL9 ԄRᅇ&<_&~^ )#XFe*R0 ?.Z]9[wC*i3R-iԹg`]OzK$҆RR;НE~e=Hd[RroKvt#{`ٴg߾k"TK< ;I{A#AxBz[W2_J&|1z7]\S.V7-9 U] qG49 IUTQ /,[zX- NqZV~?ca:ϝ܉J _qL̚$#1^Բ3C7?i)i5Hpе}cn*fH,z]Ѳ`̱{kS솝ncC~3ss/1oijȀ dee##FMMB8f@o/$&-gѷq2Y9[acںa+Z!*Ycn&RRҩq IjmmkܻQr@LDzVgL؁F]Dӊ^.DvH9w )Hlަ?Ap=Z˽3rurAAfN^=`fsg=Xd[i<2h>yS@Gi)i`I?Kxg)i^LzRz֍#-7e᭩!#؍vGb"M)yp#G%rw=O$ܮh=gGtڔ095 ;C<}r`PmOpO̵GF䀢VX!˴ؚ888\wn߽ H i.6l#9=:iЯUTVNwxK9)!Q;91D_" aV{b[Lm#+Q8ŢAcJUe:||޹\YyX?//FVz$lY)o~ѓǡx㙱Wt1 1cx|6PKu07/o6aОZMZnXp2T,]J:PMCa3ʀ.=%'Ng0˳Чy}NDfvaq9g*r/X;gGY:rXZ@m8kt5{@"~ٛA KaҞ6r"&;~bV\uIʽ[qrh"_6g{ 35PXt\ק܆}lq:'?j):0n,xnnx;xdbwtۿM>܁##кI.̻=J"DÃKѯ^1T{,T &Ft9D$cYa+/@Nm;9#H@Ac}uUZ?۳}czշbaz[LJ. 
M~ުVf <7%tp U)yGS,t-s>3 + ԥzF?[z08{gS;bmr]/EtVt5ʁ*!659{ssh{n6[%Z֨ϧv R)5X\.U(pܰw<=GgoJVLqeܑ}+ ϱ6pR@MaYG!WVrm{&Y!%rkăuȥՐ{&UUJ?^_C32,TU@Rk| _^ڲ)d~ӈkX8矋8S*-*N'O[\}%*eW&m'.(DAp< ,SX5%ޚ]qZw.";鎭t)ђ46سw;ҒϢMpi?q]^<KkMX6%ӖIl]\Eh奵+]ٴdZ}0h?NqB._,Jܓ I>#ye㕴WⲊbp}uKbRXʋdӲ(׋ܓv˒Mq]ӞW)8=f#֯[Tt /Y_#EIyWXJP&jbZLs0k4wnc CL$[Ke1NN!9qydT:meOܛ3 rE ve+cE5YNǼܰI~_Rhpn;J-eZ]Iw7nYļ ;kK>; ϑZE5\ϳdJ_:2M# ݎ4pͧ_.?zs'i칡F r%6"3E\r c䝤ղ>K$]d~;Ww987ҕ ԞVmgB=)m4zг}Z皬tVx#;ҧNPy턮eٌ71@INܳe}k3؛,e&Җ)4RЦg][[j>bCғhύN5ELP֑5M+;IߤA*8iSҊ挴SfQK\[lQUK6H~I+k Ik䷤y4䏒^d ۧAYQ,r%`EʕOHG+ok)I9 MJZOJ}GYS(WJ[%ؔ0m fLE ҝmAz# Pd%l3Ȑfisd$^}Æƹ" )iEO%lkb"8_ZGR(&!X>,~-Lk6hC Xnj ?:uEE<ʡ<Ӷ,S.g`<ȵOʋ nƤrHƦ /DE{hONW8I9؅I)AO9`(t l Ҹljz& wUW٬j:W|AU⣕o2}+ؽGYԉ]qIY+/гCp 0i.Q@-DFe"wkDF?R4 ۙ |@:dHz8fm9!nark儅qwGY,˞~v5ŏٲEm9ڪtۈ0-?9Iu/>uؿy6yy`lP84L+jOEߥy5-m&wBLx3149;9`dHPF&N%3Rx' Ğ т=a< ' i&SfMhҲe@j Gwr͝ߩuӒ6]-؟<D%XͲF?`׮1"E[r_7ґV]?|$w'd,mFX3[Cd m`KKgP 2#",4l$0m|vٹ9m/ 0>~UhkAUo$=N^9t4PpON,;4j}ȧ׮gv녌d"Fq%dȅ3 l xʴ x<@_e|3Cgifz⡬4:IU^)g62;!nlʄr/N˩gQ?7[{2mLBP+|:ʎǘhOJsYFW]vBholg|Ë=_{9=Li``~6Fl1|J]gC^LAf}O{!'q*^%m |H|l!!xg@hAm1\'_rC;!ݷa6` owwnz]4 ұrk|掇7{jP9PqD *.DcPOl:(~}ykOӟċ&Xw 46kZ/qu&}Veʺ$NSE[%1rs&Fm[QBRt p"-B'A-A&G&{4@&% `ՀLjfT. Pa^ojqWK&sr*-ŷ5Ac/e_G}> vD+oWNOVFu;^3'RBL 'hTQs+e0 yjϡ>w[hcnOiP([g6h*Ǻ ЍL0a(.JKoRpwu%"`2](e0bгOȌ8yr@Hw,<4Q)ߚFQf B%^[?̒_앬EMV{9*IHsC_&wf,\fW;+z~K%_B#myeOoG׭iAm3^8(dGE^eS rRKU%㭏+>*^*m˒dsu rS^|yq^J QS+B[aQErP,MHI+K^~K}奕\>)S0K-Kf(+m.j8)AkI㕸e5L)C0a 0i)|=~Fp]k>eaWOU};A)rh?nڀ=HG-W.pWhsyV1J9֙ #9AI=WQ22r9ϲ眎nNu-irK/NpxgX_8+&s> Mp>xEX1m[a>SG0a|9Q8Qʁk6 7\{ˋXE)ZV4]bO$-*[⭡uɸx+P"]q(NWɴU)WI[N}%(EWQe@u=&5XK;7'yK+٬eL[>kѕIk-K+U[^\YCk)Ҕ_5]! dOݻUPFq}e '0XMSY(FWIi)Kk8mQË)W)xk+g}>-NsRaK'5KUZ.iq\%Z!I+**i+M&[hto\7P-ؙm}xbH4f?3Yb򌟑][6sC[#N>7?YDd፧= &w ^Lj3<=o`3(uf2f};4:#8^l D`=^wwOHnmd|z'.Ⱥ-m? WW'p*'uq@7Z|Ȁݾߎxs t9hB;R'ڪ@!Ť ΫtDjRRT1oӞK.4=!W2L" ;}vD\~j"M26>MhVEg<1α8gl‰4tCbj`:mBXHg xxyO®}[Mω|A@;0'/÷{#`-Cpk <;&>YUCʫ]$L4 յXTT T[WF/IZHJR,7^d$` ]aHD07uo8?#xgZ*зi3sS1` F6k>z|7><ɍJ.X}/haذ,݌]5HB`8ˎG5*d5VswDh f rPS9Qs@x o{x|(SRWЭ>;v{kѬw'>0 g֮4Fv=мZ ??/*~cW&W|,g*a@D3̟?&h#g*2Tj֖PJel MT\(;eDwA8wێOLD˂øk*9]|q]VVEKլƜY. O m ?ay܌m]<7? >}+85& lMTTSH:tG541eBr2-l& еNx;#asgV9+ s `6ZUzG6EY;ODkd( @~HvǷYSTشm:qŇYrTٽjyc}8Co$ۉnC4-Vm 8ͻЎ{:`ڍaP9_:n+Hǜ߷ $? 
b괦8Nav.l w@w衢.v< fD4h]&O?CcsQ?&WY$ٕY rɈ:T[\m%9K9LT9P)p/2ok]y9{#\0ݿ7 9-6 H:^mM02.5ULI6FHw<RTֆچ-v%yy8grОf ԣѝۇ(w.o +gDc||cq'Żm;~j-"b[X]k3xapKt&mPhzrQuP} kz4Q(ə tAk8?Ϸn{h~ w}XD!DDLA10f&=DL_;!:,<|St{0X:[ߧ ~L,ص j OMrQMr, UV1גQ@-EeR"GǗ䞄*WҔLWZ/ x+&5Qြʷՠr.w3^ Oz?c=`QhB{Ǥθ 0<3OibmG?iN1I9kp̟Դ9댘in3G0FS2ڈIo$@L\֒Ջ&RoPok΁;h[r^jP9Py`tSz`WDUO0L\Qw{q (;̧ O/\*JX\qa ^rL-O (2S?7r͐q|/^oN.ڽu xZ8ŸpoSL#"P^y.[!},,b۰ ܭslln/'wݥu1.qfRYrtE)尌mc{y5Yvr9 ?͚:Ж5R@sX|R4l9}!Va7]FFѣG-oU {_V=\R i^Sd 5INvr/̀WeTh *.f YYJ2w/З: 愾*@3߇atDǎ6z "M5^{:VoupQ- kxzѯيݫelv]v:`%6-=zg8 2|ʭI_P6UKl59T"22 ziPC }>_RӔ!AeY_xigԩRSڷ:H+U9@܈0_Vʁ **S'GƩjP8@v2^hO%齒MRDrF6&lwbcǐSxV֭9tC}KZ8Nk]Zʁj xjQ7:v\j&KwĔ{¬7Y^TK n+YZkݻ˭bcB%'q@@#fMF^-hh3}C5gZlV˽P5`Zu΢'YRCֆn)Z~HmΤrN '؟W 5ʁNF@9P-FC7RԶ\+xPΌLmƐMkʛ^ۻ .v DʳR[ $TN/^yj *T ZOht7ܸ^OP{LjsB[M>gxnp3O8Lf5w-lz3 h]oMfލQn//t`V5k1:Bu&_Z._۪%8ުq% e˺ofɕ*tqdeKZ hښ=7>ұ#5j{= m[iP;?ۏ!UtuYQ3B5Áj}]όPiW>ی@IDAT9- ԡUAIU Lɣjr UF =}IwHdžc܈~/QeI'?AA^&ߖk)Ӥ 6N@^'P>eykk]XVԠr:xiTU\"Q 5k)+oF|6[EؠgYhVE yxFc:h#CIa6^yc {{9{ Va,EK GS@O .HaƇyx`Dl#8A5da}u@ 5Hr@uv -?j9UIjh,Zͧ# @FFd 'F$.%5+q%#Jz~ץAhhR/3iСg|(/6\ P.YHk/SWpd_%F2բT\*${ԛun֮sV\eY`c`=0c\2kBJJ]R[$ۍ٫O" D0/~~.Crr&A4;lz7`2Hȣϥg"95I{c='wsiPc9nKWltK#lع`Ā<4eh,An/3i.ۋW 5yus1&S V7o)TknS՟MfRZSl^S5ˮ]\>O_h%V51Ll%nyh76 2փ?2jO_!SxyX#ǮXDdBhq"݄*yih&;웏 vcZ{iᳯsѲ{'tbCf|fF`q׸Q|;pmE&V>-ü m'hUZ;_P(||ݐ >vgw$N$7C+!oIdKzļ<[N`kƸ"[H{*rF!c\p^5JZʁ+ x{j~w>̝0egbOB6~n,F{##) <4v6Ћ{2 8Թ͇c#M26vWCC߄~Ϟ( <$Æj&:5:yPP%YZVʁ*s*FprϪM%f-(HCo 0.c+mڄY3#UO|qc$R *gz=jG;)㚎8āZj*b4 ֭+xn8;o?zyPN'ϩVIArhr!XW[nݐR ٭j.ܽyxemW֫W-qh>TcΜuغ86o:f.E#^k: ]Evvt)hKU( iZɍePʧVcr]+LIw~+E-drJ xk$ >[U<ע٨ARԪk-@uᆮZuߵۑU H;ݻ7̙K{ix3לO쀧=:w!ФI""u1!ug *0[\ 5ŊXHt+G-+x^pwMԠp@CbNHgFRi8pM@jlml)z&13;>ps.zɏ M$*Iu2A;<>MEmG794Q>Wz:cЈ*T Fj z #Xh~^ =Тep4 nz̯->*R - x+Vk?Wx}p }^ᖇcc#q| v߃o-û.EA1Zs[tAfA8`SZ9$d#}wYٛGRrӐ<=,=d#;`G#m]<slFFfQjiujh(& W]x2PeUdWx \2NB2ISȏ maoODHH(#6681􄏏|Ah{"5uRpͮLQ‘ҭU(fЛvw!5%KY4v<' 'vM7ae3TG5XNz}(cz5Ec5x?M[0hӱrL9ؿ l܃ nSs3Ѧ]mLwohk,PkU: (0tYsw$c8enK ||ԢD7nԐ;.vᷳ+d1] *jnaY 儎F=r2s}.9d Ke#M@$s'f3[Ypur8"81F-ЩS'E3lɟLd ]}fc9ࡇϿLBտbq?ҔU|WS}`{x[q]YZI 11oQ3ۻWC "y`ǎxEX4^ÁPꖐX&lX'Ô0O}6Btyx|(arzsK^0S*Z>ICظt 9وLJzp//W4fE>O[a!y1o6k/vKfSlШ]C4LgZଔneW*n4`o63$$ICwv>/+ SyHmV1*UMn4iRso@27͟-|T:UUK]#w{jӅؾj|qG‹/.c11 FN&//7;:GA$q َo?6 B.<1"AL%D$YYxeܝ.~>Ihѭ")| MS>e-a#/ @ drDgf*ud1rKI/`)>m-/}Yшs\]*]%-ⶰ6/ yf.F`E+uqũЄF)\Y*.^Fw* Ef[XJGm*KUE ^|zz 1 vى߾cnEn )La;R)D7 vlӋ~ǨѭvUn рϰusH R3|&MHw!DԲeXpw oC[UB`G7J 2ZdEBldt8Zn~ж7jp^.މ>_!pĕ כ1[1ђ6> ޖHMʀّ;0椤!+7aQ%A?P$ q|vlZBsξv|+^(*/@y Aaʤu8jFPWqA2y2f`­p 3WY,t (A(t "0o4''Kbe„j:5e jikd7 ޘ97/qS;5S)&2d ,3089+vmBf^l?jiӗC[aC* bSw7޼g6u dnyн >{mCaHT|(}0|F N<,C #~zI9lYv2~ 3<w&8z><>{hY0%=nƁ»/+o@jo=#1bpiB+%6kfFcѱMBEYC>&iifER<<_-Ϥr9$i{P[\Ct_|%V5@֩s ~YO?i~c(r Erˊ98:y&hֱ1=gO'c ʆ=6 %][RDBW/A^t-܆_?ԟqPkU:8 ^L z;Es@Gd:{`(K.w̟}ykOMƩxr1 q,|^u^ethSr;' yQ}P0 G@>circpZb"a@>l!5'+B•Ͽ!3&xԡD( 6H8pCvr 0!(*98~45 A+Qg'SknϺlAna?'3Pz9QQ++.veIGyDž1OñQg7D\6Q)9kyppA|:U\Sx0m~S;<1xBh5X{8;OM;f1n[s V9cע%ЦE5ZomD>O~;ksF񳴟֣: |Mf>b5kj7KwǨeQ3ÀO]< qpF?tz=wz:bwXwR~yzŮƼax~n/:A`ʋ#`8UD^XY7~j5Ǝ FA1|Lн9mvp<-pWf':cGw$rjM%Rb`ϡl!\0"8YOC3O}Ż}&&=no/5 yvfu'D⩷Z`!Ǟ9bֿ>cؽw/dמaĺX\is?p;͜N!raaU DN(6U[.չ46#yw1ߙQU$:5ɁX/~Wx~^ƚlZwMq@ ^B1{Ν xq7a7/RW(Rg.CeUrA[kzNz+;)kgX_^rNo{kٽj1pNxOm>LpP_Ěi'j"( a{EvIȶ^.dAzB B _W”_uX!1'Q#Kkiմoi?۾?<1g/p~?fۖaaLK3n<3_f[G?wB:&Kt>5|89SÚ(0Ɖ oCLHR{wcŝObwf,x:)̝^o_Y?{6U M\J?j ggA]8 v!cP$n:Q| ڐxK Rcҧ/ ֿ5 &߄h?EnO ſ~Cd,}[-ZhmƷ=O^~Z31.yXN󤿃yp؇ջ^<Т:uGUpYA^Q3TRoȊabb fznãt뛴n.F .ڦ㻠GR|OXF8t vMz"<0[Z;Xщ^AܐAs( .lg{hl f5~4H+phQc<ڵ =)yPCM(Ĵֿ!N{bm+OcbШy0ˑPoo7Wn`48`2|ЈHH\-i NscrJ&7 {y lw_Ͼ^ZhܹG\dj:|X8>x.O'TgI  Ҍv$G5'oFys5ԶMx ŨGoA0ӌ. |f~`̝_lܲ{(v%`O1#0&UT,ytS1r4+uܵ/~`>~+> >҄[jɘC;@O{`. 
hײQJ;'̟:_r 44s,Aѿ1仄ݰ:Fy Qa- f-ZB,`h0#g[x:u?Du?bxpScldݡQ:v[C܈%:kCpCEZAdž inG{U{  (U &%!?v,Dx2~g7)<sJĴUiܻեh-SN,B K &JZ $i+֘2٠+Âb CE YM@젶~ KGEYys"G|h$Z" Gwg_p\T,І]ҊT~ޫX_6Ev8`iM^ *jO<K)5IZDfhӶ>l5^˶Ǣ7]pEXۖhs21%Vل ZA%YBƑ]X>nz} v孯b7ٻ /۷1Fltw7H* i!WA %E@QDBC oc:ʾ{{xn[j)c؛H y12;~Yl#=!8yp08-:.#N@jr( ?nE!5AEM>cD?rMtvEWJEʟG)$'BqSءI3u -/Y{h:$i[9aA!.!,.Ɗ p1-CNjFts 3 .E*C~C0ܻ2={#r&WEXchKbŀ#^^9\1%tFs@z/VIy6Cs*Y'viCuO^Aljc!:S6U8~Aӯz)f7n9xG=2̜ hp%Tns2.= g8JCE@25\K}/'Y'Hv1Flh|yDésI~_=knDc6e 5@Q-~%o_0l\Bs )-1lx[4j\}3OMGcīT9=@)Zx<L +:0YƇ﭅MhЋp774}n?҈4OCoޏ2h׽>4d1&p xׂQMs i'gy?)Eu]-z ~H x\G>йCYYY!ɡ4ؐcG1z 7.P$eӋ?5[b?&>mނ<x-V|<Fas.'Z`X 0DQ4YYW`N~>~n=D톾C5F.qrLe &2u*72|h`jB)Գ6S'Rz*~XU<a9oO{_m&DB"j(`T.Jv<\N(^,*.Sc+J2ԗ_P2` T%M`˥l1Ej 4p/Gf 7:nM5$AuagtE=;JwA` jcqnEY{o~eWڰ3+|Pkfj%`FKrbx 횡FM%4+v9A/@IYF]sPaݙ&7"iGiFntF޿Pb.Ե @z՜ Hi[veJт =lPS-D3ru ͜GeK*vŠA5JLȼ†| `86j,>4JW*Rpl0Rw@CFYnj nDŽ$V6O۠WT#CeF'!&Mj M O݃C`%%!1%*KxШwtVFiИ c YqyܸZאpxVMMNŖ`倞z>!ĈY_KX0c۪~й^C3'mSWLrOG9 7]4[֭vn}{/2egM BKE* qR1aN¿ ҝ*H5|oϠR2_ߥI])U7 vh5RMRLPKssb)=yUTK|bk3h+1M`J֕_/ ٖm>m?yVKBJ W=jAJvjbjWiMz~dhPVzan=Jh*Wy,ô3hJt>O>`AoWq?u[=BkPC jtϋ=6¨PPۯWMWR!\忇*nKk "խMΨ.mKڢ_UkiiюO]-ʾ_v.m::ЯpZ*~UОGM}RG'jG{Ur>W=iGzИuչ^At|TzDۧvuҖ.]>_=9os@4}R?q-У+=n(3b }Zݱ-j&jJLdVz,OfH%aNf=mƪyЮN[J҆NKW *uqPh1Ƀ+OK+J^[0w \yC㇎MS$,|_xz8@.1d`L4opN>kĤIr7L [[q-B@Bʸ_)uLIJyGugn5Et6|Qx4U_.jI9Q%bbz8z.NqzzLO®`ƛV+?ִ銄K %qMb&郮Q 7w 6N[Fo?CDLa1Y6d qc{a ec-ж=Vqm=y/^g"&[u[zrQ9uZ1Lj>i=^al0 ?٢|S[ ьz]&;iزh N ?OSW_Gρ8 %9$#~(ΎdHOc#Lv 08"4XbV0f ~pf :2a Kl$1-d".1]}pSW*{9;%陰`n3d ""IVdm 'j?-\NEݻHHe '3=$tɅpۆ/ao!:&NpqBrBROњPSOc&}DS:#6Z_Έ)>C<)3PSi3ХkhS-&F1OغvA5k^t&b08paˤdZU)< ) dG+5x&cGن ?"n^ڇųcW aO@QÉ|z5[0"}97Qɩ$b|FaE;&`|]VP㫑睒GWRU|Sb䲢N|rNǖ{Z+@L;18fUiT>:&Le,,wJZ4 -R$ۣ,0sC%@IJ%$M.Ƅr#5f2I!L(pRΤZWɱ})FϏ>9΀sfOM:2Bzm$3{l&Ȕ"(wVrY3* M)؎FhܕyUj|z+M-d{Rhlȱ83-a&i>fⵂB eᰘp$Īav‚(+- lOLk Mڋk#,"ˣMl#+6!,qop-_-lbٸxi"x200wio8Ūs#@Ru98m'v]g'G#.a~Q"3zY:~!r> tҝXz'ޟ7A%lzU`&H 9ukl[Ƚ8u[p֯7^6{ ]tiHfMؽwDK-͓îy=ԮKh6_=mn|Ub"(IQصxsTj[MW'-i8af#e>z+-l+ x~[yu_mb16xof3e@ZD1ĚN!*)4ΣSCb6nJ%qphv`\X@IDAT/j{۲ѥ GW-tj݌+c'` 1kwc3֝F1_jaO˕i3fX`:TǠSR8K,=v{$sShvq<.]~ <_Lc&h4GܼvIzPK?6"n*"aAv޽oc!h5MJIp.IúT# =][3=m "qJ,z  M;rKid!މpf*U(0><̜'ߡWZ MJmj_ׇ1jaGWK O˜)Q=j05G` ɍ?aH:ߺ6VOC#0Q P,MX z5  '0HCFhM/(s@6[F}Y[ogڋ ;*Z'qX2g\Q?LZ8<ث1)x? lpdl|1z+uƸf-%=3x;ggys>}e2&z|'`|v`dZv5* RW<Q:t-ƒ/?O^K7 `}S>7j@-!=;~/l-q`R n2NS? 
1W~9bH~_ pgvL0&fu+A\oX.ܸ Y˰䗋 y?B|5rr0?9~xEx,l-8Oh[8'-7Ĺ_ 8r~^xq_apcr: tLƁ{p74cF_ `Ah_R8^iG9`@M@B\ܼʼZA%JƇ06V3㳡;N8zv,^ؾS0a*݁ަX˯H~=Jem^:Y7aS'v!2?.,N upl#1FT(eb@QԈZ#k `a}!1 X;~}2z8;`pو\m( t^YLKdi&HIf:\oPhnF](mǶI1m6|5eIXOҕc^>Bz)&V5ţP"{1lgؠbbxoi8_= R-1zP%"&Ib hldXlxu?Nz)>vɊ&6z5jcQfځ[6О̾F҈i]?^HĖū銍վՐth?$`h[7NŻ'NO|8q^$-g$OZoaq @j(ݒX]P.#1ג ?ȤҌvD(gQMTIt k@Md&CSVF}R0f5fnɥ-k g6>2o>"p؞yxz6Nsu#dHe93M%3E҉/} m&f6_3ň)[n{hoF hӯƌڷjDfU̟MQR2W,ǥwބ'5aBY/1C*V VJ!Vfqpe &`СKTYn94rNJ& B/-3?z6g/z9DȲU)2YFnuO,D tkn#j\i@0=ǯ!W``אO,5N~96DG*2 T+_ _h[,=Eꝅe| !BptU_=(|dN}+gfFRF>Wt>-.wlзSʰ$6C,ݰ k7&`܆c]~9I|e)I)4#+MCܳcwv<*8 *ch{%_#F )<^~”2diPuܗoi >5rE7ÂxcXԫZYKڤjp\2BSqjMǥ p p =F ,i1ʇ)CB'҆Y(<\~Ud{^e*lqdFeW ڡF-j{bKE (S:WKýGO*(ލ{\4 kƏ4*(] OНȱs,YfQNx#Ήj:)ʄ(?z[$!*+S=ez§z*{Skv _ꁹ阴d 9 >D޼+!,s3#Gisr\='pT 1`ߕ\b+\)125kyIM>ŕy0wGsc%٢Vp0 !4Kn})q.:jCr?vf: l*P?sBèoEE)5I*v CHiCI2:XK%B7f~/⽗k"c244U @{ɀV|nX4&I8q4Ӫ% ǏEL:5vG G4lނsh@_R:}xF.O;@r\JH0DP=(" mhJlDvŐtlRMU|S>jUs&gǗcICʊj2`\w6:tӚZZc#:C¥$JSF;Y90&Ɋ`Fqjpt WŰaJDTB {޽XF T:}I\(ʕ5Z:9C8_(wc i%) qrE lbq|>hv7oMaƝ Fi{ر, += l-Фo6+;| KZRi ײ>r5ypf8b BEs!AhNuwGW({0 MLn뱌R!PzJwkC?sܾ|[њ]_3GI WyH/D`{1ey ue v-[?}pqs1z:/=+B->} =E3 ( }0p><ͦmhEӬgdA *h ?ͨH l_`ECjęYiXhkb/q>,6"SAaI )rĺz `LQ7m0fӼ+8Á1V wAu&#S߅88ĝ/kB1#cuOrWZ8mJ4#DO0 KIs[Xa=K 6LV<دeZV(K~ygfed .Rԥ֫ +Ҫ,)&ئ%*fŠzT@^M!feW:ʷE_HFF|[ExE3l1@M9w4:)say2qAOcu#j5$3420Cqw[i5p O'7.E(/ah2޵PliG9CvMaLy3`1ZXS\sm wd(B!b$?TH`|FډEz>|k4B g/zn7_5Ǐ*z9 Jr7alt~@%`A4jc%]v9i ax[(f=㐗ĄM%<½!Hј+1shDk[@Ah`(l'WlWQ3)M Sh0ell,!8͖exTL儡9l((Z,XVKÔ65͒)0sƀC[iЦo'aop̜ftϠ}-V>`"62s;+>GR)-!">\Z V( aL4#Iƅ7KKriި&aȁtߢ9=8\9O04#\r0q#̸! sí& B$PtI=4dgLLlelTXTIse VN|bBG;s&VO 0pF臦M˳Qb`.ի%uѣ3QzQy-)Yt ulfh|9&')q#M-`>\ٴͦm֒= VT^g 2ͬL>hDj&azʱltjH)cAđqڎ $qyH i&mD Vvh?,XOA+$p*q +[ha 43ƊڂXWk;P'إ_?8f*zR{1`& Qps6M;,e7/.)Q'ޖ`!o5 jوK|b&L|&v,gн{Gbxsw*/PxG^H&Vm8犭ȐҦu{Èv2D5S3KH?[&K|1b@_c{m|_vo7!/kem; uבO GdX$\ S{lil1\KbOo'^@.0-7)llEoU%An @2cv4'פ뼣 ^\Ccb7)̘`R ݦ*27J5'x-vmH 8x_^ * %@O5wzJ% {nuiKbM$*:Z^#M@/*$8_ں)c'Sp[WJʏ-P⃆oZ~?=EC9LJ),h8 ơCkbX>hY'r5baLqJ> sW#α9&ݐmIS*9#9 @U+X 3W)O݇^Ƈ܅ )y^VTWؙT`Xޞ5yW?V>]ުuV +abiDS/ 1hz ?AO[[ۗZSŽ˷e@Qt1ӰP{*߯uUzvħ.|?T {Id !,\[Tp WW_CXX}S&'25\g6F3^hgG}iqy0BsDŽiE)-]ڔػ SuTܛ`T7c|h>3| ǕspF8K`m[Cl883._D#>z9r~7Эk##,8hDe3bȓgGRߙ^& 4[4eL'K Vrhh\b rNc`^Z XB@0Hn<˘Hڠh1bp43A@Mۧ ~+! 
S:2OAͰJCM|$jMW4KpeK.lfbQڶ:緐iGGI֍t3\ pFKk -OiAB7e?R%tHNSEm˙-j5㛞t=q3LOf!Q׆Y3 O zhb,CXb[cиv1 vCD_ˁڠI8zt?8DA>d1G"p@k6m@ϩh݆"PHML~:N8!016U221APaE`S̽{3F^mLץ5+LHtὢE{z|?fO_#PX*~\Z;f6JūS>)ݐ(~:pJBɝOeqpPY1x CRt$.;CT87oܡst0MXUdD5.`ФP;p ͬ  {o4ch&PRq#8Df;Q8O珞×WN&ǣ8˹v=)ݱRȊ;H&Vx 5Cqv,j$%:rQ䰕x/dKw4/QK1jHt]3~Q V^ë{oɁL +W`X!1c|%GQ3oVn̝NcLdhz+C^=!I64(-)WϣL$$KSLb2>(0(i`sy:Laa(ғ%X~$ o+C$?O;F2j | q|Sgc:IZ|v*fa8X2A=f@cث_˵Lpb*ȗøCEoX(K΂)اW C'UjSct*v7P K3y/^I6s9z gOue ׀j0bX[#)$PN9d_h:PPxig=jg7a~ Wn1^18ݥt.:fXdwoFV/bgnvN|Ԛr05Jh m>OUfE# Dl-L:1WØ9XXsnG/:-vKCcD̊zr YM:}6sFIYwoZ#,X|sgDgEp3R ԓws Ŋ`w.x3ŋ{S&yHP QRo#LNLmd|jq:0PeQ b0t*\&XEHd$'a=X 5?賨}kٺ,>ΞOV+͌/.Ck|=$݁[CdD0F`K?LW;}plNQOC &SL}C>[QWo",$]*X`X.ԩ3oL _9K`XqxQtFclf''L ET?NȭjtBe#fA%`E0*,an{5bL۱d$ .Xb!I` CUtV- S{3=ze]>q@} |2yu0v.?aB+x8?Qz.+ eҬ&>MAY0ode:aڏFpH±y`AeW3Eds S߄M n> +Gѕn Mb0l\}SgUwt~M@]^&3g+©-zKnBk0OjEŌ̉I3ͱФksR#u&@r@@;ȓL4kc55jJ%o>"ۿ$cȐfhH IͿip'$pEY?w%\.ooC'.g#KEp) RɂtVR nL۠j/CSxJY׺Qg!< liYe-Yt s nX9!x&(Ӭ>cd)9Ld ]6q3LʪG;URS, "֍ `lj4CbJsc·sy*~^I6([L6b`Q' F"5Z9@.&Ճqu떄-9:3{C㾗>Uvmk.…WQ,>|^z ʹûYq vPsKX5Fs+|8 ˊB)+]:#{)윹$~3eR2KШ:tHQA.m&W2uGw3jGDo;zMKnFa p~Sءacg&ݚa}TO$k=9{L)1 +?%GPr=-ZQB:i ^ҧ@%Ehͤ s)w%U`aH-d50_[5ǿh clG==kWy(pXsō.ƅ'h|)\l|qcFU#hpez-l)O{'2) onQ\qH/WjB>;Uvx)W(UDGkhdFf]iY4q0FVC˗b{+P`R؛"W3yO5YqBe$j#NkobȘmV]ݤoA؆TB S/D3e5S9Lh!ur$&#g3℘I" !,h?m&>iy<শH32A([~9W0.*ը>w +GN-iڮ J >[7 Α{i+SICǏ^ƥW[%Lp\,|0m1 E+˘Y:tD"YedΏt7lrйڕg="vhԨ,=.]'$(Vh%nqi(P")ySrP S^^s/oȐH5viƒV̐` ۧn C+8r!.(<yJ5i axĸTFIAPCk'."ÿ-Qo?:e҈8#G@>(VVvsqwxU 82=(^nH9tnuC=/;Z\@d%Wuťm+0~J2?LTC'\J5kQZ>F!۾}obpD!yBӃ"q""@-,~fcI$E#N|la|ѐNwW(kp6űn7 k[ehܲf.Z_PIW2Pg/qX$wwcO̔B3_":C(ХZ46tډK|=f' A=E _cȨYv9 5눺㤇C\X,La/'ctXjrbTfX}IeҝxxV~؜ do%0NJ:I!E?CKAW #ILWk_- ^)BPY7j 6#?j5pZԸJEw?!H#k74E0-ɧvL"ܨgKT`O :BPs] -ƍ%iOv\ sd:u-.`hhR`ǐJ+qeEn6M-z ?u.D%p39ۨ>6n7bJ{Z}Q3BlS&^Ò &%ѶocX1 |Kd+XQRKw>xhW4u˾R| RЦ,;=҆w\HFFhzg=[a[mhS*1A 3iNbԜR*XcfN^3\ܱ  ' wu?>Aw7 V>^(]1땅Ax. Gَ3%6b?tZT>f(U8,3kP\k '􇭅<QC^T >(ݴL4NxmR_nd 7edJDOkn9> iR|Րsښө2*@Rm G.mj %%ag(Uʑ(KI?dB[ N0gF}"s%9(Wj!=&.]FV P^qQH-9M8gFוFgGvB'Ud3*[Fz"E`-~ Ed`ЀvOSLKu^t,oi|xn#'! sgOߢj3 ߎg|F+eyg}Sic=C3"Mϴ,ȥCP4 78ўyйgt5 UFjLRG3ATk75rSW-Yb*l^ٖ{MӢqVbi @1wW1{e.U|UZT+]V8 e[G`[[Sz~x[N$MC,V:$U֩`S(E(oTQKO*U~{>A eۈm"El|mX8d gG~)ɧGٗTNYm#vT~ Y)}G)k)[j#xO3ɮn]_Qρ|DXT닞$G&%{LCh\6.yJnYݞGҕ(;z;/Eufh^Zy =ugO](%QS7V̢!:W`:CKy| Tn\>n ȷfScـdR3N֝>wOv?lKJaW)j0`EZU=Ke5ݾXbDhK޶ZGޒe8/T֔_19;۩LZ^kkQCS[C9~? k?`6{7) o{*pm+O>f<.I6ZBEM5JhŤ$$*8N> ;(.xx.qFx(5| GyqC#ճZنo/GtAg&=[>Uy@G\B#Ȓb"G`ߦCj>'V)3c‘hq<bte|}hjږIɀ5z5pME>cǮcǜuX1e) WΞpq3Լ)^ԝkYIh'ͨOfuIf\oW]?g8  ,a;hM{c./|--y}SdO'UfbCZmC ksBhSJhU_HtngQDG Vv'W\~+,;0ROl߳=}Vwr`1}jц7~Tϓ|e UF5_`)qPլOi@ThQ暵v-?-zF3?Aų_!SUz bpi|wl@ |x 8.#c.NWoL8cSIl85 !]ᕻ+w>9~Hy%UJ#?`LsV`TkX3OE3R>DQͻEQiZcB$N&44KcM2+.߼; A:ҁY̌uȆ '(i>&p@4PS:)=όJ4Ff3LaakIxcx_.KN齉TUETDv+"vA"UQQ[{w9R(.w-v)Ɏ̔4i`O<{jϧÈ$Mt9~]h؜)A IOEL1Lx&Nvc5-v60%pJpKΛabXQ H'A&%q%uoKg6_Dge O s`+?ndlJrлٳ Ɉ8x6҂*%̭-amoK,㇎,,aI&T `f5oĄH6m:{1Cay.XZ{JCbłcRvb:jO ?=s2y- ɰ̔L}Y>*ӀV?m7?tjooRDoN.r %R]l{NlW?.ˋчӒp) =?$kV.ƋSF9z!;o!f| =ꇩtbd5^SY?}Y.$G]!ޜ{ a}!LGJ >u'>C$у bK!WRhsⵈ8=Ĵ! 
`HSLU'B5r!|꒽^%X6槩rȤelJz/#إɈKG"M$$!J Q@)Y K)ޗE,nD/TA]#);J鏔CJbA"y/F-.ݭAVS,h40ܚ<д\\ G'Ѷ%lllhs z{/^ Ap/4ݧQ$;jst D U1m^ > raOOo;"haXCR4lCV@IDATtK`!Rm[ʾZ& CGb|ȴ?aHg<%,4:k JF_#lB;/7Ǐ*5hمF㑽{|/C p@[08cOOcBNrho|Bbe$q1-nu9Eǹ RT\+ɩ1N3:ulu1\[j w䣡ڍٚBbƠU4=tϲV+'Gi v/jLdž3DŀͨfF8'461^w M:(exh87kڨmjC&5[aɗ2CYh] <6=Jthgg7q~fkKp\KQ!ZmfuY ݻv^57tS*!;iStګu߷Fر8fhv7{}nz{Y@6we֧>9psC=99ԅ5,ŘdXxz>d :h~AQ ԢV͛@|=o<1 ^% 6&-"3 u~3`lkCe3"l [LJq{:cT!4ّшJ1C7%gp[z'^<]^R+yB҇Z=M'q(@jxJ{wrIfơiI05x>b EwR+DLQr>>fXYhcIfgrբbA)Jt0%)OޜZ[OLJ݇@7 dnKSBMe͂2eJw-ﳴ;~HnK!uERK4d>AJ}-4Fn 婚]%+3k<إ%э^ADWq郞2^n==\@I-J9}z-Ohs2R"Ґ@Tp0m:9I'y n1Hb2ŻsRZdQ!В5b9T; V6OXWY^` UT^T&hݚ$9U*k[>lZm0wŴѲӉʏ>w8 carz{np"F2Įߺ όFrL2`+n%T.0omԇ9^i`BZ-_:)p[Wd'.f^ONAXM`Fl{F ?Bg@ҷkN8+/}so9 {Cg9 &ZJ1.`S?&xMkwo U^__=ZHs(;r7 #m _=j%V6Ɓ'i~M((5b+(޵Ďhԯ/)؋n`Ҿa|%䗟۸+r!XG j֭*>2 ϋ5ZXElƆzͥiTYm;CͤsVr@xke艺; W{h7u鶳|wۥMnvO[M#tzџx[H⢧PY?YTH#\fdӡBzPX^3 RjO:x~pķSÿT-K̆7׵o #rUf5Ks@pV+Z[B\l:~X| ?*!_#asic]K(/EZh\Hb]7ERʟL5jtF:6n;#[C[P3ZWV/%k_ur`yuR**^dh$6,+ޔn}bMe7Zomm=]wGn5 XPuDFxg . .e?wu q{ۯ^0jQiل[_táxs֊k/NtR*Zpt+?Jĝp! &81"ZU&ϙ2-UŞXi7t[ !44AyDlj[ʮȈ;7jh`nn~}ޭ⼾ɁMIHQ*5ݜz+W  FJ^Mx[[fl{y47 ) #IL**mu#AaNw'HUZP[Lq!p-*U{L$d#pM,~Ogv^QW֐Ck/Ï+|!UWU4j ʷmKKKZZYҝ^^񁅅U@Y1]n5Y'}zs(>Q(+7,w@|S;RĖZ,) _eccxF|~e2)L߹M] aa _k6LlJK۰R9rGP\l )B=f"5-ql<.$"::'*;kh~LLala 3s[ۛ܄5064f])y# B ݩ2YpH߮AQQݚBՂ~ ?79HHDnl0ŌSD/\aU[[xxGpp0)`pvv}0g@,+m&F곩 +3 z?S^e V9pU+n{(~ApsȈrpM-WàzG~ȟ8ֆDzg`@ie::>7tLeXוTcH{An1 Rrk<ަ"!99 @u3 G=RfVf045?VocCcE&RTwUJU8PjY+=^'s3A|g;WN 1|1mK lfb;xzz"((͚7C$U =Eq,\zz-P]PV- 98:uF7n U#ZMk(]jʉAD]BI!8Aa ۸;|lOIR1Eri[+* b.5%iqKXΞE!}Y[ S}O5_nZPMPnm^?ANWX]rC--`emI\e(ķ89y<ÅpDDTX4Vl9EKAUB@`u;ajO'Ax i.\h>9@ rRܾ,kQke.>qx(>+ ֔jBշ)}bwa1cz'189&ek̔2yk8PM@C)**Nv1JJhHh,Yr۶CTAzfO7򃣧#\}a6]?-Օ@m^|hSG[ρBz TQ %hٵܲ<$D&"12 Qqxu>kxUV΅n۶m}aQ{G%̟7<qD/5h~n3*?,M*  q|:t`z+$y5X>I@ge= 7ÇqA`>r믳-I3`l` ]'z<є"%^GfR:Ԩ_!$^v𷇅=PR׀* % +Wi}[#[Eҥ!\[%8J zݠ)iu=Z<iHM͇3hAJK|9jK8ZiBI~6 @1PJt hVVPʇ&y8v2c;$ts>x8ɵǯ<-M)MY)^:υJN:,]fN t*/(}dEĶhկ3\ٲ,߀9ðyk,kKiqɳz\:!t>=HIW6e,o]4K3ue-M8j}Ѧk de|s 4:jxWTM/9(x*zsC~Ѷw+Z_ ^?*Vn<.u|?i+'}Yk28VtJ2ńpi '}Y\ۏGʥ *͎kGI ' µBGx P[=Hw_cG>q~!6u,h1D$s YkH-4Ä07ɺd 5 axgf}xz}%coǘFlrL'խNVkOy45[b mhF0Ow?MٔH+[%9X?sQS0ixcܧZ\eୡ z`%=-,] s^_ڲ3 .UЦGݑSr_J Q@@p`m4i^+k]I{k]I\'y +hӗ8Sfqp,RJ:$FY%ىXW8~6SQKaR'm`R5oi|.ڥ[A6 CG] xMU 5h:~[/`; qh:_DɁw+£+4BsEIGڅXLC꯭a?aO\Js3- 782]}|g{M#1{@/)6#g8cOzP2íBLB GůcS](H+{-z6n~.rҫΧmp`c-4q𘻅d~O ħ{v#$tAW.IoJ{1UktG;wwa;uZvkAVѠE=t^=S{iu`(.v:2dVviV}Oo@cO}Fq!6*ӼOcmV0*-: a"x̟LK*Kռ@4eͅTLlT?}k[wGiGf.ֳK5x१Pl>T$\<_MM9 EEZRuo#/:r#HpDg@ma 料:IAZ⒊`kkQ(r?, sqt"yZ^8$'' 2pT4r Ow[%tAs6Y b |ܭj> قK^<+u+]5=: pHCŠ< 3]ƐKg` j/l_ V÷?hRdL@)|~J0Y'bg_nZ%ꀌU:'1=k||Q\x5pc3\jȱ I.n<-jNA(HMs1--U|˝j S6`BBL| ⬘ 4!9c ȐśKUlpkM^+ׇ#躿o4_7u-Vd6HJkUc('3qiRrr aʶFl .*s췦ч8TW J 2.i^E%32h$rT/GT*P Җwz:"#Oۼ]nM{%u ?;}vL5uߋ:TfD=rF4y> `J; v%W51-=86mEڀl q?R"6QX"2^in1Iw|NFDs'x6B1.5f.1edOgEx6aW8TL5[1f!_p* (4t@3o3`g쁇a##2q"S+<6k 0i⏿. "D%KU$[E'=1G #P oN^^=ܰ}K<+Fe~e#hE|=y&H"+ HũDoFAV}E#"81DVi|< \+"Fcxi4WUSІg`ިig1al(ҧOb̷x'^z|SǕ( 3ZC^¨AalV+WTD^:i᥼;0oA̿ЀW>z (}ZoR.7n.䤏@]65Bs:(*o'}貿cHܟ9[`0mJLܰ ʖj%9sk;աx~lHAzeL]KO]DeBMIʙpj .AfJ9 RXrN^^KW Šn=jdа!>0ŪV㕾aO9IPj&WGUHRm!A[jYHojk]) TXsW&c3dw8ѺNzz`gE`կѸKtMLcqiF)aᕅHak-~g^~]Fim1gт7 nE 4%s[YIٔ8? 濇o@A͑~&9T4(dL]a0['Ƣkc3~ģkt9,Sqh f혅{P^;^+le*Rè`}ltv1K7;>䱌b"$]F^X7V쓘b0~X䉷WY߂ge,їNOkờcC+IGB:%}! '7Fza71#{X#\c&?\u bd畡|l<>!C,%҇ed`I\1W0xT =>S3.X_>w?DŘJ8BuGcH;R.OcE|=$^Y -ЩZw#'2JTx\uK>^Ar)Yoc7ukڋ\l^ @|# ]\oIL*ֈ hި%vC;q<|;*v;CiSJܦq==-le_ o{'iM,8*!/@Hv+g'>Cv <ύ2FM!d_+Nn؆@ܑR<wî7! 
-!t1vG7_XM 2=#5U}ՀP;>7oE~b[mH@^ⴒϏ&w93~VIɶp^۠I=7D %x( w95L4A}|_G}+G߾>LtC/,xIBr%f:'7 $r{Ęz@E/_oFAfu2ѽg>U¯٠h/ *"7!\ő}o^NNCE$Ԝj ,LM*w*;A]YOڅ/JŖ):҂+- Ÿt m@И:ݣ}Px4 9Njđセ^ct`[#0 vA&.ʏr'I.CN8RyWX%rO"O=ڡi#'ظ~> g5J_eFXL:`*\'}*EL,+uz[ gq`n`6N{ zrkl='gxYbXa-1Qhָ-TG7 91vigs8}9)Ns۲'aްsFOWcHhe<]Pi~3Efp+wOMI,NE,ϐ`χbȌ|7}PZٚg^rHXg~qtVUr*`UQ:v.ہb#lxy::nGغ!jnqK(/Knx٦X\: -T)锒E&Q{j%lg߰5}-Cpif̙0J zyZ(u _PfbOYB^Iŵ>J8UDsz[޼ P=2-AFbRh2$=)u?ec]M x@O#ĪN$8wp[! zd +ҀNL|,mkdX,{0%ؿk 5tʮXSw^VL(bpQih>`ZɝK*Iᙉ5ZAر4l:І?bvľ'o  {EP~'-wlٯxyMU vTW0!J; p7jalWJJ`(+}-lZDQ`\V(ŝ)K_|VҺԙʤj9zj18 !*<n#)TtSbBSjU iY8ĉi1M8ٖ ȉYqِ:E̯L@.%n*yPZD쾆p&'5jt G[@|FWlcysmC qD+ӣE34hE~gc0#(+!(SJC0)FTH = Ebooj^ǎk0@e;Yh/&ض9|f B*UCPVݮ"0 rppX.$htusc/AT=bV2>V/܂7:K2*Χ)w&r$ =x)v|5u;>Bqt{Z jֱ]G+Sm2J5՝,1~6|ͬ16bɳ\J+5 Pյ+,WXąa1%D:5 >s|=vRGXfB~x"#RQYlвU>~m; Bv1At[N stwNAr[GǕkU! yp7*.%PLLާ%r_GQߟfEF8>Eyz<, )9b\:~fmzYOCS>ňȩNTQ`-{;oXڹ ~a 9º]x,SG&4m463#OhPfCx aw&pgҗR[8pq]>[C\w-ͼ@eIӡbAVJܟ &_:.OҳDf~:PY>r#Hd"2'vNp +` 51=Ha~n(IIY\I<eFaDdϟĊ&*\ & 1E<а)Z:ߍjcfF=^St"ԁ(=s".b,@ J=0hp(q_N_kUw'cPYC$Ioi%;kt'mHX88\C]OSQ:jfc;gwtMBSDJyTpEI+qil_;[zBs:-) VeP&7ЗGadd BZi} lw:q1h8~~a&74EN!p-Aqn“34u!.s Z7␀ [xÄ*DxuqCf>F;nk4O G#M2uN[ q/ b>mYwprG^B6TV>4Cԉ0$eP Dب[}]Ayx0tlvda) A&6mHWfsDÛ *JO4DžPU =PۅqtA>X4::%2APrHDG3=LwG-F cS ZF[H aXPBSVy K[֘8Pe fٱ,όFMi1"h!%n?<s*Eh=f W@mO2o5;ba1cm4ƑFS?o>y)ѣ ^a?ňkUVwJ^{lZ1n+ q|:t`ztVzD٣4bR-h3NdYH%br$IQ5VLσ3Z:.NQ]A28[y(8)r`EXF$ tFsy?=7,y-"ZU@VSJ⋼ZX5\ mϤnעj~WosΊ\4_IE/עZUWT^ݫǫL{ˮz]VϒeTz+b~U-zwU+?7OoU,mNzxw }do]`<K %zO]3ʷ^낒7ppZɓy]]^.5=JdS:p;%Çuc¸64-|ߌ86Uw<Ó)N?\fЍX #yDҩC)u kk7hfe2\39uss:gd 'WB*'֒>Qpj ˆW]2.M\Z.UNA+Jy5PcIx|w;P=8 ԓ~tBWl8r2rq&pn^9T5NoBAz f=0O"]<`aK<TxOe#UZ׆rGLEpu;/e7PiֵjU.r&7LxL־Ft}Ex!Y7GG#k4/^9GՊmDAQLN.Ϥ hҞU@+s)DZ5_c֫sqI)eہ:9 b?^-ʵ3%Ks6s@Wso{zjaT4eoAtKw'POG9()(414%'.ӭ+r{Jzʠ$'5>rfŔ*Θ$R>9ƿʇE*}Fxt2 -@S٫iđF~0~^9Lu5˨_BiHD)r %LCZ2y" :yR_JiR+%.ٙנQ8 . R,N3֑>:^R[FIkŌ+EyUvx4qbzo`:Q:ԑugA"=)jf%Xm_׆~o^^L{excgHWVc \SkY/|4WSZ;v@O<\N[^=΢yU倊 eI<ޕPTLLw-jq)x_`U6(H"e8y"m!#."$T%NI"+_Vd ,聬8/ }Z GI)۲ S B/x|xh D.qJ#]X p%iAA@Ǯ0*h.]Mb1R/*U^]Zo1lJlEbj+n'c;.i:wġWE;ڕ?6`/2_x`lgx: AzDgq[.:s,#ǻhH*Jo nմebE2a7ql Th?+='jIQY5ΏWB )R}׆T }?\4R:4k~2 36C3z1mD_AimMD{}\z7K5| ^QX"W7k/񸑰[ *q y=ཇKOjMsCt-̃k#ӎڕ5]6]&}qCa 'o~ea()1hPvbhA]>~ޝЃX#m]I}5Pg^.hl];0xWGx >{b6|G@&bs2޷aoXlޘ5%??1N'aERZ)-8bڦh2`8BFZ`Ť ϩXv>/ǁitu\Q}",sĔ/'׍2l@t8~"0[{0s|z[;ᝉ;auǪ?(L_M0~LS] N>8߉ p58c3zy)XB$D"X\,)V%b];>1,v~)XIos!1lRk\ O/zF, x!9,:~K?AУ $Ǜ.~prytv1:E8ULizJX>Z>Tq7\J[m#5u=j}=}mnr@M se# A#!=#y6ҘPjw`VK`gQ77`-c Zfb4&<Ee!8)܄ eplOC}^7 FV.8@u"xböaZ8 ,XOW t`߭ 1"w `1^]")hIez~WB:e{N\yOOǢƫu ,)n)l\K+7"0a?lBQ4mlQ89s? 0DlIl탎lz}ZH༙kG [VOh"X畞UB^<jsxx: ׿ kL41~#lP)y~VRKy6.?]|pn87DX =!|^cDO+[ x&){PDYؙ@IDAT0cC Z3$s+ RK?mUҒ|ddӳ̃bKget,4Q1 ~ j{c]-Dj]| 8s*l;& S +;K͔CO߿+ur8=-t a=2{cOjfHj@]od?{=O=G3JWK|RXr[>6]}莠X [G/sb{8A{Tc36r9\aH\3PƳga%Ƕ(nz9b RfԉXy iИfCcAZrr _UƔtO vؼtoO.]n+K7&9l #K aifNВcE[I1bM-o_V*yn֑B#۽zkywAn X5X VIqefQx׺{ϵۻ{Xds Fm#Ku)H)5! 
/47'5@G٫k;5 Ńa"0qP0Ur<plY[DSl|E|~I[R;cR0=_(co]Xx)`TnQG#x*z@kxnlLvŔ9O!U ke0| Bͳ: ]^RI4E1h)X, iPȼiȤ-/\3'.ø pQ#HRt Nộ;0nBKE4t J[JB\㋚hҳ5lyk;dK\X|\Tz)A9p_xC0&*SV'~Amd+DŒ `TV Q()mqCI#^Sw9#%*5 #yԼ]r eePBŕ8(-~U2'7%TCHfy2i)u&m)Gu?M#Poʠ<]t OۯDl^Ey(ȷ"@@I$RLpjөZ-or)Bh'.u3e)udV>MJ2 v56Rk&թInb$(Dj3xkJwr}bX"Z:Sjظ*ɖW_3CcP՜*Y,W}#A1B2M\+!* cNAAe+daM3HՀldܡSvqiX9;Z`)΄W6j_v]wM7)W7>Y 00Ep i <@lv]kcW[Poj욿3VN]7 6x0r}Dz䣼 ?f4}y.p.npn =ཻח^82 *~,`oŁ҆pR`FAeˎK) ڑ*q(@Yi(}9ь#+s+!$S>ˣ$4v9QW ~ڸI@,?ԵTD;'I9-gMFP7$1KI|FxZl]ˑjCt$EB\HsU)Y6Nl8-m‹L-mN;0)0,^K@ v&KͼeExč$miMxQq-iFԷĨ!} yyCbMyҞ$MʻԯI-y,d&ޚ^I\ϴIC A23P]NMm"BB3 d*z&Г[{GAK@۱.!aJ"̒,/#0(*uTsQlf, )P@2XK, ͏8jeξ@~{;>J_c=giBWŸ|(CWƗf>ҧCKIlx;d=bHF%AO%İdJY- motmΟ$γ;Dqd{B[V9mҧ?Hb_H'-.ݕ@v7aZ-~-k> hѵWkDgA߇ZŒ6cadF~Qo<7uX:٢ݘ8 ^Z2%zyc,欝W2n`1_N)//xvMU m`bi @WnKjrCmPc@gaS5׾>&On1?~Z/]m3l[QHH]q_tBTʂ<`e<5CCiB:E%X6X00Gq8tZ'%*; p>{;sɐzطZ?;iabZ౴ k7>?X=~t}x0ȉ3"":= _<F<KāgŸ==ꜼB`A'M2'qs BoO\r pH˵v D`Y-qN|QN}_e ; AwMq`ך4'Q? }8ozU |xjzN8?{Bq4gIo .ƶ8O_ GHK#?f ia:6/0uЇg1Mϲ>hV^C F&wތArBlz`ӑ|D >ʸ[Xu"Ђeh!-0,c2SߊYaH-mNO=xM&X+"5`ɷ( z4#WsۚbI^o|,/GR1'X[{JV FÈ;?.V nt^(!k4!Z`nFRRcۛgim3 o}qĖwbg v?~V'z*f"'اgp!? 4##.tD b.KE-WhWCXq?=6kK`+۝M7oa/C !vc?b_4mC3)&bymm,Mg:5bIZ'[?RޡA _r.G)m{OSB^Ym&&x0:>zC&$)`w.#jڟ%,   QR,Xd\نKo|6V\7#-6& alwo:*MsB/Ҕ.`APCQyχ(O 4AP)[IHs7 3[Μfo=s+R2PiÅQN䵳 Wb0h{qҙ*Y{䪚V^rmqIr)gheZZ3*ĽaQ62"RyB~8s}bh;0x u4GjIV9®ߠ I92H+Φ]G7xs+Wӻ ^Ӌ˳Ria3%46.vtYFaf>[8p̸S "2U9?h|?s {]Pcp n mg B8x;>e^C ؕ|Un:Yo\9i+v V ɠ+~5j 7DusC|RolæF6yp[.0DH=BD{` c_ȶ<4)uX NǂuvW-jxVvsw[RMOǍEYqfI+;@5*ҟ&ԛPn);xe=Q\ !=r16‡d/%<$*_ 5{:M&FGWG( )Z?{W3,E4x-=Q~1}9 yͼ c8:e"{Ro >euXlҟl6CƂ<32R712ا*敷fA2d531,o[l)gdb;BC*zVъr<)99YLݲM֟5HBjt";QrWPNHgC`">qtFhv.[e.)IKqΰsB@fΈ 5[la£IJ]Efk#N10;>OKkw5n* oks, `!a5Z~ֲ/%Yɶ`7{VI^9.r!%y mĒ_;Y}vYe6|氨td;Yv֒~[sT}Vd3`EX3\ y5& j#ďnjk H3wŸIWrV3ouDݼ_-ziGmmsS*maU;5%A T헖H^>'M[;;]\q|W=YӂIi]R,9, pNxW]GC4bۑM 1 6A0|}@`ލ۳^˟gO&2/4v֟m}H[ >0\>9.y+Q}IrsZtS_syO&7Wɰ8 ӑtq;UHkhTmJh@9,sZWb/Mzk0ey>Jz!rd]YmV+E}pp q݃2:j3[6 b^S[Tr嫒eRctoXm\SUCDr1&jM&Ákizl+8RMWb NdnD{Z!iܹyAX"Ijl蝢~Szsh}viFUB@!PX Y5ղ*Ix qd*":r;:_4W7W+ \.k uncC F|H smwGz] يfgr! +g'toUG sQNb*3LntUsBi'gDnlj1\t$dm eV[l]Z&r,osy h48S;׸ֵ4uɨQ(?Tfcq;vV/z )\%K Cz`{~\l۸[7h=2FRJwpM5mQ jW5X ~H8rg4|k=WS.mLHO=^G\QpGwG $/9{$4TT>=oIE3 1T BBB+KuU⺡lT;19 %iZA.NHbcXFP/Yʭf,/$/ඊ ]F2PĦWþ+еs|kcOsmVWhC# ̺ Hjqfc.Ê_GPyD'i-R6z~(Q$׋Z^`?,_cà_}]qEez#Xx]mSzJ#h{o~A8)] z`G; F{;xͭ\"R< v:[ RBF@rضg$' a' (8##c?OM9VW㲩wùm e|`ёJ"pt YdH<#<۴A /Ӑx3wJJnbaG3 >]7!X˜xb$'/KG"J_)Kh ?:Zb(,/ 9t( n!m)"Ј(S+imOƗ}X~'M[89,m.r! L V.>R$!>m٭GU%'A>AHٓʐȎsm_4'9Ո0 NrqLn\˫qh1T9؅n{THB>#nS%v\XjEk5g>u/`O7y-=3<~0&c]}[ö" dLǨatoFYU|03 9c@^4i(qO$՗KaW7o'@4)8 .B獏.O. P3/5snWNUI˿himlk>|p~ *qռG0g,z-$ S_+= MoC#ɒ/[F]Ej`'d^2pB/~ڙ<($}OZ -G q,y$o)1EHɧ`avU>Ƅa{..|ql_n R)uϠ'px [/IʷDڋ-[[j7}W3e#p8m8Fz@|{x}jUn-IH;~'޴'a;j w[Qx@>}HMF]/'gN^ x& rìuM_?#9fB1~x壭(k  3{EWƅrSQU*  >.{k_/6 JDҮ-8p9 z |fЉ~>h ټJaQolr.^|q,HU*8a":Şi)aPOKtfE`E?QJgBzf5~IӿƜmc gnU0C=܍ `1)vT#6h ISy~r31^vEbQN_gҌH;T!߉D0߄K1O^칗#yA$džW`wVf.7? 
Kkz`/x-:zm\IS賜fh&  E~Xp3*iVP˶Af%Ff I}u<`؊[@o(I)p<^;쇣~Tr8/:[wƍac& =.`" 4#y*ؐ_y~~9(CN{b}F+^?.YI{Y;¨4j`ا%&WGsfb_>]:yql{N݈V}+ EJsư[^q9cO @ߡppt89Ei|V"a(JPF>A[F=jϟ?D@^Ked2 #ȭGZNVP^1 qF+"betG:4Sgn`DJbi (r@ѫW(COvNkcDFIlTF\(/wU=KJD6cf H8ٸ/(/wqxx52#!:^hoZĭc "ASGvQ0zz‘!}Ă vWG_U0AXvwؕr=mơHyU*?ZeABz@J}phSϟ| =^y1Vݢg>3@ݼ#uc &̙&wuͰdEavj.8B/^c.o=WN{\ Urґ e`DrY;BJlb߄c":{t g#6uCt 3//_H֨O>(}_|{/_KgP{6"`8 –`Wñ`iOme"Ijjb2= 9dzQ8$}XhKU#t5#[P"-IeQ( x6K0$[B"7r5[<ȷ0>;Տ/~6g6H苛ϖIAĆᣏm9 ؎0o ePG͕$Cy~X=qq@rБ( C0k~hjjd5`}omh+Tv kGO?o>ˆDҞ &L#ۯLjZ]S|>Dn铺.e>L|Wf%cw \2oa5B$u)z ޭu4-j䐞j F~:ckTh$)Q>M53u*lФⒶ!F "?|Ub[x+6^Nh3>DQRHЏV4O"]q%A3+bm]g۬ZOxwp7V.\h@ xl ؏7Zsbjr%t+bC '>-~ju??6(T'AlwoDHDÙ N>"xѧ*5hۿ`J?K{kml}:#g£}-4 ?mQ|HWc%Ё쉝mH)GH)N\)^Wh[X>!n&={ +24 w .D_Ci6sH  ; kJh^W d#׾-:qi?z.4 Evti־_4\hՂ!xQ,\k^7}[⹀ xқ#Y⁶ۣ}׎/I.W{8ttnavEP ]뺠mvУ#{!;!ev~vҿjT;"qPٕ&"ti~ d˛)z, mgĉt,}s9<vxп%?ҡ|B=I޻7cPFJ YDw6-#ǯ,Vr֊zn>5?Π&{\gMVTPjd3|s=D]>/0䋯՗&cijýiXӊ{2k }(k6GFݵtEIͪX"ɞДo\%rF=ocad]/ioR,’dm]-dq _^rbc.E0 Y,x;񍉠Z}y?Kr"nHI-^̦ <ܓ,S?iEYl&ujda^hlc2܋&Lf9ٹ\{;6֕`K;n8xxdb҅ǖ "$'pBBW!p.Jqq_4 U% VC:ϼL.d jlS!i12mO xoXpDpd0:茮tAhD <ɠԼڒ)=WÇ}Ks򻶆}s4UCF1Ho%K-$uל$~Xʳ}|gvN_zýunXWkcśЋCu}"!dnqE?-ԯ]zS9qvx:|x/ oq!c}wѩuŀHrZZrX|ki|MKI&1si{7Ʈu?66 =^˷ZRa}0A1bQL["w_Wpo"77V}/{Z֋lL`IqPq\O^A>LJ9'D$Aj }:2<=C:T9V{Fd=zlIWcWb }z'YtLH-'TF)dx*F,oYG+ȡC Lz8I͡6f t_DZڭ+Ν;L[,2*{*t9B@!P%JJJp!O> L8#GҴ,  [khVWW8{U89Y3\,?k+nƦgIpUBM~u -l̈*j$e(+a6z )-,?_XJr[MXDGG#66mڴ?11?gX+{ΠU+ B6Gbb"I32<2l,r95e|]M .W'4y~Yi! gy#$I2H`m/D֞2Μѣ||u mpp0#uۀ`WWױ^YYSP( s[!)//Ge%_c42W 魍*v.Pe^X A.v-##GH1){EɤP( B@!pZq B@!P( @B@JB@!P( "gPUB@!P( @B@JB@!P( "gPUB@!P( @B@JB@!P( P(.S#OtGN/S% &"(OTsz@sƥI9lc ;s]*_!Ptdi:Q q8i&3B߲U%yHJ˫\D[{фNF̶~.6)˱Нj؝ qLN?"žeVYc)9  $l v?̕X 9"|  x՟5   TM'9u侙x-9vb]@(+p cî`JO|<kpUl0tֳ,{ >w-{m/N,T$}_Ͻ/iUX釰=B֖qKG_}yJsZ #n+ʡP~pt2[+q1" URD'`::9 7%dUE{( w=txw`;wC 䍊JٴEiq!lp5h/wJ+`@m_GX+Pͺ]P]ER^^bdw3u5W#y6N(/AYa<=(iokahRRpH{9 >zqOM؛*}_3؆ Z;1qrvF,:_:ME5R'ö'Қ وn.. ?oaqזW1 \=h_K̫*a6Ñe57mP(-@@(DYSg1zd!V3rm}7݆}ig= ck6'\6W jGaױ,tނ:cw?ʩѩt1K ówij3CY7a->tj*_$XկF݇LO 8&b҃Oaڀ w?þ ʮ5ޘl4 Uj lо]G+_NM7==l`Єq|j<<$V@c{xll 삗ߍ;"diq967%x)X ?xa%|i1~YsC1 Y@<".>^^$+$ɧ_ExXYE?o^èxkԷB@!8#&ϨuB@!heT"(M7VU ΫW[R1qۤ+aL\I7݉'ބ*j#ԘD yX ywATڟ-c%(4Y/LX9>z_x. W`DŽQKCKϼ,mLGa&:Xج&S5 !qW`OǨKØ>m:>ۖKNkF"s'>/sE]dopЮ0ހwڦs"}G\ & ߖSZ\s=HD~'͟B&7VnETh 3mea*ƒ܏|ȁ0=5*) ?bP(v[,mG?9;k]_]ij3?Q{̺4`m˵훷j[nV/]Fs쮭>X%y]6wi/N|Բ+w99i/ɚC,-/mw}I9ZqQo#Y?Rίj6]]efUdm}k%%ZIinMڍ4ZsڨvZ۴#'ZEҷo;e#?/HߵVZS97iph}1Svh}hʌ[ZY6/~@ Gj-#򴻆Ehn4GP#5"EwWۜS;Y3\VsvgOG-Y<==P+mP( egg zB@ 1!=ѧK|m#/=VA;[NJL ߙhBc0Y#GbQS7Pa~_mvs1m[K7!5+e'U~N۲h5͌ ; U|i7&(c݃jFs7sT Gnxz n^xm^ lӏ?~,.s`[lTGyoO!p^S{J ~gdf8-YxWO_.ѵ~?xm˫-7;1I;:c߷or=A4:a9* ?@s?[^!P:ĵϒ,ֽBiCs*OʫU6.E˕e m6 Bų81'V S{mK Ƽt#|5|*LgfvC3h6zj_Y:u:6tvDD⯓'וUw~8;rW1:pF7h0 }.^PR^}WjD]sojHpni3PXrךQtO Np+nĶ.R(-G@ޖcr*04IDATFLYSh*>Î}1Ko)323 ~T6J|w@N덞=֕o="`ǔ[Gb&$yyIM O[}*]@A_bz= hHLs [MNao!*j/f<$~X3 OZ2RQG[E6='&J2Gz&)L~YhAb<+J1mx7D;0-؉!,>(? 
F$J7ܜlq25b!`ü+q1Si}RJ BEk-JeS(3HLt}eQ# <꓏cuexQf&^ܾiHl̺i8 ?btPLd݆^x}1_^OX_-µ'Un+=;pK2/y lݴO+p ̨vh&&37(()]#\ss߂-n8f OpnN5Zf[?\?#lFaeg 6024oOB|p["ؐ9k6mo?KntFo$Qj nޜ\ZZۡ  `;L.T( ֌@M:acFT :[($؄eVSExv1kHg%`۱J\6 Dg$7dѳ!iVd`a+k'wO'~3F\l?<GwnU+#4>L8#uϰ_ʌS|;;a3pFh*}[`.V!AN(OsF^:;ƠW|;򓏰tz'w<8 MRuW_ ض']܄/B(/NCfyyzEz#,1}= c#6j2ԃtDfA.e#܇4#o S  !ַ~1#2P(i`S) nPX\ Ww/~{e(DZ:mo.:k.((< Q?JE]:*l8LWOojMJ(+Dfa+x"4*J.:/z(g!BU!%).~[a1#z%Fxxx2ESy!eSk/$Sy1R3@ك| 5 2 1 m @IDATx`TUkzr #Պ *6tu]uŵ V\ׂWlTKBK#:}I0 `#ry{{9sd.*`L 0&m ȻmϹL 0&`|!0&`L`A/>`L 0&X5`L 0nNa7L 0&`A`L 0&93&`L!_L 0&Xv `L 0| 0&`L`A/>`L 0&X5`L 0nNa7L 0&`A`L 0&93&`L@`L} zx{zBp [ U5k0jLc=aNt6;<|ߪNn::Xvs=aݜ 9X^\\Jw^9!+s]9^+l_T# , GZJ<ܵgݶ.Abfo!}&A~䭏aL O?}7 gu*1v8&)᭫%( GH(Zv:իˁ ='^O,֔`Λ/a^n{xw6>L cmruL 0 XPQPJkc'* F Ymm:;V|?݆A]i!)^ MLO 3odX,5plPOu5j h nA֜Q,瘳* k0+  ]"̝3 vl{Nh|$MM[j&Sݡױ8L 0sH@$9 _,g4\Y^s?EFn 4~ٻ!Z ʕH|<ᰙ}l<زf5KjI1(P{rK աa⊪|[-V? 4=Z/GJfu r߽ ;F0O` K^ăca4=y44SJ*PCwa=49AWů?.B2 zBB#I~0U/#c?^H? 9G!Q tsb׶ض,rzzoiqaZ_""B);Qs{{H,zGZZqB?`Kaڙ8d#DD!LI3`C͹HguUG_K^>5r!i>MLaygo"O<4&a01!n7cټ}h8Vp!'`ƌC}o1Wb'B 5o+ۭES#\f8vWN1;xvl#7}.6lεQ㑇cyQ^aw>g]Aj f< œuma|nuHv1FL'9+|o~Eƃ%:6r>9r7)Io8aLhV`9Dq Q^Rǐ_7`ۅ?Sɿ]㧕 r 8=:aTLIˊ]xW.O7zEll}n=8~yʇbL oU8޸xEЫU ðaCr*ÈXVbʎ=pWlEnNd6x" !OYU(HKf4 e$܊j68Nsﭵ8WFKGb74!#1ht*ٻe^]N9b4d7?,$G9 ԕ!@zB%2%#ǐ{29`jDƆf*I+GByGÀ}J~/GbHLV l«{w@?]1:~"vN8w0,}5s9epYO4REpXdlS`bܸ%cZ;ZI44Tě%2*`tvZh3y5?)I:l}߽\tzh;cPq]pR64L۹%aR'tAv2r!䝫}!: F|j<9p *^T i͝Tаh^Vi \HC73j2Jw 0& sL t Zu46S$\;hHXĀ q#:>F2Fږt&qCHMiu!D-VZQ+9pZaw菎\&5tF HjbIrY aq?u[%H4 W_oFrdjxBp ţ Ihc[XbLG@Da"p-}[?.u?"[Fqih+rh;I}צ> C0WJlė}  fUraLB8@Džh  0&p FbBBj jE+PhAavNpI> [E9H%bɪU H9 ؾ}+6@m}vlZEud qㅬP /g Vڀ=GЦ_Ľ#ϾC]lBW_aؾa3TK^a˰m&:X@[zl'md#ˤQTZЄDR[#OnL #P]WsGH`8bBaVa-̭@UAlTLQ)՛BS6 ?x_a{n.&/_|4&a{s3 O\FM„[sBa->JC'!Ԭ*i-g~;$1)Ip߭[vHH2oy6M21ҡKIt:,N'qH\?7M\ve0H b?s(p8m־_YGqD) "b0pA|Cyҡ!`/t&xtE$G&#Ұ񡈋gy| I pHL:o>&o^ɩ0p-a,])y{gn˥fSbĄpK~^̞п߄wq޸0&p> hȩt|,&9&`kG W;-nBdP_s WPycEc(Gn|#]D>6boـÛ%Se+4 %Y;w?#/ ʋpP6A`y#I Q[jVlܸ71h)ҼL8:ⓒÊ;zzZDy&xx?o؊?Y52d0Nu&Q>kC5܂ [v֩EC1o*tN>R" 'r=tE56D'#@ &dz$''XA&ڕ v˕3&`L9q `L 0&ЮX+^ 0&` ŽL 0&hW,/W`L 0Oa?GB&`L +튗+gL 0&@'#n!`L 0&ڕYe*),,Ċ˱u6iR4k+;yr cǡGh4GhlhJաƍaHg6;%1)2jT*X|}J wwwg~g,*0j{gY^Eaa|}j~t:PUUAIɘ6m$ _ Nb;f2|aog^.Uxڨ_9ĴGw^~6(Ŝk.L 0v Ƿ=5۱!A?pUJaMUf, Vhp3a-܈KޕF4޴ǯL 0]Oh= 6w/Vjr"b + s|(Xz<{ ja؋[ 㭇A5,ZWOgbp*ؽ̻ᩢaL<;Vl5&0B2.I@]ztaX: כӽG-q5` *1js=H ;+OӲ=2&iI;{y?)s`;q@~,Yz*d׻!KGøH6iXnJ1du3o=aqb: wwѷaضw! $ x}Rh Y20ܜ?= %# 0&ѽTU\7h8؂Uؽi)JjJUL,P_QVin/G `0F!:1N GXXa*/∂px5`'CM ÊOx7ĸ'aW_hא EFw>.jnvNjW+0o_+}0]WbS^&#& Z/7{QL0HrlRCOrFsnfh*DNF%zBSS'&beAط6AHxrþ C.q 6w."QhCh߫1jM=FETj7rkʍ魃X K"wói7@'p-Mw+҄71)< m" Mv9,z,Oᅧ^8/Xn'\SGR~Fb(D>7˚|@4oo7VD0YQĜ']G7Hҡ0&_ >;7¢Rh)DBua1 CoDƕS@ d#|\}u8PY^y\"y{bӷ i/U#o؏,?^Gs`VGxqذ[ [a8J6 ## =.}1apQ%.L 0`t |ܗ[_(y6"Zџ(ªc"T/f!Mؖ4?Nes;{BiٰBZ(B5O!l 3&p6ĽJA@LPo#0nn7F'f'TA0tӏ^Yj*.54 Z̯vs#a9le(L tON nɟ[֝w[6lz6Ҫw\E6w}Zo`m% 'r_GBc_tRP/%1޴~/9J ;\$U/Kx8cL@|v`]bHWti}ժ}&Rz:ԈP]B8ҿͣ(-+`L-e$~/_Z$yKr~`gO@JQ')ʳdL{8PDڱEiĪQ@s~}L 0&8C =?݂io#צSFZS?^[FQ( LD8(ūI1!44 Ҳ^b#aqqEN#-D"C6b;ǔuªr >$rŜQ~e &/\(xt EIp"'{B6Ȩ EsV(Bvkuz+( ws\DvJWA?WVh[W+.dgl?uKk >,*/nr d ҃/Q-USDz\mĺ-iB1(] 5sb,ZNz X9A~ZknBR-$ՙw-.\:+_X`ؾj!~]rF ס'Na|,[ u_| F%xqruLؿE][KB--]Ga?2 zw!`mYrGC/ZG|=C1n|2|| Mc@;B;,*LEd-|ϼz Wb ^Z: |L<3?\9}? ?oc՛Vc[}ɲi(BzaXd/ އ菸u)u| _۱D!hܺ _1VFBiډ|'["[*i⚘@G$ CEslk,F n+):HC&{)nRp*N Xa4jPr'WقxzaɊz t= )4̘1rOWGf/8vvs srwQ|چWsP}^Q+OYPX||.z:8J˓CG^y5N‘PgU|10hlX{9\>! 
ލ;/*^s>OY"sF, 6ޛ~G0cpO4`*d4|ծe%.ߔB2ߟgT~.DX,mBjiFck^2rj"mBYn\uEҰ\F߰+CC;B<fz_q;n_C4G.9zaRԠnI3Xv BȊGI9>}}+%3(z 6p`/aw |+{7Ð1v\wUje4>=a6H9B=((,1 gж4ф* ECr¿/Jo55\?ف-_ >衪'Q(o ݧ)149jZpuxZn9"yh|Nt>xEQ1> ͓۸/o5 #%<,uIӕU(XK70G(|> 3W6%>\ܨE$"/f10yD$̍N3PƄ୹pǞX7xcw   omvshYR?f_C_,~a,t@*TTJSxJqݸaH,tVCHvc I::ݿ3׭GVҍ2xt=Aŗ$cK<h lammt %Kش߆YIs1h]mU5<(_KG;x:w3R6QR9l9Pz=124wτ!_S{D=^$h c{> ];9Dxr8K U*'6oލe~ĉWQXЦFL 0&@$0uT̚5 (xƎߵPw޺bHU,&aմ,vE+-E"HK9nkX"{9a^j)K$<"iQLq[~(-6'܉mvG`ޘ6m:VpaL{{''e:[&i I 2c gƮ][q?D":.Wx=u%ȯ=0K3)c~);.0n`C5 (0(ʁanIp\k;5!F[%P/V .Lh 4I!_ |MV.?7'^]$\3 ӧ?=iX `ݐكg6Dd Ԛ-R ˮ…l)=NBQ!ǔ;K`İ$Q@6.Lh Mh4xq7bŊعs.q.' <1f$%%IO5aLKH}B12*vڥ:Gz 5Xv7]LhCor'*q$pI{S1曥e`m#(8>M~Q"0Z&7oB乆b? 9L*rZ!&\ g1)73 v"ıy;BBR,.N>o7\$%UgiNŤs@?zšO|9%3[bY>O냿P#GQIՔ9J2cyQr]jsG< =@)IEux<qB,˥5L jq&y;|++K5bXK'4=GEy{ v` G1Y` Bꏑ}ċK`O!H]z2v*ڕeE, 5 Qјh0xof(S_ Cѐ 0K􆓆tE~vhUFHr Z-|:xQNYua>l4]mDozLJM}{H^K8hY,y$C.אa8ڕHDJr4vA5W<>5 mŠ1{RJ<F5C.nvP] oU>^:4ILz(cW r qF/&׌ð\C絑zO ƚC8ZZF?"$AQ ѣG; ISn\Xv]gL3l$"\©њzTPNZMb0qt@8EqS)fZus0$"#ȫ,G#)FA^QvF( _&!9\>w#ˉiUE V^y+H̰%jQ[&4ԡކhx݀ՅU> C\B@tns3h/Db5=Y2 k+QVV@0)i?QS i=z%Ǧz44PQUo s7 7 Mg'@:|_HP EugbA}s&.apSȊLvj< {"NaTU(Kp!){HĎu$ۯD$}Ba+/ %#iWv ]'喆*t6M{&\RM(-A$kL<|A6B!r 5/ R}#i# QLL(jH((#E$ Eawgs.&甲Fqb0֩EUi!oA0`AJ `L\inYgF]c=mkL &/\sFpISzEXIP*$;>Lp f!@q$2U* qz!5(xarebU#9 !55O melGI~>ju/Z|(A7S|LթVwuK$&;ashH$  %iɴlX:9P;1jPOn\Fb] gL HQAfNH6m; ډ""? 1AMu`֫e__k>Ւ,l}54_ ")DHb.G%>z}JB0җpUbO})&S;+zԴEiciQC˹Jro^NKi &@;8@#q Nm)BD~\2DZC.L ˀ 0f4tfc^2uǣioCa^Ue!0>m,Ev. >wasiҙpUR͚RmCeJ}_ P(yUŅUfP%;_w&p d=^(Gn=|c9?#99UuqZ<| CN鐂CAs$4lyM9USX)HWiYׇݚn/mCY.ۍs ^ňLV&|@IDATG<ˆ|4|ohxx襶v&#y$ ʠC *|~zOdyr<9!7x#0(-+$L]zɧu=`ݜ n~taG#3 H846vvv)r!)9 9ٛhg; tiD-YѪM(.EEE-L7YhnRӉe$h59\h*;Jka# aSӆKN)ܬ*KģeP!P76{B H[qPMl@yA-- ~prG@X whaiWk8>SAIyݓX +c8TE(<' nu{˽gL B:$|fw7ٚ!%Εđ`$+w!=GE8hh~Xȑc8v FY1 Ebi4$T$l4gb1uX'WSGsW Sbdg!*!A ȒCAiCаVLrR8,5I5(8Z ދFyz#(>^8{S& J~س3$rS]CzQl#Fٽ ѷbh0&v,Ίؐq!cv$Juk($*'˘Bbh̓(: '#!ƉɆ& ; :a94%%Ѽھ7l tID?C!ѓW(.rFǕ+-9Ia`ߏZ7">JEt/%06YH)5CĴBu#AWjAK ohv QQAE 1GBQ4WQy| t@b!'|3kb G:d=Ps';"f1Š;6UPL58t +ÑRF`* %IZ_5s8gX3\ F4Y?ˁ"~-Zwx{0ţ^G/o9fiS؝Q׌'`ŌS!fL+?.#xG?}? w'O|k/QYoL /x)X!^.n8@-lA.ǯ#/,Ndw[ RиcPP C=c3Ѯ7Liˬ8e>9u=LՕdM-׎Z:Zp.n;wp_ &1(EP0g읰6Hd΁1<An8VpE%6r+HL"E8⼅EaذXgdxSNLҭQL 0Ly<3aqqxb|-&O ~aQ6Uxf!;?nzӧƜY/bg [8FR0VU7G1S ,85A[Ӟ};i]Wcob\\Q, ϸ&JM a /SŁ`-}Ӣ~W Y䡪@!|$@ ]CP$$M쇿BQ?̟?|5[R{Ȼ4f R1CʃMApء:F cRb+(c.HTi2a#10qKWCCN(eR;יljj7RzK ־A)S 8+r̩RIpS( JK. 
woCpazK!kN7 U&L 0v'C페БXsjTC_yIN"?Q 7b5Sy*M¶'y=: 0M&ǻ)i'hySO>k$d}ڲ # %Hihx8G$zՖ-roq{oN>zO(HHR ҫ)nɒU,[tU?Vu{{̾[CHRhp`1[>♷։rXp7y60,J* 4xP9ki cu!ؖjtShdډڍzg̪a" jR}N)Nxx8g÷Ux?#Z>]NtF!Θ=r!0"R¶_BЧNH z:-d h@8o?q AvJd~m7?>tlJ(1sP5Hi@/*߻^|G?7bɥ?,gGEpRlPQ JC W {_D͞P5|T9%6k1;i؇^r-XKĭIx;+/@wacI՘v3^2 mm\{M zMJ)jE^^ztXpp3G\ttN NcMe =K0ZB `{G;JJҍ_pCѬ=jsT1rro\AIj:՞v :xֿk h LU h@8U{N{3g~sG(E{7no.k^O~cˉ(Q "MIS_GQL_~JK) Y9 ;+|EDH#xX[Zϊ >3$h!zi'vn5,!* -[,Z,\#V#0??7_gx0_Tk;qPi*GYp; !Hfa@]p)/KÜ9)ã˖9N'M9]Ƈ!$˒Ș#CkgRwY(1Z=?,ƨm2D'p9~haf/Mr;Ggn0w|EQP0<^}&lFr!jьM]#5|׏t;4vx\s_ށ8/Č|f -Ɠg-\%t0R=HOIEt" StTޔdᄈnż9X4͠@n/n9,ݘ??cV jkMލE|dWPx544 8]A&R{zdm,SFO^^ӽ.)nO;$՛[Ƅ4"K/>zXb-.ZBB!J;VsAfzǎ}pO3JũEH**˰eFTהc,?s= AhXsWAP.^mDggV·%9yu}MMӛOo0 =,ؼyZ$1l2񁑞IL1cZU ((A\l8cs&&pd|lx^`m ΝQU&&C~Drb58AHcztK==>`r5+p^RgzoToĘGަ0!UrAGW^ه gӻ14 <;߿57ݴo?7y;b$:RGQڷzboT&xpŕK0gnl)!z oQj7s,2xurr3 B?Y@6YBv6n:͇q .K >.BI%U667w׋mk%vGС@`ؠ0:(,YgF<νiG O.:k]vu/cEiyԑL;m+fDE@fCii *+Wogx x Tr.H kjZQ{x'%kf:xq$n 8" #*`%jp]#*>Mcn2̘/#hu)b!io/v>oxy ȓa -v {P9g<8j h Ѐ)F|!7/>-]ƁzbX{ v p\03ɊD|/59rM ^`x:gj *Q[ۆFUǡl`%L:Ly#&J#480u'}@f>~$ sˠ"H"=H8";фAW鯚''/(D'_\:k}c= eOk_>5K;d\QK(bb & lʱ`:$P[o4%kN.0\&'c 5İ.F6O^w}J\·*J`)>P.afD],N:0sk( R/)SmI̙ ^L SDS 8$$99O7=e.`$nJ̰;N E2%E2{Qc' j(E's4E{ vj Z`s(4(կ°p3߇=&FR_zz,Ϡa8RG[:EuӕNמ%ƯkI[O m\[=44~Eyc_j6H`mQ%kXfD%^܁+! 8Q)SS .ӕp$j4 <}Z?Q@/ klr+3!3+JE9D04ԈY:K×_K)cU'p*wMMMЀ()􎦀P@$yqq0&04ĄĄ~u`BBVQ,bye͠HK}魦(񨢏i h h L|vP(bZNKV-w"O\/йǢOtwQ@B%VS@S@S`) REgJ*>><^|@Q3bSPTT T\]i h h ه.A 01om.aƑ62v(zlz(UWk h Ѐte~GQJiq&IIVo4Elc,{ΜTTx/ѧMZMMM2NS@S) H6lńБd,lc8dZiL '54NO A|D7EIcHH`{/ ǂMJ kK V2UΎ^,\ɜȖ1mW"2f>)/obʾD<7֕s >L%/}6>րp$Th h h F={QQaXȼ>:ttt!22l>/'s al7pGp Z1EB ߴێ*}( O<]6DF0 6NkpX:YdfƢ`N \e׀pNM)xv#"#"h ě%_ZEqU+ƄI=𙏖I}Q@>Ǒ@b3ab{CȽE)kh}&gZ0cMn . tx)E}C(pŵӭQqg3ZM oC"MO^|k'0Iu{IJ6mmX<II㢦@9Op|}4vP񣭮8 ;6Ɓźmpuli\1-`Ne\|JE:ڍ];HrSE9ȋW)'J8(~ETmЖP]PH/lv'$`Fz =؀~pXv[RnL wt44<|(\|OgLl,!Dtxgc+4 <]{^?S@t(T5E:@ KSH*ǝ"XS@S@S`ƆVlrqXuFc :ys cb{< *MM6^mGPUՌPc,{4Z,Fв_5**4w+ LRկȠsS@&o0[%^Fͣ&NqN^/_ ƙZ/u=>Wx2J[,&,\YU w{K1rHc\.^)O/2/ńü18sG;-:g?dh g1wL0#xdg' zbQ9!҇7 i;yLEN'\Wƣ4I .4'+LU&z *d&/#Saaca>>mwo4-;F BM3 !+ asUvT ډV~92IMGǏhύ0\aOf|'@V@0V.n%9'@-`1HLcPs0""B1 /ԨPG#So@URs xu@3'N[[ #'~8A_.TNsSI^ؖVmSAN!>|+|LؑoUW yĨ&>G%@>!y_+/bȀI W _c/) A-рpA)J8W.2Ćv4wnz)3 M7rm)R0j2t_a1PG'cs 0a'+Lv7j/V޳a։nv]Vԗ]G9z%Q@jJ4RSx:#D+T&rdпMԠӆ_/R g }q۶W2sJ,R z!G4o55wmuQaI`yVJpr-%3dK^& '+6pQ(1ѐX)is+|fS[ku/J+۱Qk!qaH&KO0E( ^&7 '^cNYy(}^gW~EPQـ6ƳCHYiH@_Y dV*5`# J}Pٞ>~xL1aٺq0ro%`lvU[q`W5\$((He`X2 J^CȃQZZ-%1i*qNM;t"Bv 깘-%X?XրNج DI_8A^rte 9<f3cz`ǪX7[-Lm(O ՘ 6Ž1-dMHA<[c58|5]2",܌X$=;;Qt[Vaa=L.4 \뻏2 *Pɷ݈6 9"ҐO48k Cˢ̗E0P2dz=Y>="c=N֦@bzN褐?+1sf38g}@RIF 0cf"ĎߤB/b|EM'Juxa::1BQ 5 ZSh Fs#̨(B,0Ua,CcFYXmS^\WjJXug'Z[QށxyE HLR&>xX>Ѐ4g ؗun¶[T[F3$'aDY4h cQ 4}o;f{6:0) S*6-8Ԍ}%5ؿQ&"(\,1^GN^-if3MvE}K%1#NĢ c+bD;{_Z6cf|<_Y* p_M,>SAb"KnmAյxhΌ"*1N\'ŏ( *1H.)j9`NL< 񔴉mwRFb*ѻREqȍ93p+mGQ;vV`v*V$CKψ! 
u*S`j]$6V.NzzJ/SImj0 &wPVGn?+u!+:k IqeZ7u:e|LSi#yv`oM/Eo";'AyT't]?Ov Q`jp1zX]ԤxՐ~H0(c.]yzX]\y$p:%Gx3ԧRS~-ތI8kC23z)Ogó D;vR8ccfL @.moH3֟Y@Sd-%R֖nV n@̓%XH=It,Tj|CӞ \Vpfizt+\sϞC4e_DgҀЏ:C70x}0905E2q|cC,7 RS+*MXv&֭gX^*)h`xsꗉVbcǠ@-FJJzVww=^[?v8_ރbyf&-OTDf6eMx =8YSfR@“G8YPR^xaad\0{Õ D>C cgNJ*ip.sճpB 3ceVrbbFY96Vn\tH?~)4&,]2{WMx[_|SIicb^ye at"Ǫ8 , HO'E1x!TܹeHJԒq#qu#.:TYX~k8ZEՕԅD6vïog'͗CZU$^]q_uz'Sa2e4#LLܤUJz<|CÑN"a}*7^ۏ1I8'&bh#(GT<^lFys30#:513'wc[IP(Apt ES(]Q N; 7̘V#pc77R3aӵDc"x^sqQ㈤b> #Iqx7XT^:9Դ>_q4@RZR-KPݘu CDR[؂F+T#/>&9T#_hjʽu h@8>tյ4QjXjAq $`9sW| e<|?p+Q  )_[9[|F+\wsTJ<7}YeKl))X]BlO79C~TYxh1ᅗvqx}6 :Jj73M+A-n3Joh@%Հ{R|-w|]?رvx\>]ɻϸ͞ 0$CzP?8I~ ( HU =tRAXGD%ERsWkh:PX{{AI0=)ŴMM.#M5u;ru9kcphU'^Py#J6,ɨE`H nXcgCIn%NtXV3V]Z4qvbellp#}0 LNjO騲ekZ*Z$x>Nm頯gEEbΜT2Dw22rɇu W92sr6yC͔ڌFtxd27+@"%4q[ߌYX`񦆣Rc'$Y5(Mb,[Цx"3jGBcp &ᵒh3Ev5#qN,LGFc$ hӭU2{)9uexSah#ӑD#e"~zr-a Dp61aa8#'389 -1!\6*(ZPj`q|4#@5.KӲiA6mgE`F\"43hM POOsSk UD%MG׭{`AŀJ#sFO`f`IHuB?ɕ!aFA~LnZhRygg RӥL0͝G֕ehEEM}$ԣ_18 Pj™A&2$RG oqѩXEE!%2i'V?qhc{TXnɳV2ni̊e$ny mX4J^ja~T@;`"߲`o}d,%v#ә< fc(r \!cOTՍ] dVP@PK;iDv(Tr!{ ŽɔX(-?jBW7m% .Yӥ6Wp G[C}0> "r_mz|[2n7ϓ[hZB~5wFn,OىCݴv8A;hVm;P^6":ځ55aF~4R_z h@xCG#1!wV"6W6(Ftv! cpC飯@+ etAf11]<_I[걷,•Ymol.A=:m s}a!F3zFUp,݁}G@ W.]%Ѩq#32 xAJH"LLՊwyؤpڹND6E"'UVeh'UqbKrzyVICU ӪS311woK#44  vʮVf!#(r :d1&Ů{Їwzq!3<3o69'5=t׉n5O7uN>8j,͝TUMGZ.B_h n^}L vuY{nqg~fK@fڱoQSG!!F}O܅.YT: +gfSwуd2FQ:75kD*jiì-x #s3>k@#EuobH ƏlA1]teK;VcMF3ެڇ#&Rݒz,A1|,OCmq`d"ԴM[Fw#^zzH?w.bnHZ2Q? Sߧ#]Y% ߚճMD:[MG;p+ dV%n+R gf(3PIh;*[AY$;Qh$& ^Ѯ0B#Ub.Sic:GP$f6^ilm08Pʕ\bI~'ǹ1ji:Tӫ.#"tܛU0iC^Gb`i\tytHׁGJS]( H.+Zn5"dC P@nvRNM5zq/(9ATpa\PI7PEbQ$=0M>+IBhНYUlH2%^VF4o>@EB0 0S.DYXց9K0N135Y\jEK %/Bn^6)>En*O"kX SҏvG<%*X/|A ^v<:ܖ+~ H~BγR3)ݣR'%f§$֨bO(b8闼ꋩ6f1o>X!;XE"_bq6Yϙ4i} cy˱ڐ@ %&*R 6Om.\{s7ĞðMލ޳Zp T^cө)wzyZ džq@^nnjx཭ RU99*XԾCm!5y#fchK#_ Ф3PS,Pu9?+|Lo |z+o}E4%VcP06y"U0 Ø|ەcM#c&:4Kz oUV!&) ^3g&g&2: wSINI]H$bFFӯ(\w <.<mmX(c$Ր\r"f?-mNJz 3˹f.:% "yodE1u=Q߽<vAdRDnG% B<6 >\vZlP``ţ8;trwy\@IDAT|`j `y[uQR@QP_> '>,Ʀb'CeL&޻@Dbx2! M}0|by/Pgn W pH^m4ڎO@'Wp3gsvd2HG_恵Cc~ }Q@&[Q`g=GӁI=(]&Kfy;3"N[;k^YUx}~<{XٙH l'tD/4O}}y@[J2xyjv+v, N=^>&Nǽux`9ʩ/HrۿW>62 h@82&f"b :SjQVYVdganC0TN0$3,oΏ"y>HŶF~lj+r'Mbt=IUEvv<+)l`ό>XɀYRPEV0b |?h1]jVcC)> ),115C-6DITRPqeExuf2{X֎dg??Se|Gj@8յAJɉǧ2RBӄڞ2Q^tzKaDDQL l8):m*jN8x_is3֡}| fy%dPm-'٦hKѦ}iDB.[p8~i08U{D+c\u۶W`6:8(L:c]A$grHCx޲/yt/etbe-*4V?~j!]LѤ"لрpBȬo2p'L%V$p9sʮBqq5K3{#f1HjZdJCCj㘝 _/"׫Bk ͭ*uCɬ0 g 9)-^N4ug1Iwr9o%qy^Wyf˺ b"gIuիf{2=ݡx`v#?> atJ+Onǭ?^&-=t3yY}{JdB`Te#(@yCUcscCG]Q@&*y>69[ lXQ Wz LD:#g$J`R ˬzUR$i%U r/ #qdku:pLmabz+!pB^vR 'X=2һWc?MLsdx3V1du2Iqq- ]{) 򽯑! .]2ÈZ^ػXUzG򅡷2bXgdn_5?Vz,]YC:Sw@{2?2aMGyX;lL?c,$3u^LL2|}H')4 Sr&QFNL99bGY􄴡Aӭab&J9$ivB [!DǛO]'Qv= IF+Ih$ AD&3g"; si.qgDJ`8]Ə2q74t׋c Oo,<4[(+k`JmpdtO 1D.z%&N 4pG%cx M\ tR L'r> hz, ӑJ`GI`aM^a^~{^| h@8}0i- /+Iيԩ?x@1e-YE771%p\40…\#m2 ʵrUΔN uhuY扰 PRuz1&({ CTrNDa‚ &c6Si@,c'ӌ"؋Mf)0RG>>X-PS@h E M`?YTL|K2ŋ=i|žҫ0vN/s( D R8/MG9I-𯶶^~վGcs 聋( )NТr"><&]_b?K-L0CpC5!d"C؋Q|R0% RgJ[0' @N f2C; O62ٽ΅ 乧bрp&{_@n 4JCC#V{aMiJ#\OwO++ѱCtt֮YKfeT`ms9"y$}d*-VSL@&! 
Qm˾0H)Rcr; h5bb" Gvd7n/d,`Po"φv<ؾ}ps,p]Wfh@8 b < ?_<Ջ@^8f?|_%%zC0%}|3[?g Wnmz؇y*J0CB䉊$6A{\ Y9^'S H_RSw$uJBByrw*RNpDMI'a"P"c9/q,R"^iQ`)x-M7NQ;Y 6UO>9|@Ҙ3#4WHΝ{=:FO9~[n!JJα1=܃nM7E衃_'ޕqww;V+,wtrژXʓV vj-Oz;O>uȐ@߿=G?^}+aVOW d3DxA23dR0:' 6{TIuiѲ ii1j>J^NxWCvv[w͟xċ]}T> ο c&M69wՁnF_^:'=^|EU'?%E8wufTknf=Ͱ!%:{4U%%1#%ʛ999Rb}Q@r\ e[;FOb{3ؒl!צN#]WW/{YCox- omϑw#4!Ǻu2 v.<UTʏ#8kPZl%y̌j8b)\Ei36t 1'afY4޶x4;n{rD>xKl fNe E3wuaSB ױ>li[ucCk`P}Rj!M/ד}`i)asS4k6@~+Ro{v~z`JH"ZW|#3~:-5F:K;h<t?;+&[ MM8r.PS@T#&^Awڋ^z)ލZ '[xwJɝ2m߾}`қp xS13I NևQ@L6V $C=9"$T}Maь&\>?݊];([+oOJ1#ݸ^)+G"L6` my~Kbۧ8~ziDZ8gq|~N{/c~5H &xzdfػъxEԓgA{s {N@LD0L(`Ŭ; F P#x_C£mb<+H nﲢ/'LF|:YY{s3:{l0 k4LnG/% <$YA^4뗡!hы`=Dr}R@RYIKR~QM.EC ,u\ Ht1U#md%?s"ػZփP:l dOJ5 b[j'>!tBCT ":Sh84U/ȂĸH5:{I$ ֦ڃ0X;9Gu;03O&p<0z;;20lIݽhcCvιGd"b#t~JB%V/>ciBvn.읝%;sl*-.X=jhpaqy?G <K^/~'1waQFuk9 B=csQ@U%ə "u?O߇oCFz"tlJ$+Tk~|7.c,νnvz4ۈ s ξ~Tu9JYx>nw1?'RcS{!FQO՟>sZWGp_S/x>{D$f܅x럿=*j<p(_(7Zï~p/ xqmŞuwc"-=1%Mz5_m+xqpkDef=6Tx5A>O]X&T1e{.lk!<anA<_Ӂo Uc[?3D{m1}k?_x;qXaGw$|oF_3xC$@jzeC8{v ]4oT*l)dI~׊3q${_~& %8p]FͶW7Rigf̹V?߻SL5a3|aN=8mLėo$MA~paCy}P+P2[oa >r) ß=n"",:a/p`]BFbZeҢ52)` 28 탛FyhQn+"(Wɜ \FƲptG݇7ߐS8>!:!PsH W[K%T@BXZ\J]W&qa H595N/>S; hAXN"ctt(f2sIQqI-[{EOKH0eI1Ob5}L*JT>0Bj@-l nR)WވNJ1EjJ(00G@}4- DeD}l58MZӠx(˦`RK3O'??EL8r 76CP㖄$˻1ȉb~w0SD;H'rr^X9~Q4 n䶚F44#xv,z'9JFЇPZ'{ǽ[F#ڋK`E^|3(Yl12&8w;k *|F2-f<~σH=O>~C\Ww=.g5ʷ:o+ZR_Ś.<֍ݯu~f㶛ߡSAQ!#!(wBIi-pV£HAe{piO,ݚXxhu (od|5޽7[ 8K\u Kj{MZmt6yPVNx߾ɂl؊U0̀~n3Xuօu1x4wȀ+.bK9уh|scd4o=ϼ^^s+HB͞{@$Mv,o|Z8mkrbQF\O#N'9'ރ`Lw2aaսwz-^{vZU8H6gb.<,4fE/PN/ 72+0`f;ܐG msk֧. {0}e|`^6#ʅhw",9Q,޸vl~o?X U]0oلd0wU{hWTbepݹH3<t ۰}X>;U)"LHBn^v_+Tj<|<|9v^ȡ!+cߙMx27+/dLKbypCHpHG&+"0< kNs9nWa~…HZ<}ӟ~#+?@C{5dnJ\ }Vttj^$/mCNL'DE|\,BňH yIxw~ vQ?%d=Ng~Aҋo Kԟ~:ù흗!M֋ ~pz5dFL8^z%TTTko|0[Q0/e;Ķ1w [s#.9o5bB\Q^9XqR:: r6q%҅YzJF.ngsٌt|pּt)>ݾr \7_aEuw,Ҹ=XxWpP} l3*Zɸbf յ6h9.,Bk/Aבb.@|.V _9 >U $jIQfh_`›["ց kDžܕڐap'ncc^{}?\˭oT*2>€II6_f?_Cife 7`&VMncVFcݪN&5SaW$n{$%Krx$] QUxW\+X ͛` D|\zX0￉-v#k7{[X؀T,\T[;JkH_y܉f=ۉwU㬫]ybh%s_lkI<؛KQ=7_XpEoc^31e nXhG}v[Hs.nkX<EuɻxmӻXp!n_  IZ 6n ֎59I#Ո|ߩ0P!F对v;Hx.TzUg^߸nFso@3Wȹ&* m?Z T f-<w=;Mg,?h9)RsM.p)0+\ AYsZvR zm'劀!99i*S>G"?NZs(H %Qp "Vu"J1A Hk/JV^A+Eő\]3Eѹ$V2:9}.Ky/$hrEJZIƯL8[(|Lbe) om큕ASep39o"urt!s?&k/oh?i4I>a%-}W/~1IQBA(nJ}98ߊ4SqrTQsHCdc`qߜ+Al;qu8λ杅ǡ!ӵJ!Pz7+UKt_GXJFo{t_?!=ON*fѧl'+'>p_}ྼѾ|k*28}NZ av:s .Oߤ໓vR$Lb:vpe+ r}4',:(`D"y 'xŴh8^!Å_gK`5b|/'w^32J\"_> cTB1~:m Sgr+2 8W3H͌.Yu l!BB $\i$,.]o,7M{m4hlٖ-[sv.]m͌S[SLy^-cŦ bM*/Noa$źr,(jhaWO.E˦d%gn I{:_BM$kjsTHKb;dO'+ߟfŽQ=쏬]h[SƐ&D$;K2FYhoo})9"۹IDRD2[",4LE%dʽpt5ע׬qrs]&#Cn݊D|_IJ :âGv.$ PE@] l~h٬;#Au*#U˗[7̈́wV)d I:S~:Ym11< -sF6uLYPTTqԩ{^DF+9#!{_97)3!Nmnܘ'ATT CsC؝bP܎C/>dP"mvdя~O~xMx{GB џ  2IjgACO: fL+ͿO_`/M|Lw䒫6uehlV:AFaD2PGE=ݝEP^LLlPg?˴#$e" }vY1Şbp0yo fO}}4܊YD%gn'ٻFF⡷Lg֞t^,cfd/N'ER4?گ|OO{CPXKc|wr«t7mڄ_|?{Nی'޳tI@ nTianϪ jD߀&Z^;bӨ{I匌_z(@-FE4>K؀&Ađ]]63.qlٚuj N tKʗaӲ]@=鬽Js.Th7MiD'" 2) ~m֭y/I.7000411g8Lƙ<<4dV Q.CZ T'w3~w򬌯8$&&]<wa9uW/VYhfii)>?ޗϭ| =P$?w_Ҭ^^tlAt:]%BYOlHry2Uyfj^׺0J,.OSRÂ6.Xs秃#砣V<8(4t^;-ez+ZQpN0(K 6o.p&}Α;sYg1^u޻gw .^l@uu7r.z+/9CpUCjsg&~W*d'\O䝐9.qZn Szlhi|7bQlW\_ VZ1sBndg6K@f.1?{֧ȵ6!WuH['OTx W<Ύ^GGG#55=܌ cRĘ1̌R妬*8Je#jRq#*_R>g7ח ϏHað4إTadM^1IL7 M8Ҧrw;WDV1و9} *K$"#R˺0Iƣc=J|'eMb'^2AǶ⠐哵ʱc92]ISэ2Y{ܹfVzTXa KcOaBP\18"FW]LG[ (FqC hXbKNVl\|:^/UM1g*Iw8L.IEl;;'8˾>O MMkƅ g:ƑAܼ8~*|+q+(t`1܌x"A+7ᛍϭJ` h{:E5iP>55aV+] .fƩpK!r:;ăZpx .)991̈"7)'Oztt *F  K9L U0,NΗOW_t kd6ApfqͰ2 }H7`4I!\N(Aɺ!ё#eN`-$2'g+#"zZ,rql.gR >MQN{g7x߾ T&FbB2^Aܼ)PУ\pBzBBC$+G[QZV(vqغ%a\odʸܹZǍfE.1us\6r8rF-M1:ND7MnrSD @G/bGG+9eE8(9(lؐFN^8NӏtWZMoFHNj{f^S\-Ѷ~/Ù< xG:_ & X P`$ۄ8%!54rݽe8|2cժ$5`#vXĪe  ZEAm<{5޸YKKlGYU֮Ma<[Z?aI?=9C#B˹hMر/ohO-nh c&IB 
udԫk܀vr_0Pbvs"Gar2)f#?n\1>W&??qO5 p1$c,OiS-~ձ<4sDd6dЁK8<|B+y}HVTe (3MTN0RTgL /VAII1N~WFl#Kr&PDeo@vv49Sʽ\utr%t^YvvSEJLc^;ߝޓ惃CACS=N*t8B2* -N?8nݚĨGt [gp,|Ÿ\o/cѩƮg޲9Wil|O> CKK ~OR$-y.RQ\nYBf~!񣛚z`S.}*=HRn?98( ^CC]p1aŲd.NqmP} EaL[LLFp229?х^t M\@ћe8w>L+Hgs9΍<&-@> Žbzs(\'N'Sq\@ʕH! 78'FAo觭]GQ `~~~ǒi-[ȸ;smo?Th959A\XDdR>.^}P֬IV:Ӊ嚸ڼ9C^Hmj5'* Q!k=86oO9tQmCk&* #:D)qX2y7l΄Q\]O||P7ƍ4iQs@8vF8מip_rS s`qPrF xGn iIJjDh?6Ek<ҙkU^bzѠݢxb8q@GIċeWG?HK>BR'aŲ>"4w'7nFe D<_ 9Z d8 n Ym8?dr=Qm4rQu H%0D޽߻T5ENͽx"5' 6dcoof䩀!\ nGy(W+WyHg6!}.wrS`!SCYRkN2@W%$)ty AjF9g.ۀm={YddYҐzBU2"-\n\r !:H;ĭ98.U4 mع"ף\\cPp3қč,t&Gtj.o| WVv 9ͺ!ukID$7+Q͞z]:M^~sC@IDAT9$p=Ӻ}cI]+RJK%e &8 (,[-ÈĪZZ*-/ё^lTb}Ia09"MRW'| 2-}rC#B .!9KrkB5auDY]#.ҎcM7@+WE:EK@#IwFn[-Ev87Md#Cぜ\v誹\/(9SIhig E{MeOxD ՅbQ EqL 1$d@KNgH9! 5R?2ܥF \&e&bnti#ue޾e!.$}!D nfU>0!Q|X~ 2%>! =\xHH᠅ Mh8r)w$KOYSB^Lt8؜\7#f'4g-ݢ/$._8db? fppC,#ʡkT$%̼tA[d讍3kxzIE3u|ŠpmkM גн>9vd{rzAoLg|^|!q[ 鋌E]l"~\_~-.5Do >XܟQMuvɸZТ5 \hPU@лqvaԕ^D"#M3_ZER6;E rxx?EP!GB?Nurk0tt1EYPTJ@$e. 7WﭸJLM#3ge~ܮ n@Gn4@.#6)]qdjYkB|d0J6xSdilڔܣ:YKXBljqFpP}I usXd:cB/8DͳѸݜ !iLpN|rrA͠HH]NW8V#$ͧ$CuCMNZrG?<O:!D(0* XIfINDq:jzr'n@x-=ܿ$?cШ K CRd6W~rQWDC^:#ҲV";xʹVO"_+hU3̙~'__ "2Po!&&&+)$ӆXI; Ő=]61>_KMO9"2DV_(gn8@6Nc`{k7":qC &f2emlW;t P"zȻ0Z~~0Zl|W3:IDXC:rxL-劄M zcJʁN x4Q W*sYp.ڡ}: 24tz:x#[ zfaX 9v,Byi=.\(MKʔPKZc\(7[Gr3TV1FT_F $kU#gt+F%Cv爩$]+ԑK6AF(/+&wpTtqI5 .s&"BD} v0+C"e3ƱY)zh7[J\AÛ |Kڢy"9)ar5%0N90*^1i%s}ޏNH8팧{? ԉUc+=W% }<])?O)p(@XY@[qaS-}0 l22p5uW["&}=f ,3b,sةdZ A*HnKysI9Dlqq'WC LZ?Q^43>h$Z1Qf*+gN")= UTxK1FbBTNwD., 4## M1TT: nで5h)#`EgxU}-3Ʊ^qV򝎓9K%:B}PQY9%QHnubk`ך׍}M=#(!'zM ;^kiц͈b$VdD;ɧ$.f6;hA7ggrN 炢[d)ద[,^kڿmDZn&l?~ԗ c4]%TBDxD$Tu1,yt4A\ЌtB?Vxxݷ1Nt#by>C4Sz IIj wKK0!u>,8axw_iR~yJ%w]Cִ܋Pbc -;/{<֠}zEŤ.YBЎI3qjR@s1٥V2 5>I[䯮i&(o,&PudY^KX9 iaI7.l~h-fc5D{4Gd[:/<\ n"f"q?믪B5ЈQUm{ʵ[)'̀eK{BZfFq|}}8z:xFEڒ8'N>ƈ%k; '7yɧ|ZT\ڤQ)+,>##`Ow?\Ŏ DxDv"+Q\P¯ܨ`i XLҟ|iuĥ A'eߎ)ט_?{zq\STچD4ha}ǔM`b[M[dB=xy4RZ.ӖEAgҊsNuvS(I;gH|J6ze$`סSN=3\!->eވPk#]hi8{R ع[>A>x\ZY~J,Ee4/$Ҋz4N\˝?WxP6HÆZ)^P@cCBz\!O[}p]=q€I/}OWU6"_M[xw&XBJo` :fׁl>x_{a^ 7PÍFƚ*=7)Ksi "|EA*eD(>PbދK畳ꔔtPClSXF q|^}ځvr}PJ3O ]PAU]uO0)~5"0;&ܴ |#kI hkAY6cGSї)k\qNjAeu9N ">[?4B%:)ͺ_ z¯7WP  ݕ BI=k(ΰR *@Tt iɌ療А0nZD68YeV[+?rﲏ8ZNvv`Eui8ƫف̼e01b~VuHD\t^0CfS焎Q͉Ų-8v BHF`IVp~FqUBD8)J:'tT"**#hmH * ljid,Xod[+":񸦍sC.\&2/ܮfRe{ Gz'q3ζc.DQU2T'_|{18}]Kǫz4$y"&1TJj#}; Z,x?C" aض a iŗpqad𤮤:D~ʨDsB?yJ lCH8_"\uuʝQ m% Luv%**\'\qr>A{Qxzť*pcZDU "%J*ܯŜlD:FЀB|?卓GT҂3Eu"`]_6L 00lz?n3eHQ˝ sPwBee)12჈r5{`4D6y݋`_įbY:arb|3U$@}Gh{{l35 }R"<6 !Q0zJ!h7O΋sE(7GݦP gB-wKĀFauH .Zv $ޡ@ zJ20\t:LcN(o1&鲀DyB"`Yҝrɇ392v̠F"/r*@ދis-s1g~wB82ݔpU[=OB0A"Zࠗ&2U|R+?+U%+ ($K %YTN n@CYՁΎh(d0AۧVnO;܊gˏ;t3˜Z᩻kk t=⍞r(ڥq?u|Xb[}g8,}BRDOW YX'9:ZOohL9aD/7x>af ockB)zaPM;0:@d!(_NqV݌Q-4زs[jY@Auh $۷F m! ;;)bn.0x1fSQ|s+ajRKoh'C/4$hy^z:AӋt4{rxav.2 PN܀fw'&$r#'JԡΚB:UMa>c myw9 cz -B7VlD? e3{c:cmSbK*|lmcBs2P[ E]Vwo {" &r\<_MO`3 e_1▭Kb#ܜG8P |Þ3xmk2? pގB` vĆ6lc'?(DHu /zw/&"d&pk1Vʜ8e5:}| l8~[j p81*XGF{WkBT!F{+JkZjA}V<^=qd7mHlu:.:a7vDsW:+pY!jEao[LԌ,9u‫#Q '\е0deFobɆыst;Η:ybz-C11ڏ\;b#[#䢱Is]? oct?"~\X ߔnn=~KNZ H4q h H^GQ\рe1)gK|C&*ѿ|F-CA<,[T؇v~#sy?`0oeUh$4A\pjdELoSSeBʍEh-;ruHNMA+6ntq4_G RK#^!-~tcG0 ȟ9Nh~i3942)B,k/WBVъe9h,İV$Ұi':\ D\Z*=v_kt7y/)1Ez`|p] p- )C9ԃ x!>) f{+mXubawf \o&0{ظz!,>Z[;.<hǨq9O3R6<>w="Œ^HA1◮AOc%{4'Òq1 949ɭ8Nt'%Q  Ãh,GkDpCbFRhh:1AlF|mȠLoAb24527"%oYEcc$-GqP>RbR&m Fh{c/>%kd{Ϛ_ХࡹqrӌZCh@MBQ{i C'Z;!M#Шk\#:p9r ;7[Y3n0GDCgv =(+Cn^Ro-x0&hqPrθtIdEXxZ.Fː 9< q%! 16fbBpi?Lzt̹OpStx-N`}"6uĨ6` ި-D!>3oA[ΊXM~'Z2{B(wn \S=C7fj=ow:<(|52uťj;Ky\f,ña@GtE]@o32? 
\.FH.ō,^>&$F&ۋw*.Q{?jN0RWZQ mxػk aǵXzz*.,B#$Jö.]@#A *KX,rp?)GʕkLA`d<,MF58ՀU+]121\ (z=C勨)DՅS8*cXՐ8|Xa@Tz&$C(U4|`{o .GY#2M5\B\̥^);e uCF|?w|].Cg?Șm(?_^8<qיvbԀv;bm?3=K,NekM-<M]7Pg=ȥI47kBGBѻzzi);=Ppsvwv(I`lVFzv5:QOmH'E*?:\FH02|6E?Ȋ`2޾A_7K+A}0@a59&&86h6 +v:^ɶm7VqzN:z `,]v7r=`Ҩgk GSsҁq+#xWqPƃ@ @"jd$q3)qg@B;a e#Sa<&zʆh0(A{vu(a(iiqkRLӰL0^r)I"P¥amk!i 7i/z MT|pd>Li 4M:ƞ[ڠG\^,kq }%|{IhqV^:IiQS!\=W nDSm=_V`iBaE$E"ʚ!nx CS"<Ǒ,z~fCsWmjPgwmZ0_ }%>Fb.;0lBÖ> ÏޝyR"q[ gHk"7Ă8m>fL`ONjD/CE#+4V>zSFwmteW(e3]_g3:wKG}+rbWM @OBsMwJ 7Y\sG`(-zmZiMs$@ĉ̔` ga#bh=Ly1 UGaokDuU@P2F Yi:{+ qB[s=|<1AW2 1SIn:Ý5-荊Y ZJYq">ͨI{ȡMd44mdP 3E ! E̷Ԫ $gC:Xi5&gЫ YoDAQVSzu= pdݜ= 1$Iԣ*CtJ65z6>Vr(Zqx &Zcͣ46@W0O aƌH$h*S*rkDwOiH5f0[6#mBO y{"$q51ԉn3B,gBn`ZE+BKiutOևȁth:WGਥŵC.#)Yt.z`0D3"SA?fѨ7D{GyH.02Vt|r+#En@xo.}R_~,HCcETRCJI|O#uz$P?:}OM1b=)A jX:E,Bqr(fQP$K)gx P]#`&njq;Q|^#b>]1?g_m(- ߕ"$PÓ|S=)ތ\LsYs 48c,S wq*,h)F2 -OkIdI}D2}L^Sp;lW@zF8uPb{@-;}/"t.:"A୓&; vo(A@bxtNM5 %*WzӒG];rwi2}Ws:7\y3L!Ԟ N6+QKeA%_3'w}=s9>_w)~ ڲ9kS>I08mu t@dm~?H+I'rnמ<`yhtK] kJ-rT#8Wz&.1X>ϕ~aP~ꃠ0 L.8H;SsqQ@T ~C5P{$i풹NwN7 sڹtS`Z 5ILZµԊ 7~@G p;/ł:/G-^.%7FyFVe6[WeT-yն Ss@C}ʹoݕ􇨥ҡV2 ɋ;eʻ!s^C]<g^X}Ztp>0{C:桮o/-yL-Gy}d66Eu^OZ8NS8W/AJVws 7P@-GAC )JP(m8v9/wؽm^8wN`";P\܄yK ;QH{c#*Y6I:_xė=/l]w 8Fήv :\' ~ۨ8ghB+DE-rJyjּxtVè( DEI2dc5$k%o< 9Eg҃;ү1u=MP/32WmtT F,_f]sjn@PGn[U/q.Gꈏ7#hbCԋT dPQK;CmFqaʫU 3q.q.G˄HX#t|5œ5됾,OqH59nCDޜÞZZkrp.{f}=*p^^d]7?dH QeP~xg4i殮ڧ8VF1}^f>CQt4+uj 9g\g\j]*w1I4X\f ߸䵙dH(x0LF-rr8o\_-mIo"p~g3-wa"Di3C6i~b~ vPٔ0B wL1WTRd1pPf37m} mKd+.DFPDE عԛjt(˒KiM9e}$#l}h̪(!lYD\\ꅫU^Z7DACә^.$>0ARnaf2W1:2=rt`Fgs]w#) }g/>}'~:9h)2mxX9:n@8GhO3չWġhb$< %7:_nY45f MՕԉ@DT$||}wccIrO@ZN`B:-${PbCDL M*`܆  1ᳶfԵ!8,f(3weC(a_"$;fjǔjջ?&3zƝM7j[o ;7iiKEwmWeEgyAm߰jH,psySTϨ$l0{M.gA?wܦ&g/?8}#&\ 򟾄?Jx`hb̆s8FN.~:>P}7_5o~-}'g/{_y?ć0";!9 |ow/x:Fw5/8֒dy3 '?Ͻڎt +v7տӗ1L&5{_x7_3;w_wp_6Ox~]쪌fG_G>uã,MM_8QZ8NH;^{8U禼OuvKFbx'yʜ5NwۏϾ,JwrSn(iNn )p) 6&,߁{׎<~o?6ny1^6ڍˡu!)B]-Kĥ&[N`~F1a #)g,;/_7[_EߐlZ#a+ QX0y}mvpAPV\ID8AgdC%Gln3AQD=J F`媍H_Fvi+EiiLƌ aqEYY)!/ 6myqah*Fs5РKu='yVQ'ч.N9Y {ocH \2BN59%-Y]8]wtGvkAFs$:f؏g~,R8cn "zK uw =d$EfB||"4ݸpA1;P vwtQhIo#8$A|C>xh*n/Ca|n7##11؉~#g>~H$%~߽Kd 5*W$wgOKc("cY/:mF@;@=&~E),Aʣj+ G[zq9 f'6O`7J/FLpȊҷӟh u>bGKk5DZ.W߰PgЯU_"sC7cTO^Cy:k DCa%o瓋܆]/5}=aËo#0p#FŭDiTb|,_ @$.(j%cRToۆ#y:B.ZT'J2 “=G&|c:Cg#EsP5ͨfOMhhl™#ѭ5cG#,vݧqT1<*& Kx ~d)cHڜ  Ka6 ?~ct#E_nɋF7p񍩻G pq(:2>bRV5"g0kÇ0}@ E`x%q3c7Q+ J|~\ds%Qؿ{/[b¹GaNÊZTLK)TT^>=`(#8_։t3!#M9mp+3Ov$9u%OH&18/}M ,( KfrVc]k>~HW}klG ;- {`ϻ#_Pǐ;炑Fy&?xE >ʄv Pg4̚0$#1rȡg)=9Q(.xmIr$Arȹtu\lVT~Pv2:g}/m|$ x]97zэUrJvd>QN,O6ĘCvWӂ'Ek[ N3A.w$C֘+b7kF(ϱ:Z}Ȓ{}i 'ѶJKzll7[|sERLj?m-I^i;)4>;,[@CP}v1gdrp'T<"ڙK,]j`\W3̨e]nK8Ȇ˟ ,, .,]z ˦ݲ-U$dˊlK4}̛n7w=W!'-ErR%Cl z̳[vs}>=:s/U!x]>sP|u^zuu|++snHl~롛'6TSԛSV|lr|KwX_/b*La2Ѥ,_:FʒE]O: hkwgiqx= ~zW,y˥0kvo\#zs O=% 5.w% Rk 񪈯+/G֎Ȼ&W&#My8(CMԩd*L/]C/~(/_w\VaPQB>"ƃƴz(=ؽ76v?O=,-|v1U0XtqoGy\+$H]>?=n$TNֿ|۝<8U Mp iBF- ]G '߹i8|x+6o/|P4cD$Ht#;+Q) &%6t;eGDn,k_P,)UY ,wHh+)'_|OY^Y >tz|>[~"ЏS_o_y%,ω@ěi˾:_ſ}S'?z85,-ku/d5Xgbix ֽ$˯I,h,3[wEH Uf.y[3o.a*ÐFrd;>$\/9,96?2lXS|#xGmoC ~%(6qϧoorhj}'Cmo']\[_3G|8ylvͥYQ\=Ţ2-B1(b/7!P?ZY7Yur*ɔujIgH/܅=X,.W, +pz. e95.$ fG%fHel|J\&7hɚmژ֮kܤ, COw~%d'[JX[?.Ro=|d xw<,N|wgvoBo+nxwr' Ǖ7M^jq?s%L6.'ooszg#Tb.Inb3\a I`,Cj09B׉56j'uA3p'+%0'`W<ہ2K"m ބ.rS%\FpYpm_D&_:4GʰH`M,rYPND^ Lsq5IX;4IIgtbw@b&WWAg/ЬeJj6_' 듐rp=aEbG [w_^}Q&ga i'Z{_2r˱ra6#95 :Ja!?ֻU, Cذs_ j<9Q%H@ M(DMLPyCݬ-C;g#ĻwYi-֖o##r+7)(/T{/+AȻrp8$^| LPwEK2tÞUp#N-GEا2RP4K "k J*jqMܬMRLFVHX9_2dzBokN4wY4R? 
id0YP`5erPI7pVm/5ܙqeufaU)ce2;-pЅN}nQbW8z*eQM_ +֨kڥT3"&qb^ygIo܄PSR)R\T̛K}Β6<;1 &,Y% )[{b\QytHXox+Gc:I9_%kiC|V`zq5D+ rޚ5߈r3TMɔs^n0Fc 4Xpr~ *G6ў'1EJjdUWl*T֩ʍXa86 Ӊ,^'CjxMNc1oUV_lNeT~CB{zш<$zl/(O1 Tr5f]0eځ6{M $%Fuێ8u99؞Jꜗ?)R^c&?vU>#/UIc}~)/oU<,zuK7rtut:7&Kp,')PC(-/S(*nQ^#)\{'˅"*1p+!A#C&04,!$B9vJ;L:aH|M{M|mD:7jY"Úp\Ns}H.EƒefK$w[U194 O$Fxxp8~˄}v 5YJ/JFy˜U{;3uhiLW͉| dy5t_ u9R>%%&OfDc6w1 9[<XnmNO=~YOĆ ]D ƣLЉY3 {P//+FE cr/z#ynyy򚶆h! ow-d|k h:3y몼ūYk835W啐SA 9ߜPi&Ⓚf'=]O<8>r-(,.GRR6<>[R+tuI<O9l2hwr5u:2myT Xa?{ 6m0Pn;ҀDrŕ̐xKi @UQ-2$N]ַ;{މ"LdXDQA^H,CފChB)2~Rl0_1b0 J{=p\d[r3"a[]ȕ'd.-j2K] e7nuUUx!Է\&)&S.l2OC*D~IV59/~s؀$k0IţwbyA+$5Ir$a g;G9#yy( *K-pK \X"a dKGL]\b}@A7v67dؐHSq(0Q{^\\^:gv9d|O]}-Hh$8cLjdkVmNxQ[ v2\))AA*1}}>n2+JE&yml5}*oz_8at*AR.*^W1_$W mDA8{m1bcfjFo•UE9hkj eѺ83)X>O^"y]yOh缦噗Tq?8~6͝"|h}U|ߞD]bD.^/|bd'VpC}2u"|cW"1 S6wpo $:@m|?<Mn\*ɝ-+UHmA9zw(H;w6bAM86Ksb 0-2)IuF.4⡮7EvQ䡽ZJJ/J[K(sc|%TY=$ {ZlD%+l@6xC8OW>z to!EAqkY( X+#%bTnO̫ėzx~=^mmص+W}& V}Wtc_>޹-Gc+e۱]ƕb<TUȨNzdI>uߍL:]iuJsT Y ]hcuQX.Jf;.-ҌY׶ ;LaqDee4+7T_vy( c4Ɓi hOquE1Rt<֛xh7WUN8/~@E*ucbM޵ UV TߓNOYKDNf:2-h/:>ͰB:|RK+p˪"<:՟\7`abvkЏ%Ɗu< k D|7㭏-?~4 Α9a,nAnn{)5n_ScM$@E@,wzV :<{+h8OXޓwSbqرz "jIdpE0$uϿz-߇g6' i:܈]nuڐfA~.·M𭚻HBk禚N)tۋ>7B\bwh_zD81 5.e %-AE|z}ȫ\R˸ӂ?WlBuJ&Q)N+:zvǯ^ǯZ,?Ǖ"S. R$DRꘞ:DA8;M' qF^z.KSw^نxG~ .!Z%dkr!NؐH d(y92X#8 w<\^Ս[֬u2&)#qUA [@'vw@Ga+^ۊZxjLu\9N#IHTbEㅆ3`sv5+ t#aµE(彴>D(>9qO:=n3R32Qi GݘD8-x$#vg,b-.BCƢ55ܭWYG<4j~=z?  ( ^K{<†EX/A ED!>!] ,z2۰C]\- .[ ʶ' |xZxE`f{SbqM& V_%H3{ Ouq ~F$(㾋@ Y% 4Tg:+j yE?i!ЉgrL/%e\^v:3 7eF ˡ K$@o'@Av&B$@K\lt곕:jH\F>3 Fg q4Ѥ݇ȟι1K)*"(­$@$@$DSk7~\1$K S0@OU!RAby0)v%$@$@$d,!>r,uXD=vEDvݸ鿠6 ĥp:&1%ZRat$!(7$@$00'яӒ٤ 6*hr>9Ak%8DosX @A : ^O.1i5zg)X-}fv =R7@dPF3K!Ebxzl9vՅ0^@_eÞh|Elkˈuayqix6  (nF$07 p PVz. u?1݊yԹ)lVX ~t,n؈j2 I"j=~~K*aKAmq*T|KL0{u'^|!lmÒ;#\?%NDU1PB-E3  8yZmK٩bXI? |(o)K2PY1peT%ؓE4^\th`L;N6WL& Y'@A8] D#^WfN?D\MxǦu$bh2;+Cb 2YHck} |D,̆MBTAJEro mJ dWG3,& cU#Ia`P`ڎ+t7ޒ U"#q}.f\T=tEVn.,Nn:t!s> ЩffLqG0 P}/Z"s0^3zN,2$w-۰3jѲcy063](_yw -x%̖ꚫ w@}=7!A== v52\''s \i޽%ޝxw+VW҃%˱,ߊ&7gF{lJiSmP̔g9KksDp:c;l*kw 7]'l忍ېp\&B{ʉievrQXxSʠ\<Ѐm'Y'~vʞ:,5k f",tPZfk 0 p睗`ヨc$w(QujM4OSəX_[]]7RrpѪHmPa=8,s rbGNN2)#//Z$BF z:c+ZUi TÁA3ⲁa惻QŠ,]!Ez2ғvaoVd8p b^m)LQN>հJε|Q22̡㉞]APoEBXua\u'satHOOg?wd3 $(׿0O zā92  h12Lς:q1a5p冻Cb!|֯C/Vd[! :0Aov(y}AłQVV;30b'|P/1Y %_y!^K $- si fi$qRE)K|Ux)x1 "3he X#@Ak= l-,f#f$@E@ rt}MOpI$0#(g+3%G!SskވΊv*C2R$@A87&002HS~$X$r:TpщRhMnM=8؁%yZM֋ 9l0 dXF%3ͲuF8 VAlV"KMWIUtl@f]'=?\ bǯ^: o,o?؎"G2:WaH B= rԸp;dP_Ģ&B/,b.JWghY{R2*sj"b! UYCF2'U'Uj3Z()،PXOj9.19r5 CyHbaw!@$M N !at% ;V8Yb΃>&ح&'$6:^Av&&yAxKLho,fFb}n:$Z0ɾ f5t aaP'7׌ `Z$+aAX:zϬldeg!ӊM3.HbaK$ x?xH蓹%bY[|ul›6Abܰ жX6_/h+W0+"4cWS,AliYxu6Ђ< .$dck `P>NtcHL2 2lA59 6u,]Bw]X@ |cIxe.Op 4$$X>**w2D{[^݊WëHMFw[. !,s0_?U7аYDXi I"J0Y @|PI:ص[L0wo5=z{q 8k/\7GR%֠L٬#1рv<[\).4tlĎ,!=6Yyؾ8~?q{*q%H2:<"BNLؖaơn2bY[/*21=7:K(Ro$@3L@ěņҼl%q8zf!FG  "Y=[BϘloUHGX톒L?Xt).Bgx 09NzSٿ+M@j~<_qCc;&Tf S+1;XZ"gAҜuZ,vNVʱE%wuvN..Y*9C& X&@A˽Ǻ D0܅>R3*ݐ@˝&DYiJèߵ{|,2e"ՇLLLXI))(Q1'ˆhiHWU҄pC[4Agy%ml%2Ґ&sq#<(}DBj|ZN6SבrL$S(cXY &GtEH#+^"&\I2abd/?Y&7AO6u,meh d={#'+DLG,NxE$}(OX# X% +SS,,T}#6ItYVa.JeQ{R$g;:>ЌڟkA$p(/!3 N~xktDv4mةNo=J+ڋ37SFs} 6nZ#|]>aVa,zJEX( 2 YO$p@_O> ZٿDr{Q5E9ۡO q|5q/{Dl=Z"f8V]2(x?rϸ#@Aw]@e:]xT+deb00Ba4A8ax8t? YfGS,'QM󢷡Ygsv5aeU(*JIo2\%@A8W{&($QB*(v:.(Q˅_ݻq-}eīVw,^\DqxnƺW{@ eH  (e]5veʞtŸFbͮr d^áYcIu?+Yh .aH5K7"LLKeCi&ggc4dv$0m( %3"[/na$@qErq՝l L   +q՝l \\?MJscXo "e-0ta[Ò@gR^^Q x]`Zb62PsLa, \ ǣIH/lE]YXr{&RX;Ձmm=HOINJtεԞT 7;^=vz|~y%S3I$0+8d<+Y( !}xaKxgDlIYǎ43fY3J~1dp3+w$@@Y"I0V֔a뀬2^ w UpPD `bUq)p*+q_v)ܞvୃjȹ*7m%Cp5[J$0kt0+:I,vt0YpC/]~ADdiD<[ 1Co4i6A/l GS%iCrlH>uU9̰aP`c/C& Gp9[L$0 Ժ^m؋ Q]̯BݶA'V4[(=INUbf<]]xX%[./hGKETxy ݽ. 
neutron-8.4.0/doc/source/devref/images/under-the-hood-scenario-1-ovs-compute.png0000664000567000056710000061276713044372736030735 0ustar jenkinsjenkins00000000000000[binary PNG image data omitted: under-the-hood-scenario-1-ovs-compute.png]
yt'$Ibeع7;./lل[s+ KǦ+Xy%~oVb׫nv<D»we,^v[Z5: (p#jd2z;F[ h#[R^Ey韋CL(]*gRYѯߠ8W-32W )8"㒗)ʊQTD'\&&0fx+7yO~C)AN>êӇ!RǾ1xQ>=4"H!U.݈QW_@77=26ƣaY r )ւHSbQ蜧DKgd܌v+\) w)Ԑ?[w*Z[QGLOg!J ErJ)e1iWD8663nF6 +,[Ӫǭ<=FgbXG9W[^J*TD4渥^ICAWBqA.&2xP'ys&3)‘fY [7Iqո  b:pt7f= ܴ!:6b.}^WEq ]"41 Nm]֔Ѕ K{,C-cbpT B4wvݍ4+7OLgN&r@T9k~9s\\LpƓSb {">Qsx+08Ás9d-,@'lbq/OaoWlXOY(诿XhB\. pdfXw ׎uHGXcظh^͂#wnS|r,$4o7&V>-F'O`[Hu$1D4{uLq>s(o}w,ڌ/R u"}ߖp쮝X9{ ,P7EPEGpNKfCp:Vz^ BF8ZZEon,4Wf" mg$64&ɋ=fmó/wtRW5 aJ0/aҔn\!') |'΂g{b dWv~<]3qdVƤ: ܃ v V+{{{=HvvY-ƫSzX{_O?F~\lW\1/⧏RR9Q}u;;r p b{`bo`jaB]a~,6dOGLJi_c1 pQY^__ cx9a `,݆O]2go4lm|9nG{M) klm_.Ī(3d9' ^<6hW$g"8|̜yܶ_q24i|$SwsIu%Iık( tl9up4b\<,k F,GRvG_7%,KWˬ'rp9Rg%[0vB^ l &qԆH;KG`GԘD"I[#Fb8f|€ƅ O_MyU>ED%UJSlm捇@K8"-֔?9xJ1ys8*Y>KzDEԻ/C ]QH>(ܘ`L-v)I j8$ZGT+ol]CŽ :jR6Udj/ޠ5u8ls"+u49!]j3KxUR&Tl1 pbzdbpm{4q.,_ÛvL7N:WGMBJk/v Z o?(%n0ehHU.VquH7Cnj p"5Χ~uH>谄,FӕԪ U;RCwj(~uͩ}~]{􎨸$.gd 5?|WWtme {318rܭ':[@e s8Yq7Rr;Fwk@.A{afiɐzHM6cш8qN{!̈́JBUP~$($赶4@)X SH%ػ`).OJ=im_|* m {FF pg/E%BCeo~J2,}nNBb,dHW|L?z9?{P8س$}ee`HdZkG$澿#l굟pAXNjipw˦x~֧aQș/ o{7a!hfL_B!xWEofwKRx1̫ԃ0Fnz.L`2>~p +k'AMѳ-.ށ~-~yt} ~cEɥM11N+̉%ӿAqM 7D_\aCX5 |`A5"e4C1+z&HմRh0 +]縕fQZ\;3RV.m >,I>(j9LбrHDq\1Zq{vc*6P!Uclߴ; DcY10th'.B},#nI[U:̵,FǗþ8L iGlR9^3FG?%#~]M5zF5䜢{R>W1 .ߎ# #!%dfbˈNsZH'AJK5N[3%m|tPA.&=eoͰN$ڣN 1|9^-#"‹@KFhܒ% qvYsAw=ץp-_PI)@YZD@0XGH6)|1QBC x?GR& \KllaxI' šW) 6?FEH\ Spڴ%l~}Nk3D؍z,\6կO]` PCFq[㷷Uh_n>XvV#T=s[q-eeL:#; <K6.z4_b܋xscV'ۓ&-='aD4_ȕ ѐ* =(/mj:=[xϥsL*D.]YbSiᏯ ;N[äAZoլ,p#4Q]Wqȶ;7{4{;7a sWä>d!"vLYtuG!J*G54xTNrdFj#4_?W}xCϙz.I㰏fId>svHƎv*!<+/3=Cx񂥝 ٚ|z} uL`Ñoѥ=ڌObw,5}by˜Riq~ң)',׀֔fo]Z+'^F3J=X14Yc` 4ƥsu쵘ȪҦl+b2>q0SlM'0{yYTT0ް5餒*ώ1+ m~ #R^N7uH+u-BttAoFCOϚps PP NU8T0n*C_?d7m)˚[7F``]<֘ $CXjW[SnÜ(Ј_ 4>:s cs `L_1ʂ@Uh[gסtVO֖ڧ(7wŸFHc.&>tDrBm`OpP4),ۢc}6W00_!GW 1`ؚ$1^XKtMVoLNk:gРo4 Tp"9[(4w1QgHj]g# .G!i:;6,,Isz%WBݔbUc^T@,0䓯,'02PXmƏE"n?dKي/BY:$[*Z3?`K:燬r)@TzZۿ>Òzk7u: ܯHz~% QBZb*}2P"+)j:-QCvuSpݵ(->^B1Ou)-,h$֔ P")tZ .T/IRX3x_t7RC9%64]gOG֣^.8~34EH=%uJFh\"! nȡ2 vJv12~ybn w%J-fMyB*8y2Uiika$OLIzƶh$5.z=8]V tRgY_׍ wӥKH&펽Shɟ|ɡQ~EI! '%]!DZe̜4A RjHhˤ$ 'x!FX؋}?xm1`B5d)aʚwX[r>״gST=ׂ}J⊪zЩʵsDǢyԗ5'[j_(.]J )rfi@s4#=kLjGkRP֓!]W7JdENq֖f7]!V=_re*^S'۫HGX̔ÛyCD-!ۖ.a>9}]L>ڄUN+Gߔ^ 5dyo8A &2Zj2e zT>V4˧.a8(}m7#󦛻.ܗypw?/Y!k˟>}3TO 0jlI6 :Rņ;lXPl&UB%Md!Ā Ny{w'*RK3];ZQg%غm?~[Z0Vo 7ɭV%26sܨͭ-OяH# tbOޢKѦM̟?gƔg~>i#EXs+srOestE+[7hc[4`Ri8Qm']Z1# Dl< HMB^Z&B;@3xx\KIloZbWհz{t}IwnLܘWW-2a|,b"n8LXXLc ;Wx@)Klʫph6|8n݊%`X5u5,Ut"@`vвWQ̈́<- .JDjRAQ<?m޴|d+Dů(('o.rsSS8CJˁ|ѯ[ n?;V$ߛ\])\{`=WVe.b )]qNgOg_Eu-P -=>7 ~ Bd›Ϋg}b~BUq"^֧B )yZU-6X"U,aY,[ li !ƕ(B`&:?)ۆlcZ\O-_#uTB`!,&c2JT 1]; \5T={T>9 c\ڹc'ك ?lBaq! 
t-/?pw0~E+Mu$% LZםdb |VЊwqA rRlj/ s3 p3rPVP#w% B#4 |e}tWp[뮦•{IXɄ gIѺ28$*,0ǩ8#ρv%XA8 _WaN|ǥ#1!|F]R8"Bi+JK<c0df )-}!\sj*l* #7GkN dd!h66 bŚ}(_&3obNZ‹4*[tJb8ٜÙx>}͚ Dn(_eYʾOWߓ>}o4pP}?mcGGG+;%t4(xa:tHKMCv\6-XŒeV:3jcHf@; <;N, v5dP ɒH@{ yt1' &ry5K4̗S;lR?'NaGw"<93 A.(cǠI7#7 xNp1_#\:ebP~[ #'&.Z ^~n hΉ50+%N^ F8(܌,k#NFW.^]>f؛Xx]ɺ|&2a<犾[>Co=RV&HBi9C?P¥[:;[{O{.NƋ Dhvj~(?tH;#bJ//>ps 'n%x6^=gÌ{`ͼC[}~*f?hEƝ`2lE CO+dұwKm⏌N1~c`Hc_) 8W/)o4w©#rr (͢|'klrпR$$disMyW^9%p) iQl7.ʿ9G kFj \k<%P b_s,{xƕL  /&c͚e5͚bj sٿnMف^ȭAQӷqS WrС㇜D_Nul10K6a= UZKlXYْ睤kL0LP2ls.dafحpWEJN:5#%o;3b6ddD9P±1I$l H٭kDsfj@0k ͛^54q|v:>8P%2foF4[ 4 Ə?<ؼ)khtTL}HNN1hɩ&vɽ9Se_#z򮾤{|ȽX%j@cʥ9c2JBF'mT57=|ec>|7ԫ8VH7= UԐ##2‚vՌ~.lE-m} bjh"E*@oȫ-X#QZZ"<aa4CP9[-u Đf?~EAf*@@&%p$@em|a}iqE WWߝUh?Rh'ݴ;vgۻ7q)O%}-9E+)}c!0uC59&2g@?_K,\%H1+wD"!;#d"v٣KYҿzk"Ev:Ns[<~5ft5VjB(%M3,;ϻ3VKYAxE{Rr-#䷆kjj@ tB}HIr%%p%!'쌩%@dffiݻҔZj[5Ԥ hJZ06}˂O& ui_mb:eU3 iGr[ 'o|0a=t?mx  an GS3*UIlL%/ gRg!ԙTEg_ٗڅ <5_ϻrW%%ƛ" !Ʃq4q%i ,Pۈ;\AhT 4%g"wjwSS +D ]-V [[XȹVg@-C [3H|C !~jY5/a u0I^PGiV`zۘQ [|1=ɔA[\([î S;#Ƚan!y 6 r?:Azi g0Fzgp={f# 2>BWv)?u캸 U3J`Bqe^7W E| h-U>Ť.l$Dj:Ag; [B֎bv(4N@qEu#r2+5F/% 9ayIe^K -93j1n8¶R9^h W3AՔU[ !)I$mb6hįsEO#jtK#!rKAYJ9Wz):xbUDoֱHOwq90Feėս_E+ɧ  HIAzZӐDP{t.22 X Z s-[Agm^> 5o ?۠=W+fW&j)))ؼy3vE[>0@+Uf PL~.ݕm_P\t" Hko<>$Cµn㏎CC4\>۲?c'UbWV F\& 'Qs 4TZ6$PScN.k6pnCފ EE8s@ t0?3Cb}krt3"3 >8y81Dv$šB}h'? vyP﹭I𛂓N#I&R#\WGWih޼95j;HDCSRӕAK-x^Hٳg駟A&-?`q0o+"c& F,*w &~<>8S7#ۈ*H}<7`P-M]VoCT†\HVZ@ IW@{ˠڟ2Nc8b~̨"; ߑ+\qVZXSԌe \S,k}Jc8_ i3g':ͻ7`x A*.* Z5䷥>efƕLiv8aC^srsi>n#բB0d |׭EץJӨɽTI5vލ~!**&}OOOj3@ŌUJ@|b6˖= w #(x3i'K1߱fsh2_/VWoqQ=),5t*Ē1X4+P poӤ~ԎD4j%^y`O QgSfԐ+a,|&}<4,;?Y©S9X`~I'r&-4CnwN;a<"4h Uupyޅ.67\Sj}~r:GU!\]NVTfb'8l:ՆSew)rwcI 9Ղ6Eb0gQ}?|8wQAz/e^Y V|4jI m"l9U]2:D[ Vq~t옹wks*Wj,m,H7pt.T/)I&14GX> F;c=y!Q3Q#[5 vOQhבL\mZH7PUE v9@ɀjA޸oD`۱jj1}9ǰhڱ1<}qC7OW*'eSj RRQpΖԡFOv܌IJEj '2pll?}I)*i35KK̜RL&Ep$Bd|;OߌYSWp>!qoL|>yb[3 Dz>y'1x8E854KQQA=P8m/\ Ǧ3,Jʑ"%µ*N&Jr9ԶB:)) гVÕLjk&@IDAT:2 J98']srx?L"fM^V{×6ҳ2P2ad{p }o"sǢw;gq,_i35Y 醹ԑYV+CM4Hۻ +7=_K,;BD߁-9Yb}Y6q},0.CoFB'NY0%F!ҕԴm94 |՗ꁋ5܋!2`1%%UqYbˍu {48X*]%Ú^!Tڢ{bE 6.^9zD`Dh+mhj&ᒵ%,?C:_arjIjH?D|Zs: i `\*/qCkYC]p"9: C@ 'p(. BfHڟ ;F˱1Fo\8cGѴk 7[X ›  [|éI-i Q P)z« 54e ?4EA9`FͿ͍L{-,?c`p/L$w&+gĂ)'i| vFM@{1Zvt!&g2M&*+PRf @_D> 1;2Ф{$^߇h9s $n݇ )$Ey9Bmh,#Ep'W8Xk`N*^[l8f  /u.CCw%%Pփxoɭ}A|9r/ILƓ0J(TsstN^ţuŮ]Ì-R,-mLwhm8T$g{98@CSU,0R0x.KYҷpBر# :Se9Q=x#ci"}ZU9Xb߶xH=l܆m,?-o33.h8bfkoS΍23؈~CUKI~or%5Iih=1~Й.2s>ºbr8@Nt_ıՋrLi8eW,f;>|i EGwcۺ䤘b⭸e0}J"f9^.VH;rVͺ8 i\iv<"lͩm 'o [1Qj[SƟB'0zG`P|MZe`Co  ` *Q|AzV@l|DX٪FSae+cn4e9 vRǦ`L=k={T{ڌl$3Y ޜd%Ŷ^*Q-ldbkN~^>;UXQ7Ь:u `+ z5?=+czŴGC$E6 Zw{IehI1IY{I\$qM  M<ʱzVEgad{&ie:mӏbx4 mv@,CyEY9"0oħJ-NnZ9#_Kdѩ,,Xc&D|&n~Qxdudj;L45sXcGwÚ4a|#HXa3ENFz\8iCbQj[C)6qEay8'}~x =;c_p!5,kdxpzor_F+2"$qǿ1]RjLz`s,N i^7!#c:g9+6VM0H邧rWtA|v>Em~CH(dRg _W3rWѬFvӗ3q"/ϗBItޙc{ D"b,ϻ[V=m:/B\}Zg`($砪E!:ԱO D#]1vK ctrj(Wr.oUr]DrD%s1 (WVY/8>$".x[㚋WGWKs˼:o^+ud WM@>wMf8̞ vZRz@[:!EiƜ29PWu[A6o |=|3Ns!zG lTSB#)Җszu=Okj_4+} /_QgS]PقEG/r /P*һGF"~yjY!}aFiHmu-S82SWݮeEe0hfjχJ]tuwmDZjAN&6D~8=ܽaILYNT?Umե2o`բH==Q !NA-`4^ GO7'VEa2V̍Eㇽ2wjs-=[7cs?Ǥ=Kr)p O6r84vS%.u>axq`!zSzFཟn#Q#(- "[Q grE(Iɐ^bAR86SY,ns-H>AݢeOGA-B!8n6pщBbFmSjaAsPvM'̏j98H 9VҩN1$0O "on"v݄,D?9rzEj;klyrYZS@8ڙbB' &h% [߁co 1efi~M{ÓÊHސn%m >}? AvS ΜR ջ{I9S(Ԕϯ !⽸ZWg(*n * Z0JYa6nZ["/Ѓ"}tȑ'UZd_us帩cpm<= oe 3P\.[ɛe]܋헜k^cA;`:.ٰB@nxOk09g+v!C}eYGF+u+א1Q,^< ? CƊC" Q GgjjqC]Ε~3UqX ׾V~ PBQGmlLF \$/ i.@@,_Kzm\zeӗ}h?\$S~5vd)( mzb;3`AǰKeјưL쐗 2hPkWgiÐ/gQz[.CCC*I"5N3鄦wocʡ(atHMeV-?{hڪJFp)vc<|W?tXf? 
nc*9 6&5ڎ){7`>`h`kd~=|`QZ *Yq 50~%qgزtr9Z c#N[ NۦQ:x.egMG[T+tΚXv4!73t:` Y G9*ki_%Ѫ](0u3(HQ;\a U bY[t%yȥI \GK s=l܃c;"lG<mY̥{ 0=!0>JE{-I $ N2zGo`c9sEzC%H;ip79pvMLd2gT✰@t4dYF;Mٺ^#ȽDA]o 1)Og#7u [m)E|?7DxRe9ьvaZr-//UoLhpz=j1daŸQ2/=90楷:/gTVF:M/LͨE1e^3@)K+qZI1z9rJKJ"r֣f2xN&@OrR믨8VPj2Lv%ї0'f/h%,ƌrujWA&欟b˂(w?4 mD,L}q|d;x?Ǣ%0s":AS{QrliV4g,#Aorn#? 8U M~eNyS^AGRee)ee:mf.$r/NSzHQV&??y?^̉ۘ.$ /:[vCwbÆ,[b/03Do gGطžje?bbAwwƪ?thhq5IJ,#L)E< +HuلK5l`czcu\τ#iNn>tY&dG5pJG mVz=9-yO!C~{0sTK:i-QhAvzҬIi?}1 N@TAQ|G |tx O&#%nl`T ۆQEbXhy%>-ѳ_l>A8y &<2~Zĭ?6mr>ag~'2i{sCqlxlV"f!txQ~_#~'N[:iV 0>Gs:-YHrBӥ ,^-L2-凗᳷fa@o;ؓnYJ4VuYDOь[1uFNO܃E?ŗ'uܦx37֎,q:-E0ޣ`n;*yKRF>H__;MC7㣗rPGi3gc0w;"?!_>=6yOVf{&B>1OECkwmJV˿zM1]9ח$Ĵi=z9Q}6y.MS![>ۯ #sDܞM}"lݾՑ`ؿWĦ!8W,Î]{Q?3BhڢEdc!M|]qNz]-;v %ىDJ`>+9iKn$%9qzN&c?`Fg2۷$-wt*gH`ش}NΤz= ϽX#?ہf ?Gz?و5Q{vRf1@Y+|fOǑ2;ypN0`%jw &}0}4ꐇsE{,vQ{)u+N;?z & H.ǐJ?bݟʱZb|JS{ Z vnc9xBhVVkW[6!=d' :wjYAmEz$M99?ƦD4, 2K^4 'C[ɉ)/AL#(ۅ[yPkeJOu+kq.N)v|bfC_@ڣv3H3KkxG? C 5s!4=B]yɅx#0 ^~v֢-#EfAhBda`˜"ڄř ){"k;An}"@ÇK!h:倊 ;D3CjOڪsm%$Å]1GȯKA-O;طrz& }Ƀ&0z= x#o|nSC b.rV:mxsh5= XAA` ` _{س} 1݁{:$ҁo].u rg?/c6'm\o?1_̇_0=cgqq<8a+>WޙNhşݘ2So{=`kg]&6P y4L?4l:gĝ7C_6z2Y2:(s|2iz{1V ZOEt%ʷOV&iS N|[h;;[.g|ezBRxN;?r|3-bM*& v19_UHrYZKV$*e* eCjYQ_uSA4mK\E>'?A~V5S[c8OF:U^jک׊kPCw{~RHu~>jRS3s wu.y* ~撖s m5\oVH;ԧHQ(EgPJ$O#>ed]KۍF>Sz&&[P#" 3qy)MmBD#n%@D?>6&~i~XLnJqfǝNǷSfw߅CQ9PQ/LMfh9h&09;,*1v= &?fCeryAϒXDIj%gr\KbF 4/Z[P_8 |0cN8/ɟN7_^48T=;?^X6c8=*|Bd1x6ҳ]RyдY,7L6|7V״ߕWsltX M-ag`Ś`skGa<S-Dth⽢jV1[=~?_CcI"N;0L]Թ^y18ɡav{IB0d@PWH$bV'Wc&㍡obȮ#!+!HTI ZQJ*Ri>"_./|H>1Ñʭ۹kUh^k(gu~ԫ @HΡsZW(~9&ڝ˹v5oʐZ)WEVj} m0\.>z 9+ r8"ٚV[%]_SR򜜫rf~rv2U=_- _>pP/9f(B2]mWSV|aT+`g֕*d >86Bс#4Bp.hAn D8ߎ](me h#He\$j)eCkC+, )-/XI3.9>.y"%bYǐ4 KػK#-K=ѭGGj/H3]wNs Pۻm+#WvΥBSLĞ?'^B6 `t2ेf^c'ԾŘqtkp9-{ /1Co}L7;%{*Ysq8;f8kJz8st`=kG %meƬ5+dLF pK4w2~}'3Lzsl6iy /V솓AF ܸ0`7%:fg$ęB3a50S Cz*;"zzY:߅Z4=8q#bAFL5 k(DbNZG蘱ٴ厳WМNe:ڊqt+7;i?ps)e*Ҫ;E4X,7cD0Xm4WiG$̆K;MGq /u}Bnݩ"ɤ1^0|ȸ;^I4y֤ T<}Δޑ dnZD{fۖ^ ZVL4] (3c _. +G4ȑY,tP)؎~2Wd|( g+35Jƒ<򩄷 kcde\,݊f!vGWq5#,?UV/v.z }]qgIHjݐ}] Vi/s$?wRHs t]cuW>Gr$5O|P˪,Ю{|[t1=:)` oSfh("W |e^XF=Oě\/wy5oT2ЦN:r!"HڣPL@Xtfus$ga ~?7"iIE42$8y0<(Ù<8 ytP.I>Uk4Q]ѹu0%{]Z%W cs/ң>{o<:GЄDud&fމX,^Mb]ȍILZ?Yi'tR5`9 ' \O6p@"ӋKKdH6ו>b8z[Y2>`%pCK@' tOi?O6AH!'$JL ÙSeHZ:C]^DZ hd>o˜gs Ll54C3lϷ萠5<&B pr*F,/KOqdRx20GMW/ v:9Ȑ#IJ; :;Q2Ñ$ISG.>_RWQTq1Uͭ!~jڐvԅqf ?y Gx 2&E̩|8sGT^匧Fg_'$tMhy"*L,܂G3cqq6iZ TKػ ȰKKIs6n{ur.0w@Ϩlc&avsKXHz8!95| |fR\WRq>[:=z[ Y?mxs֐|{_¡S(O7_\{|,Uiس7ctNܷٙy>/폏c£#SO^-`P.Xg:x Gcv_ Tlw:a%Y&$2:O^ a+(wPHA$@Fg;bIj0&C@o9BC=#1o㈢YCr\2!7!ZZ¾oNwV(SժƤH7B.gutq9&n* N`{ӱB)j'fx;|W.Ù| R'6304X[ &7xWS"k毀 nI^ʩz]eIz`bo\F~Os Τ~!g%' :z{,^xm<^|H_x{w9Bg0,uX΁߭}ሹH;7=ٕmR*ns $xea#i,QG&&?9vLE#:ގLGXua^hLw/\LǦuS0I{%t-,`pYٝW蒴Ѷ-i͔k& \ΖT_BjyJ8V\lݫ )Od"2U++e0b|rp,jggZHr~ȿZp:@Pf:˺G>/`(c0) 4:Υs.-CW>ef}.F0r쒀ZXyd9rj?0:]rj)A'WK۽'s@hת5\<@ 1M g45HNςCл8o'?E*O>hkl}7v:]l;bG=5QQjٶ=63Gh[#n\4)4VYHwaLf~ W ~߄a;#Ustc'>RrvFSk$$2%_|C_Gil#D$sJ^Cl\ʫ L<>D.-niCF&Yyq s*eOT l5xj TWщ:v†/1({X5k\̆#PN=9'֒9L7$yM;㾷?w]C ElBv"Ὁ_a{ DTg+m-fHQ=GCMB#4#˕ ,9 8 1v:dneWMLdo%!J؏_>XpA2ͲB0ɄHVZ0P_xX/]^a/~a̞}G E37Ĥd<!L|deم"% u-Y.?! m`'3" G,д_'vYa}2[7(.&QMĽy0ComW af̜~wzҜ5㋞_?[ÇI :j;e=@EY)V~{qj&Y+:_} =9nWmp0m:!aZ̙' Yz FRcCȌ+Cg1$vw#mj(z EjV8~fO#pۈvvq8{Iަ7Rqc:vhDؼK1Tv~7,~}7cFEuukJ] 2hpox e^l_]_E)4T.M1J"M"Jr>CΕŪMF/\!h|}a0~|[?g3RI;Ѳ:t=Hic36WW{ߢأd}|cb^nl?s?f:7_Ac f̘wQ[Knz^wDg/ӧ'+vTPH;!@ n{i,݁ޝ;wʙ3ߜ9T4.m>[6Ӊ"UJڹcpnBzt0h@=Fԥ"E=¯c?Er,-u\O?z#i*xrE8[7 7L1gM%SquS`F7V{#1a*gYJ@\fL_? ^EHčiIUD k%/e)ꘐBFz5G(npv8+Aۑ$|(`)6+40Ue{x=4_Fr&l)/4>i<oݺ?5WQ-ȑyp#ޞ*z MC޵xyg |*\7^D9g]1{*#H[]bpߖǔ'$? ]8߼q Ss& ħCcX'V|bظ9"uX Gg`Gy UkqDWvE iǎ";7=;z9ވ`\C0(v(=QOE3?ןx-:K vC&/| L. 
(3-\Y7o W& ˍ@8+@jK4t"6T*.^* mN_dٍo>º4$XϠ1oOCDGr0mpu5m[{ozKI~.nKll '0:Fҵ0 ak]FPzХX!x'!˦ y*--Nް >N3|W79>dM3Gjhɩ^5IOkp3>TX b1Rw7+}^pL Vcn:}O>u<>m@aٴᖣ p2G:R_LAD}B?DΥg=FsCE:ˆʆO-Uۯ c/;GMj%^I___LdŘFoi򪮲 ?^Ct dXdt78U[U34D.eفdW‘(ɡE LourļDAW@g#e r:Fx:aػ ,3܎̧Uhd~" FjA%? *[#D"<\2wAgҴT`^t*ܪV|1`H,_ Х[OD҂Oa:3UuxI(8LCu8*WO&Qs='׭2\Jb]_%I9yJIӌr 8AT[x~D_7752Xcͽs t`gggWWWC^ 37A(4NY}7 "t k@R%LUt,ACXN7_6_LS@N=Q޳ 5Ǝy7,;Jptim߿ZMC]Q5v"'/_CڞQTmķonA 74+*`C<} e*B)ږwZ;,qR9&%`@IDAT]#MՊ\Y-`x~ŬkbBT DHgdݏ#87yvAlH(p+V;mV$<[D x\ WZv,z;'6mJ@z/?% .˾ٌZ]2뵪JKBsl,z>p  K,I>m~BzV1|2hfV ܐ-mh!շ-3dT,;{XޱAЏ dZů zQNWddӡ۵U"k1<1X"&#b֭4PE**]@nrqqСCǟb˖۷7wy3N-SŢ\3ZZ1bn嶭X\} y_Z7%Tyz>pqC }ՃH]>luny0!Eύ_y3>現{o\4i_xD60gdS7̊eؐc oQŚ|VPB%f"n{z>z#ؼ^AE3mWf@H3|#\;Np@4s4e:lR/[AyB~Q!$ 41@HzFKSUxն[лwhaҖt2{mTݖj}v1c{`_7 ɓ/D :_,܌y_(YkvOX<<rQ)JzMTh_Z>J~tD HDyž uSNx$g+K6?5MͅǘfeXh9NT. I$wRw7,-nGKN3U:kh/0>cyM,hK.</QlVDFj"9۰:#'}Ƙxύ{.\=SV=cL'.du\s# 9Q;P7G6i޷lZ3'sɒDGj{77v_JB M:Ү*-XU[J+VQ3}xl][*w)Ski F1]%\==ݡt%w^iF nZЛWu%bO:ڑn=OEUqPMej5o XЦ5勭J 耨Fi):UrZ6DdGxfn@hƋ`,!95w+izRgoC_ v зױ6vn2ԾqkamwҜ# TSvdj }Y5kMVWkKqڊrP8wQk'[ңY+l,jU+GՄZ2V1Ӊ7FXOl@ΰ= \ [kQ Sls-MP.Yb s>մKZ1WWACSR+zO8j8hmu13M1'"D~8/tIJߖ17+$< """kP܇{yw=QJy0]GZN))4G"ct6𸶸TlÞk)kW,2}\;iq.(a{S,lʗ ''AaoO|z 9U֦N:rd <廑Y;6*[.vka]C h+[j/S^Oui_lܕL#V!嵾s9RUOJ, YMPLir-O[ HXكVĊN5Z~BI}NG,6e ``KkPV~ʩNqӦ0 ma}(+߄tOTxPPUzAg4/Kx8rb)/݀`[عCrYQfL// :G(O1!X0vЮ;:g%MjQ`̝;~;|Śv-W֪*zwTtE2  D?:44eӖr?@It<;WD]}5ȣĺD>{dVsSҒ2qC)u2]Z/M wڵ+ϟ$ziٯ Еv.[ ~mCN`IWy|c$=DB 11>/\4HU8i4,gT`K;Z$!掸Kٖ$u%m]ʬŒZ fNq˗_,EuZ ֝og+ *肁6C9Ql}1/ZprwrKC toLqu",,bɒE0)ؠ˿m18frD'J-ɭ1 q!iL[4nAt8Hr=Xm)iJDj_'{Ù" BЭQRN#Yr5zgN1җg'Bp5janK7\W!=PN*8"ZڼNOڤ_nZhij}Sl(Р$(Lnݢ[S+^gYy9e//LUƖ%0^LbJ6MIٌYRġC[}Ŧ.6EmƸ|Vgdr F(QׂmK8  fkVķ/¼Y/wR瑎*6{Y^,_Χ:¦6CQZ: ҂B-r6}BWQK:Ph!jV#j,Y 8(ij'R "T i'*6&/lC7_qL!fl_Α#8? &QYP*άym%y?/:wG _o'RQTS2<7@s"o[mhF'7!ft>f&CdEl7`b|};JE:Le7Ykt0tu-#1mhD|D?R% 1aI{Bp t!g(B{(LMši(I,__ڍ \ 2DY545H;Rk/=wT HRТDƙ9;f~5 f(#"m2#~&b+sK_bͱ2wxhrӎPy_WBO<`"^WHK<Ҡz}wQ9?};vb#w"=(cH70W2'b|'d3 ޳a-Rsrruf^&h^ Gf)0dqs14\_xvN̳6x!gfx Uv|7;xefq'HX#zz^gg_S~-^['tW3K%x }9r00~5>U@re12 q;a|۰.y7\E7EXlݪ෢>xC pGm9]E/@Oה<1݃#6 f}CΖem.jh'Q@P̶nr(:S`X Vkb$%jʲ&)ii`a"bGkm U#p,Pf-yH-$gW8ۍGĵz)<Ǒétq"íը*Ác~ݔCyNGק܅kۿbע<5kD Pv.7Dz˸p#ץn/ǎ #Ncwލ`Vcw\z[B Kg4깹9}{戻rs{1f }zr{XOees_<1ým(#lBCmq0g]ÿE 9>0 C€X -X5$ !TlbFUF"!POA2D"<ut}ɏX {PSUT 34}ĬpD3;V )NڹOᕯ}c9vRei>-Qwiu8BmEQ~lqJx"!*ܜc&9r@t2u a׵DFѦmEŽLYpz;jm^)e3v' .6bі/6Y8a]pIH_; lҹ1tb4kD dxjd傡Gⲃg)cs!wͿQ^hs:4s7jͤ0](p9S!'.IE]f\SO.IAKdzUP@Wɢ3Td(/G-O@{8Lj\C lBN;x[L٦$.ztF6R!sZ"k#=ѭ(Ό1só.JT;*0ZsP3ec>M~5k)Gh^rtW8@Y@zъ,7^âZ 165q[yx"͚QaR3 :Kڍcpl_&8IʼnĤw}2~`ܭpd M-H%VuѸc"IT n(/P)SE)7®=.o'ôYiI%l jrA,~w3F=n/A؋UQlp<'x3B\ gXN$[.K7AiCHJ/O;Gl|RxSgb֗G`s?2HZM5R)lNY oHd mCꚣ2Alj79lT6|Ơ@\FD`G,`xNW{OWScAѮKt4& (pf "9Qf)u>C6E nߙ -P& [sŧ'Tw@6P`o&\`F0UQX A@Lޚ 3' x UhYD ꥔cxݮ.mu{G ߴO}gXo .:~|tv޶~*jE'0+삀\hjс8z4 WV4n]᳼pOrSyҫQQjggǓ3Gwv웃F4'azKpj8 !CHy P^u>I܌:L㇗մJ5 % CTooǀ  W_GgEDBcmMQp#qh}"E!GGKL\-1yH,qS݂0w @kckq9]sōFtf"~)6sCMblf l6q$&\/~(ۙyzu*҂-F7z8Ǯ ݈j 4R*w H%Bkz*lT`R-1(Жq:?sPLEG@rQ'@ץ kẃiѧgՍ;qIQơ}zW#2ƴA/-'VZ%Mń_D؞D>T?ɸp=v=|DED{P&!2()N^^Fxf\!@("V\& :Plr"@[VkkXJDI?#`jW2cym]4#cx9os#~{j迈~c> 9AyE5|_Uy`9)HSZ{Lᄚ*'k [Kn{uP6b^fF(gO'L]ZLvt5\X~rO5<=`Dʹ5SJ 1 #9&V7C7­ [[ϢH{$,*@[L~j:aWz =`(Iȉ7LcQ)OocǪ]ݭ7{9sP4'3)(pP`޼yxewPVu>jq&IKw*FA \ @9r*0fDpq啔᭥mJ>~s\m]|W}53gaη? 
_Ac1Hphҽ3ëܴ́vmt>(,mLWlt>A7:]?ViK\ <a#()S>%AP+H:)uPʺnNҖ3պS_I59WSv~iO: uȆT-h˰ڦ?yPPUj Ur1HJ1\{@q-*g'l?6ޭ}N%F44u 3CB0 V8-&qὝ;^no$L遟~އ%KG?Sa=#۟6EFδpA}3b&RNK F8NS5߭k]|@k!y_Xjm|u$~t%ݩޮSu8]'஥NTn TqxNlےp&$w!N mZIChH"uHi۝M8÷O$\|$ϖ>9UH6~L  ~Wi}EYE 39>œY_'&BBל䫎KbL'd"=9YNA.KhjR,]jͨ1`'#""M9grϖb& tZ pFMhY̨La F(Yׅ%&^HMtQ\Uǒs++<^j_"ej7b b͈q1|2k̐å]>f5 \?JKH Soo:ݴa$|a duT|rp;?X9n!*x-h7ڀuss@~7#-CZj!2JY.RSӰm>TPYN],<%哵tb`M Zb`xڦ!T,q#)Q@ƉhamE^Vwԋ=`Z›™ح n)TAg0Um*KrZaokoo+C nD1n/T0 EYS& \l } 䞉|݆n:]Yixiue̍.&C*nhf O6UZjN[Ix/áZ C^1h.e !ݓݦkƄVA# є誫0k؁RQՅy"pCx{NzrpE8ݼ([%\w?~eOAN2_R pC2ˮڏSo6\-߿F 2 <>N5<⫤̈|%&&t (C [RrS Y'sDkDѓЭ ^ot7/{$5{ eMnjhNj5zThԼ*4PFAӊ5A]:9ZÓ\U//'".Η. ⓖX@|.Mcb.8]?2kKR" "1p@P'anGS!a=Xrȧ >)p/nb{'XI#WGETݓ*AN= t %7k{D"D.6/J.!{`/ 6b*FEE-M9=U!C|L_,O0=\x7ϩ~0e/p}"+m3l({VEI;QR4}! f"k@$n\," Q?N/h8VlxA3WAt?u+ޯHܸ6$M7g4N 7޶;`EyH37><\:S1SNSDNLDCѨӦp(pY\IUVaonzX,A8DjzE5É_EE#?d;dSZ;:;rAɃh~8= i!v: k#"b$ëסb43# z@TAEjF8T[omHo]͡|{K VSģRV3ZK\53g于yg |$6Y,CJ z^) 6Gmכ#2SM= Lgff;QJXv?&Deשo pT+ؐ1_~U9epΕl&zZB@GF4+Vśfc#S0^qnCp"p:v.ߍ=xy >_ޞC児`I Hl& _OAMqh{Qpnߊٳgc ´ǯG|8^g@X2Ԝw?_L_&n[` suD11pLW#51 `+q?Cnx_ЧO5Yy$N(\Qp p~3]`4[Bj9DYb舢/CUN*UûJ( |-\E{U<&囲"\H;NwŘרnPjJٵ|DC@p_ɵg,%+"ܭ+F=XB`F4úy{ }b>NM̀o@39OCtB&7ꑞmv`D|}"ܝh}{b((Jb@ԃNLW?8 GC;]-/[w^r-RݖsrgК8n*nCzf(Dؙqf|e'x㍩ t#E͚Z@%NI:oGZӇG`JpZEOKDjriS^W3_Y|k)e3_ETY(ʙ]< 7SH%]Z[hxAQ`NYxzlihURJ(QW }1큖֡bGb)Oj?mѱ@3b r"#GRadljQKKJɲzriۊ5DgA~].Z)5ӜtL_V˓%r UXsX̟`&'/vY}$T\̞" Qez?jmތ׿݌H-݄AcQCd֓vhrp'G*)wd5 ^k <0" q ~gUX@K!E'R6S7DU|Es)\ \ w[^EDL¥m]ʙJ1QҒrN䣌 *`YZ wX "rwS=JI6pp4& 59x'UzXs* wuTWoi?"3 WAaj| 7$2hưLCu(ƣyzoJQHy`rs#lNR% ,ߎ s{d tSW噯ĿM!m穀Ìz8/=C;e64|6ŰaaOww;Ey͊\uUXLq& [ہE1XU0@IDAT6ŝ? \ ĉ x{'Gc9Jc|m,_q@f&@ 1))pzˏ:Wq3>z=ep"ʮ'\zruzdgmOf @*OXSiƒ~vvMn\@XP;Dq"e< \[BZw 3R!0̃O{_C<ɞDLM@7Nj wWGǏFB퉮((Cx#`n TlOI:wYq*H ZWZˍFXjIqJƪUP^PhW`!`>t="P|Aƾ-AƯ z`GqN"/+Vص(Zs h_*qUx-ɩWީMW& \m-@@:(<_3r_G걫go}% ߆\~NКF "Q+G\쐖R8)#+% 4T[YEY:r_=q Õ\"Ӑ; b;{YMEB:P̅ʮ%hőlK_GV"ۊw*"4:<8ji;o['WnZ SQMr-g=%sH QR]1ޜؾ_]y;O$&|ݥ1۾b77GKkt; +Z19Xo[w;ܾ!+#G{nl] 'R2aiDzt(:V:n&/` pQf#0{,zCʤj$e Xz)^ D kv,x5 "hH@S,0M숦Ŏ . @MԼp pnh S^f#xPl YѦ8ZRk^};r:z5U6NKL%HMʳ-iǚ@Z!]SNSiS2B3mQn4yR <<6MnJvyD-UͣU8) 6aqjTh<(ji/8n w[g/~ =u͢- ;J C w  (GQ^觓S{<%-HXSʑ>ʝP_DFy[sĨ9vs*S ZP[û#2$`1wJ:bx^l$(ېWJ"#u+?^֤ŤH L{&1ViUvPF'bpuu8v-߅t@q\_:|4 {!= om(2=M&Qmjʗ+FEW*r 7h@SZEiEe-9z:ԖD财Ey&GږjTZUSƱ V|Z½Qi-JQ%EUpsbE!KaxO.P~:>~t}.LUz8JIͺ \;F7Y"ZA^rgEW5+fQ9ҐMd{)Wkid.'yM9Z@; 펩N&0?+\cqd1G!j$~ %WAE.-S|@ӑKEᖻcš݅oq+Mn_3JjnGUu&lF*E#9BQY0t$daGp'ݼK3=ׯ,>A-w OBC>Up E4Y0?/^%y4lј0im_J+ŭݎ(,ra(\7~F֜}'2e82r=nXmu2,\!ҭ%x[d zrgDvvf i[:/ȧ،:jWkAE_'t ?wR,ڋ&ܹ`[6iPpT9U0=?7U&7X'H; WwJ0#7?{F4qdy J<"MMC`i'>d]P8E2ٻ{/{o@EE((*b.(E jBz$^9'7$!xYٝݳ;;;MB~@[l"4٩Cg0k^|62k`"5x"; "^j-Q\BjBiMz!ˢݓ d==c[)'g? Ⱦ^`K?Hˆic`w {8^{ľxQ 3]A@)4^"钑"X.n@ tz8d>{jOg|^QhgsI.M}y,4?S`GOuCSlh9$Bx9baٷ3,OD[ Ծ8l=?,|9vRm:w@mh,XDOOa )(oߎ@ X|="n?^{ 0$fN`⃖mM!xcpfr,4% 7` m  ߣ5o>][p1qPVAӶ{1-[0.|(el6tEr9O&(f |10x#x` A𘗣@eDK&gyI' +I}҄Ǖ'N᎘beؙaaw i ьRxC }a}lm(etE˫'h E푕K8I`~;OAaMн{(Ӊ͍PjXxyxXyrK4Z`#@coX4vs^Ƚ)ΈŶ9h9:Geи!Ե'Š'3;k<(o oӋ<#hx<,ᡰ#xW&fS+PL:x:`ǚ? plW2O 0!,t? hvNT3+p¾D"v' %#jHbH[E@Y$?[ ̥?.T8}c~vDyI 5/ ko̲wkDh҂bD?p6nkԌ<ѾWTFYlE!b; KG:2-q6mafh`FgWo}RftBQr#&|jgO︨x\IF*gs!fI۵:4W<5_Sjg-x\J)% Ϻ$J?7-NV2S10kHTxak<D2򠉕)GRN<|3CRҰ8- [>Frx۠5}ϑZ^;>/,O6tV) uqZ)e;g=jۍP*DfBLA,+ZC(S2+>*AΦ1,.@inDT"iaC]w. 
C/jdECn:Z݆T+-gyn7˯18W{D_Rb PyK/8SI Q6zItA}<50ګ٤ϐ}X?/r,O{}IMQ x:3ܳ3oB߁R>ۏh,[y:@f^)=37Նenk, n;TtHT{ٻ WN&9ً6C zǖ+Ӽ,|g:O{݂uX!Imyt>?g 9-.1琐Ls%UNʛF`Eرtlo۫9SbP/ ?֡7O욳 fФ-ZOgX&.kFh hֳPۚe/pr{y;bΐ{J:5qHH䢠k iZaI[ 쇽釱]06jh`?.xnf C#/=fÂ6/b`(d>~z:`!LJnK,l9&Zbs ^h;iggA,BΚ+cJ|wJa1`^M0IN28nJ(_eXKk,|gr2wno.@hـK;N,1_ů;\g1 f'zuEW"³r"TyE6a"2 ygd8+`IMnP+v "́Dk-uz8| r-@jqOZl͡A0"rkH hY@}CG"_+ *x m '?~^s_0Lx8 ޻=', `A!#dg&@v!hc4RP52AέIH06pkg5\-Ըzc3QC)b:-{?u0w9YڵipX+ C[/<6}<|^@ Vhڧ/zvvҢ [evmϦM04FG >Ӗ`{?fٍ|C^:H 8aԟPj`>`FXp~|;>a›ؼ$h䵇+K6I왻 Gmi(ؿWmAc1ضc 6 )+O>"lVD w;'{*[lݾ 6 k5xLȫO=/FGfGI'H4ԙ"H?2*[n I Neb DCW`R_MrlyrEP\b2n l;i Y8i<7n`>A&rѱG=P^"`B]U7:&&QeLJwip{"1 'ibΜ}k|A-7VV:? : p9y .(V{`uv<7&BxZNΊHbVPJX&cScF>TJ[%*'P)m8[K=$cy?z4$vFt5=mI~>x sz+MuI2I$ga1E3C 5l%#.eT>I#g塘8kHQL)%h5u&i6+:td؁:{g,r"4()^KKb_[+3O fJ3=ڽ_.&͉}L;BlmoMU >#Ѱ}U ;̐|SW?&Fn|vzHTa1Nbd_G'=2@)N==C:CC3k|oOuС)+@(;̓'Xxwf/:aNC+q^x?0 ޟRKhqDˀ4˿ోi(/q@"JEJ/p@.ꅧpSLVW*^~S(YTeW+wT߫Wj풮|vyW;2_gqU\ڮ]<' MtPQu1r8 w%<0kTQRZF%, ܠ%O+иESċCWs">Z STN_2eP_7sǽ\}δְiHt؎ nood^^w7Q: YY8p~;O`묝ЧC'1x۴E @ {*R),KNB닼ӵ'|po:YlEZlEzm\jR[y+ZQqk"RцI';~Z_:[ɋ)uNqfxgx Q[BX:x(GbY{tV i }r.g=҉AkЈN(Mحv.Bz-[⿯-×b089qf|9UTN9c;PxuYEXDcF/CDycXՊXh6u7"Z"so/+J'υ\9A=rowVC)^t~%8uii~a@_-{̯s5ʩbBॕX{kv2k'1y;DBUE_Kb}&UIQI\jת)iΰ8r4;l×_n+AChK^ \!dǵ}k~\оQQ/z޸zN_Gܠ fPkR݊#z+BWZuu^y6'f?>5(cBk236`ƒp nзgnvAԬIr\=qNzcH\Y_DĜBPfzйԎ0y%dhlѐUfhJs2 ѨCcX1HǚlD> cWo4ིkǴL֙Av*M,C0B#RNFRejmT!.^.wsߙ*slt#Y$|]Z;I25̠|KjPT6%jV:i2L}yMA+V P$p *ZݏOGaV ψf8w( y. Gaw<0y*q=VM`BIqaZnLԨ*%^FQɪHu7_H̺> .ת:Vz5J6mKV/Zy%^Fզvt`a7XwFNЗn @i8,>"Ӆ.z_G:]FXD1㳭wd2B匩{A們ʏ ww'wcͼ߫l'nN=BSmBt-^\6ZmFl8/rc3CRc.?V%Sm 9RG{]7i={f1 O-LF\LD& 2Z6UOZY(P_#hdJ}7/i@e` Q)L1xP8դnIhF)ptBIR)4%8"G|d@';LI0 wi@M@ m*#IhSS{1y"APQ'鸟1dL<ݝ 9? MNgp0ޡ@n]jnm8edֲ̞D4 v=SNEL͵6 '}QcݰoR,X@2 /]BO7dܩ7!FV SÎ;P𸌨KljG^A6<&9KD`(9MK2g&ҏ]$uI̪C谸Gq@Lж[x!'BxiP0D.bEȦƴA6hl±=1/4OFoȹ pdn9צA'l['e_]'ٴ2BޅR;e()>5{Hl܆ٖ.ch׺2WnVw=KGLZ'mBƅE@^=fnճ~Ǐ]crju˖> ר+G(QN\EV+T:.D>A <_&/@BG`2"t}CN3jpMcoG2%'YFI.?ݱz) M!Bi}60e0rq7vsݜQ@D_U-M:8޶4!GCHwS.rX%/K^[L~!فo6Q7 2?ۓt7h7AY=v,og>BjN0BOX\.O'=( Ehsaφ l>,R7l(ls>ylch!M} Pҕ#9=o!B1C#_,O-%rjl3 +SeD di.? =ٿӹ' q+L`^1?}:PCˏ/r|3lO󕴶efj߼dsi^ooBɋxآbXvti߹a"V3\ ^ō]Q@GEq!fuhQ`?\ɟN0BrXD/Ѷ֊(?x`}H=O[8bGn Xr#bbc KM*$y6e+ Dp1ٰYN+}#CEWCe)KMT(eH"_24"PGX%+ yݖs\->ƛV "K`iNڴ#929c2윳Ado7ba|_Lbl 8l%>4_l%Yt4vN}~ C G`Kߠڃw54N?ӛh3a"&=|q C!~S0qU 2t8ʃ'PzQA+0mjczu6{F~/$y.Byz:R\j1 5/sV*Ov)aO~VL;~2)WCpEx&hHBzb,=1`t3L0%_F %cת0BMEur}4|h8$)|P#Ֆ±ŪbKfYvy Uuq3&f{7yXpr D&־@ƉVBl85p+uh_ dNeQy9'mN#rQGUrEkַ)җ+MMzD~g(hsR㹋N~a\ ` yhPdr`+` M)[8.ޑM2EzE׃]6@hV7)XeH}vȂ+B<u9݀\vK󟆓q*D>[XvD4-gv4ܹ(ȸ29i> cPMk8Ҧ(CZ`gOݸ}p/V#>["| _/[ 76?> J\Ibprbw'ڙ?8laj.ᛉSd}`Fij/KE~ݞz=ɛ~C 5Gk^ xa϶= V}09g[)UhB_Ǧh2Î}xHDW`xbZ כaU <ڒYifAC -m<^@ra.߄ =KLѸk{xPuulfė1$lh[wf~m(9˖L-i bks`h^tH0"^K*ͣTkEX9nC\#|}&8yCGx*&f4q,pmSȭjR+j3&;✤xxU ǖmd6V%NWW"h[?>{roE Maja"z[/=aPd hAbNg}E|Ncj-c'{!}݇Llp5W:}-ĻamT#Ch:ZiEGI:Eh&bCN' qZyWT8#h[:-+x(40+@`Zң5㏃tbC(vFZ8AX"A,X`hn;Gp>'XɀkZٗ@؟iVF2S@Y e4K=+)nz|gpKLmUPG|}EކΕx*eEXUk="b\,FaѾ[ "2"NtիW dRSK_J aNtgvJ]T/xIFʭpyM"rIG-$GZ2I[n!Tu $B/뗍kHG^_ÚA@&)@U(F |'t0g].oeMz:  ҉h$'!Sk4oAFb]W#^BBDY̓yڃV#C!dqF(Љi\(#\ݧ 5CSB9LNJjK'GҮϙWݩkí0)ȉM=o}[J4ѣq? 
yl:z$dOO=*rҲᣝ@O̘9$ޞ9ִ+9=/h91 MxDo ׫%Xy>GK((҇(@;[,rv1> Ǭ@C =YQD8FwE S ­Wk$Z:5pr0CIA..%glK576-8dm`]y&jkD?{f%Q5!ؠeRV®C$BoυA*Lm hګ}lTC"˔g&aXQ~5ٿ1,x"MZI%GB$/slOj+/HsE۞DQ< B]>:)kg֩]y_oM;Ɯij|AmW=6g5ضmKe6fk '`e !<5R8)d"2^yץOtJ9*ERAԺʺB:H)Z^^*BO6ݛBJ&]9R<5sȴ[JE5Smj^uߔ;7'/ I0zt[ \D| f,؃]ߴ7WJNwu%vF6߿͔Q΅\Xw9:Mqz3a2$%p$ k9CW/'?D1!C&_وTuޟJ3?ǂg͗0`n9Ocso#kh(Xo㗼mln*>1d ـw',9EQ(Q0p@IЩKM̄ J`DI'LޏQIgCz\Yx*8=B\-gV\劔/d%62C'ԕe7CG64V4T{ݸ@}\$+x14=|1)NUJRoKB*|\_aK~/OC#tߔ.;B{w<1?EP Aj;Y9s& )#&o.BS0iDݽJyaZe7  OXu$;$#[vlCmI]~rga'G~}cZ^#]QJ^k2;?Kc+ o˫vToz*_l*~DVd/ax,t .DS슕L1wTy*EakigGaxu>/E"1HqMmAO+3< А&Qlc3`3F´ %+Wsigjn)j8(M@tIk0%I *!.O.Mq@yYH%Xly#μoOe)aFFND+PA 9OŌh3W4-12lFJ /'lXYO(D|)L}6w8j(4sLVu ;ZFދ?spO+LLNe ۈ)>>@njb.Rj@@z:R?\2p~q<``mBӳJɐu r/ |xX 4R BBS(&G ` f…c| ECC}rO,.SlH8Jc?]leeE Wwj"Vp|4ƎyD$&8X1'<^m9KWoa~1 ɤ<*#7 Q?f`Xq$~toIiW0DPjrZ*n@r$`F4YV08 y_j+U8h^io&s)u`tVȞ۳SpR:ijB̊(OU=@L; yQ>n)l| O/o]%1qt:=;a)O: qX3g?gw/q2b )Ov|vƎhEA& NeY`gгK9o,J!6BxvZ}vnxH{W6 wFpG-wcokɸ/ȝb{+=ݹn: "8"Gu aZSb!ȸ^nXF!H4Ԯ3DR/!56Lj(^70Ѫ>sZtQc! [Ӧp!8à0k~< ל)Bb݂Cmq=o,&r"`VP[|@[g7Ua#NʖS+  uv-Pƕ)/5YvŸu{Wf#NчL4 ^OdP y|9m?N ' F BEC,^Fo z~!v;2_ꇆ9 3wx\+ %V9w> ~]2Rr$/0} XVSW?|>e2bĂ gtcj9Ui黗"95qp 23 Q8qfs) d>4)xwy*Cv=pj'.$J;je-ט8}oO@l iq4'[J%D9ּ1|~ JE9l=̄t: $cOT|6mCю&YQx&r}M{%/OǢ;49!ƒvt-څ"oY2/c7般'8p^@PlMYX4%E(fl<֞HDܑx$KQb:`nf6m_U ^7s@<ьȫ;%U~\C6(ȼlvd^܊Ru%&?#'.h7dAh8p/Bg>zef<9kyf/uNz| ڿi HWzyGQ&!)&XjcYY)Ī"?HE X|Nܕ 5.3JKY0cDj'x{{#g[4o]v-mnQsVp@)E%a۠Z:Q\srWtyia[pXǿ?All\Б ywnq/uĴ+Lx|\5mܠה| 9aJh[ ^&-Yb߹[?_#ᅏ{A+fuOCR9卣 .ko` : .&&rS{.G[j%SiRYr %ٻaB\,eHKc4=bNÂЎZ#Sh}PWtNi|a) )piw{ 4c%ŽBGJ1v"jB(v'|y  } pdhKRM"y* nb#K?urίH9#g ڍDI(%P%GaGc51=[_\[q{R.# ":wIlR;> F;ޕf *YrCd%== 6TH2vE5*F@Ch(K8EsT3Pe%JУP} 3mp]!Ę ifTUb$P) S\maA;p3k<3p6")ҋdѥ1{rkwb(6m۶ȑ#¥#!!IIIHOKMO&.\Fnnbb+N%rFv.>P3\\H6Prd,LhH/XKXX+p)vvpuuUNBY^K{ޱƌGj_z \SQf3#}ؤ OX):]ʀg`ӓU2>(ۼ>;D_*9`6(Z +,=q?!)@5 ]`Ue~vuw'ۈ1:GI)؉gw@AB.lccݽ~`CPT={s~/rOôfc}CBτC9uc5vE/sٳ[c\L+ >gPqgY`a^>l)y5sA $yXc1}ȲÄ1ce/ߢk.\\0Q_')6$S8Xc5XiAc{#Řh5Wm<@FcQHrߕN=oE*-r C -CiPd r~\PBoY_.⬅G-u-Hk<5Sy/ԜyIr;7?._VQֈac"vږt`y)b:>v Rj4 "y;A̟^T8_ӈ?X dlذYNg#ԥ7#.` 'Nk#c{|L oôMW! v^뒡5OrL!S>M8W]E,K_fTA5H'#at zIOXu0 D???\h' Nbv9(1RY&rs8z3‚龬$w@nEՄ9q9Ǔ4p2)!L|fЍWcp_\  (Wós78[T<n]@^c2Eg@ጝNE'oXdgFq`GxKégyhss!B#` k@(q֖w:*̇Dac />BȆ}49> p9r ¸ ]v Hm2_f 4H^9ޡliL3_ rglزj8bЋ0C0-n~g#GIꨦ\Ǝl{vG KL8(>w-ڄ3hɹŠ$ɧp`F(59>z+9v7ᅙ.Xp'0A=X8cN\_ z4u**>B~*[z:pzOǃ%}"d5Fe uy Egɖse))֘[JR[{q9Y477iخL'ToYiIڨ #H~ޱ2mo8.=bw+׍Wp^j0H+įC]_sRΘ$aD(^K>)u cRC mFfA?4P1a%J~/ْ17èv6 2n Rp-õaS,g Wa!ْ~TFiWv/u[--*}A7ޥ]ܫjΨJf;.~@dꟑ#B2K2f<{a͗#MU_2'ƺ =1擖 ٘Gn\5CubV|n6cYj[^bԤ"""""""""pM!kj:\yd֐.?S[3K9,(qg6[9~sE@%9u@é 2sQ^?x%#y(-%WDFҕP /7CEhF9DsonTh._SǩG\^̢znʚw%QBy*+ݬf h6Nmut1u5݌J_J>E0œ>x._) p%,_ %j~;c볱m)t}UD"ui^/^*Rj> *ra89xmI iBd3:"i*3wac\=+Hbrwc>=hHnނMGfv qEŠU8ek].Lo?4Q8}4ϗ͟': ۆ!c–stvDK%zZ])ʤXwf}dNfU·KK,ya|9q6N2$r2d,,LN$?L }*?a+mkLǤ6s W{Ъ8$NeKk\+LۥHHƊ 3 #m|f|+/%aBF&cĕ`3T=OGrV9GvDSwb6GEHJ)㗥'P};NVW\bLq^{>Q+A0~y1J<F+C#Llкo;; J;=^x{(lVrL!Jrlj!k.Wg`ͪi,^.1+/5v !$JscxQ6o+ 7 O>@im砅e>>"Jx5jG&HXQ aKB24q`8bҥ} IJu#Ib$eh_O0%Mȷ4_?,|x4Rk{yZ9Bs|1!g ~syȞ+z ƳS/GO? 
\4 2kH;#6THZkw}7~y#߾^ŧdpgUë{<cǾ!H6%8D.DDksdG m KbH$mqPT&i5ʱQ[.'RN  oB.ض _;z4O"UcX`Q~5Lp.v\0-d)VHDQI$"$/KIK6UJ,=GmGu@yQ *1lXkC_Xtuu k`gmg@XjJMɕ!5(9gWRh8 4ܙ`WwT撨Dض) Mvi{Ll[0H+˄ BZJh X[UHiԔx; K_ق4w;uEX=FcHJ"v.|`{f$6Z[?wj8M!JLˬ춬*P*Č%*sUTTT!Xȿ"q鹥Sª$J.l!2k#f3 jnZS⹟Fp~̢Wc[_SPV/DŽK"~e:=J?4ᓈ6'%!H('$AڝKvȁyxsq"v` FHȢQ-Fm1}yQ#QBkG[Y@#;KKXqY&(-#N(#=-4(:lvQ3k r~CT^&L*@__ c̻74{?NF9闠4TaKm֠UlW:lʙk[rܟ2|A.Aq^4^6(̣!\Łbb./Z 31{ RSPQՑHuO\EE#P|S2PN~@i 3:Z#n2 3;eo*\zvW@Ћh6ڠ@k'W%]$?2>nyv".9c wV~/q8u$:/Ēu{ #{p!S}{b mVh7-+Dl 5c!C ^Jѩ#g?ImVjsX>s|l5?lDn6=N,/?}3 8?RhW#IH3mƇdE3v#z̃pAB嗢8% >8 R4JrE=""""p!@'K@mA]I=C@X&* X8ӍrhL"aCfi<:̿O,L6ϟ,D/BE/Sae򁗥5zvEs˴X@REkM?[ {Gbh id Sڬb] 髵%{'֘Ï 6}F/KUU -|q=޺{,yίM-6hOzZ!Ӊ/<nf0WX{a`lV- mS@ 6k@|EQ[3ւ|bBsol7mCAqUDyql|aEWerd+ `nh\jJ^aùNTSՀsJKܟ?uJY #tG?7Xj*Ic*eN,6.#NH>\W~~p?`9)oT $ljozPR>P*ϻ-=թ$YmEE@E@EPH!(..z^vW"]qe~ [xzhؽ3uA~c4&`}Xkd(o 'R,9X $KƶjxU~s%IH&1^ IL|0.+h xܕzD״&˦,ǩ#GnmXP.EV9ex4k$+&t X8_Fɤs.1 P<4Izghж'.ZQ%_!$4]F5^k eV~-#PZJ)JjPձCT3);'fZ,:I81 {AқZ .$ Ӹ=JDO-ԫm>9wζay90^5s6k1i.^XVtͻ楏_ͩ"=g._,O6_JՂ*KNŀrsQЁ$MAJ6MXRwT FGuln{Lkb֢$[mAҵy6Nk]\#CV7# }[Lm+>ٳsZ2jdoi΄+//a8DҭLr ,S-Z_N}NM=`j*KɓװWN[RTw =4|}kŭݔi,+h~EB>k׬Fb*7HV?2Mj]58-Ӡvkp9W 5+/gS\5Ԡ:uIZ-U575xg6{3(RjR"9WC3'u8< <}t&-ZA#>BdJZ$$n߉ Gtls *Ƒ@H$ZUaF|s^&tUsy'!^ ʞbͅgiܧXSq˰sNBL3h~{ E7 ۙ;P~^1OsW aVX[W?f4XeشzuTY/BfN<\aKO A,JedƌnÅ*ZS^s,9#1:mQIQE@C6[qJғY_g'נÒo$RՕOTDK$X$&p`l:XSoi$Fa]zKcRuoLnhʗCg"KzE(I ļLx^,֠$!&1:}YtZ@_t=Aرg|?y`:GwÓ Y.^&`jvH,z!$12BOڛat7h7u7bC5_UoK*Nro*3SI9$G2YH_0:/M)3BK TNIޙ(*fX]姥e22'gI0TTD)\%`B-K*,y>gMC9KOQN䒩a C2OBǪ,xvmwuKxiPY!.0ch02ϙ, 2)Z+zT;䣉%s-ĵTSUC b G$7:T IK·:ΜEƹ|ڰwK3i2Pʰ>psfMƱ)Ѡ"7)i e6$Aq b'܇,Iö<>|PƾpxMTWԡ,G%e5V<l뇄~쟰".|Bm=b淭3(3oBexyXS,r֟KVI%fRPP{"n4 7a{l6M{ QiGB!R@5]quTҖ8| 5x`~6 '2h&1C%n~Vtp'xS!> Nk*@Rb!LldJj0ޚ+SCbjhB5v%g_iQ q<ë8ɏNrpIZjƌ cHma '"ӺIok0wxD;gü_5js+֒bWV&8OM~.IzW0 v%._"t7?gEj8{6/"6ڍro1כ;`K_aY| )a:Wk3 99A=`᣾^;$5S<0 ,1On[AΓq+Gמu`Դ~&.Q#v""""w!U`TL:FQnTG+v?&\NF/ӔӿOW;W5XZF¼^*\ P siqhz$e@U.r5ݘ8Y#KB5%YHLCχGK_Cs1ڊRNpsvqrv %싎]Qʨ[g-rLH/NPR8xI ?gϢ-c͘ 3SAWq >=qgq0{®icQk<4M|UmwX8en/{ӁDV-+LkYٺ#+}G? ߮wp2ii{>O{Wͩ h]؋8sN+,]DB >ʼnXQ3ǹ=۰:ѝP<ؿ?]/y7*5tAŊӢ}}÷QaMr[ 4|^@?X%@޷ DB6bDC}5bx9ƫrʨyUTTT0BBd)z|f IJrE!q(5R Fnhk^+M?8SRC#R\xGأ}|k^]]~VOhֱBOu̢utFvH.Tr0ajĮމGoGl_-Cڸy+ X+_ښR )&~^TBХ_4%uCqp4ZG4?vnWT"eo*Z hAK;CDEZQGn2t5؜]ԸerO/fa}|F8#p~5ە?7ԨܿTJE@ yv0A@hk^# 9hJ)_r+W!;:Ni.ن&ơ\/ E`_>=?5urck zT,ƒQD9%c*^SzG0HdK8]ƾᜅ-<4>l2 @CGn`\cl&9ls|v)ݷ'~fxI B7Ff Qt6,RQw5Д,+c0H4y>Tg撴-lmnSv ^9XU緦X?z 'SjjxΒ.^s~ z\f3azرu?n@ͣɗ +It$(ȅ1+n¸_vmhHkK^AȐ4Ӛ q}yvebۏcؐ0|7k,W y7iFO57VIڌ@s)*Vo-MRLC HA+?ũ!bzԓ(/SKB toH$j|Hv`^v;dAfx Yj ÿSWZ~Aszv `I~l-d(TrXQGs΁Еq;0 8C7hҚRP2J}%I& UaϷbpHE-čk6 f O&3v0>B0K/FHo*!񲠧'w]G]ۏ2nՖQ$q>m;IM|gP$IhSfPCIhMQ.-Abp8~j*鞫bdWC̾;{s'+5;b{PXmdń*w66.X6@Qf1^(M/G'|ۅBTTzy< K7+vupjt;i{!x Wz;qVS/tpBFYX)zƑ/R.\R& r WWܿa~Cc2$z\5oRxc"`1:隓ֱbaKуδ74Q(+#z hwӷOF^5IN(,{EըYVftxO$}ŇV& X3WXWd=غvvpp1xQƉkw?]HvELy=Pw˦S\Yyv*EoJ%[: ]=oÅqJq-H֕/!<$JTxΔNwߌ/_,@#^x='܍Pmu?,ƂDdۏ*kG"16Q$ѷvƧ(G#&fW+8e+wgS8p>mOU MuFLl|I>2;+  T |X;nQ-挵桺?# 3 ; nڹbPJ-plFc`â;3;*Oa8Q[p/߰_Ù1t% 3EpwѣDH􏴬6"/F@t~) yxm.<%/Rm*:.RVN5OWnA@.3'?7ADW=V;?Uڡ }R7<̄]Ȓ47 }B\;iN-@O Ut<^!RN*G7DϛÕTT@̷][ñٔY{aԣ7!(yR]m 17E I=ZFVZXP`7UPl3_[QZhnڔ@]V\R-n 3]$3+^%yVEWvمHLHB܂X:ij8q"r%2,jWvTAxdA8U'a0Fݟ%YBBuI%b졫(=-/-CyqLgvKtci1ge|Z+ B^X^ZۨG( |;кR'aUL#Y]QH:P! 8VTs>ۚA@P7(Bw18V|>!:#4SJ2Oz{|/\~'3aB*~e%|^۔t% (U)\ OV%(MQ};\6(QЪc((A4Lda5o&!2+ chX"TXlYNG^5`#ٗ=`%eǥvR~9ZakRHO(L+e\r12B!i5wڌ > 7`HoB1o x~i3y]:MRia8"!=9fL9rկCYmyo,UWXZם")-,7R⟃8}0'w=Ľ݋֭i/N¿xԮ Ϭ ! D$رn? )¾mAQHɊ*t),kn0%Gm;Kvni[@ FF$iGoǝ1H4:dߎ[PTDH{DP.PcW! 
FPݸ 䙼?ط.gH.^ Pv׳H]Bu/<sV[oE}uV,]{ǖio 0wl7W8;ɉƌ`jAJťeSH1!eFOW 7 *#q#mio$Hu h_hgc,xxVJm7Es{Bo Ifo?nJr±|iI΁…hcz3%9f^ꚝx=Νsw3D=Ug$kSM!g~usҰ&٫aD ^I2I-UQ( [5"/3yH:I΂DwI< F6mj}_JrRw ck$B401ec:Dꁝކc, " Y5Nn%3~!D 'Wb_<+p`s .ܻl+fI˝I]Nz hĸ8x8&)l+R]lg,~x0Zކ5 `&pRѺ -3=oFztwqڵ<<=0b y(88x ;k`Ÿ<U槊aK (.$UeU Zs"򂟟i!ԭ`#TIb O Ok_/Ec eRϤE*G|B6ҥX= .^לKKAƔfYj"_R5|SUR| F~^> P\Xs4)oĿ* ?]*&VY U_*-׶ؙBbɨkvvptt+=24G -~~}礒\ξXgS/70I-6hO Qy)q09:r,Љ\2%"ޤ!]әs6ʹLrC_aKn6o(E^~!n}ř#͛)TbtF~#lF9 ߈6 dWA!2>~}ks8N9A$;p8e'v}7v͝}:KahV^0{0#5ֽ !E}-#-+0Rq)ayK9 Xz&9G@n3ցzPjN?D5o__þ{^ћU[H^LP!Q(,,T>eeTRN\GȮ˵z_or/ ZZiW!O!&I?0CꝐ']R.$@Ut5@ËaMdj ixŦj1g7 6B@㻗G|X3qX5`UXt*wGҸ~i:_/ߨVm' 4rzw $:V\S)P LA7O͒NI1wRO"Ad&kt"y2| 8Lddg0|<>`xF"iɨ`xq#W$*JKr(e.#t{:1EE*+\d#DXHuhТp(? [GʘO1x߽ mAعa3<_&~^_u6SּML*z2,sDZˑ i vgR/t b ;FغB揢8}8} YH-'5.W}ټ)%v=hlx4WOC>8?f݈^M***3G04bb''aTá,/3޻/L}O$s#[h*2 20n8l8X93g[о`p2kMb@gv.h߹/:5Eˋppl%T"gn}LldǒΡ${D62>XͨO) # ^ENvM[\٣9F=6"QZ꧌x=]Kf#sTRmew劓8u(=}ѵeUmDF:'0~ۏN 0xXؖpuXn0[֮I>ZCخ XfK?eRzKGkձ͕OtBA#{xD#5a6%1JN9^.0Gc!4y\ Na]Ц`:MKxp EDE% !]NNC":)V<mh;Mt!5KOFUizL&-]e!̱wTZwwH[\^ݽazn8SG+D777, L@ Ǎ JԮ֡"""D@%yҫ-e$[ktRL* //O>*Es}$V޺UTxO0K`x1zG Jݹ'Ӽ7zsxe~KPԣE/"|k>}ή<9Bc&7E1t?vx폸DOk*LGZ6nc8r$t?`gAjT Jqí{Q{o?6Jb{?l+fWc ¸C^Ƹ4ŅU~,]^"7w OxΌ.7WnT\ZN:Wߙ: nciDkF/>}(O痷Itkp}o67X Uטՠg~s4,+@le1`^-o~>!3#ޑ ' b= +1v)2Q|_Ս&88nn2۟8@|BG9"M'Tڹҥ؟V-"""/F@%\0tZz*̹lJƥ^V8RQ.eNMe cw+"g`쥸# Xֽ#bѯ]8f}%][o#!(٫O0z哌3nFeJ|</C{?k,Ɲ8Xy\*<Ͼb` S%wm(mQk}*EUeH>_|<!nA{J'q_0kg8c!bĢuFMsm7+v{Jij[pve;\1"Knz)Y>I7D9= 3^+O?v|p. ~S_/OYWIr }zINID)ŷq!$T^DDjkj(EeAv?+Rٗc\v -ᵰ Ǯds+l#][x7Hd5=W4[tgKc$iWev?mp ƿ\u\J2||mO;#x"v?Z.55Jre*E]~ ɌwTi3}oolqt65v;coq%A;f|)ޏks7OӧC۶hoQ̱BKЎh}W!:Hn}EP 8S{0>)bO|W USJ}`|8`w_y\{Axӏ3w ?b *^CF羃P2-Gu6&(E)N@_+%WJG++Jh6pv>[ˍjޗ(8f+,1[)ĺ#XV(t*= ~`6L\LqFbZab,b\7.7ͳa|6~8XEŁhe}ר-$CP/h=3mB߰k:@D᫛*** PI6$J+CͿ-^n z:>cO\C24"DFGљb]c‰X~T O|tG?ErEzWr`+lZ Stz^e<3t4"yu`,_ˎupDv)T響</%0NUY>[W_yx71ԫGIA|1WQQ$r9HK^B^J^=EP IU /ߞ;p7G9?0aX0cded,‚oiW[c{=ٟN9 :Ai.zPwUSQ&eؽe2hKT4Z #S&T] /ӞO]y-Ao&SwgFu=X-No2*|***W1*DG빽jp EˠXhSH-sX4s!,냞6X᛭$ORUŒFqZfa-ZOk$$dY7ª$۶%SoT[ի:Gn 29ƻv eBy  w:L +''u?ħwUb*btb;܅^юdv̙w/_Fe(x#,3tO^7]WѨG^gcU;ïJ K$x 82sDVyCM*** *.j?f}AAA\Qr7-H7rejKjVvijk᮸ (˾39 ǥ903rs;y:29@DV}E&h;9BfW LaWjzѹp1\4Ll_[_|&zj$&$"`e#F|܂fLįg>gሳnL@oKx_ۂPg9=:Oc*%ph K>su`in2Y9D*(,-if}+C[DR63vd^\/r}5)U|XPD|+1O Czm4 Ç&ʉ4N3(c2:bm]+ 8G}C >oS}sŬO(B(;4.N05E(5mUZښ;K9F ;IhE) %knr' pKuQ'˅-EVx\𣸡UELqǴSTۃ M1e ; WQNM{na=D1u;GL"}߯m/|z "OBRf^^sc1&}.;"zAHӱ2˓!T=+;.E2ǦaG~( E՚ fM.Fv0wq1 Qqg(?)5sAX<!ӶIA?m\Vbi͆ԑH0O(\(0tBGLT,bSLA)|1͟ {sYѐШ=Ix4ERzϏeR;E 8'Ќ 飶fܟeL7Jfw+1yA 'ֺ^tе,:*k7O0RӲGzˊQRCX>ey!-BiqųYkbeVX\шృT9NhZt[X4k*Qq>anJ,6n9X|/.e䆠jK.8s u ⌔e puO<|W&bKʨ5Av86Q_#1=<|`RE]wB,luU.pu~lԥCl9nށb87neï0ԥ  ̄5c;h7'ct+Kxf+ѾP=Rq)& Tt> *%5 Fzt jF^ |j!\6$]`'*y9`~[ >Ô)r"Vә2״mm7iwjWA"m*ǗR1J\ؔZ3W>ou`zPg7ŁE2(n`߷bt Jo(>2{XDwUY:VıPxfRS@"7v'أgORH?+C^1] u;  ڏg)3bw`ӲŒ[1 g\%cd"M? .Rb ~Q⓺zrJ6A4;אK ' p#6o98E1ζƨq#vTB!MlW;3yE[ j4:og,B&-IFLYjhY~6~Z5W.W=5[܇2}czewq99!׹ 0݄ Hiucp)&+H@'@ݫظq Б@vq11 RSk fxOC9:"s%|B#D@WN/&JX)M%hJXKBT3;m"[S.[삧4٩goXЌ;< #goH$M3=p\Hpp񂟟,ebkyp0Vrl<:BN!T]e9EA|ggmUgqC6h۱=Y ɗRF;FQ5`Mˆ~2ȌۃKi['6 ;/lLBJ J|ц2B *[wѮ 8}zE#GeP\\]s mn3yJD|Q 72n8V>As(|C(C&U`' 9ZMŶ='f_@_4_la++<KfMј0-Y94' 5ܚSdnmJˊF 5"% rBFV/ļ *DO&qwyTY,tc6#.HlVl2a0j;a'k 7uE½|9w*C*Gg#pi߰Œ`;A&,NРŷa[!ػs*P~b7 /n[Ibg4",qį34CM!VomM~5OƵL5\i={G|/:ː~W}|R'Z@\Acę(Ŋ\j/CbS)Cq ĞXvPWM06^&MܚhJk 1p\o&\^ko!~߶|k*fJ' #o oq.ZMP+ Lh# NH2ǽ&2J7]^E{. 
neutron-8.4.0/doc/source/devref/l2_agents.rst0000664000567000056710000000023213044372736022314 0ustar jenkinsjenkins00000000000000L2 Agent Networking
-------------------

.. toctree::
   :maxdepth: 3

   openvswitch_agent
   linuxbridge_agent
   sriov_nic_agent
   l2_agent_extensions
neutron-8.4.0/doc/source/devref/callbacks.rst0000664000567000056710000004600413044372760022361 0ustar jenkinsjenkins00000000000000..
      Licensed under the Apache License, Version 2.0 (the "License"); you may
      not use this file except in compliance with the License. You may obtain
      a copy of the License at

          http://www.apache.org/licenses/LICENSE-2.0

      Unless required by applicable law or agreed to in writing, software
      distributed under the License is distributed on an "AS IS" BASIS,
      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
      implied. See the License for the specific language governing
      permissions and limitations under the License.


      Convention for heading levels in Neutron devref:
      =======  Heading 0 (reserved for the title in a document)
      -------  Heading 1
      ~~~~~~~  Heading 2
      +++++++  Heading 3
      '''''''  Heading 4
      (Avoid deeper levels because they do not render well.)


Neutron Callback System
=======================

In Neutron, core and service components may need to cooperate during the
execution of certain operations, or they may need to react upon the
occurrence of certain events.
For instance, when a Neutron resource is associated with multiple services,
the components in charge of these services may need to play an active role
in determining what the right state of the resource needs to be. The
cooperation may be achieved by making each object aware of the others, but
this leads to tight coupling; alternatively, it can be achieved by using a
callback-based system, where the same objects are allowed to cooperate in a
loose manner.

This is particularly important since the spin-off of the advanced services
like VPN, Firewall and Load Balancer, where each service's codebase lives
independently from the core and from one another. This means that tight
coupling is no longer a practical solution for object cooperation. In
addition, if more services are developed independently, there is no viable
integration between them and the Neutron core. A callback system, and its
registry, tries to address these issues.

In object-oriented software systems, method invocation is also known as
message passing: an object passes a message to another object, and it may or
may not expect a message back. This point-to-point interaction can take
place between the parties directly involved in the communication, or it can
happen via an intermediary. The intermediary is then in charge of keeping
track of who is interested in the messages and of delivering the messages
back and forth, when required. As mentioned earlier, the use of an
intermediary has the benefit of decoupling the parties involved in the
communications, as now they only need to know about the intermediary; the
other benefit is that the use of an intermediary opens up the possibility of
multiple party communication: more than one object can express interest in
receiving the same message, and the same message can be delivered to more
than one object. To this aim, the intermediary is the entity that exists
throughout the system lifecycle, as it needs to be able to track whose
interest is associated with what message.

In a design for a system that enables callback-based communication, the
following aspects need to be taken into account:

* how to become a consumer of messages (i.e. how to be on the receiving end
  of the message);
* how to become a producer of messages (i.e. how to be on the sending end of
  the message);
* how to consume/produce messages selectively;

Translated and narrowed down to Neutron needs, this means designing a
callback system where messages are about lifecycle events (e.g. before
creation, before deletion, etc.) of Neutron resources (e.g. networks,
routers, ports, etc.), and where the various parties can express interest in
knowing when these events take place for a specific resource.

Rather than keeping the conversation abstract, let us delve into some
examples that help to better understand some of the principles behind the
provided mechanism.

Subscribing to events
---------------------

Imagine that you have entities A, B, and C that have some common business
over router creation. A wants to tell B and C that the router has been
created and that they need to get on and do whatever they are supposed to
do.

In a callback-less world this would work like so:

::

  # A is done creating the resource
  # A gets hold of the references of B and C
  # A calls B
  # A calls C
  B->my_random_method_for_knowing_about_router_created()
  C->my_random_very_difficult_to_remember_method_about_router_created()

If B and/or C change, things become sour.

In a callback-based world, things become a lot more uniform and
straightforward:

::

  # B and C ask I to be notified when A is done creating the resource
  # ...
  # A is done creating the resource
  # A gets hold of the reference to the intermediary I
  # A calls I
  I->notify()

Since B and C will have expressed interest in knowing about A's business,
'I' will deliver the messages to B and C. If B and C change, A and 'I' do
not need to change.

In practical terms, this scenario translates into the code below:

::

  from neutron.callbacks import events
  from neutron.callbacks import resources
  from neutron.callbacks import registry


  def callback1(resource, event, trigger, **kwargs):
      print('Callback1 called by trigger: ', trigger)
      print('kwargs: ', kwargs)


  def callback2(resource, event, trigger, **kwargs):
      print('Callback2 called by trigger: ', trigger)
      print('kwargs: ', kwargs)


  # B and C express interest with I
  registry.subscribe(callback1, resources.ROUTER, events.BEFORE_CREATE)
  registry.subscribe(callback2, resources.ROUTER, events.BEFORE_CREATE)
  print('Subscribed')


  # A notifies
  def do_notify():
      kwargs = {'foo': 'bar'}
      registry.notify(resources.ROUTER, events.BEFORE_CREATE, do_notify, **kwargs)


  print('Notifying...')
  do_notify()

The output is:

::

  > Subscribed
  > Notifying...
  > Callback2 called by trigger:  <function do_notify at 0x...>
  > kwargs:  {'foo': 'bar'}
  > Callback1 called by trigger:  <function do_notify at 0x...>
  > kwargs:  {'foo': 'bar'}

Thanks to the intermediary's existence throughout the life of the system, A,
B, and C are free to evolve their internals, dynamics, and lifecycles.

Subscribing and aborting events
-------------------------------

Interestingly in Neutron, certain events may need to be forbidden from
happening due to the nature of the resources involved. To this aim, the
callback-based mechanism has been designed to support a use case where, when
callbacks subscribe to specific events, the action that results from it may
lead to the propagation of a message back to the sender, so that the sender
itself can be alerted and stop the execution of the activity that led to the
message dispatch in the first place.

The typical example is where a resource, like a router, is used by one or
more high-level services, like a VPN or a Firewall, and actions like
interface removal or router destruction cannot take place, because the
resource is shared. To address this scenario, special events are introduced,
'BEFORE_*' events, to which callbacks can subscribe and have the opportunity
to 'abort', by raising an exception when notified.

Since multiple callbacks may express an interest in the same event for a
particular resource, and since callbacks are executed independently from one
another, this may lead to situations where notifications that occurred
before the exception must be aborted. To this aim, when an exception occurs
during the notification process, an abort_* event is propagated immediately
after. It is up to the callback developer to determine whether subscribing
to an abort notification is required in order to revert the actions
performed during the initial execution of the callback (when the BEFORE_*
event was fired). Exceptions caused by callbacks registered to abort events
are ignored.

The snippet below shows this in action:

::

  from neutron.callbacks import events
  from neutron.callbacks import exceptions
  from neutron.callbacks import resources
  from neutron.callbacks import registry


  def callback1(resource, event, trigger, **kwargs):
      raise Exception('I am failing!')


  def callback2(resource, event, trigger, **kwargs):
      print('Callback2 called by %s on event %s' % (trigger, event))


  registry.subscribe(callback1, resources.ROUTER, events.BEFORE_CREATE)
  registry.subscribe(callback2, resources.ROUTER, events.BEFORE_CREATE)
  registry.subscribe(callback2, resources.ROUTER, events.ABORT_CREATE)
  print('Subscribed')


  def do_notify():
      kwargs = {'foo': 'bar'}
      registry.notify(resources.ROUTER, events.BEFORE_CREATE, do_notify, **kwargs)


  print('Notifying...')
  try:
      do_notify()
  except exceptions.CallbackFailure as e:
      print('Error: ', e)

The output is:

::

  > Subscribed
  > Notifying...
  > Callback2 called by <function do_notify at 0x...> on event before_create
  > Callback2 called by <function do_notify at 0x...> on event abort_create
  > Error:  Callback __main__.callback1 failed with "I am failing!"

In this case, upon the notification of the BEFORE_CREATE event, Callback1
triggers an exception that can be used to stop the action from taking place
in do_notify(). On the other hand, Callback2 executes twice, once to deal
with the BEFORE_CREATE event, and once to undo the actions during the
ABORT_CREATE event. It is worth noting that it is not mandatory to have the
same callback registered to both BEFORE_* and the respective ABORT_* event;
as a matter of fact, it is best to use different callbacks to keep the two
pieces of logic separate.

As we can see from the last example, an exception triggered in a callback is
recorded, and it does not prevent the remaining callbacks from executing. An
exception triggered in a callback for a BEFORE_XXX event makes the notify
process generate an ABORT_XXX event and call the related callbacks, while an
exception from PRECOMMIT_XXX does not generate an ABORT_XXX event; both of
them, however, finally raise a unified CallbackFailure exception to the
outside. For exceptions triggered by other events, like AFTER_XXX and
ABORT_XXX, no exception is raised to the outside.

Unsubscribing to events
-----------------------

There are a few options to unsubscribe registered callbacks:

* clear(): it unsubscribes all subscribed callbacks: this can be useful
  especially when winding down the system, and notifications shall no
  longer be triggered.
* unsubscribe(): it selectively unsubscribes a callback for a specific
  resource's event. Say callback C has subscribed to event A for resource
  R; any notification of event A for resource R will no longer be handed
  over to C, after the unsubscribe() invocation.
* unsubscribe_by_resource(): say that callback C has subscribed to events
  A, B, and C for resource R; any notification of events related to
  resource R will no longer be handed over to C, after the
  unsubscribe_by_resource() invocation.
* unsubscribe_all(): say that callback C has subscribed to events A, B for
  resource R1, and events C, D for resource R2; any notification of events
  pertaining to resources R1 and R2 will no longer be handed over to C,
  after the unsubscribe_all() invocation.

The snippet below shows these concepts in action:

::

  from neutron.callbacks import events
  from neutron.callbacks import exceptions
  from neutron.callbacks import resources
  from neutron.callbacks import registry


  def callback1(resource, event, trigger, **kwargs):
      print('Callback1 called by %s on event %s for resource %s' %
            (trigger, event, resource))


  def callback2(resource, event, trigger, **kwargs):
      print('Callback2 called by %s on event %s for resource %s' %
            (trigger, event, resource))


  registry.subscribe(callback1, resources.ROUTER, events.BEFORE_READ)
  registry.subscribe(callback1, resources.ROUTER, events.BEFORE_CREATE)
  registry.subscribe(callback1, resources.ROUTER, events.AFTER_DELETE)
  registry.subscribe(callback1, resources.PORT, events.BEFORE_UPDATE)
  registry.subscribe(callback2, resources.ROUTER_GATEWAY, events.BEFORE_UPDATE)
  print('Subscribed')


  def do_notify():
      print('Notifying...')
      kwargs = {'foo': 'bar'}
      registry.notify(resources.ROUTER, events.BEFORE_READ, do_notify, **kwargs)
      registry.notify(resources.ROUTER, events.BEFORE_CREATE, do_notify, **kwargs)
      registry.notify(resources.ROUTER, events.AFTER_DELETE, do_notify, **kwargs)
      registry.notify(resources.PORT, events.BEFORE_UPDATE, do_notify, **kwargs)
      registry.notify(resources.ROUTER_GATEWAY, events.BEFORE_UPDATE, do_notify, **kwargs)


  do_notify()
  registry.unsubscribe(callback1, resources.ROUTER, events.BEFORE_READ)
  do_notify()
  registry.unsubscribe_by_resource(callback1, resources.PORT)
  do_notify()
  registry.unsubscribe_all(callback1)
  do_notify()
  registry.clear()
  do_notify()

The output is:

::

  Subscribed
  Notifying...
  Callback1 called by <function do_notify at 0x...> on event before_read for resource router
  Callback1 called by <function do_notify at 0x...> on event before_create for resource router
  Callback1 called by <function do_notify at 0x...> on event after_delete for resource router
  Callback1 called by <function do_notify at 0x...> on event before_update for resource port
  Callback2 called by <function do_notify at 0x...> on event before_update for resource router_gateway
  Notifying...
  Callback1 called by <function do_notify at 0x...> on event before_create for resource router
  Callback1 called by <function do_notify at 0x...> on event after_delete for resource router
  Callback1 called by <function do_notify at 0x...> on event before_update for resource port
  Callback2 called by <function do_notify at 0x...> on event before_update for resource router_gateway
  Notifying...
  Callback1 called by <function do_notify at 0x...> on event before_create for resource router
  Callback1 called by <function do_notify at 0x...> on event after_delete for resource router
  Callback2 called by <function do_notify at 0x...> on event before_update for resource router_gateway
  Notifying...
  Callback2 called by <function do_notify at 0x...> on event before_update for resource router_gateway
  Notifying...

FAQ
---

Are callbacks a mechanism for remote or local communication (intra vs inter-process)?
  Callbacks as described in this document are a local communication
  mechanism that allows multiple entities in the same process space to
  communicate with one another. For Neutron specific remote (IPC)
  mechanisms, you can read more in :doc:`RPC API <rpc_api>` or
  :doc:`Messaging callbacks <rpc_callbacks>`.

Can I use the callbacks registry to subscribe and notify non-core resources and events?
  Short answer is yes. The callbacks module defines literals for what are
  considered core Neutron resources and events. However, the ability to
  subscribe/notify is not limited to these, as you can use your own defined
  resources and/or events. Just make sure you use string literals, as typos
  are common, and the registry does not provide any runtime validation.
  Therefore, make sure you test your code!

What is the relationship between Callbacks and Taskflow?
  There is no overlap between Callbacks and Taskflow, nor mutual exclusion;
  as a matter of fact, they can be combined: you could have a callback that
  goes on to trigger a taskflow. It is a nice way of separating
  implementation from abstraction, because you can keep the callback in
  place and replace Taskflow with something else.

Is there any ordering guarantee during notifications?
  No, the ordering in which callbacks are notified is completely arbitrary
  by design: callbacks should know nothing about each other, and ordering
  should not matter; a callback will always be notified and its outcome
  should always be the same regardless of the order in which it is
  notified. Priorities can be a future extension, if a use case arises that
  requires enforced ordering.

How is the notifying object expected to interact with the subscribing objects?
  The ``notify`` method implements a one-way communication paradigm: the
  notifier sends a message without expecting a response back (in other
  words, it fires and forgets). However, due to the nature of Python, the
  payload can be mutated by the subscribing objects, and this can lead to
  unexpected behavior of your code, if you assume that this is the
  intentional design. Bear in mind that passing by value using deepcopy was
  not chosen for efficiency reasons. Having said that, if you intend for
  the notifier object to expect a response, then the notifier itself would
  need to act as a subscriber.

Is the registry thread-safe?
  Short answer is no: it is not safe to make mutations while callbacks are
  being called (more details as to why can be found `here `_). A mutation
  could happen if a 'subscribe'/'unsubscribe' operation interleaves with
  the execution of the notify loop. Albeit there is a possibility that
  things may end up in a bad state, the registry works correctly under the
  assumption that subscriptions happen at the very beginning of the life of
  the process and that the unsubscriptions (if any) take place at the very
  end. In this case, chances that things do go badly may be pretty slim.
  Making the registry thread-safe will be considered as a future
  improvement.

What kind of operations can I add to a callback?
  Callbacks for PRECOMMIT_XXX events must not use blocking functions or
  functions that would take a long time, like communicating with an SDN
  controller over the network. Callbacks for PRECOMMIT events are meant to
  execute DB operations in a transaction context; errors that occur will be
  taken care of by the context manager.

What kind of function can be a callback?
  Anything you fancy: lambdas, 'closures', class, object or module methods.
  For instance:

  ::

    from neutron.callbacks import events
    from neutron.callbacks import resources
    from neutron.callbacks import registry


    def callback1(resource, event, trigger, **kwargs):
        print('module callback')


    class MyCallback(object):

        def callback2(self, resource, event, trigger, **kwargs):
            print('object callback')

        @classmethod
        def callback3(cls, resource, event, trigger, **kwargs):
            print('class callback')


    c = MyCallback()
    registry.subscribe(callback1, resources.ROUTER, events.BEFORE_CREATE)
    registry.subscribe(c.callback2, resources.ROUTER, events.BEFORE_CREATE)
    registry.subscribe(MyCallback.callback3, resources.ROUTER, events.BEFORE_CREATE)


    def do_notify():
        def nested_subscribe(resource, event, trigger, **kwargs):
            print('nested callback')

        registry.subscribe(nested_subscribe, resources.ROUTER, events.BEFORE_CREATE)

        kwargs = {'foo': 'bar'}
        registry.notify(resources.ROUTER, events.BEFORE_CREATE, do_notify, **kwargs)


    print('Notifying...')
    do_notify()

  And the output is going to be:

  ::

    Notifying...
    module callback
    object callback
    class callback
    nested callback
neutron-8.4.0/doc/source/devref/l2_agent_extensions.rst0000664000567000056710000001006413044372760024411 0ustar jenkinsjenkins00000000000000..
      Licensed under the Apache License, Version 2.0 (the "License"); you may
      not use this file except in compliance with the License. You may obtain
      a copy of the License at

          http://www.apache.org/licenses/LICENSE-2.0

      Unless required by applicable law or agreed to in writing, software
      distributed under the License is distributed on an "AS IS" BASIS,
      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
      implied. See the License for the specific language governing
      permissions and limitations under the License.


      Convention for heading levels in Neutron devref:
      =======  Heading 0 (reserved for the title in a document)
      -------  Heading 1
      ~~~~~~~  Heading 2
      +++++++  Heading 3
      '''''''  Heading 4
      (Avoid deeper levels because they do not render well.)


L2 agent extensions
===================

All reference agents support a common extension mechanism that makes it easy
to reuse code between agents and avoids the need to patch an agent for each
new core resource extension. Those extensions can be especially interesting
to third parties that don't want to maintain their code in the Neutron tree.

Extensions are referenced through stevedore entry points defined under the
neutron.agent.l2.extensions namespace. On each port event, handle_port is
triggered by the agent.

* neutron.agent.l2.agent_extension:
  This module defines an abstract extension interface.

* neutron.agent.l2.extensions.manager:
  This module contains a manager that allows to register multiple
  extensions, and passes handle_port events down to all enabled extensions.

Agent API object
----------------

Every agent can pass a so-called agent API object into extensions to expose
some of its internals to them in a controlled way. If an extension is
interested in using the object, it should define a consume_api() method that
will receive the object before the extension's initialize() method is called
by the extension manager.

This agent API object is part of the public Neutron interface for third
parties. All changes to the interface will be managed in a backwards
compatible way.

At the moment, only the Open vSwitch agent provides an agent API object to
extensions.
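
To make the mechanics concrete, here is a minimal sketch of an extension
built on the abstract interface from neutron.agent.l2.agent_extension; the
class name and method bodies are hypothetical placeholders for illustration,
not an existing in-tree extension:

::

  from neutron.agent.l2 import agent_extension


  class ExampleL2Extension(agent_extension.AgentCoreResourceExtension):
      """A do-nothing extension sketch, for illustration only."""

      def consume_api(self, agent_api):
          # Optional: if defined, the extension manager hands over the
          # agent API object before initialize() is called.
          self.agent_api = agent_api

      def initialize(self, connection, driver_type):
          # One-time setup performed when the agent loads the extension.
          pass

      def handle_port(self, context, port):
          # Invoked by the agent on each port event.
          pass

      def delete_port(self, context, port):
          # Invoked by the agent when a port is removed.
          pass

Such a class would then be advertised through a stevedore entry point in the
consuming package, under the neutron.agent.l2.extensions namespace mentioned
above, so that the agent's extension manager can discover and load it.
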
Open vSwitch agent API
~~~~~~~~~~~~~~~~~~~~~~

* neutron.plugins.ml2.drivers.openvswitch.agent.ovs_agent_extension_api

The Open vSwitch agent API object includes two methods that return wrapped and hardened bridge objects with cookie values allocated for calling extensions:

#. request_int_br
#. request_tun_br

Bridge objects returned by those methods already have new default cookie values allocated for extension flows. All flow management methods (add_flow, mod_flow, ...) enforce those allocated cookies. Extensions are able to use those wrapped bridge objects to set their own flows, while the agent relies on the collection of those allocated values when cleaning up stale flows from the previous agent session::

    +-----------+
    | Agent API +--------------------------------------------------+
    +-----+-----+                                                   |
          |                                   +-----------+         |
          |1                               +--+ Extension +--+      |
          |                                |  +-----------+  |      |
    +---+-+-+---+  2  +--------------+  3  |                 |  4   |
    |   Agent   +-----+ Ext. manager +-----+--+    ....   +--+------+
    +-----------+     +--------------+     |                 |
                                           |  +-----------+  |
                                           +--+ Extension +--+
                                              +-----------+

Interactions with the agent API object are in the following order::

    #1 the agent initializes the agent API object (bridges, other internal state)
    #2 the agent passes the agent API object into the extension manager
    #3 the manager passes the agent API object into each extension
    #4 an extension calls the new agent API object method to receive bridge
       wrappers with cookies allocated

Call #4 also registers allocated cookies with the agent bridge objects.

neutron-8.4.0/doc/source/devref/neutron_api.rst0000664000567000056710000001052413044372760022763 0ustar jenkinsjenkins00000000000000.. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.

Convention for heading levels in Neutron devref::

   =======  Heading 0 (reserved for the title in a document)
   -------  Heading 1
   ~~~~~~~  Heading 2
   +++++++  Heading 3
   '''''''  Heading 4
   (Avoid deeper levels because they do not render well.)

Neutron public API
==================

The Neutron main tree serves as a library for multiple subprojects that rely on different modules from the neutron.* namespace to accommodate their needs. Specifically, advanced service repositories and open source or vendor plugin/driver repositories do so. Neutron modules differ a lot in their API stability, and no part of the namespace is explicitly marked as safe for consumption by other projects.

That said, there are modules that other projects should definitely avoid relying on. Specifically, no external repository should use anything located under the neutron.openstack.common.* import path. This code belongs to oslo-incubator modules and is not meant to work for consumers other than the Neutron main tree itself. (The only exception is made for advanced service repositories that are tightly controlled by the Neutron community.) Long story short, if your repository uses those modules, please switch to the corresponding oslo libraries or use your own copy of the oslo-incubator files.
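As a concrete illustration of such a switch, consider the oslo.log adoption listed among the breakages below; for code that still imports the incubator logging module, the fix is typically a one-line import swap::

    # Before: oslo-incubator code vendored under neutron.openstack.common
    # (since removed from the tree)
    from neutron.openstack.common import log as logging

    # After: the graduated oslo library
    from oslo_log import log as logging

    LOG = logging.getLogger(__name__)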
Breakages
---------

The Neutron API is not very stable, and there are cases when a desired change in the neutron tree is expected to trigger breakage for one or more external repositories under the neutron tent. Below is a list of incompatible changes that could trigger, or are known to have triggered, such breakages. The changes are listed in reverse chronological order (newer at the top).

* change: Consume ConfigurableMiddleware from oslo_middleware.

  - commit: If7360608f94625b7d0972267b763f3e7d7624fee
  - solution: switch to oslo_middleware.base.ConfigurableMiddleware; stop using neutron.wsgi.Middleware and neutron.wsgi.Debug.
  - severity: Low (some out-of-tree plugins might be affected).

* change: Consume sslutils and wsgi modules from oslo.service.

  - commit: Ibfdf07e665fcfcd093a0e31274e1a6116706aec2
  - solution: switch to using oslo_service.wsgi.Router; stop using neutron.wsgi.Router.
  - severity: Low (some out-of-tree plugins might be affected).

* change: oslo.service adopted.

  - commit: 6e693fc91dd79cfbf181e3b015a1816d985ad02c
  - solution: switch to using the oslo_service.* namespace; stop using ANY neutron.openstack.* contents.
  - severity: Low (plugins must not rely on that subtree).

* change: oslo.utils.fileutils adopted.

  - commit: I933d02aa48260069149d16caed02b020296b943a
  - solution: switch to using the oslo_utils.fileutils module; stop using the neutron.openstack.fileutils module.
  - severity: Low (plugins must not rely on that subtree).

* change: Reuse caller's session in DB methods.

  - commit: 47dd65cf986d712e9c6ca5dcf4420dfc44900b66
  - solution: Add context to args and reuse.
  - severity: High (mostly undetected, because 3rd party CIs run Tempest tests only).

* change: switches to oslo.log, removes neutron.openstack.common.log.

  - commit: 22328baf1f60719fcaa5b0fbd91c0a3158d09c31
  - solution: a) switch to oslo.log; b) copy the log module into your tree and use it (may not work due to conflicts between the module and oslo.log configuration options).
  - severity: High (most CI systems are affected).

* change: Implements the reorganize-unit-test-tree spec.

  - commit: 1105782e3914f601b8f4be64939816b1afe8fb54
  - solution: affected code needs to update existing unit tests to reflect the new locations.
  - severity: High (mostly undetected, because 3rd party CIs run Tempest tests only).

* change: drop linux/ovs_lib compat layer.

  - commit: 3bbf473b49457c4afbfc23fd9f59be8aa08a257d
  - solution: switch to using neutron/agent/common/ovs_lib.py.
  - severity: High (most CI systems are affected).

neutron-8.4.0/doc/source/devref/network_ip_availability.rst0000664000567000056710000001423013044372760025351 0ustar jenkinsjenkins00000000000000.. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.

Convention for heading levels in Neutron devref::

   =======  Heading 0 (reserved for the title in a document)
   -------  Heading 1
   ~~~~~~~  Heading 2
   +++++++  Heading 3
   '''''''  Heading 4
   (Avoid deeper levels because they do not render well.)

Network IP Availability Extension
=================================

This extension is an information-only API that allows a user or process to determine the number of IPs consumed across networks and their subnets' allocation pools. Each network and embedded subnet is returned with values for **used_ips** and **total_ips**, making it easy to determine how much of your network's IP space is consumed.
This API gives network administrators the ability to periodically list usage (manual or automated) in order to preemptively add new network capacity when thresholds are exceeded.

**Important Note:** This API tracks a network's "consumable" IPs. What's the distinction? After a network and its subnets are created, consumable IPs are:

* Consumed in the subnet's allocations (derives used IPs)
* Consumed from the subnet's allocation pools (derives total IPs)

This API tracks consumable IPs so network administrators know when a subnet's IP pools (and ultimately a network's) are about to run out. This API does not account for reserved IPs such as a subnet's gateway IP or other reserved or unused IPs of a subnet's cidr that are consumed as a result of the subnet creation itself.

Enabling in Neutron
-------------------

To enable this plugin within Neutron, append its plugin class to the comma-delimited **service_plugins** configuration property within your neutron.conf file. Example::

    service_plugins=router, network_ip_availability

API Specification
-----------------

Availability for all networks
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

GET /v2.0/network-ip-availabilities ::

    Request to url: v2.0/network-ip-availabilities
    headers: {'content-type': 'application/json', 'X-Auth-Token': 'SOME_AUTH_TOKEN'}

Example response ::

    Response:
    HTTP/1.1 200 OK
    Content-Type: application/json; charset=UTF-8

.. code::

    {
        "network_ip_availabilities": [
            {
                "network_id": "f944c153-3f46-417b-a3c2-487cd9a456b9",
                "network_name": "net1",
                "subnet_ip_availability": [
                    {
                        "cidr": "10.0.0.0/24",
                        "ip_version": 4,
                        "subnet_id": "46b1406a-8373-454c-8eb8-500a09eb77fb",
                        "subnet_name": "",
                        "total_ips": 253,
                        "used_ips": 3
                    }
                ],
                "tenant_id": "test-tenant",
                "total_ips": 253,
                "used_ips": 3
            },
            {
                "network_id": "47035bae-4f29-4fef-be2e-2941b72528a8",
                "network_name": "net2",
                "subnet_ip_availability": [],
                "tenant_id": "test-tenant",
                "total_ips": 0,
                "used_ips": 0
            },
            {
                "network_id": "2e3ea0cd-c757-44bf-bb30-42d038687e3f",
                "network_name": "net3",
                "subnet_ip_availability": [
                    {
                        "cidr": "40.0.0.0/24",
                        "ip_version": 4,
                        "subnet_id": "aab6b35c-16b5-489c-a5c7-fec778273495",
                        "subnet_name": "",
                        "total_ips": 253,
                        "used_ips": 2
                    }
                ],
                "tenant_id": "test-tenant",
                "total_ips": 253,
                "used_ips": 2
            }
        ]
    }

Availability by network ID
~~~~~~~~~~~~~~~~~~~~~~~~~~

GET /v2.0/network-ip-availabilities/{network\_uuid} ::

    Request to url: /v2.0/network-ip-availabilities/aba3b29b-c119-4b45-afbd-88e500acd970
    headers: {'content-type': 'application/json', 'X-Auth-Token': 'SOME_AUTH_TOKEN'}

Example response ::

    Response:
    HTTP/1.1 200 OK
    Content-Type: application/json; charset=UTF-8

.. code::

    {
        "network_ip_availability": {
            "network_id": "f944c153-3f46-417b-a3c2-487cd9a456b9",
            "network_name": "net1",
            "subnet_ip_availability": [
                {
                    "cidr": "10.0.0.0/24",
                    "ip_version": 4,
                    "subnet_name": "",
                    "subnet_id": "46b1406a-8373-454c-8eb8-500a09eb77fb",
                    "total_ips": 253,
                    "used_ips": 3
                }
            ],
            "tenant_id": "test-tenant",
            "total_ips": 253,
            "used_ips": 3
        }
    }

Supported Query Filters
~~~~~~~~~~~~~~~~~~~~~~~

This API currently supports the following query parameters:

* **network_id**: Returns availability for the network matching the network ID. Note: This query (?network_id={network_id_guid}) is roughly equivalent to the *Availability by network ID* section, except it returns the plural response form as a list rather than as an item.
* **network_name**: Returns availability for the network matching the provided name.
* **tenant_id**: Returns availability for all networks owned by the provided tenant ID.
* **ip_version**: Filters network subnets to those supporting the supplied IP version. Values can be either 4 or 6.

Query filters can be combined to further narrow results, and what is returned will match all of the supplied criteria. When the same parameter is specified more than once, results matching any of its values are returned, as the second example below shows.

Examples::

    # Fetch IPv4 availability for a specific tenant uuid
    GET /v2.0/network-ip-availabilities?ip_version=4&tenant_id=example-tenant-uuid

    # Fetch multiple networks by their ids
    GET /v2.0/network-ip-availabilities?network_id=uuid_sample_1&network_id=uuid_sample_2

neutron-8.4.0/doc/source/devref/index.rst0000664000567000056710000000471213044372760021551 0ustar jenkinsjenkins00000000000000.. Copyright 2010-2011 United States Government as represented by the Administrator of the National Aeronautics and Space Administration. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.

Convention for heading levels in Neutron devref::

   =======  Heading 0 (reserved for the title in a document)
   -------  Heading 1
   ~~~~~~~  Heading 2
   +++++++  Heading 3
   '''''''  Heading 4
   (Avoid deeper levels because they do not render well.)

Developer Guide
===============

In the Developer Guide, you will find information on Neutron's lower level programming APIs. There are sections that cover the core pieces of Neutron, including its database, message queue, and scheduler components. There are also subsections that describe specific plugins inside Neutron. Finally, the developer guide includes information about the Neutron testing infrastructure.

Programming HowTos and Tutorials
--------------------------------

.. toctree::
   :maxdepth: 3

   effective_neutron
   development.environment
   contribute
   neutron_api
   client_command_extensions
   alembic_migrations

Neutron Internals
-----------------

.. toctree::
   :maxdepth: 3

   services_and_agents
   api_layer
   ml2_ext_manager
   quota
   api_extensions
   plugin-api
   db_layer
   policy
   rpc_api
   rpc_callbacks
   layer3
   l2_agents
   ovs_vhostuser
   quality_of_service
   service_extensions
   oslo-incubator
   callbacks
   dns_order
   external_dns_integration
   upgrade
   i18n
   instrumentation
   address_scopes
   openvswitch_firewall
   network_ip_availability
   tag

Testing
-------

.. toctree::
   :maxdepth: 3

   fullstack_testing
   testing_coverage
   template_model_sync_test

Module Reference
----------------

.. toctree::
   :maxdepth: 3

.. todo:: Add in all the big modules as automodule indexes.

Indices and tables
------------------

* :ref:`genindex`
* :ref:`modindex`
* :ref:`search`

neutron-8.4.0/doc/source/devref/fullstack_testing.rst0000664000567000056710000000267713044372736024172 0ustar jenkinsjenkins00000000000000.. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License.
You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.

Convention for heading levels in Neutron devref::

   =======  Heading 0 (reserved for the title in a document)
   -------  Heading 1
   ~~~~~~~  Heading 2
   +++++++  Heading 3
   '''''''  Heading 4
   (Avoid deeper levels because they do not render well.)

Full Stack Testing
==================

Goals
-----

* Stabilize the job:

  - Fix L3 HA failure.
  - Look into non-deterministic failures when adding a large number of tests (possibly bug 1486199).
  - Switch to kill signal 15 to terminate neutron-server & agents (bugs 1487548 and 1494363).

* Convert the L3 HA failover functional test to a full stack test
* Write a test for DHCP HA / multiple DHCP agents per network
* Write DVR tests
* Write additional L3 HA tests
* Write a test that validates DVR + L3 HA integration after https://bugs.launchpad.net/neutron/+bug/1365473 is fixed.

neutron-8.4.0/doc/source/devref/linuxbridge_agent.rst0000664000567000056710000000405413044372760024133 0ustar jenkinsjenkins00000000000000.. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.

Convention for heading levels in Neutron devref::

   =======  Heading 0 (reserved for the title in a document)
   -------  Heading 1
   ~~~~~~~  Heading 2
   +++++++  Heading 3
   '''''''  Heading 4
   (Avoid deeper levels because they do not render well.)

L2 Networking with Linux Bridge
===============================

This agent uses the `Linux Bridge `_ to provide L2 connectivity for VM instances running on the compute node to the public network. A graphical illustration of the deployment can be found in the `Networking Guide `_.

In most common deployments, there is a compute and a network node. On both the compute and the network node, the Linux Bridge Agent will manage virtual switches, connectivity among them, and interaction via virtual ports with other network components such as namespaces and underlying interfaces. Additionally, on the compute node, the Linux Bridge Agent will manage security groups.

Three use cases and their packet flows are documented as follows:

1. `Legacy implementation with Linux Bridge `_
2. `High Availability using L3HA with Linux Bridge `_
3. `Provider networks with Linux Bridge `_

neutron-8.4.0/doc/source/devref/contribute.rst0000664000567000056710000006566613044372760022627 0ustar jenkinsjenkins00000000000000.. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and limitations under the License. Convention for heading levels in Neutron devref: ======= Heading 0 (reserved for the title in a document) ------- Heading 1 ~~~~~~~ Heading 2 +++++++ Heading 3 ''''''' Heading 4 (Avoid deeper levels because they do not render well.) Contributing new extensions to Neutron ====================================== Introduction ------------ Neutron has a pluggable architecture, with a number of extension points. This documentation covers aspects relevant to contributing new Neutron v2 core (aka monolithic) plugins, ML2 mechanism drivers, and L3 service plugins. This document will initially cover a number of process-oriented aspects of the contribution process, and proceed to provide a how-to guide that shows how to go from 0 LOC's to successfully contributing new extensions to Neutron. In the remainder of this guide, we will try to use practical examples as much as we can so that people have working solutions they can start from. This guide is for a developer who wants to have a degree of visibility within the OpenStack Networking project. If you are a developer who wants to provide a Neutron-based solution without interacting with the Neutron community, you are free to do so, but you can stop reading now, as this guide is not for you. Plugins and drivers for non-reference implementations are known as "third-party" code. This includes code for supporting vendor products, as well as code for supporting open-source networking implementations. Before the Kilo release these plugins and drivers were included in the Neutron tree. During the Kilo cycle the third-party plugins and drivers underwent the first phase of a process called decomposition. During this phase, each plugin and driver moved the bulk of its logic to a separate git repository, while leaving a thin "shim" in the neutron tree together with the DB models and migrations (and perhaps some config examples). During the Liberty cycle the decomposition concept was taken to its conclusion by allowing third-party code to exist entirely out of tree. Further extension mechanisms have been provided to better support external plugins and drivers that alter the API and/or the data model. In the Mitaka cycle we will **require** all third-party code to be moved out of the neutron tree completely. 'Outside the tree' can be anything that is publicly available: it may be a repo on git.openstack.org for instance, a tarball, a pypi package, etc. A plugin/drivers maintainer team self-governs in order to promote sharing, reuse, innovation, and release of the 'out-of-tree' deliverable. It should not be required for any member of the core team to be involved with this process, although core members of the Neutron team can participate in whichever capacity is deemed necessary to facilitate out-of-tree development. This guide is aimed at you as the maintainer of code that integrates with Neutron but resides in a separate repository. Contribution Process -------------------- If you want to extend OpenStack Networking with your technology, and you want to do it within the visibility of the OpenStack project, follow the guidelines and examples below. We'll describe best practices for: * Design and Development; * Testing and Continuous Integration; * Defect Management; * Backport Management for plugin specific code; * DevStack Integration; * Documentation; Once you have everything in place you may want to add your project to the list of Neutron sub-projects. 
Submit a patch via a gerrit review to neutron to add your project to ``doc/source/devref/sub_projects.rst``.

Design and Development
----------------------

Assuming you have a working repository, any development to your own repo does not need any blueprint, specification or bugs against Neutron. However, if your project is a part of the Neutron Stadium effort, you are expected to participate in the principles of the Four Opens, meaning your design should be done in the open. Thus, you are encouraged to file documentation for changes in your own repository.

If your code is hosted on git.openstack.org then the gerrit review system is automatically provided. Contributors should follow review guidelines similar to those of Neutron. However, you as the maintainer have the flexibility to choose who can approve/merge changes in your own repo.

It is recommended (but not required, see `policies `_) that you set up a third-party CI system. This will provide a vehicle for checking the third-party code against Neutron changes. See `Testing and Continuous Integration`_ below for more detailed recommendations.

Design documents can still be supplied in the form of Restructured Text (RST) documents, within the same third-party library repo. If changes to the common Neutron code are required, an `RFE `_ may need to be filed. However, every case is different, and you are invited to seek guidance from Neutron core reviewers about what steps to follow.

Testing and Continuous Integration
----------------------------------

The following strategies are recommendations only, since third-party CI testing is not an enforced requirement. However, these strategies are employed by the majority of the plugin/driver contributors that actively participate in the Neutron development community, since they have learned from experience how quickly their code can fall out of sync with the rapidly changing Neutron core code base.

* You should run unit tests in your own external library (e.g. on git.openstack.org, where the Jenkins setup comes for free).
* Your third-party CI should validate third-party integration with Neutron via functional testing.

The third-party CI is a communication mechanism. The objective of this mechanism is as follows:

* it communicates to you when someone has contributed a change that potentially breaks your code. It is then up to you, as the maintainer of the affected plugin/driver, to determine whether the failure is transient or real, and to resolve the problem if it is.
* it communicates to a patch author that they may be breaking a plugin/driver. If they have the time/energy/relationship with the maintainer of the plugin/driver in question, then they can (at their discretion) work to resolve the breakage.
* it communicates to the community at large whether a given plugin/driver is being actively maintained.
* A maintainer that is perceived to be responsive to failures in their third-party CI jobs is likely to generate community goodwill.

It is worth noting that if the plugin/driver repository is hosted on git.openstack.org, due to current openstack-infra limitations, it is not possible to have third-party CI systems participating in the gate pipeline for the repo. This means that the only validation provided during the merge process to the repo is through unit tests. Post-merge hooks can still be exploited to provide third-party CI feedback and alert you of potential issues. As mentioned above, third-party CI systems will continue to validate Neutron core commits.
This will allow them to detect when incompatible changes occur, whether they are in Neutron or in the third-party repo.

Defect Management
-----------------

Bugs affecting third-party code should *not* be filed in the Neutron project on Launchpad. Bug tracking can be done in any system you choose, but by creating a third-party project in Launchpad, bugs that affect both Neutron and your code can be more easily tracked using Launchpad's "also affects project" feature.

Security Issues
~~~~~~~~~~~~~~~

Here are some answers on how to handle security issues in your repo, taken from `this openstack-dev mailing list message `_:

- How should your security issues be managed?

  The OpenStack Vulnerability Management Team (VMT) follows a `documented process `_ which can basically be reused by any project-team when needed.

- Should the OpenStack security team be involved?

  The OpenStack VMT directly oversees vulnerability reporting and disclosure for a `subset of OpenStack source code repositories `_. However, they are still quite happy to answer any questions you might have about vulnerability management for your own projects, even if they're not part of that set. Feel free to reach out to the VMT in public or in private. Also, the VMT is an autonomous subgroup of the much larger `OpenStack Security project-team `_. They're a knowledgeable bunch and quite responsive if you want to get their opinions or help with security-related issues (vulnerabilities or otherwise).

- Does a CVE need to be filed?

  It can vary widely. If a commercial distribution such as Red Hat is redistributing a vulnerable version of your software, then they may assign one anyway, even if you don't request one yourself. Or the reporter may request one; the reporter may even be affiliated with an organization that has already assigned/obtained a CVE before they initiate contact with you.

- Do the maintainers need to publish OSSN or equivalent documents?

  OpenStack Security Advisories (OSSA) are official publications of the OpenStack VMT and only cover VMT-supported software. OpenStack Security Notes (OSSN) are published by editors within the OpenStack Security project-team on more general security topics and may even cover issues in non-OpenStack software commonly used in conjunction with OpenStack, so it's at their discretion as to whether they would be able to accommodate a particular issue with an OSSN. However, these are all fairly arbitrary labels, and what really matters in the grand scheme of things is that vulnerabilities are handled seriously, fixed with due urgency and care, and announced widely -- not just on relevant OpenStack mailing lists, but also preferably somewhere with broader distribution like the `Open Source Security mailing list `_. The goal is to get information on your vulnerabilities, mitigating measures and fixes into the hands of the people using your software in a timely manner.

- Anything else to consider here?

  The OpenStack VMT is in the process of trying to reinvent itself so that it can better scale within the context of the "Big Tent." This includes making sure the policy/process documentation is more consumable and reusable even by project-teams working on software outside the scope of our charter. It's a work in progress, and any input is welcome on how we can make this function well for everyone.

Backport Management Strategies
------------------------------

This section applies only to third-party maintainers who had code in the Neutron tree during the Kilo and earlier releases.
It will be obsolete once the Kilo release is no longer supported.

If a change made to out-of-tree third-party code needs to be back-ported to in-tree code in a stable branch, you may submit a review without a corresponding master branch change. The change will be evaluated by core reviewers for stable branches to ensure that the backport is justified and that it does not affect Neutron core code stability.

DevStack Integration Strategies
-------------------------------

When developing and testing a new or existing plugin or driver, the aid provided by DevStack is incredibly valuable: DevStack can help get all the software bits installed and configured correctly and, more importantly, in a predictable way. For DevStack integration there are a few options available, and they may or may not make sense depending on whether you are contributing a new or existing plugin or driver.

If you are contributing a new plugin, the approach to choose should be based on `Extras.d Hooks' externally hosted plugins `_. With the extras.d hooks, the DevStack integration is co-located with the third-party integration library, and it leads to the greatest level of flexibility when dealing with DevStack-based dev/test deployments.

One final consideration is worth making for third-party CI setups: if `Devstack Gate `_ is used, it does provide hook functions that can be executed at specific times of the devstack-gate-wrap script run. For example, the `Neutron Functional job `_ uses them. For more details see `devstack-vm-gate-wrap.sh `_.

Project Initial Setup
---------------------

The how-to below assumes that the third-party library will be hosted on git.openstack.org. This lets you tap into the entire OpenStack CI infrastructure and can be a great place to start from to contribute your new or existing driver/plugin. The list of steps below is a summarized version of what you can find on http://docs.openstack.org/infra/manual/creators.html. They are meant to be the bare minimum you have to complete in order to get off the ground.

* Create a public repository: this can be a personal git.openstack.org repo or any publicly available git repo, e.g. ``https://github.com/john-doe/foo.git``. This would be a temporary buffer to be used to feed the one on git.openstack.org.
* Initialize the repository: if you are starting afresh, you may *optionally* want to use cookiecutter to get a skeleton project. You can learn how to use cookiecutter on https://git.openstack.org/cgit/openstack-dev/cookiecutter. If you want to build the repository from an existing Neutron module, you may want to skip this step now, build the history first (next step), and come back here to initialize the remainder of the repository with other files being generated by the cookiecutter (like tox.ini, setup.cfg, setup.py, etc.).
* Create a repository on git.openstack.org (see `Official Sub-Projects `_). For this you need the help of the OpenStack infra team. It is worth noting that you only get one shot at creating the repository on git.openstack.org. This is the time you get to choose whether you want to start from a clean slate, or you want to import the repo created during the previous step. In the latter case, you can do so by specifying the upstream section for your project in project-config/gerrit/project.yaml. Steps are documented on the `Repository Creator's Guide `_.
* Ask for a Launchpad user to be assigned to the newly created core team. Steps are documented in `this section `_.
* Fix, fix, fix: at this point you have an external base to work on.
You can develop against the new git.openstack.org project, the same way you work with any other OpenStack project: you have pep8, docs, and python27 CI jobs that validate your patches when posted to Gerrit. For instance, one thing you would need to do is to define an entry point for your plugin or driver in your own setup.cfg, similarly to how it is done in the `setup.cfg for ODL `_.

* Define an entry point for your plugin or driver in setup.cfg
* Create a third-party CI account: if you do not already have one, follow the instructions for `third-party CI `_ to get one.

Internationalization support
----------------------------

OpenStack is committed to broad international support. Internationalization (I18n) is one of the important areas that make OpenStack ubiquitous. Each project is recommended to support i18n. This section describes how to set up translation support. The description in this section uses the following variables:

* repository: ``openstack/${REPOSITORY}`` (e.g., ``openstack/networking-foo``)
* top level python path: ``${MODULE_NAME}`` (e.g., ``networking_foo``)

oslo.i18n
~~~~~~~~~

* Each subproject repository should have its own oslo.i18n integration wrapper module ``${MODULE_NAME}/_i18n.py``. Details can be found at http://docs.openstack.org/developer/oslo.i18n/usage.html.

  .. note:: The **DOMAIN** name should match your **module** name ``${MODULE_NAME}``.

* Import ``_()`` from your ``${MODULE_NAME}/_i18n.py``.

  .. warning:: Do not use ``_()`` in the builtins namespace which is registered by **gettext.install()** in ``neutron/__init__.py``. It is now deprecated as described in the oslo.i18n documentation.

Setting up translation support
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

You need to create or edit the following files to start translation support:

* setup.cfg
* babel.cfg

We have a good example for an oslo project at https://review.openstack.org/#/c/98248/.

Add the following to ``setup.cfg``::

    [extract_messages]
    keywords = _ gettext ngettext l_ lazy_gettext
    mapping_file = babel.cfg
    output_file = ${MODULE_NAME}/locale/${MODULE_NAME}.pot

    [compile_catalog]
    directory = ${MODULE_NAME}/locale
    domain = ${MODULE_NAME}

    [update_catalog]
    domain = ${MODULE_NAME}
    output_dir = ${MODULE_NAME}/locale
    input_file = ${MODULE_NAME}/locale/${MODULE_NAME}.pot

Note that ``${MODULE_NAME}`` is used in all names.

Create ``babel.cfg`` with the following contents::

    [python: **.py]

Enable Translation
~~~~~~~~~~~~~~~~~~

To update and import translations, you need to make a change in project-config. A good example is found at https://review.openstack.org/#/c/224222/. After doing this, the necessary jobs will be run and push/pull a message catalog to/from the translation infrastructure.

Integrating with the Neutron system
-----------------------------------

Configuration Files
~~~~~~~~~~~~~~~~~~~

The ``data_files`` in the ``[files]`` section of ``setup.cfg`` of Neutron shall not contain any third-party references. These shall be located in the same section of the third-party repo's own ``setup.cfg`` file.

* Note: Care should be taken when naming sections in configuration files. When the Neutron service or an agent starts, oslo.config loads sections from all specified config files. This means that if a section [foo] exists in multiple config files, duplicate settings will collide. It is therefore recommended to prefix section names with a third-party string, e.g. [vendor_foo].
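For example, a third-party repo's own configuration file might look like the following sketch (section and option names are purely illustrative)::

    # etc/foo.conf shipped by the hypothetical networking-foo repo; the
    # vendor_foo prefix avoids colliding with sections owned by Neutron
    # or by other plugins loaded into the same process.
    [vendor_foo]
    controller_address = 192.0.2.10
    poll_interval = 5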
Since Mitaka, configuration files are not maintained in the git repository but should be generated as follows:: ``tox -e genconfig`` If a 'tox' environment is unavailable, then you can run the following script instead to generate the configuration files:: ./tools/generate_config_file_samples.sh It is advised that subprojects do not keep their configuration files in their respective trees and instead generate them using a similar approach as Neutron does. **ToDo: Inclusion in OpenStack documentation?** Is there a recommended way to have third-party config options listed in the configuration guide in docs.openstack.org? Database Models and Migrations ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ A third-party repo may contain database models for its own tables. Although these tables are in the Neutron database, they are independently managed entirely within the third-party code. Third-party code shall **never** modify neutron core tables in any way. Each repo has its own *expand* and *contract* `alembic migration branches `_. A third-party repo's alembic migration branches may operate only on tables that are owned by the repo. * Note: Care should be taken when adding new tables. To prevent collision of table names it is **required** to prefix them with a vendor/plugin string. * Note: A third-party maintainer may opt to use a separate database for their tables. This may complicate cases where there are foreign key constraints across schemas for DBMS that do not support this well. Third-party maintainer discretion advised. The database tables owned by a third-party repo can have references to fields in neutron core tables. However, the alembic branch for a plugin/driver repo shall never update any part of a table that it does not own. **Note: What happens when a referenced item changes?** * **Q:** If a driver's table has a reference (for example a foreign key) to a neutron core table, and the referenced item is changed in neutron, what should you do? * **A:** Fortunately, this should be an extremely rare occurrence. Neutron core reviewers will not allow such a change unless there is a very carefully thought-out design decision behind it. That design will include how to address any third-party code affected. (This is another good reason why you should stay actively involved with the Neutron developer community.) The ``neutron-db-manage`` alembic wrapper script for neutron detects alembic branches for installed third-party repos, and the upgrade command automatically applies to all of them. A third-party repo must register its alembic migrations at installation time. This is done by providing an entrypoint in setup.cfg as follows: For a third-party repo named ``networking-foo``, add the alembic_migrations directory as an entrypoint in the ``neutron.db.alembic_migrations`` group:: [entry_points] neutron.db.alembic_migrations = networking-foo = networking_foo.db.migration:alembic_migrations **ToDo: neutron-db-manage autogenerate** The alembic autogenerate command needs to support branches in external repos. Bug #1471333 has been filed for this. DB Model/Migration Testing ~~~~~~~~~~~~~~~~~~~~~~~~~~ Here is a `template functional test `_ third-party maintainers can use to develop tests for model-vs-migration sync in their repos. It is recommended that each third-party CI sets up such a test, and runs it regularly against Neutron master. Entry Points ~~~~~~~~~~~~ The `Python setuptools `_ installs all entry points for packages in one global namespace for an environment. 
Thus each third-party repo can define its package's own ``[entry_points]`` in its own ``setup.cfg`` file. For example, for the ``networking-foo`` repo:: [entry_points] console_scripts = neutron-foo-agent = networking_foo.cmd.eventlet.agents.foo:main neutron.core_plugins = foo_monolithic = networking_foo.plugins.monolithic.plugin:FooPluginV2 neutron.service_plugins = foo_l3 = networking_foo.services.l3_router.l3_foo:FooL3ServicePlugin neutron.ml2.type_drivers = foo_type = networking_foo.plugins.ml2.drivers.foo:FooType neutron.ml2.mechanism_drivers = foo_ml2 = networking_foo.plugins.ml2.drivers.foo:FooDriver neutron.ml2.extension_drivers = foo_ext = networking_foo.plugins.ml2.drivers.foo:FooExtensionDriver * Note: It is advisable to include ``foo`` in the names of these entry points to avoid conflicts with other third-party packages that may get installed in the same environment. API Extensions ~~~~~~~~~~~~~~ Extensions can be loaded in two ways: #. Use the ``append_api_extensions_path()`` library API. This method is defined in ``neutron/api/extensions.py`` in the neutron tree. #. Leverage the ``api_extensions_path`` config variable when deploying. See the example config file ``etc/neutron.conf`` in the neutron tree where this variable is commented. Service Providers ~~~~~~~~~~~~~~~~~ If your project uses service provider(s) the same way VPNAAS and LBAAS do, you specify your service provider in your ``project_name.conf`` file like so:: [service_providers] # Must be in form: # service_provider=::[:default][,...] In order for Neutron to load this correctly, make sure you do the following in your code:: from neutron.db import servicetype_db service_type_manager = servicetype_db.ServiceTypeManager.get_instance() service_type_manager.add_provider_configuration( YOUR_SERVICE_TYPE, pconf.ProviderConfiguration(YOUR_SERVICE_MODULE)) This is typically required when you instantiate your service plugin class. Interface Drivers ~~~~~~~~~~~~~~~~~ Interface (VIF) drivers for the reference implementations are defined in ``neutron/agent/linux/interface.py``. Third-party interface drivers shall be defined in a similar location within their own repo. The entry point for the interface driver is a Neutron config option. It is up to the installer to configure this item in the ``[default]`` section. For example:: [default] interface_driver = networking_foo.agent.linux.interface.FooInterfaceDriver **ToDo: Interface Driver port bindings.** ``VIF_TYPE_*`` constants in ``neutron/extensions/portbindings.py`` should be moved from neutron core to the repositories where their drivers are implemented. We need to provide some config or hook mechanism for VIF types to be registered by external interface drivers. For Nova, selecting the VIF driver can be done outside of Neutron (using the new `os-vif python library `_?). Armando and Akihiro to discuss. Rootwrap Filters ~~~~~~~~~~~~~~~~ If a third-party repo needs a rootwrap filter for a command that is not used by Neutron core, then the filter shall be defined in the third-party repo. For example, to add a rootwrap filters for commands in repo ``networking-foo``: * In the repo, create the file: ``etc/neutron/rootwrap.d/foo.filters`` * In the repo's ``setup.cfg`` add the filters to data_files:: [files] data_files = etc/neutron/rootwrap.d = etc/neutron/rootwrap.d/foo.filters Extending python-neutronclient ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The maintainer of a third-party component may wish to add extensions to the Neutron CLI client. 
Thanks to https://review.openstack.org/148318 this can now be accomplished. See `Client Command Extensions `_.

Other repo-split items
~~~~~~~~~~~~~~~~~~~~~~

(These are still TBD.)

* Splitting policy.json? **ToDo** Armando will investigate.
* Generic instructions (or a template) for installing an out-of-tree plugin or driver for Neutron. Possibly something for the networking guide, and/or a template that plugin/driver maintainers can modify and include with their package.

neutron-8.4.0/doc/source/devref/template_model_sync_test.rst0000664000567000056710000001237213044372760025531 0ustar jenkinsjenkins00000000000000.. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.

Convention for heading levels in Neutron devref::

   =======  Heading 0 (reserved for the title in a document)
   -------  Heading 1
   ~~~~~~~  Heading 2
   +++++++  Heading 3
   '''''''  Heading 4
   (Avoid deeper levels because they do not render well.)

Template for ModelMigrationSync for external repos
==================================================

This section contains a template for a test which checks that the Python models for database tables are synchronized with the alembic migrations that create the database schema. This test should be implemented in all driver/plugin repositories that were split out from Neutron.

What does the test do?
----------------------

This test compares models with the result of existing migrations. It is based on `ModelsMigrationsSync `_, which is provided by oslo.db and was adapted for Neutron. It compares core Neutron models and vendor-specific models with migrations from Neutron core and migrations from the driver/plugin repo. This test is functional - it runs against MySQL and PostgreSQL dialects. The detailed description of this test can be found in the Neutron Database Layer section - `Tests to verify that database migrations and models are in sync `_.

Steps for implementing the test
-------------------------------

1. Import all models in one place
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

Create a module ``networking_foo/db/models/head.py`` with the following content::

    from neutron.db.migration.models import head

    from networking_foo import models  # noqa
    # Alternatively, import separate modules here if the models are not in one
    # models.py file


    def get_metadata():
        return head.model_base.BASEV2.metadata

2. Implement the test module
~~~~~~~~~~~~~~~~~~~~~~~~~~~~

The test uses external.py from Neutron. This file contains lists of table names, which were moved out of Neutron::

    VPNAAS_TABLES = [...]

    LBAAS_TABLES = [...]

    FWAAS_TABLES = [...]

    # Arista ML2 driver Models moved to openstack/networking-arista
    REPO_ARISTA_TABLES = [...]

    # Models moved to openstack/networking-cisco
    REPO_CISCO_TABLES = [...]

    ...

    TABLES = (FWAAS_TABLES + LBAAS_TABLES + VPNAAS_TABLES + ...
              + REPO_ARISTA_TABLES + REPO_CISCO_TABLES)

The test also uses **VERSION_TABLE**, the name of the database table that contains the revision id of the head migration. It is preferred to keep this variable in ``networking_foo/db/migration/alembic_migrations/__init__.py`` so it will be easy to use in the test.
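For instance, the module might contain little more than the table name itself (the value shown is illustrative)::

    # networking_foo/db/migration/alembic_migrations/__init__.py
    VERSION_TABLE = 'alembic_version_foo'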
Create a module ``networking_foo/tests/functional/db/test_migrations.py`` with the following content: :: from oslo_config import cfg from neutron.db.migration.alembic_migrations import external from neutron.db.migration import cli as migration from neutron.tests.common import base from neutron.tests.functional.db import test_migrations from networking_foo.db.migration import alembic_migrations from networking_foo.db.models import head # EXTERNAL_TABLES should contain all names of tables that are not related to # current repo. EXTERNAL_TABLES = set(external.TABLES) - set(external.REPO_FOO_TABLES) class _TestModelsMigrationsFoo(test_migrations._TestModelsMigrations): def db_sync(self, engine): cfg.CONF.set_override('connection', engine.url, group='database') for conf in migration.get_alembic_configs(): self.alembic_config = conf self.alembic_config.neutron_config = cfg.CONF migration.do_alembic_command(conf, 'upgrade', 'heads') def get_metadata(self): return head.get_metadata() def include_object(self, object_, name, type_, reflected, compare_to): if type_ == 'table' and (name == 'alembic' or name == alembic_migrations.VERSION_TABLE or name in EXTERNAL_TABLES): return False else: return True class TestModelsMigrationsMysql(_TestModelsMigrationsFoo, base.MySQLTestCase): pass class TestModelsMigrationsPsql(_TestModelsMigrationsFoo, base.PostgreSQLTestCase): pass 3. Add functional requirements ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ A separate file ``networking_foo/tests/functional/requirements.txt`` should be created containing the following requirements that are needed for successful test execution. :: psutil>=1.1.1,<2.0.0 psycopg2 PyMySQL>=0.6.2 # MIT License Example implementation `in VPNaaS `_ neutron-8.4.0/doc/source/devref/layer3.rst0000664000567000056710000003247113044372760021644 0ustar jenkinsjenkins00000000000000.. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. Convention for heading levels in Neutron devref: ======= Heading 0 (reserved for the title in a document) ------- Heading 1 ~~~~~~~ Heading 2 +++++++ Heading 3 ''''''' Heading 4 (Avoid deeper levels because they do not render well.) Layer 3 Networking in Neutron - via Layer 3 agent & OpenVSwitch =============================================================== This page discusses the usage of Neutron with Layer 3 functionality enabled. 
Neutron logical network setup ----------------------------- :: vagrant@precise64:~/devstack$ neutron net-list +--------------------------------------+---------+--------------------------------------------------+ | id | name | subnets | +--------------------------------------+---------+--------------------------------------------------+ | 84b6b0cc-503d-448a-962f-43def05e85be | public | 3a56da7c-2f6e-41af-890a-b324d7bc374d | | a4b4518c-800d-4357-9193-57dbb42ac5ee | private | 1a2d26fb-b733-4ab3-992e-88554a87afa6 10.0.0.0/24 | +--------------------------------------+---------+--------------------------------------------------+ vagrant@precise64:~/devstack$ neutron subnet-list +--------------------------------------+------+-------------+--------------------------------------------+ | id | name | cidr | allocation_pools | +--------------------------------------+------+-------------+--------------------------------------------+ | 1a2d26fb-b733-4ab3-992e-88554a87afa6 | | 10.0.0.0/24 | {"start": "10.0.0.2", "end": "10.0.0.254"} | +--------------------------------------+------+-------------+--------------------------------------------+ vagrant@precise64:~/devstack$ neutron port-list +--------------------------------------+------+-------------------+---------------------------------------------------------------------------------+ | id | name | mac_address | fixed_ips | +--------------------------------------+------+-------------------+---------------------------------------------------------------------------------+ | 0ba8700e-da06-4318-8fe9-00676dd994b8 | | fa:16:3e:78:43:5b | {"subnet_id": "1a2d26fb-b733-4ab3-992e-88554a87afa6", "ip_address": "10.0.0.1"} | | b2044570-ad52-4f31-a2c3-5d767dc9a8a7 | | fa:16:3e:5b:cf:4c | {"subnet_id": "1a2d26fb-b733-4ab3-992e-88554a87afa6", "ip_address": "10.0.0.3"} | | bb60d1bb-0cab-41cb-9678-30d2b2fdb169 | | fa:16:3e:af:a9:bd | {"subnet_id": "1a2d26fb-b733-4ab3-992e-88554a87afa6", "ip_address": "10.0.0.2"} | +--------------------------------------+------+-------------------+---------------------------------------------------------------------------------+ vagrant@precise64:~/devstack$ neutron subnet-show 1a2d26fb-b733-4ab3-992e-88554a87afa6 +------------------+--------------------------------------------+ | Field | Value | +------------------+--------------------------------------------+ | allocation_pools | {"start": "10.0.0.2", "end": "10.0.0.254"} | | cidr | 10.0.0.0/24 | | dns_nameservers | | | enable_dhcp | True | | gateway_ip | 10.0.0.1 | | host_routes | | | id | 1a2d26fb-b733-4ab3-992e-88554a87afa6 | | ip_version | 4 | | name | | | network_id | a4b4518c-800d-4357-9193-57dbb42ac5ee | | tenant_id | 3368290ab10f417390acbb754160dbb2 | +------------------+--------------------------------------------+ Neutron logical router setup ---------------------------- * http://docs.openstack.org/networking-guide/scenario_legacy_ovs.html :: vagrant@precise64:~/devstack$ neutron router-list +--------------------------------------+---------+--------------------------------------------------------+ | id | name | external_gateway_info | +--------------------------------------+---------+--------------------------------------------------------+ | 569469c7-a2a5-4d32-9cdd-f0b18a13f45e | router1 | {"network_id": "84b6b0cc-503d-448a-962f-43def05e85be"} | +--------------------------------------+---------+--------------------------------------------------------+ vagrant@precise64:~/devstack$ neutron router-show router1 
+-----------------------+--------------------------------------------------------+ | Field | Value | +-----------------------+--------------------------------------------------------+ | admin_state_up | True | | external_gateway_info | {"network_id": "84b6b0cc-503d-448a-962f-43def05e85be"} | | id | 569469c7-a2a5-4d32-9cdd-f0b18a13f45e | | name | router1 | | routes | | | status | ACTIVE | | tenant_id | 3368290ab10f417390acbb754160dbb2 | +-----------------------+--------------------------------------------------------+ vagrant@precise64:~/devstack$ neutron router-port-list router1 +--------------------------------------+------+-------------------+---------------------------------------------------------------------------------+ | id | name | mac_address | fixed_ips | +--------------------------------------+------+-------------------+---------------------------------------------------------------------------------+ | 0ba8700e-da06-4318-8fe9-00676dd994b8 | | fa:16:3e:78:43:5b | {"subnet_id": "1a2d26fb-b733-4ab3-992e-88554a87afa6", "ip_address": "10.0.0.1"} | +--------------------------------------+------+-------------------+---------------------------------------------------------------------------------+ Neutron Routers are realized in OpenVSwitch ------------------------------------------- .. image:: images/under-the-hood-scenario-1-ovs-network.png "router1" in the Neutron logical network is realized through a port ("qr-0ba8700e-da") in OpenVSwitch - attached to "br-int":: vagrant@precise64:~/devstack$ sudo ovs-vsctl show b9b27fc3-5057-47e7-ba64-0b6afe70a398 Bridge br-int Port "qr-0ba8700e-da" tag: 1 Interface "qr-0ba8700e-da" type: internal Port br-int Interface br-int type: internal Port int-br-ex Interface int-br-ex Port "tapbb60d1bb-0c" tag: 1 Interface "tapbb60d1bb-0c" type: internal Port "qvob2044570-ad" tag: 1 Interface "qvob2044570-ad" Port "int-br-eth1" Interface "int-br-eth1" Bridge "br-eth1" Port "phy-br-eth1" Interface "phy-br-eth1" Port "br-eth1" Interface "br-eth1" type: internal Bridge br-ex Port phy-br-ex Interface phy-br-ex Port "qg-0143bce1-08" Interface "qg-0143bce1-08" type: internal Port br-ex Interface br-ex type: internal ovs_version: "1.4.0+build0" vagrant@precise64:~/devstack$ brctl show bridge name bridge id STP enabled interfaces br-eth1 0000.e2e7fc5ccb4d no br-ex 0000.82ee46beaf4d no phy-br-ex qg-39efb3f9-f0 qg-77e0666b-cd br-int 0000.5e46cb509849 no int-br-ex qr-54c9cd83-43 qvo199abeb2-63 qvo1abbbb60-b8 tap74b45335-cc qbr199abeb2-63 8000.ba06e5f8675c no qvb199abeb2-63 tap199abeb2-63 qbr1abbbb60-b8 8000.46a87ed4fb66 no qvb1abbbb60-b8 tap1abbbb60-b8 virbr0 8000.000000000000 yes Finding the router in ip/ipconfig --------------------------------- * http://docs.openstack.org/admin-guide-cloud/networking.html The neutron-l3-agent uses the Linux IP stack and iptables to perform L3 forwarding and NAT. In order to support multiple routers with potentially overlapping IP addresses, neutron-l3-agent defaults to using Linux network namespaces to provide isolated forwarding contexts. As a result, the IP addresses of routers will not be visible simply by running "ip addr list" or "ifconfig" on the node. Similarly, you will not be able to directly ping fixed IPs. To do either of these things, you must run the command within a particular router's network namespace. The namespace will have the name "qrouter-. .. 
image:: images/under-the-hood-scenario-1-ovs-netns.png For example:: vagrant@precise64:~$ neutron router-list +--------------------------------------+---------+--------------------------------------------------------+ | id | name | external_gateway_info | +--------------------------------------+---------+--------------------------------------------------------+ | ad948c6e-afb6-422a-9a7b-0fc44cbb3910 | router1 | {"network_id": "e6634fef-03fa-482a-9fa7-e0304ce5c995"} | +--------------------------------------+---------+--------------------------------------------------------+ vagrant@precise64:~/devstack$ sudo ip netns exec qrouter-ad948c6e-afb6-422a-9a7b-0fc44cbb3910 ip addr list 18: lo: mtu 16436 qdisc noqueue state UNKNOWN link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00 inet 127.0.0.1/8 scope host lo inet6 ::1/128 scope host valid_lft forever preferred_lft forever 19: qr-54c9cd83-43: mtu 1500 qdisc noqueue state UNKNOWN link/ether fa:16:3e:dd:c1:8f brd ff:ff:ff:ff:ff:ff inet 10.0.0.1/24 brd 10.0.0.255 scope global qr-54c9cd83-43 inet6 fe80::f816:3eff:fedd:c18f/64 scope link valid_lft forever preferred_lft forever 20: qg-77e0666b-cd: mtu 1500 qdisc noqueue state UNKNOWN link/ether fa:16:3e:1f:d3:ec brd ff:ff:ff:ff:ff:ff inet 192.168.27.130/28 brd 192.168.27.143 scope global qg-77e0666b-cd inet6 fe80::f816:3eff:fe1f:d3ec/64 scope link valid_lft forever preferred_lft forever Provider Networking ------------------- Neutron can also be configured to create `provider networks `_ Further Reading --------------- * `Packet Pushers - Neutron Network Implementation on Linux `_ * `OpenStack Cloud Administrator Guide `_ * `Neutron - Layer 3 API extension usage guide `_ * `Darragh O'Reilly - The Quantum L3 router and floating IPs `_ neutron-8.4.0/doc/source/devref/dns_order.rst0000664000567000056710000001252713044372736022427 0ustar jenkinsjenkins00000000000000.. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. Convention for heading levels in Neutron devref: ======= Heading 0 (reserved for the title in a document) ------- Heading 1 ~~~~~~~ Heading 2 +++++++ Heading 3 ''''''' Heading 4 (Avoid deeper levels because they do not render well.) Keep DNS Nameserver Order Consistency In Neutron ================================================ In Neutron subnets, DNS nameservers are given priority when created or updated. This means if you create a subnet with multiple DNS servers, the order will be retained and guests will receive the DNS servers in the order you created them in when the subnet was created. The same thing applies for update operations on subnets to add, remove, or update DNS servers. 
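For example, a subnet like the one inspected below could have been created with an explicit nameserver order (the network name and addresses are illustrative; the ``list=true`` syntax is the same one used for the update shown later)::

    neutron subnet-create mynet 10.0.0.0/24 \
        --dns_nameservers list=true 1.1.1.1 2.2.2.2 3.3.3.3

Guests on this subnet would then receive 1.1.1.1 first, 2.2.2.2 second, and 3.3.3.3 third.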
Get Subnet Details Info
-----------------------
::

    changzhi@stack:~/devstack$ neutron subnet-list
    +--------------------------------------+------+-------------+--------------------------------------------+
    | id                                   | name | cidr        | allocation_pools                           |
    +--------------------------------------+------+-------------+--------------------------------------------+
    | 1a2d261b-b233-3ab9-902e-88576a82afa6 |      | 10.0.0.0/24 | {"start": "10.0.0.2", "end": "10.0.0.254"} |
    +--------------------------------------+------+-------------+--------------------------------------------+

    changzhi@stack:~/devstack$ neutron subnet-show 1a2d261b-b233-3ab9-902e-88576a82afa6
    +------------------+--------------------------------------------+
    | Field            | Value                                      |
    +------------------+--------------------------------------------+
    | allocation_pools | {"start": "10.0.0.2", "end": "10.0.0.254"} |
    | cidr             | 10.0.0.0/24                                |
    | dns_nameservers  | 1.1.1.1                                    |
    |                  | 2.2.2.2                                    |
    |                  | 3.3.3.3                                    |
    | enable_dhcp      | True                                       |
    | gateway_ip       | 10.0.0.1                                   |
    | host_routes      |                                            |
    | id               | 1a2d261b-b233-3ab9-902e-88576a82afa6       |
    | ip_version       | 4                                          |
    | name             |                                            |
    | network_id       | a404518c-800d-2353-9193-57dbb42ac5ee       |
    | tenant_id        | 3868290ab10f417390acbb754160dbb2           |
    +------------------+--------------------------------------------+

Update Subnet DNS Nameservers
-----------------------------
::

    neutron subnet-update 1a2d261b-b233-3ab9-902e-88576a82afa6 \
        --dns_nameservers list=true 3.3.3.3 2.2.2.2 1.1.1.1

    changzhi@stack:~/devstack$ neutron subnet-show 1a2d261b-b233-3ab9-902e-88576a82afa6
    +------------------+--------------------------------------------+
    | Field            | Value                                      |
    +------------------+--------------------------------------------+
    | allocation_pools | {"start": "10.0.0.2", "end": "10.0.0.254"} |
    | cidr             | 10.0.0.0/24                                |
    | dns_nameservers  | 3.3.3.3                                    |
    |                  | 2.2.2.2                                    |
    |                  | 1.1.1.1                                    |
    | enable_dhcp      | True                                       |
    | gateway_ip       | 10.0.0.1                                   |
    | host_routes      |                                            |
    | id               | 1a2d261b-b233-3ab9-902e-88576a82afa6       |
    | ip_version       | 4                                          |
    | name             |                                            |
    | network_id       | a404518c-800d-2353-9193-57dbb42ac5ee       |
    | tenant_id        | 3868290ab10f417390acbb754160dbb2           |
    +------------------+--------------------------------------------+

As shown in the output above, the order of the DNS nameservers has been
updated. New virtual machines deployed to this subnet will receive the DNS
nameservers in this new priority order. Existing virtual machines that have
already been deployed are not immediately affected by changing the DNS
nameserver order on the neutron subnet. Virtual machines that are configured
to get their IP address via DHCP will detect the DNS nameserver order change
when their DHCP lease expires or when the virtual machine is restarted.
Existing virtual machines configured with a static IP address will never
detect the updated DNS nameserver order.
neutron-8.4.0/doc/Makefile0000664000567000056710000000633713044372736016575 0ustar jenkinsjenkins00000000000000# Makefile for Sphinx documentation
#

# You can set these variables from the command line.
SPHINXOPTS    =
SPHINXBUILD   = sphinx-build
SPHINXSOURCE  = source
PAPER         =
BUILDDIR      = build

# Internal variables.
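# (PAPER selects which PAPEROPT_* value below gets spliced into
# ALLSPHINXOPTS; e.g. "make latex PAPER=a4" builds A4-sized LaTeX output,
# as described in the help target below.)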
PAPEROPT_a4     = -D latex_paper_size=a4
PAPEROPT_letter = -D latex_paper_size=letter
ALLSPHINXOPTS   = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) $(SPHINXSOURCE)

.PHONY: help clean html dirhtml pickle json htmlhelp qthelp latex changes linkcheck doctest

.DEFAULT_GOAL = html

help:
	@echo "Please use \`make <target>' where <target> is one of"
	@echo "  html      to make standalone HTML files"
	@echo "  dirhtml   to make HTML files named index.html in directories"
	@echo "  pickle    to make pickle files"
	@echo "  json      to make JSON files"
	@echo "  htmlhelp  to make HTML files and a HTML help project"
	@echo "  qthelp    to make HTML files and a qthelp project"
	@echo "  latex     to make LaTeX files, you can set PAPER=a4 or PAPER=letter"
	@echo "  changes   to make an overview of all changed/added/deprecated items"
	@echo "  linkcheck to check all external links for integrity"
	@echo "  doctest   to run all doctests embedded in the documentation (if enabled)"

clean:
	-rm -rf $(BUILDDIR)/*
	if [ -f .autogenerated ] ; then \
		cat .autogenerated | xargs rm ; \
		rm .autogenerated ; \
	fi

html:
	$(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html
	@echo
	@echo "Build finished. The HTML pages are in $(BUILDDIR)/html."

dirhtml:
	$(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml
	@echo
	@echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml."

pickle:
	$(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle
	@echo
	@echo "Build finished; now you can process the pickle files."

json:
	$(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json
	@echo
	@echo "Build finished; now you can process the JSON files."

htmlhelp:
	$(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp
	@echo
	@echo "Build finished; now you can run HTML Help Workshop with the" \
	      ".hhp project file in $(BUILDDIR)/htmlhelp."

qthelp:
	$(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp
	@echo
	@echo "Build finished; now you can run "qcollectiongenerator" with the" \
	      ".qhcp project file in $(BUILDDIR)/qthelp, like this:"
	@echo "# qcollectiongenerator $(BUILDDIR)/qthelp/nova.qhcp"
	@echo "To view the help file:"
	@echo "# assistant -collectionFile $(BUILDDIR)/qthelp/nova.qhc"

latex:
	$(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
	@echo
	@echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex."
	@echo "Run \`make all-pdf' or \`make all-ps' in that directory to" \
	      "run these through (pdf)latex."

changes:
	$(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes
	@echo
	@echo "The overview file is in $(BUILDDIR)/changes."

linkcheck:
	$(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck
	@echo
	@echo "Link check complete; look for any errors in the above output " \
	      "or in $(BUILDDIR)/linkcheck/output.txt."

doctest:
	$(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest
	@echo "Testing of doctests in the sources finished, look at the " \
	      "results in $(BUILDDIR)/doctest/output.txt."
neutron-8.4.0/openstack-common.conf0000664000567000056710000000000013044372760020476 0ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/0000775000567000056710000000000013044373210016045 5ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/manager.py0000664000567000056710000002350513044372760020047 0ustar jenkinsjenkins00000000000000# Copyright 2011 VMware, Inc
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import weakref

from oslo_config import cfg
from oslo_log import log as logging
import oslo_messaging
from oslo_service import periodic_task
import six

from neutron._i18n import _, _LI
from neutron.common import utils
from neutron.plugins.common import constants


LOG = logging.getLogger(__name__)

CORE_PLUGINS_NAMESPACE = 'neutron.core_plugins'


class Manager(periodic_task.PeriodicTasks):

    # Set RPC API version to 1.0 by default.
    target = oslo_messaging.Target(version='1.0')

    def __init__(self, host=None):
        if not host:
            host = cfg.CONF.host
        self.host = host
        conf = getattr(self, "conf", cfg.CONF)
        super(Manager, self).__init__(conf)

    def periodic_tasks(self, context, raise_on_error=False):
        self.run_periodic_tasks(context, raise_on_error=raise_on_error)

    def init_host(self):
        """Handle initialization if this is a standalone service.

        Child classes should override this method.
        """
        pass

    def after_start(self):
        """Handle post-initialization tasks.

        Child classes can override this method.
        """
        pass


def validate_post_plugin_load():
    """Checks if the configuration variables are valid.

    If the configuration is invalid then the method will return an error
    message. If all is OK then it will return None.
    """
    if ('dhcp_agents_per_network' in cfg.CONF and
        cfg.CONF.dhcp_agents_per_network <= 0):
        msg = _("dhcp_agents_per_network must be >= 1. '%s' "
                "is invalid.") % cfg.CONF.dhcp_agents_per_network
        return msg


def validate_pre_plugin_load():
    """Checks if the configuration variables are valid.

    If the configuration is invalid then the method will return an error
    message. If all is OK then it will return None.
    """
    if cfg.CONF.core_plugin is None:
        msg = _('Neutron core_plugin not configured!')
        return msg


class NeutronManager(object):
    """Neutron's Manager class.

    Neutron's Manager class is responsible for parsing a config file and
    instantiating the correct plugin that concretely implements
    neutron_plugin_base class.

    The caller should make sure that NeutronManager is a singleton.
    """
    _instance = None

    def __init__(self, options=None, config_file=None):
        # If no options have been provided, create an empty dict
        if not options:
            options = {}

        msg = validate_pre_plugin_load()
        if msg:
            LOG.critical(msg)
            raise Exception(msg)

        # NOTE(jkoelker) Testing for the subclass with the __subclasshook__
        #                breaks tach monitoring. It has been removed
        #                intentionally to allow v2 plugins to be monitored
        #                for performance metrics.
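        # NOTE: cfg.CONF.core_plugin (read just below) accepts either an
        # entry-point alias from the neutron.core_plugins namespace (for
        # example "ml2") or a full dotted class path; see
        # load_class_for_provider() further down, which resolves both forms.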
plugin_provider = cfg.CONF.core_plugin LOG.info(_LI("Loading core plugin: %s"), plugin_provider) self.plugin = self._get_plugin_instance(CORE_PLUGINS_NAMESPACE, plugin_provider) msg = validate_post_plugin_load() if msg: LOG.critical(msg) raise Exception(msg) # core plugin as a part of plugin collection simplifies # checking extensions # TODO(enikanorov): make core plugin the same as # the rest of service plugins self.service_plugins = {constants.CORE: self.plugin} self._load_service_plugins() # Used by pecan WSGI self.resource_plugin_mappings = {} self.resource_controller_mappings = {} @staticmethod def load_class_for_provider(namespace, plugin_provider): """Loads plugin using alias or class name :param namespace: namespace where alias is defined :param plugin_provider: plugin alias or class name :returns plugin that is loaded :raises ImportError if fails to load plugin """ try: return utils.load_class_by_alias_or_classname(namespace, plugin_provider) except ImportError: raise ImportError(_("Plugin '%s' not found.") % plugin_provider) def _get_plugin_instance(self, namespace, plugin_provider): plugin_class = self.load_class_for_provider(namespace, plugin_provider) return plugin_class() def _load_services_from_core_plugin(self): """Puts core plugin in service_plugins for supported services.""" LOG.debug("Loading services supported by the core plugin") # supported service types are derived from supported extensions for ext_alias in getattr(self.plugin, "supported_extension_aliases", []): if ext_alias in constants.EXT_TO_SERVICE_MAPPING: service_type = constants.EXT_TO_SERVICE_MAPPING[ext_alias] self.service_plugins[service_type] = self.plugin LOG.info(_LI("Service %s is supported by the core plugin"), service_type) def _get_default_service_plugins(self): """Get default service plugins to be loaded.""" return constants.DEFAULT_SERVICE_PLUGINS.keys() def _load_service_plugins(self): """Loads service plugins. Starts from the core plugin and checks if it supports advanced services then loads classes provided in configuration. """ # load services from the core plugin first self._load_services_from_core_plugin() plugin_providers = cfg.CONF.service_plugins plugin_providers.extend(self._get_default_service_plugins()) LOG.debug("Loading service plugins: %s", plugin_providers) for provider in plugin_providers: if provider == '': continue LOG.info(_LI("Loading Plugin: %s"), provider) plugin_inst = self._get_plugin_instance('neutron.service_plugins', provider) # only one implementation of svc_type allowed # specifying more than one plugin # for the same type is a fatal exception if plugin_inst.get_plugin_type() in self.service_plugins: raise ValueError(_("Multiple plugins for service " "%s were configured") % plugin_inst.get_plugin_type()) self.service_plugins[plugin_inst.get_plugin_type()] = plugin_inst # search for possible agent notifiers declared in service plugin # (needed by agent management extension) if (hasattr(self.plugin, 'agent_notifiers') and hasattr(plugin_inst, 'agent_notifiers')): self.plugin.agent_notifiers.update(plugin_inst.agent_notifiers) LOG.debug("Successfully loaded %(type)s plugin. 
" "Description: %(desc)s", {"type": plugin_inst.get_plugin_type(), "desc": plugin_inst.get_plugin_description()}) @classmethod @utils.synchronized("manager") def _create_instance(cls): if not cls.has_instance(): cls._instance = cls() @classmethod def has_instance(cls): return cls._instance is not None @classmethod def clear_instance(cls): cls._instance = None @classmethod def get_instance(cls): # double checked locking if not cls.has_instance(): cls._create_instance() return cls._instance @classmethod def get_plugin(cls): # Return a weakref to minimize gc-preventing references. return weakref.proxy(cls.get_instance().plugin) @classmethod def get_service_plugins(cls): # Return weakrefs to minimize gc-preventing references. service_plugins = cls.get_instance().service_plugins return dict((x, weakref.proxy(y)) for x, y in six.iteritems(service_plugins)) @classmethod def get_unique_service_plugins(cls): service_plugins = cls.get_instance().service_plugins return tuple(weakref.proxy(x) for x in set(service_plugins.values())) @classmethod def set_plugin_for_resource(cls, resource, plugin): cls.get_instance().resource_plugin_mappings[resource] = plugin @classmethod def get_plugin_for_resource(cls, resource): return cls.get_instance().resource_plugin_mappings.get(resource) @classmethod def set_controller_for_resource(cls, resource, controller): cls.get_instance().resource_controller_mappings[resource] = controller @classmethod def get_controller_for_resource(cls, resource): res_ctrl_mappings = cls.get_instance().resource_controller_mappings # If no controller is found for resource, try replacing dashes with # underscores return res_ctrl_mappings.get( resource, res_ctrl_mappings.get(resource.replace('-', '_'))) @classmethod def get_service_plugin_by_path_prefix(cls, path_prefix): service_plugins = cls.get_unique_service_plugins() for service_plugin in service_plugins: plugin_path_prefix = getattr(service_plugin, 'path_prefix', None) if plugin_path_prefix and plugin_path_prefix == path_prefix: return service_plugin neutron-8.4.0/neutron/plugins/0000775000567000056710000000000013044373210017526 5ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/plugins/hyperv/0000775000567000056710000000000013044373210021043 5ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/plugins/hyperv/__init__.py0000664000567000056710000000000013044372760023153 0ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/plugins/hyperv/agent/0000775000567000056710000000000013044373210022141 5ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/plugins/hyperv/agent/__init__.py0000664000567000056710000000000013044372760024251 0ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/plugins/hyperv/agent/security_groups_driver.py0000664000567000056710000000253113044372760027346 0ustar jenkinsjenkins00000000000000#Copyright 2014 Cloudbase Solutions SRL #All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from debtcollector import moves
from hyperv.neutron import security_groups_driver as sg_driver
from oslo_log import log as logging

from neutron._i18n import _LW

LOG = logging.getLogger(__name__)

# TODO(claudiub): Remove this module at the beginning of the O cycle.

new_driver = 'hyperv.neutron.security_groups_driver.HyperVSecurityGroupsDriver'
LOG.warning(_LW("You are using the deprecated firewall driver: "
                "%(deprecated)s. Use the recommended driver %(new)s instead."),
            {'deprecated': '%s.HyperVSecurityGroupsDriver' % __name__,
             'new': new_driver})

HyperVSecurityGroupsDriver = moves.moved_class(
    sg_driver.HyperVSecurityGroupsDriver,
    'HyperVSecurityGroupsDriver', __name__)
neutron-8.4.0/neutron/plugins/common/utils.py0000664000567000056710000001647413044372760022545 0ustar jenkinsjenkins00000000000000# Copyright 2013 Cisco Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""
Common utilities and helper functions for OpenStack Networking Plugins.
"""

import hashlib

from oslo_config import cfg
from oslo_log import log as logging
import six
import webob.exc

from neutron._i18n import _, _LI
from neutron.api.v2 import attributes
from neutron.common import constants as n_const
from neutron.common import exceptions as n_exc
from neutron.plugins.common import constants as p_const

INTERFACE_HASH_LEN = 6
LOG = logging.getLogger(__name__)


def get_deployment_physnet_mtu():
    """Retrieves global physical network MTU setting.

    Plugins should use this function to retrieve the MTU set by the
    operator that is equal to or less than the MTU of their nodes'
    physical interfaces. Note that it is the responsibility of the
    plugin to deduct the value of any encapsulation overhead required
    before advertising it to VMs.
""" return cfg.CONF.global_physnet_mtu def is_valid_vlan_tag(vlan): return p_const.MIN_VLAN_TAG <= vlan <= p_const.MAX_VLAN_TAG def is_valid_gre_id(gre_id): return p_const.MIN_GRE_ID <= gre_id <= p_const.MAX_GRE_ID def is_valid_vxlan_vni(vni): return p_const.MIN_VXLAN_VNI <= vni <= p_const.MAX_VXLAN_VNI def is_valid_geneve_vni(vni): return p_const.MIN_GENEVE_VNI <= vni <= p_const.MAX_GENEVE_VNI def verify_tunnel_range(tunnel_range, tunnel_type): """Raise an exception for invalid tunnel range or malformed range.""" mappings = {p_const.TYPE_GRE: is_valid_gre_id, p_const.TYPE_VXLAN: is_valid_vxlan_vni, p_const.TYPE_GENEVE: is_valid_geneve_vni} if tunnel_type in mappings: for ident in tunnel_range: if not mappings[tunnel_type](ident): raise n_exc.NetworkTunnelRangeError( tunnel_range=tunnel_range, error=_("%(id)s is not a valid %(type)s identifier") % {'id': ident, 'type': tunnel_type}) if tunnel_range[1] < tunnel_range[0]: raise n_exc.NetworkTunnelRangeError( tunnel_range=tunnel_range, error=_("End of tunnel range is less " "than start of tunnel range")) def verify_vlan_range(vlan_range): """Raise an exception for invalid tags or malformed range.""" for vlan_tag in vlan_range: if not is_valid_vlan_tag(vlan_tag): raise n_exc.NetworkVlanRangeError( vlan_range=vlan_range, error=_("%s is not a valid VLAN tag") % vlan_tag) if vlan_range[1] < vlan_range[0]: raise n_exc.NetworkVlanRangeError( vlan_range=vlan_range, error=_("End of VLAN range is less than start of VLAN range")) def parse_network_vlan_range(network_vlan_range): """Interpret a string as network[:vlan_begin:vlan_end].""" entry = network_vlan_range.strip() if ':' in entry: try: network, vlan_min, vlan_max = entry.split(':') vlan_range = (int(vlan_min), int(vlan_max)) except ValueError as ex: raise n_exc.NetworkVlanRangeError(vlan_range=entry, error=ex) if not network: raise n_exc.PhysicalNetworkNameError() verify_vlan_range(vlan_range) return network, vlan_range else: return entry, None def parse_network_vlan_ranges(network_vlan_ranges_cfg_entries): """Interpret a list of strings as network[:vlan_begin:vlan_end] entries.""" networks = {} for entry in network_vlan_ranges_cfg_entries: network, vlan_range = parse_network_vlan_range(entry) if vlan_range: networks.setdefault(network, []).append(vlan_range) else: networks.setdefault(network, []) return networks def in_pending_status(status): return status in (p_const.PENDING_CREATE, p_const.PENDING_UPDATE, p_const.PENDING_DELETE) def _fixup_res_dict(context, attr_name, res_dict, check_allow_post=True): attr_info = attributes.RESOURCE_ATTRIBUTE_MAP[attr_name] try: attributes.populate_tenant_id(context, res_dict, attr_info, True) attributes.verify_attributes(res_dict, attr_info) except webob.exc.HTTPBadRequest as e: # convert webob exception into ValueError as these functions are # for internal use. webob exception doesn't make sense. 
raise ValueError(e.detail) attributes.fill_default_value(attr_info, res_dict, check_allow_post=check_allow_post) attributes.convert_value(attr_info, res_dict) return res_dict def create_network(core_plugin, context, net): net_data = _fixup_res_dict(context, attributes.NETWORKS, net.get('network', {})) return core_plugin.create_network(context, {'network': net_data}) def create_subnet(core_plugin, context, subnet): subnet_data = _fixup_res_dict(context, attributes.SUBNETS, subnet.get('subnet', {})) return core_plugin.create_subnet(context, {'subnet': subnet_data}) def create_port(core_plugin, context, port, check_allow_post=True): port_data = _fixup_res_dict(context, attributes.PORTS, port.get('port', {}), check_allow_post=check_allow_post) return core_plugin.create_port(context, {'port': port_data}) def get_interface_name(name, prefix='', max_len=n_const.DEVICE_NAME_MAX_LEN): """Construct an interface name based on the prefix and name. The interface name can not exceed the maximum length passed in. Longer names are hashed to help ensure uniqueness. """ requested_name = prefix + name if len(requested_name) <= max_len: return requested_name # We can't just truncate because interfaces may be distinguished # by an ident at the end. A hash over the name should be unique. # Leave part of the interface name on for easier identification if (len(prefix) + INTERFACE_HASH_LEN) > max_len: raise ValueError(_("Too long prefix provided. New name would exceed " "given length for an interface name.")) namelen = max_len - len(prefix) - INTERFACE_HASH_LEN if isinstance(name, six.text_type): hashed_name = hashlib.sha1(name.encode('utf-8')) else: hashed_name = hashlib.sha1(name) new_name = ('%(prefix)s%(truncated)s%(hash)s' % {'prefix': prefix, 'truncated': name[0:namelen], 'hash': hashed_name.hexdigest()[0:INTERFACE_HASH_LEN]}) LOG.info(_LI("The requested interface name %(requested_name)s exceeds the " "%(limit)d character limitation. It was shortened to " "%(new_name)s to fit."), {'requested_name': requested_name, 'limit': max_len, 'new_name': new_name}) return new_name neutron-8.4.0/neutron/plugins/common/constants.py0000664000567000056710000000446113044372760023422 0ustar jenkinsjenkins00000000000000# Copyright 2012 OpenStack Foundation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # Neutron well-known service type constants: CORE = "CORE" DUMMY = "DUMMY" LOADBALANCER = "LOADBALANCER" LOADBALANCERV2 = "LOADBALANCERV2" FIREWALL = "FIREWALL" VPN = "VPN" METERING = "METERING" L3_ROUTER_NAT = "L3_ROUTER_NAT" FLAVORS = "FLAVORS" QOS = "QOS" # Maps extension alias to service type that # can be implemented by the core plugin. 
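# (For example, a core plugin that lists 'router' among its
# supported_extension_aliases is registered as the L3_ROUTER_NAT service
# plugin; see NeutronManager._load_services_from_core_plugin() in
# neutron/manager.py.)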
EXT_TO_SERVICE_MAPPING = { 'dummy': DUMMY, 'lbaas': LOADBALANCER, 'lbaasv2': LOADBALANCERV2, 'fwaas': FIREWALL, 'vpnaas': VPN, 'metering': METERING, 'router': L3_ROUTER_NAT, 'qos': QOS, } # Maps default service plugins entry points to their extension aliases DEFAULT_SERVICE_PLUGINS = { 'auto_allocate': 'auto-allocated-topology', 'tag': 'tag', 'timestamp_core': 'timestamp_core', 'network_ip_availability': 'network-ip-availability' } # Service operation status constants ACTIVE = "ACTIVE" DOWN = "DOWN" CREATED = "CREATED" PENDING_CREATE = "PENDING_CREATE" PENDING_UPDATE = "PENDING_UPDATE" PENDING_DELETE = "PENDING_DELETE" INACTIVE = "INACTIVE" ERROR = "ERROR" ACTIVE_PENDING_STATUSES = ( ACTIVE, PENDING_CREATE, PENDING_UPDATE ) # Network Type constants TYPE_FLAT = 'flat' TYPE_GENEVE = 'geneve' TYPE_GRE = 'gre' TYPE_LOCAL = 'local' TYPE_VXLAN = 'vxlan' TYPE_VLAN = 'vlan' TYPE_NONE = 'none' # Values for network_type # For VLAN Network MIN_VLAN_TAG = 1 MAX_VLAN_TAG = 4094 # For Geneve Tunnel MIN_GENEVE_VNI = 1 MAX_GENEVE_VNI = 2 ** 24 - 1 # For GRE Tunnel MIN_GRE_ID = 1 MAX_GRE_ID = 2 ** 32 - 1 # For VXLAN Tunnel MIN_VXLAN_VNI = 1 MAX_VXLAN_VNI = 2 ** 24 - 1 VXLAN_UDP_PORT = 4789 # Network Type MTU overhead GENEVE_ENCAP_MIN_OVERHEAD = 50 GRE_ENCAP_OVERHEAD = 42 VXLAN_ENCAP_OVERHEAD = 50 neutron-8.4.0/neutron/plugins/ml2/0000775000567000056710000000000013044373210020220 5ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/plugins/ml2/config.py0000664000567000056710000000660313044372760022055 0ustar jenkinsjenkins00000000000000# Copyright (c) 2013 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import cfg from neutron._i18n import _ from neutron.common import constants ml2_opts = [ cfg.ListOpt('type_drivers', default=['local', 'flat', 'vlan', 'gre', 'vxlan', 'geneve'], help=_("List of network type driver entrypoints to be loaded " "from the neutron.ml2.type_drivers namespace.")), cfg.ListOpt('tenant_network_types', default=['local'], help=_("Ordered list of network_types to allocate as tenant " "networks. The default value 'local' is useful for " "single-box testing but provides no connectivity " "between hosts.")), cfg.ListOpt('mechanism_drivers', default=[], help=_("An ordered list of networking mechanism driver " "entrypoints to be loaded from the " "neutron.ml2.mechanism_drivers namespace.")), cfg.ListOpt('extension_drivers', default=[], help=_("An ordered list of extension driver " "entrypoints to be loaded from the " "neutron.ml2.extension_drivers namespace. " "For example: extension_drivers = port_security,qos")), cfg.IntOpt('path_mtu', default=constants.DEFAULT_NETWORK_MTU, help=_('Maximum size of an IP packet (MTU) that can traverse ' 'the underlying physical network infrastructure without ' 'fragmentation for overlay/tunnel networks. In most ' 'cases, use the same value as the global_physnet_mtu ' 'option.')), cfg.ListOpt('physical_network_mtus', default=[], help=_("A list of mappings of physical networks to MTU " "values. 
The format of the mapping is " ":. This mapping allows " "specifying a physical network MTU value that " "differs from the default global_physnet_mtu value.")), cfg.StrOpt('external_network_type', help=_("Default network type for external networks when no " "provider attributes are specified. By default it is " "None, which means that if provider attributes are not " "specified while creating external networks then they " "will have the same type as tenant networks. Allowed " "values for external_network_type config option depend " "on the network type values configured in type_drivers " "config option.")) ] cfg.CONF.register_opts(ml2_opts, "ml2") neutron-8.4.0/neutron/plugins/ml2/__init__.py0000664000567000056710000000000013044372736022333 0ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/plugins/ml2/plugin.py0000664000567000056710000023312413044372760022106 0ustar jenkinsjenkins00000000000000# Copyright (c) 2013 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from eventlet import greenthread from oslo_concurrency import lockutils from oslo_config import cfg from oslo_db import api as oslo_db_api from oslo_db import exception as os_db_exception from oslo_log import helpers as log_helpers from oslo_log import log from oslo_serialization import jsonutils from oslo_utils import excutils from oslo_utils import importutils from oslo_utils import uuidutils from sqlalchemy import exc as sql_exc from sqlalchemy.orm import exc as sa_exc from neutron._i18n import _, _LE, _LI, _LW from neutron.agent import securitygroups_rpc as sg_rpc from neutron.api.rpc.agentnotifiers import dhcp_rpc_agent_api from neutron.api.rpc.handlers import dhcp_rpc from neutron.api.rpc.handlers import dvr_rpc from neutron.api.rpc.handlers import metadata_rpc from neutron.api.rpc.handlers import resources_rpc from neutron.api.rpc.handlers import securitygroups_rpc from neutron.api.v2 import attributes from neutron.callbacks import events from neutron.callbacks import exceptions from neutron.callbacks import registry from neutron.callbacks import resources from neutron.common import constants as const from neutron.common import exceptions as exc from neutron.common import ipv6_utils from neutron.common import rpc as n_rpc from neutron.common import topics from neutron.common import utils from neutron.db import address_scope_db from neutron.db import agents_db from neutron.db import agentschedulers_db from neutron.db import allowedaddresspairs_db as addr_pair_db from neutron.db import api as db_api from neutron.db import db_base_plugin_v2 from neutron.db import dvr_mac_db from neutron.db import external_net_db from neutron.db import extradhcpopt_db from neutron.db import models_v2 from neutron.db import netmtu_db from neutron.db.quota import driver # noqa from neutron.db import securitygroups_db from neutron.db import securitygroups_rpc_base as sg_db_rpc from neutron.db import vlantransparent_db from neutron.extensions import allowedaddresspairs as addr_pair from neutron.extensions import 
availability_zone as az_ext from neutron.extensions import extra_dhcp_opt as edo_ext from neutron.extensions import portbindings from neutron.extensions import portsecurity as psec from neutron.extensions import providernet as provider from neutron.extensions import vlantransparent from neutron import manager from neutron.plugins.common import constants as service_constants from neutron.plugins.ml2.common import exceptions as ml2_exc from neutron.plugins.ml2 import config # noqa from neutron.plugins.ml2 import db from neutron.plugins.ml2 import driver_api as api from neutron.plugins.ml2 import driver_context from neutron.plugins.ml2.extensions import qos as qos_ext from neutron.plugins.ml2 import managers from neutron.plugins.ml2 import models from neutron.plugins.ml2 import rpc from neutron.quota import resource_registry from neutron.services.qos import qos_consts LOG = log.getLogger(__name__) MAX_BIND_TRIES = 10 SERVICE_PLUGINS_REQUIRED_DRIVERS = { 'qos': [qos_ext.QOS_EXT_DRIVER_ALIAS] } class Ml2Plugin(db_base_plugin_v2.NeutronDbPluginV2, dvr_mac_db.DVRDbMixin, external_net_db.External_net_db_mixin, sg_db_rpc.SecurityGroupServerRpcMixin, agentschedulers_db.AZDhcpAgentSchedulerDbMixin, addr_pair_db.AllowedAddressPairsMixin, vlantransparent_db.Vlantransparent_db_mixin, extradhcpopt_db.ExtraDhcpOptMixin, netmtu_db.Netmtu_db_mixin, address_scope_db.AddressScopeDbMixin): """Implement the Neutron L2 abstractions using modules. Ml2Plugin is a Neutron plugin based on separately extensible sets of network types and mechanisms for connecting to networks of those types. The network types and mechanisms are implemented as drivers loaded via Python entry points. Networks can be made up of multiple segments (not yet fully implemented). """ # This attribute specifies whether the plugin supports or not # bulk/pagination/sorting operations. 
Name mangling is used in # order to ensure it is qualified by class __native_bulk_support = True __native_pagination_support = True __native_sorting_support = True # List of supported extensions _supported_extension_aliases = ["provider", "external-net", "binding", "quotas", "security-group", "agent", "dhcp_agent_scheduler", "multi-provider", "allowed-address-pairs", "extra_dhcp_opt", "subnet_allocation", "net-mtu", "vlan-transparent", "address-scope", "availability_zone", "network_availability_zone", "default-subnetpools"] @property def supported_extension_aliases(self): if not hasattr(self, '_aliases'): aliases = self._supported_extension_aliases[:] aliases += self.extension_manager.extension_aliases() sg_rpc.disable_security_group_extension_by_config(aliases) vlantransparent.disable_extension_by_config(aliases) self._aliases = aliases return self._aliases @resource_registry.tracked_resources( network=models_v2.Network, port=models_v2.Port, subnet=models_v2.Subnet, subnetpool=models_v2.SubnetPool, security_group=securitygroups_db.SecurityGroup, security_group_rule=securitygroups_db.SecurityGroupRule) def __init__(self): # First load drivers, then initialize DB, then initialize drivers self.type_manager = managers.TypeManager() self.extension_manager = managers.ExtensionManager() self.mechanism_manager = managers.MechanismManager() super(Ml2Plugin, self).__init__() self.type_manager.initialize() self.extension_manager.initialize() self.mechanism_manager.initialize() self._setup_dhcp() self._start_rpc_notifiers() self.add_agent_status_check(self.agent_health_check) self._verify_service_plugins_requirements() LOG.info(_LI("Modular L2 Plugin initialization complete")) def _setup_rpc(self): """Initialize components to support agent communication.""" self.endpoints = [ rpc.RpcCallbacks(self.notifier, self.type_manager), securitygroups_rpc.SecurityGroupServerRpcCallback(), dvr_rpc.DVRServerRpcCallback(), dhcp_rpc.DhcpRpcCallback(), agents_db.AgentExtRpcCallback(), metadata_rpc.MetadataRpcCallback(), resources_rpc.ResourcesPullRpcCallback() ] def _setup_dhcp(self): """Initialize components to support DHCP.""" self.network_scheduler = importutils.import_object( cfg.CONF.network_scheduler_driver ) self.start_periodic_dhcp_agent_status_check() def _verify_service_plugins_requirements(self): for service_plugin in cfg.CONF.service_plugins: extension_drivers = SERVICE_PLUGINS_REQUIRED_DRIVERS.get( service_plugin, [] ) for extension_driver in extension_drivers: if extension_driver not in self.extension_manager.names(): raise ml2_exc.ExtensionDriverNotFound( driver=extension_driver, service_plugin=service_plugin ) @property def supported_qos_rule_types(self): return self.mechanism_manager.supported_qos_rule_types @log_helpers.log_method_call def _start_rpc_notifiers(self): """Initialize RPC notifiers for agents.""" self.notifier = rpc.AgentNotifierApi(topics.AGENT) self.agent_notifiers[const.AGENT_TYPE_DHCP] = ( dhcp_rpc_agent_api.DhcpAgentNotifyAPI() ) @log_helpers.log_method_call def start_rpc_listeners(self): """Start the RPC loop to let the plugin communicate with agents.""" self._setup_rpc() self.topic = topics.PLUGIN self.conn = n_rpc.create_connection() self.conn.create_consumer(self.topic, self.endpoints, fanout=False) self.conn.create_consumer( topics.SERVER_RESOURCE_VERSIONS, [resources_rpc.ResourcesPushToServerRpcCallback()], fanout=True) # process state reports despite dedicated rpc workers self.conn.create_consumer(topics.REPORTS, [agents_db.AgentExtRpcCallback()], fanout=False) return 
self.conn.consume_in_threads() def start_rpc_state_reports_listener(self): self.conn_reports = n_rpc.create_connection(new=True) self.conn_reports.create_consumer(topics.REPORTS, [agents_db.AgentExtRpcCallback()], fanout=False) return self.conn_reports.consume_in_threads() def _filter_nets_provider(self, context, networks, filters): return [network for network in networks if self.type_manager.network_matches_filters(network, filters) ] def _check_mac_update_allowed(self, orig_port, port, binding): unplugged_types = (portbindings.VIF_TYPE_BINDING_FAILED, portbindings.VIF_TYPE_UNBOUND) new_mac = port.get('mac_address') mac_change = (new_mac is not None and orig_port['mac_address'] != new_mac) if (mac_change and binding.vif_type not in unplugged_types): raise exc.PortBound(port_id=orig_port['id'], vif_type=binding.vif_type, old_mac=orig_port['mac_address'], new_mac=port['mac_address']) return mac_change def _process_port_binding(self, mech_context, attrs): session = mech_context._plugin_context.session binding = mech_context._binding port = mech_context.current port_id = port['id'] changes = False host = attributes.ATTR_NOT_SPECIFIED if attrs and portbindings.HOST_ID in attrs: host = attrs.get(portbindings.HOST_ID) or '' original_host = binding.host if (attributes.is_attr_set(host) and original_host != host): binding.host = host changes = True vnic_type = attrs and attrs.get(portbindings.VNIC_TYPE) if (attributes.is_attr_set(vnic_type) and binding.vnic_type != vnic_type): binding.vnic_type = vnic_type changes = True # treat None as clear of profile. profile = None if attrs and portbindings.PROFILE in attrs: profile = attrs.get(portbindings.PROFILE) or {} if profile not in (None, attributes.ATTR_NOT_SPECIFIED, self._get_profile(binding)): binding.profile = jsonutils.dumps(profile) if len(binding.profile) > models.BINDING_PROFILE_LEN: msg = _("binding:profile value too large") raise exc.InvalidInput(error_message=msg) changes = True # Unbind the port if needed. if changes: binding.vif_type = portbindings.VIF_TYPE_UNBOUND binding.vif_details = '' db.clear_binding_levels(session, port_id, original_host) mech_context._clear_binding_levels() port['status'] = const.PORT_STATUS_DOWN super(Ml2Plugin, self).update_port( mech_context._plugin_context, port_id, {attributes.PORT: {'status': const.PORT_STATUS_DOWN}}) if port['device_owner'] == const.DEVICE_OWNER_DVR_INTERFACE: binding.vif_type = portbindings.VIF_TYPE_UNBOUND binding.vif_details = '' db.clear_binding_levels(session, port_id, original_host) mech_context._clear_binding_levels() binding.host = '' self._update_port_dict_binding(port, binding) return changes def _bind_port_if_needed(self, context, allow_notify=False, need_notify=False): for count in range(1, MAX_BIND_TRIES + 1): if count > 1: # yield for binding retries so that we give other threads a # chance to do their work greenthread.sleep(0) # multiple attempts shouldn't happen very often so we log each # attempt after the 1st. LOG.info(_LI("Attempt %(count)s to bind port %(port)s"), {'count': count, 'port': context.current['id']}) bind_context, need_notify, try_again = self._attempt_binding( context, need_notify) if count == MAX_BIND_TRIES or not try_again: if self._should_bind_port(context): # At this point, we attempted to bind a port and reached # its final binding state. Binding either succeeded or # exhausted all attempts, thus no need to try again. # Now, the port and its binding state should be committed. 
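                    # (_commit_port_binding() re-reads the port inside a new
                    # DB transaction and commits these results only if no
                    # concurrent thread changed the binding inputs in the
                    # meantime; otherwise it requests another retry via
                    # try_again.)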
context, need_notify, try_again = ( self._commit_port_binding(context, bind_context, need_notify, try_again)) else: context = bind_context if not try_again: if allow_notify and need_notify: self._notify_port_updated(context) return context LOG.error(_LE("Failed to commit binding results for %(port)s " "after %(max)s tries"), {'port': context.current['id'], 'max': MAX_BIND_TRIES}) return context def _should_bind_port(self, context): return (context._binding.host and context._binding.vif_type in (portbindings.VIF_TYPE_UNBOUND, portbindings.VIF_TYPE_BINDING_FAILED)) def _attempt_binding(self, context, need_notify): try_again = False if self._should_bind_port(context): bind_context = self._bind_port(context) if bind_context.vif_type != portbindings.VIF_TYPE_BINDING_FAILED: # Binding succeeded. Suggest notifying of successful binding. need_notify = True else: # Current attempt binding failed, try to bind again. try_again = True context = bind_context return context, need_notify, try_again def _bind_port(self, orig_context): # Construct a new PortContext from the one from the previous # transaction. port = orig_context.current orig_binding = orig_context._binding new_binding = models.PortBinding( host=orig_binding.host, vnic_type=orig_binding.vnic_type, profile=orig_binding.profile, vif_type=portbindings.VIF_TYPE_UNBOUND, vif_details='' ) self._update_port_dict_binding(port, new_binding) new_context = driver_context.PortContext( self, orig_context._plugin_context, port, orig_context.network.current, new_binding, None) # Attempt to bind the port and return the context with the # result. self.mechanism_manager.bind_port(new_context) return new_context def _commit_port_binding(self, orig_context, bind_context, need_notify, try_again): port_id = orig_context.current['id'] plugin_context = orig_context._plugin_context session = plugin_context.session orig_binding = orig_context._binding new_binding = bind_context._binding # After we've attempted to bind the port, we begin a # transaction, get the current port state, and decide whether # to commit the binding results. with session.begin(subtransactions=True): # Get the current port state and build a new PortContext # reflecting this state as original state for subsequent # mechanism driver update_port_*commit() calls. port_db, cur_binding = db.get_locked_port_and_binding(session, port_id) # Since the mechanism driver bind_port() calls must be made # outside a DB transaction locking the port state, it is # possible (but unlikely) that the port's state could change # concurrently while these calls are being made. If another # thread or process succeeds in binding the port before this # thread commits its results, the already committed results are # used. If attributes such as binding:host_id, binding:profile, # or binding:vnic_type are updated concurrently, the try_again # flag is returned to indicate that the commit was unsuccessful. if not port_db: # The port has been deleted concurrently, so just # return the unbound result from the initial # transaction that completed before the deletion. 
LOG.debug("Port %s has been deleted concurrently", port_id) return orig_context, False, False oport = self._make_port_dict(port_db) port = self._make_port_dict(port_db) network = bind_context.network.current if port['device_owner'] == const.DEVICE_OWNER_DVR_INTERFACE: # REVISIT(rkukura): The PortBinding instance from the # ml2_port_bindings table, returned as cur_binding # from db.get_locked_port_and_binding() above, is # currently not used for DVR distributed ports, and is # replaced here with the DVRPortBinding instance from # the ml2_dvr_port_bindings table specific to the host # on which the distributed port is being bound. It # would be possible to optimize this code to avoid # fetching the PortBinding instance in the DVR case, # and even to avoid creating the unused entry in the # ml2_port_bindings table. But the upcoming resolution # for bug 1367391 will eliminate the # ml2_dvr_port_bindings table, use the # ml2_port_bindings table to store non-host-specific # fields for both distributed and non-distributed # ports, and introduce a new ml2_port_binding_hosts # table for the fields that need to be host-specific # in the distributed case. Since the PortBinding # instance will then be needed, it does not make sense # to optimize this code to avoid fetching it. cur_binding = db.get_dvr_port_binding_by_host( session, port_id, orig_binding.host) cur_context = driver_context.PortContext( self, plugin_context, port, network, cur_binding, None, original_port=oport) # Commit our binding results only if port has not been # successfully bound concurrently by another thread or # process and no binding inputs have been changed. commit = ((cur_binding.vif_type in [portbindings.VIF_TYPE_UNBOUND, portbindings.VIF_TYPE_BINDING_FAILED]) and orig_binding.host == cur_binding.host and orig_binding.vnic_type == cur_binding.vnic_type and orig_binding.profile == cur_binding.profile) if commit: # Update the port's binding state with our binding # results. cur_binding.vif_type = new_binding.vif_type cur_binding.vif_details = new_binding.vif_details db.clear_binding_levels(session, port_id, cur_binding.host) db.set_binding_levels(session, bind_context._binding_levels) cur_context._binding_levels = bind_context._binding_levels # Update PortContext's port dictionary to reflect the # updated binding state. self._update_port_dict_binding(port, cur_binding) # Update the port status if requested by the bound driver. if (bind_context._binding_levels and bind_context._new_port_status): port_db.status = bind_context._new_port_status port['status'] = bind_context._new_port_status # Call the mechanism driver precommit methods, commit # the results, and call the postcommit methods. self.mechanism_manager.update_port_precommit(cur_context) if commit: # Continue, using the port state as of the transaction that # just finished, whether that transaction committed new # results or discovered concurrent port state changes. # Also, Trigger notification for successful binding commit. 
self.mechanism_manager.update_port_postcommit(cur_context) need_notify = True try_again = False else: try_again = True return cur_context, need_notify, try_again def _update_port_dict_binding(self, port, binding): port[portbindings.VNIC_TYPE] = binding.vnic_type port[portbindings.PROFILE] = self._get_profile(binding) if port['device_owner'] == const.DEVICE_OWNER_DVR_INTERFACE: port[portbindings.HOST_ID] = '' port[portbindings.VIF_TYPE] = portbindings.VIF_TYPE_DISTRIBUTED port[portbindings.VIF_DETAILS] = {} else: port[portbindings.HOST_ID] = binding.host port[portbindings.VIF_TYPE] = binding.vif_type port[portbindings.VIF_DETAILS] = self._get_vif_details(binding) def _get_vif_details(self, binding): if binding.vif_details: try: return jsonutils.loads(binding.vif_details) except Exception: LOG.error(_LE("Serialized vif_details DB value '%(value)s' " "for port %(port)s is invalid"), {'value': binding.vif_details, 'port': binding.port_id}) return {} def _get_profile(self, binding): if binding.profile: try: return jsonutils.loads(binding.profile) except Exception: LOG.error(_LE("Serialized profile DB value '%(value)s' for " "port %(port)s is invalid"), {'value': binding.profile, 'port': binding.port_id}) return {} def _ml2_extend_port_dict_binding(self, port_res, port_db): # None when called during unit tests for other plugins. if port_db.port_binding: self._update_port_dict_binding(port_res, port_db.port_binding) db_base_plugin_v2.NeutronDbPluginV2.register_dict_extend_funcs( attributes.PORTS, ['_ml2_extend_port_dict_binding']) # Register extend dict methods for network and port resources. # Each mechanism driver that supports extend attribute for the resources # can add those attribute to the result. db_base_plugin_v2.NeutronDbPluginV2.register_dict_extend_funcs( attributes.NETWORKS, ['_ml2_md_extend_network_dict']) db_base_plugin_v2.NeutronDbPluginV2.register_dict_extend_funcs( attributes.PORTS, ['_ml2_md_extend_port_dict']) db_base_plugin_v2.NeutronDbPluginV2.register_dict_extend_funcs( attributes.SUBNETS, ['_ml2_md_extend_subnet_dict']) def _ml2_md_extend_network_dict(self, result, netdb): session = db_api.get_session() with session.begin(subtransactions=True): self.extension_manager.extend_network_dict(session, netdb, result) def _ml2_md_extend_port_dict(self, result, portdb): session = db_api.get_session() with session.begin(subtransactions=True): self.extension_manager.extend_port_dict(session, portdb, result) def _ml2_md_extend_subnet_dict(self, result, subnetdb): session = db_api.get_session() with session.begin(subtransactions=True): self.extension_manager.extend_subnet_dict( session, subnetdb, result) # Note - The following hook methods have "ml2" in their names so # that they are not called twice during unit tests due to global # registration of hooks in portbindings_db.py used by other # plugins. 
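    # (These hooks join the ml2 PortBinding table into standard port queries
    # so that ports can be filtered by binding:host_id; they are wired up by
    # the register_model_query_hook() call that follows them.)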
def _ml2_port_model_hook(self, context, original_model, query): query = query.outerjoin(models.PortBinding, (original_model.id == models.PortBinding.port_id)) return query def _ml2_port_result_filter_hook(self, query, filters): values = filters and filters.get(portbindings.HOST_ID, []) if not values: return query return query.filter(models.PortBinding.host.in_(values)) db_base_plugin_v2.NeutronDbPluginV2.register_model_query_hook( models_v2.Port, "ml2_port_bindings", '_ml2_port_model_hook', None, '_ml2_port_result_filter_hook') def _notify_port_updated(self, mech_context): port = mech_context.current segment = mech_context.bottom_bound_segment if not segment: # REVISIT(rkukura): This should notify agent to unplug port network = mech_context.network.current LOG.debug("In _notify_port_updated(), no bound segment for " "port %(port_id)s on network %(network_id)s", {'port_id': port['id'], 'network_id': network['id']}) return self.notifier.port_update(mech_context._plugin_context, port, segment[api.NETWORK_TYPE], segment[api.SEGMENTATION_ID], segment[api.PHYSICAL_NETWORK]) def _delete_objects(self, context, resource, objects): delete_op = getattr(self, 'delete_%s' % resource) for obj in objects: try: delete_op(context, obj['result']['id']) except KeyError: LOG.exception(_LE("Could not find %s to delete."), resource) except Exception: LOG.exception(_LE("Could not delete %(res)s %(id)s."), {'res': resource, 'id': obj['result']['id']}) def _create_bulk_ml2(self, resource, context, request_items): objects = [] collection = "%ss" % resource items = request_items[collection] try: with context.session.begin(subtransactions=True): obj_creator = getattr(self, '_create_%s_db' % resource) for item in items: attrs = item[resource] result, mech_context = obj_creator(context, item) objects.append({'mech_context': mech_context, 'result': result, 'attributes': attrs}) except Exception: with excutils.save_and_reraise_exception(): LOG.exception(_LE("An exception occurred while creating " "the %(resource)s:%(item)s"), {'resource': resource, 'item': item}) try: postcommit_op = getattr(self.mechanism_manager, 'create_%s_postcommit' % resource) for obj in objects: postcommit_op(obj['mech_context']) return objects except ml2_exc.MechanismDriverError: with excutils.save_and_reraise_exception(): resource_ids = [res['result']['id'] for res in objects] LOG.exception(_LE("mechanism_manager.create_%(res)s" "_postcommit failed for %(res)s: " "'%(failed_id)s'. 
Deleting " "%(res)ss %(resource_ids)s"), {'res': resource, 'failed_id': obj['result']['id'], 'resource_ids': ', '.join(resource_ids)}) self._delete_objects(context, resource, objects) def _create_network_db(self, context, network): net_data = network[attributes.NETWORK] tenant_id = net_data['tenant_id'] session = context.session with session.begin(subtransactions=True): self._ensure_default_security_group(context, tenant_id) net_db = self.create_network_db(context, network) result = self._make_network_dict(net_db, process_extensions=False, context=context) self.extension_manager.process_create_network(context, net_data, result) self._process_l3_create(context, result, net_data) net_data['id'] = result['id'] self.type_manager.create_network_segments(context, net_data, tenant_id) self.type_manager.extend_network_dict_provider(context, result) # Update the transparent vlan if configured if utils.is_extension_supported(self, 'vlan-transparent'): vlt = vlantransparent.get_vlan_transparent(net_data) net_db['vlan_transparent'] = vlt result['vlan_transparent'] = vlt mech_context = driver_context.NetworkContext(self, context, result) self.mechanism_manager.create_network_precommit(mech_context) if net_data.get(api.MTU, 0) > 0: net_db[api.MTU] = net_data[api.MTU] result[api.MTU] = net_data[api.MTU] if az_ext.AZ_HINTS in net_data: self.validate_availability_zones(context, 'network', net_data[az_ext.AZ_HINTS]) az_hints = az_ext.convert_az_list_to_string( net_data[az_ext.AZ_HINTS]) net_db[az_ext.AZ_HINTS] = az_hints result[az_ext.AZ_HINTS] = az_hints self._apply_dict_extend_functions('networks', result, net_db) return result, mech_context def create_network(self, context, network): result, mech_context = self._create_network_db(context, network) try: self.mechanism_manager.create_network_postcommit(mech_context) except ml2_exc.MechanismDriverError: with excutils.save_and_reraise_exception(): LOG.error(_LE("mechanism_manager.create_network_postcommit " "failed, deleting network '%s'"), result['id']) self.delete_network(context, result['id']) return result def create_network_bulk(self, context, networks): objects = self._create_bulk_ml2(attributes.NETWORK, context, networks) return [obj['result'] for obj in objects] def update_network(self, context, id, network): net_data = network[attributes.NETWORK] provider._raise_if_updates_provider_attributes(net_data) session = context.session with session.begin(subtransactions=True): original_network = super(Ml2Plugin, self).get_network(context, id) updated_network = super(Ml2Plugin, self).update_network(context, id, network) self.extension_manager.process_update_network(context, net_data, updated_network) self._process_l3_update(context, updated_network, net_data) self.type_manager.extend_network_dict_provider(context, updated_network) # TODO(QoS): Move out to the extension framework somehow. need_network_update_notify = ( qos_consts.QOS_POLICY_ID in net_data and original_network[qos_consts.QOS_POLICY_ID] != updated_network[qos_consts.QOS_POLICY_ID]) mech_context = driver_context.NetworkContext( self, context, updated_network, original_network=original_network) self.mechanism_manager.update_network_precommit(mech_context) # TODO(apech) - handle errors raised by update_network, potentially # by re-calling update_network with the previous attributes. For # now the error is propogated to the caller, which is expected to # either undo/retry the operation or delete the resource. 
self.mechanism_manager.update_network_postcommit(mech_context) if need_network_update_notify: self.notifier.network_update(context, updated_network) return updated_network def get_network(self, context, id, fields=None): session = context.session with session.begin(subtransactions=True): result = super(Ml2Plugin, self).get_network(context, id, None) self.type_manager.extend_network_dict_provider(context, result) return self._fields(result, fields) def get_networks(self, context, filters=None, fields=None, sorts=None, limit=None, marker=None, page_reverse=False): session = context.session with session.begin(subtransactions=True): nets = super(Ml2Plugin, self).get_networks(context, filters, None, sorts, limit, marker, page_reverse) self.type_manager.extend_networks_dict_provider(context, nets) nets = self._filter_nets_provider(context, nets, filters) return [self._fields(net, fields) for net in nets] def _delete_ports(self, context, port_ids): for port_id in port_ids: try: self.delete_port(context, port_id) except (exc.PortNotFound, sa_exc.ObjectDeletedError): # concurrent port deletion can be performed by # release_dhcp_port caused by concurrent subnet_delete LOG.info(_LI("Port %s was deleted concurrently"), port_id) except Exception: with excutils.save_and_reraise_exception(): LOG.exception(_LE("Exception auto-deleting port %s"), port_id) def _delete_subnets(self, context, subnet_ids): for subnet_id in subnet_ids: try: self.delete_subnet(context, subnet_id) except (exc.SubnetNotFound, sa_exc.ObjectDeletedError): LOG.info(_LI("Subnet %s was deleted concurrently"), subnet_id) except Exception: with excutils.save_and_reraise_exception(): LOG.exception(_LE("Exception auto-deleting subnet %s"), subnet_id) @utils.transaction_guard def delete_network(self, context, id): # REVISIT(rkukura) The super(Ml2Plugin, self).delete_network() # function is not used because it auto-deletes ports and # subnets from the DB without invoking the derived class's # delete_port() or delete_subnet(), preventing mechanism # drivers from being called. This approach should be revisited # when the API layer is reworked during icehouse. LOG.debug("Deleting network %s", id) session = context.session while True: try: # REVISIT: Serialize this operation with a semaphore # to prevent deadlock waiting to acquire a DB lock # held by another thread in the same process, leading # to 'lock wait timeout' errors. # # Process L3 first, since, depending on the L3 plugin, it may # involve sending RPC notifications, and/or calling delete_port # on this plugin. # Additionally, a rollback may not be enough to undo the # deletion of a floating IP with certain L3 backends. self._process_l3_delete(context, id) # Using query().with_lockmode isn't necessary. Foreign-key # constraints prevent deletion if concurrent creation happens. with session.begin(subtransactions=True): # Get ports to auto-delete. ports = (session.query(models_v2.Port). enable_eagerloads(False). filter_by(network_id=id).all()) LOG.debug("Ports to auto-delete: %s", ports) only_auto_del = all(p.device_owner in db_base_plugin_v2. AUTO_DELETE_PORT_OWNERS for p in ports) if not only_auto_del: LOG.debug("Tenant-owned ports exist") raise exc.NetworkInUse(net_id=id) # Get subnets to auto-delete. subnets = (session.query(models_v2.Subnet). enable_eagerloads(False). 
                                   filter_by(network_id=id).all())
                    LOG.debug("Subnets to auto-delete: %s", subnets)

                    if not (ports or subnets):
                        network = self.get_network(context, id)
                        mech_context = driver_context.NetworkContext(self,
                                                                     context,
                                                                     network)
                        self.mechanism_manager.delete_network_precommit(
                            mech_context)

                        self.type_manager.release_network_segments(session,
                                                                   id)
                        record = self._get_network(context, id)
                        LOG.debug("Deleting network record %s", record)
                        session.delete(record)

                        # The segment records are deleted via cascade from the
                        # network record, so explicit removal is not
                        # necessary.
                        LOG.debug("Committing transaction")
                        break

                    port_ids = [port.id for port in ports]
                    subnet_ids = [subnet.id for subnet in subnets]
            except os_db_exception.DBError as e:
                with excutils.save_and_reraise_exception() as ctxt:
                    if isinstance(e.inner_exception, sql_exc.IntegrityError):
                        ctxt.reraise = False
                        LOG.warning(_LW("A concurrent port creation has "
                                        "occurred"))
                        continue
            self._delete_ports(context, port_ids)
            self._delete_subnets(context, subnet_ids)

        try:
            self.mechanism_manager.delete_network_postcommit(mech_context)
        except ml2_exc.MechanismDriverError:
            # TODO(apech) - One or more mechanism drivers failed to
            # delete the network. Ideally we'd notify the caller of
            # the fact that an error occurred.
            LOG.error(_LE("mechanism_manager.delete_network_postcommit"
                          " failed"))
        self.notifier.network_delete(context, id)

    def _create_subnet_db(self, context, subnet):
        session = context.session
        with session.begin(subtransactions=True):
            result = super(Ml2Plugin, self).create_subnet(context, subnet)
            self.extension_manager.process_create_subnet(
                context, subnet[attributes.SUBNET], result)
            network = self.get_network(context, result['network_id'])
            mech_context = driver_context.SubnetContext(self, context,
                                                        result, network)
            self.mechanism_manager.create_subnet_precommit(mech_context)

        return result, mech_context

    def create_subnet(self, context, subnet):
        result, mech_context = self._create_subnet_db(context, subnet)
        try:
            self.mechanism_manager.create_subnet_postcommit(mech_context)
        except ml2_exc.MechanismDriverError:
            with excutils.save_and_reraise_exception():
                LOG.error(_LE("mechanism_manager.create_subnet_postcommit "
                              "failed, deleting subnet '%s'"), result['id'])
                self.delete_subnet(context, result['id'])
        return result

    def create_subnet_bulk(self, context, subnets):
        objects = self._create_bulk_ml2(attributes.SUBNET, context, subnets)
        return [obj['result'] for obj in objects]

    def update_subnet(self, context, id, subnet):
        session = context.session
        with session.begin(subtransactions=True):
            original_subnet = super(Ml2Plugin, self).get_subnet(context, id)
            updated_subnet = super(Ml2Plugin, self).update_subnet(
                context, id, subnet)
            self.extension_manager.process_update_subnet(
                context, subnet[attributes.SUBNET], updated_subnet)
            network = self.get_network(context, updated_subnet['network_id'])
            mech_context = driver_context.SubnetContext(
                self, context, updated_subnet, network,
                original_subnet=original_subnet)
            self.mechanism_manager.update_subnet_precommit(mech_context)

        # TODO(apech) - handle errors raised by update_subnet, potentially
        # by re-calling update_subnet with the previous attributes. For
        # now the error is propagated to the caller, which is expected to
        # either undo/retry the operation or delete the resource.
        self.mechanism_manager.update_subnet_postcommit(mech_context)
        return updated_subnet

    @utils.transaction_guard
    def delete_subnet(self, context, id):
        # REVISIT(rkukura) The super(Ml2Plugin, self).delete_subnet()
        # function is not used because it deallocates the subnet's addresses
        # from ports in the DB without invoking the derived class's
        # update_port(), preventing mechanism drivers from being called.
        # This approach should be revisited when the API layer is reworked
        # during icehouse.
        LOG.debug("Deleting subnet %s", id)
        session = context.session
        deallocated = set()
        while True:
            with session.begin(subtransactions=True):
                record = self._get_subnet(context, id)
                subnet = self._make_subnet_dict(record, None, context=context)
                qry_allocated = (session.query(models_v2.IPAllocation).
                                 filter_by(subnet_id=id).
                                 join(models_v2.Port))
                is_auto_addr_subnet = ipv6_utils.is_auto_address_subnet(
                    subnet)
                # Remove network owned ports, and delete IP allocations
                # for IPv6 addresses which were automatically generated
                # via SLAAC
                if is_auto_addr_subnet:
                    self._subnet_check_ip_allocations_internal_router_ports(
                        context, id)
                else:
                    qry_allocated = (
                        qry_allocated.filter(models_v2.Port.device_owner.
                            in_(db_base_plugin_v2.AUTO_DELETE_PORT_OWNERS)))
                allocated = set(qry_allocated.all())
                LOG.debug("Ports to auto-deallocate: %s", allocated)
                if not is_auto_addr_subnet:
                    user_alloc = self._subnet_get_user_allocation(
                        context, id)
                    if user_alloc:
                        LOG.info(_LI("Found port (%(port_id)s, %(ip)s) "
                                     "having IP allocation on subnet "
                                     "%(subnet)s, cannot delete"),
                                 {'ip': user_alloc.ip_address,
                                  'port_id': user_alloc.port_id,
                                  'subnet': id})
                        raise exc.SubnetInUse(subnet_id=id)
                db_base_plugin_v2._check_subnet_not_used(context, id)

                # SLAAC allocations currently cannot be removed via the
                # update_port workflow, and will persist in 'allocated'.
                # So for now just make sure update_port is called once for
                # them, so the MechanismDrivers are aware of the change.
                # This way the SLAAC allocation is deleted by FK on subnet
                # deletion.
                # TODO(pbondar): rework update_port workflow to allow deletion
                # of SLAAC allocation via update_port.
                to_deallocate = allocated - deallocated

                # If to_deallocate is empty, then all known IPAllocations
                # (except SLAAC allocations) were correctly deleted
                # during the previous pass.
                # Check if there are more IP allocations, unless
                # is_auto_address_subnet is True. If the transaction isolation
                # level is set to READ COMMITTED, allocations made
                # concurrently will be returned by this query and the
                # transaction will be restarted. It works for REPEATABLE READ
                # isolation level too, because this query is executed only
                # once during the transaction, and if concurrent allocations
                # are detected the transaction gets restarted. Executing this
                # query a second time in the transaction would result in not
                # seeing allocations committed by concurrent transactions.
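                # NOTE: For illustration only - a hypothetical two-pass run of
                # this loop for a subnet with a single auto-deletable
                # allocation 'a':
                #
                #   pass 1: allocated = {a}, deallocated = set() ->
                #           to_deallocate = {a}; update_port() below removes
                #           the IP and the loop restarts.
                #   pass 2: allocated = set(), deallocated = {a} ->
                #           to_deallocate is empty; the subnet record is
                #           deleted and the loop breaks.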
if not to_deallocate: if (not is_auto_addr_subnet and self._subnet_check_ip_allocations(context, id)): # allocation found and it was DHCP port # that appeared after autodelete ports were # removed - need to restart whole operation raise os_db_exception.RetryRequest( exc.SubnetInUse(subnet_id=id)) network = self.get_network(context, subnet['network_id']) mech_context = driver_context.SubnetContext(self, context, subnet, network) self.mechanism_manager.delete_subnet_precommit( mech_context) LOG.debug("Deleting subnet record") session.delete(record) # The super(Ml2Plugin, self).delete_subnet() is not called, # so need to manually call delete_subnet for pluggable ipam self.ipam.delete_subnet(context, id) LOG.debug("Committing transaction") break for a in to_deallocate: deallocated.add(a) if a.port: # calling update_port() for each allocation to remove the # IP from the port and call the MechanismDrivers fixed_ips = [{'subnet_id': ip.subnet_id, 'ip_address': ip.ip_address} for ip in a.port.fixed_ips if ip.subnet_id != id] # By default auto-addressed ips are not removed from port # on port update, so mark subnet with 'delete_subnet' flag # to force ip deallocation on port update. if is_auto_addr_subnet: fixed_ips.append({'subnet_id': id, 'delete_subnet': True}) data = {attributes.PORT: {'fixed_ips': fixed_ips}} try: # NOTE Don't inline port_id; needed for PortNotFound. port_id = a.port_id self.update_port(context, port_id, data) except exc.PortNotFound: # NOTE Attempting to access a.port_id here is an error. LOG.debug("Port %s deleted concurrently", port_id) except Exception: with excutils.save_and_reraise_exception(): LOG.exception(_LE("Exception deleting fixed_ip " "from port %s"), port_id) try: self.mechanism_manager.delete_subnet_postcommit(mech_context) except ml2_exc.MechanismDriverError: # TODO(apech) - One or more mechanism driver failed to # delete the subnet. Ideally we'd notify the caller of # the fact that an error occurred. LOG.error(_LE("mechanism_manager.delete_subnet_postcommit failed")) # TODO(yalei) - will be simplified after security group and address pair be # converted to ext driver too. 
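    # NOTE: For illustration only - the defaulting rule enforced by the
    # port-security processing below (request values are hypothetical):
    #
    #   port_security_enabled unset/None -> treated as enabled; allowed
    #       address pairs are accepted and a default security group is
    #       ensured on the port.
    #   port_security_enabled False -> requests carrying allowed address
    #       pairs or security groups are rejected.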
def _portsec_ext_port_create_processing(self, context, port_data, port): attrs = port[attributes.PORT] port_security = ((port_data.get(psec.PORTSECURITY) is None) or port_data[psec.PORTSECURITY]) # allowed address pair checks if self._check_update_has_allowed_address_pairs(port): if not port_security: raise addr_pair.AddressPairAndPortSecurityRequired() else: # remove ATTR_NOT_SPECIFIED attrs[addr_pair.ADDRESS_PAIRS] = [] if port_security: self._ensure_default_security_group_on_port(context, port) elif self._check_update_has_security_groups(port): raise psec.PortSecurityAndIPRequiredForSecurityGroups() def _create_port_db(self, context, port): attrs = port[attributes.PORT] if not attrs.get('status'): attrs['status'] = const.PORT_STATUS_DOWN session = context.session with db_api.exc_to_retry(os_db_exception.DBDuplicateEntry),\ session.begin(subtransactions=True): dhcp_opts = attrs.get(edo_ext.EXTRADHCPOPTS, []) port_db = self.create_port_db(context, port) result = self._make_port_dict(port_db, process_extensions=False) self.extension_manager.process_create_port(context, attrs, result) self._portsec_ext_port_create_processing(context, result, port) # sgids must be got after portsec checked with security group sgids = self._get_security_groups_on_port(context, port) self._process_port_create_security_group(context, result, sgids) network = self.get_network(context, result['network_id']) binding = db.add_port_binding(session, result['id']) mech_context = driver_context.PortContext(self, context, result, network, binding, None) self._process_port_binding(mech_context, attrs) result[addr_pair.ADDRESS_PAIRS] = ( self._process_create_allowed_address_pairs( context, result, attrs.get(addr_pair.ADDRESS_PAIRS))) self._process_port_create_extra_dhcp_opts(context, result, dhcp_opts) self.mechanism_manager.create_port_precommit(mech_context) self._apply_dict_extend_functions('ports', result, port_db) return result, mech_context def create_port(self, context, port): # TODO(kevinbenton): remove when bug/1543094 is fixed. with lockutils.lock(port['port']['network_id'], lock_file_prefix='neutron-create-port', external=True): result, mech_context = self._create_port_db(context, port) # notify any plugin that is interested in port create events kwargs = {'context': context, 'port': result} registry.notify(resources.PORT, events.AFTER_CREATE, self, **kwargs) try: self.mechanism_manager.create_port_postcommit(mech_context) except ml2_exc.MechanismDriverError: with excutils.save_and_reraise_exception(): LOG.error(_LE("mechanism_manager.create_port_postcommit " "failed, deleting port '%s'"), result['id']) self.delete_port(context, result['id']) # REVISIT(rkukura): Is there any point in calling this before # a binding has been successfully established? 
self.notify_security_groups_member_updated(context, result) try: bound_context = self._bind_port_if_needed(mech_context) except os_db_exception.DBDeadlock: # bind port can deadlock in normal operation so we just cleanup # the port and let the API retry with excutils.save_and_reraise_exception(): LOG.debug("_bind_port_if_needed deadlock, deleting port %s", result['id']) self.delete_port(context, result['id']) except ml2_exc.MechanismDriverError: with excutils.save_and_reraise_exception(): LOG.error(_LE("_bind_port_if_needed " "failed, deleting port '%s'"), result['id']) self.delete_port(context, result['id']) return bound_context.current def create_port_bulk(self, context, ports): objects = self._create_bulk_ml2(attributes.PORT, context, ports) # REVISIT(rkukura): Is there any point in calling this before # a binding has been successfully established? results = [obj['result'] for obj in objects] self.notify_security_groups_member_updated_bulk(context, results) for obj in objects: attrs = obj['attributes'] if attrs and attrs.get(portbindings.HOST_ID): kwargs = {'context': context, 'port': obj['result']} registry.notify( resources.PORT, events.AFTER_CREATE, self, **kwargs) try: for obj in objects: obj['bound_context'] = self._bind_port_if_needed( obj['mech_context']) return [obj['bound_context'].current for obj in objects] except ml2_exc.MechanismDriverError: with excutils.save_and_reraise_exception(): resource_ids = [res['result']['id'] for res in objects] LOG.error(_LE("_bind_port_if_needed failed. " "Deleting all ports from create bulk '%s'"), resource_ids) self._delete_objects(context, attributes.PORT, objects) # TODO(yalei) - will be simplified after security group and address pair be # converted to ext driver too. def _portsec_ext_port_update_processing(self, updated_port, context, port, id): port_security = ((updated_port.get(psec.PORTSECURITY) is None) or updated_port[psec.PORTSECURITY]) if port_security: return # check the address-pairs if self._check_update_has_allowed_address_pairs(port): # has address pairs in request raise addr_pair.AddressPairAndPortSecurityRequired() elif (not self._check_update_deletes_allowed_address_pairs(port)): # not a request for deleting the address-pairs updated_port[addr_pair.ADDRESS_PAIRS] = ( self.get_allowed_address_pairs(context, id)) # check if address pairs has been in db, if address pairs could # be put in extension driver, we can refine here. if updated_port[addr_pair.ADDRESS_PAIRS]: raise addr_pair.AddressPairAndPortSecurityRequired() # checks if security groups were updated adding/modifying # security groups, port security is set if self._check_update_has_security_groups(port): raise psec.PortSecurityAndIPRequiredForSecurityGroups() elif (not self._check_update_deletes_security_groups(port)): # Update did not have security groups passed in. Check # that port does not have any security groups already on it. 
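            # NOTE: For illustration only - the net effect of the checks in
            # this method for a hypothetical request:
            #
            #   PUT /v2.0/ports/<id>
            #       {"port": {"port_security_enabled": false}}
            #
            # succeeds only if the port is left with no allowed address
            # pairs and no security groups bound to it.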
            filters = {'port_id': [id]}
            security_groups = (
                super(Ml2Plugin, self)._get_port_security_group_bindings(
                    context, filters)
            )
            if security_groups:
                raise psec.PortSecurityPortHasSecurityGroup()

    def update_port(self, context, id, port):
        attrs = port[attributes.PORT]
        need_port_update_notify = False
        session = context.session
        bound_mech_contexts = []

        with db_api.exc_to_retry(os_db_exception.DBDuplicateEntry),\
                session.begin(subtransactions=True):
            port_db, binding = db.get_locked_port_and_binding(session, id)
            if not port_db:
                raise exc.PortNotFound(port_id=id)
            mac_address_updated = self._check_mac_update_allowed(
                port_db, attrs, binding)
            need_port_update_notify |= mac_address_updated
            original_port = self._make_port_dict(port_db)
            updated_port = super(Ml2Plugin, self).update_port(context, id,
                                                              port)
            self.extension_manager.process_update_port(context, attrs,
                                                       updated_port)
            self._portsec_ext_port_update_processing(updated_port, context,
                                                     port, id)

            if (psec.PORTSECURITY in attrs) and (
                        original_port[psec.PORTSECURITY] !=
                        updated_port[psec.PORTSECURITY]):
                need_port_update_notify = True
            # TODO(QoS): Move out to the extension framework somehow.
            # Follow https://review.openstack.org/#/c/169223 for a solution.
            if (qos_consts.QOS_POLICY_ID in attrs and
                    original_port[qos_consts.QOS_POLICY_ID] !=
                    updated_port[qos_consts.QOS_POLICY_ID]):
                need_port_update_notify = True

            if addr_pair.ADDRESS_PAIRS in attrs:
                need_port_update_notify |= (
                    self.update_address_pairs_on_port(context, id, port,
                                                      original_port,
                                                      updated_port))
            need_port_update_notify |= self.update_security_group_on_port(
                context, id, port, original_port, updated_port)
            network = self.get_network(context, original_port['network_id'])
            need_port_update_notify |= self._update_extra_dhcp_opts_on_port(
                context, id, port, updated_port)
            levels = db.get_binding_levels(session, id, binding.host)
            mech_context = driver_context.PortContext(
                self, context, updated_port, network, binding, levels,
                original_port=original_port)
            need_port_update_notify |= self._process_port_binding(
                mech_context, attrs)
            # For DVR router interface ports we need to retrieve the
            # DVRPortbinding context instead of the normal port context.
            # The normal Portbinding context does not have the status
            # of the ports, which is required by l2pop to process the
            # postcommit events.
            # NOTE: Sometimes during the update_port call, the DVR router
            # interface port may not have the port binding, so we cannot
            # create a generic binding list that will address both the
            # DVR and non-DVR cases here.
            # TODO(Swami): This code needs to be revisited.
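            # NOTE: For illustration only - a DVR interface port may carry one
            # binding row per scheduled host (values are hypothetical):
            #
            #   (port_id, 'compute-1'), (port_id, 'compute-2'), ...
            #
            # so the branch below builds one PortContext per binding, while a
            # regular port yields exactly one context.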
if port_db['device_owner'] == const.DEVICE_OWNER_DVR_INTERFACE: dvr_binding_list = db.get_dvr_port_bindings(session, id) for dvr_binding in dvr_binding_list: levels = db.get_binding_levels(session, id, dvr_binding.host) dvr_mech_context = driver_context.PortContext( self, context, updated_port, network, dvr_binding, levels, original_port=original_port) self.mechanism_manager.update_port_precommit( dvr_mech_context) bound_mech_contexts.append(dvr_mech_context) else: self.mechanism_manager.update_port_precommit(mech_context) bound_mech_contexts.append(mech_context) # Notifications must be sent after the above transaction is complete kwargs = { 'context': context, 'port': updated_port, 'mac_address_updated': mac_address_updated, 'original_port': original_port, } registry.notify(resources.PORT, events.AFTER_UPDATE, self, **kwargs) # Note that DVR Interface ports will have bindings on # multiple hosts, and so will have multiple mech_contexts, # while other ports typically have just one. # Since bound_mech_contexts has both the DVR and non-DVR # contexts we can manage just with a single for loop. try: for mech_context in bound_mech_contexts: self.mechanism_manager.update_port_postcommit( mech_context) except ml2_exc.MechanismDriverError: LOG.error(_LE("mechanism_manager.update_port_postcommit " "failed for port %s"), id) self.check_and_notify_security_group_member_changed( context, original_port, updated_port) need_port_update_notify |= self.is_security_group_member_updated( context, original_port, updated_port) if original_port['admin_state_up'] != updated_port['admin_state_up']: need_port_update_notify = True # NOTE: In the case of DVR ports, the port-binding is done after # router scheduling when sync_routers is called and so this call # below may not be required for DVR routed interfaces. But still # since we don't have the mech_context for the DVR router interfaces # at certain times, we just pass the port-context and return it, so # that we don't disturb other methods that are expecting a return # value. 
bound_context = self._bind_port_if_needed( mech_context, allow_notify=True, need_notify=need_port_update_notify) return bound_context.current def _process_dvr_port_binding(self, mech_context, context, attrs): session = mech_context._plugin_context.session binding = mech_context._binding port = mech_context.current port_id = port['id'] if binding.vif_type != portbindings.VIF_TYPE_UNBOUND: binding.vif_details = '' binding.vif_type = portbindings.VIF_TYPE_UNBOUND if binding.host: db.clear_binding_levels(session, port_id, binding.host) binding.host = '' self._update_port_dict_binding(port, binding) binding.host = attrs and attrs.get(portbindings.HOST_ID) binding.router_id = attrs and attrs.get('device_id') def update_dvr_port_binding(self, context, id, port): attrs = port[attributes.PORT] host = attrs and attrs.get(portbindings.HOST_ID) host_set = attributes.is_attr_set(host) if not host_set: LOG.error(_LE("No Host supplied to bind DVR Port %s"), id) return session = context.session binding = db.get_dvr_port_binding_by_host(session, id, host) device_id = attrs and attrs.get('device_id') router_id = binding and binding.get('router_id') update_required = (not binding or binding.vif_type == portbindings.VIF_TYPE_BINDING_FAILED or router_id != device_id) if update_required: try: with session.begin(subtransactions=True): orig_port = self.get_port(context, id) if not binding: binding = db.ensure_dvr_port_binding( session, id, host, router_id=device_id) network = self.get_network(context, orig_port['network_id']) levels = db.get_binding_levels(session, id, host) mech_context = driver_context.PortContext(self, context, orig_port, network, binding, levels, original_port=orig_port) self._process_dvr_port_binding(mech_context, context, attrs) except (os_db_exception.DBReferenceError, exc.PortNotFound): LOG.debug("DVR Port %s has been deleted concurrently", id) return self._bind_port_if_needed(mech_context) def _pre_delete_port(self, context, port_id, port_check): """Do some preliminary operations before deleting the port.""" LOG.debug("Deleting port %s", port_id) try: # notify interested parties of imminent port deletion; # a failure here prevents the operation from happening kwargs = { 'context': context, 'port_id': port_id, 'port_check': port_check } registry.notify( resources.PORT, events.BEFORE_DELETE, self, **kwargs) except exceptions.CallbackFailure as e: # NOTE(armax): preserve old check's behavior if len(e.errors) == 1: raise e.errors[0].error raise exc.ServicePortInUse(port_id=port_id, reason=e) def delete_port(self, context, id, l3_port_check=True): self._pre_delete_port(context, id, l3_port_check) # TODO(armax): get rid of the l3 dependency in the with block router_ids = [] l3plugin = manager.NeutronManager.get_service_plugins().get( service_constants.L3_ROUTER_NAT) session = context.session with session.begin(subtransactions=True): port_db, binding = db.get_locked_port_and_binding(session, id) if not port_db: LOG.debug("The port '%s' was deleted", id) return port = self._make_port_dict(port_db) network = self.get_network(context, port['network_id']) bound_mech_contexts = [] device_owner = port['device_owner'] if device_owner == const.DEVICE_OWNER_DVR_INTERFACE: bindings = db.get_dvr_port_bindings(context.session, id) for bind in bindings: levels = db.get_binding_levels(context.session, id, bind.host) mech_context = driver_context.PortContext( self, context, port, network, bind, levels) self.mechanism_manager.delete_port_precommit(mech_context) bound_mech_contexts.append(mech_context) else: 
levels = db.get_binding_levels(context.session, id, binding.host) mech_context = driver_context.PortContext( self, context, port, network, binding, levels) self.mechanism_manager.delete_port_precommit(mech_context) bound_mech_contexts.append(mech_context) if l3plugin: router_ids = l3plugin.disassociate_floatingips( context, id, do_notify=False) LOG.debug("Calling delete_port for %(port_id)s owned by %(owner)s", {"port_id": id, "owner": device_owner}) super(Ml2Plugin, self).delete_port(context, id) self._post_delete_port( context, port, router_ids, bound_mech_contexts) def _post_delete_port( self, context, port, router_ids, bound_mech_contexts): kwargs = { 'context': context, 'port': port, 'router_ids': router_ids, } registry.notify(resources.PORT, events.AFTER_DELETE, self, **kwargs) try: # Note that DVR Interface ports will have bindings on # multiple hosts, and so will have multiple mech_contexts, # while other ports typically have just one. for mech_context in bound_mech_contexts: self.mechanism_manager.delete_port_postcommit(mech_context) except ml2_exc.MechanismDriverError: # TODO(apech) - One or more mechanism driver failed to # delete the port. Ideally we'd notify the caller of the # fact that an error occurred. LOG.error(_LE("mechanism_manager.delete_port_postcommit failed for" " port %s"), port['id']) self.notifier.port_delete(context, port['id']) self.notify_security_groups_member_updated(context, port) def get_bound_port_context(self, plugin_context, port_id, host=None, cached_networks=None): session = plugin_context.session with session.begin(subtransactions=True): try: port_db = (session.query(models_v2.Port). enable_eagerloads(False). filter(models_v2.Port.id.startswith(port_id)). one()) except sa_exc.NoResultFound: LOG.info(_LI("No ports have port_id starting with %s"), port_id) return except sa_exc.MultipleResultsFound: LOG.error(_LE("Multiple ports have port_id starting with %s"), port_id) return port = self._make_port_dict(port_db) network = (cached_networks or {}).get(port['network_id']) if not network: network = self.get_network(plugin_context, port['network_id']) if port['device_owner'] == const.DEVICE_OWNER_DVR_INTERFACE: binding = db.get_dvr_port_binding_by_host( session, port['id'], host) if not binding: LOG.error(_LE("Binding info for DVR port %s not found"), port_id) return None levels = db.get_binding_levels(session, port_db.id, host) port_context = driver_context.PortContext( self, plugin_context, port, network, binding, levels) else: # since eager loads are disabled in port_db query # related attribute port_binding could disappear in # concurrent port deletion. # It's not an error condition. binding = port_db.port_binding if not binding: LOG.info(_LI("Binding info for port %s was not found, " "it might have been deleted already."), port_id) return levels = db.get_binding_levels(session, port_db.id, port_db.port_binding.host) port_context = driver_context.PortContext( self, plugin_context, port, network, binding, levels) return self._bind_port_if_needed(port_context) @oslo_db_api.wrap_db_retry( max_retries=db_api.MAX_RETRIES, retry_on_request=True, exception_checker=lambda e: isinstance(e, (sa_exc.StaleDataError, os_db_exception.DBDeadlock)) ) def update_port_status(self, context, port_id, status, host=None, network=None): """ Returns port_id (non-truncated uuid) if the port exists. Otherwise returns None. network can be passed in to avoid another get_network call if one was already performed by the caller. 
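        Illustrative call (as made by the ML2 RPC callbacks when an agent
        reports a device coming up):

            plugin.update_port_status(rpc_context, port_id,
                                      n_const.PORT_STATUS_ACTIVE, host)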
""" updated = False session = context.session with session.begin(subtransactions=True): port = db.get_port(session, port_id) if not port: LOG.debug("Port %(port)s update to %(val)s by agent not found", {'port': port_id, 'val': status}) return None if (port.status != status and port['device_owner'] != const.DEVICE_OWNER_DVR_INTERFACE): original_port = self._make_port_dict(port) port.status = status updated_port = self._make_port_dict(port) network = network or self.get_network( context, original_port['network_id']) levels = db.get_binding_levels(session, port.id, port.port_binding.host) mech_context = driver_context.PortContext( self, context, updated_port, network, port.port_binding, levels, original_port=original_port) self.mechanism_manager.update_port_precommit(mech_context) updated = True elif port['device_owner'] == const.DEVICE_OWNER_DVR_INTERFACE: binding = db.get_dvr_port_binding_by_host( session, port['id'], host) if not binding: return binding['status'] = status binding.update(binding) updated = True if (updated and port['device_owner'] == const.DEVICE_OWNER_DVR_INTERFACE): with session.begin(subtransactions=True): port = db.get_port(session, port_id) if not port: LOG.warning(_LW("Port %s not found during update"), port_id) return original_port = self._make_port_dict(port) network = network or self.get_network( context, original_port['network_id']) port.status = db.generate_dvr_port_status(session, port['id']) updated_port = self._make_port_dict(port) levels = db.get_binding_levels(session, port_id, host) mech_context = (driver_context.PortContext( self, context, updated_port, network, binding, levels, original_port=original_port)) self.mechanism_manager.update_port_precommit(mech_context) if updated: self.mechanism_manager.update_port_postcommit(mech_context) if port['device_owner'] == const.DEVICE_OWNER_DVR_INTERFACE: db.delete_dvr_port_binding_if_stale(session, binding) return port['id'] def port_bound_to_host(self, context, port_id, host): port = db.get_port(context.session, port_id) if not port: LOG.debug("No Port match for: %s", port_id) return False if port['device_owner'] == const.DEVICE_OWNER_DVR_INTERFACE: bindings = db.get_dvr_port_bindings(context.session, port_id) for b in bindings: if b.host == host: return True LOG.debug("No binding found for DVR port %s", port['id']) return False else: port_host = db.get_port_binding_host(context.session, port_id) return (port_host == host) def get_ports_from_devices(self, context, devices): port_ids_to_devices = dict( (self._device_to_port_id(context, device), device) for device in devices) port_ids = list(port_ids_to_devices.keys()) ports = db.get_ports_and_sgs(context, port_ids) for port in ports: # map back to original requested id port_id = next((port_id for port_id in port_ids if port['id'].startswith(port_id)), None) port['device'] = port_ids_to_devices.get(port_id) return ports @staticmethod def _device_to_port_id(context, device): # REVISIT(rkukura): Consider calling into MechanismDrivers to # process device names, or having MechanismDrivers supply list # of device prefixes to strip. 
for prefix in const.INTERFACE_PREFIXES: if device.startswith(prefix): return device[len(prefix):] # REVISIT(irenab): Consider calling into bound MD to # handle the get_device_details RPC if not uuidutils.is_uuid_like(device): port = db.get_port_from_device_mac(context, device) if port: return port.id return device def get_workers(self): return self.mechanism_manager.get_workers() neutron-8.4.0/neutron/plugins/ml2/common/0000775000567000056710000000000013044373210021510 5ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/plugins/ml2/common/__init__.py0000664000567000056710000000000013044372736023623 0ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/plugins/ml2/common/exceptions.py0000664000567000056710000000262213044372760024256 0ustar jenkinsjenkins00000000000000# Copyright (c) 2013 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Exceptions used by ML2.""" from neutron._i18n import _ from neutron.common import exceptions class MechanismDriverError(exceptions.NeutronException): """Mechanism driver call failed.""" message = _("%(method)s failed.") class ExtensionDriverError(exceptions.InvalidInput): """Extension driver call failed.""" message = _("Extension %(driver)s failed.") class ExtensionDriverNotFound(exceptions.InvalidConfigurationOption): """Required extension driver not found in ML2 config.""" message = _("Extension driver %(driver)s required for " "service plugin %(service_plugin)s not found.") class UnknownNetworkType(exceptions.NeutronException): """Network with unknown type.""" message = _("Unknown network type %(network_type)s.") neutron-8.4.0/neutron/plugins/ml2/rpc.py0000664000567000056710000003665713044372760021410 0ustar jenkinsjenkins00000000000000# Copyright (c) 2013 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
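# NOTE: For illustration only - the ML2 exceptions defined in
# neutron.plugins.ml2.common.exceptions above take their message parameters
# as keyword arguments; a hypothetical call site:
#
#     raise ml2_exc.MechanismDriverError(method='create_port_postcommit')
#
# renders the message "create_port_postcommit failed.".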
from oslo_log import log import oslo_messaging from sqlalchemy.orm import exc from neutron._i18n import _LE, _LW from neutron.api.rpc.handlers import dvr_rpc from neutron.api.rpc.handlers import securitygroups_rpc as sg_rpc from neutron.callbacks import events from neutron.callbacks import registry from neutron.callbacks import resources from neutron.common import constants as n_const from neutron.common import exceptions from neutron.common import rpc as n_rpc from neutron.common import topics from neutron.db import l3_hamode_db from neutron.extensions import portbindings from neutron.extensions import portsecurity as psec from neutron import manager from neutron.plugins.ml2 import db as ml2_db from neutron.plugins.ml2 import driver_api as api from neutron.plugins.ml2.drivers import type_tunnel from neutron.services.qos import qos_consts # REVISIT(kmestery): Allow the type and mechanism drivers to supply the # mixins and eventually remove the direct dependencies on type_tunnel. LOG = log.getLogger(__name__) class RpcCallbacks(type_tunnel.TunnelRpcCallbackMixin): # history # 1.0 Initial version (from openvswitch/linuxbridge) # 1.1 Support Security Group RPC # 1.2 Support get_devices_details_list # 1.3 get_device_details rpc signature upgrade to obtain 'host' and # return value to include fixed_ips and device_owner for # the device port # 1.4 tunnel_sync rpc signature upgrade to obtain 'host' # 1.5 Support update_device_list and # get_devices_details_list_and_failed_devices target = oslo_messaging.Target(version='1.5') def __init__(self, notifier, type_manager): self.setup_tunnel_callback_mixin(notifier, type_manager) super(RpcCallbacks, self).__init__() def get_device_details(self, rpc_context, **kwargs): """Agent requests device details.""" agent_id = kwargs.get('agent_id') device = kwargs.get('device') host = kwargs.get('host') # cached networks used for reducing number of network db calls # for server internal usage only cached_networks = kwargs.get('cached_networks') LOG.debug("Device %(device)s details requested by agent " "%(agent_id)s with host %(host)s", {'device': device, 'agent_id': agent_id, 'host': host}) plugin = manager.NeutronManager.get_plugin() port_id = plugin._device_to_port_id(rpc_context, device) port_context = plugin.get_bound_port_context(rpc_context, port_id, host, cached_networks) if not port_context: LOG.debug("Device %(device)s requested by agent " "%(agent_id)s not found in database", {'device': device, 'agent_id': agent_id}) return {'device': device} segment = port_context.bottom_bound_segment port = port_context.current # caching information about networks for future use if cached_networks is not None: if port['network_id'] not in cached_networks: cached_networks[port['network_id']] = ( port_context.network.current) if not segment: LOG.warning(_LW("Device %(device)s requested by agent " "%(agent_id)s on network %(network_id)s not " "bound, vif_type: %(vif_type)s"), {'device': device, 'agent_id': agent_id, 'network_id': port['network_id'], 'vif_type': port_context.vif_type}) return {'device': device} if (not host or host == port_context.host): new_status = (n_const.PORT_STATUS_BUILD if port['admin_state_up'] else n_const.PORT_STATUS_DOWN) if port['status'] != new_status: plugin.update_port_status(rpc_context, port_id, new_status, host, port_context.network.current) network_qos_policy_id = port_context.network._network.get( qos_consts.QOS_POLICY_ID) entry = {'device': device, 'network_id': port['network_id'], 'port_id': port['id'], 'mac_address': 
port['mac_address'], 'admin_state_up': port['admin_state_up'], 'network_type': segment[api.NETWORK_TYPE], 'segmentation_id': segment[api.SEGMENTATION_ID], 'physical_network': segment[api.PHYSICAL_NETWORK], 'fixed_ips': port['fixed_ips'], 'device_owner': port['device_owner'], 'allowed_address_pairs': port['allowed_address_pairs'], 'port_security_enabled': port.get(psec.PORTSECURITY, True), 'qos_policy_id': port.get(qos_consts.QOS_POLICY_ID), 'network_qos_policy_id': network_qos_policy_id, 'profile': port[portbindings.PROFILE]} if 'security_groups' in port: entry['security_groups'] = port['security_groups'] LOG.debug("Returning: %s", entry) return entry def get_devices_details_list(self, rpc_context, **kwargs): # cached networks used for reducing number of network db calls cached_networks = {} return [ self.get_device_details( rpc_context, device=device, cached_networks=cached_networks, **kwargs ) for device in kwargs.pop('devices', []) ] def get_devices_details_list_and_failed_devices(self, rpc_context, **kwargs): devices = [] failed_devices = [] cached_networks = {} for device in kwargs.pop('devices', []): try: devices.append(self.get_device_details( rpc_context, device=device, cached_networks=cached_networks, **kwargs)) except Exception: LOG.error(_LE("Failed to get details for device %s"), device) failed_devices.append(device) return {'devices': devices, 'failed_devices': failed_devices} def update_device_down(self, rpc_context, **kwargs): """Device no longer exists on agent.""" # TODO(garyk) - live migration and port status agent_id = kwargs.get('agent_id') device = kwargs.get('device') host = kwargs.get('host') LOG.debug("Device %(device)s no longer exists at agent " "%(agent_id)s", {'device': device, 'agent_id': agent_id}) plugin = manager.NeutronManager.get_plugin() port_id = plugin._device_to_port_id(rpc_context, device) port_exists = True if (host and not plugin.port_bound_to_host(rpc_context, port_id, host)): LOG.debug("Device %(device)s not bound to the" " agent host %(host)s", {'device': device, 'host': host}) else: try: port_exists = bool(plugin.update_port_status( rpc_context, port_id, n_const.PORT_STATUS_DOWN, host)) except exc.StaleDataError: port_exists = False LOG.debug("delete_port and update_device_down are being " "executed concurrently. 
Ignoring StaleDataError.") return {'device': device, 'exists': port_exists} self.notify_ha_port_status(port_id, rpc_context, n_const.PORT_STATUS_DOWN, host) return {'device': device, 'exists': port_exists} def update_device_up(self, rpc_context, **kwargs): """Device is up on agent.""" agent_id = kwargs.get('agent_id') device = kwargs.get('device') host = kwargs.get('host') LOG.debug("Device %(device)s up at agent %(agent_id)s", {'device': device, 'agent_id': agent_id}) plugin = manager.NeutronManager.get_plugin() port_id = plugin._device_to_port_id(rpc_context, device) if (host and not plugin.port_bound_to_host(rpc_context, port_id, host)): LOG.debug("Device %(device)s not bound to the" " agent host %(host)s", {'device': device, 'host': host}) else: self.update_port_status_to_active(rpc_context, port_id, host) self.notify_ha_port_status(port_id, rpc_context, n_const.PORT_STATUS_ACTIVE, host) def update_port_status_to_active(self, rpc_context, port_id, host): plugin = manager.NeutronManager.get_plugin() port_id = plugin.update_port_status(rpc_context, port_id, n_const.PORT_STATUS_ACTIVE, host) try: # NOTE(armax): it's best to remove all objects from the # session, before we try to retrieve the new port object rpc_context.session.expunge_all() port = plugin._get_port(rpc_context, port_id) except exceptions.PortNotFound: LOG.debug('Port %s not found during update', port_id) else: kwargs = { 'context': rpc_context, 'port': port, 'update_device_up': True } registry.notify( resources.PORT, events.AFTER_UPDATE, plugin, **kwargs) def notify_ha_port_status(self, port_id, rpc_context, status, host): plugin = manager.NeutronManager.get_plugin() l2pop_driver = plugin.mechanism_manager.mech_drivers.get( 'l2population') if not l2pop_driver: return port = ml2_db.get_port(rpc_context.session, port_id) if not port: return is_ha_port = l3_hamode_db.is_ha_router_port(port['device_owner'], port['device_id']) if is_ha_port: port_context = plugin.get_bound_port_context( rpc_context, port_id) port_context.current['status'] = status port_context.current[portbindings.HOST_ID] = host if status == n_const.PORT_STATUS_ACTIVE: l2pop_driver.obj.update_port_up(port_context) else: l2pop_driver.obj.update_port_down(port_context) def update_device_list(self, rpc_context, **kwargs): devices_up = [] failed_devices_up = [] devices_down = [] failed_devices_down = [] devices = kwargs.get('devices_up') if devices: for device in devices: try: self.update_device_up( rpc_context, device=device, **kwargs) except Exception: failed_devices_up.append(device) LOG.error(_LE("Failed to update device %s up"), device) else: devices_up.append(device) devices = kwargs.get('devices_down') if devices: for device in devices: try: dev = self.update_device_down( rpc_context, device=device, **kwargs) except Exception: failed_devices_down.append(device) LOG.error(_LE("Failed to update device %s down"), device) else: devices_down.append(dev) return {'devices_up': devices_up, 'failed_devices_up': failed_devices_up, 'devices_down': devices_down, 'failed_devices_down': failed_devices_down} class AgentNotifierApi(dvr_rpc.DVRAgentRpcApiMixin, sg_rpc.SecurityGroupAgentRpcApiMixin, type_tunnel.TunnelAgentRpcApiMixin): """Agent side of the openvswitch rpc API. API version history: 1.0 - Initial version. 1.1 - Added get_active_networks_info, create_dhcp_port, update_dhcp_port, and removed get_dhcp_port methods. 
1.4 - Added network_update """ def __init__(self, topic): self.topic = topic self.topic_network_delete = topics.get_topic_name(topic, topics.NETWORK, topics.DELETE) self.topic_port_update = topics.get_topic_name(topic, topics.PORT, topics.UPDATE) self.topic_port_delete = topics.get_topic_name(topic, topics.PORT, topics.DELETE) self.topic_network_update = topics.get_topic_name(topic, topics.NETWORK, topics.UPDATE) target = oslo_messaging.Target(topic=topic, version='1.0') self.client = n_rpc.get_client(target) def network_delete(self, context, network_id): cctxt = self.client.prepare(topic=self.topic_network_delete, fanout=True) cctxt.cast(context, 'network_delete', network_id=network_id) def port_update(self, context, port, network_type, segmentation_id, physical_network): cctxt = self.client.prepare(topic=self.topic_port_update, fanout=True) cctxt.cast(context, 'port_update', port=port, network_type=network_type, segmentation_id=segmentation_id, physical_network=physical_network) def port_delete(self, context, port_id): cctxt = self.client.prepare(topic=self.topic_port_delete, fanout=True) cctxt.cast(context, 'port_delete', port_id=port_id) def network_update(self, context, network): cctxt = self.client.prepare(topic=self.topic_network_update, fanout=True, version='1.4') cctxt.cast(context, 'network_update', network=network) neutron-8.4.0/neutron/plugins/ml2/driver_context.py0000664000567000056710000002157513044372760023654 0ustar jenkinsjenkins00000000000000# Copyright (c) 2013 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_log import log from oslo_serialization import jsonutils from neutron._i18n import _LW from neutron.common import constants from neutron.extensions import portbindings from neutron.plugins.ml2 import db from neutron.plugins.ml2 import driver_api as api LOG = log.getLogger(__name__) class MechanismDriverContext(object): """MechanismDriver context base class.""" def __init__(self, plugin, plugin_context): self._plugin = plugin # This temporarily creates a reference loop, but the # lifetime of PortContext is limited to a single # method call of the plugin. 
self._plugin_context = plugin_context class NetworkContext(MechanismDriverContext, api.NetworkContext): def __init__(self, plugin, plugin_context, network, original_network=None): super(NetworkContext, self).__init__(plugin, plugin_context) self._network = network self._original_network = original_network self._segments = db.get_network_segments(plugin_context.session, network['id']) @property def current(self): return self._network @property def original(self): return self._original_network @property def network_segments(self): return self._segments class SubnetContext(MechanismDriverContext, api.SubnetContext): def __init__(self, plugin, plugin_context, subnet, network, original_subnet=None): super(SubnetContext, self).__init__(plugin, plugin_context) self._subnet = subnet self._original_subnet = original_subnet self._network_context = NetworkContext(plugin, plugin_context, network) @property def current(self): return self._subnet @property def original(self): return self._original_subnet @property def network(self): return self._network_context class PortContext(MechanismDriverContext, api.PortContext): def __init__(self, plugin, plugin_context, port, network, binding, binding_levels, original_port=None): super(PortContext, self).__init__(plugin, plugin_context) self._port = port self._original_port = original_port self._network_context = NetworkContext(plugin, plugin_context, network) self._binding = binding self._binding_levels = binding_levels self._segments_to_bind = None self._new_bound_segment = None self._next_segments_to_bind = None if original_port: self._original_vif_type = binding.vif_type self._original_vif_details = self._plugin._get_vif_details(binding) self._original_binding_levels = self._binding_levels else: self._original_vif_type = None self._original_vif_details = None self._original_binding_levels = None self._new_port_status = None # The following methods are for use by the ML2 plugin and are not # part of the driver API. def _prepare_to_bind(self, segments_to_bind): self._segments_to_bind = segments_to_bind self._new_bound_segment = None self._next_segments_to_bind = None def _clear_binding_levels(self): self._binding_levels = [] def _push_binding_level(self, binding_level): self._binding_levels.append(binding_level) def _pop_binding_level(self): return self._binding_levels.pop() # The following implement the abstract methods and properties of # the driver API. @property def current(self): return self._port @property def original(self): return self._original_port @property def status(self): # REVISIT(rkukura): Eliminate special DVR case as part of # resolving bug 1367391? if self._port['device_owner'] == constants.DEVICE_OWNER_DVR_INTERFACE: return self._binding.status return self._port['status'] @property def original_status(self): # REVISIT(rkukura): Should return host-specific status for DVR # ports. Fix as part of resolving bug 1367391. 
if self._original_port: return self._original_port['status'] @property def network(self): return self._network_context @property def binding_levels(self): if self._binding_levels: return [{ api.BOUND_DRIVER: level.driver, api.BOUND_SEGMENT: self._expand_segment(level.segment_id) } for level in self._binding_levels] @property def original_binding_levels(self): if self._original_binding_levels: return [{ api.BOUND_DRIVER: level.driver, api.BOUND_SEGMENT: self._expand_segment(level.segment_id) } for level in self._original_binding_levels] @property def top_bound_segment(self): if self._binding_levels: return self._expand_segment(self._binding_levels[0].segment_id) @property def original_top_bound_segment(self): if self._original_binding_levels: return self._expand_segment( self._original_binding_levels[0].segment_id) @property def bottom_bound_segment(self): if self._binding_levels: return self._expand_segment(self._binding_levels[-1].segment_id) @property def original_bottom_bound_segment(self): if self._original_binding_levels: return self._expand_segment( self._original_binding_levels[-1].segment_id) def _expand_segment(self, segment_id): segment = db.get_segment_by_id(self._plugin_context.session, segment_id) if not segment: LOG.warning(_LW("Could not expand segment %s"), segment_id) return segment @property def host(self): # REVISIT(rkukura): Eliminate special DVR case as part of # resolving bug 1367391? if self._port['device_owner'] == constants.DEVICE_OWNER_DVR_INTERFACE: return self._binding.host return self._port.get(portbindings.HOST_ID) @property def original_host(self): # REVISIT(rkukura): Eliminate special DVR case as part of # resolving bug 1367391? if self._port['device_owner'] == constants.DEVICE_OWNER_DVR_INTERFACE: return self._original_port and self._binding.host else: return (self._original_port and self._original_port.get(portbindings.HOST_ID)) @property def vif_type(self): return self._binding.vif_type @property def original_vif_type(self): return self._original_vif_type @property def vif_details(self): return self._plugin._get_vif_details(self._binding) @property def original_vif_details(self): return self._original_vif_details @property def segments_to_bind(self): return self._segments_to_bind def host_agents(self, agent_type): return self._plugin.get_agents(self._plugin_context, filters={'agent_type': [agent_type], 'host': [self._binding.host]}) def set_binding(self, segment_id, vif_type, vif_details, status=None): # TODO(rkukura) Verify binding allowed, segment in network self._new_bound_segment = segment_id self._binding.vif_type = vif_type self._binding.vif_details = jsonutils.dumps(vif_details) self._new_port_status = status def continue_binding(self, segment_id, next_segments_to_bind): # TODO(rkukura) Verify binding allowed, segment in network self._new_bound_segment = segment_id self._next_segments_to_bind = next_segments_to_bind def allocate_dynamic_segment(self, segment): network_id = self._network_context.current['id'] return self._plugin.type_manager.allocate_dynamic_segment( self._plugin_context.session, network_id, segment) def release_dynamic_segment(self, segment_id): return self._plugin.type_manager.release_dynamic_segment( self._plugin_context.session, segment_id) neutron-8.4.0/neutron/plugins/ml2/README0000664000567000056710000000570413044372736021122 0ustar jenkinsjenkins00000000000000The Modular Layer 2 (ML2) plugin is a framework allowing OpenStack Networking to simultaneously utilize the variety of layer 2 networking technologies found in complex 
real-world data centers. It supports the Open vSwitch, Linux bridge, and Hyper-V L2 agents, replacing and deprecating the monolithic plugins previously associated with those agents, and can also support hardware devices and SDN controllers. The ML2 framework is intended to greatly simplify adding support for new L2 networking technologies, requiring much less initial and ongoing effort than would be required for an additional monolithic core plugin. It is also intended to foster innovation through its organization as optional driver modules. The ML2 plugin supports all the non-vendor-specific neutron API extensions, and works with the standard neutron DHCP agent. It utilizes the service plugin interface to implement the L3 router abstraction, allowing use of either the standard neutron L3 agent or alternative L3 solutions. Additional service plugins can also be used with the ML2 core plugin. Drivers within ML2 implement separately extensible sets of network types and of mechanisms for accessing networks of those types. Multiple mechanisms can be used simultaneously to access different ports of the same virtual network. Mechanisms can utilize L2 agents via RPC and/or interact with external devices or controllers. By utilizing the multiprovidernet extension, virtual networks can be composed of multiple segments of the same or different types. Type and mechanism drivers are loaded as python entrypoints using the stevedore library. Each available network type is managed by an ML2 type driver. Type drivers maintain any needed type-specific network state, and perform provider network validation and tenant network allocation. As of the havana release, drivers for the local, flat, vlan, gre, and vxlan network types are included. Each available networking mechanism is managed by an ML2 mechanism driver. All registered mechanism drivers are called twice when networks, subnets, and ports are created, updated, or deleted. They are first called as part of the DB transaction, where they can maintain any needed driver-specific state. Once the transaction has been committed, they are called again, at which point they can interact with external devices and controllers. Mechanism drivers are also called as part of the port binding process, to determine whether the associated mechanism can provide connectivity for the network, and if so, the network segment and VIF driver to be used. The havana release includes mechanism drivers for the Open vSwitch, Linux bridge, and Hyper-V L2 agents, and for vendor switches/controllers/etc. It also includes an L2 Population mechanism driver that can help optimize tunneled virtual network traffic. For additional information regarding the ML2 plugin and its collection of type and mechanism drivers, see the OpenStack manuals and http://wiki.openstack.org/wiki/Neutron/ML2. neutron-8.4.0/neutron/plugins/ml2/extensions/0000775000567000056710000000000013044373210022417 5ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/plugins/ml2/extensions/__init__.py0000664000567000056710000000000013044372736024532 0ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/plugins/ml2/extensions/port_security.py0000664000567000056710000000645413044372760025726 0ustar jenkinsjenkins00000000000000# Copyright 2015 Intel Corporation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_log import log as logging from neutron._i18n import _LI from neutron.api.v2 import attributes as attrs from neutron.common import utils from neutron.db import common_db_mixin from neutron.db import portsecurity_db_common as ps_db_common from neutron.extensions import portsecurity as psec from neutron.plugins.ml2 import driver_api as api LOG = logging.getLogger(__name__) class PortSecurityExtensionDriver(api.ExtensionDriver, ps_db_common.PortSecurityDbCommon, common_db_mixin.CommonDbMixin): _supported_extension_alias = 'port-security' def initialize(self): LOG.info(_LI("PortSecurityExtensionDriver initialization complete")) @property def extension_alias(self): return self._supported_extension_alias def process_create_network(self, context, data, result): # Create the network extension attributes. if psec.PORTSECURITY not in data: data[psec.PORTSECURITY] = psec.DEFAULT_PORT_SECURITY self._process_network_port_security_create(context, data, result) def process_update_network(self, context, data, result): # Update the network extension attributes. if psec.PORTSECURITY in data: self._process_network_port_security_update(context, data, result) def process_create_port(self, context, data, result): # Create the port extension attributes. data[psec.PORTSECURITY] = self._determine_port_security(context, data) self._process_port_port_security_create(context, data, result) def process_update_port(self, context, data, result): if psec.PORTSECURITY in data: self._process_port_port_security_update( context, data, result) def extend_network_dict(self, session, db_data, result): self._extend_port_security_dict(result, db_data) def extend_port_dict(self, session, db_data, result): self._extend_port_security_dict(result, db_data) def _determine_port_security(self, context, port): """Returns a boolean (port_security_enabled). Port_security is the value associated with the port if one is present otherwise the value associated with the network is returned. """ # we don't apply security groups for dhcp, router if port.get('device_owner') and utils.is_port_trusted(port): return False if attrs.is_attr_set(port.get(psec.PORTSECURITY)): port_security_enabled = port[psec.PORTSECURITY] else: port_security_enabled = self._get_network_security_binding( context, port['network_id']) return port_security_enabled neutron-8.4.0/neutron/plugins/ml2/extensions/dns_integration.py0000664000567000056710000003200713044372760026173 0ustar jenkinsjenkins00000000000000# Copyright (c) 2016 IBM # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
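# NOTE: For illustration only - extension drivers such as the port-security
# driver above and the DNS driver below share the same skeleton; this sketch
# uses hypothetical names ('ExampleExtensionDriver', 'example'):
#
#     class ExampleExtensionDriver(api.ExtensionDriver):
#         def initialize(self):
#             pass
#
#         @property
#         def extension_alias(self):
#             return 'example'
#
#         def process_create_port(self, plugin_context, data, result):
#             # Runs inside the port-create DB transaction.
#             result['example'] = data.get('example')
#
#         def extend_port_dict(self, session, db_data, result):
#             # Runs whenever a port dict is built for the API.
#             result.setdefault('example', None)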
from oslo_config import cfg from oslo_log import log as logging from neutron._i18n import _LE, _LI from neutron.api.v2 import attributes from neutron.callbacks import events from neutron.callbacks import registry from neutron.callbacks import resources from neutron.db import dns_db from neutron.db import models_v2 from neutron.extensions import dns from neutron import manager from neutron.plugins.common import utils as plugin_utils from neutron.plugins.ml2 import db from neutron.plugins.ml2 import driver_api as api from neutron.services.externaldns import driver LOG = logging.getLogger(__name__) class DNSExtensionDriver(api.ExtensionDriver): _supported_extension_alias = 'dns-integration' @property def extension_alias(self): return self._supported_extension_alias def process_create_network(self, plugin_context, request_data, db_data): dns_domain = request_data.get(dns.DNSDOMAIN) if not attributes.is_attr_set(dns_domain): return if dns_domain: plugin_context.session.add(dns_db.NetworkDNSDomain( network_id=db_data['id'], dns_domain=dns_domain)) db_data[dns.DNSDOMAIN] = dns_domain def process_update_network(self, plugin_context, request_data, db_data): new_value = request_data.get(dns.DNSDOMAIN) if not attributes.is_attr_set(new_value): return current_dns_domain = db_data.get(dns.DNSDOMAIN) if current_dns_domain == new_value: return net_id = db_data['id'] if current_dns_domain: net_dns_domain = plugin_context.session.query( dns_db.NetworkDNSDomain).filter_by(network_id=net_id).one() if new_value: net_dns_domain['dns_domain'] = new_value db_data[dns.DNSDOMAIN] = new_value else: plugin_context.session.delete(net_dns_domain) db_data[dns.DNSDOMAIN] = '' elif new_value: plugin_context.session.add(dns_db.NetworkDNSDomain( network_id=net_id, dns_domain=new_value)) db_data[dns.DNSDOMAIN] = new_value def process_create_port(self, plugin_context, request_data, db_data): if not request_data[dns.DNSNAME]: return network = self._get_network(plugin_context, db_data['network_id']) if not network[dns.DNSDOMAIN]: return if self.external_dns_not_needed(plugin_context, network): return plugin_context.session.add(dns_db.PortDNS( port_id=db_data['id'], current_dns_name=request_data[dns.DNSNAME], current_dns_domain=network[dns.DNSDOMAIN], previous_dns_name='', previous_dns_domain='')) def process_update_port(self, plugin_context, request_data, db_data): dns_name = request_data.get(dns.DNSNAME) has_fixed_ips = 'fixed_ips' in request_data if dns_name is None and not has_fixed_ips: return network = self._get_network(plugin_context, db_data['network_id']) if not network[dns.DNSDOMAIN]: return if self.external_dns_not_needed(plugin_context, network): return dns_domain = network[dns.DNSDOMAIN] dns_data_db = plugin_context.session.query(dns_db.PortDNS).filter_by( port_id=db_data['id']).one_or_none() if dns_data_db: is_dns_name_changed = (dns_name is not None and dns_data_db['current_dns_name'] != dns_name) if is_dns_name_changed or (has_fixed_ips and dns_data_db['current_dns_name']): dns_data_db['previous_dns_name'] = ( dns_data_db['current_dns_name']) dns_data_db['previous_dns_domain'] = ( dns_data_db['current_dns_domain']) if is_dns_name_changed: dns_data_db['current_dns_name'] = dns_name if dns_name: dns_data_db['current_dns_domain'] = dns_domain else: dns_data_db['current_dns_domain'] = '' return if dns_name: plugin_context.session.add(dns_db.PortDNS( port_id=db_data['id'], current_dns_name=dns_name, current_dns_domain=dns_domain, previous_dns_name='', previous_dns_domain='')) def external_dns_not_needed(self, 
context, network): """Decide if ports in network need to be sent to the DNS service. :param context: plugin request context :param network: network dictionary :returns: True or False """ pass def extend_network_dict(self, session, db_data, response_data): response_data[dns.DNSDOMAIN] = '' if db_data.dns_domain: response_data[dns.DNSDOMAIN] = db_data.dns_domain[dns.DNSDOMAIN] return response_data def extend_port_dict(self, session, db_data, response_data): response_data[dns.DNSNAME] = db_data[dns.DNSNAME] return response_data def _get_network(self, context, network_id): plugin = manager.NeutronManager.get_plugin() return plugin.get_network(context, network_id) class DNSExtensionDriverML2(DNSExtensionDriver): def initialize(self): LOG.info(_LI("DNSExtensionDriverML2 initialization complete")) def _is_tunnel_tenant_network(self, provider_net): if provider_net['network_type'] == 'geneve': tunnel_ranges = cfg.CONF.ml2_type_geneve.vni_ranges elif provider_net['network_type'] == 'vxlan': tunnel_ranges = cfg.CONF.ml2_type_vxlan.vni_ranges else: tunnel_ranges = cfg.CONF.ml2_type_gre.tunnel_id_ranges segmentation_id = int(provider_net['segmentation_id']) for entry in tunnel_ranges: entry = entry.strip() tun_min, tun_max = entry.split(':') tun_min = tun_min.strip() tun_max = tun_max.strip() # Check every configured range, not just the first entry. if int(tun_min) <= segmentation_id <= int(tun_max): return True return False def _is_vlan_tenant_network(self, provider_net): network_vlan_ranges = plugin_utils.parse_network_vlan_ranges( cfg.CONF.ml2_type_vlan.network_vlan_ranges) vlan_ranges = network_vlan_ranges[provider_net['physical_network']] if not vlan_ranges: return False segmentation_id = int(provider_net['segmentation_id']) for vlan_range in vlan_ranges: if vlan_range[0] <= segmentation_id <= vlan_range[1]: return True return False def external_dns_not_needed(self, context, network): dns_driver = _get_dns_driver() if not dns_driver: return True if network['router:external']: return True segments = db.get_network_segments(context.session, network['id']) if len(segments) > 1: return False provider_net = segments[0] if provider_net['network_type'] == 'local': return True if provider_net['network_type'] == 'flat': return False if provider_net['network_type'] == 'vlan': return self._is_vlan_tenant_network(provider_net) if provider_net['network_type'] in ['gre', 'vxlan', 'geneve']: return self._is_tunnel_tenant_network(provider_net) return True DNS_DRIVER = None def _get_dns_driver(): global DNS_DRIVER if DNS_DRIVER: return DNS_DRIVER if not cfg.CONF.external_dns_driver: return try: DNS_DRIVER = driver.ExternalDNSService.get_instance() LOG.debug("External DNS driver loaded: %s", cfg.CONF.external_dns_driver) return DNS_DRIVER except ImportError: LOG.exception(_LE("ImportError exception occurred while loading " "the external DNS service driver")) raise dns.ExternalDNSDriverNotFound( driver=cfg.CONF.external_dns_driver) def _create_port_in_external_dns_service(resource, event, trigger, **kwargs): dns_driver = _get_dns_driver() if not dns_driver: return context = kwargs['context'] port = kwargs['port'] dns_data_db = context.session.query(dns_db.PortDNS).filter_by( port_id=port['id']).one_or_none() if not dns_data_db: return records = [ip['ip_address'] for ip in port['fixed_ips']] _send_data_to_external_dns_service(context, dns_driver, dns_data_db['current_dns_domain'], dns_data_db['current_dns_name'], records) def _send_data_to_external_dns_service(context, dns_driver, dns_domain, dns_name, records): try: dns_driver.create_record_set(context, dns_domain, dns_name, records) except
(dns.DNSDomainNotFound, dns.DuplicateRecordSet) as e: LOG.exception(_LE("Error publishing port data in external DNS " "service. Name: '%(name)s'. Domain: '%(domain)s'. " "DNS service driver message '%(message)s'") % {"name": dns_name, "domain": dns_domain, "message": e.msg}) def _remove_data_from_external_dns_service(context, dns_driver, dns_domain, dns_name, records): try: dns_driver.delete_record_set(context, dns_domain, dns_name, records) except (dns.DNSDomainNotFound, dns.DuplicateRecordSet) as e: LOG.exception(_LE("Error deleting port data from external DNS " "service. Name: '%(name)s'. Domain: '%(domain)s'. " "IP addresses '%(ips)s'. DNS service driver message " "'%(message)s'") % {"name": dns_name, "domain": dns_domain, "message": e.msg, "ips": ', '.join(records)}) def _update_port_in_external_dns_service(resource, event, trigger, **kwargs): dns_driver = _get_dns_driver() if not dns_driver: return context = kwargs['context'] updated_port = kwargs['port'] original_port = kwargs.get('original_port') if not original_port: return original_ips = [ip['ip_address'] for ip in original_port['fixed_ips']] updated_ips = [ip['ip_address'] for ip in updated_port['fixed_ips']] if (updated_port[dns.DNSNAME] == original_port[dns.DNSNAME] and set(original_ips) == set(updated_ips)): return if (updated_port[dns.DNSNAME] == original_port[dns.DNSNAME] and not original_port[dns.DNSNAME]): return dns_data_db = context.session.query(dns_db.PortDNS).filter_by( port_id=updated_port['id']).one_or_none() if not dns_data_db: return if dns_data_db['previous_dns_name']: _remove_data_from_external_dns_service( context, dns_driver, dns_data_db['previous_dns_domain'], dns_data_db['previous_dns_name'], original_ips) if dns_data_db['current_dns_name']: _send_data_to_external_dns_service(context, dns_driver, dns_data_db['current_dns_domain'], dns_data_db['current_dns_name'], updated_ips) def _delete_port_in_external_dns_service(resource, event, trigger, **kwargs): dns_driver = _get_dns_driver() if not dns_driver: return context = kwargs['context'] port_id = kwargs['port_id'] dns_data_db = context.session.query(dns_db.PortDNS).filter_by( port_id=port_id).one_or_none() if not dns_data_db: return if dns_data_db['current_dns_name']: ip_allocations = context.session.query( models_v2.IPAllocation).filter_by(port_id=port_id).all() records = [alloc['ip_address'] for alloc in ip_allocations] _remove_data_from_external_dns_service( context, dns_driver, dns_data_db['current_dns_domain'], dns_data_db['current_dns_name'], records) def subscribe(): registry.subscribe( _create_port_in_external_dns_service, resources.PORT, events.AFTER_CREATE) registry.subscribe( _update_port_in_external_dns_service, resources.PORT, events.AFTER_UPDATE) registry.subscribe( _delete_port_in_external_dns_service, resources.PORT, events.BEFORE_DELETE) subscribe() neutron-8.4.0/neutron/plugins/ml2/extensions/qos.py0000664000567000056710000000347513044372736023620 0ustar jenkinsjenkins00000000000000# Copyright (c) 2015 Red Hat Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. from oslo_log import log as logging from neutron.core_extensions import base as base_core from neutron.core_extensions import qos as qos_core from neutron.plugins.ml2 import driver_api as api LOG = logging.getLogger(__name__) QOS_EXT_DRIVER_ALIAS = 'qos' class QosExtensionDriver(api.ExtensionDriver): def initialize(self): self.core_ext_handler = qos_core.QosCoreResourceExtension() LOG.debug("QosExtensionDriver initialization complete") def process_create_network(self, context, data, result): self.core_ext_handler.process_fields( context, base_core.NETWORK, data, result) process_update_network = process_create_network def process_create_port(self, context, data, result): self.core_ext_handler.process_fields( context, base_core.PORT, data, result) process_update_port = process_create_port def extend_network_dict(self, session, db_data, result): result.update( self.core_ext_handler.extract_fields( base_core.NETWORK, db_data)) def extend_port_dict(self, session, db_data, result): result.update( self.core_ext_handler.extract_fields(base_core.PORT, db_data)) neutron-8.4.0/neutron/plugins/ml2/managers.py0000664000567000056710000012417113044372760022406 0ustar jenkinsjenkins00000000000000# Copyright (c) 2013 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
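# ---------------------------------------------------------------------------
# Editor's note -- illustrative sketch, not part of the original tree: the
# minimal shape of an ml2 extension driver as driven by the ExtensionManager
# defined below. QosExtensionDriver above shares one handler between its
# create and update hooks through plain class-attribute aliasing; the same
# idiom is shown here. All names in this sketch are hypothetical.
# ---------------------------------------------------------------------------
class _SketchExtensionDriver(object):
    """No-op extension driver exposing the hook names ml2 calls."""

    def initialize(self):
        # One-time setup, invoked by ExtensionManager.initialize().
        pass

    @property
    def extension_alias(self):
        # Alias collected by ExtensionManager.extension_aliases().
        return 'sketch'

    def process_create_port(self, plugin_context, data, result):
        # A real driver would copy extension attributes from the request
        # (data) into the response dict (result) and persist them.
        pass

    # Create and update handling are identical here, so alias the method
    # the same way QosExtensionDriver does above.
    process_update_port = process_create_port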
from oslo_config import cfg from oslo_log import log from oslo_utils import excutils import six import stevedore from neutron._i18n import _, _LE, _LI, _LW from neutron.api.v2 import attributes from neutron.common import exceptions as exc from neutron.db import api as db_api from neutron.extensions import external_net from neutron.extensions import multiprovidernet as mpnet from neutron.extensions import portbindings from neutron.extensions import providernet as provider from neutron.extensions import vlantransparent from neutron.plugins.ml2.common import exceptions as ml2_exc from neutron.plugins.ml2 import db from neutron.plugins.ml2 import driver_api as api from neutron.plugins.ml2 import models from neutron.services.qos import qos_consts LOG = log.getLogger(__name__) MAX_BINDING_LEVELS = 10 class TypeManager(stevedore.named.NamedExtensionManager): """Manage network segment types using drivers.""" def __init__(self): # Mapping from type name to DriverManager self.drivers = {} LOG.info(_LI("Configured type driver names: %s"), cfg.CONF.ml2.type_drivers) super(TypeManager, self).__init__('neutron.ml2.type_drivers', cfg.CONF.ml2.type_drivers, invoke_on_load=True) LOG.info(_LI("Loaded type driver names: %s"), self.names()) self._register_types() self._check_tenant_network_types(cfg.CONF.ml2.tenant_network_types) self._check_external_network_type(cfg.CONF.ml2.external_network_type) def _register_types(self): for ext in self: network_type = ext.obj.get_type() if network_type in self.drivers: LOG.error(_LE("Type driver '%(new_driver)s' ignored because" " type driver '%(old_driver)s' is already" " registered for type '%(type)s'"), {'new_driver': ext.name, 'old_driver': self.drivers[network_type].name, 'type': network_type}) else: self.drivers[network_type] = ext LOG.info(_LI("Registered types: %s"), self.drivers.keys()) def _check_tenant_network_types(self, types): self.tenant_network_types = [] for network_type in types: if network_type in self.drivers: self.tenant_network_types.append(network_type) else: LOG.error(_LE("No type driver for tenant network_type: %s. " "Service terminated!"), network_type) raise SystemExit(1) LOG.info(_LI("Tenant network_types: %s"), self.tenant_network_types) def _check_external_network_type(self, ext_network_type): if ext_network_type and ext_network_type not in self.drivers: LOG.error(_LE("No type driver for external network_type: %s. " "Service terminated!"), ext_network_type) raise SystemExit(1) def _process_provider_segment(self, segment): (network_type, physical_network, segmentation_id) = (self._get_attribute(segment, attr) for attr in provider.ATTRIBUTES) if attributes.is_attr_set(network_type): segment = {api.NETWORK_TYPE: network_type, api.PHYSICAL_NETWORK: physical_network, api.SEGMENTATION_ID: segmentation_id} self.validate_provider_segment(segment) return segment msg = _("network_type required") raise exc.InvalidInput(error_message=msg) def _process_provider_create(self, network): if any(attributes.is_attr_set(network.get(attr)) for attr in provider.ATTRIBUTES): # Verify that multiprovider and provider attributes are not set # at the same time. 
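# (A network create request may carry either the provider:* attributes or a # 'segments' list, never both; mixing them is rejected just below.)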
if attributes.is_attr_set(network.get(mpnet.SEGMENTS)): raise mpnet.SegmentsSetInConjunctionWithProviders() segment = self._get_provider_segment(network) return [self._process_provider_segment(segment)] elif attributes.is_attr_set(network.get(mpnet.SEGMENTS)): segments = [self._process_provider_segment(s) for s in network[mpnet.SEGMENTS]] mpnet.check_duplicate_segments(segments, self.is_partial_segment) return segments def _match_segment(self, segment, filters): return all(not filters.get(attr) or segment.get(attr) in filters[attr] for attr in provider.ATTRIBUTES) def _get_provider_segment(self, network): # TODO(manishg): Placeholder method # Code intended for operating on a provider segment should use # this method to extract the segment, even though currently the # segment attributes are part of the network dictionary. In the # future, network and segment information will be decoupled and # here we will do the job of extracting the segment information. return network def network_matches_filters(self, network, filters): if not filters: return True if any(attributes.is_attr_set(network.get(attr)) for attr in provider.ATTRIBUTES): segments = [self._get_provider_segment(network)] elif attributes.is_attr_set(network.get(mpnet.SEGMENTS)): segments = self._get_attribute(network, mpnet.SEGMENTS) else: return True return any(self._match_segment(s, filters) for s in segments) def _get_attribute(self, attrs, key): value = attrs.get(key) if value is attributes.ATTR_NOT_SPECIFIED: value = None return value def extend_network_dict_provider(self, context, network): # this method is left for backward compat even though it would be # easy to change the callers in tree to use the bulk function return self.extend_networks_dict_provider(context, [network]) def extend_networks_dict_provider(self, context, networks): ids = [network['id'] for network in networks] net_segments = db.get_networks_segments(context.session, ids) for network in networks: segments = net_segments[network['id']] self._extend_network_dict_provider(network, segments) def _extend_network_dict_provider(self, network, segments): if not segments: LOG.debug("Network %s has no segments", network['id']) for attr in provider.ATTRIBUTES: network[attr] = None elif len(segments) > 1: network[mpnet.SEGMENTS] = [ {provider.NETWORK_TYPE: segment[api.NETWORK_TYPE], provider.PHYSICAL_NETWORK: segment[api.PHYSICAL_NETWORK], provider.SEGMENTATION_ID: segment[api.SEGMENTATION_ID]} for segment in segments] else: segment = segments[0] network[provider.NETWORK_TYPE] = segment[api.NETWORK_TYPE] network[provider.PHYSICAL_NETWORK] = segment[api.PHYSICAL_NETWORK] network[provider.SEGMENTATION_ID] = segment[api.SEGMENTATION_ID] def initialize(self): for network_type, driver in six.iteritems(self.drivers): LOG.info(_LI("Initializing driver for type '%s'"), network_type) driver.obj.initialize() def _add_network_segment(self, session, network_id, segment, mtu, segment_index=0): db.add_network_segment(session, network_id, segment, segment_index) if segment.get(api.MTU, 0) > 0: mtu.append(segment[api.MTU]) def create_network_segments(self, context, network, tenant_id): """Call type drivers to create network segments.""" segments = self._process_provider_create(network) session = context.session mtu = [] with session.begin(subtransactions=True): network_id = network['id'] if segments: for segment_index, segment in enumerate(segments): segment = self.reserve_provider_segment( session, segment) self._add_network_segment(session, network_id, segment, mtu, segment_index) 
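# With no explicit provider segments, a segment is allocated automatically # below: external networks may use the dedicated external_network_type, # while tenant networks try each configured tenant network type in order.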
elif (cfg.CONF.ml2.external_network_type and self._get_attribute(network, external_net.EXTERNAL)): segment = self._allocate_ext_net_segment(session) self._add_network_segment(session, network_id, segment, mtu) else: segment = self._allocate_tenant_net_segment(session) self._add_network_segment(session, network_id, segment, mtu) network[api.MTU] = min(mtu) if mtu else 0 def is_partial_segment(self, segment): network_type = segment[api.NETWORK_TYPE] driver = self.drivers.get(network_type) if driver: return driver.obj.is_partial_segment(segment) else: msg = _("network_type value '%s' not supported") % network_type raise exc.InvalidInput(error_message=msg) def validate_provider_segment(self, segment): network_type = segment[api.NETWORK_TYPE] driver = self.drivers.get(network_type) if driver: driver.obj.validate_provider_segment(segment) else: msg = _("network_type value '%s' not supported") % network_type raise exc.InvalidInput(error_message=msg) def reserve_provider_segment(self, session, segment): network_type = segment.get(api.NETWORK_TYPE) driver = self.drivers.get(network_type) return driver.obj.reserve_provider_segment(session, segment) def _allocate_segment(self, session, network_type): driver = self.drivers.get(network_type) return driver.obj.allocate_tenant_segment(session) def _allocate_tenant_net_segment(self, session): for network_type in self.tenant_network_types: segment = self._allocate_segment(session, network_type) if segment: return segment raise exc.NoNetworkAvailable() def _allocate_ext_net_segment(self, session): network_type = cfg.CONF.ml2.external_network_type segment = self._allocate_segment(session, network_type) if segment: return segment raise exc.NoNetworkAvailable() def release_network_segments(self, session, network_id): segments = db.get_network_segments(session, network_id, filter_dynamic=None) for segment in segments: network_type = segment.get(api.NETWORK_TYPE) driver = self.drivers.get(network_type) if driver: driver.obj.release_segment(session, segment) else: LOG.error(_LE("Failed to release segment '%s' because " "network type is not supported."), segment) def allocate_dynamic_segment(self, session, network_id, segment): """Allocate a dynamic segment using a partial or full segment dict.""" dynamic_segment = db.get_dynamic_segment( session, network_id, segment.get(api.PHYSICAL_NETWORK), segment.get(api.SEGMENTATION_ID)) if dynamic_segment: return dynamic_segment driver = self.drivers.get(segment.get(api.NETWORK_TYPE)) dynamic_segment = driver.obj.reserve_provider_segment(session, segment) db.add_network_segment(session, network_id, dynamic_segment, is_dynamic=True) return dynamic_segment def release_dynamic_segment(self, session, segment_id): """Delete a dynamic segment.""" segment = db.get_segment_by_id(session, segment_id) if segment: driver = self.drivers.get(segment.get(api.NETWORK_TYPE)) if driver: driver.obj.release_segment(session, segment) db.delete_network_segment(session, segment_id) else: LOG.error(_LE("Failed to release segment '%s' because " "network type is not supported."), segment) else: LOG.debug("No segment found with id %(segment_id)s", segment_id) class MechanismManager(stevedore.named.NamedExtensionManager): """Manage networking mechanisms using drivers.""" def __init__(self): # Registered mechanism drivers, keyed by name. self.mech_drivers = {} # Ordered list of mechanism drivers, defining # the order in which the drivers are called. 
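# (name_order=True in the stevedore constructor below makes the load order # follow cfg.CONF.ml2.mechanism_drivers, which is the order used for every # driver call.)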
self.ordered_mech_drivers = [] LOG.info(_LI("Configured mechanism driver names: %s"), cfg.CONF.ml2.mechanism_drivers) super(MechanismManager, self).__init__('neutron.ml2.mechanism_drivers', cfg.CONF.ml2.mechanism_drivers, invoke_on_load=True, name_order=True) LOG.info(_LI("Loaded mechanism driver names: %s"), self.names()) self._register_mechanisms() def _register_mechanisms(self): """Register all mechanism drivers. This method should only be called once in the MechanismManager constructor. """ for ext in self: self.mech_drivers[ext.name] = ext self.ordered_mech_drivers.append(ext) LOG.info(_LI("Registered mechanism drivers: %s"), [driver.name for driver in self.ordered_mech_drivers]) @property def supported_qos_rule_types(self): if not self.ordered_mech_drivers: return [] rule_types = set(qos_consts.VALID_RULE_TYPES) binding_driver_found = False # Recalculate on every call to allow drivers to determine supported # rule types dynamically for driver in self.ordered_mech_drivers: driver_obj = driver.obj if driver_obj._supports_port_binding: binding_driver_found = True if hasattr(driver_obj, 'supported_qos_rule_types'): new_rule_types = \ rule_types & set(driver_obj.supported_qos_rule_types) dropped_rule_types = rule_types - new_rule_types if dropped_rule_types: LOG.info( _LI("%(rule_types)s rule types disabled for ml2 " "because %(driver)s does not support them"), {'rule_types': ', '.join(dropped_rule_types), 'driver': driver.name}) rule_types = new_rule_types else: # at least one of drivers does not support QoS, meaning # there are no rule types supported by all of them LOG.warning( _LW("%s does not support QoS; " "no rule types available"), driver.name) return [] if binding_driver_found: rule_types = list(rule_types) else: rule_types = [] LOG.debug("Supported QoS rule types " "(common subset for all mech drivers): %s", rule_types) return rule_types def initialize(self): for driver in self.ordered_mech_drivers: LOG.info(_LI("Initializing mechanism driver '%s'"), driver.name) driver.obj.initialize() def _check_vlan_transparency(self, context): """Helper method for checking vlan transparency support. :param context: context parameter to pass to each method call :raises: neutron.extensions.vlantransparent. VlanTransparencyDriverError if any mechanism driver doesn't support vlan transparency. """ if context.current.get('vlan_transparent'): for driver in self.ordered_mech_drivers: if not driver.obj.check_vlan_transparency(context): raise vlantransparent.VlanTransparencyDriverError() def _call_on_drivers(self, method_name, context, continue_on_failure=False, raise_db_retriable=False): """Helper method for calling a method across all mechanism drivers. :param method_name: name of the method to call :param context: context parameter to pass to each method call :param continue_on_failure: whether or not to continue to call all mechanism drivers once one has raised an exception :param raise_db_retriable: whether or not to propagate retriable db exceptions raised by mechanism drivers up to the upper layer, so that the upper layer can handle them :raises: neutron.plugins.ml2.common.MechanismDriverError if any mechanism driver call fails, or the original DB retriable error when raise_db_retriable=True.
See neutron.db.api.is_retriable for which DB exceptions are retriable. """ error = False for driver in self.ordered_mech_drivers: try: getattr(driver.obj, method_name)(context) except Exception as e: if raise_db_retriable and db_api.is_retriable(e): with excutils.save_and_reraise_exception(): LOG.debug("DB exception raised by Mechanism driver " "'%(name)s' in %(method)s", {'name': driver.name, 'method': method_name}, exc_info=e) LOG.exception( _LE("Mechanism driver '%(name)s' failed in %(method)s"), {'name': driver.name, 'method': method_name} ) error = True if not continue_on_failure: break if error: raise ml2_exc.MechanismDriverError( method=method_name ) def create_network_precommit(self, context): """Notify all mechanism drivers during network creation. :raises: DB retriable error if create_network_precommit raises them See neutron.db.api.is_retriable for which DB exceptions are retriable or neutron.plugins.ml2.common.MechanismDriverError if any mechanism driver create_network_precommit call fails. Called within the database transaction. If a mechanism driver raises an exception, then a MechanismDriverError is propagated to the caller, triggering a rollback. There is no guarantee that all mechanism drivers are called in this case. """ self._check_vlan_transparency(context) self._call_on_drivers("create_network_precommit", context, raise_db_retriable=True) def create_network_postcommit(self, context): """Notify all mechanism drivers after network creation. :raises: neutron.plugins.ml2.common.MechanismDriverError if any mechanism driver create_network_postcommit call fails. Called after the database transaction. If a mechanism driver raises an exception, then a MechanismDriverError is propagated to the caller, where the network will be deleted, triggering any required cleanup. There is no guarantee that all mechanism drivers are called in this case. """ self._call_on_drivers("create_network_postcommit", context) def update_network_precommit(self, context): """Notify all mechanism drivers during network update. :raises: DB retriable error if update_network_precommit raises them See neutron.db.api.is_retriable for which DB exceptions are retriable or neutron.plugins.ml2.common.MechanismDriverError if any mechanism driver update_network_precommit call fails. Called within the database transaction. If a mechanism driver raises an exception, then a MechanismDriverError is propagated to the caller, triggering a rollback. There is no guarantee that all mechanism drivers are called in this case. """ self._call_on_drivers("update_network_precommit", context, raise_db_retriable=True) def update_network_postcommit(self, context): """Notify all mechanism drivers after network update. :raises: neutron.plugins.ml2.common.MechanismDriverError if any mechanism driver update_network_postcommit call fails. Called after the database transaction. If any mechanism driver raises an error, then the error is logged but we continue to call every other mechanism driver. A MechanismDriverError is then reraised at the end to notify the caller of a failure. """ self._call_on_drivers("update_network_postcommit", context, continue_on_failure=True) def delete_network_precommit(self, context): """Notify all mechanism drivers during network deletion. :raises: DB retriable error if delete_network_precommit raises them See neutron.db.api.is_retriable for which DB exceptions are retriable or neutron.plugins.ml2.common.MechanismDriverError if any mechanism driver delete_network_precommit call fails. Called within the database transaction. If a mechanism driver raises an exception, then a MechanismDriverError is propagated to the caller, triggering a rollback. There is no guarantee that all mechanism drivers are called in this case. """ self._call_on_drivers("delete_network_precommit", context, raise_db_retriable=True) def delete_network_postcommit(self, context): """Notify all mechanism drivers after network deletion. :raises: neutron.plugins.ml2.common.MechanismDriverError if any mechanism driver delete_network_postcommit call fails. Called after the database transaction. If any mechanism driver raises an error, then the error is logged but we continue to call every other mechanism driver. A MechanismDriverError is then reraised at the end to notify the caller of a failure. In general we expect the caller to ignore the error, as the network resource has already been deleted from the database and it doesn't make sense to undo the action by recreating the network. """ self._call_on_drivers("delete_network_postcommit", context, continue_on_failure=True) def create_subnet_precommit(self, context): """Notify all mechanism drivers during subnet creation. :raises: DB retriable error if create_subnet_precommit raises them See neutron.db.api.is_retriable for which DB exceptions are retriable or neutron.plugins.ml2.common.MechanismDriverError if any mechanism driver create_subnet_precommit call fails. Called within the database transaction. If a mechanism driver raises an exception, then a MechanismDriverError is propagated to the caller, triggering a rollback. There is no guarantee that all mechanism drivers are called in this case. """ self._call_on_drivers("create_subnet_precommit", context, raise_db_retriable=True) def create_subnet_postcommit(self, context): """Notify all mechanism drivers after subnet creation. :raises: neutron.plugins.ml2.common.MechanismDriverError if any mechanism driver create_subnet_postcommit call fails. Called after the database transaction. If a mechanism driver raises an exception, then a MechanismDriverError is propagated to the caller, where the subnet will be deleted, triggering any required cleanup. There is no guarantee that all mechanism drivers are called in this case. """ self._call_on_drivers("create_subnet_postcommit", context) def update_subnet_precommit(self, context): """Notify all mechanism drivers during subnet update. :raises: DB retriable error if update_subnet_precommit raises them See neutron.db.api.is_retriable for which DB exceptions are retriable or neutron.plugins.ml2.common.MechanismDriverError if any mechanism driver update_subnet_precommit call fails. Called within the database transaction. If a mechanism driver raises an exception, then a MechanismDriverError is propagated to the caller, triggering a rollback. There is no guarantee that all mechanism drivers are called in this case. """ self._call_on_drivers("update_subnet_precommit", context, raise_db_retriable=True) def update_subnet_postcommit(self, context): """Notify all mechanism drivers after subnet update. :raises: neutron.plugins.ml2.common.MechanismDriverError if any mechanism driver update_subnet_postcommit call fails. Called after the database transaction. If any mechanism driver raises an error, then the error is logged but we continue to call every other mechanism driver. A MechanismDriverError is then reraised at the end to notify the caller of a failure. """ self._call_on_drivers("update_subnet_postcommit", context, continue_on_failure=True) def delete_subnet_precommit(self, context): """Notify all mechanism drivers during subnet deletion. :raises: DB retriable error if delete_subnet_precommit raises them See neutron.db.api.is_retriable for which DB exceptions are retriable or neutron.plugins.ml2.common.MechanismDriverError if any mechanism driver delete_subnet_precommit call fails. Called within the database transaction. If a mechanism driver raises an exception, then a MechanismDriverError is propagated to the caller, triggering a rollback. There is no guarantee that all mechanism drivers are called in this case. """ self._call_on_drivers("delete_subnet_precommit", context, raise_db_retriable=True) def delete_subnet_postcommit(self, context): """Notify all mechanism drivers after subnet deletion. :raises: neutron.plugins.ml2.common.MechanismDriverError if any mechanism driver delete_subnet_postcommit call fails. Called after the database transaction. If any mechanism driver raises an error, then the error is logged but we continue to call every other mechanism driver. A MechanismDriverError is then reraised at the end to notify the caller of a failure. In general we expect the caller to ignore the error, as the subnet resource has already been deleted from the database and it doesn't make sense to undo the action by recreating the subnet. """ self._call_on_drivers("delete_subnet_postcommit", context, continue_on_failure=True) def create_port_precommit(self, context): """Notify all mechanism drivers during port creation. :raises: DB retriable error if create_port_precommit raises them See neutron.db.api.is_retriable for which DB exceptions are retriable or neutron.plugins.ml2.common.MechanismDriverError if any mechanism driver create_port_precommit call fails. Called within the database transaction. If a mechanism driver raises an exception, then a MechanismDriverError is propagated to the caller, triggering a rollback. There is no guarantee that all mechanism drivers are called in this case. """ self._call_on_drivers("create_port_precommit", context, raise_db_retriable=True) def create_port_postcommit(self, context): """Notify all mechanism drivers of port creation. :raises: neutron.plugins.ml2.common.MechanismDriverError if any mechanism driver create_port_postcommit call fails. Called after the database transaction. Errors raised by mechanism drivers are left to propagate to the caller, where the port will be deleted, triggering any required cleanup. There is no guarantee that all mechanism drivers are called in this case. """ self._call_on_drivers("create_port_postcommit", context) def update_port_precommit(self, context): """Notify all mechanism drivers during port update. :raises: DB retriable error if update_port_precommit raises them See neutron.db.api.is_retriable for which DB exceptions are retriable or neutron.plugins.ml2.common.MechanismDriverError if any mechanism driver update_port_precommit call fails. Called within the database transaction. If a mechanism driver raises an exception, then a MechanismDriverError is propagated to the caller, triggering a rollback. There is no guarantee that all mechanism drivers are called in this case. """ self._call_on_drivers("update_port_precommit", context, raise_db_retriable=True) def update_port_postcommit(self, context): """Notify all mechanism drivers after port update. :raises: neutron.plugins.ml2.common.MechanismDriverError if any mechanism driver update_port_postcommit call fails. Called after the database transaction. If any mechanism driver raises an error, then the error is logged but we continue to call every other mechanism driver. A MechanismDriverError is then reraised at the end to notify the caller of a failure. """ self._call_on_drivers("update_port_postcommit", context, continue_on_failure=True) def delete_port_precommit(self, context): """Notify all mechanism drivers during port deletion. :raises: DB retriable error if delete_port_precommit raises them See neutron.db.api.is_retriable for which DB exceptions are retriable or neutron.plugins.ml2.common.MechanismDriverError if any mechanism driver delete_port_precommit call fails. Called within the database transaction. If a mechanism driver raises an exception, then a MechanismDriverError is propagated to the caller, triggering a rollback. There is no guarantee that all mechanism drivers are called in this case. """ self._call_on_drivers("delete_port_precommit", context, raise_db_retriable=True) def delete_port_postcommit(self, context): """Notify all mechanism drivers after port deletion. :raises: neutron.plugins.ml2.common.MechanismDriverError if any mechanism driver delete_port_postcommit call fails. Called after the database transaction. If any mechanism driver raises an error, then the error is logged but we continue to call every other mechanism driver. A MechanismDriverError is then reraised at the end to notify the caller of a failure. In general we expect the caller to ignore the error, as the port resource has already been deleted from the database and it doesn't make sense to undo the action by recreating the port. """ self._call_on_drivers("delete_port_postcommit", context, continue_on_failure=True) def bind_port(self, context): """Attempt to bind a port using registered mechanism drivers. :param context: PortContext instance describing the port Called outside any transaction to attempt to establish a port binding.
""" binding = context._binding LOG.debug("Attempting to bind port %(port)s on host %(host)s " "for vnic_type %(vnic_type)s with profile %(profile)s", {'port': context.current['id'], 'host': context.host, 'vnic_type': binding.vnic_type, 'profile': binding.profile}) context._clear_binding_levels() if not self._bind_port_level(context, 0, context.network.network_segments): binding.vif_type = portbindings.VIF_TYPE_BINDING_FAILED LOG.error(_LE("Failed to bind port %(port)s on host %(host)s " "for vnic_type %(vnic_type)s using segments " "%(segments)s"), {'port': context.current['id'], 'host': context.host, 'vnic_type': binding.vnic_type, 'segments': context.network.network_segments}) def _bind_port_level(self, context, level, segments_to_bind): binding = context._binding port_id = context.current['id'] LOG.debug("Attempting to bind port %(port)s on host %(host)s " "at level %(level)s using segments %(segments)s", {'port': port_id, 'host': context.host, 'level': level, 'segments': segments_to_bind}) if level == MAX_BINDING_LEVELS: LOG.error(_LE("Exceeded maximum binding levels attempting to bind " "port %(port)s on host %(host)s"), {'port': context.current['id'], 'host': context.host}) return False for driver in self.ordered_mech_drivers: if not self._check_driver_to_bind(driver, segments_to_bind, context._binding_levels): continue try: context._prepare_to_bind(segments_to_bind) driver.obj.bind_port(context) segment = context._new_bound_segment if segment: context._push_binding_level( models.PortBindingLevel(port_id=port_id, host=context.host, level=level, driver=driver.name, segment_id=segment)) next_segments = context._next_segments_to_bind if next_segments: # Continue binding another level. if self._bind_port_level(context, level + 1, next_segments): return True else: LOG.warning(_LW("Failed to bind port %(port)s on " "host %(host)s at level %(lvl)s"), {'port': context.current['id'], 'host': context.host, 'lvl': level + 1}) context._pop_binding_level() else: # Binding complete. LOG.debug("Bound port: %(port)s, " "host: %(host)s, " "vif_type: %(vif_type)s, " "vif_details: %(vif_details)s, " "binding_levels: %(binding_levels)s", {'port': port_id, 'host': context.host, 'vif_type': binding.vif_type, 'vif_details': binding.vif_details, 'binding_levels': context.binding_levels}) return True except Exception: LOG.exception(_LE("Mechanism driver %s failed in " "bind_port"), driver.name) def _check_driver_to_bind(self, driver, segments_to_bind, binding_levels): # To prevent a possible binding loop, don't try to bind with # this driver if the same driver has already bound at a higher # level to one of the segments we are currently trying to # bind. Note that it is OK for the same driver to bind at # multiple levels using different segments. for level in binding_levels: if (level.driver == driver and level.segment_id in segments_to_bind): return False return True def get_workers(self): workers = [] for driver in self.ordered_mech_drivers: workers += driver.obj.get_workers() return workers class ExtensionManager(stevedore.named.NamedExtensionManager): """Manage extension drivers using drivers.""" def __init__(self): # Ordered list of extension drivers, defining # the order in which the drivers are called. 
self.ordered_ext_drivers = [] LOG.info(_LI("Configured extension driver names: %s"), cfg.CONF.ml2.extension_drivers) super(ExtensionManager, self).__init__('neutron.ml2.extension_drivers', cfg.CONF.ml2.extension_drivers, invoke_on_load=True, name_order=True) LOG.info(_LI("Loaded extension driver names: %s"), self.names()) self._register_drivers() def _register_drivers(self): """Register all extension drivers. This method should only be called once in the ExtensionManager constructor. """ for ext in self: self.ordered_ext_drivers.append(ext) LOG.info(_LI("Registered extension drivers: %s"), [driver.name for driver in self.ordered_ext_drivers]) def initialize(self): # Initialize each driver in the list. for driver in self.ordered_ext_drivers: LOG.info(_LI("Initializing extension driver '%s'"), driver.name) driver.obj.initialize() def extension_aliases(self): exts = [] for driver in self.ordered_ext_drivers: alias = driver.obj.extension_alias if alias: exts.append(alias) LOG.info(_LI("Got %(alias)s extension from driver '%(drv)s'"), {'alias': alias, 'drv': driver.name}) return exts def _call_on_ext_drivers(self, method_name, plugin_context, data, result): """Helper method for calling a method across all extension drivers.""" for driver in self.ordered_ext_drivers: try: getattr(driver.obj, method_name)(plugin_context, data, result) except Exception: with excutils.save_and_reraise_exception(): LOG.info(_LI("Extension driver '%(name)s' failed in " "%(method)s"), {'name': driver.name, 'method': method_name}) def process_create_network(self, plugin_context, data, result): """Notify all extension drivers during network creation.""" self._call_on_ext_drivers("process_create_network", plugin_context, data, result) def process_update_network(self, plugin_context, data, result): """Notify all extension drivers during network update.""" self._call_on_ext_drivers("process_update_network", plugin_context, data, result) def process_create_subnet(self, plugin_context, data, result): """Notify all extension drivers during subnet creation.""" self._call_on_ext_drivers("process_create_subnet", plugin_context, data, result) def process_update_subnet(self, plugin_context, data, result): """Notify all extension drivers during subnet update.""" self._call_on_ext_drivers("process_update_subnet", plugin_context, data, result) def process_create_port(self, plugin_context, data, result): """Notify all extension drivers during port creation.""" self._call_on_ext_drivers("process_create_port", plugin_context, data, result) def process_update_port(self, plugin_context, data, result): """Notify all extension drivers during port update.""" self._call_on_ext_drivers("process_update_port", plugin_context, data, result) def _call_on_dict_driver(self, method_name, session, base_model, result): for driver in self.ordered_ext_drivers: try: getattr(driver.obj, method_name)(session, base_model, result) except Exception: LOG.error(_LE("Extension driver '%(name)s' failed in " "%(method)s"), {'name': driver.name, 'method': method_name}) raise ml2_exc.ExtensionDriverError(driver=driver.name) def extend_network_dict(self, session, base_model, result): """Notify all extension drivers to extend network dictionary.""" self._call_on_dict_driver("extend_network_dict", session, base_model, result) def extend_subnet_dict(self, session, base_model, result): """Notify all extension drivers to extend subnet dictionary.""" self._call_on_dict_driver("extend_subnet_dict", session, base_model, result) def extend_port_dict(self, session, base_model, 
result): """Notify all extension drivers to extend port dictionary.""" self._call_on_dict_driver("extend_port_dict", session, base_model, result) neutron-8.4.0/neutron/plugins/ml2/drivers/0000775000567000056710000000000013044373210021676 5ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/plugins/ml2/drivers/helpers.py0000664000567000056710000001445013044372760023727 0ustar jenkinsjenkins00000000000000# Copyright (c) 2014 Thales Services SAS # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import random from oslo_config import cfg from oslo_db import exception as db_exc from oslo_log import log from neutron._i18n import _LE from neutron.common import exceptions as exc from neutron.common import utils from neutron.plugins.common import utils as p_utils from neutron.plugins.ml2 import driver_api as api LOG = log.getLogger(__name__) IDPOOL_SELECT_SIZE = 100 class BaseTypeDriver(api.TypeDriver): """BaseTypeDriver for functions common to segment and flat type drivers.""" def __init__(self): try: self.physnet_mtus = utils.parse_mappings( cfg.CONF.ml2.physical_network_mtus, unique_values=False ) except Exception as e: LOG.error(_LE("Failed to parse physical_network_mtus: %s"), e) self.physnet_mtus = [] def get_mtu(self, physical_network=None): return p_utils.get_deployment_physnet_mtu() class SegmentTypeDriver(BaseTypeDriver): """SegmentTypeDriver for segment allocation. Provide helper methods to perform fully or partially specified segment allocation. """ def __init__(self, model): super(SegmentTypeDriver, self).__init__() self.model = model self.primary_keys = set(dict(model.__table__.columns)) self.primary_keys.remove("allocated") def allocate_fully_specified_segment(self, session, **raw_segment): """Allocate segment fully specified by raw_segment. If the segment exists, try to allocate it and return the db object. If the segment does not exist, try to create it and return the db object. If allocation/creation fails, return None. """ network_type = self.get_type() try: with session.begin(subtransactions=True): alloc = (session.query(self.model).filter_by(**raw_segment). first()) if alloc: if alloc.allocated: # Segment already allocated return else: # Segment not allocated LOG.debug("%(type)s segment %(segment)s allocate " "started ", {"type": network_type, "segment": raw_segment}) count = (session.query(self.model). filter_by(allocated=False, **raw_segment).
update({"allocated": True})) if count: LOG.debug("%(type)s segment %(segment)s allocate " "done ", {"type": network_type, "segment": raw_segment}) return alloc # Segment allocated or deleted since select LOG.debug("%(type)s segment %(segment)s allocate " "failed: segment has been allocated or " "deleted", {"type": network_type, "segment": raw_segment}) # Segment to create or already allocated LOG.debug("%(type)s segment %(segment)s create started", {"type": network_type, "segment": raw_segment}) alloc = self.model(allocated=True, **raw_segment) alloc.save(session) LOG.debug("%(type)s segment %(segment)s create done", {"type": network_type, "segment": raw_segment}) except db_exc.DBDuplicateEntry: # Segment already allocated (insert failure) alloc = None LOG.debug("%(type)s segment %(segment)s create failed", {"type": network_type, "segment": raw_segment}) return alloc def allocate_partially_specified_segment(self, session, **filters): """Allocate model segment from pool partially specified by filters. Return allocated db object or None. """ network_type = self.get_type() with session.begin(subtransactions=True): select = (session.query(self.model). filter_by(allocated=False, **filters)) # Selected segment can be allocated before update by someone else, allocs = select.limit(IDPOOL_SELECT_SIZE).all() if not allocs: # No resource available return alloc = random.choice(allocs) raw_segment = dict((k, alloc[k]) for k in self.primary_keys) LOG.debug("%(type)s segment allocate from pool " "started with %(segment)s ", {"type": network_type, "segment": raw_segment}) count = (session.query(self.model). filter_by(allocated=False, **raw_segment). update({"allocated": True})) if count: LOG.debug("%(type)s segment allocate from pool " "success with %(segment)s ", {"type": network_type, "segment": raw_segment}) return alloc # Segment allocated since select LOG.debug("Allocate %(type)s segment from pool " "failed with segment %(segment)s", {"type": network_type, "segment": raw_segment}) # saving real exception in case we exceeded amount of attempts raise db_exc.RetryRequest( exc.NoNetworkFoundInMaximumAllowedAttempts()) neutron-8.4.0/neutron/plugins/ml2/drivers/mech_sriov/0000775000567000056710000000000013044373210024034 5ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/plugins/ml2/drivers/mech_sriov/__init__.py0000664000567000056710000000000013044372736026147 0ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/plugins/ml2/drivers/mech_sriov/mech_driver/0000775000567000056710000000000013044373210026323 5ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/plugins/ml2/drivers/mech_sriov/mech_driver/mech_driver.py0000664000567000056710000002147113044372760031202 0ustar jenkinsjenkins00000000000000# Copyright 2014 Mellanox Technologies, Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
from oslo_config import cfg from oslo_log import log from neutron._i18n import _, _LE, _LW from neutron.common import constants from neutron.extensions import portbindings from neutron.plugins.common import constants as p_const from neutron.plugins.ml2 import driver_api as api from neutron.plugins.ml2.drivers.mech_sriov.mech_driver \ import exceptions as exc from neutron.services.qos import qos_consts LOG = log.getLogger(__name__) VIF_TYPE_HW_VEB = 'hw_veb' VIF_TYPE_HOSTDEV_PHY = 'hostdev_physical' FLAT_VLAN = 0 sriov_opts = [ cfg.ListOpt('supported_pci_vendor_devs', default=['15b3:1004', '8086:10ca'], help=_("Comma-separated list of supported PCI vendor devices, " "as defined by vendor_id:product_id according to the " "PCI ID Repository. Default enables support for Intel " "and Mellanox SR-IOV capable NICs.")), ] cfg.CONF.register_opts(sriov_opts, "ml2_sriov") class SriovNicSwitchMechanismDriver(api.MechanismDriver): """Mechanism driver for SR-IOV capable NIC based switching. The SriovNicSwitchMechanismDriver integrates the ml2 plugin with the sriovNicSwitch L2 agent, depending on configuration options. Port binding with this driver may require the sriovNicSwitch agent to be running on the port's host, and that agent to have connectivity to at least one segment of the port's network. The L2 agent is not essential for port binding, since binding is handled by the VIF driver via the libvirt domain XML; the agent is present in order to manage port update events. """ supported_qos_rule_types = [qos_consts.RULE_TYPE_BANDWIDTH_LIMIT] def __init__(self, agent_type=constants.AGENT_TYPE_NIC_SWITCH, vif_details={portbindings.CAP_PORT_FILTER: False}, supported_vnic_types=[portbindings.VNIC_DIRECT, portbindings.VNIC_MACVTAP, portbindings.VNIC_DIRECT_PHYSICAL], supported_pci_vendor_info=None): """Initialize base class for SriovNicSwitch L2 agent type.
:param agent_type: Constant identifying agent type in agents_db :param vif_details: Dictionary with details for VIF driver when bound :param supported_vnic_types: The binding:vnic_type values we can bind :param supported_pci_vendor_info: The pci_vendor_info values to bind """ self.agent_type = agent_type self.supported_vnic_types = supported_vnic_types # NOTE(ndipanov): PF passthrough requires a different vif type self.vnic_type_for_vif_type = ( {vtype: VIF_TYPE_HOSTDEV_PHY if vtype == portbindings.VNIC_DIRECT_PHYSICAL else VIF_TYPE_HW_VEB for vtype in self.supported_vnic_types}) self.vif_details = vif_details self.supported_network_types = (p_const.TYPE_VLAN, p_const.TYPE_FLAT) def initialize(self): try: self.pci_vendor_info = cfg.CONF.ml2_sriov.supported_pci_vendor_devs self._check_pci_vendor_config(self.pci_vendor_info) except ValueError: LOG.exception(_LE("Failed to parse supported PCI vendor devices")) raise cfg.Error(_("Parsing supported pci_vendor_devs failed")) def bind_port(self, context): LOG.debug("Attempting to bind port %(port)s on " "network %(network)s", {'port': context.current['id'], 'network': context.network.current['id']}) vnic_type = context.current.get(portbindings.VNIC_TYPE, portbindings.VNIC_NORMAL) if vnic_type not in self.supported_vnic_types: LOG.debug("Refusing to bind due to unsupported vnic_type: %s", vnic_type) return vif_type = self.vnic_type_for_vif_type.get(vnic_type, VIF_TYPE_HW_VEB) if not self._check_supported_pci_vendor_device(context): LOG.debug("Refusing to bind due to unsupported pci_vendor device") return if vnic_type == portbindings.VNIC_DIRECT_PHYSICAL: # Physical functions don't support things like QoS properties, # spoof checking, etc. so we might as well side-step the agent # for now. The agent also doesn't currently recognize non-VF # PCI devices so we won't get port status change updates # either. This should be changed in the future so physical # functions can use device mapping checks and the plugin can # get port status updates. self.try_to_bind(context, None, vif_type) return for agent in context.host_agents(self.agent_type): LOG.debug("Checking agent: %s", agent) if agent['alive']: if self.try_to_bind(context, agent, vif_type): return else: LOG.warning(_LW("Attempting to bind with dead agent: %s"), agent) def try_to_bind(self, context, agent, vif_type): for segment in context.segments_to_bind: if self.check_segment(segment, agent): port_status = (constants.PORT_STATUS_ACTIVE if agent is None else constants.PORT_STATUS_DOWN) context.set_binding(segment[api.ID], vif_type, self._get_vif_details(segment), port_status) LOG.debug("Bound using segment: %s", segment) return True return False def check_segment(self, segment, agent=None): """Check if segment can be bound. 
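A segment is bindable when its network_type is one of the driver's supported network types and, if an agent is supplied, its physical_network appears in the agent's device_mappings.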
:param segment: segment dictionary describing segment to bind :param agent: agents_db entry describing agent to bind or None :returns: True if segment can be bound for agent """ network_type = segment[api.NETWORK_TYPE] if network_type in self.supported_network_types: if agent: mappings = agent['configurations'].get('device_mappings', {}) LOG.debug("Checking segment: %(segment)s " "for mappings: %(mappings)s ", {'segment': segment, 'mappings': mappings}) return segment[api.PHYSICAL_NETWORK] in mappings return True return False def check_vlan_transparency(self, context): """SR-IOV driver vlan transparency support.""" return True def _check_supported_pci_vendor_device(self, context): if self.pci_vendor_info: profile = context.current.get(portbindings.PROFILE, {}) if not profile: LOG.debug("Missing profile in port binding") return False pci_vendor_info = profile.get('pci_vendor_info') if not pci_vendor_info: LOG.debug("Missing pci vendor info in profile") return False if pci_vendor_info not in self.pci_vendor_info: LOG.debug("Unsupported pci_vendor %s", pci_vendor_info) return False return True return False def _get_vif_details(self, segment): network_type = segment[api.NETWORK_TYPE] if network_type == p_const.TYPE_FLAT: vlan_id = FLAT_VLAN elif network_type == p_const.TYPE_VLAN: vlan_id = segment[api.SEGMENTATION_ID] else: raise exc.SriovUnsupportedNetworkType(net_type=network_type) vif_details = self.vif_details.copy() vif_details[portbindings.VIF_DETAILS_VLAN] = str(vlan_id) return vif_details @staticmethod def _check_pci_vendor_config(pci_vendor_list): for pci_vendor_info in pci_vendor_list: try: vendor_id, product_id = [ item.strip() for item in pci_vendor_info.split(':') if item.strip()] except ValueError: raise ValueError(_('Incorrect pci_vendor_info: "%s", should be' ' pair vendor_id:product_id') % pci_vendor_info) neutron-8.4.0/neutron/plugins/ml2/drivers/mech_sriov/mech_driver/__init__.py0000664000567000056710000000000013044372736030436 0ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/plugins/ml2/drivers/mech_sriov/mech_driver/exceptions.py0000664000567000056710000000165313044372760031074 0ustar jenkinsjenkins00000000000000# Copyright (c) 2014 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Exceptions used by SRIOV Mechanism Driver.""" from neutron._i18n import _ from neutron.common import exceptions class SriovUnsupportedNetworkType(exceptions.NeutronException): """Method was invoked for unsupported network type.""" message = _("Unsupported network type %(net_type)s.") neutron-8.4.0/neutron/plugins/ml2/drivers/mech_sriov/agent/0000775000567000056710000000000013044373210025132 5ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/plugins/ml2/drivers/mech_sriov/agent/eswitch_manager.py0000664000567000056710000003604513044372760030665 0ustar jenkinsjenkins00000000000000# Copyright 2014 Mellanox Technologies, Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import os import re from oslo_log import log as logging import six from neutron._i18n import _, _LE, _LW from neutron.common import utils from neutron.plugins.ml2.drivers.mech_sriov.agent.common \ import exceptions as exc from neutron.plugins.ml2.drivers.mech_sriov.agent import pci_lib LOG = logging.getLogger(__name__) class PciOsWrapper(object): """OS wrapper for checking virtual functions""" DEVICE_PATH = "/sys/class/net/%s/device" PCI_PATH = "/sys/class/net/%s/device/virtfn%s/net" VIRTFN_FORMAT = r"^virtfn(?P<vf_index>\d+)" VIRTFN_REG_EX = re.compile(VIRTFN_FORMAT) @classmethod def scan_vf_devices(cls, dev_name): """Scan os directories to get VF devices @param dev_name: pf network device name @return: list of virtual functions """ vf_list = [] dev_path = cls.DEVICE_PATH % dev_name if not os.path.isdir(dev_path): LOG.error(_LE("Failed to get devices for %s"), dev_name) raise exc.InvalidDeviceError(dev_name=dev_name, reason=_("Device not found")) file_list = os.listdir(dev_path) for file_name in file_list: pattern_match = cls.VIRTFN_REG_EX.match(file_name) if pattern_match: vf_index = int(pattern_match.group("vf_index")) file_path = os.path.join(dev_path, file_name) if os.path.islink(file_path): file_link = os.readlink(file_path) pci_slot = os.path.basename(file_link) vf_list.append((pci_slot, vf_index)) return vf_list @classmethod def is_assigned_vf(cls, dev_name, vf_index, ip_link_show_output): """Check if VF is assigned. Checks if a given vf index of a given device name is assigned by checking the relevant path in the system: VF is assigned if: Direct VF: PCI_PATH does not exist. Macvtap VF: macvtap@<vf interface> interface exists in ip link show @param dev_name: pf network device name @param vf_index: vf index @param ip_link_show_output: 'ip link show' output """ path = cls.PCI_PATH % (dev_name, vf_index) try: ifname_list = os.listdir(path) except OSError: # PCI_PATH does not exist, meaning the VF is directly assigned return True # Note(moshele) kernel < 3.13 doesn't create symbolic link # for macvtap interface. Therefore we work around it # by parsing ip link show and checking if macvtap interface exists for ifname in ifname_list: if pci_lib.PciDeviceIPWrapper.is_macvtap_assigned( ifname, ip_link_show_output): return True return False class EmbSwitch(object): """Class to manage logical embedded switch entity. An Embedded Switch object is a logical entity representing all VFs connected to the same physical network. Each physical network is mapped to a PF network device interface, covering all of its VFs except the devices in the exclude_devices list. @ivar pci_slot_map: dictionary for mapping each pci slot to vf index @ivar pci_dev_wrapper: pci device wrapper """ def __init__(self, phys_net, dev_name, exclude_devices): """Constructor @param phys_net: physical network @param dev_name: network device name @param exclude_devices: list of pci slots to exclude """ self.phys_net = phys_net self.dev_name = dev_name self.pci_slot_map = {} self.pci_dev_wrapper = pci_lib.PciDeviceIPWrapper(dev_name) self._load_devices(exclude_devices) def _load_devices(self, exclude_devices): """Load devices from driver and filter if needed.
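VFs whose PCI slot appears in exclude_devices are omitted from pci_slot_map.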
@param exclude_devices: excluded devices mapping device_name: pci slots """ scanned_pci_list = PciOsWrapper.scan_vf_devices(self.dev_name) for pci_slot, vf_index in scanned_pci_list: if pci_slot not in exclude_devices: self.pci_slot_map[pci_slot] = vf_index def get_pci_slot_list(self): """Get list of VF addresses.""" return self.pci_slot_map.keys() def get_assigned_devices_info(self): """Get assigned Virtual Functions mac and pci slot information and populates vf_to_pci_slot mappings @return: list of VF pair (mac address, pci slot) """ vf_to_pci_slot_mapping = {} assigned_devices_info = [] ls = self.pci_dev_wrapper.link_show() for pci_slot, vf_index in self.pci_slot_map.items(): if not PciOsWrapper.is_assigned_vf(self.dev_name, vf_index, ls): continue vf_to_pci_slot_mapping[vf_index] = pci_slot if vf_to_pci_slot_mapping: vf_to_mac_mapping = self.pci_dev_wrapper.get_assigned_macs( list(vf_to_pci_slot_mapping.keys())) for vf_index, mac in vf_to_mac_mapping.items(): pci_slot = vf_to_pci_slot_mapping[vf_index] assigned_devices_info.append((mac, pci_slot)) return assigned_devices_info def get_device_state(self, pci_slot): """Get device state. @param pci_slot: Virtual Function address """ vf_index = self._get_vf_index(pci_slot) return self.pci_dev_wrapper.get_vf_state(vf_index) def set_device_state(self, pci_slot, state): """Set device state. @param pci_slot: Virtual Function address @param state: link state """ vf_index = self._get_vf_index(pci_slot) return self.pci_dev_wrapper.set_vf_state(vf_index, state) def set_device_max_rate(self, pci_slot, max_kbps): """Set device max rate. @param pci_slot: Virtual Function address @param max_kbps: device max rate in kbps """ vf_index = self._get_vf_index(pci_slot) #(Note): ip link set max rate in Mbps therefore #we need to convert the max_kbps to Mbps. #Zero means to disable the rate so the lowest rate #available is 1Mbps. 
Floating numbers are not allowed if max_kbps > 0 and max_kbps < 1000: max_mbps = 1 else: max_mbps = utils.round_val(max_kbps / 1000.0) log_dict = { 'max_rate': max_mbps, 'max_kbps': max_kbps, 'vf_index': vf_index } if max_kbps % 1000 != 0: LOG.debug("Maximum rate for SR-IOV ports is counted in Mbps; " "setting %(max_rate)s Mbps limit for port %(vf_index)s " "instead of %(max_kbps)s kbps", log_dict) else: LOG.debug("Setting %(max_rate)s Mbps limit for port %(vf_index)s", log_dict) return self.pci_dev_wrapper.set_vf_max_rate(vf_index, max_mbps) def _get_vf_index(self, pci_slot): vf_index = self.pci_slot_map.get(pci_slot) if vf_index is None: LOG.warning(_LW("Cannot find vf index for pci slot %s"), pci_slot) raise exc.InvalidPciSlotError(pci_slot=pci_slot) return vf_index def set_device_spoofcheck(self, pci_slot, enabled): """Set device spoofchecking @param pci_slot: Virtual Function address @param enabled: True to enable spoofcheck, False to disable """ vf_index = self.pci_slot_map.get(pci_slot) if vf_index is None: raise exc.InvalidPciSlotError(pci_slot=pci_slot) return self.pci_dev_wrapper.set_vf_spoofcheck(vf_index, enabled) def get_pci_device(self, pci_slot): """Get mac address for given Virtual Function address @param pci_slot: pci slot @return: MAC address of virtual function """ vf_index = self.pci_slot_map.get(pci_slot) mac = None if vf_index is not None: ls = pci_lib.PciDeviceIPWrapper.link_show() if PciOsWrapper.is_assigned_vf(self.dev_name, vf_index, ls): macs = self.pci_dev_wrapper.get_assigned_macs([vf_index]) mac = macs.get(vf_index) return mac class ESwitchManager(object): """Manages logical Embedded Switch entities for physical network.""" def __new__(cls): # make it a singleton if not hasattr(cls, '_instance'): cls._instance = super(ESwitchManager, cls).__new__(cls) cls.emb_switches_map = {} cls.pci_slot_map = {} return cls._instance def device_exists(self, device_mac, pci_slot): """Verify if device exists. Check if a device mac exists and matches the given VF pci slot @param device_mac: device mac @param pci_slot: VF address """ embedded_switch = self._get_emb_eswitch(device_mac, pci_slot) if embedded_switch: return True return False def get_assigned_devices_info(self, phys_net=None): """Get all assigned devices. Get all assigned devices belongs to given embedded switch @param phys_net: physical network, if none get all assigned devices @return: set of assigned VFs (mac address, pci slot) pair """ if phys_net: eswitch_objects = self.emb_switches_map.get(phys_net, set()) else: eswitch_objects = set() for eswitch_list in self.emb_switches_map.values(): eswitch_objects |= set(eswitch_list) assigned_devices = set() for embedded_switch in eswitch_objects: for device in embedded_switch.get_assigned_devices_info(): assigned_devices.add(device) return assigned_devices def get_device_state(self, device_mac, pci_slot): """Get device state. 
Get the device state (up/True or down/False) @param device_mac: device mac @param pci_slot: VF PCI slot @return: device state (True/False) None if failed """ embedded_switch = self._get_emb_eswitch(device_mac, pci_slot) if embedded_switch: return embedded_switch.get_device_state(pci_slot) return False def set_device_max_rate(self, device_mac, pci_slot, max_kbps): """Set device max rate Sets the device max rate in kbps @param device_mac: device mac @param pci_slot: pci slot @param max_kbps: device max rate in kbps """ embedded_switch = self._get_emb_eswitch(device_mac, pci_slot) if embedded_switch: embedded_switch.set_device_max_rate(pci_slot, max_kbps) def set_device_state(self, device_mac, pci_slot, admin_state_up): """Set device state Sets the device state (up or down) @param device_mac: device mac @param pci_slot: pci slot @param admin_state_up: device admin state True/False """ embedded_switch = self._get_emb_eswitch(device_mac, pci_slot) if embedded_switch: embedded_switch.set_device_state(pci_slot, admin_state_up) def set_device_spoofcheck(self, device_mac, pci_slot, enabled): """Set device spoofcheck Sets device spoofchecking (enabled or disabled) @param device_mac: device mac @param pci_slot: pci slot @param enabled: device spoofchecking """ embedded_switch = self._get_emb_eswitch(device_mac, pci_slot) if embedded_switch: embedded_switch.set_device_spoofcheck(pci_slot, enabled) def discover_devices(self, device_mappings, exclude_devices): """Discover which Virtual functions to manage. Discover devices, and create embedded switch object for network device @param device_mappings: device mapping physical_network:device_name @param exclude_devices: excluded devices mapping device_name: pci slots """ if exclude_devices is None: exclude_devices = {} for phys_net, dev_names in six.iteritems(device_mappings): for dev_name in dev_names: self._create_emb_switch(phys_net, dev_name, exclude_devices.get(dev_name, set())) def _create_emb_switch(self, phys_net, dev_name, exclude_devices): embedded_switch = EmbSwitch(phys_net, dev_name, exclude_devices) self.emb_switches_map.setdefault(phys_net, []).append(embedded_switch) for pci_slot in embedded_switch.get_pci_slot_list(): self.pci_slot_map[pci_slot] = embedded_switch def _get_emb_eswitch(self, device_mac, pci_slot): """Get embedded switch. Get embedded switch by pci slot and validate pci has device mac @param device_mac: device mac @param pci_slot: pci slot """ embedded_switch = self.pci_slot_map.get(pci_slot) if embedded_switch: used_device_mac = embedded_switch.get_pci_device(pci_slot) if used_device_mac != device_mac: LOG.warning(_LW("device pci mismatch: %(device_mac)s " "- %(pci_slot)s"), {"device_mac": device_mac, "pci_slot": pci_slot}) embedded_switch = None return embedded_switch def clear_max_rate(self, pci_slot): """Clear the max rate Clear the max rate configuration from VF by setting it to 0 @param pci_slot: VF PCI slot """ #(Note): we don't use the self._get_emb_eswitch here, because when #clearing the VF it may be not assigned. This happens when libvirt #releases the VF back to the hypervisor on delete VM. Therefore we #should just clear the VF max rate according to pci_slot no matter #if VF is assigned or not. embedded_switch = self.pci_slot_map.get(pci_slot) if embedded_switch: #(Note): check the pci_slot is not assigned to some # other port before resetting the max rate. 
if embedded_switch.get_pci_device(pci_slot) is None: embedded_switch.set_device_max_rate(pci_slot, 0) else: LOG.warning(_LW("VF with PCI slot %(pci_slot)s is already " "assigned; skipping reset maximum rate"), {'pci_slot': pci_slot}) else: LOG.error(_LE("PCI slot %(pci_slot)s has no mapping to Embedded " "Switch; skipping"), {'pci_slot': pci_slot}) neutron-8.4.0/neutron/plugins/ml2/drivers/mech_sriov/agent/__init__.py0000664000567000056710000000000013044372736027245 0ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/plugins/ml2/drivers/mech_sriov/agent/sriov_nic_agent.py0000664000567000056710000004715213044372760030677 0ustar jenkinsjenkins00000000000000# Copyright 2014 Mellanox Technologies, Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import collections import itertools import socket import sys import time from oslo_config import cfg from oslo_log import log as logging import oslo_messaging from oslo_service import loopingcall import six from neutron._i18n import _, _LE, _LI, _LW from neutron.agent.l2.extensions import manager as ext_manager from neutron.agent import rpc as agent_rpc from neutron.agent import securitygroups_rpc as sg_rpc from neutron.api.rpc.callbacks import resources from neutron.common import config as common_config from neutron.common import constants as n_constants from neutron.common import topics from neutron.common import utils as n_utils from neutron import context from neutron.extensions import portbindings from neutron.plugins.ml2.drivers.mech_sriov.agent.common import config from neutron.plugins.ml2.drivers.mech_sriov.agent.common \ import exceptions as exc from neutron.plugins.ml2.drivers.mech_sriov.agent import eswitch_manager as esm LOG = logging.getLogger(__name__) class SriovNicSwitchRpcCallbacks(sg_rpc.SecurityGroupAgentRpcCallbackMixin): # Set RPC API version to 1.0 by default. # history # 1.1 Support Security Group RPC (works with NoopFirewallDriver) # 1.2 Support DVR (Distributed Virtual Router) RPC (not supported) # 1.3 Added param devices_to_update to security_groups_provider_updated # (works with NoopFirewallDriver) # 1.4 Added support for network_update target = oslo_messaging.Target(version='1.4') def __init__(self, context, agent, sg_agent): super(SriovNicSwitchRpcCallbacks, self).__init__() self.context = context self.agent = agent self.sg_agent = sg_agent def port_update(self, context, **kwargs): LOG.debug("port_update received") port = kwargs.get('port') vnic_type = port.get(portbindings.VNIC_TYPE) if vnic_type and vnic_type == portbindings.VNIC_DIRECT_PHYSICAL: LOG.debug("The SR-IOV agent doesn't handle %s ports.", portbindings.VNIC_DIRECT_PHYSICAL) return # Put the port mac address in the updated_devices set. # Do not store port details, as if they're used for processing # notifications there is no guarantee the notifications are # processed in the same order as the relevant API requests. 
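# Illustrative sketch (not from the original source): because updated_devices
# is a set of (mac, pci_slot) tuples, duplicate notifications for the same VF
# collapse into one entry, and the daemon loop later swaps the whole set out
# atomically.  For example:
#
#     updated = set()
#     updated.add(('fa:16:3e:aa:bb:cc', '0000:05:10.1'))
#     updated.add(('fa:16:3e:aa:bb:cc', '0000:05:10.1'))  # duplicate, no-op
#     assert len(updated) == 1
#
# (MAC and PCI address values here are hypothetical.)  The code below builds
# exactly that (mac, pci_slot) pair from the incoming port dict.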
mac = port['mac_address'] pci_slot = None if port.get(portbindings.PROFILE): pci_slot = port[portbindings.PROFILE].get('pci_slot') if pci_slot: self.agent.updated_devices.add((mac, pci_slot)) LOG.debug("port_update RPC received for port: %(id)s with MAC " "%(mac)s and PCI slot %(pci_slot)s slot", {'id': port['id'], 'mac': mac, 'pci_slot': pci_slot}) else: LOG.debug("No PCI Slot for port %(id)s with MAC %(mac)s; " "skipping", {'id': port['id'], 'mac': mac, 'pci_slot': pci_slot}) def network_update(self, context, **kwargs): network_id = kwargs['network']['id'] LOG.debug("network_update message received for network " "%(network_id)s, with ports: %(ports)s", {'network_id': network_id, 'ports': self.agent.network_ports[network_id]}) for port_data in self.agent.network_ports[network_id]: self.agent.updated_devices.add(port_data['device']) class SriovNicSwitchAgent(object): def __init__(self, physical_devices_mappings, exclude_devices, polling_interval): self.polling_interval = polling_interval self.network_ports = collections.defaultdict(list) self.conf = cfg.CONF self.setup_eswitch_mgr(physical_devices_mappings, exclude_devices) # Stores port update notifications for processing in the main loop self.updated_devices = set() self.context = context.get_admin_context_without_session() self.plugin_rpc = agent_rpc.PluginApi(topics.PLUGIN) self.sg_plugin_rpc = sg_rpc.SecurityGroupServerRpcApi(topics.PLUGIN) self.sg_agent = sg_rpc.SecurityGroupAgentRpc(self.context, self.sg_plugin_rpc) self._setup_rpc() self.ext_manager = self._create_agent_extension_manager( self.connection) configurations = {'device_mappings': physical_devices_mappings, 'extensions': self.ext_manager.names()} #TODO(mangelajo): optimize resource_versions (see ovs agent) self.agent_state = { 'binary': 'neutron-sriov-nic-agent', 'host': self.conf.host, 'topic': n_constants.L2_AGENT_TOPIC, 'configurations': configurations, 'agent_type': n_constants.AGENT_TYPE_NIC_SWITCH, 'resource_versions': resources.LOCAL_RESOURCE_VERSIONS, 'start_flag': True} # The initialization is complete; we can start receiving messages self.connection.consume_in_threads() # Initialize iteration counter self.iter_num = 0 def _setup_rpc(self): self.agent_id = 'nic-switch-agent.%s' % socket.gethostname() LOG.info(_LI("RPC agent_id: %s"), self.agent_id) self.topic = topics.AGENT self.state_rpc = agent_rpc.PluginReportStateAPI(topics.REPORTS) # RPC network init # Handle updates from service self.endpoints = [SriovNicSwitchRpcCallbacks(self.context, self, self.sg_agent)] # Define the listening consumers for the agent consumers = [[topics.PORT, topics.UPDATE], [topics.NETWORK, topics.UPDATE], [topics.SECURITY_GROUP, topics.UPDATE]] self.connection = agent_rpc.create_consumers(self.endpoints, self.topic, consumers, start_listening=False) report_interval = cfg.CONF.AGENT.report_interval if report_interval: heartbeat = loopingcall.FixedIntervalLoopingCall( self._report_state) heartbeat.start(interval=report_interval) def _report_state(self): try: self.state_rpc.report_state(self.context, self.agent_state) # we only want to update resource versions on startup self.agent_state.pop('resource_versions', None) self.agent_state.pop('start_flag', None) except Exception: LOG.exception(_LE("Failed reporting state!")) def _create_agent_extension_manager(self, connection): ext_manager.register_opts(self.conf) mgr = ext_manager.AgentExtensionsManager(self.conf) mgr.initialize(connection, 'sriov') return mgr def setup_eswitch_mgr(self, device_mappings, exclude_devices=None): 
exclude_devices = exclude_devices or {} self.eswitch_mgr = esm.ESwitchManager() self.eswitch_mgr.discover_devices(device_mappings, exclude_devices) def scan_devices(self, registered_devices, updated_devices): curr_devices = self.eswitch_mgr.get_assigned_devices_info() self.agent_state.get('configurations')['devices'] = len(curr_devices) device_info = {} device_info['current'] = curr_devices device_info['added'] = curr_devices - registered_devices # we need to clean up after devices are removed device_info['removed'] = registered_devices - curr_devices # we don't want to process updates for devices that don't exist device_info['updated'] = (updated_devices & curr_devices - device_info['removed']) return device_info def _device_info_has_changes(self, device_info): return (device_info.get('added') or device_info.get('updated') or device_info.get('removed')) def process_network_devices(self, device_info): resync_a = False resync_b = False self.sg_agent.prepare_devices_filter(device_info.get('added')) if device_info.get('updated'): self.sg_agent.refresh_firewall() # Updated devices are processed the same as new ones, as their # admin_state_up may have changed. The set union prevents duplicating # work when a device is new and updated in the same polling iteration. devices_added_updated = (set(device_info.get('added')) | set(device_info.get('updated'))) if devices_added_updated: resync_a = self.treat_devices_added_updated(devices_added_updated) if device_info.get('removed'): resync_b = self.treat_devices_removed(device_info['removed']) # If one of the above operations fails => resync with plugin return (resync_a | resync_b) def treat_device(self, device, pci_slot, admin_state_up, spoofcheck=True): if self.eswitch_mgr.device_exists(device, pci_slot): try: self.eswitch_mgr.set_device_spoofcheck(device, pci_slot, spoofcheck) except Exception: LOG.warning(_LW("Failed to set spoofcheck for device %s"), device) LOG.info(_LI("Device %(device)s spoofcheck %(spoofcheck)s"), {"device": device, "spoofcheck": spoofcheck}) try: self.eswitch_mgr.set_device_state(device, pci_slot, admin_state_up) except exc.IpCommandOperationNotSupportedError: LOG.warning(_LW("Device %s does not support state change"), device) except exc.SriovNicError: LOG.warning(_LW("Failed to set device %s state"), device) return if admin_state_up: # update plugin about port status self.plugin_rpc.update_device_up(self.context, device, self.agent_id, cfg.CONF.host) else: self.plugin_rpc.update_device_down(self.context, device, self.agent_id, cfg.CONF.host) else: LOG.info(_LI("No device with MAC %s defined on agent."), device) def _update_network_ports(self, network_id, port_id, mac_pci_slot): self._clean_network_ports(mac_pci_slot) self.network_ports[network_id].append({ "port_id": port_id, "device": mac_pci_slot}) def _clean_network_ports(self, mac_pci_slot): for netid, ports_list in six.iteritems(self.network_ports): for port_data in ports_list: if mac_pci_slot == port_data['device']: ports_list.remove(port_data) if ports_list == []: self.network_ports.pop(netid) return port_data['port_id'] def treat_devices_added_updated(self, devices_info): try: macs_list = set([device_info[0] for device_info in devices_info]) devices_details_list = self.plugin_rpc.get_devices_details_list( self.context, macs_list, self.agent_id) except Exception as e: LOG.debug("Unable to get port details for devices " "with MAC addresses %(devices)s: %(e)s", {'devices': macs_list, 'e': e}) # resync is needed return True for device_details in devices_details_list: 
device = device_details['device'] LOG.debug("Port with MAC address %s is added", device) if 'port_id' in device_details: LOG.info(_LI("Port %(device)s updated. Details: %(details)s"), {'device': device, 'details': device_details}) port_id = device_details['port_id'] profile = device_details['profile'] spoofcheck = device_details.get('port_security_enabled', True) self.treat_device(device, profile.get('pci_slot'), device_details['admin_state_up'], spoofcheck) self._update_network_ports(device_details['network_id'], port_id, (device, profile.get('pci_slot'))) self.ext_manager.handle_port(self.context, device_details) else: LOG.info(_LI("Device with MAC %s not defined on plugin"), device) return False def treat_devices_removed(self, devices): resync = False for device in devices: mac, pci_slot = device LOG.info(_LI("Removing device with MAC address %(mac)s and " "PCI slot %(pci_slot)s"), {'mac': mac, 'pci_slot': pci_slot}) try: port_id = self._clean_network_ports(device) if port_id: port = {'port_id': port_id, 'device': mac, 'profile': {'pci_slot': pci_slot}} self.ext_manager.delete_port(self.context, port) else: LOG.warning(_LW("port_id to device with MAC " "%s not found"), mac) dev_details = self.plugin_rpc.update_device_down(self.context, mac, self.agent_id, cfg.CONF.host) except Exception as e: LOG.debug("Removing port failed for device with MAC address " "%(mac)s and PCI slot %(pci_slot)s due to %(exc)s", {'mac': mac, 'pci_slot': pci_slot, 'exc': e}) resync = True continue if dev_details['exists']: LOG.info(_LI("Port with MAC %(mac)s and PCI slot " "%(pci_slot)s updated."), {'mac': mac, 'pci_slot': pci_slot}) else: LOG.debug("Device with MAC %(mac)s and PCI slot " "%(pci_slot)s not defined on plugin", {'mac': mac, 'pci_slot': pci_slot}) return resync def daemon_loop(self): sync = True devices = set() LOG.info(_LI("SRIOV NIC Agent RPC Daemon Started!")) while True: start = time.time() LOG.debug("Agent rpc_loop - iteration:%d started", self.iter_num) if sync: LOG.info(_LI("Agent out of sync with plugin!")) devices.clear() sync = False device_info = {} # Save updated devices dict to perform rollback in case # resync would be needed, and then clear self.updated_devices. # As the greenthread should not yield between these # two statements, this will should be thread-safe. updated_devices_copy = self.updated_devices self.updated_devices = set() try: device_info = self.scan_devices(devices, updated_devices_copy) if self._device_info_has_changes(device_info): LOG.debug("Agent loop found changes! %s", device_info) # If treat devices fails - indicates must resync with # plugin sync = self.process_network_devices(device_info) devices = device_info['current'] except Exception: LOG.exception(_LE("Error in agent loop. Devices info: %s"), device_info) sync = True # Restore devices that were removed from this set earlier # without overwriting ones that may have arrived since. self.updated_devices |= updated_devices_copy # sleep till end of polling interval elapsed = (time.time() - start) if (elapsed < self.polling_interval): time.sleep(self.polling_interval - elapsed) else: LOG.debug("Loop iteration exceeded interval " "(%(polling_interval)s vs. %(elapsed)s)!", {'polling_interval': self.polling_interval, 'elapsed': elapsed}) self.iter_num = self.iter_num + 1 class SriovNicAgentConfigParser(object): def __init__(self): self.device_mappings = {} self.exclude_devices = {} def parse(self): """Parses device_mappings and exclude_devices. 
Parse and validate the consistency in both mappings """ self.device_mappings = n_utils.parse_mappings( cfg.CONF.SRIOV_NIC.physical_device_mappings, unique_keys=False) self.exclude_devices = config.parse_exclude_devices( cfg.CONF.SRIOV_NIC.exclude_devices) self._validate() def _validate(self): """Validate configuration. Validate that network_device in excluded_device exists in device mappings """ dev_net_set = set(itertools.chain.from_iterable( six.itervalues(self.device_mappings))) for dev_name in self.exclude_devices.keys(): if dev_name not in dev_net_set: raise ValueError(_("Device name %(dev_name)s is missing from " "physical_device_mappings") % {'dev_name': dev_name}) def main(): common_config.init(sys.argv[1:]) common_config.setup_logging() try: config_parser = SriovNicAgentConfigParser() config_parser.parse() device_mappings = config_parser.device_mappings exclude_devices = config_parser.exclude_devices except ValueError: LOG.exception(_LE("Failed on Agent configuration parse. " "Agent terminated!")) raise SystemExit(1) LOG.info(_LI("Physical Devices mappings: %s"), device_mappings) LOG.info(_LI("Exclude Devices: %s"), exclude_devices) polling_interval = cfg.CONF.AGENT.polling_interval try: agent = SriovNicSwitchAgent(device_mappings, exclude_devices, polling_interval) except exc.SriovNicError: LOG.exception(_LE("Agent Initialization Failed")) raise SystemExit(1) # Start everything. LOG.info(_LI("Agent initialized successfully, now running... ")) agent.daemon_loop() neutron-8.4.0/neutron/plugins/ml2/drivers/mech_sriov/agent/common/0000775000567000056710000000000013044373210026422 5ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/plugins/ml2/drivers/mech_sriov/agent/common/config.py0000664000567000056710000000733113044372760030256 0ustar jenkinsjenkins00000000000000# Copyright 2014 Mellanox Technologies, Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
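# Illustrative sketch (not from the original source): given hypothetical
# config values, the parser above would produce structures like:
#
#     physical_device_mappings = "physnet1:ens2f0,physnet1:ens2f1"
#     exclude_devices = "ens2f0:0000:05:10.1;0000:05:10.2"
#
#     device_mappings == {'physnet1': ['ens2f0', 'ens2f1']}
#     exclude_devices == {'ens2f0': {'0000:05:10.1', '0000:05:10.2'}}
#
# _validate() then requires every exclude_devices key ('ens2f0') to appear
# among the mapped network devices; parse_exclude_devices in the module
# below builds the second structure.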
from oslo_config import cfg

from neutron._i18n import _
from neutron.agent.common import config


def parse_exclude_devices(exclude_list):
    """Parse Exclude devices list

    parses excluded device list in the form:
    dev_name:pci_dev_1;pci_dev_2
    @param exclude_list: list of string pairs in "key:value" format
                         the key part represents the network device name
                         the value part is a list of PCI slots separated
                         by ";"
    """
    exclude_mapping = {}
    for dev_mapping in exclude_list:
        try:
            dev_name, exclude_devices = dev_mapping.split(":", 1)
        except ValueError:
            raise ValueError(_("Invalid mapping: '%s'") % dev_mapping)
        dev_name = dev_name.strip()
        if not dev_name:
            raise ValueError(_("Missing key in mapping: '%s'") % dev_mapping)
        if dev_name in exclude_mapping:
            raise ValueError(_("Device %(dev_name)s in mapping: %(mapping)s "
                               "not unique") % {'dev_name': dev_name,
                                                'mapping': dev_mapping})
        exclude_devices_list = exclude_devices.split(";")
        exclude_devices_set = set()
        for dev in exclude_devices_list:
            dev = dev.strip()
            if dev:
                exclude_devices_set.add(dev)
        exclude_mapping[dev_name] = exclude_devices_set
    return exclude_mapping

DEFAULT_DEVICE_MAPPINGS = []

DEFAULT_EXCLUDE_DEVICES = []

agent_opts = [
    cfg.IntOpt('polling_interval', default=2,
               help=_("The number of seconds the agent will wait between "
                      "polling for local device changes.")),
]

sriov_nic_opts = [
    cfg.ListOpt('physical_device_mappings',
                default=DEFAULT_DEVICE_MAPPINGS,
                help=_("Comma-separated list of "
                       "<physical_network>:<network_device> tuples mapping "
                       "physical network names to the agent's node-specific "
                       "physical network device interfaces of SR-IOV physical "
                       "function to be used for VLAN networks. All physical "
                       "networks listed in network_vlan_ranges on the server "
                       "should have mappings to appropriate interfaces on "
                       "each agent.")),
    cfg.ListOpt('exclude_devices',
                default=DEFAULT_EXCLUDE_DEVICES,
                help=_("Comma-separated list of "
                       "<network_device>:<vfs_to_exclude> tuples, mapping "
                       "network_device to the agent's node-specific list of "
                       "virtual functions that should not be used for virtual "
                       "networking. vfs_to_exclude is a semicolon-separated "
                       "list of virtual functions to exclude from "
                       "network_device. The network_device in the mapping "
                       "should appear in the physical_device_mappings "
                       "list.")),
]


cfg.CONF.register_opts(agent_opts, 'AGENT')
cfg.CONF.register_opts(sriov_nic_opts, 'SRIOV_NIC')
config.register_agent_state_opts_helper(cfg.CONF)
neutron-8.4.0/neutron/plugins/ml2/drivers/mech_sriov/agent/common/__init__.py0000664000567000056710000000000013044372736030535 0ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/plugins/ml2/drivers/mech_sriov/agent/common/exceptions.py0000664000567000056710000000234113044372760031166 0ustar jenkinsjenkins00000000000000# Copyright 2014 Mellanox Technologies, Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
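# Illustrative sketch (not from the original source): the options registered
# above correspond to an agent ini file along these lines (device names and
# PCI addresses are hypothetical):
#
#     [AGENT]
#     polling_interval = 2
#
#     [SRIOV_NIC]
#     physical_device_mappings = physnet1:ens2f0
#     exclude_devices = ens2f0:0000:05:10.1;0000:05:10.2
#
# after which they are read as cfg.CONF.AGENT.polling_interval and
# cfg.CONF.SRIOV_NIC.physical_device_mappings, as done by the agent's
# config parser.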
from neutron._i18n import _
from neutron.common import exceptions as n_exc


class SriovNicError(n_exc.NeutronException):
    pass


class InvalidDeviceError(SriovNicError):
    message = _("Invalid Device %(dev_name)s: %(reason)s")


class IpCommandError(SriovNicError):
    message = _("ip command failed: %(reason)s")


class IpCommandOperationNotSupportedError(SriovNicError):
    message = _("Operation not supported on device %(dev_name)s")


class InvalidPciSlotError(SriovNicError):
    message = _("Invalid pci slot %(pci_slot)s")


class IpCommandDeviceError(SriovNicError):
    message = _("ip command failed on device %(dev_name)s: %(reason)s")
neutron-8.4.0/neutron/plugins/ml2/drivers/mech_sriov/agent/pci_lib.py0000664000567000056710000001720613044372760027124 0ustar jenkinsjenkins00000000000000# Copyright 2014 Mellanox Technologies, Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import re

from oslo_log import log as logging

from neutron._i18n import _LE, _LW
from neutron.agent.linux import ip_lib
from neutron.plugins.ml2.drivers.mech_sriov.agent.common \
    import exceptions as exc

LOG = logging.getLogger(__name__)


class PciDeviceIPWrapper(ip_lib.IPWrapper):
    """Wrapper class for ip link commands.

    wrapper for getting/setting pci device details using ip link...
    """
    VF_PATTERN = r"^vf\s+(?P<vf_index>\d+)\s+"
    MAC_PATTERN = r"MAC\s+(?P<mac>[a-fA-F0-9:]+),"
    STATE_PATTERN = r"\s+link-state\s+(?P<state>\w+)"
    ANY_PATTERN = ".*,"
    MACVTAP_PATTERN = r".*macvtap[0-9]+@(?P<vf_interface>[a-zA-Z0-9_]+):"

    VF_LINE_FORMAT = VF_PATTERN + MAC_PATTERN + ANY_PATTERN + STATE_PATTERN
    VF_DETAILS_REG_EX = re.compile(VF_LINE_FORMAT)
    MACVTAP_REG_EX = re.compile(MACVTAP_PATTERN)

    IP_LINK_OP_NOT_SUPPORTED = 'RTNETLINK answers: Operation not supported'

    class LinkState(object):
        ENABLE = "enable"
        DISABLE = "disable"

    def __init__(self, dev_name):
        super(PciDeviceIPWrapper, self).__init__()
        self.dev_name = dev_name

    def _set_feature(self, vf_index, feature, value):
        """Sets vf feature

        Checks if the feature is not supported or there's some
        general error during ip link invocation and raises
        exception accordingly.

        :param vf_index: vf index
        :param feature: name of a feature to be passed to ip link,
                        such as 'state' or 'spoofchk'
        :param value: value of the feature setting
        """
        try:
            self._as_root([], "link", ("set", self.dev_name, "vf",
                                       str(vf_index), feature, value))
        except Exception as e:
            if self.IP_LINK_OP_NOT_SUPPORTED in str(e):
                raise exc.IpCommandOperationNotSupportedError(
                    dev_name=self.dev_name)
            else:
                raise exc.IpCommandDeviceError(dev_name=self.dev_name,
                                               reason=str(e))

    def get_assigned_macs(self, vf_list):
        """Get assigned mac addresses for vf list.
@param vf_list: list of vf indexes @return: dict mapping of vf to mac """ try: out = self._as_root([], "link", ("show", self.dev_name)) except Exception as e: LOG.exception(_LE("Failed executing ip command")) raise exc.IpCommandDeviceError(dev_name=self.dev_name, reason=e) vf_to_mac_mapping = {} vf_lines = self._get_vf_link_show(vf_list, out) if vf_lines: for vf_line in vf_lines: vf_details = self._parse_vf_link_show(vf_line) if vf_details: vf_num = vf_details.get('vf') vf_mac = vf_details.get("MAC") vf_to_mac_mapping[vf_num] = vf_mac return vf_to_mac_mapping def get_vf_state(self, vf_index): """Get vf state {True/False} @param vf_index: vf index @todo: Handle "auto" state """ try: out = self._as_root([], "link", ("show", self.dev_name)) except Exception as e: LOG.exception(_LE("Failed executing ip command")) raise exc.IpCommandDeviceError(dev_name=self.dev_name, reason=e) vf_lines = self._get_vf_link_show([vf_index], out) if vf_lines: vf_details = self._parse_vf_link_show(vf_lines[0]) if vf_details: state = vf_details.get("link-state", self.LinkState.DISABLE) if state != self.LinkState.DISABLE: return True return False def set_vf_state(self, vf_index, state): """sets vf state. @param vf_index: vf index @param state: required state {True/False} """ status_str = self.LinkState.ENABLE if state else \ self.LinkState.DISABLE self._set_feature(vf_index, "state", status_str) def set_vf_spoofcheck(self, vf_index, enabled): """sets vf spoofcheck @param vf_index: vf index @param enabled: True to enable spoof checking, False to disable """ setting = "on" if enabled else "off" self._set_feature(vf_index, "spoofchk", setting) def set_vf_max_rate(self, vf_index, max_tx_rate): """sets vf max rate. @param vf_index: vf index @param max_tx_rate: vf max tx rate in Mbps """ self._set_feature(vf_index, "rate", str(max_tx_rate)) def _get_vf_link_show(self, vf_list, link_show_out): """Get link show output for VFs get vf link show command output filtered by given vf list @param vf_list: list of vf indexes @param link_show_out: link show command output @return: list of output rows regarding given vf_list """ vf_lines = [] for line in link_show_out.split("\n"): line = line.strip() if line.startswith("vf"): details = line.split() index = int(details[1]) if index in vf_list: vf_lines.append(line) if not vf_lines: LOG.warning(_LW("Cannot find vfs %(vfs)s in device %(dev_name)s"), {'vfs': vf_list, 'dev_name': self.dev_name}) return vf_lines def _parse_vf_link_show(self, vf_line): """Parses vf link show command output line. @param vf_line: link show vf line """ vf_details = {} pattern_match = self.VF_DETAILS_REG_EX.match(vf_line) if pattern_match: vf_details["vf"] = int(pattern_match.group("vf_index")) vf_details["MAC"] = pattern_match.group("mac") vf_details["link-state"] = pattern_match.group("state") else: LOG.warning(_LW("failed to parse vf link show line %(line)s: " "for %(device)s"), {'line': vf_line, 'device': self.dev_name}) return vf_details @classmethod def link_show(cls): try: out = cls._execute([], "link", ("show", ), run_as_root=True) except Exception as e: LOG.error(_LE("Failed executing ip command: %s"), e) raise exc.IpCommandError(reason=e) return out @classmethod def is_macvtap_assigned(cls, ifname, ip_link_show_output): """Check if vf has macvtap interface assigned Parses the output of ip link show command and checks if macvtap[0-9]+@ regex matches the output. 
@param ifname: vf interface name @param ip_link_show_output: 'ip link show' result to parse @return: True on match otherwise False """ for line in ip_link_show_output.splitlines(): pattern_match = cls.MACVTAP_REG_EX.match(line) if pattern_match: if ifname == pattern_match.group('vf_interface'): return True return False neutron-8.4.0/neutron/plugins/ml2/drivers/mech_sriov/agent/extension_drivers/0000775000567000056710000000000013044373210030704 5ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/plugins/ml2/drivers/mech_sriov/agent/extension_drivers/qos_driver.py0000664000567000056710000000456413044372760033455 0ustar jenkinsjenkins00000000000000# Copyright 2015 Mellanox Technologies, Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_log import log as logging from neutron._i18n import _LE, _LI from neutron.agent.l2.extensions import qos from neutron.plugins.ml2.drivers.mech_sriov.agent.common import ( exceptions as exc) from neutron.plugins.ml2.drivers.mech_sriov.agent import eswitch_manager as esm from neutron.plugins.ml2.drivers.mech_sriov.mech_driver import ( mech_driver) LOG = logging.getLogger(__name__) class QosSRIOVAgentDriver(qos.QosAgentDriver): SUPPORTED_RULES = ( mech_driver.SriovNicSwitchMechanismDriver.supported_qos_rule_types) def __init__(self): super(QosSRIOVAgentDriver, self).__init__() self.eswitch_mgr = None def initialize(self): self.eswitch_mgr = esm.ESwitchManager() def create_bandwidth_limit(self, port, rule): self.update_bandwidth_limit(port, rule) def update_bandwidth_limit(self, port, rule): pci_slot = port['profile'].get('pci_slot') device = port['device'] self._set_vf_max_rate(device, pci_slot, rule.max_kbps) def delete_bandwidth_limit(self, port): pci_slot = port['profile'].get('pci_slot') if port.get('device_owner') is None: self.eswitch_mgr.clear_max_rate(pci_slot) else: device = port['device'] self._set_vf_max_rate(device, pci_slot) def _set_vf_max_rate(self, device, pci_slot, max_kbps=0): if self.eswitch_mgr.device_exists(device, pci_slot): try: self.eswitch_mgr.set_device_max_rate( device, pci_slot, max_kbps) except exc.SriovNicError: LOG.exception( _LE("Failed to set device %s max rate"), device) else: LOG.info(_LI("No device with MAC %s defined on agent."), device) neutron-8.4.0/neutron/plugins/ml2/drivers/mech_sriov/agent/extension_drivers/__init__.py0000775000567000056710000000000013044372760033017 0ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/plugins/ml2/drivers/mech_agent.py0000664000567000056710000002204113044372760024352 0ustar jenkinsjenkins00000000000000# Copyright (c) 2013 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
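# Illustrative sketch (not from the original source): a bandwidth-limit rule
# applied through the QoS driver above ends up as an 'ip link set ... vf N
# rate M' call via the eswitch manager.  A rough usage sequence, assuming the
# singleton ESwitchManager has already discovered devices and using
# hypothetical port values:
#
#     driver = QosSRIOVAgentDriver()
#     driver.initialize()
#     port = {'device': 'fa:16:3e:aa:bb:cc',
#             'profile': {'pci_slot': '0000:05:10.1'}}
#     driver.update_bandwidth_limit(port, rule)  # rule.max_kbps, e.g. 3000,
#                                                # becomes a 3 Mbps VF cap
#
# where 'rule' is a bandwidth-limit rule object exposing max_kbps.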
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import abc from oslo_log import log import six from neutron._i18n import _LW from neutron.extensions import portbindings from neutron.plugins.common import constants as p_constants from neutron.plugins.ml2 import driver_api as api LOG = log.getLogger(__name__) @six.add_metaclass(abc.ABCMeta) class AgentMechanismDriverBase(api.MechanismDriver): """Base class for drivers that attach to networks using an L2 agent. The AgentMechanismDriverBase provides common code for mechanism drivers that integrate the ml2 plugin with L2 agents. Port binding with this driver requires the driver's associated agent to be running on the port's host, and that agent to have connectivity to at least one segment of the port's network. MechanismDrivers using this base class must pass the agent type to __init__(), and must implement try_to_bind_segment_for_agent(). """ def __init__(self, agent_type, supported_vnic_types=[portbindings.VNIC_NORMAL]): """Initialize base class for specific L2 agent type. :param agent_type: Constant identifying agent type in agents_db :param supported_vnic_types: The binding:vnic_type values we can bind """ self.agent_type = agent_type self.supported_vnic_types = supported_vnic_types def initialize(self): pass def bind_port(self, context): LOG.debug("Attempting to bind port %(port)s on " "network %(network)s", {'port': context.current['id'], 'network': context.network.current['id']}) vnic_type = context.current.get(portbindings.VNIC_TYPE, portbindings.VNIC_NORMAL) if vnic_type not in self.supported_vnic_types: LOG.debug("Refusing to bind due to unsupported vnic_type: %s", vnic_type) return agents = context.host_agents(self.agent_type) if not agents: LOG.warning(_LW("Port %(pid)s on network %(network)s not bound, " "no agent registered on host %(host)s"), {'pid': context.current['id'], 'network': context.network.current['id'], 'host': context.host}) for agent in agents: LOG.debug("Checking agent: %s", agent) if agent['alive']: for segment in context.segments_to_bind: if self.try_to_bind_segment_for_agent(context, segment, agent): LOG.debug("Bound using segment: %s", segment) return else: LOG.warning(_LW("Refusing to bind port %(pid)s to dead agent: " "%(agent)s"), {'pid': context.current['id'], 'agent': agent}) @abc.abstractmethod def try_to_bind_segment_for_agent(self, context, segment, agent): """Try to bind with segment for agent. :param context: PortContext instance describing the port :param segment: segment dictionary describing segment to bind :param agent: agents_db entry describing agent to bind :returns: True iff segment has been bound for agent Called outside any transaction during bind_port() so that derived MechanismDrivers can use agent_db data along with built-in knowledge of the corresponding agent's capabilities to attempt to bind to the specified network segment for the agent. If the segment can be bound for the agent, this function must call context.set_binding() with appropriate values and then return True. Otherwise, it must return False. 
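        A minimal sketch of an implementation (assuming a flat/VLAN-style
        driver whose agent reports 'bridge_mappings' and which carries
        vif_type and vif_details attributes; not from the original source):

            def try_to_bind_segment_for_agent(self, context, segment, agent):
                mappings = agent['configurations'].get('bridge_mappings', {})
                if segment[api.PHYSICAL_NETWORK] not in mappings:
                    return False
                context.set_binding(segment[api.ID], self.vif_type,
                                    self.vif_details)
                return True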
""" @six.add_metaclass(abc.ABCMeta) class SimpleAgentMechanismDriverBase(AgentMechanismDriverBase): """Base class for simple drivers using an L2 agent. The SimpleAgentMechanismDriverBase provides common code for mechanism drivers that integrate the ml2 plugin with L2 agents, where the binding:vif_type and binding:vif_details values are the same for all bindings. Port binding with this driver requires the driver's associated agent to be running on the port's host, and that agent to have connectivity to at least one segment of the port's network. MechanismDrivers using this base class must pass the agent type and the values for binding:vif_type and binding:vif_details to __init__(), and must implement check_segment_for_agent(). """ def __init__(self, agent_type, vif_type, vif_details, supported_vnic_types=[portbindings.VNIC_NORMAL]): """Initialize base class for specific L2 agent type. :param agent_type: Constant identifying agent type in agents_db :param vif_type: Value for binding:vif_type when bound :param vif_details: Dictionary with details for VIF driver when bound :param supported_vnic_types: The binding:vnic_type values we can bind """ super(SimpleAgentMechanismDriverBase, self).__init__( agent_type, supported_vnic_types) self.vif_type = vif_type self.vif_details = vif_details def try_to_bind_segment_for_agent(self, context, segment, agent): if self.check_segment_for_agent(segment, agent): context.set_binding(segment[api.ID], self.vif_type, self.vif_details) return True else: return False @abc.abstractmethod def get_allowed_network_types(self, agent=None): """Return the agent's or driver's allowed network types. For example: return ('flat', ...). You can also refer to the configuration the given agent exposes. """ pass @abc.abstractmethod def get_mappings(self, agent): """Return the agent's bridge or interface mappings. For example: agent['configurations'].get('bridge_mappings', {}). """ pass def physnet_in_mappings(self, physnet, mappings): """Is the physical network part of the given mappings?""" return physnet in mappings def check_segment_for_agent(self, segment, agent): """Check if segment can be bound for agent. :param segment: segment dictionary describing segment to bind :param agent: agents_db entry describing agent to bind :returns: True iff segment can be bound for agent Called outside any transaction during bind_port so that derived MechanismDrivers can use agent_db data along with built-in knowledge of the corresponding agent's capabilities to determine whether or not the specified network segment can be bound for the agent. 
""" mappings = self.get_mappings(agent) allowed_network_types = self.get_allowed_network_types(agent) LOG.debug("Checking segment: %(segment)s " "for mappings: %(mappings)s " "with network types: %(network_types)s", {'segment': segment, 'mappings': mappings, 'network_types': allowed_network_types}) network_type = segment[api.NETWORK_TYPE] if network_type not in allowed_network_types: LOG.debug( 'Network %(network_id)s is of type %(network_type)s ' 'but agent %(agent)s or mechanism driver only ' 'support %(allowed_network_types)s.', {'network_id': segment['id'], 'network_type': network_type, 'agent': agent['host'], 'allowed_network_types': allowed_network_types}) return False if network_type in [p_constants.TYPE_FLAT, p_constants.TYPE_VLAN]: physnet = segment[api.PHYSICAL_NETWORK] if not self.physnet_in_mappings(physnet, mappings): LOG.debug( 'Network %(network_id)s is connected to physical ' 'network %(physnet)s, but agent %(agent)s reported ' 'physical networks %(mappings)s. ' 'The physical network must be configured on the ' 'agent if binding is to succeed.', {'network_id': segment['id'], 'physnet': physnet, 'agent': agent['host'], 'mappings': mappings}) return False return True neutron-8.4.0/neutron/plugins/ml2/drivers/type_gre.py0000664000567000056710000000626613044372760024111 0ustar jenkinsjenkins00000000000000# Copyright (c) 2013 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from oslo_config import cfg
from oslo_log import log
import sqlalchemy as sa
from sqlalchemy import sql

from neutron._i18n import _, _LE
from neutron.common import exceptions as n_exc
from neutron.db import model_base
from neutron.plugins.common import constants as p_const
from neutron.plugins.ml2.drivers import type_tunnel

LOG = log.getLogger(__name__)

gre_opts = [
    cfg.ListOpt('tunnel_id_ranges',
                default=[],
                help=_("Comma-separated list of <tun_min>:<tun_max> tuples "
                       "enumerating ranges of GRE tunnel IDs that are "
                       "available for tenant network allocation"))
]

cfg.CONF.register_opts(gre_opts, "ml2_type_gre")


class GreAllocation(model_base.BASEV2):

    __tablename__ = 'ml2_gre_allocations'

    gre_id = sa.Column(sa.Integer, nullable=False, primary_key=True,
                       autoincrement=False)
    allocated = sa.Column(sa.Boolean, nullable=False, default=False,
                          server_default=sql.false(), index=True)


class GreEndpoints(model_base.BASEV2):
    """Represents tunnel endpoint in RPC mode."""

    __tablename__ = 'ml2_gre_endpoints'

    __table_args__ = (
        sa.UniqueConstraint('host',
                            name='unique_ml2_gre_endpoints0host'),
        model_base.BASEV2.__table_args__
    )

    ip_address = sa.Column(sa.String(64), primary_key=True)
    host = sa.Column(sa.String(255), nullable=True)

    def __repr__(self):
        return "<GreTunnelEndpoint(%s)>" % self.ip_address


class GreTypeDriver(type_tunnel.EndpointTunnelTypeDriver):

    def __init__(self):
        super(GreTypeDriver, self).__init__(
            GreAllocation, GreEndpoints)

    def get_type(self):
        return p_const.TYPE_GRE

    def initialize(self):
        try:
            self._initialize(cfg.CONF.ml2_type_gre.tunnel_id_ranges)
        except n_exc.NetworkTunnelRangeError:
            LOG.exception(_LE("Failed to parse tunnel_id_ranges. "
                              "Service terminated!"))
            raise SystemExit()

    def get_endpoints(self):
        """Get every gre endpoint from the database."""
        gre_endpoints = self._get_endpoints()
        return [{'ip_address': gre_endpoint.ip_address,
                 'host': gre_endpoint.host}
                for gre_endpoint in gre_endpoints]

    def add_endpoint(self, ip, host):
        return self._add_endpoint(ip, host)

    def get_mtu(self, physical_network=None):
        mtu = super(GreTypeDriver, self).get_mtu(physical_network)
        return mtu - p_const.GRE_ENCAP_OVERHEAD if mtu else 0
neutron-8.4.0/neutron/plugins/ml2/drivers/openvswitch/0000775000567000056710000000000013044373210024247 5ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/plugins/ml2/drivers/openvswitch/__init__.py0000664000567000056710000000000013044372736026362 0ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/plugins/ml2/drivers/openvswitch/mech_driver/0000775000567000056710000000000013044373210026536 5ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/plugins/ml2/drivers/openvswitch/mech_driver/__init__.py0000664000567000056710000000000013044372736030651 0ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/plugins/ml2/drivers/openvswitch/mech_driver/mech_openvswitch.py0000664000567000056710000001163013044372760032467 0ustar jenkinsjenkins00000000000000# Copyright (c) 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
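# Illustrative arithmetic (not from the original source): get_mtu() in the
# GRE type driver above subtracts the encapsulation overhead from the
# underlying MTU.  Assuming, for illustration, an overhead of 42 bytes and a
# 1500-byte physical MTU:
#
#     physical_mtu = 1500
#     gre_encap_overhead = 42                          # stand-in value
#     tenant_mtu = physical_mtu - gre_encap_overhead   # -> 1458
#
# A result of 0 means "MTU unknown" rather than an actual limit.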
import os

from oslo_config import cfg

from neutron.agent import securitygroups_rpc
from neutron.common import constants
from neutron.extensions import portbindings
from neutron.plugins.common import constants as p_constants
from neutron.plugins.ml2 import driver_api as api
from neutron.plugins.ml2.drivers import mech_agent
from neutron.plugins.ml2.drivers.openvswitch.agent.common \
    import constants as a_const
from neutron.services.qos import qos_consts

IPTABLES_FW_DRIVER_FULL = ("neutron.agent.linux.iptables_firewall."
                           "OVSHybridIptablesFirewallDriver")


class OpenvswitchMechanismDriver(mech_agent.SimpleAgentMechanismDriverBase):
    """Attach to networks using openvswitch L2 agent.

    The OpenvswitchMechanismDriver integrates the ml2 plugin with the
    openvswitch L2 agent. Port binding with this driver requires the
    openvswitch agent to be running on the port's host, and that agent
    to have connectivity to at least one segment of the port's
    network.
    """

    supported_qos_rule_types = [qos_consts.RULE_TYPE_BANDWIDTH_LIMIT]

    def __init__(self):
        sg_enabled = securitygroups_rpc.is_firewall_enabled()
        hybrid_plug_required = (not cfg.CONF.SECURITYGROUP.firewall_driver or
                                cfg.CONF.SECURITYGROUP.firewall_driver in (
                                    IPTABLES_FW_DRIVER_FULL,
                                    'iptables_hybrid')) and sg_enabled
        vif_details = {portbindings.CAP_PORT_FILTER: sg_enabled,
                       portbindings.OVS_HYBRID_PLUG: hybrid_plug_required}
        super(OpenvswitchMechanismDriver, self).__init__(
            constants.AGENT_TYPE_OVS,
            portbindings.VIF_TYPE_OVS,
            vif_details)

    def get_allowed_network_types(self, agent):
        return (agent['configurations'].get('tunnel_types', []) +
                [p_constants.TYPE_LOCAL, p_constants.TYPE_FLAT,
                 p_constants.TYPE_VLAN])

    def get_mappings(self, agent):
        return agent['configurations'].get('bridge_mappings', {})

    def check_vlan_transparency(self, context):
        """Currently Openvswitch driver doesn't support vlan transparency."""
        return False

    def try_to_bind_segment_for_agent(self, context, segment, agent):
        if self.check_segment_for_agent(segment, agent):
            context.set_binding(segment[api.ID],
                                self.get_vif_type(agent, context),
                                self.get_vif_details(agent, context))
            return True
        else:
            return False

    def get_vif_type(self, agent, context):
        caps = agent['configurations'].get('ovs_capabilities', {})
        if (a_const.OVS_DPDK_VHOST_USER in caps.get('iface_types', []) and
                agent['configurations'].get('datapath_type') ==
                a_const.OVS_DATAPATH_NETDEV):
            return portbindings.VIF_TYPE_VHOST_USER
        return self.vif_type

    def get_vif_details(self, agent, context):
        a_config = agent['configurations']
        if a_config.get('datapath_type') != a_const.OVS_DATAPATH_NETDEV:
            details = dict(self.vif_details)
            hybrid = portbindings.OVS_HYBRID_PLUG
            if hybrid in a_config:
                # we only override the vif_details for hybrid plugging set
                # in the constructor if the agent specifically requests it
                details[hybrid] = a_config[hybrid]
            return details
        caps = a_config.get('ovs_capabilities', {})
        if a_const.OVS_DPDK_VHOST_USER in caps.get('iface_types', []):
            sock_path = self.agent_vhu_sockpath(agent, context.current['id'])
            return {
                portbindings.CAP_PORT_FILTER: False,
                portbindings.VHOST_USER_MODE:
                    portbindings.VHOST_USER_MODE_CLIENT,
                portbindings.VHOST_USER_OVS_PLUG: True,
                portbindings.VHOST_USER_SOCKET: sock_path
            }
        return self.vif_details

    @staticmethod
    def agent_vhu_sockpath(agent, port_id):
        """Return the agent's vhost-user socket path for a given port"""
        sockdir = agent['configurations'].get('vhostuser_socket_dir',
                                              a_const.VHOST_USER_SOCKET_DIR)
        sock_name = (constants.VHOST_USER_DEVICE_PREFIX + port_id)[:14]
        return os.path.join(sockdir,
sock_name) neutron-8.4.0/neutron/plugins/ml2/drivers/openvswitch/agent/0000775000567000056710000000000013044373210025345 5ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/plugins/ml2/drivers/openvswitch/agent/xenapi/0000775000567000056710000000000013044373210026631 5ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/plugins/ml2/drivers/openvswitch/agent/xenapi/contrib/0000775000567000056710000000000013044373210030271 5ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/plugins/ml2/drivers/openvswitch/agent/xenapi/contrib/rpmbuild/0000775000567000056710000000000013044373210032107 5ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/plugins/ml2/drivers/openvswitch/agent/xenapi/contrib/rpmbuild/SPECS/0000775000567000056710000000000013044373210032764 5ustar jenkinsjenkins00000000000000././@LongLink0000000000000000000000000000017500000000000011220 Lustar 00000000000000neutron-8.4.0/neutron/plugins/ml2/drivers/openvswitch/agent/xenapi/contrib/rpmbuild/SPECS/openstack-quantum-xen-plugins.specneutron-8.4.0/neutron/plugins/ml2/drivers/openvswitch/agent/xenapi/contrib/rpmbuild/SPECS/openstack-0000664000567000056710000000135613044372736034774 0ustar jenkinsjenkins00000000000000Name: openstack-neutron-xen-plugins Version: %{version} Release: 1 Summary: Files for XenAPI support. License: ASL 2.0 Group: Applications/Utilities Source0: openstack-neutron-xen-plugins.tar.gz BuildArch: noarch BuildRoot: %{_tmppath}/%{name}-%{version}-%{release}-root-%(%{__id_u} -n) %define debug_package %{nil} %description This package contains files that are required for XenAPI support for Neutron. %prep %setup -q -n openstack-neutron-xen-plugins %install rm -rf $RPM_BUILD_ROOT mkdir -p $RPM_BUILD_ROOT/etc cp -r xapi.d $RPM_BUILD_ROOT/etc chmod a+x $RPM_BUILD_ROOT/etc/xapi.d/plugins/* %clean rm -rf $RPM_BUILD_ROOT %files %defattr(-,root,root,-) /etc/xapi.d/plugins/* neutron-8.4.0/neutron/plugins/ml2/drivers/openvswitch/agent/xenapi/contrib/build-rpm.sh0000775000567000056710000000153413044372736032542 0ustar jenkinsjenkins00000000000000#!/usr/bin/env bash set -eux thisdir=$(dirname $(readlink -f "$0")) export NEUTRON_ROOT="$thisdir/../../../../../../" export PYTHONPATH=$NEUTRON_ROOT cd $NEUTRON_ROOT VERSION=$(sh -c "(cat $NEUTRON_ROOT/neutron/version.py; \ echo 'print version_info.release_string()') | \ python") cd - PACKAGE=openstack-neutron-xen-plugins RPMBUILD_DIR=$PWD/rpmbuild if [ ! -d $RPMBUILD_DIR ]; then echo $RPMBUILD_DIR is missing exit 1 fi for dir in BUILD BUILDROOT SRPMS RPMS SOURCES; do rm -rf $RPMBUILD_DIR/$dir mkdir -p $RPMBUILD_DIR/$dir done rm -rf /tmp/$PACKAGE mkdir /tmp/$PACKAGE cp -r ../etc/xapi.d /tmp/$PACKAGE tar czf $RPMBUILD_DIR/SOURCES/$PACKAGE.tar.gz -C /tmp $PACKAGE rpmbuild -ba --nodeps --define "_topdir $RPMBUILD_DIR" \ --define "version $VERSION" \ $RPMBUILD_DIR/SPECS/$PACKAGE.spec neutron-8.4.0/neutron/plugins/ml2/drivers/openvswitch/agent/xenapi/README0000664000567000056710000000104213044372736027522 0ustar jenkinsjenkins00000000000000This directory contains files that are required for the XenAPI support. They should be installed in the XenServer / Xen Cloud Platform dom0. If you install them manually, you will need to ensure that the newly added files are executable. 
You can do this by running the following command (from dom0): chmod a+x /etc/xapi.d/plugins/* Otherwise, you can build an rpm by running the following command: ./contrib/build-rpm.sh and install the rpm by running the following command (from dom0): rpm -i openstack-neutron-xen-plugins.rpm neutron-8.4.0/neutron/plugins/ml2/drivers/openvswitch/agent/xenapi/etc/0000775000567000056710000000000013044373210027404 5ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/plugins/ml2/drivers/openvswitch/agent/xenapi/etc/xapi.d/0000775000567000056710000000000013044373210030567 5ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/plugins/ml2/drivers/openvswitch/agent/xenapi/etc/xapi.d/plugins/0000775000567000056710000000000013044373210032250 5ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/plugins/ml2/drivers/openvswitch/agent/xenapi/etc/xapi.d/plugins/netwrap0000664000567000056710000000513213044372760033665 0ustar jenkinsjenkins00000000000000#!/usr/bin/env python # Copyright 2012 OpenStack Foundation # Copyright 2012 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # # XenAPI plugin for executing network commands (ovs, iptables, etc) on dom0 # import gettext gettext.install('neutron', unicode=1) try: import json except ImportError: import simplejson as json import subprocess import XenAPIPlugin ALLOWED_CMDS = [ 'ip', 'ipset', 'iptables-save', 'iptables-restore', 'ip6tables-save', 'ip6tables-restore', 'sysctl', # NOTE(yamamoto): of_interface=native doesn't use ovs-ofctl 'ovs-ofctl', 'ovs-vsctl', 'ovsdb-client', 'conntrack', ] class PluginError(Exception): """Base Exception class for all plugin errors.""" def __init__(self, *args): Exception.__init__(self, *args) def _run_command(cmd, cmd_input): """Abstracts out the basics of issuing system commands. If the command returns anything in stderr, a PluginError is raised with that information. Otherwise, the output from stdout is returned. 
""" pipe = subprocess.PIPE proc = subprocess.Popen(cmd, shell=False, stdin=pipe, stdout=pipe, stderr=pipe, close_fds=True) (out, err) = proc.communicate(cmd_input) return proc.returncode, out, err def run_command(session, args): cmd = json.loads(args.get('cmd')) if cmd and cmd[0] not in ALLOWED_CMDS: msg = _("Dom0 execution of '%s' is not permitted") % cmd[0] raise PluginError(msg) returncode, out, err = _run_command( cmd, json.loads(args.get('cmd_input', 'null'))) if not err: err = "" if not out: out = "" # This runs in Dom0, will return to neutron-ovs-agent in compute node result = {'returncode': returncode, 'out': out, 'err': err} return json.dumps(result) if __name__ == "__main__": XenAPIPlugin.dispatch({"run_command": run_command}) neutron-8.4.0/neutron/plugins/ml2/drivers/openvswitch/agent/openflow/0000775000567000056710000000000013044373210027176 5ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/plugins/ml2/drivers/openvswitch/agent/openflow/ovs_ofctl/0000775000567000056710000000000013044373210031174 5ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/plugins/ml2/drivers/openvswitch/agent/openflow/ovs_ofctl/br_dvr_process.py0000664000567000056710000000732113044372760034576 0ustar jenkinsjenkins00000000000000# Copyright (C) 2014,2015 VA Linux Systems Japan K.K. # Copyright (C) 2014,2015 YAMAMOTO Takashi # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # Copyright 2011 VMware, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron.common import constants class OVSDVRProcessMixin(object): """Common logic for br-tun and br-phys' DVR_PROCESS tables. Inheriters should provide self.dvr_process_table_id and self.dvr_process_next_table_id. 
""" def install_dvr_process_ipv4(self, vlan_tag, gateway_ip): # block ARP self.add_flow(table=self.dvr_process_table_id, priority=3, dl_vlan=vlan_tag, proto='arp', nw_dst=gateway_ip, actions='drop') def delete_dvr_process_ipv4(self, vlan_tag, gateway_ip): self.delete_flows(table=self.dvr_process_table_id, dl_vlan=vlan_tag, proto='arp', nw_dst=gateway_ip) def install_dvr_process_ipv6(self, vlan_tag, gateway_mac): # block RA self.add_flow(table=self.dvr_process_table_id, priority=3, dl_vlan=vlan_tag, proto='icmp6', icmp_type=constants.ICMPV6_TYPE_RA, dl_src=gateway_mac, actions='drop') def delete_dvr_process_ipv6(self, vlan_tag, gateway_mac): self.delete_flows(table=self.dvr_process_table_id, dl_vlan=vlan_tag, proto='icmp6', icmp_type=constants.ICMPV6_TYPE_RA, dl_src=gateway_mac) def install_dvr_process(self, vlan_tag, vif_mac, dvr_mac_address): self.add_flow(table=self.dvr_process_table_id, priority=2, dl_vlan=vlan_tag, dl_dst=vif_mac, actions="drop") self.add_flow(table=self.dvr_process_table_id, priority=1, dl_vlan=vlan_tag, dl_src=vif_mac, actions="mod_dl_src:%s,resubmit(,%s)" % (dvr_mac_address, self.dvr_process_next_table_id)) def delete_dvr_process(self, vlan_tag, vif_mac): self.delete_flows(table=self.dvr_process_table_id, dl_vlan=vlan_tag, dl_dst=vif_mac) self.delete_flows(table=self.dvr_process_table_id, dl_vlan=vlan_tag, dl_src=vif_mac) neutron-8.4.0/neutron/plugins/ml2/drivers/openvswitch/agent/openflow/ovs_ofctl/__init__.py0000664000567000056710000000000013044372736033307 0ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/plugins/ml2/drivers/openvswitch/agent/openflow/ovs_ofctl/br_tun.py0000664000567000056710000002631613044372736033063 0ustar jenkinsjenkins00000000000000# Copyright (C) 2014,2015 VA Linux Systems Japan K.K. # Copyright (C) 2014,2015 YAMAMOTO Takashi # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # Copyright 2011 VMware, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import functools import netaddr from neutron.agent.common import ovs_lib from neutron.plugins.ml2.drivers.openvswitch.agent.common \ import constants from neutron.plugins.ml2.drivers.openvswitch.agent.openflow.ovs_ofctl \ import br_dvr_process from neutron.plugins.ml2.drivers.openvswitch.agent.openflow.ovs_ofctl \ import ovs_bridge class OVSTunnelBridge(ovs_bridge.OVSAgentBridge, br_dvr_process.OVSDVRProcessMixin): """openvswitch agent tunnel bridge specific logic.""" # Used by OVSDVRProcessMixin dvr_process_table_id = constants.DVR_PROCESS dvr_process_next_table_id = constants.PATCH_LV_TO_TUN def setup_default_table(self, patch_int_ofport, arp_responder_enabled): # Table 0 (default) will sort incoming traffic depending on in_port with self.deferred() as deferred_br: deferred_br.add_flow(priority=1, in_port=patch_int_ofport, actions="resubmit(,%s)" % constants.PATCH_LV_TO_TUN) deferred_br.add_flow(priority=0, actions="drop") if arp_responder_enabled: # ARP broadcast-ed request go to the local ARP_RESPONDER # table to be locally resolved # REVISIT(yamamoto): add arp_op=arp.ARP_REQUEST matcher? deferred_br.add_flow(table=constants.PATCH_LV_TO_TUN, priority=1, proto='arp', dl_dst="ff:ff:ff:ff:ff:ff", actions=("resubmit(,%s)" % constants.ARP_RESPONDER)) # PATCH_LV_TO_TUN table will handle packets coming from patch_int # unicasts go to table UCAST_TO_TUN where remote addresses are # learnt deferred_br.add_flow(table=constants.PATCH_LV_TO_TUN, priority=0, dl_dst="00:00:00:00:00:00/01:00:00:00:00:00", actions=("resubmit(,%s)" % constants.UCAST_TO_TUN)) # Broadcasts/multicasts go to table FLOOD_TO_TUN that handles # flooding deferred_br.add_flow(table=constants.PATCH_LV_TO_TUN, priority=0, dl_dst="01:00:00:00:00:00/01:00:00:00:00:00", actions=("resubmit(,%s)" % constants.FLOOD_TO_TUN)) # Tables [tunnel_type]_TUN_TO_LV will set lvid depending on tun_id # for each tunnel type, and resubmit to table LEARN_FROM_TUN where # remote mac addresses will be learnt for tunnel_type in constants.TUNNEL_NETWORK_TYPES: deferred_br.add_flow(table=constants.TUN_TABLE[tunnel_type], priority=0, actions="drop") # LEARN_FROM_TUN table will have a single flow using a learn action # to dynamically set-up flows in UCAST_TO_TUN corresponding to # remote mac addresses (assumes that lvid has already been set by # a previous flow) learned_flow = ("cookie=%(cookie)s," "table=%(table)s," "priority=1," "hard_timeout=300," "NXM_OF_VLAN_TCI[0..11]," "NXM_OF_ETH_DST[]=NXM_OF_ETH_SRC[]," "load:0->NXM_OF_VLAN_TCI[]," "load:NXM_NX_TUN_ID[]->NXM_NX_TUN_ID[]," "output:NXM_OF_IN_PORT[]" % {'cookie': self.default_cookie, 'table': constants.UCAST_TO_TUN}) # Once remote mac addresses are learnt, output packet to patch_int deferred_br.add_flow(table=constants.LEARN_FROM_TUN, priority=1, actions="learn(%s),output:%s" % (learned_flow, patch_int_ofport)) # Egress unicast will be handled in table UCAST_TO_TUN, where # remote mac addresses will be learned. 
For now, just add a # default flow that will resubmit unknown unicasts to table # FLOOD_TO_TUN to treat them as broadcasts/multicasts deferred_br.add_flow(table=constants.UCAST_TO_TUN, priority=0, actions="resubmit(,%s)" % constants.FLOOD_TO_TUN) if arp_responder_enabled: # If none of the ARP entries correspond to the requested IP, # the broadcast-ed packet is resubmitted to the flooding table deferred_br.add_flow(table=constants.ARP_RESPONDER, priority=0, actions="resubmit(,%s)" % constants.FLOOD_TO_TUN) # FLOOD_TO_TUN will handle flooding in tunnels based on lvid, # for now, add a default drop action self.install_drop(table_id=constants.FLOOD_TO_TUN) def provision_local_vlan(self, network_type, lvid, segmentation_id, distributed=False): if distributed: table_id = constants.DVR_NOT_LEARN else: table_id = constants.LEARN_FROM_TUN self.add_flow(table=constants.TUN_TABLE[network_type], priority=1, tun_id=segmentation_id, actions="mod_vlan_vid:%s," "resubmit(,%s)" % (lvid, table_id)) def reclaim_local_vlan(self, network_type, segmentation_id): self.delete_flows(table=constants.TUN_TABLE[network_type], tun_id=segmentation_id) @staticmethod def _ofport_set_to_str(ports_set): return ",".join(map(str, ports_set)) def install_flood_to_tun(self, vlan, tun_id, ports, deferred_br=None): br = deferred_br if deferred_br else self br.mod_flow(table=constants.FLOOD_TO_TUN, dl_vlan=vlan, actions="strip_vlan,set_tunnel:%s,output:%s" % (tun_id, self._ofport_set_to_str(ports))) def delete_flood_to_tun(self, vlan, deferred_br=None): br = deferred_br if deferred_br else self br.delete_flows(table=constants.FLOOD_TO_TUN, dl_vlan=vlan) def install_unicast_to_tun(self, vlan, tun_id, port, mac, deferred_br=None): br = deferred_br if deferred_br else self br.add_flow(table=constants.UCAST_TO_TUN, priority=2, dl_vlan=vlan, dl_dst=mac, actions="strip_vlan,set_tunnel:%s,output:%s" % (tun_id, port)) def delete_unicast_to_tun(self, vlan, mac, deferred_br=None): br = deferred_br if deferred_br else self if mac is None: br.delete_flows(table=constants.UCAST_TO_TUN, dl_vlan=vlan) else: br.delete_flows(table=constants.UCAST_TO_TUN, dl_vlan=vlan, dl_dst=mac) def install_arp_responder(self, vlan, ip, mac, deferred_br=None): br = deferred_br if deferred_br else self actions = constants.ARP_RESPONDER_ACTIONS % { 'mac': netaddr.EUI(mac, dialect=netaddr.mac_unix), 'ip': netaddr.IPAddress(ip), } br.add_flow(table=constants.ARP_RESPONDER, priority=1, proto='arp', dl_vlan=vlan, nw_dst='%s' % ip, actions=actions) def delete_arp_responder(self, vlan, ip, deferred_br=None): br = deferred_br if deferred_br else self if ip is None: br.delete_flows(table=constants.ARP_RESPONDER, proto='arp', dl_vlan=vlan) else: br.delete_flows(table=constants.ARP_RESPONDER, proto='arp', dl_vlan=vlan, nw_dst='%s' % ip) def setup_tunnel_port(self, network_type, port, deferred_br=None): br = deferred_br if deferred_br else self br.add_flow(priority=1, in_port=port, actions="resubmit(,%s)" % constants.TUN_TABLE[network_type]) def cleanup_tunnel_port(self, port, deferred_br=None): br = deferred_br if deferred_br else self br.delete_flows(in_port=port) def add_dvr_mac_tun(self, mac, port): # Table DVR_NOT_LEARN ensures unique dvr macs in the cloud # are not learnt, as they may result in flow explosions self.install_output(table_id=constants.DVR_NOT_LEARN, priority=1, eth_src=mac, port=port) def remove_dvr_mac_tun(self, mac): # REVISIT(yamamoto): match in_port as well? 
self.delete_flows(table_id=constants.DVR_NOT_LEARN, eth_src=mac) def deferred(self): return DeferredOVSTunnelBridge(self) class DeferredOVSTunnelBridge(ovs_lib.DeferredOVSBridge): _METHODS = [ 'install_unicast_to_tun', 'delete_unicast_to_tun', 'install_flood_to_tun', 'delete_flood_to_tun', 'install_arp_responder', 'delete_arp_responder', 'setup_tunnel_port', 'cleanup_tunnel_port', ] def __getattr__(self, name): if name in self._METHODS: m = getattr(self.br, name) return functools.partial(m, deferred_br=self) return super(DeferredOVSTunnelBridge, self).__getattr__(name) neutron-8.4.0/neutron/plugins/ml2/drivers/openvswitch/agent/openflow/ovs_ofctl/br_int.py0000664000567000056710000001754713044372760033052 0ustar jenkinsjenkins00000000000000# Copyright (C) 2014,2015 VA Linux Systems Japan K.K. # Copyright (C) 2014,2015 YAMAMOTO Takashi # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ * references ** OVS agent https://wiki.openstack.org/wiki/Ovs-flow-logic """ import netaddr from neutron.common import constants as const from neutron.plugins.common import constants as p_const from neutron.plugins.ml2.drivers.openvswitch.agent.common import constants from neutron.plugins.ml2.drivers.openvswitch.agent.openflow.ovs_ofctl \ import ovs_bridge class OVSIntegrationBridge(ovs_bridge.OVSAgentBridge): """openvswitch agent br-int specific logic.""" def setup_default_table(self): self.install_normal() self.setup_canary_table() self.install_drop(table_id=constants.ARP_SPOOF_TABLE) def setup_canary_table(self): self.install_drop(constants.CANARY_TABLE) def check_canary_table(self): canary_flows = self.dump_flows(constants.CANARY_TABLE) if canary_flows == '': return constants.OVS_RESTARTED elif canary_flows is None: return constants.OVS_DEAD else: return constants.OVS_NORMAL def provision_local_vlan(self, port, lvid, segmentation_id): if segmentation_id is None: dl_vlan = 0xffff else: dl_vlan = segmentation_id self.add_flow(priority=3, in_port=port, dl_vlan=dl_vlan, actions="mod_vlan_vid:%s,normal" % lvid) def reclaim_local_vlan(self, port, segmentation_id): if segmentation_id is None: dl_vlan = 0xffff else: dl_vlan = segmentation_id self.delete_flows(in_port=port, dl_vlan=dl_vlan) @staticmethod def _dvr_to_src_mac_table_id(network_type): if network_type == p_const.TYPE_VLAN: return constants.DVR_TO_SRC_MAC_VLAN else: return constants.DVR_TO_SRC_MAC def install_dvr_to_src_mac(self, network_type, vlan_tag, gateway_mac, dst_mac, dst_port): table_id = self._dvr_to_src_mac_table_id(network_type) self.add_flow(table=table_id, priority=4, dl_vlan=vlan_tag, dl_dst=dst_mac, actions="strip_vlan,mod_dl_src:%s," "output:%s" % (gateway_mac, dst_port)) def delete_dvr_to_src_mac(self, network_type, vlan_tag, dst_mac): table_id = self._dvr_to_src_mac_table_id(network_type) self.delete_flows(table=table_id, dl_vlan=vlan_tag, dl_dst=dst_mac) def add_dvr_mac_vlan(self, mac, port): self.install_goto(table_id=constants.LOCAL_SWITCHING, priority=4, in_port=port, eth_src=mac, dest_table_id=constants.DVR_TO_SRC_MAC_VLAN) 
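    # A hedged ovs-ofctl rendering of the flow add_dvr_mac_vlan()
    # produces (the port number and MAC below are assumptions):
    #
    #   table=<LOCAL_SWITCHING>, priority=4, in_port=5,
    #       dl_src=fa:16:3f:00:00:01
    #       actions=resubmit(,<DVR_TO_SRC_MAC_VLAN>)
    #
    # i.e. frames entering from the physical bridge with a DVR router
    # MAC bypass NORMAL switching and go to the DVR-to-src-MAC table.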
def remove_dvr_mac_vlan(self, mac): # REVISIT(yamamoto): match in_port as well? self.delete_flows(table_id=constants.LOCAL_SWITCHING, eth_src=mac) def add_dvr_mac_tun(self, mac, port): # Table LOCAL_SWITCHING will now sort DVR traffic from other # traffic depending on in_port self.install_goto(table_id=constants.LOCAL_SWITCHING, priority=2, in_port=port, eth_src=mac, dest_table_id=constants.DVR_TO_SRC_MAC) def remove_dvr_mac_tun(self, mac, port): self.delete_flows(table_id=constants.LOCAL_SWITCHING, in_port=port, eth_src=mac) def install_icmpv6_na_spoofing_protection(self, port, ip_addresses): # Allow neighbor advertisements as long as they match addresses # that actually belong to the port. for ip in ip_addresses: self.install_normal( table_id=constants.ARP_SPOOF_TABLE, priority=2, dl_type=const.ETHERTYPE_IPV6, nw_proto=const.PROTO_NUM_IPV6_ICMP, icmp_type=const.ICMPV6_TYPE_NA, nd_target=ip, in_port=port) # Now that the rules are ready, direct icmpv6 neighbor advertisement # traffic from the port into the anti-spoof table. self.add_flow(table=constants.LOCAL_SWITCHING, priority=10, dl_type=const.ETHERTYPE_IPV6, nw_proto=const.PROTO_NUM_IPV6_ICMP, icmp_type=const.ICMPV6_TYPE_NA, in_port=port, actions=("resubmit(,%s)" % constants.ARP_SPOOF_TABLE)) def set_allowed_macs_for_port(self, port, mac_addresses=None, allow_all=False): if allow_all: self.delete_flows(table_id=constants.LOCAL_SWITCHING, in_port=port) self.delete_flows(table_id=constants.MAC_SPOOF_TABLE, in_port=port) return mac_addresses = mac_addresses or [] for address in mac_addresses: self.install_normal( table_id=constants.MAC_SPOOF_TABLE, priority=2, eth_src=address, in_port=port) # normalize so we can see if macs are the same mac_addresses = {netaddr.EUI(mac) for mac in mac_addresses} flows = self.dump_flows_for(table=constants.MAC_SPOOF_TABLE, in_port=port).splitlines() for flow in flows: if 'dl_src' not in flow: continue flow_mac = flow.split('dl_src=')[1].split(' ')[0].split(',')[0] if netaddr.EUI(flow_mac) not in mac_addresses: self.delete_flows(table_id=constants.MAC_SPOOF_TABLE, in_port=port, eth_src=flow_mac) self.add_flow(table=constants.LOCAL_SWITCHING, priority=9, in_port=port, actions=("resubmit(,%s)" % constants.MAC_SPOOF_TABLE)) def install_arp_spoofing_protection(self, port, ip_addresses): # allow ARPs as long as they match addresses that actually # belong to the port. for ip in ip_addresses: self.add_flow( table=constants.ARP_SPOOF_TABLE, priority=2, proto='arp', arp_spa=ip, in_port=port, actions=("resubmit(,%s)" % constants.MAC_SPOOF_TABLE)) # Now that the rules are ready, direct ARP traffic from the port into # the anti-spoof table. # This strategy fails gracefully because OVS versions that can't match # on ARP headers will just process traffic normally. 
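        # Illustrative ovs-ofctl rendering of the flow added below
        # (the port number is an assumption):
        #   table=<LOCAL_SWITCHING>,priority=10,arp,in_port=5,
        #       actions=resubmit(,<ARP_SPOOF_TABLE>)
        # ARP that none of the allow-rules above whitelisted then hits
        # the default drop installed in ARP_SPOOF_TABLE by
        # setup_default_table().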
self.add_flow(table=constants.LOCAL_SWITCHING, priority=10, proto='arp', in_port=port, actions=("resubmit(,%s)" % constants.ARP_SPOOF_TABLE)) def delete_arp_spoofing_protection(self, port): self.delete_flows(table_id=constants.LOCAL_SWITCHING, in_port=port, proto='arp') self.delete_flows(table_id=constants.LOCAL_SWITCHING, in_port=port, nw_proto=const.PROTO_NUM_IPV6_ICMP, icmp_type=const.ICMPV6_TYPE_NA) self.delete_arp_spoofing_allow_rules(port) def delete_arp_spoofing_allow_rules(self, port): self.delete_flows(table_id=constants.ARP_SPOOF_TABLE, in_port=port) neutron-8.4.0/neutron/plugins/ml2/drivers/openvswitch/agent/openflow/ovs_ofctl/ofswitch.py0000664000567000056710000000772513044372736033423 0ustar jenkinsjenkins00000000000000# Copyright (C) 2014,2015 VA Linux Systems Japan K.K. # Copyright (C) 2014,2015 YAMAMOTO Takashi # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import re from oslo_log import log as logging from neutron._i18n import _LW LOG = logging.getLogger(__name__) # Field name mappings (from Ryu to ovs-ofctl) _keywords = { 'eth_src': 'dl_src', 'eth_dst': 'dl_dst', 'ipv4_src': 'nw_src', 'ipv4_dst': 'nw_dst', 'table_id': 'table', } class OpenFlowSwitchMixin(object): """Mixin to provide common convenient routines for an openflow switch.""" @staticmethod def _conv_args(kwargs): for our_name, ovs_ofctl_name in _keywords.items(): if our_name in kwargs: kwargs[ovs_ofctl_name] = kwargs.pop(our_name) return kwargs def dump_flows(self, table_id): return self.dump_flows_for_table(table_id) def dump_flows_all_tables(self): return self.dump_all_flows() def install_goto_next(self, table_id): self.install_goto(table_id=table_id, dest_table_id=table_id + 1) def install_output(self, port, table_id=0, priority=0, **kwargs): self.add_flow(table=table_id, priority=priority, actions="output:%s" % port, **self._conv_args(kwargs)) def install_normal(self, table_id=0, priority=0, **kwargs): self.add_flow(table=table_id, priority=priority, actions="normal", **self._conv_args(kwargs)) def install_goto(self, dest_table_id, table_id=0, priority=0, **kwargs): self.add_flow(table=table_id, priority=priority, actions="resubmit(,%s)" % dest_table_id, **self._conv_args(kwargs)) def install_drop(self, table_id=0, priority=0, **kwargs): self.add_flow(table=table_id, priority=priority, actions="drop", **self._conv_args(kwargs)) def delete_flows(self, **kwargs): # NOTE(yamamoto): super() points to ovs_lib.OVSBridge. # See ovs_bridge.py how this class is actually used. 
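        # With keyword filters, translate our field names (eth_src,
        # table_id, ...) to their ovs-ofctl spellings and delete the
        # matching flows; with no filters at all, clear the whole
        # bridge via remove_all_flows().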
if kwargs: super(OpenFlowSwitchMixin, self).delete_flows( **self._conv_args(kwargs)) else: super(OpenFlowSwitchMixin, self).remove_all_flows() def _filter_flows(self, flows): cookie_list = self.reserved_cookies LOG.debug("Bridge cookies used to filter flows: %s", cookie_list) cookie_re = re.compile('cookie=(0x[A-Fa-f0-9]*)') table_re = re.compile('table=([0-9]*)') for flow in flows: fl_cookie = cookie_re.search(flow) if not fl_cookie: continue fl_cookie = fl_cookie.group(1) if int(fl_cookie, 16) not in cookie_list: fl_table = table_re.search(flow) if not fl_table: continue fl_table = fl_table.group(1) yield flow, fl_cookie, fl_table def cleanup_flows(self): flows = self.dump_flows_all_tables() for flow, cookie, table in self._filter_flows(flows): # deleting a stale flow should be rare. # it might deserve some attention LOG.warning(_LW("Deleting flow %s"), flow) self.delete_flows(cookie=cookie + '/-1', table=table) neutron-8.4.0/neutron/plugins/ml2/drivers/openvswitch/agent/openflow/ovs_ofctl/br_phys.py0000664000567000056710000000466113044372736033237 0ustar jenkinsjenkins00000000000000# Copyright (C) 2014,2015 VA Linux Systems Japan K.K. # Copyright (C) 2014,2015 YAMAMOTO Takashi # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron.plugins.ml2.drivers.openvswitch.agent.common import constants from neutron.plugins.ml2.drivers.openvswitch.agent.openflow.ovs_ofctl \ import br_dvr_process from neutron.plugins.ml2.drivers.openvswitch.agent.openflow.ovs_ofctl \ import ovs_bridge class OVSPhysicalBridge(ovs_bridge.OVSAgentBridge, br_dvr_process.OVSDVRProcessMixin): """openvswitch agent physical bridge specific logic.""" # Used by OVSDVRProcessMixin dvr_process_table_id = constants.DVR_PROCESS_VLAN dvr_process_next_table_id = constants.LOCAL_VLAN_TRANSLATION def setup_default_table(self): self.install_normal() def provision_local_vlan(self, port, lvid, segmentation_id, distributed): table_id = constants.LOCAL_VLAN_TRANSLATION if distributed else 0 if segmentation_id is None: self.add_flow(table=table_id, priority=4, in_port=port, dl_vlan=lvid, actions="strip_vlan,normal") else: self.add_flow(table=table_id, priority=4, in_port=port, dl_vlan=lvid, actions="mod_vlan_vid:%s,normal" % segmentation_id) def reclaim_local_vlan(self, port, lvid): self.delete_flows(in_port=port, dl_vlan=lvid) def add_dvr_mac_vlan(self, mac, port): self.install_output(table_id=constants.DVR_NOT_LEARN_VLAN, priority=2, eth_src=mac, port=port) def remove_dvr_mac_vlan(self, mac): # REVISIT(yamamoto): match in_port as well? self.delete_flows(table_id=constants.DVR_NOT_LEARN_VLAN, eth_src=mac) neutron-8.4.0/neutron/plugins/ml2/drivers/openvswitch/agent/openflow/ovs_ofctl/main.py0000664000567000056710000000243713044372736032514 0ustar jenkinsjenkins00000000000000# Copyright (C) 2015 VA Linux Systems Japan K.K. # Copyright (C) 2015 YAMAMOTO Takashi # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron.plugins.ml2.drivers.openvswitch.agent.openflow.ovs_ofctl \ import br_int from neutron.plugins.ml2.drivers.openvswitch.agent.openflow.ovs_ofctl \ import br_phys from neutron.plugins.ml2.drivers.openvswitch.agent.openflow.ovs_ofctl \ import br_tun from neutron.plugins.ml2.drivers.openvswitch.agent import ovs_neutron_agent def init_config(): pass def main(): bridge_classes = { 'br_int': br_int.OVSIntegrationBridge, 'br_phys': br_phys.OVSPhysicalBridge, 'br_tun': br_tun.OVSTunnelBridge, } ovs_neutron_agent.main(bridge_classes) neutron-8.4.0/neutron/plugins/ml2/drivers/openvswitch/agent/openflow/ovs_ofctl/ovs_bridge.py0000664000567000056710000000265413044372736033714 0ustar jenkinsjenkins00000000000000# Copyright (C) 2014,2015 VA Linux Systems Japan K.K. # Copyright (C) 2014,2015 YAMAMOTO Takashi # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron.agent.common import ovs_lib from neutron.plugins.ml2.drivers.openvswitch.agent.common import constants \ as ovs_consts from neutron.plugins.ml2.drivers.openvswitch.agent.openflow \ import br_cookie from neutron.plugins.ml2.drivers.openvswitch.agent.openflow.ovs_ofctl \ import ofswitch class OVSAgentBridge(ofswitch.OpenFlowSwitchMixin, br_cookie.OVSBridgeCookieMixin, ovs_lib.OVSBridge): """Common code for bridges used by OVS agent""" def setup_controllers(self, conf): self.set_protocols([ovs_consts.OPENFLOW10, ovs_consts.OPENFLOW13]) self.del_controller() def drop_port(self, in_port): self.install_drop(priority=2, in_port=in_port) neutron-8.4.0/neutron/plugins/ml2/drivers/openvswitch/agent/openflow/native/0000775000567000056710000000000013044373210030464 5ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/plugins/ml2/drivers/openvswitch/agent/openflow/native/br_dvr_process.py0000664000567000056710000001156413044372736034075 0ustar jenkinsjenkins00000000000000# Copyright (C) 2014,2015 VA Linux Systems Japan K.K. # Copyright (C) 2014,2015 YAMAMOTO Takashi # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
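# Note (an illustrative aside, not part of the original module): this
# "native" driver programs the same DVR_PROCESS flows as its ovs_ofctl
# counterpart, but expresses matches as Ryu OFPMatch objects rather
# than ovs-ofctl strings.  Roughly, with hypothetical values:
#
#     ovs-ofctl:  dl_vlan=42,arp,nw_dst=192.0.2.1
#     native:     ofpp.OFPMatch(vlan_vid=42 | ofp.OFPVID_PRESENT,
#                               eth_type=ether_types.ETH_TYPE_ARP,
#                               arp_tpa='192.0.2.1')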
from ryu.lib.packet import ether_types from ryu.lib.packet import icmpv6 from ryu.lib.packet import in_proto class OVSDVRProcessMixin(object): """Common logic for br-tun and br-phys' DVR_PROCESS tables. Inheriters should provide self.dvr_process_table_id and self.dvr_process_next_table_id. """ @staticmethod def _dvr_process_ipv4_match(ofp, ofpp, vlan_tag, gateway_ip): return ofpp.OFPMatch(vlan_vid=vlan_tag | ofp.OFPVID_PRESENT, eth_type=ether_types.ETH_TYPE_ARP, arp_tpa=gateway_ip) def install_dvr_process_ipv4(self, vlan_tag, gateway_ip): # block ARP (_dp, ofp, ofpp) = self._get_dp() match = self._dvr_process_ipv4_match(ofp, ofpp, vlan_tag=vlan_tag, gateway_ip=gateway_ip) self.install_drop(table_id=self.dvr_process_table_id, priority=3, match=match) def delete_dvr_process_ipv4(self, vlan_tag, gateway_ip): (_dp, ofp, ofpp) = self._get_dp() match = self._dvr_process_ipv4_match(ofp, ofpp, vlan_tag=vlan_tag, gateway_ip=gateway_ip) self.delete_flows(table_id=self.dvr_process_table_id, match=match) @staticmethod def _dvr_process_ipv6_match(ofp, ofpp, vlan_tag, gateway_mac): return ofpp.OFPMatch(vlan_vid=vlan_tag | ofp.OFPVID_PRESENT, eth_type=ether_types.ETH_TYPE_IPV6, ip_proto=in_proto.IPPROTO_ICMPV6, icmpv6_type=icmpv6.ND_ROUTER_ADVERT, eth_src=gateway_mac) def install_dvr_process_ipv6(self, vlan_tag, gateway_mac): # block RA (_dp, ofp, ofpp) = self._get_dp() match = self._dvr_process_ipv6_match(ofp, ofpp, vlan_tag=vlan_tag, gateway_mac=gateway_mac) self.install_drop(table_id=self.dvr_process_table_id, priority=3, match=match) def delete_dvr_process_ipv6(self, vlan_tag, gateway_mac): (_dp, ofp, ofpp) = self._get_dp() match = self._dvr_process_ipv6_match(ofp, ofpp, vlan_tag=vlan_tag, gateway_mac=gateway_mac) self.delete_flows(table_id=self.dvr_process_table_id, match=match) @staticmethod def _dvr_process_in_match(ofp, ofpp, vlan_tag, vif_mac): return ofpp.OFPMatch(vlan_vid=vlan_tag | ofp.OFPVID_PRESENT, eth_dst=vif_mac) @staticmethod def _dvr_process_out_match(ofp, ofpp, vlan_tag, vif_mac): return ofpp.OFPMatch(vlan_vid=vlan_tag | ofp.OFPVID_PRESENT, eth_src=vif_mac) def install_dvr_process(self, vlan_tag, vif_mac, dvr_mac_address): (_dp, ofp, ofpp) = self._get_dp() match = self._dvr_process_in_match(ofp, ofpp, vlan_tag=vlan_tag, vif_mac=vif_mac) table_id = self.dvr_process_table_id self.install_drop(table_id=table_id, priority=2, match=match) match = self._dvr_process_out_match(ofp, ofpp, vlan_tag=vlan_tag, vif_mac=vif_mac) actions = [ ofpp.OFPActionSetField(eth_src=dvr_mac_address), ] instructions = [ ofpp.OFPInstructionActions(ofp.OFPIT_APPLY_ACTIONS, actions), ofpp.OFPInstructionGotoTable( table_id=self.dvr_process_next_table_id), ] self.install_instructions(table_id=table_id, priority=1, match=match, instructions=instructions) def delete_dvr_process(self, vlan_tag, vif_mac): (_dp, ofp, ofpp) = self._get_dp() table_id = self.dvr_process_table_id match = self._dvr_process_in_match(ofp, ofpp, vlan_tag=vlan_tag, vif_mac=vif_mac) self.delete_flows(table_id=table_id, match=match) match = self._dvr_process_out_match(ofp, ofpp, vlan_tag=vlan_tag, vif_mac=vif_mac) self.delete_flows(table_id=table_id, match=match) neutron-8.4.0/neutron/plugins/ml2/drivers/openvswitch/agent/openflow/native/ovs_ryuapp.py0000664000567000056710000000462513044372736033270 0ustar jenkinsjenkins00000000000000# Copyright (C) 2015 VA Linux Systems Japan K.K. # Copyright (C) 2015 YAMAMOTO Takashi # All Rights Reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import functools from oslo_log import log as logging import ryu.app.ofctl.api # noqa from ryu.base import app_manager from ryu.lib import hub from ryu.ofproto import ofproto_v1_3 from neutron._i18n import _LE from neutron.plugins.ml2.drivers.openvswitch.agent.openflow.native \ import br_int from neutron.plugins.ml2.drivers.openvswitch.agent.openflow.native \ import br_phys from neutron.plugins.ml2.drivers.openvswitch.agent.openflow.native \ import br_tun from neutron.plugins.ml2.drivers.openvswitch.agent \ import ovs_neutron_agent as ovs_agent LOG = logging.getLogger(__name__) def agent_main_wrapper(bridge_classes): try: ovs_agent.main(bridge_classes) except Exception: LOG.exception(_LE("Agent main thread died of an exception")) finally: # The following call terminates Ryu's AppManager.run_apps(), # which is needed for clean shutdown of an agent process. # The close() call must be called in another thread, otherwise # it suicides and ends prematurely. hub.spawn(app_manager.AppManager.get_instance().close) class OVSNeutronAgentRyuApp(app_manager.RyuApp): OFP_VERSIONS = [ofproto_v1_3.OFP_VERSION] def start(self): # Start Ryu event loop thread super(OVSNeutronAgentRyuApp, self).start() def _make_br_cls(br_cls): return functools.partial(br_cls, ryu_app=self) # Start agent main loop thread bridge_classes = { 'br_int': _make_br_cls(br_int.OVSIntegrationBridge), 'br_phys': _make_br_cls(br_phys.OVSPhysicalBridge), 'br_tun': _make_br_cls(br_tun.OVSTunnelBridge), } return hub.spawn(agent_main_wrapper, bridge_classes) neutron-8.4.0/neutron/plugins/ml2/drivers/openvswitch/agent/openflow/native/__init__.py0000664000567000056710000000000013044372736032577 0ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/plugins/ml2/drivers/openvswitch/agent/openflow/native/br_tun.py0000664000567000056710000003216613044372760032350 0ustar jenkinsjenkins00000000000000# Copyright (C) 2014,2015 VA Linux Systems Japan K.K. # Copyright (C) 2014,2015 YAMAMOTO Takashi # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # Copyright 2011 VMware, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from ryu.lib.packet import arp from ryu.lib.packet import ether_types from neutron.plugins.ml2.drivers.openvswitch.agent.common import constants from neutron.plugins.ml2.drivers.openvswitch.agent.openflow.native \ import br_dvr_process from neutron.plugins.ml2.drivers.openvswitch.agent.openflow.native \ import ovs_bridge class OVSTunnelBridge(ovs_bridge.OVSAgentBridge, br_dvr_process.OVSDVRProcessMixin): """openvswitch agent tunnel bridge specific logic.""" # Used by OVSDVRProcessMixin dvr_process_table_id = constants.DVR_PROCESS dvr_process_next_table_id = constants.PATCH_LV_TO_TUN def setup_default_table(self, patch_int_ofport, arp_responder_enabled): (dp, ofp, ofpp) = self._get_dp() # Table 0 (default) will sort incoming traffic depending on in_port self.install_goto(dest_table_id=constants.PATCH_LV_TO_TUN, priority=1, in_port=patch_int_ofport) self.install_drop() # default drop if arp_responder_enabled: # ARP broadcast-ed request go to the local ARP_RESPONDER table to # be locally resolved # REVISIT(yamamoto): add arp_op=arp.ARP_REQUEST matcher? self.install_goto(dest_table_id=constants.ARP_RESPONDER, table_id=constants.PATCH_LV_TO_TUN, priority=1, eth_dst="ff:ff:ff:ff:ff:ff", eth_type=ether_types.ETH_TYPE_ARP) # PATCH_LV_TO_TUN table will handle packets coming from patch_int # unicasts go to table UCAST_TO_TUN where remote addresses are learnt self.install_goto(dest_table_id=constants.UCAST_TO_TUN, table_id=constants.PATCH_LV_TO_TUN, eth_dst=('00:00:00:00:00:00', '01:00:00:00:00:00')) # Broadcasts/multicasts go to table FLOOD_TO_TUN that handles flooding self.install_goto(dest_table_id=constants.FLOOD_TO_TUN, table_id=constants.PATCH_LV_TO_TUN, eth_dst=('01:00:00:00:00:00', '01:00:00:00:00:00')) # Tables [tunnel_type]_TUN_TO_LV will set lvid depending on tun_id # for each tunnel type, and resubmit to table LEARN_FROM_TUN where # remote mac addresses will be learnt for tunnel_type in constants.TUNNEL_NETWORK_TYPES: self.install_drop(table_id=constants.TUN_TABLE[tunnel_type]) # LEARN_FROM_TUN table will have a single flow using a learn action to # dynamically set-up flows in UCAST_TO_TUN corresponding to remote mac # addresses (assumes that lvid has already been set by a previous flow) # Once remote mac addresses are learnt, output packet to patch_int flow_specs = [ ofpp.NXFlowSpecMatch(src=('vlan_vid', 0), dst=('vlan_vid', 0), n_bits=12), ofpp.NXFlowSpecMatch(src=('eth_src', 0), dst=('eth_dst', 0), n_bits=48), ofpp.NXFlowSpecLoad(src=0, dst=('vlan_vid', 0), n_bits=12), ofpp.NXFlowSpecLoad(src=('tunnel_id', 0), dst=('tunnel_id', 0), n_bits=64), ofpp.NXFlowSpecOutput(src=('in_port', 0), dst='', n_bits=32), ] actions = [ ofpp.NXActionLearn(table_id=constants.UCAST_TO_TUN, cookie=self.default_cookie, priority=1, hard_timeout=300, specs=flow_specs), ofpp.OFPActionOutput(patch_int_ofport, 0), ] self.install_apply_actions(table_id=constants.LEARN_FROM_TUN, priority=1, actions=actions) # Egress unicast will be handled in table UCAST_TO_TUN, where remote # mac addresses will be learned. 
For now, just add a default flow that # will resubmit unknown unicasts to table FLOOD_TO_TUN to treat them # as broadcasts/multicasts self.install_goto(dest_table_id=constants.FLOOD_TO_TUN, table_id=constants.UCAST_TO_TUN) if arp_responder_enabled: # If none of the ARP entries correspond to the requested IP, the # broadcast-ed packet is resubmitted to the flooding table self.install_goto(dest_table_id=constants.FLOOD_TO_TUN, table_id=constants.ARP_RESPONDER) # FLOOD_TO_TUN will handle flooding in tunnels based on lvid, # for now, add a default drop action self.install_drop(table_id=constants.FLOOD_TO_TUN) @staticmethod def _local_vlan_match(_ofp, ofpp, tun_id): return ofpp.OFPMatch(tunnel_id=tun_id) def provision_local_vlan(self, network_type, lvid, segmentation_id, distributed=False): (_dp, ofp, ofpp) = self._get_dp() match = self._local_vlan_match(ofp, ofpp, segmentation_id) table_id = constants.TUN_TABLE[network_type] if distributed: dest_table_id = constants.DVR_NOT_LEARN else: dest_table_id = constants.LEARN_FROM_TUN actions = [ ofpp.OFPActionPushVlan(), ofpp.OFPActionSetField(vlan_vid=lvid | ofp.OFPVID_PRESENT), ] instructions = [ ofpp.OFPInstructionActions(ofp.OFPIT_APPLY_ACTIONS, actions), ofpp.OFPInstructionGotoTable(table_id=dest_table_id)] self.install_instructions(table_id=table_id, priority=1, match=match, instructions=instructions) def reclaim_local_vlan(self, network_type, segmentation_id): (_dp, ofp, ofpp) = self._get_dp() match = self._local_vlan_match(ofp, ofpp, segmentation_id) table_id = constants.TUN_TABLE[network_type] self.delete_flows(table_id=table_id, match=match) @staticmethod def _flood_to_tun_match(ofp, ofpp, vlan): return ofpp.OFPMatch(vlan_vid=vlan | ofp.OFPVID_PRESENT) def install_flood_to_tun(self, vlan, tun_id, ports): (_dp, ofp, ofpp) = self._get_dp() match = self._flood_to_tun_match(ofp, ofpp, vlan) actions = [ofpp.OFPActionPopVlan(), ofpp.OFPActionSetField(tunnel_id=tun_id)] for port in ports: actions.append(ofpp.OFPActionOutput(port, 0)) self.install_apply_actions(table_id=constants.FLOOD_TO_TUN, priority=1, match=match, actions=actions) def delete_flood_to_tun(self, vlan): (_dp, ofp, ofpp) = self._get_dp() match = self._flood_to_tun_match(ofp, ofpp, vlan) self.delete_flows(table_id=constants.FLOOD_TO_TUN, match=match) @staticmethod def _unicast_to_tun_match(ofp, ofpp, vlan, mac): return ofpp.OFPMatch(vlan_vid=vlan | ofp.OFPVID_PRESENT, eth_dst=mac) def install_unicast_to_tun(self, vlan, tun_id, port, mac): (_dp, ofp, ofpp) = self._get_dp() match = self._unicast_to_tun_match(ofp, ofpp, vlan, mac) actions = [ofpp.OFPActionPopVlan(), ofpp.OFPActionSetField(tunnel_id=tun_id), ofpp.OFPActionOutput(port, 0)] self.install_apply_actions(table_id=constants.UCAST_TO_TUN, priority=2, match=match, actions=actions) def delete_unicast_to_tun(self, vlan, mac): (_dp, ofp, ofpp) = self._get_dp() if mac is None: match = ofpp.OFPMatch(vlan_vid=vlan | ofp.OFPVID_PRESENT) else: match = self._unicast_to_tun_match(ofp, ofpp, vlan, mac) self.delete_flows(table_id=constants.UCAST_TO_TUN, match=match) @staticmethod def _arp_responder_match(ofp, ofpp, vlan, ip): # REVISIT(yamamoto): add arp_op=arp.ARP_REQUEST matcher? 
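        # Match ARP whose target protocol address (arp_tpa) is this IP
        # on the given local VLAN; without the arp_op matcher noted
        # above, ARP replies for the address would match as well.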
return ofpp.OFPMatch(vlan_vid=vlan | ofp.OFPVID_PRESENT, eth_type=ether_types.ETH_TYPE_ARP, arp_tpa=ip) def install_arp_responder(self, vlan, ip, mac): (dp, ofp, ofpp) = self._get_dp() match = self._arp_responder_match(ofp, ofpp, vlan, ip) actions = [ofpp.OFPActionSetField(arp_op=arp.ARP_REPLY), ofpp.NXActionRegMove(src_field='arp_sha', dst_field='arp_tha', n_bits=48), ofpp.NXActionRegMove(src_field='arp_spa', dst_field='arp_tpa', n_bits=32), ofpp.OFPActionSetField(arp_sha=mac), ofpp.OFPActionSetField(arp_spa=ip), ofpp.NXActionRegMove(src_field='eth_src', dst_field='eth_dst', n_bits=48), ofpp.OFPActionSetField(eth_src_nxm=mac), ofpp.OFPActionOutput(ofp.OFPP_IN_PORT, 0)] self.install_apply_actions(table_id=constants.ARP_RESPONDER, priority=1, match=match, actions=actions) def delete_arp_responder(self, vlan, ip): (_dp, ofp, ofpp) = self._get_dp() if ip is None: # REVISIT(yamamoto): add arp_op=arp.ARP_REQUEST matcher? match = ofpp.OFPMatch(vlan_vid=vlan | ofp.OFPVID_PRESENT, eth_type=ether_types.ETH_TYPE_ARP) else: match = self._arp_responder_match(ofp, ofpp, vlan, ip) self.delete_flows(table_id=constants.ARP_RESPONDER, match=match) def setup_tunnel_port(self, network_type, port): self.install_goto(dest_table_id=constants.TUN_TABLE[network_type], priority=1, in_port=port) def cleanup_tunnel_port(self, port): self.delete_flows(in_port=port) def add_dvr_mac_tun(self, mac, port): self.install_output(table_id=constants.DVR_NOT_LEARN, priority=1, eth_src=mac, port=port) def remove_dvr_mac_tun(self, mac): # REVISIT(yamamoto): match in_port as well? self.delete_flows(table_id=constants.DVR_NOT_LEARN, eth_src=mac) def deferred(self): # REVISIT(yamamoto): This is for API compat with "ovs-ofctl" # interface. Consider removing this mechanism when obsoleting # "ovs-ofctl" interface. # For "ovs-ofctl" interface, "deferred" mechanism would improve # performance by batching flow-mods with a single ovs-ofctl command # invocation. # On the other hand, for this "native" interface, the overheads of # each flow-mods are already minimum and batching doesn't make much # sense. Thus this method is left as no-op. # It might be possible to send multiple flow-mods with a single # barrier. But it's unclear that level of performance optimization # is desirable while it would certainly complicate error handling. return self def __enter__(self): # REVISIT(yamamoto): See the comment on deferred(). return self def __exit__(self, exc_type, exc_value, traceback): # REVISIT(yamamoto): See the comment on deferred(). pass neutron-8.4.0/neutron/plugins/ml2/drivers/openvswitch/agent/openflow/native/br_int.py0000664000567000056710000002374313044372760032335 0ustar jenkinsjenkins00000000000000# Copyright (C) 2014,2015 VA Linux Systems Japan K.K. # Copyright (C) 2014,2015 YAMAMOTO Takashi # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
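# Note on the deferred() shim above (an illustrative aside, not part of
# the original sources): because the native OVSTunnelBridge returns
# itself from deferred() and its __enter__/__exit__ are no-ops, callers
# written against the "ovs-ofctl" deferred API run unchanged, e.g.
# (lvid, tun_id and ofports are hypothetical):
#
#     with br.deferred() as deferred_br:   # deferred_br is just br
#         deferred_br.install_flood_to_tun(lvid, tun_id, ofports)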
""" * references ** OVS agent https://wiki.openstack.org/wiki/Ovs-flow-logic """ import netaddr from oslo_log import log as logging from ryu.lib.packet import ether_types from ryu.lib.packet import icmpv6 from ryu.lib.packet import in_proto from neutron._i18n import _LE from neutron.plugins.common import constants as p_const from neutron.plugins.ml2.drivers.openvswitch.agent.common import constants from neutron.plugins.ml2.drivers.openvswitch.agent.openflow.native \ import ovs_bridge LOG = logging.getLogger(__name__) class OVSIntegrationBridge(ovs_bridge.OVSAgentBridge): """openvswitch agent br-int specific logic.""" def setup_default_table(self): self.install_normal() self.setup_canary_table() self.install_drop(table_id=constants.ARP_SPOOF_TABLE) def setup_canary_table(self): self.install_drop(constants.CANARY_TABLE) def check_canary_table(self): try: flows = self.dump_flows(constants.CANARY_TABLE) except RuntimeError: LOG.exception(_LE("Failed to communicate with the switch")) return constants.OVS_DEAD return constants.OVS_NORMAL if flows else constants.OVS_RESTARTED @staticmethod def _local_vlan_match(_ofp, ofpp, port, vlan_vid): return ofpp.OFPMatch(in_port=port, vlan_vid=vlan_vid) def provision_local_vlan(self, port, lvid, segmentation_id): (_dp, ofp, ofpp) = self._get_dp() if segmentation_id is None: vlan_vid = ofp.OFPVID_NONE actions = [ofpp.OFPActionPushVlan()] else: vlan_vid = segmentation_id | ofp.OFPVID_PRESENT actions = [] match = self._local_vlan_match(ofp, ofpp, port, vlan_vid) actions += [ ofpp.OFPActionSetField(vlan_vid=lvid | ofp.OFPVID_PRESENT), ofpp.OFPActionOutput(ofp.OFPP_NORMAL, 0), ] self.install_apply_actions(priority=3, match=match, actions=actions) def reclaim_local_vlan(self, port, segmentation_id): (_dp, ofp, ofpp) = self._get_dp() if segmentation_id is None: vlan_vid = ofp.OFPVID_NONE else: vlan_vid = segmentation_id | ofp.OFPVID_PRESENT match = self._local_vlan_match(ofp, ofpp, port, vlan_vid) self.delete_flows(match=match) @staticmethod def _dvr_to_src_mac_match(ofp, ofpp, vlan_tag, dst_mac): return ofpp.OFPMatch(vlan_vid=vlan_tag | ofp.OFPVID_PRESENT, eth_dst=dst_mac) @staticmethod def _dvr_to_src_mac_table_id(network_type): if network_type == p_const.TYPE_VLAN: return constants.DVR_TO_SRC_MAC_VLAN else: return constants.DVR_TO_SRC_MAC def install_dvr_to_src_mac(self, network_type, vlan_tag, gateway_mac, dst_mac, dst_port): table_id = self._dvr_to_src_mac_table_id(network_type) (_dp, ofp, ofpp) = self._get_dp() match = self._dvr_to_src_mac_match(ofp, ofpp, vlan_tag=vlan_tag, dst_mac=dst_mac) actions = [ ofpp.OFPActionPopVlan(), ofpp.OFPActionSetField(eth_src=gateway_mac), ofpp.OFPActionOutput(dst_port, 0), ] self.install_apply_actions(table_id=table_id, priority=4, match=match, actions=actions) def delete_dvr_to_src_mac(self, network_type, vlan_tag, dst_mac): table_id = self._dvr_to_src_mac_table_id(network_type) (_dp, ofp, ofpp) = self._get_dp() match = self._dvr_to_src_mac_match(ofp, ofpp, vlan_tag=vlan_tag, dst_mac=dst_mac) self.delete_flows(table_id=table_id, match=match) def add_dvr_mac_vlan(self, mac, port): self.install_goto(table_id=constants.LOCAL_SWITCHING, priority=4, in_port=port, eth_src=mac, dest_table_id=constants.DVR_TO_SRC_MAC_VLAN) def remove_dvr_mac_vlan(self, mac): # REVISIT(yamamoto): match in_port as well? 
self.delete_flows(table_id=constants.LOCAL_SWITCHING, eth_src=mac) def add_dvr_mac_tun(self, mac, port): self.install_goto(table_id=constants.LOCAL_SWITCHING, priority=2, in_port=port, eth_src=mac, dest_table_id=constants.DVR_TO_SRC_MAC) def remove_dvr_mac_tun(self, mac, port): self.delete_flows(table_id=constants.LOCAL_SWITCHING, in_port=port, eth_src=mac) @staticmethod def _arp_reply_match(ofp, ofpp, port): return ofpp.OFPMatch(in_port=port, eth_type=ether_types.ETH_TYPE_ARP) @staticmethod def _icmpv6_reply_match(ofp, ofpp, port): return ofpp.OFPMatch(in_port=port, eth_type=ether_types.ETH_TYPE_IPV6, ip_proto=in_proto.IPPROTO_ICMPV6, icmpv6_type=icmpv6.ND_NEIGHBOR_ADVERT) def install_icmpv6_na_spoofing_protection(self, port, ip_addresses): # Allow neighbor advertisements as long as they match addresses # that actually belong to the port. for ip in ip_addresses: masked_ip = self._cidr_to_ryu(ip) self.install_normal( table_id=constants.ARP_SPOOF_TABLE, priority=2, eth_type=ether_types.ETH_TYPE_IPV6, ip_proto=in_proto.IPPROTO_ICMPV6, icmpv6_type=icmpv6.ND_NEIGHBOR_ADVERT, ipv6_nd_target=masked_ip, in_port=port) # Now that the rules are ready, direct icmpv6 neighbor advertisement # traffic from the port into the anti-spoof table. (_dp, ofp, ofpp) = self._get_dp() match = self._icmpv6_reply_match(ofp, ofpp, port=port) self.install_goto(table_id=constants.LOCAL_SWITCHING, priority=10, match=match, dest_table_id=constants.ARP_SPOOF_TABLE) def set_allowed_macs_for_port(self, port, mac_addresses=None, allow_all=False): if allow_all: self.delete_flows(table_id=constants.LOCAL_SWITCHING, in_port=port) self.delete_flows(table_id=constants.MAC_SPOOF_TABLE, in_port=port) return mac_addresses = mac_addresses or [] for address in mac_addresses: self.install_normal( table_id=constants.MAC_SPOOF_TABLE, priority=2, eth_src=address, in_port=port) # normalize so we can see if macs are the same mac_addresses = {netaddr.EUI(mac) for mac in mac_addresses} flows = self.dump_flows(constants.MAC_SPOOF_TABLE) for flow in flows: matches = dict(flow.match.items()) if matches.get('in_port') != port: continue if not matches.get('eth_src'): continue flow_mac = matches['eth_src'] if netaddr.EUI(flow_mac) not in mac_addresses: self.delete_flows(table_id=constants.MAC_SPOOF_TABLE, in_port=port, eth_src=flow_mac) self.install_goto(table_id=constants.LOCAL_SWITCHING, priority=9, in_port=port, dest_table_id=constants.MAC_SPOOF_TABLE) def install_arp_spoofing_protection(self, port, ip_addresses): # allow ARP replies as long as they match addresses that actually # belong to the port. for ip in ip_addresses: masked_ip = self._cidr_to_ryu(ip) self.install_goto(table_id=constants.ARP_SPOOF_TABLE, priority=2, eth_type=ether_types.ETH_TYPE_ARP, arp_spa=masked_ip, in_port=port, dest_table_id=constants.MAC_SPOOF_TABLE) # Now that the rules are ready, direct ARP traffic from the port into # the anti-spoof table. # This strategy fails gracefully because OVS versions that can't match # on ARP headers will just process traffic normally. 
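        # All other ARP from the port goes to ARP_SPOOF_TABLE, where
        # only the arp_spa allow-rules installed above (which continue
        # to MAC_SPOOF_TABLE) let traffic through; the table's default,
        # installed by setup_default_table(), is drop.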
(_dp, ofp, ofpp) = self._get_dp() match = self._arp_reply_match(ofp, ofpp, port=port) self.install_goto(table_id=constants.LOCAL_SWITCHING, priority=10, match=match, dest_table_id=constants.ARP_SPOOF_TABLE) def delete_arp_spoofing_protection(self, port): (_dp, ofp, ofpp) = self._get_dp() match = self._arp_reply_match(ofp, ofpp, port=port) self.delete_flows(table_id=constants.LOCAL_SWITCHING, match=match) match = self._icmpv6_reply_match(ofp, ofpp, port=port) self.delete_flows(table_id=constants.LOCAL_SWITCHING, match=match) self.delete_arp_spoofing_allow_rules(port) def delete_arp_spoofing_allow_rules(self, port): self.delete_flows(table_id=constants.ARP_SPOOF_TABLE, in_port=port) neutron-8.4.0/neutron/plugins/ml2/drivers/openvswitch/agent/openflow/native/ofswitch.py0000664000567000056710000002000313044372760032670 0ustar jenkinsjenkins00000000000000# Copyright (C) 2014,2015 VA Linux Systems Japan K.K. # Copyright (C) 2014,2015 YAMAMOTO Takashi # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import eventlet import netaddr from oslo_config import cfg from oslo_log import log as logging from oslo_utils import excutils from oslo_utils import timeutils import ryu.app.ofctl.api as ofctl_api import ryu.exception as ryu_exc from neutron._i18n import _LE, _LW LOG = logging.getLogger(__name__) class OpenFlowSwitchMixin(object): """Mixin to provide common convenient routines for an openflow switch. NOTE(yamamoto): super() points to ovs_lib.OVSBridge. See ovs_bridge.py how this class is actually used. """ @staticmethod def _cidr_to_ryu(ip): n = netaddr.IPNetwork(ip) if n.hostmask: return (str(n.ip), str(n.netmask)) return str(n.ip) def __init__(self, *args, **kwargs): self._app = kwargs.pop('ryu_app') super(OpenFlowSwitchMixin, self).__init__(*args, **kwargs) def _get_dp_by_dpid(self, dpid_int): """Get Ryu datapath object for the switch.""" timeout_sec = cfg.CONF.OVS.of_connect_timeout start_time = timeutils.now() while True: dp = ofctl_api.get_datapath(self._app, dpid_int) if dp is not None: break # The switch has not established a connection to us. # Wait for a little. 
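            # Poll roughly once per second (eventlet.sleep(1) below)
            # until OVS.of_connect_timeout expires, then raise
            # RuntimeError for compatibility with ovs_lib callers.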
if timeutils.now() > start_time + timeout_sec: m = _LE("Switch connection timeout") LOG.error(m) # NOTE(yamamoto): use RuntimeError for compat with ovs_lib raise RuntimeError(m) eventlet.sleep(1) return dp def _send_msg(self, msg, reply_cls=None, reply_multi=False): timeout_sec = cfg.CONF.OVS.of_request_timeout timeout = eventlet.timeout.Timeout(seconds=timeout_sec) try: result = ofctl_api.send_msg(self._app, msg, reply_cls, reply_multi) except ryu_exc.RyuException as e: m = _LE("ofctl request %(request)s error %(error)s") % { "request": msg, "error": e, } LOG.error(m) # NOTE(yamamoto): use RuntimeError for compat with ovs_lib raise RuntimeError(m) except eventlet.timeout.Timeout as e: with excutils.save_and_reraise_exception() as ctx: if e is timeout: ctx.reraise = False m = _LE("ofctl request %(request)s timed out") % { "request": msg, } LOG.error(m) # NOTE(yamamoto): use RuntimeError for compat with ovs_lib raise RuntimeError(m) finally: timeout.cancel() LOG.debug("ofctl request %(request)s result %(result)s", {"request": msg, "result": result}) return result @staticmethod def _match(_ofp, ofpp, match, **match_kwargs): if match is not None: return match return ofpp.OFPMatch(**match_kwargs) def delete_flows(self, table_id=None, strict=False, priority=0, cookie=0, cookie_mask=0, match=None, **match_kwargs): (dp, ofp, ofpp) = self._get_dp() if table_id is None: table_id = ofp.OFPTT_ALL match = self._match(ofp, ofpp, match, **match_kwargs) if strict: cmd = ofp.OFPFC_DELETE_STRICT else: cmd = ofp.OFPFC_DELETE msg = ofpp.OFPFlowMod(dp, command=cmd, cookie=cookie, cookie_mask=cookie_mask, table_id=table_id, match=match, priority=priority, out_group=ofp.OFPG_ANY, out_port=ofp.OFPP_ANY) self._send_msg(msg) def dump_flows(self, table_id=None): (dp, ofp, ofpp) = self._get_dp() if table_id is None: table_id = ofp.OFPTT_ALL msg = ofpp.OFPFlowStatsRequest(dp, table_id=table_id) replies = self._send_msg(msg, reply_cls=ofpp.OFPFlowStatsReply, reply_multi=True) flows = [] for rep in replies: flows += rep.body return flows def cleanup_flows(self): cookies = set([f.cookie for f in self.dump_flows()]) - \ self.reserved_cookies for c in cookies: LOG.warning(_LW("Deleting flow with cookie 0x%(cookie)x"), {'cookie': c}) self.delete_flows(cookie=c, cookie_mask=((1 << 64) - 1)) def install_goto_next(self, table_id): self.install_goto(table_id=table_id, dest_table_id=table_id + 1) def install_output(self, port, table_id=0, priority=0, match=None, **match_kwargs): (_dp, ofp, ofpp) = self._get_dp() actions = [ofpp.OFPActionOutput(port, 0)] instructions = [ofpp.OFPInstructionActions( ofp.OFPIT_APPLY_ACTIONS, actions)] self.install_instructions(table_id=table_id, priority=priority, instructions=instructions, match=match, **match_kwargs) def install_normal(self, table_id=0, priority=0, match=None, **match_kwargs): (_dp, ofp, _ofpp) = self._get_dp() self.install_output(port=ofp.OFPP_NORMAL, table_id=table_id, priority=priority, match=match, **match_kwargs) def install_goto(self, dest_table_id, table_id=0, priority=0, match=None, **match_kwargs): (_dp, _ofp, ofpp) = self._get_dp() instructions = [ofpp.OFPInstructionGotoTable(table_id=dest_table_id)] self.install_instructions(table_id=table_id, priority=priority, instructions=instructions, match=match, **match_kwargs) def install_drop(self, table_id=0, priority=0, match=None, **match_kwargs): self.install_instructions(table_id=table_id, priority=priority, instructions=[], match=match, **match_kwargs) def install_instructions(self, instructions, table_id=0, priority=0, 
match=None, **match_kwargs): (dp, ofp, ofpp) = self._get_dp() match = self._match(ofp, ofpp, match, **match_kwargs) msg = ofpp.OFPFlowMod(dp, table_id=table_id, cookie=self.default_cookie, match=match, priority=priority, instructions=instructions) self._send_msg(msg) def install_apply_actions(self, actions, table_id=0, priority=0, match=None, **match_kwargs): (dp, ofp, ofpp) = self._get_dp() instructions = [ ofpp.OFPInstructionActions(ofp.OFPIT_APPLY_ACTIONS, actions), ] self.install_instructions(table_id=table_id, priority=priority, match=match, instructions=instructions, **match_kwargs) neutron-8.4.0/neutron/plugins/ml2/drivers/openvswitch/agent/openflow/native/br_phys.py0000664000567000056710000000536013044372736032524 0ustar jenkinsjenkins00000000000000# Copyright (C) 2014,2015 VA Linux Systems Japan K.K. # Copyright (C) 2014,2015 YAMAMOTO Takashi # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron.plugins.ml2.drivers.openvswitch.agent.common import constants from neutron.plugins.ml2.drivers.openvswitch.agent.openflow.native \ import br_dvr_process from neutron.plugins.ml2.drivers.openvswitch.agent.openflow.native \ import ovs_bridge class OVSPhysicalBridge(ovs_bridge.OVSAgentBridge, br_dvr_process.OVSDVRProcessMixin): """openvswitch agent physical bridge specific logic.""" # Used by OVSDVRProcessMixin dvr_process_table_id = constants.DVR_PROCESS_VLAN dvr_process_next_table_id = constants.LOCAL_VLAN_TRANSLATION def setup_default_table(self): self.install_normal() @staticmethod def _local_vlan_match(ofp, ofpp, port, lvid): return ofpp.OFPMatch(in_port=port, vlan_vid=lvid | ofp.OFPVID_PRESENT) def provision_local_vlan(self, port, lvid, segmentation_id, distributed): table_id = constants.LOCAL_VLAN_TRANSLATION if distributed else 0 (_dp, ofp, ofpp) = self._get_dp() match = self._local_vlan_match(ofp, ofpp, port, lvid) if segmentation_id is None: actions = [ofpp.OFPActionPopVlan()] else: vlan_vid = segmentation_id | ofp.OFPVID_PRESENT actions = [ofpp.OFPActionSetField(vlan_vid=vlan_vid)] actions += [ofpp.OFPActionOutput(ofp.OFPP_NORMAL, 0)] self.install_apply_actions(table_id=table_id, priority=4, match=match, actions=actions) def reclaim_local_vlan(self, port, lvid): (_dp, ofp, ofpp) = self._get_dp() match = self._local_vlan_match(ofp, ofpp, port, lvid) self.delete_flows(match=match) def add_dvr_mac_vlan(self, mac, port): self.install_output(table_id=constants.DVR_NOT_LEARN_VLAN, priority=2, eth_src=mac, port=port) def remove_dvr_mac_vlan(self, mac): # REVISIT(yamamoto): match in_port as well? self.delete_flows(table_id=constants.DVR_NOT_LEARN_VLAN, eth_src=mac) neutron-8.4.0/neutron/plugins/ml2/drivers/openvswitch/agent/openflow/native/main.py0000664000567000056710000000235113044372736031777 0ustar jenkinsjenkins00000000000000# Copyright (C) 2015 VA Linux Systems Japan K.K. # Copyright (C) 2015 YAMAMOTO Takashi # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import cfg from ryu.base import app_manager from ryu import cfg as ryu_cfg cfg.CONF.import_group( 'OVS', 'neutron.plugins.ml2.drivers.openvswitch.agent.common.config') def init_config(): ryu_cfg.CONF(project='ryu', args=[]) ryu_cfg.CONF.ofp_listen_host = cfg.CONF.OVS.of_listen_address ryu_cfg.CONF.ofp_tcp_listen_port = cfg.CONF.OVS.of_listen_port def main(): app_manager.AppManager.run_apps([ 'neutron.plugins.ml2.drivers.openvswitch.agent.' 'openflow.native.ovs_ryuapp', ]) neutron-8.4.0/neutron/plugins/ml2/drivers/openvswitch/agent/openflow/native/ovs_bridge.py0000664000567000056710000001024013044372736033172 0ustar jenkinsjenkins00000000000000# Copyright (C) 2014,2015 VA Linux Systems Japan K.K. # Copyright (C) 2014,2015 YAMAMOTO Takashi # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_log import log as logging from oslo_utils import excutils from neutron._i18n import _LI from neutron.agent.common import ovs_lib from neutron.plugins.ml2.drivers.openvswitch.agent.common import constants \ as ovs_consts from neutron.plugins.ml2.drivers.openvswitch.agent.openflow \ import br_cookie from neutron.plugins.ml2.drivers.openvswitch.agent.openflow.native \ import ofswitch LOG = logging.getLogger(__name__) class OVSAgentBridge(ofswitch.OpenFlowSwitchMixin, br_cookie.OVSBridgeCookieMixin, ovs_lib.OVSBridge): """Common code for bridges used by OVS agent""" _cached_dpid = None def _get_dp(self): """Get (dp, ofp, ofpp) tuple for the switch. A convenient method for openflow message composers. """ while True: if self._cached_dpid is None: dpid_str = self.get_datapath_id() LOG.info(_LI("Bridge %(br_name)s has datapath-ID %(dpid)s"), {"br_name": self.br_name, "dpid": dpid_str}) self._cached_dpid = int(dpid_str, 16) try: dp = self._get_dp_by_dpid(self._cached_dpid) return dp, dp.ofproto, dp.ofproto_parser except RuntimeError: with excutils.save_and_reraise_exception() as ctx: # Retry if dpid has been changed. # NOTE(yamamoto): Open vSwitch change its dpid on # some events. # REVISIT(yamamoto): Consider to set dpid statically. 
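# The RuntimeError raised by _get_dp_by_dpid() is re-raised unless the
# bridge's datapath-ID changed while we were waiting; in that case
# ctx.reraise is cleared below and the outer while loop retries the
# lookup with the freshly read value.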
# NOTE: OVSDB reports datapath_id as 16 zero-padded hex digits, so # zero-pad the cached value the same way before comparing; otherwise a # dpid with leading zeros would always look "changed". old_dpid_str = format(self._cached_dpid, '016x') new_dpid_str = self.get_datapath_id() if new_dpid_str != old_dpid_str: LOG.info(_LI("Bridge %(br_name)s changed its " "datapath-ID from %(old)s to %(new)s"), { "br_name": self.br_name, "old": old_dpid_str, "new": new_dpid_str, }) ctx.reraise = False self._cached_dpid = int(new_dpid_str, 16) def setup_controllers(self, conf): controllers = [ "tcp:%(address)s:%(port)s" % { "address": conf.OVS.of_listen_address, "port": conf.OVS.of_listen_port, } ] self.set_protocols([ovs_consts.OPENFLOW10, ovs_consts.OPENFLOW13]) self.set_controller(controllers) # NOTE(ivc): Force "out-of-band" controller connection mode (see # "In-Band Control" [1]). # # By default openvswitch uses "in-band" controller connection mode # which adds hidden OpenFlow rules (only visible by issuing ovs-appctl # bridge/dump-flows <br>
) and leads to a network loop on br-tun. As of # now the OF controller is hosted locally with OVS which fits the # "out-of-band" mode. If the remote OF controller is ever to be # supported by openvswitch agent in the future, "In-Band Control" [1] # should be taken into consideration for physical bridge only, but # br-int and br-tun must be configured with the "out-of-band" # controller connection mode. # # [1] https://github.com/openvswitch/ovs/blob/master/DESIGN.md self.set_controllers_connection_mode("out-of-band") def drop_port(self, in_port): self.install_drop(priority=2, in_port=in_port) neutron-8.4.0/neutron/plugins/ml2/drivers/openvswitch/agent/openflow/__init__.py0000664000567000056710000000000013044372736031311 0ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/plugins/ml2/drivers/openvswitch/agent/openflow/br_cookie.py0000664000567000056710000000345013044372736031522 0ustar jenkinsjenkins00000000000000# Copyright 2016 Intel Corporation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron.agent.common import ovs_lib class OVSBridgeCookieMixin(object): '''Mixin to provide cookie retention functionality to the OVSAgentBridge ''' def __init__(self, *args, **kwargs): super(OVSBridgeCookieMixin, self).__init__(*args, **kwargs) self._reserved_cookies = set() @property def reserved_cookies(self): if self._default_cookie not in self._reserved_cookies: self._reserved_cookies.add(self._default_cookie) return set(self._reserved_cookies) def request_cookie(self): if self._default_cookie not in self._reserved_cookies: self._reserved_cookies.add(self._default_cookie) uuid_stamp = ovs_lib.generate_random_cookie() while uuid_stamp in self._reserved_cookies: uuid_stamp = ovs_lib.generate_random_cookie() self._reserved_cookies.add(uuid_stamp) return uuid_stamp def set_agent_uuid_stamp(self, val): self._reserved_cookies.add(val) if self._default_cookie in self._reserved_cookies: self._reserved_cookies.remove(self._default_cookie) super(OVSBridgeCookieMixin, self).set_agent_uuid_stamp(val) neutron-8.4.0/neutron/plugins/ml2/drivers/openvswitch/agent/__init__.py0000664000567000056710000000000013044372736027460 0ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/plugins/ml2/drivers/openvswitch/agent/ovs_dvr_neutron_agent.py0000664000567000056710000007341513044372760032354 0ustar jenkinsjenkins00000000000000# Copyright 2014, Hewlett-Packard Development Company, L.P. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
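# NOTE: a minimal, self-contained sketch of the cookie-reservation
# pattern implemented by OVSBridgeCookieMixin above: draw random 64-bit
# OpenFlow cookies until one misses the reserved set. CookieRegistry and
# _random_cookie are illustrative names only, not neutron API (the agent
# itself uses ovs_lib.generate_random_cookie()).
import random

class CookieRegistry(object):
    def __init__(self, default_cookie=0):
        # The default cookie is always reserved, so per-feature cookies
        # can never collide with flows installed without an explicit one.
        self._reserved = {default_cookie}

    @staticmethod
    def _random_cookie():
        # OpenFlow cookies are unsigned 64-bit values.
        return random.getrandbits(64)

    def request(self):
        cookie = self._random_cookie()
        while cookie in self._reserved:
            cookie = self._random_cookie()
        self._reserved.add(cookie)
        return cookie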
import sys from oslo_config import cfg from oslo_log import log as logging import oslo_messaging from oslo_utils import excutils from neutron._i18n import _LE, _LI, _LW from neutron.common import constants as n_const from neutron.common import utils as n_utils from neutron.plugins.common import constants as p_const from neutron.plugins.ml2.drivers.openvswitch.agent.common import constants LOG = logging.getLogger(__name__) cfg.CONF.import_group('AGENT', 'neutron.plugins.ml2.drivers.openvswitch.' 'agent.common.config') # A class to represent a DVR-hosted subnet including vif_ports resident on # that subnet class LocalDVRSubnetMapping(object): def __init__(self, subnet, csnat_ofport=constants.OFPORT_INVALID): # set of compute ports on this dvr subnet self.compute_ports = {} self.subnet = subnet self.csnat_ofport = csnat_ofport self.dvr_owned = False def __str__(self): return ("subnet = %s compute_ports = %s csnat_port = %s" " is_dvr_owned = %s" % (self.subnet, self.get_compute_ofports(), self.get_csnat_ofport(), self.is_dvr_owned())) def get_subnet_info(self): return self.subnet def set_dvr_owned(self, owned): self.dvr_owned = owned def is_dvr_owned(self): return self.dvr_owned def add_compute_ofport(self, vif_id, ofport): self.compute_ports[vif_id] = ofport def remove_compute_ofport(self, vif_id): self.compute_ports.pop(vif_id, 0) def remove_all_compute_ofports(self): self.compute_ports.clear() def get_compute_ofports(self): return self.compute_ports def set_csnat_ofport(self, ofport): self.csnat_ofport = ofport def get_csnat_ofport(self): return self.csnat_ofport class OVSPort(object): def __init__(self, id, ofport, mac, device_owner): self.id = id self.mac = mac self.ofport = ofport self.subnets = set() self.device_owner = device_owner def __str__(self): return ("OVSPort: id = %s, ofport = %s, mac = %s, " "device_owner = %s, subnets = %s" % (self.id, self.ofport, self.mac, self.device_owner, self.subnets)) def add_subnet(self, subnet_id): self.subnets.add(subnet_id) def remove_subnet(self, subnet_id): self.subnets.remove(subnet_id) def remove_all_subnets(self): self.subnets.clear() def get_subnets(self): return self.subnets def get_device_owner(self): return self.device_owner def get_mac(self): return self.mac def get_ofport(self): return self.ofport class OVSDVRNeutronAgent(object): ''' Implements OVS-based DVR(Distributed Virtual Router), for overlay networks. 
''' # history # 1.0 Initial version def __init__(self, context, plugin_rpc, integ_br, tun_br, bridge_mappings, phys_brs, int_ofports, phys_ofports, patch_int_ofport=constants.OFPORT_INVALID, patch_tun_ofport=constants.OFPORT_INVALID, host=None, enable_tunneling=False, enable_distributed_routing=False): self.context = context self.plugin_rpc = plugin_rpc self.host = host self.enable_tunneling = enable_tunneling self.enable_distributed_routing = enable_distributed_routing self.bridge_mappings = bridge_mappings self.phys_brs = phys_brs self.int_ofports = int_ofports self.phys_ofports = phys_ofports self.reset_ovs_parameters(integ_br, tun_br, patch_int_ofport, patch_tun_ofport) self.reset_dvr_parameters() self.dvr_mac_address = None if self.enable_distributed_routing: self.get_dvr_mac_address() self.conf = cfg.CONF def setup_dvr_flows(self): self.setup_dvr_flows_on_integ_br() self.setup_dvr_flows_on_tun_br() self.setup_dvr_flows_on_phys_br() self.setup_dvr_mac_flows_on_all_brs() def reset_ovs_parameters(self, integ_br, tun_br, patch_int_ofport, patch_tun_ofport): '''Reset the openvswitch parameters''' self.int_br = integ_br self.tun_br = tun_br self.patch_int_ofport = patch_int_ofport self.patch_tun_ofport = patch_tun_ofport def reset_dvr_parameters(self): '''Reset the DVR parameters''' self.local_dvr_map = {} self.local_csnat_map = {} self.local_ports = {} self.registered_dvr_macs = set() def get_dvr_mac_address(self): try: self.get_dvr_mac_address_with_retry() except oslo_messaging.RemoteError as e: LOG.error(_LE('L2 agent could not get DVR MAC address at ' 'startup due to RPC error. It happens when the ' 'server does not support this RPC API. Detailed ' 'message: %s'), e) except oslo_messaging.MessagingTimeout: LOG.error(_LE('DVR: Failed to obtain a valid local ' 'DVR MAC address')) if not self.in_distributed_mode(): sys.exit(1) def get_dvr_mac_address_with_retry(self): # Get the local DVR MAC Address from the Neutron Server. # This is the first place where we contact the server on startup # so retry in case it's not ready to respond for retry_count in reversed(range(5)): try: details = self.plugin_rpc.get_dvr_mac_address_by_host( self.context, self.host) except oslo_messaging.MessagingTimeout as e: with excutils.save_and_reraise_exception() as ctx: if retry_count > 0: ctx.reraise = False LOG.warning(_LW('L2 agent could not get DVR MAC ' 'address from server. Retrying. 
' 'Detailed message: %s'), e) else: LOG.debug("L2 Agent DVR: Received response for " "get_dvr_mac_address_by_host() from " "plugin: %r", details) self.dvr_mac_address = details['mac_address'] return def setup_dvr_flows_on_integ_br(self): '''Set up the initial DVR flows on br-int''' if not self.in_distributed_mode(): return LOG.info(_LI("L2 Agent operating in DVR Mode with MAC %s"), self.dvr_mac_address) # Remove existing flows in integration bridge if self.conf.AGENT.drop_flows_on_start: self.int_br.delete_flows() # Add a canary flow to int_br to track OVS restarts self.int_br.setup_canary_table() # Insert 'drop' action as the default for Table DVR_TO_SRC_MAC self.int_br.install_drop(table_id=constants.DVR_TO_SRC_MAC, priority=1) self.int_br.install_drop(table_id=constants.DVR_TO_SRC_MAC_VLAN, priority=1) # Insert 'normal' action as the default for Table LOCAL_SWITCHING self.int_br.install_normal(table_id=constants.LOCAL_SWITCHING, priority=1) for physical_network in self.bridge_mappings: self.int_br.install_drop(table_id=constants.LOCAL_SWITCHING, priority=2, in_port=self.int_ofports[ physical_network]) def setup_dvr_flows_on_tun_br(self): '''Set up the initial DVR flows on br-tun''' if not self.enable_tunneling or not self.in_distributed_mode(): return self.tun_br.install_goto(dest_table_id=constants.DVR_PROCESS, priority=1, in_port=self.patch_int_ofport) # table-miss should be sent to learning table self.tun_br.install_goto(table_id=constants.DVR_NOT_LEARN, dest_table_id=constants.LEARN_FROM_TUN) self.tun_br.install_goto(table_id=constants.DVR_PROCESS, dest_table_id=constants.PATCH_LV_TO_TUN) def setup_dvr_flows_on_phys_br(self): '''Set up the initial DVR flows on br-phys''' if not self.in_distributed_mode(): return for physical_network in self.bridge_mappings: self.phys_brs[physical_network].install_goto( in_port=self.phys_ofports[physical_network], priority=2, dest_table_id=constants.DVR_PROCESS_VLAN) self.phys_brs[physical_network].install_goto( priority=1, dest_table_id=constants.DVR_NOT_LEARN_VLAN) self.phys_brs[physical_network].install_goto( table_id=constants.DVR_PROCESS_VLAN, priority=0, dest_table_id=constants.LOCAL_VLAN_TRANSLATION) self.phys_brs[physical_network].install_drop( table_id=constants.LOCAL_VLAN_TRANSLATION, in_port=self.phys_ofports[physical_network], priority=2) self.phys_brs[physical_network].install_normal( table_id=constants.DVR_NOT_LEARN_VLAN, priority=1) def _add_dvr_mac_for_phys_br(self, physical_network, mac): self.int_br.add_dvr_mac_vlan(mac=mac, port=self.int_ofports[physical_network]) phys_br = self.phys_brs[physical_network] phys_br.add_dvr_mac_vlan(mac=mac, port=self.phys_ofports[physical_network]) def _remove_dvr_mac_for_phys_br(self, physical_network, mac): # REVISIT(yamamoto): match in_port as well? self.int_br.remove_dvr_mac_vlan(mac=mac) phys_br = self.phys_brs[physical_network] # REVISIT(yamamoto): match in_port as well? phys_br.remove_dvr_mac_vlan(mac=mac) def _add_dvr_mac_for_tun_br(self, mac): self.int_br.add_dvr_mac_tun(mac=mac, port=self.patch_tun_ofport) self.tun_br.add_dvr_mac_tun(mac=mac, port=self.patch_int_ofport) def _remove_dvr_mac_for_tun_br(self, mac): self.int_br.remove_dvr_mac_tun(mac=mac, port=self.patch_tun_ofport) # REVISIT(yamamoto): match in_port as well?
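# Unlike the int_br call above, the br-tun deletion matches only the
# source MAC (hence the REVISIT), so every flow carrying this DVR MAC
# is removed from the tunnel bridge regardless of in_port.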
self.tun_br.remove_dvr_mac_tun(mac=mac) def _add_dvr_mac(self, mac): for physical_network in self.bridge_mappings: self._add_dvr_mac_for_phys_br(physical_network, mac) if self.enable_tunneling: self._add_dvr_mac_for_tun_br(mac) LOG.debug("Added DVR MAC flow for %s", mac) self.registered_dvr_macs.add(mac) def _remove_dvr_mac(self, mac): for physical_network in self.bridge_mappings: self._remove_dvr_mac_for_phys_br(physical_network, mac) if self.enable_tunneling: self._remove_dvr_mac_for_tun_br(mac) LOG.debug("Removed DVR MAC flow for %s", mac) self.registered_dvr_macs.remove(mac) def setup_dvr_mac_flows_on_all_brs(self): if not self.in_distributed_mode(): LOG.debug("Not in distributed mode, ignoring invocation " "of get_dvr_mac_address_list() ") return dvr_macs = self.plugin_rpc.get_dvr_mac_address_list(self.context) LOG.debug("L2 Agent DVR: Received these MACs: %r", dvr_macs) for mac in dvr_macs: if mac['mac_address'] == self.dvr_mac_address: continue self._add_dvr_mac(mac['mac_address']) def dvr_mac_address_update(self, dvr_macs): if not self.dvr_mac_address: LOG.debug("Self mac unknown, ignoring this " "dvr_mac_address_update() ") return dvr_host_macs = set() for entry in dvr_macs: if entry['mac_address'] == self.dvr_mac_address: continue dvr_host_macs.add(entry['mac_address']) if dvr_host_macs == self.registered_dvr_macs: LOG.debug("DVR Mac address already up to date") return dvr_macs_added = dvr_host_macs - self.registered_dvr_macs dvr_macs_removed = self.registered_dvr_macs - dvr_host_macs for oldmac in dvr_macs_removed: self._remove_dvr_mac(oldmac) for newmac in dvr_macs_added: self._add_dvr_mac(newmac) def in_distributed_mode(self): return self.dvr_mac_address is not None def process_tunneled_network(self, network_type, lvid, segmentation_id): self.tun_br.provision_local_vlan( network_type=network_type, lvid=lvid, segmentation_id=segmentation_id, distributed=self.in_distributed_mode()) def _bind_distributed_router_interface_port(self, port, lvm, fixed_ips, device_owner): # since distributed router port must have only one fixed # IP, directly use fixed_ips[0] fixed_ip = fixed_ips[0] subnet_uuid = fixed_ip['subnet_id'] if subnet_uuid in self.local_dvr_map: ldm = self.local_dvr_map[subnet_uuid] else: # set up LocalDVRSubnetMapping available for this subnet subnet_info = self.plugin_rpc.get_subnet_for_dvr( self.context, subnet_uuid, fixed_ips=fixed_ips) if not subnet_info: LOG.warning(_LW("DVR: Unable to retrieve subnet information " "for subnet_id %s. 
The subnet or the gateway " "may have already been deleted"), subnet_uuid) return LOG.debug("get_subnet_for_dvr for subnet %(uuid)s " "returned with %(info)s", {"uuid": subnet_uuid, "info": subnet_info}) ldm = LocalDVRSubnetMapping(subnet_info) self.local_dvr_map[subnet_uuid] = ldm # DVR takes over ldm.set_dvr_owned(True) vlan_to_use = lvm.vlan if lvm.network_type == p_const.TYPE_VLAN: vlan_to_use = lvm.segmentation_id subnet_info = ldm.get_subnet_info() ip_version = subnet_info['ip_version'] local_compute_ports = ( self.plugin_rpc.get_ports_on_host_by_subnet( self.context, self.host, subnet_uuid)) LOG.debug("DVR: List of ports received from " "get_ports_on_host_by_subnet %s", local_compute_ports) vif_by_id = self.int_br.get_vifs_by_ids( [local_port['id'] for local_port in local_compute_ports]) for local_port in local_compute_ports: vif = vif_by_id.get(local_port['id']) if not vif: continue ldm.add_compute_ofport(vif.vif_id, vif.ofport) if vif.vif_id in self.local_ports: # ensure if a compute port is already on # a different dvr routed subnet # if yes, queue this subnet to that port comp_ovsport = self.local_ports[vif.vif_id] comp_ovsport.add_subnet(subnet_uuid) else: # the compute port is discovered first here that its on # a dvr routed subnet queue this subnet to that port comp_ovsport = OVSPort(vif.vif_id, vif.ofport, vif.vif_mac, local_port['device_owner']) comp_ovsport.add_subnet(subnet_uuid) self.local_ports[vif.vif_id] = comp_ovsport # create rule for just this vm port self.int_br.install_dvr_to_src_mac( network_type=lvm.network_type, vlan_tag=vlan_to_use, gateway_mac=subnet_info['gateway_mac'], dst_mac=comp_ovsport.get_mac(), dst_port=comp_ovsport.get_ofport()) if lvm.network_type == p_const.TYPE_VLAN: # TODO(vivek) remove the IPv6 related flows once SNAT is not # used for IPv6 DVR. br = self.phys_brs[lvm.physical_network] if lvm.network_type in constants.TUNNEL_NETWORK_TYPES: br = self.tun_br # TODO(vivek) remove the IPv6 related flows once SNAT is not # used for IPv6 DVR. if ip_version == 4: br.install_dvr_process_ipv4( vlan_tag=lvm.vlan, gateway_ip=subnet_info['gateway_ip']) else: br.install_dvr_process_ipv6( vlan_tag=lvm.vlan, gateway_mac=subnet_info['gateway_mac']) br.install_dvr_process( vlan_tag=lvm.vlan, vif_mac=port.vif_mac, dvr_mac_address=self.dvr_mac_address) # the dvr router interface is itself a port, so capture it # queue this subnet to that port. A subnet appears only once as # a router interface on any given router ovsport = OVSPort(port.vif_id, port.ofport, port.vif_mac, device_owner) ovsport.add_subnet(subnet_uuid) self.local_ports[port.vif_id] = ovsport def _bind_port_on_dvr_subnet(self, port, lvm, fixed_ips, device_owner): # Handle new compute port added use-case subnet_uuid = None for ips in fixed_ips: if ips['subnet_id'] not in self.local_dvr_map: continue subnet_uuid = ips['subnet_id'] ldm = self.local_dvr_map[subnet_uuid] if not ldm.is_dvr_owned(): # well this is CSNAT stuff, let dvr come in # and do plumbing for this vm later continue # This confirms that this compute port belongs # to a dvr hosted subnet. 
# Accommodate this VM Port into the existing rule in # the integration bridge LOG.debug("DVR: Plumbing compute port %s", port.vif_id) subnet_info = ldm.get_subnet_info() ldm.add_compute_ofport(port.vif_id, port.ofport) if port.vif_id in self.local_ports: # ensure if a compute port is already on a different # dvr routed subnet # if yes, queue this subnet to that port ovsport = self.local_ports[port.vif_id] ovsport.add_subnet(subnet_uuid) else: # the compute port is discovered first here that its # on a dvr routed subnet, queue this subnet to that port ovsport = OVSPort(port.vif_id, port.ofport, port.vif_mac, device_owner) ovsport.add_subnet(subnet_uuid) self.local_ports[port.vif_id] = ovsport vlan_to_use = lvm.vlan if lvm.network_type == p_const.TYPE_VLAN: vlan_to_use = lvm.segmentation_id # create a rule for this vm port self.int_br.install_dvr_to_src_mac( network_type=lvm.network_type, vlan_tag=vlan_to_use, gateway_mac=subnet_info['gateway_mac'], dst_mac=ovsport.get_mac(), dst_port=ovsport.get_ofport()) def _bind_centralized_snat_port_on_dvr_subnet(self, port, lvm, fixed_ips, device_owner): # since centralized-SNAT (CSNAT) port must have only one fixed # IP, directly use fixed_ips[0] fixed_ip = fixed_ips[0] if port.vif_id in self.local_ports: # throw an error if CSNAT port is already on a different # dvr routed subnet ovsport = self.local_ports[port.vif_id] subs = list(ovsport.get_subnets()) if subs[0] == fixed_ip['subnet_id']: return LOG.error(_LE("Centralized-SNAT port %(port)s on subnet " "%(port_subnet)s already seen on a different " "subnet %(orig_subnet)s"), { "port": port.vif_id, "port_subnet": fixed_ip['subnet_id'], "orig_subnet": subs[0], }) return subnet_uuid = fixed_ip['subnet_id'] ldm = None subnet_info = None if subnet_uuid not in self.local_dvr_map: # no csnat ports seen on this subnet - create csnat state # for this subnet subnet_info = self.plugin_rpc.get_subnet_for_dvr( self.context, subnet_uuid, fixed_ips=fixed_ips) if not subnet_info: LOG.warning(_LW("DVR: Unable to retrieve subnet information " "for subnet_id %s. 
The subnet or the gateway " "may have already been deleted"), subnet_uuid) return LOG.debug("get_subnet_for_dvr for subnet %(uuid)s " "returned with %(info)s", {"uuid": subnet_uuid, "info": subnet_info}) ldm = LocalDVRSubnetMapping(subnet_info, port.ofport) self.local_dvr_map[subnet_uuid] = ldm else: ldm = self.local_dvr_map[subnet_uuid] subnet_info = ldm.get_subnet_info() # Store csnat OF Port in the existing DVRSubnetMap ldm.set_csnat_ofport(port.ofport) # create ovsPort footprint for csnat port ovsport = OVSPort(port.vif_id, port.ofport, port.vif_mac, device_owner) ovsport.add_subnet(subnet_uuid) self.local_ports[port.vif_id] = ovsport vlan_to_use = lvm.vlan if lvm.network_type == p_const.TYPE_VLAN: vlan_to_use = lvm.segmentation_id self.int_br.install_dvr_to_src_mac( network_type=lvm.network_type, vlan_tag=vlan_to_use, gateway_mac=subnet_info['gateway_mac'], dst_mac=ovsport.get_mac(), dst_port=ovsport.get_ofport()) def bind_port_to_dvr(self, port, local_vlan_map, fixed_ips, device_owner): if not self.in_distributed_mode(): return if local_vlan_map.network_type not in (constants.TUNNEL_NETWORK_TYPES + [p_const.TYPE_VLAN]): LOG.debug("DVR: Port %s is with network_type %s not supported" " for dvr plumbing" % (port.vif_id, local_vlan_map.network_type)) return if (port.vif_id in self.local_ports and self.local_ports[port.vif_id].ofport != port.ofport): LOG.info(_LI("DVR: Port %(vif)s changed port number to " "%(ofport)s, rebinding."), {'vif': port.vif_id, 'ofport': port.ofport}) self.unbind_port_from_dvr(port, local_vlan_map) if device_owner == n_const.DEVICE_OWNER_DVR_INTERFACE: self._bind_distributed_router_interface_port(port, local_vlan_map, fixed_ips, device_owner) if device_owner and n_utils.is_dvr_serviced(device_owner): self._bind_port_on_dvr_subnet(port, local_vlan_map, fixed_ips, device_owner) if device_owner == n_const.DEVICE_OWNER_ROUTER_SNAT: self._bind_centralized_snat_port_on_dvr_subnet(port, local_vlan_map, fixed_ips, device_owner) def _unbind_distributed_router_interface_port(self, port, lvm): ovsport = self.local_ports[port.vif_id] # removal of distributed router interface subnet_ids = ovsport.get_subnets() subnet_set = set(subnet_ids) network_type = lvm.network_type physical_network = lvm.physical_network vlan_to_use = lvm.vlan if network_type == p_const.TYPE_VLAN: vlan_to_use = lvm.segmentation_id # ensure we process for all the subnets laid on this removed port for sub_uuid in subnet_set: if sub_uuid not in self.local_dvr_map: continue ldm = self.local_dvr_map[sub_uuid] subnet_info = ldm.get_subnet_info() ip_version = subnet_info['ip_version'] # DVR is no more owner ldm.set_dvr_owned(False) # remove all vm rules for this dvr subnet # clear of compute_ports altogether compute_ports = ldm.get_compute_ofports() for vif_id in compute_ports: comp_port = self.local_ports[vif_id] self.int_br.delete_dvr_to_src_mac( network_type=network_type, vlan_tag=vlan_to_use, dst_mac=comp_port.get_mac()) ldm.remove_all_compute_ofports() if ldm.get_csnat_ofport() == constants.OFPORT_INVALID: # if there is no csnat port for this subnet, remove # this subnet from local_dvr_map, as no dvr (or) csnat # ports available on this agent anymore self.local_dvr_map.pop(sub_uuid, None) if network_type == p_const.TYPE_VLAN: br = self.phys_brs[physical_network] if network_type in constants.TUNNEL_NETWORK_TYPES: br = self.tun_br if ip_version == 4: br.delete_dvr_process_ipv4( vlan_tag=lvm.vlan, gateway_ip=subnet_info['gateway_ip']) else: br.delete_dvr_process_ipv6( vlan_tag=lvm.vlan, 
gateway_mac=subnet_info['gateway_mac']) ovsport.remove_subnet(sub_uuid) if lvm.network_type == p_const.TYPE_VLAN: br = self.phys_brs[physical_network] if lvm.network_type in constants.TUNNEL_NETWORK_TYPES: br = self.tun_br br.delete_dvr_process(vlan_tag=lvm.vlan, vif_mac=port.vif_mac) # release port state self.local_ports.pop(port.vif_id, None) def _unbind_port_on_dvr_subnet(self, port, lvm): ovsport = self.local_ports[port.vif_id] # This confirms that this compute port being removed belonged # to a dvr hosted subnet. LOG.debug("DVR: Removing plumbing for compute port %s", port) subnet_ids = ovsport.get_subnets() # ensure we process for all the subnets laid on this port for sub_uuid in subnet_ids: if sub_uuid not in self.local_dvr_map: continue ldm = self.local_dvr_map[sub_uuid] ldm.remove_compute_ofport(port.vif_id) vlan_to_use = lvm.vlan if lvm.network_type == p_const.TYPE_VLAN: vlan_to_use = lvm.segmentation_id # first remove this vm port rule self.int_br.delete_dvr_to_src_mac( network_type=lvm.network_type, vlan_tag=vlan_to_use, dst_mac=ovsport.get_mac()) # release port state self.local_ports.pop(port.vif_id, None) def _unbind_centralized_snat_port_on_dvr_subnet(self, port, lvm): ovsport = self.local_ports[port.vif_id] # This confirms that this compute port being removed belonged # to a dvr hosted subnet. LOG.debug("DVR: Removing plumbing for csnat port %s", port) sub_uuid = list(ovsport.get_subnets())[0] # ensure we process for all the subnets laid on this port if sub_uuid not in self.local_dvr_map: return ldm = self.local_dvr_map[sub_uuid] ldm.set_csnat_ofport(constants.OFPORT_INVALID) vlan_to_use = lvm.vlan if lvm.network_type == p_const.TYPE_VLAN: vlan_to_use = lvm.segmentation_id # then remove csnat port rule self.int_br.delete_dvr_to_src_mac( network_type=lvm.network_type, vlan_tag=vlan_to_use, dst_mac=ovsport.get_mac()) if not ldm.is_dvr_owned(): # if not owned by DVR (only used for csnat), remove this # subnet state altogether self.local_dvr_map.pop(sub_uuid, None) # release port state self.local_ports.pop(port.vif_id, None) def unbind_port_from_dvr(self, vif_port, local_vlan_map): if not self.in_distributed_mode(): return # Handle port removed use-case if vif_port and vif_port.vif_id not in self.local_ports: LOG.debug("DVR: Non distributed port, ignoring %s", vif_port) return ovsport = self.local_ports[vif_port.vif_id] device_owner = ovsport.get_device_owner() if device_owner == n_const.DEVICE_OWNER_DVR_INTERFACE: self._unbind_distributed_router_interface_port(vif_port, local_vlan_map) if device_owner and n_utils.is_dvr_serviced(device_owner): self._unbind_port_on_dvr_subnet(vif_port, local_vlan_map) if device_owner == n_const.DEVICE_OWNER_ROUTER_SNAT: self._unbind_centralized_snat_port_on_dvr_subnet(vif_port, local_vlan_map) neutron-8.4.0/neutron/plugins/ml2/drivers/openvswitch/agent/ovs_neutron_agent.py0000664000567000056710000030466713044372760031507 0ustar jenkinsjenkins00000000000000# Copyright 2011 VMware, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
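# NOTE: a hedged sketch of the device_owner dispatch performed by
# bind_port_to_dvr()/unbind_port_from_dvr() above. The constant values
# mirror neutron.common.constants; dispatch_dvr_port() and its handler
# parameters are illustrative names, not neutron API. The checks are
# independent if statements rather than an if/elif chain, exactly as in
# the agent code.
DEVICE_OWNER_DVR_INTERFACE = 'network:router_interface_distributed'
DEVICE_OWNER_ROUTER_SNAT = 'network:router_centralized_snat'

def _noop():
    pass

def dispatch_dvr_port(device_owner, is_dvr_serviced,
                      handle_router_interface=_noop,
                      handle_dvr_serviced=_noop,
                      handle_csnat=_noop):
    # Distributed router interface ports get per-subnet plumbing.
    if device_owner == DEVICE_OWNER_DVR_INTERFACE:
        handle_router_interface()
    # DVR-serviced owners (e.g. compute, DHCP, LB) get VM-port rules.
    if device_owner and is_dvr_serviced(device_owner):
        handle_dvr_serviced()
    # Centralized SNAT ports get their own source-MAC rewrite rule.
    if device_owner == DEVICE_OWNER_ROUTER_SNAT:
        handle_csnat()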
import base64 import collections import functools import hashlib import signal import sys import time import netaddr from oslo_config import cfg from oslo_log import log as logging import oslo_messaging from oslo_service import loopingcall from oslo_service import systemd import six from six import moves from neutron._i18n import _, _LE, _LI, _LW from neutron.agent.common import ip_lib from neutron.agent.common import ovs_lib from neutron.agent.common import polling from neutron.agent.common import utils from neutron.agent.l2.extensions import manager as ext_manager from neutron.agent import rpc as agent_rpc from neutron.agent import securitygroups_rpc as sg_rpc from neutron.api.rpc.callbacks import resources from neutron.api.rpc.handlers import dvr_rpc from neutron.common import config from neutron.common import constants as n_const from neutron.common import ipv6_utils as ipv6 from neutron.common import topics from neutron.common import utils as n_utils from neutron import context from neutron.extensions import portbindings from neutron.plugins.common import constants as p_const from neutron.plugins.common import utils as p_utils from neutron.plugins.ml2.drivers.l2pop.rpc_manager import l2population_rpc from neutron.plugins.ml2.drivers.openvswitch.agent.common \ import constants from neutron.plugins.ml2.drivers.openvswitch.agent \ import ovs_agent_extension_api as ovs_ext_api from neutron.plugins.ml2.drivers.openvswitch.agent \ import ovs_dvr_neutron_agent LOG = logging.getLogger(__name__) cfg.CONF.import_group('AGENT', 'neutron.plugins.ml2.drivers.openvswitch.' 'agent.common.config') cfg.CONF.import_group('OVS', 'neutron.plugins.ml2.drivers.openvswitch.agent.' 'common.config') class _mac_mydialect(netaddr.mac_unix): word_fmt = '%.2x' class LocalVLANMapping(object): def __init__(self, vlan, network_type, physical_network, segmentation_id, vif_ports=None): if vif_ports is None: vif_ports = {} self.vlan = vlan self.network_type = network_type self.physical_network = physical_network self.segmentation_id = segmentation_id self.vif_ports = vif_ports # set of tunnel ports on which packets should be flooded self.tun_ofports = set() def __str__(self): return ("lv-id = %s type = %s phys-net = %s phys-id = %s" % (self.vlan, self.network_type, self.physical_network, self.segmentation_id)) class OVSPluginApi(agent_rpc.PluginApi): pass def has_zero_prefixlen_address(ip_addresses): return any(netaddr.IPNetwork(ip).prefixlen == 0 for ip in ip_addresses) class OVSNeutronAgent(sg_rpc.SecurityGroupAgentRpcCallbackMixin, l2population_rpc.L2populationRpcCallBackTunnelMixin, dvr_rpc.DVRAgentRpcCallbackMixin): '''Implements OVS-based tunneling, VLANs and flat networks. Two local bridges are created: an integration bridge (defaults to 'br-int') and a tunneling bridge (defaults to 'br-tun'). An additional bridge is created for each physical network interface used for VLANs and/or flat networks. All VM VIFs are plugged into the integration bridge. VM VIFs on a given virtual network share a common "local" VLAN (i.e. not propagated externally). The VLAN id of this local VLAN is mapped to the physical networking details realizing that virtual network. For virtual networks realized as GRE tunnels, a Logical Switch (LS) identifier is used to differentiate tenant traffic on inter-HV tunnels. A mesh of tunnels is created to other Hypervisors in the cloud. These tunnels originate and terminate on the tunneling bridge of each hypervisor. 
Port patching is done to connect local VLANs on the integration bridge to inter-hypervisor tunnels on the tunnel bridge. For each virtual network realized as a VLAN or flat network, a veth or a pair of patch ports is used to connect the local VLAN on the integration bridge with the physical network bridge, with flow rules adding, modifying, or stripping VLAN tags as necessary. ''' # history # 1.0 Initial version # 1.1 Support Security Group RPC # 1.2 Support DVR (Distributed Virtual Router) RPC # 1.3 Added param devices_to_update to security_groups_provider_updated # 1.4 Added support for network_update target = oslo_messaging.Target(version='1.4') def __init__(self, bridge_classes, conf=None): '''Constructor. :param bridge_classes: a dict for bridge classes. :param conf: an instance of ConfigOpts ''' super(OVSNeutronAgent, self).__init__() self.conf = conf or cfg.CONF self.ovs = ovs_lib.BaseOVS() agent_conf = self.conf.AGENT ovs_conf = self.conf.OVS self.fullsync = False # init bridge classes with configured datapath type. self.br_int_cls, self.br_phys_cls, self.br_tun_cls = ( functools.partial(bridge_classes[b], datapath_type=ovs_conf.datapath_type) for b in ('br_int', 'br_phys', 'br_tun')) self.use_veth_interconnection = ovs_conf.use_veth_interconnection self.veth_mtu = agent_conf.veth_mtu self.available_local_vlans = set(moves.range(p_const.MIN_VLAN_TAG, p_const.MAX_VLAN_TAG)) self.tunnel_types = agent_conf.tunnel_types or [] self.l2_pop = agent_conf.l2_population # TODO(ethuleau): Change ARP responder so it's not dependent on the # ML2 l2 population mechanism driver. self.enable_distributed_routing = agent_conf.enable_distributed_routing self.arp_responder_enabled = agent_conf.arp_responder and self.l2_pop host = self.conf.host self.agent_id = 'ovs-agent-%s' % host if self.tunnel_types: self.enable_tunneling = True else: self.enable_tunneling = False # Validate agent configurations self._check_agent_configurations() # Keep track of int_br's device count for use by _report_state() self.int_br_device_count = 0 self.int_br = self.br_int_cls(ovs_conf.integration_bridge) self.setup_integration_br() # Stores port update notifications for processing in main rpc loop self.updated_ports = set() # Stores port delete notifications self.deleted_ports = set() self.network_ports = collections.defaultdict(set) # keeps association between ports and ofports to detect ofport change self.vifname_to_ofport_map = {} self.setup_rpc() self.bridge_mappings = self._parse_bridge_mappings( ovs_conf.bridge_mappings) self.setup_physical_bridges(self.bridge_mappings) self.local_vlan_map = {} self._reset_tunnel_ofports() self.polling_interval = agent_conf.polling_interval self.minimize_polling = agent_conf.minimize_polling self.ovsdb_monitor_respawn_interval = ( agent_conf.ovsdb_monitor_respawn_interval or constants.DEFAULT_OVSDBMON_RESPAWN) self.local_ip = ovs_conf.local_ip self.tunnel_count = 0 self.vxlan_udp_port = agent_conf.vxlan_udp_port self.dont_fragment = agent_conf.dont_fragment self.tunnel_csum = agent_conf.tunnel_csum self.tun_br = None self.patch_int_ofport = constants.OFPORT_INVALID self.patch_tun_ofport = constants.OFPORT_INVALID if self.enable_tunneling: # The patch_int_ofport and patch_tun_ofport are updated # here inside the call to setup_tunnel_br() self.setup_tunnel_br(ovs_conf.tunnel_bridge) self.init_extension_manager(self.connection) self.dvr_agent = ovs_dvr_neutron_agent.OVSDVRNeutronAgent( self.context, self.dvr_plugin_rpc, self.int_br, self.tun_br, self.bridge_mappings, self.phys_brs, 
self.int_ofports, self.phys_ofports, self.patch_int_ofport, self.patch_tun_ofport, host, self.enable_tunneling, self.enable_distributed_routing) if self.enable_tunneling: self.setup_tunnel_br_flows() self.dvr_agent.setup_dvr_flows() # Collect additional bridges to monitor self.ancillary_brs = self.setup_ancillary_bridges( ovs_conf.integration_bridge, ovs_conf.tunnel_bridge) # In order to keep existed device's local vlan unchanged, # restore local vlan mapping at start self._restore_local_vlan_map() # Security group agent support self.sg_agent = sg_rpc.SecurityGroupAgentRpc(self.context, self.sg_plugin_rpc, self.local_vlan_map, defer_refresh_firewall=True, integration_bridge=self.int_br) # we default to False to provide backward compat with out of tree # firewall drivers that expect the logic that existed on the Neutron # server which only enabled hybrid plugging based on the use of the # hybrid driver. hybrid_plug = getattr(self.sg_agent.firewall, 'OVS_HYBRID_PLUG_REQUIRED', False) self.prevent_arp_spoofing = ( agent_conf.prevent_arp_spoofing and not self.sg_agent.firewall.provides_arp_spoofing_protection) #TODO(mangelajo): optimize resource_versions to only report # versions about resources which are common, # or which are used by specific extensions. self.agent_state = { 'binary': 'neutron-openvswitch-agent', 'host': host, 'topic': n_const.L2_AGENT_TOPIC, 'configurations': {'bridge_mappings': self.bridge_mappings, 'tunnel_types': self.tunnel_types, 'tunneling_ip': self.local_ip, 'l2_population': self.l2_pop, 'arp_responder_enabled': self.arp_responder_enabled, 'enable_distributed_routing': self.enable_distributed_routing, 'log_agent_heartbeats': agent_conf.log_agent_heartbeats, 'extensions': self.ext_manager.names(), 'datapath_type': ovs_conf.datapath_type, 'ovs_capabilities': self.ovs.capabilities, 'vhostuser_socket_dir': ovs_conf.vhostuser_socket_dir, portbindings.OVS_HYBRID_PLUG: hybrid_plug}, 'resource_versions': resources.LOCAL_RESOURCE_VERSIONS, 'agent_type': agent_conf.agent_type, 'start_flag': True} report_interval = agent_conf.report_interval if report_interval: heartbeat = loopingcall.FixedIntervalLoopingCall( self._report_state) heartbeat.start(interval=report_interval) # Initialize iteration counter self.iter_num = 0 self.run_daemon_loop = True self.catch_sigterm = False self.catch_sighup = False # The initialization is complete; we can start receiving messages self.connection.consume_in_threads() self.quitting_rpc_timeout = agent_conf.quitting_rpc_timeout def _parse_bridge_mappings(self, bridge_mappings): try: return n_utils.parse_mappings(bridge_mappings) except ValueError as e: raise ValueError(_("Parsing bridge_mappings failed: %s.") % e) def _report_state(self): # How many devices are likely used by a VM self.agent_state.get('configurations')['devices'] = ( self.int_br_device_count) self.agent_state.get('configurations')['in_distributed_mode'] = ( self.dvr_agent.in_distributed_mode()) try: agent_status = self.state_rpc.report_state(self.context, self.agent_state, True) if agent_status == n_const.AGENT_REVIVED: LOG.info(_LI('Agent has just been revived. ' 'Doing a full sync.')) self.fullsync = True # we only want to update resource versions on startup self.agent_state.pop('resource_versions', None) if self.agent_state.pop('start_flag', None): # On initial start, we notify systemd after initialization # is complete. 
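# oslo_service's notify_once() emits the systemd READY=1 notification at
# most once per process, so a Type=notify service unit considers the
# agent started only after this first successful state report.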
systemd.notify_once() except Exception: LOG.exception(_LE("Failed reporting state!")) def _restore_local_vlan_map(self): self._local_vlan_hints = {} # skip INVALID and UNASSIGNED to match scan_ports behavior ofport_filter = (ovs_lib.INVALID_OFPORT, ovs_lib.UNASSIGNED_OFPORT) cur_ports = self.int_br.get_vif_ports(ofport_filter) port_names = [p.port_name for p in cur_ports] port_info = self.int_br.get_ports_attributes( "Port", columns=["name", "other_config", "tag"], ports=port_names) by_name = {x['name']: x for x in port_info} for port in cur_ports: # if a port was deleted between get_vif_ports and # get_ports_attributes, we # will get a KeyError try: local_vlan_map = by_name[port.port_name]['other_config'] local_vlan = by_name[port.port_name]['tag'] except KeyError: continue if not local_vlan: continue net_uuid = local_vlan_map.get('net_uuid') if (net_uuid and net_uuid not in self._local_vlan_hints and local_vlan != constants.DEAD_VLAN_TAG): self.available_local_vlans.remove(local_vlan) self._local_vlan_hints[local_vlan_map['net_uuid']] = \ local_vlan def _dispose_local_vlan_hints(self): self.available_local_vlans.update(self._local_vlan_hints.values()) self._local_vlan_hints = {} def _reset_tunnel_ofports(self): self.tun_br_ofports = {p_const.TYPE_GENEVE: {}, p_const.TYPE_GRE: {}, p_const.TYPE_VXLAN: {}} def setup_rpc(self): self.plugin_rpc = OVSPluginApi(topics.PLUGIN) self.sg_plugin_rpc = sg_rpc.SecurityGroupServerRpcApi(topics.PLUGIN) self.dvr_plugin_rpc = dvr_rpc.DVRServerRpcApi(topics.PLUGIN) self.state_rpc = agent_rpc.PluginReportStateAPI(topics.REPORTS) # RPC network init self.context = context.get_admin_context_without_session() # Define the listening consumers for the agent consumers = [[topics.PORT, topics.UPDATE], [topics.PORT, topics.DELETE], [constants.TUNNEL, topics.UPDATE], [constants.TUNNEL, topics.DELETE], [topics.SECURITY_GROUP, topics.UPDATE], [topics.DVR, topics.UPDATE], [topics.NETWORK, topics.UPDATE]] if self.l2_pop: consumers.append([topics.L2POPULATION, topics.UPDATE]) self.connection = agent_rpc.create_consumers([self], topics.AGENT, consumers, start_listening=False) def init_extension_manager(self, connection): ext_manager.register_opts(self.conf) self.ext_manager = ( ext_manager.AgentExtensionsManager(self.conf)) self.agent_api = ovs_ext_api.OVSAgentExtensionAPI(self.int_br, self.tun_br) self.ext_manager.initialize( connection, constants.EXTENSION_DRIVER_TYPE, self.agent_api) def get_net_uuid(self, vif_id): for network_id, vlan_mapping in six.iteritems(self.local_vlan_map): if vif_id in vlan_mapping.vif_ports: return network_id def port_update(self, context, **kwargs): port = kwargs.get('port') # Put the port identifier in the updated_ports set. 
# Even if full port details might be provided to this call, # they are not used since there is no guarantee the notifications # are processed in the same order as the relevant API requests self.updated_ports.add(port['id']) LOG.debug("port_update message processed for port %s", port['id']) def port_delete(self, context, **kwargs): port_id = kwargs.get('port_id') self.deleted_ports.add(port_id) self.updated_ports.discard(port_id) LOG.debug("port_delete message processed for port %s", port_id) def network_update(self, context, **kwargs): network_id = kwargs['network']['id'] for port_id in self.network_ports[network_id]: # notifications could arrive out of order, if the port is deleted # we don't want to update it anymore if port_id not in self.deleted_ports: self.updated_ports.add(port_id) LOG.debug("network_update message processed for network " "%(network_id)s, with ports: %(ports)s", {'network_id': network_id, 'ports': self.network_ports[network_id]}) def _clean_network_ports(self, port_id): for port_set in self.network_ports.values(): if port_id in port_set: port_set.remove(port_id) break def process_deleted_ports(self, port_info): # don't try to process removed ports as deleted ports since # they are already gone if 'removed' in port_info: self.deleted_ports -= port_info['removed'] deleted_ports = list(self.deleted_ports) while self.deleted_ports: port_id = self.deleted_ports.pop() port = self.int_br.get_vif_port_by_id(port_id) self._clean_network_ports(port_id) self.ext_manager.delete_port(self.context, {"vif_port": port, "port_id": port_id}) # move to dead VLAN so deleted ports no # longer have access to the network if port: # don't log errors since there is a chance someone will be # removing the port from the bridge at the same time self.port_dead(port, log_errors=False) self.port_unbound(port_id) # Flush firewall rules after ports are put on dead VLAN to be # more secure self.sg_agent.remove_devices_filter(deleted_ports) def tunnel_update(self, context, **kwargs): LOG.debug("tunnel_update received") if not self.enable_tunneling: return tunnel_ip = kwargs.get('tunnel_ip') tunnel_type = kwargs.get('tunnel_type') if not tunnel_type: LOG.error(_LE("No tunnel_type specified, cannot create tunnels")) return if tunnel_type not in self.tunnel_types: LOG.error(_LE("tunnel_type %s not supported by agent"), tunnel_type) return if tunnel_ip == self.local_ip: return tun_name = self.get_tunnel_name(tunnel_type, self.local_ip, tunnel_ip) if tun_name is None: return if not self.l2_pop: self._setup_tunnel_port(self.tun_br, tun_name, tunnel_ip, tunnel_type) def tunnel_delete(self, context, **kwargs): LOG.debug("tunnel_delete received") if not self.enable_tunneling: return tunnel_ip = kwargs.get('tunnel_ip') if not tunnel_ip: LOG.error(_LE("No tunnel_ip specified, cannot delete tunnels")) return tunnel_type = kwargs.get('tunnel_type') if not tunnel_type: LOG.error(_LE("No tunnel_type specified, cannot delete tunnels")) return if tunnel_type not in self.tunnel_types: LOG.error(_LE("tunnel_type %s not supported by agent"), tunnel_type) return ofport = self.tun_br_ofports[tunnel_type].get(tunnel_ip) self.cleanup_tunnel_port(self.tun_br, ofport, tunnel_type) def _tunnel_port_lookup(self, network_type, remote_ip): return self.tun_br_ofports[network_type].get(remote_ip) def fdb_add(self, context, fdb_entries): LOG.debug("fdb_add received") for lvm, agent_ports in self.get_agent_ports(fdb_entries, self.local_vlan_map): agent_ports.pop(self.local_ip, None) if len(agent_ports): if not 
self.enable_distributed_routing: with self.tun_br.deferred() as deferred_br: self.fdb_add_tun(context, deferred_br, lvm, agent_ports, self._tunnel_port_lookup) else: self.fdb_add_tun(context, self.tun_br, lvm, agent_ports, self._tunnel_port_lookup) def fdb_remove(self, context, fdb_entries): LOG.debug("fdb_remove received") for lvm, agent_ports in self.get_agent_ports(fdb_entries, self.local_vlan_map): agent_ports.pop(self.local_ip, None) if len(agent_ports): if not self.enable_distributed_routing: with self.tun_br.deferred() as deferred_br: self.fdb_remove_tun(context, deferred_br, lvm, agent_ports, self._tunnel_port_lookup) else: self.fdb_remove_tun(context, self.tun_br, lvm, agent_ports, self._tunnel_port_lookup) def add_fdb_flow(self, br, port_info, remote_ip, lvm, ofport): if port_info == n_const.FLOODING_ENTRY: lvm.tun_ofports.add(ofport) br.install_flood_to_tun(lvm.vlan, lvm.segmentation_id, lvm.tun_ofports) else: self.setup_entry_for_arp_reply(br, 'add', lvm.vlan, port_info.mac_address, port_info.ip_address) br.install_unicast_to_tun(lvm.vlan, lvm.segmentation_id, ofport, port_info.mac_address) def del_fdb_flow(self, br, port_info, remote_ip, lvm, ofport): if port_info == n_const.FLOODING_ENTRY: if ofport not in lvm.tun_ofports: LOG.debug("attempt to remove a non-existent port %s", ofport) return lvm.tun_ofports.remove(ofport) if len(lvm.tun_ofports) > 0: br.install_flood_to_tun(lvm.vlan, lvm.segmentation_id, lvm.tun_ofports) else: # This local vlan doesn't require any more tunneling br.delete_flood_to_tun(lvm.vlan) else: self.setup_entry_for_arp_reply(br, 'remove', lvm.vlan, port_info.mac_address, port_info.ip_address) br.delete_unicast_to_tun(lvm.vlan, port_info.mac_address) def _fdb_chg_ip(self, context, fdb_entries): LOG.debug("update chg_ip received") with self.tun_br.deferred() as deferred_br: self.fdb_chg_ip_tun(context, deferred_br, fdb_entries, self.local_ip, self.local_vlan_map) def setup_entry_for_arp_reply(self, br, action, local_vid, mac_address, ip_address): '''Set the ARP respond entry. When the l2 population mechanism driver and OVS supports to edit ARP fields, a table (ARP_RESPONDER) to resolve ARP locally is added to the tunnel bridge. ''' if not self.arp_responder_enabled: return ip = netaddr.IPAddress(ip_address) if ip.version == 6: return ip = str(ip) mac = str(netaddr.EUI(mac_address, dialect=_mac_mydialect)) if action == 'add': br.install_arp_responder(local_vid, ip, mac) elif action == 'remove': br.delete_arp_responder(local_vid, ip) else: LOG.warning(_LW('Action %s not supported'), action) def _local_vlan_for_flat(self, lvid, physical_network): phys_br = self.phys_brs[physical_network] phys_port = self.phys_ofports[physical_network] int_br = self.int_br int_port = self.int_ofports[physical_network] phys_br.provision_local_vlan(port=phys_port, lvid=lvid, segmentation_id=None, distributed=False) int_br.provision_local_vlan(port=int_port, lvid=lvid, segmentation_id=None) def _local_vlan_for_vlan(self, lvid, physical_network, segmentation_id): distributed = self.enable_distributed_routing phys_br = self.phys_brs[physical_network] phys_port = self.phys_ofports[physical_network] int_br = self.int_br int_port = self.int_ofports[physical_network] phys_br.provision_local_vlan(port=phys_port, lvid=lvid, segmentation_id=segmentation_id, distributed=distributed) int_br.provision_local_vlan(port=int_port, lvid=lvid, segmentation_id=segmentation_id) def provision_local_vlan(self, net_uuid, network_type, physical_network, segmentation_id): '''Provisions a local VLAN. 
:param net_uuid: the uuid of the network associated with this vlan. :param network_type: the network type ('gre', 'vxlan', 'vlan', 'flat', 'local', 'geneve') :param physical_network: the physical network for 'vlan' or 'flat' :param segmentation_id: the VID for 'vlan' or tunnel ID for 'tunnel' ''' # On a restart or crash of OVS, the network associated with this VLAN # will already be assigned, so check for that here before assigning a # new one. lvm = self.local_vlan_map.get(net_uuid) if lvm: lvid = lvm.vlan else: lvid = self._local_vlan_hints.pop(net_uuid, None) if lvid is None: if not self.available_local_vlans: LOG.error(_LE("No local VLAN available for net-id=%s"), net_uuid) return lvid = self.available_local_vlans.pop() self.local_vlan_map[net_uuid] = LocalVLANMapping(lvid, network_type, physical_network, segmentation_id) LOG.info(_LI("Assigning %(vlan_id)s as local vlan for " "net-id=%(net_uuid)s"), {'vlan_id': lvid, 'net_uuid': net_uuid}) if network_type in constants.TUNNEL_NETWORK_TYPES: if self.enable_tunneling: # outbound broadcast/multicast ofports = list(self.tun_br_ofports[network_type].values()) if ofports: self.tun_br.install_flood_to_tun(lvid, segmentation_id, ofports) # inbound from tunnels: set lvid in the right table # and resubmit to Table LEARN_FROM_TUN for mac learning if self.enable_distributed_routing: self.dvr_agent.process_tunneled_network( network_type, lvid, segmentation_id) else: self.tun_br.provision_local_vlan( network_type=network_type, lvid=lvid, segmentation_id=segmentation_id) else: LOG.error(_LE("Cannot provision %(network_type)s network for " "net-id=%(net_uuid)s - tunneling disabled"), {'network_type': network_type, 'net_uuid': net_uuid}) elif network_type == p_const.TYPE_FLAT: if physical_network in self.phys_brs: self._local_vlan_for_flat(lvid, physical_network) else: LOG.error(_LE("Cannot provision flat network for " "net-id=%(net_uuid)s - no bridge for " "physical_network %(physical_network)s"), {'net_uuid': net_uuid, 'physical_network': physical_network}) elif network_type == p_const.TYPE_VLAN: if physical_network in self.phys_brs: self._local_vlan_for_vlan(lvid, physical_network, segmentation_id) else: LOG.error(_LE("Cannot provision VLAN network for " "net-id=%(net_uuid)s - no bridge for " "physical_network %(physical_network)s"), {'net_uuid': net_uuid, 'physical_network': physical_network}) elif network_type == p_const.TYPE_LOCAL: # no flows needed for local networks pass else: LOG.error(_LE("Cannot provision unknown network type " "%(network_type)s for net-id=%(net_uuid)s"), {'network_type': network_type, 'net_uuid': net_uuid}) def reclaim_local_vlan(self, net_uuid): '''Reclaim a local VLAN. :param net_uuid: the network uuid associated with this vlan. 
''' lvm = self.local_vlan_map.pop(net_uuid, None) if lvm is None: LOG.debug("Network %s not used on agent.", net_uuid) return LOG.info(_LI("Reclaiming vlan = %(vlan_id)s from " "net-id = %(net_uuid)s"), {'vlan_id': lvm.vlan, 'net_uuid': net_uuid}) if lvm.network_type in constants.TUNNEL_NETWORK_TYPES: if self.enable_tunneling: self.tun_br.reclaim_local_vlan( network_type=lvm.network_type, segmentation_id=lvm.segmentation_id) self.tun_br.delete_flood_to_tun(lvm.vlan) self.tun_br.delete_unicast_to_tun(lvm.vlan, None) self.tun_br.delete_arp_responder(lvm.vlan, None) if self.l2_pop: # Try to remove tunnel ports if not used by other networks for ofport in lvm.tun_ofports: self.cleanup_tunnel_port(self.tun_br, ofport, lvm.network_type) elif lvm.network_type == p_const.TYPE_FLAT: if lvm.physical_network in self.phys_brs: # outbound br = self.phys_brs[lvm.physical_network] br.reclaim_local_vlan( port=self.phys_ofports[lvm.physical_network], lvid=lvm.vlan) # inbound br = self.int_br br.reclaim_local_vlan( port=self.int_ofports[lvm.physical_network], segmentation_id=None) elif lvm.network_type == p_const.TYPE_VLAN: if lvm.physical_network in self.phys_brs: # outbound br = self.phys_brs[lvm.physical_network] br.reclaim_local_vlan( port=self.phys_ofports[lvm.physical_network], lvid=lvm.vlan) # inbound br = self.int_br br.reclaim_local_vlan( port=self.int_ofports[lvm.physical_network], segmentation_id=lvm.segmentation_id) elif lvm.network_type == p_const.TYPE_LOCAL: # no flows needed for local networks pass else: LOG.error(_LE("Cannot reclaim unknown network type " "%(network_type)s for net-id=%(net_uuid)s"), {'network_type': lvm.network_type, 'net_uuid': net_uuid}) self.available_local_vlans.add(lvm.vlan) def port_bound(self, port, net_uuid, network_type, physical_network, segmentation_id, fixed_ips, device_owner, ovs_restarted): '''Bind port to net_uuid/lsw_id and install flow for inbound traffic to vm. :param port: an ovs_lib.VifPort object. :param net_uuid: the net_uuid this port is to be associated with. :param network_type: the network type ('gre', 'vlan', 'flat', 'local') :param physical_network: the physical network for 'vlan' or 'flat' :param segmentation_id: the VID for 'vlan' or tunnel ID for 'tunnel' :param fixed_ips: the ip addresses assigned to this port :param device_owner: the string indicative of owner of this port :param ovs_restarted: indicates if this is called for an OVS restart. 
''' if net_uuid not in self.local_vlan_map or ovs_restarted: self.provision_local_vlan(net_uuid, network_type, physical_network, segmentation_id) lvm = self.local_vlan_map[net_uuid] lvm.vif_ports[port.vif_id] = port self.dvr_agent.bind_port_to_dvr(port, lvm, fixed_ips, device_owner) port_other_config = self.int_br.db_get_val("Port", port.port_name, "other_config") if port_other_config is None: if port.vif_id in self.deleted_ports: LOG.debug("Port %s deleted concurrently", port.vif_id) elif port.vif_id in self.updated_ports: LOG.error(_LE("Expected port %s not found"), port.vif_id) else: LOG.debug("Unable to get config for port %s", port.vif_id) return False vlan_mapping = {'net_uuid': net_uuid, 'network_type': network_type, 'physical_network': str(physical_network)} if segmentation_id is not None: vlan_mapping['segmentation_id'] = str(segmentation_id) port_other_config.update(vlan_mapping) self.int_br.set_db_attribute("Port", port.port_name, "other_config", port_other_config) return True def _add_port_tag_info(self, need_binding_ports): port_names = [p['vif_port'].port_name for p in need_binding_ports] port_info = self.int_br.get_ports_attributes( "Port", columns=["name", "tag", "other_config"], ports=port_names, if_exists=True) info_by_port = {x['name']: [x['tag'], x['other_config']] for x in port_info} for port_detail in need_binding_ports: lvm = self.local_vlan_map.get(port_detail['network_id']) if not lvm: continue port = port_detail['vif_port'] cur_info = info_by_port.get(port.port_name) if cur_info is not None and cur_info[0] != lvm.vlan: other_config = cur_info[1] or {} other_config['tag'] = str(lvm.vlan) self.int_br.set_db_attribute( "Port", port.port_name, "other_config", other_config) def _bind_devices(self, need_binding_ports): devices_up = [] devices_down = [] failed_devices = [] port_names = [p['vif_port'].port_name for p in need_binding_ports] port_info = self.int_br.get_ports_attributes( "Port", columns=["name", "tag"], ports=port_names, if_exists=True) tags_by_name = {x['name']: x['tag'] for x in port_info} for port_detail in need_binding_ports: lvm = self.local_vlan_map.get(port_detail['network_id']) if not lvm: # network for port was deleted. skip this port since it # will need to be handled as a DEAD port in the next scan continue port = port_detail['vif_port'] device = port_detail['device'] # Do not bind a port if it's already bound cur_tag = tags_by_name.get(port.port_name) if cur_tag is None: LOG.debug("Port %s was deleted concurrently, skipping it", port.port_name) continue # Uninitialized port has tag set to [] if cur_tag and cur_tag != lvm.vlan: self.int_br.delete_flows(in_port=port.ofport) if self.prevent_arp_spoofing: self.setup_arp_spoofing_protection(self.int_br, port, port_detail) if cur_tag != lvm.vlan: self.int_br.set_db_attribute( "Port", port.port_name, "tag", lvm.vlan) # update plugin about port status # FIXME(salv-orlando): Failures while updating device status # must be handled appropriately. Otherwise this might prevent # neutron server from sending network-vif-* events to the nova # API server, thus possibly preventing instance spawn. 
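            # Each port is queued as 'up' or 'down' below and the whole
            # batch is reported to the plugin in a single
            # update_device_list() RPC. Sketch of the round-trip (shapes
            # inferred from this method only; values illustrative):
            #   update_device_list(ctx, ['dev-up'], ['dev-down'],
            #                      agent_id, host)
            #   -> {'failed_devices_up': [], 'failed_devices_down': []}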
if port_detail.get('admin_state_up'): LOG.debug("Setting status for %s to UP", device) devices_up.append(device) else: LOG.debug("Setting status for %s to DOWN", device) devices_down.append(device) if devices_up or devices_down: devices_set = self.plugin_rpc.update_device_list( self.context, devices_up, devices_down, self.agent_id, self.conf.host) failed_devices = (devices_set.get('failed_devices_up') + devices_set.get('failed_devices_down')) if failed_devices: LOG.error(_LE("Configuration for devices %s failed!"), failed_devices) LOG.info(_LI("Configuration for devices up %(up)s and devices " "down %(down)s completed."), {'up': devices_up, 'down': devices_down}) return set(failed_devices) @staticmethod def setup_arp_spoofing_protection(bridge, vif, port_details): if not port_details.get('port_security_enabled', True): LOG.info(_LI("Skipping ARP spoofing rules for port '%s' because " "it has port security disabled"), vif.port_name) bridge.delete_arp_spoofing_protection(port=vif.ofport) bridge.set_allowed_macs_for_port(port=vif.ofport, allow_all=True) return if port_details['device_owner'].startswith( n_const.DEVICE_OWNER_NETWORK_PREFIX): LOG.debug("Skipping ARP spoofing rules for network owned port " "'%s'.", vif.port_name) bridge.delete_arp_spoofing_protection(port=vif.ofport) bridge.set_allowed_macs_for_port(port=vif.ofport, allow_all=True) return # clear any previous flows related to this port in our ARP table bridge.delete_arp_spoofing_allow_rules(port=vif.ofport) # collect all of the addresses and cidrs that belong to the port addresses = {f['ip_address'] for f in port_details['fixed_ips']} mac_addresses = {vif.vif_mac} if port_details.get('allowed_address_pairs'): addresses |= {p['ip_address'] for p in port_details['allowed_address_pairs']} mac_addresses |= {p['mac_address'] for p in port_details['allowed_address_pairs'] if p.get('mac_address')} bridge.set_allowed_macs_for_port(vif.ofport, mac_addresses) ipv6_addresses = {ip for ip in addresses if netaddr.IPNetwork(ip).version == 6} # Allow neighbor advertisements for LLA address. ipv6_addresses |= {str(ipv6.get_ipv6_addr_by_EUI64( n_const.IPV6_LLA_PREFIX, mac)) for mac in mac_addresses} if not has_zero_prefixlen_address(ipv6_addresses): # Install protection only when prefix is not zero because a /0 # prefix allows any address anyway and the nd_target can only # match on /1 or more. bridge.install_icmpv6_na_spoofing_protection(port=vif.ofport, ip_addresses=ipv6_addresses) ipv4_addresses = {ip for ip in addresses if netaddr.IPNetwork(ip).version == 4} if not has_zero_prefixlen_address(ipv4_addresses): # Install protection only when prefix is not zero because a /0 # prefix allows any address anyway and the ARP_SPA can only # match on /1 or more. bridge.install_arp_spoofing_protection(port=vif.ofport, ip_addresses=ipv4_addresses) else: bridge.delete_arp_spoofing_protection(port=vif.ofport) def port_unbound(self, vif_id, net_uuid=None): '''Unbind port. Removes corresponding local vlan mapping object if this is its last VIF. :param vif_id: the id of the vif :param net_uuid: the net_uuid this port is associated with. 
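        Illustrative call (``agent`` is a running OVSNeutronAgent)::

            # net_uuid may be omitted; the agent resolves it from vif_id
            agent.port_unbound(vif_id)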
''' if net_uuid is None: net_uuid = self.get_net_uuid(vif_id) if not self.local_vlan_map.get(net_uuid): LOG.info(_LI('port_unbound(): net_uuid %s not in local_vlan_map'), net_uuid) return lvm = self.local_vlan_map[net_uuid] if vif_id in lvm.vif_ports: vif_port = lvm.vif_ports[vif_id] self.dvr_agent.unbind_port_from_dvr(vif_port, lvm) lvm.vif_ports.pop(vif_id, None) if not lvm.vif_ports: self.reclaim_local_vlan(net_uuid) def port_dead(self, port, log_errors=True): '''Once a port has no binding, put it on the "dead vlan". :param port: an ovs_lib.VifPort object. ''' # Don't kill a port if it's already dead cur_tag = self.int_br.db_get_val("Port", port.port_name, "tag", log_errors=log_errors) if cur_tag and cur_tag != constants.DEAD_VLAN_TAG: self.int_br.set_db_attribute("Port", port.port_name, "tag", constants.DEAD_VLAN_TAG, log_errors=log_errors) self.int_br.drop_port(in_port=port.ofport) def setup_integration_br(self): '''Setup the integration bridge. ''' # Ensure the integration bridge is created. # ovs_lib.OVSBridge.create() will run # ovs-vsctl -- --may-exist add-br BRIDGE_NAME # which does nothing if bridge already exists. self.int_br.create() self.int_br.set_secure_mode() self.int_br.setup_controllers(self.conf) if self.conf.AGENT.drop_flows_on_start: # Delete the patch port between br-int and br-tun if we're deleting # the flows on br-int, so that traffic doesn't get flooded over # while flows are missing. self.int_br.delete_port(self.conf.OVS.int_peer_patch_port) self.int_br.delete_flows() self.int_br.setup_default_table() def setup_ancillary_bridges(self, integ_br, tun_br): '''Setup ancillary bridges - for example br-ex.''' ovs = ovs_lib.BaseOVS() ovs_bridges = set(ovs.get_bridges()) # Remove all known bridges ovs_bridges.remove(integ_br) if self.enable_tunneling: ovs_bridges.remove(tun_br) br_names = [self.phys_brs[physical_network].br_name for physical_network in self.phys_brs] ovs_bridges.difference_update(br_names) # Filter list of bridges to those that have external # bridge-id's configured br_names = [] for bridge in ovs_bridges: bridge_id = ovs.get_bridge_external_bridge_id(bridge) if bridge_id != bridge: br_names.append(bridge) ovs_bridges.difference_update(br_names) ancillary_bridges = [] for bridge in ovs_bridges: br = ovs_lib.OVSBridge(bridge) LOG.info(_LI('Adding %s to list of bridges.'), bridge) ancillary_bridges.append(br) return ancillary_bridges def setup_tunnel_br(self, tun_br_name=None): '''(re)initialize the tunnel bridge. Creates tunnel bridge, and links it to the integration bridge using a patch port. :param tun_br_name: the name of the tunnel bridge. ''' if not self.tun_br: self.tun_br = self.br_tun_cls(tun_br_name) # tun_br.create() won't recreate bridge if it exists, but will handle # cases where something like datapath_type has changed self.tun_br.create(secure_mode=True) self.tun_br.setup_controllers(self.conf) if (not self.int_br.port_exists(self.conf.OVS.int_peer_patch_port) or self.patch_tun_ofport == ovs_lib.INVALID_OFPORT): self.patch_tun_ofport = self.int_br.add_patch_port( self.conf.OVS.int_peer_patch_port, self.conf.OVS.tun_peer_patch_port) if (not self.tun_br.port_exists(self.conf.OVS.tun_peer_patch_port) or self.patch_int_ofport == ovs_lib.INVALID_OFPORT): self.patch_int_ofport = self.tun_br.add_patch_port( self.conf.OVS.tun_peer_patch_port, self.conf.OVS.int_peer_patch_port) if ovs_lib.INVALID_OFPORT in (self.patch_tun_ofport, self.patch_int_ofport): LOG.error(_LE("Failed to create OVS patch port. 
Cannot have " "tunneling enabled on this agent, since this " "version of OVS does not support tunnels or patch " "ports. Agent terminated!")) sys.exit(1) if self.conf.AGENT.drop_flows_on_start: self.tun_br.delete_flows() def setup_tunnel_br_flows(self): '''Setup the tunnel bridge. Add all flows to the tunnel bridge. ''' self.tun_br.setup_default_table(self.patch_int_ofport, self.arp_responder_enabled) def setup_physical_bridges(self, bridge_mappings): '''Setup the physical network bridges. Creates physical network bridges and links them to the integration bridge using veths or patch ports. :param bridge_mappings: map physical network names to bridge names. ''' self.phys_brs = {} self.int_ofports = {} self.phys_ofports = {} ip_wrapper = ip_lib.IPWrapper() ovs = ovs_lib.BaseOVS() ovs_bridges = ovs.get_bridges() for physical_network, bridge in six.iteritems(bridge_mappings): LOG.info(_LI("Mapping physical network %(physical_network)s to " "bridge %(bridge)s"), {'physical_network': physical_network, 'bridge': bridge}) # setup physical bridge if bridge not in ovs_bridges: LOG.error(_LE("Bridge %(bridge)s for physical network " "%(physical_network)s does not exist. Agent " "terminated!"), {'physical_network': physical_network, 'bridge': bridge}) sys.exit(1) br = self.br_phys_cls(bridge) # The bridge already exists, so create won't recreate it, but will # handle things like changing the datapath_type br.create() br.set_secure_mode() br.setup_controllers(self.conf) if cfg.CONF.AGENT.drop_flows_on_start: br.delete_flows() br.setup_default_table() self.phys_brs[physical_network] = br # interconnect physical and integration bridges using veth/patches int_if_name = p_utils.get_interface_name( bridge, prefix=constants.PEER_INTEGRATION_PREFIX) phys_if_name = p_utils.get_interface_name( bridge, prefix=constants.PEER_PHYSICAL_PREFIX) # Interface type of port for physical and integration bridges must # be same, so check only one of them. int_type = self.int_br.db_get_val("Interface", int_if_name, "type") if self.use_veth_interconnection: # Drop ports if the interface types doesn't match the # configuration value. if int_type == 'patch': self.int_br.delete_port(int_if_name) br.delete_port(phys_if_name) device = ip_lib.IPDevice(int_if_name) if device.exists(): device.link.delete() # Give udev a chance to process its rules here, to avoid # race conditions between commands launched by udev rules # and the subsequent call to ip_wrapper.add_veth utils.execute(['udevadm', 'settle', '--timeout=10']) int_veth, phys_veth = ip_wrapper.add_veth(int_if_name, phys_if_name) int_ofport = self.int_br.add_port(int_if_name) phys_ofport = br.add_port(phys_if_name) else: # Drop ports if the interface type doesn't match the # configuration value if int_type == 'veth': self.int_br.delete_port(int_if_name) br.delete_port(phys_if_name) # Setup int_br to physical bridge patches. If they already # exist we leave them alone, otherwise we create them but don't # connect them until after the drop rules are in place. 
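            # Resulting wiring for a mapping such as "physnet1:br-eth1"
            # (interface names derived from the PEER_INTEGRATION_PREFIX /
            # PEER_PHYSICAL_PREFIX constants; illustrative only):
            #
            #   br-int [int-br-eth1] <--patch--> [phy-br-eth1] br-eth1
            #
            # Both ends start as drop ports, so no untranslated traffic
            # crosses until the VLAN translation flows are installed.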
if self.int_br.port_exists(int_if_name): int_ofport = self.int_br.get_port_ofport(int_if_name) else: int_ofport = self.int_br.add_patch_port( int_if_name, constants.NONEXISTENT_PEER) if br.port_exists(phys_if_name): phys_ofport = br.get_port_ofport(phys_if_name) else: phys_ofport = br.add_patch_port( phys_if_name, constants.NONEXISTENT_PEER) self.int_ofports[physical_network] = int_ofport self.phys_ofports[physical_network] = phys_ofport # block all untranslated traffic between bridges self.int_br.drop_port(in_port=int_ofport) br.drop_port(in_port=phys_ofport) if self.use_veth_interconnection: # enable veth to pass traffic int_veth.link.set_up() phys_veth.link.set_up() if self.veth_mtu: # set up mtu size for veth interfaces int_veth.link.set_mtu(self.veth_mtu) phys_veth.link.set_mtu(self.veth_mtu) else: # associate patch ports to pass traffic self.int_br.set_db_attribute('Interface', int_if_name, 'options', {'peer': phys_if_name}) br.set_db_attribute('Interface', phys_if_name, 'options', {'peer': int_if_name}) def update_stale_ofport_rules(self): # right now the ARP spoofing rules are the only thing that utilizes # ofport-based rules, so make arp_spoofing protection a conditional # until something else uses ofport if not self.prevent_arp_spoofing: return [] previous = self.vifname_to_ofport_map current = self.int_br.get_vif_port_to_ofport_map() # if any ofport numbers have changed, re-process the devices as # added ports so any rules based on ofport numbers are updated. moved_ports = self._get_ofport_moves(current, previous) # delete any stale rules based on removed ofports ofports_deleted = set(previous.values()) - set(current.values()) for ofport in ofports_deleted: self.int_br.delete_arp_spoofing_protection(port=ofport) self.int_br.set_allowed_macs_for_port(port=ofport, allow_all=True) # store map for next iteration self.vifname_to_ofport_map = current return moved_ports @staticmethod def _get_ofport_moves(current, previous): """Returns a list of moved ports. Takes two port->ofport maps and returns a list ports that moved to a different ofport. Deleted ports are not included. """ port_moves = [] for name, ofport in previous.items(): if name not in current: continue current_ofport = current[name] if ofport != current_ofport: port_moves.append(name) return port_moves def _get_port_info(self, registered_ports, cur_ports, readd_registered_ports): port_info = {'current': cur_ports} # FIXME(salv-orlando): It's not really necessary to return early # if nothing has changed. 
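        # Shape of the returned dict (all values are sets of port ids):
        #   {'current': ..., 'added': ..., 'removed': ...}
        # When readd_registered_ports is True (resync), every current
        # port is reported as 'added' so that its flows are reprovisioned.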
        if not readd_registered_ports and cur_ports == registered_ports:
            return port_info
        if readd_registered_ports:
            port_info['added'] = cur_ports
        else:
            port_info['added'] = cur_ports - registered_ports
        # Update port_info with ports not found on the integration bridge
        port_info['removed'] = registered_ports - cur_ports
        return port_info

    def _update_port_info_failed_devices_stats(self, port_info,
                                               failed_devices):
        # remove failed devices that don't need to be retried
        failed_devices['added'] -= port_info['removed']
        failed_devices['removed'] -= port_info['added']
        # Disregard devices that were never noticed by the agent
        port_info['removed'] &= port_info['current']
        # retry failed devices
        port_info['added'] |= failed_devices['added']
        LOG.debug("retrying failed devices %s", failed_devices['added'])
        port_info['removed'] |= failed_devices['removed']
        # Update current ports
        port_info['current'] |= port_info['added']
        port_info['current'] -= port_info['removed']

    def process_ports_events(self, events, registered_ports,
                             ancillary_ports, old_ports_not_ready,
                             failed_devices, failed_ancillary_devices,
                             updated_ports=None):
        port_info = {}
        port_info['added'] = set()
        port_info['removed'] = set()
        port_info['current'] = registered_ports

        ancillary_port_info = {}
        ancillary_port_info['added'] = set()
        ancillary_port_info['removed'] = set()
        ancillary_port_info['current'] = ancillary_ports

        ports_not_ready_yet = set()
        # If a port was added and then removed, or vice versa, the agent
        # can't know the order of the operations, so check the status of
        # the port to determine whether it was added or deleted
        added_ports = {p['name'] for p in events['added']}
        removed_ports = {p['name'] for p in events['removed']}
        ports_removed_and_added = added_ports & removed_ports
        for p in ports_removed_and_added:
            if ovs_lib.BaseOVS().port_exists(p):
                events['removed'] = [e for e in events['removed']
                                     if e['name'] != p]
            else:
                events['added'] = [e for e in events['added']
                                   if e['name'] != p]
        # TODO(rossella_s): scanning the ancillary bridge won't be needed
        # anymore once https://review.openstack.org/#/c/203381 merges,
        # since the bridge id stored in external_ids will be used to
        # identify the bridge the port belongs to
        cur_ancillary_ports = set()
        for bridge in self.ancillary_brs:
            cur_ancillary_ports |= bridge.get_vif_port_set()
        cur_ancillary_ports |= ancillary_port_info['current']

        def _process_port(port, ports, ancillary_ports):
            # only ports with 'attached-mac' set in external_ids (from
            # which the neutron port id, 'iface-id', is derived) are
            # ports the agent should care about
            if 'attached-mac' in port.get('external_ids', []):
                iface_id = self.int_br.portid_from_external_ids(
                    port['external_ids'])
                if iface_id:
                    if port['ofport'] == ovs_lib.UNASSIGNED_OFPORT:
                        LOG.debug("Port %s not ready yet on the bridge",
                                  iface_id)
                        ports_not_ready_yet.add(port['name'])
                        return
                    # check if port belongs to ancillary bridge
                    if iface_id in cur_ancillary_ports:
                        ancillary_ports.add(iface_id)
                    else:
                        ports.add(iface_id)

        if old_ports_not_ready:
            old_ports_not_ready_attrs = self.int_br.get_ports_attributes(
                'Interface', columns=['name', 'external_ids', 'ofport'],
                ports=old_ports_not_ready, if_exists=True)
            now_ready_ports = set(
                [p['name'] for p in old_ports_not_ready_attrs])
            LOG.debug("Ports %s are now ready", now_ready_ports)
            old_ports_not_ready_yet = old_ports_not_ready - now_ready_ports
            removed_ports = set([p['name'] for p in events['removed']])
            old_ports_not_ready_yet -= removed_ports
            LOG.debug("Ports %s were not ready at last iteration and are "
                      "not ready yet", old_ports_not_ready_yet)
            ports_not_ready_yet |= old_ports_not_ready_yet
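            # Re-inject ports that became ready since the last iteration
            # as 'added' events below, so a port whose ofport was still
            # unassigned at the previous scan is now processed normally.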
events['added'].extend(old_ports_not_ready_attrs) for port in events['added']: _process_port(port, port_info['added'], ancillary_port_info['added']) for port in events['removed']: _process_port(port, port_info['removed'], ancillary_port_info['removed']) self._update_port_info_failed_devices_stats(port_info, failed_devices) self._update_port_info_failed_devices_stats(ancillary_port_info, failed_ancillary_devices) if updated_ports is None: updated_ports = set() updated_ports.update(self.check_changed_vlans()) if updated_ports: # Some updated ports might have been removed in the # meanwhile, and therefore should not be processed. # In this case the updated port won't be found among # current ports. updated_ports &= port_info['current'] port_info['updated'] = updated_ports return port_info, ancillary_port_info, ports_not_ready_yet def scan_ports(self, registered_ports, sync, updated_ports=None): cur_ports = self.int_br.get_vif_port_set() self.int_br_device_count = len(cur_ports) port_info = self._get_port_info(registered_ports, cur_ports, sync) if updated_ports is None: updated_ports = set() updated_ports.update(self.check_changed_vlans()) if updated_ports: # Some updated ports might have been removed in the # meanwhile, and therefore should not be processed. # In this case the updated port won't be found among # current ports. updated_ports &= cur_ports if updated_ports: port_info['updated'] = updated_ports return port_info def scan_ancillary_ports(self, registered_ports, sync): cur_ports = set() for bridge in self.ancillary_brs: cur_ports |= bridge.get_vif_port_set() return self._get_port_info(registered_ports, cur_ports, sync) def check_changed_vlans(self): """Return ports which have lost their vlan tag. The returned value is a set of port ids of the ports concerned by a vlan tag loss. """ port_tags = self.int_br.get_port_tag_dict() changed_ports = set() for lvm in self.local_vlan_map.values(): for port in lvm.vif_ports.values(): if ( port.port_name in port_tags and port_tags[port.port_name] != lvm.vlan ): LOG.info( _LI("Port '%(port_name)s' has lost " "its vlan tag '%(vlan_tag)d'!"), {'port_name': port.port_name, 'vlan_tag': lvm.vlan} ) changed_ports.add(port.vif_id) return changed_ports def treat_vif_port(self, vif_port, port_id, network_id, network_type, physical_network, segmentation_id, admin_state_up, fixed_ips, device_owner, ovs_restarted): # When this function is called for a port, the port should have # an OVS ofport configured, as only these ports were considered # for being treated. 
If that does not happen, it is a potential # error condition of which operators should be aware port_needs_binding = True if not vif_port.ofport: LOG.warning(_LW("VIF port: %s has no ofport configured, " "and might not be able to transmit"), vif_port.vif_id) if vif_port: if admin_state_up: port_needs_binding = self.port_bound( vif_port, network_id, network_type, physical_network, segmentation_id, fixed_ips, device_owner, ovs_restarted) else: LOG.info(_LI("VIF port: %s admin state up disabled, " "putting on the dead VLAN"), vif_port.vif_id) self.port_dead(vif_port) port_needs_binding = False else: LOG.debug("No VIF port for port %s defined on agent.", port_id) return port_needs_binding def _setup_tunnel_port(self, br, port_name, remote_ip, tunnel_type): try: if (netaddr.IPAddress(self.local_ip).version != netaddr.IPAddress(remote_ip).version): LOG.error(_LE("IP version mismatch, cannot create tunnel: " "local_ip=%(lip)s remote_ip=%(rip)s"), {'lip': self.local_ip, 'rip': remote_ip}) return 0 except Exception: LOG.error(_LE("Invalid local or remote IP, cannot create tunnel: " "local_ip=%(lip)s remote_ip=%(rip)s"), {'lip': self.local_ip, 'rip': remote_ip}) return 0 ofport = br.add_tunnel_port(port_name, remote_ip, self.local_ip, tunnel_type, self.vxlan_udp_port, self.dont_fragment, self.tunnel_csum) if ofport == ovs_lib.INVALID_OFPORT: LOG.error(_LE("Failed to set-up %(type)s tunnel port to %(ip)s"), {'type': tunnel_type, 'ip': remote_ip}) return 0 self.tun_br_ofports[tunnel_type][remote_ip] = ofport # Add flow in default table to resubmit to the right # tunneling table (lvid will be set in the latter) br.setup_tunnel_port(tunnel_type, ofport) ofports = self.tun_br_ofports[tunnel_type].values() if ofports and not self.l2_pop: # Update flooding flows to include the new tunnel for vlan_mapping in list(self.local_vlan_map.values()): if vlan_mapping.network_type == tunnel_type: br.install_flood_to_tun(vlan_mapping.vlan, vlan_mapping.segmentation_id, ofports) return ofport def setup_tunnel_port(self, br, remote_ip, network_type): port_name = self.get_tunnel_name( network_type, self.local_ip, remote_ip) if port_name is None: return 0 ofport = self._setup_tunnel_port(br, port_name, remote_ip, network_type) return ofport def cleanup_tunnel_port(self, br, tun_ofport, tunnel_type): # Check if this tunnel port is still used for lvm in self.local_vlan_map.values(): if tun_ofport in lvm.tun_ofports: break # If not, remove it else: items = list(self.tun_br_ofports[tunnel_type].items()) for remote_ip, ofport in items: if ofport == tun_ofport: port_name = self.get_tunnel_name( tunnel_type, self.local_ip, remote_ip) br.delete_port(port_name) br.cleanup_tunnel_port(ofport) self.tun_br_ofports[tunnel_type].pop(remote_ip, None) def treat_devices_added_or_updated(self, devices, ovs_restarted): skipped_devices = [] need_binding_devices = [] security_disabled_devices = [] devices_details_list = ( self.plugin_rpc.get_devices_details_list_and_failed_devices( self.context, devices, self.agent_id, self.conf.host)) failed_devices = set(devices_details_list.get('failed_devices')) devices = devices_details_list.get('devices') vif_by_id = self.int_br.get_vifs_by_ids( [vif['device'] for vif in devices]) for details in devices: device = details['device'] LOG.debug("Processing port: %s", device) port = vif_by_id.get(device) if not port: # The port disappeared and cannot be processed LOG.info(_LI("Port %s was not found on the integration bridge " "and will therefore not be processed"), device) skipped_devices.append(device) 
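                # Skipped devices are later subtracted from
                # port_info['current'] by process_network_ports(), so a
                # VIF that reappears is detected as 'added' again on a
                # subsequent scan.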
continue if 'port_id' in details: LOG.info(_LI("Port %(device)s updated. Details: %(details)s"), {'device': device, 'details': details}) details['vif_port'] = port need_binding = self.treat_vif_port(port, details['port_id'], details['network_id'], details['network_type'], details['physical_network'], details['segmentation_id'], details['admin_state_up'], details['fixed_ips'], details['device_owner'], ovs_restarted) if need_binding: need_binding_devices.append(details) port_security = details['port_security_enabled'] has_sgs = 'security_groups' in details if not port_security or not has_sgs: security_disabled_devices.append(device) self._update_port_network(details['port_id'], details['network_id']) self.ext_manager.handle_port(self.context, details) else: LOG.warning( _LW("Device %s not defined on plugin or binding failed"), device) if (port and port.ofport != -1): self.port_dead(port) return (skipped_devices, need_binding_devices, security_disabled_devices, failed_devices) def _update_port_network(self, port_id, network_id): self._clean_network_ports(port_id) self.network_ports[network_id].add(port_id) def treat_ancillary_devices_added(self, devices): devices_details_list = ( self.plugin_rpc.get_devices_details_list_and_failed_devices( self.context, devices, self.agent_id, self.conf.host)) failed_devices = set(devices_details_list.get('failed_devices')) devices_added = [ d['device'] for d in devices_details_list.get('devices')] # update plugin about port status devices_set_up = ( self.plugin_rpc.update_device_list(self.context, devices_added, [], self.agent_id, self.conf.host)) failed_devices |= set(devices_set_up.get('failed_devices_up')) LOG.info(_LI("Ancillary Ports %(added)s added, failed devices " "%(failed)s"), {'added': devices, 'failed': failed_devices}) return failed_devices def treat_devices_removed(self, devices): self.sg_agent.remove_devices_filter(devices) LOG.info(_LI("Ports %s removed"), devices) devices_down = self.plugin_rpc.update_device_list(self.context, [], devices, self.agent_id, self.conf.host) failed_devices = set(devices_down.get('failed_devices_down')) LOG.debug("Port removal failed for %s", failed_devices) for device in devices: self.ext_manager.delete_port(self.context, {'port_id': device}) self.port_unbound(device) return failed_devices def treat_ancillary_devices_removed(self, devices): LOG.info(_LI("Ancillary ports %s removed"), devices) devices_down = self.plugin_rpc.update_device_list(self.context, [], devices, self.agent_id, self.conf.host) LOG.info(_LI("Devices down %s "), devices_down) failed_devices = set(devices_down.get('failed_devices_down')) if failed_devices: LOG.debug("Port removal failed for %s", failed_devices) for detail in devices_down.get('devices_down'): if detail['exists']: LOG.info(_LI("Port %s updated."), detail['device']) # Nothing to do regarding local networking else: LOG.debug("Device %s not defined on plugin", detail['device']) return failed_devices def process_network_ports(self, port_info, ovs_restarted): failed_devices = {'added': set(), 'removed': set()} # TODO(salv-orlando): consider a solution for ensuring notifications # are processed exactly in the same order in which they were # received. This is tricky because there are two notification # sources: the neutron server, and the ovs db monitor process # If there is an exception while processing security groups ports # will not be wired anyway, and a resync will be triggered # VIF wiring needs to be performed always for 'new' devices. 
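        # (e.g. a freshly plugged VM port must get its flows, its OVSDB
        # tag and its security group filters even though no 'updated'
        # event will arrive for it)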
# For updated ports, re-wiring is not needed in most cases, but needs # to be performed anyway when the admin state of a device is changed. # A device might be both in the 'added' and 'updated' # list at the same time; avoid processing it twice. devices_added_updated = (port_info.get('added', set()) | port_info.get('updated', set())) need_binding_devices = [] security_disabled_ports = [] if devices_added_updated: start = time.time() (skipped_devices, need_binding_devices, security_disabled_ports, failed_devices['added']) = ( self.treat_devices_added_or_updated( devices_added_updated, ovs_restarted)) LOG.debug("process_network_ports - iteration:%(iter_num)d - " "treat_devices_added_or_updated completed. " "Skipped %(num_skipped)d devices of " "%(num_current)d devices currently available. " "Time elapsed: %(elapsed).3f", {'iter_num': self.iter_num, 'num_skipped': len(skipped_devices), 'num_current': len(port_info['current']), 'elapsed': time.time() - start}) # Update the list of current ports storing only those which # have been actually processed. port_info['current'] = (port_info['current'] - set(skipped_devices)) # TODO(salv-orlando): Optimize avoiding applying filters # unnecessarily, (eg: when there are no IP address changes) added_ports = port_info.get('added', set()) self._add_port_tag_info(need_binding_devices) if security_disabled_ports: added_ports -= set(security_disabled_ports) self.sg_agent.setup_port_filters(added_ports, port_info.get('updated', set())) failed_devices['added'] |= self._bind_devices(need_binding_devices) if 'removed' in port_info and port_info['removed']: start = time.time() failed_devices['removed'] |= self.treat_devices_removed( port_info['removed']) LOG.debug("process_network_ports - iteration:%(iter_num)d - " "treat_devices_removed completed in %(elapsed).3f", {'iter_num': self.iter_num, 'elapsed': time.time() - start}) return failed_devices def process_ancillary_network_ports(self, port_info): failed_devices = {'added': set(), 'removed': set()} if 'added' in port_info and port_info['added']: start = time.time() failed_added = self.treat_ancillary_devices_added( port_info['added']) LOG.debug("process_ancillary_network_ports - iteration: " "%(iter_num)d - treat_ancillary_devices_added " "completed in %(elapsed).3f", {'iter_num': self.iter_num, 'elapsed': time.time() - start}) failed_devices['added'] = failed_added if 'removed' in port_info and port_info['removed']: start = time.time() failed_removed = self.treat_ancillary_devices_removed( port_info['removed']) failed_devices['removed'] = failed_removed LOG.debug("process_ancillary_network_ports - iteration: " "%(iter_num)d - treat_ancillary_devices_removed " "completed in %(elapsed).3f", {'iter_num': self.iter_num, 'elapsed': time.time() - start}) return failed_devices @classmethod def get_tunnel_hash(cls, ip_address, hashlen): try: addr = netaddr.IPAddress(ip_address) if addr.version == n_const.IP_VERSION_4: # We cannot change this from 8, since it could break # backwards-compatibility return '%08x' % addr else: # Create 32-bit Base32 encoded hash sha1 = hashlib.sha1(ip_address.encode()) iphash = base64.b32encode(sha1.digest()) return iphash[:hashlen].decode().lower() except Exception: LOG.warning(_LW("Invalid remote IP: %s"), ip_address) return def tunnel_sync(self): LOG.info(_LI("Configuring tunnel endpoints to other OVS agents")) try: for tunnel_type in self.tunnel_types: details = self.plugin_rpc.tunnel_sync(self.context, self.local_ip, tunnel_type, self.conf.host) if not self.l2_pop: tunnels = 
details['tunnels'] for tunnel in tunnels: if self.local_ip != tunnel['ip_address']: remote_ip = tunnel['ip_address'] tun_name = self.get_tunnel_name( tunnel_type, self.local_ip, remote_ip) if tun_name is None: continue self._setup_tunnel_port(self.tun_br, tun_name, tunnel['ip_address'], tunnel_type) except Exception as e: LOG.debug("Unable to sync tunnel IP %(local_ip)s: %(e)s", {'local_ip': self.local_ip, 'e': e}) return True return False @classmethod def get_tunnel_name(cls, network_type, local_ip, remote_ip): # This string is used to build port and interface names in OVS. # Port and interface names can be max 16 characters long, # including NULL, and must be unique per table per host. # We make the name as long as possible given the network_type, # for example, 'vxlan-012345678' or 'geneve-01234567'. # Remove length of network type and dash hashlen = n_const.DEVICE_NAME_MAX_LEN - len(network_type) - 1 remote_tunnel_hash = cls.get_tunnel_hash(remote_ip, hashlen) if not remote_tunnel_hash: return None return '%s-%s' % (network_type, remote_tunnel_hash) def _agent_has_updates(self, polling_manager): return (polling_manager.is_polling_required or self.updated_ports or self.deleted_ports or self.sg_agent.firewall_refresh_needed()) def _port_info_has_changes(self, port_info): return (port_info.get('added') or port_info.get('removed') or port_info.get('updated')) def check_ovs_status(self): # Check for the canary flow status = self.int_br.check_canary_table() if status == constants.OVS_RESTARTED: LOG.warning(_LW("OVS is restarted. OVSNeutronAgent will reset " "bridges and recover ports.")) elif status == constants.OVS_DEAD: LOG.warning(_LW("OVS is dead. OVSNeutronAgent will keep running " "and checking OVS status periodically.")) return status def loop_count_and_wait(self, start_time, port_stats): # sleep till end of polling interval elapsed = time.time() - start_time LOG.debug("Agent rpc_loop - iteration:%(iter_num)d " "completed. Processed ports statistics: " "%(port_stats)s. Elapsed:%(elapsed).3f", {'iter_num': self.iter_num, 'port_stats': port_stats, 'elapsed': elapsed}) if elapsed < self.polling_interval: time.sleep(self.polling_interval - elapsed) else: LOG.debug("Loop iteration exceeded interval " "(%(polling_interval)s vs. %(elapsed)s)!", {'polling_interval': self.polling_interval, 'elapsed': elapsed}) self.iter_num = self.iter_num + 1 def get_port_stats(self, port_info, ancillary_port_info): port_stats = { 'regular': { 'added': len(port_info.get('added', [])), 'updated': len(port_info.get('updated', [])), 'removed': len(port_info.get('removed', []))}} if self.ancillary_brs: port_stats['ancillary'] = { 'added': len(ancillary_port_info.get('added', [])), 'removed': len(ancillary_port_info.get('removed', []))} return port_stats def cleanup_stale_flows(self): bridges = [self.int_br] bridges.extend(self.phys_brs.values()) if self.enable_tunneling: bridges.append(self.tun_br) for bridge in bridges: LOG.info(_LI("Cleaning stale %s flows"), bridge.br_name) bridge.cleanup_flows() def process_port_info(self, start, polling_manager, sync, ovs_restarted, ports, ancillary_ports, updated_ports_copy, consecutive_resyncs, ports_not_ready_yet, failed_devices, failed_ancillary_devices): # There are polling managers that don't have get_events, e.g. 
# AlwaysPoll used by windows implementations # REVISIT (rossella_s) This needs to be reworked to hide implementation # details regarding polling in BasePollingManager subclasses if sync or not (hasattr(polling_manager, 'get_events')): if sync: LOG.info(_LI("Agent out of sync with plugin!")) consecutive_resyncs = consecutive_resyncs + 1 if (consecutive_resyncs >= constants.MAX_DEVICE_RETRIES): LOG.warning(_LW( "Clearing cache of registered ports," " retries to resync were > %s"), constants.MAX_DEVICE_RETRIES) ports.clear() ancillary_ports.clear() consecutive_resyncs = 0 else: consecutive_resyncs = 0 # TODO(rossella_s): For implementations that use AlwaysPoll # resync if a device failed. This can be improved in future sync = (any(failed_devices.values()) or any(failed_ancillary_devices.values())) # NOTE(rossella_s) don't empty the queue of events # calling polling_manager.get_events() since # the agent might miss some event (for example a port # deletion) reg_ports = (set() if ovs_restarted else ports) port_info = self.scan_ports(reg_ports, sync, updated_ports_copy) # Treat ancillary devices if they exist if self.ancillary_brs: ancillary_port_info = self.scan_ancillary_ports( ancillary_ports, sync) LOG.debug("Agent rpc_loop - iteration:%(iter_num)d" " - ancillary port info retrieved. " "Elapsed:%(elapsed).3f", {'iter_num': self.iter_num, 'elapsed': time.time() - start}) else: ancillary_port_info = {} else: consecutive_resyncs = 0 events = polling_manager.get_events() port_info, ancillary_port_info, ports_not_ready_yet = ( self.process_ports_events(events, ports, ancillary_ports, ports_not_ready_yet, failed_devices, failed_ancillary_devices, updated_ports_copy)) return (port_info, ancillary_port_info, consecutive_resyncs, ports_not_ready_yet) def _remove_devices_not_to_retry(self, failed_devices, failed_ancillary_devices, devices_not_to_retry, ancillary_devices_not_to_retry): """This method removes the devices that exceeded the number of retries from failed_devices and failed_ancillary_devices """ for event in ['added', 'removed']: failed_devices[event] = ( failed_devices[event] - devices_not_to_retry[event]) failed_ancillary_devices[event] = ( failed_ancillary_devices[event] - ancillary_devices_not_to_retry[event]) def _get_devices_not_to_retry(self, failed_devices, failed_ancillary_devices, failed_devices_retries_map): """Return the devices not to retry and update the retries map""" new_failed_devices_retries_map = {} devices_not_to_retry = {} ancillary_devices_not_to_retry = {} def _increase_retries(devices_set): devices_not_to_retry = set() for dev in devices_set: retries = failed_devices_retries_map.get(dev, 0) if retries >= constants.MAX_DEVICE_RETRIES: devices_not_to_retry.add(dev) LOG.warning(_LW( "Device %(dev)s failed for %(times)s times and won't " "be retried anymore"), { 'dev': dev, 'times': constants.MAX_DEVICE_RETRIES}) else: new_failed_devices_retries_map[dev] = retries + 1 return devices_not_to_retry for event in ['added', 'removed']: devices_not_to_retry[event] = _increase_retries( failed_devices[event]) ancillary_devices_not_to_retry[event] = _increase_retries( failed_ancillary_devices[event]) return (new_failed_devices_retries_map, devices_not_to_retry, ancillary_devices_not_to_retry) def update_retries_map_and_remove_devs_not_to_retry( self, failed_devices, failed_ancillary_devices, failed_devices_retries_map): (new_failed_devices_retries_map, devices_not_to_retry, ancillary_devices_not_to_retry) = self._get_devices_not_to_retry( failed_devices, failed_ancillary_devices, 
failed_devices_retries_map) self._remove_devices_not_to_retry( failed_devices, failed_ancillary_devices, devices_not_to_retry, ancillary_devices_not_to_retry) return new_failed_devices_retries_map def rpc_loop(self, polling_manager=None): if not polling_manager: polling_manager = polling.get_polling_manager( minimize_polling=False) sync = False ports = set() updated_ports_copy = set() ancillary_ports = set() tunnel_sync = True ovs_restarted = False consecutive_resyncs = 0 need_clean_stale_flow = True ports_not_ready_yet = set() failed_devices = {'added': set(), 'removed': set()} failed_ancillary_devices = {'added': set(), 'removed': set()} failed_devices_retries_map = {} while self._check_and_handle_signal(): if self.fullsync: LOG.info(_LI("rpc_loop doing a full sync.")) sync = True self.fullsync = False port_info = {} ancillary_port_info = {} start = time.time() LOG.debug("Agent rpc_loop - iteration:%d started", self.iter_num) ovs_status = self.check_ovs_status() if ovs_status == constants.OVS_RESTARTED: self.setup_integration_br() self.setup_physical_bridges(self.bridge_mappings) if self.enable_tunneling: self._reset_tunnel_ofports() self.setup_tunnel_br() self.setup_tunnel_br_flows() tunnel_sync = True if self.enable_distributed_routing: self.dvr_agent.reset_ovs_parameters(self.int_br, self.tun_br, self.patch_int_ofport, self.patch_tun_ofport) self.dvr_agent.reset_dvr_parameters() self.dvr_agent.setup_dvr_flows() # restart the polling manager so that it will signal as added # all the current ports # REVISIT (rossella_s) Define a method "reset" in # BasePollingManager that will be implemented by AlwaysPoll as # no action and by InterfacePollingMinimizer as start/stop if isinstance( polling_manager, polling.InterfacePollingMinimizer): polling_manager.stop() polling_manager.start() elif ovs_status == constants.OVS_DEAD: # Agent doesn't apply any operations when ovs is dead, to # prevent unexpected failure or crash. Sleep and continue # loop in which ovs status will be checked periodically. port_stats = self.get_port_stats({}, {}) self.loop_count_and_wait(start, port_stats) continue # Notify the plugin of tunnel IP if self.enable_tunneling and tunnel_sync: try: tunnel_sync = self.tunnel_sync() except Exception: LOG.exception( _LE("Error while configuring tunnel endpoints")) tunnel_sync = True ovs_restarted |= (ovs_status == constants.OVS_RESTARTED) devices_need_retry = (any(failed_devices.values()) or any(failed_ancillary_devices.values()) or ports_not_ready_yet) if (self._agent_has_updates(polling_manager) or sync or devices_need_retry): try: LOG.debug("Agent rpc_loop - iteration:%(iter_num)d - " "starting polling. Elapsed:%(elapsed).3f", {'iter_num': self.iter_num, 'elapsed': time.time() - start}) # Save updated ports dict to perform rollback in # case resync would be needed, and then clear # self.updated_ports. 
As the greenthread should not yield # between these two statements, this will be thread-safe updated_ports_copy = self.updated_ports self.updated_ports = set() (port_info, ancillary_port_info, consecutive_resyncs, ports_not_ready_yet) = (self.process_port_info( start, polling_manager, sync, ovs_restarted, ports, ancillary_ports, updated_ports_copy, consecutive_resyncs, ports_not_ready_yet, failed_devices, failed_ancillary_devices)) sync = False self.process_deleted_ports(port_info) ofport_changed_ports = self.update_stale_ofport_rules() if ofport_changed_ports: port_info.setdefault('updated', set()).update( ofport_changed_ports) LOG.debug("Agent rpc_loop - iteration:%(iter_num)d - " "port information retrieved. " "Elapsed:%(elapsed).3f", {'iter_num': self.iter_num, 'elapsed': time.time() - start}) # Secure and wire/unwire VIFs and update their status # on Neutron server if (self._port_info_has_changes(port_info) or self.sg_agent.firewall_refresh_needed() or ovs_restarted): LOG.debug("Starting to process devices in:%s", port_info) failed_devices = self.process_network_ports( port_info, ovs_restarted) if need_clean_stale_flow: self.cleanup_stale_flows() need_clean_stale_flow = False LOG.debug("Agent rpc_loop - iteration:%(iter_num)d - " "ports processed. Elapsed:%(elapsed).3f", {'iter_num': self.iter_num, 'elapsed': time.time() - start}) ports = port_info['current'] if self.ancillary_brs: failed_ancillary_devices = ( self.process_ancillary_network_ports( ancillary_port_info)) LOG.debug("Agent rpc_loop - iteration: " "%(iter_num)d - ancillary ports " "processed. Elapsed:%(elapsed).3f", {'iter_num': self.iter_num, 'elapsed': time.time() - start}) ancillary_ports = ancillary_port_info['current'] polling_manager.polling_completed() failed_devices_retries_map = ( self.update_retries_map_and_remove_devs_not_to_retry( failed_devices, failed_ancillary_devices, failed_devices_retries_map)) # Keep this flag in the last line of "try" block, # so we can sure that no other Exception occurred. ovs_restarted = False self._dispose_local_vlan_hints() except Exception: LOG.exception(_LE("Error while processing VIF ports")) # Put the ports back in self.updated_port self.updated_ports |= updated_ports_copy sync = True port_stats = self.get_port_stats(port_info, ancillary_port_info) self.loop_count_and_wait(start, port_stats) def daemon_loop(self): # Start everything. LOG.info(_LI("Agent initialized successfully, now running... 
")) signal.signal(signal.SIGTERM, self._handle_sigterm) if hasattr(signal, 'SIGHUP'): signal.signal(signal.SIGHUP, self._handle_sighup) with polling.get_polling_manager( self.minimize_polling, self.ovsdb_monitor_respawn_interval) as pm: self.rpc_loop(polling_manager=pm) def _handle_sigterm(self, signum, frame): self.catch_sigterm = True if self.quitting_rpc_timeout: self.set_rpc_timeout(self.quitting_rpc_timeout) def _handle_sighup(self, signum, frame): self.catch_sighup = True def _check_and_handle_signal(self): if self.catch_sigterm: LOG.info(_LI("Agent caught SIGTERM, quitting daemon loop.")) self.run_daemon_loop = False self.catch_sigterm = False if self.catch_sighup: LOG.info(_LI("Agent caught SIGHUP, resetting.")) self.conf.reload_config_files() config.setup_logging() LOG.debug('Full set of CONF:') self.conf.log_opt_values(LOG, logging.DEBUG) self.catch_sighup = False return self.run_daemon_loop def set_rpc_timeout(self, timeout): for rpc_api in (self.plugin_rpc, self.sg_plugin_rpc, self.dvr_plugin_rpc, self.state_rpc): rpc_api.client.timeout = timeout def _check_agent_configurations(self): if (self.enable_distributed_routing and self.enable_tunneling and not self.l2_pop): raise ValueError(_("DVR deployments for VXLAN/GRE/Geneve " "underlays require L2-pop to be enabled, " "in both the Agent and Server side.")) def validate_local_ip(local_ip): """Verify if the ip exists on the agent's host.""" if not ip_lib.IPWrapper().get_device_by_ip(local_ip): LOG.error(_LE("Tunneling can't be enabled with invalid local_ip '%s'." " IP couldn't be found on this host's interfaces."), local_ip) raise SystemExit(1) def validate_tunnel_config(tunnel_types, local_ip): """Verify local ip and tunnel config if tunneling is enabled.""" if not tunnel_types: return validate_local_ip(local_ip) for tun in tunnel_types: if tun not in constants.TUNNEL_NETWORK_TYPES: LOG.error(_LE('Invalid tunnel type specified: %s'), tun) raise SystemExit(1) def prepare_xen_compute(): is_xen_compute_host = 'rootwrap-xen-dom0' in cfg.CONF.AGENT.root_helper if is_xen_compute_host: # Force ip_lib to always use the root helper to ensure that ip # commands target xen dom0 rather than domU. cfg.CONF.register_opts(ip_lib.OPTS) cfg.CONF.set_default('ip_lib_force_root', True) def main(bridge_classes): prepare_xen_compute() validate_tunnel_config(cfg.CONF.AGENT.tunnel_types, cfg.CONF.OVS.local_ip) try: agent = OVSNeutronAgent(bridge_classes, cfg.CONF) except (RuntimeError, ValueError) as e: LOG.error(_LE("%s Agent terminated!"), e) sys.exit(1) agent.daemon_loop() neutron-8.4.0/neutron/plugins/ml2/drivers/openvswitch/agent/common/0000775000567000056710000000000013044373210026635 5ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/plugins/ml2/drivers/openvswitch/agent/common/config.py0000664000567000056710000002151713044372760030473 0ustar jenkinsjenkins00000000000000# Copyright 2012 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from oslo_config import cfg from neutron._i18n import _ from neutron.agent.common import config from neutron.common import constants as n_const from neutron.plugins.common import constants as p_const from neutron.plugins.ml2.drivers.openvswitch.agent.common \ import constants DEFAULT_BRIDGE_MAPPINGS = [] DEFAULT_VLAN_RANGES = [] DEFAULT_TUNNEL_RANGES = [] DEFAULT_TUNNEL_TYPES = [] ovs_opts = [ cfg.StrOpt('integration_bridge', default='br-int', help=_("Integration bridge to use. " "Do not change this parameter unless you have a good " "reason to. This is the name of the OVS integration " "bridge. There is one per hypervisor. The integration " "bridge acts as a virtual 'patch bay'. All VM VIFs are " "attached to this bridge and then 'patched' according " "to their network connectivity.")), cfg.StrOpt('tunnel_bridge', default='br-tun', help=_("Tunnel bridge to use.")), cfg.StrOpt('int_peer_patch_port', default='patch-tun', help=_("Peer patch port in integration bridge for tunnel " "bridge.")), cfg.StrOpt('tun_peer_patch_port', default='patch-int', help=_("Peer patch port in tunnel bridge for integration " "bridge.")), cfg.IPOpt('local_ip', help=_("Local IP address of tunnel endpoint. Can be either " "an IPv4 or IPv6 address.")), cfg.ListOpt('bridge_mappings', default=DEFAULT_BRIDGE_MAPPINGS, help=_("Comma-separated list of : " "tuples mapping physical network names to the agent's " "node-specific Open vSwitch bridge names to be used " "for flat and VLAN networks. The length of bridge " "names should be no more than 11. Each bridge must " "exist, and should have a physical network interface " "configured as a port. All physical networks " "configured on the server should have mappings to " "appropriate bridges on each agent. " "Note: If you remove a bridge from this " "mapping, make sure to disconnect it from the " "integration bridge as it won't be managed by the " "agent anymore. Deprecated for ofagent.")), cfg.BoolOpt('use_veth_interconnection', default=False, help=_("Use veths instead of patch ports to interconnect the " "integration bridge to physical networks. " "Support kernel without Open vSwitch patch port " "support so long as it is set to True.")), cfg.StrOpt('of_interface', default='ovs-ofctl', choices=['ovs-ofctl', 'native'], help=_("OpenFlow interface to use.")), cfg.StrOpt('datapath_type', default=constants.OVS_DATAPATH_SYSTEM, choices=[constants.OVS_DATAPATH_SYSTEM, constants.OVS_DATAPATH_NETDEV], help=_("OVS datapath to use. 'system' is the default value and " "corresponds to the kernel datapath. To enable the " "userspace datapath set this value to 'netdev'.")), cfg.StrOpt('vhostuser_socket_dir', default=constants.VHOST_USER_SOCKET_DIR, help=_("OVS vhost-user socket directory.")), cfg.IPOpt('of_listen_address', default='127.0.0.1', help=_("Address to listen on for OpenFlow connections. " "Used only for 'native' driver.")), cfg.PortOpt('of_listen_port', default=6633, help=_("Port to listen on for OpenFlow connections. " "Used only for 'native' driver.")), cfg.IntOpt('of_connect_timeout', default=30, help=_("Timeout in seconds to wait for " "the local switch connecting the controller. " "Used only for 'native' driver.")), cfg.IntOpt('of_request_timeout', default=10, help=_("Timeout in seconds to wait for a single " "OpenFlow request. 
" "Used only for 'native' driver.")), ] agent_opts = [ cfg.IntOpt('polling_interval', default=2, help=_("The number of seconds the agent will wait between " "polling for local device changes.")), cfg.BoolOpt('minimize_polling', default=True, help=_("Minimize polling by monitoring ovsdb for interface " "changes.")), cfg.IntOpt('ovsdb_monitor_respawn_interval', default=constants.DEFAULT_OVSDBMON_RESPAWN, help=_("The number of seconds to wait before respawning the " "ovsdb monitor after losing communication with it.")), cfg.ListOpt('tunnel_types', default=DEFAULT_TUNNEL_TYPES, help=_("Network types supported by the agent " "(gre and/or vxlan).")), cfg.PortOpt('vxlan_udp_port', default=p_const.VXLAN_UDP_PORT, help=_("The UDP port to use for VXLAN tunnels.")), cfg.IntOpt('veth_mtu', default=9000, help=_("MTU size of veth interfaces")), cfg.BoolOpt('l2_population', default=False, help=_("Use ML2 l2population mechanism driver to learn " "remote MAC and IPs and improve tunnel scalability.")), cfg.BoolOpt('arp_responder', default=False, help=_("Enable local ARP responder if it is supported. " "Requires OVS 2.1 and ML2 l2population driver. " "Allows the switch (when supporting an overlay) " "to respond to an ARP request locally without " "performing a costly ARP broadcast into the overlay.")), cfg.BoolOpt('prevent_arp_spoofing', default=True, deprecated_for_removal=True, help=_("Enable suppression of ARP responses that don't match " "an IP address that belongs to the port from which " "they originate. Note: This prevents the VMs attached " "to this agent from spoofing, it doesn't protect them " "from other devices which have the capability to spoof " "(e.g. bare metal or VMs attached to agents without " "this flag set to True). Spoofing rules will not be " "added to any ports that have port security disabled. " "For LinuxBridge, this requires ebtables. For OVS, it " "requires a version that supports matching ARP " "headers. This option will be removed in Newton so " "the only way to disable protection will be via the " "port security extension.")), cfg.BoolOpt('dont_fragment', default=True, help=_("Set or un-set the don't fragment (DF) bit on " "outgoing IP packet carrying GRE/VXLAN tunnel.")), cfg.BoolOpt('enable_distributed_routing', default=False, help=_("Make the l2 agent run in DVR mode.")), cfg.IntOpt('quitting_rpc_timeout', default=10, help=_("Set new timeout in seconds for new rpc calls after " "agent receives SIGTERM. If value is set to 0, rpc " "timeout won't be changed")), cfg.BoolOpt('drop_flows_on_start', default=False, help=_("Reset flow table on start. Setting this to True will " "cause brief traffic interruption.")), cfg.BoolOpt('tunnel_csum', default=False, help=_("Set or un-set the tunnel header checksum on " "outgoing IP packet carrying GRE/VXLAN tunnel.")), cfg.StrOpt('agent_type', default=n_const.AGENT_TYPE_OVS, deprecated_for_removal=True, help=_("Selects the Agent Type reported")) ] cfg.CONF.register_opts(ovs_opts, "OVS") cfg.CONF.register_opts(agent_opts, "AGENT") config.register_agent_state_opts_helper(cfg.CONF) neutron-8.4.0/neutron/plugins/ml2/drivers/openvswitch/agent/common/__init__.py0000664000567000056710000000000013044372736030750 0ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/plugins/ml2/drivers/openvswitch/agent/common/constants.py0000664000567000056710000000732713044372760031245 0ustar jenkinsjenkins00000000000000# Copyright (c) 2012 OpenStack Foundation. 
# # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. from neutron.plugins.common import constants as p_const # Special vlan_id value in ovs_vlan_allocations table indicating flat network FLAT_VLAN_ID = -1 # Topic for tunnel notifications between the plugin and agent TUNNEL = 'tunnel' # Name prefixes for veth device or patch port pair linking the integration # bridge with the physical bridge for a physical network PEER_INTEGRATION_PREFIX = 'int-' PEER_PHYSICAL_PREFIX = 'phy-' # Nonexistent peer used to create patch ports without associating them, it # allows to define flows before association NONEXISTENT_PEER = 'nonexistent-peer' # The different types of tunnels TUNNEL_NETWORK_TYPES = [p_const.TYPE_GRE, p_const.TYPE_VXLAN, p_const.TYPE_GENEVE] ### OpenFlow table IDs ## Integration bridge (int_br) LOCAL_SWITCHING = 0 # Various tables for DVR use of integration bridge flows DVR_TO_SRC_MAC = 1 DVR_TO_SRC_MAC_VLAN = 2 CANARY_TABLE = 23 # Table for ARP poison/spoofing prevention rules ARP_SPOOF_TABLE = 24 # Table for MAC spoof filtering MAC_SPOOF_TABLE = 25 # Tables used for ovs firewall BASE_EGRESS_TABLE = 71 RULES_EGRESS_TABLE = 72 ACCEPT_OR_INGRESS_TABLE = 73 BASE_INGRESS_TABLE = 81 RULES_INGRESS_TABLE = 82 OVS_FIREWALL_TABLES = ( BASE_EGRESS_TABLE, RULES_EGRESS_TABLE, ACCEPT_OR_INGRESS_TABLE, BASE_INGRESS_TABLE, RULES_INGRESS_TABLE, ) ## Tunnel bridge (tun_br) # Various tables for tunneling flows DVR_PROCESS = 1 PATCH_LV_TO_TUN = 2 GRE_TUN_TO_LV = 3 VXLAN_TUN_TO_LV = 4 GENEVE_TUN_TO_LV = 6 DVR_NOT_LEARN = 9 LEARN_FROM_TUN = 10 UCAST_TO_TUN = 20 ARP_RESPONDER = 21 FLOOD_TO_TUN = 22 ## Physical Bridges (phys_brs) # Various tables for DVR use of physical bridge flows DVR_PROCESS_VLAN = 1 LOCAL_VLAN_TRANSLATION = 2 DVR_NOT_LEARN_VLAN = 3 ### end of OpenFlow table IDs # type for ARP reply in ARP header ARP_REPLY = '0x2' # Map tunnel types to tables number TUN_TABLE = {p_const.TYPE_GRE: GRE_TUN_TO_LV, p_const.TYPE_VXLAN: VXLAN_TUN_TO_LV, p_const.TYPE_GENEVE: GENEVE_TUN_TO_LV} # The default respawn interval for the ovsdb monitor DEFAULT_OVSDBMON_RESPAWN = 30 # Represent invalid OF Port OFPORT_INVALID = -1 ARP_RESPONDER_ACTIONS = ('move:NXM_OF_ETH_SRC[]->NXM_OF_ETH_DST[],' 'mod_dl_src:%(mac)s,' 'load:0x2->NXM_OF_ARP_OP[],' 'move:NXM_NX_ARP_SHA[]->NXM_NX_ARP_THA[],' 'move:NXM_OF_ARP_SPA[]->NXM_OF_ARP_TPA[],' 'load:%(mac)#x->NXM_NX_ARP_SHA[],' 'load:%(ip)#x->NXM_OF_ARP_SPA[],' 'in_port') # Represent ovs status OVS_RESTARTED = 0 OVS_NORMAL = 1 OVS_DEAD = 2 EXTENSION_DRIVER_TYPE = 'ovs' # ovs datapath types OVS_DATAPATH_SYSTEM = 'system' OVS_DATAPATH_NETDEV = 'netdev' OVS_DPDK_VHOST_USER = 'dpdkvhostuser' # default ovs vhost-user socket location VHOST_USER_SOCKET_DIR = '/var/run/openvswitch' MAX_DEVICE_RETRIES = 5 # OpenFlow version constants OPENFLOW10 = "OpenFlow10" OPENFLOW11 = "OpenFlow11" OPENFLOW12 = "OpenFlow12" OPENFLOW13 = "OpenFlow13" OPENFLOW14 = "OpenFlow14" # A placeholder for dead vlans. 
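# Ports that lose their binding are re-tagged to this VLAN by the
# agent's port_dead() and their traffic is dropped; being one above the
# highest valid VLAN tag, it can never collide with a provisioned local
# VLAN.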
DEAD_VLAN_TAG = p_const.MAX_VLAN_TAG + 1 neutron-8.4.0/neutron/plugins/ml2/drivers/openvswitch/agent/ovs_agent_extension_api.py0000664000567000056710000000663213044372760032651 0ustar jenkinsjenkins00000000000000# Copyright 2016 Intel Corporation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron.agent.common import ovs_lib class OVSCookieBridge(object): '''Passthrough bridge adding cookies before calling the underlying bridge This class creates a bridge that will pass all calls to its underlying bridge, except (add/mod/del/dump)_flow calls for which a cookie (reserved at init from the underlying bridge) will be added before calling the underlying bridge. ''' def __init__(self, bridge): """:param bridge: underlying bridge :type bridge: OVSBridge """ self.bridge = bridge self._cookie = self.bridge.request_cookie() @property def default_cookie(self): return self._cookie def do_action_flows(self, action, kwargs_list): # NOTE(tmorin): the OVSBridge code is excluding the 'del' # action from this step where a cookie # is added, but I think we need to keep it so that # an extension does not delete flows of another # extension for kw in kwargs_list: kw.setdefault('cookie', self._cookie) if action in ('mod', 'del'): kw['cookie'] = ovs_lib.check_cookie_mask(str(kw['cookie'])) self.bridge.do_action_flows(action, kwargs_list) def add_flow(self, **kwargs): self.do_action_flows('add', [kwargs]) def mod_flow(self, **kwargs): self.do_action_flows('mod', [kwargs]) def delete_flows(self, **kwargs): self.do_action_flows('del', [kwargs]) def __getattr__(self, name): # for all other methods this class is a passthrough return getattr(self.bridge, name) def deferred(self, **kwargs): # NOTE(tmorin): we can't passthrough for deferred() or else the # resulting DeferredOVSBridge apply_flows method would call # the (non-cookie-filtered) do_action_flow of the underlying bridge return ovs_lib.DeferredOVSBridge(self, **kwargs) class OVSAgentExtensionAPI(object): '''Implements the Agent API for Open vSwitch agent. Extensions can gain access to this API by overriding the consume_api method which has been added to the AgentCoreResourceExtension class. ''' def __init__(self, int_br, tun_br): super(OVSAgentExtensionAPI, self).__init__() self.br_int = int_br self.br_tun = tun_br def request_int_br(self): """Allows extensions to request an integration bridge to use for extension specific flows. """ return OVSCookieBridge(self.br_int) def request_tun_br(self): """Allows extensions to request a tunnel bridge to use for extension specific flows. If tunneling is not enabled, this method will return None.
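        Illustrative sketch: a hypothetical extension that received this
        API via consume_api() might use the cookie-scoped bridge like so::

            def consume_api(self, agent_api):
                self.agent_api = agent_api

            def handle_port(self, context, port):
                tun_br = self.agent_api.request_tun_br()
                if tun_br is None:
                    return  # tunneling is not enabled
                # the extension's reserved cookie is added automatically
                tun_br.add_flow(table=0, priority=10, actions='normal')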
""" if not self.br_tun: return None return OVSCookieBridge(self.br_tun) neutron-8.4.0/neutron/plugins/ml2/drivers/openvswitch/agent/extension_drivers/0000775000567000056710000000000013044373210031117 5ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/plugins/ml2/drivers/openvswitch/agent/extension_drivers/qos_driver.py0000664000567000056710000000520513044372760033661 0ustar jenkinsjenkins00000000000000# Copyright (c) 2015 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import cfg from oslo_log import log as logging from neutron.agent.common import ovs_lib from neutron.agent.l2.extensions import qos from neutron.plugins.ml2.drivers.openvswitch.mech_driver import ( mech_openvswitch) LOG = logging.getLogger(__name__) class QosOVSAgentDriver(qos.QosAgentDriver): SUPPORTED_RULES = ( mech_openvswitch.OpenvswitchMechanismDriver.supported_qos_rule_types) def __init__(self): super(QosOVSAgentDriver, self).__init__() self.br_int_name = cfg.CONF.OVS.integration_bridge self.br_int = None def initialize(self): self.br_int = ovs_lib.OVSBridge(self.br_int_name) def create_bandwidth_limit(self, port, rule): self.update_bandwidth_limit(port, rule) def update_bandwidth_limit(self, port, rule): vif_port = port.get('vif_port') if not vif_port: port_id = port.get('port_id', None) LOG.debug("update_bandwidth_limit was received for port %s but " "vif_port was not found. It seems that port is already " "deleted", port_id) return max_kbps = rule.max_kbps # NOTE(slaweq): According to ovs docs: # http://openvswitch.org/support/dist-docs/ovs-vswitchd.conf.db.5.html # ovs accepts only integer values of burst: max_burst_kbps = int(self._get_egress_burst_value(rule)) self.br_int.create_egress_bw_limit_for_port(vif_port.port_name, max_kbps, max_burst_kbps) def delete_bandwidth_limit(self, port): vif_port = port.get('vif_port') if not vif_port: port_id = port.get('port_id', None) LOG.debug("delete_bandwidth_limit was received for port %s but " "vif_port was not found. It seems that port is already " "deleted", port_id) return self.br_int.delete_egress_bw_limit_for_port(vif_port.port_name) neutron-8.4.0/neutron/plugins/ml2/drivers/openvswitch/agent/extension_drivers/__init__.py0000664000567000056710000000000013044372736033232 0ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/plugins/ml2/drivers/openvswitch/agent/main.py0000664000567000056710000000320413044372760026653 0ustar jenkinsjenkins00000000000000# Copyright (C) 2014,2015 VA Linux Systems Japan K.K. # Copyright (C) 2014 Fumihiko Kakuma # Copyright (C) 2014,2015 YAMAMOTO Takashi # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and # limitations # under the License. import sys from oslo_config import cfg from oslo_log import log as logging from oslo_utils import importutils from neutron.common import config as common_config from neutron.common import utils as n_utils LOG = logging.getLogger(__name__) cfg.CONF.import_group('OVS', 'neutron.plugins.ml2.drivers.openvswitch.agent.' 'common.config') _main_modules = { 'ovs-ofctl': 'neutron.plugins.ml2.drivers.openvswitch.agent.openflow.' 'ovs_ofctl.main', 'native': 'neutron.plugins.ml2.drivers.openvswitch.agent.openflow.' 'native.main', } def main(): common_config.init(sys.argv[1:]) driver_name = cfg.CONF.OVS.of_interface mod_name = _main_modules[driver_name] mod = importutils.import_module(mod_name) mod.init_config() common_config.setup_logging() n_utils.log_opt_values(LOG) mod.main() neutron-8.4.0/neutron/plugins/ml2/drivers/__init__.py0000664000567000056710000000000013044372736024011 0ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/plugins/ml2/drivers/type_vlan.py0000664000567000056710000002675013044372760024264 0ustar jenkinsjenkins00000000000000# Copyright (c) 2013 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import sys from oslo_config import cfg from oslo_log import log from six import moves import sqlalchemy as sa from neutron._i18n import _, _LE, _LI, _LW from neutron.common import exceptions as exc from neutron.db import api as db_api from neutron.db import model_base from neutron.plugins.common import constants as p_const from neutron.plugins.common import utils as plugin_utils from neutron.plugins.ml2 import driver_api as api from neutron.plugins.ml2.drivers import helpers LOG = log.getLogger(__name__) vlan_opts = [ cfg.ListOpt('network_vlan_ranges', default=[], help=_("List of <physical_network>:<vlan_min>:<vlan_max> or " "<physical_network> specifying physical_network names " "usable for VLAN provider and tenant networks, as " "well as ranges of VLAN tags on each available for " "allocation to tenant networks.")) ] cfg.CONF.register_opts(vlan_opts, "ml2_type_vlan") class VlanAllocation(model_base.BASEV2): """Represent allocation state of a vlan_id on a physical network. If allocated is False, the vlan_id on the physical_network is available for allocation to a tenant network. If allocated is True, the vlan_id on the physical_network is in use, either as a tenant or provider network. When an allocation is released, if the vlan_id for the physical_network is inside the pool described by VlanTypeDriver.network_vlan_ranges, then allocated is set to False. If it is outside the pool, the record is deleted.
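    For example (an illustrative walk-through of the rules above): with
    network_vlan_ranges = physnet1:1000:1999, initialization pre-creates
    rows (physnet1, 1000..1999) with allocated=False; reserving VLAN 1000
    flips that row to allocated=True, while reserving provider VLAN 5,
    outside the pool, inserts a new row that is deleted again when the
    segment is released.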
""" __tablename__ = 'ml2_vlan_allocations' __table_args__ = ( sa.Index('ix_ml2_vlan_allocations_physical_network_allocated', 'physical_network', 'allocated'), model_base.BASEV2.__table_args__,) physical_network = sa.Column(sa.String(64), nullable=False, primary_key=True) vlan_id = sa.Column(sa.Integer, nullable=False, primary_key=True, autoincrement=False) allocated = sa.Column(sa.Boolean, nullable=False) class VlanTypeDriver(helpers.SegmentTypeDriver): """Manage state for VLAN networks with ML2. The VlanTypeDriver implements the 'vlan' network_type. VLAN network segments provide connectivity between VMs and other devices using any connected IEEE 802.1Q conformant physical_network segmented into virtual networks via IEEE 802.1Q headers. Up to 4094 VLAN network segments can exist on each available physical_network. """ def __init__(self): super(VlanTypeDriver, self).__init__(VlanAllocation) self._parse_network_vlan_ranges() def _parse_network_vlan_ranges(self): try: self.network_vlan_ranges = plugin_utils.parse_network_vlan_ranges( cfg.CONF.ml2_type_vlan.network_vlan_ranges) except Exception: LOG.exception(_LE("Failed to parse network_vlan_ranges. " "Service terminated!")) sys.exit(1) LOG.info(_LI("Network VLAN ranges: %s"), self.network_vlan_ranges) def _sync_vlan_allocations(self): session = db_api.get_session() with session.begin(subtransactions=True): # get existing allocations for all physical networks allocations = dict() allocs = (session.query(VlanAllocation). with_lockmode('update')) for alloc in allocs: if alloc.physical_network not in allocations: allocations[alloc.physical_network] = set() allocations[alloc.physical_network].add(alloc) # process vlan ranges for each configured physical network for (physical_network, vlan_ranges) in self.network_vlan_ranges.items(): # determine current configured allocatable vlans for # this physical network vlan_ids = set() for vlan_min, vlan_max in vlan_ranges: vlan_ids |= set(moves.range(vlan_min, vlan_max + 1)) # remove from table unallocated vlans not currently # allocatable if physical_network in allocations: for alloc in allocations[physical_network]: try: # see if vlan is allocatable vlan_ids.remove(alloc.vlan_id) except KeyError: # it's not allocatable, so check if its allocated if not alloc.allocated: # it's not, so remove it from table LOG.debug("Removing vlan %(vlan_id)s on " "physical network " "%(physical_network)s from pool", {'vlan_id': alloc.vlan_id, 'physical_network': physical_network}) session.delete(alloc) del allocations[physical_network] # add missing allocatable vlans to table for vlan_id in sorted(vlan_ids): alloc = VlanAllocation(physical_network=physical_network, vlan_id=vlan_id, allocated=False) session.add(alloc) # remove from table unallocated vlans for any unconfigured # physical networks for allocs in allocations.values(): for alloc in allocs: if not alloc.allocated: LOG.debug("Removing vlan %(vlan_id)s on physical " "network %(physical_network)s from pool", {'vlan_id': alloc.vlan_id, 'physical_network': alloc.physical_network}) session.delete(alloc) def get_type(self): return p_const.TYPE_VLAN def initialize(self): self._sync_vlan_allocations() LOG.info(_LI("VlanTypeDriver initialization complete")) def is_partial_segment(self, segment): return segment.get(api.SEGMENTATION_ID) is None def validate_provider_segment(self, segment): physical_network = segment.get(api.PHYSICAL_NETWORK) segmentation_id = segment.get(api.SEGMENTATION_ID) if physical_network: if physical_network not in self.network_vlan_ranges: msg = 
(_("physical_network '%s' unknown " " for VLAN provider network") % physical_network) raise exc.InvalidInput(error_message=msg) if segmentation_id: if not plugin_utils.is_valid_vlan_tag(segmentation_id): msg = (_("segmentation_id out of range (%(min)s through " "%(max)s)") % {'min': p_const.MIN_VLAN_TAG, 'max': p_const.MAX_VLAN_TAG}) raise exc.InvalidInput(error_message=msg) elif segmentation_id: msg = _("segmentation_id requires physical_network for VLAN " "provider network") raise exc.InvalidInput(error_message=msg) for key, value in segment.items(): if value and key not in [api.NETWORK_TYPE, api.PHYSICAL_NETWORK, api.SEGMENTATION_ID]: msg = _("%s prohibited for VLAN provider network") % key raise exc.InvalidInput(error_message=msg) def reserve_provider_segment(self, session, segment): filters = {} physical_network = segment.get(api.PHYSICAL_NETWORK) if physical_network is not None: filters['physical_network'] = physical_network vlan_id = segment.get(api.SEGMENTATION_ID) if vlan_id is not None: filters['vlan_id'] = vlan_id if self.is_partial_segment(segment): alloc = self.allocate_partially_specified_segment( session, **filters) if not alloc: raise exc.NoNetworkAvailable() else: alloc = self.allocate_fully_specified_segment( session, **filters) if not alloc: raise exc.VlanIdInUse(**filters) return {api.NETWORK_TYPE: p_const.TYPE_VLAN, api.PHYSICAL_NETWORK: alloc.physical_network, api.SEGMENTATION_ID: alloc.vlan_id, api.MTU: self.get_mtu(alloc.physical_network)} def allocate_tenant_segment(self, session): alloc = self.allocate_partially_specified_segment(session) if not alloc: return return {api.NETWORK_TYPE: p_const.TYPE_VLAN, api.PHYSICAL_NETWORK: alloc.physical_network, api.SEGMENTATION_ID: alloc.vlan_id, api.MTU: self.get_mtu(alloc.physical_network)} def release_segment(self, session, segment): physical_network = segment[api.PHYSICAL_NETWORK] vlan_id = segment[api.SEGMENTATION_ID] ranges = self.network_vlan_ranges.get(physical_network, []) inside = any(lo <= vlan_id <= hi for lo, hi in ranges) with session.begin(subtransactions=True): query = (session.query(VlanAllocation). filter_by(physical_network=physical_network, vlan_id=vlan_id)) if inside: count = query.update({"allocated": False}) if count: LOG.debug("Releasing vlan %(vlan_id)s on physical " "network %(physical_network)s to pool", {'vlan_id': vlan_id, 'physical_network': physical_network}) else: count = query.delete() if count: LOG.debug("Releasing vlan %(vlan_id)s on physical " "network %(physical_network)s outside pool", {'vlan_id': vlan_id, 'physical_network': physical_network}) if not count: LOG.warning(_LW("No vlan_id %(vlan_id)s found on physical " "network %(physical_network)s"), {'vlan_id': vlan_id, 'physical_network': physical_network}) def get_mtu(self, physical_network): seg_mtu = super(VlanTypeDriver, self).get_mtu() mtu = [] if seg_mtu > 0: mtu.append(seg_mtu) if physical_network in self.physnet_mtus: mtu.append(int(self.physnet_mtus[physical_network])) return min(mtu) if mtu else 0 neutron-8.4.0/neutron/plugins/ml2/drivers/type_local.py0000664000567000056710000000434613044372760024423 0ustar jenkinsjenkins00000000000000# Copyright (c) 2013 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_log import log import six from neutron._i18n import _, _LI from neutron.common import exceptions as exc from neutron.plugins.common import constants as p_const from neutron.plugins.ml2 import driver_api as api LOG = log.getLogger(__name__) class LocalTypeDriver(api.TypeDriver): """Manage state for local networks with ML2. The LocalTypeDriver implements the 'local' network_type. Local network segments provide connectivity between VMs and other devices running on the same node, provided that a common local network bridging technology is available to those devices. Local network segments do not provide any connectivity between nodes. """ def __init__(self): LOG.info(_LI("ML2 LocalTypeDriver initialization complete")) def get_type(self): return p_const.TYPE_LOCAL def initialize(self): pass def is_partial_segment(self, segment): return False def validate_provider_segment(self, segment): for key, value in six.iteritems(segment): if value and key != api.NETWORK_TYPE: msg = _("%s prohibited for local provider network") % key raise exc.InvalidInput(error_message=msg) def reserve_provider_segment(self, session, segment): # No resources to reserve return segment def allocate_tenant_segment(self, session): # No resources to allocate return {api.NETWORK_TYPE: p_const.TYPE_LOCAL} def release_segment(self, session, segment): # No resources to release pass def get_mtu(self, physical_network=None): pass neutron-8.4.0/neutron/plugins/ml2/drivers/type_vxlan.py0000664000567000056710000000722513044372760024460 0ustar jenkinsjenkins00000000000000# Copyright (c) 2013 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import cfg from oslo_log import log import sqlalchemy as sa from sqlalchemy import sql from neutron._i18n import _, _LE from neutron.common import exceptions as n_exc from neutron.db import model_base from neutron.plugins.common import constants as p_const from neutron.plugins.ml2.drivers import type_tunnel LOG = log.getLogger(__name__) vxlan_opts = [ cfg.ListOpt('vni_ranges', default=[], help=_("Comma-separated list of <vni_min>:<vni_max> tuples " "enumerating ranges of VXLAN VNI IDs that are " "available for tenant network allocation")), cfg.StrOpt('vxlan_group', help=_("Multicast group for VXLAN. When configured, will " "enable sending all broadcast traffic to this multicast " "group. 
When left unconfigured, will disable multicast " "VXLAN mode.")), ] cfg.CONF.register_opts(vxlan_opts, "ml2_type_vxlan") class VxlanAllocation(model_base.BASEV2): __tablename__ = 'ml2_vxlan_allocations' vxlan_vni = sa.Column(sa.Integer, nullable=False, primary_key=True, autoincrement=False) allocated = sa.Column(sa.Boolean, nullable=False, default=False, server_default=sql.false(), index=True) class VxlanEndpoints(model_base.BASEV2): """Represents tunnel endpoint in RPC mode.""" __tablename__ = 'ml2_vxlan_endpoints' __table_args__ = ( sa.UniqueConstraint('host', name='unique_ml2_vxlan_endpoints0host'), model_base.BASEV2.__table_args__ ) ip_address = sa.Column(sa.String(64), primary_key=True) udp_port = sa.Column(sa.Integer, nullable=False) host = sa.Column(sa.String(255), nullable=True) def __repr__(self): return "<VxlanTunnelEndpoint(%s)>" % self.ip_address class VxlanTypeDriver(type_tunnel.EndpointTunnelTypeDriver): def __init__(self): super(VxlanTypeDriver, self).__init__( VxlanAllocation, VxlanEndpoints) def get_type(self): return p_const.TYPE_VXLAN def initialize(self): try: self._initialize(cfg.CONF.ml2_type_vxlan.vni_ranges) except n_exc.NetworkTunnelRangeError: LOG.exception(_LE("Failed to parse vni_ranges. " "Service terminated!")) raise SystemExit() def get_endpoints(self): """Get every vxlan endpoint from the database.""" vxlan_endpoints = self._get_endpoints() return [{'ip_address': vxlan_endpoint.ip_address, 'udp_port': vxlan_endpoint.udp_port, 'host': vxlan_endpoint.host} for vxlan_endpoint in vxlan_endpoints] def add_endpoint(self, ip, host, udp_port=p_const.VXLAN_UDP_PORT): return self._add_endpoint(ip, host, udp_port=udp_port) def get_mtu(self, physical_network=None): mtu = super(VxlanTypeDriver, self).get_mtu() return mtu - p_const.VXLAN_ENCAP_OVERHEAD if mtu else 0 neutron-8.4.0/neutron/plugins/ml2/drivers/type_geneve.py0000664000567000056710000000761613044372760024575 0ustar jenkinsjenkins00000000000000# Copyright (c) 2015 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import cfg from oslo_log import log import sqlalchemy as sa from sqlalchemy import sql from neutron._i18n import _, _LE from neutron.common import exceptions as n_exc from neutron.db import model_base from neutron.plugins.common import constants as p_const from neutron.plugins.ml2.drivers import type_tunnel LOG = log.getLogger(__name__) geneve_opts = [ cfg.ListOpt('vni_ranges', default=[], help=_("Comma-separated list of <vni_min>:<vni_max> tuples " "enumerating ranges of Geneve VNI IDs that are " "available for tenant network allocation")), cfg.IntOpt('max_header_size', default=p_const.GENEVE_ENCAP_MIN_OVERHEAD, help=_("Geneve encapsulation header size is dynamic, this " "value is used to calculate the maximum MTU " "for the driver. " "This is the sum of the sizes of the outer " "ETH + IP + UDP + GENEVE header sizes. 
" "The default size for this field is 50, which is the " "size of the Geneve header without any additional " "option headers.")), ] cfg.CONF.register_opts(geneve_opts, "ml2_type_geneve") class GeneveAllocation(model_base.BASEV2): __tablename__ = 'ml2_geneve_allocations' geneve_vni = sa.Column(sa.Integer, nullable=False, primary_key=True, autoincrement=False) allocated = sa.Column(sa.Boolean, nullable=False, default=False, server_default=sql.false(), index=True) class GeneveEndpoints(model_base.BASEV2): """Represents tunnel endpoint in RPC mode.""" __tablename__ = 'ml2_geneve_endpoints' __table_args__ = ( sa.UniqueConstraint('host', name='unique_ml2_geneve_endpoints0host'), model_base.BASEV2.__table_args__ ) ip_address = sa.Column(sa.String(64), primary_key=True) host = sa.Column(sa.String(255), nullable=True) def __repr__(self): return "" % self.ip_address class GeneveTypeDriver(type_tunnel.EndpointTunnelTypeDriver): def __init__(self): super(GeneveTypeDriver, self).__init__(GeneveAllocation, GeneveEndpoints) self.max_encap_size = cfg.CONF.ml2_type_geneve.max_header_size def get_type(self): return p_const.TYPE_GENEVE def initialize(self): try: self._initialize(cfg.CONF.ml2_type_geneve.vni_ranges) except n_exc.NetworkTunnelRangeError: LOG.error(_LE("Failed to parse vni_ranges. " "Service terminated!")) raise SystemExit() def get_endpoints(self): """Get every geneve endpoints from database.""" geneve_endpoints = self._get_endpoints() return [{'ip_address': geneve_endpoint.ip_address, 'host': geneve_endpoint.host} for geneve_endpoint in geneve_endpoints] def add_endpoint(self, ip, host): return self._add_endpoint(ip, host) def get_mtu(self, physical_network=None): mtu = super(GeneveTypeDriver, self).get_mtu() return mtu - self.max_encap_size if mtu else 0 neutron-8.4.0/neutron/plugins/ml2/drivers/linuxbridge/0000775000567000056710000000000013044373210024212 5ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/plugins/ml2/drivers/linuxbridge/__init__.py0000664000567000056710000000000013044372736026325 0ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/plugins/ml2/drivers/linuxbridge/mech_driver/0000775000567000056710000000000013044373210026501 5ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/plugins/ml2/drivers/linuxbridge/mech_driver/mech_linuxbridge.py0000664000567000056710000000434313044372760032400 0ustar jenkinsjenkins00000000000000# Copyright (c) 2013 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron.agent import securitygroups_rpc from neutron.common import constants from neutron.extensions import portbindings from neutron.plugins.common import constants as p_constants from neutron.plugins.ml2.drivers import mech_agent from neutron.services.qos import qos_consts class LinuxbridgeMechanismDriver(mech_agent.SimpleAgentMechanismDriverBase): """Attach to networks using linuxbridge L2 agent. The LinuxbridgeMechanismDriver integrates the ml2 plugin with the linuxbridge L2 agent. 
Port binding with this driver requires the linuxbridge agent to be running on the port's host, and that agent to have connectivity to at least one segment of the port's network. """ supported_qos_rule_types = [qos_consts.RULE_TYPE_BANDWIDTH_LIMIT] def __init__(self): sg_enabled = securitygroups_rpc.is_firewall_enabled() super(LinuxbridgeMechanismDriver, self).__init__( constants.AGENT_TYPE_LINUXBRIDGE, portbindings.VIF_TYPE_BRIDGE, {portbindings.CAP_PORT_FILTER: sg_enabled}) def get_allowed_network_types(self, agent): return (agent['configurations'].get('tunnel_types', []) + [p_constants.TYPE_LOCAL, p_constants.TYPE_FLAT, p_constants.TYPE_VLAN]) def get_mappings(self, agent): mappings = dict(agent['configurations'].get('interface_mappings', {}), **agent['configurations'].get('bridge_mappings', {})) return mappings def check_vlan_transparency(self, context): """Linuxbridge driver vlan transparency support.""" return True neutron-8.4.0/neutron/plugins/ml2/drivers/linuxbridge/mech_driver/__init__.py0000664000567000056710000000000013044372736030614 0ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/plugins/ml2/drivers/linuxbridge/agent/0000775000567000056710000000000013044373210025310 5ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/plugins/ml2/drivers/linuxbridge/agent/__init__.py0000664000567000056710000000000013044372736027423 0ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/plugins/ml2/drivers/linuxbridge/agent/common/0000775000567000056710000000000013044373210026600 5ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/plugins/ml2/drivers/linuxbridge/agent/common/config.py0000664000567000056710000001024013044372760030425 0ustar jenkinsjenkins00000000000000# Copyright 2012 Cisco Systems, Inc. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import cfg from neutron._i18n import _ DEFAULT_BRIDGE_MAPPINGS = [] DEFAULT_INTERFACE_MAPPINGS = [] DEFAULT_VXLAN_GROUP = '224.0.0.1' DEFAULT_KERNEL_HZ_VALUE = 250 # [Hz] DEFAULT_TC_TBF_LATENCY = 50 # [ms] vxlan_opts = [ cfg.BoolOpt('enable_vxlan', default=True, help=_("Enable VXLAN on the agent. Can be enabled when " "agent is managed by ml2 plugin using linuxbridge " "mechanism driver")), cfg.IntOpt('ttl', help=_("TTL for vxlan interface protocol packets.")), cfg.IntOpt('tos', help=_("TOS for vxlan interface protocol packets.")), cfg.StrOpt('vxlan_group', default=DEFAULT_VXLAN_GROUP, help=_("Multicast group(s) for vxlan interface. A range of " "group addresses may be specified by using CIDR " "notation. Specifying a range allows different VNIs to " "use different group addresses, reducing or eliminating " "spurious broadcast traffic to the tunnel endpoints. " "To reserve a unique group for each possible " "(24-bit) VNI, use a /8 such as 239.0.0.0/8. This " "setting must be the same on all the agents.")), cfg.IPOpt('local_ip', help=_("Local IP address of the VXLAN endpoints.")), cfg.BoolOpt('l2_population', default=False, help=_("Extension to use alongside ml2 plugin's l2population " "mechanism driver. 
It enables the plugin to populate " "VXLAN forwarding table.")), cfg.BoolOpt('arp_responder', default=False, help=_("Enable local ARP responder which provides local " "responses instead of performing ARP broadcast into " "the overlay. Enabling local ARP responder is not fully " "compatible with the allowed-address-pairs extension.") ), ] bridge_opts = [ cfg.ListOpt('physical_interface_mappings', default=DEFAULT_INTERFACE_MAPPINGS, help=_("Comma-separated list of " "<physical_network>:<physical_interface> tuples " "mapping physical network names to the agent's " "node-specific physical network interfaces to be used " "for flat and VLAN networks. All physical networks " "listed in network_vlan_ranges on the server should " "have mappings to appropriate interfaces on each " "agent.")), cfg.ListOpt('bridge_mappings', default=DEFAULT_BRIDGE_MAPPINGS, help=_("List of <physical_network>:<physical_bridge>")), ] qos_options = [ cfg.IntOpt('kernel_hz', default=DEFAULT_KERNEL_HZ_VALUE, help=_("Value of host kernel tick rate (hz) for calculating " "minimum burst value in bandwidth limit rules for " "a port with QoS. See kernel configuration file for " "HZ value and tc-tbf manual for more information.")), cfg.IntOpt('tbf_latency', default=DEFAULT_TC_TBF_LATENCY, help=_("Value of latency (ms) for calculating size of queue " "for a port with QoS. See tc-tbf manual for more " "information.")) ] cfg.CONF.register_opts(vxlan_opts, "VXLAN") cfg.CONF.register_opts(bridge_opts, "LINUX_BRIDGE") cfg.CONF.register_opts(qos_options, "QOS") neutron-8.4.0/neutron/plugins/ml2/drivers/linuxbridge/agent/common/__init__.py0000664000567000056710000000000013044372736030713 0ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/plugins/ml2/drivers/linuxbridge/agent/common/constants.py0000664000567000056710000000153413044372736031205 0ustar jenkinsjenkins00000000000000# Copyright 2012 Cisco Systems, Inc. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. FLAT_VLAN_ID = -1 LOCAL_VLAN_ID = -2 # Supported VXLAN features VXLAN_NONE = 'not_supported' VXLAN_MCAST = 'multicast_flooding' VXLAN_UCAST = 'unicast_flooding' EXTENSION_DRIVER_TYPE = 'linuxbridge' RESOURCE_ID_LENGTH = 11 neutron-8.4.0/neutron/plugins/ml2/drivers/linuxbridge/agent/arp_protect.py0000664000567000056710000001664713044372736030226 0ustar jenkinsjenkins00000000000000# Copyright (c) 2015 Mirantis, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
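# Illustrative sketch (not part of arp_protect.py): the mapping options
# registered above are plain "name:value" lists; assuming they are parsed
# with neutron.common.utils.parse_mappings, as elsewhere in this tree:
#
#     from neutron.common import utils
#     utils.parse_mappings(['physnet1:eth1', 'physnet2:eth2'])
#     # -> {'physnet1': 'eth1', 'physnet2': 'eth2'}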
import netaddr from oslo_concurrency import lockutils from oslo_log import log as logging from neutron._i18n import _LI from neutron.agent.linux import ip_lib from neutron.common import utils LOG = logging.getLogger(__name__) SPOOF_CHAIN_PREFIX = 'neutronARP-' MAC_CHAIN_PREFIX = 'neutronMAC-' def setup_arp_spoofing_protection(vif, port_details): current_rules = ebtables(['-L']).splitlines() if not port_details.get('port_security_enabled', True): # clear any previous entries related to this port delete_arp_spoofing_protection([vif], current_rules) LOG.info(_LI("Skipping ARP spoofing rules for port '%s' because " "it has port security disabled"), vif) return if utils.is_port_trusted(port_details): # clear any previous entries related to this port delete_arp_spoofing_protection([vif], current_rules) LOG.debug("Skipping ARP spoofing rules for network owned port " "'%s'.", vif) return _install_mac_spoofing_protection(vif, port_details, current_rules) # collect all of the addresses and cidrs that belong to the port addresses = {f['ip_address'] for f in port_details['fixed_ips']} if port_details.get('allowed_address_pairs'): addresses |= {p['ip_address'] for p in port_details['allowed_address_pairs']} addresses = {ip for ip in addresses if netaddr.IPNetwork(ip).version == 4} if any(netaddr.IPNetwork(ip).prefixlen == 0 for ip in addresses): # don't try to install protection because a /0 prefix allows any # address anyway and the ARP_SPA can only match on /1 or more. return install_arp_spoofing_protection(vif, addresses, current_rules) def chain_name(vif): # start each chain with a common identifier for cleanup to find return '%s%s' % (SPOOF_CHAIN_PREFIX, vif) @lockutils.synchronized('ebtables') def delete_arp_spoofing_protection(vifs, current_rules=None): if not current_rules: current_rules = ebtables(['-L']).splitlines() # delete the jump rule and then delete the whole chain jumps = [vif for vif in vifs if vif_jump_present(vif, current_rules)] for vif in jumps: ebtables(['-D', 'FORWARD', '-i', vif, '-j', chain_name(vif), '-p', 'ARP']) for vif in vifs: if chain_exists(chain_name(vif), current_rules): ebtables(['-X', chain_name(vif)]) _delete_mac_spoofing_protection(vifs, current_rules) def delete_unreferenced_arp_protection(current_vifs): # deletes all jump rules and chains that aren't in current_vifs but match # the spoof prefix output = ebtables(['-L']).splitlines() to_delete = [] for line in output: # we're looking to find and turn the following: # Bridge chain: SPOOF_CHAIN_PREFIXtap199, entries: 0, policy: DROP # into 'tap199' if line.startswith('Bridge chain: %s' % SPOOF_CHAIN_PREFIX): devname = line.split(SPOOF_CHAIN_PREFIX, 1)[1].split(',')[0] if devname not in current_vifs: to_delete.append(devname) LOG.info(_LI("Clearing orphaned ARP spoofing entries for devices %s"), to_delete) delete_arp_spoofing_protection(to_delete, output) @lockutils.synchronized('ebtables') def install_arp_spoofing_protection(vif, addresses, current_rules): # make a VIF-specific ARP chain so we don't conflict with other rules vif_chain = chain_name(vif) if not chain_exists(vif_chain, current_rules): ebtables(['-N', vif_chain, '-P', 'DROP']) # flush the chain to clear previous accepts. this will cause dropped ARP # packets until the allows are installed, but that's better than leaked # spoofed packets and ARP can handle losses. 
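    # For a vif 'tap123' with allowed address 10.0.0.5, the full sequence
    # issued through the ebtables() helper at the bottom of this module
    # would be, illustratively:
    #
    #     ebtables -N neutronARP-tap123 -P DROP    (first time only)
    #     ebtables -F neutronARP-tap123
    #     ebtables -A neutronARP-tap123 -p ARP --arp-ip-src 10.0.0.5 -j ACCEPT
    #     ebtables -A FORWARD -i tap123 -j neutronARP-tap123 -p ARP  (if absent)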
ebtables(['-F', vif_chain]) for addr in addresses: ebtables(['-A', vif_chain, '-p', 'ARP', '--arp-ip-src', addr, '-j', 'ACCEPT']) # check if jump rule already exists, if not, install it if not vif_jump_present(vif, current_rules): ebtables(['-A', 'FORWARD', '-i', vif, '-j', vif_chain, '-p', 'ARP']) def chain_exists(chain, current_rules): for rule in current_rules: if rule.startswith('Bridge chain: %s' % chain): return True return False def vif_jump_present(vif, current_rules): searches = (('-i %s' % vif), ('-j %s' % chain_name(vif)), ('-p ARP')) for line in current_rules: if all(s in line for s in searches): return True return False @lockutils.synchronized('ebtables') def _install_mac_spoofing_protection(vif, port_details, current_rules): mac_addresses = {port_details['mac_address']} if port_details.get('allowed_address_pairs'): mac_addresses |= {p['mac_address'] for p in port_details['allowed_address_pairs']} mac_addresses = list(mac_addresses) vif_chain = _mac_chain_name(vif) # mac filter chain for each vif which has a default deny if not chain_exists(vif_chain, current_rules): ebtables(['-N', vif_chain, '-P', 'DROP']) # check if jump rule already exists, if not, install it if not _mac_vif_jump_present(vif, current_rules): ebtables(['-A', 'FORWARD', '-i', vif, '-j', vif_chain]) # we can't just feed all allowed macs at once because we can exceed # the maximum argument size. limit to 500 per rule. for chunk in (mac_addresses[i:i + 500] for i in range(0, len(mac_addresses), 500)): new_rule = ['-A', vif_chain, '-i', vif, '--among-src', ','.join(chunk), '-j', 'RETURN'] ebtables(new_rule) _delete_vif_mac_rules(vif, current_rules) def _mac_vif_jump_present(vif, current_rules): searches = (('-i %s' % vif), ('-j %s' % _mac_chain_name(vif))) for line in current_rules: if all(s in line for s in searches): return True return False def _mac_chain_name(vif): return '%s%s' % (MAC_CHAIN_PREFIX, vif) def _delete_vif_mac_rules(vif, current_rules): chain = _mac_chain_name(vif) for rule in current_rules: if '-i %s' % vif in rule and '--among-src' in rule: ebtables(['-D', chain] + rule.split()) def _delete_mac_spoofing_protection(vifs, current_rules): # delete the jump rule and then delete the whole chain jumps = [vif for vif in vifs if _mac_vif_jump_present(vif, current_rules)] for vif in jumps: ebtables(['-D', 'FORWARD', '-i', vif, '-j', _mac_chain_name(vif)]) for vif in vifs: chain = _mac_chain_name(vif) if chain_exists(chain, current_rules): ebtables(['-X', chain]) # Used to scope ebtables commands in testing NAMESPACE = None def ebtables(comm): execute = ip_lib.IPWrapper(NAMESPACE).netns.execute return execute(['ebtables'] + comm, run_as_root=True) neutron-8.4.0/neutron/plugins/ml2/drivers/linuxbridge/agent/linuxbridge_neutron_agent.py0000664000567000056710000011560213044372760033144 0ustar jenkinsjenkins00000000000000#!/usr/bin/env python # Copyright 2012 Cisco Systems, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # # # Performs per host Linux Bridge configuration for Neutron. 
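# Illustrative aside on arp_protect.py above (not part of this file): the
# orphan cleanup recovers device names from 'ebtables -L' output with plain
# string splitting:
#
#     line = 'Bridge chain: neutronARP-tap199, entries: 0, policy: DROP'
#     line.split('neutronARP-', 1)[1].split(',')[0]   # -> 'tap199'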
# Based on the structure of the OpenVSwitch agent in the # Neutron OpenVSwitch Plugin. import sys import netaddr from oslo_config import cfg from oslo_log import log as logging import oslo_messaging from oslo_service import service from oslo_utils import excutils from six import moves from neutron._i18n import _LE, _LI, _LW from neutron.agent.linux import bridge_lib from neutron.agent.linux import ip_lib from neutron.agent.linux import utils from neutron.agent import securitygroups_rpc as sg_rpc from neutron.common import config as common_config from neutron.common import constants from neutron.common import exceptions from neutron.common import topics from neutron.common import utils as n_utils from neutron.plugins.common import constants as p_const from neutron.plugins.ml2.drivers.agent import _agent_manager_base as amb from neutron.plugins.ml2.drivers.agent import _common_agent as ca from neutron.plugins.ml2.drivers.agent import config as cagt_config # noqa from neutron.plugins.ml2.drivers.l2pop.rpc_manager \ import l2population_rpc as l2pop_rpc from neutron.plugins.ml2.drivers.linuxbridge.agent import arp_protect from neutron.plugins.ml2.drivers.linuxbridge.agent.common import config # noqa from neutron.plugins.ml2.drivers.linuxbridge.agent.common \ import constants as lconst LOG = logging.getLogger(__name__) LB_AGENT_BINARY = 'neutron-linuxbridge-agent' BRIDGE_NAME_PREFIX = "brq" VXLAN_INTERFACE_PREFIX = "vxlan-" class LinuxBridgeManager(amb.CommonAgentManagerBase): def __init__(self, bridge_mappings, interface_mappings): super(LinuxBridgeManager, self).__init__() self.bridge_mappings = bridge_mappings self.interface_mappings = interface_mappings self.validate_interface_mappings() self.validate_bridge_mappings() self.ip = ip_lib.IPWrapper() # VXLAN related parameters: self.local_ip = cfg.CONF.VXLAN.local_ip self.vxlan_mode = lconst.VXLAN_NONE if cfg.CONF.VXLAN.enable_vxlan: device = self.get_local_ip_device() self.validate_vxlan_group_with_local_ip() self.local_int = device.name self.check_vxlan_support() def validate_interface_mappings(self): for physnet, interface in self.interface_mappings.items(): if not ip_lib.device_exists(interface): LOG.error(_LE("Interface %(intf)s for physical network %(net)s" " does not exist. Agent terminated!"), {'intf': interface, 'net': physnet}) sys.exit(1) def validate_bridge_mappings(self): for physnet, bridge in self.bridge_mappings.items(): if not ip_lib.device_exists(bridge): LOG.error(_LE("Bridge %(brq)s for physical network %(net)s" " does not exist. Agent terminated!"), {'brq': bridge, 'net': physnet}) sys.exit(1) def validate_vxlan_group_with_local_ip(self): if not cfg.CONF.VXLAN.vxlan_group: return try: ip_addr = netaddr.IPAddress(self.local_ip) # Ensure the configured group address/range is valid and multicast group_net = netaddr.IPNetwork(cfg.CONF.VXLAN.vxlan_group) if not group_net.is_multicast(): raise ValueError() if not ip_addr.version == group_net.version: raise ValueError() except (netaddr.core.AddrFormatError, ValueError): LOG.error(_LE("Invalid VXLAN Group: %(group)s, must be an address " "or network (in CIDR notation) in a multicast " "range of the same address family as local_ip: " "%(ip)s"), {'group': cfg.CONF.VXLAN.vxlan_group, 'ip': self.local_ip}) sys.exit(1) def get_local_ip_device(self): """Return the device with local_ip on the host.""" device = self.ip.get_device_by_ip(self.local_ip) if not device: LOG.error(_LE("Tunneling cannot be enabled without the local_ip " "bound to an interface on the host. 
Please " "configure local_ip %s on the host interface to " "be used for tunneling and restart the agent."), self.local_ip) sys.exit(1) return device def get_existing_bridge_name(self, physical_network): if not physical_network: return None return self.bridge_mappings.get(physical_network) @staticmethod def get_bridge_name(network_id): if not network_id: LOG.warning(_LW("Invalid Network ID, will lead to incorrect " "bridge name")) bridge_name = BRIDGE_NAME_PREFIX + \ network_id[:lconst.RESOURCE_ID_LENGTH] return bridge_name def get_subinterface_name(self, physical_interface, vlan_id): if not vlan_id: LOG.warning(_LW("Invalid VLAN ID, will lead to incorrect " "subinterface name")) subinterface_name = '%s.%s' % (physical_interface, vlan_id) return subinterface_name @staticmethod def get_tap_device_name(interface_id): if not interface_id: LOG.warning(_LW("Invalid Interface ID, will lead to incorrect " "tap device name")) tap_device_name = constants.TAP_DEVICE_PREFIX + \ interface_id[:lconst.RESOURCE_ID_LENGTH] return tap_device_name def get_vxlan_device_name(self, segmentation_id): if 0 <= int(segmentation_id) <= p_const.MAX_VXLAN_VNI: return VXLAN_INTERFACE_PREFIX + str(segmentation_id) else: LOG.warning(_LW("Invalid Segmentation ID: %s, will lead to " "incorrect vxlan device name"), segmentation_id) def get_vxlan_group(self, segmentation_id): net = netaddr.IPNetwork(cfg.CONF.VXLAN.vxlan_group) # Map the segmentation ID to (one of) the group address(es) return str(net.network + (int(segmentation_id) & int(net.hostmask))) def get_deletable_bridges(self): bridge_list = bridge_lib.get_bridge_names() bridges = {b for b in bridge_list if b.startswith(BRIDGE_NAME_PREFIX)} bridges.difference_update(self.bridge_mappings.values()) return bridges def get_tap_devices_count(self, bridge_name): if_list = bridge_lib.BridgeDevice(bridge_name).get_interfaces() return len([interface for interface in if_list if interface.startswith(constants.TAP_DEVICE_PREFIX)]) def ensure_vlan_bridge(self, network_id, phy_bridge_name, physical_interface, vlan_id): """Create a vlan and bridge unless they already exist.""" interface = self.ensure_vlan(physical_interface, vlan_id) if phy_bridge_name: return self.ensure_bridge(phy_bridge_name) else: bridge_name = self.get_bridge_name(network_id) ips, gateway = self.get_interface_details(interface) if self.ensure_bridge(bridge_name, interface, ips, gateway): return interface def ensure_vxlan_bridge(self, network_id, segmentation_id): """Create a vxlan and bridge unless they already exist.""" interface = self.ensure_vxlan(segmentation_id) if not interface: LOG.error(_LE("Failed creating vxlan interface for " "%(segmentation_id)s"), {segmentation_id: segmentation_id}) return bridge_name = self.get_bridge_name(network_id) self.ensure_bridge(bridge_name, interface) return interface def get_interface_details(self, interface): device = self.ip.device(interface) ips = device.addr.list(scope='global') # Update default gateway if necessary gateway = device.route.get_gateway(scope='global') return ips, gateway def ensure_flat_bridge(self, network_id, phy_bridge_name, physical_interface): """Create a non-vlan bridge unless it already exists.""" if phy_bridge_name: return self.ensure_bridge(phy_bridge_name) else: bridge_name = self.get_bridge_name(network_id) ips, gateway = self.get_interface_details(physical_interface) if self.ensure_bridge(bridge_name, physical_interface, ips, gateway): return physical_interface def ensure_local_bridge(self, network_id, phy_bridge_name): """Create a local 
bridge unless it already exists.""" if phy_bridge_name: bridge_name = phy_bridge_name else: bridge_name = self.get_bridge_name(network_id) return self.ensure_bridge(bridge_name) def ensure_vlan(self, physical_interface, vlan_id): """Create a vlan unless it already exists.""" interface = self.get_subinterface_name(physical_interface, vlan_id) if not ip_lib.device_exists(interface): LOG.debug("Creating subinterface %(interface)s for " "VLAN %(vlan_id)s on interface " "%(physical_interface)s", {'interface': interface, 'vlan_id': vlan_id, 'physical_interface': physical_interface}) try: int_vlan = self.ip.add_vlan(interface, physical_interface, vlan_id) except RuntimeError: with excutils.save_and_reraise_exception() as ctxt: if ip_lib.vlan_in_use(vlan_id): ctxt.reraise = False LOG.error(_LE("Unable to create VLAN interface for " "VLAN ID %s because it is in use by " "another interface."), vlan_id) return int_vlan.disable_ipv6() int_vlan.link.set_up() LOG.debug("Done creating subinterface %s", interface) return interface def ensure_vxlan(self, segmentation_id): """Create a vxlan unless it already exists.""" interface = self.get_vxlan_device_name(segmentation_id) if not ip_lib.device_exists(interface): LOG.debug("Creating vxlan interface %(interface)s for " "VNI %(segmentation_id)s", {'interface': interface, 'segmentation_id': segmentation_id}) args = {'dev': self.local_int} if self.vxlan_mode == lconst.VXLAN_MCAST: args['group'] = self.get_vxlan_group(segmentation_id) if cfg.CONF.VXLAN.ttl: args['ttl'] = cfg.CONF.VXLAN.ttl if cfg.CONF.VXLAN.tos: args['tos'] = cfg.CONF.VXLAN.tos if cfg.CONF.VXLAN.l2_population: args['proxy'] = cfg.CONF.VXLAN.arp_responder try: int_vxlan = self.ip.add_vxlan(interface, segmentation_id, **args) except RuntimeError: with excutils.save_and_reraise_exception() as ctxt: # perform this check after an attempt rather than before # to avoid excessive lookups and a possible race condition. 
if ip_lib.vxlan_in_use(segmentation_id): ctxt.reraise = False LOG.error(_LE("Unable to create VXLAN interface for " "VNI %s because it is in use by another " "interface."), segmentation_id) return None int_vxlan.disable_ipv6() int_vxlan.link.set_up() LOG.debug("Done creating vxlan interface %s", interface) return interface def update_interface_ip_details(self, destination, source, ips, gateway): if ips or gateway: dst_device = self.ip.device(destination) src_device = self.ip.device(source) # Append IP's to bridge if necessary if ips: for ip in ips: dst_device.addr.add(cidr=ip['cidr']) if gateway: # Ensure that the gateway can be updated by changing the metric metric = 100 if 'metric' in gateway: metric = gateway['metric'] - 1 dst_device.route.add_gateway(gateway=gateway['gateway'], metric=metric) src_device.route.delete_gateway(gateway=gateway['gateway']) # Remove IP's from interface if ips: for ip in ips: src_device.addr.delete(cidr=ip['cidr']) def _bridge_exists_and_ensure_up(self, bridge_name): """Check if the bridge exists and make sure it is up.""" br = ip_lib.IPDevice(bridge_name) br.set_log_fail_as_error(False) try: # If the device doesn't exist this will throw a RuntimeError br.link.set_up() except RuntimeError: return False return True def ensure_bridge(self, bridge_name, interface=None, ips=None, gateway=None): """Create a bridge unless it already exists.""" # _bridge_exists_and_ensure_up instead of device_exists is used here # because there are cases where the bridge exists but it's not UP, # for example: # 1) A greenthread was executing this function and had not yet executed # "ip link set bridge_name up" before eventlet switched to this # thread running the same function # 2) The Nova VIF driver was running concurrently and had just created # the bridge, but had not yet put it UP if not self._bridge_exists_and_ensure_up(bridge_name): LOG.debug("Starting bridge %(bridge_name)s for subinterface " "%(interface)s", {'bridge_name': bridge_name, 'interface': interface}) bridge_device = bridge_lib.BridgeDevice.addbr(bridge_name) if bridge_device.setfd(0): return if bridge_device.disable_stp(): return if bridge_device.disable_ipv6(): return if bridge_device.link.set_up(): return LOG.debug("Done starting bridge %(bridge_name)s for " "subinterface %(interface)s", {'bridge_name': bridge_name, 'interface': interface}) else: bridge_device = bridge_lib.BridgeDevice(bridge_name) if not interface: return bridge_name # Update IP info if necessary self.update_interface_ip_details(bridge_name, interface, ips, gateway) # Check if the interface is part of the bridge if not bridge_device.owns_interface(interface): try: # Check if the interface is not enslaved in another bridge bridge = bridge_lib.BridgeDevice.get_interface_bridge( interface) if bridge: bridge.delif(interface) bridge_device.addif(interface) except Exception as e: LOG.error(_LE("Unable to add %(interface)s to %(bridge_name)s" "! 
Exception: %(e)s"), {'interface': interface, 'bridge_name': bridge_name, 'e': e}) return return bridge_name def ensure_physical_in_bridge(self, network_id, network_type, physical_network, segmentation_id): if network_type == p_const.TYPE_VXLAN: if self.vxlan_mode == lconst.VXLAN_NONE: LOG.error(_LE("Unable to add vxlan interface for network %s"), network_id) return return self.ensure_vxlan_bridge(network_id, segmentation_id) # NOTE(nick-ma-z): Obtain mappings of physical bridge and interfaces physical_bridge = self.get_existing_bridge_name(physical_network) physical_interface = self.interface_mappings.get(physical_network) if not physical_bridge and not physical_interface: LOG.error(_LE("No bridge or interface mappings" " for physical network %s"), physical_network) return if network_type == p_const.TYPE_FLAT: return self.ensure_flat_bridge(network_id, physical_bridge, physical_interface) elif network_type == p_const.TYPE_VLAN: return self.ensure_vlan_bridge(network_id, physical_bridge, physical_interface, segmentation_id) else: LOG.error(_LE("Unknown network_type %(network_type)s for network " "%(network_id)s."), {'network_type': network_type, 'network_id': network_id}) def add_tap_interface(self, network_id, network_type, physical_network, segmentation_id, tap_device_name, device_owner): """Add tap interface and handle interface missing exceptions.""" try: return self._add_tap_interface(network_id, network_type, physical_network, segmentation_id, tap_device_name, device_owner) except Exception: with excutils.save_and_reraise_exception() as ctx: if not ip_lib.device_exists(tap_device_name): # the exception was likely a side effect of the tap device # being removed during handling so we just return false # like we would if it didn't exist to begin with. ctx.reraise = False return False def _add_tap_interface(self, network_id, network_type, physical_network, segmentation_id, tap_device_name, device_owner): """Add tap interface. If a VIF has been plugged into a network, this function will add the corresponding tap device to the relevant bridge. """ if not ip_lib.device_exists(tap_device_name): LOG.debug("Tap device: %s does not exist on " "this host, skipped", tap_device_name) return False bridge_name = self.get_existing_bridge_name(physical_network) if not bridge_name: bridge_name = self.get_bridge_name(network_id) if network_type == p_const.TYPE_LOCAL: self.ensure_local_bridge(network_id, bridge_name) else: phy_dev_name = self.ensure_physical_in_bridge(network_id, network_type, physical_network, segmentation_id) if not phy_dev_name: return False self.ensure_tap_mtu(tap_device_name, phy_dev_name) # Avoid messing with plugging devices into a bridge that the agent # does not own if device_owner.startswith(constants.DEVICE_OWNER_PREFIXES): # Check if device needs to be added to bridge if not bridge_lib.BridgeDevice.get_interface_bridge( tap_device_name): data = {'tap_device_name': tap_device_name, 'bridge_name': bridge_name} LOG.debug("Adding device %(tap_device_name)s to bridge " "%(bridge_name)s", data) if bridge_lib.BridgeDevice(bridge_name).addif(tap_device_name): return False else: data = {'tap_device_name': tap_device_name, 'device_owner': device_owner, 'bridge_name': bridge_name} LOG.debug("Skip adding device %(tap_device_name)s to " "%(bridge_name)s. 
It is owned by %(device_owner)s and " "thus added elsewhere.", data) return True def ensure_tap_mtu(self, tap_dev_name, phy_dev_name): """Ensure the MTU on the tap is the same as the physical device.""" phy_dev_mtu = ip_lib.IPDevice(phy_dev_name).link.mtu ip_lib.IPDevice(tap_dev_name).link.set_mtu(phy_dev_mtu) def plug_interface(self, network_id, network_segment, tap_name, device_owner): return self.add_tap_interface(network_id, network_segment.network_type, network_segment.physical_network, network_segment.segmentation_id, tap_name, device_owner) def delete_bridge(self, bridge_name): bridge_device = bridge_lib.BridgeDevice(bridge_name) if bridge_device.exists(): physical_interfaces = set(self.interface_mappings.values()) interfaces_on_bridge = bridge_device.get_interfaces() for interface in interfaces_on_bridge: self.remove_interface(bridge_name, interface) if interface.startswith(VXLAN_INTERFACE_PREFIX): self.delete_interface(interface) else: # Match the vlan/flat interface in the bridge. # If the bridge has an IP, it mean that this IP was moved # from the current interface, which also mean that this # interface was not created by the agent. ips, gateway = self.get_interface_details(bridge_name) if ips: self.update_interface_ip_details(interface, bridge_name, ips, gateway) elif interface not in physical_interfaces: self.delete_interface(interface) try: LOG.debug("Deleting bridge %s", bridge_name) if bridge_device.link.set_down(): return if bridge_device.delbr(): return LOG.debug("Done deleting bridge %s", bridge_name) except RuntimeError: with excutils.save_and_reraise_exception() as ctxt: if not bridge_device.exists(): # the exception was likely a side effect of the bridge # being removed by nova during handling, # so we just return ctxt.reraise = False LOG.debug("Cannot delete bridge %s; it does not exist", bridge_name) return else: LOG.debug("Cannot delete bridge %s; it does not exist", bridge_name) def remove_interface(self, bridge_name, interface_name): bridge_device = bridge_lib.BridgeDevice(bridge_name) if bridge_device.exists(): if not bridge_lib.is_bridged_interface(interface_name): return True LOG.debug("Removing device %(interface_name)s from bridge " "%(bridge_name)s", {'interface_name': interface_name, 'bridge_name': bridge_name}) if bridge_device.delif(interface_name): return False LOG.debug("Done removing device %(interface_name)s from bridge " "%(bridge_name)s", {'interface_name': interface_name, 'bridge_name': bridge_name}) return True else: LOG.debug("Cannot remove device %(interface_name)s bridge " "%(bridge_name)s does not exist", {'interface_name': interface_name, 'bridge_name': bridge_name}) return False def delete_interface(self, interface): device = self.ip.device(interface) if device.exists(): LOG.debug("Deleting interface %s", interface) device.link.set_down() device.link.delete() LOG.debug("Done deleting interface %s", interface) def get_devices_modified_timestamps(self, devices): return {d: bridge_lib.get_interface_bridged_time(d) for d in devices} def get_all_devices(self): devices = set() for device in bridge_lib.get_bridge_names(): if device.startswith(constants.TAP_DEVICE_PREFIX): devices.add(device) return devices def vxlan_ucast_supported(self): if not cfg.CONF.VXLAN.l2_population: return False if not ip_lib.iproute_arg_supported( ['bridge', 'fdb'], 'append'): LOG.warning(_LW('Option "%(option)s" must be supported by command ' '"%(command)s" to enable %(mode)s mode'), {'option': 'append', 'command': 'bridge fdb', 'mode': 'VXLAN UCAST'}) return False 
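        # The probe below picks the first VNI that is not already in use,
        # creates a throwaway vxlan device for it and attempts
        # 'bridge fdb append ... dst 1.1.1.1'; success means the kernel and
        # iproute2 support unicast VXLAN flooding. The test device is removed
        # in the finally block either way.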
test_iface = None for seg_id in moves.range(1, p_const.MAX_VXLAN_VNI + 1): if (ip_lib.device_exists(self.get_vxlan_device_name(seg_id)) or ip_lib.vxlan_in_use(seg_id)): continue test_iface = self.ensure_vxlan(seg_id) break else: LOG.error(_LE('No valid Segmentation ID to perform UCAST test.')) return False try: utils.execute( cmd=['bridge', 'fdb', 'append', constants.FLOODING_ENTRY[0], 'dev', test_iface, 'dst', '1.1.1.1'], run_as_root=True, log_fail_as_error=False) return True except RuntimeError: return False finally: self.delete_interface(test_iface) def vxlan_mcast_supported(self): if not cfg.CONF.VXLAN.vxlan_group: LOG.warning(_LW('VXLAN multicast group(s) must be provided in ' 'vxlan_group option to enable VXLAN MCAST mode')) return False if not ip_lib.iproute_arg_supported( ['ip', 'link', 'add', 'type', 'vxlan'], 'proxy'): LOG.warning(_LW('Option "%(option)s" must be supported by command ' '"%(command)s" to enable %(mode)s mode'), {'option': 'proxy', 'command': 'ip link add type vxlan', 'mode': 'VXLAN MCAST'}) return False return True def check_vxlan_support(self): self.vxlan_mode = lconst.VXLAN_NONE if self.vxlan_ucast_supported(): self.vxlan_mode = lconst.VXLAN_UCAST elif self.vxlan_mcast_supported(): self.vxlan_mode = lconst.VXLAN_MCAST else: raise exceptions.VxlanNetworkUnsupported() LOG.debug('Using %s VXLAN mode', self.vxlan_mode) def fdb_ip_entry_exists(self, mac, ip, interface): entries = utils.execute(['ip', 'neigh', 'show', 'to', ip, 'dev', interface], run_as_root=True) return mac in entries def fdb_bridge_entry_exists(self, mac, interface, agent_ip=None): entries = utils.execute(['bridge', 'fdb', 'show', 'dev', interface], run_as_root=True) if not agent_ip: return mac in entries return (agent_ip in entries and mac in entries) def add_fdb_ip_entry(self, mac, ip, interface): if cfg.CONF.VXLAN.arp_responder: ip_lib.IPDevice(interface).neigh.add(ip, mac) def remove_fdb_ip_entry(self, mac, ip, interface): if cfg.CONF.VXLAN.arp_responder: ip_lib.IPDevice(interface).neigh.delete(ip, mac) def add_fdb_bridge_entry(self, mac, agent_ip, interface, operation="add"): utils.execute(['bridge', 'fdb', operation, mac, 'dev', interface, 'dst', agent_ip], run_as_root=True, check_exit_code=False) def remove_fdb_bridge_entry(self, mac, agent_ip, interface): utils.execute(['bridge', 'fdb', 'del', mac, 'dev', interface, 'dst', agent_ip], run_as_root=True, check_exit_code=False) def add_fdb_entries(self, agent_ip, ports, interface): for mac, ip in ports: if mac != constants.FLOODING_ENTRY[0]: self.add_fdb_ip_entry(mac, ip, interface) self.add_fdb_bridge_entry(mac, agent_ip, interface, operation="replace") elif self.vxlan_mode == lconst.VXLAN_UCAST: if self.fdb_bridge_entry_exists(mac, interface): self.add_fdb_bridge_entry(mac, agent_ip, interface, "append") else: self.add_fdb_bridge_entry(mac, agent_ip, interface) def remove_fdb_entries(self, agent_ip, ports, interface): for mac, ip in ports: if mac != constants.FLOODING_ENTRY[0]: self.remove_fdb_ip_entry(mac, ip, interface) self.remove_fdb_bridge_entry(mac, agent_ip, interface) elif self.vxlan_mode == lconst.VXLAN_UCAST: self.remove_fdb_bridge_entry(mac, agent_ip, interface) def get_agent_id(self): if self.bridge_mappings: mac = utils.get_interface_mac( list(self.bridge_mappings.values())[0]) else: devices = ip_lib.IPWrapper().get_devices(True) if devices: mac = utils.get_interface_mac(devices[0].name) else: LOG.error(_LE("Unable to obtain MAC address for unique ID. 
" "Agent terminated!")) sys.exit(1) return 'lb%s' % mac.replace(":", "") def get_agent_configurations(self): configurations = {'bridge_mappings': self.bridge_mappings, 'interface_mappings': self.interface_mappings } if self.vxlan_mode != lconst.VXLAN_NONE: configurations['tunneling_ip'] = self.local_ip configurations['tunnel_types'] = [p_const.TYPE_VXLAN] configurations['l2_population'] = cfg.CONF.VXLAN.l2_population return configurations def get_rpc_callbacks(self, context, agent, sg_agent): return LinuxBridgeRpcCallbacks(context, agent, sg_agent) def get_rpc_consumers(self): consumers = [[topics.PORT, topics.UPDATE], [topics.NETWORK, topics.DELETE], [topics.NETWORK, topics.UPDATE], [topics.SECURITY_GROUP, topics.UPDATE]] if cfg.CONF.VXLAN.l2_population: consumers.append([topics.L2POPULATION, topics.UPDATE]) return consumers def ensure_port_admin_state(self, tap_name, admin_state_up): LOG.debug("Setting admin_state_up to %s for device %s", admin_state_up, tap_name) if admin_state_up: ip_lib.IPDevice(tap_name).link.set_up() else: ip_lib.IPDevice(tap_name).link.set_down() def setup_arp_spoofing_protection(self, device, device_details): arp_protect.setup_arp_spoofing_protection(device, device_details) def delete_arp_spoofing_protection(self, devices): arp_protect.delete_arp_spoofing_protection(devices) def delete_unreferenced_arp_protection(self, current_devices): arp_protect.delete_unreferenced_arp_protection(current_devices) def get_extension_driver_type(self): return lconst.EXTENSION_DRIVER_TYPE class LinuxBridgeRpcCallbacks( sg_rpc.SecurityGroupAgentRpcCallbackMixin, l2pop_rpc.L2populationRpcCallBackMixin, amb.CommonAgentManagerRpcCallBackBase): # Set RPC API version to 1.0 by default. # history # 1.1 Support Security Group RPC # 1.3 Added param devices_to_update to security_groups_provider_updated # 1.4 Added support for network_update target = oslo_messaging.Target(version='1.4') def network_delete(self, context, **kwargs): LOG.debug("network_delete received") network_id = kwargs.get('network_id') # NOTE(nick-ma-z): Don't remove pre-existing user-defined bridges if network_id in self.network_map: phynet = self.network_map[network_id].physical_network if phynet and phynet in self.agent.mgr.bridge_mappings: LOG.info(_LI("Physical network %s is defined in " "bridge_mappings and cannot be deleted."), network_id) return else: LOG.debug("Network %s is not on this agent.", network_id) return bridge_name = self.agent.mgr.get_bridge_name(network_id) LOG.debug("Delete %s", bridge_name) self.agent.mgr.delete_bridge(bridge_name) def port_update(self, context, **kwargs): port_id = kwargs['port']['id'] device_name = self.agent.mgr.get_tap_device_name(port_id) # Put the device name in the updated_devices set. # Do not store port details, as if they're used for processing # notifications there is no guarantee the notifications are # processed in the same order as the relevant API requests. 
self.updated_devices.add(device_name) LOG.debug("port_update RPC received for port: %s", port_id) def network_update(self, context, **kwargs): network_id = kwargs['network']['id'] LOG.debug("network_update message processed for network " "%(network_id)s, with ports: %(ports)s", {'network_id': network_id, 'ports': self.agent.network_ports[network_id]}) for port_data in self.agent.network_ports[network_id]: self.updated_devices.add(port_data['device']) def fdb_add(self, context, fdb_entries): LOG.debug("fdb_add received") for network_id, values in fdb_entries.items(): segment = self.network_map.get(network_id) if not segment: return if segment.network_type != p_const.TYPE_VXLAN: return interface = self.agent.mgr.get_vxlan_device_name( segment.segmentation_id) agent_ports = values.get('ports') for agent_ip, ports in agent_ports.items(): if agent_ip == self.agent.mgr.local_ip: continue self.agent.mgr.add_fdb_entries(agent_ip, ports, interface) def fdb_remove(self, context, fdb_entries): LOG.debug("fdb_remove received") for network_id, values in fdb_entries.items(): segment = self.network_map.get(network_id) if not segment: return if segment.network_type != p_const.TYPE_VXLAN: return interface = self.agent.mgr.get_vxlan_device_name( segment.segmentation_id) agent_ports = values.get('ports') for agent_ip, ports in agent_ports.items(): if agent_ip == self.agent.mgr.local_ip: continue self.agent.mgr.remove_fdb_entries(agent_ip, ports, interface) def _fdb_chg_ip(self, context, fdb_entries): LOG.debug("update chg_ip received") for network_id, agent_ports in fdb_entries.items(): segment = self.network_map.get(network_id) if not segment: return if segment.network_type != p_const.TYPE_VXLAN: return interface = self.agent.mgr.get_vxlan_device_name( segment.segmentation_id) for agent_ip, state in agent_ports.items(): if agent_ip == self.agent.mgr.local_ip: continue after = state.get('after', []) for mac, ip in after: self.agent.mgr.add_fdb_ip_entry(mac, ip, interface) before = state.get('before', []) for mac, ip in before: self.agent.mgr.remove_fdb_ip_entry(mac, ip, interface) def fdb_update(self, context, fdb_entries): LOG.debug("fdb_update received") for action, values in fdb_entries.items(): method = '_fdb_' + action if not hasattr(self, method): raise NotImplementedError() getattr(self, method)(context, values) def main(): common_config.init(sys.argv[1:]) common_config.setup_logging() try: interface_mappings = n_utils.parse_mappings( cfg.CONF.LINUX_BRIDGE.physical_interface_mappings) except ValueError as e: LOG.error(_LE("Parsing physical_interface_mappings failed: %s. " "Agent terminated!"), e) sys.exit(1) LOG.info(_LI("Interface mappings: %s"), interface_mappings) try: bridge_mappings = n_utils.parse_mappings( cfg.CONF.LINUX_BRIDGE.bridge_mappings) except ValueError as e: LOG.error(_LE("Parsing bridge_mappings failed: %s. " "Agent terminated!"), e) sys.exit(1) LOG.info(_LI("Bridge mappings: %s"), bridge_mappings) manager = LinuxBridgeManager(bridge_mappings, interface_mappings) polling_interval = cfg.CONF.AGENT.polling_interval quitting_rpc_timeout = cfg.CONF.AGENT.quitting_rpc_timeout agent = ca.CommonAgentLoop(manager, polling_interval, quitting_rpc_timeout, constants.AGENT_TYPE_LINUXBRIDGE, LB_AGENT_BINARY) LOG.info(_LI("Agent initialized successfully, now running... 
")) launcher = service.launch(cfg.CONF, agent) launcher.wait() neutron-8.4.0/neutron/plugins/ml2/drivers/linuxbridge/agent/extension_drivers/0000775000567000056710000000000013044373210031062 5ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/plugins/ml2/drivers/linuxbridge/agent/extension_drivers/qos_driver.py0000664000567000056710000000371213044372760033625 0ustar jenkinsjenkins00000000000000# Copyright 2016 OVH SAS # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import cfg from oslo_log import helpers as log_helpers from oslo_log import log from neutron._i18n import _LI from neutron.agent.l2.extensions import qos from neutron.agent.linux import tc_lib from neutron.plugins.ml2.drivers.linuxbridge.mech_driver import ( mech_linuxbridge) LOG = log.getLogger(__name__) class QosLinuxbridgeAgentDriver(qos.QosAgentDriver): SUPPORTED_RULES = ( mech_linuxbridge.LinuxbridgeMechanismDriver.supported_qos_rule_types ) def initialize(self): LOG.info(_LI("Initializing Linux bridge QoS extension")) @log_helpers.log_method_call def create_bandwidth_limit(self, port, rule): tc_wrapper = self._get_tc_wrapper(port) tc_wrapper.set_filters_bw_limit( rule.max_kbps, self._get_egress_burst_value(rule) ) @log_helpers.log_method_call def update_bandwidth_limit(self, port, rule): tc_wrapper = self._get_tc_wrapper(port) tc_wrapper.update_filters_bw_limit( rule.max_kbps, self._get_egress_burst_value(rule) ) @log_helpers.log_method_call def delete_bandwidth_limit(self, port): tc_wrapper = self._get_tc_wrapper(port) tc_wrapper.delete_filters_bw_limit() def _get_tc_wrapper(self, port): return tc_lib.TcCommand( port['device'], cfg.CONF.QOS.kernel_hz, ) neutron-8.4.0/neutron/plugins/ml2/drivers/linuxbridge/agent/extension_drivers/__init__.py0000664000567000056710000000000013044372736033175 0ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/plugins/ml2/drivers/type_tunnel.py0000664000567000056710000004064713044372760024642 0ustar jenkinsjenkins00000000000000# Copyright (c) 2013 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import abc import itertools import operator from oslo_config import cfg from oslo_db import api as oslo_db_api from oslo_db import exception as db_exc from oslo_log import log from six import moves from sqlalchemy import or_ from neutron._i18n import _, _LI, _LW from neutron.common import exceptions as exc from neutron.common import topics from neutron.db import api as db_api from neutron.plugins.common import utils as plugin_utils from neutron.plugins.ml2 import driver_api as api from neutron.plugins.ml2.drivers import helpers LOG = log.getLogger(__name__) TUNNEL = 'tunnel' def chunks(iterable, chunk_size): """Chunks data into chunks with size <= chunk_size.""" iterator = iter(iterable) chunk = list(itertools.islice(iterator, 0, chunk_size)) while chunk: yield chunk chunk = list(itertools.islice(iterator, 0, chunk_size)) class TunnelTypeDriver(helpers.SegmentTypeDriver): """Define stable abstract interface for ML2 type drivers. Tunnel type networks rely on tunnel endpoints. This class defines abstract methods to manage these endpoints. """ BULK_SIZE = 100 def __init__(self, model): super(TunnelTypeDriver, self).__init__(model) self.segmentation_key = next(iter(self.primary_keys)) @abc.abstractmethod def add_endpoint(self, ip, host): """Register the endpoint in the type_driver database. :param ip: the IP address of the endpoint :param host: the host name of the endpoint """ @abc.abstractmethod def get_endpoints(self): """Get every endpoint managed by the type_driver :returns: a list of dicts [{ip_address: endpoint_ip, host: endpoint_host}, ...] """ @abc.abstractmethod def get_endpoint_by_host(self, host): """Get endpoint for a given host managed by the type_driver :param host: the host name of the endpoint :returns: the db object for that host if found in the type_driver database, else None """ @abc.abstractmethod def get_endpoint_by_ip(self, ip): """Get endpoint for a given tunnel ip managed by the type_driver :param ip: the IP address of the endpoint :returns: the db object for that ip if found in the type_driver database, else None """ @abc.abstractmethod def delete_endpoint(self, ip): """Delete the endpoint in the type_driver database. :param ip: the IP address of the endpoint """ @abc.abstractmethod def delete_endpoint_by_host_or_ip(self, host, ip): """Delete the endpoint in the type_driver database. This function will delete any endpoint matching the specified ip or host. 
:param host: the host name of the endpoint :param ip: the IP address of the endpoint """ def _initialize(self, raw_tunnel_ranges): self.tunnel_ranges = [] self._parse_tunnel_ranges(raw_tunnel_ranges, self.tunnel_ranges) self.sync_allocations() def _parse_tunnel_ranges(self, tunnel_ranges, current_range): for entry in tunnel_ranges: entry = entry.strip() try: tun_min, tun_max = entry.split(':') tun_min = tun_min.strip() tun_max = tun_max.strip() tunnel_range = int(tun_min), int(tun_max) except ValueError as ex: raise exc.NetworkTunnelRangeError(tunnel_range=entry, error=ex) plugin_utils.verify_tunnel_range(tunnel_range, self.get_type()) current_range.append(tunnel_range) LOG.info(_LI("%(type)s ID ranges: %(range)s"), {'type': self.get_type(), 'range': current_range}) @oslo_db_api.wrap_db_retry( max_retries=db_api.MAX_RETRIES, exception_checker=db_api.is_retriable) def sync_allocations(self): # determine current configured allocatable tunnel ids tunnel_ids = set() for tun_min, tun_max in self.tunnel_ranges: tunnel_ids |= set(moves.range(tun_min, tun_max + 1)) tunnel_id_getter = operator.attrgetter(self.segmentation_key) tunnel_col = getattr(self.model, self.segmentation_key) session = db_api.get_session() with session.begin(subtransactions=True): # remove from table unallocated tunnels not currently allocatable # fetch results as list via all() because we'll be iterating # through them twice allocs = (session.query(self.model). with_lockmode("update").all()) # collect those vnis that need to be deleted from the db unallocateds = ( tunnel_id_getter(a) for a in allocs if not a.allocated) to_remove = (x for x in unallocateds if x not in tunnel_ids) # Immediately delete tunnels in chunks. This leaves no work for # the flush at the end of the transaction for chunk in chunks(to_remove, self.BULK_SIZE): session.query(self.model).filter( tunnel_col.in_(chunk)).delete(synchronize_session=False) # collect vnis that need to be added existings = {tunnel_id_getter(a) for a in allocs} missings = list(tunnel_ids - existings) for chunk in chunks(missings, self.BULK_SIZE): bulk = [{self.segmentation_key: x, 'allocated': False} for x in chunk] session.execute(self.model.__table__.insert(), bulk) def is_partial_segment(self, segment): return segment.get(api.SEGMENTATION_ID) is None def validate_provider_segment(self, segment): physical_network = segment.get(api.PHYSICAL_NETWORK) if physical_network: msg = _("provider:physical_network specified for %s " "network") % segment.get(api.NETWORK_TYPE) raise exc.InvalidInput(error_message=msg) for key, value in segment.items(): if value and key not in [api.NETWORK_TYPE, api.SEGMENTATION_ID]: msg = (_("%(key)s prohibited for %(tunnel)s provider network") % {'key': key, 'tunnel': segment.get(api.NETWORK_TYPE)}) raise exc.InvalidInput(error_message=msg) def reserve_provider_segment(self, session, segment): if self.is_partial_segment(segment): alloc = self.allocate_partially_specified_segment(session) if not alloc: raise exc.NoNetworkAvailable() else: segmentation_id = segment.get(api.SEGMENTATION_ID) alloc = self.allocate_fully_specified_segment( session, **{self.segmentation_key: segmentation_id}) if not alloc: raise exc.TunnelIdInUse(tunnel_id=segmentation_id) return {api.NETWORK_TYPE: self.get_type(), api.PHYSICAL_NETWORK: None, api.SEGMENTATION_ID: getattr(alloc, self.segmentation_key), api.MTU: self.get_mtu()} def allocate_tenant_segment(self, session): alloc = self.allocate_partially_specified_segment(session) if not alloc: return return {api.NETWORK_TYPE: self.get_type(), 
api.PHYSICAL_NETWORK: None, api.SEGMENTATION_ID: getattr(alloc, self.segmentation_key), api.MTU: self.get_mtu()} def release_segment(self, session, segment): tunnel_id = segment[api.SEGMENTATION_ID] inside = any(lo <= tunnel_id <= hi for lo, hi in self.tunnel_ranges) info = {'type': self.get_type(), 'id': tunnel_id} with session.begin(subtransactions=True): query = (session.query(self.model). filter_by(**{self.segmentation_key: tunnel_id})) if inside: count = query.update({"allocated": False}) if count: LOG.debug("Releasing %(type)s tunnel %(id)s to pool", info) else: count = query.delete() if count: LOG.debug("Releasing %(type)s tunnel %(id)s outside pool", info) if not count: LOG.warning(_LW("%(type)s tunnel %(id)s not found"), info) def get_allocation(self, session, tunnel_id): return (session.query(self.model). filter_by(**{self.segmentation_key: tunnel_id}). first()) def get_mtu(self, physical_network=None): seg_mtu = super(TunnelTypeDriver, self).get_mtu() mtu = [] if seg_mtu > 0: mtu.append(seg_mtu) if cfg.CONF.ml2.path_mtu > 0: mtu.append(cfg.CONF.ml2.path_mtu) return min(mtu) if mtu else 0 class EndpointTunnelTypeDriver(TunnelTypeDriver): def __init__(self, segment_model, endpoint_model): super(EndpointTunnelTypeDriver, self).__init__(segment_model) self.endpoint_model = endpoint_model self.segmentation_key = next(iter(self.primary_keys)) def get_endpoint_by_host(self, host): LOG.debug("get_endpoint_by_host() called for host %s", host) session = db_api.get_session() return (session.query(self.endpoint_model). filter_by(host=host).first()) def get_endpoint_by_ip(self, ip): LOG.debug("get_endpoint_by_ip() called for ip %s", ip) session = db_api.get_session() return (session.query(self.endpoint_model). filter_by(ip_address=ip).first()) def delete_endpoint(self, ip): LOG.debug("delete_endpoint() called for ip %s", ip) session = db_api.get_session() with session.begin(subtransactions=True): (session.query(self.endpoint_model). filter_by(ip_address=ip).delete()) def delete_endpoint_by_host_or_ip(self, host, ip): LOG.debug("delete_endpoint_by_host_or_ip() called for " "host %(host)s or %(ip)s", {'host': host, 'ip': ip}) session = db_api.get_session() with session.begin(subtransactions=True): session.query(self.endpoint_model).filter( or_(self.endpoint_model.host == host, self.endpoint_model.ip_address == ip)).delete() def _get_endpoints(self): LOG.debug("_get_endpoints() called") session = db_api.get_session() return session.query(self.endpoint_model) def _add_endpoint(self, ip, host, **kwargs): LOG.debug("_add_endpoint() called for ip %s", ip) session = db_api.get_session() try: endpoint = self.endpoint_model(ip_address=ip, host=host, **kwargs) endpoint.save(session) except db_exc.DBDuplicateEntry: endpoint = (session.query(self.endpoint_model). filter_by(ip_address=ip).one()) LOG.warning(_LW("Endpoint with ip %s already exists"), ip) return endpoint class TunnelRpcCallbackMixin(object): def setup_tunnel_callback_mixin(self, notifier, type_manager): self._notifier = notifier self._type_manager = type_manager def tunnel_sync(self, rpc_context, **kwargs): """Update new tunnel. Updates the database with the tunnel IP. All listening agents will also be notified about the new tunnel IP. 
""" tunnel_ip = kwargs.get('tunnel_ip') if not tunnel_ip: msg = _("Tunnel IP value needed by the ML2 plugin") raise exc.InvalidInput(error_message=msg) tunnel_type = kwargs.get('tunnel_type') if not tunnel_type: msg = _("Network type value needed by the ML2 plugin") raise exc.InvalidInput(error_message=msg) host = kwargs.get('host') driver = self._type_manager.drivers.get(tunnel_type) if driver: # The given conditional statements will verify the following # things: # 1. If host is not passed from an agent, it is a legacy mode. # 2. If passed host and tunnel_ip are not found in the DB, # it is a new endpoint. # 3. If host is passed from an agent and it is not found in DB # but the passed tunnel_ip is found, delete the endpoint # from DB and add the endpoint with (tunnel_ip, host), # it is an upgrade case. # 4. If passed host is found in DB and passed tunnel ip is not # found, delete the endpoint belonging to that host and # add endpoint with latest (tunnel_ip, host), it is a case # where local_ip of an agent got changed. # 5. If the passed host had another ip in the DB the host-id has # roamed to a different IP then delete any reference to the new # local_ip or the host id. Don't notify tunnel_delete for the # old IP since that one could have been taken by a different # agent host-id (neutron-ovs-cleanup should be used to clean up # the stale endpoints). # Finally create a new endpoint for the (tunnel_ip, host). if host: host_endpoint = driver.obj.get_endpoint_by_host(host) ip_endpoint = driver.obj.get_endpoint_by_ip(tunnel_ip) if (ip_endpoint and ip_endpoint.host is None and host_endpoint is None): driver.obj.delete_endpoint(ip_endpoint.ip_address) elif (ip_endpoint and ip_endpoint.host != host): LOG.info( _LI("Tunnel IP %(ip)s was used by host %(host)s and " "will be assigned to %(new_host)s"), {'ip': ip_endpoint.ip_address, 'host': ip_endpoint.host, 'new_host': host}) driver.obj.delete_endpoint_by_host_or_ip( host, ip_endpoint.ip_address) elif (host_endpoint and host_endpoint.ip_address != tunnel_ip): # Notify all other listening agents to delete stale tunnels self._notifier.tunnel_delete(rpc_context, host_endpoint.ip_address, tunnel_type) driver.obj.delete_endpoint(host_endpoint.ip_address) tunnel = driver.obj.add_endpoint(tunnel_ip, host) tunnels = driver.obj.get_endpoints() entry = {'tunnels': tunnels} # Notify all other listening agents self._notifier.tunnel_update(rpc_context, tunnel.ip_address, tunnel_type) # Return the list of tunnels IP's to the agent return entry else: msg = _("Network type value '%s' not supported") % tunnel_type raise exc.InvalidInput(error_message=msg) class TunnelAgentRpcApiMixin(object): def _get_tunnel_update_topic(self): return topics.get_topic_name(self.topic, TUNNEL, topics.UPDATE) def tunnel_update(self, context, tunnel_ip, tunnel_type): cctxt = self.client.prepare(topic=self._get_tunnel_update_topic(), fanout=True) cctxt.cast(context, 'tunnel_update', tunnel_ip=tunnel_ip, tunnel_type=tunnel_type) def _get_tunnel_delete_topic(self): return topics.get_topic_name(self.topic, TUNNEL, topics.DELETE) def tunnel_delete(self, context, tunnel_ip, tunnel_type): cctxt = self.client.prepare(topic=self._get_tunnel_delete_topic(), fanout=True) cctxt.cast(context, 'tunnel_delete', tunnel_ip=tunnel_ip, tunnel_type=tunnel_type) neutron-8.4.0/neutron/plugins/ml2/drivers/agent/0000775000567000056710000000000013044373210022774 5ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/plugins/ml2/drivers/agent/config.py0000664000567000056710000000504413044372760024627 
0ustar jenkinsjenkins00000000000000# Copyright (c) 2016 IBM Corp. # # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import cfg from neutron._i18n import _ from neutron.agent.common import config agent_opts = [ cfg.IntOpt('polling_interval', default=2, help=_("The number of seconds the agent will wait between " "polling for local device changes.")), cfg.IntOpt('quitting_rpc_timeout', default=10, help=_("Set new timeout in seconds for new rpc calls after " "agent receives SIGTERM. If value is set to 0, rpc " "timeout won't be changed")), # TODO(kevinbenton): The following opt is duplicated between the OVS agent # and the Linuxbridge agent to make it easy to back-port. These shared opts # should be moved into a common agent config options location as part of # the deduplication work. cfg.BoolOpt('prevent_arp_spoofing', default=True, deprecated_for_removal=True, help=_("Enable suppression of ARP responses that don't match " "an IP address that belongs to the port from which " "they originate. Note: This prevents the VMs attached " "to this agent from spoofing, it doesn't protect them " "from other devices which have the capability to spoof " "(e.g. bare metal or VMs attached to agents without " "this flag set to True). Spoofing rules will not be " "added to any ports that have port security disabled. " "For LinuxBridge, this requires ebtables. For OVS, it " "requires a version that supports matching ARP " "headers. This option will be removed in Newton so " "the only way to disable protection will be via the " "port security extension.")) ] cfg.CONF.register_opts(agent_opts, "AGENT") config.register_agent_state_opts_helper(cfg.CONF) neutron-8.4.0/neutron/plugins/ml2/drivers/agent/_agent_manager_base.py0000664000567000056710000001674313044372760027303 0ustar jenkinsjenkins00000000000000# Copyright (c) 2016 IBM Corp. # # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import abc from oslo_log import log as logging import six LOG = logging.getLogger(__name__) class NetworkSegment(object): """Represents a Neutron network segment""" def __init__(self, network_type, physical_network, segmentation_id): self.network_type = network_type self.physical_network = physical_network self.segmentation_id = segmentation_id @six.add_metaclass(abc.ABCMeta) class CommonAgentManagerRpcCallBackBase(object): """Base class for managers' RPC callbacks. This class must be inherited by an RPC callback class that is used in combination with the common agent. 
""" def __init__(self, context, agent, sg_agent): self.context = context self.agent = agent self.sg_agent = sg_agent self.network_map = {} # stores received port_updates and port_deletes for # processing by the main loop self.updated_devices = set() @abc.abstractmethod def security_groups_rule_updated(self, context, **kwargs): """Callback for security group rule update. :param security_groups: list of updated security_groups """ @abc.abstractmethod def security_groups_member_updated(self, context, **kwargs): """Callback for security group member update. :param security_groups: list of updated security_groups """ @abc.abstractmethod def security_groups_provider_updated(self, context, **kwargs): """Callback for security group provider update.""" def add_network(self, network_id, network_segment): """Add a network to the agent internal network list :param network_id: The UUID of the network :param network_segment: The NetworkSegment object for this network """ self.network_map[network_id] = network_segment def get_and_clear_updated_devices(self): """Get and clear the list of devices for which a update was received. :return: set - A set with updated devices. Format is ['tap1', 'tap2'] """ # Save and reinitialize the set variable that the port_update RPC uses. # This should be thread-safe as the greenthread should not yield # between these two statements. updated_devices = self.updated_devices self.updated_devices = set() return updated_devices @six.add_metaclass(abc.ABCMeta) class CommonAgentManagerBase(object): """Base class for managers that are used with the common agent loop. This class must be inherited by a manager class that is used in combination with the common agent. """ @abc.abstractmethod def ensure_port_admin_state(self, device, admin_state_up): """Enforce admin_state for a port :param device: The device for which the admin_state should be set :param admin_state_up: True for admin_state_up, False for admin_state_down """ @abc.abstractmethod def get_agent_configurations(self): """Establishes the agent configuration map. The content of this map is part of the agent state reports to the neutron server. :return: map -- the map containing the configuration values :rtype: dict """ @abc.abstractmethod def get_agent_id(self): """Calculate the agent id that should be used on this host :return: str -- agent identifier """ @abc.abstractmethod def get_all_devices(self): """Get a list of all devices of the managed type from this host A device in this context is a String that represents a network device. This can for example be the name of the device or its MAC address. This value will be stored in the Plug-in and be part of the device_details. Typically this list is retrieved from the sysfs. E.g. for linuxbridge it returns all names of devices of type 'tap' that start with a certain prefix. :return: set -- the set of all devices e.g. ['tap1', 'tap2'] """ @abc.abstractmethod def get_devices_modified_timestamps(self, devices): """Get a dictionary of modified timestamps by device The devices passed in are expected to be the same format that get_all_devices returns. :return: dict -- A dictionary of timestamps keyed by device """ @abc.abstractmethod def get_extension_driver_type(self): """Get the agent extension driver type. :return: str -- The String defining the agent extension type """ @abc.abstractmethod def get_rpc_callbacks(self, context, agent, sg_agent): """Returns the class containing all the agent rpc callback methods :return: class - the class containing the agent rpc callback methods. 
It must reflect the CommonAgentManagerRpcCallBackBase Interface. """ @abc.abstractmethod def get_rpc_consumers(self): """Get a list of topics for which an RPC consumer should be created :return: list -- A list of topics. Each topic in this list is a list consisting of a name, an operation, and an optional host param keying the subscription to topic.host for plugin calls. """ @abc.abstractmethod def plug_interface(self, network_id, network_segment, device, device_owner): """Plug the interface (device). :param network_id: The UUID of the Neutron network :param network_segment: The NetworkSegment object for this network :param device: The device that should be plugged :param device_owner: The device owner of the port :return: bool -- True if the interface is plugged now. False if the interface could not be plugged. """ @abc.abstractmethod def setup_arp_spoofing_protection(self, device, device_details): """Setup the arp spoofing protection for the given port. :param device: The device to set up arp spoofing rules for, where device is the device String that is stored in the Neutron Plug-in for this Port. E.g. 'tap1' :param device_details: The device_details map retrieved from the Neutron Plugin """ @abc.abstractmethod def delete_arp_spoofing_protection(self, devices): """Remove the arp spoofing protection for the given ports. :param devices: List of devices that have been removed, where device is the device String that is stored for this port in the Neutron Plug-in. E.g. ['tap1', 'tap2'] """ @abc.abstractmethod def delete_unreferenced_arp_protection(self, current_devices): """Cleanup arp spoofing protection entries. :param current_devices: List of devices that currently exist on this host, where device is the device String that could have been stored in the Neutron Plug-in. E.g. ['tap1', 'tap2'] """ neutron-8.4.0/neutron/plugins/ml2/drivers/agent/__init__.py0000664000567000056710000000000013044372736025107 0ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/plugins/ml2/drivers/agent/_common_agent.py0000664000567000056710000004603613044372760026175 0ustar jenkinsjenkins00000000000000# Copyright (c) 2016 IBM Corp. # # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
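# --- Illustrative sketch (not part of this module) ----------------------
# The CommonAgentLoop defined below polls manager.get_all_devices() each
# iteration and derives what changed since the previous one (see
# scan_devices()); the core of that bookkeeping is plain set arithmetic:
#
#     def diff_devices_sketch(previous, current, updated):
#         return {
#             'added': current - previous,    # new devices to wire up
#             'removed': previous - current,  # devices that disappeared
#             'updated': updated & current,   # ignore updates for gone taps
#         }
#
#     # diff_devices_sketch({'tap1'}, {'tap1', 'tap2'}, {'tap1'})
#     # -> {'added': {'tap2'}, 'removed': set(), 'updated': {'tap1'}}
# -------------------------------------------------------------------------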
import collections import sys import time from oslo_config import cfg from oslo_log import log as logging from oslo_service import loopingcall from oslo_service import service from neutron._i18n import _LE, _LI from neutron.agent.l2.extensions import manager as ext_manager from neutron.agent import rpc as agent_rpc from neutron.agent import securitygroups_rpc as sg_rpc from neutron.api.rpc.callbacks import resources from neutron.common import config as common_config from neutron.common import constants from neutron.common import topics from neutron import context from neutron.plugins.ml2.drivers.agent import _agent_manager_base as amb from neutron.plugins.ml2.drivers.agent import config as cagt_config # noqa LOG = logging.getLogger(__name__) class CommonAgentLoop(service.Service): def __init__(self, manager, polling_interval, quitting_rpc_timeout, agent_type, agent_binary): """Constructor. :param manager: the manager object containing the impl specifics :param polling_interval: interval (secs) to poll DB. :param quitting_rpc_timeout: timeout in seconds for rpc calls after stop is called. :param agent_type: Specifies the type of the agent :param agent_binary: The agent binary string """ super(CommonAgentLoop, self).__init__() self.mgr = manager self._validate_manager_class() self.polling_interval = polling_interval self.quitting_rpc_timeout = quitting_rpc_timeout self.agent_type = agent_type self.agent_binary = agent_binary def _validate_manager_class(self): if not isinstance(self.mgr, amb.CommonAgentManagerBase): LOG.error(_LE("Manager class must inherit from " "CommonAgentManagerBase to ensure CommonAgent " "works properly.")) sys.exit(1) def start(self): self.prevent_arp_spoofing = cfg.CONF.AGENT.prevent_arp_spoofing # stores all configured ports on agent self.network_ports = collections.defaultdict(list) # flag to do a sync after revival self.fullsync = False self.context = context.get_admin_context_without_session() self.setup_rpc() self.init_extension_manager(self.connection) configurations = {'extensions': self.ext_manager.names()} configurations.update(self.mgr.get_agent_configurations()) #TODO(mangelajo): optimize resource_versions (see ovs agent) self.agent_state = { 'binary': self.agent_binary, 'host': cfg.CONF.host, 'topic': constants.L2_AGENT_TOPIC, 'configurations': configurations, 'agent_type': self.agent_type, 'resource_versions': resources.LOCAL_RESOURCE_VERSIONS, 'start_flag': True} report_interval = cfg.CONF.AGENT.report_interval if report_interval: heartbeat = loopingcall.FixedIntervalLoopingCall( self._report_state) heartbeat.start(interval=report_interval) # The initialization is complete; we can start receiving messages self.connection.consume_in_threads() self.daemon_loop() def stop(self, graceful=True): LOG.info(_LI("Stopping %s agent."), self.agent_type) if graceful and self.quitting_rpc_timeout: self.set_rpc_timeout(self.quitting_rpc_timeout) super(CommonAgentLoop, self).stop(graceful) def reset(self): common_config.setup_logging() def _report_state(self): try: devices = len(self.mgr.get_all_devices()) self.agent_state.get('configurations')['devices'] = devices agent_status = self.state_rpc.report_state(self.context, self.agent_state, True) if agent_status == constants.AGENT_REVIVED: LOG.info(_LI('%s Agent has just been revived. 
' 'Doing a full sync.'), self.agent_type) self.fullsync = True # we only want to update resource versions on startup self.agent_state.pop('resource_versions', None) self.agent_state.pop('start_flag', None) except Exception: LOG.exception(_LE("Failed reporting state!")) def _validate_rpc_endpoints(self): if not isinstance(self.endpoints[0], amb.CommonAgentManagerRpcCallBackBase): LOG.error(_LE("RPC Callback class must inherit from " "CommonAgentManagerRpcCallBackBase to ensure " "CommonAgent works properly.")) sys.exit(1) def setup_rpc(self): self.plugin_rpc = agent_rpc.PluginApi(topics.PLUGIN) self.sg_plugin_rpc = sg_rpc.SecurityGroupServerRpcApi(topics.PLUGIN) self.sg_agent = sg_rpc.SecurityGroupAgentRpc( self.context, self.sg_plugin_rpc, defer_refresh_firewall=True) self.agent_id = self.mgr.get_agent_id() LOG.info(_LI("RPC agent_id: %s"), self.agent_id) self.topic = topics.AGENT self.state_rpc = agent_rpc.PluginReportStateAPI(topics.REPORTS) # RPC network init # Handle updates from service self.rpc_callbacks = self.mgr.get_rpc_callbacks(self.context, self, self.sg_agent) self.endpoints = [self.rpc_callbacks] self._validate_rpc_endpoints() # Define the listening consumers for the agent consumers = self.mgr.get_rpc_consumers() self.connection = agent_rpc.create_consumers(self.endpoints, self.topic, consumers, start_listening=False) def init_extension_manager(self, connection): ext_manager.register_opts(cfg.CONF) self.ext_manager = ( ext_manager.AgentExtensionsManager(cfg.CONF)) self.ext_manager.initialize( connection, self.mgr.get_extension_driver_type()) def _clean_network_ports(self, device): for netid, ports_list in self.network_ports.items(): for port_data in ports_list: if device == port_data['device']: ports_list.remove(port_data) if ports_list == []: self.network_ports.pop(netid) return port_data['port_id'] def _update_network_ports(self, network_id, port_id, device): self._clean_network_ports(device) self.network_ports[network_id].append({ "port_id": port_id, "device": device }) def process_network_devices(self, device_info): resync_a = False resync_b = False self.sg_agent.setup_port_filters(device_info.get('added'), device_info.get('updated')) # Updated devices are processed the same as new ones, as their # admin_state_up may have changed. The set union prevents duplicating # work when a device is new and updated in the same polling iteration. devices_added_updated = (set(device_info.get('added')) | set(device_info.get('updated'))) if devices_added_updated: resync_a = self.treat_devices_added_updated(devices_added_updated) if device_info.get('removed'): resync_b = self.treat_devices_removed(device_info['removed']) # If one of the above operations fails => resync with plugin return (resync_a | resync_b) def treat_devices_added_updated(self, devices): try: devices_details_list = self.plugin_rpc.get_devices_details_list( self.context, devices, self.agent_id, host=cfg.CONF.host) except Exception: LOG.exception(_LE("Unable to get port details for %s"), devices) # resync is needed return True for device_details in devices_details_list: device = device_details['device'] LOG.debug("Port %s added", device) if 'port_id' in device_details: LOG.info(_LI("Port %(device)s updated. 
Details: %(details)s"), {'device': device, 'details': device_details}) if self.prevent_arp_spoofing: self.mgr.setup_arp_spoofing_protection(device, device_details) segment = amb.NetworkSegment( device_details.get('network_type'), device_details['physical_network'], device_details.get('segmentation_id') ) network_id = device_details['network_id'] self.rpc_callbacks.add_network(network_id, segment) interface_plugged = self.mgr.plug_interface( network_id, segment, device, device_details['device_owner']) # REVISIT(scheuran): Changed the way how ports admin_state_up # is implemented. # # Old lb implementation: # - admin_state_up: ensure that tap is plugged into bridge # - admin_state_down: remove tap from bridge # New lb implementation: # - admin_state_up: set tap device state to up # - admin_state_down: set tap device state to down # # However both approaches could result in races with # nova/libvirt and therefore to an invalid system state in the # scenario, where an instance is booted with a port configured # with admin_state_up = False: # # Libvirt does the following actions in exactly # this order (see libvirt virnetdevtap.c) # 1) Create the tap device, set its MAC and MTU # 2) Plug the tap into the bridge # 3) Set the tap online # # Old lb implementation: # A race could occur, if the lb agent removes the tap device # right after step 1). Then libvirt will add it to the bridge # again in step 2). # New lb implementation: # The race could occur if the lb-agent sets the taps device # state to down right after step 2). In step 3) libvirt # might set it to up again. # # This is not an issue if an instance is booted with a port # configured with admin_state_up = True. Libvirt would just # set the tap device up again. # # This refactoring is recommended for the following reasons: # 1) An existing race with libvirt caused by the behavior of # the old implementation. See Bug #1312016 # 2) The new code is much more readable if interface_plugged: self.mgr.ensure_port_admin_state( device, device_details['admin_state_up']) # update plugin about port status if admin_state is up if device_details['admin_state_up']: if interface_plugged: self.plugin_rpc.update_device_up(self.context, device, self.agent_id, cfg.CONF.host) else: self.plugin_rpc.update_device_down(self.context, device, self.agent_id, cfg.CONF.host) self._update_network_ports(device_details['network_id'], device_details['port_id'], device_details['device']) self.ext_manager.handle_port(self.context, device_details) else: LOG.info(_LI("Device %s not defined on plugin"), device) # no resync is needed return False def treat_devices_removed(self, devices): resync = False self.sg_agent.remove_devices_filter(devices) for device in devices: LOG.info(_LI("Attachment %s removed"), device) details = None try: details = self.plugin_rpc.update_device_down(self.context, device, self.agent_id, cfg.CONF.host) except Exception: LOG.exception(_LE("Error occurred while removing port %s"), device) resync = True if details and details['exists']: LOG.info(_LI("Port %s updated."), device) else: LOG.debug("Device %s not defined on plugin", device) port_id = self._clean_network_ports(device) self.ext_manager.delete_port(self.context, {'device': device, 'port_id': port_id}) if self.prevent_arp_spoofing: self.mgr.delete_arp_spoofing_protection(devices) return resync @staticmethod def _get_devices_locally_modified(timestamps, previous_timestamps): """Returns devices with previous timestamps that do not match new. 
If a device did not have a timestamp previously, it will not be returned because this means it is new. """ return {device for device, timestamp in timestamps.items() if previous_timestamps.get(device) and timestamp != previous_timestamps.get(device)} def scan_devices(self, previous, sync): device_info = {} updated_devices = self.rpc_callbacks.get_and_clear_updated_devices() current_devices = self.mgr.get_all_devices() device_info['current'] = current_devices if previous is None: # This is the first iteration of daemon_loop(). previous = {'added': set(), 'current': set(), 'updated': set(), 'removed': set(), 'timestamps': {}} # clear any orphaned ARP spoofing rules (e.g. interface was # manually deleted) if self.prevent_arp_spoofing: self.mgr.delete_unreferenced_arp_protection(current_devices) # check to see if any devices were locally modified based on their # timestamps changing since the previous iteration. If a timestamp # doesn't exist for a device, this calculation is skipped for that # device. device_info['timestamps'] = self.mgr.get_devices_modified_timestamps( current_devices) locally_updated = self._get_devices_locally_modified( device_info['timestamps'], previous['timestamps']) if locally_updated: LOG.debug("Adding locally changed devices to updated set: %s", locally_updated) updated_devices |= locally_updated if sync: # This is the first iteration, or the previous one had a problem. # Re-add all existing devices. device_info['added'] = current_devices # Retry cleaning devices that may not have been cleaned properly. # And clean any that disappeared since the previous iteration. device_info['removed'] = (previous['removed'] | previous['current'] - current_devices) # Retry updating devices that may not have been updated properly. # And any that were updated since the previous iteration. # Only update devices that currently exist. device_info['updated'] = (previous['updated'] | updated_devices & current_devices) else: device_info['added'] = current_devices - previous['current'] device_info['removed'] = previous['current'] - current_devices device_info['updated'] = updated_devices & current_devices return device_info def _device_info_has_changes(self, device_info): return (device_info.get('added') or device_info.get('updated') or device_info.get('removed')) def daemon_loop(self): LOG.info(_LI("%s Agent RPC Daemon Started!"), self.agent_type) device_info = None sync = True while True: start = time.time() if self.fullsync: sync = True self.fullsync = False if sync: LOG.info(_LI("%s Agent out of sync with plugin!"), self.agent_type) device_info = self.scan_devices(previous=device_info, sync=sync) sync = False if (self._device_info_has_changes(device_info) or self.sg_agent.firewall_refresh_needed()): LOG.debug("Agent loop found changes! %s", device_info) try: sync = self.process_network_devices(device_info) except Exception: LOG.exception(_LE("Error in agent loop. Devices info: %s"), device_info) sync = True # sleep till end of polling interval elapsed = (time.time() - start) if (elapsed < self.polling_interval): time.sleep(self.polling_interval - elapsed) else: LOG.debug("Loop iteration exceeded interval " "(%(polling_interval)s vs. 
%(elapsed)s)!", {'polling_interval': self.polling_interval, 'elapsed': elapsed}) def set_rpc_timeout(self, timeout): for rpc_api in (self.plugin_rpc, self.sg_plugin_rpc, self.state_rpc): rpc_api.client.timeout = timeout neutron-8.4.0/neutron/plugins/ml2/drivers/l2pop/0000775000567000056710000000000013044373210022732 5ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/plugins/ml2/drivers/l2pop/config.py0000664000567000056710000000170313044372760024563 0ustar jenkinsjenkins00000000000000# Copyright (c) 2013 OpenStack Foundation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import cfg from neutron._i18n import _ l2_population_options = [ cfg.IntOpt('agent_boot_time', default=180, help=_('Delay within which agent is expected to update ' 'existing ports whent it restarts')), ] cfg.CONF.register_opts(l2_population_options, "l2pop") neutron-8.4.0/neutron/plugins/ml2/drivers/l2pop/mech_driver.py0000664000567000056710000003314413044372760025611 0ustar jenkinsjenkins00000000000000# Copyright (c) 2013 OpenStack Foundation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from oslo_config import cfg from oslo_log import log as logging from neutron._i18n import _LW from neutron.common import constants as const from neutron import context as n_context from neutron.db import api as db_api from neutron.db import l3_hamode_db from neutron import manager from neutron.plugins.common import constants as service_constants from neutron.plugins.ml2.common import exceptions as ml2_exc from neutron.plugins.ml2 import driver_api as api from neutron.plugins.ml2.drivers.l2pop import config # noqa from neutron.plugins.ml2.drivers.l2pop import db as l2pop_db from neutron.plugins.ml2.drivers.l2pop import rpc as l2pop_rpc LOG = logging.getLogger(__name__) class L2populationMechanismDriver(api.MechanismDriver): def __init__(self): super(L2populationMechanismDriver, self).__init__() self.L2populationAgentNotify = l2pop_rpc.L2populationAgentNotifyAPI() def initialize(self): LOG.debug("Experimental L2 population driver") self.rpc_ctx = n_context.get_admin_context_without_session() def _get_port_fdb_entries(self, port): # the port might be concurrently deleted if not port or not port.get('fixed_ips'): return [] return [l2pop_rpc.PortInfo(mac_address=port['mac_address'], ip_address=ip['ip_address']) for ip in port['fixed_ips']] def _get_ha_port_agents_fdb( self, session, network_id, router_id): other_fdb_ports = {} for agent in l2pop_db.get_ha_agents_by_router_id(session, router_id): agent_active_ports = l2pop_db.get_agent_network_active_port_count( session, agent.host, network_id) if agent_active_ports == 0: ip = l2pop_db.get_agent_ip(agent) other_fdb_ports[ip] = [const.FLOODING_ENTRY] return other_fdb_ports def delete_port_postcommit(self, context): port = context.current agent_host = context.host fdb_entries = self._get_agent_fdb(context.bottom_bound_segment, port, agent_host) if port['device_owner'] in l2pop_db.HA_ROUTER_PORTS and fdb_entries: session = db_api.get_session() network_id = port['network_id'] other_fdb_ports = self._get_ha_port_agents_fdb( session, network_id, port['device_id']) fdb_entries[network_id]['ports'] = other_fdb_ports self.L2populationAgentNotify.remove_fdb_entries(self.rpc_ctx, fdb_entries) def _get_diff_ips(self, orig, port): orig_ips = set([ip['ip_address'] for ip in orig['fixed_ips']]) port_ips = set([ip['ip_address'] for ip in port['fixed_ips']]) # check if an ip has been added or removed orig_chg_ips = orig_ips.difference(port_ips) port_chg_ips = port_ips.difference(orig_ips) if orig_chg_ips or port_chg_ips: return orig_chg_ips, port_chg_ips def _fixed_ips_changed(self, context, orig, port, diff_ips): orig_ips, port_ips = diff_ips if (port['device_owner'] == const.DEVICE_OWNER_DVR_INTERFACE): agent_host = context.host else: agent_host = context.original_host if not agent_host: return agent_ip = l2pop_db.get_agent_ip_by_host(db_api.get_session(), agent_host) orig_mac_ip = [l2pop_rpc.PortInfo(mac_address=port['mac_address'], ip_address=ip) for ip in orig_ips] port_mac_ip = [l2pop_rpc.PortInfo(mac_address=port['mac_address'], ip_address=ip) for ip in port_ips] upd_fdb_entries = {port['network_id']: {agent_ip: {}}} ports = upd_fdb_entries[port['network_id']][agent_ip] if orig_mac_ip: ports['before'] = orig_mac_ip if port_mac_ip: ports['after'] = port_mac_ip self.L2populationAgentNotify.update_fdb_entries( self.rpc_ctx, {'chg_ip': upd_fdb_entries}) return True def update_port_precommit(self, context): port = context.current orig = context.original if (orig['mac_address'] != port['mac_address'] and context.status == const.PORT_STATUS_ACTIVE): 
LOG.warning(_LW("unable to modify mac_address of ACTIVE port " "%s"), port['id']) raise ml2_exc.MechanismDriverError(method='update_port_precommit') def update_port_postcommit(self, context): port = context.current orig = context.original if l3_hamode_db.is_ha_router_port(port['device_owner'], port['device_id']): return diff_ips = self._get_diff_ips(orig, port) if diff_ips: self._fixed_ips_changed(context, orig, port, diff_ips) if port['device_owner'] == const.DEVICE_OWNER_DVR_INTERFACE: if context.status == const.PORT_STATUS_ACTIVE: self.update_port_up(context) if context.status == const.PORT_STATUS_DOWN: agent_host = context.host fdb_entries = self._get_agent_fdb( context.bottom_bound_segment, port, agent_host) self.L2populationAgentNotify.remove_fdb_entries( self.rpc_ctx, fdb_entries) elif (context.host != context.original_host and context.original_status == const.PORT_STATUS_ACTIVE and context.status == const.PORT_STATUS_DOWN): # The port has been migrated. Send notification about port # removal from old host. fdb_entries = self._get_agent_fdb( context.original_bottom_bound_segment, orig, context.original_host) self.L2populationAgentNotify.remove_fdb_entries( self.rpc_ctx, fdb_entries) elif context.status != context.original_status: if context.status == const.PORT_STATUS_ACTIVE: self.update_port_up(context) elif context.status == const.PORT_STATUS_DOWN: fdb_entries = self._get_agent_fdb( context.bottom_bound_segment, port, context.host) self.L2populationAgentNotify.remove_fdb_entries( self.rpc_ctx, fdb_entries) def _validate_segment(self, segment, port_id, agent): if not segment: LOG.debug("Port %(port)s updated by agent %(agent)s isn't bound " "to any segment", {'port': port_id, 'agent': agent}) return False network_types = l2pop_db.get_agent_l2pop_network_types(agent) if network_types is None: network_types = l2pop_db.get_agent_tunnel_types(agent) if segment['network_type'] not in network_types: return False return True def _create_agent_fdb(self, session, agent, segment, network_id): agent_fdb_entries = {network_id: {'segment_id': segment['segmentation_id'], 'network_type': segment['network_type'], 'ports': {}}} tunnel_network_ports = ( l2pop_db.get_distributed_active_network_ports(session, network_id)) fdb_network_ports = ( l2pop_db.get_nondistributed_active_network_ports( session, network_id)) ports = agent_fdb_entries[network_id]['ports'] ports.update(self._get_tunnels( fdb_network_ports + tunnel_network_ports, agent.host)) for agent_ip, fdbs in ports.items(): for binding, agent in fdb_network_ports: if l2pop_db.get_agent_ip(agent) == agent_ip: fdbs.extend(self._get_port_fdb_entries(binding.port)) return agent_fdb_entries def _get_tunnels(self, tunnel_network_ports, exclude_host): agents = {} for _, agent in tunnel_network_ports: if agent.host == exclude_host: continue ip = l2pop_db.get_agent_ip(agent) if not ip: LOG.debug("Unable to retrieve the agent ip, check " "the agent %s configuration.", agent.host) continue if ip not in agents: agents[ip] = [const.FLOODING_ENTRY] return agents def update_port_down(self, context): port = context.current agent_host = context.host l3plugin = manager.NeutronManager.get_service_plugins().get( service_constants.L3_ROUTER_NAT) # when agent transitions to backup, don't remove flood flows if agent_host and l3plugin and getattr( l3plugin, "list_router_ids_on_host", None): admin_context = n_context.get_admin_context() if l3plugin.list_router_ids_on_host( admin_context, agent_host, [port['device_id']]): return fdb_entries = self._get_agent_fdb( 
            context.bottom_bound_segment, port, agent_host)
        self.L2populationAgentNotify.remove_fdb_entries(
            self.rpc_ctx, fdb_entries)

    def update_port_up(self, context):
        port = context.current
        agent_host = context.host
        session = db_api.get_session()
        agent = l2pop_db.get_agent_by_host(session, agent_host)
        if not agent:
            LOG.warning(_LW("Unable to retrieve active L2 agent on host %s"),
                        agent_host)
            return
        network_id = port['network_id']
        agent_active_ports = l2pop_db.get_agent_network_active_port_count(
            session, agent_host, network_id)
        agent_ip = l2pop_db.get_agent_ip(agent)
        segment = context.bottom_bound_segment
        if not self._validate_segment(segment, port['id'], agent):
            return
        other_fdb_entries = self._get_fdb_entries_template(
            segment, agent_ip, network_id)
        other_fdb_ports = other_fdb_entries[network_id]['ports']

        if agent_active_ports == 1 or (l2pop_db.get_agent_uptime(agent) <
                                       cfg.CONF.l2pop.agent_boot_time):
            # First port activated on current agent in this network,
            # we have to provide it with the whole list of fdb entries
            agent_fdb_entries = self._create_agent_fdb(session,
                                                       agent,
                                                       segment,
                                                       network_id)

            # And notify other agents to add flooding entry
            other_fdb_ports[agent_ip].append(const.FLOODING_ENTRY)

            if agent_fdb_entries[network_id]['ports'].keys():
                self.L2populationAgentNotify.add_fdb_entries(
                    self.rpc_ctx, agent_fdb_entries, agent_host)

        # Notify other agents to add fdb rule for current port
        if (port['device_owner'] != const.DEVICE_OWNER_DVR_INTERFACE and
            not l3_hamode_db.is_ha_router_port(port['device_owner'],
                                               port['device_id'])):
            other_fdb_ports[agent_ip] += self._get_port_fdb_entries(port)

        self.L2populationAgentNotify.add_fdb_entries(self.rpc_ctx,
                                                     other_fdb_entries)

    def _get_agent_fdb(self, segment, port, agent_host):
        if not agent_host:
            return

        network_id = port['network_id']

        session = db_api.get_session()
        agent_active_ports = l2pop_db.get_agent_network_active_port_count(
            session, agent_host, network_id)

        agent = l2pop_db.get_agent_by_host(db_api.get_session(),
                                           agent_host)
        if not self._validate_segment(segment, port['id'], agent):
            return
        agent_ip = l2pop_db.get_agent_ip(agent)
        other_fdb_entries = self._get_fdb_entries_template(
            segment, agent_ip, port['network_id'])
        if agent_active_ports == 0:
            # Agent is removing its last activated port in this network;
            # other agents need to be notified to delete their flooding
            # entry.
            other_fdb_entries[network_id]['ports'][agent_ip].append(
                const.FLOODING_ENTRY)
        # Notify other agents to remove fdb rules for current port
        if (port['device_owner'] != const.DEVICE_OWNER_DVR_INTERFACE and
            not l3_hamode_db.is_ha_router_port(port['device_owner'],
                                               port['device_id'])):
            fdb_entries = self._get_port_fdb_entries(port)
            other_fdb_entries[network_id]['ports'][agent_ip] += fdb_entries

        return other_fdb_entries

    @classmethod
    def _get_fdb_entries_template(cls, segment, agent_ip, network_id):
        return {
            network_id:
                {'segment_id': segment['segmentation_id'],
                 'network_type': segment['network_type'],
                 'ports': {agent_ip: []}}}
neutron-8.4.0/neutron/plugins/ml2/drivers/l2pop/__init__.py0000664000567000056710000000000013044372736025045 0ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/plugins/ml2/drivers/l2pop/rpc_manager/0000775000567000056710000000000013044373210025210 5ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/plugins/ml2/drivers/l2pop/rpc_manager/l2population_rpc.py0000664000567000056710000003074613044372760031075 0ustar jenkinsjenkins00000000000000# Copyright (c) 2013 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import abc
import itertools

from oslo_config import cfg
from oslo_log import helpers as log_helpers
import six

from neutron.common import constants as n_const
from neutron.plugins.ml2.drivers.l2pop import rpc as l2pop_rpc


@six.add_metaclass(abc.ABCMeta)
class L2populationRpcCallBackMixin(object):
    '''General mixin class of L2-population RPC call back.

    The following methods are called through RPC.
        add_fdb_entries(), remove_fdb_entries(), update_fdb_entries()
    The following methods are used in an agent as internal methods.
        fdb_add(), fdb_remove(), fdb_update()
    '''

    @log_helpers.log_method_call
    def add_fdb_entries(self, context, fdb_entries, host=None):
        if not host or host == cfg.CONF.host:
            self.fdb_add(context, self._unmarshall_fdb_entries(fdb_entries))

    @log_helpers.log_method_call
    def remove_fdb_entries(self, context, fdb_entries, host=None):
        if not host or host == cfg.CONF.host:
            self.fdb_remove(context,
                            self._unmarshall_fdb_entries(fdb_entries))

    @log_helpers.log_method_call
    def update_fdb_entries(self, context, fdb_entries, host=None):
        if not host or host == cfg.CONF.host:
            self.fdb_update(context,
                            self._unmarshall_fdb_entries(fdb_entries))

    @staticmethod
    def _unmarshall_fdb_entries(fdb_entries):
        """Prepares fdb_entries from JSON.

        All methods in this class that receive messages should call this to
        unmarshall fdb_entries from the wire.

        :param fdb_entries: Original fdb_entries data-structure. Looks like:
            {
                <network_id>: {
                    ...,
                    'ports': {
                        <agent_ip>: [ [<mac>, <ip>], ... ],
                        ...

            Or in the case of an update:
            {
                'chg_ip': {
                    '<network_id>': {
                        '<agent_ip>': {
                            'before': [ [<mac>, <ip>], ... ],
                            'after' : [ [<mac>, <ip>], ... ],
                        },
                        '<another agent_ip>': {
                            'before': ...

        :returns: Deep copy with [<mac>, <ip>] converted to PortInfo
        """
        unmarshalled = dict(fdb_entries)
        chg_ip_nets = [net.values() for net in
                       unmarshalled.get('chg_ip', {}).values()]
        for agent in itertools.chain.from_iterable(chg_ip_nets):
            for when in ('before', 'after'):
                if when in agent:
                    agent[when] = [l2pop_rpc.PortInfo(*pi)
                                   for pi in agent[when]]
        for value in unmarshalled.values():
            if 'ports' in value:
                value['ports'] = dict(
                    (address, [l2pop_rpc.PortInfo(*pi) for pi in port_infos])
                    for address, port_infos in value['ports'].items()
                )
        return unmarshalled

    @abc.abstractmethod
    def fdb_add(self, context, fdb_entries):
        pass

    @abc.abstractmethod
    def fdb_remove(self, context, fdb_entries):
        pass

    @abc.abstractmethod
    def fdb_update(self, context, fdb_entries):
        pass


class L2populationRpcCallBackTunnelMixin(L2populationRpcCallBackMixin):
    '''Mixin class of L2-population call back for Tunnel.

    The following methods are all used in agents as internal methods.

    Some of the methods in this class use Local VLAN Mapping, aka lvm.
    It's a python object with at least the following attributes:

    ============ =========================================================
    Attribute    Description
    ============ =========================================================
    vlan         An identifier used by the agent to identify a neutron
                 network.
    network_type A network type found in neutron.plugins.common.constants.
    ============ =========================================================

    NOTE(yamamoto): "Local VLAN" is an OVS-agent term. OVS-agent internally
    uses 802.1q VLAN tagging to isolate networks. While this class inherited
    the terms from OVS-agent, it does not assume the specific underlying
    technologies. E.g. this class is also used by ofagent, where a different
    mechanism is used.
    '''

    @abc.abstractmethod
    def add_fdb_flow(self, br, port_info, remote_ip, lvm, ofport):
        '''Add flow for fdb

        This method is assumed to be used by method fdb_add_tun. It is
        expected to add a flow entry that sends packets to the specified
        port on the bridge, and it may also update information for the
        local ARP responder.

        :param br: the bridge on which add_fdb_flow should be applied.
        :param port_info: PortInfo instance to include mac and ip.
            .mac_address
            .ip_address
        :param remote_ip: remote ip address.
        :param lvm: a local VLAN map of network.
        :param ofport: a port to add.
        '''
        pass

    @abc.abstractmethod
    def del_fdb_flow(self, br, port_info, remote_ip, lvm, ofport):
        '''Delete flow for fdb

        This method is assumed to be used by method fdb_remove_tun. It is
        expected to delete the flow entry that sends packets to the
        specified port from the bridge, and it may also remove information
        for the local ARP responder.

        :param br: the bridge on which del_fdb_flow should be applied.
        :param port_info: PortInfo instance to include mac and ip.
            .mac_address
            .ip_address
        :param remote_ip: remote ip address.
        :param lvm: local VLAN map of a network. See add_fdb_flow for more
            explanation.
        :param ofport: a port to delete.
        '''
        pass

    @abc.abstractmethod
    def setup_tunnel_port(self, br, remote_ip, network_type):
        '''Setup an added tunnel port.

        This method is assumed to be used by method fdb_add_tun. It prepares
        for the add_fdb_flow call, mainly by adding a port to the bridge;
        any additional preparation of the bridge may be done here as well.

        :param br: the bridge on which setup_tunnel_port should be applied.
        :param remote_ip: an ip for a port to setup.
        :param network_type: a type of a network.
        :returns: an ofport value. A value of 0 means the port is
            unavailable.
        '''
        pass

    @abc.abstractmethod
    def cleanup_tunnel_port(self, br, tun_ofport, tunnel_type):
        '''Clean up a deleted tunnel port.

        This method is assumed to be used by method fdb_remove_tun. It
        cleans up after the del_fdb_flow call, mainly by deleting a port
        from the bridge; any additional cleanup of the bridge may be done
        here as well.

        :param br: the bridge on which cleanup_tunnel_port should be
            applied.
        :param tun_ofport: a port value to cleanup.
        :param tunnel_type: a type of a tunnel.
        '''
        pass

    @abc.abstractmethod
    def setup_entry_for_arp_reply(self, br, action, local_vid, mac_address,
                                  ip_address):
        '''Operate on the ARP response information.

        Update MAC/IPv4 associations, which are typically used by the local
        ARP responder. For example, the OVS agent sets up flow entries to
        perform ARP responses.

        :param br: the bridge on which setup_entry_for_arp_reply should be
            applied.
        :param action: add/remove flow for arp response information.
        :param local_vid: id in local VLAN map of the network's ARP entry.
        :param mac_address: MAC string value.
        :param ip_address: IP string value.
        '''
        pass

    def get_agent_ports(self, fdb_entries, local_vlan_map):
        """Generator to yield port info.

        For each known (i.e. found in local_vlan_map) network in
        fdb_entries, yield a (lvm, fdb_entries[network_id]['ports']) pair.

        :param fdb_entries: l2pop fdb entries
        :param local_vlan_map: A dict to map network_id to
            the corresponding lvm entry.
""" for network_id, values in fdb_entries.items(): lvm = local_vlan_map.get(network_id) if lvm is None: continue agent_ports = values.get('ports') yield (lvm, agent_ports) @log_helpers.log_method_call def fdb_add_tun(self, context, br, lvm, agent_ports, lookup_port): for remote_ip, ports in agent_ports.items(): # Ensure we have a tunnel port with this remote agent ofport = lookup_port(lvm.network_type, remote_ip) if not ofport: ofport = self.setup_tunnel_port(br, remote_ip, lvm.network_type) if ofport == 0: continue for port in ports: self.add_fdb_flow(br, port, remote_ip, lvm, ofport) @log_helpers.log_method_call def fdb_remove_tun(self, context, br, lvm, agent_ports, lookup_port): for remote_ip, ports in agent_ports.items(): ofport = lookup_port(lvm.network_type, remote_ip) if not ofport: continue for port in ports: self.del_fdb_flow(br, port, remote_ip, lvm, ofport) if port == n_const.FLOODING_ENTRY: # Check if this tunnel port is still used self.cleanup_tunnel_port(br, ofport, lvm.network_type) @log_helpers.log_method_call def fdb_update(self, context, fdb_entries): '''Call methods named '_fdb_'. This method assumes that methods '_fdb_' are defined in class. Currently the following actions are available. chg_ip ''' for action, values in fdb_entries.items(): method = '_fdb_' + action if not hasattr(self, method): raise NotImplementedError() getattr(self, method)(context, values) @log_helpers.log_method_call def fdb_chg_ip_tun(self, context, br, fdb_entries, local_ip, local_vlan_map): '''fdb update when an IP of a port is updated. The ML2 l2-pop mechanism driver sends an fdb update rpc message when an IP of a port is updated. :param context: RPC context. :param br: represent the bridge on which fdb_chg_ip_tun should be applied. :param fdb_entries: fdb dicts that contain all mac/IP information per agent and network. {'net1': {'agent_ip': {'before': PortInfo, 'after': PortInfo } } 'net2': ... } PortInfo has .mac_address and .ip_address attrs. :param local_ip: local IP address of this agent. :param local_vlan_map: A dict to map network_id to the corresponding lvm entry. ''' for network_id, agent_ports in fdb_entries.items(): lvm = local_vlan_map.get(network_id) if not lvm: continue for agent_ip, state in agent_ports.items(): if agent_ip == local_ip: continue after = state.get('after', []) for mac_ip in after: self.setup_entry_for_arp_reply(br, 'add', lvm.vlan, mac_ip.mac_address, mac_ip.ip_address) before = state.get('before', []) for mac_ip in before: self.setup_entry_for_arp_reply(br, 'remove', lvm.vlan, mac_ip.mac_address, mac_ip.ip_address) neutron-8.4.0/neutron/plugins/ml2/drivers/l2pop/rpc_manager/__init__.py0000664000567000056710000000000013044372736027323 0ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/plugins/ml2/drivers/l2pop/rpc.py0000664000567000056710000000662313044372736024113 0ustar jenkinsjenkins00000000000000# Copyright (c) 2013 OpenStack Foundation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import collections

from oslo_log import log as logging
import oslo_messaging

from neutron.common import rpc as n_rpc
from neutron.common import topics

LOG = logging.getLogger(__name__)

PortInfo = collections.namedtuple("PortInfo", "mac_address ip_address")


class L2populationAgentNotifyAPI(object):

    def __init__(self, topic=topics.AGENT):
        self.topic = topic
        self.topic_l2pop_update = topics.get_topic_name(topic,
                                                        topics.L2POPULATION,
                                                        topics.UPDATE)
        target = oslo_messaging.Target(topic=topic, version='1.0')
        self.client = n_rpc.get_client(target)

    def _notification_fanout(self, context, method, fdb_entries):
        LOG.debug('Fanout notify l2population agents at %(topic)s '
                  'the message %(method)s with %(fdb_entries)s',
                  {'topic': self.topic,
                   'method': method,
                   'fdb_entries': fdb_entries})

        cctxt = self.client.prepare(topic=self.topic_l2pop_update,
                                    fanout=True)
        cctxt.cast(context, method, fdb_entries=fdb_entries)

    def _notification_host(self, context, method, fdb_entries, host):
        LOG.debug('Notify l2population agent %(host)s at %(topic)s the '
                  'message %(method)s with %(fdb_entries)s',
                  {'host': host,
                   'topic': self.topic,
                   'method': method,
                   'fdb_entries': fdb_entries})

        cctxt = self.client.prepare(topic=self.topic_l2pop_update,
                                    server=host)
        cctxt.cast(context, method, fdb_entries=fdb_entries)

    def add_fdb_entries(self, context, fdb_entries, host=None):
        if fdb_entries:
            if host:
                self._notification_host(context, 'add_fdb_entries',
                                        fdb_entries, host)
            else:
                self._notification_fanout(context, 'add_fdb_entries',
                                          fdb_entries)

    def remove_fdb_entries(self, context, fdb_entries, host=None):
        if fdb_entries:
            if host:
                self._notification_host(context, 'remove_fdb_entries',
                                        fdb_entries, host)
            else:
                self._notification_fanout(context, 'remove_fdb_entries',
                                          fdb_entries)

    def update_fdb_entries(self, context, fdb_entries, host=None):
        if fdb_entries:
            if host:
                self._notification_host(context, 'update_fdb_entries',
                                        fdb_entries, host)
            else:
                self._notification_fanout(context, 'update_fdb_entries',
                                          fdb_entries)
neutron-8.4.0/neutron/plugins/ml2/drivers/l2pop/README0000664000567000056710000000371013044372736023627 0ustar jenkinsjenkins00000000000000Neutron ML2 l2 population Mechanism Drivers

l2 population (l2pop) mechanism drivers implement an ML2 mechanism driver
that improves the overlay implementations of the open source plugins (VXLAN
with Linux bridge and GRE/VXLAN with OVS). This mechanism driver is
implemented in ML2 to propagate the forwarding information among agents
using a common RPC API. More information can be found on the wiki page [1].

VXLAN Linux kernel:
-------------------
The VXLAN Linux kernel module provides all the functionality necessary to
populate the forwarding table and the local ARP responder tables. The module
first appeared in release 3.7 of the vanilla Linux kernel as experimental:
- 3.8: first stable release, no edge replication (multicast necessary),
- 3.9: edge replication only for broadcast packets,
- 3.11: edge replication for broadcast, multicast and unknown packets.

Note: Some distributions (like RHEL) have backported this module to earlier
kernel versions.

OpenvSwitch:
------------
The OVS OpenFlow tables provide all of the necessary functionality to
populate the forwarding table and local ARP responder tables. A wiki page
describes how the flow tables evolved on OVS agents:
- [2] without local ARP responder
- [3] with local ARP responder.

/!\ This functionality is only available since the OVS 2.1 development
branch. It can be disabled (it is enabled by default) through the
'arp_responder' flag.
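For illustration only, a minimal sketch of the options involved when
enabling this mechanism driver together with the OVS agent (file names and
locations vary by deployment):

    [ml2]
    mechanism_drivers = openvswitch,l2population

    [agent]
    l2_population = True
    arp_responder = True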
/!\ Note: A difference persists between the LB and OVS agents when they are used with the l2-pop mechanism driver (and local ARP responder available). The LB agent will drop unknown unicast (VXLAN bridge mode), whereas the OVS agent will flood it. [1] https://wiki.openstack.org/wiki/L2population_blueprint [2] https://wiki.openstack.org/wiki/Ovs-flow-logic#OVS_flows_logic [3] https://wiki.openstack.org/wiki/Ovs-flow-logic#OVS_flows_logic_with_local_ARP_responderneutron-8.4.0/neutron/plugins/ml2/drivers/l2pop/db.py0000664000567000056710000001744213044372760023712 0ustar jenkinsjenkins00000000000000# Copyright (c) 2013 OpenStack Foundation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_serialization import jsonutils from oslo_utils import timeutils from neutron.common import constants as const from neutron.db import agents_db from neutron.db import l3_hamode_db from neutron.db import models_v2 from neutron.plugins.ml2 import models as ml2_models HA_ROUTER_PORTS = (const.DEVICE_OWNER_ROUTER_INTF, const.DEVICE_OWNER_ROUTER_SNAT) def get_agent_ip_by_host(session, agent_host): agent = get_agent_by_host(session, agent_host) if agent: return get_agent_ip(agent) def get_agent_ip(agent): configuration = jsonutils.loads(agent.configurations) return configuration.get('tunneling_ip') def get_agent_uptime(agent): return timeutils.delta_seconds(agent.started_at, agent.heartbeat_timestamp) def get_agent_tunnel_types(agent): configuration = jsonutils.loads(agent.configurations) return configuration.get('tunnel_types') def get_agent_l2pop_network_types(agent): configuration = jsonutils.loads(agent.configurations) return configuration.get('l2pop_network_types') def get_agent_by_host(session, agent_host): """Return a L2 agent on the host.""" with session.begin(subtransactions=True): query = session.query(agents_db.Agent) query = query.filter(agents_db.Agent.host == agent_host) for agent in query: if get_agent_ip(agent): return agent def _get_active_network_ports(session, network_id): with session.begin(subtransactions=True): query = session.query(ml2_models.PortBinding, agents_db.Agent) query = query.join(agents_db.Agent, agents_db.Agent.host == ml2_models.PortBinding.host) query = query.join(models_v2.Port) query = query.filter(models_v2.Port.network_id == network_id, models_v2.Port.status == const.PORT_STATUS_ACTIVE) return query def _ha_router_interfaces_on_network_query(session, network_id): query = session.query(models_v2.Port) query = query.join(l3_hamode_db.L3HARouterAgentPortBinding, l3_hamode_db.L3HARouterAgentPortBinding.router_id == models_v2.Port.device_id) return query.filter( models_v2.Port.network_id == network_id, models_v2.Port.device_owner.in_(HA_ROUTER_PORTS)) def _get_ha_router_interface_ids(session, network_id): query = _ha_router_interfaces_on_network_query(session, network_id) return query.from_self(models_v2.Port.id).distinct() def get_nondistributed_active_network_ports(session, network_id): query = _get_active_network_ports(session, network_id) # Exclude DVR and HA 
router interfaces query = query.filter(models_v2.Port.device_owner != const.DEVICE_OWNER_DVR_INTERFACE) ha_iface_ids_query = _get_ha_router_interface_ids(session, network_id) query = query.filter(models_v2.Port.id.notin_(ha_iface_ids_query)) return [(bind, agent) for bind, agent in query.all() if get_agent_ip(agent)] def get_dvr_active_network_ports(session, network_id): with session.begin(subtransactions=True): query = session.query(ml2_models.DVRPortBinding, agents_db.Agent) query = query.join(agents_db.Agent, agents_db.Agent.host == ml2_models.DVRPortBinding.host) query = query.join(models_v2.Port) query = query.filter(models_v2.Port.network_id == network_id, models_v2.Port.status == const.PORT_STATUS_ACTIVE, models_v2.Port.device_owner == const.DEVICE_OWNER_DVR_INTERFACE) return [(bind, agent) for bind, agent in query.all() if get_agent_ip(agent)] def get_distributed_active_network_ports(session, network_id): return (get_dvr_active_network_ports(session, network_id) + get_ha_active_network_ports(session, network_id)) def get_ha_active_network_ports(session, network_id): agents = get_ha_agents(session, network_id=network_id) return [(None, agent) for agent in agents] def get_ha_agents(session, network_id=None, router_id=None): query = session.query(agents_db.Agent.host).distinct() query = query.join(l3_hamode_db.L3HARouterAgentPortBinding, l3_hamode_db.L3HARouterAgentPortBinding.l3_agent_id == agents_db.Agent.id) if router_id: query = query.filter( l3_hamode_db.L3HARouterAgentPortBinding.router_id == router_id) elif network_id: query = query.join(models_v2.Port, models_v2.Port.device_id == l3_hamode_db.L3HARouterAgentPortBinding.router_id) query = query.filter(models_v2.Port.network_id == network_id, models_v2.Port.status == const.PORT_STATUS_ACTIVE, models_v2.Port.device_owner.in_(HA_ROUTER_PORTS)) else: return [] # L3HARouterAgentPortBinding will have l3 agent ids of hosting agents. # But we need l2 agent(for tunneling ip) while creating FDB entries. 
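    # (The host list selected above therefore only identifies candidate
    # hosts; the full L2 agent rows are fetched below because their
    # 'configurations' carry the tunneling_ip that get_agent_ip() extracts.)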
agents_query = session.query(agents_db.Agent) agents_query = agents_query.filter(agents_db.Agent.host.in_(query)) return [agent for agent in agents_query if get_agent_ip(agent)] def get_ha_agents_by_router_id(session, router_id): return get_ha_agents(session, router_id=router_id) def get_agent_network_active_port_count(session, agent_host, network_id): with session.begin(subtransactions=True): query = session.query(models_v2.Port) query1 = query.join(ml2_models.PortBinding) query1 = query1.filter(models_v2.Port.network_id == network_id, models_v2.Port.status == const.PORT_STATUS_ACTIVE, models_v2.Port.device_owner != const.DEVICE_OWNER_DVR_INTERFACE, ml2_models.PortBinding.host == agent_host) ha_iface_ids_query = _get_ha_router_interface_ids(session, network_id) query1 = query1.filter(models_v2.Port.id.notin_(ha_iface_ids_query)) ha_port_count = get_ha_router_active_port_count( session, agent_host, network_id) query2 = query.join(ml2_models.DVRPortBinding) query2 = query2.filter(models_v2.Port.network_id == network_id, ml2_models.DVRPortBinding.status == const.PORT_STATUS_ACTIVE, models_v2.Port.device_owner == const.DEVICE_OWNER_DVR_INTERFACE, ml2_models.DVRPortBinding.host == agent_host) return (query1.count() + query2.count() + ha_port_count) def get_ha_router_active_port_count(session, agent_host, network_id): # Return num of HA router interfaces on the given network and host query = _ha_router_interfaces_on_network_query(session, network_id) query = query.filter(models_v2.Port.status == const.PORT_STATUS_ACTIVE) query = query.join(agents_db.Agent) query = query.filter(agents_db.Agent.host == agent_host) return query.count() neutron-8.4.0/neutron/plugins/ml2/drivers/type_flat.py0000664000567000056710000001332513044372760024254 0ustar jenkinsjenkins00000000000000# Copyright (c) 2013 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import cfg from oslo_db import exception as db_exc from oslo_log import log import six import sqlalchemy as sa from neutron._i18n import _, _LI, _LW from neutron.common import exceptions as exc from neutron.db import model_base from neutron.plugins.common import constants as p_const from neutron.plugins.ml2 import driver_api as api from neutron.plugins.ml2.drivers import helpers LOG = log.getLogger(__name__) flat_opts = [ cfg.ListOpt('flat_networks', default='*', help=_("List of physical_network names with which flat " "networks can be created. Use default '*' to allow " "flat networks with arbitrary physical_network names. " "Use an empty list to disable flat networks.")) ] cfg.CONF.register_opts(flat_opts, "ml2_type_flat") class FlatAllocation(model_base.BASEV2): """Represent persistent allocation state of a physical network. If a record exists for a physical network, then that physical network has been allocated as a flat network. 
""" __tablename__ = 'ml2_flat_allocations' physical_network = sa.Column(sa.String(64), nullable=False, primary_key=True) class FlatTypeDriver(helpers.BaseTypeDriver): """Manage state for flat networks with ML2. The FlatTypeDriver implements the 'flat' network_type. Flat network segments provide connectivity between VMs and other devices using any connected IEEE 802.1D conformant physical_network, without the use of VLAN tags, tunneling, or other segmentation mechanisms. Therefore at most one flat network segment can exist on each available physical_network. """ def __init__(self): super(FlatTypeDriver, self).__init__() self._parse_networks(cfg.CONF.ml2_type_flat.flat_networks) def _parse_networks(self, entries): self.flat_networks = entries if '*' in self.flat_networks: LOG.info(_LI("Arbitrary flat physical_network names allowed")) self.flat_networks = None elif not self.flat_networks: LOG.info(_LI("Flat networks are disabled")) else: LOG.info(_LI("Allowable flat physical_network names: %s"), self.flat_networks) def get_type(self): return p_const.TYPE_FLAT def initialize(self): LOG.info(_LI("ML2 FlatTypeDriver initialization complete")) def is_partial_segment(self, segment): return False def validate_provider_segment(self, segment): physical_network = segment.get(api.PHYSICAL_NETWORK) if not physical_network: msg = _("physical_network required for flat provider network") raise exc.InvalidInput(error_message=msg) if self.flat_networks is not None and not self.flat_networks: msg = _("Flat provider networks are disabled") raise exc.InvalidInput(error_message=msg) if self.flat_networks and physical_network not in self.flat_networks: msg = (_("physical_network '%s' unknown for flat provider network") % physical_network) raise exc.InvalidInput(error_message=msg) for key, value in six.iteritems(segment): if value and key not in [api.NETWORK_TYPE, api.PHYSICAL_NETWORK]: msg = _("%s prohibited for flat provider network") % key raise exc.InvalidInput(error_message=msg) def reserve_provider_segment(self, session, segment): physical_network = segment[api.PHYSICAL_NETWORK] with session.begin(subtransactions=True): try: LOG.debug("Reserving flat network on physical " "network %s", physical_network) alloc = FlatAllocation(physical_network=physical_network) alloc.save(session) except db_exc.DBDuplicateEntry: raise exc.FlatNetworkInUse( physical_network=physical_network) segment[api.MTU] = self.get_mtu(alloc.physical_network) return segment def allocate_tenant_segment(self, session): # Tenant flat networks are not supported. return def release_segment(self, session, segment): physical_network = segment[api.PHYSICAL_NETWORK] with session.begin(subtransactions=True): count = (session.query(FlatAllocation). filter_by(physical_network=physical_network). 
delete()) if count: LOG.debug("Releasing flat network on physical network %s", physical_network) else: LOG.warning(_LW("No flat network found on physical network %s"), physical_network) def get_mtu(self, physical_network): seg_mtu = super(FlatTypeDriver, self).get_mtu() mtu = [] if seg_mtu > 0: mtu.append(seg_mtu) if physical_network in self.physnet_mtus: mtu.append(int(self.physnet_mtus[physical_network])) return min(mtu) if mtu else 0 neutron-8.4.0/neutron/plugins/ml2/drivers/macvtap/0000775000567000056710000000000013044373210023331 5ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/plugins/ml2/drivers/macvtap/__init__.py0000664000567000056710000000000013044372736025444 0ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/plugins/ml2/drivers/macvtap/mech_driver/0000775000567000056710000000000013044373210025620 5ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/plugins/ml2/drivers/macvtap/mech_driver/__init__.py0000664000567000056710000000000013044372736027733 0ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/plugins/ml2/drivers/macvtap/mech_driver/mech_macvtap.py0000664000567000056710000000616213044372760030637 0ustar jenkinsjenkins00000000000000# Copyright (c) 2016 IBM Corp. # # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_log import log from neutron.common import constants from neutron.extensions import portbindings from neutron.plugins.common import constants as p_constants from neutron.plugins.ml2 import driver_api as api from neutron.plugins.ml2.drivers.macvtap import macvtap_common from neutron.plugins.ml2.drivers import mech_agent LOG = log.getLogger(__name__) MACVTAP_MODE_BRIDGE = 'bridge' class MacvtapMechanismDriver(mech_agent.SimpleAgentMechanismDriverBase): """Attach to networks using Macvtap L2 agent. The MacvtapMechanismDriver integrates the ml2 plugin with the macvtap L2 agent. Port binding with this driver requires the macvtap agent to be running on the port's host, and that agent to have connectivity to at least one segment of the port's network. 
""" def __init__(self): super(MacvtapMechanismDriver, self).__init__( constants.AGENT_TYPE_MACVTAP, portbindings.VIF_TYPE_MACVTAP, {portbindings.CAP_PORT_FILTER: False}) def get_allowed_network_types(self, agent): return [p_constants.TYPE_FLAT, p_constants.TYPE_VLAN] def get_mappings(self, agent): return agent['configurations'].get('interface_mappings', {}) def check_vlan_transparency(self, context): """Macvtap driver vlan transparency support.""" return False def try_to_bind_segment_for_agent(self, context, segment, agent): if self.check_segment_for_agent(segment, agent): vif_details_segment = self.vif_details mappings = self.get_mappings(agent) interface = mappings[segment['physical_network']] network_type = segment[api.NETWORK_TYPE] if network_type == p_constants.TYPE_VLAN: vlan_id = segment[api.SEGMENTATION_ID] macvtap_src = macvtap_common.get_vlan_device_name(interface, vlan_id) vif_details_segment['vlan'] = vlan_id else: macvtap_src = interface vif_details_segment['physical_interface'] = interface vif_details_segment['macvtap_source'] = macvtap_src vif_details_segment['macvtap_mode'] = MACVTAP_MODE_BRIDGE LOG.debug("Macvtap vif_details added to context binding: %s", vif_details_segment) context.set_binding(segment[api.ID], self.vif_type, vif_details_segment) return True return False neutron-8.4.0/neutron/plugins/ml2/drivers/macvtap/agent/0000775000567000056710000000000013044373210024427 5ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/plugins/ml2/drivers/macvtap/agent/config.py0000664000567000056710000000256513044372760026267 0ustar jenkinsjenkins00000000000000# Copyright (c) 2016 IBM Corp. # # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import cfg DEFAULT_INTERFACE_MAPPINGS = [] macvtap_opts = [ cfg.ListOpt('physical_interface_mappings', default=DEFAULT_INTERFACE_MAPPINGS, help=_("Comma-separated list of " ": tuples " "mapping physical network names to the agent's " "node-specific physical network interfaces to be used " "for flat and VLAN networks. All physical networks " "listed in network_vlan_ranges on the server should " "have mappings to appropriate interfaces on each " "agent.")), ] cfg.CONF.register_opts(macvtap_opts, "macvtap") neutron-8.4.0/neutron/plugins/ml2/drivers/macvtap/agent/__init__.py0000664000567000056710000000000013044372736026542 0ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/plugins/ml2/drivers/macvtap/agent/macvtap_neutron_agent.py0000664000567000056710000002065413044372760031404 0ustar jenkinsjenkins00000000000000# Copyright (c) 2016 IBM Corp. # # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. import os import sys from oslo_config import cfg from oslo_log import log as logging import oslo_messaging from oslo_service import service from neutron._i18n import _LE, _LI from neutron.agent.linux import ip_lib from neutron.agent.linux import utils from neutron.agent import securitygroups_rpc as sg_rpc from neutron.common import config as common_config from neutron.common import constants from neutron.common import topics from neutron.common import utils as n_utils from neutron.plugins.common import constants as p_constants from neutron.plugins.ml2.drivers.agent import _agent_manager_base as amb from neutron.plugins.ml2.drivers.agent import _common_agent as ca from neutron.plugins.ml2.drivers.macvtap.agent import config # noqa from neutron.plugins.ml2.drivers.macvtap import macvtap_common LOG = logging.getLogger(__name__) MACVTAP_AGENT_BINARY = "neutron-macvtap-agent" MACVTAP_FS = "/sys/class/net/" EXTENSION_DRIVER_TYPE = 'macvtap' class MacvtapRPCCallBack(sg_rpc.SecurityGroupAgentRpcCallbackMixin, amb.CommonAgentManagerRpcCallBackBase): # Set RPC API version to 1.0 by default. # history # 1.1 Support Security Group RPC # 1.3 Added param devices_to_update to security_groups_provider_updated # 1.4 Added support for network_update target = oslo_messaging.Target(version='1.4') def network_delete(self, context, **kwargs): LOG.debug("network_delete received") network_id = kwargs.get('network_id') if network_id not in self.network_map: LOG.error(_LE("Network %s is not available."), network_id) return segment = self.network_map.get(network_id) if segment and segment.network_type == p_constants.TYPE_VLAN: if_mappings = self.agent.mgr.interface_mappings vlan_device_name = macvtap_common.get_vlan_device_name( if_mappings[segment.physical_network], str(segment.segmentation_id)) ip_dev = ip_lib.IPDevice(vlan_device_name) if ip_dev.exists(): LOG.debug("Delete %s", ip_dev.name) ip_dev.link.delete() else: LOG.debug("Cannot delete vlan device %s; it does not exist", vlan_device_name) def port_update(self, context, **kwargs): port = kwargs['port'] LOG.debug("port_update received for port %s ", port) mac = port['mac_address'] # Put the device name in the updated_devices set. # Do not store port details, as if they're used for processing # notifications there is no guarantee the notifications are # processed in the same order as the relevant API requests. self.updated_devices.add(mac) class MacvtapManager(amb.CommonAgentManagerBase): def __init__(self, interface_mappings): self.interface_mappings = interface_mappings self.validate_interface_mappings() self.mac_device_name_mappings = dict() def validate_interface_mappings(self): for physnet, interface in self.interface_mappings.items(): if not ip_lib.device_exists(interface): LOG.error(_LE("Interface %(intf)s for physical network " "%(net)s does not exist. 
Agent terminated!"), {'intf': interface, 'net': physnet}) sys.exit(1) def ensure_port_admin_state(self, device, admin_state_up): LOG.debug("Setting admin_state_up to %s for device %s", admin_state_up, device) dev = ip_lib.IPDevice(self.mac_device_name_mappings[device]) if admin_state_up: dev.link.set_up() else: dev.link.set_down() def get_agent_configurations(self): return {'interface_mappings': self.interface_mappings} def get_agent_id(self): devices = ip_lib.IPWrapper().get_devices(True) if devices: mac = utils.get_interface_mac(devices[0].name) return 'macvtap%s' % mac.replace(":", "") else: LOG.error(_LE("Unable to obtain MAC address for unique ID. " "Agent terminated!")) sys.exit(1) def get_devices_modified_timestamps(self, devices): # TODO(kevinbenton): this should be implemented to detect # rapid Nova instance rebuilds. return {} def get_all_devices(self): devices = set() all_device_names = os.listdir(MACVTAP_FS) # Refresh the mac_device_name mapping self.mac_device_name_mappings = dict() for device_name in all_device_names: if device_name.startswith(constants.MACVTAP_DEVICE_PREFIX): mac = utils.get_interface_mac(device_name) self.mac_device_name_mappings[mac] = device_name devices.add(mac) return devices def get_extension_driver_type(self): return EXTENSION_DRIVER_TYPE def get_rpc_callbacks(self, context, agent, sg_agent): return MacvtapRPCCallBack(context, agent, sg_agent) def get_rpc_consumers(self): consumers = [[topics.PORT, topics.UPDATE], [topics.NETWORK, topics.DELETE], [topics.SECURITY_GROUP, topics.UPDATE]] return consumers def plug_interface(self, network_id, network_segment, device, device_owner): # Setting ALLMULTICAST Flag on macvtap device to allow the guest # receiving traffic for arbitrary multicast addresses. # The alternative would be to let libvirt instantiate the macvtap # device with the 'trustGuestRxFilters' option. But doing so, the guest # would be able to change its mac address and therefore the mac # address of the macvtap device. dev = ip_lib.IPDevice(self.mac_device_name_mappings[device]) dev.link.set_allmulticast_on() return True def setup_arp_spoofing_protection(self, device, device_details): pass def delete_arp_spoofing_protection(self, devices): pass def delete_unreferenced_arp_protection(self, current_devices): pass def parse_interface_mappings(): try: interface_mappings = n_utils.parse_mappings( cfg.CONF.macvtap.physical_interface_mappings) LOG.info(_LI("Interface mappings: %s"), interface_mappings) return interface_mappings except ValueError as e: LOG.error(_LE("Parsing physical_interface_mappings failed: %s. " "Agent terminated!"), e) sys.exit(1) def validate_firewall_driver(): fw_driver = cfg.CONF.SECURITYGROUP.firewall_driver supported_fw_drivers = ['neutron.agent.firewall.NoopFirewallDriver', 'noop'] if fw_driver not in supported_fw_drivers: LOG.error(_LE('Unsupported configuration option for "SECURITYGROUP.' 'firewall_driver"! Only the NoopFirewallDriver is ' 'supported by macvtap agent, but "%s" is configured. ' 'Set the firewall_driver to "noop" and start the ' 'agent again. 
Agent terminated!'), fw_driver) sys.exit(1) def main(): common_config.init(sys.argv[1:]) common_config.setup_logging() validate_firewall_driver() interface_mappings = parse_interface_mappings() manager = MacvtapManager(interface_mappings) polling_interval = cfg.CONF.AGENT.polling_interval quitting_rpc_timeout = cfg.CONF.AGENT.quitting_rpc_timeout agent = ca.CommonAgentLoop(manager, polling_interval, quitting_rpc_timeout, constants.AGENT_TYPE_MACVTAP, MACVTAP_AGENT_BINARY) LOG.info(_LI("Agent initialized successfully, now running... ")) launcher = service.launch(cfg.CONF, agent) launcher.wait() neutron-8.4.0/neutron/plugins/ml2/drivers/macvtap/macvtap_common.py0000664000567000056710000000217513044372760026724 0ustar jenkinsjenkins00000000000000# Copyright (c) 2016 IBM Corp. # # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron.common import constants as n_const from neutron.plugins.common import utils as p_utils MAX_VLAN_POSTFIX_LEN = 5 def get_vlan_device_name(src_dev, vlan): """Generating the vlan device name.""" # Ensure that independent of the vlan len the same name prefix is used. src_dev = p_utils.get_interface_name(src_dev, max_len=n_const.DEVICE_NAME_MAX_LEN - MAX_VLAN_POSTFIX_LEN) return "%s.%s" % (src_dev, vlan) neutron-8.4.0/neutron/plugins/ml2/models.py0000664000567000056710000001221613044372760022070 0ustar jenkinsjenkins00000000000000# Copyright (c) 2013 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import sqlalchemy as sa from sqlalchemy import orm from neutron.db import model_base from neutron.db import models_v2 from neutron.extensions import portbindings BINDING_PROFILE_LEN = 4095 class NetworkSegment(model_base.BASEV2, model_base.HasId): """Represent persistent state of a network segment. A network segment is a portion of a neutron network with a specific physical realization. A neutron network can consist of one or more segments. """ __tablename__ = 'ml2_network_segments' network_id = sa.Column(sa.String(36), sa.ForeignKey('networks.id', ondelete="CASCADE"), nullable=False) network_type = sa.Column(sa.String(32), nullable=False) physical_network = sa.Column(sa.String(64)) segmentation_id = sa.Column(sa.Integer) is_dynamic = sa.Column(sa.Boolean, default=False, nullable=False, server_default=sa.sql.false()) segment_index = sa.Column(sa.Integer, nullable=False, server_default='0') class PortBinding(model_base.BASEV2): """Represent binding-related state of a port. 
A port binding stores the port attributes required for the portbindings extension, as well as internal ml2 state such as which MechanismDriver and which segment are used by the port binding. """ __tablename__ = 'ml2_port_bindings' port_id = sa.Column(sa.String(36), sa.ForeignKey('ports.id', ondelete="CASCADE"), primary_key=True) host = sa.Column(sa.String(255), nullable=False, default='', server_default='') vnic_type = sa.Column(sa.String(64), nullable=False, default=portbindings.VNIC_NORMAL, server_default=portbindings.VNIC_NORMAL) profile = sa.Column(sa.String(BINDING_PROFILE_LEN), nullable=False, default='', server_default='') vif_type = sa.Column(sa.String(64), nullable=False) vif_details = sa.Column(sa.String(4095), nullable=False, default='', server_default='') # Add a relationship to the Port model in order to instruct SQLAlchemy to # eagerly load port bindings port = orm.relationship( models_v2.Port, backref=orm.backref("port_binding", lazy='joined', uselist=False, cascade='delete')) class PortBindingLevel(model_base.BASEV2): """Represent each level of a port binding. Stores information associated with each level of an established port binding. Different levels might correspond to the host and ToR switch, for instance. """ __tablename__ = 'ml2_port_binding_levels' port_id = sa.Column(sa.String(36), sa.ForeignKey('ports.id', ondelete="CASCADE"), primary_key=True) host = sa.Column(sa.String(255), nullable=False, primary_key=True) level = sa.Column(sa.Integer, primary_key=True, autoincrement=False) driver = sa.Column(sa.String(64)) segment_id = sa.Column(sa.String(36), sa.ForeignKey('ml2_network_segments.id', ondelete="SET NULL")) class DVRPortBinding(model_base.BASEV2): """Represent binding-related state of a DVR port. Port binding for all the ports associated to a DVR identified by router_id. """ __tablename__ = 'ml2_dvr_port_bindings' port_id = sa.Column(sa.String(36), sa.ForeignKey('ports.id', ondelete="CASCADE"), primary_key=True) host = sa.Column(sa.String(255), nullable=False, primary_key=True) router_id = sa.Column(sa.String(36), nullable=True) vif_type = sa.Column(sa.String(64), nullable=False) vif_details = sa.Column(sa.String(4095), nullable=False, default='', server_default='') vnic_type = sa.Column(sa.String(64), nullable=False, default=portbindings.VNIC_NORMAL, server_default=portbindings.VNIC_NORMAL) profile = sa.Column(sa.String(BINDING_PROFILE_LEN), nullable=False, default='', server_default='') status = sa.Column(sa.String(16), nullable=False) # Add a relationship to the Port model in order to instruct SQLAlchemy to # eagerly load port bindings port = orm.relationship( models_v2.Port, backref=orm.backref("dvr_port_binding", lazy='joined', cascade='delete')) neutron-8.4.0/neutron/plugins/ml2/db.py0000664000567000056710000003304313044372760021173 0ustar jenkinsjenkins00000000000000# Copyright (c) 2013 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
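# Illustrative note (hypothetical values): the segment dictionaries built by
# _make_segment_dict() below use the keys defined in
# neutron.plugins.ml2.driver_api, e.g.
#
#     {'id': '<uuid>', 'network_type': 'vxlan',
#      'physical_network': None, 'segmentation_id': 1001}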
from oslo_db import exception as db_exc from oslo_log import log from oslo_utils import uuidutils import six from sqlalchemy import or_ from sqlalchemy.orm import exc from neutron._i18n import _LE, _LI from neutron.common import constants as n_const from neutron.db import models_v2 from neutron.db import securitygroups_db as sg_db from neutron.extensions import portbindings from neutron import manager from neutron.plugins.ml2 import driver_api as api from neutron.plugins.ml2 import models LOG = log.getLogger(__name__) # limit the number of port OR LIKE statements in one query MAX_PORTS_PER_QUERY = 500 def _make_segment_dict(record): """Make a segment dictionary out of a DB record.""" return {api.ID: record.id, api.NETWORK_TYPE: record.network_type, api.PHYSICAL_NETWORK: record.physical_network, api.SEGMENTATION_ID: record.segmentation_id} def add_network_segment(session, network_id, segment, segment_index=0, is_dynamic=False): with session.begin(subtransactions=True): record = models.NetworkSegment( id=uuidutils.generate_uuid(), network_id=network_id, network_type=segment.get(api.NETWORK_TYPE), physical_network=segment.get(api.PHYSICAL_NETWORK), segmentation_id=segment.get(api.SEGMENTATION_ID), segment_index=segment_index, is_dynamic=is_dynamic ) session.add(record) segment[api.ID] = record.id LOG.info(_LI("Added segment %(id)s of type %(network_type)s for network" " %(network_id)s"), {'id': record.id, 'network_type': record.network_type, 'network_id': record.network_id}) def get_network_segments(session, network_id, filter_dynamic=False): return get_networks_segments( session, [network_id], filter_dynamic)[network_id] def get_networks_segments(session, network_ids, filter_dynamic=False): with session.begin(subtransactions=True): query = (session.query(models.NetworkSegment). filter(models.NetworkSegment.network_id.in_(network_ids)). order_by(models.NetworkSegment.segment_index)) if filter_dynamic is not None: query = query.filter_by(is_dynamic=filter_dynamic) records = query.all() result = {net_id: [] for net_id in network_ids} for record in records: result[record.network_id].append(_make_segment_dict(record)) return result def get_segment_by_id(session, segment_id): with session.begin(subtransactions=True): try: record = (session.query(models.NetworkSegment). filter_by(id=segment_id). one()) return _make_segment_dict(record) except exc.NoResultFound: return def get_dynamic_segment(session, network_id, physical_network=None, segmentation_id=None): """Return a dynamic segment for the filters provided if one exists.""" with session.begin(subtransactions=True): query = (session.query(models.NetworkSegment). filter_by(network_id=network_id, is_dynamic=True)) if physical_network: query = query.filter_by(physical_network=physical_network) if segmentation_id: query = query.filter_by(segmentation_id=segmentation_id) record = query.first() if record: return _make_segment_dict(record) else: LOG.debug("No dynamic segment found for " "Network:%(network_id)s, " "Physical network:%(physnet)s, " "segmentation_id:%(segmentation_id)s", {'network_id': network_id, 'physnet': physical_network, 'segmentation_id': segmentation_id}) return None def delete_network_segment(session, segment_id): """Release a dynamic segment for the params provided if one exists.""" with session.begin(subtransactions=True): (session.query(models.NetworkSegment). 
filter_by(id=segment_id).delete()) def add_port_binding(session, port_id): with session.begin(subtransactions=True): record = models.PortBinding( port_id=port_id, vif_type=portbindings.VIF_TYPE_UNBOUND) session.add(record) return record def get_locked_port_and_binding(session, port_id): """Get port and port binding records for update within transaction.""" try: # REVISIT(rkukura): We need the Port and PortBinding records # to both be added to the session and locked for update. A # single joined query should work, but the combination of left # outer joins and postgresql doesn't seem to work. port = (session.query(models_v2.Port). enable_eagerloads(False). filter_by(id=port_id). with_lockmode('update'). one()) binding = (session.query(models.PortBinding). enable_eagerloads(False). filter_by(port_id=port_id). with_lockmode('update'). one()) return port, binding except exc.NoResultFound: return None, None def set_binding_levels(session, levels): if levels: for level in levels: session.add(level) LOG.debug("For port %(port_id)s, host %(host)s, " "set binding levels %(levels)s", {'port_id': levels[0].port_id, 'host': levels[0].host, 'levels': levels}) else: LOG.debug("Attempted to set empty binding levels") def get_binding_levels(session, port_id, host): if host: result = (session.query(models.PortBindingLevel). filter_by(port_id=port_id, host=host). order_by(models.PortBindingLevel.level). all()) LOG.debug("For port %(port_id)s, host %(host)s, " "got binding levels %(levels)s", {'port_id': port_id, 'host': host, 'levels': result}) return result def clear_binding_levels(session, port_id, host): if host: (session.query(models.PortBindingLevel). filter_by(port_id=port_id, host=host). delete()) LOG.debug("For port %(port_id)s, host %(host)s, " "cleared binding levels", {'port_id': port_id, 'host': host}) def ensure_dvr_port_binding(session, port_id, host, router_id=None): record = (session.query(models.DVRPortBinding). filter_by(port_id=port_id, host=host).first()) if record: return record try: with session.begin(subtransactions=True): record = models.DVRPortBinding( port_id=port_id, host=host, router_id=router_id, vif_type=portbindings.VIF_TYPE_UNBOUND, vnic_type=portbindings.VNIC_NORMAL, status=n_const.PORT_STATUS_DOWN) session.add(record) return record except db_exc.DBDuplicateEntry: LOG.debug("DVR Port %s already bound", port_id) return (session.query(models.DVRPortBinding). filter_by(port_id=port_id, host=host).one()) def delete_dvr_port_binding_if_stale(session, binding): if not binding.router_id and binding.status == n_const.PORT_STATUS_DOWN: with session.begin(subtransactions=True): LOG.debug("DVR: Deleting binding %s", binding) session.delete(binding) def get_port(session, port_id): """Get port record for update within transaction.""" with session.begin(subtransactions=True): try: record = (session.query(models_v2.Port). enable_eagerloads(False). filter(models_v2.Port.id.startswith(port_id)). 
one()) return record except exc.NoResultFound: return except exc.MultipleResultsFound: LOG.error(_LE("Multiple ports have port_id starting with %s"), port_id) return def get_port_from_device_mac(context, device_mac): LOG.debug("get_port_from_device_mac() called for mac %s", device_mac) qry = context.session.query(models_v2.Port).filter_by( mac_address=device_mac) return qry.first() def get_ports_and_sgs(context, port_ids): """Get ports from database with security group info.""" # break large queries into smaller parts if len(port_ids) > MAX_PORTS_PER_QUERY: LOG.debug("Number of ports %(pcount)s exceeds the maximum per " "query %(maxp)s. Partitioning queries.", {'pcount': len(port_ids), 'maxp': MAX_PORTS_PER_QUERY}) return (get_ports_and_sgs(context, port_ids[:MAX_PORTS_PER_QUERY]) + get_ports_and_sgs(context, port_ids[MAX_PORTS_PER_QUERY:])) LOG.debug("get_ports_and_sgs() called for port_ids %s", port_ids) if not port_ids: # if port_ids is empty, avoid querying to DB to ask it for nothing return [] ports_to_sg_ids = get_sg_ids_grouped_by_port(context, port_ids) return [make_port_dict_with_security_groups(port, sec_groups) for port, sec_groups in six.iteritems(ports_to_sg_ids)] def get_sg_ids_grouped_by_port(context, port_ids): sg_ids_grouped_by_port = {} sg_binding_port = sg_db.SecurityGroupPortBinding.port_id with context.session.begin(subtransactions=True): # partial UUIDs must be individually matched with startswith. # full UUIDs may be matched directly in an IN statement partial_uuids = set(port_id for port_id in port_ids if not uuidutils.is_uuid_like(port_id)) full_uuids = set(port_ids) - partial_uuids or_criteria = [models_v2.Port.id.startswith(port_id) for port_id in partial_uuids] if full_uuids: or_criteria.append(models_v2.Port.id.in_(full_uuids)) query = context.session.query( models_v2.Port, sg_db.SecurityGroupPortBinding.security_group_id) query = query.outerjoin(sg_db.SecurityGroupPortBinding, models_v2.Port.id == sg_binding_port) query = query.filter(or_(*or_criteria)) for port, sg_id in query: if port not in sg_ids_grouped_by_port: sg_ids_grouped_by_port[port] = [] if sg_id: sg_ids_grouped_by_port[port].append(sg_id) return sg_ids_grouped_by_port def make_port_dict_with_security_groups(port, sec_groups): plugin = manager.NeutronManager.get_plugin() port_dict = plugin._make_port_dict(port) port_dict['security_groups'] = sec_groups port_dict['security_group_rules'] = [] port_dict['security_group_source_groups'] = [] port_dict['fixed_ips'] = [ip['ip_address'] for ip in port['fixed_ips']] return port_dict def get_port_binding_host(session, port_id): try: with session.begin(subtransactions=True): query = (session.query(models.PortBinding). filter(models.PortBinding.port_id.startswith(port_id)). 
one()) except exc.NoResultFound: LOG.debug("No binding found for port %(port_id)s", {'port_id': port_id}) return except exc.MultipleResultsFound: LOG.error(_LE("Multiple ports have port_id starting with %s"), port_id) return return query.host def generate_dvr_port_status(session, port_id): # an OR'ed value of status assigned to parent port from the # dvrportbinding bucket query = session.query(models.DVRPortBinding) final_status = n_const.PORT_STATUS_BUILD for bind in query.filter(models.DVRPortBinding.port_id == port_id): if bind.status == n_const.PORT_STATUS_ACTIVE: return bind.status elif bind.status == n_const.PORT_STATUS_DOWN: final_status = bind.status return final_status def get_dvr_port_binding_by_host(session, port_id, host): with session.begin(subtransactions=True): binding = (session.query(models.DVRPortBinding). filter(models.DVRPortBinding.port_id.startswith(port_id), models.DVRPortBinding.host == host).first()) if not binding: LOG.debug("No binding for DVR port %(port_id)s with host " "%(host)s", {'port_id': port_id, 'host': host}) return binding def get_dvr_port_bindings(session, port_id): with session.begin(subtransactions=True): bindings = (session.query(models.DVRPortBinding). filter(models.DVRPortBinding.port_id.startswith(port_id)). all()) if not bindings: LOG.debug("No bindings for DVR port %s", port_id) return bindings neutron-8.4.0/neutron/plugins/ml2/driver_api.py0000664000567000056710000011632213044372760022734 0ustar jenkinsjenkins00000000000000# Copyright (c) 2013 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import abc import six # The following keys are used in the segment dictionaries passed via # the driver API. These are defined separately from similar keys in # neutron.extensions.providernet so that drivers don't need to change # if/when providernet moves to the core API. # ID = 'id' NETWORK_TYPE = 'network_type' PHYSICAL_NETWORK = 'physical_network' SEGMENTATION_ID = 'segmentation_id' MTU = 'mtu' # The following keys are used in the binding level dictionaries # available via the binding_levels and original_binding_levels # PortContext properties. BOUND_DRIVER = 'bound_driver' BOUND_SEGMENT = 'bound_segment' @six.add_metaclass(abc.ABCMeta) class TypeDriver(object): """Define stable abstract interface for ML2 type drivers. ML2 type drivers each support a specific network_type for provider and/or tenant network segments. Type drivers must implement this abstract interface, which defines the API by which the plugin uses the driver to manage the persistent type-specific resource allocation state associated with network segments of that type. Network segments are represented by segment dictionaries using the NETWORK_TYPE, PHYSICAL_NETWORK, and SEGMENTATION_ID keys defined above, corresponding to the provider attributes. Future revisions of the TypeDriver API may add additional segment dictionary keys. Attributes not applicable for a particular network_type may either be excluded or stored as None. 
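 For illustration, a fully specified VLAN provider segment could be represented as follows (all values hypothetical)::

     {ID: '3b9c...', NETWORK_TYPE: 'vlan', PHYSICAL_NETWORK: 'physnet1',
      SEGMENTATION_ID: 1001, MTU: 1500}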
""" @abc.abstractmethod def get_type(self): """Get driver's network type. :returns network_type value handled by this driver """ pass @abc.abstractmethod def initialize(self): """Perform driver initialization. Called after all drivers have been loaded and the database has been initialized. No abstract methods defined below will be called prior to this method being called. """ pass @abc.abstractmethod def is_partial_segment(self, segment): """Return True if segment is a partially specified segment. :param segment: segment dictionary :returns: boolean """ @abc.abstractmethod def validate_provider_segment(self, segment): """Validate attributes of a provider network segment. :param segment: segment dictionary using keys defined above :raises: neutron.common.exceptions.InvalidInput if invalid Called outside transaction context to validate the provider attributes for a provider network segment. Raise InvalidInput if: - any required attribute is missing - any prohibited or unrecognized attribute is present - any attribute value is not valid The network_type attribute is present in segment, but need not be validated. """ pass @abc.abstractmethod def reserve_provider_segment(self, session, segment): """Reserve resource associated with a provider network segment. :param session: database session :param segment: segment dictionary :returns: segment dictionary Called inside transaction context on session to reserve the type-specific resource for a provider network segment. The segment dictionary passed in was returned by a previous validate_provider_segment() call. """ pass @abc.abstractmethod def allocate_tenant_segment(self, session): """Allocate resource for a new tenant network segment. :param session: database session :returns: segment dictionary using keys defined above Called inside transaction context on session to allocate a new tenant network, typically from a type-specific resource pool. If successful, return a segment dictionary describing the segment. If tenant network segment cannot be allocated (i.e. tenant networks not supported or resource pool is exhausted), return None. """ pass @abc.abstractmethod def release_segment(self, session, segment): """Release network segment. :param session: database session :param segment: segment dictionary using keys defined above Called inside transaction context on session to release a tenant or provider network's type-specific resource. Runtime errors are not expected, but raising an exception will result in rollback of the transaction. """ pass @abc.abstractmethod def get_mtu(self, physical): """Get driver's network MTU. :returns mtu: maximum transmission unit Returns the mtu for the network based on the config values and the network type. """ pass @six.add_metaclass(abc.ABCMeta) class NetworkContext(object): """Context passed to MechanismDrivers for changes to network resources. A NetworkContext instance wraps a network resource. It provides helper methods for accessing other relevant information. Results from expensive operations are cached so that other MechanismDrivers can freely access the same information. """ @abc.abstractproperty def current(self): """Return the network in its current configuration. Return the network, as defined by NeutronPluginBaseV2. create_network and all extensions in the ml2 plugin, with all its properties 'current' at the time the context was established. """ pass @abc.abstractproperty def original(self): """Return the network in its original configuration. 
Return the network, with all its properties set to their original values prior to a call to update_network. Method is only valid within calls to update_network_precommit and update_network_postcommit. """ pass @abc.abstractproperty def network_segments(self): """Return the segments associated with this network resource.""" pass @six.add_metaclass(abc.ABCMeta) class SubnetContext(object): """Context passed to MechanismDrivers for changes to subnet resources. A SubnetContext instance wraps a subnet resource. It provides helper methods for accessing other relevant information. Results from expensive operations are cached so that other MechanismDrivers can freely access the same information. """ @abc.abstractproperty def current(self): """Return the subnet in its current configuration. Return the subnet, as defined by NeutronPluginBaseV2. create_subnet and all extensions in the ml2 plugin, with all its properties 'current' at the time the context was established. """ pass @abc.abstractproperty def original(self): """Return the subnet in its original configuration. Return the subnet, with all its properties set to their original values prior to a call to update_subnet. Method is only valid within calls to update_subnet_precommit and update_subnet_postcommit. """ pass @six.add_metaclass(abc.ABCMeta) class PortContext(object): """Context passed to MechanismDrivers for changes to port resources. A PortContext instance wraps a port resource. It provides helper methods for accessing other relevant information. Results from expensive operations are cached so that other MechanismDrivers can freely access the same information. """ @abc.abstractproperty def current(self): """Return the port in its current configuration. Return the port, as defined by NeutronPluginBaseV2. create_port and all extensions in the ml2 plugin, with all its properties 'current' at the time the context was established. """ pass @abc.abstractproperty def original(self): """Return the port in its original configuration. Return the port, with all its properties set to their original values prior to a call to update_port. Method is only valid within calls to update_port_precommit and update_port_postcommit. """ pass @abc.abstractproperty def status(self): """Return the status of the current port.""" pass @abc.abstractproperty def original_status(self): """Return the status of the original port. The method is only valid within calls to update_port_precommit and update_port_postcommit. """ pass @abc.abstractproperty def network(self): """Return the NetworkContext associated with this port.""" pass @abc.abstractproperty def binding_levels(self): """Return dictionaries describing the current binding levels. This property returns a list of dictionaries describing each binding level if the port is bound or partially bound, or None if the port is unbound. Each returned dictionary contains the name of the bound driver under the BOUND_DRIVER key, and the bound segment dictionary under the BOUND_SEGMENT key. The first entry (index 0) describes the top-level binding, which always involves one of the port's network's static segments. In the case of a hierarchical binding, subsequent entries describe the lower-level bindings in descending order, which may involve dynamic segments. Adjacent levels where different drivers bind the same static or dynamic segment are possible. The last entry (index -1) describes the bottom-level binding that supplied the port's binding:vif_type and binding:vif_details attribute values. 
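 As an illustrative sketch (driver names and segment contents hypothetical), a two-level hierarchical binding might be described as::

     [{BOUND_DRIVER: 'driver_a', BOUND_SEGMENT: static_segment},
      {BOUND_DRIVER: 'driver_b', BOUND_SEGMENT: dynamic_segment}]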
Within calls to MechanismDriver.bind_port, descriptions of the levels above the level currently being bound are returned. """ pass @abc.abstractproperty def original_binding_levels(self): """Return dictionaries describing the original binding levels. This property returns a list of dictionaries describing each original binding level if the port was previously bound, or None if the port was unbound. The content is as described for the binding_levels property. This property is only valid within calls to update_port_precommit and update_port_postcommit. It returns None otherwise. """ pass @abc.abstractproperty def top_bound_segment(self): """Return the current top-level bound segment dictionary. This property returns the current top-level bound segment dictionary, or None if the port is unbound. For a bound port, top_bound_segment is equivalent to binding_levels[0][BOUND_SEGMENT], and returns one of the port's network's static segments. """ pass @abc.abstractproperty def original_top_bound_segment(self): """Return the original top-level bound segment dictionary. This property returns the original top-level bound segment dictionary, or None if the port was previously unbound. For a previously bound port, original_top_bound_segment is equivalent to original_binding_levels[0][BOUND_SEGMENT], and returns one of the port's network's static segments. This property is only valid within calls to update_port_precommit and update_port_postcommit. It returns None otherwise. """ pass @abc.abstractproperty def bottom_bound_segment(self): """Return the current bottom-level bound segment dictionary. This property returns the current bottom-level bound segment dictionary, or None if the port is unbound. For a bound port, bottom_bound_segment is equivalent to binding_levels[-1][BOUND_SEGMENT], and returns the segment whose binding supplied the port's binding:vif_type and binding:vif_details attribute values. """ pass @abc.abstractproperty def original_bottom_bound_segment(self): """Return the original bottom-level bound segment dictionary. This property returns the original bottom-level bound segment dictionary, or None if the port was previously unbound. For a previously bound port, original_bottom_bound_segment is equivalent to original_binding_levels[-1][BOUND_SEGMENT], and returns the segment whose binding supplied the port's previous binding:vif_type and binding:vif_details attribute values. This property is only valid within calls to update_port_precommit and update_port_postcommit. It returns None otherwise. """ pass @abc.abstractproperty def host(self): """Return the host with which the port is associated. In the context of a host-specific operation on a distributed port, the host property indicates the host for which the port operation is being performed. Otherwise, it is the same value as current['binding:host_id']. """ pass @abc.abstractproperty def original_host(self): """Return the original host with which the port was associated. In the context of a host-specific operation on a distributed port, the original_host property indicates the host for which the port operation is being performed. Otherwise, it is the same value as original['binding:host_id']. This property is only valid within calls to update_port_precommit and update_port_postcommit. It returns None otherwise. """ pass @abc.abstractproperty def vif_type(self): """Return the vif_type indicating the binding state of the port.
In the context of a host-specific operation on a distributed port, the vif_type property indicates the binding state for the host for which the port operation is being performed. Otherwise, it is the same value as current['binding:vif_type']. """ pass @abc.abstractproperty def original_vif_type(self): """Return the original vif_type of the port. In the context of a host-specific operation on a distributed port, the original_vif_type property indicates original binding state for the host for which the port operation is being performed. Otherwise, it is the same value as original['binding:vif_type']. This property is only valid within calls to update_port_precommit and update_port_postcommit. It returns None otherwise. """ pass @abc.abstractproperty def vif_details(self): """Return the vif_details describing the binding of the port. In the context of a host-specific operation on a distributed port, the vif_details property describes the binding for the host for which the port operation is being performed. Otherwise, it is the same value as current['binding:vif_details']. """ pass @abc.abstractproperty def original_vif_details(self): """Return the original vif_details of the port. In the context of a host-specific operation on a distributed port, the original_vif_details property describes the original binding for the host for which the port operation is being performed. Otherwise, it is the same value as original['binding:vif_details']. This property is only valid within calls to update_port_precommit and update_port_postcommit. It returns None otherwise. """ pass @abc.abstractproperty def segments_to_bind(self): """Return the list of segments with which to bind the port. This property returns the list of segment dictionaries with which the mechanism driver may bind the port. When establishing a top-level binding, these will be the port's network's static segments. For each subsequent level, these will be the segments passed to continue_binding by the mechanism driver that bound the level above. This property is only valid within calls to MechanismDriver.bind_port. It returns None otherwise. """ pass @abc.abstractmethod def host_agents(self, agent_type): """Get agents of the specified type on port's host. :param agent_type: Agent type identifier :returns: List of agents_db.Agent records """ pass @abc.abstractmethod def set_binding(self, segment_id, vif_type, vif_details, status=None): """Set the bottom-level binding for the port. :param segment_id: Network segment bound for the port. :param vif_type: The VIF type for the bound port. :param vif_details: Dictionary with details for VIF driver. :param status: Port status to set if not None. This method is called by MechanismDriver.bind_port to indicate success and specify binding details to use for port. The segment_id must identify an item in the current value of the segments_to_bind property. """ pass @abc.abstractmethod def continue_binding(self, segment_id, next_segments_to_bind): """Continue binding the port with different segments. :param segment_id: Network segment partially bound for the port. :param next_segments_to_bind: Segments to continue binding with. This method is called by MechanismDriver.bind_port to indicate it was able to partially bind the port, but that one or more additional mechanism drivers are required to complete the binding. The segment_id must identify an item in the current value of the segments_to_bind property. 
The list of segment IDs passed as next_segments_to_bind identifies dynamic (or static) segments of the port's network that will be used to populate segments_to_bind for the next lower level of a hierarchical binding. """ pass @abc.abstractmethod def allocate_dynamic_segment(self, segment): """Allocate a dynamic segment. :param segment: A partially or fully specified segment dictionary Called by the MechanismDriver.bind_port, create_port or update_port to dynamically allocate a segment for the port using the partial segment specified. The segment dictionary can be a fully or partially specified segment. At a minimum it needs the network_type populated to call on the appropriate type driver. """ pass @abc.abstractmethod def release_dynamic_segment(self, segment_id): """Release an allocated dynamic segment. :param segment_id: UUID of the dynamic network segment. Called by the MechanismDriver.delete_port or update_port to release the dynamic segment allocated for this port. """ pass @six.add_metaclass(abc.ABCMeta) class MechanismDriver(object): """Define stable abstract interface for ML2 mechanism drivers. A mechanism driver is called on the creation, update, and deletion of networks and ports. For every event, there are two methods that get called - one within the database transaction (method suffix of _precommit), one right afterwards (method suffix of _postcommit). Exceptions raised by methods called inside the transaction can roll back the transaction, but those methods should not make any blocking calls (for example, REST requests to an outside controller). Methods called after the transaction commits can make blocking external calls, though these will block the entire process. Exceptions raised in calls after the transaction commits may cause the associated resource to be deleted. Because rollback outside of the transaction is not done in the update network/port case, all data validation must be done within methods that are part of the database transaction. """ @abc.abstractmethod def initialize(self): """Perform driver initialization. Called after all drivers have been loaded and the database has been initialized. No abstract methods defined below will be called prior to this method being called. """ pass def create_network_precommit(self, context): """Allocate resources for a new network. :param context: NetworkContext instance describing the new network. Create a new network, allocating resources as necessary in the database. Called inside transaction context on session. Call cannot block. Raising an exception will result in a rollback of the current transaction. """ pass def create_network_postcommit(self, context): """Create a network. :param context: NetworkContext instance describing the new network. Called after the transaction commits. Call can block, though will block the entire process so care should be taken to not drastically affect performance. Raising an exception will cause the deletion of the resource. """ pass def update_network_precommit(self, context): """Update resources of a network. :param context: NetworkContext instance describing the new state of the network, as well as the original state prior to the update_network call. Update values of a network, updating the associated resources in the database. Called inside transaction context on session. Raising an exception will result in rollback of the transaction. update_network_precommit is called for all changes to the network state. It is up to the mechanism driver to ignore state or state changes that it does not know or care about.
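 For example, a driver that only reacts to name changes might begin with a guard like this (illustrative sketch only, not part of the driver API)::

     if context.current['name'] == context.original['name']:
         return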
""" pass def update_network_postcommit(self, context): """Update a network. :param context: NetworkContext instance describing the new state of the network, as well as the original state prior to the update_network call. Called after the transaction commits. Call can block, though will block the entire process so care should be taken to not drastically affect performance. Raising an exception will cause the deletion of the resource. update_network_postcommit is called for all changes to the network state. It is up to the mechanism driver to ignore state or state changes that it does not know or care about. """ pass def delete_network_precommit(self, context): """Delete resources for a network. :param context: NetworkContext instance describing the current state of the network, prior to the call to delete it. Delete network resources previously allocated by this mechanism driver for a network. Called inside transaction context on session. Runtime errors are not expected, but raising an exception will result in rollback of the transaction. """ pass def delete_network_postcommit(self, context): """Delete a network. :param context: NetworkContext instance describing the current state of the network, prior to the call to delete it. Called after the transaction commits. Call can block, though will block the entire process so care should be taken to not drastically affect performance. Runtime errors are not expected, and will not prevent the resource from being deleted. """ pass def create_subnet_precommit(self, context): """Allocate resources for a new subnet. :param context: SubnetContext instance describing the new subnet. Create a new subnet, allocating resources as necessary in the database. Called inside transaction context on session. Call cannot block. Raising an exception will result in a rollback of the current transaction. """ pass def create_subnet_postcommit(self, context): """Create a subnet. :param context: SubnetContext instance describing the new subnet. Called after the transaction commits. Call can block, though will block the entire process so care should be taken to not drastically affect performance. Raising an exception will cause the deletion of the resource. """ pass def update_subnet_precommit(self, context): """Update resources of a subnet. :param context: SubnetContext instance describing the new state of the subnet, as well as the original state prior to the update_subnet call. Update values of a subnet, updating the associated resources in the database. Called inside transaction context on session. Raising an exception will result in rollback of the transaction. update_subnet_precommit is called for all changes to the subnet state. It is up to the mechanism driver to ignore state or state changes that it does not know or care about. """ pass def update_subnet_postcommit(self, context): """Update a subnet. :param context: SubnetContext instance describing the new state of the subnet, as well as the original state prior to the update_subnet call. Called after the transaction commits. Call can block, though will block the entire process so care should be taken to not drastically affect performance. Raising an exception will cause the deletion of the resource. update_subnet_postcommit is called for all changes to the subnet state. It is up to the mechanism driver to ignore state or state changes that it does not know or care about. """ pass def delete_subnet_precommit(self, context): """Delete resources for a subnet. 
:param context: SubnetContext instance describing the current state of the subnet, prior to the call to delete it. Delete subnet resources previously allocated by this mechanism driver for a subnet. Called inside transaction context on session. Runtime errors are not expected, but raising an exception will result in rollback of the transaction. """ pass def delete_subnet_postcommit(self, context): """Delete a subnet. :param context: SubnetContext instance describing the current state of the subnet, prior to the call to delete it. Called after the transaction commits. Call can block, though will block the entire process so care should be taken to not drastically affect performance. Runtime errors are not expected, and will not prevent the resource from being deleted. """ pass def create_port_precommit(self, context): """Allocate resources for a new port. :param context: PortContext instance describing the port. Create a new port, allocating resources as necessary in the database. Called inside transaction context on session. Call cannot block. Raising an exception will result in a rollback of the current transaction. """ pass def create_port_postcommit(self, context): """Create a port. :param context: PortContext instance describing the port. Called after the transaction completes. Call can block, though will block the entire process so care should be taken to not drastically affect performance. Raising an exception will result in the deletion of the resource. """ pass def update_port_precommit(self, context): """Update resources of a port. :param context: PortContext instance describing the new state of the port, as well as the original state prior to the update_port call. Called inside transaction context on session to complete a port update as defined by this mechanism driver. Raising an exception will result in rollback of the transaction. update_port_precommit is called for all changes to the port state. It is up to the mechanism driver to ignore state or state changes that it does not know or care about. """ pass def update_port_postcommit(self, context): """Update a port. :param context: PortContext instance describing the new state of the port, as well as the original state prior to the update_port call. Called after the transaction completes. Call can block, though will block the entire process so care should be taken to not drastically affect performance. Raising an exception will result in the deletion of the resource. update_port_postcommit is called for all changes to the port state. It is up to the mechanism driver to ignore state or state changes that it does not know or care about. """ pass def delete_port_precommit(self, context): """Delete resources of a port. :param context: PortContext instance describing the current state of the port, prior to the call to delete it. Called inside transaction context on session. Runtime errors are not expected, but raising an exception will result in rollback of the transaction. """ pass def delete_port_postcommit(self, context): """Delete a port. :param context: PortContext instance describing the current state of the port, prior to the call to delete it. Called after the transaction completes. Call can block, though will block the entire process so care should be taken to not drastically affect performance. Runtime errors are not expected, and will not prevent the resource from being deleted. """ pass def bind_port(self, context): """Attempt to bind a port. 
:param context: PortContext instance describing the port This method is called outside any transaction to attempt to establish a port binding using this mechanism driver. Bindings may be created at each of multiple levels of a hierarchical network, and are established from the top level downward. At each level, the mechanism driver determines whether it can bind to any of the network segments in the context.segments_to_bind property, based on the value of the context.host property, any relevant port or network attributes, and its own knowledge of the network topology. At the top level, context.segments_to_bind contains the static segments of the port's network. At each lower level of binding, it contains static or dynamic segments supplied by the driver that bound at the level above. If the driver is able to complete the binding of the port to any segment in context.segments_to_bind, it must call context.set_binding with the binding details. If it can partially bind the port, it must call context.continue_binding with the network segments to be used to bind at the next lower level. If the binding results are committed after bind_port returns, they will be seen by all mechanism drivers as update_port_precommit and update_port_postcommit calls. But if some other thread or process concurrently binds or updates the port, these binding results will not be committed, and update_port_precommit and update_port_postcommit will not be called on the mechanism drivers with these results. Because binding results can be discarded rather than committed, drivers should avoid making persistent state changes in bind_port, or else must ensure that such state changes are eventually cleaned up. Implementing this method explicitly declares the mechanism driver as having the intention to bind ports. This is inspected by the QoS service to identify the available QoS rules you can use with ports. """ pass @property def _supports_port_binding(self): return self.__class__.bind_port != MechanismDriver.bind_port def check_vlan_transparency(self, context): """Check if the network supports vlan transparency. :param context: NetworkContext instance describing the network. Check if the network supports vlan transparency or not. """ pass def get_workers(self): """Get any NeutronWorker instances that should have their own process Any driver that needs to run processes separate from the API or RPC workers, can return a sequence of NeutronWorker instances. """ return () @six.add_metaclass(abc.ABCMeta) class ExtensionDriver(object): """Define stable abstract interface for ML2 extension drivers. An extension driver extends the core resources implemented by the ML2 plugin with additional attributes. Methods that process create and update operations for these resources validate and persist values for extended attributes supplied through the API. Other methods extend the resource dictionaries returned from the API operations with the values of the extended attributes. """ @abc.abstractmethod def initialize(self): """Perform driver initialization. Called after all drivers have been loaded and the database has been initialized. No abstract methods defined below will be called prior to this method being called. """ pass @property def extension_alias(self): """Supported extension alias. Return the alias identifying the core API extension supported by this driver. Do not declare if API extension handling will be left to a service plugin, and we just need to provide core resource extension and updates. 
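 A driver supporting a hypothetical 'foo' extension alias would typically declare (sketch)::

     @property
     def extension_alias(self):
         return 'foo'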
""" pass def process_create_network(self, plugin_context, data, result): """Process extended attributes for create network. :param plugin_context: plugin request context :param data: dictionary of incoming network data :param result: network dictionary to extend Called inside transaction context on plugin_context.session to validate and persist any extended network attributes defined by this driver. Extended attribute values must also be added to result. """ pass def process_create_subnet(self, plugin_context, data, result): """Process extended attributes for create subnet. :param plugin_context: plugin request context :param data: dictionary of incoming subnet data :param result: subnet dictionary to extend Called inside transaction context on plugin_context.session to validate and persist any extended subnet attributes defined by this driver. Extended attribute values must also be added to result. """ pass def process_create_port(self, plugin_context, data, result): """Process extended attributes for create port. :param plugin_context: plugin request context :param data: dictionary of incoming port data :param result: port dictionary to extend Called inside transaction context on plugin_context.session to validate and persist any extended port attributes defined by this driver. Extended attribute values must also be added to result. """ pass def process_update_network(self, plugin_context, data, result): """Process extended attributes for update network. :param plugin_context: plugin request context :param data: dictionary of incoming network data :param result: network dictionary to extend Called inside transaction context on plugin_context.session to validate and update any extended network attributes defined by this driver. Extended attribute values, whether updated or not, must also be added to result. """ pass def process_update_subnet(self, plugin_context, data, result): """Process extended attributes for update subnet. :param plugin_context: plugin request context :param data: dictionary of incoming subnet data :param result: subnet dictionary to extend Called inside transaction context on plugin_context.session to validate and update any extended subnet attributes defined by this driver. Extended attribute values, whether updated or not, must also be added to result. """ pass def process_update_port(self, plugin_context, data, result): """Process extended attributes for update port. :param plugin_context: plugin request context :param data: dictionary of incoming port data :param result: port dictionary to extend Called inside transaction context on plugin_context.session to validate and update any extended port attributes defined by this driver. Extended attribute values, whether updated or not, must also be added to result. """ pass def extend_network_dict(self, session, base_model, result): """Add extended attributes to network dictionary. :param session: database session :param base_model: network model data :param result: network dictionary to extend Called inside transaction context on session to add any extended attributes defined by this driver to a network dictionary to be used for mechanism driver calls and/or returned as the result of a network operation. """ pass def extend_subnet_dict(self, session, base_model, result): """Add extended attributes to subnet dictionary. 
:param session: database session :param base_model: subnet model data :param result: subnet dictionary to extend Called inside transaction context on session to add any extended attributes defined by this driver to a subnet dictionary to be used for mechanism driver calls and/or returned as the result of a subnet operation. """ pass def extend_port_dict(self, session, base_model, result): """Add extended attributes to port dictionary. :param session: database session :param base_model: port model data :param result: port dictionary to extend Called inside transaction context on session to add any extended attributes defined by this driver to a port dictionary to be used for mechanism driver calls and/or returned as the result of a port operation. """ pass neutron-8.4.0/neutron/scheduler/0000775000567000056710000000000013044373210020023 5ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/scheduler/l3_agent_scheduler.py0000664000567000056710000004657413044372760024160 0ustar jenkinsjenkins00000000000000# Copyright (c) 2013 OpenStack Foundation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import abc import collections import functools import itertools import random from oslo_config import cfg from oslo_db import exception as db_exc from oslo_log import log as logging import six from sqlalchemy import sql from neutron._i18n import _LE, _LW from neutron.common import constants from neutron.common import utils from neutron.db import api as db_api from neutron.db import l3_agentschedulers_db from neutron.db import l3_db from neutron.db import l3_hamode_db from neutron.extensions import availability_zone as az_ext from neutron.extensions import l3 LOG = logging.getLogger(__name__) cfg.CONF.register_opts(l3_hamode_db.L3_HA_OPTS) @six.add_metaclass(abc.ABCMeta) class L3Scheduler(object): def __init__(self): self.min_ha_agents = cfg.CONF.min_l3_agents_per_router self.max_ha_agents = cfg.CONF.max_l3_agents_per_router @abc.abstractmethod def schedule(self, plugin, context, router_id, candidates=None, hints=None): """Schedule the router to an active L3 agent. Schedule the router only if it is not already scheduled. 
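 Concrete schedulers typically delegate to _schedule_router; a minimal sketch, matching the ChanceScheduler and LeastRoutersScheduler implementations below::

     def schedule(self, plugin, context, router_id, candidates=None):
         return self._schedule_router(
             plugin, context, router_id, candidates=candidates)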
""" pass def _router_has_binding(self, context, router_id, l3_agent_id): router_binding_model = l3_agentschedulers_db.RouterL3AgentBinding query = context.session.query(router_binding_model) query = query.filter(router_binding_model.router_id == router_id, router_binding_model.l3_agent_id == l3_agent_id) return query.count() > 0 def _filter_unscheduled_routers(self, context, plugin, routers): """Filter from list of routers the ones that are not scheduled.""" unscheduled_routers = [] for router in routers: l3_agents = plugin.get_l3_agents_hosting_routers( context, [router['id']]) if l3_agents: LOG.debug('Router %(router_id)s has already been ' 'hosted by L3 agent %(agent_id)s', {'router_id': router['id'], 'agent_id': l3_agents[0]['id']}) else: unscheduled_routers.append(router) return unscheduled_routers def _get_unscheduled_routers(self, context, plugin): """Get routers with no agent binding.""" # TODO(gongysh) consider the disabled agent's router no_agent_binding = ~sql.exists().where( l3_db.Router.id == l3_agentschedulers_db.RouterL3AgentBinding.router_id) query = context.session.query(l3_db.Router.id).filter(no_agent_binding) query = query.filter(l3_db.Router.status == constants.ROUTER_STATUS_ACTIVE) unscheduled_router_ids = [router_id_[0] for router_id_ in query] if unscheduled_router_ids: return plugin.get_routers( context, filters={'id': unscheduled_router_ids}) return [] def _get_routers_to_schedule(self, context, plugin, router_ids=None): """Verify that the routers specified need to be scheduled. :param context: the context :param plugin: the core plugin :param router_ids: the list of routers to be checked for scheduling :returns: the list of routers to be scheduled """ if router_ids is not None: filters = {'id': router_ids, 'status': [constants.ROUTER_STATUS_ACTIVE]} routers = plugin.get_routers(context, filters=filters) return self._filter_unscheduled_routers(context, plugin, routers) else: return self._get_unscheduled_routers(context, plugin) def _get_routers_can_schedule(self, context, plugin, routers, l3_agent): """Get the subset of routers that can be scheduled on the L3 agent.""" ids_to_discard = set() for router in routers: # check if the l3 agent is compatible with the router candidates = plugin.get_l3_agent_candidates( context, router, [l3_agent]) if not candidates: ids_to_discard.add(router['id']) return [r for r in routers if r['id'] not in ids_to_discard] def auto_schedule_routers(self, plugin, context, host, router_ids): """Schedule non-hosted routers to L3 Agent running on host. If router_ids is given, each router in router_ids is scheduled if it is not scheduled yet. Otherwise all unscheduled routers are scheduled. Do not schedule the routers which are hosted already by active l3 agents. 
:returns: True if routers have been successfully assigned to host """ l3_agent = plugin.get_enabled_agent_on_host( context, constants.AGENT_TYPE_L3, host) if not l3_agent: return False unscheduled_routers = self._get_routers_to_schedule( context, plugin, router_ids) if not unscheduled_routers: if utils.is_extension_supported( plugin, constants.L3_HA_MODE_EXT_ALIAS): return self._schedule_ha_routers_to_additional_agent( plugin, context, l3_agent) target_routers = self._get_routers_can_schedule( context, plugin, unscheduled_routers, l3_agent) if not target_routers: LOG.warning(_LW('No routers compatible with L3 agent ' 'configuration on host %s'), host) return False self._bind_routers(context, plugin, target_routers, l3_agent) return True def _get_candidates(self, plugin, context, sync_router): """Return L3 agents where a router could be scheduled.""" with context.session.begin(subtransactions=True): # Allow a router to be hosted by just one enabled L3 agent; # whether the agent is currently active is just a timing problem, # since a non-active L3 agent can return to active at any time. current_l3_agents = plugin.get_l3_agents_hosting_routers( context, [sync_router['id']], admin_state_up=True) if current_l3_agents: LOG.debug('Router %(router_id)s has already been hosted ' 'by L3 agent %(agent_id)s', {'router_id': sync_router['id'], 'agent_id': current_l3_agents[0]['id']}) return [] active_l3_agents = plugin.get_l3_agents(context, active=True) if not active_l3_agents: LOG.warning(_LW('No active L3 agents')) return [] candidates = plugin.get_l3_agent_candidates(context, sync_router, active_l3_agents) if not candidates: LOG.warning(_LW('No L3 agents can host the router %s'), sync_router['id']) return candidates def _bind_routers(self, context, plugin, routers, l3_agent): for router in routers: if router.get('ha'): if not self._router_has_binding(context, router['id'], l3_agent.id): self.create_ha_port_and_bind( plugin, context, router['id'], router['tenant_id'], l3_agent) else: self.bind_router(context, router['id'], l3_agent) def bind_router(self, context, router_id, chosen_agent): """Bind the router to the l3 agent which has been chosen.""" try: with context.session.begin(subtransactions=True): binding = l3_agentschedulers_db.RouterL3AgentBinding() binding.l3_agent = chosen_agent binding.router_id = router_id context.session.add(binding) except db_exc.DBDuplicateEntry: LOG.debug('Router %(router_id)s has already been scheduled ' 'to L3 agent %(agent_id)s.', {'agent_id': chosen_agent.id, 'router_id': router_id}) return except db_exc.DBReferenceError: LOG.debug('Router %s has already been removed ' 'by concurrent operation', router_id) return LOG.debug('Router %(router_id)s is scheduled to L3 agent ' '%(agent_id)s', {'router_id': router_id, 'agent_id': chosen_agent.id}) def _schedule_router(self, plugin, context, router_id, candidates=None): sync_router = plugin.get_router(context, router_id) candidates = candidates or self._get_candidates( plugin, context, sync_router) if not candidates: return elif sync_router.get('ha', False): chosen_agents = self._bind_ha_router(plugin, context, router_id, candidates) if not chosen_agents: return chosen_agent = chosen_agents[-1] else: chosen_agent = self._choose_router_agent( plugin, context, candidates) self.bind_router(context, router_id, chosen_agent) return chosen_agent @abc.abstractmethod def _choose_router_agent(self, plugin, context, candidates): """Choose an agent from candidates based on a specific policy.""" pass @abc.abstractmethod def _choose_router_agents_for_ha(self,
plugin, context, candidates): """Choose agents from candidates based on a specific policy.""" pass def _get_num_of_agents_for_ha(self, candidates_count): return (min(self.max_ha_agents, candidates_count) if self.max_ha_agents else candidates_count) def _enough_candidates_for_ha(self, candidates): if not candidates or len(candidates) < self.min_ha_agents: LOG.error(_LE("Not enough candidates, a HA router needs at least " "%s agents"), self.min_ha_agents) return False return True def _add_port_from_net(self, plugin, ctxt, router_id, tenant_id, ha_net): """small wrapper function to unpack network id from ha_network""" return plugin.add_ha_port(ctxt, router_id, ha_net.network.id, tenant_id) def create_ha_port_and_bind(self, plugin, context, router_id, tenant_id, agent): """Creates and binds a new HA port for this agent.""" ctxt = context.elevated() creator = functools.partial(self._add_port_from_net, plugin, ctxt, router_id, tenant_id) dep_getter = functools.partial(plugin.get_ha_network, ctxt, tenant_id) dep_creator = functools.partial(plugin._create_ha_network, ctxt, tenant_id) dep_id_attr = 'network_id' try: port_binding = utils.create_object_with_dependency( creator, dep_getter, dep_creator, dep_id_attr)[0] with db_api.autonested_transaction(context.session): port_binding.l3_agent_id = agent['id'] except db_exc.DBDuplicateEntry: LOG.debug("Router %(router)s already scheduled for agent " "%(agent)s", {'router': router_id, 'agent': agent['id']}) except l3.RouterNotFound: LOG.debug('Router %s has already been removed ' 'by concurrent operation', router_id) return self.bind_router(context, router_id, agent) def get_ha_routers_l3_agents_counts(self, context, plugin, filters=None): """Return a mapping (router, # agents) matching specified filters.""" return plugin.get_ha_routers_l3_agents_count(context) def _schedule_ha_routers_to_additional_agent(self, plugin, context, agent): """Bind already scheduled routers to the agent. Retrieve the number of agents per router and check if the router has to be scheduled on the given agent if max_l3_agents_per_router is not yet reached. 
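 Sketch of the eligibility test applied below, where max_ha_agents is derived from max_l3_agents_per_router::

     underscheduled = [r for r, n in routers_agents
                       if not self.max_ha_agents or n < self.max_ha_agents]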
""" routers_agents = self.get_ha_routers_l3_agents_counts(context, plugin, agent) scheduled = False admin_ctx = context.elevated() underscheduled_routers = [router for router, agents in routers_agents if (not self.max_ha_agents or agents < self.max_ha_agents)] schedulable_routers = self._get_routers_can_schedule( admin_ctx, plugin, underscheduled_routers, agent) for router in schedulable_routers: if not self._router_has_binding(admin_ctx, router['id'], agent.id): self.create_ha_port_and_bind(plugin, admin_ctx, router['id'], router['tenant_id'], agent) scheduled = True return scheduled def _bind_ha_router_to_agents(self, plugin, context, router_id, chosen_agents): port_bindings = plugin.get_ha_router_port_bindings(context, [router_id]) for port_binding, agent in zip(port_bindings, chosen_agents): try: with db_api.autonested_transaction(context.session): port_binding.l3_agent_id = agent.id self.bind_router(context, router_id, agent) except db_exc.DBDuplicateEntry: LOG.debug("Router %(router)s already scheduled for agent " "%(agent)s", {'router': router_id, 'agent': agent.id}) else: LOG.debug('HA Router %(router_id)s is scheduled to L3 agent ' '%(agent_id)s)', {'router_id': router_id, 'agent_id': agent.id}) def _bind_ha_router(self, plugin, context, router_id, candidates): """Bind a HA router to agents based on a specific policy.""" if not self._enough_candidates_for_ha(candidates): return chosen_agents = self._choose_router_agents_for_ha( plugin, context, candidates) self._bind_ha_router_to_agents(plugin, context, router_id, chosen_agents) return chosen_agents class ChanceScheduler(L3Scheduler): """Randomly allocate an L3 agent for a router.""" def schedule(self, plugin, context, router_id, candidates=None): return self._schedule_router( plugin, context, router_id, candidates=candidates) def _choose_router_agent(self, plugin, context, candidates): return random.choice(candidates) def _choose_router_agents_for_ha(self, plugin, context, candidates): num_agents = self._get_num_of_agents_for_ha(len(candidates)) return random.sample(candidates, num_agents) class LeastRoutersScheduler(L3Scheduler): """Allocate to an L3 agent with the least number of routers bound.""" def schedule(self, plugin, context, router_id, candidates=None): return self._schedule_router( plugin, context, router_id, candidates=candidates) def _choose_router_agent(self, plugin, context, candidates): candidate_ids = [candidate['id'] for candidate in candidates] chosen_agent = plugin.get_l3_agent_with_min_routers( context, candidate_ids) return chosen_agent def _choose_router_agents_for_ha(self, plugin, context, candidates): num_agents = self._get_num_of_agents_for_ha(len(candidates)) ordered_agents = plugin.get_l3_agents_ordered_by_num_routers( context, [candidate['id'] for candidate in candidates]) return ordered_agents[:num_agents] class AZLeastRoutersScheduler(LeastRoutersScheduler): """Availability zone aware scheduler. If a router is ha router, allocate L3 agents distributed AZs according to router's az_hints. 
""" def _get_az_hints(self, router): return (router.get(az_ext.AZ_HINTS) or cfg.CONF.default_availability_zones) def _get_routers_can_schedule(self, context, plugin, routers, l3_agent): """Overwrite L3Scheduler's method to filter by availability zone.""" target_routers = [] for r in routers: az_hints = self._get_az_hints(r) if not az_hints or l3_agent['availability_zone'] in az_hints: target_routers.append(r) if not target_routers: return [] return super(AZLeastRoutersScheduler, self)._get_routers_can_schedule( context, plugin, target_routers, l3_agent) def _get_candidates(self, plugin, context, sync_router): """Overwrite L3Scheduler's method to filter by availability zone.""" all_candidates = ( super(AZLeastRoutersScheduler, self)._get_candidates( plugin, context, sync_router)) candidates = [] az_hints = self._get_az_hints(sync_router) for agent in all_candidates: if not az_hints or agent['availability_zone'] in az_hints: candidates.append(agent) return candidates def get_ha_routers_l3_agents_counts(self, context, plugin, filters=None): """Overwrite L3Scheduler's method to filter by availability zone.""" all_routers_agents = ( super(AZLeastRoutersScheduler, self). get_ha_routers_l3_agents_counts(context, plugin, filters)) if filters is None: return all_routers_agents routers_agents = [] for router, agents in all_routers_agents: az_hints = self._get_az_hints(router) if az_hints and filters['availability_zone'] not in az_hints: continue routers_agents.append((router, agents)) return routers_agents def _choose_router_agents_for_ha(self, plugin, context, candidates): ordered_agents = plugin.get_l3_agents_ordered_by_num_routers( context, [candidate['id'] for candidate in candidates]) num_agents = self._get_num_of_agents_for_ha(len(ordered_agents)) # Order is kept in each az group_by_az = collections.defaultdict(list) for agent in ordered_agents: az = agent['availability_zone'] group_by_az[az].append(agent) selected_agents = [] for az, agents in itertools.cycle(group_by_az.items()): if not agents: continue selected_agents.append(agents.pop(0)) if len(selected_agents) >= num_agents: break return selected_agents neutron-8.4.0/neutron/scheduler/base_scheduler.py0000664000567000056710000000544713044372760023370 0ustar jenkinsjenkins00000000000000# Copyright (c) 2015 OpenStack Foundation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import abc from operator import attrgetter import random class BaseScheduler(object): """The base scheduler (agnostic to resource type). Child classes of BaseScheduler must define the self.resource_filter to filter agents of particular type. 
""" resource_filter = None @abc.abstractmethod def select(self, plugin, context, resource_hostable_agents, resource_hosted_agents, num_agents_needed): """Return a subset of agents based on the specific scheduling logic.""" def schedule(self, plugin, context, resource): """Select and bind agents to a given resource.""" if not self.resource_filter: return # filter the agents that can host the resource filtered_agents_dict = self.resource_filter.filter_agents( plugin, context, resource) num_agents = filtered_agents_dict['n_agents'] hostable_agents = filtered_agents_dict['hostable_agents'] hosted_agents = filtered_agents_dict['hosted_agents'] chosen_agents = self.select(plugin, context, hostable_agents, hosted_agents, num_agents) # bind the resource to the agents self.resource_filter.bind(context, chosen_agents, resource['id']) return chosen_agents class BaseChanceScheduler(BaseScheduler): """Choose agents randomly.""" def __init__(self, resource_filter): self.resource_filter = resource_filter def select(self, plugin, context, resource_hostable_agents, resource_hosted_agents, num_agents_needed): chosen_agents = random.sample(resource_hostable_agents, num_agents_needed) return chosen_agents class BaseWeightScheduler(BaseScheduler): """Choose agents based on load.""" def __init__(self, resource_filter): self.resource_filter = resource_filter def select(self, plugin, context, resource_hostable_agents, resource_hosted_agents, num_agents_needed): chosen_agents = sorted(resource_hostable_agents, key=attrgetter('load'))[0:num_agents_needed] return chosen_agents neutron-8.4.0/neutron/scheduler/__init__.py0000664000567000056710000000000013044372736022136 0ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/scheduler/base_resource_filter.py0000664000567000056710000000345213044372760024600 0ustar jenkinsjenkins00000000000000# Copyright (c) 2015 OpenStack Foundation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import abc class BaseResourceFilter(object): """Encapsulate logic that is specific to the resource type.""" @abc.abstractmethod def filter_agents(self, plugin, context, resource): """Return the agents that can host the resource.""" def bind(self, context, agents, resource_id): """Bind the resource to the agents.""" with context.session.begin(subtransactions=True): res = {} for agent in agents: # Load is being incremented here to reflect latest agent load # even within the agent report interval. This will be very # much necessary when bulk resource creation happens within a # agent report interval time. # NOTE: The resource being bound might or might not be of the # same type which is accounted for the load. It isn't a # problem because "+ 1" here does not meant to predict # precisely what the load of the agent will be. The value will # be corrected by the agent on the next report interval. 
res['load'] = agent.load + 1 agent.update(res) neutron-8.4.0/neutron/scheduler/dhcp_agent_scheduler.py0000664000567000056710000002505613044372760024550 0ustar jenkinsjenkins00000000000000# Copyright (c) 2013 OpenStack Foundation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import collections import heapq from oslo_config import cfg from oslo_db import exception as db_exc from oslo_log import log as logging from sqlalchemy import sql from neutron._i18n import _LI, _LW from neutron.common import constants from neutron.db import agents_db from neutron.db import agentschedulers_db from neutron.extensions import availability_zone as az_ext from neutron.scheduler import base_resource_filter from neutron.scheduler import base_scheduler LOG = logging.getLogger(__name__) class AutoScheduler(object): def auto_schedule_networks(self, plugin, context, host): """Schedule non-hosted networks to the DHCP agent on the specified host. """ agents_per_network = cfg.CONF.dhcp_agents_per_network # a list of (agent, net_id) tuples bindings_to_add = [] with context.session.begin(subtransactions=True): fields = ['network_id', 'enable_dhcp'] subnets = plugin.get_subnets(context, fields=fields) net_ids = set(s['network_id'] for s in subnets if s['enable_dhcp']) if not net_ids: LOG.debug('No non-hosted networks') return False query = context.session.query(agents_db.Agent) query = query.filter(agents_db.Agent.agent_type == constants.AGENT_TYPE_DHCP, agents_db.Agent.host == host, agents_db.Agent.admin_state_up == sql.true()) dhcp_agents = query.all() for dhcp_agent in dhcp_agents: if agents_db.AgentDbMixin.is_agent_down( dhcp_agent.heartbeat_timestamp): LOG.warning(_LW('DHCP agent %s is not active'), dhcp_agent.id) continue for net_id in net_ids: agents = plugin.get_dhcp_agents_hosting_networks( context, [net_id]) if len(agents) >= agents_per_network: continue if any(dhcp_agent.id == agent.id for agent in agents): continue net = plugin.get_network(context, net_id) az_hints = (net.get(az_ext.AZ_HINTS) or cfg.CONF.default_availability_zones) if (az_hints and dhcp_agent['availability_zone'] not in az_hints): continue bindings_to_add.append((dhcp_agent, net_id)) # Bind outside the transaction so that one failed binding does not # make the others fail. for agent, net_id in bindings_to_add: self.resource_filter.bind(context, [agent], net_id) return True class ChanceScheduler(base_scheduler.BaseChanceScheduler, AutoScheduler): def __init__(self): super(ChanceScheduler, self).__init__(DhcpFilter()) class WeightScheduler(base_scheduler.BaseWeightScheduler, AutoScheduler): def __init__(self): super(WeightScheduler, self).__init__(DhcpFilter()) class AZAwareWeightScheduler(WeightScheduler): def select(self, plugin, context, resource_hostable_agents, resource_hosted_agents, num_agents_needed): """AZ aware scheduling. If the network has multiple AZs, agents are scheduled as follows: - select the AZ with the fewest agents scheduled for the network (nondeterministic for AZs with the same number of agents scheduled) - choose an agent in the
AZ with WeightScheduler """ hostable_az_agents = collections.defaultdict(list) num_az_agents = {} for agent in resource_hostable_agents: az_agent = agent['availability_zone'] hostable_az_agents[az_agent].append(agent) if az_agent not in num_az_agents: num_az_agents[az_agent] = 0 if num_agents_needed <= 0: return [] for agent in resource_hosted_agents: az_agent = agent['availability_zone'] if az_agent in num_az_agents: num_az_agents[az_agent] += 1 num_az_q = [(value, key) for key, value in num_az_agents.items()] heapq.heapify(num_az_q) chosen_agents = [] while num_agents_needed > 0: num, select_az = heapq.heappop(num_az_q) select_agent = super(AZAwareWeightScheduler, self).select( plugin, context, hostable_az_agents[select_az], [], 1) chosen_agents.append(select_agent[0]) hostable_az_agents[select_az].remove(select_agent[0]) if hostable_az_agents[select_az]: heapq.heappush(num_az_q, (num + 1, select_az)) num_agents_needed -= 1 return chosen_agents class DhcpFilter(base_resource_filter.BaseResourceFilter): def bind(self, context, agents, network_id): """Bind the network to the agents.""" # customize the bind logic bound_agents = agents[:] for agent in agents: context.session.begin(subtransactions=True) # saving agent_id to use it after rollback to avoid # DetachedInstanceError agent_id = agent.id binding = agentschedulers_db.NetworkDhcpAgentBinding() binding.dhcp_agent_id = agent_id binding.network_id = network_id try: context.session.add(binding) # try to actually write the changes and catch integrity # DBDuplicateEntry context.session.commit() except db_exc.DBDuplicateEntry: # it's totally ok, someone just did our job! context.session.rollback() bound_agents.remove(agent) LOG.info(_LI('Agent %s already present'), agent_id) LOG.debug('Network %(network_id)s is scheduled to be ' 'hosted by DHCP agent %(agent_id)s', {'network_id': network_id, 'agent_id': agent_id}) super(DhcpFilter, self).bind(context, bound_agents, network_id) def filter_agents(self, plugin, context, network): """Return the agents that can host the network. This function returns a dictionary which has 3 keys. n_agents: The number of agents should be scheduled. If n_agents=0, all networks are already scheduled or no more agent can host the network. hostable_agents: A list of agents which can host the network. hosted_agents: A list of agents which already hosts the network. """ agents_dict = self._get_network_hostable_dhcp_agents( plugin, context, network) if not agents_dict['hostable_agents'] or agents_dict['n_agents'] <= 0: return {'n_agents': 0, 'hostable_agents': [], 'hosted_agents': agents_dict['hosted_agents']} return agents_dict def _get_dhcp_agents_hosting_network(self, plugin, context, network): """Return dhcp agents hosting the given network or None if a given network is already hosted by enough number of agents. 
""" agents_per_network = cfg.CONF.dhcp_agents_per_network #TODO(gongysh) don't schedule the networks with only # subnets whose enable_dhcp is false with context.session.begin(subtransactions=True): network_hosted_agents = plugin.get_dhcp_agents_hosting_networks( context, [network['id']]) if len(network_hosted_agents) >= agents_per_network: LOG.debug('Network %s is already hosted by enough agents.', network['id']) return return network_hosted_agents def _get_active_agents(self, plugin, context, az_hints): """Return a list of active dhcp agents.""" with context.session.begin(subtransactions=True): filters = {'agent_type': [constants.AGENT_TYPE_DHCP], 'admin_state_up': [True]} if az_hints: filters['availability_zone'] = az_hints active_dhcp_agents = plugin.get_agents_db( context, filters=filters) if not active_dhcp_agents: LOG.warning(_LW('No more DHCP agents')) return [] return active_dhcp_agents def _get_network_hostable_dhcp_agents(self, plugin, context, network): """Provide information on hostable DHCP agents for network. The returned value includes the number of agents that will actually host the given network, a list of DHCP agents that can host the given network, and a list of DHCP agents currently hosting the network. """ hosted_agents = self._get_dhcp_agents_hosting_network(plugin, context, network) if hosted_agents is None: return {'n_agents': 0, 'hostable_agents': [], 'hosted_agents': []} n_agents = cfg.CONF.dhcp_agents_per_network - len(hosted_agents) az_hints = (network.get(az_ext.AZ_HINTS) or cfg.CONF.default_availability_zones) active_dhcp_agents = self._get_active_agents(plugin, context, az_hints) if not active_dhcp_agents: return {'n_agents': 0, 'hostable_agents': [], 'hosted_agents': hosted_agents} hostable_dhcp_agents = [ agent for agent in set(active_dhcp_agents) if agent not in hosted_agents and plugin.is_eligible_agent( context, True, agent) ] if not hostable_dhcp_agents: return {'n_agents': 0, 'hostable_agents': [], 'hosted_agents': hosted_agents} n_agents = min(len(hostable_dhcp_agents), n_agents) return {'n_agents': n_agents, 'hostable_agents': hostable_dhcp_agents, 'hosted_agents': hosted_agents} neutron-8.4.0/neutron/wsgi.py0000664000567000056710000007061313044372760017410 0ustar jenkinsjenkins00000000000000# Copyright 2011 OpenStack Foundation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
""" Utility methods for working with WSGI servers """ from __future__ import print_function import errno import socket import sys import time import eventlet.wsgi from oslo_config import cfg import oslo_i18n from oslo_log import log as logging from oslo_serialization import jsonutils from oslo_service import service as common_service from oslo_service import sslutils from oslo_service import systemd from oslo_service import wsgi from oslo_utils import excutils import six import webob.dec import webob.exc from neutron._i18n import _, _LE, _LI from neutron.common import config from neutron.common import exceptions as exception from neutron import context from neutron.db import api from neutron import worker socket_opts = [ cfg.IntOpt('backlog', default=4096, help=_("Number of backlog requests to configure " "the socket with")), cfg.IntOpt('retry_until_window', default=30, help=_("Number of seconds to keep retrying to listen")), cfg.BoolOpt('use_ssl', default=False, help=_('Enable SSL on the API server')), ] CONF = cfg.CONF CONF.register_opts(socket_opts) wsgi.register_opts(CONF) LOG = logging.getLogger(__name__) def encode_body(body): """Encode unicode body. WebOb requires to encode unicode body used to update response body. """ if isinstance(body, six.text_type): return body.encode('utf-8') return body class WorkerService(worker.NeutronWorker): """Wraps a worker to be handled by ProcessLauncher""" def __init__(self, service, application, disable_ssl=False): self._service = service self._application = application self._disable_ssl = disable_ssl self._server = None def start(self): super(WorkerService, self).start() # When api worker is stopped it kills the eventlet wsgi server which # internally closes the wsgi server socket object. This server socket # object becomes not usable which leads to "Bad file descriptor" # errors on service restart. # Duplicate a socket object to keep a file descriptor usable. dup_sock = self._service._socket.dup() if CONF.use_ssl and not self._disable_ssl: dup_sock = sslutils.wrap(CONF, dup_sock) self._server = self._service.pool.spawn(self._service._run, self._application, dup_sock) def wait(self): if isinstance(self._server, eventlet.greenthread.GreenThread): self._server.wait() def stop(self): if isinstance(self._server, eventlet.greenthread.GreenThread): self._server.kill() self._server = None @staticmethod def reset(): config.reset_service() class Server(object): """Server class to manage multiple WSGI sockets and applications.""" def __init__(self, name, num_threads=None, disable_ssl=False): # Raise the default from 8192 to accommodate large tokens eventlet.wsgi.MAX_HEADER_LINE = CONF.max_header_line self.num_threads = num_threads or CONF.wsgi_default_pool_size self.disable_ssl = disable_ssl # Pool for a greenthread in which wsgi server will be running self.pool = eventlet.GreenPool(1) self.name = name self._server = None # A value of 0 is converted to None because None is what causes the # wsgi server to wait forever. self.client_socket_timeout = CONF.client_socket_timeout or None if CONF.use_ssl and not self.disable_ssl: sslutils.is_enabled(CONF) def _get_socket(self, host, port, backlog): bind_addr = (host, port) # TODO(dims): eventlet's green dns/socket module does not actually # support IPv6 in getaddrinfo(). 
We need to get around this in the # future or monitor upstream for a fix try: info = socket.getaddrinfo(bind_addr[0], bind_addr[1], socket.AF_UNSPEC, socket.SOCK_STREAM)[0] family = info[0] bind_addr = info[-1] except Exception: LOG.exception(_LE("Unable to listen on %(host)s:%(port)s"), {'host': host, 'port': port}) sys.exit(1) sock = None retry_until = time.time() + CONF.retry_until_window while not sock and time.time() < retry_until: try: sock = eventlet.listen(bind_addr, backlog=backlog, family=family) except socket.error as err: with excutils.save_and_reraise_exception() as ctxt: if err.errno == errno.EADDRINUSE: ctxt.reraise = False eventlet.sleep(0.1) if not sock: raise RuntimeError(_("Could not bind to %(host)s:%(port)s " "after trying for %(time)d seconds") % {'host': host, 'port': port, 'time': CONF.retry_until_window}) sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) # sockets can hang around forever without keepalive sock.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1) # This option isn't available in the OS X version of eventlet if hasattr(socket, 'TCP_KEEPIDLE'): sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_KEEPIDLE, CONF.tcp_keepidle) return sock def start(self, application, port, host='0.0.0.0', workers=0): """Run a WSGI server with the given application.""" self._host = host self._port = port backlog = CONF.backlog self._socket = self._get_socket(self._host, self._port, backlog=backlog) self._launch(application, workers) def _launch(self, application, workers=0): service = WorkerService(self, application, self.disable_ssl) if workers < 1: # The API service should run in the current process. self._server = service # Dump the initial option values cfg.CONF.log_opt_values(LOG, logging.DEBUG) service.start() systemd.notify_once() else: # dispose the whole pool before os.fork, otherwise there will # be shared DB connections in child processes which may cause # DB errors. api.dispose() # The API service runs in a number of child processes. # Minimize the cost of checking for child exit by extending the # wait interval past the default of 0.01s. self._server = common_service.ProcessLauncher(cfg.CONF, wait_interval=1.0) self._server.launch_service(service, workers=workers) @property def host(self): return self._socket.getsockname()[0] if self._socket else self._host @property def port(self): return self._socket.getsockname()[1] if self._socket else self._port def stop(self): self._server.stop() def wait(self): """Wait until all servers have completed running.""" try: self._server.wait() except KeyboardInterrupt: pass def _run(self, application, socket): """Start a WSGI server in a new green thread.""" eventlet.wsgi.server(socket, application, max_size=self.num_threads, log=LOG, keepalive=CONF.wsgi_keep_alive, socket_timeout=self.client_socket_timeout) class Request(wsgi.Request): def best_match_content_type(self): """Determine the most acceptable content-type. 
Based on: 1) URI extension (.json) 2) Content-type header 3) Accept* headers """ # First lookup http request path parts = self.path.rsplit('.', 1) if len(parts) > 1: _format = parts[1] if _format in ['json']: return 'application/{0}'.format(_format) # Then look up content header type_from_header = self.get_content_type() if type_from_header: return type_from_header ctypes = ['application/json'] # Finally search in Accept-* headers bm = self.accept.best_match(ctypes) return bm or 'application/json' def get_content_type(self): # Use a tuple so the membership test matches whole types rather than # doing an accidental substring match against a bare string. allowed_types = ("application/json",) if "Content-Type" not in self.headers: LOG.debug("Missing Content-Type") return None _type = self.content_type if _type in allowed_types: return _type return None def best_match_language(self): """Determines the best available locale from the Accept-Language header. :returns: the best language match or None if the 'Accept-Language' header was not available in the request. """ if not self.accept_language: return None all_languages = oslo_i18n.get_available_languages('neutron') return self.accept_language.best_match(all_languages) @property def context(self): if 'neutron.context' not in self.environ: self.environ['neutron.context'] = context.get_admin_context() return self.environ['neutron.context'] class ActionDispatcher(object): """Maps method name to local methods through action name.""" def dispatch(self, *args, **kwargs): """Find and call local method.""" action = kwargs.pop('action', 'default') action_method = getattr(self, str(action), self.default) return action_method(*args, **kwargs) def default(self, data): raise NotImplementedError() class DictSerializer(ActionDispatcher): """Default request body serialization.""" def serialize(self, data, action='default'): return self.dispatch(data, action=action) def default(self, data): return "" class JSONDictSerializer(DictSerializer): """Default JSON request body serialization.""" def default(self, data): def sanitizer(obj): return six.text_type(obj) return encode_body(jsonutils.dumps(data, default=sanitizer)) class ResponseHeaderSerializer(ActionDispatcher): """Default response headers serialization.""" def serialize(self, response, data, action): self.dispatch(response, data, action=action) def default(self, response, data): response.status_int = 200 class ResponseSerializer(object): """Encode the necessary pieces into a response object.""" def __init__(self, body_serializers=None, headers_serializer=None): self.body_serializers = { 'application/json': JSONDictSerializer(), } self.body_serializers.update(body_serializers or {}) self.headers_serializer = (headers_serializer or ResponseHeaderSerializer()) def serialize(self, response_data, content_type, action='default'): """Serialize a dict into a string and wrap in a webob.Response object.
:param response_data: dict produced by the Controller :param content_type: expected mimetype of serialized response body """ response = webob.Response() self.serialize_headers(response, response_data, action) self.serialize_body(response, response_data, content_type, action) return response def serialize_headers(self, response, data, action): self.headers_serializer.serialize(response, data, action) def serialize_body(self, response, data, content_type, action): response.headers['Content-Type'] = content_type if data is not None: serializer = self.get_body_serializer(content_type) response.body = serializer.serialize(data, action) def get_body_serializer(self, content_type): try: return self.body_serializers[content_type] except (KeyError, TypeError): raise exception.InvalidContentType(content_type=content_type) class TextDeserializer(ActionDispatcher): """Default request body deserialization.""" def deserialize(self, datastring, action='default'): return self.dispatch(datastring, action=action) def default(self, datastring): return {} class JSONDeserializer(TextDeserializer): def _from_json(self, datastring): try: return jsonutils.loads(datastring) except ValueError: msg = _("Cannot understand JSON") raise exception.MalformedRequestBody(reason=msg) def default(self, datastring): return {'body': self._from_json(datastring)} class RequestHeadersDeserializer(ActionDispatcher): """Default request headers deserializer.""" def deserialize(self, request, action): return self.dispatch(request, action=action) def default(self, request): return {} class RequestDeserializer(object): """Break up a Request object into more useful pieces.""" def __init__(self, body_deserializers=None, headers_deserializer=None): self.body_deserializers = { 'application/json': JSONDeserializer(), } self.body_deserializers.update(body_deserializers or {}) self.headers_deserializer = (headers_deserializer or RequestHeadersDeserializer()) def deserialize(self, request): """Extract necessary pieces of the request. 
:param request: Request object :returns tuple of expected controller action name, dictionary of keyword arguments to pass to the controller, the expected content type of the response """ action_args = self.get_action_args(request.environ) action = action_args.pop('action', None) action_args.update(self.deserialize_headers(request, action)) action_args.update(self.deserialize_body(request, action)) accept = self.get_expected_content_type(request) return (action, action_args, accept) def deserialize_headers(self, request, action): return self.headers_deserializer.deserialize(request, action) def deserialize_body(self, request, action): try: content_type = request.best_match_content_type() except exception.InvalidContentType: LOG.debug("Unrecognized Content-Type provided in request") return {} if content_type is None: LOG.debug("No Content-Type provided in request") return {} if not len(request.body) > 0: LOG.debug("Empty body provided in request") return {} try: deserializer = self.get_body_deserializer(content_type) except exception.InvalidContentType: with excutils.save_and_reraise_exception(): LOG.debug("Unable to deserialize body as provided " "Content-Type") return deserializer.deserialize(request.body, action) def get_body_deserializer(self, content_type): try: return self.body_deserializers[content_type] except (KeyError, TypeError): raise exception.InvalidContentType(content_type=content_type) def get_expected_content_type(self, request): return request.best_match_content_type() def get_action_args(self, request_environment): """Parse dictionary created by routes library.""" try: args = request_environment['wsgiorg.routing_args'][1].copy() except Exception: return {} try: del args['controller'] except KeyError: pass try: del args['format'] except KeyError: pass return args class Application(object): """Base WSGI application wrapper. Subclasses need to implement __call__.""" @classmethod def factory(cls, global_config, **local_config): """Used for paste app factories in paste.deploy config files. Any local configuration (that is, values under the [app:APPNAME] section of the paste config) will be passed into the `__init__` method as kwargs. A hypothetical configuration would look like: [app:wadl] latest_version = 1.3 paste.app_factory = nova.api.fancy_api:Wadl.factory which would result in a call to the `Wadl` class as import neutron.api.fancy_api fancy_api.Wadl(latest_version='1.3') You could of course re-implement the `factory` method in subclasses, but using the kwarg passing it shouldn't be necessary. """ return cls(**local_config) def __call__(self, environ, start_response): r"""Subclasses will probably want to implement __call__ like this: @webob.dec.wsgify(RequestClass=Request) def __call__(self, req): # Any of the following objects work as responses: # Option 1: simple string res = 'message\n' # Option 2: a nicely formatted HTTP exception page res = exc.HTTPForbidden(explanation='Nice try') # Option 3: a webob Response object (in case you need to play with # headers, or you want to be treated like an iterable, or or or) res = Response(); res.app_iter = open('somefile') # Option 4: any wsgi app to be run next res = self.application # Option 5: you can get a Response object for a wsgi app, too, to # play with headers etc res = req.get_response(self.application) # You can then just return your response... return res # ... or set req.response and return None. req.response = res See the end of http://pythonpaste.org/webob/modules/dec.html for more info. 
""" raise NotImplementedError(_('You must implement __call__')) class Resource(Application): """WSGI app that handles (de)serialization and controller dispatch. WSGI app that reads routing information supplied by RoutesMiddleware and calls the requested action method upon its controller. All controller action methods must accept a 'req' argument, which is the incoming wsgi.Request. If the operation is a PUT or POST, the controller method must also accept a 'body' argument (the deserialized request body). They may raise a webob.exc exception or return a dict, which will be serialized by requested content type. """ def __init__(self, controller, fault_body_function, deserializer=None, serializer=None): """Object initialization. :param controller: object that implement methods created by routes lib :param deserializer: object that can serialize the output of a controller into a webob response :param serializer: object that can deserialize a webob request into necessary pieces :param fault_body_function: a function that will build the response body for HTTP errors raised by operations on this resource object """ self.controller = controller self.deserializer = deserializer or RequestDeserializer() self.serializer = serializer or ResponseSerializer() self._fault_body_function = fault_body_function @webob.dec.wsgify(RequestClass=Request) def __call__(self, request): """WSGI method that controls (de)serialization and method dispatch.""" LOG.info(_LI("%(method)s %(url)s"), {"method": request.method, "url": request.url}) try: action, args, accept = self.deserializer.deserialize(request) except exception.InvalidContentType: msg = _("Unsupported Content-Type") LOG.exception(_LE("InvalidContentType: %s"), msg) return Fault(webob.exc.HTTPBadRequest(explanation=msg)) except exception.MalformedRequestBody: msg = _("Malformed request body") LOG.exception(_LE("MalformedRequestBody: %s"), msg) return Fault(webob.exc.HTTPBadRequest(explanation=msg)) try: action_result = self.dispatch(request, action, args) except webob.exc.HTTPException as ex: LOG.info(_LI("HTTP exception thrown: %s"), ex) action_result = Fault(ex, self._fault_body_function) except Exception: LOG.exception(_LE("Internal error")) # Do not include the traceback to avoid returning it to clients. 
action_result = Fault(webob.exc.HTTPServerError(), self._fault_body_function) if isinstance(action_result, dict) or action_result is None: response = self.serializer.serialize(action_result, accept, action=action) else: response = action_result try: LOG.info(_LI("%(url)s returned with HTTP %(status)d"), dict(url=request.url, status=response.status_int)) except AttributeError as e: LOG.info(_LI("%(url)s returned a fault: %(exception)s"), dict(url=request.url, exception=e)) return response def dispatch(self, request, action, action_args): """Find the action-specific method on the controller and call it.""" controller_method = getattr(self.controller, action) try: # NOTE(salvatore-orlando): the controller method must have # an argument whose name is 'request' return controller_method(request=request, **action_args) except TypeError as exc: LOG.exception(exc) return Fault(webob.exc.HTTPBadRequest()) def _default_body_function(wrapped_exc): code = wrapped_exc.status_int fault_data = { 'Error': { 'code': code, 'message': wrapped_exc.explanation}} # 'code' is an attribute on the fault tag itself metadata = {'attributes': {'Error': 'code'}} return fault_data, metadata class Fault(webob.exc.HTTPException): """Generates an HTTP response from a webob HTTP exception.""" def __init__(self, exception, body_function=None): """Creates a Fault for the given webob.exc.exception.""" self.wrapped_exc = exception self.status_int = self.wrapped_exc.status_int self._body_function = body_function or _default_body_function @webob.dec.wsgify(RequestClass=Request) def __call__(self, req): """Generate a WSGI response based on the exception passed to ctor.""" # Replace the body with fault details. fault_data, metadata = self._body_function(self.wrapped_exc) content_type = req.best_match_content_type() serializer = { 'application/json': JSONDictSerializer(), }[content_type] self.wrapped_exc.body = serializer.serialize(fault_data) self.wrapped_exc.content_type = content_type return self.wrapped_exc # NOTE(salvatore-orlando): this class will go once the # extension API framework is updated class Controller(object): """WSGI app that dispatches to methods. WSGI app that reads routing information supplied by RoutesMiddleware and calls the requested action method upon itself. All action methods must, in addition to their normal parameters, accept a 'req' argument which is the incoming wsgi.Request. They raise a webob.exc exception, or return a dict which will be serialized by requested content type. """ @webob.dec.wsgify(RequestClass=Request) def __call__(self, req): """Call the method specified in req.environ by RoutesMiddleware.""" arg_dict = req.environ['wsgiorg.routing_args'][1] action = arg_dict['action'] method = getattr(self, action) del arg_dict['controller'] del arg_dict['action'] if 'format' in arg_dict: del arg_dict['format'] arg_dict['request'] = req result = method(**arg_dict) if isinstance(result, dict) or result is None: if result is None: status = 204 content_type = '' body = None else: status = 200 content_type = req.best_match_content_type() body = self._serialize(result, content_type) response = webob.Response(status=status, content_type=content_type, body=body) LOG.debug("%(url)s returned with HTTP %(status)d", dict(url=req.url, status=response.status_int)) return response else: return result def _serialize(self, data, content_type): """Serialize the given dict to the provided content_type.
Uses self._serialization_metadata if it exists, which is a dict mapping MIME types to information needed to serialize to that type. """ _metadata = getattr(type(self), '_serialization_metadata', {}) serializer = Serializer(_metadata) try: return serializer.serialize(data, content_type) except exception.InvalidContentType: msg = _('The requested content type %s is invalid.') % content_type raise webob.exc.HTTPNotAcceptable(msg) def _deserialize(self, data, content_type): """Deserialize the request body to the specified content type. Uses self._serialization_metadata if it exists, which is a dict mapping MIME types to information needed to serialize to that type. """ _metadata = getattr(type(self), '_serialization_metadata', {}) serializer = Serializer(_metadata) return serializer.deserialize(data, content_type)['body'] # NOTE(salvatore-orlando): this class will go once the # extension API framework is updated class Serializer(object): """Serializes and deserializes dictionaries to certain MIME types.""" def __init__(self, metadata=None): """Create a serializer based on the given WSGI environment. 'metadata' is an optional dict mapping MIME types to information needed to serialize a dictionary to that type. """ self.metadata = metadata or {} def _get_serialize_handler(self, content_type): handlers = { 'application/json': JSONDictSerializer(), } try: return handlers[content_type] except Exception: raise exception.InvalidContentType(content_type=content_type) def serialize(self, data, content_type): """Serialize a dictionary into the specified content type.""" return self._get_serialize_handler(content_type).serialize(data) def deserialize(self, datastring, content_type): """Deserialize a string to a dictionary. The string must be in the format of a supported MIME type. """ try: return self.get_deserialize_handler(content_type).deserialize( datastring) except Exception: raise webob.exc.HTTPBadRequest(_("Could not deserialize data")) def get_deserialize_handler(self, content_type): handlers = { 'application/json': JSONDeserializer(), } try: return handlers[content_type] except Exception: raise exception.InvalidContentType(content_type=content_type) neutron-8.4.0/neutron/hacking/0000775000567000056710000000000013044373210017451 5ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/hacking/__init__.py0000664000567000056710000000000013044372736021564 0ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/hacking/checks.py0000664000567000056710000002106513044372760021300 0ustar jenkinsjenkins00000000000000# Copyright (c) 2014 OpenStack Foundation. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import re import pep8 import six # Guidelines for writing new hacking checks # # - Use only for Neutron specific tests. OpenStack general tests # should be submitted to the common 'hacking' module. # - Pick numbers in the range N3xx. Find the current test with # the highest allocated number and then pick the next value. # - Keep the test method code in the source file ordered based # on the N3xx value. 
# - List the new rule in the top level HACKING.rst file # - Add test cases for each new rule to # neutron/tests/unit/hacking/test_checks.py _all_log_levels = { 'reserved': '_', # this should never be used with a log unless # it is a variable used for a log message and # a exception 'error': '_LE', 'info': '_LI', 'warning': '_LW', 'critical': '_LC', 'exception': '_LE', } _all_hints = set(_all_log_levels.values()) mutable_default_args = re.compile(r"^\s*def .+\((.+=\{\}|.+=\[\])") def _regex_for_level(level, hint): return r".*LOG\.%(level)s\(\s*((%(wrong_hints)s)\(|'|\")" % { 'level': level, 'wrong_hints': '|'.join(_all_hints - set([hint])), } log_translation_hint = re.compile( '|'.join('(?:%s)' % _regex_for_level(level, hint) for level, hint in six.iteritems(_all_log_levels))) log_warn = re.compile( r"(.)*LOG\.(warn)\(\s*('|\"|_)") contextlib_nested = re.compile(r"^with (contextlib\.)?nested\(") def validate_log_translations(logical_line, physical_line, filename): # Translations are not required in the test directory if "neutron/tests" in filename: return if pep8.noqa(physical_line): return msg = "N320: Log messages require translation hints!" if log_translation_hint.match(logical_line): yield (0, msg) def use_jsonutils(logical_line, filename): msg = "N321: jsonutils.%(fun)s must be used instead of json.%(fun)s" # Some files in the tree are not meant to be run from inside Neutron # itself, so we should not complain about them not using jsonutils json_check_skipped_patterns = [ "neutron/plugins/ml2/drivers/openvswitch/agent/xenapi/etc/xapi.d/" "plugins/netwrap", ] for pattern in json_check_skipped_patterns: if pattern in filename: return if "json." in logical_line: json_funcs = ['dumps(', 'dump(', 'loads(', 'load('] for f in json_funcs: pos = logical_line.find('json.%s' % f) if pos != -1: yield (pos, msg % {'fun': f[:-1]}) def no_translate_debug_logs(logical_line, filename): """Check for 'LOG.debug(_(' and 'LOG.debug(_Lx(' As per our translation policy, https://wiki.openstack.org/wiki/LoggingStandards#Log_Translation we shouldn't translate debug level logs. * This check assumes that 'LOG' is a logger. N319 """ for hint in _all_hints: if logical_line.startswith("LOG.debug(%s(" % hint): yield(0, "N319 Don't translate debug level logs") def check_assert_called_once_with(logical_line, filename): # Try to detect unintended calls of nonexistent mock methods like: # assert_called_once # assertCalledOnceWith # assert_has_called # called_once_with if 'neutron/tests/' in filename: if '.assert_called_once_with(' in logical_line: return uncased_line = logical_line.lower().replace('_', '') check_calls = ['.assertcalledonce', '.calledoncewith'] if any(x for x in check_calls if x in uncased_line): msg = ("N322: Possible use of no-op mock method. " "please use assert_called_once_with.") yield (0, msg) if '.asserthascalled' in uncased_line: msg = ("N322: Possible use of no-op mock method. " "please use assert_has_calls.") yield (0, msg) def check_no_contextlib_nested(logical_line, filename): msg = ("N324: contextlib.nested is deprecated. With Python 2.7 and later " "the with-statement supports multiple nested objects. See https://" "docs.python.org/2/library/contextlib.html#contextlib.nested for " "more information.") if contextlib_nested.match(logical_line): yield(0, msg) def check_python3_xrange(logical_line): if re.search(r"\bxrange\s*\(", logical_line): yield(0, "N325: Do not use xrange. 
Use range, or six.moves.range for " "large loops.") def check_no_basestring(logical_line): if re.search(r"\bbasestring\b", logical_line): msg = ("N326: basestring is not Python3-compatible, use " "six.string_types instead.") yield(0, msg) def check_python3_no_iteritems(logical_line): if re.search(r".*\.iteritems\(\)", logical_line): msg = ("N327: Use six.iteritems() instead of dict.iteritems().") yield(0, msg) def check_asserttrue(logical_line, filename): if 'neutron/tests/' in filename: if re.search(r"assertEqual\(\s*True,[^,]*(,[^,]*)?\)", logical_line): msg = ("N328: Use assertTrue(observed) instead of " "assertEqual(True, observed)") yield (0, msg) if re.search(r"assertEqual\([^,]*,\s*True(,[^,]*)?\)", logical_line): msg = ("N328: Use assertTrue(observed) instead of " "assertEqual(True, observed)") yield (0, msg) def no_mutable_default_args(logical_line): msg = "N329: Method's default argument shouldn't be mutable!" if mutable_default_args.match(logical_line): yield (0, msg) def check_assertfalse(logical_line, filename): if 'neutron/tests/' in filename: if re.search(r"assertEqual\(\s*False,[^,]*(,[^,]*)?\)", logical_line): msg = ("N328: Use assertFalse(observed) instead of " "assertEqual(False, observed)") yield (0, msg) if re.search(r"assertEqual\([^,]*,\s*False(,[^,]*)?\)", logical_line): msg = ("N328: Use assertFalse(observed) instead of " "assertEqual(False, observed)") yield (0, msg) def check_assertempty(logical_line, filename): if 'neutron/tests/' in filename: msg = ("N330: Use assertEqual(*empty*, observed) instead of " "assertEqual(observed, *empty*). *empty* contains " "{}, [], (), set(), '', \"\"") empties = r"(\[\s*\]|\{\s*\}|\(\s*\)|set\(\s*\)|'\s*'|\"\s*\")" reg = r"assertEqual\(([^,]*,\s*)+?%s\)\s*$" % empties if re.search(reg, logical_line): yield (0, msg) def check_assertisinstance(logical_line, filename): if 'neutron/tests/' in filename: if re.search(r"assertTrue\(\s*isinstance\(\s*[^,]*,\s*[^,]*\)\)", logical_line): msg = ("N331: Use assertIsInstance(observed, type) instead " "of assertTrue(isinstance(observed, type))") yield (0, msg) def check_assertequal_for_httpcode(logical_line, filename): msg = ("N332: Use assertEqual(expected_http_code, observed_http_code) " "instead of assertEqual(observed_http_code, expected_http_code)") if 'neutron/tests/' in filename: if re.search(r"assertEqual\(\s*[^,]*,[^,]*HTTP[^\.]*\.code\s*\)", logical_line): yield (0, msg) def check_log_warn_deprecated(logical_line, filename): msg = "N333: Use LOG.warning due to compatibility with py3" if log_warn.match(logical_line): yield (0, msg) def factory(register): register(validate_log_translations) register(use_jsonutils) register(check_assert_called_once_with) register(no_translate_debug_logs) register(check_no_contextlib_nested) register(check_python3_xrange) register(check_no_basestring) register(check_python3_no_iteritems) register(check_asserttrue) register(no_mutable_default_args) register(check_assertfalse) register(check_assertempty) register(check_assertisinstance) register(check_assertequal_for_httpcode) register(check_log_warn_deprecated) neutron-8.4.0/neutron/services/0000775000567000056710000000000013044373210017670 5ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/services/externaldns/0000775000567000056710000000000013044373210022217 5ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/services/externaldns/__init__.py0000664000567000056710000000000013044372736024332 0ustar 
jenkinsjenkins00000000000000neutron-8.4.0/neutron/services/externaldns/driver.py0000664000567000056710000000523713044372736024107 0ustar jenkinsjenkins00000000000000# Copyright (c) 2016 IBM # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import abc from oslo_config import cfg from oslo_log import log import six from neutron import manager LOG = log.getLogger(__name__) @six.add_metaclass(abc.ABCMeta) class ExternalDNSService(object): """Interface definition for an external dns service driver.""" def __init__(self): """Initialize external dns service driver.""" @classmethod def get_instance(cls): """Return an instance of the configured external DNS driver.""" external_dns_driver_name = cfg.CONF.external_dns_driver mgr = manager.NeutronManager LOG.debug("Loading external dns driver: %s", external_dns_driver_name) driver_class = mgr.load_class_for_provider( 'neutron.services.external_dns_drivers', external_dns_driver_name) return driver_class() @abc.abstractmethod def create_record_set(self, context, dns_domain, dns_name, records): """Create a record set in the specified zone. :param context: neutron api request context :type context: neutron.context.Context :param dns_domain: the dns_domain where the record set will be created :type dns_domain: String :param dns_name: the name associated with the record set :type dns_name: String :param records: the records in the set :type records: List of Strings :raises: neutron.extensions.dns.DNSDomainNotFound neutron.extensions.dns.DuplicateRecordSet """ @abc.abstractmethod def delete_record_set(self, context, dns_domain, dns_name, records): """Delete a record set in the specified zone. :param context: neutron api request context :type context: neutron.context.Context :param dns_domain: the dns_domain from which the record set will be deleted :type dns_domain: String :param dns_name: the dns_name associated with the record set to be deleted :type dns_name: String :param records: the records in the set to be deleted :type records: List of Strings """ neutron-8.4.0/neutron/services/externaldns/drivers/0000775000567000056710000000000013044373210023675 5ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/services/externaldns/drivers/__init__.py0000664000567000056710000000000013044372736026010 0ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/services/externaldns/drivers/designate/0000775000567000056710000000000013044373210025640 5ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/services/externaldns/drivers/designate/__init__.py0000664000567000056710000000000013044372736027753 0ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/services/externaldns/drivers/designate/driver.py0000664000567000056710000002177213044372760027527 0ustar jenkinsjenkins00000000000000# Copyright (c) 2016 IBM # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import netaddr from designateclient import exceptions as d_exc from designateclient.v2 import client as d_client from keystoneclient.auth.identity.generic import password from keystoneclient.auth import token_endpoint from keystoneclient import session from oslo_config import cfg from oslo_log import log from neutron._i18n import _ from neutron.extensions import dns from neutron.services.externaldns import driver IPV4_PTR_ZONE_PREFIX_MIN_SIZE = 8 IPV4_PTR_ZONE_PREFIX_MAX_SIZE = 24 IPV6_PTR_ZONE_PREFIX_MIN_SIZE = 4 IPV6_PTR_ZONE_PREFIX_MAX_SIZE = 124 LOG = log.getLogger(__name__) _SESSION = None designate_opts = [ cfg.StrOpt('url', help=_('URL for connecting to designate')), cfg.StrOpt('admin_username', help=_('Username for connecting to designate in admin ' 'context')), cfg.StrOpt('admin_password', help=_('Password for connecting to designate in admin ' 'context'), secret=True), cfg.StrOpt('admin_tenant_id', help=_('Tenant id for connecting to designate in admin ' 'context')), cfg.StrOpt('admin_tenant_name', help=_('Tenant name for connecting to designate in admin ' 'context')), cfg.StrOpt('admin_auth_url', help=_('Authorization URL for connecting to designate in admin ' 'context')), cfg.BoolOpt('insecure', default=False, help=_('Skip cert validation for SSL based admin_auth_url')), cfg.StrOpt('ca_cert', help=_('CA certificate file to use to verify ' 'connecting clients')), cfg.BoolOpt('allow_reverse_dns_lookup', default=True, help=_('Allow the creation of PTR records')), cfg.IntOpt('ipv4_ptr_zone_prefix_size', default=24, help=_('Number of bits in an ipv4 PTR zone that will be considered ' 'network prefix. It has to align to byte boundary. Minimum ' 'value is 8. Maximum value is 24. As a consequence, range ' 'of values is 8, 16 and 24')), cfg.IntOpt('ipv6_ptr_zone_prefix_size', default=120, help=_('Number of bits in an ipv6 PTR zone that will be considered ' 'network prefix. It has to align to nyble boundary. Minimum ' 'value is 4. Maximum value is 124. As a consequence, range ' 'of values is 4, 8, 12, 16,..., 124')), cfg.StrOpt('ptr_zone_email', default='', help=_('The email address to be used when creating PTR zones. 
' 'If not specified, the email address will be ' 'admin@')), ] DESIGNATE_GROUP = 'designate' CONF = cfg.CONF CONF.register_opts(designate_opts, DESIGNATE_GROUP) def get_clients(context): global _SESSION if not _SESSION: if CONF.designate.insecure: verify = False else: verify = CONF.designate.ca_cert or True _SESSION = session.Session(verify=verify) auth = token_endpoint.Token(CONF.designate.url, context.auth_token) client = d_client.Client(session=_SESSION, auth=auth) admin_auth = password.Password( auth_url=CONF.designate.admin_auth_url, username=CONF.designate.admin_username, password=CONF.designate.admin_password, tenant_name=CONF.designate.admin_tenant_name, tenant_id=CONF.designate.admin_tenant_id) admin_client = d_client.Client(session=_SESSION, auth=admin_auth) return client, admin_client class Designate(driver.ExternalDNSService): """Driver for Designate.""" def __init__(self): ipv4_ptr_zone_size = CONF.designate.ipv4_ptr_zone_prefix_size ipv6_ptr_zone_size = CONF.designate.ipv6_ptr_zone_prefix_size if (ipv4_ptr_zone_size < IPV4_PTR_ZONE_PREFIX_MIN_SIZE or ipv4_ptr_zone_size > IPV4_PTR_ZONE_PREFIX_MAX_SIZE or (ipv4_ptr_zone_size % 8) != 0): raise dns.InvalidPTRZoneConfiguration( parameter='ipv4_ptr_zone_size', number='8', maximum=str(IPV4_PTR_ZONE_PREFIX_MAX_SIZE), minimum=str(IPV4_PTR_ZONE_PREFIX_MIN_SIZE)) if (ipv6_ptr_zone_size < IPV6_PTR_ZONE_PREFIX_MIN_SIZE or ipv6_ptr_zone_size > IPV6_PTR_ZONE_PREFIX_MAX_SIZE or (ipv6_ptr_zone_size % 4) != 0): raise dns.InvalidPTRZoneConfiguration( parameter='ipv6_ptr_zone_size', number='4', maximum=str(IPV6_PTR_ZONE_PREFIX_MAX_SIZE), minimum=str(IPV6_PTR_ZONE_PREFIX_MIN_SIZE)) def create_record_set(self, context, dns_domain, dns_name, records): designate, designate_admin = get_clients(context) v4, v6 = self._classify_records(records) try: if v4: designate.recordsets.create(dns_domain, dns_name, 'A', v4) if v6: designate.recordsets.create(dns_domain, dns_name, 'AAAA', v6) except d_exc.NotFound: raise dns.DNSDomainNotFound(dns_domain=dns_domain) except d_exc.Conflict: raise dns.DuplicateRecordSet(dns_name=dns_name) if not CONF.designate.allow_reverse_dns_lookup: return # Set up the PTR records recordset_name = '%s.%s' % (dns_name, dns_domain) ptr_zone_email = 'admin@%s' % dns_domain[:-1] if CONF.designate.ptr_zone_email: ptr_zone_email = CONF.designate.ptr_zone_email for record in records: in_addr_name = netaddr.IPAddress(record).reverse_dns in_addr_zone_name = self._get_in_addr_zone_name(in_addr_name) in_addr_zone_description = ( 'An %s zone for reverse lookups set up by Neutron.' % '.'.join(in_addr_name.split('.')[-3:])) try: # Since we don't delete in-addr zones, assume it already # exists. 
If it doesn't, create it designate_admin.recordsets.create(in_addr_zone_name, in_addr_name, 'PTR', [recordset_name]) except d_exc.NotFound: designate_admin.zones.create( in_addr_zone_name, email=ptr_zone_email, description=in_addr_zone_description) designate_admin.recordsets.create(in_addr_zone_name, in_addr_name, 'PTR', [recordset_name]) def _classify_records(self, records): v4 = [] v6 = [] for record in records: if netaddr.IPAddress(record).version == 4: v4.append(record) else: v6.append(record) return v4, v6 def _get_in_addr_zone_name(self, in_addr_name): units = self._get_bytes_or_nybles_to_skip(in_addr_name) return '.'.join(in_addr_name.split('.')[units:]) def _get_bytes_or_nybles_to_skip(self, in_addr_name): if 'in-addr.arpa' in in_addr_name: return int((32 - CONF.designate.ipv4_ptr_zone_prefix_size) / 8) return int((128 - CONF.designate.ipv6_ptr_zone_prefix_size) / 4) def delete_record_set(self, context, dns_domain, dns_name, records): designate, designate_admin = get_clients(context) ids_to_delete = self._get_ids_ips_to_delete( dns_domain, '%s.%s' % (dns_name, dns_domain), records, designate) for _id in ids_to_delete: designate.recordsets.delete(dns_domain, _id) if not CONF.designate.allow_reverse_dns_lookup: return for record in records: in_addr_name = netaddr.IPAddress(record).reverse_dns in_addr_zone_name = self._get_in_addr_zone_name(in_addr_name) designate_admin.recordsets.delete(in_addr_zone_name, in_addr_name) def _get_ids_ips_to_delete(self, dns_domain, name, records, designate_client): try: recordsets = designate_client.recordsets.list( dns_domain, criterion={"name": "%s" % name}) except d_exc.NotFound: raise dns.DNSDomainNotFound(dns_domain=dns_domain) ids = [rec['id'] for rec in recordsets] ips = [ip for rec in recordsets for ip in rec['records']] if set(ips) != set(records): raise dns.DuplicateRecordSet(dns_name=name) return ids neutron-8.4.0/neutron/services/__init__.py0000664000567000056710000000000013044372736022003 0ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/services/firewall/0000775000567000056710000000000013044373210021475 5ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/services/firewall/__init__.py0000664000567000056710000000000013044372760023605 0ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/services/firewall/agents/0000775000567000056710000000000013044373210022756 5ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/services/firewall/agents/l3reference/0000775000567000056710000000000013044373210025153 5ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/services/firewall/agents/l3reference/__init__.py0000664000567000056710000000000013044372760027263 0ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/services/firewall/agents/l3reference/firewall_l3_agent.py0000664000567000056710000003132313044372760031121 0ustar jenkinsjenkins00000000000000# Copyright (c) 2013 OpenStack Foundation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
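# ---------------------------------------------------------------------------
# [Editor's note] Illustrative sketch only -- not part of the neutron source.
# It shows how the Designate driver above derives the reverse-lookup zone
# for a PTR record: netaddr expands the address to its reverse-DNS form and
# one label is stripped per byte (IPv4) or nybble (IPv6) of host bits. The
# defaults below assume the driver's default prefix sizes (24 and 120).
import netaddr

def in_addr_zone_name(record, ipv4_prefix=24, ipv6_prefix=120):
    in_addr_name = netaddr.IPAddress(record).reverse_dns
    if 'in-addr.arpa' in in_addr_name:
        labels_to_skip = int((32 - ipv4_prefix) / 8)   # IPv4 labels = bytes
    else:
        labels_to_skip = int((128 - ipv6_prefix) / 4)  # IPv6 labels = nybbles
    return '.'.join(in_addr_name.split('.')[labels_to_skip:])

# '10.0.0.5' reverses to '5.0.0.10.in-addr.arpa.'; skipping the single host
# byte yields the /24 zone that holds the PTR record:
print(in_addr_zone_name('10.0.0.5'))  # -> 0.0.10.in-addr.arpa.
# ---------------------------------------------------------------------------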
from oslo_config import cfg from oslo_log import log as logging from oslo_utils import importutils from neutron._i18n import _, _LE from neutron.agent.linux import ip_lib from neutron.common import exceptions as nexception from neutron import context from neutron.plugins.common import constants from neutron.services.firewall.agents import firewall_agent_api as api from neutron.services import provider_configuration as provconf FIREWALL_PLUGIN = 'q-firewall-plugin' LOG = logging.getLogger(__name__) class FWaaSL3PluginApi(api.FWaaSPluginApiMixin): """Agent side of the FWaaS agent to FWaaS Plugin RPC API.""" def __init__(self, topic, host): super(FWaaSL3PluginApi, self).__init__(topic, host) def get_firewalls_for_tenant(self, context, **kwargs): """Get the Firewalls with rules from the Plugin to send to driver.""" LOG.debug("Retrieve Firewall with rules from Plugin") cctxt = self.client.prepare() return cctxt.call(context, 'get_firewalls_for_tenant', host=self.host) def get_tenants_with_firewalls(self, context, **kwargs): """Get all Tenants that have Firewalls configured from plugin.""" LOG.debug("Retrieve Tenants with Firewalls configured from Plugin") cctxt = self.client.prepare() return cctxt.call(context, 'get_tenants_with_firewalls', host=self.host) class FWaaSL3AgentRpcCallback(api.FWaaSAgentRpcCallbackMixin): """FWaaS Agent support to be used by Neutron L3 agent.""" def __init__(self, conf): LOG.debug("Initializing firewall agent") self.conf = conf fwaas_driver_class_path = provconf.get_provider_driver_class( cfg.CONF.fwaas.driver) self.fwaas_enabled = cfg.CONF.fwaas.enabled # None means l3-agent has no information on the server # configuration due to the lack of RPC support. if self.neutron_service_plugins is not None: fwaas_plugin_configured = (constants.FIREWALL in self.neutron_service_plugins) if fwaas_plugin_configured and not self.fwaas_enabled: msg = _("FWaaS plugin is configured in the server side, but " "FWaaS is disabled in L3-agent.") LOG.error(msg) raise SystemExit(1) self.fwaas_enabled = self.fwaas_enabled and fwaas_plugin_configured if self.fwaas_enabled: try: self.fwaas_driver = importutils.import_object( fwaas_driver_class_path) LOG.debug("FWaaS Driver Loaded: '%s'", fwaas_driver_class_path) except ImportError: msg = _('Error importing FWaaS device driver: %s') raise ImportError(msg % fwaas_driver_class_path) self.services_sync = False # setup RPC to msg fwaas plugin self.fwplugin_rpc = FWaaSL3PluginApi(FIREWALL_PLUGIN, conf.host) super(FWaaSL3AgentRpcCallback, self).__init__(host=conf.host) def _get_router_info_list_for_tenant(self, routers, tenant_id): """Returns the list of router info objects on which to apply the fw.""" root_ip = ip_lib.IPWrapper() # Get the routers for the tenant router_ids = [ router['id'] for router in routers if router['tenant_id'] == tenant_id] local_ns_list = root_ip.get_namespaces() router_info_list = [] # Pick up namespaces for Tenant Routers for rid in router_ids: # for routers without an interface - get_routers returns # the router - but this is not yet populated in router_info if rid not in self.router_info: continue router_ns = self.router_info[rid].ns_name if router_ns in local_ns_list: router_info_list.append(self.router_info[rid]) return router_info_list def _invoke_driver_for_plugin_api(self, context, fw, func_name): """Invoke driver method for plugin API and provide status back.""" LOG.debug("%(func_name)s from agent for fw: %(fwid)s", {'func_name': func_name, 'fwid': fw['id']}) try: routers = self.plugin_rpc.get_routers(context) 
router_info_list = self._get_router_info_list_for_tenant( routers, fw['tenant_id']) if not router_info_list: LOG.debug('No Routers on tenant: %s', fw['tenant_id']) # fw was created before any routers were added, and if a # delete is sent then we need to ack so that plugin can # cleanup. if func_name == 'delete_firewall': self.fwplugin_rpc.firewall_deleted(context, fw['id']) return LOG.debug("Apply fw on Router List: '%s'", [ri.router['id'] for ri in router_info_list]) # call into the driver try: self.fwaas_driver.__getattribute__(func_name)( self.conf.agent_mode, router_info_list, fw) if fw['admin_state_up']: status = constants.ACTIVE else: status = constants.DOWN except nexception.FirewallInternalDriverError: LOG.error(_LE("Firewall Driver Error for %(func_name)s " "for fw: %(fwid)s"), {'func_name': func_name, 'fwid': fw['id']}) status = constants.ERROR # delete needs different handling if func_name == 'delete_firewall': if status in [constants.ACTIVE, constants.DOWN]: self.fwplugin_rpc.firewall_deleted(context, fw['id']) else: self.fwplugin_rpc.set_firewall_status( context, fw['id'], status) except Exception: LOG.exception( _LE("FWaaS RPC failure in %(func_name)s for fw: %(fwid)s"), {'func_name': func_name, 'fwid': fw['id']}) self.services_sync = True return def _invoke_driver_for_sync_from_plugin(self, ctx, router_info_list, fw): """Invoke the delete driver method for status of PENDING_DELETE and update method for all other status to (re)apply on driver which is Idempotent. """ if fw['status'] == constants.PENDING_DELETE: try: self.fwaas_driver.delete_firewall( self.conf.agent_mode, router_info_list, fw) self.fwplugin_rpc.firewall_deleted( ctx, fw['id']) except nexception.FirewallInternalDriverError: LOG.error(_LE("Firewall Driver Error on fw state %(fwmsg)s " "for fw: %(fwid)s"), {'fwmsg': fw['status'], 'fwid': fw['id']}) self.fwplugin_rpc.set_firewall_status( ctx, fw['id'], constants.ERROR) else: # PENDING_UPDATE, PENDING_CREATE, ... try: self.fwaas_driver.update_firewall( self.conf.agent_mode, router_info_list, fw) if fw['admin_state_up']: status = constants.ACTIVE else: status = constants.DOWN except nexception.FirewallInternalDriverError: LOG.error(_LE("Firewall Driver Error on fw state %(fwmsg)s " "for fw: %(fwid)s"), {'fwmsg': fw['status'], 'fwid': fw['id']}) status = constants.ERROR self.fwplugin_rpc.set_firewall_status( ctx, fw['id'], status) def _process_router_add(self, ri): """On router add, get fw with rules from plugin and update driver.""" LOG.debug("Process router add, router_id: '%s'", ri.router['id']) routers = [] routers.append(ri.router) router_info_list = self._get_router_info_list_for_tenant( routers, ri.router['tenant_id']) if router_info_list: # Get the firewall with rules # for the tenant the router is on. 
ctx = context.Context('', ri.router['tenant_id']) fw_list = self.fwplugin_rpc.get_firewalls_for_tenant(ctx) LOG.debug("Process router add, fw_list: '%s'", [fw['id'] for fw in fw_list]) for fw in fw_list: self._invoke_driver_for_sync_from_plugin( ctx, router_info_list, fw) def process_router_add(self, ri): """On router add, get fw with rules from plugin and update driver.""" # avoid msg to plugin when fwaas is not configured if not self.fwaas_enabled: return try: self._process_router_add(ri) except Exception: LOG.exception( _LE("FWaaS RPC info call failed for '%s'."), ri.router['id']) self.services_sync = True def process_services_sync(self, ctx): """On RPC issues sync with plugin and apply the sync data.""" # avoid msg to plugin when fwaas is not configured if not self.fwaas_enabled: return try: # get all routers routers = self.plugin_rpc.get_routers(ctx) # get the list of tenants with firewalls configured # from the plugin tenant_ids = self.fwplugin_rpc.get_tenants_with_firewalls(ctx) LOG.debug("Tenants with Firewalls: '%s'", tenant_ids) for tenant_id in tenant_ids: ctx = context.Context('', tenant_id) fw_list = self.fwplugin_rpc.get_firewalls_for_tenant(ctx) if fw_list: # if fw present on tenant router_info_list = self._get_router_info_list_for_tenant( routers, tenant_id) if router_info_list: LOG.debug("Router List: '%s'", [ri.router['id'] for ri in router_info_list]) LOG.debug("fw_list: '%s'", [fw['id'] for fw in fw_list]) # apply sync data on fw for this tenant for fw in fw_list: # fw, routers present on this host for tenant # install LOG.debug("Apply fw on Router List: '%s'", [ri.router['id'] for ri in router_info_list]) # no need to apply sync data for ACTIVE fw if fw['status'] != constants.ACTIVE: self._invoke_driver_for_sync_from_plugin( ctx, router_info_list, fw) self.services_sync = False except Exception: LOG.exception(_LE("Failed fwaas process services sync")) self.services_sync = True def create_firewall(self, context, firewall, host): """Handle Rpc from plugin to create a firewall.""" return self._invoke_driver_for_plugin_api( context, firewall, 'create_firewall') def update_firewall(self, context, firewall, host): """Handle Rpc from plugin to update a firewall.""" return self._invoke_driver_for_plugin_api( context, firewall, 'update_firewall') def delete_firewall(self, context, firewall, host): """Handle Rpc from plugin to delete a firewall.""" return self._invoke_driver_for_plugin_api( context, firewall, 'delete_firewall') neutron-8.4.0/neutron/services/firewall/agents/__init__.py0000664000567000056710000000000013044372760025066 0ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/services/firewall/agents/firewall_agent_api.py0000664000567000056710000000465313044372760027165 0ustar jenkinsjenkins00000000000000# Copyright (c) 2013 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
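# ---------------------------------------------------------------------------
# [Editor's note] Illustrative sketch only -- not part of the neutron source.
# The agent callback above reports the same status transitions for create,
# update and delete operations. This minimal model captures that decision
# table; the status strings stand in for the constants in
# neutron.plugins.common.constants.
ACTIVE, DOWN, ERROR = 'ACTIVE', 'DOWN', 'ERROR'

def resolve_firewall_status(driver_succeeded, admin_state_up):
    """Status the agent reports back to the FWaaS plugin."""
    if not driver_succeeded:
        # The nexception.FirewallInternalDriverError path.
        return ERROR
    return ACTIVE if admin_state_up else DOWN

assert resolve_firewall_status(True, True) == ACTIVE
assert resolve_firewall_status(True, False) == DOWN
assert resolve_firewall_status(False, True) == ERROR
# ---------------------------------------------------------------------------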
from oslo_config import cfg import oslo_messaging from neutron._i18n import _ from neutron.common import rpc as n_rpc FWaaSOpts = [ cfg.StrOpt( 'driver', default='', help=_("Name of the FWaaS Driver")), cfg.BoolOpt( 'enabled', default=False, help=_("Enable FWaaS")), ] cfg.CONF.register_opts(FWaaSOpts, 'fwaas') class FWaaSPluginApiMixin(object): """Agent side of the FWaaS agent to FWaaS Plugin RPC API.""" def __init__(self, topic, host): self.host = host target = oslo_messaging.Target(topic=topic, version='1.0') self.client = n_rpc.get_client(target) def set_firewall_status(self, context, firewall_id, status): """Make a RPC to set the status of a firewall.""" cctxt = self.client.prepare() return cctxt.call(context, 'set_firewall_status', host=self.host, firewall_id=firewall_id, status=status) def firewall_deleted(self, context, firewall_id): """Make a RPC to indicate that the firewall resources are deleted.""" cctxt = self.client.prepare() return cctxt.call(context, 'firewall_deleted', host=self.host, firewall_id=firewall_id) class FWaaSAgentRpcCallbackMixin(object): """Mixin for FWaaS agent Implementations.""" def __init__(self, host): super(FWaaSAgentRpcCallbackMixin, self).__init__(host) def create_firewall(self, context, firewall, host): """Handle RPC cast from plugin to create a firewall.""" pass def update_firewall(self, context, firewall, host): """Handle RPC cast from plugin to update a firewall.""" pass def delete_firewall(self, context, firewall, host): """Handle RPC cast from plugin to delete a firewall.""" pass neutron-8.4.0/neutron/services/timestamp/0000775000567000056710000000000013044373210021673 5ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/services/timestamp/timestamp_db.py0000664000567000056710000001101413044372760024723 0ustar jenkinsjenkins00000000000000# Copyright 2015 HuaWei Technologies. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import datetime import time from oslo_log import log from oslo_utils import timeutils from sqlalchemy import event from sqlalchemy import exc as sql_exc from sqlalchemy.orm import session as se from neutron._i18n import _LW from neutron.common import exceptions as n_exc from neutron.db import model_base LOG = log.getLogger(__name__) class TimeStamp_db_mixin(object): """Mixin class to add Time Stamp methods.""" ISO8601_TIME_FORMAT = '%Y-%m-%dT%H:%M:%S' def _change_since_result_filter_hook(self, query, filters): # this block is for change_since query # we get the changed_since string from filters. # And translate it from string to datetime type. # Then compare with the timestamp in db which has # datetime type. values = filters and filters.get('changed_since', []) if not values: return query data = filters['changed_since'][0] try: # this block checks queried timestamp format. 
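            # time.strptime() raises ValueError for a string that does not
            # match ISO8601_TIME_FORMAT; the broad except below converts any
            # parse failure into an InvalidInput error for the caller.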
datetime.datetime.fromtimestamp(time.mktime( time.strptime(data, self.ISO8601_TIME_FORMAT))) except Exception: msg = _LW("The input changed_since must be in the " "following format: YYYY-MM-DDTHH:MM:SS") raise n_exc.InvalidInput(error_message=msg) changed_since_string = timeutils.parse_isotime(data) changed_since = (timeutils. normalize_time(changed_since_string)) target_model_class = list(query._mapper_adapter_map.keys())[0] query = query.join(model_base.StandardAttribute, target_model_class.standard_attr_id == model_base.StandardAttribute.id).filter( model_base.StandardAttribute.updated_at >= changed_since) return query def update_timestamp(self, session, context, instances): objs_list = session.new.union(session.dirty) while objs_list: obj = objs_list.pop() if (isinstance(obj, model_base.HasStandardAttributes) and obj.standard_attr_id): obj.standard_attr.updated_at = timeutils.utcnow() def register_db_events(self): event.listen(model_base.StandardAttribute, 'before_insert', self._add_timestamp) event.listen(se.Session, 'before_flush', self.update_timestamp) def unregister_db_events(self): self._unregister_db_event(model_base.StandardAttribute, 'before_insert', self._add_timestamp) self._unregister_db_event(se.Session, 'before_flush', self.update_timestamp) def _unregister_db_event(self, listen_obj, listened_event, listen_hander): try: event.remove(listen_obj, listened_event, listen_hander) except sql_exc.InvalidRequestError: LOG.warning(_LW("No sqlalchemy event for resource %s found"), listen_obj) def _format_timestamp(self, resource_db, result): result['created_at'] = (resource_db.standard_attr.created_at. strftime(self.ISO8601_TIME_FORMAT)) result['updated_at'] = (resource_db.standard_attr.updated_at. strftime(self.ISO8601_TIME_FORMAT)) def extend_resource_dict_timestamp(self, plugin_obj, resource_res, resource_db): if (resource_db and resource_db.standard_attr.created_at and resource_db.standard_attr.updated_at): self._format_timestamp(resource_db, resource_res) def _add_timestamp(self, mapper, _conn, target): if not target.created_at and not target.updated_at: time = timeutils.utcnow() for field in ['created_at', 'updated_at']: setattr(target, field, time) return target neutron-8.4.0/neutron/services/timestamp/__init__.py0000664000567000056710000000000013044372736024006 0ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/services/timestamp/timestamp_plugin.py0000664000567000056710000000364113044372760025643 0ustar jenkinsjenkins00000000000000# Copyright 2015 HuaWei Technologies. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
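# Illustrative request (an assumption, not upstream documentation): with this
# plugin loaded, core resources gain a 'changed_since' query filter handled
# by TimeStamp_db_mixin above, e.g.
#
#     GET /v2.0/networks?changed_since=2016-06-01T10:00:00
#
# where the value must match ISO8601_TIME_FORMAT ('%Y-%m-%dT%H:%M:%S').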
from neutron.api.v2 import attributes from neutron.db import db_base_plugin_v2 from neutron.db import models_v2 from neutron.services import service_base from neutron.services.timestamp import timestamp_db as ts_db class TimeStampPlugin(service_base.ServicePluginBase, ts_db.TimeStamp_db_mixin): """Implements Neutron Timestamp Service plugin.""" supported_extension_aliases = ['timestamp_core'] def __init__(self): super(TimeStampPlugin, self).__init__() self.register_db_events() for resources in [attributes.NETWORKS, attributes.PORTS, attributes.SUBNETS, attributes.SUBNETPOOLS]: db_base_plugin_v2.NeutronDbPluginV2.register_dict_extend_funcs( resources, [self.extend_resource_dict_timestamp]) for model in [models_v2.Network, models_v2.Port, models_v2.Subnet, models_v2.SubnetPool]: db_base_plugin_v2.NeutronDbPluginV2.register_model_query_hook( model, "change_since_query", None, None, self._change_since_result_filter_hook) def get_plugin_type(self): return 'timestamp_core' def get_plugin_description(self): return "Neutron core resources timestamp addition support" neutron-8.4.0/neutron/services/provider_configuration.py0000664000567000056710000002424713044372760025045 0ustar jenkinsjenkins00000000000000# Copyright 2013 OpenStack Foundation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
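# A hypothetical configuration fragment in the format this module parses
# (service type, provider name and driver path below are placeholders):
#
#     [service_providers]
#     service_provider = LOADBALANCER:myprovider:some.module.DriverClass:default
#
# i.e. <service_type>:<name>:<driver>[:default], with at most one default
# provider allowed per service type.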
import importlib
import itertools
import os

from oslo_config import cfg
from oslo_log import log as logging
import stevedore

from neutron._i18n import _, _LW
from neutron.api.v2 import attributes as attr
from neutron.common import exceptions as n_exc


LOG = logging.getLogger(__name__)

SERVICE_PROVIDERS = 'neutron.service_providers'

serviceprovider_opts = [
    cfg.MultiStrOpt('service_provider', default=[],
                    help=_('Defines providers for advanced services '
                           'using the format: '
                           '<service_type>:<name>:<driver>[:default]'))
]

cfg.CONF.register_opts(serviceprovider_opts, 'service_providers')


class NeutronModule(object):
    """A Neutron extension module."""

    def __init__(self, service_module):
        self.module_name = service_module
        self.repo = {
            'mod': self._import_or_none(),
            'ini': None
        }

    def _import_or_none(self):
        try:
            return importlib.import_module(self.module_name)
        except ImportError:
            return None

    def installed(self):
        LOG.debug("NeutronModule installed = %s", self.module_name)
        return self.module_name

    def module(self):
        return self.repo['mod']

    # Return an INI parser for the child module
    def ini(self, neutron_dir=None):
        if self.repo['ini'] is None:
            ini_file = cfg.ConfigOpts()
            ini_file.register_opts(serviceprovider_opts, 'service_providers')

            if neutron_dir is not None:
                neutron_dirs = [neutron_dir]
            else:
                try:
                    neutron_dirs = cfg.CONF.config_dirs or ['/etc/neutron']
                except cfg.NoSuchOptError:
                    # handle older oslo.config versions (<= 3.8.0) that do not
                    # support config_dirs property
                    neutron_dirs = ['/etc/neutron']
                try:
                    config_dir = cfg.CONF.config_dir
                    if config_dir:
                        neutron_dirs = [config_dir]
                except cfg.NoSuchOptError:
                    pass

            # load configuration from all matching files to reflect
            # oslo.config behaviour
            config_files = []
            for neutron_dir in neutron_dirs:
                ini_path = os.path.join(neutron_dir,
                                        '%s.conf' % self.module_name)
                if os.path.exists(ini_path):
                    config_files.append(ini_path)

            # NOTE(ihrachys): we could pass project=self.module_name instead
            # to rely on oslo.config to find configuration files for us, but:
            # 1. that would render neutron_dir argument ineffective;
            # 2. that would break loading configuration file from under
            #    /etc/neutron in case no --config-dir is passed.
            # That's why we need to explicitly construct CLI here.
            ini_file(args=list(itertools.chain.from_iterable(
                ['--config-file', file_] for file_ in config_files
            )))
            self.repo['ini'] = ini_file

        return self.repo['ini']

    def service_providers(self):
        """Return the service providers for the extension module."""
        providers = []
        # Attempt to read the config from cfg.CONF first; when passing
        # --config-dir, the option is merged from all the definitions
        # made across all the imported config files
        try:
            providers = cfg.CONF.service_providers.service_provider
        except cfg.NoSuchOptError:
            pass

        # Alternatively, if the option is not available, try to load
        # it from the provider module's config file; this may be
        # necessary, if modules are loaded on the fly (DevStack may
        # be an example)
        if not providers:
            providers = self.ini().service_providers.service_provider

        return providers


# Global-scope function that should be used in service APIs
def normalize_provider_name(name):
    return name.lower()


def get_provider_driver_class(driver, namespace=SERVICE_PROVIDERS):
    """Return path to provider driver class

    In order to keep backward compatibility with configs < Kilo, we need to
    translate driver class paths after advanced services split. This is done
    by defining old class path as entry point in neutron package.
""" try: driver_manager = stevedore.driver.DriverManager( namespace, driver).driver except ImportError: return driver except RuntimeError: return driver new_driver = "%s.%s" % (driver_manager.__module__, driver_manager.__name__) LOG.warning(_LW( "The configured driver %(driver)s has been moved, automatically " "using %(new_driver)s instead. Please update your config files, " "as this automatic fixup will be removed in a future release."), {'driver': driver, 'new_driver': new_driver}) return new_driver def parse_service_provider_opt(service_module='neutron'): """Parse service definition opts and returns result.""" def validate_name(name): if len(name) > attr.NAME_MAX_LEN: raise n_exc.Invalid( _("Provider name %(name)s is limited by %(len)s characters") % {'name': name, 'len': attr.NAME_MAX_LEN}) neutron_mod = NeutronModule(service_module) svc_providers_opt = neutron_mod.service_providers() LOG.debug("Service providers = %s", svc_providers_opt) res = [] for prov_def in svc_providers_opt: split = prov_def.split(':') try: svc_type, name, driver = split[:3] except ValueError: raise n_exc.Invalid(_("Invalid service provider format")) validate_name(name) name = normalize_provider_name(name) default = False if len(split) == 4 and split[3]: if split[3] == 'default': default = True else: msg = (_("Invalid provider format. " "Last part should be 'default' or empty: %s") % prov_def) LOG.error(msg) raise n_exc.Invalid(msg) driver = get_provider_driver_class(driver) res.append({'service_type': svc_type, 'name': name, 'driver': driver, 'default': default}) return res class ServiceProviderNotFound(n_exc.InvalidInput): message = _("Service provider '%(provider)s' could not be found " "for service type %(service_type)s") class DefaultServiceProviderNotFound(n_exc.InvalidInput): message = _("Service type %(service_type)s does not have a default " "service provider") class ServiceProviderAlreadyAssociated(n_exc.Conflict): message = _("Resource '%(resource_id)s' is already associated with " "provider '%(provider)s' for service type '%(service_type)s'") class ProviderConfiguration(object): def __init__(self, svc_module='neutron'): self.providers = {} for prov in parse_service_provider_opt(svc_module): self.add_provider(prov) def _ensure_driver_unique(self, driver): for k, v in self.providers.items(): if v['driver'] == driver: msg = (_("Driver %s is not unique across providers") % driver) LOG.error(msg) raise n_exc.Invalid(msg) def _ensure_default_unique(self, type, default): if not default: return for k, v in self.providers.items(): if k[0] == type and v['default']: msg = _("Multiple default providers " "for service %s") % type LOG.error(msg) raise n_exc.Invalid(msg) def add_provider(self, provider): self._ensure_driver_unique(provider['driver']) self._ensure_default_unique(provider['service_type'], provider['default']) provider_type = (provider['service_type'], provider['name']) if provider_type in self.providers: msg = (_("Multiple providers specified for service " "%s") % provider['service_type']) LOG.error(msg) raise n_exc.Invalid(msg) self.providers[provider_type] = {'driver': provider['driver'], 'default': provider['default']} def _check_entry(self, k, v, filters): # small helper to deal with query filters if not filters: return True for index, key in enumerate(['service_type', 'name']): if key in filters: if k[index] not in filters[key]: return False for key in ['driver', 'default']: if key in filters: if v[key] not in filters[key]: return False return True def _fields(self, resource, fields): if fields: 
return dict(((key, item) for key, item in resource.items() if key in fields)) return resource def get_service_providers(self, filters=None, fields=None): return [self._fields({'service_type': k[0], 'name': k[1], 'driver': v['driver'], 'default': v['default']}, fields) for k, v in self.providers.items() if self._check_entry(k, v, filters)] neutron-8.4.0/neutron/services/tag/0000775000567000056710000000000013044373210020443 5ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/services/tag/__init__.py0000664000567000056710000000000013044372736022556 0ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/services/tag/tag_plugin.py0000664000567000056710000001204013044372760023154 0ustar jenkinsjenkins00000000000000# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # import functools from oslo_db import api as oslo_db_api from oslo_db import exception as db_exc from oslo_log import helpers as log_helpers from oslo_log import log as logging from sqlalchemy.orm import exc from neutron.api.v2 import attributes from neutron.db import api as db_api from neutron.db import common_db_mixin from neutron.db import models_v2 from neutron.db import tag_db as tag_model from neutron.extensions import tag as tag_ext LOG = logging.getLogger(__name__) resource_model_map = { attributes.NETWORKS: models_v2.Network, # other resources can be added } def _extend_tags_dict(plugin, response_data, db_data): tags = [tag_db.tag for tag_db in db_data.standard_attr.tags] response_data['tags'] = tags class TagPlugin(common_db_mixin.CommonDbMixin, tag_ext.TagPluginBase): """Implementation of the Neutron Tag Service Plugin.""" supported_extension_aliases = ['tag'] def _get_resource(self, context, resource, resource_id): model = resource_model_map[resource] try: return self._get_by_id(context, model, resource_id) except exc.NoResultFound: raise tag_ext.TagResourceNotFound(resource=resource, resource_id=resource_id) @log_helpers.log_method_call def get_tags(self, context, resource, resource_id): res = self._get_resource(context, resource, resource_id) tags = [tag_db.tag for tag_db in res.standard_attr.tags] return dict(tags=tags) @log_helpers.log_method_call def get_tag(self, context, resource, resource_id, tag): res = self._get_resource(context, resource, resource_id) if not any(tag == tag_db.tag for tag_db in res.standard_attr.tags): raise tag_ext.TagNotFound(tag=tag) @log_helpers.log_method_call @oslo_db_api.wrap_db_retry( max_retries=db_api.MAX_RETRIES, exception_checker=lambda e: isinstance(e, db_exc.DBDuplicateEntry)) def update_tags(self, context, resource, resource_id, body): res = self._get_resource(context, resource, resource_id) new_tags = set(body['tags']) old_tags = {tag_db.tag for tag_db in res.standard_attr.tags} tags_added = new_tags - old_tags tags_removed = old_tags - new_tags with context.session.begin(subtransactions=True): for tag_db in res.standard_attr.tags: if tag_db.tag in tags_removed: context.session.delete(tag_db) for tag in tags_added: tag_db = tag_model.Tag(standard_attr_id=res.standard_attr_id, tag=tag) 
context.session.add(tag_db) return body @log_helpers.log_method_call def update_tag(self, context, resource, resource_id, tag): res = self._get_resource(context, resource, resource_id) if any(tag == tag_db.tag for tag_db in res.standard_attr.tags): return try: with context.session.begin(subtransactions=True): tag_db = tag_model.Tag(standard_attr_id=res.standard_attr_id, tag=tag) context.session.add(tag_db) except db_exc.DBDuplicateEntry: pass @log_helpers.log_method_call def delete_tags(self, context, resource, resource_id): res = self._get_resource(context, resource, resource_id) with context.session.begin(subtransactions=True): query = context.session.query(tag_model.Tag) query = query.filter_by(standard_attr_id=res.standard_attr_id) query.delete() @log_helpers.log_method_call def delete_tag(self, context, resource, resource_id, tag): res = self._get_resource(context, resource, resource_id) with context.session.begin(subtransactions=True): query = context.session.query(tag_model.Tag) query = query.filter_by(tag=tag, standard_attr_id=res.standard_attr_id) if not query.delete(): raise tag_ext.TagNotFound(tag=tag) # support only _apply_dict_extend_functions supported resources # at the moment. for resource, model in resource_model_map.items(): common_db_mixin.CommonDbMixin.register_dict_extend_funcs( resource, [_extend_tags_dict]) common_db_mixin.CommonDbMixin.register_model_query_hook( model, "tag", None, None, functools.partial(tag_model.apply_tag_filters, model)) neutron-8.4.0/neutron/services/l3_router/0000775000567000056710000000000013044373210021606 5ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/services/l3_router/__init__.py0000664000567000056710000000000013044372736023721 0ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/services/l3_router/README0000664000567000056710000000255613044372760022507 0ustar jenkinsjenkins00000000000000This service plugin implements the L3 routing functionality (resources router and floatingip) that in earlier releases before Havana was provided by core plugins (openvswitch, linuxbridge, ... etc). Core plugins can now choose not to implement L3 routing functionality and instead delegate that to the L3 routing service plugin. The required changes to a core plugin are in that case: - Do not inherit 'l3_db.L3_NAT_db_mixin' (or its descendants like extraroute) anymore. - Remove "router" from 'supported_extension_aliases'. - Modify any 'self' references to members in L3_NAT_db_mixin to instead use 'manager.NeutronManager.get_service_plugins().get(constants.L3_ROUTER_NAT)' For example, self.prevent_l3_port_deletion(...) becomes something like plugin = manager.NeutronManager.get_service_plugins().get( constants.L3_ROUTER_NAT) if plugin: plugin.prevent_l3_port_deletion(...) If the core plugin has relied on the L3Agent the following must also be changed: - Do not inherit 'l3_rpc_base.L3RpcCallbackMixin' in any '*RpcCallbacks' class. - Do not be a consumer of the topics.L3PLUGIN topic for RPC. To use the L3 routing service plugin, add 'neutron.services.l3_router.l3_router_plugin.L3RouterPlugin' to 'service_plugins' in '/etc/neutron/neutron.conf'. That is, service_plugins = neutron.services.l3_router.l3_router_plugin.L3RouterPlugin neutron-8.4.0/neutron/services/l3_router/l3_router_plugin.py0000664000567000056710000001060613044372760025470 0ustar jenkinsjenkins00000000000000# Copyright (c) 2013 OpenStack Foundation. # All Rights Reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import cfg from oslo_log import helpers as log_helpers from oslo_utils import importutils from neutron.api.rpc.agentnotifiers import l3_rpc_agent_api from neutron.api.rpc.handlers import l3_rpc from neutron.common import constants as n_const from neutron.common import rpc as n_rpc from neutron.common import topics from neutron.db import common_db_mixin from neutron.db import dns_db from neutron.db import extraroute_db from neutron.db import l3_db from neutron.db import l3_dvr_ha_scheduler_db from neutron.db import l3_dvrscheduler_db from neutron.db import l3_gwmode_db from neutron.db import l3_hamode_db from neutron.db import l3_hascheduler_db from neutron.plugins.common import constants from neutron.quota import resource_registry from neutron.services import service_base class L3RouterPlugin(service_base.ServicePluginBase, common_db_mixin.CommonDbMixin, extraroute_db.ExtraRoute_db_mixin, l3_hamode_db.L3_HA_NAT_db_mixin, l3_gwmode_db.L3_NAT_db_mixin, l3_dvr_ha_scheduler_db.L3_DVR_HA_scheduler_db_mixin, dns_db.DNSDbMixin): """Implementation of the Neutron L3 Router Service Plugin. This class implements a L3 service plugin that provides router and floatingip resources and manages associated request/response. All DB related work is implemented in classes l3_db.L3_NAT_db_mixin, l3_hamode_db.L3_HA_NAT_db_mixin, l3_dvr_db.L3_NAT_with_dvr_db_mixin, and extraroute_db.ExtraRoute_db_mixin. """ supported_extension_aliases = ["dvr", "router", "ext-gw-mode", "extraroute", "l3_agent_scheduler", "l3-ha", "router_availability_zone"] @resource_registry.tracked_resources(router=l3_db.Router, floatingip=l3_db.FloatingIP) def __init__(self): self.router_scheduler = importutils.import_object( cfg.CONF.router_scheduler_driver) self.start_periodic_l3_agent_status_check() super(L3RouterPlugin, self).__init__() if 'dvr' in self.supported_extension_aliases: l3_dvrscheduler_db.subscribe() if 'l3-ha' in self.supported_extension_aliases: l3_hascheduler_db.subscribe() l3_db.subscribe() self.start_rpc_listeners() @log_helpers.log_method_call def start_rpc_listeners(self): # RPC support self.topic = topics.L3PLUGIN self.conn = n_rpc.create_connection() self.agent_notifiers.update( {n_const.AGENT_TYPE_L3: l3_rpc_agent_api.L3AgentNotifyAPI()}) self.endpoints = [l3_rpc.L3RpcCallback()] self.conn.create_consumer(self.topic, self.endpoints, fanout=False) return self.conn.consume_in_threads() def get_plugin_type(self): return constants.L3_ROUTER_NAT def get_plugin_description(self): """returns string description of the plugin.""" return ("L3 Router Service Plugin for basic L3 forwarding" " between (L2) Neutron networks and access to external" " networks via a NAT gateway.") def create_floatingip(self, context, floatingip): """Create floating IP. 
:param context: Neutron request context :param floatingip: data for the floating IP being created :returns: A floating IP object on success As the l3 router plugin asynchronously creates floating IPs leveraging the l3 agent, the initial status for the floating IP object will be DOWN. """ return super(L3RouterPlugin, self).create_floatingip( context, floatingip, initial_status=n_const.FLOATINGIP_STATUS_DOWN) neutron-8.4.0/neutron/services/service_base.py0000664000567000056710000000632613044372760022714 0ustar jenkinsjenkins00000000000000# Copyright 2012 OpenStack Foundation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import abc from oslo_log import log as logging from oslo_utils import excutils from oslo_utils import importutils import six from neutron._i18n import _, _LE, _LI from neutron.api import extensions from neutron.db import servicetype_db as sdb from neutron.services import provider_configuration as pconf LOG = logging.getLogger(__name__) @six.add_metaclass(abc.ABCMeta) class ServicePluginBase(extensions.PluginInterface): """Define base interface for any Advanced Service plugin.""" supported_extension_aliases = [] @abc.abstractmethod def get_plugin_type(self): """Return one of predefined service types. See neutron/plugins/common/constants.py """ pass @abc.abstractmethod def get_plugin_description(self): """Return string description of the plugin.""" pass def get_workers(self): """Returns a collection of NeutronWorkers""" return () def load_drivers(service_type, plugin): """Loads drivers for specific service. Passes plugin instance to driver's constructor """ service_type_manager = sdb.ServiceTypeManager.get_instance() providers = (service_type_manager. 
get_service_providers( None, filters={'service_type': [service_type]}) ) if not providers: msg = (_("No providers specified for '%s' service, exiting") % service_type) LOG.error(msg) raise SystemExit(1) drivers = {} for provider in providers: try: drivers[provider['name']] = importutils.import_object( provider['driver'], plugin ) LOG.debug("Loaded '%(provider)s' provider for service " "%(service_type)s", {'provider': provider['driver'], 'service_type': service_type}) except ImportError: with excutils.save_and_reraise_exception(): LOG.exception(_LE("Error loading provider '%(provider)s' for " "service %(service_type)s"), {'provider': provider['driver'], 'service_type': service_type}) default_provider = None try: provider = service_type_manager.get_default_service_provider( None, service_type) default_provider = provider['name'] except pconf.DefaultServiceProviderNotFound: LOG.info(_LI("Default provider is not specified for service type %s"), service_type) return drivers, default_provider neutron-8.4.0/neutron/services/metering/0000775000567000056710000000000013044373210021502 5ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/services/metering/metering_plugin.py0000664000567000056710000000526513044372760025265 0ustar jenkinsjenkins00000000000000# Copyright (C) 2013 eNovance SAS # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
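# Control-flow summary (descriptive only): each CRUD handler below first
# updates the database through MeteringDbMixin, then fans the change out to
# the metering agents via MeteringAgentNotifyAPI, e.g. create_metering_label()
# ends with self.meter_rpc.add_metering_label(context, data).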
from neutron.api.rpc.agentnotifiers import metering_rpc_agent_api from neutron.common import rpc as n_rpc from neutron.common import topics from neutron.db.metering import metering_db from neutron.db.metering import metering_rpc class MeteringPlugin(metering_db.MeteringDbMixin): """Implementation of the Neutron Metering Service Plugin.""" supported_extension_aliases = ["metering"] path_prefix = "/metering" def __init__(self): super(MeteringPlugin, self).__init__() self.meter_rpc = metering_rpc_agent_api.MeteringAgentNotifyAPI() self.start_rpc_listeners() def start_rpc_listeners(self): self.endpoints = [metering_rpc.MeteringRpcCallbacks(self)] self.conn = n_rpc.create_connection() self.conn.create_consumer( topics.METERING_PLUGIN, self.endpoints, fanout=False) return self.conn.consume_in_threads() def create_metering_label(self, context, metering_label): label = super(MeteringPlugin, self).create_metering_label( context, metering_label) data = self.get_sync_data_metering(context) self.meter_rpc.add_metering_label(context, data) return label def delete_metering_label(self, context, label_id): data = self.get_sync_data_metering(context, label_id) label = super(MeteringPlugin, self).delete_metering_label( context, label_id) self.meter_rpc.remove_metering_label(context, data) return label def create_metering_label_rule(self, context, metering_label_rule): rule = super(MeteringPlugin, self).create_metering_label_rule( context, metering_label_rule) data = self.get_sync_data_for_rule(context, rule) self.meter_rpc.add_metering_label_rule(context, data) return rule def delete_metering_label_rule(self, context, rule_id): rule = super(MeteringPlugin, self).delete_metering_label_rule( context, rule_id) data = self.get_sync_data_for_rule(context, rule) self.meter_rpc.remove_metering_label_rule(context, data) return rule neutron-8.4.0/neutron/services/metering/__init__.py0000664000567000056710000000000013044372736023615 0ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/services/metering/drivers/0000775000567000056710000000000013044373210023160 5ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/services/metering/drivers/noop/0000775000567000056710000000000013044373210024133 5ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/services/metering/drivers/noop/noop_driver.py0000664000567000056710000000307213044372736027051 0ustar jenkinsjenkins00000000000000# Copyright (C) 2013 eNovance SAS # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
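# The no-op driver satisfies the MeteringAbstractDriver interface without
# touching the data plane, which makes it usable as a safe default and in
# tests.  Assuming the metering agent's default options (see
# metering_agent.py), it is selected by:
#
#     [DEFAULT]
#     driver = neutron.services.metering.drivers.noop.noop_driver.NoopMeteringDriver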
from oslo_log import helpers as log_helpers from neutron.services.metering.drivers import abstract_driver class NoopMeteringDriver(abstract_driver.MeteringAbstractDriver): @log_helpers.log_method_call def update_routers(self, context, routers): pass @log_helpers.log_method_call def remove_router(self, context, router_id): pass @log_helpers.log_method_call def update_metering_label_rules(self, context, routers): pass @log_helpers.log_method_call def add_metering_label_rule(self, context, routers): pass @log_helpers.log_method_call def remove_metering_label_rule(self, context, routers): pass @log_helpers.log_method_call def add_metering_label(self, context, routers): pass @log_helpers.log_method_call def remove_metering_label(self, context, routers): pass @log_helpers.log_method_call def get_traffic_counters(self, context, routers): pass neutron-8.4.0/neutron/services/metering/drivers/noop/__init__.py0000664000567000056710000000000013044372736026246 0ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/services/metering/drivers/iptables/0000775000567000056710000000000013044373210024763 5ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/services/metering/drivers/iptables/__init__.py0000664000567000056710000000000013044372736027076 0ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/services/metering/drivers/iptables/iptables_driver.py0000664000567000056710000003560013044372760030530 0ustar jenkinsjenkins00000000000000# Copyright (C) 2013 eNovance SAS # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
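# Rough sketch (a summary of the code below, not upstream documentation) of
# the chain layout this driver maintains in each router namespace; actual
# names are shortened/hashed by iptables_manager.get_chain_name():
#
#     neutron-meter-FORWARD          top chain, jumps to each rules chain
#     neutron-meter-r-<label id>     per-rule matches; excluded rules RETURN,
#                                    others jump to the label chain
#     neutron-meter-l-<label id>     counter chain read back by
#                                    get_traffic_counters()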
from oslo_config import cfg from oslo_log import helpers as log_helpers from oslo_log import log as logging from oslo_utils import importutils import six from neutron._i18n import _, _LE, _LI from neutron.agent.common import config from neutron.agent.linux import interface from neutron.agent.linux import iptables_manager from neutron.common import constants as constants from neutron.common import ipv6_utils from neutron.services.metering.drivers import abstract_driver LOG = logging.getLogger(__name__) NS_PREFIX = 'qrouter-' WRAP_NAME = 'neutron-meter' EXTERNAL_DEV_PREFIX = 'qg-' TOP_CHAIN = WRAP_NAME + "-FORWARD" RULE = '-r-' LABEL = '-l-' config.register_interface_driver_opts_helper(cfg.CONF) cfg.CONF.register_opts(interface.OPTS) class IptablesManagerTransaction(object): __transactions = {} def __init__(self, im): self.im = im transaction = self.__transactions.get(im, 0) transaction += 1 self.__transactions[im] = transaction def __enter__(self): return self.im def __exit__(self, type, value, traceback): transaction = self.__transactions.get(self.im) if transaction == 1: self.im.apply() del self.__transactions[self.im] else: transaction -= 1 self.__transactions[self.im] = transaction class RouterWithMetering(object): def __init__(self, conf, router): self.conf = conf self.id = router['id'] self.router = router # TODO(cbrandily): deduplicate ns_name generation in metering/l3 self.ns_name = NS_PREFIX + self.id self.iptables_manager = iptables_manager.IptablesManager( namespace=self.ns_name, binary_name=WRAP_NAME, state_less=True, use_ipv6=ipv6_utils.is_enabled()) self.metering_labels = {} class IptablesMeteringDriver(abstract_driver.MeteringAbstractDriver): def __init__(self, plugin, conf): self.plugin = plugin self.conf = conf or cfg.CONF self.routers = {} if not self.conf.interface_driver: raise SystemExit(_('An interface driver must be specified')) LOG.info(_LI("Loading interface driver %s"), self.conf.interface_driver) self.driver = importutils.import_object(self.conf.interface_driver, self.conf) def _update_router(self, router): r = self.routers.get(router['id'], RouterWithMetering(self.conf, router)) r.router = router self.routers[r.id] = r return r @log_helpers.log_method_call def update_routers(self, context, routers): # disassociate removed routers router_ids = set(router['id'] for router in routers) for router_id, rm in six.iteritems(self.routers): if router_id not in router_ids: self._process_disassociate_metering_label(rm.router) for router in routers: old_gw_port_id = None old_rm = self.routers.get(router['id']) if old_rm: old_gw_port_id = old_rm.router['gw_port_id'] gw_port_id = router['gw_port_id'] if gw_port_id != old_gw_port_id: if old_rm: with IptablesManagerTransaction(old_rm.iptables_manager): self._process_disassociate_metering_label(router) if gw_port_id: self._process_associate_metering_label(router) elif gw_port_id: self._process_associate_metering_label(router) @log_helpers.log_method_call def remove_router(self, context, router_id): if router_id in self.routers: del self.routers[router_id] def get_external_device_name(self, port_id): return (EXTERNAL_DEV_PREFIX + port_id)[:self.driver.DEV_NAME_LEN] def _process_metering_label_rules(self, rm, rules, label_chain, rules_chain): im = rm.iptables_manager ext_dev = self.get_external_device_name(rm.router['gw_port_id']) if not ext_dev: return for rule in rules: self._add_rule_to_chain(ext_dev, rule, im, label_chain, rules_chain) def _process_metering_label_rule_add(self, rm, rule, ext_dev, label_chain, rules_chain): im = 
rm.iptables_manager self._add_rule_to_chain(ext_dev, rule, im, label_chain, rules_chain) def _process_metering_label_rule_delete(self, rm, rule, ext_dev, label_chain, rules_chain): im = rm.iptables_manager self._remove_rule_from_chain(ext_dev, rule, im, label_chain, rules_chain) def _add_rule_to_chain(self, ext_dev, rule, im, label_chain, rules_chain): ipt_rule = self._prepare_rule(ext_dev, rule, label_chain) if rule['excluded']: im.ipv4['filter'].add_rule(rules_chain, ipt_rule, wrap=False, top=True) else: im.ipv4['filter'].add_rule(rules_chain, ipt_rule, wrap=False, top=False) def _remove_rule_from_chain(self, ext_dev, rule, im, label_chain, rules_chain): ipt_rule = self._prepare_rule(ext_dev, rule, label_chain) if rule['excluded']: im.ipv4['filter'].remove_rule(rules_chain, ipt_rule, wrap=False, top=True) else: im.ipv4['filter'].remove_rule(rules_chain, ipt_rule, wrap=False, top=False) def _prepare_rule(self, ext_dev, rule, label_chain): remote_ip = rule['remote_ip_prefix'] if rule['direction'] == 'egress': dir_opt = '-o %s -d %s' % (ext_dev, remote_ip) else: dir_opt = '-i %s -s %s' % (ext_dev, remote_ip) if rule['excluded']: ipt_rule = '%s -j RETURN' % dir_opt else: ipt_rule = '%s -j %s' % (dir_opt, label_chain) return ipt_rule def _process_associate_metering_label(self, router): self._update_router(router) rm = self.routers.get(router['id']) with IptablesManagerTransaction(rm.iptables_manager): labels = router.get(constants.METERING_LABEL_KEY, []) for label in labels: label_id = label['id'] label_chain = iptables_manager.get_chain_name(WRAP_NAME + LABEL + label_id, wrap=False) rm.iptables_manager.ipv4['filter'].add_chain(label_chain, wrap=False) rules_chain = iptables_manager.get_chain_name(WRAP_NAME + RULE + label_id, wrap=False) rm.iptables_manager.ipv4['filter'].add_chain(rules_chain, wrap=False) rm.iptables_manager.ipv4['filter'].add_rule(TOP_CHAIN, '-j ' + rules_chain, wrap=False) rm.iptables_manager.ipv4['filter'].add_rule(label_chain, '', wrap=False) rules = label.get('rules') if rules: self._process_metering_label_rules(rm, rules, label_chain, rules_chain) rm.metering_labels[label_id] = label def _process_disassociate_metering_label(self, router): rm = self.routers.get(router['id']) if not rm: return with IptablesManagerTransaction(rm.iptables_manager): labels = router.get(constants.METERING_LABEL_KEY, []) for label in labels: label_id = label['id'] if label_id not in rm.metering_labels: continue label_chain = iptables_manager.get_chain_name(WRAP_NAME + LABEL + label_id, wrap=False) rules_chain = iptables_manager.get_chain_name(WRAP_NAME + RULE + label_id, wrap=False) rm.iptables_manager.ipv4['filter'].remove_chain(label_chain, wrap=False) rm.iptables_manager.ipv4['filter'].remove_chain(rules_chain, wrap=False) del rm.metering_labels[label_id] @log_helpers.log_method_call def add_metering_label(self, context, routers): for router in routers: self._process_associate_metering_label(router) @log_helpers.log_method_call def add_metering_label_rule(self, context, routers): for router in routers: self._add_metering_label_rule(router) @log_helpers.log_method_call def remove_metering_label_rule(self, context, routers): for router in routers: self._remove_metering_label_rule(router) @log_helpers.log_method_call def update_metering_label_rules(self, context, routers): for router in routers: self._update_metering_label_rules(router) def _add_metering_label_rule(self, router): self._process_metering_rule_action(router, 'create') def _remove_metering_label_rule(self, router): 
self._process_metering_rule_action(router, 'delete') def _process_metering_rule_action(self, router, action): rm = self.routers.get(router['id']) if not rm: return ext_dev = self.get_external_device_name(rm.router['gw_port_id']) if not ext_dev: return with IptablesManagerTransaction(rm.iptables_manager): labels = router.get(constants.METERING_LABEL_KEY, []) for label in labels: label_id = label['id'] label_chain = iptables_manager.get_chain_name(WRAP_NAME + LABEL + label_id, wrap=False) rules_chain = iptables_manager.get_chain_name(WRAP_NAME + RULE + label_id, wrap=False) rule = label.get('rule') if rule: if action == 'create': self._process_metering_label_rule_add(rm, rule, ext_dev, label_chain, rules_chain) elif action == 'delete': self._process_metering_label_rule_delete(rm, rule, ext_dev, label_chain, rules_chain) def _update_metering_label_rules(self, router): rm = self.routers.get(router['id']) if not rm: return with IptablesManagerTransaction(rm.iptables_manager): labels = router.get(constants.METERING_LABEL_KEY, []) for label in labels: label_id = label['id'] label_chain = iptables_manager.get_chain_name(WRAP_NAME + LABEL + label_id, wrap=False) rules_chain = iptables_manager.get_chain_name(WRAP_NAME + RULE + label_id, wrap=False) rm.iptables_manager.ipv4['filter'].empty_chain(rules_chain, wrap=False) rules = label.get('rules') if rules: self._process_metering_label_rules(rm, rules, label_chain, rules_chain) @log_helpers.log_method_call def remove_metering_label(self, context, routers): for router in routers: self._process_disassociate_metering_label(router) @log_helpers.log_method_call def get_traffic_counters(self, context, routers): accs = {} for router in routers: rm = self.routers.get(router['id']) if not rm: continue for label_id, label in rm.metering_labels.items(): try: chain = iptables_manager.get_chain_name(WRAP_NAME + LABEL + label_id, wrap=False) chain_acc = rm.iptables_manager.get_traffic_counters( chain, wrap=False, zero=True) except RuntimeError: LOG.exception(_LE('Failed to get traffic counters, ' 'router: %s'), router) continue if not chain_acc: continue acc = accs.get(label_id, {'pkts': 0, 'bytes': 0}) acc['pkts'] += chain_acc['pkts'] acc['bytes'] += chain_acc['bytes'] accs[label_id] = acc return accs neutron-8.4.0/neutron/services/metering/drivers/abstract_driver.py0000664000567000056710000000247113044372736026730 0ustar jenkinsjenkins00000000000000# Copyright (C) 2013 eNovance SAS # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
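# Illustrative subclass sketch ('MyMeteringDriver' is hypothetical, not part
# of the tree):
#
#     class MyMeteringDriver(MeteringAbstractDriver):
#         def update_routers(self, context, routers):
#             pass  # program the data plane for the given routers
#         # ...and likewise for every abstract method defined below.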
import abc import six @six.add_metaclass(abc.ABCMeta) class MeteringAbstractDriver(object): """Abstract Metering driver.""" def __init__(self, plugin, conf): pass @abc.abstractmethod def update_routers(self, context, routers): pass @abc.abstractmethod def remove_router(self, context, router_id): pass @abc.abstractmethod def update_metering_label_rules(self, context, routers): pass @abc.abstractmethod def add_metering_label(self, context, routers): pass @abc.abstractmethod def remove_metering_label(self, context, routers): pass @abc.abstractmethod def get_traffic_counters(self, context, routers): pass neutron-8.4.0/neutron/services/metering/drivers/__init__.py0000664000567000056710000000000013044372736025273 0ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/services/metering/agents/0000775000567000056710000000000013044373210022763 5ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/services/metering/agents/__init__.py0000664000567000056710000000000013044372736025076 0ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/services/metering/agents/metering_agent.py0000664000567000056710000002652413044372760026347 0ustar jenkinsjenkins00000000000000# Copyright (C) 2013 eNovance SAS # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import sys from oslo_config import cfg from oslo_log import log as logging import oslo_messaging from oslo_service import loopingcall from oslo_service import periodic_task from oslo_service import service from oslo_utils import importutils from oslo_utils import timeutils from neutron._i18n import _, _LE, _LI, _LW from neutron.agent.common import config from neutron.agent import rpc as agent_rpc from neutron.common import config as common_config from neutron.common import constants as constants from neutron.common import rpc as n_rpc from neutron.common import topics from neutron.common import utils from neutron import context from neutron import manager from neutron import service as neutron_service LOG = logging.getLogger(__name__) class MeteringPluginRpc(object): def __init__(self, host): # NOTE(yamamoto): super.__init__() call here is not only for # aesthetics. Because of multiple inheritances in MeteringAgent, # it's actually necessary to initialize parent classes of # manager.Manager correctly. super(MeteringPluginRpc, self).__init__() target = oslo_messaging.Target(topic=topics.METERING_PLUGIN, version='1.0') self.client = n_rpc.get_client(target) def _get_sync_data_metering(self, context): try: cctxt = self.client.prepare() return cctxt.call(context, 'get_sync_data_metering', host=self.host) except Exception: LOG.exception(_LE("Failed synchronizing routers")) class MeteringAgent(MeteringPluginRpc, manager.Manager): Opts = [ cfg.StrOpt('driver', default='neutron.services.metering.drivers.noop.' 
'noop_driver.NoopMeteringDriver', help=_("Metering driver")), cfg.IntOpt('measure_interval', default=30, help=_("Interval between two metering measures")), cfg.IntOpt('report_interval', default=300, help=_("Interval between two metering reports")), ] def __init__(self, host, conf=None): self.conf = conf or cfg.CONF self._load_drivers() self.context = context.get_admin_context_without_session() self.metering_loop = loopingcall.FixedIntervalLoopingCall( self._metering_loop ) measure_interval = self.conf.measure_interval self.last_report = 0 self.metering_loop.start(interval=measure_interval) self.host = host self.label_tenant_id = {} self.routers = {} self.metering_infos = {} super(MeteringAgent, self).__init__(host=host) def _load_drivers(self): """Loads plugin-driver from configuration.""" LOG.info(_LI("Loading Metering driver %s"), self.conf.driver) if not self.conf.driver: raise SystemExit(_('A metering driver must be specified')) self.metering_driver = importutils.import_object( self.conf.driver, self, self.conf) def _metering_notification(self): for label_id, info in self.metering_infos.items(): data = {'label_id': label_id, 'tenant_id': self.label_tenant_id.get(label_id), 'pkts': info['pkts'], 'bytes': info['bytes'], 'time': info['time'], 'first_update': info['first_update'], 'last_update': info['last_update'], 'host': self.host} LOG.debug("Send metering report: %s", data) notifier = n_rpc.get_notifier('metering') notifier.info(self.context, 'l3.meter', data) info['pkts'] = 0 info['bytes'] = 0 info['time'] = 0 def _purge_metering_info(self): deadline_timestamp = timeutils.utcnow_ts() - self.conf.report_interval label_ids = [ label_id for label_id, info in self.metering_infos.items() if info['last_update'] < deadline_timestamp] for label_id in label_ids: del self.metering_infos[label_id] def _add_metering_info(self, label_id, pkts, bytes): ts = timeutils.utcnow_ts() info = self.metering_infos.get(label_id, {'bytes': 0, 'pkts': 0, 'time': 0, 'first_update': ts, 'last_update': ts}) info['bytes'] += bytes info['pkts'] += pkts info['time'] += ts - info['last_update'] info['last_update'] = ts self.metering_infos[label_id] = info return info def _add_metering_infos(self): self.label_tenant_id = {} for router in self.routers.values(): tenant_id = router['tenant_id'] labels = router.get(constants.METERING_LABEL_KEY, []) for label in labels: label_id = label['id'] self.label_tenant_id[label_id] = tenant_id tenant_id = self.label_tenant_id.get accs = self._get_traffic_counters(self.context, self.routers.values()) if not accs: return for label_id, acc in accs.items(): self._add_metering_info(label_id, acc['pkts'], acc['bytes']) def _metering_loop(self): self._add_metering_infos() ts = timeutils.utcnow_ts() delta = ts - self.last_report report_interval = self.conf.report_interval if delta >= report_interval: self._metering_notification() self._purge_metering_info() self.last_report = ts @utils.synchronized('metering-agent') def _invoke_driver(self, context, meterings, func_name): try: return getattr(self.metering_driver, func_name)(context, meterings) except AttributeError: LOG.exception(_LE("Driver %(driver)s does not implement %(func)s"), {'driver': self.conf.driver, 'func': func_name}) except RuntimeError: LOG.exception(_LE("Driver %(driver)s:%(func)s runtime error"), {'driver': self.conf.driver, 'func': func_name}) @periodic_task.periodic_task(run_immediately=True) def _sync_routers_task(self, context): routers = self._get_sync_data_metering(self.context) if not routers: return 
self._update_routers(context, routers) def router_deleted(self, context, router_id): self._add_metering_infos() if router_id in self.routers: del self.routers[router_id] return self._invoke_driver(context, router_id, 'remove_router') def routers_updated(self, context, routers=None): if not routers: routers = self._get_sync_data_metering(self.context) if not routers: return self._update_routers(context, routers) def _update_routers(self, context, routers): for router in routers: self.routers[router['id']] = router return self._invoke_driver(context, routers, 'update_routers') def _get_traffic_counters(self, context, routers): LOG.debug("Get router traffic counters") return self._invoke_driver(context, routers, 'get_traffic_counters') def add_metering_label_rule(self, context, routers): return self._invoke_driver(context, routers, 'add_metering_label_rule') def remove_metering_label_rule(self, context, routers): return self._invoke_driver(context, routers, 'remove_metering_label_rule') def update_metering_label_rules(self, context, routers): LOG.debug("Update metering rules from agent") return self._invoke_driver(context, routers, 'update_metering_label_rules') def add_metering_label(self, context, routers): LOG.debug("Creating a metering label from agent") return self._invoke_driver(context, routers, 'add_metering_label') def remove_metering_label(self, context, routers): self._add_metering_infos() LOG.debug("Delete a metering label from agent") return self._invoke_driver(context, routers, 'remove_metering_label') class MeteringAgentWithStateReport(MeteringAgent): def __init__(self, host, conf=None): super(MeteringAgentWithStateReport, self).__init__(host=host, conf=conf) self.state_rpc = agent_rpc.PluginReportStateAPI(topics.REPORTS) self.agent_state = { 'binary': 'neutron-metering-agent', 'host': host, 'topic': topics.METERING_AGENT, 'configurations': { 'metering_driver': self.conf.driver, 'measure_interval': self.conf.measure_interval, 'report_interval': self.conf.report_interval }, 'start_flag': True, 'agent_type': constants.AGENT_TYPE_METERING} report_interval = cfg.CONF.AGENT.report_interval self.use_call = True if report_interval: self.heartbeat = loopingcall.FixedIntervalLoopingCall( self._report_state) self.heartbeat.start(interval=report_interval) def _report_state(self): try: self.state_rpc.report_state(self.context, self.agent_state, self.use_call) self.agent_state.pop('start_flag', None) self.use_call = False except AttributeError: # This means the server does not support report_state LOG.warning(_LW("Neutron server does not support state report. " "State report for this agent will be disabled.")) self.heartbeat.stop() return except Exception: LOG.exception(_LE("Failed reporting state!")) def agent_updated(self, context, payload): LOG.info(_LI("agent_updated by server side %s!"), payload) def main(): conf = cfg.CONF conf.register_opts(MeteringAgent.Opts) config.register_agent_state_opts_helper(conf) common_config.init(sys.argv[1:]) config.setup_logging() server = neutron_service.Service.create( binary='neutron-metering-agent', topic=topics.METERING_AGENT, report_interval=cfg.CONF.AGENT.report_interval, manager='neutron.services.metering.agents.' 
'metering_agent.MeteringAgentWithStateReport') service.launch(cfg.CONF, server).wait() neutron-8.4.0/neutron/services/flavors/0000775000567000056710000000000013044373210021344 5ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/services/flavors/__init__.py0000664000567000056710000000000013044372736023457 0ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/services/flavors/flavors_plugin.py0000664000567000056710000000220613044372760024761 0ustar jenkinsjenkins00000000000000# Copyright (c) 2015, Hewlett-Packard Development Company, L.P. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron.db import flavors_db from neutron.plugins.common import constants from neutron.services import service_base class FlavorsPlugin(service_base.ServicePluginBase, flavors_db.FlavorsDbMixin): """Implements Neutron Flavors Service plugin.""" supported_extension_aliases = ['flavors'] def get_plugin_type(self): return constants.FLAVORS def get_plugin_description(self): return "Neutron Flavors and Service Profiles manager plugin" neutron-8.4.0/neutron/services/bgp/0000775000567000056710000000000013044373210020440 5ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/services/bgp/scheduler/0000775000567000056710000000000013044373210022416 5ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/services/bgp/scheduler/__init__.py0000664000567000056710000000000013044372760024526 0ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/services/bgp/scheduler/bgp_dragent_scheduler.py0000664000567000056710000001732113044372760027317 0ustar jenkinsjenkins00000000000000# Copyright 2016 Huawei Technologies India Pvt. Ltd. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
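# Scheduling semantics implemented below: a BGP speaker is bound to at most
# BGP_SPEAKER_PER_DRAGENT (currently 1) agents, and a BgpDrAgent hosts at
# most one speaker, so at most one additional agent is bound per speaker.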
from oslo_db import exception as db_exc from oslo_log import log as logging from sqlalchemy.orm import exc from sqlalchemy import sql from neutron.db import agents_db from neutron.db import bgp_db from neutron.db import bgp_dragentscheduler_db as bgp_dras_db from neutron._i18n import _LI, _LW from neutron.scheduler import base_resource_filter from neutron.scheduler import base_scheduler from neutron.services.bgp.common import constants as bgp_consts LOG = logging.getLogger(__name__) BGP_SPEAKER_PER_DRAGENT = 1 class BgpDrAgentFilter(base_resource_filter.BaseResourceFilter): def bind(self, context, agents, bgp_speaker_id): """Bind the BgpSpeaker to a BgpDrAgent.""" bound_agents = agents[:] for agent in agents: # saving agent_id to use it after rollback to avoid # DetachedInstanceError agent_id = agent.id binding = bgp_dras_db.BgpSpeakerDrAgentBinding() binding.agent_id = agent_id binding.bgp_speaker_id = bgp_speaker_id try: with context.session.begin(subtransactions=True): context.session.add(binding) except db_exc.DBDuplicateEntry: # it's totally ok, someone just did our job! bound_agents.remove(agent) LOG.info(_LI('BgpDrAgent %s already present'), agent_id) LOG.debug('BgpSpeaker %(bgp_speaker_id)s is scheduled to be ' 'hosted by BgpDrAgent %(agent_id)s', {'bgp_speaker_id': bgp_speaker_id, 'agent_id': agent_id}) super(BgpDrAgentFilter, self).bind(context, bound_agents, bgp_speaker_id) def filter_agents(self, plugin, context, bgp_speaker): """Return the agents that can host the BgpSpeaker.""" agents_dict = self._get_bgp_speaker_hostable_dragents( plugin, context, bgp_speaker) if not agents_dict['hostable_agents'] or agents_dict['n_agents'] <= 0: return {'n_agents': 0, 'hostable_agents': [], 'hosted_agents': []} return agents_dict def _get_active_dragents(self, plugin, context): """Return a list of active BgpDrAgents.""" with context.session.begin(subtransactions=True): active_dragents = plugin.get_agents_db( context, filters={ 'agent_type': [bgp_consts.AGENT_TYPE_BGP_ROUTING], 'admin_state_up': [True]}) if not active_dragents: return [] return active_dragents def _get_num_dragents_hosting_bgp_speaker(self, bgp_speaker_id, dragent_bindings): return sum(1 if dragent_binding.bgp_speaker_id == bgp_speaker_id else 0 for dragent_binding in dragent_bindings) def _get_bgp_speaker_hostable_dragents(self, plugin, context, bgp_speaker): """Return number of additional BgpDrAgents which will actually host the given BgpSpeaker and a list of BgpDrAgents which can host the given BgpSpeaker """ # only one BgpSpeaker can be hosted by a BgpDrAgent for now. 
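        # Return shape (illustrative values):
        #     {'n_agents': 1,                # additional agents to bind
        #      'hostable_agents': [agent],   # active agents not yet hosting
        #      'hosted_agents': 0}           # count already hosting speaker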
dragents_per_bgp_speaker = BGP_SPEAKER_PER_DRAGENT dragent_bindings = plugin.get_dragent_bgp_speaker_bindings(context) agents_hosting = [dragent_binding.agent_id for dragent_binding in dragent_bindings] num_dragents_hosting_bgp_speaker = ( self._get_num_dragents_hosting_bgp_speaker(bgp_speaker['id'], dragent_bindings)) n_agents = dragents_per_bgp_speaker - num_dragents_hosting_bgp_speaker if n_agents <= 0: return {'n_agents': 0, 'hostable_agents': [], 'hosted_agents': []} active_dragents = self._get_active_dragents(plugin, context) hostable_dragents = [ agent for agent in set(active_dragents) if agent.id not in agents_hosting and plugin.is_eligible_agent( active=True, agent=agent) ] if not hostable_dragents: return {'n_agents': 0, 'hostable_agents': [], 'hosted_agents': []} n_agents = min(len(hostable_dragents), n_agents) return {'n_agents': n_agents, 'hostable_agents': hostable_dragents, 'hosted_agents': num_dragents_hosting_bgp_speaker} class BgpDrAgentSchedulerBase(BgpDrAgentFilter): def schedule_unscheduled_bgp_speakers(self, context, host): """Schedule unscheduled BgpSpeaker to a BgpDrAgent. """ LOG.debug('Started auto-scheduling on host %s', host) with context.session.begin(subtransactions=True): query = context.session.query(agents_db.Agent) query = query.filter_by( agent_type=bgp_consts.AGENT_TYPE_BGP_ROUTING, host=host, admin_state_up=sql.true()) try: bgp_dragent = query.one() except (exc.NoResultFound): LOG.debug('No enabled BgpDrAgent on host %s', host) return False if agents_db.AgentDbMixin.is_agent_down( bgp_dragent.heartbeat_timestamp): LOG.warning(_LW('BgpDrAgent %s is down'), bgp_dragent.id) return False if self._is_bgp_speaker_hosted(context, bgp_dragent['id']): # One BgpDrAgent can only host one BGP speaker LOG.debug('BgpDrAgent already hosting a speaker on host %s. ' 'Cannot schedule an another one', host) return False unscheduled_speakers = self._get_unscheduled_bgp_speakers(context) if not unscheduled_speakers: LOG.debug('Nothing to auto-schedule on host %s', host) return False self.bind(context, [bgp_dragent], unscheduled_speakers[0]) return True def _is_bgp_speaker_hosted(self, context, agent_id): speaker_binding_model = bgp_dras_db.BgpSpeakerDrAgentBinding query = context.session.query(speaker_binding_model) query = query.filter(speaker_binding_model.agent_id == agent_id) return query.count() > 0 def _get_unscheduled_bgp_speakers(self, context): """BGP speakers that needs to be scheduled. 
""" no_agent_binding = ~sql.exists().where( bgp_db.BgpSpeaker.id == bgp_dras_db.BgpSpeakerDrAgentBinding.bgp_speaker_id) query = context.session.query(bgp_db.BgpSpeaker.id).filter( no_agent_binding) return [bgp_speaker_id_[0] for bgp_speaker_id_ in query] class ChanceScheduler(base_scheduler.BaseChanceScheduler, BgpDrAgentSchedulerBase): def __init__(self): super(ChanceScheduler, self).__init__(self) class WeightScheduler(base_scheduler.BaseWeightScheduler, BgpDrAgentSchedulerBase): def __init__(self): super(WeightScheduler, self).__init__(self) neutron-8.4.0/neutron/services/bgp/__init__.py0000664000567000056710000000000013044372760022550 0ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/services/bgp/common/0000775000567000056710000000000013044373210021730 5ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/services/bgp/common/__init__.py0000664000567000056710000000000013044372760024040 0ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/services/bgp/common/opts.py0000664000567000056710000000162513044372760023304 0ustar jenkinsjenkins00000000000000# Copyright 2016 Huawei Technologies India Pvt. Ltd. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import itertools import neutron.services.bgp.agent.config def list_bgp_agent_opts(): return [ ('BGP', itertools.chain( neutron.services.bgp.agent.config.BGP_DRIVER_OPTS, neutron.services.bgp.agent.config.BGP_PROTO_CONFIG_OPTS) ) ] neutron-8.4.0/neutron/services/bgp/common/constants.py0000664000567000056710000000155513044372760024335 0ustar jenkinsjenkins00000000000000# Copyright 2016 Huawei Technologies India Pvt. Ltd. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. AGENT_TYPE_BGP_ROUTING = 'BGP dynamic routing agent' BGP_DRAGENT = 'bgp_dragent' BGP_PLUGIN = 'q-bgp-plugin' # List of supported authentication types. SUPPORTED_AUTH_TYPES = ['none', 'md5'] # Supported AS number range MIN_ASNUM = 1 MAX_ASNUM = 65535 neutron-8.4.0/neutron/services/bgp/bgp_plugin.py0000664000567000056710000003321413044372760023154 0ustar jenkinsjenkins00000000000000# Copyright 2016 Hewlett Packard Enterprise Development Company LP # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from netaddr import IPAddress from oslo_config import cfg from oslo_log import log as logging from oslo_utils import importutils from neutron.api.rpc.agentnotifiers import bgp_dr_rpc_agent_api from neutron.api.rpc.handlers import bgp_speaker_rpc as bs_rpc from neutron.callbacks import events from neutron.callbacks import registry from neutron.callbacks import resources from neutron.common import constants as n_const from neutron.common import rpc as n_rpc from neutron import context from neutron.db import bgp_db from neutron.db import bgp_dragentscheduler_db from neutron.extensions import bgp as bgp_ext from neutron.extensions import bgp_dragentscheduler as dras_ext from neutron.services.bgp.common import constants as bgp_consts from neutron.services import service_base PLUGIN_NAME = bgp_ext.BGP_EXT_ALIAS + '_svc_plugin' LOG = logging.getLogger(__name__) class BgpPlugin(service_base.ServicePluginBase, bgp_db.BgpDbMixin, bgp_dragentscheduler_db.BgpDrAgentSchedulerDbMixin): supported_extension_aliases = [bgp_ext.BGP_EXT_ALIAS, dras_ext.BGP_DRAGENT_SCHEDULER_EXT_ALIAS] def __init__(self): super(BgpPlugin, self).__init__() self.bgp_drscheduler = importutils.import_object( cfg.CONF.bgp_drscheduler_driver) self._setup_rpc() self._register_callbacks() def get_plugin_name(self): return PLUGIN_NAME def get_plugin_type(self): return bgp_ext.BGP_EXT_ALIAS def get_plugin_description(self): """returns string description of the plugin.""" return ("BGP dynamic routing service for announcement of next-hops " "for tenant networks, floating IP's, and DVR host routes.") def _setup_rpc(self): self.topic = bgp_consts.BGP_PLUGIN self.conn = n_rpc.create_connection() self.agent_notifiers[bgp_consts.AGENT_TYPE_BGP_ROUTING] = ( bgp_dr_rpc_agent_api.BgpDrAgentNotifyApi() ) self._bgp_rpc = self.agent_notifiers[bgp_consts.AGENT_TYPE_BGP_ROUTING] self.endpoints = [bs_rpc.BgpSpeakerRpcCallback()] self.conn.create_consumer(self.topic, self.endpoints, fanout=False) self.conn.consume_in_threads() def _register_callbacks(self): registry.subscribe(self.floatingip_update_callback, resources.FLOATING_IP, events.AFTER_UPDATE) registry.subscribe(self.router_interface_callback, resources.ROUTER_INTERFACE, events.AFTER_CREATE) registry.subscribe(self.router_interface_callback, resources.ROUTER_INTERFACE, events.BEFORE_CREATE) registry.subscribe(self.router_interface_callback, resources.ROUTER_INTERFACE, events.AFTER_DELETE) registry.subscribe(self.router_gateway_callback, resources.ROUTER_GATEWAY, events.AFTER_CREATE) registry.subscribe(self.router_gateway_callback, resources.ROUTER_GATEWAY, events.AFTER_DELETE) def create_bgp_speaker(self, context, bgp_speaker): bgp_speaker = super(BgpPlugin, self).create_bgp_speaker(context, bgp_speaker) return bgp_speaker def delete_bgp_speaker(self, context, bgp_speaker_id): hosted_bgp_dragents = self.get_dragents_hosting_bgp_speakers( context, [bgp_speaker_id]) super(BgpPlugin, self).delete_bgp_speaker(context, bgp_speaker_id) for agent in hosted_bgp_dragents: self._bgp_rpc.bgp_speaker_removed(context, bgp_speaker_id, agent.host) def add_bgp_peer(self, context, bgp_speaker_id, bgp_peer_info): 
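        # bgp_peer_info is the peer-association payload; the DB mixin call
        # below returns a dict that includes 'bgp_peer_id', which is then
        # used to notify every BgpDrAgent currently hosting this speaker.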
ret_value = super(BgpPlugin, self).add_bgp_peer(context, bgp_speaker_id, bgp_peer_info) hosted_bgp_dragents = self.get_dragents_hosting_bgp_speakers( context, [bgp_speaker_id]) for agent in hosted_bgp_dragents: self._bgp_rpc.bgp_peer_associated(context, bgp_speaker_id, ret_value['bgp_peer_id'], agent.host) return ret_value def remove_bgp_peer(self, context, bgp_speaker_id, bgp_peer_info): hosted_bgp_dragents = self.get_dragents_hosting_bgp_speakers( context, [bgp_speaker_id]) ret_value = super(BgpPlugin, self).remove_bgp_peer(context, bgp_speaker_id, bgp_peer_info) for agent in hosted_bgp_dragents: self._bgp_rpc.bgp_peer_disassociated(context, bgp_speaker_id, ret_value['bgp_peer_id'], agent.host) def floatingip_update_callback(self, resource, event, trigger, **kwargs): if event != events.AFTER_UPDATE: return ctx = context.get_admin_context() new_router_id = kwargs['router_id'] last_router_id = kwargs['last_known_router_id'] next_hop = kwargs['next_hop'] dest = kwargs['floating_ip_address'] + '/32' bgp_speakers = self._bgp_speakers_for_gw_network_by_family( ctx, kwargs['floating_network_id'], n_const.IP_VERSION_4) if last_router_id and new_router_id != last_router_id: for bgp_speaker in bgp_speakers: self.stop_route_advertisements(ctx, self._bgp_rpc, bgp_speaker.id, [dest]) if next_hop and new_router_id != last_router_id: new_host_route = {'destination': dest, 'next_hop': next_hop} for bgp_speaker in bgp_speakers: self.start_route_advertisements(ctx, self._bgp_rpc, bgp_speaker.id, [new_host_route]) def router_interface_callback(self, resource, event, trigger, **kwargs): if event == events.AFTER_CREATE: self._handle_router_interface_after_create(**kwargs) if event == events.AFTER_DELETE: gw_network = kwargs['network_id'] next_hops = self._next_hops_from_gateway_ips( kwargs['gateway_ips']) ctx = context.get_admin_context() speakers = self._bgp_speakers_for_gateway_network(ctx, gw_network) for speaker in speakers: routes = self._route_list_from_prefixes_and_next_hop( kwargs['cidrs'], next_hops[speaker.ip_version]) self._handle_router_interface_after_delete(gw_network, routes) def _handle_router_interface_after_create(self, **kwargs): gw_network = kwargs['network_id'] if not gw_network: return ctx = context.get_admin_context() with ctx.session.begin(subtransactions=True): speakers = self._bgp_speakers_for_gateway_network(ctx, gw_network) next_hops = self._next_hops_from_gateway_ips( kwargs['gateway_ips']) for speaker in speakers: prefixes = self._tenant_prefixes_by_router( ctx, kwargs['router_id'], speaker.id) next_hop = next_hops.get(speaker.ip_version) if next_hop: rl = self._route_list_from_prefixes_and_next_hop(prefixes, next_hop) self.start_route_advertisements(ctx, self._bgp_rpc, speaker.id, rl) def router_gateway_callback(self, resource, event, trigger, **kwargs): if event == events.AFTER_CREATE: self._handle_router_gateway_after_create(**kwargs) if event == events.AFTER_DELETE: gw_network = kwargs['network_id'] router_id = kwargs['router_id'] next_hops = self._next_hops_from_gateway_ips( kwargs['gateway_ips']) ctx = context.get_admin_context() speakers = self._bgp_speakers_for_gateway_network(ctx, gw_network) for speaker in speakers: if speaker.ip_version in next_hops: next_hop = next_hops[speaker.ip_version] prefixes = self._tenant_prefixes_by_router(ctx, router_id, speaker.id) routes = self._route_list_from_prefixes_and_next_hop( prefixes, next_hop) self._handle_router_interface_after_delete(gw_network, routes) def _handle_router_gateway_after_create(self, **kwargs): ctx = 
context.get_admin_context() gw_network = kwargs['network_id'] router_id = kwargs['router_id'] with ctx.session.begin(subtransactions=True): speakers = self._bgp_speakers_for_gateway_network(ctx, gw_network) next_hops = self._next_hops_from_gateway_ips(kwargs['gw_ips']) for speaker in speakers: if speaker.ip_version in next_hops: next_hop = next_hops[speaker.ip_version] prefixes = self._tenant_prefixes_by_router(ctx, router_id, speaker.id) routes = self._route_list_from_prefixes_and_next_hop( prefixes, next_hop) self.start_route_advertisements(ctx, self._bgp_rpc, speaker.id, routes) def _handle_router_interface_after_delete(self, gw_network, routes): if gw_network and routes: ctx = context.get_admin_context() speakers = self._bgp_speakers_for_gateway_network(ctx, gw_network) for speaker in speakers: self.stop_route_advertisements(ctx, self._bgp_rpc, speaker.id, routes) def _next_hops_from_gateway_ips(self, gw_ips): if gw_ips: return {IPAddress(ip).version: ip for ip in gw_ips} return {} def start_route_advertisements(self, ctx, bgp_rpc, bgp_speaker_id, routes): agents = self.list_dragent_hosting_bgp_speaker(ctx, bgp_speaker_id) for agent in agents['agents']: bgp_rpc.bgp_routes_advertisement(ctx, bgp_speaker_id, routes, agent['host']) msg = "Starting route advertisements for %s on BgpSpeaker %s" self._debug_log_for_routes(msg, routes, bgp_speaker_id) def stop_route_advertisements(self, ctx, bgp_rpc, bgp_speaker_id, routes): agents = self.list_dragent_hosting_bgp_speaker(ctx, bgp_speaker_id) for agent in agents['agents']: bgp_rpc.bgp_routes_withdrawal(ctx, bgp_speaker_id, routes, agent['host']) msg = "Stopping route advertisements for %s on BgpSpeaker %s" self._debug_log_for_routes(msg, routes, bgp_speaker_id) def _debug_log_for_routes(self, msg, routes, bgp_speaker_id): # Could have a large number of routes passed, check log level first if LOG.isEnabledFor(logging.DEBUG): for route in routes: LOG.debug(msg, route, bgp_speaker_id) neutron-8.4.0/neutron/services/bgp/driver/0000775000567000056710000000000013044373210021733 5ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/services/bgp/driver/__init__.py0000664000567000056710000000000013044372760024043 0ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/services/bgp/driver/utils.py0000664000567000056710000000525713044372760023467 0ustar jenkinsjenkins00000000000000# Copyright 2016 Huawei Technologies India Pvt. Ltd. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import six from neutron.services.bgp.common import constants as bgp_consts from neutron.services.bgp.driver import exceptions as bgp_driver_exc # Parameter validation functions provided are provided by the base. def validate_as_num(param, as_num): if not isinstance(as_num, six.integer_types): raise bgp_driver_exc.InvalidParamType(param=param, param_type='integer') if not (bgp_consts.MIN_ASNUM <= as_num <= bgp_consts.MAX_ASNUM): # Must be in [AS_NUM_MIN, AS_NUM_MAX] range. 
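        # e.g. as_num=0 or as_num=65536 falls through to the exception below,
        # while validate_as_num('local_as', 64512) passes both checks.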
allowed_range = ('[' + str(bgp_consts.MIN_ASNUM) + '-' + str(bgp_consts.MAX_ASNUM) + ']') raise bgp_driver_exc.InvalidParamRange(param=param, range=allowed_range) def validate_auth(auth_type, password): validate_string(password) if auth_type in bgp_consts.SUPPORTED_AUTH_TYPES: if auth_type != 'none' and password is None: raise bgp_driver_exc.PasswordNotSpecified(auth_type=auth_type) if auth_type == 'none' and password is not None: raise bgp_driver_exc.InvaildAuthType(auth_type=auth_type) else: raise bgp_driver_exc.InvaildAuthType(auth_type=auth_type) def validate_string(param): if param is not None: if not isinstance(param, six.string_types): raise bgp_driver_exc.InvalidParamType(param=param, param_type='string') class BgpMultiSpeakerCache(object): """Class for saving multiple BGP speakers information. Version history: 1.0 - Initial version for caching multiple BGP speaker information. """ def __init__(self): self.cache = {} def get_hosted_bgp_speakers_count(self): return len(self.cache) def put_bgp_speaker(self, local_as, speaker): self.cache[local_as] = speaker def get_bgp_speaker(self, local_as): return self.cache.get(local_as) def remove_bgp_speaker(self, local_as): self.cache.pop(local_as, None) neutron-8.4.0/neutron/services/bgp/driver/ryu/0000775000567000056710000000000013044373210022552 5ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/services/bgp/driver/ryu/__init__.py0000664000567000056710000000000013044372760024662 0ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/services/bgp/driver/ryu/driver.py0000664000567000056710000002240013044372760024426 0ustar jenkinsjenkins00000000000000# Copyright 2016 Huawei Technologies India Pvt. Ltd. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_log import log as logging from oslo_utils import encodeutils from ryu.services.protocols.bgp import bgpspeaker from ryu.services.protocols.bgp.rtconf.neighbors import CONNECT_MODE_ACTIVE from neutron.services.bgp.driver import base from neutron.services.bgp.driver import exceptions as bgp_driver_exc from neutron.services.bgp.driver import utils from neutron._i18n import _LE, _LI LOG = logging.getLogger(__name__) # Function for logging BGP peer and path changes. def bgp_peer_down_cb(remote_ip, remote_as): LOG.info(_LI('BGP Peer %(peer_ip)s for remote_as=%(peer_as)d went DOWN.'), {'peer_ip': remote_ip, 'peer_as': remote_as}) def bgp_peer_up_cb(remote_ip, remote_as): LOG.info(_LI('BGP Peer %(peer_ip)s for remote_as=%(peer_as)d is UP.'), {'peer_ip': remote_ip, 'peer_as': remote_as}) def best_path_change_cb(event): LOG.info(_LI("Best path change observed. 
cidr=%(prefix)s, " "nexthop=%(nexthop)s, remote_as=%(remote_as)d, " "is_withdraw=%(is_withdraw)s"), {'prefix': event.prefix, 'nexthop': event.nexthop, 'remote_as': event.remote_as, 'is_withdraw': event.is_withdraw}) class RyuBgpDriver(base.BgpDriverBase): """BGP speaker implementation via Ryu.""" def __init__(self, cfg): LOG.info(_LI('Initializing Ryu driver for BGP Speaker functionality.')) self._read_config(cfg) # Note: Even though Ryu can only support one BGP speaker as of now, # we have tried making the framework generic for the future purposes. self.cache = utils.BgpMultiSpeakerCache() def _read_config(self, cfg): if cfg is None or cfg.bgp_router_id is None: # If either cfg or router_id is not specified, raise voice LOG.error(_LE('BGP router-id MUST be specified for the correct ' 'functional working.')) else: self.routerid = cfg.bgp_router_id LOG.info(_LI('Initialized Ryu BGP Speaker driver interface with ' 'bgp_router_id=%s'), self.routerid) def add_bgp_speaker(self, speaker_as): curr_speaker = self.cache.get_bgp_speaker(speaker_as) if curr_speaker is not None: raise bgp_driver_exc.BgpSpeakerAlreadyScheduled( current_as=speaker_as, rtid=self.routerid) # Ryu can only support One speaker if self.cache.get_hosted_bgp_speakers_count() == 1: raise bgp_driver_exc.BgpSpeakerMaxScheduled(count=1) # Validate input parameters. # speaker_as must be an integer in the allowed range. utils.validate_as_num('local_as', speaker_as) # Notify Ryu about BGP Speaker addition. # Please note: Since, only the route-advertisement support is # implemented we are explicitly setting the bgp_server_port # attribute to 0 which disables listening on port 179. curr_speaker = bgpspeaker.BGPSpeaker(as_number=speaker_as, router_id=self.routerid, bgp_server_port=0, best_path_change_handler=best_path_change_cb, peer_down_handler=bgp_peer_down_cb, peer_up_handler=bgp_peer_up_cb) LOG.info(_LI('Added BGP Speaker for local_as=%(as)d with ' 'router_id= %(rtid)s.'), {'as': speaker_as, 'rtid': self.routerid}) self.cache.put_bgp_speaker(speaker_as, curr_speaker) def delete_bgp_speaker(self, speaker_as): curr_speaker = self.cache.get_bgp_speaker(speaker_as) if not curr_speaker: raise bgp_driver_exc.BgpSpeakerNotAdded(local_as=speaker_as, rtid=self.routerid) # Notify Ryu about BGP Speaker deletion curr_speaker.shutdown() LOG.info(_LI('Removed BGP Speaker for local_as=%(as)d with ' 'router_id=%(rtid)s.'), {'as': speaker_as, 'rtid': self.routerid}) self.cache.remove_bgp_speaker(speaker_as) def add_bgp_peer(self, speaker_as, peer_ip, peer_as, auth_type='none', password=None): curr_speaker = self.cache.get_bgp_speaker(speaker_as) if not curr_speaker: raise bgp_driver_exc.BgpSpeakerNotAdded(local_as=speaker_as, rtid=self.routerid) # Validate peer_ip and peer_as. utils.validate_as_num('remote_as', peer_as) utils.validate_string(peer_ip) utils.validate_auth(auth_type, password) if password is not None: password = encodeutils.to_utf8(password) # Notify Ryu about BGP Peer addition curr_speaker.neighbor_add(address=peer_ip, remote_as=peer_as, password=password, connect_mode=CONNECT_MODE_ACTIVE) LOG.info(_LI('Added BGP Peer %(peer)s for remote_as=%(as)d to ' 'BGP Speaker running for local_as=%(local_as)d.'), {'peer': peer_ip, 'as': peer_as, 'local_as': speaker_as}) def delete_bgp_peer(self, speaker_as, peer_ip): curr_speaker = self.cache.get_bgp_speaker(speaker_as) if not curr_speaker: raise bgp_driver_exc.BgpSpeakerNotAdded(local_as=speaker_as, rtid=self.routerid) # Validate peer_ip. It must be a string. 
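        # (type check only -- e.g. peer_ip='192.0.2.10'; address validity is
        # left to the underlying BGP speaker implementation)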
utils.validate_string(peer_ip) # Notify Ryu about BGP Peer removal curr_speaker.neighbor_del(address=peer_ip) LOG.info(_LI('Removed BGP Peer %(peer)s from BGP Speaker ' 'running for local_as=%(local_as)d.'), {'peer': peer_ip, 'local_as': speaker_as}) def advertise_route(self, speaker_as, cidr, nexthop): curr_speaker = self.cache.get_bgp_speaker(speaker_as) if not curr_speaker: raise bgp_driver_exc.BgpSpeakerNotAdded(local_as=speaker_as, rtid=self.routerid) # Validate cidr and nexthop. Both must be strings. utils.validate_string(cidr) utils.validate_string(nexthop) # Notify Ryu about route advertisement curr_speaker.prefix_add(prefix=cidr, next_hop=nexthop) LOG.info(_LI('Route cidr=%(prefix)s, nexthop=%(nexthop)s is ' 'advertised for BGP Speaker running for ' 'local_as=%(local_as)d.'), {'prefix': cidr, 'nexthop': nexthop, 'local_as': speaker_as}) def withdraw_route(self, speaker_as, cidr, nexthop=None): curr_speaker = self.cache.get_bgp_speaker(speaker_as) if not curr_speaker: raise bgp_driver_exc.BgpSpeakerNotAdded(local_as=speaker_as, rtid=self.routerid) # Validate cidr. It must be a string. utils.validate_string(cidr) # Notify Ryu about route withdrawal curr_speaker.prefix_del(prefix=cidr) LOG.info(_LI('Route cidr=%(prefix)s is withdrawn from BGP Speaker ' 'running for local_as=%(local_as)d.'), {'prefix': cidr, 'local_as': speaker_as}) def get_bgp_speaker_statistics(self, speaker_as): LOG.info(_LI('Collecting BGP Speaker statistics for local_as=%d.'), speaker_as) curr_speaker = self.cache.get_bgp_speaker(speaker_as) if not curr_speaker: raise bgp_driver_exc.BgpSpeakerNotAdded(local_as=speaker_as, rtid=self.routerid) # TODO(vikram): Filter and return the necessary information. # Will be done as part of new RFE requirement # https://bugs.launchpad.net/neutron/+bug/1527993 return curr_speaker.neighbor_state_get() def get_bgp_peer_statistics(self, speaker_as, peer_ip): LOG.info(_LI('Collecting BGP Peer statistics for peer_ip=%(peer)s, ' 'running in speaker_as=%(speaker_as)d '), {'peer': peer_ip, 'speaker_as': speaker_as}) curr_speaker = self.cache.get_bgp_speaker(speaker_as) if not curr_speaker: raise bgp_driver_exc.BgpSpeakerNotAdded(local_as=speaker_as, rtid=self.routerid) # TODO(vikram): Filter and return the necessary information. # Will be done as part of new RFE requirement # https://bugs.launchpad.net/neutron/+bug/1527993 return curr_speaker.neighbor_state_get(address=peer_ip) neutron-8.4.0/neutron/services/bgp/driver/base.py0000664000567000056710000001317113044372760023233 0ustar jenkinsjenkins00000000000000# Copyright 2016 Huawei Technologies India Pvt. Ltd. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import abc import six @six.add_metaclass(abc.ABCMeta) class BgpDriverBase(object): """Base class for BGP Speaking drivers. Any class which provides BGP functionality should extend this defined base class. """ @abc.abstractmethod def add_bgp_speaker(self, speaker_as): """Add a BGP speaker. :param speaker_as: Specifies BGP Speaker autonomous system number. 
Must be an integer between MIN_ASNUM and MAX_ASNUM. :type speaker_as: integer :raises: BgpSpeakerAlreadyScheduled, BgpSpeakerMaxScheduled, InvalidParamType, InvalidParamRange """ @abc.abstractmethod def delete_bgp_speaker(self, speaker_as): """Deletes BGP speaker. :param speaker_as: Specifies BGP Speaker autonomous system number. Must be an integer between MIN_ASNUM and MAX_ASNUM. :type speaker_as: integer :raises: BgpSpeakerNotAdded """ @abc.abstractmethod def add_bgp_peer(self, speaker_as, peer_ip, peer_as, auth_type='none', password=None): """Add a new BGP peer. :param speaker_as: Specifies BGP Speaker autonomous system number. Must be an integer between MIN_ASNUM and MAX_ASNUM. :type speaker_as: integer :param peer_ip: Specifies the IP address of the peer. :type peer_ip: string :param peer_as: Specifies Autonomous Number of the peer. Must be an integer between MIN_ASNUM and MAX_ASNUM. :type peer_as: integer :param auth_type: Specifies authentication type. By default, authentication will be disabled. :type auth_type: value in SUPPORTED_AUTH_TYPES :param password: Authentication password.By default, authentication will be disabled. :type password: string :raises: BgpSpeakerNotAdded, InvalidParamType, InvalidParamRange, InvaildAuthType, PasswordNotSpecified """ @abc.abstractmethod def delete_bgp_peer(self, speaker_as, peer_ip): """Delete a BGP peer associated with the given peer IP :param speaker_as: Specifies BGP Speaker autonomous system number. Must be an integer between MIN_ASNUM and MAX_ASNUM. :type speaker_as: integer :param peer_ip: Specifies the IP address of the peer. Must be the string representation of an IP address. :type peer_ip: string :raises: BgpSpeakerNotAdded, BgpPeerNotAdded """ @abc.abstractmethod def advertise_route(self, speaker_as, cidr, nexthop): """Add a new prefix to advertise. :param speaker_as: Specifies BGP Speaker autonomous system number. Must be an integer between MIN_ASNUM and MAX_ASNUM. :type speaker_as: integer :param cidr: CIDR of the network to advertise. Must be the string representation of an IP network (e.g., 10.1.1.0/24) :type cidr: string :param nexthop: Specifies the next hop address for the above prefix. :type nexthop: string :raises: BgpSpeakerNotAdded, InvalidParamType """ @abc.abstractmethod def withdraw_route(self, speaker_as, cidr, nexthop=None): """Withdraw an advertised prefix. :param speaker_as: Specifies BGP Speaker autonomous system number. Must be an integer between MIN_ASNUM and MAX_ASNUM. :type speaker_as: integer :param cidr: CIDR of the network to withdraw. Must be the string representation of an IP network (e.g., 10.1.1.0/24) :type cidr: string :param nexthop: Specifies the next hop address for the above prefix. :type nexthop: string :raises: BgpSpeakerNotAdded, RouteNotAdvertised, InvalidParamType """ @abc.abstractmethod def get_bgp_speaker_statistics(self, speaker_as): """Collect BGP Speaker statistics. :param speaker_as: Specifies BGP Speaker autonomous system number. Must be an integer between MIN_ASNUM and MAX_ASNUM. :type speaker_as: integer :raises: BgpSpeakerNotAdded :returns: bgp_speaker_stats: string """ @abc.abstractmethod def get_bgp_peer_statistics(self, speaker_as, peer_ip, peer_as): """Collect BGP Peer statistics. :param speaker_as: Specifies BGP Speaker autonomous system number. Must be an integer between MIN_ASNUM and MAX_ASNUM. :type speaker_as: integer :param peer_ip: Specifies the IP address of the peer. :type peer_ip: string :param peer_as: Specifies the AS number of the peer. 
Must be an integer between MIN_ASNUM and MAX_ASNUM. :type peer_as: integer . :raises: BgpSpeakerNotAdded, BgpPeerNotAdded :returns: bgp_peer_stats: string """ neutron-8.4.0/neutron/services/bgp/driver/exceptions.py0000664000567000056710000000401013044372760024472 0ustar jenkinsjenkins00000000000000# Copyright 2016 Huawei Technologies India Pvt. Ltd. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. from neutron._i18n import _ from neutron.common import exceptions as n_exc # BGP Driver Exceptions class BgpSpeakerNotAdded(n_exc.BadRequest): message = _("BGP Speaker for local_as=%(local_as)s with " "router_id=%(rtid)s not added yet.") class BgpSpeakerMaxScheduled(n_exc.BadRequest): message = _("Already hosting maximum number of BGP Speakers. " "Allowed scheduled count=%(count)d") class BgpSpeakerAlreadyScheduled(n_exc.Conflict): message = _("Already hosting BGP Speaker for local_as=%(current_as)d with " "router_id=%(rtid)s.") class BgpPeerNotAdded(n_exc.BadRequest): message = _("BGP Peer %(peer_ip)s for remote_as=%(remote_as)s, running " "for BGP Speaker %(speaker_as)d not added yet.") class RouteNotAdvertised(n_exc.BadRequest): message = _("Route %(cidr)s not advertised for BGP Speaker " "%(speaker_as)d.") class InvalidParamType(n_exc.NeutronException): message = _("Parameter %(param)s must be of %(param_type)s type.") class InvalidParamRange(n_exc.NeutronException): message = _("%(param)s must be in %(range)s range.") class InvaildAuthType(n_exc.BadRequest): message = _("Authentication type not supported. Requested " "type=%(auth_type)s.") class PasswordNotSpecified(n_exc.BadRequest): message = _("Password not specified for authentication " "type=%(auth_type)s.") neutron-8.4.0/neutron/services/bgp/agent/0000775000567000056710000000000013044373210021536 5ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/services/bgp/agent/config.py0000664000567000056710000000174313044372760023373 0ustar jenkinsjenkins00000000000000# Copyright 2016 Huawei Technologies India Pvt. Ltd. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
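# Example (illustrative) [BGP] section consuming the options defined below,
# e.g. in the bgp_dragent configuration file; the driver class path matches
# the Ryu driver shipped in this tree:
#
#     [BGP]
#     bgp_speaker_driver = neutron.services.bgp.driver.ryu.driver.RyuBgpDriver
#     bgp_router_id = 192.0.2.1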
from oslo_config import cfg from neutron._i18n import _ BGP_DRIVER_OPTS = [ cfg.StrOpt('bgp_speaker_driver', help=_("BGP speaker driver class to be instantiated.")) ] BGP_PROTO_CONFIG_OPTS = [ cfg.StrOpt('bgp_router_id', help=_("32-bit BGP identifier, typically an IPv4 address " "owned by the system running the BGP DrAgent.")) ] neutron-8.4.0/neutron/services/bgp/agent/bgp_dragent.py0000664000567000056710000007550413044372760024410 0ustar jenkinsjenkins00000000000000# Copyright 2016 Huawei Technologies India Pvt. Ltd. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import collections from oslo_config import cfg from oslo_log import log as logging import oslo_messaging from oslo_service import loopingcall from oslo_service import periodic_task from oslo_utils import importutils from neutron.agent import rpc as agent_rpc from neutron.common import constants from neutron.common import rpc as n_rpc from neutron.common import topics from neutron.common import utils from neutron import context from neutron.extensions import bgp as bgp_ext from neutron._i18n import _, _LE, _LI, _LW from neutron import manager from neutron.services.bgp.common import constants as bgp_consts from neutron.services.bgp.driver import exceptions as driver_exc LOG = logging.getLogger(__name__) class BgpDrAgent(manager.Manager): """BGP Dynamic Routing agent service manager. Note that the public methods of this class are exposed as the server side of an rpc interface. The neutron server uses neutron.api.rpc.agentnotifiers.bgp_dr_rpc_agent_api. BgpDrAgentNotifyApi as the client side to execute the methods here. For more information about changing rpc interfaces, see doc/source/devref/rpc_api.rst. 
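    The public handlers below (bgp_speaker_create_end,
    bgp_peer_association_end, bgp_routes_advertisement_end, and their
    removal counterparts) each take (context, payload) and are serialized
    under the 'bgp-dr-agent' lock.
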
API version history: 1.0 initial Version """ target = oslo_messaging.Target(version='1.0') def __init__(self, host, conf=None): super(BgpDrAgent, self).__init__() self.initialize_driver(conf) self.needs_resync_reasons = collections.defaultdict(list) self.needs_full_sync_reason = None self.cache = BgpSpeakerCache() self.context = context.get_admin_context_without_session() self.plugin_rpc = BgpDrPluginApi(bgp_consts.BGP_PLUGIN, self.context, host) def initialize_driver(self, conf): self.conf = conf or cfg.CONF.BGP try: self.dr_driver_cls = ( importutils.import_object(self.conf.bgp_speaker_driver, self.conf)) except ImportError: LOG.exception(_LE("Error while importing BGP speaker driver %s"), self.conf.bgp_speaker_driver) raise SystemExit(1) def _handle_driver_failure(self, bgp_speaker_id, method, driver_exec): self.schedule_resync(reason=driver_exec, speaker_id=bgp_speaker_id) LOG.error(_LE('Call to driver for BGP Speaker %(bgp_speaker)s ' '%(method)s has failed with exception ' '%(driver_exec)s.'), {'bgp_speaker': bgp_speaker_id, 'method': method, 'driver_exec': driver_exec}) def after_start(self): self.run() LOG.info(_LI("BGP Dynamic Routing agent started")) def run(self): """Activate BGP Dynamic Routing agent.""" self.sync_state(self.context) self.periodic_resync(self.context) @utils.synchronized('bgp-dragent') def sync_state(self, context, full_sync=None, bgp_speakers=None): try: hosted_bgp_speakers = self.plugin_rpc.get_bgp_speakers(context) hosted_bgp_speaker_ids = [bgp_speaker['id'] for bgp_speaker in hosted_bgp_speakers] cached_bgp_speakers = self.cache.get_bgp_speaker_ids() for bgp_speaker_id in cached_bgp_speakers: if bgp_speaker_id not in hosted_bgp_speaker_ids: self.remove_bgp_speaker_from_dragent(bgp_speaker_id) resync_all = not bgp_speakers or full_sync only_bs = set() if resync_all else set(bgp_speakers) for hosted_bgp_speaker in hosted_bgp_speakers: hosted_bs_id = hosted_bgp_speaker['id'] if resync_all or hosted_bs_id in only_bs: if not self.cache.is_bgp_speaker_added(hosted_bs_id): self.safe_configure_dragent_for_bgp_speaker( hosted_bgp_speaker) continue self.sync_bgp_speaker(hosted_bgp_speaker) resync_reason = "Periodic route cache refresh" self.schedule_resync(speaker_id=hosted_bs_id, reason=resync_reason) except Exception as e: self.schedule_full_resync(reason=e) LOG.error(_LE('Unable to sync BGP speaker state.')) def sync_bgp_speaker(self, bgp_speaker): # sync BGP Speakers bgp_peer_ips = set( [bgp_peer['peer_ip'] for bgp_peer in bgp_speaker['peers']]) cached_bgp_peer_ips = set( self.cache.get_bgp_peer_ips(bgp_speaker['id'])) removed_bgp_peer_ips = cached_bgp_peer_ips - bgp_peer_ips for bgp_peer_ip in removed_bgp_peer_ips: self.remove_bgp_peer_from_bgp_speaker(bgp_speaker['id'], bgp_peer_ip) if bgp_peer_ips: self.add_bgp_peers_to_bgp_speaker(bgp_speaker) # sync advertise routes cached_adv_routes = self.cache.get_adv_routes(bgp_speaker['id']) adv_routes = bgp_speaker['advertised_routes'] if cached_adv_routes == adv_routes: return for cached_route in cached_adv_routes: if cached_route not in adv_routes: self.withdraw_route_via_bgp_speaker(bgp_speaker['id'], bgp_speaker['local_as'], cached_route) self.advertise_routes_via_bgp_speaker(bgp_speaker) @utils.exception_logger() def _periodic_resync_helper(self, context): """Resync the BgpDrAgent state at the configured interval.""" if self.needs_resync_reasons or self.needs_full_sync_reason: full_sync = self.needs_full_sync_reason reasons = self.needs_resync_reasons # Reset old reasons self.needs_full_sync_reason = None 
self.needs_resync_reasons = collections.defaultdict(list) if full_sync: LOG.debug("resync all: %(reason)s", {"reason": full_sync}) for bgp_speaker, reason in reasons.items(): LOG.debug("resync (%(bgp_speaker)s): %(reason)s", {"reason": reason, "bgp_speaker": bgp_speaker}) self.sync_state( context, full_sync=full_sync, bgp_speakers=reasons.keys()) # NOTE: spacing is set 1 sec. The actual interval is controlled # by neutron/service.py which defaults to CONF.periodic_interval @periodic_task.periodic_task(spacing=1) def periodic_resync(self, context): LOG.debug("Started periodic resync.") self._periodic_resync_helper(context) @utils.synchronized('bgp-dr-agent') def bgp_speaker_create_end(self, context, payload): """Handle bgp_speaker_create_end notification event.""" bgp_speaker_id = payload['bgp_speaker']['id'] LOG.debug('Received BGP speaker create notification for ' 'speaker_id=%(speaker_id)s from the neutron server.', {'speaker_id': bgp_speaker_id}) self.add_bgp_speaker_helper(bgp_speaker_id) @utils.synchronized('bgp-dr-agent') def bgp_speaker_remove_end(self, context, payload): """Handle bgp_speaker_create_end notification event.""" bgp_speaker_id = payload['bgp_speaker']['id'] LOG.debug('Received BGP speaker remove notification for ' 'speaker_id=%(speaker_id)s from the neutron server.', {'speaker_id': bgp_speaker_id}) self.remove_bgp_speaker_from_dragent(bgp_speaker_id) @utils.synchronized('bgp-dr-agent') def bgp_peer_association_end(self, context, payload): """Handle bgp_peer_association_end notification event.""" bgp_peer_id = payload['bgp_peer']['peer_id'] bgp_speaker_id = payload['bgp_peer']['speaker_id'] LOG.debug('Received BGP peer associate notification for ' 'speaker_id=%(speaker_id)s peer_id=%(peer_id)s ' 'from the neutron server.', {'speaker_id': bgp_speaker_id, 'peer_id': bgp_peer_id}) self.add_bgp_peer_helper(bgp_speaker_id, bgp_peer_id) @utils.synchronized('bgp-dr-agent') def bgp_peer_disassociation_end(self, context, payload): """Handle bgp_peer_disassociation_end notification event.""" bgp_peer_ip = payload['bgp_peer']['peer_ip'] bgp_speaker_id = payload['bgp_peer']['speaker_id'] LOG.debug('Received BGP peer disassociate notification for ' 'speaker_id=%(speaker_id)s peer_ip=%(peer_ip)s ' 'from the neutron server.', {'speaker_id': bgp_speaker_id, 'peer_ip': bgp_peer_ip}) self.remove_bgp_peer_from_bgp_speaker(bgp_speaker_id, bgp_peer_ip) @utils.synchronized('bgp-dr-agent') def bgp_routes_advertisement_end(self, context, payload): """Handle bgp_routes_advertisement_end notification event.""" bgp_speaker_id = payload['advertise_routes']['speaker_id'] LOG.debug('Received routes advertisement end notification ' 'for speaker_id=%(speaker_id)s from the neutron server.', {'speaker_id': bgp_speaker_id}) routes = payload['advertise_routes']['routes'] self.add_routes_helper(bgp_speaker_id, routes) @utils.synchronized('bgp-dr-agent') def bgp_routes_withdrawal_end(self, context, payload): """Handle bgp_routes_withdrawal_end notification event.""" bgp_speaker_id = payload['withdraw_routes']['speaker_id'] LOG.debug('Received route withdrawal notification for ' 'speaker_id=%(speaker_id)s from the neutron server.', {'speaker_id': bgp_speaker_id}) routes = payload['withdraw_routes']['routes'] self.withdraw_routes_helper(bgp_speaker_id, routes) def add_bgp_speaker_helper(self, bgp_speaker_id): """Add BGP speaker.""" bgp_speaker = self.safe_get_bgp_speaker_info(bgp_speaker_id) if bgp_speaker: self.add_bgp_speaker_on_dragent(bgp_speaker) def add_bgp_peer_helper(self, bgp_speaker_id, 
bgp_peer_id): """Add BGP peer.""" # Ideally BGP Speaker must be added by now, If not then let's # re-sync. if not self.cache.is_bgp_speaker_added(bgp_speaker_id): self.schedule_resync(speaker_id=bgp_speaker_id, reason="BGP Speaker Out-of-sync") return bgp_peer = self.safe_get_bgp_peer_info(bgp_speaker_id, bgp_peer_id) if bgp_peer: bgp_speaker_as = self.cache.get_bgp_speaker_local_as( bgp_speaker_id) self.add_bgp_peer_to_bgp_speaker(bgp_speaker_id, bgp_speaker_as, bgp_peer) def add_routes_helper(self, bgp_speaker_id, routes): """Advertise routes to BGP speaker.""" # Ideally BGP Speaker must be added by now, If not then let's # re-sync. if not self.cache.is_bgp_speaker_added(bgp_speaker_id): self.schedule_resync(speaker_id=bgp_speaker_id, reason="BGP Speaker Out-of-sync") return bgp_speaker_as = self.cache.get_bgp_speaker_local_as(bgp_speaker_id) for route in routes: self.advertise_route_via_bgp_speaker(bgp_speaker_id, bgp_speaker_as, route) if self.is_resync_scheduled(bgp_speaker_id): break def withdraw_routes_helper(self, bgp_speaker_id, routes): """Withdraw routes advertised by BGP speaker.""" # Ideally BGP Speaker must be added by now, If not then let's # re-sync. if not self.cache.is_bgp_speaker_added(bgp_speaker_id): self.schedule_resync(speaker_id=bgp_speaker_id, reason="BGP Speaker Out-of-sync") return bgp_speaker_as = self.cache.get_bgp_speaker_local_as(bgp_speaker_id) for route in routes: self.withdraw_route_via_bgp_speaker(bgp_speaker_id, bgp_speaker_as, route) if self.is_resync_scheduled(bgp_speaker_id): break def safe_get_bgp_speaker_info(self, bgp_speaker_id): try: bgp_speaker = self.plugin_rpc.get_bgp_speaker_info(self.context, bgp_speaker_id) if not bgp_speaker: LOG.warning(_LW('BGP Speaker %s has been deleted.'), bgp_speaker_id) return bgp_speaker except Exception as e: self.schedule_resync(speaker_id=bgp_speaker_id, reason=e) LOG.error(_LE('BGP Speaker %(bgp_speaker)s info call ' 'failed with reason=%(e)s.'), {'bgp_speaker': bgp_speaker_id, 'e': e}) def safe_get_bgp_peer_info(self, bgp_speaker_id, bgp_peer_id): try: bgp_peer = self.plugin_rpc.get_bgp_peer_info(self.context, bgp_peer_id) if not bgp_peer: LOG.warning(_LW('BGP Peer %s has been deleted.'), bgp_peer) return bgp_peer except Exception as e: self.schedule_resync(speaker_id=bgp_speaker_id, reason=e) LOG.error(_LE('BGP peer %(bgp_peer)s info call ' 'failed with reason=%(e)s.'), {'bgp_peer': bgp_peer_id, 'e': e}) @utils.exception_logger() def safe_configure_dragent_for_bgp_speaker(self, bgp_speaker): try: self.add_bgp_speaker_on_dragent(bgp_speaker) except (bgp_ext.BgpSpeakerNotFound, RuntimeError): LOG.warning(_LW('BGP speaker %s may have been deleted and its ' 'resources may have already been disposed.'), bgp_speaker['id']) def add_bgp_speaker_on_dragent(self, bgp_speaker): # Caching BGP speaker details in BGPSpeakerCache. Will be used # during smooth. self.cache.put_bgp_speaker(bgp_speaker) LOG.debug('Calling driver for adding BGP speaker %(speaker_id)s,' ' speaking for local_as %(local_as)s', {'speaker_id': bgp_speaker['id'], 'local_as': bgp_speaker['local_as']}) try: self.dr_driver_cls.add_bgp_speaker(bgp_speaker['local_as']) except driver_exc.BgpSpeakerAlreadyScheduled: return except Exception as e: self._handle_driver_failure(bgp_speaker['id'], 'add_bgp_speaker', e) # Add peer and route information to the driver. 
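        # Both calls below bail out early if a resync was scheduled for this
        # speaker mid-iteration (see is_resync_scheduled), so a failed driver
        # call does not keep pushing stale state.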
self.add_bgp_peers_to_bgp_speaker(bgp_speaker) self.advertise_routes_via_bgp_speaker(bgp_speaker) self.schedule_resync(speaker_id=bgp_speaker['id'], reason="Periodic route cache refresh") def remove_bgp_speaker_from_dragent(self, bgp_speaker_id): if self.cache.is_bgp_speaker_added(bgp_speaker_id): bgp_speaker_as = self.cache.get_bgp_speaker_local_as( bgp_speaker_id) self.cache.remove_bgp_speaker_by_id(bgp_speaker_id) LOG.debug('Calling driver for removing BGP speaker %(speaker_as)s', {'speaker_as': bgp_speaker_as}) try: self.dr_driver_cls.delete_bgp_speaker(bgp_speaker_as) except Exception as e: self._handle_driver_failure(bgp_speaker_id, 'remove_bgp_speaker', e) return # Ideally, only the added speakers can be removed by the neutron # server. Looks like there might be some synchronization # issue between the server and the agent. Let's initiate a re-sync # to resolve the issue. self.schedule_resync(speaker_id=bgp_speaker_id, reason="BGP Speaker Out-of-sync") def add_bgp_peers_to_bgp_speaker(self, bgp_speaker): for bgp_peer in bgp_speaker['peers']: self.add_bgp_peer_to_bgp_speaker(bgp_speaker['id'], bgp_speaker['local_as'], bgp_peer) if self.is_resync_scheduled(bgp_speaker['id']): break def add_bgp_peer_to_bgp_speaker(self, bgp_speaker_id, bgp_speaker_as, bgp_peer): if self.cache.get_bgp_peer_by_ip(bgp_speaker_id, bgp_peer['peer_ip']): return self.cache.put_bgp_peer(bgp_speaker_id, bgp_peer) LOG.debug('Calling driver interface for adding BGP peer %(peer_ip)s ' 'remote_as=%(remote_as)s to BGP Speaker running for ' 'local_as=%(local_as)d', {'peer_ip': bgp_peer['peer_ip'], 'remote_as': bgp_peer['remote_as'], 'local_as': bgp_speaker_as}) try: self.dr_driver_cls.add_bgp_peer(bgp_speaker_as, bgp_peer['peer_ip'], bgp_peer['remote_as'], bgp_peer['auth_type'], bgp_peer['password']) except Exception as e: self._handle_driver_failure(bgp_speaker_id, 'add_bgp_peer', e) def remove_bgp_peer_from_bgp_speaker(self, bgp_speaker_id, bgp_peer_ip): # Ideally BGP Speaker must be added by now, If not then let's # re-sync. if not self.cache.is_bgp_speaker_added(bgp_speaker_id): self.schedule_resync(speaker_id=bgp_speaker_id, reason="BGP Speaker Out-of-sync") return if self.cache.is_bgp_peer_added(bgp_speaker_id, bgp_peer_ip): self.cache.remove_bgp_peer_by_ip(bgp_speaker_id, bgp_peer_ip) bgp_speaker_as = self.cache.get_bgp_speaker_local_as( bgp_speaker_id) LOG.debug('Calling driver interface to remove BGP peer ' '%(peer_ip)s from BGP Speaker running for ' 'local_as=%(local_as)d', {'peer_ip': bgp_peer_ip, 'local_as': bgp_speaker_as}) try: self.dr_driver_cls.delete_bgp_peer(bgp_speaker_as, bgp_peer_ip) except Exception as e: self._handle_driver_failure(bgp_speaker_id, 'remove_bgp_peer', e) return # Ideally, only the added peers can be removed by the neutron # server. Looks like there might be some synchronization # issue between the server and the agent. Let's initiate a re-sync # to resolve the issue. self.schedule_resync(speaker_id=bgp_speaker_id, reason="BGP Peer Out-of-sync") def advertise_routes_via_bgp_speaker(self, bgp_speaker): for route in bgp_speaker['advertised_routes']: self.advertise_route_via_bgp_speaker(bgp_speaker['id'], bgp_speaker['local_as'], route) if self.is_resync_scheduled(bgp_speaker['id']): break def advertise_route_via_bgp_speaker(self, bgp_speaker_id, bgp_speaker_as, route): if self.cache.is_route_advertised(bgp_speaker_id, route): # Requested route already advertised. Hence, Nothing to be done. 
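            # Routes are dicts of the form, e.g.,
            # {'destination': '10.0.0.0/24', 'next_hop': '192.0.2.1'};
            # a route counts as advertised only when both keys match.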
return self.cache.put_adv_route(bgp_speaker_id, route) LOG.debug('Calling driver for advertising prefix: %(cidr)s, ' 'next_hop: %(nexthop)s', {'cidr': route['destination'], 'nexthop': route['next_hop']}) try: self.dr_driver_cls.advertise_route(bgp_speaker_as, route['destination'], route['next_hop']) except Exception as e: self._handle_driver_failure(bgp_speaker_id, 'advertise_route', e) def withdraw_route_via_bgp_speaker(self, bgp_speaker_id, bgp_speaker_as, route): if self.cache.is_route_advertised(bgp_speaker_id, route): self.cache.remove_adv_route(bgp_speaker_id, route) LOG.debug('Calling driver for withdrawing prefix: %(cidr)s, ' 'next_hop: %(nexthop)s', {'cidr': route['destination'], 'nexthop': route['next_hop']}) try: self.dr_driver_cls.withdraw_route(bgp_speaker_as, route['destination'], route['next_hop']) except Exception as e: self._handle_driver_failure(bgp_speaker_id, 'withdraw_route', e) return # Ideally, only the advertised routes can be withdrawn by the # neutron server. Looks like there might be some synchronization # issue between the server and the agent. Let's initiate a re-sync # to resolve the issue. self.schedule_resync(speaker_id=bgp_speaker_id, reason="Advertised routes Out-of-sync") def schedule_full_resync(self, reason): LOG.debug('Recording full resync request for all BGP Speakers ' 'with reason=%s', reason) self.needs_full_sync_reason = reason def schedule_resync(self, reason, speaker_id): """Schedule a full resync for a given BGP Speaker. If no BGP Speaker is specified, resync all BGP Speakers. """ LOG.debug('Recording resync request for BGP Speaker %s ' 'with reason=%s', speaker_id, reason) self.needs_resync_reasons[speaker_id].append(reason) def is_resync_scheduled(self, bgp_speaker_id): if bgp_speaker_id not in self.needs_resync_reasons: return False reason = self.needs_resync_reasons[bgp_speaker_id] # Re-sync scheduled for the queried BGP speaker. No point # continuing further. Let's stop processing and wait for # re-sync to happen. LOG.debug('Re-sync already scheduled for BGP Speaker %s ' 'with reason=%s', bgp_speaker_id, reason) return True class BgpDrPluginApi(object): """Agent side of BgpDrAgent RPC API. This class implements the client side of an rpc interface. The server side of this interface can be found in neutron.api.rpc.handlers.bgp_speaker_rpc.BgpSpeakerRpcCallback. For more information about changing rpc interfaces, see doc/source/devref/rpc_api.rst. API version history: 1.0 - Initial version. """ def __init__(self, topic, context, host): self.context = context self.host = host target = oslo_messaging.Target(topic=topic, version='1.0') self.client = n_rpc.get_client(target) def get_bgp_speakers(self, context): """Make a remote process call to retrieve all BGP speakers info.""" cctxt = self.client.prepare() return cctxt.call(context, 'get_bgp_speakers', host=self.host) def get_bgp_speaker_info(self, context, bgp_speaker_id): """Make a remote process call to retrieve a BGP speaker info.""" cctxt = self.client.prepare() return cctxt.call(context, 'get_bgp_speaker_info', bgp_speaker_id=bgp_speaker_id) def get_bgp_peer_info(self, context, bgp_peer_id): """Make a remote process call to retrieve a BGP peer info.""" cctxt = self.client.prepare() return cctxt.call(context, 'get_bgp_peer_info', bgp_peer_id=bgp_peer_id) class BgpSpeakerCache(object): """Agent cache of the current BGP speaker state. This class is designed to support the advertisement for multiple BGP speaker via a single driver interface. 
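    Cache layout (illustrative):

        {<speaker-id>: {'bgp_speaker': <speaker dict>,
                        'peers': {<peer-ip>: <peer dict>},
                        'advertised_routes': [<route dict>, ...]}}
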
Version history: 1.0 - Initial version for caching the state of BGP speaker. """ def __init__(self): self.cache = {} def get_bgp_speaker_ids(self): return self.cache.keys() def put_bgp_speaker(self, bgp_speaker): if bgp_speaker['id'] in self.cache: self.remove_bgp_speaker_by_id(self.cache[bgp_speaker['id']]) self.cache[bgp_speaker['id']] = {'bgp_speaker': bgp_speaker, 'peers': {}, 'advertised_routes': []} def get_bgp_speaker_by_id(self, bgp_speaker_id): if bgp_speaker_id in self.cache: return self.cache[bgp_speaker_id]['bgp_speaker'] def get_bgp_speaker_local_as(self, bgp_speaker_id): bgp_speaker = self.get_bgp_speaker_by_id(bgp_speaker_id) if bgp_speaker: return bgp_speaker['local_as'] def is_bgp_speaker_added(self, bgp_speaker_id): return self.get_bgp_speaker_by_id(bgp_speaker_id) def remove_bgp_speaker_by_id(self, bgp_speaker_id): if bgp_speaker_id in self.cache: del self.cache[bgp_speaker_id] def put_bgp_peer(self, bgp_speaker_id, bgp_peer): if bgp_peer['peer_ip'] in self.get_bgp_peer_ips(bgp_speaker_id): del self.cache[bgp_speaker_id]['peers'][bgp_peer['peer_ip']] self.cache[bgp_speaker_id]['peers'][bgp_peer['peer_ip']] = bgp_peer def is_bgp_peer_added(self, bgp_speaker_id, bgp_peer_ip): return self.get_bgp_peer_by_ip(bgp_speaker_id, bgp_peer_ip) def get_bgp_peer_ips(self, bgp_speaker_id): bgp_speaker = self.get_bgp_speaker_by_id(bgp_speaker_id) if bgp_speaker: return self.cache[bgp_speaker_id]['peers'].keys() def get_bgp_peer_by_ip(self, bgp_speaker_id, bgp_peer_ip): bgp_speaker = self.get_bgp_speaker_by_id(bgp_speaker_id) if bgp_speaker: return self.cache[bgp_speaker_id]['peers'].get(bgp_peer_ip) def remove_bgp_peer_by_ip(self, bgp_speaker_id, bgp_peer_ip): if bgp_peer_ip in self.get_bgp_peer_ips(bgp_speaker_id): del self.cache[bgp_speaker_id]['peers'][bgp_peer_ip] def put_adv_route(self, bgp_speaker_id, route): self.cache[bgp_speaker_id]['advertised_routes'].append(route) def is_route_advertised(self, bgp_speaker_id, route): routes = self.cache[bgp_speaker_id]['advertised_routes'] for r in routes: if r['destination'] == route['destination'] and ( r['next_hop'] == route['next_hop']): return True return False def remove_adv_route(self, bgp_speaker_id, route): routes = self.cache[bgp_speaker_id]['advertised_routes'] updated_routes = [r for r in routes if ( r['destination'] != route['destination'])] self.cache[bgp_speaker_id]['advertised_routes'] = updated_routes def get_adv_routes(self, bgp_speaker_id): return self.cache[bgp_speaker_id]['advertised_routes'] def get_state(self): bgp_speaker_ids = self.get_bgp_speaker_ids() num_bgp_speakers = len(bgp_speaker_ids) num_bgp_peers = 0 num_advertised_routes = 0 for bgp_speaker_id in bgp_speaker_ids: bgp_speaker = self.get_bgp_speaker_by_id(bgp_speaker_id) num_bgp_peers += len(bgp_speaker['peers']) num_advertised_routes += len(bgp_speaker['advertised_routes']) return {'bgp_speakers': num_bgp_speakers, 'bgp_peers': num_bgp_peers, 'advertise_routes': num_advertised_routes} class BgpDrAgentWithStateReport(BgpDrAgent): def __init__(self, host, conf=None): super(BgpDrAgentWithStateReport, self).__init__(host, conf) self.state_rpc = agent_rpc.PluginReportStateAPI(topics.PLUGIN) self.agent_state = { 'agent_type': bgp_consts.AGENT_TYPE_BGP_ROUTING, 'binary': 'neutron-bgp-dragent', 'configurations': {}, 'host': host, 'topic': bgp_consts.BGP_DRAGENT, 'start_flag': True} report_interval = cfg.CONF.AGENT.report_interval if report_interval: self.heartbeat = loopingcall.FixedIntervalLoopingCall( self._report_state) 
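            # report_interval comes from the [AGENT] option group (30 seconds
            # by default); agent_state is re-reported on every tick of the
            # loop started below.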
self.heartbeat.start(interval=report_interval) def _report_state(self): LOG.debug("Report state task started") try: self.agent_state.get('configurations').update( self.cache.get_state()) ctx = context.get_admin_context_without_session() agent_status = self.state_rpc.report_state(ctx, self.agent_state, True) if agent_status == constants.AGENT_REVIVED: LOG.info(_LI("Agent has just been revived. " "Scheduling full sync")) self.schedule_full_resync( reason=_("Agent has just been revived")) except AttributeError: # This means the server does not support report_state LOG.warning(_LW("Neutron server does not support state report. " "State report for this agent will be disabled.")) self.heartbeat.stop() self.run() return except Exception: LOG.exception(_LE("Failed reporting state!")) return if self.agent_state.pop('start_flag', None): self.run() def agent_updated(self, context, payload): """Handle the agent_updated notification event.""" self.schedule_full_resync( reason=_("BgpDrAgent updated: %s") % payload) LOG.info(_LI("agent_updated by server side %s!"), payload) def after_start(self): LOG.info(_LI("BGP dynamic routing agent started")) neutron-8.4.0/neutron/services/bgp/agent/__init__.py0000664000567000056710000000000013044372760023646 0ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/services/bgp/agent/entry.py0000664000567000056710000000331113044372760023260 0ustar jenkinsjenkins00000000000000# Copyright 2016 Huawei Technologies India Pvt. Ltd. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import sys from oslo_config import cfg from oslo_service import service from neutron.agent.common import config from neutron.agent.linux import external_process from neutron.common import config as common_config from neutron import service as neutron_service from neutron.services.bgp.agent import config as bgp_dragent_config from neutron.services.bgp.common import constants as bgp_consts def register_options(): config.register_agent_state_opts_helper(cfg.CONF) config.register_root_helper(cfg.CONF) cfg.CONF.register_opts(bgp_dragent_config.BGP_DRIVER_OPTS, 'BGP') cfg.CONF.register_opts(bgp_dragent_config.BGP_PROTO_CONFIG_OPTS, 'BGP') cfg.CONF.register_opts(external_process.OPTS) def main(): register_options() common_config.init(sys.argv[1:]) config.setup_logging() server = neutron_service.Service.create( binary='neutron-bgp-dragent', topic=bgp_consts.BGP_DRAGENT, report_interval=cfg.CONF.AGENT.report_interval, manager='neutron.services.bgp.agent.bgp_dragent.' 'BgpDrAgentWithStateReport') service.launch(cfg.CONF, server).wait() neutron-8.4.0/neutron/services/network_ip_availability/0000775000567000056710000000000013044373210024603 5ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/services/network_ip_availability/__init__.py0000664000567000056710000000000013044372736026716 0ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/services/network_ip_availability/plugin.py0000664000567000056710000000404313044372760026465 0ustar jenkinsjenkins00000000000000# Copyright 2016 GoDaddy. 
# # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import neutron.common.exceptions as exceptions import neutron.db.db_base_plugin_v2 as db_base_plugin_v2 import neutron.db.network_ip_availability_db as ip_availability_db class NetworkIPAvailabilityPlugin(ip_availability_db.IpAvailabilityMixin, db_base_plugin_v2.NeutronDbPluginV2): """This plugin exposes IP availability data for networks and subnets.""" _instance = None supported_extension_aliases = ["network-ip-availability"] @classmethod def get_instance(cls): if cls._instance is None: cls._instance = cls() return cls._instance def get_plugin_description(self): return "Provides IP availability data for each network and subnet." def get_plugin_type(self): return "network-ip-availability" def get_network_ip_availabilities(self, context, filters=None, fields=None): """Returns ip availability data for a collection of networks.""" return super(NetworkIPAvailabilityPlugin, self).get_network_ip_availabilities(context, filters) def get_network_ip_availability(self, context, id=None, fields=None): """Return ip availability data for a specific network id.""" filters = {'network_id': [id]} result = self.get_network_ip_availabilities(context, filters) if result: return result[0] else: raise exceptions.NetworkNotFound(net_id=id) neutron-8.4.0/neutron/services/qos/0000775000567000056710000000000013044373210020472 5ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/services/qos/__init__.py0000664000567000056710000000000013044372736022605 0ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/services/qos/qos_consts.py0000664000567000056710000000200213044372760023242 0ustar jenkinsjenkins00000000000000# Copyright (c) 2015 Red Hat Inc. # All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. RULE_TYPE_BANDWIDTH_LIMIT = 'bandwidth_limit' VALID_RULE_TYPES = [RULE_TYPE_BANDWIDTH_LIMIT] QOS_POLICY_ID = 'qos_policy_id' # NOTE(slaweq): Value used to calculate burst value for egress bandwidth limit # if burst is not given by user. In such case burst value will be calculated # as 80% of bw_limit to ensure that at least limits for TCP traffic will work # fine. 
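# For example (illustrative arithmetic, not a shipped default): with
# bw_limit = 1000 kbps and no burst supplied by the user, the computed
# burst would be 1000 * DEFAULT_BURST_RATE = 800 kbit.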
DEFAULT_BURST_RATE = 0.8 neutron-8.4.0/neutron/services/qos/notification_drivers/0000775000567000056710000000000013044373210024716 5ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/services/qos/notification_drivers/manager.py0000664000567000056710000000554513044372760026724 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import cfg from oslo_log import log as logging from neutron._i18n import _, _LI from neutron import manager QOS_DRIVER_NAMESPACE = 'neutron.qos.notification_drivers' QOS_PLUGIN_OPTS = [ cfg.ListOpt('notification_drivers', default=['message_queue'], help=_('Drivers list to use to send the update notification')), ] cfg.CONF.register_opts(QOS_PLUGIN_OPTS, "qos") LOG = logging.getLogger(__name__) class QosServiceNotificationDriverManager(object): def __init__(self): self.notification_drivers = [] self._load_drivers(cfg.CONF.qos.notification_drivers) def update_policy(self, context, qos_policy): for driver in self.notification_drivers: driver.update_policy(context, qos_policy) def delete_policy(self, context, qos_policy): for driver in self.notification_drivers: driver.delete_policy(context, qos_policy) def create_policy(self, context, qos_policy): for driver in self.notification_drivers: driver.create_policy(context, qos_policy) def _load_drivers(self, notification_drivers): """Load all the instances of the configured QoS notification drivers :param notification_drivers: comma separated string """ if not notification_drivers: raise SystemExit(_('A QoS driver must be specified')) LOG.debug("Loading QoS notification drivers: %s", notification_drivers) for notification_driver in notification_drivers: driver_ins = self._load_driver_instance(notification_driver) self.notification_drivers.append(driver_ins) def _load_driver_instance(self, notification_driver): """Returns an instance of the configured QoS notification driver :returns: An instance of Driver for the QoS notification """ mgr = manager.NeutronManager driver = mgr.load_class_for_provider(QOS_DRIVER_NAMESPACE, notification_driver) driver_instance = driver() LOG.info( _LI("Loading %(name)s (%(description)s) notification driver " "for QoS plugin"), {"name": notification_driver, "description": driver_instance.get_description()}) return driver_instance neutron-8.4.0/neutron/services/qos/notification_drivers/message_queue.py0000664000567000056710000000406213044372760030133 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
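# This driver is registered under the 'message_queue' name, the default of
# the [qos] notification_drivers option defined in
# notification_drivers/manager.py. A minimal neutron.conf excerpt enabling
# it (illustrative sketch):
#
#     [qos]
#     notification_drivers = message_queue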
from oslo_log import log as logging from neutron._i18n import _LW from neutron.api.rpc.callbacks import events from neutron.api.rpc.callbacks.producer import registry from neutron.api.rpc.callbacks import resources from neutron.api.rpc.handlers import resources_rpc from neutron.objects.qos import policy as policy_object from neutron.services.qos.notification_drivers import qos_base LOG = logging.getLogger(__name__) def _get_qos_policy_cb(resource, policy_id, **kwargs): context = kwargs.get('context') if context is None: LOG.warning(_LW( 'Received %(resource)s %(policy_id)s without context'), {'resource': resource, 'policy_id': policy_id} ) return policy = policy_object.QosPolicy.get_object(context, id=policy_id) return policy class RpcQosServiceNotificationDriver( qos_base.QosServiceNotificationDriverBase): """RPC message queue service notification driver for QoS.""" def __init__(self): self.notification_api = resources_rpc.ResourcesPushRpcApi() registry.provide(_get_qos_policy_cb, resources.QOS_POLICY) def get_description(self): return "Message queue updates" def create_policy(self, context, policy): #No need to update agents on create pass def update_policy(self, context, policy): self.notification_api.push(context, policy, events.UPDATED) def delete_policy(self, context, policy): self.notification_api.push(context, policy, events.DELETED) neutron-8.4.0/neutron/services/qos/notification_drivers/__init__.py0000664000567000056710000000000013044372736027031 0ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/services/qos/notification_drivers/qos_base.py0000664000567000056710000000243413044372736027103 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import abc import six @six.add_metaclass(abc.ABCMeta) class QosServiceNotificationDriverBase(object): """QoS service notification driver base class.""" @abc.abstractmethod def get_description(self): """Get the notification driver description. """ @abc.abstractmethod def create_policy(self, context, policy): """Create the QoS policy.""" @abc.abstractmethod def update_policy(self, context, policy): """Update the QoS policy. Apply changes to the QoS policy. """ @abc.abstractmethod def delete_policy(self, context, policy): """Delete the QoS policy. Remove all rules for this policy and free up all the resources. """ neutron-8.4.0/neutron/services/qos/qos_plugin.py0000664000567000056710000001702113044372760023236 0ustar jenkinsjenkins00000000000000# Copyright (c) 2015 Red Hat Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. from neutron.common import exceptions as n_exc from neutron.db import api as db_api from neutron.db import db_base_plugin_common from neutron.extensions import qos from neutron.objects.qos import policy as policy_object from neutron.objects.qos import rule as rule_object from neutron.objects.qos import rule_type as rule_type_object from neutron.services.qos.notification_drivers import manager as driver_mgr from neutron.services.qos import qos_consts class QoSPlugin(qos.QoSPluginBase): """Implementation of the Neutron QoS Service Plugin. This class implements a Quality of Service plugin that provides quality of service parameters over ports and networks. """ supported_extension_aliases = ['qos'] def __init__(self): super(QoSPlugin, self).__init__() self.notification_driver_manager = ( driver_mgr.QosServiceNotificationDriverManager()) @db_base_plugin_common.convert_result_to_dict def create_policy(self, context, policy): policy = policy_object.QosPolicy(context, **policy['policy']) policy.create() self.notification_driver_manager.create_policy(context, policy) return policy @db_base_plugin_common.convert_result_to_dict def update_policy(self, context, policy_id, policy): obj = policy_object.QosPolicy(context, id=policy_id) obj.obj_reset_changes() for k, v in policy['policy'].items(): if k != 'id': setattr(obj, k, v) obj.update() self.notification_driver_manager.update_policy(context, obj) return obj def delete_policy(self, context, policy_id): policy = policy_object.QosPolicy(context) policy.id = policy_id self.notification_driver_manager.delete_policy(context, policy) policy.delete() def _get_policy_obj(self, context, policy_id): obj = policy_object.QosPolicy.get_object(context, id=policy_id) if obj is None: raise n_exc.QosPolicyNotFound(policy_id=policy_id) return obj @db_base_plugin_common.filter_fields @db_base_plugin_common.convert_result_to_dict def get_policy(self, context, policy_id, fields=None): return self._get_policy_obj(context, policy_id) @db_base_plugin_common.filter_fields @db_base_plugin_common.convert_result_to_dict def get_policies(self, context, filters=None, fields=None, sorts=None, limit=None, marker=None, page_reverse=False): return policy_object.QosPolicy.get_objects(context, **filters) #TODO(QoS): Consider adding a proxy catch-all for rules, so # we capture the API function call, and just pass # the rule type as a parameter removing lots of # future code duplication when we have more rules. 
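    # A minimal sketch of the bandwidth-limit rule CRUD flow implemented
    # below (illustrative values; these methods are normally reached
    # through the Neutron REST API rather than called directly):
    #
    #     body = {'bandwidth_limit_rule': {'max_kbps': 1000,
    #                                      'max_burst_kbps': 800}}
    #     rule = plugin.create_policy_bandwidth_limit_rule(
    #         context, policy_id, body)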
@db_base_plugin_common.convert_result_to_dict def create_policy_bandwidth_limit_rule(self, context, policy_id, bandwidth_limit_rule): # make sure we will have a policy object to push resource update with db_api.autonested_transaction(context.session): # first, validate that we have access to the policy policy = self._get_policy_obj(context, policy_id) rule = rule_object.QosBandwidthLimitRule( context, qos_policy_id=policy_id, **bandwidth_limit_rule['bandwidth_limit_rule']) rule.create() policy.reload_rules() self.notification_driver_manager.update_policy(context, policy) return rule @db_base_plugin_common.convert_result_to_dict def update_policy_bandwidth_limit_rule(self, context, rule_id, policy_id, bandwidth_limit_rule): # make sure we will have a policy object to push resource update with db_api.autonested_transaction(context.session): # first, validate that we have access to the policy policy = self._get_policy_obj(context, policy_id) # check if the rule belong to the policy policy.get_rule_by_id(rule_id) rule = rule_object.QosBandwidthLimitRule( context, id=rule_id) rule.obj_reset_changes() for k, v in bandwidth_limit_rule['bandwidth_limit_rule'].items(): if k != 'id': setattr(rule, k, v) rule.update() policy.reload_rules() self.notification_driver_manager.update_policy(context, policy) return rule def delete_policy_bandwidth_limit_rule(self, context, rule_id, policy_id): # make sure we will have a policy object to push resource update with db_api.autonested_transaction(context.session): # first, validate that we have access to the policy policy = self._get_policy_obj(context, policy_id) rule = policy.get_rule_by_id(rule_id) rule.delete() policy.reload_rules() self.notification_driver_manager.update_policy(context, policy) @db_base_plugin_common.filter_fields @db_base_plugin_common.convert_result_to_dict def get_policy_bandwidth_limit_rule(self, context, rule_id, policy_id, fields=None): # make sure we have access to the policy when fetching the rule with db_api.autonested_transaction(context.session): # first, validate that we have access to the policy self._get_policy_obj(context, policy_id) rule = rule_object.QosBandwidthLimitRule.get_object( context, id=rule_id) if not rule: raise n_exc.QosRuleNotFound(policy_id=policy_id, rule_id=rule_id) return rule @db_base_plugin_common.filter_fields @db_base_plugin_common.convert_result_to_dict def get_policy_bandwidth_limit_rules(self, context, policy_id, filters=None, fields=None, sorts=None, limit=None, marker=None, page_reverse=False): # make sure we have access to the policy when fetching rules with db_api.autonested_transaction(context.session): # first, validate that we have access to the policy self._get_policy_obj(context, policy_id) filters = filters or dict() filters[qos_consts.QOS_POLICY_ID] = policy_id return rule_object.QosBandwidthLimitRule.get_objects(context, **filters) # TODO(QoS): enforce rule types when accessing rule objects @db_base_plugin_common.filter_fields @db_base_plugin_common.convert_result_to_dict def get_rule_types(self, context, filters=None, fields=None, sorts=None, limit=None, marker=None, page_reverse=False): return rule_type_object.QosRuleType.get_objects(**filters) neutron-8.4.0/neutron/services/rbac/0000775000567000056710000000000013044373210020577 5ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/services/rbac/__init__.py0000664000567000056710000000000013044372736022712 0ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/services/auto_allocate/0000775000567000056710000000000013044373210022504 
5ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/services/auto_allocate/__init__.py0000664000567000056710000000000013044372736024617 0ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/services/auto_allocate/plugin.py0000664000567000056710000000222313044372760024364 0ustar jenkinsjenkins00000000000000# Copyright 2015-2016 Hewlett Packard Enterprise Development Company, LP # # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron.services.auto_allocate import db class Plugin(db.AutoAllocatedTopologyMixin): _instance = None supported_extension_aliases = ["auto-allocated-topology"] @classmethod def get_instance(cls): if cls._instance is None: cls._instance = cls() return cls._instance def get_plugin_description(self): return "Auto Allocated Topology - aka get me a network." def get_plugin_type(self): return "auto-allocated-topology" neutron-8.4.0/neutron/services/auto_allocate/models.py0000664000567000056710000000241713044372760024356 0ustar jenkinsjenkins00000000000000# Copyright (c) 2015-2016 Hewlett Packard Enterprise Development Company LP # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import sqlalchemy as sa from neutron.db import model_base class AutoAllocatedTopology(model_base.BASEV2): __tablename__ = 'auto_allocated_topologies' tenant_id = sa.Column(sa.String(255), primary_key=True) network_id = sa.Column(sa.String(36), sa.ForeignKey('networks.id', ondelete='CASCADE'), nullable=False) router_id = sa.Column(sa.String(36), sa.ForeignKey('routers.id', ondelete='SET NULL'), nullable=True) neutron-8.4.0/neutron/services/auto_allocate/db.py0000664000567000056710000003323413044372760023461 0ustar jenkinsjenkins00000000000000# Copyright 2015-2016 Hewlett Packard Enterprise Development Company, LP # # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
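# A minimal sketch of how the AutoAllocatedTopology model from models.py
# is consulted (illustrative; it mirrors _get_auto_allocated_network
# further down in this module):
#
#     row = (context.session.query(models.AutoAllocatedTopology).
#            filter_by(tenant_id=tenant_id).first())
#     network_id = row.network_id if row else None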
from sqlalchemy import sql from oslo_db import exception as db_exc from oslo_log import log as logging from neutron._i18n import _, _LE from neutron.api.v2 import attributes from neutron.callbacks import events from neutron.callbacks import registry from neutron.callbacks import resources from neutron.common import exceptions as n_exc from neutron.db import common_db_mixin from neutron.db import db_base_plugin_v2 from neutron.db import external_net_db from neutron.db import model_base from neutron.db import models_v2 from neutron.extensions import l3 from neutron import manager from neutron.plugins.common import constants from neutron.plugins.common import utils as p_utils from neutron.services.auto_allocate import exceptions from neutron.services.auto_allocate import models LOG = logging.getLogger(__name__) IS_DEFAULT = 'is_default' CHECK_REQUIREMENTS = 'dry-run' def _extend_external_network_default(self, net_res, net_db): """Add is_default field to 'show' response.""" if net_db.external is not None: net_res[IS_DEFAULT] = net_db.external.is_default return net_res def _ensure_external_network_default_value_callback( resource, event, trigger, context, request, network): """Ensure the is_default db field matches the create/update request.""" is_default = request.get(IS_DEFAULT, False) if event in (events.BEFORE_CREATE, events.BEFORE_UPDATE) and is_default: # ensure there is only one default external network at any given time obj = (context.session.query(external_net_db.ExternalNetwork). filter_by(is_default=True)).first() if obj and network['id'] != obj.network_id: raise exceptions.DefaultExternalNetworkExists( net_id=obj.network_id) # Reflect the status of the is_default on the create/update request obj = (context.session.query(external_net_db.ExternalNetwork). filter_by(network_id=network['id'])) obj.update({IS_DEFAULT: is_default}) class AutoAllocatedTopologyMixin(common_db_mixin.CommonDbMixin): db_base_plugin_v2.NeutronDbPluginV2.register_dict_extend_funcs( attributes.NETWORKS, [_extend_external_network_default]) def __new__(cls, *args, **kwargs): # NOTE(kevinbenton): we subscribe on object construction because # the tests blow away the callback manager for each run new = super(AutoAllocatedTopologyMixin, cls).__new__(cls, *args, **kwargs) registry.subscribe(_ensure_external_network_default_value_callback, resources.EXTERNAL_NETWORK, events.BEFORE_CREATE) registry.subscribe(_ensure_external_network_default_value_callback, resources.EXTERNAL_NETWORK, events.AFTER_CREATE) registry.subscribe(_ensure_external_network_default_value_callback, resources.EXTERNAL_NETWORK, events.BEFORE_UPDATE) return new # TODO(armax): if a tenant modifies auto allocated resources under # the hood the behavior of the get_auto_allocated_topology API is # undetermined. Consider adding callbacks to deal with the following # situations: # - insert subnet -> plug router interface # - delete router -> remove the entire topology # - update subnet -> prevent operation # - update router gateway -> prevent operation # - ... def get_auto_allocated_topology(self, context, tenant_id, fields=None): """Return tenant's network associated to auto-allocated topology. The topology will be provisioned upon return, if network is missing. """ tenant_id = self._validate(context, tenant_id) if CHECK_REQUIREMENTS in fields: # for dry-run requests, simply validates that subsequent # requests can be fullfilled based on a set of requirements # such as existence of default networks, pools, etc. 
return self._check_requirements(context, tenant_id) elif fields: raise n_exc.BadRequest(resource='auto_allocate', msg=_("Unrecognized field")) # Check for an existent topology network_id = self._get_auto_allocated_network(context, tenant_id) if network_id: return self._response(network_id, tenant_id, fields=fields) # See if we indeed have an external network to connect to, otherwise # we will fail fast default_external_network = self._get_default_external_network( context) # If we reach this point, then we got some work to do! subnets = self._provision_tenant_private_network(context, tenant_id) network_id = subnets[0]['network_id'] router = self._provision_external_connectivity( context, default_external_network, subnets, tenant_id) network_id = self._save( context, tenant_id, network_id, router['id'], subnets) return self._response(network_id, tenant_id, fields=fields) @property def core_plugin(self): if not getattr(self, '_core_plugin', None): self._core_plugin = manager.NeutronManager.get_plugin() return self._core_plugin @property def l3_plugin(self): if not getattr(self, '_l3_plugin', None): self._l3_plugin = manager.NeutronManager.get_service_plugins().get( constants.L3_ROUTER_NAT) return self._l3_plugin def _check_requirements(self, context, tenant_id): """Raise if requirements are not met.""" self._get_default_external_network(context) try: self._get_supported_subnetpools(context) except n_exc.NotFound: raise exceptions.AutoAllocationFailure( reason=_("No default subnetpools defined")) return {'id': 'dry-run=pass', 'tenant_id': tenant_id} def _validate(self, context, tenant_id): """Validate and return the tenant to be associated to the topology.""" if tenant_id == 'None': # NOTE(HenryG): the client might be sending us astray by # passing no tenant; this is really meant to be the tenant # issuing the request, therefore let's get it from the context tenant_id = context.tenant_id if not context.is_admin and tenant_id != context.tenant_id: raise n_exc.NotAuthorized() return tenant_id def _get_auto_allocated_network(self, context, tenant_id): """Get the auto allocated network for the tenant.""" with context.session.begin(subtransactions=True): network = (context.session.query(models.AutoAllocatedTopology). filter_by(tenant_id=tenant_id).first()) if network: return network['network_id'] def _response(self, network_id, tenant_id, fields=None): """Build response for auto-allocated network.""" res = { 'id': network_id, 'tenant_id': tenant_id } return self._fields(res, fields) def _get_default_external_network(self, context): """Get the default external network for the deployment.""" with context.session.begin(subtransactions=True): default_external_networks = (context.session.query( external_net_db.ExternalNetwork). filter_by(is_default=sql.true()). join(models_v2.Network). join(model_base.StandardAttribute). order_by(model_base.StandardAttribute.id).all()) if not default_external_networks: LOG.error(_LE("Unable to find default external network " "for deployment, please create/assign one to " "allow auto-allocation to work correctly.")) raise exceptions.AutoAllocationFailure( reason=_("No default router:external network")) if len(default_external_networks) > 1: LOG.error(_LE("Multiple external default networks detected. 
" "Network %s is true 'default'."), default_external_networks[0]['network_id']) return default_external_networks[0] def _get_supported_subnetpools(self, context): """Return the default subnet pools available.""" default_subnet_pools = [ self.core_plugin.get_default_subnetpool( context, ver) for ver in (4, 6) ] available_pools = [ s for s in default_subnet_pools if s ] if not available_pools: LOG.error(_LE("No default pools available")) raise n_exc.NotFound() return available_pools def _provision_tenant_private_network(self, context, tenant_id): """Create a tenant private network/subnets.""" network = None try: network_args = { 'name': 'auto_allocated_network', 'admin_state_up': True, 'tenant_id': tenant_id, 'shared': False } network = p_utils.create_network( self.core_plugin, context, {'network': network_args}) subnets = [] for pool in self._get_supported_subnetpools(context): subnet_args = { 'name': 'auto_allocated_subnet_v%s' % pool['ip_version'], 'network_id': network['id'], 'tenant_id': tenant_id, 'ip_version': pool['ip_version'], 'subnetpool_id': pool['id'], } subnets.append(p_utils.create_subnet( self.core_plugin, context, {'subnet': subnet_args})) return subnets except (ValueError, n_exc.BadRequest, n_exc.NotFound): LOG.error(_LE("Unable to auto allocate topology for tenant " "%s due to missing requirements, e.g. default " "or shared subnetpools"), tenant_id) if network: self._cleanup(context, network['id']) raise exceptions.AutoAllocationFailure( reason=_("Unable to provide tenant private network")) def _provision_external_connectivity( self, context, default_external_network, subnets, tenant_id): """Uplink tenant subnet(s) to external network.""" router_args = { 'name': 'auto_allocated_router', l3.EXTERNAL_GW_INFO: default_external_network, 'tenant_id': tenant_id, 'admin_state_up': True } router = None try: router = self.l3_plugin.create_router( context, {'router': router_args}) attached_subnets = [] for subnet in subnets: self.l3_plugin.add_router_interface( context, router['id'], {'subnet_id': subnet['id']}) attached_subnets.append(subnet) return router except n_exc.BadRequest: LOG.error(_LE("Unable to auto allocate topology for tenant " "%s because of router errors."), tenant_id) if router: self._cleanup(context, network_id=subnets[0]['network_id'], router_id=router['id'], subnets=attached_subnets) raise exceptions.AutoAllocationFailure( reason=_("Unable to provide external connectivity")) def _save(self, context, tenant_id, network_id, router_id, subnets): """Save auto-allocated topology, or revert in case of DB errors.""" try: # NOTE(armax): saving the auto allocated topology in a # separate transaction will keep the Neutron DB and the # Neutron plugin backend in sync, thus allowing for a # more bullet proof cleanup. with context.session.begin(subtransactions=True): context.session.add( models.AutoAllocatedTopology( tenant_id=tenant_id, network_id=network_id, router_id=router_id)) except db_exc.DBDuplicateEntry: LOG.error(_LE("Multiple auto-allocated networks detected for " "tenant %(tenant)s. 
Attempting clean up for " "network %(network)s and router %(router)s"), {'tenant': tenant_id, 'network': network_id, 'router': router_id}) self._cleanup( context, network_id=network_id, router_id=router_id, subnets=subnets) network_id = self._get_auto_allocated_network( context, tenant_id) return network_id def _cleanup(self, context, network_id=None, router_id=None, subnets=None): """Clean up auto allocated resources.""" if router_id: for subnet in subnets or []: self.l3_plugin.remove_router_interface( context, router_id, {'subnet_id': subnet['id']}) self.l3_plugin.delete_router(context, router_id) if network_id: self.core_plugin.delete_network(context, network_id) neutron-8.4.0/neutron/services/auto_allocate/exceptions.py0000664000567000056710000000171413044372760025253 0ustar jenkinsjenkins00000000000000# Copyright 2015-2016 Hewlett Packard Enterprise Development Company, LP # # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron._i18n import _ from neutron.common import exceptions as n_exc class AutoAllocationFailure(n_exc.Conflict): message = _("Deployment error: %(reason)s.") class DefaultExternalNetworkExists(n_exc.Conflict): message = _("A default external network already exists: %(net_id)s.") neutron-8.4.0/neutron/policy.py0000664000567000056710000004266713044372760017746 0ustar jenkinsjenkins00000000000000# Copyright (c) 2012 OpenStack Foundation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
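# A minimal sketch of the kind of policy.json entries this module
# evaluates (illustrative rules, not the shipped defaults):
#
#     {
#         "context_is_admin": "role:admin",
#         "admin_or_owner": "rule:context_is_admin or tenant_id:%(tenant_id)s",
#         "create_network:shared": "rule:context_is_admin"
#     }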
import collections import re from oslo_config import cfg from oslo_db import exception as db_exc from oslo_log import log as logging from oslo_policy import policy from oslo_utils import excutils from oslo_utils import importutils import six from neutron._i18n import _, _LE, _LW from neutron.api.v2 import attributes from neutron.common import constants as const from neutron.common import exceptions LOG = logging.getLogger(__name__) _ENFORCER = None ADMIN_CTX_POLICY = 'context_is_admin' ADVSVC_CTX_POLICY = 'context_is_advsvc' def reset(): global _ENFORCER if _ENFORCER: _ENFORCER.clear() _ENFORCER = None def init(conf=cfg.CONF, policy_file=None): """Init an instance of the Enforcer class.""" global _ENFORCER if not _ENFORCER: _ENFORCER = policy.Enforcer(conf, policy_file=policy_file) _ENFORCER.load_rules(True) def refresh(policy_file=None): """Reset policy and init a new instance of Enforcer.""" reset() init(policy_file=policy_file) def get_resource_and_action(action, pluralized=None): """Return resource and enforce_attr_based_check(boolean) per resource and action extracted from api operation. """ data = action.split(':', 1)[0].split('_', 1) resource = pluralized or ("%ss" % data[-1]) enforce_attr_based_check = data[0] not in ('get', 'delete') return (resource, enforce_attr_based_check) def set_rules(policies, overwrite=True): """Set rules based on the provided dict of rules. :param policies: New policies to use. It should be an instance of dict. :param overwrite: Whether to overwrite current rules or update them with the new rules. """ LOG.debug("Loading policies from file: %s", _ENFORCER.policy_path) init() _ENFORCER.set_rules(policies, overwrite) def _is_attribute_explicitly_set(attribute_name, resource, target, action): """Verify that an attribute is present and is explicitly set.""" if 'update' in action: # In the case of update, the function should not pay attention to a # default value of an attribute, but check whether it was explicitly # marked as being updated instead. return (attribute_name in target[const.ATTRIBUTES_TO_UPDATE] and target[attribute_name] is not attributes.ATTR_NOT_SPECIFIED) result = (attribute_name in target and target[attribute_name] is not attributes.ATTR_NOT_SPECIFIED) if result and 'default' in resource[attribute_name]: return target[attribute_name] != resource[attribute_name]['default'] return result def _should_validate_sub_attributes(attribute, sub_attr): """Verify that sub-attributes are iterable and should be validated.""" validate = attribute.get('validate') return (validate and isinstance(sub_attr, collections.Iterable) and any([k.startswith('type:dict') and v for (k, v) in six.iteritems(validate)])) def _build_subattr_match_rule(attr_name, attr, action, target): """Create the rule to match for sub-attribute policy checks.""" # TODO(salv-orlando): Instead of relying on validator info, introduce # typing for API attributes # Expect a dict as type descriptor validate = attr['validate'] key = list(filter(lambda k: k.startswith('type:dict'), validate.keys())) if not key: LOG.warning(_LW("Unable to find data type descriptor " "for attribute %s"), attr_name) return data = validate[key[0]] if not isinstance(data, dict): LOG.debug("Attribute type descriptor is not a dict. 
Unable to " "generate any sub-attr policy rule for %s.", attr_name) return sub_attr_rules = [policy.RuleCheck('rule', '%s:%s:%s' % (action, attr_name, sub_attr_name)) for sub_attr_name in data if sub_attr_name in target[attr_name]] return policy.AndCheck(sub_attr_rules) def _process_rules_list(rules, match_rule): """Recursively walk a policy rule to extract a list of match entries.""" if isinstance(match_rule, policy.RuleCheck): rules.append(match_rule.match) elif isinstance(match_rule, policy.AndCheck): for rule in match_rule.rules: _process_rules_list(rules, rule) return rules def _build_match_rule(action, target, pluralized): """Create the rule to match for a given action. The policy rule to be matched is built in the following way: 1) add entries for matching permission on objects 2) add an entry for the specific action (e.g.: create_network) 3) add an entry for attributes of a resource for which the action is being executed (e.g.: create_network:shared) 4) add an entry for sub-attributes of a resource for which the action is being executed (e.g.: create_router:external_gateway_info:network_id) """ match_rule = policy.RuleCheck('rule', action) resource, enforce_attr_based_check = get_resource_and_action( action, pluralized) if enforce_attr_based_check: # assigning to variable with short name for improving readability res_map = attributes.RESOURCE_ATTRIBUTE_MAP if resource in res_map: for attribute_name in res_map[resource]: if _is_attribute_explicitly_set(attribute_name, res_map[resource], target, action): attribute = res_map[resource][attribute_name] if 'enforce_policy' in attribute: attr_rule = policy.RuleCheck('rule', '%s:%s' % (action, attribute_name)) # Build match entries for sub-attributes if _should_validate_sub_attributes( attribute, target[attribute_name]): attr_rule = policy.AndCheck( [attr_rule, _build_subattr_match_rule( attribute_name, attribute, action, target)]) match_rule = policy.AndCheck([match_rule, attr_rule]) return match_rule # This check is registered as 'tenant_id' so that it can override # GenericCheck which was used for validating parent resource ownership. # This will prevent us from having to handling backward compatibility # for policy.json # TODO(salv-orlando): Reinstate GenericCheck for simple tenant_id checks @policy.register('tenant_id') class OwnerCheck(policy.Check): """Resource ownership check. This check verifies the owner of the current resource, or of another resource referenced by the one under analysis. In the former case it falls back to a regular GenericCheck, whereas in the latter case it leverages the plugin to load the referenced resource and perform the check. """ def __init__(self, kind, match): # Process the match try: self.target_field = re.findall(r'^\%\((.*)\)s$', match)[0] except IndexError: err_reason = (_("Unable to identify a target field from:%s. 
" "Match should be in the form %%()s") % match) LOG.exception(err_reason) raise exceptions.PolicyInitError( policy="%s:%s" % (kind, match), reason=err_reason) super(OwnerCheck, self).__init__(kind, match) def __call__(self, target, creds, enforcer): if self.target_field not in target: # policy needs a plugin check # target field is in the form resource:field # however if they're not separated by a colon, use an underscore # as a separator for backward compatibility def do_split(separator): parent_res, parent_field = self.target_field.split( separator, 1) return parent_res, parent_field for separator in (':', '_'): try: parent_res, parent_field = do_split(separator) break except ValueError: LOG.debug("Unable to find ':' as separator in %s.", self.target_field) else: # If we are here split failed with both separators err_reason = (_("Unable to find resource name in %s") % self.target_field) LOG.error(err_reason) raise exceptions.PolicyCheckError( policy="%s:%s" % (self.kind, self.match), reason=err_reason) parent_foreign_key = attributes.RESOURCE_FOREIGN_KEYS.get( "%ss" % parent_res, None) if not parent_foreign_key: err_reason = (_("Unable to verify match:%(match)s as the " "parent resource: %(res)s was not found") % {'match': self.match, 'res': parent_res}) LOG.error(err_reason) raise exceptions.PolicyCheckError( policy="%s:%s" % (self.kind, self.match), reason=err_reason) # NOTE(salv-orlando): This check currently assumes the parent # resource is handled by the core plugin. It might be worth # having a way to map resources to plugins so to make this # check more general # NOTE(ihrachys): if import is put in global, circular # import failure occurs manager = importutils.import_module('neutron.manager') f = getattr(manager.NeutronManager.get_instance().plugin, 'get_%s' % parent_res) # f *must* exist, if not found it is better to let neutron # explode. Check will be performed with admin context context = importutils.import_module('neutron.context') try: data = f(context.get_admin_context(), target[parent_foreign_key], fields=[parent_field]) target[self.target_field] = data[parent_field] except exceptions.NotFound as e: # NOTE(kevinbenton): a NotFound exception can occur if a # list operation is happening at the same time as one of # the parents and its children being deleted. So we issue # a RetryRequest so the API will redo the lookup and the # problem items will be gone. 
raise db_exc.RetryRequest(e) except Exception: with excutils.save_and_reraise_exception(): LOG.exception(_LE('Policy check error while calling %s!'), f) match = self.match % target if self.kind in creds: return match == six.text_type(creds[self.kind]) return False @policy.register('field') class FieldCheck(policy.Check): def __init__(self, kind, match): # Process the match resource, field_value = match.split(':', 1) field, value = field_value.split('=', 1) super(FieldCheck, self).__init__(kind, '%s:%s:%s' % (resource, field, value)) # Value might need conversion - we need help from the attribute map try: attr = attributes.RESOURCE_ATTRIBUTE_MAP[resource][field] conv_func = attr['convert_to'] except KeyError: conv_func = lambda x: x self.field = field self.value = conv_func(value) self.regex = re.compile(value[1:]) if value.startswith('~') else None def __call__(self, target_dict, cred_dict, enforcer): target_value = target_dict.get(self.field) # target_value might be a boolean, explicitly compare with None if target_value is None: LOG.debug("Unable to find requested field: %(field)s in target: " "%(target_dict)s", {'field': self.field, 'target_dict': target_dict}) return False if self.regex: return bool(self.regex.match(target_value)) return target_value == self.value def _prepare_check(context, action, target, pluralized): """Prepare rule, target, and credentials for the policy engine.""" # Compare with None to distinguish case in which target is {} if target is None: target = {} match_rule = _build_match_rule(action, target, pluralized) credentials = context.to_dict() return match_rule, target, credentials def log_rule_list(match_rule): if LOG.isEnabledFor(logging.DEBUG): rules = _process_rules_list([], match_rule) LOG.debug("Enforcing rules: %s", rules) def check(context, action, target, plugin=None, might_not_exist=False, pluralized=None): """Verifies that the action is valid on the target in this context. :param context: neutron context :param action: string representing the action to be checked this should be colon separated for clarity. :param target: dictionary representing the object of the action for object creation this should be a dictionary representing the location of the object e.g. ``{'project_id': context.project_id}`` :param plugin: currently unused and deprecated. Kept for backward compatibility. :param might_not_exist: If True the policy check is skipped (and the function returns True) if the specified policy does not exist. Defaults to false. :param pluralized: pluralized case of resource e.g. firewall_policy -> pluralized = "firewall_policies" :return: Returns True if access is permitted else False. """ # If we already know the context has admin rights do not perform an # additional check and authorize the operation if context.is_admin: return True if might_not_exist and not (_ENFORCER.rules and action in _ENFORCER.rules): return True match_rule, target, credentials = _prepare_check(context, action, target, pluralized) result = _ENFORCER.enforce(match_rule, target, credentials, pluralized=pluralized) # logging applied rules in case of failure if not result: log_rule_list(match_rule) return result def enforce(context, action, target, plugin=None, pluralized=None): """Verifies that the action is valid on the target in this context. :param context: neutron context :param action: string representing the action to be checked this should be colon separated for clarity. 
:param target: dictionary representing the object of the action for object creation this should be a dictionary representing the location of the object e.g. ``{'project_id': context.project_id}`` :param plugin: currently unused and deprecated. Kept for backward compatibility. :param pluralized: pluralized case of resource e.g. firewall_policy -> pluralized = "firewall_policies" :raises oslo_policy.policy.PolicyNotAuthorized: if verification fails. """ # If we already know the context has admin rights do not perform an # additional check and authorize the operation if context.is_admin: return True rule, target, credentials = _prepare_check(context, action, target, pluralized) try: result = _ENFORCER.enforce(rule, target, credentials, action=action, do_raise=True) except policy.PolicyNotAuthorized: with excutils.save_and_reraise_exception(): log_rule_list(rule) LOG.debug("Failed policy check for '%s'", action) return result def check_is_admin(context): """Verify context has admin rights according to policy settings.""" init() # the target is user-self credentials = context.to_dict() if ADMIN_CTX_POLICY not in _ENFORCER.rules: return False return _ENFORCER.enforce(ADMIN_CTX_POLICY, credentials, credentials) def check_is_advsvc(context): """Verify context has advsvc rights according to policy settings.""" init() # the target is user-self credentials = context.to_dict() if ADVSVC_CTX_POLICY not in _ENFORCER.rules: return False return _ENFORCER.enforce(ADVSVC_CTX_POLICY, credentials, credentials) neutron-8.4.0/neutron/callbacks/0000775000567000056710000000000013044373210017764 5ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/callbacks/manager.py0000664000567000056710000001510613044372760021764 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import collections from oslo_log import log as logging from oslo_utils import reflection from neutron._i18n import _LE from neutron.callbacks import events from neutron.callbacks import exceptions LOG = logging.getLogger(__name__) class CallbacksManager(object): """A callback system that allows objects to cooperate in a loose manner.""" def __init__(self): self.clear() def subscribe(self, callback, resource, event): """Subscribe callback for a resource event. The same callback may register for more than one event. :param callback: the callback. It must raise or return a boolean. :param resource: the resource. It must be a valid resource. :param event: the event. It must be a valid event. """ LOG.debug("Subscribe: %(callback)s %(resource)s %(event)s", {'callback': callback, 'resource': resource, 'event': event}) callback_id = _get_id(callback) try: self._callbacks[resource][event][callback_id] = callback except KeyError: # Initialize the registry for unknown resources and/or events # prior to enlisting the callback. self._callbacks[resource][event] = {} self._callbacks[resource][event][callback_id] = callback # We keep a copy of callbacks to speed the unsubscribe operation. 
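        # (Illustrative shape of the index: callback_id -> resource ->
        # set of events, e.g. {'my_cb': {'port': {'after_create'}}}.)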
if callback_id not in self._index: self._index[callback_id] = collections.defaultdict(set) self._index[callback_id][resource].add(event) def unsubscribe(self, callback, resource, event): """Unsubscribe callback from the registry. :param callback: the callback. :param resource: the resource. :param event: the event. """ LOG.debug("Unsubscribe: %(callback)s %(resource)s %(event)s", {'callback': callback, 'resource': resource, 'event': event}) callback_id = self._find(callback) if not callback_id: LOG.debug("Callback %s not found", callback_id) return if resource and event: del self._callbacks[resource][event][callback_id] self._index[callback_id][resource].discard(event) if not self._index[callback_id][resource]: del self._index[callback_id][resource] if not self._index[callback_id]: del self._index[callback_id] else: value = '%s,%s' % (resource, event) raise exceptions.Invalid(element='resource,event', value=value) def unsubscribe_by_resource(self, callback, resource): """Unsubscribe callback for any event associated to the resource. :param callback: the callback. :param resource: the resource. """ callback_id = self._find(callback) if callback_id: if resource in self._index[callback_id]: for event in self._index[callback_id][resource]: del self._callbacks[resource][event][callback_id] del self._index[callback_id][resource] if not self._index[callback_id]: del self._index[callback_id] def unsubscribe_all(self, callback): """Unsubscribe callback for all events and all resources. :param callback: the callback. """ callback_id = self._find(callback) if callback_id: for resource, resource_events in self._index[callback_id].items(): for event in resource_events: del self._callbacks[resource][event][callback_id] del self._index[callback_id] def notify(self, resource, event, trigger, **kwargs): """Notify all subscribed callback(s). Dispatch the resource's event to the subscribed callbacks. :param resource: the resource. :param event: the event. :param trigger: the trigger. A reference to the sender of the event. 
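        :raises CallbackFailure: if any callback raises. For BEFORE_*
            events the corresponding ABORT_* event is dispatched to
            subscribers before the failure is raised.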
""" errors = self._notify_loop(resource, event, trigger, **kwargs) if errors: if event.startswith(events.BEFORE): abort_event = event.replace( events.BEFORE, events.ABORT) self._notify_loop(resource, abort_event, trigger, **kwargs) raise exceptions.CallbackFailure(errors=errors) if event.startswith(events.PRECOMMIT): raise exceptions.CallbackFailure(errors=errors) def clear(self): """Brings the manager to a clean slate.""" self._callbacks = collections.defaultdict(dict) self._index = collections.defaultdict(dict) def _notify_loop(self, resource, event, trigger, **kwargs): """The notification loop.""" LOG.debug("Notify callbacks for %(resource)s, %(event)s", {'resource': resource, 'event': event}) errors = [] callbacks = self._callbacks[resource].get(event, {}).items() # TODO(armax): consider using a GreenPile for callback_id, callback in callbacks: try: LOG.debug("Calling callback %s", callback_id) callback(resource, event, trigger, **kwargs) except Exception as e: LOG.exception(_LE("Error during notification for " "%(callback)s %(resource)s, %(event)s"), {'callback': callback_id, 'resource': resource, 'event': event}) errors.append(exceptions.NotificationError(callback_id, e)) return errors def _find(self, callback): """Return the callback_id if found, None otherwise.""" callback_id = _get_id(callback) return callback_id if callback_id in self._index else None def _get_id(callback): """Return a unique identifier for the callback.""" # TODO(armax): consider using something other than names # https://www.python.org/dev/peps/pep-3155/, but this # might be okay for now. return reflection.get_callable_name(callback) neutron-8.4.0/neutron/callbacks/__init__.py0000664000567000056710000000000013044372736022077 0ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/callbacks/resources.py0000664000567000056710000000176113044372760022366 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # String literals representing core resources. AGENT = 'agent' EXTERNAL_NETWORK = 'external_network' FLOATING_IP = 'floatingip' PORT = 'port' PROCESS = 'process' ROUTER = 'router' ROUTER_GATEWAY = 'router_gateway' ROUTER_INTERFACE = 'router_interface' SECURITY_GROUP = 'security_group' SECURITY_GROUP_RULE = 'security_group_rule' SUBNET = 'subnet' SUBNET_GATEWAY = 'subnet_gateway' SUBNETPOOL_ADDRESS_SCOPE = 'subnetpool_address_scope' neutron-8.4.0/neutron/callbacks/registry.py0000664000567000056710000000265613044372736022233 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from neutron.callbacks import manager # TODO(armax): consider adding locking CALLBACK_MANAGER = None def _get_callback_manager(): global CALLBACK_MANAGER if CALLBACK_MANAGER is None: CALLBACK_MANAGER = manager.CallbacksManager() return CALLBACK_MANAGER def subscribe(callback, resource, event): _get_callback_manager().subscribe(callback, resource, event) def unsubscribe(callback, resource, event): _get_callback_manager().unsubscribe(callback, resource, event) def unsubscribe_by_resource(callback, resource): _get_callback_manager().unsubscribe_by_resource(callback, resource) def unsubscribe_all(callback): _get_callback_manager().unsubscribe_all(callback) def notify(resource, event, trigger, **kwargs): _get_callback_manager().notify(resource, event, trigger, **kwargs) def clear(): _get_callback_manager().clear() neutron-8.4.0/neutron/callbacks/events.py0000664000567000056710000000220013044372760021645 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # String literals representing core events. BEFORE_CREATE = 'before_create' BEFORE_READ = 'before_read' BEFORE_UPDATE = 'before_update' BEFORE_DELETE = 'before_delete' PRECOMMIT_CREATE = 'precommit_create' PRECOMMIT_UPDATE = 'precommit_update' PRECOMMIT_DELETE = 'precommit_delete' AFTER_CREATE = 'after_create' AFTER_READ = 'after_read' AFTER_UPDATE = 'after_update' AFTER_DELETE = 'after_delete' ABORT_CREATE = 'abort_create' ABORT_READ = 'abort_read' ABORT_UPDATE = 'abort_update' ABORT_DELETE = 'abort_delete' ABORT = 'abort_' BEFORE = 'before_' PRECOMMIT = 'precommit_' neutron-8.4.0/neutron/callbacks/exceptions.py0000664000567000056710000000324013044372760022527 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
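# A minimal sketch of how these types compose (illustrative): the callbacks
# manager collects one NotificationError per failing callback and wraps the
# list in a CallbackFailure, e.g.
#
#     errors = [NotificationError('my_cb', RuntimeError('boom'))]
#     failure = CallbackFailure(errors=errors)
#     str(failure)  # -> 'Callback my_cb failed with "boom"'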
from neutron._i18n import _ from neutron.common import exceptions class Invalid(exceptions.NeutronException): message = _("The value '%(value)s' for %(element)s is not valid.") class CallbackFailure(exceptions.MultipleExceptions): def __init__(self, errors): self.errors = errors def __str__(self): if isinstance(self.errors, list): return ','.join(str(error) for error in self.errors) else: return str(self.errors) @property def inner_exceptions(self): if isinstance(self.errors, list): return [self._unpack_if_notification_error(e) for e in self.errors] return [self._unpack_if_notification_error(self.errors)] @staticmethod def _unpack_if_notification_error(exc): if isinstance(exc, NotificationError): return exc.error return exc class NotificationError(object): def __init__(self, callback_id, error): self.callback_id = callback_id self.error = error def __str__(self): return 'Callback %s failed with "%s"' % (self.callback_id, self.error) neutron-8.4.0/neutron/core_extensions/0000775000567000056710000000000013044373210021254 5ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/core_extensions/__init__.py0000664000567000056710000000000013044372736023367 0ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/core_extensions/qos.py0000664000567000056710000000655213044372760022451 0ustar jenkinsjenkins00000000000000# Copyright (c) 2015 Red Hat Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
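# Usage sketch: how a core plugin might drive the extension defined in this
# module while updating a port. The context, port_dict and policy_id names
# are illustrative placeholders; process_fields() and the PORT resource type
# match the definitions in this module and in neutron.core_extensions.base.
#
#     from neutron.core_extensions import base as ce_base
#     from neutron.core_extensions import qos as qos_ext
#
#     ext = qos_ext.QosCoreResourceExtension()
#     # requested_resource carries the user-supplied fields (here the
#     # qos_policy_id being set); actual_resource is the port dict known
#     # to the plugin.
#     ext.process_fields(context, ce_base.PORT,
#                        {'qos_policy_id': policy_id}, port_dict)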
from neutron.common import exceptions as n_exc from neutron.core_extensions import base from neutron.db import api as db_api from neutron import manager from neutron.objects.qos import policy as policy_object from neutron.plugins.common import constants as plugin_constants from neutron.services.qos import qos_consts class QosCoreResourceExtension(base.CoreResourceExtension): @property def plugin_loaded(self): if not hasattr(self, '_plugin_loaded'): service_plugins = manager.NeutronManager.get_service_plugins() self._plugin_loaded = plugin_constants.QOS in service_plugins return self._plugin_loaded def _get_policy_obj(self, context, policy_id): obj = policy_object.QosPolicy.get_object(context, id=policy_id) if obj is None: raise n_exc.QosPolicyNotFound(policy_id=policy_id) return obj def _update_port_policy(self, context, port, port_changes): old_policy = policy_object.QosPolicy.get_port_policy( context, port['id']) if old_policy: old_policy.detach_port(port['id']) qos_policy_id = port_changes.get(qos_consts.QOS_POLICY_ID) if qos_policy_id is not None: policy = self._get_policy_obj(context, qos_policy_id) policy.attach_port(port['id']) port[qos_consts.QOS_POLICY_ID] = qos_policy_id def _update_network_policy(self, context, network, network_changes): old_policy = policy_object.QosPolicy.get_network_policy( context, network['id']) if old_policy: old_policy.detach_network(network['id']) qos_policy_id = network_changes.get(qos_consts.QOS_POLICY_ID) if qos_policy_id is not None: policy = self._get_policy_obj(context, qos_policy_id) policy.attach_network(network['id']) network[qos_consts.QOS_POLICY_ID] = qos_policy_id def _exec(self, method_name, context, kwargs): with db_api.autonested_transaction(context.session): return getattr(self, method_name)(context=context, **kwargs) def process_fields(self, context, resource_type, requested_resource, actual_resource): if (qos_consts.QOS_POLICY_ID in requested_resource and self.plugin_loaded): self._exec('_update_%s_policy' % resource_type, context, {resource_type: actual_resource, "%s_changes" % resource_type: requested_resource}) def extract_fields(self, resource_type, resource): if not self.plugin_loaded: return {} binding = resource['qos_policy_binding'] qos_policy_id = binding['policy_id'] if binding else None return {qos_consts.QOS_POLICY_ID: qos_policy_id} neutron-8.4.0/neutron/core_extensions/base.py0000664000567000056710000000277513044372736022567 0ustar jenkinsjenkins00000000000000# Copyright (c) 2015 Red Hat Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import abc import six NETWORK = 'network' PORT = 'port' CORE_RESOURCES = [NETWORK, PORT] @six.add_metaclass(abc.ABCMeta) class CoreResourceExtension(object): @abc.abstractmethod def process_fields(self, context, resource_type, requested_resource, actual_resource): """Process extension fields. 
:param context: neutron api request context :param resource_type: core resource type (one of CORE_RESOURCES) :param requested_resource: resource dict that contains extension fields :param actual_resource: actual resource dict known to plugin """ @abc.abstractmethod def extract_fields(self, resource_type, resource): """Extract extension fields. :param resource_type: core resource type (one of CORE_RESOURCES) :param resource: resource dict that contains extension fields """ neutron-8.4.0/neutron/version.py0000664000567000056710000000125613044372736020124 0ustar jenkinsjenkins00000000000000# Copyright 2011 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import pbr.version version_info = pbr.version.VersionInfo('neutron') neutron-8.4.0/neutron/__init__.py0000664000567000056710000000173613044372760020176 0ustar jenkinsjenkins00000000000000# Copyright 2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import gettext from debtcollector import removals import six if six.PY2: gettext.install('neutron', unicode=1) else: gettext.install('neutron') six.moves.builtins.__dict__['_'] = removals.remove( message='Builtin _ translation function is deprecated in OpenStack; ' 'use the function from _i18n module for your project.')(_) neutron-8.4.0/neutron/server/0000775000567000056710000000000013044373210017353 5ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/server/wsgi_pecan.py0000664000567000056710000000200213044372736022052 0ustar jenkinsjenkins00000000000000#!/usr/bin/env python # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
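# Implementation sketch for the CoreResourceExtension ABC defined above in
# neutron.core_extensions.base: a minimal subclass handling a made-up
# 'color' field. This is an illustrative example, not an extension that
# ships with Neutron.
#
#     from neutron.core_extensions import base
#
#     class ColorCoreResourceExtension(base.CoreResourceExtension):
#         def process_fields(self, context, resource_type,
#                            requested_resource, actual_resource):
#             # Copy the requested extension field onto the actual resource.
#             if 'color' in requested_resource:
#                 actual_resource['color'] = requested_resource['color']
#
#         def extract_fields(self, resource_type, resource):
#             # Expose the extension field when the resource is shown.
#             return {'color': resource.get('color')}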
from oslo_log import log from neutron._i18n import _LI from neutron.pecan_wsgi import app as pecan_app from neutron.server import wsgi_eventlet from neutron import service LOG = log.getLogger(__name__) def pecan_wsgi_server(): LOG.info(_LI("Pecan WSGI server starting...")) application = pecan_app.setup_app() neutron_api = service.run_wsgi_app(application) wsgi_eventlet.start_api_and_rpc_workers(neutron_api) neutron-8.4.0/neutron/server/__init__.py0000664000567000056710000000267313044372736021510 0ustar jenkinsjenkins00000000000000# Copyright 2011 VMware, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # If ../neutron/__init__.py exists, add ../ to Python search path, so that # it will override what happens to be installed in /usr/(local/)lib/python... import sys from oslo_config import cfg from neutron._i18n import _ from neutron.common import config def boot_server(server_func): # the configuration will be read into the cfg.CONF global data structure config.init(sys.argv[1:]) config.setup_logging() config.set_config_defaults() if not cfg.CONF.config_file: sys.exit(_("ERROR: Unable to find configuration file via the default" " search paths (~/.neutron/, ~/, /etc/neutron/, /etc/) and" " the '--config-file' option!")) try: server_func() except KeyboardInterrupt: pass except RuntimeError as e: sys.exit(_("ERROR: %s") % e) neutron-8.4.0/neutron/server/rpc_eventlet.py0000664000567000056710000000244013044372760022430 0ustar jenkinsjenkins00000000000000#!/usr/bin/env python # Copyright 2011 VMware, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # If ../neutron/__init__.py exists, add ../ to Python search path, so that # it will override what happens to be installed in /usr/(local/)lib/python... import eventlet from oslo_log import log from neutron._i18n import _LI from neutron import service LOG = log.getLogger(__name__) def eventlet_rpc_server(): pool = eventlet.GreenPool() LOG.info(_LI("Eventlet based AMQP RPC server starting...")) try: neutron_rpc = service.serve_rpc() except NotImplementedError: LOG.info(_LI("RPC was already started in parent process by " "plugin.")) else: pool.spawn(neutron_rpc.wait) pool.waitall() neutron-8.4.0/neutron/server/wsgi_eventlet.py0000664000567000056710000000304113044372760022613 0ustar jenkinsjenkins00000000000000#!/usr/bin/env python # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import eventlet from oslo_log import log from neutron._i18n import _LI from neutron import service LOG = log.getLogger(__name__) def eventlet_wsgi_server(): neutron_api = service.serve_wsgi(service.NeutronApiService) start_api_and_rpc_workers(neutron_api) def start_api_and_rpc_workers(neutron_api): pool = eventlet.GreenPool() api_thread = pool.spawn(neutron_api.wait) try: neutron_rpc = service.serve_rpc() except NotImplementedError: LOG.info(_LI("RPC was already started in parent process by " "plugin.")) else: rpc_thread = pool.spawn(neutron_rpc.wait) plugin_workers = service.start_plugin_workers() for worker in plugin_workers: pool.spawn(worker.wait) # api and rpc should die together. When one dies, kill the other. rpc_thread.link(lambda gt: api_thread.kill()) api_thread.link(lambda gt: rpc_thread.kill()) pool.waitall() neutron-8.4.0/neutron/openstack/0000775000567000056710000000000013044373210020034 5ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/openstack/__init__.py0000664000567000056710000000000013044372760022144 0ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/openstack/common/0000775000567000056710000000000013044373210021324 5ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/openstack/common/__init__.py0000664000567000056710000000000013044372760023434 0ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/openstack/common/cache/0000775000567000056710000000000013044373210022367 5ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/openstack/common/cache/backends.py0000664000567000056710000001716113044372760024532 0ustar jenkinsjenkins00000000000000# Copyright 2013 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import abc import six NOTSET = object() @six.add_metaclass(abc.ABCMeta) class BaseCache(object): """Base Cache Abstraction :params parsed_url: Parsed url object. :params options: A dictionary with configuration parameters for the cache. For example: - default_ttl: An integer defining the default ttl for keys. """ def __init__(self, parsed_url, options=None): self._parsed_url = parsed_url self._options = options or {} self._default_ttl = int(self._options.get('default_ttl', 0)) @abc.abstractmethod def _set(self, key, value, ttl, not_exists=False): """Implementations of this class have to override this method.""" def set(self, key, value, ttl, not_exists=False): """Sets or updates a cache entry .. note:: Thread-safety is required and has to be guaranteed by the backend implementation. :params key: Item key as string. :type key: `unicode string` :params value: Value to assign to the key. This can be anything that is handled by the current backend. 
:params ttl: Key's timeout in seconds. 0 means no timeout. :type ttl: int :params not_exists: If True, the key will be set if it doesn't exist. Otherwise, it'll always be set. :type not_exists: bool :returns: True if the operation succeeds, False otherwise. """ if ttl is None: ttl = self._default_ttl return self._set(key, value, ttl, not_exists) def __setitem__(self, key, value): self.set(key, value, self._default_ttl) def setdefault(self, key, value): """Sets the key value to `value` if it doesn't exist :params key: Item key as string. :type key: `unicode string` :params value: Value to assign to the key. This can be anything that is handled by the current backend. """ try: return self[key] except KeyError: self[key] = value return value @abc.abstractmethod def _get(self, key, default): """Implementations of this class have to override this method.""" def get(self, key, default=None): """Gets one item from the cache .. note:: Thread-safety is required and it has to be guaranteed by the backend implementation. :params key: Key for the item to retrieve from the cache. :params default: The default value to return. :returns: `key`'s value in the cache if it exists, otherwise `default` should be returned. """ return self._get(key, default) def __getitem__(self, key): value = self.get(key, NOTSET) if value is NOTSET: raise KeyError return value @abc.abstractmethod def __delitem__(self, key): """Removes an item from cache. .. note:: Thread-safety is required and it has to be guaranteed by the backend implementation. :params key: The key to remove. :returns: The key value if there's one """ @abc.abstractmethod def _clear(self): """Implementations of this class have to override this method.""" def clear(self): """Removes all items from the cache. .. note:: Thread-safety is required and it has to be guaranteed by the backend implementation. """ return self._clear() @abc.abstractmethod def _incr(self, key, delta): """Implementations of this class have to override this method.""" def incr(self, key, delta=1): """Increments the value for a key :params key: The key for the value to be incremented :params delta: Number of units by which to increment the value. Pass a negative number to decrement the value. :returns: The new value """ return self._incr(key, delta) @abc.abstractmethod def _append_tail(self, key, tail): """Implementations of this class have to override this method.""" def append_tail(self, key, tail): """Appends `tail` to `key`'s value. :params key: The key of the value to which `tail` should be appended. :params tail: The list of values to append to the original. :returns: The new value """ if not hasattr(tail, "__iter__"): raise TypeError('Tail must be an iterable') if not isinstance(tail, list): # NOTE(flaper87): Make sure we pass a list # down to the implementation. Not all drivers # have support for generators, sets or other # iterables. tail = list(tail) return self._append_tail(key, tail) def append(self, key, value): """Appends `value` to `key`'s value. :params key: The key of the value to which `tail` should be appended. :params value: The value to append to the original. :returns: The new value """ return self.append_tail(key, [value]) @abc.abstractmethod def __contains__(self, key): """Verifies that a key exists. :params key: The key to verify. :returns: True if the key exists, otherwise False. 
""" @abc.abstractmethod def _get_many(self, keys, default): """Implementations of this class have to override this method.""" return ((k, self.get(k, default=default)) for k in keys) def get_many(self, keys, default=NOTSET): """Gets keys' value from cache :params keys: List of keys to retrieve. :params default: The default value to return for each key that is not in the cache. :returns: A generator of (key, value) """ return self._get_many(keys, default) @abc.abstractmethod def _set_many(self, data, ttl): """Implementations of this class have to override this method.""" for key, value in data.items(): self.set(key, value, ttl=ttl) def set_many(self, data, ttl=None): """Puts several items into the cache at once Depending on the backend, this operation may or may not be efficient. The default implementation calls set for each (key, value) pair passed, other backends support set_many operations as part of their protocols. :params data: A dictionary like {key: val} to store in the cache. :params ttl: Key's timeout in seconds. """ if ttl is None: ttl = self._default_ttl self._set_many(data, ttl) def update(self, **kwargs): """Sets several (key, value) paris. Refer to the `set_many` docstring. """ self.set_many(kwargs, ttl=self._default_ttl) @abc.abstractmethod def _unset_many(self, keys): """Implementations of this class have to override this method.""" for key in keys: del self[key] def unset_many(self, keys): """Removes several keys from the cache at once :params keys: List of keys to unset. """ self._unset_many(keys) neutron-8.4.0/neutron/openstack/common/cache/__init__.py0000664000567000056710000000000013044372760024477 0ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/openstack/common/cache/cache.py0000664000567000056710000000556113044372760024024 0ustar jenkinsjenkins00000000000000# Copyright 2013 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Cache library. Supported configuration options: `default_backend`: Name of the cache backend to use. `key_namespace`: Namespace under which keys will be created. """ ######################################################################## # # THIS MODULE IS DEPRECATED # # Please refer to # https://etherpad.openstack.org/p/kilo-neutron-library-proposals for # the discussion leading to this deprecation. # # We recommend helping with the new oslo.cache library being created # as a wrapper for dogpile. # ######################################################################## from six.moves.urllib import parse from stevedore import driver def _get_oslo_configs(): """Returns the oslo config options to register.""" # NOTE(flaper87): Oslo config should be # optional. Instead of doing try / except # at the top of this file, lets import cfg # here and assume that the caller of this # function already took care of this dependency. 
from oslo_config import cfg return [ cfg.StrOpt('cache_url', default='memory://', help='URL to connect to the cache back end.') ] def register_oslo_configs(conf): """Registers the cache configuration options :params conf: Config object. :type conf: `cfg.ConfigOptions` """ conf.register_opts(_get_oslo_configs()) def get_cache(url='memory://'): """Loads the cache backend This function loads the cache backend specified by the given URL. :param url: URL of the cache back end to load, including any backend options as query parameters. """ parsed = parse.urlparse(url) backend = parsed.scheme query = parsed.query # NOTE(flaper87): We need the following hack # for python versions < 2.7.5. Previous versions # of python parsed query params just for 'known' # schemes. This was changed in this patch: # http://hg.python.org/cpython/rev/79e6ff3d9afd if not query and '?' in parsed.path: query = parsed.path.split('?', 1)[-1] parameters = parse.parse_qsl(query) kwargs = {'options': dict(parameters)} mgr = driver.DriverManager('neutron.openstack.common.cache.backends', backend, invoke_on_load=True, invoke_args=[parsed], invoke_kwds=kwargs) return mgr.driver neutron-8.4.0/neutron/openstack/common/cache/_backends/0000775000567000056710000000000013044373210024300 5ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/openstack/common/cache/_backends/__init__.py0000664000567000056710000000000013044372760026410 0ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/openstack/common/cache/_backends/memory.py0000664000567000056710000001213613044372760026176 0ustar jenkinsjenkins00000000000000# Copyright 2013 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import collections from oslo_concurrency import lockutils from oslo_utils import timeutils from neutron.openstack.common.cache import backends class MemoryBackend(backends.BaseCache): def __init__(self, parsed_url, options=None): super(MemoryBackend, self).__init__(parsed_url, options) self._clear() def _set_unlocked(self, key, value, ttl=0): expires_at = 0 if ttl != 0: expires_at = timeutils.utcnow_ts() + ttl self._cache[key] = (expires_at, value) if expires_at: self._keys_expires[expires_at].add(key) def _set(self, key, value, ttl=0, not_exists=False): with lockutils.lock(key): # NOTE(flaper87): This is needed just in `set` # calls, hence it's not in `_set_unlocked` if not_exists and self._exists_unlocked(key): return False self._set_unlocked(key, value, ttl) return True def _get_unlocked(self, key, default=None): now = timeutils.utcnow_ts() try: timeout, value = self._cache[key] except KeyError: return (0, default) if timeout and now >= timeout: # NOTE(flaper87): Record expired, # remove it from the cache but catch # KeyError and ValueError in case # _purge_expired removed this key already.
try: del self._cache[key] except KeyError: pass try: # NOTE(flaper87): Keys with ttl == 0 # don't exist in the _keys_expires dict self._keys_expires[timeout].remove(key) except (KeyError, ValueError): pass return (0, default) return (timeout, value) def _get(self, key, default=None): with lockutils.lock(key): return self._get_unlocked(key, default)[1] def _exists_unlocked(self, key): now = timeutils.utcnow_ts() try: timeout = self._cache[key][0] return not timeout or now <= timeout except KeyError: return False def __contains__(self, key): with lockutils.lock(key): return self._exists_unlocked(key) def _incr_append(self, key, other): with lockutils.lock(key): timeout, value = self._get_unlocked(key) if value is None: return None # Preserve the remaining ttl of the entry; 0 means it never expires ttl = timeout - timeutils.utcnow_ts() if timeout else 0 new_value = value + other self._set_unlocked(key, new_value, ttl) return new_value def _incr(self, key, delta): if not isinstance(delta, int): raise TypeError('delta must be an int instance') return self._incr_append(key, delta) def _append_tail(self, key, tail): return self._incr_append(key, tail) def _purge_expired(self): """Removes expired keys from the cache.""" now = timeutils.utcnow_ts() for timeout in sorted(self._keys_expires.keys()): # NOTE(flaper87): If timeout is greater # than `now`, stop the iteration, remaining # keys have not expired. if now < timeout: break # NOTE(flaper87): Unset every key in # this set from the cache if its timeout # is equal to `timeout`. (The key might # have been updated) for subkey in self._keys_expires.pop(timeout): try: if self._cache[subkey][0] == timeout: del self._cache[subkey] except KeyError: continue def __delitem__(self, key): self._purge_expired() # NOTE(flaper87): Delete the key. Using pop # since it could have been deleted already value = self._cache.pop(key, None) if value: try: # NOTE(flaper87): Keys with ttl == 0 # don't exist in the _keys_expires dict self._keys_expires[value[0]].remove(key) except (KeyError, ValueError): pass def _clear(self): self._cache = {} self._keys_expires = collections.defaultdict(set) def _get_many(self, keys, default): return super(MemoryBackend, self)._get_many(keys, default) def _set_many(self, data, ttl=0): return super(MemoryBackend, self)._set_many(data, ttl) def _unset_many(self, keys): return super(MemoryBackend, self)._unset_many(keys) neutron-8.4.0/neutron/quota/0000775000567000056710000000000013044373210017176 5ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/quota/__init__.py0000664000567000056710000003143613044372760021327 0ustar jenkinsjenkins00000000000000# Copyright (c) 2015 OpenStack Foundation. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License.
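# Usage sketch: the reserve/commit flow that the API layer drives against
# the QUOTAS engine defined in this module. The context and plugin
# variables are illustrative placeholders, and the reservation_id attribute
# assumes the ReservationInfo structure from neutron.db.quota.api.
#
#     from neutron import quota
#
#     deltas = {'port': 1}
#     reservation = quota.QUOTAS.make_reservation(
#         context, context.tenant_id, deltas, plugin)
#     # ... create the port ...
#     quota.QUOTAS.commit_reservation(context, reservation.reservation_id)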
"""Quotas for instances, volumes, and floating ips.""" import sys from oslo_config import cfg from oslo_log import log as logging from oslo_log import versionutils from oslo_utils import importutils import six import webob from neutron._i18n import _, _LI, _LW from neutron.common import exceptions from neutron.db.quota import api as quota_api from neutron.quota import resource_registry LOG = logging.getLogger(__name__) QUOTA_DB_MODULE = 'neutron.db.quota.driver' QUOTA_DB_DRIVER = '%s.DbQuotaDriver' % QUOTA_DB_MODULE QUOTA_CONF_DRIVER = 'neutron.quota.ConfDriver' default_quota_items = ['network', 'subnet', 'port'] quota_opts = [ cfg.ListOpt('quota_items', default=default_quota_items, deprecated_for_removal=True, help=_('Resource name(s) that are supported in quota ' 'features. This option is now deprecated for ' 'removal.')), cfg.IntOpt('default_quota', default=-1, help=_('Default number of resource allowed per tenant. ' 'A negative value means unlimited.')), cfg.IntOpt('quota_network', default=10, help=_('Number of networks allowed per tenant. ' 'A negative value means unlimited.')), cfg.IntOpt('quota_subnet', default=10, help=_('Number of subnets allowed per tenant, ' 'A negative value means unlimited.')), cfg.IntOpt('quota_port', default=50, help=_('Number of ports allowed per tenant. ' 'A negative value means unlimited.')), cfg.StrOpt('quota_driver', default=QUOTA_DB_DRIVER, help=_('Default driver to use for quota checks')), cfg.BoolOpt('track_quota_usage', default=True, help=_('Keep in track in the database of current resource' 'quota usage. Plugins which do not leverage the ' 'neutron database should set this flag to False')), ] # Register the configuration options cfg.CONF.register_opts(quota_opts, 'QUOTAS') class ConfDriver(object): """Configuration driver. Driver to perform necessary checks to enforce quotas and obtain quota information. The default driver utilizes the default values in neutron.conf. """ def _get_quotas(self, context, resources): """Get quotas. A helper method which retrieves the quotas for the specific resources identified by keys, and which apply to the current context. :param context: The request context, for access checks. :param resources: A dictionary of the registered resources. """ quotas = {} for resource in resources.values(): quotas[resource.name] = resource.default return quotas def limit_check(self, context, tenant_id, resources, values): """Check simple quota limits. For limits--those quotas for which there is no usage synchronization function--this method checks that a set of proposed values are permitted by the limit restriction. If any of the proposed values is over the defined quota, an OverQuota exception will be raised with the sorted list of the resources which are too high. Otherwise, the method returns nothing. :param context: The request context, for access checks. :param tenant_id: The tenant_id to check quota. :param resources: A dictionary of the registered resources. :param values: A dictionary of the values to check against the quota. 
""" # Ensure no value is less than zero unders = [key for key, val in values.items() if val < 0] if unders: raise exceptions.InvalidQuotaValue(unders=sorted(unders)) # Get the applicable quotas quotas = self._get_quotas(context, resources) # Check the quotas and construct a list of the resources that # would be put over limit by the desired values overs = [key for key, val in values.items() if quotas[key] >= 0 and quotas[key] < val] if overs: raise exceptions.OverQuota(overs=sorted(overs), quotas=quotas, usages={}) @staticmethod def get_tenant_quotas(context, resources, tenant_id): quotas = {} sub_resources = dict((k, v) for k, v in resources.items()) for resource in sub_resources.values(): quotas[resource.name] = resource.default return quotas @staticmethod def get_all_quotas(context, resources): return [] @staticmethod def delete_tenant_quota(context, tenant_id): msg = _('Access to this resource was denied.') raise webob.exc.HTTPForbidden(msg) @staticmethod def update_quota_limit(context, tenant_id, resource, limit): msg = _('Access to this resource was denied.') raise webob.exc.HTTPForbidden(msg) def make_reservation(self, context, tenant_id, resources, deltas, plugin): """This driver does not support reservations. This routine is provided for backward compatibility purposes with the API controllers which have now been adapted to make reservations rather than counting resources and checking limits - as this routine ultimately does. """ for resource in deltas.keys(): count = QUOTAS.count(context, resource, plugin, tenant_id) total_use = deltas.get(resource, 0) + count deltas[resource] = total_use self.limit_check( context, tenant_id, resource_registry.get_all_resources(), deltas) # return a fake reservation - the REST controller expects it return quota_api.ReservationInfo('fake', None, None, None) def commit_reservation(self, context, reservation_id): """This is a noop as this driver does not support reservations.""" def cancel_reservation(self, context, reservation_id): """This is a noop as this driver does not support reservations.""" class QuotaEngine(object): """Represent the set of recognized quotas.""" _instance = None @classmethod def get_instance(cls): if not cls._instance: cls._instance = cls() return cls._instance def __init__(self, quota_driver_class=None): """Initialize a Quota object.""" self._driver = None self._driver_class = quota_driver_class def get_driver(self): if self._driver is None: _driver_class = (self._driver_class or cfg.CONF.QUOTAS.quota_driver) if (_driver_class == QUOTA_DB_DRIVER and QUOTA_DB_MODULE not in sys.modules): # If quotas table is not loaded, force config quota driver. _driver_class = QUOTA_CONF_DRIVER LOG.info(_LI("ConfDriver is used as quota_driver because the " "loaded plugin does not support 'quotas' table.")) if isinstance(_driver_class, six.string_types): _driver_class = importutils.import_object(_driver_class) if isinstance(_driver_class, ConfDriver): versionutils.report_deprecated_feature( LOG, _LW("The quota driver neutron.quota.ConfDriver is " "deprecated as of Liberty. " "neutron.db.quota.driver.DbQuotaDriver should " "be used in its place")) self._driver = _driver_class LOG.info(_LI('Loaded quota_driver: %s.'), _driver_class) return self._driver def count(self, context, resource_name, *args, **kwargs): """Count a resource. For countable resources, invokes the count() function and returns its result. Arguments following the context and resource are passed directly to the count function declared by the resource. 
:param context: The request context, for access checks. :param resource_name: The name of the resource, as a string. """ # Get the resource res = resource_registry.get_resource(resource_name) if not res or not hasattr(res, 'count'): raise exceptions.QuotaResourceUnknown(unknown=[resource_name]) return res.count(context, *args, **kwargs) def make_reservation(self, context, tenant_id, deltas, plugin): # Verify that resources are managed by the quota engine # Ensure no value is less than zero unders = [key for key, val in deltas.items() if val < 0] if unders: raise exceptions.InvalidQuotaValue(unders=sorted(unders)) requested_resources = set(deltas.keys()) all_resources = resource_registry.get_all_resources() managed_resources = set([res for res in all_resources.keys() if res in requested_resources]) # Make sure we accounted for all of them... unknown_resources = requested_resources - managed_resources if unknown_resources: raise exceptions.QuotaResourceUnknown( unknown=sorted(unknown_resources)) # FIXME(salv-orlando): There should be no reason for sending all the # resources in the registry to the quota driver, but as other driver # APIs request them, this will be sorted out with a different patch. return self.get_driver().make_reservation( context, tenant_id, all_resources, deltas, plugin) def commit_reservation(self, context, reservation_id): self.get_driver().commit_reservation(context, reservation_id) def cancel_reservation(self, context, reservation_id): self.get_driver().cancel_reservation(context, reservation_id) def limit_check(self, context, tenant_id, **values): """Check simple quota limits. For limits--those quotas for which there is no usage synchronization function--this method checks that a set of proposed values are permitted by the limit restriction. The values to check are given as keyword arguments, where the key identifies the specific quota limit to check, and the value is the proposed value. This method will raise a QuotaResourceUnknown exception if a given resource is unknown or if it is not a countable resource. If any of the proposed values exceeds the respective quota defined for the tenant, an OverQuota exception will be raised. The exception will include a sorted list with the resources which exceed the quota limit. Otherwise, the method returns nothing. :param context: Request context :param tenant_id: Tenant for which the quota limit is being checked :param values: Dict specifying requested deltas for each resource """ # TODO(salv-orlando): Deprecate calls to this API # Verify that resources are managed by the quota engine requested_resources = set(values.keys()) managed_resources = set([res for res in resource_registry.get_all_resources() if res in requested_resources]) # Make sure we accounted for all of them... unknown_resources = requested_resources - managed_resources if unknown_resources: raise exceptions.QuotaResourceUnknown( unknown=sorted(unknown_resources)) return self.get_driver().limit_check( context, tenant_id, resource_registry.get_all_resources(), values) QUOTAS = QuotaEngine.get_instance() def register_resources_from_config(): # This operation is now deprecated. All the neutron core and extended # resources for which quota limits are enforced explicitly register # themselves with the quota engine.
for resource_item in (set(cfg.CONF.QUOTAS.quota_items) - set(default_quota_items)): resource_registry.register_resource_by_name(resource_item) register_resources_from_config() neutron-8.4.0/neutron/quota/resource.py0000664000567000056710000003432413044372760021416 0ustar jenkinsjenkins00000000000000# Copyright (c) 2015 OpenStack Foundation. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import cfg from oslo_db import api as oslo_db_api from oslo_db import exception as oslo_db_exception from oslo_log import log from oslo_utils import excutils from sqlalchemy import event from sqlalchemy import exc as sql_exc from neutron._i18n import _LE, _LW from neutron.db import api as db_api from neutron.db.quota import api as quota_api LOG = log.getLogger(__name__) def _count_resource(context, plugin, collection_name, tenant_id): count_getter_name = "get_%s_count" % collection_name # Some plugins support a count method for particular resources, # using a DB's optimized counting features. We try to use that one # if present. Otherwise just use regular getter to retrieve all objects # and count in python, allowing older plugins to still be supported try: obj_count_getter = getattr(plugin, count_getter_name) meh = obj_count_getter(context, filters={'tenant_id': [tenant_id]}) return meh except (NotImplementedError, AttributeError): obj_getter = getattr(plugin, "get_%s" % collection_name) obj_list = obj_getter(context, filters={'tenant_id': [tenant_id]}) return len(obj_list) if obj_list else 0 class BaseResource(object): """Describe a single resource for quota checking.""" def __init__(self, name, flag, plural_name=None): """Initializes a resource. :param name: The name of the resource, i.e., "instances". :param flag: The name of the flag or configuration option :param plural_name: Plural form of the resource name. If not specified, it is generated automatically by appending an 's' to the resource name, unless it ends with a 'y'. In that case the last letter is removed, and 'ies' is appended. Dashes are always converted to underscores. """ self.name = name # If a plural name is not supplied, default to adding an 's' to # the resource name, unless the resource name ends in 'y', in which # case remove the 'y' and add 'ies'. Even if the code should not fiddle # too much with English grammar, this is a rather common and easy to # implement rule. if plural_name: self.plural_name = plural_name elif self.name[-1] == 'y': self.plural_name = "%sies" % self.name[:-1] else: self.plural_name = "%ss" % self.name # always convert dashes to underscores self.plural_name = self.plural_name.replace('-', '_') self.flag = flag @property def default(self): """Return the default value of the quota.""" # Any negative value will be interpreted as an infinite quota, # and stored as -1 for compatibility with current behaviour value = getattr(cfg.CONF.QUOTAS, self.flag, cfg.CONF.QUOTAS.default_quota) return max(value, -1) @property def dirty(self): """Return the current state of the Resource instance. 
:returns: True if the resource count is out of sync with actual data, False if it is in sync, and None if the resource instance does not track usage. """ class CountableResource(BaseResource): """Describe a resource where the counts are determined by a function.""" def __init__(self, name, count, flag=None, plural_name=None): """Initializes a CountableResource. Countable resources are those resources which directly correspond to objects in the database, i.e., network, subnet, etc. A CountableResource must be constructed with a counting function, which will be called to determine the current counts of the resource. The counting function will be passed the context, along with the extra positional and keyword arguments that are passed to Quota.count(). It should return an integer specifying the count. :param name: The name of the resource, i.e., "instances". :param count: A callable which returns the count of the resource. The arguments passed are as described above. :param flag: The name of the flag or configuration option which specifies the default value of the quota for this resource. :param plural_name: Plural form of the resource name. If not specified, it is generated automatically by appending an 's' to the resource name, unless it ends with a 'y'. In that case the last letter is removed, and 'ies' is appended. Dashes are always converted to underscores. """ super(CountableResource, self).__init__( name, flag=flag, plural_name=plural_name) self._count_func = count def count(self, context, plugin, tenant_id, **kwargs): return self._count_func(context, plugin, self.plural_name, tenant_id) class TrackedResource(BaseResource): """Resource which keeps track of its usage data.""" def __init__(self, name, model_class, flag, plural_name=None): """Initializes an instance for a given resource. TrackedResource instances are directly mapped to data model classes. Resource usage is tracked in the database, and the model class to which this resource refers is monitored to ensure always "fresh" usage data are employed when performing quota checks. This class operates under the assumption that the model class describing the resource has a tenant identifier attribute. :param name: The name of the resource, i.e., "networks". :param model_class: The sqlalchemy model class of the resource for which this instance is being created :param flag: The name of the flag or configuration option which specifies the default value of the quota for this resource. :param plural_name: Plural form of the resource name. If not specified, it is generated automatically by appending an 's' to the resource name, unless it ends with a 'y'. In that case the last letter is removed, and 'ies' is appended. Dashes are always converted to underscores. """ super(TrackedResource, self).__init__( name, flag=flag, plural_name=plural_name) # Register events for addition/removal of records in the model class # As tenant_id is immutable for all Neutron objects there is no need # to register a listener for update events self._model_class = model_class self._dirty_tenants = set() self._out_of_sync_tenants = set() @property def dirty(self): return self._dirty_tenants def mark_dirty(self, context): if not self._dirty_tenants: return with db_api.autonested_transaction(context.session): # It is not necessary to protect this operation with a lock. # Indeed when this method is called the request has been processed # and therefore all resources have been created or deleted. # dirty_tenants will contain all the tenants for which the # resource count is changed.
The list might also contain tenants # for which resource count was altered in other requests, but this # won't be harmful. dirty_tenants_snap = self._dirty_tenants.copy() for tenant_id in dirty_tenants_snap: quota_api.set_quota_usage_dirty(context, self.name, tenant_id) LOG.debug(("Persisted dirty status for tenant:%(tenant_id)s " "on resource:%(resource)s"), {'tenant_id': tenant_id, 'resource': self.name}) self._out_of_sync_tenants |= dirty_tenants_snap self._dirty_tenants -= dirty_tenants_snap def _db_event_handler(self, mapper, _conn, target): try: tenant_id = target['tenant_id'] except AttributeError: with excutils.save_and_reraise_exception(): LOG.error(_LE("Model class %s does not have a tenant_id " "attribute"), target) self._dirty_tenants.add(tenant_id) # Retry the operation if a duplicate entry exception is raised. This # can happen if two or more workers are trying to create a resource of a # given kind for the same tenant concurrently. Retrying the operation will # ensure that an UPDATE statement is emitted rather than an INSERT one @oslo_db_api.wrap_db_retry( max_retries=db_api.MAX_RETRIES, exception_checker=lambda exc: isinstance(exc, (oslo_db_exception.DBDuplicateEntry, oslo_db_exception.DBDeadlock))) def _set_quota_usage(self, context, tenant_id, in_use): return quota_api.set_quota_usage( context, self.name, tenant_id, in_use=in_use) def _resync(self, context, tenant_id, in_use): # Update quota usage usage_info = self._set_quota_usage(context, tenant_id, in_use) self._dirty_tenants.discard(tenant_id) self._out_of_sync_tenants.discard(tenant_id) LOG.debug(("Unset dirty status for tenant:%(tenant_id)s on " "resource:%(resource)s"), {'tenant_id': tenant_id, 'resource': self.name}) return usage_info def resync(self, context, tenant_id): if tenant_id not in self._out_of_sync_tenants: return LOG.debug(("Synchronizing usage tracker for tenant:%(tenant_id)s on " "resource:%(resource)s"), {'tenant_id': tenant_id, 'resource': self.name}) in_use = context.session.query(self._model_class).filter_by( tenant_id=tenant_id).count() # Update quota usage return self._resync(context, tenant_id, in_use) def count(self, context, _plugin, tenant_id, resync_usage=True): """Return the current usage count for the resource. This method will fetch aggregate information for resource usage data, unless usage data are marked as "dirty". In the latter case resource usage will be calculated counting rows for tenant_id in the resource's database model. Active reserved amounts are instead always calculated by summing amounts for matching records in the 'reservations' database model. The _plugin parameter is unused but kept for compatibility with the signature of the count method for CountableResource instances. """ # Load current usage data, setting a row-level lock on the DB usage_info = quota_api.get_quota_usage_by_resource_and_tenant( context, self.name, tenant_id, lock_for_update=True) # Always fetch reservations, as they are not tracked by usage counters reservations = quota_api.get_reservations_for_resources( context, tenant_id, [self.name]) reserved = reservations.get(self.name, 0) # If dirty or missing, calculate actual resource usage querying # the database and set/create usage info data # NOTE: this routine "trusts" usage counters at service startup.
This # assumption is generally valid, but if the database is tampered with, # or if data migrations do not take care of usage counters, the # assumption will not hold anymore if (tenant_id in self._dirty_tenants or not usage_info or usage_info.dirty): LOG.debug(("Usage tracker for resource:%(resource)s and tenant:" "%(tenant_id)s is out of sync, need to count used " "quota"), {'resource': self.name, 'tenant_id': tenant_id}) in_use = context.session.query(self._model_class).filter_by( tenant_id=tenant_id).count() # Update quota usage, if requested (by default do not do that, as # typically one counts before adding a record, and that would mark # the usage counter as dirty again) if resync_usage: usage_info = self._resync(context, tenant_id, in_use) else: resource = usage_info.resource if usage_info else self.name tenant_id = usage_info.tenant_id if usage_info else tenant_id dirty = usage_info.dirty if usage_info else True usage_info = quota_api.QuotaUsageInfo( resource, tenant_id, in_use, dirty) LOG.debug(("Quota usage for %(resource)s was recalculated. " "Used quota:%(used)d."), {'resource': self.name, 'used': usage_info.used}) return usage_info.used + reserved def register_events(self): event.listen(self._model_class, 'after_insert', self._db_event_handler) event.listen(self._model_class, 'after_delete', self._db_event_handler) def unregister_events(self): try: event.remove(self._model_class, 'after_insert', self._db_event_handler) event.remove(self._model_class, 'after_delete', self._db_event_handler) except sql_exc.InvalidRequestError: LOG.warning(_LW("No sqlalchemy event for resource %s found"), self.name) neutron-8.4.0/neutron/quota/resource_registry.py0000664000567000056710000002127613044372760023350 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import cfg from oslo_log import log import six from neutron._i18n import _LI, _LW from neutron.quota import resource LOG = log.getLogger(__name__) # Wrappers for easing access to the ResourceRegistry singleton def register_resource(resource): ResourceRegistry.get_instance().register_resource(resource) def register_resource_by_name(resource_name, plural_name=None): ResourceRegistry.get_instance().register_resource_by_name( resource_name, plural_name) def get_all_resources(): return ResourceRegistry.get_instance().resources def get_resource(resource_name): return ResourceRegistry.get_instance().get_resource(resource_name) def is_tracked(resource_name): return ResourceRegistry.get_instance().is_tracked(resource_name) # auxiliary functions and decorators def set_resources_dirty(context): """Sets the dirty bit for resources with usage changes. This routine scans all registered resources, and, for those whose dirty status is True, sets the dirty bit to True in the database for the appropriate tenants. Please note that this routine begins a nested transaction, and it is not recommended that this transaction begins within another transaction. 
For this reason the function will raise a SqlAlchemy exception if such an attempt is made. :param context: a Neutron request context with a DB session """ if not cfg.CONF.QUOTAS.track_quota_usage: return for res in get_all_resources().values(): with context.session.begin(subtransactions=True): if is_tracked(res.name) and res.dirty: res.mark_dirty(context) def resync_resource(context, resource_name, tenant_id): if not cfg.CONF.QUOTAS.track_quota_usage: return if is_tracked(resource_name): res = get_resource(resource_name) # Only tracked resources support usage resynchronization res.resync(context, tenant_id) def mark_resources_dirty(f): """Decorator for functions which alter resource usage. This decorator ensures set_resources_dirty is invoked after completion of the decorated function. """ @six.wraps(f) def wrapper(_self, context, *args, **kwargs): ret_val = f(_self, context, *args, **kwargs) set_resources_dirty(context) return ret_val return wrapper class tracked_resources(object): """Decorator for specifying resources for which usage should be tracked. A plugin class can use this decorator to specify for which resources usage info should be tracked into an appropriate table rather than being explicitly counted. """ def __init__(self, override=False, **kwargs): self._tracked_resources = kwargs self._override = override def __call__(self, f): @six.wraps(f) def wrapper(*args, **kwargs): registry = ResourceRegistry.get_instance() for resource_name in self._tracked_resources: registry.set_tracked_resource( resource_name, self._tracked_resources[resource_name], self._override) return f(*args, **kwargs) return wrapper class ResourceRegistry(object): """Registry for resources subject to quota limits. This class keeps track of Neutron resources for which quota limits are enforced, regardless of whether their usage is being tracked or counted. For tracked-usage resources, that is to say those resources for which there are usage counters which are kept in sync with the actual number of rows in the database, this class allows the plugin to register their names either explicitly or through the @tracked_resources decorator, which should preferably be applied to the __init__ method of the class. """ _instance = None @classmethod def get_instance(cls): if cls._instance is None: cls._instance = cls() return cls._instance def __init__(self): self._resources = {} # Map usage tracked resources to the corresponding db model class self._tracked_resource_mappings = {} def __contains__(self, resource): return resource in self._resources def _create_resource_instance(self, resource_name, plural_name): """Factory function for quota Resource. This routine returns a resource instance of the appropriate type according to system configuration. If QUOTAS.track_quota_usage is True, and there is a model mapping for the current resource, this function will return an instance of TrackedResource; otherwise an instance of CountableResource.
""" if (not cfg.CONF.QUOTAS.track_quota_usage or resource_name not in self._tracked_resource_mappings): LOG.info(_LI("Creating instance of CountableResource for " "resource:%s"), resource_name) return resource.CountableResource( resource_name, resource._count_resource, 'quota_%s' % resource_name) else: LOG.info(_LI("Creating instance of TrackedResource for " "resource:%s"), resource_name) return resource.TrackedResource( resource_name, self._tracked_resource_mappings[resource_name], 'quota_%s' % resource_name) def set_tracked_resource(self, resource_name, model_class, override=False): # Do not do anything if tracking is disabled by config if not cfg.CONF.QUOTAS.track_quota_usage: return current_model_class = self._tracked_resource_mappings.setdefault( resource_name, model_class) # Check whether setdefault also set the entry in the dict if current_model_class != model_class: LOG.debug("A model class is already defined for %(resource)s: " "%(current_model_class)s. Override:%(override)s", {'resource': resource_name, 'current_model_class': current_model_class, 'override': override}) if override: self._tracked_resource_mappings[resource_name] = model_class LOG.debug("Tracking information for resource: %s configured", resource_name) def is_tracked(self, resource_name): """Find out if a resource if tracked or not. :param resource_name: name of the resource. :returns True if resource_name is registered and tracked, otherwise False. Please note that here when False it returned it simply means that resource_name is not a TrackedResource instance, it does not necessarily mean that the resource is not registered. """ return resource_name in self._tracked_resource_mappings def register_resource(self, resource): if resource.name in self._resources: LOG.warning(_LW('%s is already registered'), resource.name) if resource.name in self._tracked_resource_mappings: resource.register_events() self._resources[resource.name] = resource def register_resources(self, resources): for res in resources: self.register_resource(res) def register_resource_by_name(self, resource_name, plural_name=None): """Register a resource by name.""" resource = self._create_resource_instance( resource_name, plural_name) self.register_resource(resource) def unregister_resources(self): """Unregister all resources.""" for (res_name, res) in self._resources.items(): if res_name in self._tracked_resource_mappings: res.unregister_events() self._resources.clear() self._tracked_resource_mappings.clear() def get_resource(self, resource_name): """Return a resource given its name. :returns: The resource instance or None if the resource is not found """ return self._resources.get(resource_name) @property def resources(self): return self._resources neutron-8.4.0/neutron/worker.py0000664000567000056710000000311113044372760017735 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from oslo_service import service from neutron.callbacks import events from neutron.callbacks import registry from neutron.callbacks import resources class NeutronWorker(service.ServiceBase): """Partial implementation of the ServiceBase ABC Subclasses will still need to add the other abstract methods defined in service.ServiceBase. See oslo_service for more details. If a plugin needs to handle synchronization with the Neutron database and do this only once instead of in every API worker, for instance, it would define a NeutronWorker class and the plugin would have get_workers return an array of NeutronWorker instances. For example: class MyPlugin(...): def get_workers(self): return [MyPluginWorker()] class MyPluginWorker(NeutronWorker): def start(self): super(MyPluginWorker, self).start() do_sync() """ def start(self): registry.notify(resources.PROCESS, events.AFTER_CREATE, self.start) neutron-8.4.0/neutron/neutron_plugin_base_v2.py0000664000567000056710000004124213044372760023104 0ustar jenkinsjenkins00000000000000# Copyright 2011 VMware, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ v2 Neutron Plug-in API specification. :class:`NeutronPluginBaseV2` provides the definition of minimum set of methods that needs to be implemented by a v2 Neutron Plug-in. """ import abc import six @six.add_metaclass(abc.ABCMeta) class NeutronPluginBaseV2(object): @abc.abstractmethod def create_subnet(self, context, subnet): """Create a subnet. Create a subnet, which represents a range of IP addresses that can be allocated to devices :param context: neutron api request context :param subnet: dictionary describing the subnet, with keys as listed in the :obj:`RESOURCE_ATTRIBUTE_MAP` object in :file:`neutron/api/v2/attributes.py`. All keys will be populated. """ pass @abc.abstractmethod def update_subnet(self, context, id, subnet): """Update values of a subnet. :param context: neutron api request context :param id: UUID representing the subnet to update. :param subnet: dictionary with keys indicating fields to update. valid keys are those that have a value of True for 'allow_put' as listed in the :obj:`RESOURCE_ATTRIBUTE_MAP` object in :file:`neutron/api/v2/attributes.py`. """ pass @abc.abstractmethod def get_subnet(self, context, id, fields=None): """Retrieve a subnet. :param context: neutron api request context :param id: UUID representing the subnet to fetch. :param fields: a list of strings that are valid keys in a subnet dictionary as listed in the :obj:`RESOURCE_ATTRIBUTE_MAP` object in :file:`neutron/api/v2/attributes.py`. Only these fields will be returned. """ pass @abc.abstractmethod def get_subnets(self, context, filters=None, fields=None, sorts=None, limit=None, marker=None, page_reverse=False): """Retrieve a list of subnets. The contents of the list depends on the identity of the user making the request (as indicated by the context) as well as any filters. 
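For example (illustrative), filters={'network_id': [net_id]} restricts the result to subnets of one network, and fields=['id', 'cidr'] trims each returned dict to those two keys.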
:param context: neutron api request context :param filters: a dictionary with keys that are valid keys for a subnet as listed in the :obj:`RESOURCE_ATTRIBUTE_MAP` object in :file:`neutron/api/v2/attributes.py`. Values in this dictionary are an iterable containing values that will be used for an exact match comparison for that value. Each result returned by this function will have matched one of the values for each key in filters. :param fields: a list of strings that are valid keys in a subnet dictionary as listed in the :obj:`RESOURCE_ATTRIBUTE_MAP` object in :file:`neutron/api/v2/attributes.py`. Only these fields will be returned. """ pass def get_subnets_count(self, context, filters=None): """Return the number of subnets. The result depends on the identity of the user making the request (as indicated by the context) as well as any filters. :param context: neutron api request context :param filters: a dictionary with keys that are valid keys for a network as listed in the :obj:`RESOURCE_ATTRIBUTE_MAP` object in :file:`neutron/api/v2/attributes.py`. Values in this dictionary are an iterable containing values that will be used for an exact match comparison for that value. Each result returned by this function will have matched one of the values for each key in filters. .. note:: this method is optional, as it was not part of the originally defined plugin API. """ raise NotImplementedError() @abc.abstractmethod def delete_subnet(self, context, id): """Delete a subnet. :param context: neutron api request context :param id: UUID representing the subnet to delete. """ pass def create_subnetpool(self, context, subnetpool): """Create a subnet pool. :param context: neutron api request context :param subnetpool: Dictionary representing the subnetpool to create. """ raise NotImplementedError() def update_subnetpool(self, context, id, subnetpool): """Update a subnet pool. :param context: neutron api request context :param subnetpool: Dictionary representing the subnetpool attributes to update. """ raise NotImplementedError() def get_subnetpool(self, context, id, fields=None): """Show a subnet pool. :param context: neutron api request context :param id: The UUID of the subnetpool to show. """ raise NotImplementedError() def get_subnetpools(self, context, filters=None, fields=None, sorts=None, limit=None, marker=None, page_reverse=False): """Retrieve list of subnet pools.""" raise NotImplementedError() def delete_subnetpool(self, context, id): """Delete a subnet pool. :param context: neutron api request context :param id: The UUID of the subnet pool to delete. """ raise NotImplementedError() @abc.abstractmethod def create_network(self, context, network): """Create a network. Create a network, which represents an L2 network segment which can have a set of subnets and ports associated with it. :param context: neutron api request context :param network: dictionary describing the network, with keys as listed in the :obj:`RESOURCE_ATTRIBUTE_MAP` object in :file:`neutron/api/v2/attributes.py`. All keys will be populated. """ pass @abc.abstractmethod def update_network(self, context, id, network): """Update values of a network. :param context: neutron api request context :param id: UUID representing the network to update. :param network: dictionary with keys indicating fields to update. valid keys are those that have a value of True for 'allow_put' as listed in the :obj:`RESOURCE_ATTRIBUTE_MAP` object in :file:`neutron/api/v2/attributes.py`. 
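A minimal illustrative call, mirroring the body the API layer passes in: plugin.update_network(context, net_id, {'network': {'admin_state_up': False}})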
""" pass @abc.abstractmethod def get_network(self, context, id, fields=None): """Retrieve a network. :param context: neutron api request context :param id: UUID representing the network to fetch. :param fields: a list of strings that are valid keys in a network dictionary as listed in the :obj:`RESOURCE_ATTRIBUTE_MAP` object in :file:`neutron/api/v2/attributes.py`. Only these fields will be returned. """ pass @abc.abstractmethod def get_networks(self, context, filters=None, fields=None, sorts=None, limit=None, marker=None, page_reverse=False): """Retrieve a list of networks. The contents of the list depends on the identity of the user making the request (as indicated by the context) as well as any filters. :param context: neutron api request context :param filters: a dictionary with keys that are valid keys for a network as listed in the :obj:`RESOURCE_ATTRIBUTE_MAP` object in :file:`neutron/api/v2/attributes.py`. Values in this dictionary are an iterable containing values that will be used for an exact match comparison for that value. Each result returned by this function will have matched one of the values for each key in filters. :param fields: a list of strings that are valid keys in a network dictionary as listed in the :obj:`RESOURCE_ATTRIBUTE_MAP` object in :file:`neutron/api/v2/attributes.py`. Only these fields will be returned. """ pass def get_networks_count(self, context, filters=None): """Return the number of networks. The result depends on the identity of the user making the request (as indicated by the context) as well as any filters. :param context: neutron api request context :param filters: a dictionary with keys that are valid keys for a network as listed in the :obj:`RESOURCE_ATTRIBUTE_MAP` object in :file:`neutron/api/v2/attributes.py`. Values in this dictionary are an iterable containing values that will be used for an exact match comparison for that value. Each result returned by this function will have matched one of the values for each key in filters. NOTE: this method is optional, as it was not part of the originally defined plugin API. """ raise NotImplementedError() @abc.abstractmethod def delete_network(self, context, id): """Delete a network. :param context: neutron api request context :param id: UUID representing the network to delete. """ pass @abc.abstractmethod def create_port(self, context, port): """Create a port. Create a port, which is a connection point of a device (e.g., a VM NIC) to attach to a L2 neutron network. :param context: neutron api request context :param port: dictionary describing the port, with keys as listed in the :obj:`RESOURCE_ATTRIBUTE_MAP` object in :file:`neutron/api/v2/attributes.py`. All keys will be populated. """ pass @abc.abstractmethod def update_port(self, context, id, port): """Update values of a port. :param context: neutron api request context :param id: UUID representing the port to update. :param port: dictionary with keys indicating fields to update. valid keys are those that have a value of True for 'allow_put' as listed in the :obj:`RESOURCE_ATTRIBUTE_MAP` object in :file:`neutron/api/v2/attributes.py`. """ pass @abc.abstractmethod def get_port(self, context, id, fields=None): """Retrieve a port. :param context: neutron api request context :param id: UUID representing the port to fetch. :param fields: a list of strings that are valid keys in a port dictionary as listed in the :obj:`RESOURCE_ATTRIBUTE_MAP` object in :file:`neutron/api/v2/attributes.py`. Only these fields will be returned. 
""" pass @abc.abstractmethod def get_ports(self, context, filters=None, fields=None, sorts=None, limit=None, marker=None, page_reverse=False): """Retrieve a list of ports. The contents of the list depends on the identity of the user making the request (as indicated by the context) as well as any filters. :param context: neutron api request context :param filters: a dictionary with keys that are valid keys for a port as listed in the :obj:`RESOURCE_ATTRIBUTE_MAP` object in :file:`neutron/api/v2/attributes.py`. Values in this dictionary are an iterable containing values that will be used for an exact match comparison for that value. Each result returned by this function will have matched one of the values for each key in filters. :param fields: a list of strings that are valid keys in a port dictionary as listed in the :obj:`RESOURCE_ATTRIBUTE_MAP` object in :file:`neutron/api/v2/attributes.py`. Only these fields will be returned. """ pass def get_ports_count(self, context, filters=None): """Return the number of ports. The result depends on the identity of the user making the request (as indicated by the context) as well as any filters. :param context: neutron api request context :param filters: a dictionary with keys that are valid keys for a network as listed in the :obj:`RESOURCE_ATTRIBUTE_MAP` object in :file:`neutron/api/v2/attributes.py`. Values in this dictionary are an iterable containing values that will be used for an exact match comparison for that value. Each result returned by this function will have matched one of the values for each key in filters. .. note:: this method is optional, as it was not part of the originally defined plugin API. """ raise NotImplementedError() @abc.abstractmethod def delete_port(self, context, id): """Delete a port. :param context: neutron api request context :param id: UUID representing the port to delete. """ pass def start_rpc_listeners(self): """Start the RPC listeners. Most plugins start RPC listeners implicitly on initialization. In order to support multiple process RPC, the plugin needs to expose control over when this is started. .. note:: this method is optional, as it was not part of the originally defined plugin API. """ raise NotImplementedError() def start_rpc_state_reports_listener(self): """Start the RPC listeners consuming state reports queue. This optional method creates rpc consumer for REPORTS queue only. .. note:: this method is optional, as it was not part of the originally defined plugin API. """ raise NotImplementedError() def rpc_workers_supported(self): """Return whether the plugin supports multiple RPC workers. A plugin that supports multiple RPC workers should override the start_rpc_listeners method to ensure that this method returns True and that start_rpc_listeners is called at the appropriate time. Alternately, a plugin can override this method to customize detection of support for multiple rpc workers .. note:: this method is optional, as it was not part of the originally defined plugin API. """ return (self.__class__.start_rpc_listeners != NeutronPluginBaseV2.start_rpc_listeners) def rpc_state_report_workers_supported(self): """Return whether the plugin supports state report RPC workers. .. note:: this method is optional, as it was not part of the originally defined plugin API. 
""" return (self.__class__.start_rpc_state_reports_listener != NeutronPluginBaseV2.start_rpc_state_reports_listener) def get_workers(self): """Returns a collection NeutronWorker instances If a plugin needs to define worker processes outside of API/RPC workers then it will override this and return a collection of NeutronWorker instances """ return () neutron-8.4.0/neutron/i18n.py0000664000567000056710000000241313044372760017207 0ustar jenkinsjenkins00000000000000# All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # TODO(dougwig) - remove this file at the beginning of N. from debtcollector import moves import neutron._i18n message = "moved to neutron._i18n; please migrate to local oslo_i18n " \ "usage, as defined in the devref and at " \ "http://docs.openstack.org/developer/oslo.i18n/usage.html" _ = moves.moved_function(neutron._i18n._, '_', __name__, message=message) _LI = moves.moved_function(neutron._i18n._LI, '_LI', __name__, message=message) _LW = moves.moved_function(neutron._i18n._LW, '_LW', __name__, message=message) _LE = moves.moved_function(neutron._i18n._LE, '_LE', __name__, message=message) _LC = moves.moved_function(neutron._i18n._LC, '_LC', __name__, message=message) neutron-8.4.0/neutron/debug/0000775000567000056710000000000013044373210017133 5ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/debug/__init__.py0000664000567000056710000000000013044372736021246 0ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/debug/debug_agent.py0000664000567000056710000001614013044372760021764 0ustar jenkinsjenkins00000000000000# Copyright 2012, Nachi Ueno, NTT MCL, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import shlex import socket import netaddr from oslo_log import log as logging from neutron._i18n import _LW from neutron.agent.linux import dhcp from neutron.agent.linux import ip_lib from neutron.common import constants from neutron.extensions import portbindings LOG = logging.getLogger(__name__) DEVICE_OWNER_NETWORK_PROBE = 'network:probe' DEVICE_OWNER_COMPUTE_PROBE = constants.DEVICE_OWNER_COMPUTE_PREFIX + 'probe' class NeutronDebugAgent(object): def __init__(self, conf, client, driver): self.conf = conf self.client = client self.driver = driver def _get_namespace(self, port): return "qprobe-%s" % port.id def create_probe(self, network_id, device_owner='network'): network = self._get_network(network_id) bridge = None if network.external: bridge = self.conf.external_network_bridge port = self._create_port(network, device_owner) interface_name = self.driver.get_device_name(port) namespace = self._get_namespace(port) if ip_lib.device_exists(interface_name, namespace=namespace): LOG.debug('Reusing existing device: %s.', interface_name) else: self.driver.plug(network.id, port.id, interface_name, port.mac_address, bridge=bridge, namespace=namespace) ip_cidrs = [] for fixed_ip in port.fixed_ips: subnet = fixed_ip.subnet net = netaddr.IPNetwork(subnet.cidr) ip_cidr = '%s/%s' % (fixed_ip.ip_address, net.prefixlen) ip_cidrs.append(ip_cidr) self.driver.init_l3(interface_name, ip_cidrs, namespace=namespace) return port def _get_subnet(self, subnet_id): subnet_dict = self.client.show_subnet(subnet_id)['subnet'] return dhcp.DictModel(subnet_dict) def _get_network(self, network_id): network_dict = self.client.show_network(network_id)['network'] network = dhcp.DictModel(network_dict) network.external = network_dict.get('router:external') obj_subnet = [self._get_subnet(s_id) for s_id in network.subnets] network.subnets = obj_subnet return network def clear_probes(self): """Returns number of deleted probes""" ports = self.client.list_ports( device_id=socket.gethostname(), device_owner=[DEVICE_OWNER_NETWORK_PROBE, DEVICE_OWNER_COMPUTE_PROBE]) info = ports['ports'] for port in info: self.delete_probe(port['id']) return len(info) def delete_probe(self, port_id): port = dhcp.DictModel(self.client.show_port(port_id)['port']) network = self._get_network(port.network_id) bridge = None if network.external: bridge = self.conf.external_network_bridge ip = ip_lib.IPWrapper() namespace = self._get_namespace(port) if ip.netns.exists(namespace): self.driver.unplug(self.driver.get_device_name(port), bridge=bridge, namespace=namespace) try: ip.netns.delete(namespace) except Exception: LOG.warning(_LW('Failed to delete namespace %s'), namespace) else: self.driver.unplug(self.driver.get_device_name(port), bridge=bridge) self.client.delete_port(port.id) def list_probes(self): ports = self.client.list_ports( device_owner=[DEVICE_OWNER_NETWORK_PROBE, DEVICE_OWNER_COMPUTE_PROBE]) info = ports['ports'] for port in info: port['device_name'] = self.driver.get_device_name( dhcp.DictModel(port)) return info def exec_command(self, port_id, command=None): port = dhcp.DictModel(self.client.show_port(port_id)['port']) ip = ip_lib.IPWrapper() namespace = self._get_namespace(port) if not command: return "sudo ip netns exec %s" % self._get_namespace(port) namespace = ip.ensure_namespace(namespace) return namespace.netns.execute(shlex.split(command)) def ensure_probe(self, network_id): ports = self.client.list_ports(network_id=network_id, device_id=socket.gethostname(), device_owner=DEVICE_OWNER_NETWORK_PROBE) info = ports.get('ports', 
[]) if info: return dhcp.DictModel(info[0]) else: return self.create_probe(network_id) def ping_all(self, network_id=None, timeout=1): if network_id: ports = self.client.list_ports(network_id=network_id)['ports'] else: ports = self.client.list_ports()['ports'] result = "" for port in ports: probe = self.ensure_probe(port['network_id']) if port['device_owner'] == DEVICE_OWNER_NETWORK_PROBE: continue for fixed_ip in port['fixed_ips']: address = fixed_ip['ip_address'] subnet = self._get_subnet(fixed_ip['subnet_id']) if subnet.ip_version == 4: ping_command = 'ping' else: ping_command = 'ping6' result += self.exec_command(probe.id, '%s -c 1 -w %s %s' % (ping_command, timeout, address)) return result def _create_port(self, network, device_owner): host = self.conf.host body = {'port': {'admin_state_up': True, 'network_id': network.id, 'device_id': '%s' % socket.gethostname(), 'device_owner': '%s:probe' % device_owner, 'tenant_id': network.tenant_id, portbindings.HOST_ID: host, 'fixed_ips': [dict(subnet_id=s.id) for s in network.subnets]}} port_dict = self.client.create_port(body)['port'] port = dhcp.DictModel(port_dict) port.network = network for fixed_ip in port.fixed_ips: fixed_ip.subnet = self._get_subnet(fixed_ip.subnet_id) return port neutron-8.4.0/neutron/debug/README0000664000567000056710000000271213044372736020031 0ustar jenkinsjenkins00000000000000Debug Helper Script for Neutron - Configure export NEUTRON_TEST_CONFIG_FILE=/etc/neutron/debug.ini or export NEUTRON_TEST_CONFIG_FILE=/etc/neutron/l3_agent.ini You can also specify the config file with the --config-file option - Usage neutron-debug commands probe-create Create probe port - create port and interface, then plug it in. This command returns the port id of a probe port. A probe port is a port used for testing; the port id serves as the probe id. We can have multiple probes in a network, in order to check connectivity between ports. neutron-debug probe-exec probe_id_1 'nc -l 192.168.100.3 22' neutron-debug probe-exec probe_id_2 'nc -vz 192.168.100.4 22' Note: You should use a user and a tenant with permission to modify the network and subnet you want to probe. For example, you need to be an admin user to probe an external network. probe-delete Delete probe - delete port then unplug probe-exec 'command' Exec commands on the namespace of the probe `probe-exec ` 'interactive command' Exec interactive command (e.g., ssh) probe-list List probes probe-clear Clear all probes ping-all --id --timeout 1 (optional) ping-all is an all-in-one command to ping all fixed IPs in all networks or in a specified network. The command automatically creates a probe if needed. neutron-debug extends the shell of neutronclient, so you can use all the commands of neutron neutron-8.4.0/neutron/debug/commands.py0000664000567000056710000001047713044372736021327 0ustar jenkinsjenkins00000000000000# Copyright 2012, Nachi Ueno, NTT MCL, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License.
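The CLI classes below are thin wrappers over NeutronDebugAgent. A rough sketch of driving the agent directly (conf, client and driver are assumed to be built the way shell.py does further down; error handling omitted):

    from neutron.debug import debug_agent

    agent = debug_agent.NeutronDebugAgent(conf, client, driver)
    port = agent.create_probe(network_id, device_owner='network')
    # Runs inside the probe's qprobe-<port-id> namespace
    print(agent.exec_command(port.id, 'ip addr show'))
    agent.delete_probe(port.id)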
from cliff import lister from neutronclient.common import utils from neutronclient.neutron import v2_0 as client from neutronclient.neutron.v2_0 import port from neutron._i18n import _, _LI class ProbeCommand(client.NeutronCommand): def get_debug_agent(self): return self.app.debug_agent class CreateProbe(ProbeCommand): """Create probe port and interface, then plug it in.""" def get_parser(self, prog_name): parser = super(CreateProbe, self).get_parser(prog_name) parser.add_argument( 'id', metavar='network_id', help=_('ID of network to probe')) parser.add_argument( '--device-owner', default='network', choices=['network', 'compute'], help=_('Owner type of the device: network/compute')) return parser def take_action(self, parsed_args): debug_agent = self.get_debug_agent() probe_port = debug_agent.create_probe(parsed_args.id, parsed_args.device_owner) self.log.info(_('Probe created: %s'), probe_port.id) class DeleteProbe(ProbeCommand): """Delete probe - delete port then unplug.""" def get_parser(self, prog_name): parser = super(DeleteProbe, self).get_parser(prog_name) parser.add_argument( 'id', metavar='port_id', help=_('ID of probe port to delete')) return parser def take_action(self, parsed_args): debug_agent = self.get_debug_agent() debug_agent.delete_probe(parsed_args.id) self.log.info(_('Probe %s deleted'), parsed_args.id) class ListProbe(ProbeCommand, lister.Lister): """List probes.""" _formatters = {'fixed_ips': port._format_fixed_ips, } def take_action(self, parsed_args): debug_agent = self.get_debug_agent() info = debug_agent.list_probes() columns = sorted(info[0].keys()) if info else [] return (columns, (utils.get_item_properties( s, columns, formatters=self._formatters, ) for s in info), ) class ClearProbe(ProbeCommand): """Clear all probes.""" def take_action(self, parsed_args): debug_agent = self.get_debug_agent() cleared_probes_count = debug_agent.clear_probes() self.log.info(_LI('%d probe(s) deleted'), cleared_probes_count) class ExecProbe(ProbeCommand): """Exec commands on the namespace of the probe.""" def get_parser(self, prog_name): parser = super(ExecProbe, self).get_parser(prog_name) parser.add_argument( 'id', metavar='port_id', help=_('ID of probe port to execute command')) parser.add_argument( 'command', metavar='command', nargs='?', default=None, help=_('Command to execute')) return parser def take_action(self, parsed_args): debug_agent = self.get_debug_agent() result = debug_agent.exec_command(parsed_args.id, parsed_args.command) self.app.stdout.write(result + '\n') class PingAll(ProbeCommand): """Ping all fixed_ips.""" def get_parser(self, prog_name): parser = super(PingAll, self).get_parser(prog_name) parser.add_argument( '--timeout', metavar='', default=10, help=_('Ping timeout')) parser.add_argument( '--id', metavar='network_id', default=None, help=_('ID of network')) return parser def take_action(self, parsed_args): debug_agent = self.get_debug_agent() result = debug_agent.ping_all(parsed_args.id, timeout=parsed_args.timeout) self.app.stdout.write(result + '\n') neutron-8.4.0/neutron/debug/shell.py0000664000567000056710000000646013044372760020630 0ustar jenkinsjenkins00000000000000# Copyright 2012, Nachi Ueno, NTT MCL, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License.
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import sys from oslo_config import cfg from oslo_utils import importutils from neutron._i18n import _ from neutron.agent.common import config from neutron.agent.common import utils from neutron.agent.linux import interface from neutron.debug import debug_agent from neutronclient.common import exceptions as exc from neutronclient import shell COMMAND_V2 = { 'probe-create': importutils.import_class( 'neutron.debug.commands.CreateProbe'), 'probe-delete': importutils.import_class( 'neutron.debug.commands.DeleteProbe'), 'probe-list': importutils.import_class( 'neutron.debug.commands.ListProbe'), 'probe-clear': importutils.import_class( 'neutron.debug.commands.ClearProbe'), 'probe-exec': importutils.import_class( 'neutron.debug.commands.ExecProbe'), 'ping-all': importutils.import_class( 'neutron.debug.commands.PingAll'), #TODO(nati) ping, netcat , nmap, bench } COMMANDS = {'2.0': COMMAND_V2} class NeutronDebugShell(shell.NeutronShell): def __init__(self, api_version): super(NeutronDebugShell, self).__init__(api_version) for k, v in COMMANDS[api_version].items(): self.command_manager.add_command(k, v) def build_option_parser(self, description, version): parser = super(NeutronDebugShell, self).build_option_parser( description, version) default = ( shell.env('NEUTRON_TEST_CONFIG_FILE') or shell.env('QUANTUM_TEST_CONFIG_FILE') ) parser.add_argument( '--config-file', default=default, help=_('Config file for interface driver ' '(You may also use l3_agent.ini)')) return parser def initialize_app(self, argv): super(NeutronDebugShell, self).initialize_app(argv) if not self.options.config_file: raise exc.CommandError( _("You must provide a config file for bridge -" " either --config-file or env[NEUTRON_TEST_CONFIG_FILE]")) client = self.client_manager.neutron cfg.CONF.register_opts(interface.OPTS) cfg.CONF.register_opts(config.EXT_NET_BRIDGE_OPTS) config.register_interface_driver_opts_helper(cfg.CONF) cfg.CONF(['--config-file', self.options.config_file]) config.setup_logging() driver = utils.load_interface_driver(cfg.CONF) self.debug_agent = debug_agent.NeutronDebugAgent(cfg.CONF, client, driver) def main(argv=None): return NeutronDebugShell(shell.NEUTRON_API_VERSION).run( argv or sys.argv[1:]) neutron-8.4.0/neutron/common/0000775000567000056710000000000013044373210017335 5ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/common/config.py0000664000567000056710000003566613044372760021205 0ustar jenkinsjenkins00000000000000# Copyright 2011 VMware, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
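An illustrative programmatic invocation of the debug shell defined above (the config path is a placeholder; any interface-driver config such as l3_agent.ini also works):

    from neutron.debug import shell

    # Equivalent to the CLI: neutron-debug --config-file /etc/neutron/debug.ini probe-list
    shell.main(['--config-file', '/etc/neutron/debug.ini', 'probe-list'])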
""" Routines for configuring Neutron """ import sys from keystoneauth1 import loading as ks_loading from oslo_config import cfg from oslo_db import options as db_options from oslo_log import log as logging import oslo_messaging from oslo_middleware import cors from oslo_service import wsgi from neutron._i18n import _, _LI from neutron.api.v2 import attributes from neutron.common import constants from neutron.common import utils from neutron import policy from neutron import version LOG = logging.getLogger(__name__) core_opts = [ cfg.StrOpt('bind_host', default='0.0.0.0', help=_("The host IP to bind to")), cfg.PortOpt('bind_port', default=9696, help=_("The port to bind to")), cfg.StrOpt('api_extensions_path', default="", help=_("The path for API extensions. " "Note that this can be a colon-separated list of paths. " "For example: api_extensions_path = " "extensions:/path/to/more/exts:/even/more/exts. " "The __path__ of neutron.extensions is appended to " "this, so if your extensions are in there you don't " "need to specify them here.")), cfg.StrOpt('auth_strategy', default='keystone', help=_("The type of authentication to use")), cfg.StrOpt('core_plugin', help=_("The core plugin Neutron will use")), cfg.ListOpt('service_plugins', default=[], help=_("The service plugins Neutron will use")), cfg.StrOpt('base_mac', default="fa:16:3e:00:00:00", help=_("The base MAC address Neutron will use for VIFs. " "The first 3 octets will remain unchanged. If the 4th " "octet is not 00, it will also be used. The others " "will be randomly generated.")), cfg.IntOpt('mac_generation_retries', default=16, help=_("How many times Neutron will retry MAC generation")), cfg.BoolOpt('allow_bulk', default=True, help=_("Allow the usage of the bulk API")), cfg.BoolOpt('allow_pagination', default=False, help=_("Allow the usage of the pagination")), cfg.BoolOpt('allow_sorting', default=False, help=_("Allow the usage of the sorting")), cfg.StrOpt('pagination_max_limit', default="-1", help=_("The maximum number of items returned in a single " "response, value was 'infinite' or negative integer " "means no limit")), cfg.ListOpt('default_availability_zones', default=[], help=_("Default value of availability zone hints. The " "availability zone aware schedulers use this when " "the resources availability_zone_hints is empty. " "Multiple availability zones can be specified by a " "comma separated string. This value can be empty. " "In this case, even if availability_zone_hints for " "a resource is empty, availability zone is " "considered for high availability while scheduling " "the resource.")), cfg.IntOpt('max_dns_nameservers', default=5, help=_("Maximum number of DNS nameservers per subnet")), cfg.IntOpt('max_subnet_host_routes', default=20, help=_("Maximum number of host routes per subnet")), cfg.IntOpt('max_fixed_ips_per_port', default=5, deprecated_for_removal=True, help=_("Maximum number of fixed ips per port. This option " "is deprecated and will be removed in the N " "release.")), cfg.StrOpt('default_ipv4_subnet_pool', deprecated_for_removal=True, help=_("Default IPv4 subnet pool to be used for automatic " "subnet CIDR allocation. " "Specifies by UUID the pool to be used in case where " "creation of a subnet is being called without a " "subnet pool ID. If not set then no pool " "will be used unless passed explicitly to the subnet " "create. 
If no pool is used, then a CIDR must be passed " "to create a subnet and that subnet will not be " "allocated from any pool; it will be considered part of " "the tenant's private address space. This option is " "deprecated for removal in the N release.")), cfg.StrOpt('default_ipv6_subnet_pool', deprecated_for_removal=True, help=_("Default IPv6 subnet pool to be used for automatic " "subnet CIDR allocation. " "Specifies by UUID the pool to be used in case where " "creation of a subnet is being called without a " "subnet pool ID. See the description for " "default_ipv4_subnet_pool for more information. This " "option is deprecated for removal in the N release.")), cfg.BoolOpt('ipv6_pd_enabled', default=False, help=_("Enables IPv6 Prefix Delegation for automatic subnet " "CIDR allocation. " "Set to True to enable IPv6 Prefix Delegation for " "subnet allocation in a PD-capable environment. Users " "making subnet creation requests for IPv6 subnets " "without providing a CIDR or subnetpool ID will be " "given a CIDR via the Prefix Delegation mechanism. " "Note that enabling PD will override the behavior of " "the default IPv6 subnetpool.")), cfg.IntOpt('dhcp_lease_duration', default=86400, deprecated_name='dhcp_lease_time', help=_("DHCP lease duration (in seconds). Use -1 to tell " "dnsmasq to use infinite lease times.")), cfg.StrOpt('dns_domain', default='openstacklocal', help=_('Domain to use for building the hostnames')), cfg.StrOpt('external_dns_driver', help=_('Driver for external DNS integration.')), cfg.BoolOpt('dhcp_agent_notification', default=True, help=_("Allow sending resource operation" " notification to DHCP agent")), cfg.BoolOpt('allow_overlapping_ips', default=False, help=_("Allow overlapping IP support in Neutron. " "Attention: the following parameter MUST be set to " "False if Neutron is being used in conjunction with " "Nova security groups.")), cfg.StrOpt('host', default=utils.get_hostname(), sample_default='example.domain', help=_("Hostname to be used by the Neutron server, agents and " "services running on this machine. All the agents and " "services running on this machine must use the same " "host value.")), cfg.BoolOpt('force_gateway_on_subnet', default=True, deprecated_for_removal=True, help=_("Ensure that configured gateway is on subnet. " "For IPv6, validate only if gateway is not a link " "local address. Deprecated, to be removed during the " "Newton release, at which point the gateway will not " "be forced on to subnet.")), cfg.BoolOpt('notify_nova_on_port_status_changes', default=True, help=_("Send notification to nova when port status changes")), cfg.BoolOpt('notify_nova_on_port_data_changes', default=True, help=_("Send notification to nova when port data (fixed_ips/" "floatingip) changes so nova can update its cache.")), cfg.IntOpt('send_events_interval', default=2, help=_('Number of seconds between sending events to nova if ' 'there are any events to send.')), cfg.BoolOpt('advertise_mtu', default=True, help=_('If True, advertise network MTU values if core plugin ' 'calculates them. MTU is advertised to running ' 'instances via DHCP and RA MTU options.')), cfg.StrOpt('ipam_driver', help=_("Neutron IPAM (IP address management) driver to use. " "If ipam_driver is not set (default behavior), no IPAM " "driver is used. 
In order to use the reference " "implementation of Neutron IPAM driver, " "use 'internal'.")), cfg.BoolOpt('vlan_transparent', default=False, help=_('If True, then allow plugins that support it to ' 'create VLAN transparent networks.')), cfg.StrOpt('web_framework', default='legacy', choices=('legacy', 'pecan'), help=_("This will choose the web framework in which to run " "the Neutron API server. 'pecan' is a new experimental " "rewrite of the API server.")), cfg.IntOpt('global_physnet_mtu', default=constants.DEFAULT_NETWORK_MTU, deprecated_name='segment_mtu', deprecated_group='ml2', help=_('MTU of the underlying physical network. Neutron uses ' 'this value to calculate MTU for all virtual network ' 'components. For flat and VLAN networks, neutron uses ' 'this value without modification. For overlay networks ' 'such as VXLAN, neutron automatically subtracts the ' 'overlay protocol overhead from this value. Defaults ' 'to 1500, the standard value for Ethernet. If using the ' 'ML2 plug-in with overlay/tunnel networks, also ' 'configure the ml2 path_mtu option with the same value ' 'as the global_physnet_mtu option.')) ] core_cli_opts = [ cfg.StrOpt('state_path', default='/var/lib/neutron', help=_("Where to store Neutron state files. " "This directory must be writable by the agent.")), ] # Register the configuration options cfg.CONF.register_opts(core_opts) cfg.CONF.register_cli_opts(core_cli_opts) wsgi.register_opts(cfg.CONF) # Ensure that the control exchange is set correctly oslo_messaging.set_transport_defaults(control_exchange='neutron') def set_db_defaults(): # Update the default QueuePool parameters. These can be tweaked by the # conf variables - max_pool_size, max_overflow and pool_timeout db_options.set_defaults( cfg.CONF, connection='sqlite://', sqlite_db='', max_pool_size=10, max_overflow=20, pool_timeout=10) set_db_defaults() NOVA_CONF_SECTION = 'nova' ks_loading.register_auth_conf_options(cfg.CONF, NOVA_CONF_SECTION) ks_loading.register_session_conf_options(cfg.CONF, NOVA_CONF_SECTION) nova_opts = [ cfg.StrOpt('region_name', help=_('Name of nova region to use. Useful if keystone manages' ' more than one region.')), cfg.StrOpt('endpoint_type', default='public', choices=['public', 'admin', 'internal'], help=_('Type of the nova endpoint to use. This endpoint will' ' be looked up in the keystone catalog and should be' ' one of public, internal or admin.')), ] cfg.CONF.register_opts(nova_opts, group=NOVA_CONF_SECTION) logging.register_options(cfg.CONF) def init(args, **kwargs): cfg.CONF(args=args, project='neutron', version='%%(prog)s %s' % version.version_info.release_string(), **kwargs) # FIXME(ihrachys): if import is put in global, circular import # failure occurs from neutron.common import rpc as n_rpc n_rpc.init(cfg.CONF) # Validate that the base_mac is of the correct format msg = attributes._validate_regex(cfg.CONF.base_mac, attributes.MAC_PATTERN) if msg: msg = _("Base MAC: %s") % msg raise Exception(msg) def setup_logging(): """Sets up the logging options for a log with supplied name.""" product_name = "neutron" logging.setup(cfg.CONF, product_name) LOG.info(_LI("Logging enabled!")) LOG.info(_LI("%(prog)s version %(version)s"), {'prog': sys.argv[0], 'version': version.version_info.release_string()}) LOG.debug("command line: %s", " ".join(sys.argv)) def reset_service(): # Reset worker state when SIGHUP is received. # Note that this is called only when a service is running in # daemon mode.
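# On SIGHUP the calls below re-run the initialization steps that depend # on mutable configuration: logging options, config defaults (e.g. CORS) # and the policy engine.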
setup_logging() set_config_defaults() policy.refresh() def load_paste_app(app_name): """Builds and returns a WSGI app from a paste config file. :param app_name: Name of the application to load """ loader = wsgi.Loader(cfg.CONF) app = loader.load_app(app_name) return app def set_config_defaults(): """This method updates all configuration default values.""" set_cors_middleware_defaults() def set_cors_middleware_defaults(): """Update default configuration options for oslo.middleware.""" # CORS Defaults # TODO(krotscheck): Update with https://review.openstack.org/#/c/285368/ cfg.set_defaults(cors.CORS_OPTS, allow_headers=['X-Auth-Token', 'X-Identity-Status', 'X-Roles', 'X-Service-Catalog', 'X-User-Id', 'X-Tenant-Id', 'X-OpenStack-Request-ID'], expose_headers=['X-Auth-Token', 'X-Subject-Token', 'X-Service-Token', 'X-OpenStack-Request-ID', 'OpenStack-Volume-microversion'], allow_methods=['GET', 'PUT', 'POST', 'DELETE', 'PATCH'] ) neutron-8.4.0/neutron/common/test_lib.py0000664000567000056710000000405313044372736021532 0ustar jenkinsjenkins00000000000000# Copyright (c) 2010 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # Colorizer Code is borrowed from Twisted: # Copyright (c) 2001-2010 Twisted Matrix Laboratories. # # Permission is hereby granted, free of charge, to any person obtaining # a copy of this software and associated documentation files (the # "Software"), to deal in the Software without restriction, including # without limitation the rights to use, copy, modify, merge, publish, # distribute, sublicense, and/or sell copies of the Software, and to # permit persons to whom the Software is furnished to do so, subject to # the following conditions: # # The above copyright notice and this permission notice shall be # included in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, # EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF # MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND # NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE # LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION # OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION # WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. # describes parameters used by different unit/functional tests # a plugin-specific testing mechanism should import this dictionary # and override the values in it if needed (e.g., run_tests.py in # neutron/plugins/openvswitch/ ) test_config = {} neutron-8.4.0/neutron/common/_deprecate.py0000664000567000056710000000317113044372760022015 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import copy import debtcollector import inspect import os from neutron._i18n import _ class _DeprecateSubset(object): def __init__(self, my_globals, other_mod): self.other_mod = other_mod self.my_globals = copy.copy(my_globals) def __getattr__(self, name): a = self.my_globals.get(name) if (not name.startswith("__") and not inspect.ismodule(a) and name in vars(self.other_mod)): # These should be enabled after most have been cleaned up # in neutron proper, which may not happen during the busy M-3. if os.getenv('NEUTRON_SHOW_DEPRECATION_WARNINGS'): debtcollector.deprecate( name, message='moved to neutron_lib', version='mitaka', removal_version='newton', stacklevel=4) return vars(self.other_mod)[name] try: return self.my_globals[name] except KeyError: raise AttributeError( _("'module' object has no attribute '%s'") % name) neutron-8.4.0/neutron/common/topics.py0000664000567000056710000000360713044372736021232 0ustar jenkinsjenkins00000000000000# Copyright (c) 2012 OpenStack Foundation. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. NETWORK = 'network' SUBNET = 'subnet' PORT = 'port' SECURITY_GROUP = 'security_group' L2POPULATION = 'l2population' DVR = 'dvr' RESOURCES = 'resources' CREATE = 'create' DELETE = 'delete' UPDATE = 'update' AGENT = 'q-agent-notifier' PLUGIN = 'q-plugin' SERVER_RESOURCE_VERSIONS = 'q-server-resource-versions' L3PLUGIN = 'q-l3-plugin' REPORTS = 'q-reports-plugin' DHCP = 'q-dhcp-notifer' METERING_PLUGIN = 'q-metering-plugin' L3_AGENT = 'l3_agent' DHCP_AGENT = 'dhcp_agent' METERING_AGENT = 'metering_agent' RESOURCE_TOPIC_PATTERN = "neutron-vo-%(resource_type)s-%(version)s" def get_topic_name(prefix, table, operation, host=None): """Create a topic name. The topic name needs to be synced between the agent and the plugin. The plugin will send a fanout message to all of the listening agents so that the agents in turn can perform their updates accordingly. :param prefix: Common prefix for the plugin/agent message queues. :param table: The table in question (NETWORK, SUBNET, PORT). :param operation: The operation that invokes notification (CREATE, DELETE, UPDATE) :param host: Add host to the topic :returns: The topic name. """ if host: return '%s-%s-%s.%s' % (prefix, table, operation, host) return '%s-%s-%s' % (prefix, table, operation) neutron-8.4.0/neutron/common/eventlet_utils.py0000664000567000056710000000213013044372760022762 0ustar jenkinsjenkins00000000000000# Copyright (c) 2015 Cloudbase Solutions. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import os import eventlet def monkey_patch(): if os.name == 'nt': # eventlet monkey patching the os and thread modules causes # subprocess.Popen to fail on Windows when using pipes due # to missing non-blocking IO support. # # bug report on eventlet: # https://bitbucket.org/eventlet/eventlet/issue/132/ # eventletmonkey_patch-breaks eventlet.monkey_patch(os=False, thread=False) else: eventlet.monkey_patch() neutron-8.4.0/neutron/common/__init__.py0000664000567000056710000000000013044372736021450 0ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/common/utils.py0000664000567000056710000010127713044372760021070 0ustar jenkinsjenkins00000000000000# Copyright 2011, VMware, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # # Borrowed from nova code base, more utilities will be added/borrowed as and # when needed. """Utilities and helper functions.""" import collections import datetime import decimal import errno import functools import hashlib import multiprocessing import os import random import signal import socket import sys import tempfile import time import uuid import debtcollector from eventlet.green import subprocess import netaddr from oslo_concurrency import lockutils from oslo_config import cfg from oslo_db import exception as db_exc from oslo_log import log as logging from oslo_utils import excutils from oslo_utils import importutils from oslo_utils import reflection import six from stevedore import driver from neutron._i18n import _, _LE from neutron.common import constants as n_const from neutron.db import api as db_api TIME_FORMAT = "%Y-%m-%dT%H:%M:%SZ" LOG = logging.getLogger(__name__) SYNCHRONIZED_PREFIX = 'neutron-' synchronized = lockutils.synchronized_with_prefix(SYNCHRONIZED_PREFIX) class cache_method_results(object): """This decorator is intended for object methods only.""" def __init__(self, func): self.func = func functools.update_wrapper(self, func) self._first_call = True self._not_cached = object() def _get_from_cache(self, target_self, *args, **kwargs): target_self_cls_name = reflection.get_class_name(target_self, fully_qualified=False) func_name = "%(module)s.%(class)s.%(func_name)s" % { 'module': target_self.__module__, 'class': target_self_cls_name, 'func_name': self.func.__name__, } key = (func_name,) + args if kwargs: key += dict2tuple(kwargs) try: item = target_self._cache.get(key, self._not_cached) except TypeError: LOG.debug("Method %(func_name)s cannot be cached due to " "unhashable parameters: args: %(args)s, kwargs: " "%(kwargs)s", {'func_name': func_name, 'args': args, 'kwargs': kwargs}) return self.func(target_self, *args, **kwargs) if 
item is self._not_cached: item = self.func(target_self, *args, **kwargs) target_self._cache.set(key, item, None) return item def __call__(self, target_self, *args, **kwargs): target_self_cls_name = reflection.get_class_name(target_self, fully_qualified=False) if not hasattr(target_self, '_cache'): raise NotImplementedError( _("Instance of class %(module)s.%(class)s must contain _cache " "attribute") % { 'module': target_self.__module__, 'class': target_self_cls_name}) if not target_self._cache: if self._first_call: LOG.debug("Instance of class %(module)s.%(class)s doesn't " "contain attribute _cache, therefore results " "cannot be cached for %(func_name)s.", {'module': target_self.__module__, 'class': target_self_cls_name, 'func_name': self.func.__name__}) self._first_call = False return self.func(target_self, *args, **kwargs) return self._get_from_cache(target_self, *args, **kwargs) def __get__(self, obj, objtype): return functools.partial(self.__call__, obj) @debtcollector.removals.remove(message="This will be removed in the N cycle.") def read_cached_file(filename, cache_info, reload_func=None): """Read from a file if it has been modified. :param cache_info: dictionary to hold opaque cache. :param reload_func: optional function to be called with data when file is reloaded due to a modification. :returns: data from file """ mtime = os.path.getmtime(filename) if not cache_info or mtime != cache_info.get('mtime'): LOG.debug("Reloading cached file %s", filename) with open(filename) as fap: cache_info['data'] = fap.read() cache_info['mtime'] = mtime if reload_func: reload_func(cache_info['data']) return cache_info['data'] @debtcollector.removals.remove(message="This will be removed in the N cycle.") def find_config_file(options, config_file): """Return the first config file found. We search for the paste config file in the following order: * If --config-file option is used, use that * Search for the configuration files via common cfg directories :retval Full path to config file, or None if no config file found """ fix_path = lambda p: os.path.abspath(os.path.expanduser(p)) if options.get('config_file'): if os.path.exists(options['config_file']): return fix_path(options['config_file']) dir_to_common = os.path.dirname(os.path.abspath(__file__)) root = os.path.join(dir_to_common, '..', '..', '..', '..') # Handle standard directory search for the config file config_file_dirs = [fix_path(os.path.join(os.getcwd(), 'etc')), fix_path(os.path.join('~', '.neutron-venv', 'etc', 'neutron')), fix_path('~'), os.path.join(cfg.CONF.state_path, 'etc'), os.path.join(cfg.CONF.state_path, 'etc', 'neutron'), fix_path(os.path.join('~', '.local', 'etc', 'neutron')), '/usr/etc/neutron', '/usr/local/etc/neutron', '/etc/neutron/', '/etc'] if 'plugin' in options: config_file_dirs = [ os.path.join(x, 'neutron', 'plugins', options['plugin']) for x in config_file_dirs ] if os.path.exists(os.path.join(root, 'plugins')): plugins = [fix_path(os.path.join(root, 'plugins', p, 'etc')) for p in os.listdir(os.path.join(root, 'plugins'))] plugins = [p for p in plugins if os.path.isdir(p)] config_file_dirs.extend(plugins) for cfg_dir in config_file_dirs: cfg_file = os.path.join(cfg_dir, config_file) if os.path.exists(cfg_file): return cfg_file def ensure_dir(dir_path): """Ensure a directory with 755 permissions mode.""" try: os.makedirs(dir_path, 0o755) except OSError as e: # If the directory already existed, don't raise the error. if e.errno != errno.EEXIST: raise def _subprocess_setup(): # Python installs a SIGPIPE handler by default.
This is usually not what # non-Python subprocesses expect. signal.signal(signal.SIGPIPE, signal.SIG_DFL) def subprocess_popen(args, stdin=None, stdout=None, stderr=None, shell=False, env=None, preexec_fn=_subprocess_setup, close_fds=True): return subprocess.Popen(args, shell=shell, stdin=stdin, stdout=stdout, stderr=stderr, preexec_fn=preexec_fn, close_fds=close_fds, env=env) def parse_mappings(mapping_list, unique_values=True, unique_keys=True): """Parse a list of mapping strings into a dictionary. :param mapping_list: a list of strings of the form ':' :param unique_values: values must be unique if True :param unique_keys: keys must be unique if True, else implies that keys and values are not unique :returns: a dict mapping keys to values or to list of values """ mappings = {} for mapping in mapping_list: mapping = mapping.strip() if not mapping: continue split_result = mapping.split(':') if len(split_result) != 2: raise ValueError(_("Invalid mapping: '%s'") % mapping) key = split_result[0].strip() if not key: raise ValueError(_("Missing key in mapping: '%s'") % mapping) value = split_result[1].strip() if not value: raise ValueError(_("Missing value in mapping: '%s'") % mapping) if unique_keys: if key in mappings: raise ValueError(_("Key %(key)s in mapping: '%(mapping)s' not " "unique") % {'key': key, 'mapping': mapping}) if unique_values and value in mappings.values(): raise ValueError(_("Value %(value)s in mapping: '%(mapping)s' " "not unique") % {'value': value, 'mapping': mapping}) mappings[key] = value else: mappings.setdefault(key, []) if value not in mappings[key]: mappings[key].append(value) return mappings def get_hostname(): return socket.gethostname() def get_first_host_ip(net, ip_version): return str(netaddr.IPAddress(net.first + 1, ip_version)) def compare_elements(a, b): """Compare elements if a and b have same elements. This method doesn't consider ordering """ if a is None: a = [] if b is None: b = [] return set(a) == set(b) def safe_sort_key(value): """Return value hash or build one for dictionaries.""" if isinstance(value, collections.Mapping): return sorted(value.items()) return value def dict2str(dic): return ','.join("%s=%s" % (key, val) for key, val in sorted(six.iteritems(dic))) def str2dict(string): res_dict = {} for keyvalue in string.split(','): (key, value) = keyvalue.split('=', 1) res_dict[key] = value return res_dict def dict2tuple(d): items = list(d.items()) items.sort() return tuple(items) def diff_list_of_dict(old_list, new_list): new_set = set([dict2str(l) for l in new_list]) old_set = set([dict2str(l) for l in old_list]) added = new_set - old_set removed = old_set - new_set return [str2dict(a) for a in added], [str2dict(r) for r in removed] def is_extension_supported(plugin, ext_alias): return ext_alias in getattr( plugin, "supported_extension_aliases", []) def log_opt_values(log): cfg.CONF.log_opt_values(log, logging.DEBUG) def get_random_mac(base_mac): mac = [int(base_mac[0], 16), int(base_mac[1], 16), int(base_mac[2], 16), random.randint(0x00, 0xff), random.randint(0x00, 0xff), random.randint(0x00, 0xff)] if base_mac[3] != '00': mac[3] = int(base_mac[3], 16) return ':'.join(["%02x" % x for x in mac]) def get_random_string(length): """Get a random hex string of the specified length. 
def get_random_string(length): """Get a random hex string of the specified length. Based on the Cinder library (cinder/transfer/api.py). """ rndstr = "" random.seed(datetime.datetime.now().microsecond) while len(rndstr) < length: base_str = str(random.random()).encode('utf-8') rndstr += hashlib.sha224(base_str).hexdigest() return rndstr[0:length] def get_dhcp_agent_device_id(network_id, host): # Split host so as to always use only the hostname and # not the domain name. This will guarantee consistency # whether a local hostname or an fqdn is passed in. local_hostname = host.split('.')[0] host_uuid = uuid.uuid5(uuid.NAMESPACE_DNS, str(local_hostname)) return 'dhcp%s-%s' % (host_uuid, network_id) def cpu_count(): try: return multiprocessing.cpu_count() except NotImplementedError: return 1 class exception_logger(object): """Wrap a function and log any raised exception :param logger: the logger to log the exception, default is LOG.exception :returns: the original return value if no exception is raised; re-raises the exception if one occurs """ def __init__(self, logger=None): self.logger = logger def __call__(self, func): if self.logger is None: LOG = logging.getLogger(func.__module__) self.logger = LOG.exception def call(*args, **kwargs): try: return func(*args, **kwargs) except Exception as e: with excutils.save_and_reraise_exception(): self.logger(e) return call def get_other_dvr_serviced_device_owners(): """Return device_owner names for ports that should be serviced by DVR This doesn't return DEVICE_OWNER_COMPUTE_PREFIX since it is a prefix, not a complete device_owner name, so it should be handled separately (see is_dvr_serviced() below) """ return [n_const.DEVICE_OWNER_LOADBALANCER, n_const.DEVICE_OWNER_LOADBALANCERV2, n_const.DEVICE_OWNER_DHCP] def get_dvr_allowed_address_pair_device_owners(): """Return device_owner names for allowed_addr_pair ports serviced by DVR This just returns the device owners that are used by the allowed_address_pair ports. Right now only the device_owners shown below are used by the allowed_address_pair ports. Later, if other device owners are used for allowed_address_pairs, those device_owners should be added to the list below. """ # TODO(Swami): Convert these methods to constants. # Add the constants variable to the neutron-lib return [n_const.DEVICE_OWNER_LOADBALANCER, n_const.DEVICE_OWNER_LOADBALANCERV2] def is_dvr_serviced(device_owner): """Check if the port needs to be serviced by DVR Helper function to check the device owners of the ports in the compute and service node to determine whether they are required for DVR or any service directly or indirectly associated with DVR. """ return (device_owner.startswith(n_const.DEVICE_OWNER_COMPUTE_PREFIX) or device_owner in get_other_dvr_serviced_device_owners()) @debtcollector.removals.remove(message="This will be removed in the N cycle.") def get_keystone_url(conf): if conf.auth_uri: auth_uri = conf.auth_uri.rstrip('/') else: auth_uri = ('%(protocol)s://%(host)s:%(port)s' % {'protocol': conf.auth_protocol, 'host': conf.auth_host, 'port': conf.auth_port}) # NOTE(ihrachys): all existing consumers assume version 2.0 return '%s/v2.0/' % auth_uri def ip_to_cidr(ip, prefix=None): """Convert an ip with no prefix to cidr notation :param ip: An ipv4 or ipv6 address. Convertible to netaddr.IPNetwork. :param prefix: Optional prefix. If None, the default of 32 will be used for ipv4 and 128 for ipv6. """ net = netaddr.IPNetwork(ip) if prefix is not None: # Can't pass ip and prefix separately. Must concatenate strings.
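        # Illustrative results, assuming netaddr behaves as documented:
        #     ip_to_cidr('192.168.1.5')      -> '192.168.1.5/32'
        #     ip_to_cidr('192.168.1.5', 24)  -> '192.168.1.5/24'
        #     ip_to_cidr('2001:db8::1')      -> '2001:db8::1/128'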
net = netaddr.IPNetwork(str(net.ip) + '/' + str(prefix)) return str(net) def fixed_ip_cidrs(fixed_ips): """Create a list of a port's fixed IPs in cidr notation. :param fixed_ips: A neutron port's fixed_ips dictionary """ return [ip_to_cidr(fixed_ip['ip_address'], fixed_ip.get('prefixlen')) for fixed_ip in fixed_ips] def is_cidr_host(cidr): """Determines if the cidr passed in represents a single host network :param cidr: Either an ipv4 or ipv6 cidr. :returns: True if the cidr is /32 for ipv4 or /128 for ipv6. :raises ValueError: raises if cidr does not contain a '/'. This disallows plain IP addresses specifically to avoid ambiguity. """ if '/' not in str(cidr): raise ValueError("cidr doesn't contain a '/'") net = netaddr.IPNetwork(cidr) if net.version == 4: return net.prefixlen == n_const.IPv4_BITS return net.prefixlen == n_const.IPv6_BITS def ip_version_from_int(ip_version_int): if ip_version_int == 4: return n_const.IPv4 if ip_version_int == 6: return n_const.IPv6 raise ValueError(_('Illegal IP version number')) def is_port_trusted(port): """Used to determine if a port can be trusted not to attack the network. Trust is currently based on the device_owner field starting with 'network:' since we restrict who can use that in the default policy.json file. """ return port['device_owner'].startswith(n_const.DEVICE_OWNER_NETWORK_PREFIX) class DelayedStringRenderer(object): """Takes a callable and its args and calls it when __str__ is called Useful when an argument to a logging statement is expensive to create. This will prevent the callable from being called if it's never converted to a string. """ def __init__(self, function, *args, **kwargs): self.function = function self.args = args self.kwargs = kwargs def __str__(self): return str(self.function(*self.args, **self.kwargs)) def camelize(s): return ''.join(s.replace('_', ' ').title().split()) def round_val(val): # we rely on the decimal module since it behaves consistently across # Python versions (2.x vs. 3.x) return int(decimal.Decimal(val).quantize(decimal.Decimal('1'), rounding=decimal.ROUND_HALF_UP)) def replace_file(file_name, data, file_mode=0o644): """Replaces the contents of file_name with data in a safe manner. First write to a temp file and then rename. Since POSIX renames are atomic, the file is unlikely to be corrupted by competing writes. We create the tempfile on the same device to ensure that it can be renamed.
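    Usage sketch (hypothetical path and payload, for illustration only):

        replace_file('/var/lib/neutron/dhcp/some-net/opts',
                     'option:dns-server,10.0.0.2\n')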
""" base_dir = os.path.dirname(os.path.abspath(file_name)) with tempfile.NamedTemporaryFile('w+', dir=base_dir, delete=False) as tmp_file: tmp_file.write(data) os.chmod(tmp_file.name, file_mode) os.rename(tmp_file.name, file_name) def load_class_by_alias_or_classname(namespace, name): """Load class using stevedore alias or the class name :param namespace: namespace where the alias is defined :param name: alias or class name of the class to be loaded :returns class if calls can be loaded :raises ImportError if class cannot be loaded """ if not name: LOG.error(_LE("Alias or class name is not set")) raise ImportError(_("Class not found.")) try: # Try to resolve class by alias mgr = driver.DriverManager(namespace, name) class_to_load = mgr.driver except RuntimeError: e1_info = sys.exc_info() # Fallback to class name try: class_to_load = importutils.import_class(name) except (ImportError, ValueError): LOG.error(_LE("Error loading class by alias"), exc_info=e1_info) LOG.error(_LE("Error loading class by class name"), exc_info=True) raise ImportError(_("Class not found.")) return class_to_load def safe_decode_utf8(s): if six.PY3 and isinstance(s, bytes): return s.decode('utf-8', 'surrogateescape') return s def _hex_format(port, mask=0): def hex_str(num): return format(num, '#06x') if mask > 0: return "%s/%s" % (hex_str(port), hex_str(0xffff & ~mask)) return hex_str(port) def _gen_rules_port_min(port_min, top_bit): """ Encode a port range range(port_min, (port_min | (top_bit - 1)) + 1) into a set of bit value/masks. """ # Processing starts with setting up mask and top_bit variables to their # maximum. Top_bit has the form (1000000) with '1' pointing to the register # being processed, while mask has the form (0111111) with '1' showing # possible range to be covered. # With each rule generation cycle, mask and top_bit are bit shifted to the # right. When top_bit reaches 0 it means that last register was processed. # Let port_min be n bits long, top_bit = 1 << k, 0<=k<=n-1. # Each cycle step checks the following conditions: # 1). port & mask == 0 # This means that remaining bits k..1 are equal to '0' and can be # covered by a single port/mask rule. # If condition 1 doesn't fit, then both top_bit and mask are bit # shifted to the right and condition 2 is checked: # 2). port & top_bit == 0 # This means that kth port bit is equal to '0'. By setting it to '1' # and masking other (k-1) bits all ports in range # [P, P + 2^(k-1)-1] are guaranteed to be covered. # Let p_k be equal to port first (n-k) bits with rest set to 0. # Then P = p_k | top_bit. # Correctness proof: # The remaining range to be encoded in a cycle is calculated as follows: # R = [port_min, port_min | mask]. # If condition 1 holds, then a rule that covers R is generated and the job # is done. # If condition 2 holds, then the rule emitted will cover 2^(k-1) values # from the range. Remaining range R will shrink by 2^(k-1). # If condition 2 doesn't hold, then even after top_bit/mask shift in next # iteration the value of R won't change. 
# Full cycle example for range [40, 64): # port=0101000, top_bit=1000000, k=6 # * step 1, k=6, R=[40, 63] # top_bit=1000000, mask=0111111 -> condition 1 doesn't hold, shifting # mask/top_bit # top_bit=0100000, mask=0011111 -> condition 2 doesn't hold # * step 2, k=5, R=[40, 63] # top_bit=0100000, mask=0011111 -> condition 1 doesn't hold, shifting # mask/top_bit # top_bit=0010000, mask=0001111 -> condition 2 holds -> 011xxxx or # 0x0030/fff0 # * step 3, k=4, R=[40, 47] # top_bit=0010000, mask=0001111 -> condition 1 doesn't hold, shifting # mask/top_bit # top_bit=0001000, mask=0000111 -> condition 2 doesn't hold # * step 4, k=3, R=[40, 47] # top_bit=0001000, mask=0000111 -> condition 1 holds -> 0101xxx or # 0x0028/fff8 # rules=[0x0030/fff0, 0x0028/fff8] rules = [] mask = top_bit - 1 while True: if (port_min & mask) == 0: # greedy matched a streak of '0' in port_min rules.append(_hex_format(port_min, mask)) break top_bit >>= 1 mask >>= 1 if (port_min & top_bit) == 0: # matched next '0' in port_min to substitute for '1' in resulting # rule rules.append(_hex_format(port_min & ~mask | top_bit, mask)) return rules def _gen_rules_port_max(port_max, top_bit): """ Encode a port range range(port_max & ~(top_bit - 1), port_max + 1) into a set of bit value/masks. """ # Processing starts with setting up mask and top_bit variables to their # maximum. Top_bit has the form (1000000) with '1' pointing to the register # being processed, while mask has the form (0111111) with '1' showing # possible range to be covered. # With each rule generation cycle, mask and top_bit are bit shifted to the # right. When top_bit reaches 0 it means that last register was processed. # Let port_max be n bits long, top_bit = 1 << k, 0<=k<=n-1. # Each cycle step checks the following conditions: # 1). port & mask == mask # This means that remaining bits k..1 are equal to '1' and can be # covered by a single port/mask rule. # If condition 1 doesn't fit, then both top_bit and mask are bit # shifted to the right and condition 2 is checked: # 2). port & top_bit == top_bit # This means that kth port bit is equal to '1'. By setting it to '0' # and masking other (k-1) bits all ports in range # [P, P + 2^(k-1)-1] are guaranteed to be covered. # Let p_k be equal to port first (n-k) bits with rest set to 0. # Then P = p_k | ~top_bit. # Correctness proof: # The remaining range to be encoded in a cycle is calculated as follows: # R = [port_max & ~mask, port_max]. # If condition 1 holds, then a rule that covers R is generated and the job # is done. # If condition 2 holds, then the rule emitted will cover 2^(k-1) values # from the range. Remaining range R will shrink by 2^(k-1). # If condition 2 doesn't hold, then even after top_bit/mask shift in next # iteration the value of R won't change. 
# Full cycle example for range [64, 105]: # port=1101001, top_bit=1000000, k=6 # * step 1, k=6, R=[64, 105] # top_bit=1000000, mask=0111111 -> condition 1 doesn't hold, shifting # mask/top_bit # top_bit=0100000, mask=0011111 -> condition 2 holds -> 10xxxxx or # 0x0040/ffe0 # * step 2, k=5, R=[96, 105] # top_bit=0100000, mask=0011111 -> condition 1 doesn't hold, shifting # mask/top_bit # top_bit=0010000, mask=0001111 -> condition 2 doesn't hold # * step 3, k=4, R=[96, 105] # top_bit=0010000, mask=0001111 -> condition 1 doesn't hold, shifting # mask/top_bit # top_bit=0001000, mask=0000111 -> condition 2 holds -> 1100xxx or # 0x0060/fff8 # * step 4, k=3, R=[104, 105] # top_bit=0001000, mask=0000111 -> condition 1 doesn't hold, shifting # mask/top_bit # top_bit=0000100, mask=0000011 -> condition 2 doesn't hold # * step 5, k=2, R=[104, 105] # top_bit=0000100, mask=0000011 -> condition 1 doesn't hold, shifting # mask/top_bit # top_bit=0000010, mask=0000001 -> condition 2 doesn't hold # * step 6, k=1, R=[104, 105] # top_bit=0000010, mask=0000001 -> condition 1 holds -> 1101001 or # 0x0068 # rules=[0x0040/ffe0, 0x0060/fff8, 0x0068] rules = [] mask = top_bit - 1 while True: if (port_max & mask) == mask: # greedy matched a streak of '1' in port_max rules.append(_hex_format(port_max & ~mask, mask)) break top_bit >>= 1 mask >>= 1 if (port_max & top_bit) == top_bit: # matched next '1' in port_max to substitute for '0' in resulting # rule rules.append(_hex_format(port_max & ~mask & ~top_bit, mask)) return rules def port_rule_masking(port_min, port_max): """Translate a range [port_min, port_max] into a set of bitwise matches. Each match has the form 'port/mask'. The port and mask are 16-bit numbers written in hexadecimal prefixed by 0x. Each 1-bit in mask requires that the corresponding bit in port must match. Each 0-bit in mask causes the corresponding bit to be ignored. """ # Let binary representation of port_min and port_max be n bits long and # have first m bits in common, 0 <= m <= n. # If remaining (n - m) bits of given ports define 2^(n-m) values, then # [port_min, port_max] range is covered by a single rule. # For example: # n = 6 # port_min = 16 (binary 010000) # port_max = 23 (binary 010111) # Ports have m=3 bits in common with the remaining (n-m)=3 bits # covering range [0, 2^3), which equals to a single 010xxx rule. The algo # will return [0x0010/fff8]. # Else [port_min, port_max] range will be split into 2: range [port_min, T) # and [T, port_max]. Let p_m be the common part of port_min and port_max # with other (n-m) bits set to 0. Then T = p_m | 1 << (n-m-1). # For example: # n = 7 # port_min = 40 (binary 0101000) # port_max = 105 (binary 1101001) # Ports have m=0 bits in common, p_m=000000. Then T=1000000 and the # initial range [40, 105] is divided into [40, 64) and [64, 105]. # Each of the ranges will be processed separately, then the generated rules # will be merged. # Check port_max >= port_min. 
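    # Illustrative end-to-end result (assuming the helpers above behave as
    # described): port_rule_masking(40, 105) first splits the range into
    # [40, 64) and [64, 105] and then returns the merged rules
    # ['0x0030/0xfff0', '0x0028/0xfff8', '0x0040/0xffe0', '0x0060/0xfff8',
    #  '0x0068/0xfffe'], where the last entry covers the two-port remainder
    # [104, 105].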
if port_max < port_min: raise ValueError(_("'port_max' is smaller than 'port_min'")) bitdiff = port_min ^ port_max if bitdiff == 0: # port_min == port_max return [_hex_format(port_min)] # for python3.x, bit_length could be used here top_bit = 1 while top_bit <= bitdiff: top_bit <<= 1 if (port_min & (top_bit - 1) == 0 and port_max & (top_bit - 1) == top_bit - 1): # special case, range of 2^k ports is covered return [_hex_format(port_min, top_bit - 1)] top_bit >>= 1 rules = [] rules.extend(_gen_rules_port_min(port_min, top_bit)) rules.extend(_gen_rules_port_max(port_max, top_bit)) return rules def create_object_with_dependency(creator, dep_getter, dep_creator, dep_id_attr): """Creates an object that binds to a dependency while handling races. creator is a function that is expected to take the result of either dep_getter or dep_creator. The result of dep_getter and dep_creator must have an attribute named dep_id_attr, which is used to determine whether the dependency changed during object creation. dep_getter should return None if the dependency does not exist. dep_creator can raise a DBDuplicateEntry to indicate that a concurrent create of the dependency occurred, in which case the process will restart to get the concurrently created one. This function will return both the created object and the dependency it used/created. This function protects against all of the cases where the dependency can be concurrently removed by catching exceptions and restarting the process of creating the dependency if one no longer exists. It will give up after neutron.db.api.MAX_RETRIES and raise the exception it encounters after that. TODO(kevinbenton): currently this does not try to delete the dependency it created. This matches the semantics of the HA network logic it is used for, but it should be modified to clean up in the future. """ result, dependency, dep_id = None, None, None for attempts in range(1, db_api.MAX_RETRIES + 1): # we go to max + 1 here so the exception handlers can raise their # errors at the end try: dependency = dep_getter() or dep_creator() dep_id = getattr(dependency, dep_id_attr) except db_exc.DBDuplicateEntry: # dependency was concurrently created. with excutils.save_and_reraise_exception() as ctx: if attempts < db_api.MAX_RETRIES: # sleep for a random time between 0 and 1 second to # make sure a concurrent worker doesn't retry again # at exactly the same time time.sleep(random.uniform(0, 1)) ctx.reraise = False continue try: result = creator(dependency) break except Exception: with excutils.save_and_reraise_exception() as ctx: # check if the dependency we tried to use was removed during # object creation if attempts < db_api.MAX_RETRIES: dependency = dep_getter() if not dependency or dep_id != getattr(dependency, dep_id_attr): ctx.reraise = False return result, dependency def transaction_guard(f): """Ensures that the context passed in is not in a transaction. Various Neutron methods modifying resources assume that they will not be called inside of a transaction because they perform operations that expect all data to be committed to the database (e.g. ML2 postcommit calls) and/or they have side effects on external systems. Calling them in a transaction can therefore lead to consistency errors on failures, since the side effects will not be reverted on a DB rollback. If you receive this error, you must alter your code to handle the fact that the thing you are calling can have side effects, so using transactions to undo on failures is not possible.
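    Usage sketch (hypothetical plugin method, for illustration only):

        @transaction_guard
        def delete_network(self, context, id):
            # raises RuntimeError if context.session.is_active
            ...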
""" @functools.wraps(f) def inner(self, context, *args, **kwargs): if context.session.is_active: raise RuntimeError(_("Method cannot be called within a " "transaction.")) return f(self, context, *args, **kwargs) return inner neutron-8.4.0/neutron/common/rpc.py0000664000567000056710000002574613044372760020522 0ustar jenkinsjenkins00000000000000# Copyright (c) 2012 OpenStack Foundation. # Copyright (c) 2014 Red Hat, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import collections from debtcollector import removals import random import time from oslo_config import cfg from oslo_log import log as logging import oslo_messaging from oslo_messaging import serializer as om_serializer from oslo_service import service from oslo_utils import excutils from neutron._i18n import _LE, _LW from neutron.common import exceptions from neutron import context LOG = logging.getLogger(__name__) TRANSPORT = None NOTIFICATION_TRANSPORT = None NOTIFIER = None ALLOWED_EXMODS = [ exceptions.__name__, ] EXTRA_EXMODS = [] TRANSPORT_ALIASES = { 'neutron.openstack.common.rpc.impl_fake': 'fake', 'neutron.openstack.common.rpc.impl_qpid': 'qpid', 'neutron.openstack.common.rpc.impl_kombu': 'rabbit', 'neutron.openstack.common.rpc.impl_zmq': 'zmq', 'neutron.rpc.impl_fake': 'fake', 'neutron.rpc.impl_qpid': 'qpid', 'neutron.rpc.impl_kombu': 'rabbit', 'neutron.rpc.impl_zmq': 'zmq', } # NOTE(salv-orlando): I am afraid this is a global variable. While not ideal, # they're however widely used throughout the code base. It should be set to # true if the RPC server is not running in the current process space. This # will prevent get_connection from creating connections to the AMQP server RPC_DISABLED = False def init(conf): global TRANSPORT, NOTIFICATION_TRANSPORT, NOTIFIER exmods = get_allowed_exmods() TRANSPORT = oslo_messaging.get_transport(conf, allowed_remote_exmods=exmods, aliases=TRANSPORT_ALIASES) NOTIFICATION_TRANSPORT = oslo_messaging.get_notification_transport( conf, allowed_remote_exmods=exmods, aliases=TRANSPORT_ALIASES) serializer = RequestContextSerializer() NOTIFIER = oslo_messaging.Notifier(NOTIFICATION_TRANSPORT, serializer=serializer) def cleanup(): global TRANSPORT, NOTIFICATION_TRANSPORT, NOTIFIER assert TRANSPORT is not None assert NOTIFICATION_TRANSPORT is not None assert NOTIFIER is not None TRANSPORT.cleanup() NOTIFICATION_TRANSPORT.cleanup() _ContextWrapper.reset_timeouts() TRANSPORT = NOTIFICATION_TRANSPORT = NOTIFIER = None def add_extra_exmods(*args): EXTRA_EXMODS.extend(args) def clear_extra_exmods(): del EXTRA_EXMODS[:] def get_allowed_exmods(): return ALLOWED_EXMODS + EXTRA_EXMODS class _ContextWrapper(object): """Wraps oslo messaging contexts to set the timeout for calls. This intercepts RPC calls and sets the timeout value to the globally adapting value for each method. An oslo messaging timeout results in a doubling of the timeout value for the method on which it timed out. 
There currently is no logic to reduce the timeout since busy Neutron servers are more frequently the cause of timeouts rather than lost messages. """ _METHOD_TIMEOUTS = collections.defaultdict( lambda: TRANSPORT.conf.rpc_response_timeout) @classmethod def reset_timeouts(cls): cls._METHOD_TIMEOUTS.clear() def __init__(self, original_context): self._original_context = original_context def __getattr__(self, name): return getattr(self._original_context, name) def call(self, ctxt, method, **kwargs): # two methods with the same name in different namespaces should # be tracked independently if self._original_context.target.namespace: scoped_method = '%s.%s' % (self._original_context.target.namespace, method) else: scoped_method = method # set the timeout from the global method timeout tracker for this # method self._original_context.timeout = self._METHOD_TIMEOUTS[scoped_method] try: return self._original_context.call(ctxt, method, **kwargs) except oslo_messaging.MessagingTimeout: with excutils.save_and_reraise_exception(): wait = random.uniform(0, TRANSPORT.conf.rpc_response_timeout) LOG.error(_LE("Timeout in RPC method %(method)s. Waiting for " "%(wait)s seconds before next attempt. If the " "server is not down, consider increasing the " "rpc_response_timeout option as Neutron " "server(s) may be overloaded and unable to " "respond quickly enough."), {'wait': int(round(wait)), 'method': scoped_method}) ceiling = TRANSPORT.conf.rpc_response_timeout * 10 new_timeout = min(self._original_context.timeout * 2, ceiling) if new_timeout > self._METHOD_TIMEOUTS[scoped_method]: LOG.warning(_LW("Increasing timeout for %(method)s calls " "to %(new)s seconds. Restart the agent to " "restore it to the default value."), {'method': scoped_method, 'new': new_timeout}) self._METHOD_TIMEOUTS[scoped_method] = new_timeout time.sleep(wait) class BackingOffClient(oslo_messaging.RPCClient): """An oslo messaging RPC Client that implements a timeout backoff. This has all of the same interfaces as oslo_messaging.RPCClient but if the timeout parameter is not specified, the _ContextWrapper returned will track when call timeout exceptions occur and exponentially increase the timeout for the given call method. """ def prepare(self, *args, **kwargs): ctx = super(BackingOffClient, self).prepare(*args, **kwargs) # don't enclose Contexts that explicitly set a timeout return _ContextWrapper(ctx) if 'timeout' not in kwargs else ctx def get_client(target, version_cap=None, serializer=None): assert TRANSPORT is not None serializer = RequestContextSerializer(serializer) return BackingOffClient(TRANSPORT, target, version_cap=version_cap, serializer=serializer) def get_server(target, endpoints, serializer=None): assert TRANSPORT is not None serializer = RequestContextSerializer(serializer) return oslo_messaging.get_rpc_server(TRANSPORT, target, endpoints, 'eventlet', serializer) def get_notifier(service=None, host=None, publisher_id=None): assert NOTIFIER is not None if not publisher_id: publisher_id = "%s.%s" % (service, host or cfg.CONF.host) return NOTIFIER.prepare(publisher_id=publisher_id) class RequestContextSerializer(om_serializer.Serializer): """This serializer is used to convert RPC common context into Neutron Context. 
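    For example (an illustrative sketch, not from the original docs): a
    wire-level context dict such as {'user_id': 'u1', 'tenant_id': 't1',
    'is_admin': False} is rebuilt by deserialize_context() into
    neutron.context.Context('u1', 't1', is_admin=False).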
""" def __init__(self, base=None): super(RequestContextSerializer, self).__init__() self._base = base def serialize_entity(self, ctxt, entity): if not self._base: return entity return self._base.serialize_entity(ctxt, entity) def deserialize_entity(self, ctxt, entity): if not self._base: return entity return self._base.deserialize_entity(ctxt, entity) def serialize_context(self, ctxt): return ctxt.to_dict() def deserialize_context(self, ctxt): rpc_ctxt_dict = ctxt.copy() user_id = rpc_ctxt_dict.pop('user_id', None) if not user_id: user_id = rpc_ctxt_dict.pop('user', None) tenant_id = rpc_ctxt_dict.pop('tenant_id', None) if not tenant_id: tenant_id = rpc_ctxt_dict.pop('project_id', None) return context.Context(user_id, tenant_id, **rpc_ctxt_dict) class Service(service.Service): """Service object for binaries running on hosts. A service enables rpc by listening to queues based on topic and host. """ def __init__(self, host, topic, manager=None, serializer=None): super(Service, self).__init__() self.host = host self.topic = topic self.serializer = serializer if manager is None: self.manager = self else: self.manager = manager def start(self): super(Service, self).start() self.conn = create_connection() LOG.debug("Creating Consumer connection for Service %s", self.topic) endpoints = [self.manager] self.conn.create_consumer(self.topic, endpoints) # Hook to allow the manager to do other initializations after # the rpc connection is created. if callable(getattr(self.manager, 'initialize_service_hook', None)): self.manager.initialize_service_hook(self) # Consume from all consumers in threads self.conn.consume_in_threads() def stop(self): # Try to shut the connection down, but if we get any sort of # errors, go ahead and ignore them.. as we're shutting down anyway try: self.conn.close() except Exception: pass super(Service, self).stop() class Connection(object): def __init__(self): super(Connection, self).__init__() self.servers = [] def create_consumer(self, topic, endpoints, fanout=False): target = oslo_messaging.Target( topic=topic, server=cfg.CONF.host, fanout=fanout) server = get_server(target, endpoints) self.servers.append(server) def consume_in_threads(self): for server in self.servers: server.start() return self.servers def close(self): for server in self.servers: server.stop() for server in self.servers: server.wait() class VoidConnection(object): def create_consumer(self, topic, endpoints, fanout=False): pass def consume_in_threads(self): pass def close(self): pass # functions @removals.removed_kwarg('new') def create_connection(new=True): # NOTE(salv-orlando): This is a clever interpretation of the factory design # patter aimed at preventing plugins from initializing RPC servers upon # initialization when they are running in the REST over HTTP API server. # The educated reader will perfectly be able that this a fairly dirty hack # to avoid having to change the initialization process of every plugin. if RPC_DISABLED: return VoidConnection() return Connection() neutron-8.4.0/neutron/common/ipv6_utils.py0000664000567000056710000000563213044372760022032 0ustar jenkinsjenkins00000000000000# Copyright 2013 IBM Corp. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ IPv6-related utilities and helper functions. """ import os import netaddr from oslo_log import log from neutron._i18n import _, _LI from neutron.common import constants LOG = log.getLogger(__name__) _IS_IPV6_ENABLED = None def get_ipv6_addr_by_EUI64(prefix, mac): # Check if the prefix is IPv4 address isIPv4 = netaddr.valid_ipv4(prefix) if isIPv4: msg = _("Unable to generate IP address by EUI64 for IPv4 prefix") raise TypeError(msg) try: eui64 = int(netaddr.EUI(mac).eui64()) prefix = netaddr.IPNetwork(prefix) return netaddr.IPAddress(prefix.first + eui64 ^ (1 << 57)) except (ValueError, netaddr.AddrFormatError): raise TypeError(_('Bad prefix or mac format for generating IPv6 ' 'address by EUI-64: %(prefix)s, %(mac)s:') % {'prefix': prefix, 'mac': mac}) except TypeError: raise TypeError(_('Bad prefix type for generate IPv6 address by ' 'EUI-64: %s') % prefix) def is_enabled(): global _IS_IPV6_ENABLED if _IS_IPV6_ENABLED is None: disabled_ipv6_path = "/proc/sys/net/ipv6/conf/default/disable_ipv6" if os.path.exists(disabled_ipv6_path): with open(disabled_ipv6_path, 'r') as f: disabled = f.read().strip() _IS_IPV6_ENABLED = disabled == "0" else: _IS_IPV6_ENABLED = False if not _IS_IPV6_ENABLED: LOG.info(_LI("IPv6 is not enabled on this system.")) return _IS_IPV6_ENABLED def is_auto_address_subnet(subnet): """Check if subnet is an auto address subnet.""" modes = [constants.IPV6_SLAAC, constants.DHCPV6_STATELESS] return (subnet['ipv6_address_mode'] in modes or subnet['ipv6_ra_mode'] in modes) def is_eui64_address(ip_address): """Check if ip address is EUI64.""" ip = netaddr.IPAddress(ip_address) # '0xfffe' addition is used to build EUI-64 from MAC (RFC4291) # Look for it in the middle of the EUI-64 part of address return ip.version == 6 and not ((ip & 0xffff000000) ^ 0xfffe000000) def is_ipv6_pd_enabled(subnet): """Returns True if the subnetpool_id of the given subnet is equal to constants.IPV6_PD_POOL_ID """ return subnet.get('subnetpool_id') == constants.IPV6_PD_POOL_ID neutron-8.4.0/neutron/common/constants.py0000664000567000056710000002001713044372760021734 0ustar jenkinsjenkins00000000000000# Copyright (c) 2012 OpenStack Foundation. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import sys from neutron_lib import constants as lib_constants from neutron.common import _deprecate ROUTER_PORT_OWNERS = lib_constants.ROUTER_INTERFACE_OWNERS_SNAT + \ (lib_constants.DEVICE_OWNER_ROUTER_GW,) ROUTER_STATUS_ACTIVE = 'ACTIVE' # NOTE(kevinbenton): a BUILD status for routers could be added in the future # for agents to indicate when they are wiring up the ports. 
The following is # to indicate when the server is busy building sub-components of a router ROUTER_STATUS_ALLOCATING = 'ALLOCATING' L3_AGENT_MODE_DVR = 'dvr' L3_AGENT_MODE_DVR_SNAT = 'dvr_snat' L3_AGENT_MODE_LEGACY = 'legacy' L3_AGENT_MODE = 'agent_mode' DEVICE_ID_RESERVED_DHCP_PORT = "reserved_dhcp_port" HA_ROUTER_STATE_KEY = '_ha_state' METERING_LABEL_KEY = '_metering_labels' FLOATINGIP_AGENT_INTF_KEY = '_floatingip_agent_interfaces' SNAT_ROUTER_INTF_KEY = '_snat_router_interfaces' HA_NETWORK_NAME = 'HA network tenant %s' HA_SUBNET_NAME = 'HA subnet tenant %s' HA_PORT_NAME = 'HA port tenant %s' MINIMUM_MINIMUM_AGENTS_FOR_HA = 1 DEFAULT_MINIMUM_AGENTS_FOR_HA = 2 HA_ROUTER_STATE_ACTIVE = 'active' HA_ROUTER_STATE_STANDBY = 'standby' AGENT_TYPE_MACVTAP = 'Macvtap agent' PAGINATION_INFINITE = 'infinite' SORT_DIRECTION_ASC = 'asc' SORT_DIRECTION_DESC = 'desc' ETHERTYPE_NAME_ARP = 'arp' ETHERTYPE_ARP = 0x0806 ETHERTYPE_IP = 0x0800 ETHERTYPE_IPV6 = 0x86DD # Protocol names and numbers for Security Groups/Firewalls PROTO_NAME_AH = 'ah' PROTO_NAME_DCCP = 'dccp' PROTO_NAME_EGP = 'egp' PROTO_NAME_ESP = 'esp' PROTO_NAME_GRE = 'gre' PROTO_NAME_ICMP = 'icmp' PROTO_NAME_IGMP = 'igmp' PROTO_NAME_IPV6_ENCAP = 'ipv6-encap' PROTO_NAME_IPV6_FRAG = 'ipv6-frag' PROTO_NAME_IPV6_ICMP = 'ipv6-icmp' PROTO_NAME_IPV6_NONXT = 'ipv6-nonxt' PROTO_NAME_IPV6_OPTS = 'ipv6-opts' PROTO_NAME_IPV6_ROUTE = 'ipv6-route' PROTO_NAME_OSPF = 'ospf' PROTO_NAME_PGM = 'pgm' PROTO_NAME_RSVP = 'rsvp' PROTO_NAME_SCTP = 'sctp' PROTO_NAME_TCP = 'tcp' PROTO_NAME_UDP = 'udp' PROTO_NAME_UDPLITE = 'udplite' PROTO_NAME_VRRP = 'vrrp' # TODO(amotoki): It should be moved to neutron-lib. # For backward-compatibility of security group rule API, # we keep the old value for IPv6 ICMP. # It should be clean up in the future. 
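# For example (illustrative): a security group rule created with the legacy
# protocol name 'icmpv6' keeps working because IP_PROTOCOL_NAME_ALIASES
# below maps it onto the canonical 'ipv6-icmp' entry of IP_PROTOCOL_MAP.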
PROTO_NAME_IPV6_ICMP_LEGACY = 'icmpv6' PROTO_NUM_AH = 51 PROTO_NUM_DCCP = 33 PROTO_NUM_EGP = 8 PROTO_NUM_ESP = 50 PROTO_NUM_GRE = 47 PROTO_NUM_ICMP = 1 PROTO_NUM_IGMP = 2 PROTO_NUM_IPV6_ENCAP = 41 PROTO_NUM_IPV6_FRAG = 44 PROTO_NUM_IPV6_ICMP = 58 PROTO_NUM_IPV6_NONXT = 59 PROTO_NUM_IPV6_OPTS = 60 PROTO_NUM_IPV6_ROUTE = 43 PROTO_NUM_OSPF = 89 PROTO_NUM_PGM = 113 PROTO_NUM_RSVP = 46 PROTO_NUM_SCTP = 132 PROTO_NUM_TCP = 6 PROTO_NUM_UDP = 17 PROTO_NUM_UDPLITE = 136 PROTO_NUM_VRRP = 112 IP_PROTOCOL_MAP = {PROTO_NAME_AH: PROTO_NUM_AH, PROTO_NAME_DCCP: PROTO_NUM_DCCP, PROTO_NAME_EGP: PROTO_NUM_EGP, PROTO_NAME_ESP: PROTO_NUM_ESP, PROTO_NAME_GRE: PROTO_NUM_GRE, PROTO_NAME_ICMP: PROTO_NUM_ICMP, PROTO_NAME_IGMP: PROTO_NUM_IGMP, PROTO_NAME_IPV6_ENCAP: PROTO_NUM_IPV6_ENCAP, PROTO_NAME_IPV6_FRAG: PROTO_NUM_IPV6_FRAG, PROTO_NAME_IPV6_ICMP: PROTO_NUM_IPV6_ICMP, PROTO_NAME_IPV6_NONXT: PROTO_NUM_IPV6_NONXT, PROTO_NAME_IPV6_OPTS: PROTO_NUM_IPV6_OPTS, PROTO_NAME_IPV6_ROUTE: PROTO_NUM_IPV6_ROUTE, PROTO_NAME_OSPF: PROTO_NUM_OSPF, PROTO_NAME_PGM: PROTO_NUM_PGM, PROTO_NAME_RSVP: PROTO_NUM_RSVP, PROTO_NAME_SCTP: PROTO_NUM_SCTP, PROTO_NAME_TCP: PROTO_NUM_TCP, PROTO_NAME_UDP: PROTO_NUM_UDP, PROTO_NAME_UDPLITE: PROTO_NUM_UDPLITE, PROTO_NAME_VRRP: PROTO_NUM_VRRP} IP_PROTOCOL_NAME_ALIASES = {PROTO_NAME_IPV6_ICMP_LEGACY: PROTO_NAME_IPV6_ICMP} IP_PROTOCOL_NUM_TO_NAME_MAP = {str(v): k for k, v in IP_PROTOCOL_MAP.items()} # List of ICMPv6 types that should be allowed by default: # Multicast Listener Query (130), # Multicast Listener Report (131), # Multicast Listener Done (132), # Neighbor Solicitation (135), ICMPV6_TYPE_NC = 135 # Neighbor Advertisement (136) ICMPV6_TYPE_NA = 136 ICMPV6_ALLOWED_TYPES = [130, 131, 132, 135, 136] ICMPV6_TYPE_RA = 134 DHCPV6_STATEFUL = 'dhcpv6-stateful' DHCPV6_STATELESS = 'dhcpv6-stateless' IPV6_SLAAC = 'slaac' IPV6_MODES = [DHCPV6_STATEFUL, DHCPV6_STATELESS, IPV6_SLAAC] IPV6_LLA_PREFIX = 'fe80::/64' # Human-readable ID to which the subnetpool ID should be set to # indicate that IPv6 Prefix Delegation is enabled for a given subnet IPV6_PD_POOL_ID = 'prefix_delegation' # Special provisional prefix for IPv6 Prefix Delegation PROVISIONAL_IPV6_PD_PREFIX = '::/64' # Timeout in seconds for getting an IPv6 LLA LLA_TASK_TIMEOUT = 40 # Linux interface max length DEVICE_NAME_MAX_LEN = 15 # vhost-user device names start with "vhu" VHOST_USER_DEVICE_PREFIX = 'vhu' # Device names start with "macvtap" MACVTAP_DEVICE_PREFIX = 'macvtap' # The vswitch side of a veth pair for a nova iptables filter setup VETH_DEVICE_PREFIX = 'qvo' # prefix for SNAT interface in DVR SNAT_INT_DEV_PREFIX = 'sg-' # Possible prefixes to partial port IDs in interface names used by the OVS, # Linux Bridge, and IVS VIF drivers in Nova and the neutron agents. See the # 'get_ovs_interfaceid' method in Nova (nova/virt/libvirt/vif.py) for details. INTERFACE_PREFIXES = (lib_constants.TAP_DEVICE_PREFIX, VETH_DEVICE_PREFIX, SNAT_INT_DEV_PREFIX) ATTRIBUTES_TO_UPDATE = 'attributes_to_update' # Maximum value integer can take in MySQL and PostgreSQL # In SQLite integer can be stored in 1, 2, 3, 4, 6, or 8 bytes, # but here it will be limited by this value for consistency. DB_INTEGER_MAX_VALUE = 2 ** 31 - 1 # TODO(amuller): Re-define the RPC namespaces once Oslo messaging supports # Targets with multiple namespaces. Neutron will then implement callbacks # for its RPC clients in order to support rolling upgrades. 
# RPC Interface for agents to call DHCP API implemented on the plugin side RPC_NAMESPACE_DHCP_PLUGIN = None # RPC interface for the metadata service to get info from the plugin side RPC_NAMESPACE_METADATA = None # RPC interface for agent to plugin security group API RPC_NAMESPACE_SECGROUP = None # RPC interface for agent to plugin DVR api RPC_NAMESPACE_DVR = None # RPC interface for reporting state back to the plugin RPC_NAMESPACE_STATE = None # RPC interface for agent to plugin resources API RPC_NAMESPACE_RESOURCES = None # Default network MTU value when not configured DEFAULT_NETWORK_MTU = 1500 IPV6_MIN_MTU = 1280 ROUTER_MARK_MASK = "0xffff" # Agent states as detected by server, used to reply on agent's state report # agent has just been registered AGENT_NEW = 'new' # agent is alive AGENT_ALIVE = 'alive' # agent has just returned to alive after being dead AGENT_REVIVED = 'revived' # Neutron-lib migration shim. This will wrap any constants that are moved # to that library in a deprecation warning, until they can be updated to # import directly from their new location. # If you're wondering why we bother saving _OLD_REF, it is because if we # do not, then the original module we are overwriting gets garbage collected, # and then you will find some super strange behavior with inherited classes # and the like. Saving a ref keeps it around. # WARNING: THESE MUST BE THE LAST TWO LINES IN THIS MODULE _OLD_REF = sys.modules[__name__] sys.modules[__name__] = _deprecate._DeprecateSubset(globals(), lib_constants) # WARNING: THESE MUST BE THE LAST TWO LINES IN THIS MODULE neutron-8.4.0/neutron/common/exceptions.py0000664000567000056710000002713113044372760022105 0ustar jenkinsjenkins00000000000000# Copyright 2011 VMware, Inc # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import sys from neutron_lib import exceptions as e from neutron._i18n import _ from neutron.common import _deprecate class MultipleExceptions(Exception): """Container for multiple exceptions encountered. The API layer of Neutron will automatically unpack, translate, filter, and combine the inner exceptions in any exception derived from this class. 
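    Usage sketch (illustrative only; collect errors, then raise them all):

        errors = []
        for port_id in port_ids:
            try:
                delete_port(port_id)
            except Exception as exc:
                errors.append(exc)
        if errors:
            raise MultipleExceptions(errors)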
""" def __init__(self, exceptions, *args, **kwargs): super(MultipleExceptions, self).__init__(*args, **kwargs) self.inner_exceptions = exceptions class SubnetPoolNotFound(e.NotFound): message = _("Subnet pool %(subnetpool_id)s could not be found.") class QosPolicyNotFound(e.NotFound): message = _("QoS policy %(policy_id)s could not be found.") class QosRuleNotFound(e.NotFound): message = _("QoS rule %(rule_id)s for policy %(policy_id)s " "could not be found.") class PortQosBindingNotFound(e.NotFound): message = _("QoS binding for port %(port_id)s and policy %(policy_id)s " "could not be found.") class NetworkQosBindingNotFound(e.NotFound): message = _("QoS binding for network %(net_id)s and policy %(policy_id)s " "could not be found.") class PolicyFileNotFound(e.NotFound): message = _("Policy configuration policy.json could not be found.") class PolicyInitError(e.NeutronException): message = _("Failed to init policy %(policy)s because %(reason)s.") class PolicyCheckError(e.NeutronException): message = _("Failed to check policy %(policy)s because %(reason)s.") class StateInvalid(e.BadRequest): message = _("Unsupported port state: %(port_state)s.") class QosPolicyInUse(e.InUse): message = _("QoS Policy %(policy_id)s is used by " "%(object_type)s %(object_id)s.") class DhcpPortInUse(e.InUse): message = _("Port %(port_id)s is already acquired by another DHCP agent") class HostRoutesExhausted(e.BadRequest): # NOTE(xchenum): probably make sense to use quota exceeded exception? message = _("Unable to complete operation for %(subnet_id)s. " "The number of host routes exceeds the limit %(quota)s.") class DNSNameServersExhausted(e.BadRequest): # NOTE(xchenum): probably make sense to use quota exceeded exception? message = _("Unable to complete operation for %(subnet_id)s. " "The number of DNS nameservers exceeds the limit %(quota)s.") class InvalidIpForNetwork(e.BadRequest): message = _("IP address %(ip_address)s is not a valid IP " "for any of the subnets on the specified network.") class FlatNetworkInUse(e.InUse): message = _("Unable to create the flat network. " "Physical network %(physical_network)s is in use.") class TenantNetworksDisabled(e.ServiceUnavailable): message = _("Tenant network creation is not enabled.") class NoNetworkFoundInMaximumAllowedAttempts(e.ServiceUnavailable): message = _("Unable to create the network. 
" "No available network found in maximum allowed attempts.") class MalformedRequestBody(e.BadRequest): message = _("Malformed request body: %(reason)s.") class InvalidAllocationPool(e.BadRequest): message = _("The allocation pool %(pool)s is not valid.") class UnsupportedPortDeviceOwner(e.Conflict): message = _("Operation %(op)s is not supported for device_owner " "%(device_owner)s on port %(port_id)s.") class OverlappingAllocationPools(e.Conflict): message = _("Found overlapping allocation pools: " "%(pool_1)s %(pool_2)s for subnet %(subnet_cidr)s.") class OutOfBoundsAllocationPool(e.BadRequest): message = _("The allocation pool %(pool)s spans " "beyond the subnet cidr %(subnet_cidr)s.") class MacAddressGenerationFailure(e.ServiceUnavailable): message = _("Unable to generate unique mac on network %(net_id)s.") class BridgeDoesNotExist(e.NeutronException): message = _("Bridge %(bridge)s does not exist.") class QuotaResourceUnknown(e.NotFound): message = _("Unknown quota resources %(unknown)s.") class QuotaMissingTenant(e.BadRequest): message = _("Tenant-id was missing from quota request.") class InvalidQuotaValue(e.Conflict): message = _("Change would make usage less than 0 for the following " "resources: %(unders)s.") class InvalidSharedSetting(e.Conflict): message = _("Unable to reconfigure sharing settings for network " "%(network)s. Multiple tenants are using it.") class InvalidExtensionEnv(e.BadRequest): message = _("Invalid extension environment: %(reason)s.") class ExtensionsNotFound(e.NotFound): message = _("Extensions not found: %(extensions)s.") class InvalidContentType(e.NeutronException): message = _("Invalid content type %(content_type)s.") class GatewayConflictWithAllocationPools(e.InUse): message = _("Gateway ip %(ip_address)s conflicts with " "allocation pool %(pool)s.") class GatewayIpInUse(e.InUse): message = _("Current gateway ip %(ip_address)s already in use " "by port %(port_id)s. 
Unable to update.") class NetworkVlanRangeError(e.NeutronException): message = _("Invalid network VLAN range: '%(vlan_range)s' - '%(error)s'.") def __init__(self, **kwargs): # Convert vlan_range tuple to 'start:end' format for display if isinstance(kwargs['vlan_range'], tuple): kwargs['vlan_range'] = "%d:%d" % kwargs['vlan_range'] super(NetworkVlanRangeError, self).__init__(**kwargs) class PhysicalNetworkNameError(e.NeutronException): message = _("Empty physical network name.") class NetworkVxlanPortRangeError(e.NeutronException): message = _("Invalid network VXLAN port range: '%(vxlan_range)s'.") class VxlanNetworkUnsupported(e.NeutronException): message = _("VXLAN network unsupported.") class DuplicatedExtension(e.NeutronException): message = _("Found duplicate extension: %(alias)s.") class DeviceIDNotOwnedByTenant(e.Conflict): message = _("The following device_id %(device_id)s is not owned by your " "tenant or matches another tenants router.") class InvalidCIDR(e.BadRequest): message = _("Invalid CIDR %(input)s given as IP prefix.") class RouterNotCompatibleWithAgent(e.NeutronException): message = _("Router '%(router_id)s' is not compatible with this agent.") class DvrHaRouterNotSupported(e.NeutronException): message = _("Router '%(router_id)s' cannot be both DVR and HA.") class FailToDropPrivilegesExit(SystemExit): """Exit exception raised when a drop privileges action fails.""" code = 99 class FloatingIpSetupException(e.NeutronException): def __init__(self, message=None): self.message = message super(FloatingIpSetupException, self).__init__() class IpTablesApplyException(e.NeutronException): def __init__(self, message=None): self.message = message super(IpTablesApplyException, self).__init__() class NetworkIdOrRouterIdRequiredError(e.NeutronException): message = _('Both network_id and router_id are None. ' 'One must be provided.') class AbortSyncRouters(e.NeutronException): message = _("Aborting periodic_sync_routers_task due to an error.") # Shared *aas exceptions, pending them being refactored out of Neutron # proper. class FirewallInternalDriverError(e.NeutronException): """Fwaas exception for all driver errors. 
On any failure or exception in the driver, driver should log it and raise this exception to the agent """ message = _("%(driver)s: Internal driver error.") class MissingMinSubnetPoolPrefix(e.BadRequest): message = _("Unspecified minimum subnet pool prefix.") class EmptySubnetPoolPrefixList(e.BadRequest): message = _("Empty subnet pool prefix list.") class PrefixVersionMismatch(e.BadRequest): message = _("Cannot mix IPv4 and IPv6 prefixes in a subnet pool.") class UnsupportedMinSubnetPoolPrefix(e.BadRequest): message = _("Prefix '%(prefix)s' not supported in IPv%(version)s pool.") class IllegalSubnetPoolPrefixBounds(e.BadRequest): message = _("Illegal prefix bounds: %(prefix_type)s=%(prefixlen)s, " "%(base_prefix_type)s=%(base_prefixlen)s.") class IllegalSubnetPoolPrefixUpdate(e.BadRequest): message = _("Illegal update to prefixes: %(msg)s.") class SubnetAllocationError(e.NeutronException): message = _("Failed to allocate subnet: %(reason)s.") class AddressScopePrefixConflict(e.Conflict): message = _("Failed to associate address scope: subnetpools " "within an address scope must have unique prefixes.") class IllegalSubnetPoolAssociationToAddressScope(e.BadRequest): message = _("Illegal subnetpool association: subnetpool %(subnetpool_id)s " "cannot be associated with address scope " "%(address_scope_id)s.") class IllegalSubnetPoolIpVersionAssociationToAddressScope(e.BadRequest): message = _("Illegal subnetpool association: subnetpool %(subnetpool_id)s " "cannot associate with address scope %(address_scope_id)s " "because subnetpool ip_version is not %(ip_version)s.") class IllegalSubnetPoolUpdate(e.BadRequest): message = _("Illegal subnetpool update : %(reason)s.") class MinPrefixSubnetAllocationError(e.BadRequest): message = _("Unable to allocate subnet with prefix length %(prefixlen)s, " "minimum allowed prefix is %(min_prefixlen)s.") class MaxPrefixSubnetAllocationError(e.BadRequest): message = _("Unable to allocate subnet with prefix length %(prefixlen)s, " "maximum allowed prefix is %(max_prefixlen)s.") class SubnetPoolDeleteError(e.BadRequest): message = _("Unable to delete subnet pool: %(reason)s.") class SubnetPoolQuotaExceeded(e.OverQuota): message = _("Per-tenant subnet pool prefix quota exceeded.") class DeviceNotFoundError(e.NeutronException): message = _("Device '%(device_name)s' does not exist.") class NetworkSubnetPoolAffinityError(e.BadRequest): message = _("Subnets hosted on the same network must be allocated from " "the same subnet pool.") class ObjectActionError(e.NeutronException): message = _('Object action %(action)s failed because: %(reason)s.') class CTZoneExhaustedError(e.NeutronException): message = _("IPtables conntrack zones exhausted, iptables rules cannot " "be applied.") # Neutron-lib migration shim. This will wrap any exceptionss that are moved # to that library in a deprecation warning, until they can be updated to # import directly from their new location. # If you're wondering why we bother saving _OLD_REF, it is because if we # do not, then the original module we are overwriting gets garbage collected, # and then you will find some super strange behavior with inherited classes # and the like. Saving a ref keeps it around. 
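# For example (an illustrative sketch of the intended behavior): after the
# module swap below, "from neutron.common import exceptions" followed by a
# reference to an exception that moved to neutron-lib still resolves, but is
# expected to emit a deprecation warning pointing at neutron_lib.exceptions.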
# WARNING: THESE MUST BE THE LAST TWO LINES IN THIS MODULE _OLD_REF = sys.modules[__name__] sys.modules[__name__] = _deprecate._DeprecateSubset(globals(), e) # WARNING: THESE MUST BE THE LAST TWO LINES IN THIS MODULE neutron-8.4.0/neutron/pecan_wsgi/0000775000567000056710000000000013044373210020164 5ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/pecan_wsgi/startup.py0000664000567000056710000002051513044372760022254 0ustar jenkinsjenkins00000000000000# Copyright (c) 2015 Mirantis, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_log import log from neutron._i18n import _LI, _LW from neutron.api import extensions from neutron.api.v2 import attributes from neutron.api.v2 import router from neutron import manager from neutron.pecan_wsgi.controllers import resource as res_ctrl from neutron.pecan_wsgi.controllers import utils from neutron import policy from neutron.quota import resource_registry LOG = log.getLogger(__name__) def _plugin_for_resource(collection): if collection in router.RESOURCES.values(): # this is a core resource, return the core plugin return manager.NeutronManager.get_plugin() ext_mgr = extensions.PluginAwareExtensionManager.get_instance() # Multiple extensions can map to the same resource. This happens # because of 'attribute' extensions. These extensions may come from # various plugins and only one of them is the primary one responsible # for the resource while the others just append fields to the response # (e.g. timestamps). So we have to find the plugin that supports the # extension and has the getter for the collection. ext_res_mappings = dict((ext.get_alias(), collection) for ext in ext_mgr.extensions.values() if collection in ext.get_extended_resources('2.0')) LOG.debug("Extension mappings for: %(collection)s: %(aliases)s", {'collection': collection, 'aliases': ext_res_mappings.keys()}) # find the plugin that supports this extension for plugin in ext_mgr.plugins.values(): ext_aliases = ext_mgr.get_plugin_supported_extension_aliases(plugin) for alias in ext_aliases: if (alias in ext_res_mappings and hasattr(plugin, 'get_%s' % collection)): # This plugin implements this resource return plugin LOG.warning(_LW("No plugin found for: %s"), collection) def _handle_plurals(collection): resource = attributes.PLURALS.get(collection) if not resource: if collection.endswith('ies'): resource = "%sy" % collection[:-3] else: resource = collection[:-1] attributes.PLURALS[collection] = resource return resource def initialize_legacy_extensions(legacy_extensions): leftovers = [] for ext in legacy_extensions: ext_resources = ext.get_resources() for ext_resource in ext_resources: controller = ext_resource.controller.controller collection = ext_resource.collection resource = _handle_plurals(collection) if manager.NeutronManager.get_plugin_for_resource(resource): continue # NOTE(blogan): It is possible that a plugin is tied to the # collection rather than the resource. An example of this is # the auto_allocated_topology extension. 
All other extensions # created their legacy resources with the collection/plural form # except auto_allocated_topology. Making that extension # conform with the rest of the extensions could invalidate this, but # it's possible out of tree extensions did the same thing. Since # the auto_allocated_topology resources have already been loaded # we definitely don't want to load them up with shim controllers, # so this will prevent that. if manager.NeutronManager.get_plugin_for_resource(collection): continue # NOTE(blogan): Since this does not have a plugin, we know this # extension has not been loaded and controllers for its resources # have not been created nor set. leftovers.append((collection, resource, controller)) # NOTE(blogan): at this point we have leftover extensions that never # had a controller set which will force us to use shim controllers. for leftover in leftovers: shim_controller = utils.ShimCollectionsController(*leftover) manager.NeutronManager.set_controller_for_resource( shim_controller.collection, shim_controller) def initialize_all(): ext_mgr = extensions.PluginAwareExtensionManager.get_instance() ext_mgr.extend_resources("2.0", attributes.RESOURCE_ATTRIBUTE_MAP) # At this stage we have a fully populated resource attribute map; # build Pecan controllers and routes for every resource (both core # and extensions) pecanized_exts = [ext for ext in ext_mgr.extensions.values() if hasattr(ext, 'get_pecan_controllers')] non_pecanized_exts = set(ext_mgr.extensions.values()) - set(pecanized_exts) pecan_controllers = {} for ext in pecanized_exts: LOG.info(_LI("Extension %s is pecan-aware. Fetching resources " "and controllers"), ext.get_name()) controllers = ext.get_pecan_controllers() # controllers is actually a list of pairs where the first element is # the collection name and the second the actual controller for (collection, coll_controller) in controllers: pecan_controllers[collection] = coll_controller for collection in attributes.RESOURCE_ATTRIBUTE_MAP: resource = _handle_plurals(collection) plugin = _plugin_for_resource(collection) if plugin: manager.NeutronManager.set_plugin_for_resource( resource, plugin) else: LOG.warning(_LW("No plugin found for resource:%s. API calls " "may not be correctly dispatched"), resource) controller = pecan_controllers.get(collection) if not controller: LOG.debug("Building controller for resource:%s", resource) controller = res_ctrl.CollectionsController(collection, resource) else: LOG.debug("There are already controllers for resource: %s", resource) manager.NeutronManager.set_controller_for_resource( controller.collection, controller) LOG.info(_LI("Added controller for resource %(resource)s " "via URI path segment:%(collection)s"), {'resource': resource, 'collection': collection}) initialize_legacy_extensions(non_pecanized_exts) # NOTE(salv-orlando): If you care about code quality, please read below # Hackiness is strong with the piece of code below. It is used for # populating resource plurals and registering resources with the quota # engine, but the methods it calls were not conceived with this aim. # Therefore it only leverages side-effects from those methods. Moreover, # as it is really not advisable to load an instance of # neutron.api.v2.router.APIRouter just to register resources with the # quota engine, core resources are explicitly registered here.
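    # For example (illustrative): the loop below registers core resources
    # such as 'network', 'subnet' and 'port' with the quota engine, which is
    # what makes quota enforcement work for requests served through the
    # Pecan pipeline.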
# TODO(salv-orlando): The Pecan WSGI support should provide its own # solution to manage resource plurals and registration of resources with # the quota engine for resource in router.RESOURCES.keys(): resource_registry.register_resource_by_name(resource) for ext in ext_mgr.extensions.values(): # make each extension populate its plurals if hasattr(ext, 'get_resources'): ext.get_resources() if hasattr(ext, 'get_extended_resources'): ext.get_extended_resources('v2.0') # Certain policy checks require that the extensions are loaded # and the RESOURCE_ATTRIBUTE_MAP populated before they can be # properly initialized. This can only be claimed with certainty # once this point in the code has been reached. In the event # that the policies have been initialized before this point, # calling reset will cause the next policy check to # re-initialize with all of the required data in place. policy.reset() neutron-8.4.0/neutron/pecan_wsgi/__init__.py0000664000567000056710000000000013044372736022277 0ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/pecan_wsgi/app.py0000664000567000056710000000636513044372760021341 0ustar jenkinsjenkins00000000000000# Copyright (c) 2015 Mirantis, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from keystonemiddleware import auth_token from oslo_config import cfg from oslo_middleware import cors from oslo_middleware import request_id import pecan from neutron.api import versions from neutron.common import exceptions as n_exc from neutron.pecan_wsgi import hooks from neutron.pecan_wsgi import startup CONF = cfg.CONF CONF.import_opt('bind_host', 'neutron.common.config') CONF.import_opt('bind_port', 'neutron.common.config') def setup_app(*args, **kwargs): config = { 'server': { 'port': CONF.bind_port, 'host': CONF.bind_host }, 'app': { 'root': 'neutron.pecan_wsgi.controllers.root.RootController', 'modules': ['neutron.pecan_wsgi'], } #TODO(kevinbenton): error templates } pecan_config = pecan.configuration.conf_from_dict(config) app_hooks = [ hooks.ExceptionTranslationHook(), # priority 100 hooks.ContextHook(), # priority 95 hooks.BodyValidationHook(), # priority 120 hooks.OwnershipValidationHook(), # priority 125 hooks.QuotaEnforcementHook(), # priority 130 hooks.NotifierHook(), # priority 135 hooks.PolicyHook(), # priority 140 ] app = pecan.make_app( pecan_config.app.root, debug=False, wrap_app=_wrap_app, force_canonical=False, hooks=app_hooks, guess_content_type_from_ext=True ) startup.initialize_all() return app def _wrap_app(app): app = request_id.RequestId(app) if cfg.CONF.auth_strategy == 'noauth': pass elif cfg.CONF.auth_strategy == 'keystone': app = auth_token.AuthProtocol(app, {}) else: raise n_exc.InvalidConfigurationOption( opt_name='auth_strategy', opt_value=cfg.CONF.auth_strategy) # version can be unauthenticated so it goes outside of auth app = versions.Versions(app) # This should be the last middleware in the list (which results in # it being the first in the middleware chain). 
This is to ensure # that any errors thrown by other middleware, such as an auth # middleware, are annotated with CORS headers, and thus accessible # by the browser. app = cors.CORS(app, cfg.CONF) app.set_latent( allow_headers=['X-Auth-Token', 'X-Identity-Status', 'X-Roles', 'X-Service-Catalog', 'X-User-Id', 'X-Tenant-Id', 'X-OpenStack-Request-ID'], allow_methods=['GET', 'PUT', 'POST', 'DELETE', 'PATCH'], expose_headers=['X-Auth-Token', 'X-Subject-Token', 'X-Service-Token', 'X-OpenStack-Request-ID'] ) return app neutron-8.4.0/neutron/pecan_wsgi/controllers/0000775000567000056710000000000013044373210022532 5ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/pecan_wsgi/controllers/router.py0000664000567000056710000001036013044372760024435 0ustar jenkinsjenkins00000000000000# Copyright (c) 2015 Taturiello Consulting, Meh. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron._i18n import _LE from oslo_log import log import pecan from pecan import request from neutron import manager from neutron.pecan_wsgi.controllers import resource from neutron.pecan_wsgi.controllers import utils LOG = log.getLogger(__name__) class RouterController(resource.ItemController): """Customize ResourceController for member actions""" ### Pecan generic controllers don't work very well with inheritance @utils.expose(generic=True) def index(self, *args, **kwargs): return super(RouterController, self).index(*args, **kwargs) @utils.when(index, method='HEAD') @utils.when(index, method='POST') @utils.when(index, method='PATCH') def not_supported(self): return super(RouterController, self).not_supported() @utils.when(index, method='PUT') def put(self, *args, **kwargs): return super(RouterController, self).put(*args, **kwargs) @utils.when(index, method='DELETE') def delete(self): return super(RouterController, self).delete() @utils.expose() def _lookup(self, member_action, *remainder): # This check is mainly for the l3-agents resource. If there isn't # a controller for it then we'll just assume it's a member action.
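# For example, a PUT to /v2.0/routers/<router_id>/add_router_interface # reaches this point with member_action='add_router_interface' and is # dispatched below to the plugin method of the same name.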
controller = manager.NeutronManager.get_controller_for_resource( member_action) if not controller: controller = RouterMemberActionController( self.resource, self.item, member_action) return controller, remainder class RoutersController(resource.CollectionsController): item_controller_class = RouterController def __init__(self): super(RoutersController, self).__init__('routers', 'router') class RouterMemberActionController(resource.ItemController): def __init__(self, resource, item, member_action): super(RouterMemberActionController, self).__init__(resource, item) self.member_action = member_action @utils.expose(generic=True) def index(self, *args, **kwargs): pecan.abort(405) @utils.when(index, method='HEAD') @utils.when(index, method='POST') @utils.when(index, method='PATCH') def not_supported(self): return super(RouterMemberActionController, self).not_supported() @utils.when(index, method='PUT') def put(self, *args, **kwargs): neutron_context = request.context['neutron_context'] LOG.debug("Processing member action %(action)s for resource " "%(resource)s identified by %(item)s", {'action': self.member_action, 'resource': self.resource, 'item': self.item}) # NOTE(salv-orlando): The following simply verifies that the plugin # has a method for a given action. It therefore enables plugins to # implement actions which are not part of the API specification. # Unfortunately the API extension descriptor does not do a good job # of sanctioning which actions are available on a given resource. # TODO(salv-orlando): prevent plugins from implementing actions # which are not part of the Neutron API spec try: member_action_method = getattr(self.plugin, self.member_action) return member_action_method(neutron_context, self.item, request.context['request_data']) except AttributeError: LOG.error(_LE("Action %(action)s is not defined on resource " "%(resource)s"), {'action': self.member_action, 'resource': self.resource}) pecan.abort(404) neutron-8.4.0/neutron/pecan_wsgi/controllers/__init__.py0000664000567000056710000000154413044372760024660 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron.pecan_wsgi.controllers import quota from neutron.pecan_wsgi.controllers import resource from neutron.pecan_wsgi.controllers import router CollectionsController = resource.CollectionsController QuotasController = quota.QuotasController RoutersController = router.RoutersController neutron-8.4.0/neutron/pecan_wsgi/controllers/quota.py0000664000567000056710000001335313044372760024253 0ustar jenkinsjenkins00000000000000# Copyright (c) 2015 Taturiello Consulting, Meh. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License.
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import cfg from oslo_log import log from oslo_utils import importutils import pecan from pecan import request from pecan import response from neutron._i18n import _ from neutron.api.v2 import attributes from neutron.common import constants from neutron.common import exceptions as n_exc from neutron.pecan_wsgi.controllers import utils from neutron.quota import resource_registry LOG = log.getLogger(__name__) RESOURCE_NAME = "quota" TENANT_ID_ATTR = {'tenant_id': {'allow_post': False, 'allow_put': False, 'required_by_policy': True, 'validate': {'type:string': attributes.TENANT_ID_MAX_LEN}, 'is_visible': True}} class QuotasController(utils.NeutronPecanController): def __init__(self): self._driver = importutils.import_class( cfg.CONF.QUOTAS.quota_driver ) super(QuotasController, self).__init__( "%ss" % RESOURCE_NAME, RESOURCE_NAME) def _check_admin(self, context, reason=_("Only admin can view or configure quota")): if not context.is_admin: raise n_exc.AdminRequired(reason=reason) @utils.expose() def _lookup(self, tenant_id, *remainder): return QuotaController(self._driver, tenant_id), remainder @utils.expose(generic=True) def index(self): neutron_context = request.context.get('neutron_context') # FIXME(salv-orlando): There shouldn't be any need to do this explicit # check. However some behaviours from the "old" extension have # been temporarily carried over here. self._check_admin(neutron_context) # TODO(salv-orlando): proper plurals management return {self.collection: self._driver.get_all_quotas( neutron_context, resource_registry.get_all_resources())} @utils.when(index, method='POST') @utils.when(index, method='PUT') @utils.when(index, method='DELETE') def not_supported(self): pecan.abort(405) class QuotaController(utils.NeutronPecanController): def __init__(self, _driver, tenant_id): self._driver = _driver self._tenant_id = tenant_id super(QuotaController, self).__init__( "%ss" % RESOURCE_NAME, RESOURCE_NAME) # Ensure limits for all registered resources are returned attr_dict = attributes.RESOURCE_ATTRIBUTE_MAP[self.collection] for quota_resource in resource_registry.get_all_resources().keys(): attr_dict[quota_resource] = { 'allow_post': False, 'allow_put': True, 'convert_to': attributes.convert_to_int, 'validate': { 'type:range': [-1, constants.DB_INTEGER_MAX_VALUE]}, 'is_visible': True} # The quota resource must always declare a tenant_id attribute, # otherwise the attribute will be stripped off when generating the # response attr_dict.update(TENANT_ID_ATTR) @utils.expose(generic=True) def index(self): return get_tenant_quotas(self._tenant_id, self._driver) @utils.when(index, method='PUT') def put(self, *args, **kwargs): neutron_context = request.context.get('neutron_context') # For PUT requests there's always going to be a single element quota_data = request.context['resources'][0] for key, value in quota_data.items(): self._driver.update_quota_limit( neutron_context, self._tenant_id, key, value) return get_tenant_quotas(self._tenant_id, self._driver) @utils.when(index, method='DELETE') def delete(self): neutron_context = request.context.get('neutron_context')
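# Deleting the per-tenant quota does not remove any resources; it # simply reverts the tenant to the configured default limits.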
self._driver.delete_tenant_quota(neutron_context, self._tenant_id) response.status = 204 @utils.when(index, method='POST') def not_supported(self): pecan.abort(405) def get_tenant_quotas(tenant_id, driver=None): if not driver: driver = importutils.import_class(cfg.CONF.QUOTAS.quota_driver) neutron_context = request.context.get('neutron_context') if tenant_id == 'tenant': # NOTE(salv-orlando): Read the following before the code in order # to avoid puking. # There is a weird undocumented behaviour of the Neutron quota API # as 'tenant' is used as an API action to return the identifier # of the tenant in the request context. This is used exclusively # for interaction with python-neutronclient and is a possibly # unnecessary 'whoami' API endpoint. Pending resolution of this # API issue, this controller will just special-case the magic # string 'tenant' (and only that string) and return the response # expected by python-neutronclient return {'tenant': {'tenant_id': neutron_context.tenant_id}} tenant_quotas = driver.get_tenant_quotas( neutron_context, resource_registry.get_all_resources(), tenant_id) tenant_quotas['tenant_id'] = tenant_id return {RESOURCE_NAME: tenant_quotas} neutron-8.4.0/neutron/pecan_wsgi/controllers/utils.py0000664000567000056710000000775513044372760024263 0ustar jenkinsjenkins00000000000000# Copyright (c) 2015 Taturiello Consulting, Meh. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import pecan from pecan import request from neutron.api.v2 import attributes as api_attributes from neutron import manager # Utility functions for Pecan controllers.
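# The expose/when wrappers below default Pecan's template and content # type to JSON so individual controllers don't have to repeat it. A # minimal, illustrative controller using them would look like: # class ExampleController(object): # @expose(generic=True) # def index(self): # return {'examples': []} # @when(index, method='POST') # def post(self): # pecan.abort(405)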
def expose(*args, **kwargs): """Helper function so we don't have to specify json for everything.""" kwargs.setdefault('content_type', 'application/json') kwargs.setdefault('template', 'json') return pecan.expose(*args, **kwargs) def when(index, *args, **kwargs): """Helper function so we don't have to specify json for everything.""" kwargs.setdefault('content_type', 'application/json') kwargs.setdefault('template', 'json') return index.when(*args, **kwargs) class NeutronPecanController(object): def __init__(self, collection, resource): # Ensure dashes are always replaced with underscores self.collection = collection and collection.replace('-', '_') self.resource = resource and resource.replace('-', '_') self._resource_info = api_attributes.get_collection_info(collection) self._plugin = None @property def plugin(self): if not self._plugin: self._plugin = manager.NeutronManager.get_plugin_for_resource( self.resource) return self._plugin class ShimRequest(object): def __init__(self, context): self.context = context class ShimItemController(NeutronPecanController): def __init__(self, collection, resource, item, controller): super(ShimItemController, self).__init__(collection, resource) self.item = item self.controller_delete = getattr(controller, 'delete', None) @expose(generic=True) def index(self): pecan.abort(405) @when(index, method='DELETE') def delete(self): if not self.controller_delete: pecan.abort(405) shim_request = ShimRequest(request.context['neutron_context']) uri_identifiers = request.context['uri_identifiers'] return self.controller_delete(shim_request, self.item, **uri_identifiers) class ShimCollectionsController(NeutronPecanController): def __init__(self, collection, resource, controller): super(ShimCollectionsController, self).__init__(collection, resource) self.controller = controller self.controller_index = getattr(controller, 'index', None) self.controller_create = getattr(controller, 'create', None) @expose(generic=True) def index(self): if not self.controller_index: pecan.abort(405) shim_request = ShimRequest(request.context['neutron_context']) uri_identifiers = request.context['uri_identifiers'] return self.controller_index(shim_request, **uri_identifiers) @when(index, method='POST') def create(self): if not self.controller_create: pecan.abort(405) shim_request = ShimRequest(request.context['neutron_context']) uri_identifiers = request.context['uri_identifiers'] return self.controller_create(shim_request, request.json, **uri_identifiers) @expose() def _lookup(self, item, *remainder): request.context['resource'] = self.resource request.context['resource_id'] = item return ShimItemController(self.collection, self.resource, item, self.controller), remainder neutron-8.4.0/neutron/pecan_wsgi/controllers/root.py0000664000567000056710000001042313044372760024100 0ustar jenkinsjenkins00000000000000# Copyright (c) 2015 Mirantis, Inc. # Copyright (c) 2015 Rackspace, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from oslo_log import log import pecan from pecan import request from neutron._i18n import _LW from neutron.api.views import versions as versions_view from neutron import manager from neutron.pecan_wsgi.controllers import extensions as ext_ctrl from neutron.pecan_wsgi.controllers import utils LOG = log.getLogger(__name__) _VERSION_INFO = {} def _load_version_info(version_info): assert version_info['id'] not in _VERSION_INFO _VERSION_INFO[version_info['id']] = version_info def _get_version_info(): return _VERSION_INFO.values() class RootController(object): @utils.expose(generic=True) def index(self): # NOTE(kevinbenton): The pecan framework does not handle # any requests to the root because they are intercepted # by the 'version' returning wrapper. pass @utils.when(index, method='GET') @utils.when(index, method='HEAD') @utils.when(index, method='POST') @utils.when(index, method='PATCH') @utils.when(index, method='PUT') @utils.when(index, method='DELETE') def not_supported(self): pecan.abort(405) class V2Controller(object): # Same data structure as neutron.api.versions.Versions for API backward # compatibility version_info = { 'id': 'v2.0', 'status': 'CURRENT' } _load_version_info(version_info) extensions = ext_ctrl.ExtensionsController() @utils.expose(generic=True) def index(self): builder = versions_view.get_view_builder(pecan.request) return dict(version=builder.build(self.version_info)) @utils.when(index, method='HEAD') @utils.when(index, method='POST') @utils.when(index, method='PATCH') @utils.when(index, method='PUT') @utils.when(index, method='DELETE') def not_supported(self): pecan.abort(405) @utils.expose() def _lookup(self, collection, *remainder): # if collection exists in the extension to service plugins map then # we are assuming that collection is the service plugin and # needs to be remapped. # Example: https://neutron.endpoint/v2.0/lbaas/loadbalancers if (remainder and manager.NeutronManager.get_service_plugin_by_path_prefix( collection)): collection = remainder[0] remainder = remainder[1:] controller = manager.NeutronManager.get_controller_for_resource( collection) if not controller: LOG.warning(_LW("No controller found for: %s - returning response " "code 404"), collection) pecan.abort(404) # Store resource and collection names in pecan request context so that # hooks can leverage them if necessary. The following code uses # attributes from the controller instance to ensure names have been # properly sanitized (eg: replacing dashes with underscores) request.context['resource'] = controller.resource request.context['collection'] = controller.collection # NOTE(blogan): initialize a dict to store the ids of the items walked # in the path for example: /networks/1234 would cause uri_identifiers # to contain: {'network_id': '1234'} # This is for backwards compatibility with legacy extensions that # defined their own controllers and expected kwargs to be passed in # with the uri_identifiers request.context['uri_identifiers'] = {} return controller, remainder # This controller cannot be specified directly as a member of RootController # as its path is not a valid python identifier pecan.route(RootController, 'v2.0', V2Controller()) neutron-8.4.0/neutron/pecan_wsgi/controllers/extensions.py0000664000567000056710000000413713044372760025321 0ustar jenkinsjenkins00000000000000# All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import pecan from neutron._i18n import _ from neutron.api import extensions from neutron.pecan_wsgi.controllers import utils class ExtensionsController(object): @utils.expose() def _lookup(self, alias, *remainder): return ExtensionController(alias), remainder @utils.expose(generic=True) def index(self): ext_mgr = extensions.PluginAwareExtensionManager.get_instance() exts = [extensions.ExtensionController._translate(ext) for ext in ext_mgr.extensions.values()] return {'extensions': exts} @utils.when(index, method='POST') @utils.when(index, method='PUT') @utils.when(index, method='DELETE') @utils.when(index, method='HEAD') @utils.when(index, method='PATCH') def not_supported(self): pecan.abort(405) class ExtensionController(object): def __init__(self, alias): self.alias = alias @utils.expose(generic=True) def index(self): ext_mgr = extensions.PluginAwareExtensionManager.get_instance() ext = ext_mgr.extensions.get(self.alias, None) if not ext: pecan.abort( 404, detail=_("Extension with alias %s " "does not exist") % self.alias) return {'extension': extensions.ExtensionController._translate(ext)} @utils.when(index, method='POST') @utils.when(index, method='PUT') @utils.when(index, method='DELETE') @utils.when(index, method='HEAD') @utils.when(index, method='PATCH') def not_supported(self): pecan.abort(405) neutron-8.4.0/neutron/pecan_wsgi/controllers/resource.py0000664000567000056710000001177113044372760024753 0ustar jenkinsjenkins00000000000000# All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_log import log as logging import pecan from pecan import request from neutron._i18n import _LW from neutron.api import api_common from neutron import manager from neutron.pecan_wsgi.controllers import utils LOG = logging.getLogger(__name__) class ItemController(utils.NeutronPecanController): def __init__(self, resource, item): super(ItemController, self).__init__(None, resource) self.item = item @utils.expose(generic=True) def index(self, *args, **kwargs): return self.get() def get(self, *args, **kwargs): getter = getattr(self.plugin, 'get_%s' % self.resource) neutron_context = request.context['neutron_context'] return {self.resource: getter(neutron_context, self.item)} @utils.when(index, method='HEAD') @utils.when(index, method='POST') @utils.when(index, method='PATCH') def not_supported(self): pecan.abort(405) @utils.when(index, method='PUT') def put(self, *args, **kwargs): neutron_context = request.context['neutron_context'] resources = request.context['resources'] # TODO(kevinbenton): bulk?
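# Plugin methods are resolved purely by naming convention: for a # 'network' resource, the call below invokes # plugin.update_network(context, id, {'network': {...}}).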
updater = getattr(self.plugin, 'update_%s' % self.resource) # Bulk update is not supported; 'resources' always contains a single # element data = {self.resource: resources[0]} return {self.resource: updater(neutron_context, self.item, data)} @utils.when(index, method='DELETE') def delete(self): # TODO(kevinbenton): setting code could be in a decorator pecan.response.status = 204 neutron_context = request.context['neutron_context'] deleter = getattr(self.plugin, 'delete_%s' % self.resource) return deleter(neutron_context, self.item) @utils.expose() def _lookup(self, collection, *remainder): request.context['collection'] = collection controller = manager.NeutronManager.get_controller_for_resource( collection) if not controller: LOG.warning(_LW("No controller found for: %s - returning response " "code 404"), collection) pecan.abort(404) return controller, remainder class CollectionsController(utils.NeutronPecanController): item_controller_class = ItemController @utils.expose() def _lookup(self, item, *remainder): # Store resource identifier in request context request.context['resource_id'] = item uri_identifier = '%s_id' % self.resource request.context['uri_identifiers'][uri_identifier] = item return self.item_controller_class(self.resource, item), remainder @utils.expose(generic=True) def index(self, *args, **kwargs): return self.get(*args, **kwargs) def get(self, *args, **kwargs): # list request # TODO(kevinbenton): use user-provided fields in call to plugin # after making sure policy enforced fields remain kwargs.pop('fields', None) _listify = lambda x: x if isinstance(x, list) else [x] filters = api_common.get_filters_from_dict( {k: _listify(v) for k, v in kwargs.items()}, self._resource_info, skips=['fields', 'sort_key', 'sort_dir', 'limit', 'marker', 'page_reverse']) lister = getattr(self.plugin, 'get_%s' % self.collection) neutron_context = request.context['neutron_context'] return {self.collection: lister(neutron_context, filters=filters)} @utils.when(index, method='HEAD') @utils.when(index, method='PATCH') @utils.when(index, method='PUT') @utils.when(index, method='DELETE') def not_supported(self): pecan.abort(405) @utils.when(index, method='POST') def post(self, *args, **kwargs): # TODO(kevinbenton): emulated bulk! resources = request.context['resources'] pecan.response.status = 201 return self.create(resources) def create(self, resources): if len(resources) > 1: # Bulk! method = 'create_%s_bulk' % self.resource key = self.collection data = {key: [{self.resource: res} for res in resources]} else: method = 'create_%s' % self.resource key = self.resource data = {key: resources[0]} creator = getattr(self.plugin, method) neutron_context = request.context['neutron_context'] return {key: creator(neutron_context, data)} neutron-8.4.0/neutron/pecan_wsgi/constants.py0000664000567000056710000000132313044372736022565 0ustar jenkinsjenkins00000000000000# All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License.
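# Maps HTTP methods to the verbs used when building plugin calls, policy # actions and notification events; e.g. a POST on networks maps to # 'create', which yields create_network and network.create.start/end.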
ACTION_MAP = {'POST': 'create', 'PUT': 'update', 'GET': 'get', 'DELETE': 'delete'} neutron-8.4.0/neutron/pecan_wsgi/hooks/0000775000567000056710000000000013044373210021307 5ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/pecan_wsgi/hooks/notifier.py0000664000567000056710000001531613044372760023517 0ustar jenkinsjenkins00000000000000# Copyright (c) 2015 Mirantis, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import cfg from oslo_log import log from oslo_serialization import jsonutils from pecan import hooks from neutron.api.rpc.agentnotifiers import dhcp_rpc_agent_api from neutron.common import constants from neutron.common import rpc as n_rpc from neutron import manager from neutron.pecan_wsgi import constants as pecan_constants LOG = log.getLogger(__name__) class NotifierHook(hooks.PecanHook): priority = 135 @property def _notifier(self): if not hasattr(self, '_notifier_inst'): self._notifier_inst = n_rpc.get_notifier('network') return self._notifier_inst def _nova_notify(self, action, resource, *args): action_resource = '%s_%s' % (action, resource) if not hasattr(self, '_nova_notifier'): # this is scoped to avoid a dependency on nova client when nova # notifications aren't enabled from neutron.notifiers import nova self._nova_notifier = nova.Notifier() self._nova_notifier.send_network_change(action_resource, *args) def _notify_dhcp_agent(self, context, resource_name, action, resources): plugin = manager.NeutronManager.get_plugin_for_resource(resource_name) notifier_method = '%s.%s.end' % (resource_name, action) # use plugin's dhcp notifier, if this is already instantiated agent_notifiers = getattr(plugin, 'agent_notifiers', {}) dhcp_agent_notifier = ( agent_notifiers.get(constants.AGENT_TYPE_DHCP) or dhcp_rpc_agent_api.DhcpAgentNotifyAPI() ) # The DHCP Agent does not accept bulk notifications for resource in resources: item = {resource_name: resource} LOG.debug("Sending DHCP agent notification for: %s", item) dhcp_agent_notifier.notify(context, item, notifier_method) def before(self, state): if state.request.method not in ('POST', 'PUT', 'DELETE'): return resource = state.request.context.get('resource') if not resource: return action = pecan_constants.ACTION_MAP.get(state.request.method) event = '%s.%s.start' % (resource, action) if action in ('create', 'update'): # notifier just gets plain old body without any treatment other # than the population of the object ID being operated on payload = state.request.json.copy() if action == 'update': payload['id'] = state.request.context.get('resource_id') elif action == 'delete': resource_id = state.request.context.get('resource_id') payload = {resource + '_id': resource_id} self._notifier.info(state.request.context.get('neutron_context'), event, payload) def after(self, state): # if the after hook is executed the request completed successfully and # therefore notifications must be sent resource_name = state.request.context.get('resource') collection_name = state.request.context.get('collection') 
neutron_context = state.request.context.get('neutron_context') if not resource_name: LOG.debug("Skipping NotifierHook processing as there was no " "resource associated with the request") return action = pecan_constants.ACTION_MAP.get(state.request.method) if not action or action == 'get': LOG.debug("No notification will be sent for action: %s", action) return if action == 'delete': # The object has been deleted, so we must notify the agent with the # data of the original object data = {collection_name: state.request.context.get('original_resources', [])} else: try: data = jsonutils.loads(state.response.body) except ValueError: # The response body is empty or not valid JSON; fall back to # an empty dict so that no notification is attempted below # rather than failing with a NameError data = {} resources = [] if data: if resource_name in data: resources = [data[resource_name]] elif collection_name in data: # This was a bulk request resources = data[collection_name] # Send a notification only if a resource can be identified in the # response. This means that for operations such as add_router_interface # no notification will be sent if cfg.CONF.dhcp_agent_notification and data: self._notify_dhcp_agent( neutron_context, resource_name, action, resources) if cfg.CONF.notify_nova_on_port_data_changes: orig = {} if action == 'update': orig = state.request.context.get('original_resources')[0] elif action == 'delete': # NOTE(kevinbenton): the nova notifier is a bit strange because # it expects the original to be in the last argument on a # delete rather than in the 'original_obj' position resources = ( state.request.context.get('original_resources') or []) for resource in resources: self._nova_notify(action, resource_name, orig, {resource_name: resource}) event = '%s.%s.end' % (resource_name, action) if action == 'delete': if state.response.status_int > 300: # don't notify when unsuccessful # NOTE(kevinbenton): we may want to be more strict with the # response codes return resource_id = state.request.context.get('resource_id') payload = {resource_name + '_id': resource_id} elif action in ('create', 'update'): if not resources: # create/update did not complete so no notification return if len(resources) > 1: payload = {collection_name: resources} else: payload = {resource_name: resources[0]} else: return self._notifier.info(neutron_context, event, payload) neutron-8.4.0/neutron/pecan_wsgi/hooks/__init__.py0000664000567000056710000000252513044372760023435 0ustar jenkinsjenkins00000000000000# Copyright (c) 2015 Mirantis, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License.
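# Convenience re-exports: the WSGI app (neutron.pecan_wsgi.app) installs # these hooks as a single list and Pecan orders them by their 'priority' # attribute (lower values are expected to run first on the way in).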
from neutron.pecan_wsgi.hooks import body_validation from neutron.pecan_wsgi.hooks import context from neutron.pecan_wsgi.hooks import notifier from neutron.pecan_wsgi.hooks import ownership_validation from neutron.pecan_wsgi.hooks import policy_enforcement from neutron.pecan_wsgi.hooks import quota_enforcement from neutron.pecan_wsgi.hooks import translation ExceptionTranslationHook = translation.ExceptionTranslationHook ContextHook = context.ContextHook BodyValidationHook = body_validation.BodyValidationHook OwnershipValidationHook = ownership_validation.OwnershipValidationHook PolicyHook = policy_enforcement.PolicyHook QuotaEnforcementHook = quota_enforcement.QuotaEnforcementHook NotifierHook = notifier.NotifierHook neutron-8.4.0/neutron/pecan_wsgi/hooks/ownership_validation.py0000664000567000056710000000412213044372760026121 0ustar jenkinsjenkins00000000000000# Copyright (c) 2015 Mirantis, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from pecan import hooks import webob from neutron._i18n import _ from neutron import manager class OwnershipValidationHook(hooks.PecanHook): priority = 125 def before(self, state): if state.request.method != 'POST': return for item in state.request.context.get('resources', []): self._validate_network_tenant_ownership(state, item) def _validate_network_tenant_ownership(self, state, resource_item): # TODO(salvatore-orlando): consider whether this check can be folded # in the policy engine neutron_context = state.request.context.get('neutron_context') resource = state.request.context.get('resource') if (neutron_context.is_admin or neutron_context.is_advsvc or resource not in ('port', 'subnet')): return plugin = manager.NeutronManager.get_plugin() network = plugin.get_network(neutron_context, resource_item['network_id']) # do not perform the check on shared networks if network.get('shared'): return network_owner = network['tenant_id'] if network_owner != resource_item['tenant_id']: msg = _("Tenant %(tenant_id)s not allowed to " "create %(resource)s on this network") raise webob.exc.HTTPForbidden(msg % { "tenant_id": resource_item['tenant_id'], "resource": resource, }) neutron-8.4.0/neutron/pecan_wsgi/hooks/quota_enforcement.py0000664000567000056710000000555413044372760025421 0ustar jenkinsjenkins00000000000000# Copyright (c) 2015 Mirantis, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
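# Quota enforcement spans the request lifecycle: 'before' makes # reservations for POST requests, and 'after' commits them only once # the request has completed successfully.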
import collections from oslo_log import log as logging from pecan import hooks from neutron.common import exceptions from neutron import manager from neutron import quota from neutron.quota import resource_registry LOG = logging.getLogger(__name__) class QuotaEnforcementHook(hooks.PecanHook): priority = 130 def before(self, state): resource = state.request.context.get('resource') items = state.request.context.get('resources') if state.request.method != 'POST' or not resource or not items: return plugin = manager.NeutronManager.get_plugin_for_resource(resource) # Store requested resource amounts, grouping them by tenant deltas = collections.Counter(map(lambda x: x['tenant_id'], items)) # Perform quota enforcement reservations = [] neutron_context = state.request.context.get('neutron_context') for (tenant_id, delta) in deltas.items(): try: reservation = quota.QUOTAS.make_reservation( neutron_context, tenant_id, {resource: delta}, plugin) LOG.debug("Made reservation on behalf of %(tenant_id)s " "for: %(delta)s", {'tenant_id': tenant_id, 'delta': {resource: delta}}) reservations.append(reservation) except exceptions.QuotaResourceUnknown as e: # Quotas cannot be enforced on this resource LOG.debug(e) # Save the reservations in the request context so that they can be # retrieved in the 'after' hook state.request.context['reservations'] = reservations def after(self, state): # Commit reservation(s) reservations = state.request.context.get('reservations') if not reservations: return neutron_context = state.request.context.get('neutron_context') with neutron_context.session.begin(): # Commit the reservation(s) for reservation in reservations: quota.QUOTAS.commit_reservation( neutron_context, reservation.reservation_id) resource_registry.set_resources_dirty(neutron_context) neutron-8.4.0/neutron/pecan_wsgi/hooks/context.py0000664000567000056710000000460613044372760023364 0ustar jenkinsjenkins00000000000000# Copyright 2012 New Dream Network, LLC (DreamHost) # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_middleware import request_id from pecan import hooks from neutron import context class ContextHook(hooks.PecanHook): """Configures a request context and attaches it to the request. The following HTTP request headers are used: X-User-Id or X-User: Used for context.user_id. X-Project-Id: Used for context.tenant_id. X-Project-Name: Used for context.tenant_name. X-Auth-Token: Used for context.auth_token. X-Roles: Used for setting the context.is_admin flag to either True or False. The flag is set to True if X-Roles contains an 'administrator' or 'admin' substring; otherwise it is set to False.
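For example (illustrative values): a request carrying 'X-User-Id: u1', 'X-Project-Id: p1' and 'X-Roles: admin' yields a context for tenant p1 whose is_admin flag is True.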
""" priority = 95 def before(self, state): user_id = state.request.headers.get('X-User-Id') user_id = state.request.headers.get('X-User', user_id) user_name = state.request.headers.get('X-User-Name', '') tenant_id = state.request.headers.get('X-Project-Id') tenant_name = state.request.headers.get('X-Project-Name') auth_token = state.request.headers.get('X-Auth-Token') roles = state.request.headers.get('X-Roles', '').split(',') roles = [r.strip() for r in roles] creds = {'roles': roles} req_id = state.request.headers.get(request_id.ENV_REQUEST_ID) # TODO(kevinbenton): is_admin logic # Create a context with the authentication data ctx = context.Context(user_id, tenant_id=tenant_id, roles=creds['roles'], user_name=user_name, tenant_name=tenant_name, request_id=req_id, auth_token=auth_token) # Inject the context... state.request.context['neutron_context'] = ctx neutron-8.4.0/neutron/pecan_wsgi/hooks/body_validation.py0000664000567000056710000000477413044372760025055 0ustar jenkinsjenkins00000000000000# Copyright (c) 2015 Mirantis, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_log import log from oslo_serialization import jsonutils from pecan import hooks from neutron.api.v2 import attributes as v2_attributes from neutron.api.v2 import base as v2_base LOG = log.getLogger(__name__) class BodyValidationHook(hooks.PecanHook): priority = 120 def before(self, state): if state.request.method not in ('POST', 'PUT'): return resource = state.request.context.get('resource') collection = state.request.context.get('collection') neutron_context = state.request.context['neutron_context'] is_create = state.request.method == 'POST' if not resource: return try: json_data = jsonutils.loads(state.request.body) except ValueError: LOG.debug("No JSON Data in %(method)s request for %(collection)s", {'method': state.request.method, 'collections': collection}) return # Raw data are consumed by member actions such as add_router_interface state.request.context['request_data'] = json_data if not (resource in json_data or collection in json_data): # there is no resource in the request. This can happen when a # member action is being processed or on agent scheduler operations return # Prepare data to be passed to the plugin from request body data = v2_base.Controller.prepare_request_body( neutron_context, json_data, is_create, resource, v2_attributes.get_collection_info(collection), allow_bulk=is_create) if collection in data: state.request.context['resources'] = [item[resource] for item in data[collection]] else: state.request.context['resources'] = [data[resource]] neutron-8.4.0/neutron/pecan_wsgi/hooks/translation.py0000664000567000056710000000275613044372760024242 0ustar jenkinsjenkins00000000000000# Copyright (c) 2015 Mirantis, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_log import log as logging from pecan import hooks import webob.exc from neutron._i18n import _, _LE from neutron.api.v2 import base as v2base LOG = logging.getLogger(__name__) class ExceptionTranslationHook(hooks.PecanHook): def on_error(self, state, e): # if it's already an http error, just return to let it go through if isinstance(e, webob.exc.WSGIHTTPException): return for exc_class, to_class in v2base.FAULT_MAP.items(): if isinstance(e, exc_class): raise to_class(getattr(e, 'msg', e.message)) # leaked unexpected exception, convert to boring old 500 error and # hide message from user in case it contained sensitive details LOG.exception(_LE("An unexpected exception was caught: %s"), e) return webob.exc.HTTPInternalServerError( _("An unexpected internal error occurred.")) neutron-8.4.0/neutron/pecan_wsgi/hooks/policy_enforcement.py0000664000567000056710000002305013044372760025556 0ustar jenkinsjenkins00000000000000# Copyright (c) 2015 Mirantis, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import copy from oslo_policy import policy as oslo_policy from oslo_utils import excutils from pecan import hooks import webob from neutron._i18n import _ from neutron.api.v2 import attributes as v2_attributes from neutron.common import constants as const from neutron.extensions import quotasv2 from neutron import manager from neutron.pecan_wsgi import constants as pecan_constants from neutron.pecan_wsgi.controllers import quota from neutron import policy def _custom_getter(resource, resource_id): """Helper function to retrieve resources not served by any plugin.""" if resource == quotasv2.RESOURCE_NAME: return quota.get_tenant_quotas(resource_id)[quotasv2.RESOURCE_NAME] def fetch_resource(neutron_context, resource, resource_id): attrs = v2_attributes.get_resource_info(resource) if not attrs: # this isn't a request for a normal resource. it could be # an action like removing a network from a dhcp agent. # return None and assume the custom controller for this will # handle the necessary logic. return field_list = [name for (name, value) in attrs.items() if (value.get('required_by_policy') or value.get('primary_key') or 'default' not in value)] plugin = manager.NeutronManager.get_plugin_for_resource(resource) if plugin: getter = getattr(plugin, 'get_%s' % resource) # TODO(kevinbenton): the parent_id logic currently in base.py return getter(neutron_context, resource_id, fields=field_list) else: # Some legit resources, like quota, do not have a plugin yet. # Retrieving the original object is nevertheless important # for policy checks. 
return _custom_getter(resource, resource_id) class PolicyHook(hooks.PecanHook): priority = 140 def before(self, state): # This hook should be run only for PUT, POST and DELETE methods and for # requests targeting a neutron resource resources = state.request.context.get('resources', []) if state.request.method not in ('POST', 'PUT', 'DELETE'): return # As this routine will likely alter the resources, do a shallow copy resources_copy = resources[:] neutron_context = state.request.context.get('neutron_context') resource = state.request.context.get('resource') # If there is no resource for this request, don't bother running authZ # policies if not resource: return collection = state.request.context.get('collection') needs_prefetch = (state.request.method == 'PUT' or state.request.method == 'DELETE') policy.init() action = '%s_%s' % (pecan_constants.ACTION_MAP[state.request.method], resource) # NOTE(salv-orlando): As bulk updates are not supported, in case of PUT # requests there will be only a single item to process, and its # identifier would have been already retrieved by the lookup process; # in the case of DELETE requests there won't be any item to process in # the request body original_resources = [] if needs_prefetch: try: item = resources_copy.pop() except IndexError: # Oops... this was a delete after all! item = {} resource_id = state.request.context.get('resource_id') resource_obj = fetch_resource(neutron_context, resource, resource_id) if resource_obj: original_resources.append(resource_obj) obj = copy.copy(resource_obj) obj.update(item) obj[const.ATTRIBUTES_TO_UPDATE] = item.keys() # Put the item back in the list so that policies can be # enforced resources_copy.append(obj) # TODO(salv-orlando): as other hooks might need to prefetch resources, # store them in the request context. However, this should be done in a # separate hook which is conveniently called before all other hooks state.request.context['original_resources'] = original_resources for item in resources_copy: try: policy.enforce( neutron_context, action, item, pluralized=collection) except oslo_policy.PolicyNotAuthorized: with excutils.save_and_reraise_exception() as ctxt: # If a tenant is modifying its own object, it's safe to # return a 403. Otherwise, pretend that it doesn't exist # to avoid giving away information.
if (needs_prefetch and neutron_context.tenant_id != item['tenant_id']): ctxt.reraise = False msg = _('The resource could not be found.') raise webob.exc.HTTPNotFound(msg) def after(self, state): neutron_context = state.request.context.get('neutron_context') resource = state.request.context.get('resource') collection = state.request.context.get('collection') if not resource: # can't filter a resource we don't recognize return # NOTE(kevinbenton): extension listing isn't controlled by policy if resource == 'extension': return try: data = state.response.json except ValueError: return action = '%s_%s' % (pecan_constants.ACTION_MAP[state.request.method], resource) if not data or (resource not in data and collection not in data): return is_single = resource in data key = resource if is_single else collection to_process = [data[resource]] if is_single else data[collection] # in the single case, we enforce which raises on violation # in the plural case, we just check so violating items are hidden policy_method = policy.enforce if is_single else policy.check plugin = manager.NeutronManager.get_plugin_for_resource(resource) try: resp = [self._get_filtered_item(state.request, resource, collection, item) for item in to_process if (state.request.method != 'GET' or policy_method(neutron_context, action, item, plugin=plugin, pluralized=collection))] except oslo_policy.PolicyNotAuthorized as e: # This exception must be explicitly caught as the exception # translation hook won't be called if an error occurs in the # 'after' handler. raise webob.exc.HTTPForbidden(e.message) if is_single: resp = resp[0] state.response.json = {key: resp} def _get_filtered_item(self, request, resource, collection, data): neutron_context = request.context.get('neutron_context') to_exclude = self._exclude_attributes_by_policy( neutron_context, resource, collection, data) return self._filter_attributes(request, data, to_exclude) def _filter_attributes(self, request, data, fields_to_strip): # TODO(kevinbenton): this works but we didn't allow the plugin to # only fetch the fields we are interested in. consider moving this # to the call user_fields = request.params.getall('fields') return dict(item for item in data.items() if (item[0] not in fields_to_strip and (not user_fields or item[0] in user_fields))) def _exclude_attributes_by_policy(self, context, resource, collection, data): """Identifies attributes to exclude according to authZ policies. Return a list of attribute names which should be stripped from the response returned to the user because the user is not authorized to see them. """ attributes_to_exclude = [] for attr_name in data.keys(): attr_data = v2_attributes.get_resource_info( resource).get(attr_name) if attr_data and attr_data['is_visible']: if policy.check( context, # NOTE(kevinbenton): this used to reference a # _plugin_handlers dict, why? 'get_%s:%s' % (resource, attr_name), data, might_not_exist=True, pluralized=collection): # this attribute is visible, check next one continue # if the code reaches this point then either the policy check # failed or the attribute was not visible in the first place attributes_to_exclude.append(attr_name) return attributes_to_exclude neutron-8.4.0/neutron/context.py0000664000567000056710000001014413044372760020114 0ustar jenkinsjenkins00000000000000# Copyright 2012 OpenStack Foundation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Context: context for security/db session.""" import copy import datetime from oslo_context import context as oslo_context from neutron.db import api as db_api from neutron import policy class ContextBase(oslo_context.RequestContext): """Security context and request information. Represents the user taking a given action within the system. """ def __init__(self, user_id, tenant_id, is_admin=None, roles=None, timestamp=None, request_id=None, tenant_name=None, user_name=None, overwrite=True, auth_token=None, is_advsvc=None, **kwargs): """Object initialization. :param overwrite: Set to False to ensure that the greenthread local copy of the index is not overwritten. :param kwargs: Extra arguments that might be present, but we ignore because they possibly came in from older rpc messages. """ super(ContextBase, self).__init__(auth_token=auth_token, user=user_id, tenant=tenant_id, is_admin=is_admin, request_id=request_id, overwrite=overwrite) self.user_name = user_name self.tenant_name = tenant_name if not timestamp: timestamp = datetime.datetime.utcnow() self.timestamp = timestamp self.roles = roles or [] self.is_advsvc = is_advsvc if self.is_advsvc is None: self.is_advsvc = self.is_admin or policy.check_is_advsvc(self) if self.is_admin is None: self.is_admin = policy.check_is_admin(self) @property def project_id(self): return self.tenant @property def tenant_id(self): return self.tenant @tenant_id.setter def tenant_id(self, tenant_id): self.tenant = tenant_id @property def user_id(self): return self.user @user_id.setter def user_id(self, user_id): self.user = user_id def to_dict(self): context = super(ContextBase, self).to_dict() context.update({ 'user_id': self.user_id, 'tenant_id': self.tenant_id, 'project_id': self.project_id, 'roles': self.roles, 'timestamp': str(self.timestamp), 'tenant_name': self.tenant_name, 'project_name': self.tenant_name, 'user_name': self.user_name, }) return context @classmethod def from_dict(cls, values): return cls(**values) def elevated(self): """Return a version of this context with admin flag set.""" context = copy.copy(self) context.is_admin = True if 'admin' not in [x.lower() for x in context.roles]: context.roles = context.roles + ["admin"] return context class Context(ContextBase): def __init__(self, *args, **kwargs): super(Context, self).__init__(*args, **kwargs) self._session = None @property def session(self): if self._session is None: self._session = db_api.get_session() return self._session def get_admin_context(): return Context(user_id=None, tenant_id=None, is_admin=True, overwrite=False) def get_admin_context_without_session(): return ContextBase(user_id=None, tenant_id=None, is_admin=True) neutron-8.4.0/neutron/opts.py0000664000567000056710000002257413044372760017427 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import copy import itertools import operator from keystoneauth1 import loading as ks_loading from oslo_config import cfg import neutron.agent.common.config import neutron.agent.common.ovs_lib import neutron.agent.dhcp.config import neutron.agent.l2.extensions.manager import neutron.agent.l3.config import neutron.agent.l3.ha import neutron.agent.linux.interface import neutron.agent.linux.pd import neutron.agent.linux.ra import neutron.agent.metadata.config import neutron.agent.ovsdb.api import neutron.agent.securitygroups_rpc import neutron.db.agents_db import neutron.db.agentschedulers_db import neutron.db.dvr_mac_db import neutron.db.extraroute_db import neutron.db.l3_agentschedulers_db import neutron.db.l3_dvr_db import neutron.db.l3_gwmode_db import neutron.db.l3_hamode_db import neutron.db.migration.cli import neutron.extensions.allowedaddresspairs import neutron.extensions.l3 import neutron.extensions.securitygroup import neutron.openstack.common.cache.cache import neutron.plugins.ml2.config import neutron.plugins.ml2.drivers.agent.config import neutron.plugins.ml2.drivers.linuxbridge.agent.common.config import neutron.plugins.ml2.drivers.macvtap.agent.config import neutron.plugins.ml2.drivers.mech_sriov.agent.common.config import neutron.plugins.ml2.drivers.mech_sriov.mech_driver.mech_driver import neutron.plugins.ml2.drivers.openvswitch.agent.common.config import neutron.plugins.ml2.drivers.type_flat import neutron.plugins.ml2.drivers.type_geneve import neutron.plugins.ml2.drivers.type_gre import neutron.plugins.ml2.drivers.type_vlan import neutron.plugins.ml2.drivers.type_vxlan import neutron.quota import neutron.service import neutron.services.metering.agents.metering_agent import neutron.services.qos.notification_drivers.manager import neutron.wsgi NOVA_GROUP = 'nova' CONF = cfg.CONF deprecations = {'nova.cafile': [cfg.DeprecatedOpt('ca_certificates_file', group=NOVA_GROUP)], 'nova.insecure': [cfg.DeprecatedOpt('api_insecure', group=NOVA_GROUP)], 'nova.timeout': [cfg.DeprecatedOpt('url_timeout', group=NOVA_GROUP)]} _nova_options = ks_loading.register_session_conf_options( CONF, NOVA_GROUP, deprecated_opts=deprecations) def list_agent_opts(): return [ ('agent', itertools.chain( neutron.agent.common.config.ROOT_HELPER_OPTS, neutron.agent.common.config.AGENT_STATE_OPTS, neutron.agent.common.config.IPTABLES_OPTS, neutron.agent.common.config.PROCESS_MONITOR_OPTS, neutron.agent.common.config.AVAILABILITY_ZONE_OPTS) ), ('DEFAULT', itertools.chain( neutron.agent.common.config.INTERFACE_DRIVER_OPTS, neutron.agent.metadata.config.SHARED_OPTS, neutron.agent.metadata.config.DRIVER_OPTS) ) ] def list_extension_opts(): return [ ('DEFAULT', neutron.extensions.allowedaddresspairs.allowed_address_pair_opts), ('quotas', itertools.chain( neutron.extensions.l3.l3_quota_opts, neutron.extensions.securitygroup.security_group_quota_opts) ) ] def list_db_opts(): return [ ('DEFAULT', itertools.chain( neutron.db.agents_db.AGENT_OPTS, neutron.db.extraroute_db.extra_route_opts, neutron.db.l3_gwmode_db.OPTS, neutron.db.agentschedulers_db.AGENTS_SCHEDULER_OPTS, neutron.db.dvr_mac_db.dvr_mac_address_opts, 
neutron.db.l3_dvr_db.router_distributed_opts, neutron.db.l3_agentschedulers_db.L3_AGENTS_SCHEDULER_OPTS, neutron.db.l3_hamode_db.L3_HA_OPTS) ), ('database', neutron.db.migration.cli.get_engine_config()) ] def list_opts(): return [ ('DEFAULT', itertools.chain( neutron.common.config.core_cli_opts, neutron.common.config.core_opts, neutron.wsgi.socket_opts, neutron.service.service_opts) ), (neutron.common.config.NOVA_CONF_SECTION, itertools.chain( neutron.common.config.nova_opts) ), ('quotas', neutron.quota.quota_opts) ] def list_qos_opts(): return [ ('qos', neutron.services.qos.notification_drivers.manager.QOS_PLUGIN_OPTS) ] def list_base_agent_opts(): return [ ('DEFAULT', itertools.chain( neutron.agent.linux.interface.OPTS, neutron.agent.common.config.INTERFACE_DRIVER_OPTS, neutron.agent.common.ovs_lib.OPTS) ), ('AGENT', neutron.agent.common.config.AGENT_STATE_OPTS) ] def list_dhcp_agent_opts(): return [ ('DEFAULT', itertools.chain( neutron.agent.dhcp.config.DHCP_AGENT_OPTS, neutron.agent.dhcp.config.DHCP_OPTS, neutron.agent.dhcp.config.DNSMASQ_OPTS) ) ] def list_linux_bridge_opts(): return [ ('linux_bridge', neutron.plugins.ml2.drivers.linuxbridge.agent.common.config. bridge_opts), ('vxlan', neutron.plugins.ml2.drivers.linuxbridge.agent.common.config. vxlan_opts), ('agent', neutron.plugins.ml2.drivers.agent.config.agent_opts), ('securitygroup', neutron.agent.securitygroups_rpc.security_group_opts) ] def list_l3_agent_opts(): return [ ('DEFAULT', itertools.chain( neutron.agent.l3.config.OPTS, neutron.service.service_opts, neutron.agent.l3.ha.OPTS, neutron.agent.linux.pd.OPTS, neutron.agent.linux.ra.OPTS) ) ] def list_macvtap_opts(): return [ ('macvtap', neutron.plugins.ml2.drivers.macvtap.agent.config.macvtap_opts), ('agent', neutron.plugins.ml2.drivers.agent.config.agent_opts), ('securitygroup', neutron.agent.securitygroups_rpc.security_group_opts) ] def list_metadata_agent_opts(): return [ ('DEFAULT', itertools.chain( neutron.agent.metadata.config.SHARED_OPTS, neutron.agent.metadata.config.METADATA_PROXY_HANDLER_OPTS, neutron.agent.metadata.config.UNIX_DOMAIN_METADATA_PROXY_OPTS, neutron.openstack.common.cache.cache._get_oslo_configs()) ), ('AGENT', neutron.agent.common.config.AGENT_STATE_OPTS) ] def list_metering_agent_opts(): return [ ('DEFAULT', itertools.chain( neutron.services.metering.agents.metering_agent.MeteringAgent. Opts, neutron.agent.common.config.INTERFACE_DRIVER_OPTS) ) ] def list_ml2_conf_opts(): return [ ('ml2', neutron.plugins.ml2.config.ml2_opts), ('ml2_type_flat', neutron.plugins.ml2.drivers.type_flat.flat_opts), ('ml2_type_vlan', neutron.plugins.ml2.drivers.type_vlan.vlan_opts), ('ml2_type_gre', neutron.plugins.ml2.drivers.type_gre.gre_opts), ('ml2_type_vxlan', neutron.plugins.ml2.drivers.type_vxlan.vxlan_opts), ('ml2_type_geneve', neutron.plugins.ml2.drivers.type_geneve.geneve_opts), ('securitygroup', neutron.agent.securitygroups_rpc.security_group_opts) ] def list_ml2_conf_sriov_opts(): return [ ('ml2_sriov', neutron.plugins.ml2.drivers.mech_sriov.mech_driver.mech_driver. sriov_opts) ] def list_ovs_opts(): return [ ('ovs', itertools.chain( neutron.plugins.ml2.drivers.openvswitch.agent.common.config. ovs_opts, neutron.agent.ovsdb.api.OPTS) ), ('agent', neutron.plugins.ml2.drivers.openvswitch.agent.common.config. agent_opts), ('securitygroup', neutron.agent.securitygroups_rpc.security_group_opts) ] def list_sriov_agent_opts(): return [ ('ml2_sriov', neutron.plugins.ml2.drivers.mech_sriov.agent.common.config. 
sriov_nic_opts), ('agent', neutron.agent.l2.extensions.manager.L2_AGENT_EXT_MANAGER_OPTS) ] def list_auth_opts(): opt_list = copy.deepcopy(_nova_options) opt_list.insert(0, ks_loading.get_auth_common_conf_options()[0]) # NOTE(mhickey): There are a lot of auth plugins, we just generate # the config options for a few common ones plugins = ['password', 'v2password', 'v3password'] for name in plugins: for plugin_option in ks_loading.get_auth_plugin_conf_options(name): if all(option.name != plugin_option.name for option in opt_list): opt_list.append(plugin_option) opt_list.sort(key=operator.attrgetter('name')) return [(NOVA_GROUP, opt_list)] neutron-8.4.0/neutron/auth.py0000664000567000056710000000505513044372736017401 0ustar jenkinsjenkins00000000000000# Copyright 2012 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import cfg from oslo_log import log as logging from oslo_middleware import base from oslo_middleware import request_id import webob.dec import webob.exc from neutron import context LOG = logging.getLogger(__name__) class NeutronKeystoneContext(base.ConfigurableMiddleware): """Make a request context from keystone headers.""" @webob.dec.wsgify def __call__(self, req): # Determine the user ID user_id = req.headers.get('X_USER_ID') if not user_id: LOG.debug("X_USER_ID is not found in request") return webob.exc.HTTPUnauthorized() # Determine the tenant tenant_id = req.headers.get('X_PROJECT_ID') # Suck out the roles roles = [r.strip() for r in req.headers.get('X_ROLES', '').split(',')] # Human-friendly names tenant_name = req.headers.get('X_PROJECT_NAME') user_name = req.headers.get('X_USER_NAME') # Use request_id if already set req_id = req.environ.get(request_id.ENV_REQUEST_ID) # Get the auth token auth_token = req.headers.get('X_AUTH_TOKEN', req.headers.get('X_STORAGE_TOKEN')) # Create a context with the authentication data ctx = context.Context(user_id, tenant_id, roles=roles, user_name=user_name, tenant_name=tenant_name, request_id=req_id, auth_token=auth_token) # Inject the context... req.environ['neutron.context'] = ctx return self.application def pipeline_factory(loader, global_conf, **local_conf): """Create a paste pipeline based on the 'auth_strategy' config option.""" pipeline = local_conf[cfg.CONF.auth_strategy] pipeline = pipeline.split() filters = [loader.get_filter(n) for n in pipeline[:-1]] app = loader.get_app(pipeline[-1]) filters.reverse() for filter in filters: app = filter(app) return app neutron-8.4.0/neutron/locale/0000775000567000056710000000000013044373210017304 5ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/locale/zh_TW/0000775000567000056710000000000013044373210020337 5ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/locale/zh_TW/LC_MESSAGES/0000775000567000056710000000000013044373210022124 5ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/locale/zh_TW/LC_MESSAGES/neutron.po0000664000567000056710000045263013044372760024201 0ustar jenkinsjenkins00000000000000# Translations template for neutron. 
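# NOTE: the msgid entries in this catalog are gettext lookup keys and must stay
# byte-identical to the source strings in the Python modules above; spelling
# quirks inherited from the sources (e.g. "whent", "fullycompatible") are
# therefore intentional and cannot be corrected in the catalog alone without
# breaking the msgid -> msgstr lookup.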
# Copyright (C) 2015 ORGANIZATION # This file is distributed under the same license as the neutron project. # # Translators: # Jennifer , 2016. #zanata msgid "" msgstr "" "Project-Id-Version: neutron 8.2.1.dev52\n" "Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n" "POT-Creation-Date: 2016-09-01 18:10+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" "PO-Revision-Date: 2016-04-20 09:07+0000\n" "Last-Translator: Jennifer \n" "Language: zh-TW\n" "Plural-Forms: nplurals=1; plural=0;\n" "Generated-By: Babel 2.0\n" "X-Generator: Zanata 3.7.3\n" "Language-Team: Chinese (Taiwan)\n" #, python-format msgid "" "\n" "Command: %(cmd)s\n" "Exit code: %(code)s\n" "Stdin: %(stdin)s\n" "Stdout: %(stdout)s\n" "Stderr: %(stderr)s" msgstr "" "\n" "指令:%(cmd)s\n" "結束碼:%(code)s\n" "標準輸入:%(stdin)s\n" "標準輸出:%(stdout)s\n" "標準錯誤:%(stderr)s" #, python-format msgid "" "%(branch)s HEAD file does not match migration timeline head, expected: " "%(head)s" msgstr "%(branch)s HEAD 檔與移轉時間表表頭不符,預期為:%(head)s" #, python-format msgid "%(driver)s: Internal driver error." msgstr "%(driver)s:內部驅動程式錯誤。" #, python-format msgid "%(id)s is not a valid %(type)s identifier" msgstr "%(id)s 不是有效的 %(type)s ID" #, python-format msgid "" "%(invalid_dirs)s is invalid value for sort_dirs, valid value is '%(asc)s' " "and '%(desc)s'" msgstr "" "對於 sort_dir 來說,%(invalid_dirs)s 是無效值,有效值為 '%(asc)s' 及 " "'%(desc)s'" #, python-format msgid "%(key)s prohibited for %(tunnel)s provider network" msgstr "%(tunnel)s 提供者網路已禁止 %(key)s" #, python-format msgid "" "%(method)s called with network settings %(current)s (original settings " "%(original)s) and network segments %(segments)s" msgstr "" "已使用網路設定 %(current)s(原始設定 %(original)s)及網路區段 %(segments)s 呼" "叫了 %(method)s" #, python-format msgid "" "%(method)s called with port settings %(current)s (original settings " "%(original)s) host %(host)s (original host %(original_host)s) vif type " "%(vif_type)s (original vif type %(original_vif_type)s) vif details " "%(vif_details)s (original vif details %(original_vif_details)s) binding " "levels %(levels)s (original binding levels %(original_levels)s) on network " "%(network)s with segments to bind %(segments_to_bind)s" msgstr "" "在包含區段的網路 %(network)s 上,已使用埠設定 %(current)s(原始設定 " "%(original)s)主機 %(host)s(原始主機 %(original_host)s)VIF 類型 " "%(vif_type)s(原始 VIF 類型 %(original_vif_type)s)VIF 詳細資料 " "%(vif_details)s(原始 VIF 詳細資料 %(original_vif_details)s)連結層次 " "%(levels)s(原始連結層次 %(original_levels)s)呼叫了 %(method)s,以連結 " "%(segments_to_bind)s" #, python-format msgid "" "%(method)s called with subnet settings %(current)s (original settings " "%(original)s)" msgstr "已使用子網路設定 %(current)s(原始設定 %(original)s)呼叫了 %(method)s" #, python-format msgid "%(method)s failed." msgstr "%(method)s 失敗。" #, python-format msgid "%(name)s '%(addr)s' does not match the ip_version '%(ip_version)s'" msgstr "%(name)s '%(addr)s' 與 ip_version '%(ip_version)s' 不符" #, python-format msgid "%(param)s must be in %(range)s range." 
msgstr "%(param)s 必須在 %(range)s 範圍內。" #, python-format msgid "%s cannot be called while in offline mode" msgstr "當 %s 處於離線模式時,無法對其進行呼叫" #, python-format msgid "%s is invalid attribute for sort_key" msgstr "對於 sort_key 來說,%s 是無效的屬性" #, python-format msgid "%s is invalid attribute for sort_keys" msgstr "對於 sort_key 來說,%s 是無效的屬性" #, python-format msgid "%s is not a valid VLAN tag" msgstr "%s 不是有效的 VLAN 標記" #, python-format msgid "%s must be specified" msgstr "必須指定 %s" #, python-format msgid "%s must implement get_port_from_device or get_ports_from_devices." msgstr "%s 必須實作 get_port_from_device 或 get_ports_from_devices。" #, python-format msgid "%s prohibited for VLAN provider network" msgstr "VLAN 提供者網路已禁止 %s" #, python-format msgid "%s prohibited for flat provider network" msgstr "平面提供者網路已禁止 %s" #, python-format msgid "%s prohibited for local provider network" msgstr "本端提供者網路已禁止 %s" #, python-format msgid "" "'%(data)s' contains '%(length)s' characters. Adding a domain name will cause " "it to exceed the maximum length of a FQDN of '%(max_len)s'" msgstr "" "'%(data)s' 包含 '%(length)s' 個字元。新增網域名稱將導致它超出 FQDN 長度上限 " "'%(max_len)s'" #, python-format msgid "" "'%(data)s' contains '%(length)s' characters. Adding a sub-domain will cause " "it to exceed the maximum length of a FQDN of '%(max_len)s'" msgstr "" "'%(data)s' 包含 '%(length)s' 個字元。新增子網域將導致它超出 FQDN 長度上限 " "'%(max_len)s'" #, python-format msgid "'%(data)s' exceeds maximum length of %(max_len)s" msgstr "'%(data)s' 超出長度上限 %(max_len)s" #, python-format msgid "'%(data)s' is not an accepted IP address, '%(ip)s' is recommended" msgstr "'%(data)s' 不是可接受的 IP 位址,建議使用 '%(ip)s'" #, python-format msgid "'%(data)s' is not in %(valid_values)s" msgstr "'%(data)s' 不在 %(valid_values)s 中" #, python-format msgid "'%(data)s' is too large - must be no larger than '%(limit)d'" msgstr "'%(data)s' 太大 - 不得大於 '%(limit)d'" #, python-format msgid "'%(data)s' is too small - must be at least '%(limit)d'" msgstr "'%(data)s' 太小 - 必須至少為 '%(limit)d'" #, python-format msgid "'%(data)s' isn't a recognized IP subnet cidr, '%(cidr)s' is recommended" msgstr "'%(data)s' 不是可以辨識的 IP 子網路 CIDR,建議使用 '%(cidr)s'" #, python-format msgid "'%(data)s' not a valid PQDN or FQDN. Reason: %(reason)s" msgstr "'%(data)s' 不是有效的 PQDN 或 FQDN。原因:%(reason)s" #, python-format msgid "'%(host)s' is not a valid nameserver. %(msg)s" msgstr "'%(host)s' 不是有效的名稱伺服器。%(msg)s" #, python-format msgid "'%s' Blank strings are not permitted" msgstr "'%s',不允許空白字串" #, python-format msgid "'%s' cannot be converted to boolean" msgstr "無法將 '%s' 轉換為布林值" #, python-format msgid "'%s' cannot be converted to lowercase string" msgstr "無法將 '%s' 轉換為小寫字串" #, python-format msgid "'%s' contains whitespace" msgstr "'%s' 包含空格" #, python-format msgid "'%s' exceeds the 255 character FQDN limit" msgstr "'%s' 超過了 255 字元 FQDN 限制" #, python-format msgid "'%s' is a FQDN. 
It should be a relative domain name" msgstr "'%s' 是 FQDN。它應該是相對網域名稱" #, python-format msgid "'%s' is not a FQDN" msgstr "'%s' 不是 FQDN" #, python-format msgid "'%s' is not a dictionary" msgstr "'%s' 不是字典" #, python-format msgid "'%s' is not a list" msgstr "'%s' 不是清單" #, python-format msgid "'%s' is not a valid IP address" msgstr "'%s' 不是有效的 IP 位址" #, python-format msgid "'%s' is not a valid IP subnet" msgstr "'%s' 不是有效的 IP 子網路" #, python-format msgid "'%s' is not a valid MAC address" msgstr "'%s' 不是有效的 MAC 位址" #, python-format msgid "'%s' is not a valid RBAC object type" msgstr "'%s' 不是有效的 RBAC 物件類型" #, python-format msgid "'%s' is not a valid UUID" msgstr "'%s' 不是有效的 UUID" #, python-format msgid "'%s' is not a valid boolean value" msgstr "'%s' 不是有效的布林值" #, python-format msgid "'%s' is not a valid input" msgstr "'%s' 不是有效的輸入" #, python-format msgid "'%s' is not a valid string" msgstr "'%s' 不是有效字串" #, python-format msgid "'%s' is not an integer" msgstr "'%s' 不是整數" #, python-format msgid "'%s' is not an integer or uuid" msgstr "'%s' 不是整數或 UUID" #, python-format msgid "'%s' is not of the form =[value]" msgstr "'%s' 的格式不是 =[value]" #, python-format msgid "'%s' is not supported for filtering" msgstr "'%s' 不支援過濾" #, python-format msgid "'%s' must be a non negative decimal." msgstr "'%s' 必須是非負小數。" #, python-format msgid "'%s' should be non-negative" msgstr "'%s' 應該為非負數" msgid "'.' searches are not implemented" msgstr "未實作 '.' 搜尋" #, python-format msgid "'module' object has no attribute '%s'" msgstr "'module' 物件不含屬性 '%s'" msgid "'port_max' is smaller than 'port_min'" msgstr "'port_max' 小於 'port_min'" msgid "" "(Deprecated. Use '--subproject neutron-SERVICE' instead.) The advanced " "service to execute the command against." msgstr "" "(已遭到淘汰。請改用 '--subproject neutron-SERVICE')要對其執行指令的進階服" "務。" msgid "0 is not allowed as CIDR prefix length" msgstr "不接受 0 作為 CIDR 字首長度" msgid "" "32-bit BGP identifier, typically an IPv4 address owned by the system running " "the BGP DrAgent." msgstr "32 位元 BGP ID,通常是執行 BGP DrAgent 之系統所擁有的 IPv4 位址。" msgid "A QoS driver must be specified" msgstr "必須指定服務品質驅動程式" msgid "A cidr must be specified in the absence of a subnet pool" msgstr "如果未指定子網路儲存區,則必須指定 cidr" msgid "" "A decimal value as Vendor's Registered Private Enterprise Number as required " "by RFC3315 DUID-EN." msgstr "十進位值,依 RFC3315 DUID-EN 的需要,作為供應商的已登錄專用企業號碼。" #, python-format msgid "A default external network already exists: %(net_id)s." msgstr "預設外部網路已經存在:%(net_id)s。" msgid "" "A default subnetpool for this IP family has already been set. Only one " "default may exist per IP family" msgstr "已經設定了此 IP 系列的預設子網路儲存區。每個 IP 系列只能存在一個預設值" msgid "A metering driver must be specified" msgstr "必須指定計量驅動程式" msgid "A password must be supplied when using auth_type md5." msgstr "使用 auth_type MD5 時,必須提供密碼。" msgid "API for retrieving service providers for Neutron advanced services" msgstr "此 API 用於擷取 Neutron 進階服務的服務提供者" msgid "Aborting periodic_sync_routers_task due to an error." msgstr "由於發生錯誤,正在中斷 periodic_sync_routers_task。" msgid "Access to this resource was denied." msgstr "已拒絕存取此資源。" msgid "Action to be executed when a child process dies" msgstr "子程序當掉時要執行的動作" msgid "" "Add comments to iptables rules. Set to false to disallow the addition of " "comments to generated iptables rules that describe each rule's purpose. " "System must support the iptables comments module for addition of comments." 
msgstr "" "將註解新增至 iptables 規則。設為 false 可禁止向所產生用來說明每一個規則之目的" "的 iptables 規則新增註解。系統必須支援 iptables 註解模組才能新增註解。" msgid "Address not present on interface" msgstr "位址未呈現在介面上" #, python-format msgid "Address scope %(address_scope_id)s could not be found" msgstr "找不到位址範圍 %(address_scope_id)s" msgid "" "Address to listen on for OpenFlow connections. Used only for 'native' driver." msgstr "用於接聽 OpenFlow 連線的位址。僅用於 'native' 驅動程式。" msgid "Adds external network attribute to network resource." msgstr "將外部網路屬性新增至網路資源。" msgid "Adds test attributes to core resources." msgstr "將測試屬性新增至核心資源。" #, python-format msgid "Agent %(id)s could not be found" msgstr "找不到代理程式 %(id)s" #, python-format msgid "Agent %(id)s is not a L3 Agent or has been disabled" msgstr "代理程式 %(id)s 不是 L3 代理程式,或者已停用" #, python-format msgid "Agent %(id)s is not a valid DHCP Agent or has been disabled" msgstr "代理程式 %(id)s 不是有效的 DHCP 代理程式,或者已停用" msgid "Agent has just been revived" msgstr "代理程式剛剛恢復" msgid "" "Agent starts with admin_state_up=False when enable_new_agents=False. In the " "case, user's resources will not be scheduled automatically to the agent " "until admin changes admin_state_up to True." msgstr "" "當 enable_new_agents=False 時,代理程式從 admin_state_up=False 開始。在這種情" "況下,不會將使用者的資源自動排程到代理程式,直到管理者將 admin_state_up 變更" "為 True 為止。" #, python-format msgid "Agent updated: %(payload)s" msgstr "已更新代理程式:%(payload)s" #, python-format msgid "" "Agent with agent_type=%(agent_type)s and host=%(host)s could not be found" msgstr "找不到 agent_type = %(agent_type)s 且主機 = %(host)s 的代理程式" msgid "Allow auto scheduling networks to DHCP agent." msgstr "容許自動將網路排程到 DHCP 代理程式。" msgid "Allow auto scheduling of routers to L3 agent." msgstr "容許自動將路由器排程到 L3 代理程式。" msgid "" "Allow overlapping IP support in Neutron. Attention: the following parameter " "MUST be set to False if Neutron is being used in conjunction with Nova " "security groups." msgstr "" "容許 Neutron 中的重疊 IP 支援。注意:如果將 Neutron 與 Nova 安全性群組一起使" "用,則必須將下列參數設為 False。" msgid "Allow running metadata proxy." msgstr "容許執行 meta 資料 Proxy。" msgid "Allow sending resource operation notification to DHCP agent" msgstr "容許將資源作業通知傳送給 DHCP 代理程式" msgid "Allow the creation of PTR records" msgstr "容許建立 PTR 記錄" msgid "Allow the usage of the bulk API" msgstr "容許使用主體 API" msgid "Allow the usage of the pagination" msgstr "容許使用分頁" msgid "Allow the usage of the sorting" msgstr "容許使用排序" msgid "Allow to perform insecure SSL (https) requests to nova metadata" msgstr "容許對 Nova meta 資料執行不安全的 SSL (HTTPS) 要求" msgid "Allowed address pairs must be a list." msgstr "容許使用的位址配對必須是清單。" msgid "AllowedAddressPair must contain ip_address" msgstr "AllowedAddressPair 必須包含 ip_address" msgid "" "Allows for serving metadata requests coming from a dedicated metadata access " "network whose CIDR is 169.254.169.254/16 (or larger prefix), and is " "connected to a Neutron router from which the VMs send metadata:1 request. In " "this case DHCP Option 121 will not be injected in VMs, as they will be able " "to reach 169.254.169.254 through a router. This option requires " "enable_isolated_metadata = True." msgstr "" "容許負責處理來自專用 meta 資料存取網路的 meta 資料要求,該網路的 CIDR 是 " "169.254.169.254/16(或更大字首)並且已連接至 Neutron 路由器(VM 從此 Neutron " "路由器傳送 metadata:1 要求)。在這種情況下,DHCP 選項 121 將不注入 VM,因為它" "們能夠透過路由器呼叫到 169.254.169.254。這個選項需要 " "enable_isolated_metadata = True。" #, python-format msgid "" "Already hosting BGP Speaker for local_as=%(current_as)d with router_id=" "%(rtid)s." 
msgstr "" "針對包含 router_id=%(rtid)s 的 local_as=%(current_as)d,已經管理 BGP 喇叭。" #, python-format msgid "" "Already hosting maximum number of BGP Speakers. Allowed scheduled count=" "%(count)d" msgstr "所管理的 BGP 喇叭數目已經達到上限。所容許的已排程計數 = %(count)d" msgid "An RBAC policy already exists with those values." msgstr "包含那些值的 RBAC 原則已經存在。" msgid "An identifier must be specified when updating a subnet" msgstr "更新子網路時,必須提供 ID" msgid "An interface driver must be specified" msgstr "必須指定介面驅動程式" msgid "" "An ordered list of extension driver entrypoints to be loaded from the " "neutron.ml2.extension_drivers namespace. For example: extension_drivers = " "port_security,qos" msgstr "" "要從 neutron.ml2.extension_drivers 名稱空間載入的延伸驅動程式進入點有序清單。" "例如:extension_drivers = port_security,qos" msgid "" "An ordered list of networking mechanism driver entrypoints to be loaded from " "the neutron.ml2.mechanism_drivers namespace." msgstr "" "要從 neutron.ml2.mechanism_drivers 名稱空間載入的網路機制驅動程式進入點有序清" "單。" msgid "An unexpected internal error occurred." msgstr "發生非預期的內部錯誤。" msgid "An unknown error has occurred. Please try your request again." msgstr "發生不明錯誤。請重試要求。" msgid "Async process didn't respawn" msgstr "同步程序未再次大量產生" #, python-format msgid "Attribute '%s' not allowed in POST" msgstr "POST 中不接受屬性 '%s'" #, python-format msgid "Authentication type not supported. Requested type=%(auth_type)s." msgstr "鑑別類型不受支援。要求的類型 = %(auth_type)s。" msgid "Authorization URL for connecting to designate in admin context" msgstr "用於連接以在管理環境定義中指定的授權 URL" msgid "Automatically remove networks from offline DHCP agents." msgstr "從離線 DHCP 代理程式中自動移除網路。" msgid "" "Automatically reschedule routers from offline L3 agents to online L3 agents." msgstr "自動將路由器從離線 L3 代理程式重新排程至線上 L3 代理程式。" msgid "Availability zone of this node" msgstr "此節點的可用性區域" #, python-format msgid "AvailabilityZone %(availability_zone)s could not be found." msgstr "找不到可用性區域 %(availability_zone)s。" msgid "Available commands" msgstr "可用的指令" #, python-format msgid "" "BGP Peer %(peer_ip)s for remote_as=%(remote_as)s, running for BGP Speaker " "%(speaker_as)d not added yet." msgstr "" "針對 remote_as=%(remote_as)s,尚未新增針對 BGP 喇叭 %(speaker_as)d 執行的 " "BGP 對等項 %(peer_ip)s。" #, python-format msgid "" "BGP Speaker %(bgp_speaker_id)s is already configured to peer with a BGP Peer " "at %(peer_ip)s, it cannot peer with BGP Peer %(bgp_peer_id)s." msgstr "" "BGP 喇叭 %(bgp_speaker_id)s 已經配置成與 %(peer_ip)s 處的 BGP 對等項對等,它" "無法與 BGP 對等項 %(bgp_peer_id)s 對等。" #, python-format msgid "" "BGP Speaker for local_as=%(local_as)s with router_id=%(rtid)s not added yet." msgstr "" "針對包含 router_id=%(rtid)s 的 local_as=%(local_as)s,尚未新增 BGP 喇叭。" #, python-format msgid "" "BGP peer %(bgp_peer_id)s is not associated with BGP speaker " "%(bgp_speaker_id)s." msgstr "BGP 對等項 %(bgp_peer_id)s 未與 BGP 喇叭 %(bgp_speaker_id)s 建立關聯。" #, python-format msgid "BGP peer %(bgp_peer_id)s not authenticated." msgstr "BGP 對等項 %(bgp_peer_id)s 沒有進行鑑別。" #, python-format msgid "BGP peer %(id)s could not be found." msgstr "找不到 BGP 對等項 %(id)s。" #, python-format msgid "" "BGP speaker %(bgp_speaker_id)s is not hosted by the BgpDrAgent %(agent_id)s." msgstr "BGP 喇叭 %(bgp_speaker_id)s 不由 BgpDrAgent %(agent_id)s 進行管理。" #, python-format msgid "BGP speaker %(id)s could not be found." msgstr "找不到 BGP 喇叭 %(id)s。" msgid "BGP speaker driver class to be instantiated." msgstr "要實例化的 BGP 喇叭驅動程式類別。" msgid "Backend does not support VLAN Transparency." 
msgstr "後端不支援 VLAN 透通性。" #, python-format msgid "" "Bad prefix or mac format for generating IPv6 address by EUI-64: %(prefix)s, " "%(mac)s:" msgstr "" "依 EUI-64 產生 IPv6 位址時使用的字首或 MAC 格式錯誤:%(prefix)s 及 %(mac)s:" #, python-format msgid "Bad prefix type for generate IPv6 address by EUI-64: %s" msgstr "依 EUI-64 產生 IPv6 位址時使用的字首類型錯誤:%s" #, python-format msgid "Base MAC: %s" msgstr "基本 MAC:%s" msgid "" "Base log dir for dnsmasq logging. The log contains DHCP and DNS log " "information and is useful for debugging issues with either DHCP or DNS. If " "this section is null, disable dnsmasq log." msgstr "" "用於 dnsmasq 記載的基本日誌目錄。日誌包含 DHCP 及 DNS 日誌資訊,並且在對 " "DHCP 或 DNS 方面的問題進行除錯時很有用。如果此區段是空值,則將停用 dnsmasq 日" "誌。" #, python-format msgid "BgpDrAgent %(agent_id)s is already associated to a BGP speaker." msgstr "BgpDrAgent %(agent_id)s 已經與 BGP 喇叭建立關聯。" #, python-format msgid "BgpDrAgent %(id)s is invalid or has been disabled." msgstr "BgpDrAgent %(id)s 無效,或者已停用。" #, python-format msgid "BgpDrAgent updated: %s" msgstr "BgpDrAgent 已更新:%s" msgid "Body contains invalid data" msgstr "內文包含無效資料" msgid "Both network_id and router_id are None. One must be provided." msgstr "network_id 及 router_id 均為 None。必須提供其中一個。" #, python-format msgid "Bridge %(bridge)s does not exist." msgstr "橋接器 %(bridge)s 不存在。" #, python-format msgid "Bridge %s does not exist" msgstr "橋接器 %s 不存在" msgid "Bulk operation not supported" msgstr "不支援主體作業" msgid "CIDR to monitor" msgstr "要監視的 CIDR" #, python-format msgid "Callback for %(resource_type)s not found" msgstr "找不到 %(resource_type)s 的回呼" #, python-format msgid "Callback for %(resource_type)s returned wrong resource type" msgstr "%(resource_type)s 的回呼傳回了錯誤的資源類型" #, python-format msgid "Cannot add floating IP to port %s that has no fixed IPv4 addresses" msgstr "無法將浮動 IP 新增至沒有固定 IPv4 位址的埠 %s" #, python-format msgid "Cannot add floating IP to port on subnet %s which has no gateway_ip" msgstr "無法將浮動 IP 新增至子網路 %s 上沒有 gateway_ip 的埠" #, python-format msgid "Cannot add multiple callbacks for %(resource_type)s" msgstr "無法新增 %(resource_type)s 的多重回呼" #, python-format msgid "Cannot allocate IPv%(req_ver)s subnet from IPv%(pool_ver)s subnet pool" msgstr "無法從 IPv%(pool_ver)s 子網路儲存區配置 IPv%(req_ver)s 子網路" msgid "Cannot allocate requested subnet from the available set of prefixes" msgstr "無法從可用的字首集配置所要求的子網路" #, python-format msgid "" "Cannot associate floating IP %(floating_ip_address)s (%(fip_id)s) with port " "%(port_id)s using fixed IP %(fixed_ip)s, as that fixed IP already has a " "floating IP on external network %(net_id)s." msgstr "" "無法使浮動 IP %(floating_ip_address)s (%(fip_id)s) 與使用固定 IP " "%(fixed_ip)s 的埠 %(port_id)s 建立關聯,因為該固定 IP 在外部網路 %(net_id)s " "上已經有浮動 IP。" msgid "" "Cannot change HA attribute of active routers. Please set router " "admin_state_up to False prior to upgrade." msgstr "" "無法變更作用中路由器的 HA 屬性。請先將路由器的 admin_state_up 設為 False,然" "後再升級。" #, python-format msgid "" "Cannot create floating IP and bind it to %s, since that is not an IPv4 " "address." msgstr "無法建立浮動 IP 並將其連結至 %s,因為這不是一個 IPv4 位址。" #, python-format msgid "" "Cannot create floating IP and bind it to Port %s, since that port is owned " "by a different tenant." 
msgstr "無法建立浮動 IP 並將其連結至埠 %s,因為該埠是由其他承租人擁有。" msgid "Cannot create resource for another tenant" msgstr "無法給另一個承租人建立資源" msgid "Cannot disable enable_dhcp with ipv6 attributes set" msgstr "在設定了 ipv6 屬性的情況下,無法停用 enable_dhcp" #, python-format msgid "Cannot find %(table)s with %(col)s=%(match)s" msgstr "找不到具有 %(col)s=%(match)s 的 %(table)s" #, python-format msgid "Cannot handle subnet of type %(subnet_type)s" msgstr "無法處理類型為 %(subnet_type)s 的子網路" msgid "Cannot have multiple IPv4 subnets on router port" msgstr "路由器埠上不能具有多個 IPv4 子網路" #, python-format msgid "" "Cannot have multiple router ports with the same network id if both contain " "IPv6 subnets. Existing port %(p)s has IPv6 subnet(s) and network id %(nid)s" msgstr "" "不能具有多個包含相同網路 ID 的路由器埠(如果兩者都包含 IPv6 子網路)。現有埠 " "%(p)s 具有 IPv6 子網路和網路 ID %(nid)s" #, python-format msgid "" "Cannot host distributed router %(router_id)s on legacy L3 agent %(agent_id)s." msgstr "無法在舊式 L3 代理程式 %(agent_id)s 上管理分散式路由器 %(router_id)s。" msgid "Cannot match priority on flow deletion or modification" msgstr "無法符合流程刪除作業或修改作業上的優先順序" msgid "Cannot mix IPv4 and IPv6 prefixes in a subnet pool." msgstr "不能在一個子網路儲存區中混合 IPv4 與 IPv6 字首。" msgid "Cannot specify both --service and --subproject." msgstr "無法同時指定 --service 和 --subproject。" msgid "Cannot specify both subnet-id and port-id" msgstr "無法同時指定 subnet-id 及 port-id" msgid "Cannot understand JSON" msgstr "無法理解 JSON" #, python-format msgid "Cannot update read-only attribute %s" msgstr "無法更新唯讀屬性 %s" msgid "" "Cannot upgrade active router to distributed. Please set router " "admin_state_up to False prior to upgrade." msgstr "" "無法將作用中路由器升級至分散式。請先將路由器的 admin_state_up 設定為 False," "然後再升級。" msgid "Certificate Authority public key (CA cert) file for ssl" msgstr "用於 SSL 的「憑證管理中心」公開金鑰(CA 憑證)檔案" #, python-format msgid "" "Change would make usage less than 0 for the following resources: %(unders)s." msgstr "變更會使下列資源的用量小於 0:%(unders)s。" msgid "Check ebtables installation" msgstr "檢查 ebtables 安裝" msgid "Check for ARP header match support" msgstr "檢查 ARP 標頭符合支援" msgid "Check for ARP responder support" msgstr "檢查 ARP 回應者支援" msgid "Check for ICMPv6 header match support" msgstr "檢查 ICMPv6 標頭符合支援" msgid "Check for OVS Geneve support" msgstr "檢查 OVS Geneve 支援" msgid "Check for OVS vxlan support" msgstr "檢查 OVS vxlan 支援" msgid "Check for VF management support" msgstr "檢查 VF 管理支援" msgid "Check for iproute2 vxlan support" msgstr "檢查 iproute2 vxlan 支援" msgid "Check for nova notification support" msgstr "檢查 Nova 通知支援" msgid "Check for patch port support" msgstr "檢查修補程式埠支援" msgid "Check ip6tables installation" msgstr "檢查 ip6tables 安裝" msgid "Check ipset installation" msgstr "檢查 ipset 安裝" msgid "Check keepalived IPv6 support" msgstr "檢查 keepalived IPv6 支援" msgid "Check minimal dibbler version" msgstr "檢查 dibbler 版本下限" msgid "Check minimal dnsmasq version" msgstr "檢查 dnsmasq 版本下限" msgid "Check netns permission settings" msgstr "檢查 netns 許可權設定" msgid "Check ovs conntrack support" msgstr "檢查 ovs conntrack 支援" msgid "Check ovsdb native interface support" msgstr "檢查 OVSDB 原生介面支援" #, python-format msgid "" "Cidr %(subnet_cidr)s of subnet %(subnet_id)s overlaps with cidr %(cidr)s of " "subnet %(sub_id)s" msgstr "" "子網路 %(subnet_id)s 的 CIDR %(subnet_cidr)s 與子網路 %(sub_id)s 的 CIDR " "%(cidr)s 重疊" msgid "Class not found." msgstr "找不到類別。" msgid "Cleanup resources of a specific agent type only." msgstr "只清除特定代理程式類型的資源。" msgid "Client certificate for nova metadata api server." 
msgstr "Nova meta 資料 API 伺服器的用戶端憑證。" msgid "" "Comma-separated list of : tuples, mapping " "network_device to the agent's node-specific list of virtual functions that " "should not be used for virtual networking. vfs_to_exclude is a semicolon-" "separated list of virtual functions to exclude from network_device. The " "network_device in the mapping should appear in the physical_device_mappings " "list." msgstr "" "network_device 與不應用於虛擬網路之虛擬函數的代理程式節點專用清單的 " ": 值組對映清單(使用逗點區隔)。" "vfs_to_exclude 是要從 network_device 中排除之虛擬函數的分號區隔清單。對映中" "的 network_device 應該出現在 physical_device_mappings 清單中。" msgid "" "Comma-separated list of : tuples mapping physical " "network names to the agent's node-specific Open vSwitch bridge names to be " "used for flat and VLAN networks. The length of bridge names should be no " "more than 11. Each bridge must exist, and should have a physical network " "interface configured as a port. All physical networks configured on the " "server should have mappings to appropriate bridges on each agent. Note: If " "you remove a bridge from this mapping, make sure to disconnect it from the " "integration bridge as it won't be managed by the agent anymore. Deprecated " "for ofagent." msgstr "" "實體網路名稱與要用於平面網路及 VLAN 網路之代理程式節點專用 Open vSwitch 橋接" "器名稱的 : 值組對映清單(使用逗點區隔)。橋接器名稱" "的長度不應超過 11。每一個橋接器都必須存在,並且應該已配置一個實體網路介面作為" "埠。伺服器上配置的所有實體網路都應該具有與每一個代理程式上適當橋接器的對映。" "附註:如果從此對映中移除橋接器,請確保斷開該橋接器與整合橋接器的連線,因為它" "將不再由代理程式進行管理。已針對 OF 代理程式淘汰。" msgid "" "Comma-separated list of : tuples mapping " "physical network names to the agent's node-specific physical network device " "interfaces of SR-IOV physical function to be used for VLAN networks. All " "physical networks listed in network_vlan_ranges on the server should have " "mappings to appropriate interfaces on each agent." msgstr "" "實體網路名稱與要用於 VLAN 網路之 SR-IOV 實體函數的代理程式節點專用實體網路裝" "置介面的 : 值組對映清單(使用逗點區隔)。列" "在伺服器上 network_vlan_ranges 中的所有實體網路都應該具有與每個代理程式上適當" "介面的對映。" msgid "" "Comma-separated list of : tuples " "mapping physical network names to the agent's node-specific physical network " "interfaces to be used for flat and VLAN networks. All physical networks " "listed in network_vlan_ranges on the server should have mappings to " "appropriate interfaces on each agent." msgstr "" "實體網路名稱與要用於平面網路及 VLAN 網路之代理程式節點專用實體網路介面的 " ": 值組對映清單(使用逗點區隔)。列在伺" "服器上 network_vlan_ranges 中的所有實體網路都應該具有與每個代理程式上適當介面" "的對映。" msgid "" "Comma-separated list of : tuples enumerating ranges of GRE " "tunnel IDs that are available for tenant network allocation" msgstr "" ": 值組的逗點區隔清單,用於列舉可用於承租人網路配置的 GRE 通" "道 ID 範圍" msgid "" "Comma-separated list of : tuples enumerating ranges of " "Geneve VNI IDs that are available for tenant network allocation" msgstr "" ": 值組的逗點區隔清單,用於列舉可用於承租人網路配置的 " "Geneve VNI ID 範圍" msgid "" "Comma-separated list of : tuples enumerating ranges of " "VXLAN VNI IDs that are available for tenant network allocation" msgstr "" ": 值組的逗點區隔清單,用於列舉可用於承租人網路配置的 VXLAN " "VNI ID 範圍" msgid "" "Comma-separated list of supported PCI vendor devices, as defined by " "vendor_id:product_id according to the PCI ID Repository. Default enables " "support for Intel and Mellanox SR-IOV capable NICs." msgstr "" "受支援之 PCI 供應商裝置的逗點區隔清單,根據 PCI ID 儲存庫由 vendor_id:" "product_id 定義。預設會啟用對 Intel 及支援 Mellanox SR-IOV 之 NIC 的支援。" msgid "" "Comma-separated list of the DNS servers which will be used as forwarders." 
msgstr "將用來作為轉遞程式的 DNS 伺服器逗點區隔清單。" msgid "Command to execute" msgstr "要執行的指令" msgid "Config file for interface driver (You may also use l3_agent.ini)" msgstr "介面驅動程式的配置檔(您也可使用 l3_agent.ini)" #, python-format msgid "Conflicting value ethertype %(ethertype)s for CIDR %(cidr)s" msgstr "CIDR %(cidr)s 的乙太網路類型 %(ethertype)s 值有衝突" msgid "" "Controls whether the neutron security group API is enabled in the server. It " "should be false when using no security groups or using the nova security " "group API." msgstr "" "控制是否在伺服器中啟用 Neutron 安全群組 API。當不使用安全群組時或者使用 Nova " "安全群組 API 時,它應該是 false。" #, python-format msgid "Could not bind to %(host)s:%(port)s after trying for %(time)d seconds" msgstr "嘗試 %(time)d 秒後仍無法連結至 %(host)s:%(port)s" #, python-format msgid "Could not connect to %s" msgstr "無法連接至 %s" msgid "Could not deserialize data" msgstr "無法解除序列化資料" #, python-format msgid "Could not retrieve schema from %(conn)s: %(err)s" msgstr "無法擷取 %(conn)s 中的綱目:%(err)s" #, python-format msgid "" "Current gateway ip %(ip_address)s already in use by port %(port_id)s. Unable " "to update." msgstr "埠 %(port_id)s 已在使用現行閘道 IP %(ip_address)s。無法更新。" msgid "Currently update of HA mode for a DVR/HA router is not supported." msgstr "目前不支援更新 DVR/HA 路由器的 HA 模式。" msgid "Currently update of HA mode for a distributed router is not supported." msgstr "目前不支援更新分散式路由器的 HA 模式。" msgid "" "Currently update of distributed mode for a DVR/HA router is not supported" msgstr "目前不支援更新 DVR/HA 路由器的分散式模式。" msgid "Currently update of distributed mode for an HA router is not supported." msgstr "目前不支援更新 HA 路由器的分散式模式。" msgid "" "Currently updating a router from DVR/HA to non-DVR non-HA is not supported." msgstr "目前不支援將路由器從 DVR/HA 更新為非 DVR 非 HA。" msgid "Currently updating a router to DVR/HA is not supported." msgstr "目前不支援將路由器更新為 DVR/HA。" msgid "" "DHCP lease duration (in seconds). Use -1 to tell dnsmasq to use infinite " "lease times." msgstr "" "DHCP 租賃期限(以秒為單位)。使用 -1 可告知 dnsmasq 使用無限的租賃時間。" msgid "" "DVR deployments for VXLAN/GRE/Geneve underlays require L2-pop to be enabled, " "in both the Agent and Server side." msgstr "" "VXLAN/GRE/Geneve 基礎的 DVR 部署需要同時在代理程式端及伺服器端啟用 L2-pop。" msgid "" "Database engine for which script will be generated when using offline " "migration." msgstr "使用離線移轉時,將對其產生 Script 的資料庫引擎。" msgid "" "Default IPv4 subnet pool to be used for automatic subnet CIDR allocation. " "Specifies by UUID the pool to be used in case where creation of a subnet is " "being called without a subnet pool ID. If not set then no pool will be used " "unless passed explicitly to the subnet create. If no pool is used, then a " "CIDR must be passed to create a subnet and that subnet will not be allocated " "from any pool; it will be considered part of the tenant's private address " "space. This option is deprecated for removal in the N release." msgstr "" "要用於自動子網路 CIDR 配置的預設 IPv4 子網路儲存區。依 UUID 指定要使用的儲存" "區,以便於在沒有子網路儲存區 ID 情況下呼叫建立子網路。如果未設定,則將不使用" "儲存區,除非明確地將儲存區傳遞至子網路建立作業。如果不使用儲存區,則必須傳遞 " "CIDR 以建立子網路,並且將不從任何儲存區中配置該子網路;會將其視為承租人專用位" "址空間的一部分。這個選項已遭到淘汰,以在 N 版本中予以移除。" msgid "" "Default IPv6 subnet pool to be used for automatic subnet CIDR allocation. " "Specifies by UUID the pool to be used in case where creation of a subnet is " "being called without a subnet pool ID. See the description for " "default_ipv4_subnet_pool for more information. This option is deprecated for " "removal in the N release." 
msgstr "" "要用於自動子網路 CIDR 配置的預設 IPv6 子網路儲存區。依 UUID 指定要使用的儲存" "區,以便於在沒有子網路儲存區 ID 的情況下呼叫建立子網路。如需相關資訊,請參閱 " "default_ipv4_subnet_pool 的說明。這個選項已遭到淘汰,以在 N 版本中予以移除。" msgid "Default driver to use for quota checks" msgstr "要用於配額檢查的預設驅動程式" msgid "Default external networks must be shared to everyone." msgstr "必須將預設外部網路與所有使用者共用。" msgid "" "Default network type for external networks when no provider attributes are " "specified. By default it is None, which means that if provider attributes " "are not specified while creating external networks then they will have the " "same type as tenant networks. Allowed values for external_network_type " "config option depend on the network type values configured in type_drivers " "config option." msgstr "" "在未指定提供者屬性時,外部網路的預設網路類型。依預設,它是「無」,這表示如果" "在建立外部網路時未指定提供者屬性,則它們將具有與承租人網路相同的類型。" "external_network_type 配置選項所接受的值視 type_drivers 配置選項中配置的網路" "類型值而定。" msgid "" "Default number of RBAC entries allowed per tenant. A negative value means " "unlimited." msgstr "每個承租人所容許的預設 RBAC 項目數目。負數值表示無限制。" msgid "" "Default number of resource allowed per tenant. A negative value means " "unlimited." msgstr "每個承租人所容許的預設資源數目。負數值表示無限制。" msgid "Default security group" msgstr "預設安全群組" msgid "Default security group already exists." msgstr "預設安全群組已存在。" msgid "" "Default value of availability zone hints. The availability zone aware " "schedulers use this when the resources availability_zone_hints is empty. " "Multiple availability zones can be specified by a comma separated string. " "This value can be empty. In this case, even if availability_zone_hints for a " "resource is empty, availability zone is considered for high availability " "while scheduling the resource." msgstr "" "可用性區域提示的預設值。當資源 availability_zone_hints 為空時,可用性區域知道" "排程器使用此項。可以透過逗點區隔的字串來指定多個可用性區域。此值可以是空的。" "在這種情況下,即使資源的 availability_zone_hints 為空,也會在排程資源時將可用" "性區域視為高可用性。" msgid "" "Define the default value of enable_snat if not provided in " "external_gateway_info." msgstr "" "定義 enable_snat 的預設值(如果未在 external_gateway_info 中提供的話)。" msgid "" "Defines providers for advanced services using the format: :" ":[:default]" msgstr "" "使用下列格式,為進階服務定義提供者:::[:default]" msgid "" "Delay within which agent is expected to update existing ports whent it " "restarts" msgstr "延遲時間,代理程式在重新啟動時,應該在此時間內更新現有埠" msgid "Delete the namespace by removing all devices." msgstr "透過移除所有裝置來刪除名稱空間。" #, python-format msgid "Deleting port %s" msgstr "正在刪除埠 %s" #, python-format msgid "Deployment error: %(reason)s." msgstr "部署錯誤:%(reason)s。" msgid "Destroy IPsets even if there is an iptables reference." msgstr "即使有 iptables 參照,也毀損 IPset。" msgid "Destroy all IPsets." msgstr "毀損所有 IPset。" #, python-format msgid "Device %(dev_name)s in mapping: %(mapping)s not unique" msgstr "對映 %(mapping)s 中的裝置 %(dev_name)s 不是唯一的" #, python-format msgid "Device '%(device_name)s' does not exist." msgstr "裝置 '%(device_name)s' 不存在。" msgid "Device has no virtual functions" msgstr "裝置沒有虛擬函數" #, python-format msgid "Device name %(dev_name)s is missing from physical_device_mappings" msgstr "physical_device_mappings 中遺漏了裝置名稱 %(dev_name)s" msgid "Device not found" msgstr "找不到裝置" #, python-format msgid "" "Distributed Virtual Router Mac Address for host %(host)s does not exist." msgstr "主機 %(host)s 的分散式虛擬路由器 MAC 位址不存在。" #, python-format msgid "Domain %(dns_domain)s not found in the external DNS service" msgstr "在外部 DNS 服務中,找不到網域 %(dns_domain)s" msgid "Domain to use for building the hostnames" msgstr "用於建置主機名稱的網域" msgid "" "Domain to use for building the hostnames. This option is deprecated. 
It has " "been moved to neutron.conf as dns_domain. It will be removed in a future " "release." msgstr "" "用於建置主機名稱的網域。這個選項已遭到淘汰。已將其作為 dns_domain 移至 " "neutron.conf。在未來的版本中,會將其移除。" msgid "Downgrade no longer supported" msgstr "不再支援降級" #, python-format msgid "Driver %s is not unique across providers" msgstr "驅動程式 %s 在提供者之間不是唯一的" msgid "Driver for external DNS integration." msgstr "用於外部 DNS 整合的驅動程式。" msgid "Driver for security groups firewall in the L2 agent" msgstr "L2 代理程式中安全群組防火牆的驅動程式" msgid "Driver to use for scheduling network to DHCP agent" msgstr "用於將網路排程到 DHCP 代理程式的驅動程式" msgid "Driver to use for scheduling router to a default L3 agent" msgstr "用於將路由器排程到預設 L3 代理程式的驅動程式" msgid "" "Driver used for ipv6 prefix delegation. This needs to be an entry point " "defined in the neutron.agent.linux.pd_drivers namespace. See setup.cfg for " "entry points included with the neutron source." msgstr "" "用於 IPv6 字首委派的驅動程式。這需要是 neutron.agent.linux.pd_drivers 名稱空" "間中定義的一個進入點。如需 Neutron 來源隨附的進入點,請參閱 setup.cfg。" msgid "Driver used for scheduling BGP speakers to BGP DrAgent" msgstr "用於將 BGP 喇叭排程至 BGP DrAgent 的驅動程式" msgid "Drivers list to use to send the update notification" msgstr "用於傳送更新通知的驅動程式清單" #, python-format msgid "Duplicate IP address '%s'" msgstr "重複的 IP 位址 '%s'" #, python-format msgid "" "Duplicate L3HARouterAgentPortBinding is created for router(s) %(router)s. " "Database cannot be upgraded. Please, remove all duplicates before upgrading " "the database." msgstr "" "為路由器 %(router)s 建立了重複的 L3HARouterAgentPortBinding。無法升級資料庫。" "請先移除所有重複項目,然後再升級資料庫。" msgid "Duplicate Metering Rule in POST." msgstr "POST 中的計量規則重複。" msgid "Duplicate Security Group Rule in POST." msgstr "POST 中的安全群組規則重複。" msgid "Duplicate address detected" msgstr "偵測到重複位址" #, python-format msgid "Duplicate hostroute '%s'" msgstr "重複的主機路徑 '%s'" #, python-format msgid "Duplicate items in the list: '%s'" msgstr "清單中的重複項目:'%s'" #, python-format msgid "Duplicate nameserver '%s'" msgstr "重複的名稱伺服器 '%s'" msgid "Duplicate segment entry in request." msgstr "要求中的區段項目重複。" #, python-format msgid "ERROR: %s" msgstr "錯誤:%s" msgid "" "ERROR: Unable to find configuration file via the default search paths (~/." "neutron/, ~/, /etc/neutron/, /etc/) and the '--config-file' option!" msgstr "" "錯誤:透過預設搜尋路徑(~/.neutron/、~/、/etc/neutron/ 及 /etc/)與 '--" "config-file' 選項找不到配置檔!" msgid "" "Either one of parameter network_id or router_id must be passed to _get_ports " "method." msgstr "必須將 network_id 或 router_id 中的一個參數傳遞至 _get_ports 方法。" msgid "Either subnet_id or port_id must be specified" msgstr "必須指定 subnet_id 或 port_id" msgid "Empty physical network name." msgstr "空的實體網路名稱。" msgid "Empty subnet pool prefix list." msgstr "空的子網路儲存區字首清單。" msgid "Enable FWaaS" msgstr "啟用 FWaaS" msgid "Enable HA mode for virtual routers." msgstr "啟用虛擬路由器的 HA 模式。" msgid "Enable SSL on the API server" msgstr "在 API 伺服器上啟用 SSL" msgid "" "Enable VXLAN on the agent. Can be enabled when agent is managed by ml2 " "plugin using linuxbridge mechanism driver" msgstr "" "在代理程式上啟用 VXLAN。當代理程式是由 ML2 外掛程式(使用 LinuxBridge 機制驅" "動程式)管理時,可以啟用 VXLAN" msgid "" "Enable local ARP responder if it is supported. Requires OVS 2.1 and ML2 " "l2population driver. Allows the switch (when supporting an overlay) to " "respond to an ARP request locally without performing a costly ARP broadcast " "into the overlay." 
msgstr "" "如果支援本端 ARP 回應者,請將其啟用。需要 OVS 2.1 及 ML2 l2population 驅動程" "式。容許交換器(當支援套版時)在本端對 ARP 要求做出回應,但不執行指向套版的高" "成本 ARP 廣播。" msgid "" "Enable local ARP responder which provides local responses instead of " "performing ARP broadcast into the overlay. Enabling local ARP responder is " "not fullycompatible with the allowed-address-pairs extension." msgstr "" "啟用本端 ARP 回應程序,該回應程序提供本端回應,而不執行指向套版的 ARP 廣播。" "啟用本端 ARP 回應程序不與 allowed-address-pairs 延伸完全相容。" msgid "" "Enable services on an agent with admin_state_up False. If this option is " "False, when admin_state_up of an agent is turned False, services on it will " "be disabled. Agents with admin_state_up False are not selected for automatic " "scheduling regardless of this option. But manual scheduling to such agents " "is available if this option is True." msgstr "" "對 admin_state_up 為 False 的代理程式啟用服務。如果此選項為 False,則當代理程" "式的 admin_state_up 變為 False 時,將停用其上的服務。無論此選項為何,都不會選" "取 admin_state_up 為 False 的代理程式以進行自動排程。但如果此選項為 True,則" "可以使用此類代理程式的手動排程。" msgid "" "Enable suppression of ARP responses that don't match an IP address that " "belongs to the port from which they originate. Note: This prevents the VMs " "attached to this agent from spoofing, it doesn't protect them from other " "devices which have the capability to spoof (e.g. bare metal or VMs attached " "to agents without this flag set to True). Spoofing rules will not be added " "to any ports that have port security disabled. For LinuxBridge, this " "requires ebtables. For OVS, it requires a version that supports matching ARP " "headers. This option will be removed in Newton so the only way to disable " "protection will be via the port security extension." msgstr "" "容許抑制與 IP 位址不符的 ARP 回應,該 IP 位址屬於這些回應所源自的埠。附註:這" "會防止盜用已連接至此代理程式的 VM,它不會保護這些代理程式被具備功能的其他裝置" "盜用(例如:裸機或已連接至代理程式且未將此旗標設定為 True 的 VM)。盜用規則將" "不被新增至已停用埠安全的任何埠。針對 LinuxBridge,這需要 ebtables。針對 OVS," "它需要支援相符 ARP 標頭的版本。這個選項將在 Newton 中予以移除,因此透過埠安全" "延伸將是停用保護的唯一方式。" msgid "" "Enable/Disable log watch by metadata proxy. It should be disabled when " "metadata_proxy_user/group is not allowed to read/write its log file and " "copytruncate logrotate option must be used if logrotate is enabled on " "metadata proxy log files. Option default value is deduced from " "metadata_proxy_user: watch log is enabled if metadata_proxy_user is agent " "effective user id/name." msgstr "" "按 meta 資料 Proxy 啟用/停用日誌監看。當不容許 metadata_proxy_user/group 讀" "取/寫入其日誌檔時,應該停用日誌監看,且如果已對 meta 資料 Proxy 日誌檔啟用 " "logrotate,則必須使用 copytruncate logrotate 選項。選項預設值是從 " "metadata_proxy_user 推斷得出的:如果 metadata_proxy_user 為代理程式有效使用" "者 ID/名稱,則已啟用日誌監看。" msgid "" "Enables IPv6 Prefix Delegation for automatic subnet CIDR allocation. Set to " "True to enable IPv6 Prefix Delegation for subnet allocation in a PD-capable " "environment. Users making subnet creation requests for IPv6 subnets without " "providing a CIDR or subnetpool ID will be given a CIDR via the Prefix " "Delegation mechanism. Note that enabling PD will override the behavior of " "the default IPv6 subnetpool." msgstr "" "針對自動子網路 CIDR 配置啟用 IPv6 字首委派。設為 True 可針對支援 PD 之環境中" "的子網路配置,啟用 IPv6 字首委派。將透過字首委派機制向針對 IPv6 子網路發出子" "網路建立要求但卻未提供 CIDR 或子網路儲存區 ID 的使用者,提供 CIDR。請注意,啟" "用 PD 將置換預設 IPv6 子網路儲存區的行為。" msgid "" "Enables the dnsmasq service to provide name resolution for instances via DNS " "resolvers on the host running the DHCP agent. Effectively removes the '--no-" "resolv' option from the dnsmasq process arguments. Adding custom DNS " "resolvers to the 'dnsmasq_dns_servers' option disables this feature." 
msgstr "" "容許 dnsmasq 服務透過執行 DHCP 代理程式之主機上的 DNS 解析器,為實例提供名稱" "解析。從 dnsmasq 程序引數中有效地移除 '--no-resolv' 選項。將自訂 DNS 解析器新" "增至 'dnsmasq_dns_servers' 選項會停用此功能。" msgid "Encountered an empty component." msgstr "發現空元件。" msgid "End of VLAN range is less than start of VLAN range" msgstr "VLAN 範圍的終止值小於 VLAN 範圍的起始值" msgid "End of tunnel range is less than start of tunnel range" msgstr "通道範圍的終止值小於通道範圍的起始值" msgid "Enforce using split branches file structure." msgstr "使用分割分支檔案結構來施行。" msgid "" "Ensure that configured gateway is on subnet. For IPv6, validate only if " "gateway is not a link local address. Deprecated, to be removed during the " "Newton release, at which point the gateway will not be forced on to subnet." msgstr "" "請確保所配置的閘道位於子網路上。若為 IPv6,僅當閘道不是鏈結本端位址時,才進行" "驗證。已遭到淘汰,即將在 Newton 版本中予以移除,閘道在該發行版中將不強制在子" "網路上予以啟用。" #, python-format msgid "Error %(reason)s while attempting the operation." msgstr "嘗試執行作業時發生錯誤 %(reason)s。" #, python-format msgid "Error importing FWaaS device driver: %s" msgstr "匯入 FWaaS 裝置驅動程式時發生錯誤:%s" #, python-format msgid "Error parsing dns address %s" msgstr "剖析 DNS 位址 %s 時發生錯誤" #, python-format msgid "Error while reading %s" msgstr "讀取 %s 時發生錯誤" #, python-format msgid "" "Exceeded %s second limit waiting for address to leave the tentative state." msgstr "等待位址離開暫訂狀態時,已超過 %s 秒限制。" msgid "Exceeded maximum amount of fixed ips per port." msgstr "已超出每個埠的固定 IP 數目上限。" msgid "Existing prefixes must be a subset of the new prefixes" msgstr "現有字首必須是新字首的子集" #, python-format msgid "" "Exit code: %(returncode)d; Stdin: %(stdin)s; Stdout: %(stdout)s; Stderr: " "%(stderr)s" msgstr "" "結束碼:%(returncode)d;標準輸入:%(stdin)s;標準輸出:%(stdout)s,標準錯誤:" "%(stderr)s" #, python-format msgid "Extension %(driver)s failed." msgstr "延伸 %(driver)s 失敗。" #, python-format msgid "" "Extension driver %(driver)s required for service plugin %(service_plugin)s " "not found." msgstr "找不到服務外掛程式 %(service_plugin)s 所需的延伸驅動程式 %(driver)s。" msgid "" "Extension to use alongside ml2 plugin's l2population mechanism driver. It " "enables the plugin to populate VXLAN forwarding table." msgstr "" "與 ML2 外掛程式的 l2population 機制驅動程式一起使用的延伸。它支援該外掛程式將" "資料移入 VXLAN 轉遞表格。" #, python-format msgid "Extension with alias %s does not exist" msgstr "別名為 %s 的延伸不存在" msgid "Extensions list to use" msgstr "要使用的延伸清單" #, python-format msgid "Extensions not found: %(extensions)s." msgstr "找不到延伸:%(extensions)s。" #, python-format msgid "External DNS driver %(driver)s could not be found." msgstr "找不到外部 DNS 驅動程式 %(driver)s。" #, python-format msgid "External IP %s is the same as the gateway IP" msgstr "外部 IP %s 與閘道 IP 相同" #, python-format msgid "" "External network %(external_network_id)s is not reachable from subnet " "%(subnet_id)s. Therefore, cannot associate Port %(port_id)s with a Floating " "IP." msgstr "" "無法從子網路 %(subnet_id)s 呼叫到外部網路 %(external_network_id)s。因此,無法" "使埠 %(port_id)s 與浮動 IP 建立關聯。" #, python-format msgid "" "External network %(net_id)s cannot be updated to be made non-external, since " "it has existing gateway ports" msgstr "無法將外部網路 %(net_id)s 更新成非外部網路,因為它具有現有的閘道埠" #, python-format msgid "ExtraDhcpOpt %(id)s could not be found" msgstr "找不到 ExtraDhcpOpt %(id)s" msgid "" "FWaaS plugin is configured in the server side, but FWaaS is disabled in L3-" "agent." msgstr "FWaaS 外掛程式已在伺服器端進行配置,但在 L3 代理程式中已停用 FWaaS。" #, python-format msgid "Failed rescheduling router %(router_id)s: no eligible l3 agent found." msgstr "無法重新排程路由器 %(router_id)s:找不到適用的 L3 代理程式。" #, python-format msgid "Failed scheduling router %(router_id)s to the L3 Agent %(agent_id)s." 
msgstr "無法將路由器 %(router_id)s 排程到 L3 代理程式 %(agent_id)s。" #, python-format msgid "" "Failed to allocate a VRID in the network %(network_id)s for the router " "%(router_id)s after %(max_tries)s tries." msgstr "" "在嘗試 %(max_tries)s 次之後,無法在路由器 %(router_id)s 的網路 " "%(network_id)s 中配置 VRID。" #, python-format msgid "Failed to allocate subnet: %(reason)s." msgstr "無法配置子網路:%(reason)s。" msgid "" "Failed to associate address scope: subnetpools within an address scope must " "have unique prefixes." msgstr "無法與位址範圍建立關聯:位址範圍內的子網路儲存區必須具有唯一字首。" #, python-format msgid "Failed to check policy %(policy)s because %(reason)s." msgstr "無法檢查原則 %(policy)s,原因:%(reason)s。" #, python-format msgid "" "Failed to create a duplicate %(object_type)s: for attribute(s) " "%(attributes)s with value(s) %(values)s" msgstr "" "針對屬性 %(attributes)s(值為 %(values)s),無法建立重複的 %(object_type)s" #, python-format msgid "" "Failed to create port on network %(network_id)s, because fixed_ips included " "invalid subnet %(subnet_id)s" msgstr "" "無法在網路 %(network_id)s 上建立埠,因為 fixed_ips 包含無效的子網路 " "%(subnet_id)s" #, python-format msgid "Failed to init policy %(policy)s because %(reason)s." msgstr "無法起始設定原則 %(policy)s,原因:%(reason)s。" #, python-format msgid "Failed to locate source for %s." msgstr "找不到 %s 的來源。" #, python-format msgid "Failed to parse request. Parameter '%s' not specified" msgstr "無法剖析要求。未指定參數 '%s'" #, python-format msgid "Failed to parse request. Required attribute '%s' not specified" msgstr "無法剖析要求。未指定必要屬性 '%s'" msgid "Failed to remove supplemental groups" msgstr "無法移除增補群組" #, python-format msgid "Failed to set gid %s" msgstr "無法設定 GID %s" #, python-format msgid "Failed to set uid %s" msgstr "無法設定 UID %s" #, python-format msgid "Failed to set-up %(type)s tunnel port to %(ip)s" msgstr "無法將 %(type)s 通道埠設為 %(ip)s" msgid "Failure applying iptables rules" msgstr "套用 iptables 規則時失敗" #, python-format msgid "Failure waiting for address %(address)s to become ready: %(reason)s" msgstr "等待位址 %(address)s 變成備妥時失敗:%(reason)s" msgid "Flat provider networks are disabled" msgstr "已停用平面提供者網路" #, python-format msgid "Flavor %(flavor_id)s could not be found." msgstr "找不到特性 %(flavor_id)s。" #, python-format msgid "Flavor %(flavor_id)s is used by some service instance." msgstr "特性 %(flavor_id)s 已由某個服務實例使用。" msgid "Flavor is not enabled." msgstr "未啟用特性。" #, python-format msgid "Floating IP %(floatingip_id)s could not be found" msgstr "找不到浮動 IP %(floatingip_id)s" #, python-format msgid "" "Floating IP %(floatingip_id)s is associated with non-IPv4 address " "%s(internal_ip)s and therefore cannot be bound." msgstr "" "浮動 IP %(floatingip_id)s 與非 IPv4 位址 %s(internal_ip)s 相關聯,因此無法將" "其連結。" msgid "For TCP/UDP protocols, port_range_min must be <= port_range_max" msgstr "對於 TCP/UDP 通訊協定,port_range_min 必須小於或等於 port_range_max" #, python-format msgid "For class %(object_type)s missing primary keys: %(missing_keys)s" msgstr "針對類別 %(object_type)s,遺漏了主要索引鍵:%(missing_keys)s" msgid "Force ip_lib calls to use the root helper" msgstr "強制讓 ip_lib 呼叫使用根說明程式" #, python-format msgid "Found duplicate extension: %(alias)s." msgstr "發現重複延伸:%(alias)s。" #, python-format msgid "" "Found overlapping allocation pools: %(pool_1)s %(pool_2)s for subnet " "%(subnet_cidr)s." 
msgstr "發現子網路 %(subnet_cidr)s 的配置儲存區 %(pool_1)s %(pool_2)s 重疊。" msgid "Gateway IP version inconsistent with allocation pool version" msgstr "閘道 IP 版本與配置儲存區版本不一致" #, python-format msgid "" "Gateway cannot be updated for router %(router_id)s, since a gateway to " "external network %(net_id)s is required by one or more floating IPs." msgstr "" "無法更新路由器 %(router_id)s 的閘道,因為一個以上的浮動 IP 需要外部網路 " "%(net_id)s 的閘道。" #, python-format msgid "Gateway ip %(ip_address)s conflicts with allocation pool %(pool)s." msgstr "閘道 IP %(ip_address)s 與配置儲存區 %(pool)s 相衝突。" msgid "Gateway is not valid on subnet" msgstr "閘道在子網路上無效" msgid "" "Geneve encapsulation header size is dynamic, this value is used to calculate " "the maximum MTU for the driver. This is the sum of the sizes of the outer " "ETH + IP + UDP + GENEVE header sizes. The default size for this field is 50, " "which is the size of the Geneve header without any additional option headers." msgstr "" "Geneve 封裝標頭大小是動態的,此值用於計算驅動程式的 MTU 上限。這是外部 ETH + " "IP + UDP + GENEVE 標頭大小的大小總和。此欄位的預設大小是 50,這是 Geneve 標頭" "(不含任何其他選項標頭)的大小。" msgid "Group (gid or name) running metadata proxy after its initialization" msgstr "在 meta 資料 Proxy 起始設定之後執行該 Proxy 的群組(GID 或名稱)" msgid "" "Group (gid or name) running metadata proxy after its initialization (if " "empty: agent effective group)." msgstr "" "在 meta 資料 Proxy 起始設定之後執行該 Proxy 的群組(GID 或名稱)(如果為空," "則為代理程式有效群組)。" msgid "Group (gid or name) running this process after its initialization" msgstr "在此程序起始設定之後執行此程序的群組(GID 或名稱)" #, python-format msgid "HEAD file does not match migration timeline head, expected: %s" msgstr "HEAD 檔與移轉時間表表頭不符,預期為:%s" msgid "" "Hostname to be used by the Neutron server, agents and services running on " "this machine. All the agents and services running on this machine must use " "the same host value." msgstr "" "在此機器上執行之 Neutron 伺服器、代理程式及服務要使用的主機名稱。在此機器上執" "行的所有代理程式及服務都必須使用相同的主機值。" msgid "How many times Neutron will retry MAC generation" msgstr "Neutron 將重試 MAC 產生作業的次數" #, python-format msgid "" "ICMP code (port-range-max) %(value)s is provided but ICMP type (port-range-" "min) is missing." msgstr "" "提供了 ICMP 代碼 (port-range-max) %(value)s,但遺漏了 ICMP 類型 (port-range-" "min)。" msgid "ID of network" msgstr "網路的 ID" msgid "ID of network to probe" msgstr "要探測的網路 ID" msgid "ID of probe port to delete" msgstr "要刪除的探針埠 ID" msgid "ID of probe port to execute command" msgstr "要執行指令的探針埠 ID" msgid "ID of the router" msgstr "路由器 ID" #, python-format msgid "IP address %(ip)s already allocated in subnet %(subnet_id)s" msgstr "IP 位址 %(ip)s 已經在子網路 %(subnet_id)s 中得到配置" #, python-format msgid "IP address %(ip)s does not belong to subnet %(subnet_id)s" msgstr "IP 位址 %(ip)s 不屬於子網路 %(subnet_id)s" #, python-format msgid "" "IP address %(ip_address)s is not a valid IP for any of the subnets on the " "specified network." msgstr "IP 位址 %(ip_address)s 不是所指定網路上任何子網路的有效 IP。" msgid "IP address used by Nova metadata server." msgstr "Nova meta 資料伺服器所使用的 IP 位址。" msgid "IP allocation failed. Try again later." msgstr "IP 配置失敗。請稍後再試。" msgid "IP allocation requires subnet_id or ip_address" msgstr "IP 配置需要 subnet_id 或 ip_address" #, python-format msgid "" "IPTablesManager.apply failed to apply the following set of iptables rules:\n" "%s" msgstr "" "IPTablesManager.apply 無法套用下列 iptables 規則集:\n" "%s" msgid "IPtables conntrack zones exhausted, iptables rules cannot be applied." msgstr "iptables conntrack 區域已用盡,無法套用 iptables 規則。" msgid "IPv6 Address Mode must be SLAAC or Stateless for Prefix Delegation." 
msgstr "針對字首委派,IPv6 位址模式必須是 SLAAC 或 Stateless。" msgid "IPv6 RA Mode must be SLAAC or Stateless for Prefix Delegation." msgstr "針對字首委派,IPv6 RA 模式必須是 SLAAC 或 Stateless。" #, python-format msgid "" "IPv6 address %(address)s can not be directly assigned to a port on subnet " "%(id)s since the subnet is configured for automatic addresses" msgstr "" "無法直接將 IPv6 位址 %(address)s 指派給子網路 %(id)s 上的埠,因為該子網路配置" "為用於自動位址" #, python-format msgid "" "IPv6 address %(ip)s cannot be directly assigned to a port on subnet " "%(subnet_id)s as the subnet is configured for automatic addresses" msgstr "" "無法直接將 IPv6 位址 %(ip)s 指派給子網路%(subnet_id)s 上的埠,因為該子網路配" "置為用於自動位址" #, python-format msgid "" "IPv6 subnet %s configured to receive RAs from an external router cannot be " "added to Neutron Router." msgstr "" "無法將已配置成從外部路由器接收 RA 的 IPv6 子網路 %s 新增至 Neutron 路由器。" msgid "" "If True, advertise network MTU values if core plugin calculates them. MTU is " "advertised to running instances via DHCP and RA MTU options." msgstr "" "如果為 True,則會在核心外掛程式計算網路 MTU 值時公佈這些值。透過 DHCP 和 RA " "MTU 選項,將 MTU 公佈給執行中的實例。" msgid "" "If True, then allow plugins that support it to create VLAN transparent " "networks." msgstr "如果為 True,則容許支援它的外掛程式建立 VLAN 透通網路。" msgid "" "If non-empty, the l3 agent can only configure a router that has the matching " "router ID." msgstr "如果不是空的,則 L3 代理程式只能配置一個具有相符路由器 ID 的路由器。" msgid "Illegal IP version number" msgstr "無效的 IP 版本號碼" #, python-format msgid "" "Illegal prefix bounds: %(prefix_type)s=%(prefixlen)s, %(base_prefix_type)s=" "%(base_prefixlen)s." msgstr "" "無效的字首範圍:%(prefix_type)s=%(prefixlen)s,%(base_prefix_type)s=" "%(base_prefixlen)s。" #, python-format msgid "" "Illegal subnetpool association: subnetpool %(subnetpool_id)s cannot " "associate with address scope %(address_scope_id)s because subnetpool " "ip_version is not %(ip_version)s." msgstr "" "子網路儲存區關聯無效:無法將子網路儲存區 %(subnetpool_id)s 與位址範圍 " "%(address_scope_id)s 建立關聯,因為子網路儲存區 ip_version 不是 " "%(ip_version)s。" #, python-format msgid "" "Illegal subnetpool association: subnetpool %(subnetpool_id)s cannot be " "associated with address scope %(address_scope_id)s." msgstr "" "子網路儲存區關聯無效:無法將子網路儲存區 %(subnetpool_id)s 與位址範圍 " "%(address_scope_id)s 建立關聯。" #, python-format msgid "Illegal subnetpool update : %(reason)s." msgstr "子網路儲存區更新無效:%(reason)s。" #, python-format msgid "Illegal update to prefixes: %(msg)s." msgstr "字首更新無效:%(msg)s。" msgid "" "In some cases the Neutron router is not present to provide the metadata IP " "but the DHCP server can be used to provide this info. Setting this value " "will force the DHCP server to append specific host routes to the DHCP " "request. If this option is set, then the metadata service will be activated " "for all the networks." msgstr "" "在部分情況下,Neutron 路由器未呈現以提供 meta 資料 IP,但 DHCP 伺服器可用於提" "供此資訊。設定此值會強制讓 DHCP 伺服器將特定的主機路線附加至 DHCP 要求。如果" "設定此選項,則將對所有網路啟動 meta 資料服務。" #, python-format msgid "Incorrect pci_vendor_info: \"%s\", should be pair vendor_id:product_id" msgstr "pci_vendor_info 不正確:\"%s\",應該是 vendor_id:product_id 配對" msgid "" "Indicates that this L3 agent should also handle routers that do not have an " "external network gateway configured. This option should be True only for a " "single agent in a Neutron deployment, and may be False for all agents if all " "routers must have an external network gateway." 
msgstr "" "指示此 L3 代理程式還應該處理尚未配置外部網路閘道的路由器。針對 Neutron 部署中" "的單個代理程式,這個選項只應該為 True;如果所有路由器都必須具有外部網路閘道," "則針對所有路由器,這個選項可能為 False。" #, python-format msgid "Instance of class %(module)s.%(class)s must contain _cache attribute" msgstr "類別 %(module)s 的實例。%(class)s 必須包含 _cache 屬性" #, python-format msgid "Insufficient prefix space to allocate subnet size /%s" msgstr "字首空間不足,無法配置子網路大小 /%s" msgid "Insufficient rights for removing default security group." msgstr "權限不足,無法移除預設安全群組。" msgid "" "Integration bridge to use. Do not change this parameter unless you have a " "good reason to. This is the name of the OVS integration bridge. There is one " "per hypervisor. The integration bridge acts as a virtual 'patch bay'. All VM " "VIFs are attached to this bridge and then 'patched' according to their " "network connectivity." msgstr "" "要使用的整合橋接器。除非您有適當的原因,否則請勿變更此參數。這是 OVS 整合橋接" "器的名稱。每個 Hypervisor 有一個整合橋接器。整合橋接器充當虛擬的「修補程式機" "架」。所有 VM VIF 都已連接至此橋接器,然後根據其網路連線功能進行「修補」。" msgid "Interface to monitor" msgstr "要監視的介面" msgid "" "Interval between checks of child process liveness (seconds), use 0 to disable" msgstr "子程序存活檢查之間的間隔(秒),使用 0 以停用" msgid "Interval between two metering measures" msgstr "兩次計量測量之間的間隔" msgid "Interval between two metering reports" msgstr "兩次計量報告之間的間隔" #, python-format msgid "Invalid CIDR %(input)s given as IP prefix." msgstr "作為 IP 字首而提供的 CIDR %(input)s 無效。" #, python-format msgid "" "Invalid CIDR %s for IPv6 address mode. OpenStack uses the EUI-64 address " "format, which requires the prefix to be /64." msgstr "" "IPv6 位址模式的 CIDR %s 無效。OpenStack 使用 EUI-64 位址格式,其需要字首" "為 /64。" #, python-format msgid "Invalid Device %(dev_name)s: %(reason)s" msgstr "無效的裝置 %(dev_name)s:%(reason)s" #, python-format msgid "" "Invalid action '%(action)s' for object type '%(object_type)s'. Valid " "actions: %(valid_actions)s" msgstr "" "針對物件類型 '%(object_type)s' 的動作 '%(action)s' 無效。有效動作:" "%(valid_actions)s" #, python-format msgid "" "Invalid authentication type: %(auth_type)s, valid types are: " "%(valid_auth_types)s" msgstr "無效的鑑別類型:%(auth_type)s,有效的類型為:%(valid_auth_types)s" #, python-format msgid "Invalid content type %(content_type)s." msgstr "無效的內容類型 %(content_type)s。" #, python-format msgid "Invalid data format for IP pool: '%s'" msgstr "IP 儲存區的資料格式無效:'%s'" #, python-format msgid "Invalid data format for extra-dhcp-opt: %(data)s" msgstr "extra-dhcp-opt 的資料格式無效:%(data)s" #, python-format msgid "Invalid data format for fixed IP: '%s'" msgstr "固定 IP 的資料格式無效:'%s'" #, python-format msgid "Invalid data format for hostroute: '%s'" msgstr "主機路徑的資料格式無效:'%s'" #, python-format msgid "Invalid data format for nameserver: '%s'" msgstr "名稱伺服器的資料格式無效:'%s'" #, python-format msgid "Invalid ethertype %(ethertype)s for protocol %(protocol)s." msgstr "通訊協定 %(protocol)s 的乙太網路類型 %(ethertype)s 無效。" #, python-format msgid "Invalid extension environment: %(reason)s." msgstr "無效的延伸環境:%(reason)s。" #, python-format msgid "Invalid format for routes: %(routes)s, %(reason)s" msgstr "無效的路徑格式:%(routes)s,%(reason)s" #, python-format msgid "Invalid format: %s" msgstr "無效的格式:%s" #, python-format msgid "Invalid input for %(attr)s. Reason: %(reason)s." msgstr "%(attr)s 的輸入無效。原因:%(reason)s。" #, python-format msgid "" "Invalid input. 
'%(target_dict)s' must be a dictionary with keys: " "%(expected_keys)s" msgstr "" "無效的輸入。'%(target_dict)s' 必須是含有下列索引鍵的字典:%(expected_keys)s" #, python-format msgid "Invalid instance state: %(state)s, valid states are: %(valid_states)s" msgstr "無效的實例狀態:%(state)s,有效的狀態為:%(valid_states)s" #, python-format msgid "Invalid mapping: '%s'" msgstr "無效的對映:'%s'" #, python-format msgid "Invalid network VLAN range: '%(vlan_range)s' - '%(error)s'." msgstr "無效的網路 VLAN 範圍:'%(vlan_range)s' - '%(error)s'。" #, python-format msgid "Invalid network VXLAN port range: '%(vxlan_range)s'." msgstr "無效的網路 VXLAN 埠範圍:'%(vxlan_range)s'。" #, python-format msgid "Invalid pci slot %(pci_slot)s" msgstr "無效的 PCI 插槽 %(pci_slot)s" #, python-format msgid "Invalid provider format. Last part should be 'default' or empty: %s" msgstr "無效的提供者格式。最後一部分應該是 'default' 或空白:%s" #, python-format msgid "Invalid resource type %(resource_type)s" msgstr "資源類型 %(resource_type)s 無效" #, python-format msgid "Invalid route: %s" msgstr "無效的路徑:%s" msgid "Invalid service provider format" msgstr "無效的服務提供者格式" #, python-format msgid "Invalid service type %(service_type)s." msgstr "服務類型 %(service_type)s 無效。" #, python-format msgid "" "Invalid value for ICMP %(field)s (%(attr)s) %(value)s. It must be 0 to 255." msgstr "" "ICMP %(field)s (%(attr)s) 的值 %(value)s 無效。該值必須在 0 到 255 之間。" #, python-format msgid "Invalid value for port %(port)s" msgstr "埠 %(port)s 的值無效" msgid "" "Iptables mangle mark used to mark ingress from external network. This mark " "will be masked with 0xffff so that only the lower 16 bits will be used." msgstr "" "用於標示外部網路中入口的 iptables 破壞標記。此標記將以 0xffff 進行遮罩,以便" "只使用較低的 16 位元。" msgid "" "Iptables mangle mark used to mark metadata valid requests. This mark will be " "masked with 0xffff so that only the lower 16 bits will be used." msgstr "" "用於標示 meta 資料有效要求的 iptables 破壞標記。此標記將以 0xffff 進行遮罩," "以便只使用較低的 16 位元。" msgid "" "Keep in track in the database of current resourcequota usage. Plugins which " "do not leverage the neutron database should set this flag to False" msgstr "" "保持追蹤資料庫中的現行 resourcequota 使用情形。不利用 Neutron 資料庫的外掛程" "式應該將此旗標設定為 False" msgid "Keepalived didn't respawn" msgstr "Keepalived 未再次大量產生" msgid "Keepalived didn't spawn" msgstr "Keepalived 未大量產生" #, python-format msgid "" "Kernel HZ value %(value)s is not valid. This value must be greater than 0." msgstr "核心 HZ 值 %(value)s 無效。此值必須大於 0。" #, python-format msgid "Key %(key)s in mapping: '%(mapping)s' not unique" msgstr "對映 '%(mapping)s' 中的索引鍵 %(key)s 不是唯一的" msgid "L3 agent failure to setup NAT for floating IPs" msgstr "L3 代理程式無法針對浮動 IP 設定 NAT" msgid "L3 agent failure to setup floating IPs" msgstr "L3 代理程式無法設定浮動 IP" #, python-format msgid "Limit must be an integer 0 or greater and not '%d'" msgstr "限制值必須是大於或等於 0 的整數,而不是 '%d'" msgid "Limit number of leases to prevent a denial-of-service." msgstr "限制租賃次數以防止阻斷服務攻擊。" msgid "List of :" msgstr ": 的清單" msgid "" "List of :: or " "specifying physical_network names usable for VLAN provider and tenant " "networks, as well as ranges of VLAN tags on each available for allocation to " "tenant networks." msgstr "" ":: 的清單,指定可" "用於 VLAN 提供者及承租人網路的 physical_network 名稱,以及在每個可用於配置給" "承租人網路的 physical_network 上指定 VLAN 標記範圍。" msgid "" "List of network type driver entrypoints to be loaded from the neutron.ml2." "type_drivers namespace." msgstr "" "要從 neutron.ml2.type_drivers 名稱空間載入的網路類型驅動程式進入點清單。" msgid "" "List of physical_network names with which flat networks can be created. 
Use " "default '*' to allow flat networks with arbitrary physical_network names. " "Use an empty list to disable flat networks." msgstr "" "可用來建立平面網路的 physical_network 名稱清單。使用預設值 '*' 可容許使用含有" "任意 physical_network 名稱的平面網路。使用空白清單可停用平面網路。" msgid "Local IP address of the VXLAN endpoints." msgstr "VXLAN 端點的本端 IP 位址。" msgid "Location for Metadata Proxy UNIX domain socket." msgstr "meta 資料 Proxy UNIX 網域 Socket 的位置。" msgid "Location of Metadata Proxy UNIX domain socket" msgstr "meta 資料 Proxy UNIX 網域 Socket 的位置" msgid "Location of pid file of this process." msgstr "此程序的 PID 檔位置。" msgid "Location to store DHCP server config files." msgstr "DHCP 伺服器配置檔的儲存位置。" msgid "Location to store IPv6 PD files." msgstr "用於儲存 IPv6 PD 檔的位置。" msgid "Location to store IPv6 RA config files" msgstr "用於儲存 IPv6 RA 配置檔的位置" msgid "Location to store child pid files" msgstr "子項 PID 檔案的儲存位置" msgid "Location to store keepalived/conntrackd config files" msgstr "用於儲存 keepalived/conntrackd 配置檔的位置" msgid "Log agent heartbeats" msgstr "日誌代理程式活動訊號" msgid "Loopback IP subnet is not supported if enable_dhcp is True." msgstr "如果 enable_dhcp 為 True,則迴圈 IP 子網路不受支援。" msgid "MTU size of veth interfaces" msgstr "veth 介面的 MTU 大小" msgid "Make the l2 agent run in DVR mode." msgstr "讓 L2 代理程式在 DVR 模式下執行。" msgid "Malformed request body" msgstr "要求內文的格式不正確" #, python-format msgid "Malformed request body: %(reason)s." msgstr "要求內文的格式不正確:%(reason)s。" msgid "MaxRtrAdvInterval setting for radvd.conf" msgstr "radvd.conf 的 MaxRtrAdvInterval 設定" msgid "Maximum number of DNS nameservers per subnet" msgstr "每個子網路的 DNS 名稱伺服器數目上限" msgid "" "Maximum number of L3 agents which a HA router will be scheduled on. If it is " "set to 0 then the router will be scheduled on every agent." msgstr "" "將在其中排程 HA 路由器的 L3 代理程式數目上限。如果將其設為 0,則將在每一個代" "理程式上排程該路由器。" msgid "Maximum number of allowed address pairs" msgstr "所容許的位址配對數目上限" msgid "" "Maximum number of fixed ips per port. This option is deprecated and will be " "removed in the N release." msgstr "" "每個埠的固定 IP 數目上限。這個選項已遭到淘汰,且將在 N 版本中予以移除。" msgid "Maximum number of host routes per subnet" msgstr "每個子網路的主機路徑數目上限" msgid "Maximum number of routes per router" msgstr "每個路由器的路徑數目上限" msgid "" "Metadata Proxy UNIX domain socket mode, 4 values allowed: 'deduce': deduce " "mode from metadata_proxy_user/group values, 'user': set metadata proxy " "socket mode to 0o644, to use when metadata_proxy_user is agent effective " "user or root, 'group': set metadata proxy socket mode to 0o664, to use when " "metadata_proxy_group is agent effective group or root, 'all': set metadata " "proxy socket mode to 0o666, to use otherwise." msgstr "" "meta 資料 Proxy UNIX 網域 Socket 模式,容許下列四個值:'deduce':來自 " "metadata_proxy_user/group 值的 deduce 模式;'user':將 meta 資料 Proxy " "Socket 模式設定為 0o644,以在 metadata_proxy_user 是代理程式有效使用者或 " "root 使用者時使用;'group':將 meta 資料 Proxy Socket 模式設定為 0o664,以在 " "metadata_proxy_group 是有效群組或 root 使用者時使用;'all':將 meta 資料 " "Proxy Socket 模式設定為 0o666,以在其他情況下使用。" msgid "Metering driver" msgstr "計量驅動程式" #, python-format msgid "Metering label %(label_id)s does not exist" msgstr "計量標籤 %(label_id)s 不存在" #, python-format msgid "Metering label rule %(rule_id)s does not exist" msgstr "計量標籤規則 %(rule_id)s 不存在" #, python-format msgid "" "Metering label rule with remote_ip_prefix %(remote_ip_prefix)s overlaps " "another" msgstr "" "計量標籤規則 (remote_ip_prefix = %(remote_ip_prefix)s),與另一個計量標籤規則" "重疊" msgid "Method cannot be called within a transaction." 
msgstr "無法在交易內呼叫方法。" msgid "Migration from distributed router to centralized is not supported" msgstr "不支援從分散式路由器移轉至集中式" msgid "MinRtrAdvInterval setting for radvd.conf" msgstr "radvd.conf 的 MinRtrAdvInterval 設定" msgid "Minimize polling by monitoring ovsdb for interface changes." msgstr "透過監視 OVSDB 是否有介面變更,將輪詢減至最少。" #, python-format msgid "Missing key in mapping: '%s'" msgstr "對映中遺漏了索引鍵:'%s'" #, python-format msgid "Missing value in mapping: '%s'" msgstr "對映中遺漏了值:'%s'" msgid "Multicast IP subnet is not supported if enable_dhcp is True." msgstr "如果 enable_dhcp 為 True,則多重播送 IP 子網路不受支援。" msgid "" "Multicast group for VXLAN. When configured, will enable sending all " "broadcast traffic to this multicast group. When left unconfigured, will " "disable multicast VXLAN mode." msgstr "" "VXLAN 的多重播送群組。當已配置時,容許將所有廣播資料流量傳送至此多重播送群" "組。當保持未配置時,將停用多重播送 VXLAN 模式。" msgid "" "Multicast group(s) for vxlan interface. A range of group addresses may be " "specified by using CIDR notation. Specifying a range allows different VNIs " "to use different group addresses, reducing or eliminating spurious broadcast " "traffic to the tunnel endpoints. To reserve a unique group for each possible " "(24-bit) VNI, use a /8 such as 239.0.0.0/8. This setting must be the same on " "all the agents." msgstr "" "VXLAN 介面的多重播送群組。可以使用 CIDR 表示法來指定群組位址的範圍。指定一個" "範圍會容許不同的 VNI 使用不同的群組位址,以減少或刪除傳送至通道端點的虛假廣播" "資料流量。如果要為每一個可能的(24 位元)VNI 保留一個唯一群組,請使用 /8(例" "如 239.0.0.0/8)。在所有代理程式上,此設定必須相同。" #, python-format msgid "Multiple agents with agent_type=%(agent_type)s and host=%(host)s found" msgstr "找到多個 agent_type = %(agent_type)s 且主機 = %(host)s 的代理程式" #, python-format msgid "Multiple default providers for service %s" msgstr "服務 %s 的多個預設提供者" #, python-format msgid "Multiple plugins for service %s were configured" msgstr "已為服務 %s 配置多個外掛程式" #, python-format msgid "Multiple providers specified for service %s" msgstr "為服務 %s 指定了多個提供者" msgid "Multiple tenant_ids in bulk security group rule create not allowed" msgstr "不容許主體安全群組規則建立作業中存在多個 tenant_id" msgid "Must also specify protocol if port range is given." msgstr "如果給定了埠範圍,則也必須指定通訊協定。" msgid "Must specify one or more actions on flow addition or modification" msgstr "必須對流程新增作業或修改作業指定一個以上的動作" #, python-format msgid "Name %(dns_name)s is duplicated in the external DNS service" msgstr "名稱 %(dns_name)s 在外部 DNS 服務中是重複的" #, python-format msgid "" "Name '%s' must be 1-63 characters long, each of which can only be " "alphanumeric or a hyphen." msgstr "名稱 '%s' 的長度必須為 1-63 個字元,每個字元只能是英數字元或連字號。" #, python-format msgid "Name '%s' must not start or end with a hyphen." msgstr "名稱 '%s' 不得以連字號開頭或結尾。" msgid "Name of Open vSwitch bridge to use" msgstr "要使用的 Open vSwitch 橋接器名稱" msgid "" "Name of nova region to use. Useful if keystone manages more than one region." msgstr "要使用的 Nova 區域名稱。如果 Keystone 管理多個區域,則很有用。" msgid "Name of the FWaaS Driver" msgstr "FWaaS 驅動程式的名稱" msgid "Namespace of the router" msgstr "路由器的名稱空間" msgid "Native pagination depend on native sorting" msgstr "原生分頁相依於原生排序" #, python-format msgid "" "Need to apply migrations from %(project)s contract branch. This will require " "all Neutron server instances to be shutdown before proceeding with the " "upgrade." msgstr "" "需要套用來自 %(project)s 合約分支的移轉。這將需要先關閉所有 Neutron 伺服器實" "例,然後再繼續升級。" msgid "Negative delta (downgrade) not supported" msgstr "不支援負數差異(降級)" msgid "Negative relative revision (downgrade) not supported" msgstr "不支援負數相對修訂(降級)" #, python-format msgid "" "Network %(network_id)s is already bound to BgpSpeaker %(bgp_speaker_id)s." 
msgstr "網路 %(network_id)s 已連結至 BGP 喇叭 %(bgp_speaker_id)s。" #, python-format msgid "" "Network %(network_id)s is not associated with BGP speaker %(bgp_speaker_id)s." msgstr "網路 %(network_id)s 未與 BGP 喇叭 %(bgp_speaker_id)s 建立關聯。" #, python-format msgid "Network %(network_id)s is not bound to a BgpSpeaker." msgstr "網路 %(network_id)s 未連結至 BGP 喇叭。" #, python-format msgid "Network %(network_id)s is not bound to a IPv%(ip_version)s BgpSpeaker." msgstr "網路 %(network_id)s 未連結至 IPv%(ip_version)s BGP 喇叭。" #, python-format msgid "Network %s does not contain any IPv4 subnet" msgstr "網路 %s 不包含任何 IPv4 子網路" #, python-format msgid "Network %s is not a valid external network" msgstr "網路 %s 不是有效的外部網路" #, python-format msgid "Network %s is not an external network" msgstr "網路 %s 不是外部網路" #, python-format msgid "" "Network of size %(size)s, from IP range %(parent_range)s excluding IP ranges " "%(excluded_ranges)s was not found." msgstr "" "在 IP 範圍 %(parent_range)s(排除 IP 範圍 %(excluded_ranges)s)中找不到大小" "為 %(size)s 的網路。" msgid "Network that will have instance metadata proxied." msgstr "將對其實例 meta 資料執行 Proxy 動作的網路。" #, python-format msgid "Network type value '%s' not supported" msgstr "不支援網路類型值 '%s'" msgid "Network type value needed by the ML2 plugin" msgstr "ML2 外掛程式需要的網路類型值" msgid "Network types supported by the agent (gre and/or vxlan)." msgstr "代理程式支援的網路類型(GRE 及/或 VXLAN)。" msgid "" "Neutron IPAM (IP address management) driver to use. If ipam_driver is not " "set (default behavior), no IPAM driver is used. In order to use the " "reference implementation of Neutron IPAM driver, use 'internal'." msgstr "" "要使用的 Neutron IPAM(IP 位址管理)驅動程式。如果未設定 ipam_driver(預設行" "為),則將不使用任何 PAM 驅動程式。如果要使用 Neutron IPAM 驅動程式的參照實" "作,請使用「內部」。" msgid "Neutron Service Type Management" msgstr "Neutron 服務類型管理" msgid "Neutron core_plugin not configured!" msgstr "未配置 Neutron core_plugin!" msgid "Neutron plugin provider module" msgstr "Neutron 外掛程式提供者模組" msgid "Neutron quota driver class" msgstr "Neutron 配額驅動程式類別" msgid "New value for first_ip or last_ip has to be specified." msgstr "必須指定 first_ip 或 last_ip 的新值。" msgid "No default router:external network" msgstr "沒有預設 router:external 網路" #, python-format msgid "No default subnetpool found for IPv%s" msgstr "找不到 IPv%s 的預設子網路儲存區" msgid "No default subnetpools defined" msgstr "未定義預設子網路儲存區" #, python-format msgid "No eligible l3 agent associated with external network %s found" msgstr "找不到與外部網路 %s 相關聯的適用 L3 代理程式" #, python-format msgid "No more IP addresses available for subnet %(subnet_id)s." msgstr "沒有其他 IP 位址可用於子網路 %(subnet_id)s。" #, python-format msgid "" "No more Virtual Router Identifier (VRID) available when creating router " "%(router_id)s. The limit of number of HA Routers per tenant is 254." msgstr "" "建立路由器 %(router_id)s 時,沒有其他「虛擬路由器 ID (VRID)」可用。每個承租人" "的 HA 路由器數目限制為 254 個。" msgid "No offline migrations pending." msgstr "沒有擱置中的離線移轉。" #, python-format msgid "No providers specified for '%s' service, exiting" msgstr "未對 '%s' 服務指定提供者,正在結束" #, python-format msgid "No shared key in %s fields" msgstr "%s 欄位中沒有共用金鑰" msgid "Not allowed to manually assign a router to an agent in 'dvr' mode." msgstr "不容許將路由器手動指派給處於 'dvr' 模式的代理程式。" msgid "Not allowed to manually remove a router from an agent in 'dvr' mode." msgstr "不容許從處於 'dvr' 模式的代理程式中手動移除路由器。" #, python-format msgid "" "Not enough l3 agents available to ensure HA. Minimum required " "%(min_agents)s, available %(num_agents)s." 
msgstr "" "沒有足夠的 L3 代理程式可用,無法確保 HA。所需的數目下限為 %(min_agents)s,可" "用數目為 %(num_agents)s。" msgid "" "Number of DHCP agents scheduled to host a tenant network. If this number is " "greater than 1, the scheduler automatically assigns multiple DHCP agents for " "a given tenant network, providing high availability for DHCP service." msgstr "" "已排程管理承租人網路的 DHCP 代理程式數目。如果此數目大於 1,則排程器會自動為" "給定的承租人網路指派多個 DHCP 代理程式,為 DHCP 服務提供高可用性。" msgid "Number of RPC worker processes dedicated to state reports queue" msgstr "專用於說明報告佇列的 RPC 工作程式程序數目" msgid "Number of RPC worker processes for service" msgstr "服務的 RPC 工作程式程序數目" msgid "Number of backlog requests to configure the metadata server socket with" msgstr "要配置給 meta 資料伺服器 Socket 的待辦事項要求數目" msgid "Number of backlog requests to configure the socket with" msgstr "要配置給 Socket 的待辦事項要求數目" msgid "" "Number of bits in an ipv4 PTR zone that will be considered network prefix. " "It has to align to byte boundary. Minimum value is 8. Maximum value is 24. " "As a consequence, range of values is 8, 16 and 24" msgstr "" "將被視為網路字首之 IPv4 PTR 區域中的位元數目。它必須與位元組界限對齊。下限值" "為 8。上限值為 24。因此,值的範圍是 8、16 和 24" msgid "" "Number of bits in an ipv6 PTR zone that will be considered network prefix. " "It has to align to nyble boundary. Minimum value is 4. Maximum value is 124. " "As a consequence, range of values is 4, 8, 12, 16,..., 124" msgstr "" "將被視為網路字首之 IPv6 PTR 區域中的位元數目。它必須與 nyble 界限對齊。下限值" "為 4。上限值為 124。因此,值的範圍是 4、8、12、16、...、124" msgid "" "Number of floating IPs allowed per tenant. A negative value means unlimited." msgstr "每個承租人所容許的浮動 IP 數目。負數值表示無限制。" msgid "" "Number of networks allowed per tenant. A negative value means unlimited." msgstr "每個承租人所容許的網路數目。負數值表示無限制。" msgid "Number of ports allowed per tenant. A negative value means unlimited." msgstr "每個承租人所容許的埠數目。負數值表示無限制。" msgid "Number of routers allowed per tenant. A negative value means unlimited." msgstr "每個承租人所容許的路由器數目。負數值表示無限制。" msgid "" "Number of seconds between sending events to nova if there are any events to " "send." msgstr "兩次將事件傳送至 Nova 之間的秒數(如果有任何事件要傳送)。" msgid "Number of seconds to keep retrying to listen" msgstr "不斷重試接聽的秒數" msgid "" "Number of security groups allowed per tenant. A negative value means " "unlimited." msgstr "每個承租人所容許的安全群組數目。負數值表示無限制。" msgid "" "Number of security rules allowed per tenant. A negative value means " "unlimited." msgstr "每個承租人所容許的安全規則數目。負數值表示無限制。" msgid "" "Number of separate API worker processes for service. If not specified, the " "default is equal to the number of CPUs available for best performance." msgstr "" "服務的獨立 API 工作程式程序數目。如果未指定,則預設值為可用於最佳效能的 CPU " "數目。" msgid "" "Number of separate worker processes for metadata server (defaults to half of " "the number of CPUs)" msgstr "meta 資料伺服器的獨立工作程式程序數目(預設為 CPU 數目的一半)" msgid "Number of subnets allowed per tenant, A negative value means unlimited." msgstr "每個承租人所容許的子網路數目。負數值表示無限制。" msgid "" "Number of threads to use during sync process. Should not exceed connection " "pool size configured on server." msgstr "" "執行同步程序期間要使用的執行緒數目。不應超過伺服器上配置的連線儲存區大小。" msgid "OK" msgstr "確定" msgid "" "OVS datapath to use. 'system' is the default value and corresponds to the " "kernel datapath. To enable the userspace datapath set this value to 'netdev'." msgstr "" "要使用的 OVS 資料路徑。'system' 是預設值,且與核心資料路徑對應。如果要啟用使" "用者空間資料路徑,請將此值設為 'netdev'。" msgid "OVS vhost-user socket directory." msgstr "OVS vhost 使用者 Socket 目錄。" #, python-format msgid "OVSDB Error: %s" msgstr "OVSDB 錯誤:%s" #, python-format msgid "Object action %(action)s failed because: %(reason)s." 
msgstr "物件動作 %(action)s 失敗,原因:%(reason)s。" msgid "Only admin can view or configure quota" msgstr "只有管理者才能檢視或配置配額" msgid "Only admin is authorized to access quotas for another tenant" msgstr "只有管理者才獲授權來存取另一個承租人的配額" msgid "Only admins can manipulate policies on networks they do not own." msgstr "只有管理者才可以在他們不擁有的網路上操作原則。" msgid "Only admins can manipulate policies on objects they do not own" msgstr "只有管理者才可以操作不歸他們擁有之物件上的原則" msgid "Only allowed to update rules for one security profile at a time" msgstr "一次只容許更新一個安全設定檔的規則" msgid "Only remote_ip_prefix or remote_group_id may be provided." msgstr "只能提供 remote_ip_prefix 或 remote_group_id。" msgid "OpenFlow interface to use." msgstr "要使用的 OpenFlow 介面。" #, python-format msgid "" "Operation %(op)s is not supported for device_owner %(device_owner)s on port " "%(port_id)s." msgstr "埠 %(port_id)s 上的裝置擁有者 %(device_owner)s 不支援作業 %(op)s。" #, python-format msgid "Operation not supported on device %(dev_name)s" msgstr "作業在裝置 %(dev_name)s 上不受支援" msgid "" "Ordered list of network_types to allocate as tenant networks. The default " "value 'local' is useful for single-box testing but provides no connectivity " "between hosts." msgstr "" "要配置為承租人網路的 network_type 有序清單。預設值 'local' 對單框測試很有用," "但卻不提供主機之間的連線功能。" msgid "Override the default dnsmasq settings with this file." msgstr "使用此檔案來置換預設 dnsmasq 設定。" msgid "Owner type of the device: network/compute" msgstr "裝置的擁有者類型:網路/計算" msgid "POST requests are not supported on this resource." msgstr "此資源上不支援 POST 要求。" #, python-format msgid "Package %s not installed" msgstr "未安裝套件 %s" #, python-format msgid "Parameter %(param)s must be of %(param_type)s type." msgstr "參數 %(param)s 必須是 %(param_type)s 類型。" #, python-format msgid "Parsing bridge_mappings failed: %s." msgstr "剖析 bridge_mappings 失敗:%s。" msgid "Parsing supported pci_vendor_devs failed" msgstr "剖析受支援的 pci_vendor_devs 失敗" msgid "Password for connecting to designate in admin context" msgstr "用於連接以在管理環境定義中指定的密碼" #, python-format msgid "Password not specified for authentication type=%(auth_type)s." msgstr "針對鑑別類型 = %(auth_type)s,未指定密碼。" msgid "Path to PID file for this process" msgstr "用於此程序的 PID 檔案路徑" msgid "Path to the router directory" msgstr "路由器目錄的路徑" msgid "Peer patch port in integration bridge for tunnel bridge." msgstr "整合橋接器中用於通道橋接器的同層級修補程式埠。" msgid "Peer patch port in tunnel bridge for integration bridge." msgstr "通道橋接器中用於整合橋接器的同層級修補程式埠。" msgid "Per-tenant subnet pool prefix quota exceeded." msgstr "已超出每個承租人的子網路儲存區字首配額。" msgid "Phase upgrade options do not accept revision specification" msgstr "階段升級選項不接受修訂規格" msgid "Ping timeout" msgstr "連通測試逾時值" #, python-format msgid "Plugin '%s' not found." msgstr "找不到外掛程式 '%s'。" msgid "Plugin does not support updating provider attributes" msgstr "外掛程式不支援更新提供者屬性" msgid "Policy configuration policy.json could not be found." msgstr "找不到原則配置 policy.json 檔。" #, python-format msgid "Port %(id)s does not have fixed ip %(address)s" msgstr "埠 %(id)s 沒有固定 IP %(address)s" #, python-format msgid "Port %(port)s does not exist on %(bridge)s!" msgstr "埠 %(port)s 不存在於 %(bridge)s 上!" #, python-format msgid "Port %(port_id)s is already acquired by another DHCP agent" msgstr "另一個 DHCP 代理程式已經獲得了埠 %(port_id)s" #, python-format msgid "" "Port %(port_id)s is associated with a different tenant than Floating IP " "%(floatingip_id)s and therefore cannot be bound." msgstr "" "埠 %(port_id)s 已與浮動 IP %(floatingip_id)s 之外的承租人建立關聯,因此無法連" "結。" #, python-format msgid "Port %(port_id)s is not managed by this agent. 
" msgstr "埠 %(port_id)s 不由此代理程式進行管理。" #, python-format msgid "Port %s does not exist" msgstr "埠 %s 不存在" #, python-format msgid "" "Port %s has multiple fixed IPv4 addresses. Must provide a specific IPv4 " "address when assigning a floating IP" msgstr "埠 %s 具有多個固定 IPv4 位址。指派浮動 IP 時,必須提供特定的 IPv4 位址" msgid "" "Port Security must be enabled in order to have allowed address pairs on a " "port." msgstr "必須啟用埠安全,才能在埠上使用位址配對。" msgid "" "Port has security group associated. Cannot disable port security or ip " "address until security group is removed" msgstr "埠已與安全群組建立關聯。無法停用埠安全或 IP 位址,除非將安全群組移除" msgid "" "Port security must be enabled and port must have an IP address in order to " "use security groups." msgstr "埠安全必須加以啟用,而且埠必須具有 IP 位址,才能使用安全群組。" msgid "" "Port to listen on for OpenFlow connections. Used only for 'native' driver." msgstr "用於接聽 OpenFlow 連線的埠。僅用於 'native' 驅動程式。" #, python-format msgid "Prefix '%(prefix)s' not supported in IPv%(version)s pool." msgstr "字首 '%(prefix)s' 在 IPv%(version)s 儲存區中不受支援。" msgid "Prefix Delegation can only be used with IPv6 subnets." msgstr "字首委派只能與 IPv6 子網路搭配使用。" msgid "Private key of client certificate." msgstr "用戶端憑證的私密金鑰。" #, python-format msgid "Probe %s deleted" msgstr "已刪除探針 %s" #, python-format msgid "Probe created : %s " msgstr "已建立探針:%s " msgid "Process is already started" msgstr "程序已啟動" msgid "Process is not running." msgstr "程序不在執行中。" msgid "Protocol to access nova metadata, http or https" msgstr "用於存取 Nova meta 資料的通訊協定:HTTP 或 HTTPS" #, python-format msgid "Provider name %(name)s is limited by %(len)s characters" msgstr "提供者名稱 %(name)s 最多只能包含 %(len)s 個字元" #, python-format msgid "QoS Policy %(policy_id)s is used by %(object_type)s %(object_id)s." msgstr "服務品質原則 %(policy_id)s 由 %(object_type)s %(object_id)s 使用。" #, python-format msgid "" "QoS binding for network %(net_id)s and policy %(policy_id)s could not be " "found." msgstr "找不到網路 %(net_id)s 和原則 %(policy_id)s 的服務品質連結。" #, python-format msgid "" "QoS binding for port %(port_id)s and policy %(policy_id)s could not be found." msgstr "找不到埠 %(port_id)s 和原則 %(policy_id)s 的服務品質連結。" #, python-format msgid "QoS policy %(policy_id)s could not be found." msgstr "找不到服務品質原則 %(policy_id)s。" #, python-format msgid "QoS rule %(rule_id)s for policy %(policy_id)s could not be found." msgstr "找不到原則 %(policy_id)s 的服務品質規則 %(rule_id)s。" #, python-format msgid "RBAC policy of type %(object_type)s with ID %(id)s not found" msgstr "找不到 ID 為 %(id)s 且類型為 %(object_type)s 的 RBAC 原則" #, python-format msgid "" "RBAC policy on object %(object_id)s cannot be removed because other objects " "depend on it.\n" "Details: %(details)s" msgstr "" "無法移除物件 %(object_id)s 上的 RBAC 原則,因為其他物件相依於該原則。\n" "詳細資料:%(details)s" msgid "" "Range of seconds to randomly delay when starting the periodic task scheduler " "to reduce stampeding. (Disable by setting to 0)" msgstr "" "啟動定期作業排程器以減少大混亂時的隨機延遲秒數範圍。(如果要停用,請設為 0)" msgid "Ranges must be in the same IP version" msgstr "範圍必須位於相同的 IP 版本中" msgid "Ranges must be netaddr.IPRange" msgstr "範圍必須是 netaddr.IPRange" msgid "Ranges must not overlap" msgstr "範圍不得重疊" #, python-format msgid "" "Received type '%(type)s' and value '%(value)s'. Expecting netaddr.EUI type." msgstr "接收到類型 '%(type)s' 和值 '%(value)s'。預期為 netaddr.EUI 類型。" #, python-format msgid "" "Received type '%(type)s' and value '%(value)s'. Expecting netaddr.IPAddress " "type." msgstr "" "接收到類型 '%(type)s' 和值 '%(value)s'。預期為 netaddr.IPAddress 類型。" #, python-format msgid "" "Received type '%(type)s' and value '%(value)s'. 
Expecting netaddr.IPNetwork " "type." msgstr "" "接收到類型 '%(type)s' 和值 '%(value)s'。預期為 netaddr.IPNetwork 類型。" #, python-format msgid "" "Release aware branch labels (%s) are deprecated. Please switch to expand@ " "and contract@ labels." msgstr "版本相關分支標籤 (%s) 已遭到淘汰。請切換至 expand@ 和 contract@ 標籤。" msgid "Remote metadata server experienced an internal server error." msgstr "遠端 meta 資料伺服器發生內部伺服器錯誤。" msgid "" "Repository does not contain HEAD files for contract and expand branches." msgstr "儲存庫不含合約及延伸分支的 HEAD 檔。" msgid "" "Representing the resource type whose load is being reported by the agent. " "This can be \"networks\", \"subnets\" or \"ports\". When specified (Default " "is networks), the server will extract particular load sent as part of its " "agent configuration object from the agent report state, which is the number " "of resources being consumed, at every report_interval.dhcp_load_type can be " "used in combination with network_scheduler_driver = neutron.scheduler." "dhcp_agent_scheduler.WeightScheduler When the network_scheduler_driver is " "WeightScheduler, dhcp_load_type can be configured to represent the choice " "for the resource being balanced. Example: dhcp_load_type=networks" msgstr "" "代表將由代理程式報告其負載的資源類型。它可以為 \"networks\"、\"subnets\" 或 " "\"ports\"。指定時(預設值為 networks),伺服器將從代理程式報告狀態(為所耗用" "的資源數目)擷取作為其代理程式配置物件一部分進行傳送的特定負載,擷取間隔為每" "一個 report_interval。dhcp_load_type 可以與 network_scheduler_driver = " "neutron.scheduler.dhcp_agent_scheduler.WeightScheduler 組合使用。當 " "network_scheduler_driver 為 WeightScheduler 時,可以將 dhcp_load_type 配置為" "代表您選擇要進行平衡的資源。範例:dhcp_load_type=networks" msgid "Request Failed: internal server error while processing your request." msgstr "要求失敗:處理要求時發生內部伺服器錯誤。" #, python-format msgid "" "Request contains duplicate address pair: mac_address %(mac_address)s " "ip_address %(ip_address)s." msgstr "" "要求包含重複的位址配對:mac_address %(mac_address)s ip_address " "%(ip_address)s。" #, python-format msgid "" "Requested subnet with cidr: %(cidr)s for network: %(network_id)s overlaps " "with another subnet" msgstr "" "所要求的網路 %(network_id)s 子網路(CIDR 為 %(cidr)s)與另一個子網路重疊" msgid "" "Reset flow table on start. Setting this to True will cause brief traffic " "interruption." msgstr "" "在啟動時重設流程表格。如果將此項設定為 True,則將導致簡短的資料流量岔斷。" #, python-format msgid "Resource %(resource)s %(resource_id)s could not be found." msgstr "找不到資源 %(resource)s %(resource_id)s。" #, python-format msgid "Resource %(resource_id)s of type %(resource_type)s not found" msgstr "找不到類型為 %(resource_type)s 的資源 %(resource_id)s" #, python-format msgid "" "Resource '%(resource_id)s' is already associated with provider " "'%(provider)s' for service type '%(service_type)s'" msgstr "" "資源 '%(resource_id)s' 已與服務類型 '%(service_type)s' 的提供者 " "'%(provider)s' 建立關聯" msgid "Resource body required" msgstr "需要資源主體" msgid "" "Resource name(s) that are supported in quota features. This option is now " "deprecated for removal." msgstr "配額功能中支援的資源名稱。現在,這個選項已遭到淘汰,將予以移除。" msgid "Resource not found." msgstr "找不到資源。" msgid "Resources required" msgstr "需要資源" msgid "" "Root helper application. Use 'sudo neutron-rootwrap /etc/neutron/rootwrap." "conf' to use the real root filter facility. Change to 'sudo' to skip the " "filtering and just run the command directly." msgstr "" "根說明程式應用程式。利用 'sudo neutron-rootwrap /etc/neutron/rootwrap.conf' " "來使用實際的根過濾器工具。變更為 'sudo' 以跳過過濾並且僅直接執行該指令。" msgid "Root helper daemon application to use when possible." msgstr "可能時要使用的根說明程式常駐程式應用程式。" msgid "Root permissions are required to drop privileges." 
msgstr "需要 root 權限才能刪除專用權。" #, python-format msgid "Route %(cidr)s not advertised for BGP Speaker %(speaker_as)d." msgstr "未針對 BGP 喇叭 %(speaker_as)d 通告路徑 %(cidr)s。" #, python-format msgid "Router %(router_id)s %(reason)s" msgstr "路由器 %(router_id)s %(reason)s" #, python-format msgid "Router %(router_id)s could not be found" msgstr "找不到路由器 %(router_id)s" #, python-format msgid "Router %(router_id)s does not have an interface with id %(port_id)s" msgstr "路由器 %(router_id)s 沒有 ID 為 %(port_id)s 的介面" #, python-format msgid "Router %(router_id)s has no interface on subnet %(subnet_id)s" msgstr "路由器 %(router_id)s 在子網路 %(subnet_id)s 上沒有介面" #, python-format msgid "Router '%(router_id)s' cannot be both DVR and HA." msgstr "路由器 '%(router_id)s' 不能同時為 DVR 及 HA。" #, python-format msgid "Router '%(router_id)s' is not compatible with this agent." msgstr "路由器 '%(router_id)s' 與此代理程式不相容。" #, python-format msgid "Router already has a port on subnet %s" msgstr "路由器在子網路 %s 上已經具有埠" #, python-format msgid "" "Router interface for subnet %(subnet_id)s on router %(router_id)s cannot be " "deleted, as it is required by one or more floating IPs." msgstr "" "無法刪除路由器 %(router_id)s 上子網路 %(subnet_id)s 的路由器介面,因為一個以" "上的浮動 IP 需要該介面。" #, python-format msgid "" "Router interface for subnet %(subnet_id)s on router %(router_id)s cannot be " "deleted, as it is required by one or more routes." msgstr "" "無法刪除路由器 %(router_id)s 上子網路 %(subnet_id)s 的路由器介面,因為一個以" "上的路徑需要該介面。" msgid "Router port must have at least one fixed IP" msgstr "路由器埠必須具有至少一個固定 IP" msgid "Router that will have connected instances' metadata proxied." msgstr "將對其已連接的實例 meta 資料執行 Proxy 動作的路由器。" #, python-format msgid "" "Row doesn't exist in the DB. Request info: Table=%(table)s. Columns=" "%(columns)s. Records=%(records)s." msgstr "" "列不存在於資料庫中。要求資訊:表格 = %(table)s。直欄 = %(columns)s。記錄 = " "%(records)s。" msgid "Run as daemon." msgstr "作為常駐程式來執行。" #, python-format msgid "Running %(cmd)s (%(desc)s) for %(project)s ..." msgstr "正在對 %(project)s 執行 %(cmd)s (%(desc)s)..." #, python-format msgid "Running %(cmd)s for %(project)s ..." msgstr "正在對 %(project)s 執行 %(cmd)s..." msgid "Running without keystone AuthN requires that tenant_id is specified" msgstr "在沒有 Keystone AuthN 的情況下執行時,需要指定 tenant_id" msgid "" "Seconds between nodes reporting state to server; should be less than " "agent_down_time, best if it is half or less than agent_down_time." msgstr "" "節點將狀態報告給伺服器的間隔秒數;應該小於 agent_down_time;如果是 " "agent_down_time 的一半或者小於 agent_down_time,則最佳。" msgid "Seconds between running periodic tasks" msgstr "執行定期作業的間隔秒數" msgid "" "Seconds to regard the agent is down; should be at least twice " "report_interval, to be sure the agent is down for good." msgstr "" "將代理程式視為已關閉的秒數;應該至少是 report_interval 的兩倍,以確保代理程式" "已完全關閉。" #, python-format msgid "Security Group %(id)s %(reason)s." msgstr "安全群組 %(id)s %(reason)s。" #, python-format msgid "Security Group Rule %(id)s %(reason)s." msgstr "安全群組規則 %(id)s %(reason)s。" #, python-format msgid "Security group %(id)s does not exist" msgstr "安全群組 %(id)s 不存在" #, python-format msgid "Security group rule %(id)s does not exist" msgstr "安全群組規則 %(id)s 不存在" #, python-format msgid "Security group rule already exists. Rule id is %(rule_id)s." msgstr "安全群組規則已經存在。規則 ID 為 %(rule_id)s。" #, python-format msgid "" "Security group rule for ethertype '%(ethertype)s' not supported. Allowed " "values are %(values)s." msgstr "" "不支援乙太網路類型 '%(ethertype)s' 的安全群組規則。容許的值為 %(values)s。" #, python-format msgid "" "Security group rule protocol %(protocol)s not supported. 
Only protocol " "values %(values)s and integer representations [0 to 255] are supported." msgstr "" "不支援安全群組規則通訊協定 %(protocol)s。僅支援通訊協定值 %(values)s 和整數表" "示法 [0 到 255]。" msgid "Segments and provider values cannot both be set." msgstr "無法同時設定區段及提供者值。" msgid "Selects the Agent Type reported" msgstr "選取報告的代理程式類型" msgid "" "Send notification to nova when port data (fixed_ips/floatingip) changes so " "nova can update its cache." msgstr "" "當埠資料 (fixed_ips/floatingip) 變更時,將通知傳送至 Nova,以便 Nova 可以更新" "其快取。" msgid "Send notification to nova when port status changes" msgstr "當埠狀態變更時,將通知傳送至 Nova" msgid "" "Send this many gratuitous ARPs for HA setup, if less than or equal to 0, the " "feature is disabled" msgstr "" "給這個項目傳送了用於高可用性設定的許多無償 ARP,如果小於或等於 0,則會停用該" "功能" #, python-format msgid "Service Profile %(sp_id)s could not be found." msgstr "找不到服務設定檔 %(sp_id)s。" #, python-format msgid "Service Profile %(sp_id)s is already associated with flavor %(fl_id)s." msgstr "服務設定檔 %(sp_id)s 已經與特性 %(fl_id)s 建立關聯。" #, python-format msgid "Service Profile %(sp_id)s is not associated with flavor %(fl_id)s." msgstr "服務設定檔 %(sp_id)s 未與特性 %(fl_id)s 建立關聯。" #, python-format msgid "Service Profile %(sp_id)s is used by some service instance." msgstr "服務設定檔 %(sp_id)s 已由某個服務實例使用。" #, python-format msgid "Service Profile driver %(driver)s could not be found." msgstr "找不到服務設定檔驅動程式 %(driver)s。" msgid "Service Profile is not enabled." msgstr "未啟用服務設定檔。" msgid "Service Profile needs either a driver or metainfo." msgstr "服務設定檔需要驅動程式或 meta 資訊。" #, python-format msgid "" "Service provider '%(provider)s' could not be found for service type " "%(service_type)s" msgstr "找不到服務類型 %(service_type)s 的服務提供者 '%(provider)s'" msgid "Service to handle DHCPv6 Prefix delegation." msgstr "用於處理 DHCPv6 字首委派的服務。" #, python-format msgid "Service type %(service_type)s does not have a default service provider" msgstr "服務類型 %(service_type)s 不具有預設服務提供者" msgid "" "Set new timeout in seconds for new rpc calls after agent receives SIGTERM. " "If value is set to 0, rpc timeout won't be changed" msgstr "" "在代理程式接收 SIGTERM 之後為新 RPC 呼叫設定新逾時(以秒為單位)。如果值設定" "為 0,則 RPC 逾時將不會變更" msgid "" "Set or un-set the don't fragment (DF) bit on outgoing IP packet carrying GRE/" "VXLAN tunnel." msgstr "" "在帶有 GRE/VXLAN 通道的送出 IP 封包上,設定或取消設定「不劃分片段 (DF)」位" "元。" msgid "" "Set or un-set the tunnel header checksum on outgoing IP packet carrying GRE/" "VXLAN tunnel." msgstr "" "在帶有 GRE/VXLAN 通道的送出 IP 封包上,設定或取消設定通道標頭總和檢查。" msgid "Shared address scope can't be unshared" msgstr "無法將已共用的位址範圍取消共用" msgid "" "Specifying 'tenant_id' other than authenticated tenant in request requires " "admin privileges" msgstr "在要求中指定除已鑑別承租人外的其他 'tenant_id' 時,需要管理者專用權" msgid "String prefix used to match IPset names." msgstr "用來符合 IPset 名稱的字串字首。" #, python-format msgid "Sub-project %s not installed." msgstr "未安裝子專案 %s。" msgid "Subnet for router interface must have a gateway IP" msgstr "路由器介面的子網路必須具有閘道 IP" msgid "" "Subnet has a prefix length that is incompatible with DHCP service enabled." msgstr "子網路的字首長度與已啟用的 DHCP 服務不相容。" #, python-format msgid "Subnet pool %(subnetpool_id)s could not be found." msgstr "找不到子網路儲存區 %(subnetpool_id)s。" msgid "Subnet pool has existing allocations" msgstr "子網路儲存區具有現有的配置" msgid "Subnet used for the l3 HA admin network." msgstr "用於 L3 HA 管理網路的子網路。" msgid "" "Subnets hosted on the same network must be allocated from the same subnet " "pool." msgstr "在同一網路上管理的子網路必須從同一子網路儲存區中進行配置。" msgid "Suffix to append to all namespace names." 
msgstr "要附加至所有名稱空間名稱的字尾。" msgid "" "System-wide flag to determine the type of router that tenants can create. " "Only admin can override." msgstr "此系統層面旗標用來決定承租人可以建立的路由器類型。只有管理者才能置換。" msgid "TCP Port to listen for metadata server requests." msgstr "用於接聽 meta 資料伺服器要求的 TCP 埠。" msgid "TCP Port used by Neutron metadata namespace proxy." msgstr "Neutron meta 資料名稱空間 Proxy 所使用的 TCP 埠。" msgid "TCP Port used by Nova metadata server." msgstr "Nova meta 資料伺服器所使用的 TCP 埠。" #, python-format msgid "TLD '%s' must not be all numeric" msgstr "TLD '%s' 不得全為數值" msgid "TOS for vxlan interface protocol packets." msgstr "VXLAN 介面通訊協定封包的 TOS。" msgid "TTL for vxlan interface protocol packets." msgstr "VXLAN 介面通訊協定封包的 TTL。" #, python-format msgid "Table %s can only be queried by UUID" msgstr "只能依 UUID 來查詢表格 %s" #, python-format msgid "Tag %(tag)s could not be found." msgstr "找不到標記 %(tag)s。" #, python-format msgid "Tenant %(tenant_id)s not allowed to create %(resource)s on this network" msgstr "不容許承租人 %(tenant_id)s 在此網路上建立 %(resource)s" msgid "Tenant id for connecting to designate in admin context" msgstr "用於連接以在管理環境定義中指定的承租人 ID" msgid "Tenant name for connecting to designate in admin context" msgstr "用於連接以在管理環境定義中指定的承租人名稱" msgid "Tenant network creation is not enabled." msgstr "未啟用承租人網路建立作業。" msgid "Tenant-id was missing from quota request." msgstr "配額要求中遺漏了 Tenant-id。" msgid "" "The 'gateway_external_network_id' option must be configured for this agent " "as Neutron has more than one external network." msgstr "" "必須為此代理程式配置 'gateway_external_network_id' 選項,因為 Neutron 具有多" "個外部網路。" msgid "" "The DHCP agent will resync its state with Neutron to recover from any " "transient notification or RPC errors. The interval is number of seconds " "between attempts." msgstr "" "DHCP 代理程式會重新將自己的狀態與 Neutron 進行同步,以從任何暫時性通知或 RPC " "錯誤進行回復。間隔為兩次嘗試之間的秒數。" msgid "" "The DHCP server can assist with providing metadata support on isolated " "networks. Setting this value to True will cause the DHCP server to append " "specific host routes to the DHCP request. The metadata service will only be " "activated when the subnet does not contain any router port. The guest " "instance must be configured to request host routes via DHCP (Option 121). " "This option doesn't have any effect when force_metadata is set to True." msgstr "" "DHCP 伺服器可透過在隔離網路上提供 meta 資料支援進行協助。將此值設為 True 會導" "致 DHCP 伺服器將特定的主機路線附加至 DHCP 要求。僅當子網路不包含任何路由器埠" "時,才啟動 meta 資料服務。訪客實例必須配置成透過 DHCP 來要求主機路線(選項 " "121)。將 force_metadata 設為 True 時,這個選項沒有任何效果。" #, python-format msgid "" "The HA Network CIDR specified in the configuration file isn't valid; " "%(cidr)s." msgstr "配置檔中指定的「HA 網路 CIDR」無效:%(cidr)s。" msgid "The UDP port to use for VXLAN tunnels." msgstr "要用於 VXLAN 通道的 UDP 埠。" #, python-format msgid "" "The address allocation request could not be satisfied because: %(reason)s" msgstr "無法滿足位址配置要求,原因:%(reason)s" msgid "The advertisement interval in seconds" msgstr "廣告間隔(以秒為單位)" #, python-format msgid "The allocation pool %(pool)s is not valid." msgstr "配置儲存區 %(pool)s 無效。" #, python-format msgid "" "The allocation pool %(pool)s spans beyond the subnet cidr %(subnet_cidr)s." msgstr "配置儲存區 %(pool)s 跨越了子網路 CIDR %(subnet_cidr)s。" #, python-format msgid "" "The attribute '%(attr)s' is reference to other resource, can't used by sort " "'%(resource)s'" msgstr "屬性 '%(attr)s' 是對其他資源的參照,無法供排序 '%(resource)s' 使用" msgid "" "The base MAC address Neutron will use for VIFs. The first 3 octets will " "remain unchanged. If the 4th octet is not 00, it will also be used. 
The " "others will be randomly generated." msgstr "" "Neutron 將用於 VIF 的基本 MAC 位址。前 3 個八位元組將保持不變。如果第 4 個八" "位元組不是 00,則也將使用該八位元組。其他各項將隨機產生。" msgid "" "The base mac address used for unique DVR instances by Neutron. The first 3 " "octets will remain unchanged. If the 4th octet is not 00, it will also be " "used. The others will be randomly generated. The 'dvr_base_mac' *must* be " "different from 'base_mac' to avoid mixing them up with MAC's allocated for " "tenant ports. A 4 octet example would be dvr_base_mac = fa:16:3f:4f:00:00. " "The default is 3 octet" msgstr "" "Neutron 用於唯一 DVR 實例的基本 MAC 位址。前 3 個八位元組將保持不變。如果第 " "4 個八位元組不是 00,則也將使用該八位元組。其他各項將隨機產" "生。'dvr_base_mac' *必須*與 'base_mac' 不同,以避免將它們與對承租人埠配置的 " "MAC 混合。4 個八位元組的範例如下:dvr_base_mac = fa:16:3f:4f:00:00。預設值為 " "3 個八位元組" msgid "" "The connection string for the native OVSDB backend. Requires the native " "ovsdb_interface to be enabled." msgstr "原生 OVSDB 後端的連線字串。需要啟用原生 ovsdb_interface。" msgid "The core plugin Neutron will use" msgstr "Neutron 將使用的核心外掛程式" #, python-format msgid "" "The dns_name passed is a FQDN. Its higher level labels must be equal to the " "dns_domain option in neutron.conf, that has been set to '%(dns_domain)s'. It " "must also include one or more valid DNS labels to the left of " "'%(dns_domain)s'" msgstr "" "所傳遞的 dns_name 是 FQDN。它的更高階標籤必須等於 neutron.conf 中的 " "dns_domain 選項,後者已經設定為 '%(dns_domain)s'。它還必須包括 " "'%(dns_domain)s' 左側的一個以上有效 DNS 標籤" #, python-format msgid "" "The dns_name passed is a PQDN and its size is '%(dns_name_len)s'. The " "dns_domain option in neutron.conf is set to %(dns_domain)s, with a length of " "'%(higher_labels_len)s'. When the two are concatenated to form a FQDN (with " "a '.' at the end), the resulting length exceeds the maximum size of " "'%(fqdn_max_len)s'" msgstr "" "所傳遞的 dns_name 是 PQDN,並且它的大小是 '%(dns_name_len)s'。neutron.conf 中" "的 dns_domain 選項已設定為 %(dns_domain)s,長度為'%(higher_labels_len)s'。當" "這兩者連結以形成 FQDN(末尾使用 '.')時,所產生的長度會超過大小上限 " "'%(fqdn_max_len)s'" msgid "The driver used to manage the DHCP server." msgstr "用於管理 DHCP 伺服器的驅動程式。" msgid "The driver used to manage the virtual interface." msgstr "用於管理虛擬介面的驅動程式。" msgid "" "The email address to be used when creating PTR zones. If not specified, the " "email address will be admin@" msgstr "" "建立 PTR 區域時,要使用的電子郵件位址。如果未指定,則電子郵件位址將是 " "admin@" #, python-format msgid "" "The following device_id %(device_id)s is not owned by your tenant or matches " "another tenants router." msgstr "" "下列 device_id %(device_id)s 不是由您的承租人所擁有,或者與另一個承租人路由器" "相符。" msgid "The host IP to bind to" msgstr "要連結至的主機 IP" msgid "The interface for interacting with the OVSDB" msgstr "用來與 OVSDB 互動的介面" msgid "" "The maximum number of items returned in a single response, value was " "'infinite' or negative integer means no limit" msgstr "在單一回應中傳回的項目數上限,值為 'infinite' 或負整數時表示無限制" #, python-format msgid "" "The network %(network_id)s has been already hosted by the DHCP Agent " "%(agent_id)s." msgstr "網路 %(network_id)s 已經由 DHCP 代理程式 %(agent_id)s 管理。" #, python-format msgid "" "The network %(network_id)s is not hosted by the DHCP agent %(agent_id)s." msgstr "網路 %(network_id)s 不是由 DHCP 代理程式 %(agent_id)s 管理。" msgid "" "The network type to use when creating the HA network for an HA router. By " "default or if empty, the first 'tenant_network_types' is used. This is " "helpful when the VRRP traffic should use a specific network which is not the " "default one." 
msgstr "" "為 HA 路由器建立 HA 網路時要使用的網路類型。依預設或者在空白的情況下,將使用" "第一個 'tenant_network_types'。當 VRRP 資料流量應該使用的特定網路不是預設網路" "時,這很有用。" #, python-format msgid "The number of allowed address pair exceeds the maximum %(quota)s." msgstr "所容許的位址配對數目超過了上限 %(quota)s。" msgid "" "The number of seconds the agent will wait between polling for local device " "changes." msgstr "輪詢本端裝置變更之間代理程式將等待的秒數。" msgid "" "The number of seconds to wait before respawning the ovsdb monitor after " "losing communication with it." msgstr "與 OVSDB 監視器的通訊中斷後重新大量產生 OVSDB 監視器之前等待的秒數。" msgid "The number of sort_keys and sort_dirs must be same" msgstr "sort_key 數目及 sort_dir 數目必須相同" msgid "" "The path for API extensions. Note that this can be a colon-separated list of " "paths. For example: api_extensions_path = extensions:/path/to/more/exts:/" "even/more/exts. The __path__ of neutron.extensions is appended to this, so " "if your extensions are in there you don't need to specify them here." msgstr "" "API 延伸的路徑。請注意,這可以是分號區隔的路徑清單。例如:" "api_extensions_path = extensions:/path/to/more/exts:/even/more/exts。已將 " "neutron.extensions 的 __path__ 附加到此項,所以如果您的延伸在這裡,則不需要在" "這裡指定它們。" msgid "The physical network name with which the HA network can be created." msgstr "建立 HA 網路時可以使用的實體網路名稱。" #, python-format msgid "The port '%s' was deleted" msgstr "已刪除埠 '%s'" msgid "The port to bind to" msgstr "要連結至的埠" #, python-format msgid "The requested content type %s is invalid." msgstr "所要求的內容類型 %s 無效。" msgid "The resource could not be found." msgstr "找不到資源。" #, python-format msgid "" "The router %(router_id)s has been already hosted by the L3 Agent " "%(agent_id)s." msgstr "路由器 %(router_id)s 已經由 L3 代理程式 %(agent_id)s 管理。" msgid "" "The server has either erred or is incapable of performing the requested " "operation." msgstr "伺服器發生錯誤,或者無法執行所要求的作業。" msgid "The service plugins Neutron will use" msgstr "Neutron 將使用的服務外掛程式" #, python-format msgid "The subnet request could not be satisfied because: %(reason)s" msgstr "無法滿足子網路要求,原因:%(reason)s" #, python-format msgid "The subproject to execute the command against. Can be one of: '%s'." msgstr "要對其執行指令的子專案。可以是下列其中一個:'%s'。" msgid "The type of authentication to use" msgstr "要使用的鑑別類型" #, python-format msgid "The value '%(value)s' for %(element)s is not valid." msgstr "%(element)s 的值 '%(value)s' 無效。" msgid "" "The working mode for the agent. Allowed modes are: 'legacy' - this preserves " "the existing behavior where the L3 agent is deployed on a centralized " "networking node to provide L3 services like DNAT, and SNAT. Use this mode if " "you do not want to adopt DVR. 'dvr' - this mode enables DVR functionality " "and must be used for an L3 agent that runs on a compute host. 'dvr_snat' - " "this enables centralized SNAT support in conjunction with DVR. This mode " "must be used for an L3 agent running on a centralized node (or in single-" "host deployments, e.g. devstack)" msgstr "" "代理程式的工作模式。所容許的模式為:「舊式」- 這種模式會將現有行為保留在集中" "式網路節點上用於部署L3 代理程式的位置,以提供 L3 服務(例如 DNAT 和 SNAT)。" "如果您不想採用 DVR,請使用這種模式。'dvr' - 這種模式會啟用 DVR 功能,並且必須" "用於在計算主機上執行的 L3 代理程式。'dvr_snat' - 這種模式會啟用集中式 SNAT 支" "援以及 DVR。這種模式必須用於在集中式節點上執行(或者在單一主機部屬中執行,例" "如 devstack)的 L3 代理程式" msgid "" "There are routers attached to this network that depend on this policy for " "access." msgstr "有依賴於此存取原則的路由器已連接至此網路。" msgid "" "This will choose the web framework in which to run the Neutron API server. " "'pecan' is a new experiemental rewrite of the API server." 
msgstr "" "這將選擇要在其中執行 Neutron API 伺服器的 Web 架構。'pecan' 是 API 伺服器的新" "試驗性重新撰寫。" msgid "Timeout" msgstr "逾時" msgid "" "Timeout in seconds for ovs-vsctl commands. If the timeout expires, ovs " "commands will fail with ALARMCLOCK error." msgstr "" "ovs-vsctl 指令的逾時值(以秒為單位)。如果逾時值到期,則 ovs 指令將失敗,且發" "生 ALARMCLOCK 錯誤。" msgid "" "Timeout in seconds to wait for a single OpenFlow request. Used only for " "'native' driver." msgstr "" "等待單個 OpenFlow 要求的逾時值(以秒為單位)。僅用於 'native' 驅動程式。" msgid "" "Timeout in seconds to wait for the local switch connecting the controller. " "Used only for 'native' driver." msgstr "" "等待本端交換器連接控制器時的逾時值(以秒為單位)。僅用於 'native' 驅動程式。" msgid "" "Too long prefix provided. New name would exceed given length for an " "interface name." msgstr "所提供的字首太長。新名稱將超過介面名稱的給定長度。" msgid "Too many availability_zone_hints specified" msgstr "指定的 availability_zone_hints 太多" msgid "" "True to delete all ports on all the OpenvSwitch bridges. False to delete " "ports created by Neutron on integration and external network bridges." msgstr "" "如果為 True,則刪除所有 OpenvSwitch 橋接器上的所有埠。如果為 False,則刪除 " "Neutron 在整合及外部網路橋接器上建立的埠。" msgid "Tunnel IP value needed by the ML2 plugin" msgstr "ML2 外掛程式需要的通道 IP 值" msgid "Tunnel bridge to use." msgstr "要使用的通道橋接器。" msgid "" "Type of the nova endpoint to use. This endpoint will be looked up in the " "keystone catalog and should be one of public, internal or admin." msgstr "" "要使用之 Nova 端點的類型。此端點將在 Keystone 型錄中予以查閱,並且應該是共" "用、內部或管理的其中一個。" msgid "URL for connecting to designate" msgstr "用於連接以指定的 URL" msgid "URL to database" msgstr "資料庫的 URL" #, python-format msgid "Unable to access %s" msgstr "無法存取 %s" #, python-format msgid "" "Unable to allocate subnet with prefix length %(prefixlen)s, maximum allowed " "prefix is %(max_prefixlen)s." msgstr "" "無法配置字首長度為 %(prefixlen)s 的子網路,容許的字首上限為 " "%(max_prefixlen)s。" #, python-format msgid "" "Unable to allocate subnet with prefix length %(prefixlen)s, minimum allowed " "prefix is %(min_prefixlen)s." msgstr "" "無法配置字首長度為 %(prefixlen)s 的子網路,容許的字首下限為 " "%(min_prefixlen)s。" #, python-format msgid "Unable to calculate %(address_type)s address because of:%(reason)s" msgstr "無法計算 %(address_type)s 位址,原因:%(reason)s" #, python-format msgid "" "Unable to complete operation for %(router_id)s. The number of routes exceeds " "the maximum %(quota)s." msgstr "無法對 %(router_id)s 完成作業。路徑數目超出上限 %(quota)s。" #, python-format msgid "" "Unable to complete operation for %(subnet_id)s. The number of DNS " "nameservers exceeds the limit %(quota)s." msgstr "" "無法對 %(subnet_id)s 完成作業。DNS 名稱伺服器的數目超出限制 %(quota)s。" #, python-format msgid "" "Unable to complete operation for %(subnet_id)s. The number of host routes " "exceeds the limit %(quota)s." msgstr "無法對 %(subnet_id)s 完成作業。主機路徑數目超出限制 %(quota)s。" #, python-format msgid "" "Unable to complete operation on address scope %(address_scope_id)s. There " "are one or more subnet pools in use on the address scope" msgstr "" "無法對位址範圍 %(address_scope_id)s 完成作業。該位址範圍上有一個以上的子網路" "儲存區在使用中" #, python-format msgid "Unable to convert value in %s" msgstr "無法轉換 %s 中的值" msgid "Unable to create the Agent Gateway Port" msgstr "無法建立「代理程式閘道埠」" msgid "Unable to create the SNAT Interface Port" msgstr "無法建立「SNAT 介面埠」" #, python-format msgid "" "Unable to create the flat network. Physical network %(physical_network)s is " "in use." msgstr "無法建立平面網路。實體網路 %(physical_network)s 正在使用中。" msgid "" "Unable to create the network. No available network found in maximum allowed " "attempts." 
msgstr "無法建立網路。在所容許的嘗試次數上限內,找不到可用的網路。" #, python-format msgid "Unable to delete subnet pool: %(reason)s." msgstr "無法刪除子網路儲存區:%(reason)s。" #, python-format msgid "Unable to determine mac address for %s" msgstr "無法判定 %s 的 MAC 位址" #, python-format msgid "Unable to find '%s' in request body" msgstr "在要求內文中找不到 '%s'" #, python-format msgid "Unable to find IP address %(ip_address)s on subnet %(subnet_id)s" msgstr "在子網路 %(subnet_id)s 上找不到 IP 位址 %(ip_address)s" #, python-format msgid "Unable to find resource name in %s" msgstr "在 %s 中找不到資源名稱" msgid "Unable to generate IP address by EUI64 for IPv4 prefix" msgstr "無法依 EUI-64 針對 IPv4 字首產生 IP 位址" #, python-format msgid "Unable to generate unique DVR mac for host %(host)s." msgstr "無法為主機 %(host)s 產生唯一的 DVR MAC。" #, python-format msgid "Unable to generate unique mac on network %(net_id)s." msgstr "無法在網路 %(net_id)s 上產生唯一 MAC 位址。" #, python-format msgid "" "Unable to identify a target field from:%s. Match should be in the form " "%%()s" msgstr "無法識別來自 %s 的目標欄位。相符項的格式應該為 %%()s" msgid "Unable to provide external connectivity" msgstr "無法提供外部連線功能" msgid "Unable to provide tenant private network" msgstr "無法提供承租人專用網路" #, python-format msgid "" "Unable to reconfigure sharing settings for network %(network)s. Multiple " "tenants are using it." msgstr "無法重新配置網路 %(network)s 的共用設定。多個承租人正在使用該網路。" #, python-format msgid "Unable to update address scope %(address_scope_id)s : %(reason)s" msgstr "無法更新位址範圍 %(address_scope_id)s:%(reason)s" #, python-format msgid "Unable to update the following object fields: %(fields)s" msgstr "無法更新下列物件欄位:%(fields)s" #, python-format msgid "" "Unable to verify match:%(match)s as the parent resource: %(res)s was not " "found" msgstr "無法驗證相符項 %(match)s,因為找不到母項資源 %(res)s" #, python-format msgid "Unexpected label for script %(script_name)s: %(labels)s" msgstr "Script %(script_name)s 的標籤不符合預期:%(labels)s" #, python-format msgid "Unexpected number of alembic branch points: %(branchpoints)s" msgstr "alembic 分支點數目不符合預期:%(branchpoints)s" #, python-format msgid "Unexpected response code: %s" msgstr "非預期的回應碼:%s" #, python-format msgid "Unexpected response: %s" msgstr "非預期的回應:%s" #, python-format msgid "Unit name '%(unit)s' is not valid." msgstr "單元名稱 '%(unit)s' 無效。" msgid "Unknown API version specified" msgstr "指定的 API 版本不明" #, python-format msgid "Unknown address type %(address_type)s" msgstr "不明的位址類型 %(address_type)s" #, python-format msgid "Unknown attribute '%s'." msgstr "不明屬性 '%s'。" #, python-format msgid "Unknown chain: %r" msgstr "不明鏈:%r" #, python-format msgid "Unknown network type %(network_type)s." msgstr "不明的網路類型 %(network_type)s。" #, python-format msgid "Unknown quota resources %(unknown)s." msgstr "不明的配額資源 %(unknown)s。" msgid "Unmapped error" msgstr "「未對映」錯誤" msgid "Unrecognized action" msgstr "無法辨識的動作" #, python-format msgid "Unrecognized attribute(s) '%s'" msgstr "無法辨識屬性 '%s'" msgid "Unrecognized field" msgstr "無法辨識的欄位" msgid "Unspecified minimum subnet pool prefix." msgstr "未指定子網路儲存區字首下限。" msgid "Unsupported Content-Type" msgstr "不支援的內容類型" #, python-format msgid "Unsupported network type %(net_type)s." msgstr "不支援網路類型 %(net_type)s。" #, python-format msgid "Unsupported port state: %(port_state)s." msgstr "不支援的埠狀態:%(port_state)s。" msgid "Unsupported request type" msgstr "不受支援的要求類型" msgid "Updating default security group not allowed." msgstr "不容許更新預設安全群組。" msgid "" "Use ML2 l2population mechanism driver to learn remote MAC and IPs and " "improve tunnel scalability." 
msgstr "" "使用 ML2 l2population 機制驅動程式,來瞭解遠端 MAC 及 IP 位址,並提升通道可調" "整性。" msgid "Use broadcast in DHCP replies." msgstr "在 DHCP 回覆中使用廣播。" msgid "Use either --delta or relative revision, not both" msgstr "使用 --delta 或相對修訂,但不要同時使用兩者" msgid "" "Use ipset to speed-up the iptables based security groups. Enabling ipset " "support requires that ipset is installed on L2 agent node." msgstr "" "使用 ipset 來加速 iptables 型安全群組。啟用 ipset 支援需要在 L2 代理程式節點" "上安裝 ipset。" msgid "" "Use the root helper when listing the namespaces on a system. This may not be " "required depending on the security configuration. If the root helper is not " "required, set this to False for a performance improvement." msgstr "" "列出系統上的名稱空間時,請使用根說明程式。視安全性配置而定,可能不需要這樣" "做。如果不需要根說明程式,請將此參數設為 False 以改進效能。" msgid "" "Use veths instead of patch ports to interconnect the integration bridge to " "physical networks. Support kernel without Open vSwitch patch port support so " "long as it is set to True." msgstr "" "使用 veths(而不使用修補程式埠)將整合橋接器交互連接至實體網路。只要將其設為 " "True,即可支援不含 Open vSwitch 修補程式埠支援的核心。" msgid "User (uid or name) running metadata proxy after its initialization" msgstr "在 meta 資料 Proxy 起始設定之後執行該 Proxy 的使用者(UID 或名稱)" msgid "" "User (uid or name) running metadata proxy after its initialization (if " "empty: agent effective user)." msgstr "" "在 meta 資料 Proxy 起始設定之後執行該 Proxy 的使用者(UID 或名稱)(如果為" "空,則為代理程式有效使用者)。" msgid "User (uid or name) running this process after its initialization" msgstr "在此程序起始設定之後執行此程序的使用者(UID 或名稱)" msgid "Username for connecting to designate in admin context" msgstr "用於連接以在管理環境定義中指定的使用者名稱" msgid "" "Uses veth for an OVS interface or not. Support kernels with limited " "namespace support (e.g. RHEL 6.5) so long as ovs_use_veth is set to True." msgstr "" "是否將 veth 用於 OVS 介面。只要 ovs_use_veth 設為 True,就支援具有受限名稱空" "間支援(例如 RHEL 6.5)的核心。" msgid "VRRP authentication password" msgstr "VRRP 鑑別密碼" msgid "VRRP authentication type" msgstr "VRRP 鑑別類型" msgid "VXLAN network unsupported." msgstr "不支援 VXLAN 網路。" #, python-format msgid "" "Validation of dictionary's keys failed. Expected keys: %(expected_keys)s " "Provided keys: %(provided_keys)s" msgstr "" "驗證字典索引鍵失敗。預期索引鍵:%(expected_keys)s,提供的索引鍵:" "%(provided_keys)s" #, python-format msgid "Validator '%s' does not exist." msgstr "驗證器 '%s' 不存在。" #, python-format msgid "Value %(value)s in mapping: '%(mapping)s' not unique" msgstr "對映 '%(mapping)s' 中的值 %(value)s 不是唯一的" #, python-format msgid "" "Value of %(parameter)s has to be multiple of %(number)s, with maximum value " "of %(maximum)s and minimum value of %(minimum)s" msgstr "" "%(parameter)s 的值必須是 %(number)s 的倍數,上限值為 %(maximum)s 且下限值為 " "%(minimum)s" msgid "" "Value of host kernel tick rate (hz) for calculating minimum burst value in " "bandwidth limit rules for a port with QoS. See kernel configuration file for " "HZ value and tc-tbf manual for more information." msgstr "" "主機核心記號率 (hz) 的值,用來透過服務品質計算埠的頻寬限制規則中的激增值下" "限。如需相關資訊,請參閱 HZ 值的核心配置檔和 tc-tbf 手冊。" msgid "" "Value of latency (ms) for calculating size of queue for a port with QoS. See " "tc-tbf manual for more information." msgstr "" "透過服務品質來計算埠的佇列大小時的延遲值(毫秒)。如需相關資訊,請參閱 tc-" "tbf 手冊。" msgid "" "Watch file log. Log watch should be disabled when metadata_proxy_user/group " "has no read/write permissions on metadata proxy log file." msgstr "" "監看日誌檔。當 metadata_proxy_user/group 沒有對 meta 資料 Proxy 日誌檔的讀寫" "許可權時,應該停用日誌監看。" msgid "" "When external_network_bridge is set, each L3 agent can be associated with no " "more than one external network. 
This value should be set to the UUID of that " "external network. To allow L3 agent support multiple external networks, both " "the external_network_bridge and gateway_external_network_id must be left " "empty." msgstr "" "當設定了 external_network_bridge 時,每一個 L3 代理程式只能與不超過 1 個外部" "網路建立關聯。此值應該設為該外部網路的 UUID。如果要容許 L3 代理程式支援多個外" "部網路,則必須將 external_network_bridge 和 gateway_external_network_id 留" "空。" msgid "" "When proxying metadata requests, Neutron signs the Instance-ID header with a " "shared secret to prevent spoofing. You may select any string for a secret, " "but it must match here and in the configuration used by the Nova Metadata " "Server. NOTE: Nova uses the same config key, but in [neutron] section." msgstr "" "對 meta 資料要求執行 Proxy 動作時,Neutron 會使用共用密碼來簽署 Instance-ID " "標頭,以防止盜用。您可以選取任何字串用作密碼,但該字串在此處以及在 Nova meta " "資料伺服器使用的配置中必須相符。附註:Nova 使用相同的配置金鑰,但卻在 " "[neutron] 區段中。" msgid "" "Where to store Neutron state files. This directory must be writable by the " "agent." msgstr "Neutron 狀態檔的儲存位置。此目錄必須可以由代理程式寫入。" msgid "" "With IPv6, the network used for the external gateway does not need to have " "an associated subnet, since the automatically assigned link-local address " "(LLA) can be used. However, an IPv6 gateway address is needed for use as the " "next-hop for the default route. If no IPv6 gateway address is configured " "here, (and only then) the neutron router will be configured to get its " "default route from router advertisements (RAs) from the upstream router; in " "which case the upstream router must also be configured to send these RAs. " "The ipv6_gateway, when configured, should be the LLA of the interface on the " "upstream router. If a next-hop using a global unique address (GUA) is " "desired, it needs to be done via a subnet allocated to the network and not " "through this parameter. " msgstr "" "如果使用 IPv6,則用於外部閘道的網路不需要具有相關聯的子網路,因為可以使用自動" "指派的鏈結本端位址 (LLA)。但是,IPv6 閘道位址需要用作預設路由的下一個中繼站。" "如果未在這裡配置 IPv6 閘道位址,(且僅當那時)則將會配置 Neutron 路由器以從上" "游路由器的路由器通告 (RA) 中取得其預設路由;在該情況下,也必須配置上游路由器" "以傳送這些 RA。ipv6_gateway(如果已配置)應該是上游路由器介面的 LLA。如果需要" "使用廣域唯一位址 (GUA) 的下一個中繼站,則需要透過配置給網路的子網路來執行此配" "置,而不是透過此參數。" msgid "You must implement __call__" msgstr "必須實作 __call__" msgid "" "You must provide a config file for bridge - either --config-file or " "env[NEUTRON_TEST_CONFIG_FILE]" msgstr "必須為橋接器提供配置檔:--config-file 或 env[NEUTRON_TEST_CONFIG_FILE]" msgid "You must provide a revision or relative delta" msgstr "必須提供修訂或相對差異" msgid "a subnetpool must be specified in the absence of a cidr" msgstr "如果未指定 cidr,則必須指定子網路儲存區" msgid "add_ha_port cannot be called inside of a transaction." msgstr "無法在交易內呼叫 add_ha_port。" msgid "allocation_pools allowed only for specific subnet requests." msgstr "僅容許用於特定子網路要求的 allocation_pools。" msgid "allocation_pools are not in the subnet" msgstr "allocation_pools 不在子網路中" msgid "allocation_pools use the wrong ip version" msgstr "allocation_pools 使用了錯誤的 IP 版本" msgid "already a synthetic attribute" msgstr "已經是合成屬性" msgid "binding:profile value too large" msgstr "binding:profile 值太大" #, python-format msgid "cannot perform %(event)s due to %(reason)s" msgstr "無法執行 %(event)s,原因:%(reason)s" msgid "cidr and prefixlen must not be supplied together" msgstr "不得同時提供 cidr 和 prefixlen" #, python-format msgid "dhcp_agents_per_network must be >= 1. '%s' is invalid." 
msgstr "dhcp_agents_per_network 必須大於或等於 1。'%s' 無效。" msgid "dns_domain cannot be specified without a dns_name" msgstr "如果未指定 dns_name,則無法指定 dns_domain" msgid "dns_name cannot be specified without a dns_domain" msgstr "如果未指定 dns_domain,則無法指定 dns_name" msgid "fixed_ip_address cannot be specified without a port_id" msgstr "如果未指定 port_id,則無法指定 fixed_ip_address" #, python-format msgid "gateway_ip %s is not in the subnet" msgstr "gateway_ip %s 不在子網路中" #, python-format msgid "has device owner %s" msgstr "具有裝置擁有者 %s" msgid "in use" msgstr "使用中" #, python-format msgid "ip command failed on device %(dev_name)s: %(reason)s" msgstr "對裝置 %(dev_name)s 執行的 IP 指令失敗:%(reason)s" #, python-format msgid "ip command failed: %(reason)s" msgstr "IP 指令失敗:%(reason)s" #, python-format msgid "ip link capability %(capability)s is not supported" msgstr "不支援 ip link 功能 %(capability)s" #, python-format msgid "ip link command is not supported: %(reason)s" msgstr "不支援 ip link 指令:%(reason)s" msgid "ip_version must be specified in the absence of cidr and subnetpool_id" msgstr "如果未指定 cidr 和 subnetpool_id,則必須指定 ip_version" msgid "ipv6_address_mode is not valid when ip_version is 4" msgstr "當 ip_version 是 4 時,ipv6_address_mode 無效" msgid "ipv6_ra_mode is not valid when ip_version is 4" msgstr "當 ip_version 是 4 時,ipv6_ra_mode 無效" msgid "" "ipv6_ra_mode or ipv6_address_mode cannot be set when enable_dhcp is set to " "False." msgstr "" "如果 enable_dhcp 設為 False,則無法設定 ipv6_ra_mode 和 ipv6_address_mode。" #, python-format msgid "" "ipv6_ra_mode set to '%(ra_mode)s' with ipv6_address_mode set to " "'%(addr_mode)s' is not valid. If both attributes are set, they must be the " "same value" msgstr "" "如果在 ipv6_address_mode 設為 '%(addr_mode)s' 時將 ipv6_ra_mode 設" "為'%(ra_mode)s',則無效。如果同時設定這兩個屬性,則它們的值必須相同" msgid "mac address update" msgstr "MAC 位址更新" #, python-format msgid "" "max_l3_agents_per_router %(max_agents)s config parameter is not valid. It " "has to be greater than or equal to min_l3_agents_per_router %(min_agents)s." msgstr "" "max_l3_agents_per_router %(max_agents)s 配置參數無效。它必須大於或等於 " "min_l3_agents_per_router %(min_agents)s。" msgid "must provide exactly 2 arguments - cidr and MAC" msgstr "必須提供 2 個確切引數 - cidr 和 MAC" msgid "network_type required" msgstr "需要 network_type" #, python-format msgid "network_type value '%s' not supported" msgstr "不支援 network_type 值 '%s'" msgid "new subnet" msgstr "新子網路" #, python-format msgid "physical_network '%s' unknown for VLAN provider network" msgstr "VLAN 提供者網路的 physical_network '%s' 不明" #, python-format msgid "physical_network '%s' unknown for flat provider network" msgstr "平面提供者網路的 physical_network '%s' 不明" msgid "physical_network required for flat provider network" msgstr "平面提供者網路所需的 physical_network" #, python-format msgid "provider:physical_network specified for %s network" msgstr "為 %s 網路指定了 provider:physical_network" #, python-format msgid "rbac_db_model not found in %s" msgstr "在 %s 中找不到 rbac_db_model" msgid "record" msgstr "記錄" msgid "respawn_interval must be >= 0 if provided." 
msgstr "如果提供的話,則 respawn_interval 必須大於或等於 0。" #, python-format msgid "segmentation_id out of range (%(min)s through %(max)s)" msgstr "segmentation_id 超出範圍(%(min)s 到 %(max)s)" msgid "segmentation_id requires physical_network for VLAN provider network" msgstr "segmentation_id 需要 VLAN 提供者網路的 physical_network" msgid "shared attribute switching to synthetic" msgstr "共用屬性正在切換至合成屬性" #, python-format msgid "" "subnetpool %(subnetpool_id)s cannot be updated when associated with shared " "address scope %(address_scope_id)s" msgstr "" "當子網路儲存區 %(subnetpool_id)s 與共用位址範圍 %(address_scope_id)s 相關聯" "時,無法更新此子網路儲存區" msgid "subnetpool_id and use_default_subnetpool cannot both be specified" msgstr "不能同時指定 subnetpool_id 和 use_default_subnetpool" msgid "the nexthop is not connected with router" msgstr "下一個中繼站未與路由器連接" msgid "the nexthop is used by router" msgstr "路由器已使用下一個中繼站" #, python-format msgid "unable to load %s" msgstr "無法載入 %s" msgid "" "uuid provided from the command line so external_process can track us via /" "proc/cmdline interface." msgstr "" "已從指令行提供了 UUID,因此, external_process 可以透過 /proc/cmdline 介面對" "我們進行追蹤。" neutron-8.4.0/neutron/locale/fr/0000775000567000056710000000000013044373210017713 5ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/locale/fr/LC_MESSAGES/0000775000567000056710000000000013044373210021500 5ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/locale/fr/LC_MESSAGES/neutron.po0000664000567000056710000052135213044372760023553 0ustar jenkinsjenkins00000000000000# Translations template for neutron. # Copyright (C) 2015 ORGANIZATION # This file is distributed under the same license as the neutron project. # # Translators: # François Bureau, 2013 # Maxime COQUEREL , 2014-2015 # Patrice LACHANCE , 2013 # Patrice LACHANCE , 2013 # Sylvie Chesneau , 2016. #zanata msgid "" msgstr "" "Project-Id-Version: neutron 8.2.1.dev52\n" "Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n" "POT-Creation-Date: 2016-09-01 18:10+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" "PO-Revision-Date: 2016-04-18 06:01+0000\n" "Last-Translator: Angelique Pillal \n" "Language: fr\n" "Plural-Forms: nplurals=2; plural=(n > 1);\n" "Generated-By: Babel 2.0\n" "X-Generator: Zanata 3.7.3\n" "Language-Team: French\n" #, python-format msgid "" "\n" "Command: %(cmd)s\n" "Exit code: %(code)s\n" "Stdin: %(stdin)s\n" "Stdout: %(stdout)s\n" "Stderr: %(stderr)s" msgstr "" "\n" "Commande : %(cmd)s\n" "Code de sortie : %(code)s\n" "Stdin : %(stdin)s\n" "Stdout : %(stdout)s\n" "Stderr : %(stderr)s" #, python-format msgid "" "%(branch)s HEAD file does not match migration timeline head, expected: " "%(head)s" msgstr "" "Le fichier HEAD %(branch)s ne correspond pas à l'élément Head de calendrier " "de migration, attendu : %(head)s" #, python-format msgid "%(driver)s: Internal driver error." msgstr "%(driver)s : erreur de pilote interne." 
#, python-format msgid "%(id)s is not a valid %(type)s identifier" msgstr "%(id)s n'est pas un identificateur %(type)s valide" #, python-format msgid "" "%(invalid_dirs)s is invalid value for sort_dirs, valid value is '%(asc)s' " "and '%(desc)s'" msgstr "" "%(invalid_dirs)s est une valeur non valide pour sort_dirs ; les valeurs " "valides sont '%(asc)s' et '%(desc)s'" #, python-format msgid "%(key)s prohibited for %(tunnel)s provider network" msgstr "%(key)s interdit pour le réseau de fournisseur %(tunnel)s" #, python-format msgid "" "%(method)s called with network settings %(current)s (original settings " "%(original)s) and network segments %(segments)s" msgstr "" "Méthode %(method)s appelée avec les paramètres réseau %(current)s " "(paramètres d'origine %(original)s) et segments de réseau %(segments)s)" #, python-format msgid "" "%(method)s called with port settings %(current)s (original settings " "%(original)s) host %(host)s (original host %(original_host)s) vif type " "%(vif_type)s (original vif type %(original_vif_type)s) vif details " "%(vif_details)s (original vif details %(original_vif_details)s) binding " "levels %(levels)s (original binding levels %(original_levels)s) on network " "%(network)s with segments to bind %(segments_to_bind)s" msgstr "" "Méthode %(method)s appelée avec les paramètres de port %(current)s " "(paramètres d'origine %(original)s) hôte %(host)s (hôte d'origine " "%(original_host)s) type vif %(vif_type)s (type vif d'origine " "%(original_vif_type)s) détails vif %(vif_details)s (détails vif d'origine " "%(original_vif_details)s) niveaux de liaison %(levels)s (niveaux de liaison " "d'origine %(original_levels)s) sur le réseau %(network)s avec des segments " "de liaison %(segments_to_bind)s" #, python-format msgid "" "%(method)s called with subnet settings %(current)s (original settings " "%(original)s)" msgstr "" "Méthode %(method)s appelée avec les paramètres de sous-réseau %(current)s " "(paramètres d'origine %(original)s)" #, python-format msgid "%(method)s failed." msgstr "%(method)s a échoué." #, python-format msgid "%(name)s '%(addr)s' does not match the ip_version '%(ip_version)s'" msgstr "%(name)s '%(addr)s' ne correspond pas à ip_version '%(ip_version)s'" #, python-format msgid "%(param)s must be in %(range)s range." msgstr "%(param)s doit figurer dans la plage %(range)s." #, python-format msgid "%s cannot be called while in offline mode" msgstr "%s ne peut pas être appelé en mode hors ligne" #, python-format msgid "%s is invalid attribute for sort_key" msgstr "%s est un attribut non valide pour sort_key" #, python-format msgid "%s is invalid attribute for sort_keys" msgstr "%s est un attribut non valide pour sort_keys" #, python-format msgid "%s is not a valid VLAN tag" msgstr "%s n'est pas une balise VLAN (réseau local virtuel) valide" #, python-format msgid "%s must be specified" msgstr "%s doit être spécifié" #, python-format msgid "%s must implement get_port_from_device or get_ports_from_devices." msgstr "%s doit implémenter get_port_from_device ou get_ports_from_devices." #, python-format msgid "%s prohibited for VLAN provider network" msgstr "%s interdit pour le réseau de fournisseurs de réseau local virtuel" #, python-format msgid "%s prohibited for flat provider network" msgstr "%s interdit pour le réseau de fournisseurs centralisé" #, python-format msgid "%s prohibited for local provider network" msgstr "%s interdit pour le réseau de fournisseurs local" #, python-format msgid "" "'%(data)s' contains '%(length)s' characters. 
Adding a domain name will cause " "it to exceed the maximum length of a FQDN of '%(max_len)s'" msgstr "" "'%(data)s' contient '%(length)s' caractères. L'ajout d'un nom de domaine va " "entraîner un dépassement de la longueur maximum de '%(max_len)s' pour un nom " "de domaine complet" #, python-format msgid "" "'%(data)s' contains '%(length)s' characters. Adding a sub-domain will cause " "it to exceed the maximum length of a FQDN of '%(max_len)s'" msgstr "" "'%(data)s' contient '%(length)s' caractères. L'ajout d'un sous-domaine va " "entraîner le dépassement de la longueur maximum de '%(max_len)s' pour un nom " "de domaine complet" #, python-format msgid "'%(data)s' exceeds maximum length of %(max_len)s" msgstr "'%(data)s' dépasse la longueur maximale de %(max_len)s." #, python-format msgid "'%(data)s' is not an accepted IP address, '%(ip)s' is recommended" msgstr "'%(data)s' n'est pas une adresse IP acceptée, '%(ip)s' recommandée" #, python-format msgid "'%(data)s' is not in %(valid_values)s" msgstr "'%(data)s' n'est pas dans %(valid_values)s" #, python-format msgid "'%(data)s' is too large - must be no larger than '%(limit)d'" msgstr "'%(data)s' est trop grand - ne doit pas être supérieur à '%(limit)d'" #, python-format msgid "'%(data)s' is too small - must be at least '%(limit)d'" msgstr "'%(data)s' est trop petit - doit être au moins de '%(limit)d'" #, python-format msgid "'%(data)s' isn't a recognized IP subnet cidr, '%(cidr)s' is recommended" msgstr "" "'%(data)s' n'est pas un CIDR de sous-réseau IP reconnu, '%(cidr)s' recommandé" #, python-format msgid "'%(data)s' not a valid PQDN or FQDN. Reason: %(reason)s" msgstr "" "'%(data)s' n'est pas un nom PQDN ou un nom de domaine complet valide. " "Motif : %(reason)s" #, python-format msgid "'%(host)s' is not a valid nameserver. %(msg)s" msgstr "'%(host)s' n'est pas un serveur de noms valide. %(msg)s" #, python-format msgid "'%s' Blank strings are not permitted" msgstr "'%s' Les chaînes à blanc ne sont pas autorisées" #, python-format msgid "'%s' cannot be converted to boolean" msgstr "'%s' ne peut pas être converti en valeur booléenne." #, python-format msgid "'%s' cannot be converted to lowercase string" msgstr "'%s' ne peut pas être converti en une chaîne en minuscules" #, python-format msgid "'%s' contains whitespace" msgstr "'%s' contient des blancs" #, python-format msgid "'%s' exceeds the 255 character FQDN limit" msgstr "'%s' dépasse la limite de nom de domaine complet (255 caractères)" #, python-format msgid "'%s' is a FQDN. It should be a relative domain name" msgstr "" "'%s' est un nom de domaine complet. Devrait être un nom de domaine relatif" #, python-format msgid "'%s' is not a FQDN" msgstr "'%s' n'est pas un nom de domaine complet" #, python-format msgid "'%s' is not a dictionary" msgstr "'%s' n'est pas un dictionnaire." #, python-format msgid "'%s' is not a list" msgstr "'%s' n'est pas une liste." #, python-format msgid "'%s' is not a valid IP address" msgstr "'%s' n'est pas une adresse IP valide." #, python-format msgid "'%s' is not a valid IP subnet" msgstr "'%s' n'est pas un sous-réseau IP valide." #, python-format msgid "'%s' is not a valid MAC address" msgstr "'%s' n'est pas une adresse MAC valide." #, python-format msgid "'%s' is not a valid RBAC object type" msgstr "'%s' n'est pas un type d'objet RBAC valide" #, python-format msgid "'%s' is not a valid UUID" msgstr "'%s' n'est pas un identificateur unique universel (UUID) valide." 
#, python-format msgid "'%s' is not a valid boolean value" msgstr "'%s' n'est pas une valeur booléenne valide " #, python-format msgid "'%s' is not a valid input" msgstr "'%s' n'est pas une entrée valide." #, python-format msgid "'%s' is not a valid string" msgstr "'%s' n'est pas une chaîne valide." #, python-format msgid "'%s' is not an integer" msgstr "'%s' n'est pas un entier" #, python-format msgid "'%s' is not an integer or uuid" msgstr "'%s' n'est pas un entier ou un identificateur unique universel." #, python-format msgid "'%s' is not of the form =[value]" msgstr "'%s' n'a pas la forme = [valeur]" #, python-format msgid "'%s' is not supported for filtering" msgstr "'%s' n'est pas pris en charge pour le filtrage " #, python-format msgid "'%s' must be a non negative decimal." msgstr "'%s' doit être un nombre décimal non négatif." #, python-format msgid "'%s' should be non-negative" msgstr "'%s' ne doit pas être négatif." msgid "'.' searches are not implemented" msgstr "Les recherches de '.' ne sont pas implémentées. " #, python-format msgid "'module' object has no attribute '%s'" msgstr "L'objet 'module' n'a pas d'attribut '%s'" msgid "'port_max' is smaller than 'port_min'" msgstr "'port_max' est plus petit que 'port_min'" msgid "" "(Deprecated. Use '--subproject neutron-SERVICE' instead.) The advanced " "service to execute the command against." msgstr "" "(Obsolète. Utilisez '--subproject neutron-SERVICE' à la place.) Service " "avancé en fonction duquel exécuter la commande. " msgid "0 is not allowed as CIDR prefix length" msgstr "La longueur 0 n'est pas autorisée pour le préfixe CIDR" msgid "" "32-bit BGP identifier, typically an IPv4 address owned by the system running " "the BGP DrAgent." msgstr "" "Identificateur BGP sur 32 bits, généralement une adresse IPv4 appartenant au " "système qui exécute BGP DrAgent." msgid "A QoS driver must be specified" msgstr "Un pilote QoS doit être spécifié" msgid "A cidr must be specified in the absence of a subnet pool" msgstr "" "Une valeur cidr doit être indiquée si aucun pool de sous-réseaux n'est défini" msgid "" "A decimal value as Vendor's Registered Private Enterprise Number as required " "by RFC3315 DUID-EN." msgstr "" "Valeur décimale telle que Numéro d'entreprise privé enregistré du " "fournisseur comme requis par la RFC3315 DUID-EN." #, python-format msgid "A default external network already exists: %(net_id)s." msgstr "Un réseau externe par défaut existe déjà : %(net_id)s." msgid "" "A default subnetpool for this IP family has already been set. Only one " "default may exist per IP family" msgstr "" "Un pool de sous-réseaux par défaut pour cette famille IP est déjà défini. Il " "ne peut y avoir qu'un seul pool par défaut par famille IP." msgid "A metering driver must be specified" msgstr "Un pilote de mesure doit être spécifié." msgid "A password must be supplied when using auth_type md5." msgstr "" "Un mot de passe doit être fourni lors de l'utilisation de auth_type md5." msgid "API for retrieving service providers for Neutron advanced services" msgstr "" "API d'extraction des fournisseurs de service pour les services avancés de " "Neutron" msgid "Aborting periodic_sync_routers_task due to an error." msgstr "Abandon de periodic_sync_routers_task en raison d'une erreur." msgid "Access to this resource was denied." msgstr "L'accès a cette ressource a été refusé." msgid "Action to be executed when a child process dies" msgstr "Action à exécuter quand un processus enfant meurt" msgid "" "Add comments to iptables rules. 
Set to false to disallow the addition of " "comments to generated iptables rules that describe each rule's purpose. " "System must support the iptables comments module for addition of comments." msgstr "" "Ajouter des commentaires aux règles iptables. Définir sur false pour " "interdire l'ajout de commentaires aux règles iptables générées qui décrivent " "l'objectif de chaque règle. Le système doit prendre en charge le module de " "commentaires iptables pour l'ajout de commentaires." msgid "Address not present on interface" msgstr "L'adresse n'est pas présente sur l'interface" #, python-format msgid "Address scope %(address_scope_id)s could not be found" msgstr "La portée d'adresse %(address_scope_id)s est introuvable" msgid "" "Address to listen on for OpenFlow connections. Used only for 'native' driver." msgstr "" "Adresse à utiliser pour l'écoute des connexions OpenFlow. Utilisée " "uniquement pour le pilote 'natif'." msgid "Adds external network attribute to network resource." msgstr "Ajoute l'attribut de réseau externe à la ressource du réseau." msgid "Adds test attributes to core resources." msgstr "Ajoute des attributs de test aux ressources principales." #, python-format msgid "Agent %(id)s could not be found" msgstr "Agent %(id)s introuvable" #, python-format msgid "Agent %(id)s is not a L3 Agent or has been disabled" msgstr "L'agent %(id)s n'est pas un agent L3 ou a été désactivé" #, python-format msgid "Agent %(id)s is not a valid DHCP Agent or has been disabled" msgstr "L'agent %(id)s n'est pas un agent DHCP valide ou a été désactivé" msgid "Agent has just been revived" msgstr "L'agent vient d'être réactivé" msgid "" "Agent starts with admin_state_up=False when enable_new_agents=False. In the " "case, user's resources will not be scheduled automatically to the agent " "until admin changes admin_state_up to True." msgstr "" "L'agent démarre avec admin_state_up=False si enable_new_agents=False. Dans " "ce cas, les ressources de l'utilisateur ne sont pas planifiées " "automatiquement pour l'agent sauf si l'administrateur affecte la valeur True " "à admin_state_up. " #, python-format msgid "Agent updated: %(payload)s" msgstr "Mise à jour de l'agent : %(payload)s" #, python-format msgid "" "Agent with agent_type=%(agent_type)s and host=%(host)s could not be found" msgstr "Agent introuvable avec agent_type=%(agent_type)s et host=%(host)s" msgid "Allow auto scheduling networks to DHCP agent." msgstr "Autoriser la planification automatique des réseaux pour l'agent DHCP." msgid "Allow auto scheduling of routers to L3 agent." msgstr "Autoriser la planification automatique des routeurs vers l'agent L3." msgid "" "Allow overlapping IP support in Neutron. Attention: the following parameter " "MUST be set to False if Neutron is being used in conjunction with Nova " "security groups." msgstr "" "Autoriser la prise en charge du chevauchement d'IP dans Neutron. " "Attention : le paramètre ci-après DOIT être défini sur False si Neutron est " "utilisé conjointement avec des groupes de sécurité Nova." msgid "Allow running metadata proxy." msgstr "Autorisez le proxy de métadonnées en cours d'exécution." 
msgid "Allow sending resource operation notification to DHCP agent" msgstr "" "Autoriser l'envoi à l'agent DHCP de notifications d'opérations sur les " "ressources" msgid "Allow the creation of PTR records" msgstr "Autoriser la création d'enregistrements PTR" msgid "Allow the usage of the bulk API" msgstr "Autoriser l'utilisation de l'API de traitement en bloc" msgid "Allow the usage of the pagination" msgstr "Autoriser l'utilisation de la pagination" msgid "Allow the usage of the sorting" msgstr "Autoriser l'utilisation du tri" msgid "Allow to perform insecure SSL (https) requests to nova metadata" msgstr "" "Permet d'effectuer des demandes SSL (https) non sécurisées sur les " "métadonnées nova" msgid "Allowed address pairs must be a list." msgstr "Les paires d'adresses autorisées doivent figurer dans une liste. " msgid "AllowedAddressPair must contain ip_address" msgstr "AllowedAddressPair doit contenir ip_address" msgid "" "Allows for serving metadata requests coming from a dedicated metadata access " "network whose CIDR is 169.254.169.254/16 (or larger prefix), and is " "connected to a Neutron router from which the VMs send metadata:1 request. In " "this case DHCP Option 121 will not be injected in VMs, as they will be able " "to reach 169.254.169.254 through a router. This option requires " "enable_isolated_metadata = True." msgstr "" "Permet le traitement des demandes de métadonnées en provenance d'un réseau " "d'accès de métadonnées dédié dont le CIDR est 169.254.169.254/16 (ou un " "préfixe plus long), et qui est connecté à un routeur Neutron depuis lequel " "les machines virtuelles envoient une demande metadata:1. Dans ce cas, DHCP " "Option 121 n'est pas injecté dans les machines virtuelles, car celles-ci " "pourront accéder à l'adresse 169.254.169.254 via un routeur. Cette option " "requiert enable_isolated_metadata = True." #, python-format msgid "" "Already hosting BGP Speaker for local_as=%(current_as)d with router_id=" "%(rtid)s." msgstr "" "Routeur BGP déjà hébergé pour local_as=%(current_as)d avec router_id=" "%(rtid)s." #, python-format msgid "" "Already hosting maximum number of BGP Speakers. Allowed scheduled count=" "%(count)d" msgstr "" "Un nombre maximum de routeurs BGP est déjà hébergé. Nombre planifié autorisé=" "%(count)d" msgid "An RBAC policy already exists with those values." msgstr "Une stratégie RBAC existe déjà avec ces valeurs." msgid "An identifier must be specified when updating a subnet" msgstr "" "Un identificateur doit être spécifié lors de la mise à jour d'un sous-réseau" msgid "An interface driver must be specified" msgstr "Un pilote d'interface doit être spécifié." msgid "" "An ordered list of extension driver entrypoints to be loaded from the " "neutron.ml2.extension_drivers namespace. For example: extension_drivers = " "port_security,qos" msgstr "" "Liste ordonnée des points d'entrée de pilote d'extension à charger à partir " "de l'espace de nom neutron.ml2.extension_drivers namespace. Par exemple : " "extension_drivers = port_security,qos" msgid "" "An ordered list of networking mechanism driver entrypoints to be loaded from " "the neutron.ml2.mechanism_drivers namespace." msgstr "" "Liste ordonnée des points d'entrée de pilote de mécanisme à charger à partir " "de l'espace de nom neutron.ml2.mechanism_drivers." msgid "An unexpected internal error occurred." msgstr "Une erreur inattendue s'est produite." msgid "An unknown error has occurred. Please try your request again." msgstr "Une erreur inconnue s'est produite. Renouvelez votre demande." 
msgid "Async process didn't respawn" msgstr "Le processus Async n'a pas été relancé" #, python-format msgid "Attribute '%s' not allowed in POST" msgstr "" "Attribut '%s non autorisé dans l'autotest à la mise sous tension (POST)" #, python-format msgid "Authentication type not supported. Requested type=%(auth_type)s." msgstr "" "Type d'authentification non pris en charge. Type demandé=%(auth_type)s." msgid "Authorization URL for connecting to designate in admin context" msgstr "" "URL d'autorisation pour la connexion au réseau désigné dans un contexte admin" msgid "Automatically remove networks from offline DHCP agents." msgstr "Retirer automatiquement les réseaux des agents DHCP hors ligne." msgid "" "Automatically reschedule routers from offline L3 agents to online L3 agents." msgstr "" "Replanifier automatiquement les routeurs pour qu'ils passent d'agents L3 " "hors connexion à des agents L3 connectés." msgid "Availability zone of this node" msgstr "Zone de disponibilité du noeud" #, python-format msgid "AvailabilityZone %(availability_zone)s could not be found." msgstr "AvailabilityZone %(availability_zone)s est introuvable." msgid "Available commands" msgstr "Commandes disponibles" #, python-format msgid "" "BGP Peer %(peer_ip)s for remote_as=%(remote_as)s, running for BGP Speaker " "%(speaker_as)d not added yet." msgstr "" "Homologue BGP %(peer_ip)s pour remote_as=%(remote_as)s, en cours d'exécution " "pour routeur BGP %(speaker_as)d pas encore ajouté." #, python-format msgid "" "BGP Speaker %(bgp_speaker_id)s is already configured to peer with a BGP Peer " "at %(peer_ip)s, it cannot peer with BGP Peer %(bgp_peer_id)s." msgstr "" "Le routeur BGP %(bgp_speaker_id)s est déjà configuré pour communiquer avec " "un homologue BGP à l'adresse %(peer_ip)s, il ne peut pas communiquer avec " "l'homologue BGP %(bgp_peer_id)s." #, python-format msgid "" "BGP Speaker for local_as=%(local_as)s with router_id=%(rtid)s not added yet." msgstr "" "Routeur BGP pour local_as=%(local_as)s avec router_id=%(rtid)s pas encore " "ajouté." #, python-format msgid "" "BGP peer %(bgp_peer_id)s is not associated with BGP speaker " "%(bgp_speaker_id)s." msgstr "" "L'homologue BGP %(bgp_peer_id)s n'est pas associé à un routeur BGP " "%(bgp_speaker_id)s." #, python-format msgid "BGP peer %(bgp_peer_id)s not authenticated." msgstr "Homologue BGP %(bgp_peer_id)s non authentifié." #, python-format msgid "BGP peer %(id)s could not be found." msgstr "L'homologue BGP %(id)s est introuvable." #, python-format msgid "" "BGP speaker %(bgp_speaker_id)s is not hosted by the BgpDrAgent %(agent_id)s." msgstr "" "Le routeur BGP %(bgp_speaker_id)s n'est pas hébergé par BgpDrAgent " "%(agent_id)s." #, python-format msgid "BGP speaker %(id)s could not be found." msgstr "Le routeur BGP %(id)s est introuvable." msgid "BGP speaker driver class to be instantiated." msgstr "Classe de pilote de routeur BGP à instancier." msgid "Backend does not support VLAN Transparency." msgstr "Le back-end ne prend pas en charge la transparence de VLAN." 
#, python-format msgid "" "Bad prefix or mac format for generating IPv6 address by EUI-64: %(prefix)s, " "%(mac)s:" msgstr "" "Type de préfixe ou format d'adresse MAC incorrect pour générer une adresse " "IPv6 par EUI-64 : %(prefix)s, %(mac)s :" #, python-format msgid "Bad prefix type for generate IPv6 address by EUI-64: %s" msgstr "" "Type de préfixe erroné pour la génération de l'adresse IPv6 par EUI-64 : %s" #, python-format msgid "Base MAC: %s" msgstr "MAC de base : %s" msgid "" "Base log dir for dnsmasq logging. The log contains DHCP and DNS log " "information and is useful for debugging issues with either DHCP or DNS. If " "this section is null, disable dnsmasq log." msgstr "" "Répertoire de journaux de base pour la consignation dnsmasq. Le journal " "contient des informations de journal DHCP et DNS et s'avère utile pour " "déboguer les problèmes liés à DHCP ou DNS. Si cette section est NULL, " "désactivez la consignation dnsmasq. " #, python-format msgid "BgpDrAgent %(agent_id)s is already associated to a BGP speaker." msgstr "BgpDrAgent %(agent_id)s est déjà associé à un routeur BGP." #, python-format msgid "BgpDrAgent %(id)s is invalid or has been disabled." msgstr "BgpDrAgent %(id)s n'est pas valide ou il a été désactivé." #, python-format msgid "BgpDrAgent updated: %s" msgstr "BgpDrAgent mis à jour : %s" msgid "Body contains invalid data" msgstr "Le corps contient des données non valides" msgid "Both network_id and router_id are None. One must be provided." msgstr "" "Les paramètres network_id et router_id sont tous deux définis sur None. L'un " "des deux doit obligatoirement être fourni." #, python-format msgid "Bridge %(bridge)s does not exist." msgstr "Le pont %(bridge)s n'existe pas." #, python-format msgid "Bridge %s does not exist" msgstr "Le pont %s n'existe pas" msgid "Bulk operation not supported" msgstr "Opération groupée non prise en charge" msgid "CIDR to monitor" msgstr "CIDR à surveiller" #, python-format msgid "Callback for %(resource_type)s not found" msgstr "Rappel pour %(resource_type)s introuvable" #, python-format msgid "Callback for %(resource_type)s returned wrong resource type" msgstr "" "Le rappel pour %(resource_type)s a renvoyé un type de ressource incorrect" #, python-format msgid "Cannot add floating IP to port %s that has no fixed IPv4 addresses" msgstr "" "Impossible d'ajouter une adresse IP flottante au port %s qui n'a pas " "d'adresse IPv4 fixe" #, python-format msgid "Cannot add floating IP to port on subnet %s which has no gateway_ip" msgstr "" "Impossible d'ajouter une adresse IP flottante au port sur le sous-réseau %s " "qui n'a pas d'adresse IP passerelle (gateway_ip)" #, python-format msgid "Cannot add multiple callbacks for %(resource_type)s" msgstr "Impossible d'ajouter plusieurs rappels pour %(resource_type)s" #, python-format msgid "Cannot allocate IPv%(req_ver)s subnet from IPv%(pool_ver)s subnet pool" msgstr "" "Impossible d'allouer le sous-réseau IPv%(req_ver)s à partir du pool de sous-" "réseau IPv%(pool_ver)s" msgid "Cannot allocate requested subnet from the available set of prefixes" msgstr "" "Impossible d'allouer le sous-réseau demandé à partir de l'ensemble de " "préfixes disponible" #, python-format msgid "" "Cannot associate floating IP %(floating_ip_address)s (%(fip_id)s) with port " "%(port_id)s using fixed IP %(fixed_ip)s, as that fixed IP already has a " "floating IP on external network %(net_id)s." 
msgstr "" "Impossible d'associer l'adresse IP flottante %(floating_ip_address)s " "(%(fip_id)s) aulport %(port_id)s en utilisant l'adresse IP fixe " "%(fixed_ip)s, car cette adresse IP fixe a déjà une adresse IP flottante sur " "le réseau externe %(net_id)s." msgid "" "Cannot change HA attribute of active routers. Please set router " "admin_state_up to False prior to upgrade." msgstr "" "Impossible de modifier l'attribut HA des routeurs actifs. Définissez le " "paramètre router admin_state_up sur False avant d'effectuer la mise à niveau." #, python-format msgid "" "Cannot create floating IP and bind it to %s, since that is not an IPv4 " "address." msgstr "" "Impossible de créer une adresse IP flottante et de la lier à %s, car il ne " "s'agit pas d'une adresse IPV4. " #, python-format msgid "" "Cannot create floating IP and bind it to Port %s, since that port is owned " "by a different tenant." msgstr "" "Impossible de créer une adresse IP flottante et de la lier au port %s, car " "ce port appartient à un locataire différent." msgid "Cannot create resource for another tenant" msgstr "Impossible de créer une ressource pour un autre locataire" msgid "Cannot disable enable_dhcp with ipv6 attributes set" msgstr "Impossible de désactiver enable_dhcp avec des attributs ipv6 définis" #, python-format msgid "Cannot find %(table)s with %(col)s=%(match)s" msgstr "Impossible de trouver %(table)s avec %(col)s=%(match)s" #, python-format msgid "Cannot handle subnet of type %(subnet_type)s" msgstr "Impossible de gérer le sous-réseau de type %(subnet_type)s" msgid "Cannot have multiple IPv4 subnets on router port" msgstr "Impossible d'avoir plusieurs sous-réseaux IPv4 sur le port de routeur" #, python-format msgid "" "Cannot have multiple router ports with the same network id if both contain " "IPv6 subnets. Existing port %(p)s has IPv6 subnet(s) and network id %(nid)s" msgstr "" "Impossible d'avoir plusieurs ports de routeur avec le même ID réseau s'ils " "contiennent tous des sous-réseaux IPv6. Le port %(p)s existant a un ou " "plusieurs sous-réseaux IPv6 et l'ID réseau %(nid)s" #, python-format msgid "" "Cannot host distributed router %(router_id)s on legacy L3 agent %(agent_id)s." msgstr "" "Impossible d'héberger le routeur distribué %(router_id)s sur l'agent L3 " "existant %(agent_id)s." msgid "Cannot match priority on flow deletion or modification" msgstr "" "Impossible de mettre en correspondance la priorité lors de la suppression ou " "de la modification de flux" msgid "Cannot mix IPv4 and IPv6 prefixes in a subnet pool." msgstr "" "Impossible d'utiliser les préfixes IPv4 et IPv6 dans un même pool de sous-" "réseaux." msgid "Cannot specify both --service and --subproject." msgstr "Impossible de spécifier --service et --subproject." msgid "Cannot specify both subnet-id and port-id" msgstr "Impossible de spécifier à la fois l'ID sous-réseau et l'ID port" msgid "Cannot understand JSON" msgstr "Impossible de comprendre JSON" #, python-format msgid "Cannot update read-only attribute %s" msgstr "Impossible de mettre à jour l'attribut en lecture seule %s" msgid "" "Cannot upgrade active router to distributed. Please set router " "admin_state_up to False prior to upgrade." msgstr "" "Impossible de mettre à niveau un routeur actif vers un routeur distribué. " "Affectez au paramètre admin_state_up la valeur False pour le routeur avant " "de procéder à la mise à niveau. 
" msgid "Certificate Authority public key (CA cert) file for ssl" msgstr "" "Fichier de clés publiques de l'autorité de certification (CA cert) pour SSL" #, python-format msgid "" "Change would make usage less than 0 for the following resources: %(unders)s." msgstr "" "Une modification entraînerait une utilisation inférieure à 0 pour les " "ressources suivantes : %(unders)s." msgid "Check ebtables installation" msgstr "Vérifier l'installation ebtables " msgid "Check for ARP header match support" msgstr "Vérifier la prise en charge de correspondance d'en-tête ARP" msgid "Check for ARP responder support" msgstr "Vérifier la prise en charge de programme de réponse ARP" msgid "Check for ICMPv6 header match support" msgstr "Vérifier la prise en charge de correspondance d'en-tête ICMPv6" msgid "Check for OVS Geneve support" msgstr "Vérifier la prise en charge OVS Geneve" msgid "Check for OVS vxlan support" msgstr "Vérifier la prise en charge OVS vxlan" msgid "Check for VF management support" msgstr "Vérifier la prise en charge de gestion VF" msgid "Check for iproute2 vxlan support" msgstr "Vérifier la prise en charge iproute2 vxlan" msgid "Check for nova notification support" msgstr "Vérifier la prise en charge de notification Nova" msgid "Check for patch port support" msgstr "Vérifier la prise en charge de port de correctif" msgid "Check ip6tables installation" msgstr "Vérifer l'installation ip6tables" msgid "Check ipset installation" msgstr "Vérifier l'installation ipset" msgid "Check keepalived IPv6 support" msgstr "Vérifier la prise en charge keepalived IPv6" msgid "Check minimal dibbler version" msgstr "Vérifier la version minimale de dibbler" msgid "Check minimal dnsmasq version" msgstr "Vérifier la version minimale de dnsmasq" msgid "Check netns permission settings" msgstr "Vérifier les paramètres d'autorisation netns" msgid "Check ovs conntrack support" msgstr "Vérifier la prise en charge ovs conntrack" msgid "Check ovsdb native interface support" msgstr "Vérifer la prise en charge d'interface native ovsdb" #, python-format msgid "" "Cidr %(subnet_cidr)s of subnet %(subnet_id)s overlaps with cidr %(cidr)s of " "subnet %(sub_id)s" msgstr "" "Le CIDR %(subnet_cidr)s du sous-réseau %(subnet_id)s chevauche le CIDR " "%(cidr)s du sous-réseau %(sub_id)s" msgid "Class not found." msgstr "Classe introuvable." msgid "Cleanup resources of a specific agent type only." msgstr "Ressources de nettoyage d'un type d'agent spécifique uniquement." msgid "Client certificate for nova metadata api server." msgstr "Certificat client pour le serveur d'API de métadonnées nova." msgid "" "Comma-separated list of : tuples, mapping " "network_device to the agent's node-specific list of virtual functions that " "should not be used for virtual networking. vfs_to_exclude is a semicolon-" "separated list of virtual functions to exclude from network_device. The " "network_device in the mapping should appear in the physical_device_mappings " "list." msgstr "" "Liste des uplets :, séparés par des " "virgules, qui mappent network_device à la liste de fonctions spécifique à un " "noeud d'agent des fonctions virtuelles qui ne doivent pas être utilisées " "pour une mise en réseau virtuelle. vfs_to_exclude est une liste de fonctions " "virtuelles, séparées par des virgules, à exclure de network_device. " "network_device dans le mappage doit figurer dans la liste " "physical_device_mappings." 
msgid "" "Comma-separated list of : tuples mapping physical " "network names to the agent's node-specific Open vSwitch bridge names to be " "used for flat and VLAN networks. The length of bridge names should be no " "more than 11. Each bridge must exist, and should have a physical network " "interface configured as a port. All physical networks configured on the " "server should have mappings to appropriate bridges on each agent. Note: If " "you remove a bridge from this mapping, make sure to disconnect it from the " "integration bridge as it won't be managed by the agent anymore. Deprecated " "for ofagent." msgstr "" "Liste des uplets :, séparés par des virgules, qui " "mappent les noms de réseau physique au noms de pont Open vSwitch spécifiques " "à un noeud d'agent à utiliser pour les réseaux centralisés et VLAN. La " "longueur des noms de pont ne doit pas dépasser 11. Chaque pont doit exister " "et doit comporter une interface réseau physique configurée en tant que port. " "Tous les réseaux physiques configurés sur le serveur doivent avoir des " "mappages aux ponts appropriés sur chaque agent. Remarque : Si vous retirez " "un pont de ces mappages, assurez-vous de le déconnecter du pont " "d'intégration car il ne sera plus géré par l'agent. Obsolète pour ofagent." msgid "" "Comma-separated list of : tuples mapping " "physical network names to the agent's node-specific physical network device " "interfaces of SR-IOV physical function to be used for VLAN networks. All " "physical networks listed in network_vlan_ranges on the server should have " "mappings to appropriate interfaces on each agent." msgstr "" "Liste des uplets : séparés par des " "virgules, qui mappent les noms de réseau physique aux interfaces d'unité " "réseau physiques spécifiques à un noeud d'agent de la fonction physique SR-" "IOV à utiliser pour les réseaux VLAN. Tous les réseaux physiques répertoriés " "dans network_vlan_ranges sur le serveur doivent avoir des mappages aux " "interfaces appropriées sur chaque agent." msgid "" "Comma-separated list of : tuples " "mapping physical network names to the agent's node-specific physical network " "interfaces to be used for flat and VLAN networks. All physical networks " "listed in network_vlan_ranges on the server should have mappings to " "appropriate interfaces on each agent." msgstr "" "Liste des uplets : séparés par des " "virgules qui mappent les noms de réseau physique aux interfaces de réseau " "physique spécifiques à un noeud d'agent à utiliser pour les réseaux " "centralisés et VLAN. Tous les réseaux physiques répertoriés dans " "network_vlan_ranges sur le serveur doivent avoir des mappages aux interfaces " "appropriées sur chaque agent." 
msgid "" "Comma-separated list of : tuples enumerating ranges of GRE " "tunnel IDs that are available for tenant network allocation" msgstr "" "Liste d'uplets : séparés par des virgules énumérant des " "plages d'ID GRE disponibles pour l'allocation de réseau locataire" msgid "" "Comma-separated list of : tuples enumerating ranges of " "Geneve VNI IDs that are available for tenant network allocation" msgstr "" "Liste d'uplets : séparés par des virgules énumérant des " "plages d'ID VNI Geneve disponibles pour l'allocation de réseau locataire" msgid "" "Comma-separated list of : tuples enumerating ranges of " "VXLAN VNI IDs that are available for tenant network allocation" msgstr "" "Liste d'uplets : séparés par des virgules énumérant des " "plages d'ID VNI VXLAN disponibles pour l'allocation de réseau locataire" msgid "" "Comma-separated list of supported PCI vendor devices, as defined by " "vendor_id:product_id according to the PCI ID Repository. Default enables " "support for Intel and Mellanox SR-IOV capable NICs." msgstr "" "Liste d'unités fournisseurs PCI, séparées par des virgules, prises en " "charge, comme défini par vendor_id:product_id d'après le référentiel d'ID " "PCI. Permet par défaut la prise en charge des NIC compatibles Intel et " "Mellanox SR-IOV." msgid "" "Comma-separated list of the DNS servers which will be used as forwarders." msgstr "" "Liste séparée par des virgules des serveurs DNS qui seront utilisés comme " "réexpéditeurs." msgid "Command to execute" msgstr "Commande à exécuter" msgid "Config file for interface driver (You may also use l3_agent.ini)" msgstr "" "Fichier de configuration du pilote d'interface (vous pouvez aussi utiliser " "l3_agent.ini)" #, python-format msgid "Conflicting value ethertype %(ethertype)s for CIDR %(cidr)s" msgstr "Valeur en conflit ethertype %(ethertype)s pour le CIDR %(cidr)s" msgid "" "Controls whether the neutron security group API is enabled in the server. It " "should be false when using no security groups or using the nova security " "group API." msgstr "" "Contrôle si l'API de groupe de sécurité neutron est activée sur le serveur. " "La valeur doit être false si aucun groupe de sécurité n'est utilisé ou en " "cas d'utilisation de l'API du groupe de sécurité neutron." #, python-format msgid "Could not bind to %(host)s:%(port)s after trying for %(time)d seconds" msgstr "" "Echec lors de la liaison à %(host)s:%(port)s après attente de %(time)d " "secondes" #, python-format msgid "Could not connect to %s" msgstr "Impossible de se connecter à %s" msgid "Could not deserialize data" msgstr "Impossible de désérialiser les données" #, python-format msgid "Could not retrieve schema from %(conn)s: %(err)s" msgstr "Impossible d'extraire le schéma de %(conn)s : %(err)s" #, python-format msgid "" "Current gateway ip %(ip_address)s already in use by port %(port_id)s. Unable " "to update." msgstr "" "L'adresse IP de la passerelle en cours %(ip_address)s est déjà utilisée par " "le port %(port_id)s. Mise à jour impossible." msgid "Currently update of HA mode for a DVR/HA router is not supported." msgstr "" "La mise à jour du mode HA pour un routeur DVR/HA n'est actuellement pas " "prise en charge." msgid "Currently update of HA mode for a distributed router is not supported." msgstr "" "La mise à jour du mode HA pour un routeur distribué n'est actuellement pas " "prise en charge." 
msgid "" "Currently update of distributed mode for a DVR/HA router is not supported" msgstr "" "La mise à jour du mode distribué pour un routeur DVR/HA n'est actuellement " "pas prise en charge." msgid "Currently update of distributed mode for an HA router is not supported." msgstr "" "La mise à jour du mode distribué pour un routeur HA n'est actuellement pas " "prise en charge." msgid "" "Currently updating a router from DVR/HA to non-DVR non-HA is not supported." msgstr "" "La mise à jour d'un routeur DVR/HA vers un routeur non-DVR non-HA n'est " "actuellement pas prise en charge. " msgid "Currently updating a router to DVR/HA is not supported." msgstr "" "La mise à jour d'un routeur vers DVR/HA n'est actuellement pas prise en " "charge." msgid "" "DHCP lease duration (in seconds). Use -1 to tell dnsmasq to use infinite " "lease times." msgstr "" "Durée de bail DHCP (en secondes). Utiliser -1 pour indiquer à dnsmasq " "d'utiliser des durées de bail illimitées." msgid "" "DVR deployments for VXLAN/GRE/Geneve underlays require L2-pop to be enabled, " "in both the Agent and Server side." msgstr "" "Les déploiements DVR pour les sous-couches VXLAN/GRE/Geneve nécessitent que " "L2-pop soit activé, à la fois côté agent et côté serveur." msgid "" "Database engine for which script will be generated when using offline " "migration." msgstr "" "Moteur de base de données pour lequel le script va être généré lors de " "l'utilisation d'une migration hors ligne." msgid "" "Default IPv4 subnet pool to be used for automatic subnet CIDR allocation. " "Specifies by UUID the pool to be used in case where creation of a subnet is " "being called without a subnet pool ID. If not set then no pool will be used " "unless passed explicitly to the subnet create. If no pool is used, then a " "CIDR must be passed to create a subnet and that subnet will not be allocated " "from any pool; it will be considered part of the tenant's private address " "space. This option is deprecated for removal in the N release." msgstr "" "Pool de sous-réseau IPv4 par défaut à utiliser pour l'allocation CIDR de " "sous-réseau automatique. Indique par UUID le pool à utiliser quand la " "création d'un sous-réseau est appelée sans ID de pool de sous-réseau. Si ce " "paramètre n'est pas défini, aucun pool ne sera utilisé à moins d'être " "indiqué de manière explicite lors de la création de sous-réseau. Si aucun " "pool n'est utilisé, un CIDR doit être transmis pour la création d'un sous-" "réseau, et ce dernier ne sera alloué depuis aucun pool ; il sera considéré " "comme faisant partie de l'espace d'adresse privé du locataire. Cette option " "est obsolète et sera retirée dans la version N." msgid "" "Default IPv6 subnet pool to be used for automatic subnet CIDR allocation. " "Specifies by UUID the pool to be used in case where creation of a subnet is " "being called without a subnet pool ID. See the description for " "default_ipv4_subnet_pool for more information. This option is deprecated for " "removal in the N release." msgstr "" "Pool de sous-réseau IPv6 par défaut à utiliser pour l'allocation CIDR de " "sous-réseau automatique. Indique par UUID le pool à utiliser quand la " "création d'un sous-réseau est appelée sans ID pool de sous-réseau. Voir la " "description de default_ipv4_subnet_pool pour plus d'informations. Cette " "option est obsolète et sera retirée dans la version N." 
msgid "Default driver to use for quota checks" msgstr "Pilote par défaut à utiliser pour les vérifications de quota" msgid "Default external networks must be shared to everyone." msgstr "Les réseaux externes par défaut doivent être partagés par tous." msgid "" "Default network type for external networks when no provider attributes are " "specified. By default it is None, which means that if provider attributes " "are not specified while creating external networks then they will have the " "same type as tenant networks. Allowed values for external_network_type " "config option depend on the network type values configured in type_drivers " "config option." msgstr "" "Type de réseau par défaut pour des réseaux externes lorsqu'aucun attribut de " "fournisseur n'est spécifié. La valeur par défaut None signifie que si des " "attributs de fournisseur ne sont pas spécifiés lors de la création de " "réseaux externes, ces derniers prennent le même type que les réseaux " "locataires. Les valeurs autorisées pour l'option de configuration " "external_network_type dépendent des valeurs de type de réseau configurées " "dans l'option de configuration type_drivers. " msgid "" "Default number of RBAC entries allowed per tenant. A negative value means " "unlimited." msgstr "" "Nombre par défaut d'entrées RBAC autorisées par locataire. Une valeur " "négative signifie que le nombre est illimité." msgid "" "Default number of resource allowed per tenant. A negative value means " "unlimited." msgstr "" "Nombre par défaut de ressources autorisées par locataire. Une valeur " "négative signifie illimité." msgid "Default security group" msgstr "Groupe de sécurité par défaut" msgid "Default security group already exists." msgstr "Le groupe de sécurité par défaut existe déjà." msgid "" "Default value of availability zone hints. The availability zone aware " "schedulers use this when the resources availability_zone_hints is empty. " "Multiple availability zones can be specified by a comma separated string. " "This value can be empty. In this case, even if availability_zone_hints for a " "resource is empty, availability zone is considered for high availability " "while scheduling the resource." msgstr "" "Valeur par défaut des suggestions de zone de disponibilité. Les " "planificateurs de zone de disponibilité utilisent cette valeur lorsque le " "paramètre availability_zone_hints de ressources est à blanc. Plusieurs zones " "de disponibilité peuvent être indiquées en les séparant par une virgule. " "Cette valeur peut être vide. Dans ce cas, même si le paramètre " "availability_zone_hints d'une ressource est à blanc, la zone de " "disponibilité est prise en compte pour la haute disponibilité lors de la " "planification de la ressource." msgid "" "Define the default value of enable_snat if not provided in " "external_gateway_info." msgstr "" "Définissez la valeur par défaut de enable_snat si elle n'est pas indiquée " "dans external_gateway_info." msgid "" "Defines providers for advanced services using the format: :" ":[:default]" msgstr "" "Définit des fournisseurs pour les services avancés utilisant le format : " "::[:default]" msgid "" "Delay within which agent is expected to update existing ports whent it " "restarts" msgstr "" "Temps d'attente durant lequel l'agent doit mettre à jour les ports existants " "lors de son redémarrage" msgid "Delete the namespace by removing all devices." msgstr "Supprimez l'espace de nom en retirant toutes les unités." 
#, python-format msgid "Deleting port %s" msgstr "Suppression du port %s" #, python-format msgid "Deployment error: %(reason)s." msgstr "Erreur de déploiement : %(reason)s." msgid "Destroy IPsets even if there is an iptables reference." msgstr "Destruction des IPsets même s'il existe une référence iptables." msgid "Destroy all IPsets." msgstr "Destruction de tous les IPsets." #, python-format msgid "Device %(dev_name)s in mapping: %(mapping)s not unique" msgstr "Unité %(dev_name)s non unique dans le mappage '%(mapping)s'" #, python-format msgid "Device '%(device_name)s' does not exist." msgstr "L'unité '%(device_name)s' n'existe pas." msgid "Device has no virtual functions" msgstr "L'unité ne possède pas de fonction virtuelle" #, python-format msgid "Device name %(dev_name)s is missing from physical_device_mappings" msgstr "Le nom d'unité %(dev_name)s est manquant dans physical_device_mappings" msgid "Device not found" msgstr "Unité introuvable." #, python-format msgid "" "Distributed Virtual Router Mac Address for host %(host)s does not exist." msgstr "" "L'adresse MAC DVR (routeur virtuel distribué) n'existe pas pour l'hôte " "%(host)s." #, python-format msgid "Domain %(dns_domain)s not found in the external DNS service" msgstr "Domaine %(dns_domain)s non trouvé dans le service DNS externe" msgid "Domain to use for building the hostnames" msgstr "Domaine à utiliser pour générer les noms d'hôte" msgid "" "Domain to use for building the hostnames. This option is deprecated. It has " "been moved to neutron.conf as dns_domain. It will be removed in a future " "release." msgstr "" "Domaine à utiliser pour générer les noms d'hôte. Cette option est obsolète. " "Elle a été transférée dans neutron.conf sous la forme dns_domain. Elle sera " "supprimée dans une édition ultérieure." msgid "Downgrade no longer supported" msgstr "La rétromigration n'est plus prise en charge" #, python-format msgid "Driver %s is not unique across providers" msgstr "Le pilote %s n'est pas unique parmi les fournisseurs" msgid "Driver for external DNS integration." msgstr "Pilote pour intégration DNS externe." msgid "Driver for security groups firewall in the L2 agent" msgstr "Pilote pour le pare-feu de groupes de sécurité dans l'agent L2" msgid "Driver to use for scheduling network to DHCP agent" msgstr "Pilote à utiliser pour la planification du réseau pour l'agent DHCP" msgid "Driver to use for scheduling router to a default L3 agent" msgstr "" "Pilote à utiliser pour la planification du routeur pour un agent L3 par " "défaut" msgid "" "Driver used for ipv6 prefix delegation. This needs to be an entry point " "defined in the neutron.agent.linux.pd_drivers namespace. See setup.cfg for " "entry points included with the neutron source." msgstr "" "Pilote utilisé pour la délégation de préfixe ipv6. Il doit s'agir d'un point " "d'entrée défini dans l'espace de nom neutron.agent.linux.pd_drivers. Voir " "setup.cfg pour connaître les points d'entrée inclus avec la source de " "neutron. " msgid "Driver used for scheduling BGP speakers to BGP DrAgent" msgstr "Pilote utilisé pour la planification des routeurs BGP dans BGP DrAgent" msgid "Drivers list to use to send the update notification" msgstr "" "Liste de pilotes à utiliser pour envoyer la notification de mise à jour" #, python-format msgid "Duplicate IP address '%s'" msgstr "Adresse IP en double '%s'" #, python-format msgid "" "Duplicate L3HARouterAgentPortBinding is created for router(s) %(router)s. " "Database cannot be upgraded. 
Please remove all duplicates before upgrading " "the database." msgstr "" "Un élément L3HARouterAgentPortBinding en double est créé pour le ou les " "routeurs %(router)s. La base de données ne peut pas être mise à niveau. " "Retirez tous les éléments en double avant de mettre à niveau la base de " "données." msgid "Duplicate Metering Rule in POST." msgstr "Règle de mesure en double dans la demande POST." msgid "Duplicate Security Group Rule in POST." msgstr "Règle de groupe de sécurité en double dans la demande POST." msgid "Duplicate address detected" msgstr "Adresse en double détectée" #, python-format msgid "Duplicate hostroute '%s'" msgstr "Route hôte en double '%s'" #, python-format msgid "Duplicate items in the list: '%s'" msgstr "Eléments en double dans la liste : '%s'" #, python-format msgid "Duplicate nameserver '%s'" msgstr "Serveur de noms en double '%s'" msgid "Duplicate segment entry in request." msgstr "Entrée de segment en double dans la demande." #, python-format msgid "ERROR: %s" msgstr "ERREUR : %s" msgid "" "ERROR: Unable to find configuration file via the default search paths (~/." "neutron/, ~/, /etc/neutron/, /etc/) and the '--config-file' option!" msgstr "" "ERREUR : Impossible de trouver le fichier de configuration via les chemins " "de recherche par défaut (~/.neutron/, ~/, /etc/neutron/, /etc/) et l'option " "'--config-file'." msgid "" "Either the network_id or the router_id parameter must be passed to the " "_get_ports method." msgstr "" "Le paramètre network_id ou le paramètre router_id doit être transmis à la " "méthode _get_ports." msgid "Either subnet_id or port_id must be specified" msgstr "L'ID sous-réseau ou l'ID port doit être spécifié." msgid "Empty physical network name." msgstr "Nom du réseau physique vide." msgid "Empty subnet pool prefix list." msgstr "Liste de préfixes de pool de sous-réseaux vide." msgid "Enable FWaaS" msgstr "Activer FWaaS" msgid "Enable HA mode for virtual routers." msgstr "Activer le mode haute disponibilité pour les routeurs virtuels." msgid "Enable SSL on the API server" msgstr "Activer SSL sur le serveur d'API" msgid "" "Enable VXLAN on the agent. Can be enabled when agent is managed by ml2 " "plugin using linuxbridge mechanism driver" msgstr "" "Activer VXLAN sur l'agent. Il peut être activé lorsque l'agent est géré par " "le plug-in ml2 utilisant le pilote de mécanisme linuxbridge" msgid "" "Enable local ARP responder if it is supported. Requires OVS 2.1 and ML2 " "l2population driver. Allows the switch (when supporting an overlay) to " "respond to an ARP request locally without performing a costly ARP broadcast " "into the overlay." msgstr "" "Activez le canal répondeur ARP local s'il est pris en charge. Requiert OVS " "2.1 et le pilote l2population de ML2. Permet au commutateur (lors de la prise " "en charge d'une superposition) de répondre localement à une demande ARP sans " "effectuer de diffusion ARP coûteuse sur le réseau Overlay." msgid "" "Enable local ARP responder which provides local responses instead of " "performing ARP broadcast into the overlay. Enabling local ARP responder is " "not fully compatible with the allowed-address-pairs extension." msgstr "" "Activez le canal répondeur ARP local qui fournit des réponses locales au " "lieu d'effectuer une diffusion ARP dans la superposition. L'activation " "du canal répondeur ARP local n'est pas pleinement compatible avec " "l'extension de paires d'adresses autorisées." msgid "" "Enable services on an agent with admin_state_up False.
If this option is " "False, when admin_state_up of an agent is turned False, services on it will " "be disabled. Agents with admin_state_up False are not selected for automatic " "scheduling regardless of this option. But manual scheduling to such agents " "is available if this option is True." msgstr "" "Activer les services sur un agent ayant admin_state_up avec une valeur " "False. Si cette option est False, lorsque admin_state_up pour un agent se " "voit attribuer la valeur False, les services qui y sont associés seront " "automatiquement désactivés. Les agents ayant admin_state_up avec la valeur " "False ne sont pas sélectionnés pour la planification automatique, quelle que " "soit la valeur de cette option. Toutefois, il est possible de procéder à une " "planification manuelle pour ces agents si cette option a pour valeur True." msgid "" "Enable suppression of ARP responses that don't match an IP address that " "belongs to the port from which they originate. Note: This prevents the VMs " "attached to this agent from spoofing, it doesn't protect them from other " "devices which have the capability to spoof (e.g. bare metal or VMs attached " "to agents without this flag set to True). Spoofing rules will not be added " "to any ports that have port security disabled. For LinuxBridge, this " "requires ebtables. For OVS, it requires a version that supports matching ARP " "headers. This option will be removed in Newton so the only way to disable " "protection will be via the port security extension." msgstr "" "Activez la suppression des réponses ARP qui ne correspondent pas à une " "adresse IP appartenant au port dont elles proviennent. Remarque : Cela " "permet d'éviter l'usurpation des machines virtuelles connectées à cet agent, " "cela ne les protège pas d'autres unités capables d'usurpation (par exemple, " "unités bare metal ou machines virtuelles connectées à des agents sans cet " "indicateur défini sur True). Aucune règle d'usurpation ne sera ajoutée aux " "ports dont la sécurité est désactivée. Pour LinuxBridge, ebtables est " "requis. Pour OVS, il est nécessaire de disposer d'une version prenant en " "charge les en-têtes ARP correspondants. Cette option va être retirée dans " "Newton, l'unique façon de désactiver la protection sera donc via l'extension " "de sécurité de port." msgid "" "Enable/Disable log watch by metadata proxy. It should be disabled when " "metadata_proxy_user/group is not allowed to read/write its log file and " "copytruncate logrotate option must be used if logrotate is enabled on " "metadata proxy log files. Option default value is deduced from " "metadata_proxy_user: watch log is enabled if metadata_proxy_user is agent " "effective user id/name." msgstr "" "Activer/Désactiver la surveillance des journaux par le proxy de métadonnées. " "Doit être désactivée lorsque metadata_proxy_user/group n'est pas autorisé à " "lire/écrire son fichier journal et l'option copytruncate logrotate doit être " "utilisée si logrotate est activé sur les fichiers journaux de proxy de " "métadonnées. La valeur par défaut de l'option est déduite de " "metadata_proxy_user : la surveillance des journaux est activée si " "metadata_proxy_user correspond à l'ID/au nom de l'utilisateur effectif de " "l'agent." msgid "" "Enables IPv6 Prefix Delegation for automatic subnet CIDR allocation. Set to " "True to enable IPv6 Prefix Delegation for subnet allocation in a PD-capable " "environment. 
Users making subnet creation requests for IPv6 subnets without " "providing a CIDR or subnetpool ID will be given a CIDR via the Prefix " "Delegation mechanism. Note that enabling PD will override the behavior of " "the default IPv6 subnetpool." msgstr "" "Active la délégation de préfixe IPv6 pour l'allocation CIDR de sous-réseau " "automatique. Définissez ce paramètre sur True pour activer la délégation de " "préfixe IPv6 pour l'allocation de sous-réseau dans un environnement " "compatible PD. Les utilisateurs effectuant des demandes de création de sous-" "réseau pour des sous-réseaux IPv6 sans indiquer de CIDR ou d'ID de pool de " "sous-réseau se verront affecter un CIDR via le mécanisme de délégation de " "préfixe. Notez que l'activation de PD se substitue au comportement du pool " "de sous-réseau IPv6 par défaut." msgid "" "Enables the dnsmasq service to provide name resolution for instances via DNS " "resolvers on the host running the DHCP agent. Effectively removes the '--no-" "resolv' option from the dnsmasq process arguments. Adding custom DNS " "resolvers to the 'dnsmasq_dns_servers' option disables this feature." msgstr "" "Permet au service dnsmasq de fournir la résolution de nom pour les instances " "via des programmes de résolution DNS sur l'hôte exécutant l'agent DHCP. " "Supprime l'option '--no-resolv' des arguments de processus dnsmasq. L'ajout " "de programmes de résolution DNS personnalisés à l'option " "'dnsmasq_dns_servers' désactive cette fonction." msgid "Encountered an empty component." msgstr "Un composant vide a été trouvé." msgid "End of VLAN range is less than start of VLAN range" msgstr "La fin de la plage de réseaux locaux virtuels est inférieure au début" msgid "End of tunnel range is less than start of tunnel range" msgstr "La fin de la plage de tunnels est inférieure au début" msgid "Enforce using split branches file structure." msgstr "" "Imposer l'utilisation d'une structure de fichier de branches fractionnées." msgid "" "Ensure that configured gateway is on subnet. For IPv6, validate only if " "gateway is not a link local address. Deprecated, to be removed during the " "Newton release, at which point the gateway will not be forced onto the " "subnet." msgstr "" "Assurez-vous que la passerelle configurée est sur le sous-réseau. Pour IPv6, " "validez uniquement si la passerelle n'est pas une adresse locale de liaison. " "Paramètre obsolète, qui sera retiré dans la version Newton, auquel cas la " "passerelle ne sera pas forcée sur le sous-réseau." #, python-format msgid "Error %(reason)s while attempting the operation." msgstr "Erreur %(reason)s lors de la tentative d'exécution de l'opération." #, python-format msgid "Error importing FWaaS device driver: %s" msgstr "Erreur d'importation du pilote de périphérique FWaaS : %s" #, python-format msgid "Error parsing dns address %s" msgstr "Erreur lors de l'analyse syntaxique de l'adresse DNS %s" #, python-format msgid "Error while reading %s" msgstr "Erreur lors de la lecture de %s" #, python-format msgid "" "Exceeded %s second limit waiting for address to leave the tentative state." msgstr "" "Limite de %s secondes dépassée lors de l'attente de la sortie de l'adresse " "de l'état 'tentative'." msgid "Exceeded maximum amount of fixed ips per port." msgstr "Nombre maximum d'IP fixes par port dépassé."
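# Editor's note: a minimal sketch of the Prefix Delegation and dnsmasq
# resolver behaviours described above, assuming the standard option names
# ipv6_pd_enabled (neutron.conf) and dnsmasq_local_resolv (dhcp_agent.ini):
#   [DEFAULT]
#   ipv6_pd_enabled = True        # hand out IPv6 CIDRs via Prefix Delegation
#   dnsmasq_local_resolv = True   # drops '--no-resolv' from dnsmasq arguments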
msgid "Existing prefixes must be a subset of the new prefixes" msgstr "" "Les préfixes existants doivent être un sous-réseau des nouveaux préfixes" #, python-format msgid "" "Exit code: %(returncode)d; Stdin: %(stdin)s; Stdout: %(stdout)s; Stderr: " "%(stderr)s" msgstr "" "Code de sortie : %(returncode)d; Stdin : %(stdin)s; Stdout : %(stdout)s; " "Stderr : %(stderr)s" #, python-format msgid "Extension %(driver)s failed." msgstr "Echec de l'extension %(driver)s. " #, python-format msgid "" "Extension driver %(driver)s required for service plugin %(service_plugin)s " "not found." msgstr "" "Le pilote d'extension %(driver)s requis pour le plug-in de service " "%(service_plugin)s est introuvable." msgid "" "Extension to use alongside ml2 plugin's l2population mechanism driver. It " "enables the plugin to populate VXLAN forwarding table." msgstr "" "Extension à utiliser avec le pilote de mécanisme l2population du plug-in " "ml2. Elle permet au plug-in de remplir la table de réacheminement VXLAN." #, python-format msgid "Extension with alias %s does not exist" msgstr "L'extension avec l'alias %s n'existe pas" msgid "Extensions list to use" msgstr "Liste d'extensions à utiliser." #, python-format msgid "Extensions not found: %(extensions)s." msgstr "Extensions introuvable : %(extensions)s " #, python-format msgid "External DNS driver %(driver)s could not be found." msgstr "Le pilote DNS externe %(driver)s est introuvable." #, python-format msgid "External IP %s is the same as the gateway IP" msgstr "L'adresse IP externe %s est identique à l'adresse IP de passerelle" #, python-format msgid "" "External network %(external_network_id)s is not reachable from subnet " "%(subnet_id)s. Therefore, cannot associate Port %(port_id)s with a Floating " "IP." msgstr "" "Le réseau externe %(external_network_id)s n'est pas accessible à partir du " "sous-réseau %(subnet_id)s. Par conséquent, il est impossible d'associer le " "port %(port_id)s à une adresse IP flottante." #, python-format msgid "" "External network %(net_id)s cannot be updated to be made non-external, since " "it has existing gateway ports" msgstr "" "Le réseau externe %(net_id)s ne peut pas être mis à jour pour devenir non " "externe car il a des ports de passerelle existants" #, python-format msgid "ExtraDhcpOpt %(id)s could not be found" msgstr "ExtraDhcpOpt %(id)s introuvable" msgid "" "FWaaS plugin is configured in the server side, but FWaaS is disabled in L3-" "agent." msgstr "" "Le plug-in FWaaS est configuré côté serveur, mais FWaaS est désactivé dans " "l'agent L3." #, python-format msgid "Failed rescheduling router %(router_id)s: no eligible l3 agent found." msgstr "" "Echec de replanification du routeur %(router_id)s : aucun agent l3 éligible " "trouvé." #, python-format msgid "Failed scheduling router %(router_id)s to the L3 Agent %(agent_id)s." msgstr "" "Echec de planification du routeur %(router_id)s vers l'agent L3 %(agent_id)s." #, python-format msgid "" "Failed to allocate a VRID in the network %(network_id)s for the router " "%(router_id)s after %(max_tries)s tries." msgstr "" "Echec d'allocation d'un identificateur de routeur virtuel sur le réseau " "%(network_id)s pour le routeur %(router_id)s après %(max_tries)s tentatives." #, python-format msgid "Failed to allocate subnet: %(reason)s." msgstr "Echec d'allocation de sous-réseau : %(reason)s." msgid "" "Failed to associate address scope: subnetpools within an address scope must " "have unique prefixes." 
msgstr "" "Echec d'association de la portée d'adresse : les pools de sous-réseau au " "sein d'une portée d'adresse doivent avoir des préfixes uniques." #, python-format msgid "Failed to check policy %(policy)s because %(reason)s." msgstr "Echec de vérification de la stratégie %(policy)s car %(reason)s." #, python-format msgid "" "Failed to create a duplicate %(object_type)s: for attribute(s) " "%(attributes)s with value(s) %(values)s" msgstr "" "Echec de création d'un élément %(object_type)s en double pour le ou les " "attributs %(attributes)s avec la ou les valeurs %(values)s" #, python-format msgid "" "Failed to create port on network %(network_id)s, because fixed_ips included " "invalid subnet %(subnet_id)s" msgstr "" "Echec de création de port sur le réseau %(network_id)s car les adresses IP " "fixes incluent un sous-réseau non valide %(subnet_id)s " #, python-format msgid "Failed to init policy %(policy)s because %(reason)s." msgstr "Echec d'initialisation de la stratégie %(policy)s car %(reason)s." #, python-format msgid "Failed to locate source for %s." msgstr "Echec de localisation de la source de %s." #, python-format msgid "Failed to parse request. Parameter '%s' not specified" msgstr "Echec d'analyse de la demande. Paramètre '%s' non spécifié" #, python-format msgid "Failed to parse request. Required attribute '%s' not specified" msgstr "Echec d'analyse de la demande. Attribut obligatoire '%s' non spécifié" msgid "Failed to remove supplemental groups" msgstr "Echec de suppression des groupes supplémentaires" #, python-format msgid "Failed to set gid %s" msgstr "Echec de la définition du GID %s" #, python-format msgid "Failed to set uid %s" msgstr "Echec de la définition de l'UID %s" #, python-format msgid "Failed to set-up %(type)s tunnel port to %(ip)s" msgstr "Echec de configuration du port de tunnel %(type)s sur %(ip)s" msgid "Failure applying iptables rules" msgstr "Echec lors de la mise à jour des règles iptables" #, python-format msgid "Failure waiting for address %(address)s to become ready: %(reason)s" msgstr "" "Echec lors de l'attente du passage de l'adresse %(address)s à l'état prêt : " "%(reason)s" msgid "Flat provider networks are disabled" msgstr "Les réseaux de fournisseurs centralisés sont désactivés" #, python-format msgid "Flavor %(flavor_id)s could not be found." msgstr "La version %(flavor_id)s est introuvable." #, python-format msgid "Flavor %(flavor_id)s is used by some service instance." msgstr "" "La version %(flavor_id)s est utilisée par certaines instances de service." msgid "Flavor is not enabled." msgstr "La version n'est pas activée." #, python-format msgid "Floating IP %(floatingip_id)s could not be found" msgstr "L'adresse IP flottante %(floatingip_id)s est introuvable" #, python-format msgid "" "Floating IP %(floatingip_id)s is associated with non-IPv4 address " "%s(internal_ip)s and therefore cannot be bound." msgstr "" "L'adresse IP flottante %(floatingip_id)s est associée à l'adresse non IPV4 " "%s(internal_ip)s, et par conséquent, elle ne peut pas être liée. 
" msgid "For TCP/UDP protocols, port_range_min must be <= port_range_max" msgstr "" "Pour les protocoles TCP/UDP, port_range_min doit être <= port_range_max" #, python-format msgid "For class %(object_type)s missing primary keys: %(missing_keys)s" msgstr "" "Pour les clés primaires manquantes de la classe %(object_type)s : " "%(missing_keys)s" msgid "Force ip_lib calls to use the root helper" msgstr "Forcer les appels ip_lib à utiliser Root Helper" #, python-format msgid "Found duplicate extension: %(alias)s." msgstr "Extension en double trouvée : %(alias)s." #, python-format msgid "" "Found overlapping allocation pools: %(pool_1)s %(pool_2)s for subnet " "%(subnet_cidr)s." msgstr "" "Chevauchement de pools d'allocation trouvé : %(pool_1)s %(pool_2)s pour le " "sous-réseau %(subnet_cidr)s." msgid "Gateway IP version inconsistent with allocation pool version" msgstr "" "Version IP de passerelle non cohérente avec la version de pool d'allocation" #, python-format msgid "" "Gateway cannot be updated for router %(router_id)s, since a gateway to " "external network %(net_id)s is required by one or more floating IPs." msgstr "" "La passerelle ne peut pas être mise à jour pour le routeur %(router_id)s, " "car une passerelle vers le réseau externe %(net_id)s est requise par une ou " "plusieurs adresses IP flottantes." #, python-format msgid "Gateway ip %(ip_address)s conflicts with allocation pool %(pool)s." msgstr "" "Conflits de l'IP passerelle %(ip_address)s avec le pool d'allocation " "%(pool)s." msgid "Gateway is not valid on subnet" msgstr "La passerelle n'est pas valide sur le sous-réseau." msgid "" "Geneve encapsulation header size is dynamic, this value is used to calculate " "the maximum MTU for the driver. This is the sum of the sizes of the outer " "ETH + IP + UDP + GENEVE header sizes. The default size for this field is 50, " "which is the size of the Geneve header without any additional option headers." msgstr "" "La taille de l'en-tête d'encapsulation Geneve est dynamique. Cette valeur " "est utilisée pour calculer la valeur MTU maximum du pilote. Il s'agit de la " "somme des tailles des en-têtes ETH + IP + UDP + GENEVE externes. La taille " "par défaut pour cette zone est de 50, taille de l'en-tête Geneve sans aucun " "en-tête d'option supplémentaire." msgid "Group (gid or name) running metadata proxy after its initialization" msgstr "" "Groupe (UID ou nom) exécutant le proxy de métadonnées après son " "initialisation" msgid "" "Group (gid or name) running metadata proxy after its initialization (if " "empty: agent effective group)." msgstr "" "Groupe (UID ou nom) exécutant le proxy de métadonnées après son " "initialisation (si vide : groupe effectif de l'agent)." msgid "Group (gid or name) running this process after its initialization" msgstr "Groupe (UID ou nom) exécutant ce processus après son initialisation" #, python-format msgid "HEAD file does not match migration timeline head, expected: %s" msgstr "" "Le fichier HEAD ne correspond pas à l'élément Head de calendrier de " "migration, attendu : %s" msgid "" "Hostname to be used by the Neutron server, agents and services running on " "this machine. All the agents and services running on this machine must use " "the same host value." msgstr "" "Nom d'hôte qui doit être utilisé par le serveur, les agents et les services " "Neutron qui s'exécutent sur cette machine. Tous les agents et services qui " "s'exécutent sur cette machine doivent utiliser la même valeur d'hôte." 
msgid "How many times Neutron will retry MAC generation" msgstr "" "Nombre de nouvelles tentatives de génération MAC qui seront effectuées par " "Neutron" #, python-format msgid "" "ICMP code (port-range-max) %(value)s is provided but ICMP type (port-range-" "min) is missing." msgstr "" "Le code ICMP (port-range-max) %(value)s est fourni mais le type ICMP (port-" "range-min) est manquant." msgid "ID of network" msgstr "ID du réseau" msgid "ID of network to probe" msgstr "ID du réseau à sonder" msgid "ID of probe port to delete" msgstr "ID du port sonde à supprimer" msgid "ID of probe port to execute command" msgstr "ID du port sonde pour exécuter la commande" msgid "ID of the router" msgstr "Identifiant du routeur" #, python-format msgid "IP address %(ip)s already allocated in subnet %(subnet_id)s" msgstr "L'adresse IP %(ip)s est déjà allouée sur le sous-réseau %(subnet_id)s" #, python-format msgid "IP address %(ip)s does not belong to subnet %(subnet_id)s" msgstr "L'adresse IP %(ip)s n'appartient pas au sous-réseau %(subnet_id)s" #, python-format msgid "" "IP address %(ip_address)s is not a valid IP for any of the subnets on the " "specified network." msgstr "" "L'adresse IP %(ip_address)s n'est pas une adresse IP valide pour les sous-" "réseaux du réseau indiqué." msgid "IP address used by Nova metadata server." msgstr "Adresse IP utilisée par le serveur de métadonnées Nova" msgid "IP allocation failed. Try again later." msgstr "Echec d'allocation d'IP. Réessayez ultérieurement." msgid "IP allocation requires subnet_id or ip_address" msgstr "L'allocation d'adresse IP requiert subnet_id ou ip_address" #, python-format msgid "" "IPTablesManager.apply failed to apply the following set of iptables rules:\n" "%s" msgstr "" "IPTablesManager.apply n'a pas pu appliquer l'ensemble suivant de règles " "iptables :\n" "%s" msgid "IPtables conntrack zones exhausted, iptables rules cannot be applied." msgstr "" "Zones conntrack IPtables épuisées ; impossible d'appliquer les règles " "iptables. " msgid "IPv6 Address Mode must be SLAAC or Stateless for Prefix Delegation." msgstr "" "Le mode Adresse IPv6 doit être SLAAC ou Sans état pour la délégation de " "préfixe. " msgid "IPv6 RA Mode must be SLAAC or Stateless for Prefix Delegation." msgstr "" "Le mode RA IPv6 doit être SLAAC ou Sans état pour la délégation de préfixe. " #, python-format msgid "" "IPv6 address %(address)s can not be directly assigned to a port on subnet " "%(id)s since the subnet is configured for automatic addresses" msgstr "" "L'adresse IPv6 %(address)s ne peut pas être directement affectée à un port " "du sous-réseau %(id)s car celui-ci est configuré pour l'obtention " "automatique d'adresses" #, python-format msgid "" "IPv6 address %(ip)s cannot be directly assigned to a port on subnet " "%(subnet_id)s as the subnet is configured for automatic addresses" msgstr "" "L'adresse IPv6 %(ip)s ne peut pas être directement affectée à un port du " "sous-réseau %(subnet_id)s car celui-ci est configuré pour l'obtention " "automatique d'adresses " #, python-format msgid "" "IPv6 subnet %s configured to receive RAs from an external router cannot be " "added to Neutron Router." msgstr "" "Le sous-réseau IPv6 %s configuré pour recevoir les avertissements (RA) d'un " "routeur externe ne peut pas être ajouté au routeur Neutron." msgid "" "If True, advertise network MTU values if core plugin calculates them. MTU is " "advertised to running instances via DHCP and RA MTU options." 
msgstr "" "Si ce paramètre est défini sur True, publiez les valeurs MTU réseau si le " "plug-in principal les calcule. MTU est publié pour l'exécution d'instances " "via les options DHCP et RA MTU." msgid "" "If True, then allow plugins that support it to create VLAN transparent " "networks." msgstr "" "Si True, autorisez les plug-in qui le prennent en charge à créer des réseaux " "VLAN transparents." msgid "" "If non-empty, the l3 agent can only configure a router that has the matching " "router ID." msgstr "" "S'il n'est pas vide, l'agent l3 peut uniquement configurer un routeur qui " "comporte l'ID routeur correspondant." msgid "Illegal IP version number" msgstr "Numéro de version IP non conforme" #, python-format msgid "" "Illegal prefix bounds: %(prefix_type)s=%(prefixlen)s, %(base_prefix_type)s=" "%(base_prefixlen)s." msgstr "" "Limites de préfixe non conformes : %(prefix_type)s=%(prefixlen)s, " "%(base_prefix_type)s=%(base_prefixlen)s." #, python-format msgid "" "Illegal subnetpool association: subnetpool %(subnetpool_id)s cannot " "associate with address scope %(address_scope_id)s because subnetpool " "ip_version is not %(ip_version)s." msgstr "" "Association de pool de sous-réseaux non conforme : le pool de sous-réseaux " "%(subnetpool_id)s ne peut pas s'associer à la portée d'adresse " "%(address_scope_id)s car ip_version du pool de sous-réseaux n'est pas " "%(ip_version)s." #, python-format msgid "" "Illegal subnetpool association: subnetpool %(subnetpool_id)s cannot be " "associated with address scope %(address_scope_id)s." msgstr "" "Association de pool de sous-réseaux non conforme : le pool de sous-réseaux " "%(subnetpool_id)s ne peut pas être associé à la portée d'adresse " "%(address_scope_id)s." #, python-format msgid "Illegal subnetpool update : %(reason)s." msgstr "Mise à jour de pool de sous-réseaux non conforme : %(reason)s." #, python-format msgid "Illegal update to prefixes: %(msg)s." msgstr "Mise à jour de préfixes non conforme : %(msg)s." msgid "" "In some cases the Neutron router is not present to provide the metadata IP " "but the DHCP server can be used to provide this info. Setting this value " "will force the DHCP server to append specific host routes to the DHCP " "request. If this option is set, then the metadata service will be activated " "for all the networks." msgstr "" "Dans certains cas, le routeur Neutron n'est pas présent pour fournir l'IP de " "métadonnées mais le serveur DHCP peut être utilisé pour fournir ces " "informations. Lorsque cette valeur est définie, cela force le serveur DHCP à " "ajouter des routes hôte spécifiques à la demande DHCP. Lorsque cette option " "est définie, le service de métadonnées est activé pour tous les réseaux." #, python-format msgid "Incorrect pci_vendor_info: \"%s\", should be pair vendor_id:product_id" msgstr "" "Infos pci_vendor_info incorrectes : \"%s\". Devraient figurer sous forme de " "paire vendor_id:product_id" msgid "" "Indicates that this L3 agent should also handle routers that do not have an " "external network gateway configured. This option should be True only for a " "single agent in a Neutron deployment, and may be False for all agents if all " "routers must have an external network gateway." msgstr "" "Indique que cet agent L3 doit aussi traiter les routeurs pour lesquels " "aucune passerelle de réseau externe n'est configurée. 
Cette option doit être " "définie sur True uniquement pour un seul agent dans un déploiement Neutron ; " "elle peut être définie sur False pour tous les agents si tous les routeurs " "doivent avoir une passerelle de réseau externe." #, python-format msgid "Instance of class %(module)s.%(class)s must contain _cache attribute" msgstr "" "L'instance de la classe %(module)s.%(class)s doit contenir l'attribut _cache." #, python-format msgid "Insufficient prefix space to allocate subnet size /%s" msgstr "" "Espace préfixe insuffisant pour l'allocation de la taille de sous-réseau /%s" msgid "Insufficient rights for removing default security group." msgstr "Droits insuffisants pour retirer le groupe de sécurité par défaut." msgid "" "Integration bridge to use. Do not change this parameter unless you have a " "good reason to. This is the name of the OVS integration bridge. There is one " "per hypervisor. The integration bridge acts as a virtual 'patch bay'. All VM " "VIFs are attached to this bridge and then 'patched' according to their " "network connectivity." msgstr "" "Pont d'intégration à utiliser. Ne modifiez pas ce paramètre à moins d'avoir " "une bonne raison pour cela. Il s'agit du nom du pont d'intégration OVS. Il y " "en existe un par hyperviseur. Le pont d'intégration fait office de 'baie " "corrective' virtuelle. Tous les VIF de machine virtuelle sont connectés à ce " "pont puis 'corrigés' d'après leur connectivité réseau." msgid "Interface to monitor" msgstr "Interface à surveiller" msgid "" "Interval between checks of child process liveness (seconds), use 0 to disable" msgstr "" "Intervalle entre les vérifications de l'activité du processus enfant (en " "secondes). Utilisez 0 pour désactiver" msgid "Interval between two metering measures" msgstr "Intervalle entre deux mesures" msgid "Interval between two metering reports" msgstr "Intervalle entre deux rapports de mesures" #, python-format msgid "Invalid CIDR %(input)s given as IP prefix." msgstr "CIDR non valide %(input)s fourni comme préfixe IP." #, python-format msgid "" "Invalid CIDR %s for IPv6 address mode. OpenStack uses the EUI-64 address " "format, which requires the prefix to be /64." msgstr "" "CIDR %s non valide pour le mode d'adressage IPv6. OpenStack utilise le " "format d'adresse EUI-64 qui exige le préfixe /64." #, python-format msgid "Invalid Device %(dev_name)s: %(reason)s" msgstr "Unité non valide %(dev_name)s : %(reason)s" #, python-format msgid "" "Invalid action '%(action)s' for object type '%(object_type)s'. Valid " "actions: %(valid_actions)s" msgstr "" "Action non valide %(action)s' pour le type d'objet %(object_type)s'. Actions " "valides : %(valid_actions)s" #, python-format msgid "" "Invalid authentication type: %(auth_type)s, valid types are: " "%(valid_auth_types)s" msgstr "" "Type d'authentification non valide : %(auth_type)s, les types valides sont : " "%(valid_auth_types)s" #, python-format msgid "Invalid content type %(content_type)s." msgstr "Le type de contenu %(content_type)s n'est pas valide." 
#, python-format msgid "Invalid data format for IP pool: '%s'" msgstr "Format de données non valide pour le pool IP : '%s'" #, python-format msgid "Invalid data format for extra-dhcp-opt: %(data)s" msgstr "Format de données non valide pour extra-dhcp-opt : %(data)s" #, python-format msgid "Invalid data format for fixed IP: '%s'" msgstr "Format de données non valide pour l'adresse IP fixe : '%s'" #, python-format msgid "Invalid data format for hostroute: '%s'" msgstr "Format de données non valide pour la route hôte : '%s'" #, python-format msgid "Invalid data format for nameserver: '%s'" msgstr "Format de données non valide pour le serveur de noms : '%s'" #, python-format msgid "Invalid ethertype %(ethertype)s for protocol %(protocol)s." msgstr "ethertype %(ethertype)s non valide pour le protocole %(protocol)s." #, python-format msgid "Invalid extension environment: %(reason)s." msgstr "Environnement d'extensions non valide : %(reason)s." #, python-format msgid "Invalid format for routes: %(routes)s, %(reason)s" msgstr "Format de routes non valide : %(routes)s, %(reason)s" #, python-format msgid "Invalid format: %s" msgstr "Format non valide : %s" #, python-format msgid "Invalid input for %(attr)s. Reason: %(reason)s." msgstr "Entrée non valide pour %(attr)s. Raison : %(reason)s." #, python-format msgid "" "Invalid input. '%(target_dict)s' must be a dictionary with keys: " "%(expected_keys)s" msgstr "" "Entrée non valide. '%(target_dict)s' doit être un dictionnaire avec les " "clés : %(expected_keys)s" #, python-format msgid "Invalid instance state: %(state)s, valid states are: %(valid_states)s" msgstr "" "Etat d'instance non valide : %(state)s, les états valides sont : " "%(valid_states)s" #, python-format msgid "Invalid mapping: '%s'" msgstr "Mappage non valide : '%s'" #, python-format msgid "Invalid network VLAN range: '%(vlan_range)s' - '%(error)s'." msgstr "Plage VLAN de réseau non valide : '%(vlan_range)s' - '%(error)s'." #, python-format msgid "Invalid network VXLAN port range: '%(vxlan_range)s'." msgstr "Réseau non valide pour la plage de ports VXLAN : '%(vxlan_range)s'." #, python-format msgid "Invalid pci slot %(pci_slot)s" msgstr "Port pci non valide %(pci_slot)s" #, python-format msgid "Invalid provider format. Last part should be 'default' or empty: %s" msgstr "" "Format de fournisseur non valide. La dernière partie doit être 'default' ou " "vide : %s" #, python-format msgid "Invalid resource type %(resource_type)s" msgstr "Type de ressource %(resource_type)s non valide" #, python-format msgid "Invalid route: %s" msgstr "Route non valide : %s" msgid "Invalid service provider format" msgstr "Format de fournisseur de service non valide" #, python-format msgid "Invalid service type %(service_type)s." msgstr "Type de service non valide %(service_type)s." #, python-format msgid "" "Invalid value for ICMP %(field)s (%(attr)s) %(value)s. It must be 0 to 255." msgstr "" "Valeur non valide pour ICMP %(field)s (%(attr)s) %(value)s. Elle doit être " "comprise entre 0 et 255." #, python-format msgid "Invalid value for port %(port)s" msgstr "Valeur non valide pour le port %(port)s" msgid "" "Iptables mangle mark used to mark ingress from external network. This mark " "will be masked with 0xffff so that only the lower 16 bits will be used." msgstr "" "Marque Mangle Iptables utilisée pour marquer les entrées du réseau externe. " "Cette marque sera masquée avec 0xffff de sorte que seuls les 16 bits les " "plus bas soient utilisés. 
" msgid "" "Iptables mangle mark used to mark metadata valid requests. This mark will be " "masked with 0xffff so that only the lower 16 bits will be used." msgstr "" "Marque Mangle Iptables utilisée pour marquer les demandes valides de " "métadonnées. Cette marque sera masquée avec 0xffff afin que seuls les 16 " "bits les plus bas soient utilisés. " msgid "" "Keep in track in the database of current resourcequota usage. Plugins which " "do not leverage the neutron database should set this flag to False" msgstr "" "Assurer le suivi de l'utilisation en cours du quota de ressources dans la " "base de données. Les plug-ins qui n'optimisent pas la base de données " "neutron doivent affecter la valeur False à cet indicateur. " msgid "Keepalived didn't respawn" msgstr "Keepalived n'a pas été relancée" msgid "Keepalived didn't spawn" msgstr "Keepalived n'a pas été lancé" #, python-format msgid "" "Kernel HZ value %(value)s is not valid. This value must be greater than 0." msgstr "" "La valeur HZ du noyau %(value)s n'est pas valide. Cette valeur doit être " "supérieure à 0." #, python-format msgid "Key %(key)s in mapping: '%(mapping)s' not unique" msgstr "Clé %(key)s non unique dans le mappage '%(mapping)s'" msgid "L3 agent failure to setup NAT for floating IPs" msgstr "L'agent L3 n'a pas pu configurer NAT pour les IP flottantes" msgid "L3 agent failure to setup floating IPs" msgstr "L'agent L3 n'a pas pu configurer les IP flottantes" #, python-format msgid "Limit must be an integer 0 or greater and not '%d'" msgstr "La limite doit être un entier supérieur ou égal à 0, et non '%d'" msgid "Limit number of leases to prevent a denial-of-service." msgstr "Limiter le nombre de baux pour éviter un déni de service." msgid "List of :" msgstr "Liste de :" msgid "" "List of :: or " "specifying physical_network names usable for VLAN provider and tenant " "networks, as well as ranges of VLAN tags on each available for allocation to " "tenant networks." msgstr "" "Liste de :: ou " "indiquant des noms physical_network utilisables pour les réseaux de " "fournisseurs VLAN et de locataires, ainsi que les plages de libellés VLAN " "disponibles dans le cadre de l'allocation aux réseaux locataires." msgid "" "List of network type driver entrypoints to be loaded from the neutron.ml2." "type_drivers namespace." msgstr "" "Liste des points d'entrée du pilote de type de réseau à charger à partir de " "l'espace de nom neutron.ml2.type_drivers." msgid "" "List of physical_network names with which flat networks can be created. Use " "default '*' to allow flat networks with arbitrary physical_network names. " "Use an empty list to disable flat networks." msgstr "" "Liste de noms de réseau physique (physical_network) qui peuvent être " "utilisés pour créer des réseaux centralisés. Utilisez par défaut '*' pour " "autoriser les réseaux centralisés avec des noms de réseau physique " "arbitraires. Utilisez une liste vide pour désactiver les réseaux " "centralisés." msgid "Local IP address of the VXLAN endpoints." msgstr "Adresse IP locale des noeuds finaux VXLAN." msgid "Location for Metadata Proxy UNIX domain socket." msgstr "Emplacement du socket de domaine UNIX du proxy de métadonnées." msgid "Location of Metadata Proxy UNIX domain socket" msgstr "Emplacement du socket de domaine UNIX du proxy de métadonnées" msgid "Location of pid file of this process." msgstr "Emplacement du fichier pid de ce processus." msgid "Location to store DHCP server config files." 
msgstr "Emplacement de stockage des fichiers de configuration du serveur DHCP." msgid "Location to store IPv6 PD files." msgstr "Emplacement pour stocker les fichiers IPv6 PD" msgid "Location to store IPv6 RA config files" msgstr "Emplacement de stockage des fichiers de configuration IPv6 RA" msgid "Location to store child pid files" msgstr "Emplacement de stockage des fichiers PID enfant" msgid "Location to store keepalived/conntrackd config files" msgstr "" "Emplacement de stockage des fichiers de configuration keepalived/conntrackd" msgid "Log agent heartbeats" msgstr "Consigner les pulsations d'agent" msgid "Loopback IP subnet is not supported if enable_dhcp is True." msgstr "" "Le sous-réseau d'IP de bouclage n'est pas pris en charge si le paramètre " "enable_dhcp est défini sur True." msgid "MTU size of veth interfaces" msgstr "Taille MTU des interfaces veth" msgid "Make the l2 agent run in DVR mode." msgstr "Exécuter l'agent l2 en mode DVR." msgid "Malformed request body" msgstr "Format de corps de demande incorrect" #, python-format msgid "Malformed request body: %(reason)s." msgstr "Format de corps de demande incorrect : %(reason)s" msgid "MaxRtrAdvInterval setting for radvd.conf" msgstr "Paramètre MaxRtrAdvInterval pour radvd.conf" msgid "Maximum number of DNS nameservers per subnet" msgstr "Nombre maximum de serveurs de noms DNS par sous-réseau" msgid "" "Maximum number of L3 agents which a HA router will be scheduled on. If it is " "set to 0 then the router will be scheduled on every agent." msgstr "" "Nombre maximum d'agents L3 sur lesquels un routeur HA sera planifié. Si ce " "paramètre est défini sur 0, le routeur sera planifié sur chaque agent." msgid "Maximum number of allowed address pairs" msgstr "Nombre maximal de paires d'adresses autorisé" msgid "" "Maximum number of fixed ips per port. This option is deprecated and will be " "removed in the N release." msgstr "" "Nombre maximum d'IP fixes par port. Cette option est obsolète et elle sera " "retirée dans la version N." msgid "Maximum number of host routes per subnet" msgstr "Nombre maximal de routes hôte par sous-réseau" msgid "Maximum number of routes per router" msgstr "Nombre maximum de routes par routeur" msgid "" "Metadata Proxy UNIX domain socket mode, 4 values allowed: 'deduce': deduce " "mode from metadata_proxy_user/group values, 'user': set metadata proxy " "socket mode to 0o644, to use when metadata_proxy_user is agent effective " "user or root, 'group': set metadata proxy socket mode to 0o664, to use when " "metadata_proxy_group is agent effective group or root, 'all': set metadata " "proxy socket mode to 0o666, to use otherwise." msgstr "" "Mode du socket de domaine UNIX de proxy de métadonnées, 4 valeurs " "autorisées : 'deduce' : mode de déduction à partir des valeurs de " "metadata_proxy_user/group ; 'user' : mode du socket de proxy de métadonnées " "défini sur 0o644, à utiliser lorsque metadata_proxy_user correspond à la " "racine ou à l'utilisateur effectif de l'agent ; 'group' : mode du socket de " "proxy de métadonnées défini sur 0o664,à utiliser lorsque " "metadata_proxy_group correspond à la racine ou au groupe effectif de " "l'agent ; 'all' : mode du socket de proxy de métadonnées défini sur 0o666, à " "utiliser dans les autres cas." 
msgid "Metering driver" msgstr "Pilote de décompte" #, python-format msgid "Metering label %(label_id)s does not exist" msgstr "Le libellé de mesure %(label_id)s n'existe pas" #, python-format msgid "Metering label rule %(rule_id)s does not exist" msgstr "La règle de libellé de mesure %(rule_id)s n'existe pas" #, python-format msgid "" "Metering label rule with remote_ip_prefix %(remote_ip_prefix)s overlaps " "another" msgstr "" "La règle de libellé de mesure avec remote_ip_prefix %(remote_ip_prefix)s en " "chevauche une autre" msgid "Method cannot be called within a transaction." msgstr "La méthode ne peut pas être appelée au sein d'une transaction." msgid "Migration from distributed router to centralized is not supported" msgstr "" "La migration du routeur distribué vers le routeur centralisé n'est pas prise " "en charge" msgid "MinRtrAdvInterval setting for radvd.conf" msgstr "Paramètre MinRtrAdvInterval pour radvd.conf" msgid "Minimize polling by monitoring ovsdb for interface changes." msgstr "" "Réduire au minimum l'interrogation en surveillant les changements " "d'interface ovsdb." #, python-format msgid "Missing key in mapping: '%s'" msgstr "Clé manquante dans le mappage : '%s'" #, python-format msgid "Missing value in mapping: '%s'" msgstr "Valeur manquante dans le mappage : '%s'" msgid "Multicast IP subnet is not supported if enable_dhcp is True." msgstr "" "Le sous-réseau IP de multidiffusion n'est pas pris en charge si le paramètre " "enable_dhcp est défini sur True." msgid "" "Multicast group for VXLAN. When configured, will enable sending all " "broadcast traffic to this multicast group. When left unconfigured, will " "disable multicast VXLAN mode." msgstr "" "Groupe de multidiffusion pour VXLAN. Lorsque ce paramètre est configuré, il " "permet l'envoi de tout le trafic de diffusion vers ce groupe de " "multidiffusion. Dans le cas contraire, il désactive le mode VXLAN de " "multidiffusion." msgid "" "Multicast group(s) for vxlan interface. A range of group addresses may be " "specified by using CIDR notation. Specifying a range allows different VNIs " "to use different group addresses, reducing or eliminating spurious broadcast " "traffic to the tunnel endpoints. To reserve a unique group for each possible " "(24-bit) VNI, use a /8 such as 239.0.0.0/8. This setting must be the same on " "all the agents." msgstr "" "Groupe(s) de multidiffusion pour l'interface vxlan. Une plage d'adresses de " "groupe peut être spécifiée en utilisant la notation CIDR. Si une plage est " "indiquée, différents VNI peuvent utiliser différentes adresses de groupe, ce " "qui réduit ou élimine le trafic de multidiffusion fallacieux vers les noeuds " "finaux de tunnel. Pour réserver un groupe unique pour chaque VNI possible " "(24 bits), utilisez /8, par exemple 239.0.0.0/8. Ce paramètre doit être " "identique sur tous les agents." #, python-format msgid "Multiple agents with agent_type=%(agent_type)s and host=%(host)s found" msgstr "" "Plusieurs agents trouvés avec agent_type=%(agent_type)s et host=%(host)s" #, python-format msgid "Multiple default providers for service %s" msgstr "Plusieurs fournisseurs par défaut pour le service %s" #, python-format msgid "Multiple plugins for service %s were configured" msgstr "Plusieurs plug-in pour le service %s ont été configurés." 
#, python-format msgid "Multiple providers specified for service %s" msgstr "Plusieurs fournisseurs spécifiés pour le service %s" msgid "Multiple tenant_ids in bulk security group rule create not allowed" msgstr "" "L'existence de plusieurs ID locataire n'est pas autorisée lors de la " "création de règle de groupe de sécurité en bloc." msgid "Must also specify protocol if port range is given." msgstr "" "Un protocole doit également être spécifié si une plage de ports est fournie." msgid "Must specify one or more actions on flow addition or modification" msgstr "" "Doit indiquer une ou plusieurs actions sur l'ajout ou la modification de flux" #, python-format msgid "Name %(dns_name)s is duplicated in the external DNS service" msgstr "Le nom %(dns_name)s est en double dans le service DNS externe" #, python-format msgid "" "Name '%s' must be 1-63 characters long, each of which can only be " "alphanumeric or a hyphen." msgstr "" "Le nom '%s' doit comprendre entre 1 et 63 caractères (seuls les caractères " "alphanumériques et le trait d'union sont admis)." #, python-format msgid "Name '%s' must not start or end with a hyphen." msgstr "Le nom '%s' ne doit pas commencer ni se terminer par un trait d'union." msgid "Name of Open vSwitch bridge to use" msgstr "Nom du pont Open vSwitch à utiliser" msgid "" "Name of nova region to use. Useful if keystone manages more than one region." msgstr "" "Nom de la région nova à utiliser. Utile si keystone gère plusieurs régions." msgid "Name of the FWaaS Driver" msgstr "Nom du pilote FWaaS" msgid "Namespace of the router" msgstr "Espace de nom du routeur" msgid "Native pagination depend on native sorting" msgstr "La mise en page native dépend du tri natif" #, python-format msgid "" "Need to apply migrations from %(project)s contract branch. This will require " "all Neutron server instances to be shutdown before proceeding with the " "upgrade." msgstr "" "La migrations doit être appliquée depuis la branche contract %(project)s. " "Cette opération va nécessiter l'arrêt de toutes les instances de serveur " "Neutron avant la mise à niveau." msgid "Negative delta (downgrade) not supported" msgstr "Delta négatif (rétromigration) non pris en charge" msgid "Negative relative revision (downgrade) not supported" msgstr "Révision relative négative (rétromigration) non prise en charge" #, python-format msgid "" "Network %(network_id)s is already bound to BgpSpeaker %(bgp_speaker_id)s." msgstr "" "Le réseau %(network_id)s est déjà lié au routeur BgpSpeaker " "%(bgp_speaker_id)s." #, python-format msgid "" "Network %(network_id)s is not associated with BGP speaker %(bgp_speaker_id)s." msgstr "" "Le réseau %(network_id)s n'est pas associé au routeur BGP %(bgp_speaker_id)s." #, python-format msgid "Network %(network_id)s is not bound to a BgpSpeaker." msgstr "Le réseau %(network_id)s n'est pas lié à un routeur BGP." #, python-format msgid "Network %(network_id)s is not bound to a IPv%(ip_version)s BgpSpeaker." msgstr "" "Le réseau %(network_id)s n'est pas lié à un routeur BGP IPv%(ip_version)s." #, python-format msgid "Network %s does not contain any IPv4 subnet" msgstr "Le réseau %s ne contient pas de sous-réseau IPv4 " #, python-format msgid "Network %s is not a valid external network" msgstr "Le réseau %s n'est pas un réseau externe valide." 
#, python-format msgid "Network %s is not an external network" msgstr "Le réseau %s n'est pas un réseau externe" #, python-format msgid "" "Network of size %(size)s, from IP range %(parent_range)s excluding IP ranges " "%(excluded_ranges)s was not found." msgstr "" "Le réseau de taille %(size)s, de plage IP %(parent_range)s (hors plages IP " "%(excluded_ranges)s) est introuvable." msgid "Network that will have instance metadata proxied." msgstr "Réseau dont les métadonnées d'instance seront mandatées." #, python-format msgid "Network type value '%s' not supported" msgstr "Valeur de type de réseau '%s' non prise en charge" msgid "Network type value needed by the ML2 plugin" msgstr "Valeur de type de réseau requise par le plug-in ML2" msgid "Network types supported by the agent (gre and/or vxlan)." msgstr "Types de réseau pris en charge par l'agent (gre et/ou vxlan)." msgid "" "Neutron IPAM (IP address management) driver to use. If ipam_driver is not " "set (default behavior), no IPAM driver is used. In order to use the " "reference implementation of Neutron IPAM driver, use 'internal'." msgstr "" "Pilote IPAM (gestion des adresses IP) de Neutron à utiliser. Si le paramètre " "ipam_driver n'est pas défini (comportement par défaut), aucun pilote IPAM " "n'est utilisé. Pour pouvoir utiliser l'implémentation de référence du pilote " "IPAM de Neutron, indiquez 'internal'." msgid "Neutron Service Type Management" msgstr "Gestion du type de service Neutron" msgid "Neutron core_plugin not configured!" msgstr "Neutron core_plugin n'est pas configuré." msgid "Neutron plugin provider module" msgstr "Module du fournisseur de plug-in Neutron" msgid "Neutron quota driver class" msgstr "Classe de pilote du quota Neutron" msgid "New value for first_ip or last_ip has to be specified." msgstr "Une nouvelle valeur doit être spécifiée pour first_ip ou last_ip." msgid "No default router:external network" msgstr "Aucun réseau router:external par défaut" #, python-format msgid "No default subnetpool found for IPv%s" msgstr "Aucun pool de sous-réseaux par défaut trouvé pour IPv%s" msgid "No default subnetpools defined" msgstr "Aucun pool de sous-réseaux défini" #, python-format msgid "No eligible l3 agent associated with external network %s found" msgstr "Aucun agent l3 admissible associé au réseau %s n'a été trouvé" #, python-format msgid "No more IP addresses available for subnet %(subnet_id)s." msgstr "Plus d'adresses IP disponibles pour le sous-réseau %(subnet_id)s." #, python-format msgid "" "No more Virtual Router Identifier (VRID) available when creating router " "%(router_id)s. The limit of number of HA Routers per tenant is 254." msgstr "" "Plus d'identificateur de routeur virtuel disponible lors de la création du " "routeur %(router_id)s. Le nombre maximum de routeurs haute disponibilité par " "locataire est de 254." msgid "No offline migrations pending." msgstr "Aucune migration hors ligne en attente." #, python-format msgid "No providers specified for '%s' service, exiting" msgstr "Aucun fournisseur indiqué pour le service '%s', sortie" #, python-format msgid "No shared key in %s fields" msgstr "Aucune clé partagée dans les zones %s" msgid "Not allowed to manually assign a router to an agent in 'dvr' mode." msgstr "" "Non autorisé à affecter manuellement un routeur à un agent en mode 'dvr'." msgid "Not allowed to manually remove a router from an agent in 'dvr' mode." msgstr "" "Non autorisé à retirer manuellement un routeur d'un agent en mode 'dvr'." 
#, python-format msgid "" "Not enough l3 agents available to ensure HA. Minimum required " "%(min_agents)s, available %(num_agents)s." msgstr "" "Nombre d'agents L3 insuffisant pour assurer la haute disponibilité. Nombre " "minimum requis : %(min_agents)s, nombre disponible : %(num_agents)s." msgid "" "Number of DHCP agents scheduled to host a tenant network. If this number is " "greater than 1, the scheduler automatically assigns multiple DHCP agents for " "a given tenant network, providing high availability for DHCP service." msgstr "" "Nombre d'agents DHCP planifiés pour héberger un réseau locataire. Si ce " "nombre est supérieur à 1, le planificateur affecte automatiquement plusieurs " "agents DHCP pour un réseau locataire donné, ce qui fournit de la haute " "disponibilité au service DHCP. " msgid "Number of RPC worker processes dedicated to state reports queue" msgstr "" "Nombre de processus d'agent RPC dédiés à la file d'attente des rapports " "d'état" msgid "Number of RPC worker processes for service" msgstr "Nombre de processus d'agent RPC pour le service" msgid "Number of backlog requests to configure the metadata server socket with" msgstr "" "Nombre de demandes en attente de configuration avec le socket du serveur de " "métadonnées" msgid "Number of backlog requests to configure the socket with" msgstr "Nombre de demandes en attente avec lequel configurer le socket" msgid "" "Number of bits in an ipv4 PTR zone that will be considered network prefix. " "It has to align to byte boundary. Minimum value is 8. Maximum value is 24. " "As a consequence, range of values is 8, 16 and 24" msgstr "" "Nombre de bits dans une zone PTR ipv4 qui fera office de préfixe réseau. " "Doit s'aligner sur la frontière de bit. La valeur minimum est 8. La valeur " "maximum est 24. Par conséquent, la plage de valeurs est 8, 16 et 24" msgid "" "Number of bits in an ipv6 PTR zone that will be considered network prefix. " "It has to align to nyble boundary. Minimum value is 4. Maximum value is 124. " "As a consequence, range of values is 4, 8, 12, 16,..., 124" msgstr "" "Nombre de bits dans une zone PTR ipv6 qui fera office de préfixe réseau. " "Doit s'aligner sur la frontière nyble. La valeur minimum est 4. La valeur " "maximum est 124. Par conséquent, la plage de valeurs est 4, 8, 12, 16,..., " "124" msgid "" "Number of floating IPs allowed per tenant. A negative value means unlimited." msgstr "" "Nombre d'adresses IP flottantes autorisées par locataire. Une valeur " "négative signifie illimité." msgid "" "Number of networks allowed per tenant. A negative value means unlimited." msgstr "" "Nombre de réseaux autorisés par locataire. Une valeur négative signifie " "illimité." msgid "Number of ports allowed per tenant. A negative value means unlimited." msgstr "" "Nombre de ports autorisés par locataire. Une valeur négative signifie " "illimité." msgid "Number of routers allowed per tenant. A negative value means unlimited." msgstr "" "Nombre de routeurs autorisés par locataire. Une valeur négative signifie " "illimité" msgid "" "Number of seconds between sending events to nova if there are any events to " "send." msgstr "" "Nombre de secondes entre deux envois d'événements à nova s'il y a des " "événements à envoyer." msgid "Number of seconds to keep retrying to listen" msgstr "Nombre de secondes à attendre avant d'essayer d'écouter à nouveau" msgid "" "Number of security groups allowed per tenant. A negative value means " "unlimited." msgstr "" "Nombre de groupes de sécurité autorisés par locataire. 
Une valeur négative " "signifie illimité." msgid "" "Number of security rules allowed per tenant. A negative value means " "unlimited." msgstr "" "Nombre de règles de sécurité autorisées par locataire. Une valeur négative " "signifie illimité." msgid "" "Number of separate API worker processes for service. If not specified, the " "default is equal to the number of CPUs available for best performance." msgstr "" "Nombre de processus d'agent d'API distincts pour le service. Si ce nombre " "n'est pas spécifié, la valeur par défaut est égale au nombre d'UC " "disponibles pour optimiser les performances. " msgid "" "Number of separate worker processes for metadata server (defaults to half of " "the number of CPUs)" msgstr "" "Nombre de processus de traitement distincts pour le serveur de métadonnées " "(par défaut, la moitié du nombre d'unités centrales)" msgid "Number of subnets allowed per tenant, A negative value means unlimited." msgstr "" "Nombre de sous-réseaux autorisés par locataire. Une valeur négative signifie " "illimité." msgid "" "Number of threads to use during sync process. Should not exceed connection " "pool size configured on server." msgstr "" "Nombres d'unités d'exécution à utiliser durant le processus de " "synchronisation. Ce nombre ne doit pas être supérieur à la taille de pool de " "connexion configurée sur le serveur." msgid "OK" msgstr "OK" msgid "" "OVS datapath to use. 'system' is the default value and corresponds to the " "kernel datapath. To enable the userspace datapath set this value to 'netdev'." msgstr "" "Chemin de données OVS à utiliser. 'system' est la valeur par défaut qui " "correspond au chemin de données du noyau. Pour activer le chemin de données " "de l'espace utilisateur, définissez cette valeur sur 'netdev'." msgid "OVS vhost-user socket directory." msgstr "Répertoire de socket OVS vhost-user." #, python-format msgid "OVSDB Error: %s" msgstr "Erreur OVSDB : %s " #, python-format msgid "Object action %(action)s failed because: %(reason)s." msgstr "Echec de l'action de l'objet %(action)s car : %(reason)s" msgid "Only admin can view or configure quota" msgstr "Seul l'administrateur peut afficher ou configurer des quotas" msgid "Only admin is authorized to access quotas for another tenant" msgstr "" "Seul l'administrateur est autorisé à accéder aux quotas d'un autre locataire" msgid "Only admins can manipulate policies on networks they do not own." msgstr "" "Seuls les administrateurs peuvent manipuler des stratégies sur des réseaux " "qu'ils ne possèdent pas. " msgid "Only admins can manipulate policies on objects they do not own" msgstr "" "Seuls les administrateurs peuvent gérer des stratégies sur des objets qui ne " "leur appartiennent pas" msgid "Only allowed to update rules for one security profile at a time" msgstr "" "Les règles peuvent uniquement être mises à jour pour un profil de sécurité à " "la fois." msgid "Only remote_ip_prefix or remote_group_id may be provided." msgstr "Seul remote_ip_prefix ou remote_group_id peut être fourni." msgid "OpenFlow interface to use." msgstr "Interface OpenFlow à utiliser. " #, python-format msgid "" "Operation %(op)s is not supported for device_owner %(device_owner)s on port " "%(port_id)s." msgstr "" "L'opération %(op)s n'est pas prise en charge pour device_owner " "%(device_owner)s sur le port %(port_id)s." 
#, python-format msgid "Operation not supported on device %(dev_name)s" msgstr "Opération non prise en charge sur l'unité %(dev_name)s" msgid "" "Ordered list of network_types to allocate as tenant networks. The default " "value 'local' is useful for single-box testing but provides no connectivity " "between hosts." msgstr "" "Liste ordonnée des éléments network_types à allouer en tant que réseaux " "locataires. La valeur par défaut 'local' est utile pour les tests single-box " "mais elle ne fournit aucune connectivité entre les hôtes." msgid "Override the default dnsmasq settings with this file." msgstr "Remplacez les paramètres dnsmasq par défaut par ce fichier." msgid "Owner type of the device: network/compute" msgstr "Type de propriétaire de l'unité : réseau/ordinateur" msgid "POST requests are not supported on this resource." msgstr "Les demandes POST ne sont pas prises en charge sur cette ressource." #, python-format msgid "Package %s not installed" msgstr "Le package %s n'est pas installé" #, python-format msgid "Parameter %(param)s must be of %(param_type)s type." msgstr "Le paramètre %(param)s doit être de type %(param_type)s." #, python-format msgid "Parsing bridge_mappings failed: %s." msgstr "Echec de l'analyse syntaxique des bridge_mappings : %s." msgid "Parsing supported pci_vendor_devs failed" msgstr "Echec de l'analyse syntaxique des pci_vendor_devs pris en charge" msgid "Password for connecting to designate in admin context" msgstr "" "Mot de passe pour la connexion au réseau désigné dans un contexte admin" #, python-format msgid "Password not specified for authentication type=%(auth_type)s." msgstr "" "Mot de passe non spécifié pour le type d'authentification=%(auth_type)s." msgid "Path to PID file for this process" msgstr "Chemin d'accès au fichier PID pour ce processus" msgid "Path to the router directory" msgstr "Chemin d'accès au répertoire du routeur" msgid "Peer patch port in integration bridge for tunnel bridge." msgstr "" "Port correctif homologue dans le pont d'intégration pour le pont de tunnel." msgid "Peer patch port in tunnel bridge for integration bridge." msgstr "" "Port correctif homologue dans le pont d'intégration tunnel pour le pont " "d'intégration." msgid "Per-tenant subnet pool prefix quota exceeded." msgstr "Quota de préfixes de pool de sous-réseaux par locataire dépassé." msgid "Phase upgrade options do not accept revision specification" msgstr "" "Les options de mise à niveau de phase n'acceptent pas la spécification de " "révision" msgid "Ping timeout" msgstr "Délai d'attente de la commande ping" #, python-format msgid "Plugin '%s' not found." msgstr "Le plugin '%s' est introuvable." msgid "Plugin does not support updating provider attributes" msgstr "" "Le plug-in ne prend pas en charge la mise à jour des attributs de fournisseur" msgid "Policy configuration policy.json could not be found." msgstr "La configuration de stratégie policy.json est introuvable." #, python-format msgid "Port %(id)s does not have fixed ip %(address)s" msgstr "Le port %(id)s ne dispose pas de l'adresse IP fixe %(address)s." #, python-format msgid "Port %(port)s does not exist on %(bridge)s!" msgstr "Le port %(port)s au sein du pont %(bridge)s n'existe pas" #, python-format msgid "Port %(port_id)s is already acquired by another DHCP agent" msgstr "Le port %(port_id)s est déjà acquis par un autre agent DHCP" #, python-format msgid "" "Port %(port_id)s is associated with a different tenant than Floating IP " "%(floatingip_id)s and therefore cannot be bound." 
msgstr "" "Le port %(port_id)s est associé à un locataire différent de celui de " "l'adresse IP flottante %(floatingip_id)s et ne peut donc pas être lié. " #, python-format msgid "Port %(port_id)s is not managed by this agent. " msgstr "Le port %(port_id)s n'est pas géré par cet agent. " #, python-format msgid "Port %s does not exist" msgstr "Le port %s n'existe pas" #, python-format msgid "" "Port %s has multiple fixed IPv4 addresses. Must provide a specific IPv4 " "address when assigning a floating IP" msgstr "" "Le port %s comporte plusieurs adresses IPv4 fixes. Une adresse IPv4 " "spécifique doit être fournie lors de l'affectation d'une adresse IP " "flottante." msgid "" "Port Security must be enabled in order to have allowed address pairs on a " "port." msgstr "" "La sécurité du port doit être activée pour avoir les paires d'adresses " "autorisées sur un port." msgid "" "Port has security group associated. Cannot disable port security or ip " "address until security group is removed" msgstr "" "Un groupe de sécurité est associé au port. Impossible de désactiver la " "sécurité ou l'adresse IP du port tant que le groupe de sécurité n'a pas été " "retiré" msgid "" "Port security must be enabled and port must have an IP address in order to " "use security groups." msgstr "" "La sécurité du port doit être activée et le port doit avoir une adresse IP " "pour utiliser des groupes de sécurité." msgid "" "Port to listen on for OpenFlow connections. Used only for 'native' driver." msgstr "" "Port à utiliser pour l'écoute des connexions OpenFlow. Utilisé uniquement " "pour le pilote 'natif'." #, python-format msgid "Prefix '%(prefix)s' not supported in IPv%(version)s pool." msgstr "Préfixe '%(prefix)s' non pris en charge dans le pool IPv%(version)s." msgid "Prefix Delegation can only be used with IPv6 subnets." msgstr "" "La délégation de préfixe peut uniquement être utilisée avec des sous-réseaux " "IPv6. " msgid "Private key of client certificate." msgstr "Clé privée pour le certificat client." #, python-format msgid "Probe %s deleted" msgstr "Sonde %s supprimée" #, python-format msgid "Probe created : %s " msgstr "Sonde créée : %s " msgid "Process is already started" msgstr "Le processus est déjà démarré" msgid "Process is not running." msgstr "Le processus n'est pas en fonctionnement." msgid "Protocol to access nova metadata, http or https" msgstr "Protocole d'accès aux métadonnées de nova, HTTP ou https" #, python-format msgid "Provider name %(name)s is limited by %(len)s characters" msgstr "Le nom de fournisseur %(name)s est limité à %(len)s caractères" #, python-format msgid "QoS Policy %(policy_id)s is used by %(object_type)s %(object_id)s." msgstr "" "Stratégie QoS %(policy_id)s utilisée par %(object_type)s %(object_id)s." #, python-format msgid "" "QoS binding for network %(net_id)s and policy %(policy_id)s could not be " "found." msgstr "" "La liaison QoS du réseau %(net_id)s et de la stratégie %(policy_id)s est " "introuvable." #, python-format msgid "" "QoS binding for port %(port_id)s and policy %(policy_id)s could not be found." msgstr "" "La liaison QoS du port %(port_id)s et de la stratégie %(policy_id)s est " "introuvable." #, python-format msgid "QoS policy %(policy_id)s could not be found." msgstr "La stratégie de QoS %(policy_id)s est introuvable." #, python-format msgid "QoS rule %(rule_id)s for policy %(policy_id)s could not be found." msgstr "" "La règle QoS %(rule_id)s pour la stratégie %(policy_id)s est introuvable." 
#, python-format msgid "RBAC policy of type %(object_type)s with ID %(id)s not found" msgstr "" "La stratégie RBAC de type %(object_type)s avec l'ID %(id)s est introuvable" #, python-format msgid "" "RBAC policy on object %(object_id)s cannot be removed because other objects " "depend on it.\n" "Details: %(details)s" msgstr "" "La stratégie RBAC sur l'objet %(object_id)s ne peut pas être retirée car " "d'autres objets en dépendent.\n" "Détails : %(details)s" msgid "" "Range of seconds to randomly delay when starting the periodic task scheduler " "to reduce stampeding. (Disable by setting to 0)" msgstr "" "Intervalle, en secondes, de retard aléatoire lors du démarrage du " "planificateur de tâches périodiques permettant de réduire les encombrements " "(définissez ce chiffre sur 0 pour désactiver la fonction)." msgid "Ranges must be in the same IP version" msgstr "Les plages doivent être dans la même version IP" msgid "Ranges must be netaddr.IPRange" msgstr "Les plages doivent être netaddr.IPRange" msgid "Ranges must not overlap" msgstr "Les plages ne doivent pas se chevaucher" #, python-format msgid "" "Received type '%(type)s' and value '%(value)s'. Expecting netaddr.EUI type." msgstr "Type '%(type)s' et valeur '%(value)s' reçus. Type netaddr.EUI attendu." #, python-format msgid "" "Received type '%(type)s' and value '%(value)s'. Expecting netaddr.IPAddress " "type." msgstr "" "Type '%(type)s' et valeur '%(value)s' reçus. Type netaddr.IPAddress attendu." #, python-format msgid "" "Received type '%(type)s' and value '%(value)s'. Expecting netaddr.IPNetwork " "type." msgstr "" "Type '%(type)s' et valeur '%(value)s' reçus. Type netaddr.IPNetwork attendu." #, python-format msgid "" "Release aware branch labels (%s) are deprecated. Please switch to expand@ " "and contract@ labels." msgstr "" "Les libellés de branche orientés édition (%s) sont obsolètes. Passez aux " "libellés expand@ et contract@. " msgid "Remote metadata server experienced an internal server error." msgstr "" "Le serveur de métadonnées distant a subi une erreur de serveur interne." msgid "" "Repository does not contain HEAD files for contract and expand branches." msgstr "" "Le référentiel ne contient pas les fichiers HEAD pour les branches contract " "et expand." msgid "" "Representing the resource type whose load is being reported by the agent. " "This can be \"networks\", \"subnets\" or \"ports\". When specified (Default " "is networks), the server will extract particular load sent as part of its " "agent configuration object from the agent report state, which is the number " "of resources being consumed, at every report_interval.dhcp_load_type can be " "used in combination with network_scheduler_driver = neutron.scheduler." "dhcp_agent_scheduler.WeightScheduler When the network_scheduler_driver is " "WeightScheduler, dhcp_load_type can be configured to represent the choice " "for the resource being balanced. Example: dhcp_load_type=networks" msgstr "" "Représentation du type de ressource dont la charge est signalée par l'agent. " "Il peut s'agir de \"réseaux\", \"sous-réseaux\" ou \"ports\". Lorsque le " "type est spécifié (la valeur par défaut est réseaux), le serveur extrait la " "charge particulière envoyée en tant que composant de son objet de " "configuration d'agent depuis l'état de rapport d'agent, qui correspond au " "nombre de ressources consommées, à chaque intervalle report_interval." 
"dhcp_load_type, et pouvant être utilisées en combinaison avec " "network_scheduler_driver = neutron.scheduler.dhcp_agent_scheduler." "WeightScheduler Lorsque network_scheduler_driver est WeightScheduler, " "dhcp_load_type peut être configuré pour représenter le choix pour la " "ressource équilibrée. Exemple : dhcp_load_type=networks" msgid "Request Failed: internal server error while processing your request." msgstr "" "Echec de la demande : erreur de serveur interne lors du traitement de votre " "demande." #, python-format msgid "" "Request contains duplicate address pair: mac_address %(mac_address)s " "ip_address %(ip_address)s." msgstr "" "La demande contient une paire d'adresses en double : mac_address " "%(mac_address)s ip_address %(ip_address)s." #, python-format msgid "" "Requested subnet with cidr: %(cidr)s for network: %(network_id)s overlaps " "with another subnet" msgstr "" "Le sous-réseau demandé avec CIDR : %(cidr)s pour le réseau : %(network_id)s " "chevauche un autre sous-réseau" msgid "" "Reset flow table on start. Setting this to True will cause brief traffic " "interruption." msgstr "" "Réinitialiser la table de flux au démarrage. Affecter la valeur True à ce " "paramètre entraîne une courte interruption du trafic." #, python-format msgid "Resource %(resource)s %(resource_id)s could not be found." msgstr "La ressource %(resource)s %(resource_id)s est introuvable." #, python-format msgid "Resource %(resource_id)s of type %(resource_type)s not found" msgstr "Ressource %(resource_id)s de type %(resource_type)s introuvable" #, python-format msgid "" "Resource '%(resource_id)s' is already associated with provider " "'%(provider)s' for service type '%(service_type)s'" msgstr "" "La ressource '%(resource_id)s' est déjà associée au fournisseur " "'%(provider)s' pour le type de service '%(service_type)s'" msgid "Resource body required" msgstr "Corps de ressource obligatoire" msgid "" "Resource name(s) that are supported in quota features. This option is now " "deprecated for removal." msgstr "" "Nom(s) de ressource pris en charge dans les fonctions de quota. Cette option " "est désormais obsolète et devrait être retirée." msgid "Resource not found." msgstr "Ressource introuvable." msgid "Resources required" msgstr "Ressources obligatoires" msgid "" "Root helper application. Use 'sudo neutron-rootwrap /etc/neutron/rootwrap." "conf' to use the real root filter facility. Change to 'sudo' to skip the " "filtering and just run the command directly." msgstr "" "Application d'assistant racine. Utilisez 'sudo neutron-rootwrap /etc/neutron/" "rootwrap.conf' pour utiliser la véritable fonction de filtre racine. " "Remplacez par 'sudo' pour ignorer le filtrage et exécuter simplement la " "commande directement." msgid "Root helper daemon application to use when possible." msgstr "Application de démon d'assistant racine à utiliser si possible." msgid "Root permissions are required to drop privileges." msgstr "Les droits root sont obligatoires pour supprimer des privilèges." #, python-format msgid "Route %(cidr)s not advertised for BGP Speaker %(speaker_as)d." msgstr "Route %(cidr)s non publiée pour le routeur BGP %(speaker_as)d." #, python-format msgid "Router %(router_id)s %(reason)s" msgstr "Routeur %(router_id)s %(reason)s" #, python-format msgid "Router %(router_id)s could not be found" msgstr "Le routeur %(router_id)s est introuvable." 
#, python-format msgid "Router %(router_id)s does not have an interface with id %(port_id)s" msgstr "" "Le routeur %(router_id)s ne comporte pas d'interface avec l'ID %(port_id)s." #, python-format msgid "Router %(router_id)s has no interface on subnet %(subnet_id)s" msgstr "" "Le routeur %(router_id)s ne comporte pas d'interface sur le sous-réseau " "%(subnet_id)s." #, python-format msgid "Router '%(router_id)s' cannot be both DVR and HA." msgstr "Le routeur '%(router_id)s' ne peut pas être à la fois DVR et HA." #, python-format msgid "Router '%(router_id)s' is not compatible with this agent." msgstr "Le routeur '%(router_id)s' n'est pas compatible avec cet agent." #, python-format msgid "Router already has a port on subnet %s" msgstr "Le routeur dispose déjà d'un port sur le sous-réseau %s." #, python-format msgid "" "Router interface for subnet %(subnet_id)s on router %(router_id)s cannot be " "deleted, as it is required by one or more floating IPs." msgstr "" "L'interface de routeur du sous-réseau %(subnet_id)s sur le routeur " "%(router_id)s ne peut pas être supprimée car elle est requise par une ou " "plusieurs adresses IP flottantes." #, python-format msgid "" "Router interface for subnet %(subnet_id)s on router %(router_id)s cannot be " "deleted, as it is required by one or more routes." msgstr "" "L'interface de routeur du sous-réseau %(subnet_id)s sur le routeur " "%(router_id)s ne peut pas être supprimée car elle est requise par une ou " "plusieurs routes." msgid "Router port must have at least one fixed IP" msgstr "Le port de routeur doit avoir au moins une IP fixe" msgid "Router that will have connected instances' metadata proxied." msgstr "Routeur dont les métadonnées d'instance connectées seront mandatées." #, python-format msgid "" "Row doesn't exist in the DB. Request info: Table=%(table)s. Columns=" "%(columns)s. Records=%(records)s." msgstr "" "La ligne n'existe pas dans la base de données. Infos demande : Table=" "%(table)s. Colonnes=%(columns)s. Enregistrements=%(records)s." msgid "Run as daemon." msgstr "Exécuter en tant que démon." #, python-format msgid "Running %(cmd)s (%(desc)s) for %(project)s ..." msgstr "Exécution de %(cmd)s (%(desc)s) pour %(project)s... " #, python-format msgid "Running %(cmd)s for %(project)s ..." msgstr "Exécution de %(cmd)s pour %(project)s... " msgid "Running without keystone AuthN requires that tenant_id is specified" msgstr "L'exécution sans keystone AuthN nécessite que tenant_id soit spécifié" msgid "" "Seconds between nodes reporting state to server; should be less than " "agent_down_time, best if it is half or less than agent_down_time." msgstr "" "Secondes entre les noeuds signalant l'état au serveur ; cette valeur doit " "être inférieure à agent_down_time, et au mieux, inférieure ou égale à la " "moitié de agent_down_time." msgid "Seconds between running periodic tasks" msgstr "Secondes écoulées entre l'exécution de deux tâches périodiques" msgid "" "Seconds to regard the agent is down; should be at least twice " "report_interval, to be sure the agent is down for good." msgstr "" "Nombre de secondes avant de considérer que l'agent est arrêté ; cette valeur " "doit être au moins le double de report_interval, afin de s'assurer que " "l'agent est effectivement arrêté." #, python-format msgid "Security Group %(id)s %(reason)s." msgstr "Groupe de sécurité %(id)s %(reason)s." #, python-format msgid "Security Group Rule %(id)s %(reason)s." msgstr "Règle de groupe de sécurité %(id)s %(reason)s." 
#, python-format msgid "Security group %(id)s does not exist" msgstr "Le groupe de sécurité %(id)s n'existe pas." #, python-format msgid "Security group rule %(id)s does not exist" msgstr "La règle de groupe de sécurité %(id)s n'existe pas." #, python-format msgid "Security group rule already exists. Rule id is %(rule_id)s." msgstr "" "Une règle de groupe de sécurité existe déjà. L'ID règle est %(rule_id)s." #, python-format msgid "" "Security group rule for ethertype '%(ethertype)s' not supported. Allowed " "values are %(values)s." msgstr "" "Règle de groupe de sécurité pour ethertype '%(ethertype)s' non prise en " "charge. Les valeurs autorisées sont %(values)s." #, python-format msgid "" "Security group rule protocol %(protocol)s not supported. Only protocol " "values %(values)s and integer representations [0 to 255] are supported." msgstr "" "Le protocole %(protocol)s de la règle du groupe de sécurité n'est pas pris " "en charge. Seules les valeurs de protocole %(values)s et les représentations " "sous forme d'entier [0 à 255] sont prises en charge." msgid "Segments and provider values cannot both be set." msgstr "" "Impossible de définir à la fois des segments et des valeurs de fournisseur." msgid "Selects the Agent Type reported" msgstr "Sélectionne le type d'agent signalé" msgid "" "Send notification to nova when port data (fixed_ips/floatingip) changes so " "nova can update its cache." msgstr "" "Envoyer une notification à nova lors de la modification des données de port " "(fixed_ips/floatingip) pour que nova puisse mettre à jour son cache." msgid "Send notification to nova when port status changes" msgstr "Envoyer une notification à nova lors du changement du statut de port" msgid "" "Send this many gratuitous ARPs for HA setup, if less than or equal to 0, the " "feature is disabled" msgstr "" "Envoyez ces nombreux protocoles de résolution d'adresse gratuits pour la " "configuration HA (haute disponibilité) ; si la valeur est inférieure ou " "égale à 0, la fonction est désactivée" #, python-format msgid "Service Profile %(sp_id)s could not be found." msgstr "Le profil de service %(sp_id)s est introuvable." #, python-format msgid "Service Profile %(sp_id)s is already associated with flavor %(fl_id)s." msgstr "" "Le profil de service %(sp_id)s est déjà associé à la version %(fl_id)s." #, python-format msgid "Service Profile %(sp_id)s is not associated with flavor %(fl_id)s." msgstr "" "Le profil de service %(sp_id)s n'est pas associé à la version %(fl_id)s." #, python-format msgid "Service Profile %(sp_id)s is used by some service instance." msgstr "" "Le profil de service %(sp_id)s est utilisé par certaines instances de " "service." #, python-format msgid "Service Profile driver %(driver)s could not be found." msgstr "Le pilote de profil de service %(driver)s est introuvable." msgid "Service Profile is not enabled." msgstr "Le profil de service n'est pas activé." msgid "Service Profile needs either a driver or metainfo." msgstr "Le profil de service a besoin d'un pilote ou d'infos de métadonnées." #, python-format msgid "" "Service provider '%(provider)s' could not be found for service type " "%(service_type)s" msgstr "" "Fournisseur de services '%(provider)s' introuvable pour le type de service " "%(service_type)s" msgid "Service to handle DHCPv6 Prefix delegation." msgstr "Service de traitement de la délégation de préfixe DHCPv6. 
" #, python-format msgid "Service type %(service_type)s does not have a default service provider" msgstr "" "Le type de service %(service_type)s ne possède pas de fournisseur de " "services par défaut" msgid "" "Set new timeout in seconds for new rpc calls after agent receives SIGTERM. " "If value is set to 0, rpc timeout won't be changed" msgstr "" "Définir un nouveau délai d'attente (en secondes) pour les nouveaux appels " "RPC après que l'agent a reçu SIGTERM. Si la valeur est définie sur 0, le " "délai d'attente RPC reste inchangé" msgid "" "Set or un-set the don't fragment (DF) bit on outgoing IP packet carrying GRE/" "VXLAN tunnel." msgstr "" "Définissez ou annulez la définition du bit DF sur le paquet IP sortant " "véhiculant le tunnel GRE/VXLAN." msgid "" "Set or un-set the tunnel header checksum on outgoing IP packet carrying GRE/" "VXLAN tunnel." msgstr "" "Définir ou annuler la définition du total de contrôle de l'en-tête de tunnel " "sur un paquet IP sortant qui transporte le tunnel GRE/VXLAN. " msgid "Shared address scope can't be unshared" msgstr "Impossible d'annuler le partage d'une portée d'adresse partagée" msgid "" "Specifying 'tenant_id' other than authenticated tenant in request requires " "admin privileges" msgstr "" "Pour indiquer un 'tenant_id' autre qu'un projet authentifié dans la demande, " "vous devez disposer de droits admin " msgid "String prefix used to match IPset names." msgstr "Préfixe de chaîne utilisé pour correspondre aux noms IPset." #, python-format msgid "Sub-project %s not installed." msgstr "Le sous-projet %s n'est pas installé." msgid "Subnet for router interface must have a gateway IP" msgstr "" "Le sous-réseau de l'interface de routeur doit avoir une adresse IP de " "passerelle." msgid "" "Subnet has a prefix length that is incompatible with DHCP service enabled." msgstr "" "La longueur de préfixe du sous-réseau est incompatible avec le service DHCP " "activé." #, python-format msgid "Subnet pool %(subnetpool_id)s could not be found." msgstr "Le pool de sous-réseaux %(subnetpool_id)s est introuvable." msgid "Subnet pool has existing allocations" msgstr "Le pool de sous-réseaux dispose d'allocations existantes" msgid "Subnet used for the l3 HA admin network." msgstr "" "Sous-réseau utilisé pour le réseau administrateur haute disponibilité L3." msgid "" "Subnets hosted on the same network must be allocated from the same subnet " "pool." msgstr "" "Les sous-réseaux hébergés sur le même réseau doivent être alloués à partir " "du même pool de sous-réseaux." msgid "Suffix to append to all namespace names." msgstr "Suffixe à ajouter à tous les noms d'espace de nom." msgid "" "System-wide flag to determine the type of router that tenants can create. " "Only admin can override." msgstr "" "Indicateur système pour déterminer le type de routeur que les locataires " "peuvent créer. Seul l'administrateur dispose du droit de substitution." msgid "TCP Port to listen for metadata server requests." msgstr "Port TCP d'écoute des demandes de serveur de métadonnées" msgid "TCP Port used by Neutron metadata namespace proxy." msgstr "Port TCP utilisé par le proxy d'espace de nom de métadonnées Neutron" msgid "TCP Port used by Nova metadata server." msgstr "Port TCP utilisé par le serveur de métadonnées Nova" #, python-format msgid "TLD '%s' must not be all numeric" msgstr "TLD '%s' ne doit pas être entièrement numérique" msgid "TOS for vxlan interface protocol packets." msgstr "TOS pour les paquets du protocole d'interface vxlan." 
msgid "TTL for vxlan interface protocol packets." msgstr "Durée de vie pour les paquets du protocole d'interface vxlan." #, python-format msgid "Table %s can only be queried by UUID" msgstr "La table %s ne peut être interrogée que par UUID" #, python-format msgid "Tag %(tag)s could not be found." msgstr "Balise %(tag)s introuvable." #, python-format msgid "Tenant %(tenant_id)s not allowed to create %(resource)s on this network" msgstr "" "Locataire %(tenant_id)s non autorisé à créer %(resource)s sur ce réseau" msgid "Tenant id for connecting to designate in admin context" msgstr "" "ID locataire pour la connexion au réseau désigné dans un contexte admin" msgid "Tenant name for connecting to designate in admin context" msgstr "" "Nom de locataire pour la connexion au réseau désigné dans un contexte admin" msgid "Tenant network creation is not enabled." msgstr "La création de réseau locataire n'est pas activée." msgid "Tenant-id was missing from quota request." msgstr "ID locataire manquant dans la demande de quota." msgid "" "The 'gateway_external_network_id' option must be configured for this agent " "as Neutron has more than one external network." msgstr "" "L'option 'gateway_external_network_id' doit être configurée pour cet agent " "car Neutron a plusieurs réseaux externes." msgid "" "The DHCP agent will resync its state with Neutron to recover from any " "transient notification or RPC errors. The interval is number of seconds " "between attempts." msgstr "" "L'agent DHCP va resynchroniser son état avec Neutron afin de récupérer après " "une notification transitoire ou des erreurs RPC. L'intervalle est le nombre " "de secondes entre les tentatives." msgid "" "The DHCP server can assist with providing metadata support on isolated " "networks. Setting this value to True will cause the DHCP server to append " "specific host routes to the DHCP request. The metadata service will only be " "activated when the subnet does not contain any router port. The guest " "instance must be configured to request host routes via DHCP (Option 121). " "This option doesn't have any effect when force_metadata is set to True." msgstr "" "Le serveur DHCP peut contribuer à fournir un support de métadonnées sur des " "réseaux isolés. Si cette valeur est définie sur True, le serveur DHCP ajoute " "des routes hôtes spécifiques à la demande DHCP. Le service de métadonnées " "est activé uniquement quand le sous-réseau ne contient aucun port de " "routeur. L'instance invitée doit être configurée pour la demande de routes " "hôtes via DHCP (Option 121). Cette option n'a aucun effet lorsque " "force_metadata est défini sur True." #, python-format msgid "" "The HA Network CIDR specified in the configuration file isn't valid; " "%(cidr)s." msgstr "" "Le CIDR du réseau haute disponibilité indiqué dans le fichier de " "configuration n'est pas valide ; %(cidr)s." msgid "The UDP port to use for VXLAN tunnels." msgstr "Port UDP à utiliser pour les tunnels VXLAN." #, python-format msgid "" "The address allocation request could not be satisfied because: %(reason)s" msgstr "" "Impossible de répondre à la demande d'allocation d'adresse. Motif : " "%(reason)s" msgid "The advertisement interval in seconds" msgstr "Intervalle de publication en secondes" #, python-format msgid "The allocation pool %(pool)s is not valid." msgstr "Le pool d'allocation %(pool)s n'est pas valide." #, python-format msgid "" "The allocation pool %(pool)s spans beyond the subnet cidr %(subnet_cidr)s." 
msgstr "" "Le pool d'allocation %(pool)s s'étend au-delà du routage CIDR de sous-réseau " "%(subnet_cidr)s." #, python-format msgid "" "The attribute '%(attr)s' is reference to other resource, can't used by sort " "'%(resource)s'" msgstr "" "L'attribut '%(attr)s' fait référence à une autre ressource, impossible de " "l'utiliser pour le type '%(resource)s'" msgid "" "The base MAC address Neutron will use for VIFs. The first 3 octets will " "remain unchanged. If the 4th octet is not 00, it will also be used. The " "others will be randomly generated." msgstr "" "Adresse MAC de base que Neutron va utiliser pour les VIF. Les 3 premiers " "octets demeurent inchangés. Si le 4e octet est différent de 00, il sera " "également utilisé. Les autres seront générés de manière aléatoire." msgid "" "The base mac address used for unique DVR instances by Neutron. The first 3 " "octets will remain unchanged. If the 4th octet is not 00, it will also be " "used. The others will be randomly generated. The 'dvr_base_mac' *must* be " "different from 'base_mac' to avoid mixing them up with MAC's allocated for " "tenant ports. A 4 octet example would be dvr_base_mac = fa:16:3f:4f:00:00. " "The default is 3 octet" msgstr "" "Adresse MAC de base utilisée par Neutron pour les instances DVR uniques. Les " "3 premiers octets restent inchangés. Si le 4e octet est différent de 00, il " "sera également utilisé. Les autres seront générés de manière aléatoire. " "L'adresse 'dvr_base_mac' *doit* être différente de l'adresse 'base_mac' pour " "éviter de les confondre avec des adresses MAC allouées pour des ports " "locataires. Exemple à 4 octets : dvr_base_mac = fa:16:3f:4f:00:00. 3 octets " "sont utilisés par défaut." msgid "" "The connection string for the native OVSDB backend. Requires the native " "ovsdb_interface to be enabled." msgstr "" "Chaîne de connexion pour le back-end OVSDB natif. Requiert l'activation de " "l'interface ovsdb_interface native." msgid "The core plugin Neutron will use" msgstr "Le plug-in core de Neutron utilisera" #, python-format msgid "" "The dns_name passed is a FQDN. Its higher level labels must be equal to the " "dns_domain option in neutron.conf, that has been set to '%(dns_domain)s'. It " "must also include one or more valid DNS labels to the left of " "'%(dns_domain)s'" msgstr "" "L'élément dns_name transmis est un nom FQDN. Ses libellés de niveau " "supérieur doivent correspondre à l'option dns_domain dans neutron.conf, dont " "la valeur est %(dns_domain)s'. Il doit également inclure un ou plusieurs " "libellés DNS valides à la gauche de '%(dns_domain)s'" #, python-format msgid "" "The dns_name passed is a PQDN and its size is '%(dns_name_len)s'. The " "dns_domain option in neutron.conf is set to %(dns_domain)s, with a length of " "'%(higher_labels_len)s'. When the two are concatenated to form a FQDN (with " "a '.' at the end), the resulting length exceeds the maximum size of " "'%(fqdn_max_len)s'" msgstr "" "L'élément dns_name transmis est un nom PQDN et sa taille est " "%(dns_name_len)s'. L'option dns_domain dans neutron.conf a pour valeur " "%(dns_domain)s et sa longueur est '%(higher_labels_len)s'. Lorsque les deux " "sont concaténés pour former un nom FQDN (avec un '.' à la fin), la longueur " "obtenue dépasse la taille maximale de '%(fqdn_max_len)s'" msgid "The driver used to manage the DHCP server." msgstr "Pilote utilisé pour gérer le serveur DHCP" msgid "The driver used to manage the virtual interface." 
msgstr "Pilote utilisé pour gérer l'interface virtuelle" msgid "" "The email address to be used when creating PTR zones. If not specified, the " "email address will be admin@" msgstr "" "Adresse e-mail à utiliser lors de la création de zones PTR. Si elle n'est " "pas indiquée, il s'agira de l'adresse admin@" #, python-format msgid "" "The following device_id %(device_id)s is not owned by your tenant or matches " "another tenants router." msgstr "" "Le device_id %(device_id)s suivant n'appartient pas à votre locataire ou " "correspond au routeur d'un autre locataire." msgid "The host IP to bind to" msgstr "IP hôte pour la liaison" msgid "The interface for interacting with the OVSDB" msgstr "Interface d'interaction avec OVSDB" msgid "" "The maximum number of items returned in a single response, value was " "'infinite' or negative integer means no limit" msgstr "" "Nombre maximal d'éléments renvoyés dans une seule réponse ; la valeur " "définie sur 'infinite' ou sur un entier négatif signifie illimité" #, python-format msgid "" "The network %(network_id)s has been already hosted by the DHCP Agent " "%(agent_id)s." msgstr "" "Le réseau %(network_id)s est déjà hébergé par l'agent DHCP %(agent_id)s." #, python-format msgid "" "The network %(network_id)s is not hosted by the DHCP agent %(agent_id)s." msgstr "" "Le réseau %(network_id)s n'est pas hébergé par l'agent DHCP %(agent_id)s." msgid "" "The network type to use when creating the HA network for an HA router. By " "default or if empty, the first 'tenant_network_types' is used. This is " "helpful when the VRRP traffic should use a specific network which is not the " "default one." msgstr "" "Type de réseau à utiliser lors de la création du réseau haute disponibilité " "pour un routeur haute disponibilité. Par défaut ou si cette zone est vide, " "le premier élément 'tenant_network_types' est utilisé. Cela s'avère utile " "lorsque le trafic VRRP doit utiliser un réseau spécifique différent de celui " "défini par défaut. " #, python-format msgid "The number of allowed address pair exceeds the maximum %(quota)s." msgstr "" "Le nombre de paires d'adresses autorisées dépasse le maximum %(quota)s." msgid "" "The number of seconds the agent will wait between polling for local device " "changes." msgstr "" "Temps en secondes pendant lequel l'agent attend les interrogations sur les " "modifications de l'unité locale." msgid "" "The number of seconds to wait before respawning the ovsdb monitor after " "losing communication with it." msgstr "" "Nombre de secondes d'attente avant relance du moniteur ovsdb après une perte " "de communication avec ce dernier." msgid "The number of sort_keys and sort_dirs must be same" msgstr "" "Le nombre des clés de tri (sort_keys) et celui des répertoires de tri " "(sort_dirs) doivent être identiques" msgid "" "The path for API extensions. Note that this can be a colon-separated list of " "paths. For example: api_extensions_path = extensions:/path/to/more/exts:/" "even/more/exts. The __path__ of neutron.extensions is appended to this, so " "if your extensions are in there you don't need to specify them here." msgstr "" "Chemin des extensions API. Notez qu'il peut s'agir d'une liste de chemins " "séparés par des virgules. Par exemple : api_extensions_path = extensions:/" "path/to/more/exts:/even/more/exts. Le chemin de neutron.extensions y est " "ajouté, de sorte que si vos extensions figurent dans ce chemin, vous n'avez " "pas besoin de les indiquer ici." 
msgid "The physical network name with which the HA network can be created." msgstr "" "Nom de réseau physique avec lequel le réseau haute disponibilité peut être " "créé. " #, python-format msgid "The port '%s' was deleted" msgstr "Le port '%s' a été supprimé" msgid "The port to bind to" msgstr "Port pour la liaison" #, python-format msgid "The requested content type %s is invalid." msgstr "Le type de contenu demandé %s n'est pas valide." msgid "The resource could not be found." msgstr "La ressource est introuvable." #, python-format msgid "" "The router %(router_id)s has been already hosted by the L3 Agent " "%(agent_id)s." msgstr "Le routeur %(router_id)s est déjà hébergé par l'agent L3 %(agent_id)s." msgid "" "The server has either erred or is incapable of performing the requested " "operation." msgstr "" "Le serveur a perdu la connexion ou est incapable d'effectuer l'opération " "demandée." msgid "The service plugins Neutron will use" msgstr "Plug-in de service qui sera utilisé par Neutron" #, python-format msgid "The subnet request could not be satisfied because: %(reason)s" msgstr "Impossible de répondre à la demande de sous-réseau. Motif : %(reason)s" #, python-format msgid "The subproject to execute the command against. Can be one of: '%s'." msgstr "" "Sous-projet en fonction duquel la commande doit être exécutée. Valeurs " "possibles : '%s'." msgid "The type of authentication to use" msgstr "Type d'authentification à utiliser" #, python-format msgid "The value '%(value)s' for %(element)s is not valid." msgstr "La valeur '%(value)s' pour %(element)s n'est pas valide." msgid "" "The working mode for the agent. Allowed modes are: 'legacy' - this preserves " "the existing behavior where the L3 agent is deployed on a centralized " "networking node to provide L3 services like DNAT, and SNAT. Use this mode if " "you do not want to adopt DVR. 'dvr' - this mode enables DVR functionality " "and must be used for an L3 agent that runs on a compute host. 'dvr_snat' - " "this enables centralized SNAT support in conjunction with DVR. This mode " "must be used for an L3 agent running on a centralized node (or in single-" "host deployments, e.g. devstack)" msgstr "" "Mode de fonctionnement de l'agent. Les modes autorisés sont : 'legacy' - " "préserve le comportement existant où l'agent L3 est déployé sur un noeud " "centralisé de mise en réseau pour fournir des services L3 comme DNAT et " "SNAT. Utilisez ce mode si vous ne voulez pas adopter le routeur virtuel " "distribué (DVR). 'dvr' - ce mode active la fonctionnalité DVR et doit être " "utilisé pour un agent L3 qui s'exécute sur un hôte de calcul. 'dvr_snat' - " "active la prise en charge SNAT centralisée conjointement avec DVR. Ce mode " "doit être utilisé pour un agent L3 fonctionnant sur un noeud centralisé (ou " "dans des déploiements à un seul hôte, par ex. devstack)" msgid "" "There are routers attached to this network that depend on this policy for " "access." msgstr "" "Certains routeurs connectés à ce réseau dépendent de cette stratégie pour " "l'accès." msgid "" "This will choose the web framework in which to run the Neutron API server. " "'pecan' is a new experiemental rewrite of the API server." msgstr "" "Permet de choisir l'infrastructure Web dans laquelle exécuter le serveur API " "de Neutron. 'pecan' est une nouvelle réécriture expérimentale du serveur API." msgid "Timeout" msgstr "Délai d'attente" msgid "" "Timeout in seconds for ovs-vsctl commands. If the timeout expires, ovs " "commands will fail with ALARMCLOCK error." 
msgstr "" "Délai en secondes pour les commandes ovs-vsctl. Si ce délai expire, les " "commandes ovs échouent avec une erreur ALARMCLOCK." msgid "" "Timeout in seconds to wait for a single OpenFlow request. Used only for " "'native' driver." msgstr "" "Délai d'attente en secondes pour une seule demande OpenFlow. Utilisé " "uniquement pour le pilote 'natif'. " msgid "" "Timeout in seconds to wait for the local switch connecting the controller. " "Used only for 'native' driver." msgstr "" "Délai d'attente en secondes pour la connexion du commutateur local au " "contrôleur. Utilisé uniquement pour le pilote 'natif'. " msgid "" "Too long prefix provided. New name would exceed given length for an " "interface name." msgstr "" "Le préfixe fourni est trop long. Le nouveau nom dépasserait la longueur " "indiquée pour un nom d'interface." msgid "Too many availability_zone_hints specified" msgstr "Trop d'éléments availability_zone_hints spécifiés" msgid "" "True to delete all ports on all the OpenvSwitch bridges. False to delete " "ports created by Neutron on integration and external network bridges." msgstr "" "La valeur est vraie (true) pour la suppression de tous les ports sur tous " "les ponts OpenvSwitch. Elle est fausse (false) pour la suppression des ports " "créés par Neutron sur les ponts d'intégration et de réseau externe." msgid "Tunnel IP value needed by the ML2 plugin" msgstr "Valeur IP de tunnel requise par le plug-in ML2" msgid "Tunnel bridge to use." msgstr "Pont de tunnel à utiliser." msgid "" "Type of the nova endpoint to use. This endpoint will be looked up in the " "keystone catalog and should be one of public, internal or admin." msgstr "" "Type de nœud final Nova à utiliser. Ce nœud final sera recherché dans le " "catalogue Keystone et il doit être de type public, interne ou admin." msgid "URL for connecting to designate" msgstr "URL pour la connexion au réseau désigné" msgid "URL to database" msgstr "URL de la base de données" #, python-format msgid "Unable to access %s" msgstr "Impossible d'accéder à %s" #, python-format msgid "" "Unable to allocate subnet with prefix length %(prefixlen)s, maximum allowed " "prefix is %(max_prefixlen)s." msgstr "" "Impossible d'allouer le sous-réseau avec la longueur de préfixe " "%(prefixlen)s, la longueur de préfixe maximum autorisée est de " "%(max_prefixlen)s." #, python-format msgid "" "Unable to allocate subnet with prefix length %(prefixlen)s, minimum allowed " "prefix is %(min_prefixlen)s." msgstr "" "Impossible d'allouer le sous-réseau avec la longueur de préfixe " "%(prefixlen)s, la longueur de préfixe minimum autorisée est de " "%(min_prefixlen)s." #, python-format msgid "Unable to calculate %(address_type)s address because of:%(reason)s" msgstr "Impossible de calculer l'adresse %(address_type)s. Motif : %(reason)s" #, python-format msgid "" "Unable to complete operation for %(router_id)s. The number of routes exceeds " "the maximum %(quota)s." msgstr "" "Impossible de terminer l'opération pour %(router_id)s. Le nombre de routes " "dépasse le maximum %(quota)s." #, python-format msgid "" "Unable to complete operation for %(subnet_id)s. The number of DNS " "nameservers exceeds the limit %(quota)s." msgstr "" "Impossible de terminer l'opération pour le sous-réseau %(subnet_id)s. Le " "nombre de serveurs DNS dépasse la limite %(quota)s." #, python-format msgid "" "Unable to complete operation for %(subnet_id)s. The number of host routes " "exceeds the limit %(quota)s." 
msgstr "" "Impossible de terminer l'opération pour le sous-réseau %(subnet_id)s. Le " "nombre de routes hôte dépasse la limite %(quota)s." #, python-format msgid "" "Unable to complete operation on address scope %(address_scope_id)s. There " "are one or more subnet pools in use on the address scope" msgstr "" "Impossible de terminer l'opération sur la portée d'adresse " "%(address_scope_id)s. Un ou plusieurs pools de sous-réseaux supplémentaires " "sont utilisés sur la portée d'adresse" #, python-format msgid "Unable to convert value in %s" msgstr "Impossible de convertir la valeur en %s" msgid "Unable to create the Agent Gateway Port" msgstr "Impossible de créer le port de passerelle d'agent" msgid "Unable to create the SNAT Interface Port" msgstr "Impossible de créer le port d'interface SNAT" #, python-format msgid "" "Unable to create the flat network. Physical network %(physical_network)s is " "in use." msgstr "" "Impossible de créer le réseau centralisé. Le réseau physique " "%(physical_network)s est en cours d'utilisation " msgid "" "Unable to create the network. No available network found in maximum allowed " "attempts." msgstr "" "Impossible de créer le réseau. Aucun réseau disponible trouvé avec le " "maximum de tentatives autorisées." #, python-format msgid "Unable to delete subnet pool: %(reason)s." msgstr "Impossible de supprimer le pool de sous-réseaux : %(reason)s." #, python-format msgid "Unable to determine mac address for %s" msgstr "Impossible de déterminer l'adresse mac pour %s" #, python-format msgid "Unable to find '%s' in request body" msgstr "Impossible de trouver '%s' dans le corps de demande" #, python-format msgid "Unable to find IP address %(ip_address)s on subnet %(subnet_id)s" msgstr "" "Impossible de trouver l'adresse IP %(ip_address)s sur le sous-réseau " "%(subnet_id)s" #, python-format msgid "Unable to find resource name in %s" msgstr "Impossible de trouver le nom de la ressource dans %s" msgid "Unable to generate IP address by EUI64 for IPv4 prefix" msgstr "Impossible de générer l'adresse IP par EUI64 pour le préfixe IPv4" #, python-format msgid "Unable to generate unique DVR mac for host %(host)s." msgstr "Impossible de générer une adresse MAC unique pour l'hôte %(host)s." #, python-format msgid "Unable to generate unique mac on network %(net_id)s." msgstr "Impossible de générer une adresse MAC unique sur le réseau %(net_id)s." #, python-format msgid "" "Unable to identify a target field from:%s. Match should be in the form " "%%()s" msgstr "" "Impossible d'identifier une zone cible à partir de : %s. La correspondance " "doit être au format %%()s" msgid "Unable to provide external connectivity" msgstr "Impossible de fournir une connectivité externe" msgid "Unable to provide tenant private network" msgstr "Impossible de fournir un réseau privé locataire" #, python-format msgid "" "Unable to reconfigure sharing settings for network %(network)s. Multiple " "tenants are using it." msgstr "" "Impossible de reconfigurer les paramètres de partage pour le réseau " "%(network)s. Plusieurs locataires l'utilisent." 
#, python-format msgid "Unable to update address scope %(address_scope_id)s : %(reason)s" msgstr "" "Impossible de mettre à jour la portée d'adresse %(address_scope_id)s : " "%(reason)s" #, python-format msgid "Unable to update the following object fields: %(fields)s" msgstr "Impossible de mettre à jour les zones d'objet suivantes : %(fields)s" #, python-format msgid "" "Unable to verify match:%(match)s as the parent resource: %(res)s was not " "found" msgstr "" "Impossible de vérifier la correspondance %(match)s comme ressource parent : " "%(res)s introuvable" #, python-format msgid "Unexpected label for script %(script_name)s: %(labels)s" msgstr "Libellé inattendu pour le script %(script_name)s : %(labels)s" #, python-format msgid "Unexpected number of alembic branch points: %(branchpoints)s" msgstr "Nombre de points de branche alembic inattendu : %(branchpoints)s" #, python-format msgid "Unexpected response code: %s" msgstr "Code de réponse inattendu : %s" #, python-format msgid "Unexpected response: %s" msgstr "Réponse inattendue : %s" #, python-format msgid "Unit name '%(unit)s' is not valid." msgstr "Le nom d'unité '%(unit)s' n'est pas valide." msgid "Unknown API version specified" msgstr "Version d'API spécifiée inconnue" #, python-format msgid "Unknown address type %(address_type)s" msgstr "Type d'adresse inconnu %(address_type)s" #, python-format msgid "Unknown attribute '%s'." msgstr "Attribut inconnu '%s'." #, python-format msgid "Unknown chain: %r" msgstr "Chaîne inconnue : %r" #, python-format msgid "Unknown network type %(network_type)s." msgstr "Type de réseau inconnu %(network_type)s." #, python-format msgid "Unknown quota resources %(unknown)s." msgstr "Ressources de quota inconnues %(unknown)s." msgid "Unmapped error" msgstr "Erreur de non-correspondance" msgid "Unrecognized action" msgstr "Action non reconnue" #, python-format msgid "Unrecognized attribute(s) '%s'" msgstr "Attribut(s) non reconnu(s) '%s'" msgid "Unrecognized field" msgstr "Zone non reconnue" msgid "Unspecified minimum subnet pool prefix." msgstr "Préfixe de pool de sous-réseaux minimum non spécifié." msgid "Unsupported Content-Type" msgstr "Type de contenu non pris en charge" #, python-format msgid "Unsupported network type %(net_type)s." msgstr "Le type de réseau %(net_type)s n'est pas pris en charge." #, python-format msgid "Unsupported port state: %(port_state)s." msgstr "L'état du port n'est pas pris en charge : %(port_state)s." msgid "Unsupported request type" msgstr "Type de demande non pris en charge" msgid "Updating default security group not allowed." msgstr "Mise à jour du groupe de sécurité par défaut non autorisée" msgid "" "Use ML2 l2population mechanism driver to learn remote MAC and IPs and " "improve tunnel scalability." msgstr "" "Utilisez le pilote de mécanisme l2population ML2 pour connaître les adresses " "MAC et IP distantes et améliorer l'évolutivité du tunnel." msgid "Use broadcast in DHCP replies." msgstr "Utilisez la diffusion dans les réponses DHCP." msgid "Use either --delta or relative revision, not both" msgstr "Utiliser soit --delta, soit une révision relative, mais pas les deux" msgid "" "Use ipset to speed-up the iptables based security groups. Enabling ipset " "support requires that ipset is installed on L2 agent node." msgstr "" "Utiliser ipset pour accélérer les groupes de sécurité basés sur iptables. " "L'activation du support ipset nécessite l'installation d'ipset sur le noeud " "d'agent L2." msgid "" "Use the root helper when listing the namespaces on a system. 
This may not be " "required depending on the security configuration. If the root helper is not " "required, set this to False for a performance improvement." msgstr "" "Utilisez l'assistant racine lors de l'affichage de la liste des espaces de " "noms sur un système. Cette opération n'est peut-être pas obligatoire selon " "la configuration de sécurité. Si l'assistant racine n'est pas requis, " "définissez cette option sur False afin d'améliorer les performances." msgid "" "Use veths instead of patch ports to interconnect the integration bridge to " "physical networks. Support kernel without Open vSwitch patch port support so " "long as it is set to True." msgstr "" "Utilisez veths au lieu de ports de correctif pour interconnecter le pont " "d'intégration avec des réseaux physiques. Le noyau sans port de correctif " "Open vSwitch est pris en charge si le paramètre est défini sur True." msgid "User (uid or name) running metadata proxy after its initialization" msgstr "" "Utilisateur (UID ou nom) exécutant le proxy de métadonnées après son " "initialisation" msgid "" "User (uid or name) running metadata proxy after its initialization (if " "empty: agent effective user)." msgstr "" "Utilisateur (UID ou nom) exécutant le proxy de métadonnées après son " "initialisation (si vide : utilisateur effectif de l'agent)." msgid "User (uid or name) running this process after its initialization" msgstr "" "Utilisateur (UID ou nom) exécutant ce processus après son initialisation" msgid "Username for connecting to designate in admin context" msgstr "" "Nom d'utilisateur pour la connexion à Designate dans un contexte admin" msgid "" "Uses veth for an OVS interface or not. Support kernels with limited " "namespace support (e.g. RHEL 6.5) so long as ovs_use_veth is set to True." msgstr "" "Indique si veth est utilisé ou non pour une interface OVS. Les noyaux avec " "support limité de l'espace de nom sont pris en charge (par exemple, RHEL " "6.5) tant que le paramètre ovs_use_veth est défini sur True." msgid "VRRP authentication password" msgstr "Mot de passe pour l'authentification VRRP" msgid "VRRP authentication type" msgstr "Type d'authentification VRRP" msgid "VXLAN network unsupported." msgstr "Réseau VXLAN non pris en charge." #, python-format msgid "" "Validation of dictionary's keys failed. Expected keys: %(expected_keys)s " "Provided keys: %(provided_keys)s" msgstr "" "Echec de validation des clés du dictionnaire. Clés attendues : " "%(expected_keys)s Clés fournies : %(provided_keys)s" #, python-format msgid "Validator '%s' does not exist." msgstr "Le validateur '%s' n'existe pas." #, python-format msgid "Value %(value)s in mapping: '%(mapping)s' not unique" msgstr "Valeur %(value)s non unique dans le mappage '%(mapping)s'" #, python-format msgid "" "Value of %(parameter)s has to be multiple of %(number)s, with maximum value " "of %(maximum)s and minimum value of %(minimum)s" msgstr "" "La valeur de %(parameter)s doit être un multiple de %(number)s, avec la " "valeur maximum %(maximum)s et la valeur minimum %(minimum)s" msgid "" "Value of host kernel tick rate (hz) for calculating minimum burst value in " "bandwidth limit rules for a port with QoS. See kernel configuration file for " "HZ value and tc-tbf manual for more information." msgstr "" "Valeur du rythme de noyau hôte (hz) pour le calcul de la valeur de rafale " "minimum dans les règles de limite de bande passante pour un port avec QoS. 
" "Consultez le fichier de configuration du noyau pour la valeur HZ et le " "manuel tc-tbf pour plus d'informations." msgid "" "Value of latency (ms) for calculating size of queue for a port with QoS. See " "tc-tbf manual for more information." msgstr "" "Valeur de latence (ms) pour le calcul de la taille de file d'attente d'un " "port avec QoS. Consultez le manuel tc-tbf pour plus d'informations." msgid "" "Watch file log. Log watch should be disabled when metadata_proxy_user/group " "has no read/write permissions on metadata proxy log file." msgstr "" "Surveillance des fichiers journaux. La surveillance des journaux doit être " "désactivée lorsque metadata_proxy_user/group ne dispose pas des droits de " "lecture/d'écriture sur le fichier journal du proxy de métadonnées." msgid "" "When external_network_bridge is set, each L3 agent can be associated with no " "more than one external network. This value should be set to the UUID of that " "external network. To allow L3 agent support multiple external networks, both " "the external_network_bridge and gateway_external_network_id must be left " "empty." msgstr "" "Lorsque le paramètre external_network_bridge est défini, chaque agent L3 ne " "peut être associé qu'à un seul réseau externe. Cette valeur doit être " "définie sur l'UUID de ce réseau externe. Pour permettre la prise en charge " "par l'agent L3 de plusieurs réseaux externes, il est nécessaire de laisser à " "blanc les paramètres external_network_bridge et gateway_external_network_id." msgid "" "When proxying metadata requests, Neutron signs the Instance-ID header with a " "shared secret to prevent spoofing. You may select any string for a secret, " "but it must match here and in the configuration used by the Nova Metadata " "Server. NOTE: Nova uses the same config key, but in [neutron] section." msgstr "" "Lors du relais des demandes de métadonnées, Neutron signe l'en-" "tête Instance-ID à l'aide d'un secret partagé afin d'éviter toute " "usurpation. Vous pouvez choisir une chaîne comme secret, mais elle doit être " "identique ici et dans la configuration utilisée par le serveur de " "métadonnées Nova. REMARQUE : Nova utilise la même clé de configuration, " "mais dans la section [neutron]." msgid "" "Where to store Neutron state files. This directory must be writable by the " "agent." msgstr "" "Où stocker des fichiers d'état de Neutron. Ce répertoire doit être " "accessible en écriture par l'agent." msgid "" "With IPv6, the network used for the external gateway does not need to have " "an associated subnet, since the automatically assigned link-local address " "(LLA) can be used. However, an IPv6 gateway address is needed for use as the " "next-hop for the default route. If no IPv6 gateway address is configured " "here, (and only then) the neutron router will be configured to get its " "default route from router advertisements (RAs) from the upstream router; in " "which case the upstream router must also be configured to send these RAs. " "The ipv6_gateway, when configured, should be the LLA of the interface on the " "upstream router. If a next-hop using a global unique address (GUA) is " "desired, it needs to be done via a subnet allocated to the network and not " "through this parameter. " msgstr "" "Avec IPv6, le réseau utilisé pour la passerelle externe ne doit pas " "obligatoirement disposer d'un sous-réseau associé, car l'adresse link-local " "(LLA) automatiquement affectée peut être utilisée. 
En revanche, une adresse " "de passerelle IPv6 est nécessaire comme next-hop pour la route par défaut. " "Si aucune adresse de passerelle IPv6 n'est configurée ici (et uniquement " "dans ce cas), le routeur Neutron sera configuré pour obtenir sa route par " "défaut à partir des annonces de routeur (RA) du routeur en amont ; dans " "cette situation, le routeur en amont doit également être configuré pour " "envoyer lesdites annonces. ipv6_gateway, lorsqu'il est configuré, doit " "constituer la LLA de l'interface du routeur en amont. Si un next-hop " "utilisant une adresse unique globale (GUA) est souhaité, il doit être " "effectué via un sous-réseau attribué au réseau, et non par l'intermédiaire " "de ce paramètre. " msgid "You must implement __call__" msgstr "Vous devez implémenter __call__" msgid "" "You must provide a config file for bridge - either --config-file or " "env[NEUTRON_TEST_CONFIG_FILE]" msgstr "" "Vous devez fournir un fichier de configuration pour le pont, --config-file " "ou env[NEUTRON_TEST_CONFIG_FILE]" msgid "You must provide a revision or relative delta" msgstr "Vous devez fournir une révision ou un delta relatif." msgid "a subnetpool must be specified in the absence of a cidr" msgstr "Un pool de sous-réseaux doit être spécifié en l'absence d'un cidr" msgid "add_ha_port cannot be called inside of a transaction." msgstr "" "Le paramètre add_ha_port ne peut pas être appelé à l'intérieur d'une " "transaction." msgid "allocation_pools allowed only for specific subnet requests." msgstr "" "allocation_pools autorisé uniquement pour les demandes de sous-réseaux " "spécifiques." msgid "allocation_pools are not in the subnet" msgstr "allocation_pools ne figurent pas dans le sous-réseau" msgid "allocation_pools use the wrong ip version" msgstr "allocation_pools utilise une version IP erronée" msgid "already a synthetic attribute" msgstr "déjà un attribut synthétique" msgid "binding:profile value too large" msgstr "Valeur binding:profile trop grande" #, python-format msgid "cannot perform %(event)s due to %(reason)s" msgstr "Impossible d'exécuter %(event)s en raison de %(reason)s" msgid "cidr and prefixlen must not be supplied together" msgstr "cidr et prefixlen ne doivent pas être fournis ensemble" #, python-format msgid "dhcp_agents_per_network must be >= 1. '%s' is invalid." msgstr "dhcp_agents_per_network doit être >= 1. '%s' n'est pas valide." 
msgid "dns_domain cannot be specified without a dns_name" msgstr "dns_domain ne peut pas être spécifié sans dns_name" msgid "dns_name cannot be specified without a dns_domain" msgstr "dns_name ne peut pas être spécifié sans dns_domain" msgid "fixed_ip_address cannot be specified without a port_id" msgstr "fixed_ip_address ne peut pas être spécifié sans port_id" #, python-format msgid "gateway_ip %s is not in the subnet" msgstr "gateway_ip %s ne figure pas dans le sous-réseau" #, python-format msgid "has device owner %s" msgstr "a le propriétaire d'unité %s" msgid "in use" msgstr "utilisé" #, python-format msgid "ip command failed on device %(dev_name)s: %(reason)s" msgstr "Echec de la commande ip sur l'unité %(dev_name)s : %(reason)s" #, python-format msgid "ip command failed: %(reason)s" msgstr "Echec de commande IP : %(reason)s" #, python-format msgid "ip link capability %(capability)s is not supported" msgstr "Fonctionnalité de liaison IP %(capability)s non prise en charge" #, python-format msgid "ip link command is not supported: %(reason)s" msgstr "Commande link IP non prise en charge : %(reason)s" msgid "ip_version must be specified in the absence of cidr and subnetpool_id" msgstr "" "ip_version doit être indiqué si cidr et subnetpool_id ne sont pas définis" msgid "ipv6_address_mode is not valid when ip_version is 4" msgstr "ipv6_address_mode est non valide quand ip_version est 4" msgid "ipv6_ra_mode is not valid when ip_version is 4" msgstr "ipv6_ra_mode est non valide quand ip_version est 4" msgid "" "ipv6_ra_mode or ipv6_address_mode cannot be set when enable_dhcp is set to " "False." msgstr "" "ipv6_ra_mode ou ipv6_address_mode ne peut pas être défini quand enable_dhcp " "a la valeur False." #, python-format msgid "" "ipv6_ra_mode set to '%(ra_mode)s' with ipv6_address_mode set to " "'%(addr_mode)s' is not valid. If both attributes are set, they must be the " "same value" msgstr "" "ipv6_ra_mode défini sur '%(ra_mode)s' avec ipv6_address_mode défini sur " "'%(addr_mode)s' n'est pas admis. Si les deux attributs sont définis, ils " "doivent avoir la même valeur" msgid "mac address update" msgstr "Mise à jour d'adresse MAC" #, python-format msgid "" "max_l3_agents_per_router %(max_agents)s config parameter is not valid. It " "has to be greater than or equal to min_l3_agents_per_router %(min_agents)s." msgstr "" "Le paramètre de configuration max_l3_agents_per_router %(max_agents)s n'est " "pas valide. Il doit être supérieur ou égal à min_l3_agents_per_router " "%(min_agents)s." 
msgid "must provide exactly 2 arguments - cidr and MAC" msgstr "doit fournir exactement 2 arguments - cidr et MAC" msgid "network_type required" msgstr "network_type requis" #, python-format msgid "network_type value '%s' not supported" msgstr "Valeur network_type '%s' non prise en charge" msgid "new subnet" msgstr "nouveau sous-réseau" #, python-format msgid "physical_network '%s' unknown for VLAN provider network" msgstr "" "physical_network '%s' inconnu pour le réseau de fournisseurs VLAN" #, python-format msgid "physical_network '%s' unknown for flat provider network" msgstr "" "physical_network '%s' inconnu pour le réseau de fournisseurs flat" msgid "physical_network required for flat provider network" msgstr "physical_network obligatoire pour le réseau de fournisseurs flat" #, python-format msgid "provider:physical_network specified for %s network" msgstr "provider:physical_network spécifié pour le réseau %s" #, python-format msgid "rbac_db_model not found in %s" msgstr "rbac_db_model non trouvé dans %s" msgid "record" msgstr "enregistrement" msgid "respawn_interval must be >= 0 if provided." msgstr "respawn_interval doit être >= 0 si fourni." #, python-format msgid "segmentation_id out of range (%(min)s through %(max)s)" msgstr "segmentation_id hors plage (%(min)s à %(max)s)" msgid "segmentation_id requires physical_network for VLAN provider network" msgstr "" "segmentation_id requiert physical_network pour le réseau de fournisseurs " "VLAN" msgid "shared attribute switching to synthetic" msgstr "commutation d'attribut partagé vers attribut synthétique" #, python-format msgid "" "subnetpool %(subnetpool_id)s cannot be updated when associated with shared " "address scope %(address_scope_id)s" msgstr "" "Le pool de sous-réseaux %(subnetpool_id)s ne peut pas être mis à jour s'il " "est associé à la portée d'adresse partagée %(address_scope_id)s" msgid "subnetpool_id and use_default_subnetpool cannot both be specified" msgstr "" "Les paramètres subnetpool_id et use_default_subnetpool ne peuvent pas être " "tous deux spécifiés" msgid "the nexthop is not connected with router" msgstr "nexthop n'est pas connecté au routeur" msgid "the nexthop is used by router" msgstr "nexthop est utilisé par le routeur" #, python-format msgid "unable to load %s" msgstr "impossible de charger %s" msgid "" "uuid provided from the command line so external_process can track us via /" "proc/cmdline interface." msgstr "" "UUID fourni dans la ligne de commande afin de permettre à external_process " "d'effectuer le suivi de l'UUID via l'interface /proc/cmdline." neutron-8.4.0/neutron/locale/tr_TR/0000775000567000056710000000000013044373210020336 5ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/locale/tr_TR/LC_MESSAGES/0000775000567000056710000000000013044373210022123 5ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/locale/tr_TR/LC_MESSAGES/neutron.po0000664000567000056710000024512013044372760024172 0ustar jenkinsjenkins00000000000000# Translations template for neutron. # Copyright (C) 2015 ORGANIZATION # This file is distributed under the same license as the neutron project. # # Translators: # ADİL REŞİT DURSUN , 2015 # Alper Çiftçi , 2015 # Andreas Jaeger , 2015. #zanata # OpenStack Infra , 2015. #zanata # Andreas Jaeger , 2016. #zanata # Mücahit Büyükyılmaz , 2016. 
#zanata msgid "" msgstr "" "Project-Id-Version: neutron 8.1.3.dev60\n" "Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n" "POT-Creation-Date: 2016-07-18 18:03+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" "PO-Revision-Date: 2015-09-16 08:28+0000\n" "Last-Translator: Andreas Jaeger \n" "Language: tr-TR\n" "Plural-Forms: nplurals=1; plural=0;\n" "Generated-By: Babel 2.0\n" "X-Generator: Zanata 3.7.3\n" "Language-Team: Turkish (Turkey)\n" #, python-format msgid "" "\n" "Command: %(cmd)s\n" "Exit code: %(code)s\n" "Stdin: %(stdin)s\n" "Stdout: %(stdout)s\n" "Stderr: %(stderr)s" msgstr "" "\n" "Komut: %(cmd)s\n" "Çıkış kodu: %(code)s\n" "Stdin: %(stdin)s\n" "Stdout: %(stdout)s\n" "Stderr: %(stderr)s" #, python-format msgid "%(driver)s: Internal driver error." msgstr "%(driver)s: Dahili sürücü hatası." #, python-format msgid "%(id)s is not a valid %(type)s identifier" msgstr "%(id)s geçerli bir %(type)s tanımlayıcı değil" #, python-format msgid "" "%(invalid_dirs)s is invalid value for sort_dirs, valid value is '%(asc)s' " "and '%(desc)s'" msgstr "" "%(invalid_dirs)s sort_dirs için geçersiz değer, geçerli değer '%(asc)s' ve " "'%(desc)s'" #, python-format msgid "%(key)s prohibited for %(tunnel)s provider network" msgstr "%(key)s %(tunnel)s sağlayıcı ağı için yasaklanmış" #, python-format msgid "" "%(method)s called with network settings %(current)s (original settings " "%(original)s) and network segments %(segments)s" msgstr "" "%(method)s ağ ayarları %(current)s (asıl ayarlar %(original)s) ve " "%(segments)s ağ dilimleri ile çağrıldı" #, python-format msgid "" "%(method)s called with port settings %(current)s (original settings " "%(original)s) host %(host)s (original host %(original_host)s) vif type " "%(vif_type)s (original vif type %(original_vif_type)s) vif details " "%(vif_details)s (original vif details %(original_vif_details)s) binding " "levels %(levels)s (original binding levels %(original_levels)s) on network " "%(network)s with segments to bind %(segments_to_bind)s" msgstr "" "%(method)s bağlantı noktası ayarları %(current)s (asıl ayarlar %(original)s) " "istemci %(host)s (asıl istemci %(original_host)s) vif türü %(vif_type)s " "(asıl vif türü %(original_vif_type)s) vif detayları %(vif_details)s (asıl " "vif detayları %(original_vif_details)s) bağlama seviyeleri %(levels)s (asıl " "bağlama seviyeleri %(original_levels)s) %(network)s ağı üzerinde bağlanacak " "%(segments_to_bind)s dilimleriyle çağrıldı" #, python-format msgid "" "%(method)s called with subnet settings %(current)s (original settings " "%(original)s)" msgstr "" "%(method)s alt ağ ayarları %(current)s (asıl ayarlar %(original)s) ile " "çağrıldı" #, python-format msgid "%(method)s failed." msgstr "%(method)s başarısız." #, python-format msgid "%(name)s '%(addr)s' does not match the ip_version '%(ip_version)s'" msgstr "%(name)s '%(addr)s' ip_version '%(ip_version)s' ile eşleşmiyor" #, python-format msgid "%s cannot be called while in offline mode" msgstr "%s çevrim dışı kipte çağrılamaz" #, python-format msgid "%s is invalid attribute for sort_key" msgstr "%s sort_key için geçersiz özniteliktir" #, python-format msgid "%s is invalid attribute for sort_keys" msgstr "%s sort_keys için geçersiz öznitelik" #, python-format msgid "%s is not a valid VLAN tag" msgstr "%s geçerli bir VLAN etiketi değil" #, python-format msgid "%s must implement get_port_from_device or get_ports_from_devices." 
msgstr "%s get_port_from_device veya get_ports_from_devices uygulamalıdır." #, python-format msgid "%s prohibited for VLAN provider network" msgstr "%s VLAN sağlayıcı ağı için yasaklanmış" #, python-format msgid "%s prohibited for flat provider network" msgstr "%s düz sağlayıcı ağı için yasaklanmış" #, python-format msgid "%s prohibited for local provider network" msgstr "%s yerel sağlayıcı ağı için yasaklanmış" #, python-format msgid "'%(data)s' exceeds maximum length of %(max_len)s" msgstr "'%(data)s' %(max_len)s azami uzunluğunu aşıyor" #, python-format msgid "'%(data)s' is not in %(valid_values)s" msgstr "'%(data)s' %(valid_values)s içinde değil" #, python-format msgid "'%(data)s' is too large - must be no larger than '%(limit)d'" msgstr "'%(data)s' çok büyük - en çok '%(limit)d' olmalı" #, python-format msgid "'%(data)s' is too small - must be at least '%(limit)d'" msgstr "'%(data)s' çok küçük - en az '%(limit)d' olmalı" #, python-format msgid "'%(data)s' isn't a recognized IP subnet cidr, '%(cidr)s' is recommended" msgstr "'%(data)s' tanınan bir IP alt ağı cidr'i değil, '%(cidr)s' öneriliyor" #, python-format msgid "'%(host)s' is not a valid nameserver. %(msg)s" msgstr "'%(host)s' geçerli bir isim sunucu değil. %(msg)s" #, python-format msgid "'%s' Blank strings are not permitted" msgstr "'%s' boş karakter dizilerine izin verilmez" #, python-format msgid "'%s' cannot be converted to boolean" msgstr "'%s' bool değere çevrilemez" #, python-format msgid "'%s' contains whitespace" msgstr "'%s' boşluk içeriyor" #, python-format msgid "'%s' is not a dictionary" msgstr "'%s' bir dizin değil" #, python-format msgid "'%s' is not a list" msgstr "'%s' liste değil" #, python-format msgid "'%s' is not a valid IP address" msgstr "'%s' geçerli bir IP adresi değil" #, python-format msgid "'%s' is not a valid IP subnet" msgstr "'%s' geçerli IP alt ağ değil" #, python-format msgid "'%s' is not a valid MAC address" msgstr "'%s' geçerli bir MAC adresi değil" #, python-format msgid "'%s' is not a valid UUID" msgstr "'%s' geçerli bir UUID değil" #, python-format msgid "'%s' is not a valid boolean value" msgstr "'%s' geçerli bir bool değer değil" #, python-format msgid "'%s' is not a valid input" msgstr "'%s' geçerli bir girdi değil" #, python-format msgid "'%s' is not a valid string" msgstr "'%s' geçerli karakter dizisi değil" #, python-format msgid "'%s' is not an integer" msgstr "'%s' tam sayı değil" #, python-format msgid "'%s' is not an integer or uuid" msgstr "'%s' bir tam sayı ya da uuid değil" #, python-format msgid "'%s' is not of the form =[value]" msgstr "'%s' =[değer] biçiminde olmalı" #, python-format msgid "'%s' must be a non negative decimal." msgstr "'%s' negatif olmayan ondalık olmalı." #, python-format msgid "'%s' should be non-negative" msgstr "'%s' negatif olmamalı" msgid "'.' searches are not implemented" msgstr "'.' aramaları uygulanmamış" msgid "0 is not allowed as CIDR prefix length" msgstr "0 CIDR önek uzunluğuna izin verilmez" msgid "A cidr must be specified in the absence of a subnet pool" msgstr "Alt ağ havuzu olmadığında bir cidr belirtilmelidir" msgid "A metering driver must be specified" msgstr "Bir ölçme sürücüsü belirtilmeli" msgid "API for retrieving service providers for Neutron advanced services" msgstr "Neutron gelişmiş servisleri için servis sağlayıcıları alma API'si" msgid "Access to this resource was denied." msgstr "Bu kaynağa erişime izin verilmiyor." 
msgid "Action to be executed when a child process dies" msgstr "Alt süreç öldüğünde çalıştırılacak eylem" msgid "Address not present on interface" msgstr "Adres arayüzde mevcut değil" #, python-format msgid "Address scope %(address_scope_id)s could not be found" msgstr "Adres kapsamı %(address_scope_id)s bulunamadı" msgid "Adds external network attribute to network resource." msgstr "Ek ağ özniteliğini ağ kaynağına ekler." msgid "Adds test attributes to core resources." msgstr "Çekirdek kaynaklara test özniteliklerini ekler." #, python-format msgid "Agent %(id)s could not be found" msgstr "Ajan %(id)s bulunamadı" #, python-format msgid "Agent %(id)s is not a L3 Agent or has been disabled" msgstr "Ajan %(id)s bir L3 Ajanı değil ya da kapalı" #, python-format msgid "Agent %(id)s is not a valid DHCP Agent or has been disabled" msgstr "Ajan %(id)s geçerli bir DHCP Ajanı değil veya kapalı" #, python-format msgid "Agent updated: %(payload)s" msgstr "Ajan güncellendi: %(payload)s" #, python-format msgid "" "Agent with agent_type=%(agent_type)s and host=%(host)s could not be found" msgstr "agent_type=%(agent_type)s ve istemci=%(host)s olan ajan bulunamadı" msgid "Allow auto scheduling networks to DHCP agent." msgstr "Ağların DHCP ajanlarına otomatik zamanlanmasına izin ver." msgid "Allow auto scheduling of routers to L3 agent." msgstr "Yönlendiricilerin L3 ajanına otomatik zamanlanmasına izin ver." msgid "Allow running metadata proxy." msgstr "Metadata vekili çalıştırmaya izin ver." msgid "Allow sending resource operation notification to DHCP agent" msgstr "DHCP ajanına kaynak işlem bildirimi göndermeye izin ver" msgid "Allow the usage of the bulk API" msgstr "Toplu API'nin kullanımına izin ver" msgid "Allow the usage of the pagination" msgstr "Sayfalama kullanımına izin ver" msgid "Allow the usage of the sorting" msgstr "Sıralama kullanımına izin ver" msgid "Allow to perform insecure SSL (https) requests to nova metadata" msgstr "Nova metadata'ya güvensiz SSL (https) istekleri yapmaya izin ver" msgid "AllowedAddressPair must contain ip_address" msgstr "AllowedAddressPair ip_address içermeli" msgid "An interface driver must be specified" msgstr "Bir arayüz sürücüsü belirtmeniz gerekmektedir" msgid "" "An ordered list of networking mechanism driver entrypoints to be loaded from " "the neutron.ml2.mechanism_drivers namespace." msgstr "" "neutron.ml2.mechanism_drivers isim uzayından yüklenecek ağ mekanizması " "sürücü giriş noktalarının sıralı listesi." msgid "An unknown error has occurred. Please try your request again." msgstr "Bilinmeyen bir hata oluştu. Lütfen tekrar deneyin." #, python-format msgid "Attribute '%s' not allowed in POST" msgstr "'%s' özniteliğine POST içinde izin verilmez" msgid "Automatically remove networks from offline DHCP agents." msgstr "Ağları çevrimdışı DHCP ajanlarından otomatik olarak çıkar." msgid "" "Automatically reschedule routers from offline L3 agents to online L3 agents." msgstr "" "Yönlendiricileri çevrimdışı L3 ajanlarından çevrimiçi L3 ajanlarına otomatik " "olarak yeniden zamanla." msgid "Available commands" msgstr "Kullanılabilir komutlar" msgid "Backend does not support VLAN Transparency." msgstr "Arka uç VLAN şeffaflığını desteklemiyor." 
#, python-format msgid "" "Bad prefix or mac format for generating IPv6 address by EUI-64: %(prefix)s, " "%(mac)s:" msgstr "" "EUI-64 ile IPv6 adresi üretmek için kötü önek ya da mac biçimi: %(prefix)s, " "%(mac)s:" #, python-format msgid "Bad prefix type for generate IPv6 address by EUI-64: %s" msgstr "EUI-64 ile IPv6 adresi üretmek için kötü önek türü: %s" #, python-format msgid "Base MAC: %s" msgstr "Taban MAC: %s" msgid "Body contains invalid data" msgstr "Gövde geçersiz veri içeriyor" #, python-format msgid "Bridge %(bridge)s does not exist." msgstr "Köprü %(bridge)s mevcut değil." #, python-format msgid "Bridge %s does not exist" msgstr "Köprü %s mevcut değil" msgid "Bulk operation not supported" msgstr "Toplu işlem desteklenmiyor" msgid "CIDR to monitor" msgstr "İzlenecek CIDR" #, python-format msgid "Cannot add floating IP to port on subnet %s which has no gateway_ip" msgstr "" "gateway_ip'ye sahip olmayan %s alt ağının bağlantı noktasına değişken IP " "eklenemiyor" #, python-format msgid "Cannot allocate IPv%(req_ver)s subnet from IPv%(pool_ver)s subnet pool" msgstr "IPv%(pool_ver)s alt ağ havuzundan IPv%(req_ver)s alt ağı ayrılamaz" msgid "Cannot allocate requested subnet from the available set of prefixes" msgstr "İstenen alt ağ kullanılabilir önek kümesinden ayrılamıyor" #, python-format msgid "" "Cannot associate floating IP %(floating_ip_address)s (%(fip_id)s) with port " "%(port_id)s using fixed IP %(fixed_ip)s, as that fixed IP already has a " "floating IP on external network %(net_id)s." msgstr "" "Değişken IP %(floating_ip_address)s (%(fip_id)s) sabit IP %(fixed_ip)s " "kullanılarak %(port_id)s bağlantı noktasıyla ilişkilendirilemiyor, çünkü bu " "sabit IP %(net_id)s harici ağında zaten bir değişken IP'ye sahip." #, python-format msgid "" "Cannot create floating IP and bind it to Port %s, since that port is owned " "by a different tenant." msgstr "" "Değişken IP oluşturup %s bağlantı noktasına bağlanamıyor, çünkü bu bağlantı " "noktası başka bir kiracıya ait." msgid "Cannot create resource for another tenant" msgstr "Başka bir kiracı için kaynak oluşturulamıyor" msgid "Cannot disable enable_dhcp with ipv6 attributes set" msgstr "ipv6 öznitelikleri ayarlıyken enable_dhcp kapatılamaz" #, python-format msgid "Cannot find %(table)s with %(col)s=%(match)s" msgstr "%(col)s=%(match)s olan %(table)s bulunamadı" #, python-format msgid "Cannot handle subnet of type %(subnet_type)s" msgstr "%(subnet_type)s türünde alt ağ işlenemiyor" msgid "Cannot have multiple IPv4 subnets on router port" msgstr "Yönlendirici bağlantı noktasında birden fazla IPv4 alt ağı olamaz" #, python-format msgid "" "Cannot have multiple router ports with the same network id if both contain " "IPv6 subnets. Existing port %(p)s has IPv6 subnet(s) and network id %(nid)s" msgstr "" "İkisi de IPv6 alt ağı içeriyorsa aynı ağ id'si ile birden fazla yönlendirici " "bağlantı noktası olamaz. 
Mevcut bağlantı noktası %(p)s IPv6 alt ağ(lar)ına " "ve %(nid)s ağ kimliğine sahip" msgid "Cannot match priority on flow deletion or modification" msgstr "Akış silme veya değiştirmede öncelik eşleştirilemedi" msgid "Cannot specify both subnet-id and port-id" msgstr "Hem subnet-id hem port-id belirtilemez" msgid "Cannot understand JSON" msgstr "JSON anlaşılamıyor" #, python-format msgid "Cannot update read-only attribute %s" msgstr "Yalnızca okunabilir öznitelik %s güncellenemez" msgid "Certificate Authority public key (CA cert) file for ssl" msgstr "Ssl için Sertifika Yetkilisi açık anahtarı (CA cert)" msgid "Check ebtables installation" msgstr "Ebtables kurulumunu kontrol et" msgid "Check for ARP header match support" msgstr "ARP başlık eşleştirme desteğini kontrol et" msgid "Check for ARP responder support" msgstr "ARP yanıtlayıcısı desteğini kontrol et" msgid "Check for OVS vxlan support" msgstr "OVS vxlan desteğini kontrol et" msgid "Check for VF management support" msgstr "VF yönetim desteğini kontrol et" msgid "Check for iproute2 vxlan support" msgstr "Iproute2 vxlan desteğini kontrol et" msgid "Check for nova notification support" msgstr "Nova bildirim desteğini kontrol et" msgid "Check for patch port support" msgstr "Yama bağlantı noktası desteğini kontrol et" msgid "Check minimal dnsmasq version" msgstr "Asgari dnsmasq sürümünü kontrol et" msgid "Check netns permission settings" msgstr "Netns izin ayarlarını kontrol et" msgid "Check ovsdb native interface support" msgstr "Ovsdb doğal arayüz desteğini kontrol et" #, python-format msgid "" "Cidr %(subnet_cidr)s of subnet %(subnet_id)s overlaps with cidr %(cidr)s of " "subnet %(sub_id)s" msgstr "" "%(subnet_id)s alt ağının %(subnet_cidr)s cidr'i %(sub_id)s alt ağının " "%(cidr)s cidr'i ile çakışıyor" msgid "Client certificate for nova metadata api server." msgstr "Nova metadata api sunucusu için istemci sertifikası." msgid "" "Comma-separated list of : tuples enumerating ranges of GRE " "tunnel IDs that are available for tenant network allocation" msgstr "" "Kiracı ağ ayırma için kullanılabilir GRE tünel kimliklerinin aralığını " "numaralandıran : demetlerinin virgülle ayrılmış listesi" msgid "" "Comma-separated list of : tuples enumerating ranges of " "VXLAN VNI IDs that are available for tenant network allocation" msgstr "" "Kiracı ağı ayırmaları için kullanılabilir VXLAN VNI ID'lerinin aralıklarını " "numaralandıran : demetlerinin virgülle ayrılmış listesi" msgid "" "Comma-separated list of the DNS servers which will be used as forwarders." msgstr "" "Yönlendirici olarak kullanılacak DNS sunucularının virgülle ayrılmış listesi." msgid "Command to execute" msgstr "Çalıştırılacak komut" msgid "Config file for interface driver (You may also use l3_agent.ini)" msgstr "" "Arayüz sürücüsü için yapılandırma dosyası (l3_agent.ini de kullanabilirsiniz)" #, python-format msgid "Conflicting value ethertype %(ethertype)s for CIDR %(cidr)s" msgstr "CIDR %(cidr)s için çatışan değer ethertype %(ethertype)s" msgid "" "Controls whether the neutron security group API is enabled in the server. It " "should be false when using no security groups or using the nova security " "group API." msgstr "" "Neutron güvenlik grubu API'sinin sunucuda etkin olup olmadığını kontrol " "eder. Güvenlik grubu kullanılmadığında veya nova güvenlik grubu API'si " "kullanıldığında false olmalıdır." 
#, python-format msgid "Could not bind to %(host)s:%(port)s after trying for %(time)d seconds" msgstr "%(time)d saniye denedikten sonra %(host)s:%(port)s'a bağlanamadı" msgid "Could not deserialize data" msgstr "Veri serisi çözülemedi" #, python-format msgid "" "Current gateway ip %(ip_address)s already in use by port %(port_id)s. Unable " "to update." msgstr "" "Mevcut geçit ip'si %(ip_address)s %(port_id)s bağlantı noktası tarafından " "zaten kullanılıyor. Güncellenemiyor." msgid "" "DHCP lease duration (in seconds). Use -1 to tell dnsmasq to use infinite " "lease times." msgstr "" "DHCP kira süresi (saniye olarak). Dnsmasq'a süresiz kira zamanları " "kullanmasını söylemek için -1 kullanın." msgid "Default driver to use for quota checks" msgstr "Kota kontrolleri için kullanılacak varsayılan sürücü" msgid "" "Default network type for external networks when no provider attributes are " "specified. By default it is None, which means that if provider attributes " "are not specified while creating external networks then they will have the " "same type as tenant networks. Allowed values for external_network_type " "config option depend on the network type values configured in type_drivers " "config option." msgstr "" "Sağlayıcı öznitelikleri belirtilmediğinde harici ağlar için varsayılan ağ " "türü. Varsayılan olarak None'dir, bunun anlamı harici ağ oluştururken " "sağlayıcı öznitelikleri belirtilmemişse kiracı ağlarla aynı türe sahip " "olacaklarıdır. external_network_type yapılandırma seçeneği için izin verilen " "değerler type_drivers yapılandırma seçeneğinde yapılandırılan ağ türü " "değerlerine bağlıdır." msgid "" "Default number of resource allowed per tenant. A negative value means " "unlimited." msgstr "" "Kiracı başına izin verilecek varsayılan kaynak sayısı. Negatif değer " "sınırsız anlamına gelir." msgid "Default security group" msgstr "Varsayılan güvenlik grubu" msgid "Default security group already exists." msgstr "Varsayılan güvenlik grubu zaten mevcut." msgid "" "Define the default value of enable_snat if not provided in " "external_gateway_info." msgstr "" "external_gateway_info'da sağlanmamışsa enable_snat'ın varsayılan değerini " "tanımla." msgid "" "Defines providers for advanced services using the format: :" ":[:default]" msgstr "" "Şu biçimi kullanarak gleişmiş servisler için sağlayıcılar tanımlar: " "::[:default]" msgid "" "Delay within which agent is expected to update existing ports whent it " "restarts" msgstr "" "Yeniden başlatıldığında mevcut bağlantı noktalarını güncellemesi beklenen " "ajan içindeki gecikme" msgid "Delete the namespace by removing all devices." msgstr "İsim uzayını tüm aygıtları kaldırarak sil." #, python-format msgid "Deleting port %s" msgstr "Bağlantı noktası %s siliniyor" msgid "Destroy IPsets even if there is an iptables reference." msgstr "Iptables referansı olsa bile IPset'leri sil." msgid "Destroy all IPsets." msgstr "Tüm IPset'leri sil." #, python-format msgid "Device %(dev_name)s in mapping: %(mapping)s not unique" msgstr "%(mapping)s eşleştirmesindeki aygıt %(dev_name)s benzersiz" msgid "Device has no virtual functions" msgstr "Aygıt sanal fonksiyonlara sahip değil" #, python-format msgid "Device name %(dev_name)s is missing from physical_device_mappings" msgstr "Aygıt ismi %(dev_name)s physical_device_mappings'de eksik" msgid "Device not found" msgstr "Aygıt bulunamadı" #, python-format msgid "" "Distributed Virtual Router Mac Address for host %(host)s does not exist." 
msgstr "" "%(host)s istemcisi için Dağıtık Sanal Yönlendirici Mac Adresi mevcut değil." msgid "Domain to use for building the hostnames" msgstr "Makine adlarını inşa için kullanılacak alan" msgid "Downgrade no longer supported" msgstr "Alçaltma artık desteklenmiyor" #, python-format msgid "Driver %s is not unique across providers" msgstr "%s sürücüsü sağlayıcılar arasında benzersiz değil" msgid "Driver for security groups firewall in the L2 agent" msgstr "L2 ajanındaki güvenlik grubunun güvenlik duvarı için sürücü" msgid "Driver to use for scheduling network to DHCP agent" msgstr "Ağın DHCP ajanlarına zamanlanması için kullanılacak sürücü" msgid "Driver to use for scheduling router to a default L3 agent" msgstr "Yönlendiriciyi bir L3 ajanına zamanlamak için gerekli sürücü" #, python-format msgid "Duplicate IP address '%s'" msgstr "Kopya IP adresi '%s'" msgid "Duplicate Metering Rule in POST." msgstr "POST'da kopya ölçme kuralı." msgid "Duplicate Security Group Rule in POST." msgstr "POST'da Kopya Güvenlik Grubu Kuralı." #, python-format msgid "Duplicate hostroute '%s'" msgstr "Kopya istemci rotası '%s'" #, python-format msgid "Duplicate items in the list: '%s'" msgstr "Listede kopya öğeler: '%s'" #, python-format msgid "Duplicate nameserver '%s'" msgstr "Kopya isim sunucu '%s'" msgid "Duplicate segment entry in request." msgstr "İstekte kopya dilim girdisi." #, python-format msgid "ERROR: %s" msgstr "HATA: %s" msgid "" "ERROR: Unable to find configuration file via the default search paths (~/." "neutron/, ~/, /etc/neutron/, /etc/) and the '--config-file' option!" msgstr "" "HATA: Varsayılan arama yollarıyla ve (~/.neutron/, ~/, /etc/neutron/, /etc/) " "ve '--config-file' seçeneğiyle yapılandırma dosyası bulunamadı!" msgid "" "Either one of parameter network_id or router_id must be passed to _get_ports " "method." msgstr "" "_get_ports metoduna network_id veya router_id parametrelerinden biri " "verilmelidir." msgid "Either subnet_id or port_id must be specified" msgstr "subnet_id veya port_id belirtilmeli" msgid "Empty physical network name." msgstr "Boş fiziksel ağ ismi." msgid "Enable FWaaS" msgstr "FWaaS'ı etkinleştir" msgid "Enable HA mode for virtual routers." msgstr "Sanal yönlendiriciler için HA kipini etkinleştir." msgid "Enable SSL on the API server" msgstr "API sunucuda SSL etkinleştir" msgid "" "Enable VXLAN on the agent. Can be enabled when agent is managed by ml2 " "plugin using linuxbridge mechanism driver" msgstr "" "Ajanda VXLAN etkinleştir. Ajan linuxbridge mekanizma sürücüsünü kullanan ml2 " "eklentisi ile yönetildiğinde etkinleştirilebilir" msgid "" "Enable local ARP responder if it is supported. Requires OVS 2.1 and ML2 " "l2population driver. Allows the switch (when supporting an overlay) to " "respond to an ARP request locally without performing a costly ARP broadcast " "into the overlay." msgstr "" "Destekleniyorsa yerel ARP yanıtlayıcıyı etkinleştir. OVS 2.1 ve ML2 " "I2population sürücüsüne ihtiyaç duyar. Anahtarın (kaplama desteklediğinde) " "bir ARP isteğine yerel olarak, kaplamaya maliyetli ARP yayını yapmadan yanıt " "vermesini sağlar." msgid "" "Enable services on an agent with admin_state_up False. If this option is " "False, when admin_state_up of an agent is turned False, services on it will " "be disabled. Agents with admin_state_up False are not selected for automatic " "scheduling regardless of this option. But manual scheduling to such agents " "is available if this option is True." 
msgstr "" "admin_state_up False olan bir ajan üzerinde servisleri etkinleştir. Bu " "seçenek False ise, bir ajanın admin_state_up'u False yapıldığında, " "üzerindeki servisler kapatılacaktır. admin_state_up False olan ajanlar bu " "seçeneğe bakılmaksızın otomatik zamanlama için seçilmezler. Ama bu seçenek " "True ise bu tür ajanlara elle zamanlama yapılabilir." msgid "" "Enable/Disable log watch by metadata proxy. It should be disabled when " "metadata_proxy_user/group is not allowed to read/write its log file and " "copytruncate logrotate option must be used if logrotate is enabled on " "metadata proxy log files. Option default value is deduced from " "metadata_proxy_user: watch log is enabled if metadata_proxy_user is agent " "effective user id/name." msgstr "" "Metadata vekili tarafından kayıt etkinleştir/kapat. metadata_proxy_user/" "group kayıt dosyasına okuma/yazma iznine sahip olmadığında, kapatılmalıdır " "ve metadata vekil kayıt dosyaları üzerinde logrotate etkinse copytruncate " "logrotate seçeneği kullanılmalıdır. Seçeneğin varsayılan değeri " "metadata_proxy_user'den anlaşılır: metadata_proxy_user ajan etkin kullanıcı " "id/ismi ise izleme kaydı etkinleştirilir." msgid "End of VLAN range is less than start of VLAN range" msgstr "VLAN aralığı sonu VLAN aralığı başından daha küçük" msgid "End of tunnel range is less than start of tunnel range" msgstr "Tünel aralığı sonu tünel aralığı başından daha küçük" #, python-format msgid "Error %(reason)s while attempting the operation." msgstr "İşlem denenirken %(reason)s hatası." #, python-format msgid "Error importing FWaaS device driver: %s" msgstr "FWaaS aygıt sürücüsünü içe aktarmada hata: %s" #, python-format msgid "Error parsing dns address %s" msgstr "%s dns adresinin ayrıştırılmasında hata" #, python-format msgid "Error while reading %s" msgstr "%s okunurken hata" #, python-format msgid "" "Exceeded %s second limit waiting for address to leave the tentative state." msgstr "" "Adresin belirsiz durumdan çıkması için %s saniye bekleme sınırı aşıldı." msgid "Existing prefixes must be a subset of the new prefixes" msgstr "Mevcut önekler yeni öneklerin alt kümesi olmalıdır" msgid "" "Extension to use alongside ml2 plugin's l2population mechanism driver. It " "enables the plugin to populate VXLAN forwarding table." msgstr "" "ml2 eklentisinin l2population mekanizma sürücüsünün yanında kullanılacak " "eklenti. Eklentiyi VXLAN iletim tablosunu doldurması için etkinleştirir." #, python-format msgid "Extension with alias %s does not exist" msgstr "%s rumuzlu eklenti mevcut değil" #, python-format msgid "External IP %s is the same as the gateway IP" msgstr "Harici IP %s geçit IP ile aynı" #, python-format msgid "" "External network %(external_network_id)s is not reachable from subnet " "%(subnet_id)s. Therefore, cannot associate Port %(port_id)s with a Floating " "IP." msgstr "" "Harici ağ %(external_network_id)s %(subnet_id)s alt ağından erişilebilir " "değil. Bu yüzden, %(port_id)s bağlantı noktası bir Değişken IP ile " "ilişkilendirilemiyor." #, python-format msgid "" "External network %(net_id)s cannot be updated to be made non-external, since " "it has existing gateway ports" msgstr "" "Harici ağ %(net_id)s harici-olmayan şekilde olması için güncellenemez, çünkü " "mevcut geçit bağlantı noktaları var" #, python-format msgid "ExtraDhcpOpt %(id)s could not be found" msgstr "ExtraDhcpOpt %(id)s bulunamadı" msgid "" "FWaaS plugin is configured in the server side, but FWaaS is disabled in L3-" "agent." 
msgstr "" "FWaaS eklentisi sunucu tarafında yapılandırılmış, ama FWaaS L3-agent'de " "kapalı." #, python-format msgid "Failed rescheduling router %(router_id)s: no eligible l3 agent found." msgstr "" "%(router_id)s yönlendiricisini yeniden zamanlama başarısız: seçilebilir l3 " "ajanı bulunamadı." #, python-format msgid "Failed scheduling router %(router_id)s to the L3 Agent %(agent_id)s." msgstr "" "%(router_id)s yönlendiricisinin %(agent_id)s L3 Ajanına zamanlanması " "başarısız." #, python-format msgid "" "Failed to allocate a VRID in the network %(network_id)s for the router " "%(router_id)s after %(max_tries)s tries." msgstr "" "%(max_tries)s denemeden sonra %(router_id)s yönlendiricisi için " "%(network_id)s ağında VRID ayırma başarısız." #, python-format msgid "" "Failed to create port on network %(network_id)s, because fixed_ips included " "invalid subnet %(subnet_id)s" msgstr "" "%(network_id)s ağı üzerinde bağlantı noktası oluşturma başarısız, çünkü " "fixed_ips geçersiz %(subnet_id)s alt ağını içeriyor" #, python-format msgid "Failed to parse request. Parameter '%s' not specified" msgstr "İstek ayrıştırılamadı. '%s' parametresi belirtilmemiş" #, python-format msgid "Failed to parse request. Required attribute '%s' not specified" msgstr "İstek ayrıştırılamıyor. Gerekli öznitelik '%s' belirtilmemiş" msgid "Failed to remove supplemental groups" msgstr "Destekleyici gruplar kaldırılamadı" #, python-format msgid "Failed to set gid %s" msgstr "Gid %s ayarlanamadı" #, python-format msgid "Failed to set uid %s" msgstr "Uid %s ayarlanamadı" #, python-format msgid "Failed to set-up %(type)s tunnel port to %(ip)s" msgstr "%(ip)s'ye %(type)s tünel bağlantı noktası kurulumu başarısız" msgid "Failure applying iptables rules" msgstr "Iptables kuralları uygulanırken başarısız olundu" #, python-format msgid "Failure waiting for address %(address)s to become ready: %(reason)s" msgstr "%(address)s adresinin hazır olmasını bekleme başarısız: %(reason)s" #, python-format msgid "Flavor %(flavor_id)s could not be found." msgstr "%(flavor_id)s örnek türü bulunamadı." #, python-format msgid "Floating IP %(floatingip_id)s could not be found" msgstr "Değişken IP %(floatingip_id)s bulunamadı" msgid "For TCP/UDP protocols, port_range_min must be <= port_range_max" msgstr "" "TCP/UDP iletişim kuralları için, port_range_min <= port_range_max olmalı" msgid "Force ip_lib calls to use the root helper" msgstr "ip_lib çağrılarını kök yardımcıyı kullanmaya zorla" #, python-format msgid "" "Found overlapping allocation pools: %(pool_1)s %(pool_2)s for subnet " "%(subnet_cidr)s." msgstr "" "%(subnet_cidr)s alt ağı için çakışan ayırma havuzları: %(pool_1)s %(pool_2)s " "bulundu." #, python-format msgid "" "Gateway cannot be updated for router %(router_id)s, since a gateway to " "external network %(net_id)s is required by one or more floating IPs." msgstr "" "Geçit %(router_id)s yönlendiricisi için güncellenemedi, çünkü bir ya da " "fazla değişken IP tarafından %(net_id)s harici ağına bir geçit gerekli." msgid "Gateway is not valid on subnet" msgstr "Geçit alt ağda geçerli değil" msgid "Group (gid or name) running metadata proxy after its initialization" msgstr "" "İlklendirilmesinden sonra metadata vekilini çalıştıran grup (gid veya isim)" msgid "" "Group (gid or name) running metadata proxy after its initialization (if " "empty: agent effective group)." msgstr "" "İlklendirilmesinden sonra metadata vekilini çalıştıran grup (gid veya isim) " "(boşsa: ajan etkin grup)." 
msgid "Group (gid or name) running this process after its initialization" msgstr "İlklendirilmesinden sonra bu süreci çalıştıran grup (gid veya isim)" msgid "How many times Neutron will retry MAC generation" msgstr "Neutron kaç kere MAC üretmeyi deneyecek" #, python-format msgid "" "ICMP code (port-range-max) %(value)s is provided but ICMP type (port-range-" "min) is missing." msgstr "" "ICMP kodu (port-range-max) %(value)s sağlanmış ama ICMP türü (port-range-" "min) eksik." msgid "ID of network" msgstr "Ağ kimliği" msgid "ID of network to probe" msgstr "Sorgulanacak ağ ID'si" msgid "ID of probe port to delete" msgstr "Silinecek deneme bağlantı noktasının kimliği" msgid "ID of probe port to execute command" msgstr "Komutun çalıştırılacağı deneme bağlantı noktası kimliği" msgid "ID of the router" msgstr "Yönetici kimliği" #, python-format msgid "IP address %(ip)s already allocated in subnet %(subnet_id)s" msgstr "%(ip)s IP adresi %(subnet_id)s alt ağında zaten ayrılmış" #, python-format msgid "IP address %(ip)s does not belong to subnet %(subnet_id)s" msgstr "%(ip)s IP adresi %(subnet_id)s alt ağına ait değil" #, python-format msgid "" "IP address %(ip_address)s is not a valid IP for any of the subnets on the " "specified network." msgstr "" "%(ip_address)s IP adresi belirtilen ağdaki alt ağlardan hiçbiri için geçerli " "bir IP değil." msgid "IP address used by Nova metadata server." msgstr "Nova metadata sunucusu tarafından kullanılan IP adresi." msgid "IP allocation requires subnet_id or ip_address" msgstr "IP ayırma subnet_id veya ip_address gerektirir" #, python-format msgid "" "IPTablesManager.apply failed to apply the following set of iptables rules:\n" "%s" msgstr "" "IPTablesManager.apply aşağıdakı iptables bilgileri uygulanamadı\n" "%s" #, python-format msgid "" "IPv6 address %(address)s can not be directly assigned to a port on subnet " "%(id)s since the subnet is configured for automatic addresses" msgstr "" "Alt ağ otomatik adresler için yapılandırıldığından %(address)s IPv6 adresi " "%(id)s alt ağı üzerinde bir bağlantı noktasına doğrudan atanamaz" #, python-format msgid "" "IPv6 address %(ip)s cannot be directly assigned to a port on subnet " "%(subnet_id)s as the subnet is configured for automatic addresses" msgstr "" "Alt ağ otomatik adres olarak yapıladırıldığı için %(ip)s IPv6 adresi " "doğrudan %(subnet_id)s alt ağındaki bir bağlantı noktasına atanamaz." #, python-format msgid "" "IPv6 subnet %s configured to receive RAs from an external router cannot be " "added to Neutron Router." msgstr "" "Harici bir yönlendiriciden RA almak için yapılandırılmış %s IPv6 alt ağı " "Neutron Yönlendiriciye eklenemez." msgid "" "If True, then allow plugins that support it to create VLAN transparent " "networks." msgstr "" "True ise, destekleyen eklentilerin VLAN şeffaf ağlar oluşturmasına izin ver." msgid "Illegal IP version number" msgstr "Kuraldışı IP sürüm numarası" #, python-format msgid "Incorrect pci_vendor_info: \"%s\", should be pair vendor_id:product_id" msgstr "Geçersiz pci_vendor_info: \"%s\", vendor_id:product_id çifti olmalı" #, python-format msgid "Insufficient prefix space to allocate subnet size /%s" msgstr "/%s boyutunda alt ağ ayırmak için yetersiz önek alanı" msgid "Insufficient rights for removing default security group." msgstr "Varsayılan güvenlik grubunu silmek için yeterli izin yok." 
msgid "Interface to monitor" msgstr "İzlenecek arayüz" msgid "" "Interval between checks of child process liveness (seconds), use 0 to disable" msgstr "" "Alt süreç canlılığı kontrolleri aralığı (saniye), kapatmak için 0 kullanın" msgid "Interval between two metering measures" msgstr "İki ölçüm arasındaki aralık" msgid "Interval between two metering reports" msgstr "İki ölçme raporu arasındaki aralık" #, python-format msgid "" "Invalid CIDR %s for IPv6 address mode. OpenStack uses the EUI-64 address " "format, which requires the prefix to be /64." msgstr "" "IPv6 adres kipi için geçersiz CIDR %s. OpenStack önekin /64 olmasına ihtiyaç " "duyan EUI-64 adres biçimini kullanır." #, python-format msgid "Invalid Device %(dev_name)s: %(reason)s" msgstr "Geçersiz Aygıt %(dev_name)s: %(reason)s" #, python-format msgid "" "Invalid authentication type: %(auth_type)s, valid types are: " "%(valid_auth_types)s" msgstr "" "Geçersiz kimlik doğrulama türü: %(auth_type)s, geçerli türler: " "%(valid_auth_types)s" #, python-format msgid "Invalid content type %(content_type)s." msgstr "Geçersiz içerik türü %(content_type)s." #, python-format msgid "Invalid data format for IP pool: '%s'" msgstr "IP havuzu: '%s' için geçersiz veri biçimi" #, python-format msgid "Invalid data format for extra-dhcp-opt: %(data)s" msgstr "extra-dhcp-opt için geçersiz veri biçimi: %(data)s" #, python-format msgid "Invalid data format for fixed IP: '%s'" msgstr "Sabit IP için geçersiz veri biçimi: '%s'" #, python-format msgid "Invalid data format for hostroute: '%s'" msgstr "İstemci rotası için geçersiz veri biçimi: '%s'" #, python-format msgid "Invalid data format for nameserver: '%s'" msgstr "İsim sunucu için geçersiz veri biçimi: '%s'" #, python-format msgid "Invalid format for routes: %(routes)s, %(reason)s" msgstr "Rotalar için geçersiz biçim: %(routes)s, %(reason)s" #, python-format msgid "Invalid format: %s" msgstr "Geçersiz biçim: %s" #, python-format msgid "Invalid input for %(attr)s. Reason: %(reason)s." msgstr "%(attr)s için geçersiz girdi. Sebep: %(reason)s." #, python-format msgid "" "Invalid input. '%(target_dict)s' must be a dictionary with keys: " "%(expected_keys)s" msgstr "" "Geçersiz girdi. '%(target_dict)s' şu anahtarları içeren bir sözlük olmalı: " "%(expected_keys)s" #, python-format msgid "Invalid instance state: %(state)s, valid states are: %(valid_states)s" msgstr "Geçersiz sunucu durumu: %(state)s, geçerli durumlar: %(valid_states)s" #, python-format msgid "Invalid mapping: '%s'" msgstr "Geçersiz eşleştirme: '%s'" #, python-format msgid "Invalid pci slot %(pci_slot)s" msgstr "Geçersiz pci yuvası %(pci_slot)s" #, python-format msgid "Invalid provider format. Last part should be 'default' or empty: %s" msgstr "Geçersiz sağlayıcı biçimi. Son kısım 'default' ya da boş olmalı: %s" #, python-format msgid "Invalid route: %s" msgstr "Geçersiz rota: %s" msgid "Invalid service provider format" msgstr "Geçersiz servis sağlayıcı biçimi" #, python-format msgid "" "Invalid value for ICMP %(field)s (%(attr)s) %(value)s. It must be 0 to 255." msgstr "" "ICMP %(field)s (%(attr)s) %(value)s için geçersiz değer. 0 dan 255'e kadar " "olmalı." 
#, python-format msgid "Invalid value for port %(port)s" msgstr "%(port)s bağlantı noktası için geçersiz değer" msgid "Keepalived didn't respawn" msgstr "Keepalived yeniden başlamadı" #, python-format msgid "Key %(key)s in mapping: '%(mapping)s' not unique" msgstr "'%(mapping)s' eşleştirmesindeki %(key)s anahtarı benzersiz değil" #, python-format msgid "Limit must be an integer 0 or greater and not '%d'" msgstr "Sınır tam sayı 0 ya da daha büyüğü olmalı '%d' değil" msgid "Limit number of leases to prevent a denial-of-service." msgstr "Servis engellemeyi önlemek için kiralama sayısını sınırla." msgid "" "List of :: or " "specifying physical_network names usable for VLAN provider and tenant " "networks, as well as ranges of VLAN tags on each available for allocation to " "tenant networks." msgstr "" "VLAN sağlayıcı ve kiracı ağlar için kullanılabilir physical_network " "isimlerini belirten :: veya " " listesi, aynı zamanda her birinde kiracı ağlara ayırma " "için VLAN etiketleri aralıkları." msgid "" "List of network type driver entrypoints to be loaded from the neutron.ml2." "type_drivers namespace." msgstr "" "neutron.ml2.type_drivers isim uzayından yüklenecek ağ türü sürücü giriş " "noktaları listesi." msgid "Local IP address of the VXLAN endpoints." msgstr "VXLAN son uçlarının yerel IP adresi." msgid "Location for Metadata Proxy UNIX domain socket." msgstr "Metadata Vekil UNIX alan soketi için konum." msgid "Location of Metadata Proxy UNIX domain socket" msgstr "Metadata Vekil UNIX alan soketi konumu" msgid "Location of pid file of this process." msgstr "Bu sürecin pid dosyasının konumu." msgid "Location to store IPv6 RA config files" msgstr "IPv6 RA yapılandırma dosyalarının kaydedileceği konum" msgid "Location to store child pid files" msgstr "Alt süreç dosyalarının kaydedileceği konum" msgid "Location to store keepalived/conntrackd config files" msgstr "Keepalived/conntrackd yapılandırma dosyalarının tutulacağı konum" msgid "Log agent heartbeats" msgstr "Ajan kalp atışlarını kaydet" msgid "MTU size of veth interfaces" msgstr "veth arayüzlerinin MTU boyutu" msgid "Make the l2 agent run in DVR mode." msgstr "L2 ajanın DVR kipinde çalışmasını sağla." msgid "Malformed request body" msgstr "Kusurlu istek gövdesi" msgid "Maximum number of allowed address pairs" msgstr "İzin verilen adres çiftlerinin azami sayısı" msgid "Maximum number of host routes per subnet" msgstr "Alt ağ başına azami istemci sayısı" msgid "Metering driver" msgstr "Ölçme sürücüsü" #, python-format msgid "Metering label %(label_id)s does not exist" msgstr "Ölçme etiketi %(label_id)s mevcut değil" #, python-format msgid "Metering label rule %(rule_id)s does not exist" msgstr "Ölçme etiketi kuralı %(rule_id)s mevcut değil" #, python-format msgid "" "Metering label rule with remote_ip_prefix %(remote_ip_prefix)s overlaps " "another" msgstr "" "remote_ip_prefix %(remote_ip_prefix)s sahip ölçme etiket kuralı başka bir " "tanesiyle çatışıyor" msgid "Minimize polling by monitoring ovsdb for interface changes." msgstr "" "Sorgulamayı ovsdb arayüzünü değişiklikler için izleyerek olabildiğince azalt." 
#, python-format msgid "Missing key in mapping: '%s'" msgstr "Eşleştirmede anahtar eksik: '%s'" #, python-format msgid "Missing value in mapping: '%s'" msgstr "Eşleştirmede değer eksik: '%s'" #, python-format msgid "Multiple agents with agent_type=%(agent_type)s and host=%(host)s found" msgstr "" "agent_type=%(agent_type)s ve istemci=%(host)s olan birden fazla ajan bulundu" #, python-format msgid "Multiple default providers for service %s" msgstr "%s servisi için birden fazla varsayılan sağlayıcı" #, python-format msgid "Multiple plugins for service %s were configured" msgstr "%s servisi için birden fazla eklenti yapılandırılmış" #, python-format msgid "Multiple providers specified for service %s" msgstr "%s servisi için birden fazla sağlayıcı belirtilmiş" msgid "Multiple tenant_ids in bulk security group rule create not allowed" msgstr "" "Toplu güvenlik grubu kuralı oluşturmada birden çok tenant_id'ye izin " "verilmiyor" msgid "Must specify one or more actions on flow addition or modification" msgstr "Akış ekleme ya da değiştirmede bir ya da fazla eylem belirtilmeli" msgid "Name of Open vSwitch bridge to use" msgstr "Kullanılacak Open vSwitch köprüsünün ismi" msgid "" "Name of nova region to use. Useful if keystone manages more than one region." msgstr "" "Kullanılacak nova gölgesinin ismi. Keystone birden fazla bölgeyi yönetiyorsa " "kullanışlıdır." msgid "Name of the FWaaS Driver" msgstr "FWaaS Sürücüsü ismi" msgid "Namespace of the router" msgstr "Yönetici isim uzayı" msgid "Native pagination depend on native sorting" msgstr "Doğal sayfalama doğal sıralamaya bağlıdır" msgid "Negative delta (downgrade) not supported" msgstr "Negatif fark (alt sürüm) desteklenmiyor" msgid "Negative relative revision (downgrade) not supported" msgstr "Negatif ilişkili sürüm (alt sürüm) desteklenmiyor" #, python-format msgid "Network %s is not a valid external network" msgstr "%s ağı geçerli bir harici ağ değil" #, python-format msgid "Network %s is not an external network" msgstr "Ağ %s harici bir ağ değil" #, python-format msgid "" "Network of size %(size)s, from IP range %(parent_range)s excluding IP ranges " "%(excluded_ranges)s was not found." msgstr "" "%(excluded_ranges)s IP aralıkları hariç %(parent_range)s IP aralığından " "%(size)s botyutunda ağ bulunamadı." msgid "Network that will have instance metadata proxied." msgstr "Sunucu metadata'sı vekillenecek ağ." #, python-format msgid "Network type value '%s' not supported" msgstr "Ağ türü değeri '%s' desteklenmiyor" msgid "Network type value needed by the ML2 plugin" msgstr "ML2 eklentisi ağ türü değerine ihtiyaç duyuyor" msgid "Network types supported by the agent (gre and/or vxlan)." msgstr "Ajan tarafından desteklenen ağ türleri (gre ve/veya vxlan)." msgid "Neutron Service Type Management" msgstr "Neutron Servis Türü Yönetimi" msgid "Neutron core_plugin not configured!" msgstr "Neutron core_plugin yapılandırılmamış!" msgid "Neutron plugin provider module" msgstr "Neutron eklenti sağlayıcı modülü" msgid "Neutron quota driver class" msgstr "Neutron kota sürücü sınıf" #, python-format msgid "No eligible l3 agent associated with external network %s found" msgstr "%s harici ağıyla ilişkilendirilmiş uygun l3 ajanı bulunamadı" #, python-format msgid "No more IP addresses available for subnet %(subnet_id)s." msgstr "%(subnet_id)s alt ağı için kullanılabilir başka IP adresi yok." #, python-format msgid "" "No more Virtual Router Identifier (VRID) available when creating router " "%(router_id)s. The limit of number of HA Routers per tenant is 254." 
msgstr "" "%(router_id)s yönlendiricisi oluşturulurken kullanılabilir Sanal " "Yönlendirici Tanımlayıcı (VRID) yok. Kiracı başına HA Yönlendirici sayısı " "sınırı 254." #, python-format msgid "No providers specified for '%s' service, exiting" msgstr "'%s' servisi için sağlayıcı belirtilmemiş, çıkılıyor" #, python-format msgid "" "Not enough l3 agents available to ensure HA. Minimum required " "%(min_agents)s, available %(num_agents)s." msgstr "" "HA'dan emin olmak için yeterli l3 ajanı yok. Asgari %(min_agents)s gerekli, " "kullanılabilir %(num_agents)s var." msgid "" "Number of DHCP agents scheduled to host a tenant network. If this number is " "greater than 1, the scheduler automatically assigns multiple DHCP agents for " "a given tenant network, providing high availability for DHCP service." msgstr "" "Bir kiracı ağı sunmak için zamanlanan DHCP ajanları sayısı. Bu sayı 1'den " "büyükse, zamanlayıcı verilen bir kiracı ağa otomatik olarak birden çok DHCP " "ajanı atar, ve DHCP servisi için yüksek kullanılabilirlik sağlar." msgid "Number of RPC worker processes for service" msgstr "Servis için RPC işçi süreçlerinin sayısı" msgid "Number of backlog requests to configure the metadata server socket with" msgstr "Metadata sunucu soketinin yapılandırılacağı birikmiş isteklerin sayısı" msgid "Number of backlog requests to configure the socket with" msgstr "Soketin birlikte yapılandırılacağı backlog isteklerinin sayısı" msgid "" "Number of floating IPs allowed per tenant. A negative value means unlimited." msgstr "" "Kiracı başına izin verilen değişken IP sayısı. Negatif değer sınırsız " "demektir." msgid "" "Number of networks allowed per tenant. A negative value means unlimited." msgstr "" "Kiracı başına izin verilen ağ sayısı. Negatif değer sınırsız anlamına gelir." msgid "Number of ports allowed per tenant. A negative value means unlimited." msgstr "" "Kiracı başına izin verilen bağlantı noktası sayısı. Negatif değer sınırsız " "anlamına gelir." msgid "Number of routers allowed per tenant. A negative value means unlimited." msgstr "" "Kiracı başına izin verilen yönlendirici sayısı. Negatif değer sınırsız " "anlamına gelir." msgid "" "Number of seconds between sending events to nova if there are any events to " "send." msgstr "" "Gönderilecek olay varsa olayların nova'ya gönderilmesi arasında beklenecek " "saniye sayısı." msgid "Number of seconds to keep retrying to listen" msgstr "Dinlemeye devam etmek için saniye sayısı" msgid "" "Number of security groups allowed per tenant. A negative value means " "unlimited." msgstr "" "Kiracı başına izin verilen güvenlik grubu sayısı. Negatif değer sınırsız " "anlamına gelir." msgid "" "Number of security rules allowed per tenant. A negative value means " "unlimited." msgstr "" "Kiracı başına izin verilen güvenlik kuralı sayısı. Negatif bir değer " "sınırsız demektir." msgid "" "Number of separate API worker processes for service. If not specified, the " "default is equal to the number of CPUs available for best performance." msgstr "" "Servis için ayrı API işçi süreçlerinin sayısı. Belirtilmezse, varsayılan " "olarak en iyi performans için CPU sayısına eşit değerdir." msgid "" "Number of separate worker processes for metadata server (defaults to half of " "the number of CPUs)" msgstr "" "Metadata sunucu için ayrı işçi süreçleri sayısı (CPU sayısının yarısı " "varsayılır)" msgid "Number of subnets allowed per tenant, A negative value means unlimited." msgstr "" "Kiracı başına izin verilen alt ağ sayısı, negatif değer sınırsız anlamına " "gelir." 
msgid "OK" msgstr "Tamam" #, python-format msgid "OVSDB Error: %s" msgstr "OVSDB Hatası: %s" msgid "Only admin can view or configure quota" msgstr "Yalnızca yönetici kotaları görüntüleyebilir ya da yapılandırabilir" msgid "Only admin is authorized to access quotas for another tenant" msgstr "Yalnızca yönetici başka bir kiracı için kotalara erişebilir" msgid "Only allowed to update rules for one security profile at a time" msgstr "" "Tek seferde bir güvenlik profili için kuralların güncellenmesine izin verilir" msgid "Only remote_ip_prefix or remote_group_id may be provided." msgstr "Yalnızca remote_ip_prefix veya remote_group_id sağlanabilir." msgid "OpenFlow interface to use." msgstr "Kullanılacak OpenFlow arayüzü." #, python-format msgid "" "Operation %(op)s is not supported for device_owner %(device_owner)s on port " "%(port_id)s." msgstr "" "İşlem %(op)s %(port_id)s bağlantı noktası üzerinde %(device_owner)s " "device_owner için desteklenmiyor." msgid "Owner type of the device: network/compute" msgstr "Aygıt sahip türü: ağ/hesap" msgid "POST requests are not supported on this resource." msgstr "POST istekleri bu kaynakta desteklenmiyor." #, python-format msgid "Parsing bridge_mappings failed: %s." msgstr "bridge_mappins ayrıştırma başarısız: %s." msgid "Parsing supported pci_vendor_devs failed" msgstr "Desteklenen pci_vendor_devs ayrıştırması başarısız" msgid "Path to PID file for this process" msgstr "Bu sürecin PID dosyasının yolu" msgid "Path to the router directory" msgstr "Yönlendirici dizininin yolu" msgid "Peer patch port in integration bridge for tunnel bridge." msgstr "Tünel köprüsü için tümleştirme köprüsündeki eş yama bağlantı noktası." msgid "Peer patch port in tunnel bridge for integration bridge." msgstr "Tümleştirme köprüsü için tünel köprüsündeki eş yama bağlantı noktası." msgid "Ping timeout" msgstr "Ping zaman aşımı" msgid "Plugin does not support updating provider attributes" msgstr "Eklenti sağlayıcı özniteliklerini güncellemeyi desteklemiyor" #, python-format msgid "Port %(id)s does not have fixed ip %(address)s" msgstr "%(id)s bağlantı noktası %(address)s sabit ip'sine sahip değil" #, python-format msgid "Port %(port)s does not exist on %(bridge)s!" msgstr "Bağlantı noktası %(port)s %(bridge)s köprüsü üzerinde mevcut değil!" #, python-format msgid "" "Port %(port_id)s is associated with a different tenant than Floating IP " "%(floatingip_id)s and therefore cannot be bound." msgstr "" "%(port_id)s bağlantı noktası %(floatingip_id)s değişken IP'sinden başka bir " "kiracı ile ilişkilendirilmiş bu yüzden bağlanamaz." #, python-format msgid "Port %s does not exist" msgstr "Bağlantı noktası %s mevcut değil" msgid "" "Port Security must be enabled in order to have allowed address pairs on a " "port." msgstr "" "Bağlantı noktasında izin verilen adres çiftlerine sahip olmak için bağlantı " "noktası güvenliği etkin olmalı." msgid "" "Port has security group associated. Cannot disable port security or ip " "address until security group is removed" msgstr "" "Bağlantı noktasıyla ilişkili güvenlik grubu var. Güvenlik grubu kaldırılana " "kadar bağlantı noktası güvenliği ya da ip adresi kapatılamaz" msgid "" "Port security must be enabled and port must have an IP address in order to " "use security groups." msgstr "" "Güvenlik gruplarını kullanmak için bağlantı noktası güvenliği etkin olmalı " "ve bağlantı noktası bir IP adresine sahip olmalı." msgid "Private key of client certificate." msgstr "İstemci sertifikasının özel anahtarı." 
#, python-format msgid "Probe %s deleted" msgstr "Deneme %s silindi" #, python-format msgid "Probe created : %s " msgstr "Deneme oluşturuldu: %s " msgid "Process is already started" msgstr "Süreç zaten başlamış" msgid "Process is not running." msgstr "Süreç çalışmıyor." msgid "Protocol to access nova metadata, http or https" msgstr "Nova metadata'ya erişmek için iletişim kuralı, http veya https" msgid "" "Range of seconds to randomly delay when starting the periodic task scheduler " "to reduce stampeding. (Disable by setting to 0)" msgstr "" "Devresel görev zamanlayıcıyı başlatırken izdiham yaratmayı engellemek için " "beklenecek rastgele saniye aralığı. (0 olarak ayarlayıp kapatabilirsiniz)" msgid "Remote metadata server experienced an internal server error." msgstr "Uzak metadata sunucu dahil sunucu hatası yaşadı." msgid "" "Representing the resource type whose load is being reported by the agent. " "This can be \"networks\", \"subnets\" or \"ports\". When specified (Default " "is networks), the server will extract particular load sent as part of its " "agent configuration object from the agent report state, which is the number " "of resources being consumed, at every report_interval.dhcp_load_type can be " "used in combination with network_scheduler_driver = neutron.scheduler." "dhcp_agent_scheduler.WeightScheduler When the network_scheduler_driver is " "WeightScheduler, dhcp_load_type can be configured to represent the choice " "for the resource being balanced. Example: dhcp_load_type=networks" msgstr "" "Ajan tarafından yükü rapor edilen kaynak türünü temsil eder. Bu \"ağlar\", " "\"alt ağlar\", veya \"bağlantı noktaları\" olabilir. Belirtildiğinde " "(varsayılanı ağlardır), sunucu ajan yapılandırma nesnesinin parçası olarak " "gönderilen belirli yükü ajan rapor durumundan çıkartır, ki bu her " "report_interval'da tüketilen kaynak sayısıdır. dhcp_load_type " "network_scheduler_driver WeightScheduler olduğunda network_scheduler_driver " "= neutron.scheduler.WeightScheduler ile birlikte kullanılabilir, " "dhcp_load_type dengelenen kaynak için seçimi temsil edecek şekilde " "yapılandırılabilir. Örneğin: dhcp_load_type=networks" msgid "Request Failed: internal server error while processing your request." msgstr "İstek Başarısız: isteğiniz işlenirken dahili sunucu hatası oluştu." #, python-format msgid "" "Request contains duplicate address pair: mac_address %(mac_address)s " "ip_address %(ip_address)s." msgstr "" "İstek kopya adres çifti içeriyor: mac_address %(mac_address)s ip_address " "%(ip_address)s." #, python-format msgid "" "Requested subnet with cidr: %(cidr)s for network: %(network_id)s overlaps " "with another subnet" msgstr "" "%(network_id)s ağı için istenen %(cidr)s cidr'e sahip alt ağ başka bir alt " "ağla çatışıyor" #, python-format msgid "" "Resource '%(resource_id)s' is already associated with provider " "'%(provider)s' for service type '%(service_type)s'" msgstr "" "'%(resource_id)s' kaynağı '%(service_type)s' servis türü için zaten " "'%(provider)s' sağlayıcısıyla ilişkilendirilmiş" msgid "Resource body required" msgstr "Kaynak gövdesi gerekiyor" msgid "" "Resource name(s) that are supported in quota features. This option is now " "deprecated for removal." msgstr "" "Kota özelliklerinde desteklenen kaynak isim(ler)i. Bu seçenek artık " "kaldırılmak üzere kullanılmıyor." msgid "Resource not found." msgstr "Kaynak bulunamadı." msgid "Resources required" msgstr "Kaynaklar gerekiyor" msgid "Root helper daemon application to use when possible." 
msgstr "Mümkün olduğunda kullanılacak kök yardımcı artalan işlemi uygulaması." msgid "Root permissions are required to drop privileges." msgstr "İzinlerin düşürülmesi için Root izinleri gerekli." #, python-format msgid "Router %(router_id)s %(reason)s" msgstr "Yönlendirici %(router_id)s %(reason)s" #, python-format msgid "Router %(router_id)s could not be found" msgstr "Yönlendirici %(router_id)s bulunamadı" #, python-format msgid "Router %(router_id)s does not have an interface with id %(port_id)s" msgstr "" "%(router_id)s yönlendiricisi %(port_id)s kimliğine sahip bir arayüze sahip " "değil" #, python-format msgid "Router %(router_id)s has no interface on subnet %(subnet_id)s" msgstr "" "%(router_id)s yönlendiricisi %(subnet_id)s alt ağı üzerinde arayüze sahip " "değil" #, python-format msgid "Router already has a port on subnet %s" msgstr "Yönlendirici zaten %s alt ağında bir bağlantı noktasına sahip" #, python-format msgid "" "Router interface for subnet %(subnet_id)s on router %(router_id)s cannot be " "deleted, as it is required by one or more floating IPs." msgstr "" "%(router_id)s yönlendiricisi üstündeki %(subnet_id)s alt ağı için " "yönlendirici arayüzü silinemez, bir ya da fazla değişken IP tarafından " "ihtiyaç duyuluyor." #, python-format msgid "" "Router interface for subnet %(subnet_id)s on router %(router_id)s cannot be " "deleted, as it is required by one or more routes." msgstr "" "%(router_id)s yönlendiricisi üzerindeki %(subnet_id)s alt ağı için rota " "arayüzü silinemiyor, çünkü bir ya da fazla rota tarafından ihtiyaç duyuluyor." msgid "Router that will have connected instances' metadata proxied." msgstr "Bağlı sunucuların metadata'larının vekilleneceği yönlendirici." msgid "Run as daemon." msgstr "Artalan işlemi olarak çalış." msgid "" "Seconds between nodes reporting state to server; should be less than " "agent_down_time, best if it is half or less than agent_down_time." msgstr "" "Düğümlerin sunucuya durum raporu yapması arasında geçen saniye; " "agent_down_time'dan az olmalı, en iyisi agent_down_time'ın yarısı ya da daha " "azı olmasıdır." msgid "Seconds between running periodic tasks" msgstr "Devresel görevleri çalıştırma arasındaki saniye" msgid "" "Seconds to regard the agent is down; should be at least twice " "report_interval, to be sure the agent is down for good." msgstr "" "Ajanın çalışmıyor olduğuna karar vermek için geçmesi gereken saniye; ajanın " "gerçekten kapalı olduğundan emin olmak için report_interval değerinin en az " "iki katı olmalı." #, python-format msgid "Security Group %(id)s %(reason)s." msgstr "Güvenlik Grubu %(id)s %(reason)s." #, python-format msgid "Security Group Rule %(id)s %(reason)s." msgstr "Güvenlik Grubu Kuralı %(id)s %(reason)s." #, python-format msgid "Security group %(id)s does not exist" msgstr "%(id)s güvenlik grubu mevcut değil" #, python-format msgid "Security group rule %(id)s does not exist" msgstr "Güvenlik grubu kuralı %(id)s mevcut değil" #, python-format msgid "" "Security group rule protocol %(protocol)s not supported. Only protocol " "values %(values)s and integer representations [0 to 255] are supported." msgstr "" "Güvenlik grubu kuralı iletişim kuralı %(protocol)s desteklenmiyor. Yalnızca " "iletişim kuralı değerleri %(values)s ve tam sayı temsilleri [0 dan 255 e] " "destekleniyor." msgid "Segments and provider values cannot both be set." msgstr "Dilimler ve sağlayıcı değerleri aynı anda ayarlanamaz." 
msgid "" "Send notification to nova when port data (fixed_ips/floatingip) changes so " "nova can update its cache." msgstr "" "Bağlantı noktası verisi (sabit_ipler/değişkenip) değiştiğinde nova'ya " "bildirim gönder ki nova zulasını güncelleyebilsin." msgid "Send notification to nova when port status changes" msgstr "Bağlantı noktası durumu değiştiğinde nova'ya bildirim gönder" msgid "" "Send this many gratuitous ARPs for HA setup, if less than or equal to 0, the " "feature is disabled" msgstr "" "HA kurulumu için bu kadar karşılıksız ARP gönder, 0'a eşit ya da küçükse, " "özellik kapatılır" #, python-format msgid "" "Service provider '%(provider)s' could not be found for service type " "%(service_type)s" msgstr "" "%(service_type)s servis türü için '%(provider)s' servis sağlayıcı bulunamadı" #, python-format msgid "Service type %(service_type)s does not have a default service provider" msgstr "%(service_type)s servis türü varsayılan servis sağlayıcıya sahip değil" msgid "" "Set new timeout in seconds for new rpc calls after agent receives SIGTERM. " "If value is set to 0, rpc timeout won't be changed" msgstr "" "Ajan SIGTERM aldıktan sonra yeni rpc çağrıları için saniye olarak yeni zaman " "aşımı ayarla. Değer 0 olarak ayarlanırsa, rpc zaman aşımı değiştirilmeyecek" msgid "" "Set or un-set the don't fragment (DF) bit on outgoing IP packet carrying GRE/" "VXLAN tunnel." msgstr "" "Dışa giden IP paketi taşıyan GRE/VXLAN tünelinde bölümlenme yapma (DF) " "bitini ayarla ya da ayarlama." msgid "Shared address scope can't be unshared" msgstr "Paylaşılan adres kapsamının paylaştırılması durdurulamaz" msgid "" "Specifying 'tenant_id' other than authenticated tenant in request requires " "admin privileges" msgstr "" "İstekte doğrulanmış kiracıdan başka 'tenant_id' belirtme yönetici yetkileri " "gerektirir" msgid "String prefix used to match IPset names." msgstr "IPset isimleriyle eşleştirme için kullanılan karakter dizisi önekleri." msgid "Subnet for router interface must have a gateway IP" msgstr "Yönlendirici arayüzü için alt ağ bir geçit IP'ye sahip olmalı" msgid "" "Subnet has a prefix length that is incompatible with DHCP service enabled." msgstr "Alt ağ etkin DHCP servisiyle uyumsz bir önek uzunluğuna sahip." msgid "Subnet pool has existing allocations" msgstr "Alt ağ havuzunun mevcut ayırmaları var" msgid "Subnet used for the l3 HA admin network." msgstr "L3 HA yönetici ağı için kullanılan alt ağ." msgid "Suffix to append to all namespace names." msgstr "Tüm isim uzaylarına eklenecek son ek." msgid "" "System-wide flag to determine the type of router that tenants can create. " "Only admin can override." msgstr "" "Kiracıların oluşturabileceği yönlendirici türünü belirlemek için sistem " "genelinde bayrak. Yalnızca yönetici üzerine yazabilir." msgid "TCP Port to listen for metadata server requests." msgstr "Metadata sunucu istekleri için dinlenecek TCP bağlantı noktası." msgid "TCP Port used by Neutron metadata namespace proxy." msgstr "" "Neutron metadata isim uzayı vekili tarafından kullanılan TCP bağlantı " "noktası." msgid "TCP Port used by Nova metadata server." msgstr "Nova metadata sunucusu tarafından kullanılan TCP Bağlantı noktası." msgid "TOS for vxlan interface protocol packets." msgstr "Vxlan arayüz iletişim paketleri için TOS." msgid "TTL for vxlan interface protocol packets." msgstr "Vxlan arayüz iletişim kuralı paketleri için TTL." 
#, python-format msgid "Table %s can only be queried by UUID" msgstr "Tablo %s yalnızca UUID ile sorgulanabilir" #, python-format msgid "Tenant %(tenant_id)s not allowed to create %(resource)s on this network" msgstr "" "Kiracı %(tenant_id)s'in bu ağda %(resource)s oluşturmasına izin verilmiyor" msgid "Tenant network creation is not enabled." msgstr "Kiracı ağ oluşturma etkin değil." msgid "" "The 'gateway_external_network_id' option must be configured for this agent " "as Neutron has more than one external network." msgstr "" "Neutron birden fazla harici ağa sahip olduğundan " "'gateway_external_network_id' seçeneği bu ajan için yapılandırılmalıdır." #, python-format msgid "" "The HA Network CIDR specified in the configuration file isn't valid; " "%(cidr)s." msgstr "" "Yapılandırma dosyasında belirtilen HA Ağ CIDR'i geçerli değil; %(cidr)s." msgid "The UDP port to use for VXLAN tunnels." msgstr "VXLAN tünelleri için kullanılacak UDP bağlantı noktası." #, python-format msgid "" "The address allocation request could not be satisfied because: %(reason)s" msgstr "Adres ayırma isteği sağlanamadı çünkü: %(reason)s" msgid "The advertisement interval in seconds" msgstr "Saniye cinsinden duyuru aralığı" #, python-format msgid "The allocation pool %(pool)s is not valid." msgstr "Ayırma havuzu %(pool)s geçerli değil." #, python-format msgid "" "The allocation pool %(pool)s spans beyond the subnet cidr %(subnet_cidr)s." msgstr "" "Ayırma havuzu %(pool)s %(subnet_cidr)s alt ağ cidr'inin ötesine uzanıyor." #, python-format msgid "" "The attribute '%(attr)s' is reference to other resource, can't used by sort " "'%(resource)s'" msgstr "" "'%(attr)s' özniteliği başka kaynağa referans, '%(resource)s' sıralama " "tarafından kullanılamaz" msgid "The core plugin Neutron will use" msgstr "Neutron'un kullanacağı çekirdek eklenti" msgid "The driver used to manage the DHCP server." msgstr "DHCP sunucusunu yönetmek için kullanılan sürücü." msgid "The driver used to manage the virtual interface." msgstr "Sanal arayüzü yönetmek için kullanılan sürücü." #, python-format msgid "" "The following device_id %(device_id)s is not owned by your tenant or matches " "another tenants router." msgstr "" "device_id %(device_id)s sizin kiracınıza ait değil veya başka bir kiracının " "yönlendiricisiyle eşleşiyor." msgid "The host IP to bind to" msgstr "Bağlanılacak istemci IP'si" msgid "The interface for interacting with the OVSDB" msgstr "OVSDB ile etkileşim için arayüz" msgid "" "The maximum number of items returned in a single response, value was " "'infinite' or negative integer means no limit" msgstr "" "Tek bir yanıtta döndürülecek azami öğe sayısı, 'infinite' değeri ya da " "negatif tam sayı sınır yok demektir" #, python-format msgid "" "The network %(network_id)s has been already hosted by the DHCP Agent " "%(agent_id)s." msgstr "Ağ %(network_id)s zaten %(agent_id)s DHCP Ajanı tarafından sunuluyor." #, python-format msgid "" "The network %(network_id)s is not hosted by the DHCP agent %(agent_id)s." msgstr "Ağ %(network_id)s %(agent_id)s DHCP ajanı tarafından sunulmuyor." #, python-format msgid "The number of allowed address pair exceeds the maximum %(quota)s." msgstr "İzin verilen adres çifti sayısı %(quota)s azami değerini aşıyor." msgid "" "The number of seconds the agent will wait between polling for local device " "changes." msgstr "" "Ajanın yerel aygıt değişiklikleri için sorgulama yapma aralığında " "bekleyeceği saniye sayısı." 
msgid "" "The number of seconds to wait before respawning the ovsdb monitor after " "losing communication with it." msgstr "" "İletişim koptuktan sonra ovsdb izleyiciyi yeniden başlatmak için beklenecek " "saniye sayısı." msgid "The number of sort_keys and sort_dirs must be same" msgstr "sort_keys ile sort_dirs sayıları aynı olmalı" #, python-format msgid "The port '%s' was deleted" msgstr "Bağlantı noktası '%s' silinmiş" msgid "The port to bind to" msgstr "Bağlanılacak bağlantı noktası" #, python-format msgid "The requested content type %s is invalid." msgstr "İstenen içerik türü %s geçersiz." msgid "The resource could not be found." msgstr "Kaynak bulunamadı." #, python-format msgid "" "The router %(router_id)s has been already hosted by the L3 Agent " "%(agent_id)s." msgstr "" "%(router_id)s yönlendiricisi zaten %(agent_id)s L3 Ajanı tarafından " "sunuluyor." msgid "" "The server has either erred or is incapable of performing the requested " "operation." msgstr "" "Sunucu ya hata verdi ya da istenen işlemi yapabilecek yeterlilikte değil." msgid "The service plugins Neutron will use" msgstr "Neutron'un kullanacağı servis eklentileri" #, python-format msgid "The subnet request could not be satisfied because: %(reason)s" msgstr "Alt ağ isteği sağlanamadı çünkü: %(reason)s" msgid "The type of authentication to use" msgstr "Kullanılacak kimlik doğrulama türü" #, python-format msgid "The value '%(value)s' for %(element)s is not valid." msgstr "%(element)s için '%(value)s' değeri geçerli değil." msgid "" "The working mode for the agent. Allowed modes are: 'legacy' - this preserves " "the existing behavior where the L3 agent is deployed on a centralized " "networking node to provide L3 services like DNAT, and SNAT. Use this mode if " "you do not want to adopt DVR. 'dvr' - this mode enables DVR functionality " "and must be used for an L3 agent that runs on a compute host. 'dvr_snat' - " "this enables centralized SNAT support in conjunction with DVR. This mode " "must be used for an L3 agent running on a centralized node (or in single-" "host deployments, e.g. devstack)" msgstr "" "Ajanın çalışma kipi. İzin verilen kipler: 'legacy' - Bu L3 ajanının DNAT, ve " "SNAT gibi L3 servisleri sağlamak için merkezi ağ düğümüne kurulduğu mevcut " "davranışı korur. DVR'a geçmek istemiyorsanız bu kipi kullanın. 'dvr' - bu " "kip DVR işlevini etkinleştirir ve bir hesap istemcisi üzerinde çalışan L3 " "ajanı için kullanılmalıdır. 'dvr_snat' - bu DVR ile beraber merkezi SNAT " "desteğini etkinleştirir. Bu kip merkezi bir düğümde çalışan L3 ajanı için " "kullanılmalıdır (veya tek-istemcili kurulumlarda, örn. devstack)" msgid "Timeout" msgstr "Zaman aşımı" msgid "" "True to delete all ports on all the OpenvSwitch bridges. False to delete " "ports created by Neutron on integration and external network bridges." msgstr "" "Tüm OpenvSwitch köprülerinde tüm bağlantı noktalarını silmek için True. " "Neutron tarafından tümleştirme ve harici ağ köprüleri üzerinde oluşturulan " "bağlantı noktalarını silmek için False." msgid "Tunnel IP value needed by the ML2 plugin" msgstr "Tünel IP değerine ML2 eklentisi tarafından ihtiyaç duyuluyor" msgid "Tunnel bridge to use." msgstr "Kullanılacak tünel köprüsü." 
msgid "URL to database" msgstr "Veri tabanı URL'si" #, python-format msgid "Unable to access %s" msgstr "%s'e erişilemiyor" #, python-format msgid "Unable to calculate %(address_type)s address because of:%(reason)s" msgstr "%(reason)s sebebiyle %(address_type)s adresi hesaplanamıyor" #, python-format msgid "" "Unable to complete operation for %(router_id)s. The number of routes exceeds " "the maximum %(quota)s." msgstr "" "%(router_id)s için işlem tamamlanamıyor. Rota sayısı %(quota)s azami " "sayısını aşıyor." #, python-format msgid "" "Unable to complete operation for %(subnet_id)s. The number of DNS " "nameservers exceeds the limit %(quota)s." msgstr "" "%(subnet_id)s için işlem tamamlanamadı. DNS isim sunucuları sayısı %(quota)s " "sayısını aşıyor." #, python-format msgid "" "Unable to complete operation for %(subnet_id)s. The number of host routes " "exceeds the limit %(quota)s." msgstr "" "%(subnet_id)s için işlem tamamlanamadı. İstemci rotaları sayısı %(quota)s " "sınırını aşıyor." #, python-format msgid "Unable to convert value in %s" msgstr "%s degeri dönüştürülemiyor" msgid "Unable to create the Agent Gateway Port" msgstr "Ajan Geçit Bağlantı Noktası oluşturulamıyor" msgid "Unable to create the SNAT Interface Port" msgstr "SNAT Arayüz Bağlantı Noktası oluşturulamıyor" #, python-format msgid "" "Unable to create the flat network. Physical network %(physical_network)s is " "in use." msgstr "Düz ağ oluşturulamıyor. Fiziksel ağ %(physical_network)s kullanımda." msgid "" "Unable to create the network. No available network found in maximum allowed " "attempts." msgstr "" "Ağ oluşturulamıyor. İzin verilen azami deneme içinde kullanılabilir ağ " "bulunamadı." #, python-format msgid "Unable to determine mac address for %s" msgstr "%s içim mac adresi tanımlanamadı" #, python-format msgid "Unable to find '%s' in request body" msgstr "İstek gövdesinde '%s' bulunamadı" #, python-format msgid "Unable to find IP address %(ip_address)s on subnet %(subnet_id)s" msgstr "%(subnet_id)s alt ağında %(ip_address)s IP adresi bulunamıyor" #, python-format msgid "Unable to find resource name in %s" msgstr "%s içinde kaynak ismi bulunamadı" msgid "Unable to generate IP address by EUI64 for IPv4 prefix" msgstr "IPv4 öneki için EUI64 ile IP adresi üretilemedi" #, python-format msgid "Unable to generate unique DVR mac for host %(host)s." msgstr "%(host)s istemcisi için benzersiz DVR mac üretilemedi." #, python-format msgid "Unable to generate unique mac on network %(net_id)s." msgstr "%(net_id)s ağı üzerinde benzersiz mac üretilemedi." #, python-format msgid "" "Unable to identify a target field from:%s. Match should be in the form " "%%()s" msgstr "" "%s den bir hedef alan tanımlanamadı. Eşleşme %%()s biçiminde " "olmalı" #, python-format msgid "Unable to update address scope %(address_scope_id)s : %(reason)s" msgstr "Adres kapsamı %(address_scope_id)s güncellenemiyor: %(reason)s" #, python-format msgid "" "Unable to verify match:%(match)s as the parent resource: %(res)s was not " "found" msgstr "Eşleşme:%(match)s doğrulanamadı çünkü üst kaynak: %(res)s bulunamadı" #, python-format msgid "Unexpected response code: %s" msgstr "Beklenmedik yanıt kodu: %s" #, python-format msgid "Unexpected response: %s" msgstr "Beklenmeyen yanıt: %s" msgid "Unknown API version specified" msgstr "Bilinmeyen API sürümü belirtildi" #, python-format msgid "Unknown address type %(address_type)s" msgstr "Bilinmeyen adres türü %(address_type)s" #, python-format msgid "Unknown attribute '%s'." msgstr "Bilinmeyen öznitelik '%s'." 
#, python-format msgid "Unknown chain: %r" msgstr "Tanınmayan zincir: %r" #, python-format msgid "Unknown quota resources %(unknown)s." msgstr "Bilinmeyen kota kaynakları %(unknown)s." msgid "Unmapped error" msgstr "Unmapped hata" msgid "Unrecognized action" msgstr "Tanınmayan eylem" #, python-format msgid "Unrecognized attribute(s) '%s'" msgstr "Tanınmayan öznitelik(ler) '%s'" msgid "Unsupported Content-Type" msgstr "Desteklenmeyen içerik türü" #, python-format msgid "Unsupported network type %(net_type)s." msgstr "Desteklenmeyen ağ türü %(net_type)s." msgid "Unsupported request type" msgstr "Desteklenmeyen istek türü" msgid "Updating default security group not allowed." msgstr "Varsayılan güvenlik grubunu güncellemeye izin verilmiyor." msgid "" "Use ML2 l2population mechanism driver to learn remote MAC and IPs and " "improve tunnel scalability." msgstr "" "Uzak MAC ve IP'leri öğrenmek ve tünel ölçeklenebilirliğini artırmak için ML2 " "l2population mekanizması sürücüsünü kullan." msgid "Use either --delta or relative revision, not both" msgstr "Ya --delta ya ilişkili sürüm kullanın, ikisini birden değil" msgid "User (uid or name) running metadata proxy after its initialization" msgstr "" "İlklendirilmesinden sonra metadata vekili çalıştıran kullanıcı (uid veya " "isim)" msgid "" "User (uid or name) running metadata proxy after its initialization (if " "empty: agent effective user)." msgstr "" "İlklendirilmesinden sonra metadata vekilini çalıştıran kullanıcı (uid veya " "isim) (boşsa: ajan etkin kullanıcı)." msgid "User (uid or name) running this process after its initialization" msgstr "" "İlklendirilmesinden sonra bu süreci çalıştıran kullanıcı (uid veya isim)" msgid "VRRP authentication password" msgstr "VRRP kimlik doğrulama parolası" msgid "VRRP authentication type" msgstr "VRRP kimlik doğrulama türü" #, python-format msgid "" "Validation of dictionary's keys failed. Expected keys: %(expected_keys)s " "Provided keys: %(provided_keys)s" msgstr "" "Sözlük anahtarlarının doğrulanması başarısız. Beklenen anahtarlar: " "%(expected_keys)s Sağlanan anahtarlar: %(provided_keys)s" #, python-format msgid "Validator '%s' does not exist." msgstr "Onaylayan '%s' mevcut değil." #, python-format msgid "Value %(value)s in mapping: '%(mapping)s' not unique" msgstr "'%(mapping)s' eşleştirmesindeki %(value)s değeri benzersiz değil" msgid "" "Watch file log. Log watch should be disabled when metadata_proxy_user/group " "has no read/write permissions on metadata proxy log file." msgstr "" "Dosya kaydını izle. Kayıt izleme metadata_proxy_user/group metadata vekil " "kayıt dosyasına okuyamıyorsa/yazamıyorsa kapatılmalıdır." msgid "" "Where to store Neutron state files. This directory must be writable by the " "agent." msgstr "" "Neutron durum dosyalarının nerede depolanacağı. Bu dizin ajan tarafından " "yazılabilir olmalıdır." msgid "" "With IPv6, the network used for the external gateway does not need to have " "an associated subnet, since the automatically assigned link-local address " "(LLA) can be used. However, an IPv6 gateway address is needed for use as the " "next-hop for the default route. If no IPv6 gateway address is configured " "here, (and only then) the neutron router will be configured to get its " "default route from router advertisements (RAs) from the upstream router; in " "which case the upstream router must also be configured to send these RAs. " "The ipv6_gateway, when configured, should be the LLA of the interface on the " "upstream router. 
If a next-hop using a global unique address (GUA) is " "desired, it needs to be done via a subnet allocated to the network and not " "through this parameter. " msgstr "" "IPv6 ile, harici geçit için kullanılan ağ ilişkili bir alt ağa sahip olmak " "zorunda değildir, çünkü otomatik olarak atanan bağlantı-yerel adres (LLA) " "kullanılabilir. Ancak bir IPv6 geçit adresine varsayılan rota için sonraki-" "nokta olarak kullanılmak üzere ihtiyaç vardır. Burada bir IPv6 geçit adresi " "yapılandırılmazsa, (ve yalnızca bu durumda) neutron yönlendirici, varsayılan " "rotasını üst seviye yönlendirici duyurularından (RA) alacak şekilde " "yapılandırılır; ki bu durumda üst seviye yönlendirici bu RA'ları gönderecek " "şekilde yapılandırılmalıdır. ipv6_gateway, yapılandırıldığında, üst seviye " "yönlendirici üzerindeki arayüzün LLA'sı olmalıdır. Eğer genel benzersiz " "adres (GUA) kullanan bir sonraki-nokta isteniyorsa, bu ağa ayrılmış bir alt " "ağ vasıtasıyla yapılmalıdır, bu parametre ile değil. " msgid "You must implement __call__" msgstr "__call__ fonksiyonunu uygulamalısınız." msgid "" "You must provide a config file for bridge - either --config-file or " "env[NEUTRON_TEST_CONFIG_FILE]" msgstr "" "Köprü için bir yapılandırma dosyası sağlamalısınız - ya --config-file ya da " "env[NEUTRON_TEST_CONFIG_FILE]" msgid "You must provide a revision or relative delta" msgstr "Bir sürüm ya da göreli fark sağlamalısınız" msgid "allocation_pools allowed only for specific subnet requests." msgstr "allocation_pools yalnızca belirli alt ağ istekleri için izinli." msgid "binding:profile value too large" msgstr "binding:profile değeri çok büyük" msgid "cidr and prefixlen must not be supplied together" msgstr "cidr ve prefixlen birlikte verilmemelidir" #, python-format msgid "dhcp_agents_per_network must be >= 1. '%s' is invalid." msgstr "dhcp_agents_per_network >= 1 olmalı. '%s' geçersiz." msgid "fixed_ip_address cannot be specified without a port_id" msgstr "fixed_ip_address port_id olmadan belirtilemez" #, python-format msgid "has device owner %s" msgstr "%s aygıt sahibine sahip" msgid "in use" msgstr "kullanımda" #, python-format msgid "ip command failed on device %(dev_name)s: %(reason)s" msgstr "ip komutu %(dev_name)s aygıtı üzerinde başarısız: %(reason)s" #, python-format msgid "ip link capability %(capability)s is not supported" msgstr "ip bağlantı yeteneği %(capability)s desteklenmiyor" #, python-format msgid "ip link command is not supported: %(reason)s" msgstr "ip bağlantı komutu desteklenmiyor: %(reason)s" msgid "ip_version must be specified in the absence of cidr and subnetpool_id" msgstr "subnetpool_id ve cidr olmadığında ip_version belirtilmelidir" msgid "ipv6_address_mode is not valid when ip_version is 4" msgstr "ip_version 4 olduğunda ipv6_address_mode geçerli değildir" msgid "ipv6_ra_mode is not valid when ip_version is 4" msgstr "ip_version 4 olduğunda ipv6_ra_mode geçerli değildir" msgid "" "ipv6_ra_mode or ipv6_address_mode cannot be set when enable_dhcp is set to " "False." msgstr "" "enable_dhcp False olarak ayarlıyken ipv6_ra_mode veya ipv6_address_mode " "ayarlanamaz." #, python-format msgid "" "ipv6_ra_mode set to '%(ra_mode)s' with ipv6_address_mode set to " "'%(addr_mode)s' is not valid. If both attributes are set, they must be the " "same value" msgstr "" "ipv6_ra_mode '%(ra_mode)s' olarak, ipv6_address_mode '%(addr_mode)s' olarak " "ayarlanması geçersizdir. 
İki öznitelik de ayarlanıyorsa, aynı değerde " "olmalılar" msgid "mac address update" msgstr "mac adres güncellemesi" #, python-format msgid "" "max_l3_agents_per_router %(max_agents)s config parameter is not valid. It " "has to be greater than or equal to min_l3_agents_per_router %(min_agents)s." msgstr "" "max_l3_agents_per_router %(max_agents)s yapılandırma parametresi geçerli " "değil. min_l3_agents_per_router %(min_agents)s den büyük ya da ona eşit " "olmalı." msgid "network_type required" msgstr "network_type gerekli" #, python-format msgid "network_type value '%s' not supported" msgstr "network_type değeri '%s' desteklenmiyor" msgid "new subnet" msgstr "yeni alt ağ" #, python-format msgid "physical_network '%s' unknown for VLAN provider network" msgstr "physical_network '%s' VLAN sağlayıcı ağı için bilinmiyor" #, python-format msgid "physical_network '%s' unknown for flat provider network" msgstr "physical_network '%s' düz sağlayıcı ağı için bilinmiyor" msgid "physical_network required for flat provider network" msgstr "Düz sağlayıcı ağı için physical_network gerekir" #, python-format msgid "provider:physical_network specified for %s network" msgstr "provider:physical_network %s ağı için belirtildi" msgid "record" msgstr "kayıt" msgid "respawn_interval must be >= 0 if provided." msgstr "eğer sağlanmışsa respawn_interval >= 0 olmalı." #, python-format msgid "segmentation_id out of range (%(min)s through %(max)s)" msgstr "segmentation_id aralık dışında (%(min)s %(max)s arasında)" msgid "segmentation_id requires physical_network for VLAN provider network" msgstr "" "segmentation_id VLAN sağlayıcı ağı için physical_network'e ihtiyaç duyar" msgid "the nexthop is not connected with router" msgstr "sonraki nokta yönlendiriciye bağlı değil" msgid "the nexthop is used by router" msgstr "sonraki nokta yönlendirici tarafından kullanılıyor" msgid "" "uuid provided from the command line so external_process can track us via /" "proc/cmdline interface." msgstr "" "external_process bizi /proc/cmdline arayüzünden takip edebilsin diye komut " "satırından sağlanan uuid." neutron-8.4.0/neutron/locale/tr_TR/LC_MESSAGES/neutron-log-info.po0000664000567000056710000004111413044372760025677 0ustar jenkinsjenkins00000000000000# Translations template for neutron. # Copyright (C) 2015 ORGANIZATION # This file is distributed under the same license as the neutron project. # # Translators: # ADİL REŞİT DURSUN , 2015 # Alper Çiftçi , 2015 # OpenStack Infra , 2015. 
#zanata msgid "" msgstr "" "Project-Id-Version: neutron 8.0.1.dev68\n" "Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n" "POT-Creation-Date: 2016-04-18 20:06+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" "PO-Revision-Date: 2015-08-21 01:06+0000\n" "Last-Translator: openstackjenkins \n" "Language: tr-TR\n" "Plural-Forms: nplurals=1; plural=0;\n" "Generated-By: Babel 2.0\n" "X-Generator: Zanata 3.7.3\n" "Language-Team: Turkish (Turkey)\n" #, python-format msgid "%(action)s failed (client error): %(exc)s" msgstr "%(action)s başarısız (istemci hatası): %(exc)s" #, python-format msgid "%(method)s %(url)s" msgstr "%(method)s %(url)s" #, python-format msgid "%(prog)s version %(version)s" msgstr "%(prog)s sürüm %(version)s" #, python-format msgid "%(type)s ID ranges: %(range)s" msgstr "%(type)s ID aralığı: %(range)s" #, python-format msgid "%(url)s returned a fault: %(exception)s" msgstr "%(url)s hata döndürdü: %(exception)s" #, python-format msgid "%(url)s returned with HTTP %(status)d" msgstr "%(url)s HTTP %(status)d ile geri döndü" #, python-format msgid "%d probe(s) deleted" msgstr "%d sonda silindi" #, python-format msgid "" "Added segment %(id)s of type %(network_type)s for network %(network_id)s" msgstr "%(network_id)s ağı için %(network_type)s türünde %(id)s dilimi eklendi" #, python-format msgid "Adding %s to list of bridges." msgstr "%s köprü listesine ekleniyor." #, python-format msgid "Adding network %(net)s to agent %(agent)s on host %(host)s" msgstr "Ağ %(net)s %(host)s istemcisi üzerinde %(agent)s ajanına ekleniyor" #, python-format msgid "Agent %s already present" msgstr "Ajan %s zaten mevcut" #, python-format msgid "Agent Gateway port does not exist, so create one: %s" msgstr "Ajan geçit bağlantı noktası mevcut değil, bir tane oluştur: %s" msgid "Agent caught SIGHUP, resetting." msgstr "Ajan SIGHUP yakaladı, sıfırlanıyor." msgid "Agent caught SIGTERM, quitting daemon loop." msgstr "Ajan SIGTERM yakaladı, artalan işlemi döngüsünden çıkılıyor." msgid "Agent initialized successfully, now running... " msgstr "Ajan başarıyla ilklendirildi, şimdi çalıştırılıyor... " msgid "Agent out of sync with plugin!" msgstr "Ajan ve eklenti uyumsuz!" 
msgid "" "Allow sorting is enabled because native pagination requires native sorting" msgstr "" "Sıralamaya izin verme etkin çünkü doğal sayfalama doğal sıralamaya ihtiyaç " "duyar" #, python-format msgid "Allowable flat physical_network names: %s" msgstr "İzin verilebilecek düz fiziksel ağ isimleri: %s" msgid "Arbitrary flat physical_network names allowed" msgstr "Rastgele seçilmiş düz fiziksel ağ isimlerine izin verilmez" #, python-format msgid "Assigning %(vlan_id)s as local vlan for net-id=%(net_uuid)s" msgstr "%(vlan_id)s net-id=%(net_uuid)s için yerel olarak atanıyor" #, python-format msgid "Attachment %s removed" msgstr "Eklenti %s kaldırıldı" #, python-format msgid "" "Attempt %(count)s to allocate a VRID in the network %(network)s for the " "router %(router)s" msgstr "" "%(network)s ağında %(router)s yönlendiricisi için VRID ayırmak için girişim " "%(count)s" #, python-format msgid "Attempt %(count)s to bind port %(port)s" msgstr "%(port)s bağlantı noktası bağlama için girişim %(count)s" #, python-format msgid "Attempted to remove port filter which is not filtered %r" msgstr "" "Filtrelenmiş %r olmayan bağlantı noktası filtresi kaldırılmaya çalışıldı" #, python-format msgid "Attempted to update port filter which is not filtered %s" msgstr "%s filtrelenmemiş bağlantı noktası filtresi güncellenmeye çalışıldı" #, python-format msgid "" "Binding info for port %s was not found, it might have been deleted already." msgstr "" "Bağlantı noktası %s için bağlama bilgisi bulunamadı, zaten silinmiş olabilir." #, python-format msgid "" "Cannot apply dhcp option %(opt)s because it's ip_version %(version)d is not " "in port's address IP versions" msgstr "" "Dhcp seçeneği %(opt)s uygulanamıyor çünkü ip_version %(version)d bağlantı " "noktasının adres IP sürümleri içinde değil" #, python-format msgid "Centralizing distributed router %s is not supported" msgstr "Dağıtık yönlendirici %s merkezileştirme desteklenmiyor" #, python-format msgid "Cleaning bridge: %s" msgstr "Köprü temizleniyor: %s" #, python-format msgid "Clearing orphaned ARP spoofing entries for devices %s" msgstr "Aygıtlar %s için sahipsiz ARP aldatma girdileri temizleniyor" msgid "" "ConfDriver is used as quota_driver because the loaded plugin does not " "support 'quotas' table." msgstr "" "Yüklenen eklenti 'quotas' tablosunu desteklemediğinden ConfDriver " "quota_driver olarak kullanılıyor." #, python-format msgid "Configured extension driver names: %s" msgstr "Yapılandırılan eklenti sürücü isimleri: %s" #, python-format msgid "Configured mechanism driver names: %s" msgstr "Yapılandırılan mekanizma sürücü isimleri: %s" #, python-format msgid "Configured type driver names: %s" msgstr "Tür sürücü isimleri yapılandırıldı: %s" msgid "DHCP agent started" msgstr "DHCP ajanı başlatıldı" #, python-format msgid "Default provider is not specified for service type %s" msgstr "%s servis türü için varsayılan sağlayıcı belirtilmemiş" #, python-format msgid "Deleting port: %s" msgstr "Bağlantı noktası siliniyor: %s" #, python-format msgid "Destroying IPset: %s" msgstr "IPset siliniyor: %s" #, python-format msgid "Destroying IPsets with prefix: %s" msgstr "Şu öneke sahip IPset'ler siliniyor: %s" #, python-format msgid "Device %s already exists" msgstr "Aygıt %s zaten mevcut" #, python-format msgid "Device %s not defined on plugin" msgstr "Aygıt %s eklentide tanımlanmamış" #, python-format msgid "Device with MAC %s not defined on plugin" msgstr "%s MAC'ine sahip aygıt eklentide tanımlanmadı" msgid "Disabled allowed-address-pairs extension." 
msgstr "allowed-address-pairs eklentisi kapatıldı." msgid "Disabled security-group extension." msgstr "Güvenlik grubu eklentisi kapatıldı." msgid "Disabled vlantransparent extension." msgstr "vlantransparent eklentisi kapalı." #, python-format msgid "Exclude Devices: %s" msgstr "Aygıtları Hariç Tut: %s" #, python-format msgid "" "Failed to schedule network %s, no eligible agents or it might be already " "scheduled by another server" msgstr "" "%s ağı zamanlanamadı, uygun ajan yok veya başka bir sunucu tarafından zaten " "zamanlanmış olabilir" #, python-format msgid "Found invalid IP address in pool: %(start)s - %(end)s:" msgstr "Havuzda geçersiz IP adresi bulundu: %(start)s - %(end)s:" #, python-format msgid "Found overlapping ranges: %(l_range)s and %(r_range)s" msgstr "Kesişen aralıklar bulundu: %(l_range)s and %(r_range)s" #, python-format msgid "Found pool larger than subnet CIDR:%(start)s - %(end)s" msgstr "Alt ağ CIDR'den büyük havuz bulundu:%(start)s - %(end)s" #, python-format msgid "" "Found port (%(port_id)s, %(ip)s) having IP allocation on subnet %(subnet)s, " "cannot delete" msgstr "" "%(subnet)s alt ağında IP ayrılmış bağlantı noktası (%(port_id)s, %(ip)s) " "bulundu, silinemez" #, python-format msgid "Got %(alias)s extension from driver '%(drv)s'" msgstr "'%(drv)s' sürücüsünden %(alias)s eklentisi alındı" #, python-format msgid "HTTP exception thrown: %s" msgstr "HTTP istisnası fırlatıldı: %s" #, python-format msgid "" "Heartbeat received from %(type)s agent on host %(host)s, uuid %(uuid)s after " "%(delta)s" msgstr "" "%(host)s istemcisi, uuid %(uuid)s üstündeki %(type)s ajandan %(delta)s sonra " "kalp atışı alındı" msgid "IPset cleanup completed successfully" msgstr "IPset temizliği başarıyla tamamlandı" msgid "IPv6 is not enabled on this system." msgstr "IPv6 bu sistemde etkin değil." #, python-format msgid "Initializing driver for type '%s'" msgstr "'%s' türü için sürücü ilklendiriliyor" #, python-format msgid "Initializing extension driver '%s'" msgstr "Eklenti sürücüsü ilklendiriliyor '%s'" msgid "Initializing extension manager." msgstr "Genişletme yöneticisi başlatılıyor" #, python-format msgid "Initializing mechanism driver '%s'" msgstr "Mekanizma sürücüsü ilklendiriliyor '%s'" #, python-format msgid "Interface mappings: %s" msgstr "Arayüz eşleştirmeleri: %s" #, python-format msgid "L2 Agent operating in DVR Mode with MAC %s" msgstr "L2 Ajanı %s MAC'i ile DVR Kipinde çalışıyor" msgid "L3 agent started" msgstr "L3 ajanı başlatıldı" #, python-format msgid "Loaded extension driver names: %s" msgstr "Yüklenen eklenti sürücü isimleri: %s" #, python-format msgid "Loaded extension: %s" msgstr "Yüklenen bölüm: %s" #, python-format msgid "Loaded mechanism driver names: %s" msgstr "Yüklenen mekanizma sürücü isimleri: %s" #, python-format msgid "Loaded quota_driver: %s." msgstr "quota_driver yüklendi: %s." #, python-format msgid "Loaded type driver names: %s" msgstr "Tür sürücü isimleri yüklendi: %s" #, python-format msgid "Loading Metering driver %s" msgstr "Ölçme sürücüsü %s yükleniyor" #, python-format msgid "Loading Plugin: %s" msgstr "Eklenti Yükleniyor: %s" #, python-format msgid "Loading core plugin: %s" msgstr "Çekirdek eklenti yükleniyor: %s" #, python-format msgid "Loading interface driver %s" msgstr "Arayüz sürücüsü %s yükleniyor" msgid "Logging enabled!" msgstr "Günlükleme etkin!" 
msgid "ML2 FlatTypeDriver initialization complete" msgstr "ML2 FlatTypeDriver ilklendirmesi tamamlandı" msgid "ML2 LocalTypeDriver initialization complete" msgstr "ML2 LocalTypeDriver ilklendirmesi tamamlandı" #, python-format msgid "Mapping physical network %(physical_network)s to bridge %(bridge)s" msgstr "Fiziksel ağ %(physical_network)s %(bridge)s köprüsüne eşleştiriliyor" msgid "Modular L2 Plugin initialization complete" msgstr "Modüler L2 Eklentisi ilklendirme tamamlandı" #, python-format msgid "Network VLAN ranges: %s" msgstr "Ağ VLAN aralığı: %s" #, python-format msgid "Neutron service started, listening on %(host)s:%(port)s" msgstr "Neutron servisi başlatıldı, %(host)s:%(port)s üzerinde dinliyor" #, python-format msgid "No device with MAC %s defined on agent." msgstr "Ajanda %s MAC'ine sahip bir aygıt tanımlanmamış." msgid "No ports here to refresh firewall" msgstr "Burda güvenlik duvarını tazelemek için bağlantı noktası yok" #, python-format msgid "Nova event response: %s" msgstr "Nova olay yanıtı: %s" #, python-format msgid "" "Number of active agents lower than max_l3_agents_per_router. L3 agents " "available: %s" msgstr "" "Etkin ajan sayısı max_l3_agents_per_router'den küçük. Kullanılabilir L3 " "ajanları: %s" msgid "OVS cleanup completed successfully" msgstr "OVS temizliği başarıyla tamamlandı" #, python-format msgid "Physical Devices mappings: %s" msgstr "Fiziksel Aygıtların eşleştirmeleri: %s" #, python-format msgid "Port %(device)s updated. Details: %(details)s" msgstr "Bağlantı noktası %(device)s güncellendi. Detaylar: %(details)s" #, python-format msgid "Port %(port_id)s not present in bridge %(br_name)s" msgstr "Bağlantı noktası %(port_id)s %(br_name)s köprüsünde mevcut değil" #, python-format msgid "Port %s updated." msgstr "Bağlantı noktası %s güncellendi." #, python-format msgid "Port %s was deleted concurrently" msgstr "Bağlantı noktası %s eş zamanlı olarak silindi" #, python-format msgid "" "Port %s was not found on the integration bridge and will therefore not be " "processed" msgstr "" "Bağlantı noktası %s tümleştirme köprüsünde bulunamadı ve bu yüzden " "işlenmeyecek" #, python-format msgid "Port '%(port_name)s' has lost its vlan tag '%(vlan_tag)d'!" msgstr "" "'%(port_name)s' bağlantı noktası '%(vlan_tag)d' vlan etiketini kaybetti!" msgid "PortSecurityExtensionDriver initialization complete" msgstr "PortSecurityExtensionDriver ilklendirme tamamlandı" #, python-format msgid "Ports %s removed" msgstr "Portlar %s silindi" #, python-format msgid "Preparing filters for devices %s" msgstr "Aygıtlar için filtreler hazırlanıyor %s" #, python-format msgid "Process runs with uid/gid: %(uid)s/%(gid)s" msgstr "Süreç şu uid/gid ile çalışıyor: %(uid)s/%(gid)s" msgid "Provider rule updated" msgstr "Sağlayıcı kuralı güncellendi" #, python-format msgid "RPC agent_id: %s" msgstr "RPC agent_id: %s" msgid "RPC was already started in parent process by plugin." msgstr "RPC üst süreçte eklenti tarafından zaten başlatılmıştı." 
#, python-format msgid "Reclaiming vlan = %(vlan_id)s from net-id = %(net_uuid)s" msgstr "vlan = %(vlan_id)s'in net-id = %(net_uuid)s'den iades isteniyor" msgid "Refresh firewall rules" msgstr "Güvenlik duvarı kurallarını tazele" #, python-format msgid "Registered extension drivers: %s" msgstr "Eklenti sürücüleri kaydedildi: %s" #, python-format msgid "Registered mechanism drivers: %s" msgstr "Kaydedilen mekanizma sürücüleri: %s" #, python-format msgid "Registered types: %s" msgstr "Kaydedilen türler: %s" #, python-format msgid "Remove device filter for %r" msgstr "%r için aygıt filtresini kaldır" #, python-format msgid "Removing iptables rule for IPset: %s" msgstr "IPset için iptables kuralı siliniyor: %s" #, python-format msgid "Router %(router_id)s transitioned to %(state)s" msgstr "Yönlendirici %(router_id)s %(state)s durumuna geçti" #, python-format msgid "" "Router %s is not managed by this agent. It was possibly deleted concurrently." msgstr "" "%s yönlendiricisi bu ajan tarafından yönetilmiyor. Muhtemelen eş zamanlı " "olarak silindi." #, python-format msgid "SNAT interface port list does not exist, so create one: %s" msgstr "" "SNAT arayüzü bağlantı noktası listesi mevcut değil, bir tane oluştur: %s" msgid "SRIOV NIC Agent RPC Daemon Started!" msgstr "SRIOV NIC Ajanı RPC Artalan İşlemleri Başlatıldı!" #, python-format msgid "Scheduling unhosted network %s" msgstr "Sunulmamış ağ %s zamanlanıyor" #, python-format msgid "Security group member updated %r" msgstr "Güvenlik grubu üyesi güncellendi %r" #, python-format msgid "Security group rule updated %r" msgstr "Güvenlik grubu kuralı güncellendi %r" #, python-format msgid "Service %s is supported by the core plugin" msgstr "Servis %s çekirdek eklenti tarafından destekleniyor" #, python-format msgid "" "Skipping ARP spoofing rules for port '%s' because it has port security " "disabled" msgstr "" "'%s' bağlantı noktası için ARP aldatma kuralları atlanıyor çünkü bağlanı " "noktası güvenliği kapalı" #, python-format msgid "" "Skipping method %s as firewall is disabled or configured as " "NoopFirewallDriver." msgstr "" "Güvenlik duvarı kapalı ya da NoopFirewallDriver olarak yapılandırıldığından " "%s metodu atlanıyor." msgid "" "Skipping period L3 agent status check because automatic router rescheduling " "is disabled." msgstr "" "Devre L3 ajan durum kontrolü atlanıyor çünkü otomatik yönlendirici yeniden " "zamanlama kapalı." msgid "" "Skipping periodic DHCP agent status check because automatic network " "rescheduling is disabled." msgstr "" "Aralıklı DHCP ajan durum kontrolü atlanıyor çünkü otomatik ağ yeniden " "zamanlama kapalı." 
#, python-format msgid "Skipping port %s as no IP is configure on it" msgstr "Bağlantı noktası %s atlanıyor çünkü üzerinde yapılandırılmış IP yok" msgid "Specified IP addresses do not match the subnet IP version" msgstr "Belirtilen IP adresleri alt ağ IP sürümüyle eşleşmiyor" #, python-format msgid "Subnet %s was deleted concurrently" msgstr "Alt ağ %s eş zamanlı olarak silindi" msgid "Synchronizing state" msgstr "Durum eşzamanlandırılıyor" msgid "Synchronizing state complete" msgstr "Durum eş zamanlandırma tamamlandı" #, python-format msgid "Tenant network_types: %s" msgstr "Kiracı network_types: %s" #, python-format msgid "" "Validation for CIDR: %(new_cidr)s failed - overlaps with subnet " "%(subnet_id)s (CIDR: %(cidr)s)" msgstr "" "CIDR: %(new_cidr)s için doğrulama başarısız - %(subnet_id)s (CIDR: %(cidr)s) " "ile çakışıyor" msgid "VlanTypeDriver initialization complete" msgstr "VlanTypeDriver ilklendirme tamamlandı" #, python-format msgid "agent_updated by server side %s!" msgstr "ajan sunucu tarafında güncellendi %s!" #, python-format msgid "port_unbound(): net_uuid %s not in local_vlan_map" msgstr "port_unbound(): net_uuid %s local_vlan_map içinde değil" neutron-8.4.0/neutron/locale/tr_TR/LC_MESSAGES/neutron-log-error.po0000664000567000056710000006373413044372760026111 0ustar jenkinsjenkins00000000000000# Translations template for neutron. # Copyright (C) 2015 ORGANIZATION # This file is distributed under the same license as the neutron project. # # Translators: # ADİL REŞİT DURSUN , 2015 # Alper Çiftçi , 2015 # Zana iLHAN , 2015 # OpenStack Infra , 2015. #zanata # Andreas Jaeger , 2016. #zanata msgid "" msgstr "" "Project-Id-Version: neutron 8.1.3.dev113\n" "Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n" "POT-Creation-Date: 2016-08-13 08:46+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" "PO-Revision-Date: 2015-08-20 03:49+0000\n" "Last-Translator: openstackjenkins \n" "Language: tr-TR\n" "Plural-Forms: nplurals=1; plural=0;\n" "Generated-By: Babel 2.0\n" "X-Generator: Zanata 3.7.3\n" "Language-Team: Turkish (Turkey)\n" #, python-format msgid "" "%(service)s for %(resource_type)s with uuid %(uuid)s not found. The process " "should not have died" msgstr "" "uuid %(uuid)s ile %(resource_type)s için %(service)s bulunamadı!, İşlem " "sonlanmamış olmalı." #, python-format msgid "%s Agent terminated!" msgstr "%s Ajanı sonlandırıldı!" #, python-format msgid "%s failed" msgstr "%s başarısız" #, python-format msgid "" "%s used in config as ipv6_gateway is not a valid IPv6 link-local address." msgstr "" "ipv6_gateway geçerli bir IPv6 link-local adresi olmadığından yapılandırmada " "%s kullanıldı." #, python-format msgid "" "'rpc_workers = %d' ignored because start_rpc_listeners is not implemented." msgstr "" "henüz start_rpc_listeners implemente edilmediği için 'rpc_workers = %d' göz " "ardı edildi." msgid "Agent Initialization Failed" msgstr "Ajan İlklendirme Başarısız" #, python-format msgid "An error occurred while communicating with async process [%s]." msgstr "[%s] asenkron işlem ile haberleşirken bir hata oluştu." #, python-format msgid "An error occurred while killing [%s]." msgstr "[%s] sonlandırılırken bir hata oluştu." 
#, python-format msgid "An exception occurred while creating the %(resource)s:%(item)s" msgstr "%(resource)s:%(item)s oluşturulurken bir istisna oluştu" msgid "An interface driver must be specified" msgstr "Bir arayüz sürücüsü belirtmeniz gerekmektedir" #, python-format msgid "Binding info for DVR port %s not found" msgstr "DVR bağlantı noktası %s için bağlama bilgisi bulunamadı" #, python-format msgid "" "Bridge %(bridge)s for physical network %(physical_network)s does not exist. " "Agent terminated!" msgstr "" "%(physical_network)s fiziksel ağı için %(bridge)s köprüsü mevcut değil. Ajan " "sonlandırıldı!" #, python-format msgid "" "Cannot provision %(network_type)s network for net-id=%(net_uuid)s - " "tunneling disabled" msgstr "" "net-id=%(net_uuid)s için %(network_type)s ağı hazırlanamıyor - tünelleme " "kapalı" #, python-format msgid "" "Cannot provision VLAN network for net-id=%(net_uuid)s - no bridge for " "physical_network %(physical_network)s" msgstr "" "net-id=%(net_uuid)s için VLAN ağı hazırlanamıyor - physical_network " "%(physical_network)s için köprü yok" #, python-format msgid "" "Cannot provision flat network for net-id=%(net_uuid)s - no bridge for " "physical_network %(physical_network)s" msgstr "" "net-id=%(net_uuid)s için düz ağ hazırlanamıyor - physical_network " "%(physical_network)s için köprü yok" #, python-format msgid "" "Cannot provision unknown network type %(network_type)s for net-id=" "%(net_uuid)s" msgstr "" "net-id=%(net_uuid)s için %(network_type)s bilinmeyen ağ türü hazırlanamıyor" #, python-format msgid "" "Cannot reclaim unknown network type %(network_type)s for net-id=%(net_uuid)s" msgstr "" "net-id=%(net_uuid)s için bilinmeyen ağ türü %(network_type)s iadesi " "istenemiyor" msgid "Cannot run ebtables. Please ensure that it is installed." msgstr "ebtables çalıştırılamadı. Lütfen kurulu olduğundan emin olun." #, python-format msgid "" "Centralized-SNAT port %(port)s on subnet %(port_subnet)s already seen on a " "different subnet %(orig_subnet)s" msgstr "" "%(port_subnet)s alt ağındaki merkezi-SNAT %(port)s bağlantı noktası başka " "bir alt ağda %(orig_subnet)s görüldü" msgid "" "Check for Open vSwitch ARP responder support failed. Please ensure that the " "version of openvswitch being used has ARP flows support." msgstr "" "Open vSwitch ARP yanıtlayıcısı desteği kontrolü başarısız. Lütfen kullanılan " "openvswitch sürümünün ARP akışı desteği olduğundan emin olun." msgid "" "Check for Open vSwitch VXLAN support failed. Please ensure that the version " "of openvswitch being used has VXLAN support." msgstr "" "Open vSwitch VXLAN desteği kontrolü başarısız. Lütfen kullanılan openvswitch " "sürümünün VXLAN desteği olduğundan emin olun." msgid "" "Check for Open vSwitch patch port support failed. Please ensure that the " "version of openvswitch being used has patch port support or disable features " "requiring patch ports (gre/vxlan, etc.)." msgstr "" "Open vSwitch yama bağlantı noktası desteği kontrolü başarısız. Lütfen " "kullanılan openvswitch sürümünün yama bağlantı noktası desteği olduğundan " "emin olun ya da yama bağlantı noktalarına ihtiyaç duyan özellikleri kapatın " "(gre/vxlan, vs.)." msgid "" "Check for Open vSwitch support of ARP header matching failed. ARP spoofing " "suppression will not work. A newer version of OVS is required." msgstr "" "Open vSwitch ARP başlığı eşleşme desteği kontrolü başarısız. ARP yanıltma " "önleme çalışmayacak. Daha yeni sürüm OVS gerekiyor." msgid "" "Check for VF management support failed. 
Please ensure that the version of ip " "link being used has VF support." msgstr "" "VF yönetim desteği kontrolü başarısız. Lütfen kullanılan ip bağlantısı " "sürümünün VF desteği olduğundan emin olun." msgid "" "Check for iproute2 VXLAN support failed. Please ensure that the iproute2 has " "VXLAN support." msgstr "" "Iproute2 VXLAN desteği kontrolü başarısız. iproute2'nin VXLAN desteği " "olduğundan emin olun." msgid "Check for native OVSDB support failed." msgstr "Doğal OVSDB desteği kontrolü başarısız." #, python-format msgid "Could not delete %(res)s %(id)s." msgstr "%(res)s %(id)s silinemedi." #, python-format msgid "Could not find %s to delete." msgstr "Silinecek %s bulunamadı." #, python-format msgid "Could not retrieve gateway port for subnet %s" msgstr "Alt ağ %s için geçit bağlantı noktası alınamadı" #, python-format msgid "DVR: Duplicate DVR router interface detected for subnet %s" msgstr "DVR: %s alt ağı için yinelenen DVR yönlendirici arayüzü algılandı" msgid "DVR: Failed updating arp entry" msgstr "DVR: arp kaydı güncelleme hatası" msgid "DVR: error adding redirection logic" msgstr "DVR: yönlendirme mantığı ekleme hatası" #, python-format msgid "Driver %(driver)s does not implement %(func)s" msgstr "Sürücü %(driver)s %(func)s'yi uygulamıyor" #, python-format msgid "Driver %(driver)s:%(func)s runtime error" msgstr "Sürücü %(driver)s:%(func)s çalışma zamanı hatası" #, python-format msgid "Error during notification for %(callback)s %(resource)s, %(event)s" msgstr "%(callback)s %(resource)s için bilgilendirme sırasında hata, %(event)s" msgid "Error executing command" msgstr "Komut çalıştırılırken hata" #, python-format msgid "Error in agent loop. Devices info: %s" msgstr "Ajan döngüsünde hata. Aygıt bilgisi: %s" #, python-format msgid "Error loading provider '%(provider)s' for service %(service_type)s" msgstr "" "%(service_type)s servisi için '%(provider)s' sağlayıcısını yüklemede hata" #, python-format msgid "Error response returned from nova: %s" msgstr "Nova'dan hata yanıtı döndü: %s" #, python-format msgid "Error unable to destroy namespace: %s" msgstr "Hata, isim uzayı: %s silinemedi" #, python-format msgid "Error while deleting router %s" msgstr "Yönlendirici %s silinirken hata" #, python-format msgid "Error while handling pidfile: %s" msgstr "%s pid dosyası işlenirken bir hata oluştu" msgid "Error while processing VIF ports" msgstr "VIF bağlantı noktaları işlenirken hata" #, python-format msgid "Error while writing HA state for %s" msgstr "%s için HA durumu yazılırken hata" #, python-format msgid "Error, unable to destroy IPset: %s" msgstr "Hata, IPset: %s silinemedi" #, python-format msgid "Error, unable to remove iptables rule for IPset: %s" msgstr "Hata, IPset: %s için iptables kuralı kaldırılamıyor" #, python-format msgid "" "Exceeded maximum binding levels attempting to bind port %(port)s on host " "%(host)s" msgstr "" "%(host)s istemcisi üzerinde %(port)s bağlantı noktasına bağlanma girişiminde " "azami bağlanma seviyesi aşıldı" #, python-format msgid "Exception auto-deleting port %s" msgstr "%s bağlantı noktasını otomatik silme sırasında istisna" #, python-format msgid "Exception auto-deleting subnet %s" msgstr "%s alt ağını otomatik silme sırasında istisna" #, python-format msgid "Exception deleting fixed_ip from port %s" msgstr "%s bağlantı noktasından fixed_ip silinirken istisna" msgid "Exception encountered during network rescheduling" msgstr "Ağ yeniden zamanlama sırasında istisna oluştu" msgid "Exception encountered during router rescheduling." 
msgstr "Yönlendirici yeniden zamanlama sırasında istisna oluştu." msgid "Exception occurs when timer stops" msgstr "Zamanlayıcı durmaya çalışırken hata oluşur." msgid "Exception occurs when waiting for timer" msgstr "Zamanlayıcıyı beklerken hata oluşur" msgid "Exiting agent as programmed in check_child_processes_actions" msgstr "" "check_child_processes_actions deki programlanan ajan/işlevden çıkılıyor " #, python-format msgid "" "Exiting agent because of a malfunction with the %(service)s process " "identified by uuid %(uuid)s" msgstr "" "%(uuid)s ile tanımlanan %(service)s işlemlerden bir uyumsuzluk hatasından " "dolayı çıkılıyor" #, python-format msgid "Extension driver '%(name)s' failed in %(method)s" msgstr "Eklenti sürücüsü '%(name)s' %(method)s içerisinde başarısız" #, python-format msgid "Extension path '%s' doesn't exist!" msgstr "'%s' Uzantı dizini bulunamıyor." #, python-format msgid "FWaaS RPC failure in %(func_name)s for fw: %(fwid)s" msgstr "fw: %(fwid)s için %(func_name)s içinde FWaaS RPC hatası" #, python-format msgid "FWaaS RPC info call failed for '%s'." msgstr "'%s' için FWaaS RPC bilgi çağrısı başarısız" #, python-format msgid "Failed creating vxlan interface for %(segmentation_id)s" msgstr "%(segmentation_id)s için vxlan arayüzü oluşturma başarısız" #, python-format msgid "Failed deleting egress connection state of floatingip %s" msgstr "" "%s floatingip bağlantısının çıkış sevye durumu silinmeye çalışılırken bir " "hata ile karşılaştı." #, python-format msgid "Failed deleting ingress connection state of floatingip %s" msgstr "" "%s floatingip bağlantısının giris sevye durumu silinmeye çalışılırken bir " "hata ile karşılaştı." msgid "Failed executing ip command" msgstr "IP comutu çalıştırılamadı" msgid "Failed fwaas process services sync" msgstr "fwaas süreç servisleri eş zamanlama başarısız" msgid "Failed on Agent configuration parse. Agent terminated!" msgstr "Ajan yapılandırma aşamasında başarısız olundu. Ajan sonlandırıldı!" msgid "Failed reporting state!" msgstr "Raporlama durumu sağlanamıyor." #, python-format msgid "" "Failed sending gratuitous ARP to %(addr)s on %(iface)s in namespace %(ns)s" msgstr "" "%(ns)s bilinirlik alanında bulunan %(iface)s deki %(addr)s ne gereksiz/ ARP " "gönderilemedi." msgid "Failed synchronizing routers" msgstr "Yönlendiricileri eş zamanlama başarısız" msgid "Failed synchronizing routers due to RPC error" msgstr "RPC hatasından dolayı yönlendirici senkronizasyonu sağlanamıyor" #, python-format msgid "Failed to commit binding results for %(port)s after %(max)s tries" msgstr "" "%(port)s için bağlama sonuçlarını gönderme %(max)s denemeden sonra başarısız " "oldu" msgid "" "Failed to create OVS patch port. Cannot have tunneling enabled on this " "agent, since this version of OVS does not support tunnels or patch ports. " "Agent terminated!" msgstr "" "OVS yama bağlantı noktası oluşturma başarısız. Bu ajanda tünelleme " "etkinleştirilemez, çünkü bu OVS sürümü tünelleri ya da yama bağlantı " "noktalarını desteklemiyor. Ajan sonlandırıldı!" 
#, python-format msgid "Failed to destroy stale namespace %s" msgstr "Vadesi geçmiş isim uzayı %s silinemedi" #, python-format msgid "Failed to fetch router information for '%s'" msgstr "%s icin yönlendirici bilgisine erisilemiyor" #, python-format msgid "Failed to get devices for %s" msgstr "%s için aygıtları alma başarısız" #, python-format msgid "Failed to get traffic counters, router: %s" msgstr "Trafik sayaçları alınamadı, yönlendirici: %s" #, python-format msgid "" "Failed to import required modules. Ensure that the python-openvswitch " "package is installed. Error: %s" msgstr "" "Gerekli modülleri içe aktarma başarısız. python-openvswitch paketinin kurulu " "olduğuna emin olun. Hata: %s" #, python-format msgid "Failed to notify nova on events: %s" msgstr "Nova şu olaylar üzerine bilgilendirilemiyor: %s" msgid "Failed to parse network_vlan_ranges. Service terminated!" msgstr "network_vlan_ranges ayrıştırma başarısız. Servis sonlandırıldı!" msgid "Failed to parse supported PCI vendor devices" msgstr "Desteklenen PCI satıcı aygıtları ayrıştırma başarısız" msgid "Failed to parse tunnel_id_ranges. Service terminated!" msgstr "tunnel_id_ranges ayrıştırma başarısız. Servis sonlandırıldı!" msgid "Failed to parse vni_ranges. Service terminated!" msgstr "vni_ranges ayrıştırma başarısız. Servis sonlandırıldı!" #, python-format msgid "Failed to process compatible router '%s'" msgstr "Uyumlu '%s' yönlendirici bilgisi işlenemiyor" #, python-format msgid "Failed to process or handle event for line %s" msgstr "%s satırı için olay ele alınamıyor ya da işlenemiyor" #, python-format msgid "Failed to release segment '%s' because network type is not supported." msgstr "'%s' dilimi bırakılamadı çünkü ağ türü desteklenmiyor." #, python-format msgid "Failed to reschedule router %s" msgstr "Yönlendirici %s yeniden zamanlama başarısız" #, python-format msgid "Failed to schedule network %s" msgstr "Ağ %s zamanlama başarısız" #, python-format msgid "Failed to set-up %(type)s tunnel port to %(ip)s" msgstr "%(ip)s'ye %(type)s tünel bağlantı noktası kurulumu başarısız" #, python-format msgid "Failed trying to delete namespace: %s" msgstr "Bilinirlik alanı silme hatası: %s" #, python-format msgid "Failed unplugging interface '%s'" msgstr "%s arayuzu devre dışı bırakılamadı." #, python-format msgid "Firewall Driver Error for %(func_name)s for fw: %(fwid)s" msgstr "fw: %(fwid)s için %(func_name)s için Güvenlik Duvarı Hatası" #, python-format msgid "Firewall Driver Error on fw state %(fwmsg)s for fw: %(fwid)s" msgstr "" "fw: %(fwid)s için %(fwmsg)s fw durumunda Güvenlik Duvarı Sürücüsü Hatası" msgid "Fork failed" msgstr "Fork yapılırken hata ile karşılaşıldı." 
#, python-format msgid "" "IPTablesManager.apply failed to apply the following set of iptables rules:\n" "%s" msgstr "" "IPTablesManager.apply aşağıdakı iptables bilgileri uygulanamadı\n" "%s" msgid "Interface monitor is not active" msgstr "Arayüz izleme etkin değil" msgid "Internal error" msgstr "İçsel hata" #, python-format msgid "InvalidContentType: %s" msgstr "UyumsuzİçerikTipi: %s" #, python-format msgid "MAC generation error after %s attempts" msgstr "%s denemeden sonra MAC üretme hatası" #, python-format msgid "MalformedRequestBody: %s" msgstr "BozukİstekGövdesi: %s" #, python-format msgid "Mechanism driver %s failed in bind_port" msgstr "Mekanizma sürücüsü %s bind_port başarısız" #, python-format msgid "Mechanism driver '%(name)s' failed in %(method)s" msgstr "Mekanizma sürücüsü '%(name)s' %(method)s içinde başarısız oldu" #, python-format msgid "" "Message received from the host: %(host)s during the registration of " "%(agent_name)s has a timestamp: %(agent_time)s. This differs from the " "current server timestamp: %(serv_time)s by %(diff)s seconds, which is more " "than the threshold agent downtime: %(threshold)s." msgstr "" "%(agent_name)s kaydı sırasında %(host)s istemcisinden alınan iletinin " "%(agent_time)s zaman damgası var. Bu mevcut sunucu zaman damgası: " "%(serv_time)s ile %(diff)s saniye farklı, ki bu %(threshold)s eşik ajan " "aksama süresinden fazla." msgid "Missing subnet/agent_gateway_port" msgstr "Eksik subnet/agent_gateway_port" #, python-format msgid "Multiple ports have port_id starting with %s" msgstr "Birden çok bağlantı noktası %s port_id ile başlıyor" #, python-format msgid "Network %s info call failed." msgstr " %s ağ bilgi çağırısı yapılamıyor." #, python-format msgid "" "No FloatingIP agent gateway port returned from server for 'network-id': %s" msgstr "" "Sunucudan 'network-id': %s için DeğişkenIP ajan geçit bağlantı noktası " "dönmedi" #, python-format msgid "No Host supplied to bind DVR Port %s" msgstr "%s DVR Bağlantı noktasına bağlanma için istemci sağlanmadı" msgid "No known API applications configured." msgstr "Hiçi bir tanımlı API uygulaması konfigüre edilmedi." #, python-format msgid "No local VLAN available for net-id=%s" msgstr "net-id=%s için uygun yerel VLAN yok" msgid "No plugin for L3 routing registered to handle router scheduling" msgstr "" "Yönlendirici zamanlamayı işlemesi için L3 yönlendirme için kaydedilmiş " "eklenti yok" #, python-format msgid "" "No plugin for L3 routing registered. Cannot notify agents with the message %s" msgstr "" "L3 yönlendirme için eklenti kaydedilmemiş. Ajanlar %s iletisiyle " "bilgilendirilemiyor" msgid "No tunnel_ip specified, cannot delete tunnels" msgstr "tunnel_ip belirtilmemiş, tüneller silinemiyor" msgid "No tunnel_type specified, cannot create tunnels" msgstr "tunnel_type belirtilmemiş, tünel oluşturulamıyor" msgid "No tunnel_type specified, cannot delete tunnels" msgstr "tunnel_type belirtilmemiş, tüneller silinemiyor" #, python-format msgid "No type driver for external network_type: %s. Service terminated!" msgstr "Harici network_type: %s için tür sürücüsü yok. Servis sonlandırıldı!" #, python-format msgid "No type driver for tenant network_type: %s. Service terminated!" msgstr "Kiracı network_type: %s için tür sürücüsü yok. Servis sonlandırıldı!" msgid "No valid Segmentation ID to perform UCAST test." msgstr "UCAST testi yapmak için geçerli Dilimlendirme ID'si yok." 
#, python-format msgid "Not enough candidates, a HA router needs at least %s agents" msgstr "Yeterli aday yok, bir HA yönlendirici en az %s ajana ihtiyaç duyar" msgid "" "Nova notifications are enabled, but novaclient is not installed. Either " "disable nova notifications or install python-novaclient." msgstr "" "Nova iletileri etkin, ama novaclient kurulu değil. Ya nova iletilerini " "kapatın ya da python-novaclient kurun." #, python-format msgid "OVS flows could not be applied on bridge %s" msgstr "OVS akışları %s köprüsüne uygulanamıyor." #, python-format msgid "Parsing bridge_mappings failed: %s." msgstr "bridge_mappins ayrıştırma başarısız: %s." #, python-format msgid "Parsing physical_interface_mappings failed: %s. Agent terminated!" msgstr "" "physical_interface_mappings ayrıştırma başarısız: %s. Ajan sonlandırıldı!" #, python-format msgid "Pidfile %s already exist. Daemon already running?" msgstr "%s Pid zaten mevcut. Servis zaten calisiyor?" #, python-format msgid "Policy check error while calling %s!" msgstr "%s cağrılırken politika doğrulama hatası oluştu!" #, python-format msgid "Removing incompatible router '%s'" msgstr "Uygunsuz '%s' yönlendirici bilgisi kaldırılıyor" msgid "RuntimeError in obtaining namespace list for namespace cleanup." msgstr "" "İsim uzayı temizliği için isim uzayı listesi elde edilirken RuntimeError." #, python-format msgid "Serialized profile DB value '%(value)s' for port %(port)s is invalid" msgstr "" "%(port)s bağlantı noktası için serileştirilmiş profil DB değeri '%(value)s' " "geçersiz" #, python-format msgid "" "Serialized vif_details DB value '%(value)s' for port %(port)s is invalid" msgstr "" "%(port)s bağlantı noktası için serileştirilmiş vif_details DB değeri " "'%(value)s' geçersiz" #, python-format msgid "The external network bridge '%s' does not exist" msgstr "%s harici ağ geçidi mevcut degil" #, python-format msgid "" "The installed version of dnsmasq is too old. Please update to at least " "version %s." msgstr "Yüklü dnsmasq sürümü çok eski. Lütfen en az %s sürümüne güncelleyin." msgid "The resource could not be found." msgstr "Kaynak bulunamadı." msgid "" "The user that is executing neutron does not have permissions to read the " "namespaces. Enable the use_helper_for_ns_read configuration option." msgstr "" "Neutron'u çalıştıran kullanıcının isim uzaylarını okuma yetkisi yok. " "use_helper_for_ns_read yapılandırma seçeneğini etkinleştirin." #, python-format msgid "" "Type driver '%(new_driver)s' ignored because type driver '%(old_driver)s' is " "already registered for type '%(type)s'" msgstr "" "Tür sürücüsü '%(new_driver)s' atlandı çünkü tür sürücüsü '%(old_driver)s' " "'%(type)s' türü için zaten kaydedilmiş" #, python-format msgid "Unable to %(action)s dhcp for %(net_id)s." msgstr "%(net_id)s için %(action)s dhcp de yapılamıyor. " #, python-format msgid "Unable to add %(interface)s to %(bridge_name)s! Exception: %(e)s" msgstr "%(interface)s %(bridge_name)s e eklenemedi. İstisna: %(e)s" #, python-format msgid "Unable to add vxlan interface for network %s" msgstr "%s ağı için vxlan arayüzü eklenemedi" #, python-format msgid "Unable to convert value in %s" msgstr "%s degeri dönüştürülemiyor" #, python-format msgid "Unable to execute %(cmd)s. Exception: %(exception)s" msgstr "%(cmd)s çalıştırılamadı. Hata: %(exception)s" #, python-format msgid "Unable to find agent %s." msgstr "%s ajanı bulunamıyor." 
#, python-format msgid "Unable to generate mac address after %s attempts" msgstr "%s denemeden sonra mac adresi üretilemedi" #, python-format msgid "Unable to listen on %(host)s:%(port)s" msgstr "%(host)s:%(port)s dinlenemiyor" msgid "Unable to obtain MAC address for unique ID. Agent terminated!" msgstr "Benzersiz ID için MAC adresi elde edilemedi. Ajan sonlandırıldı!" #, python-format msgid "Unable to parse route \"%s\"" msgstr "\"%s\" rotası ayrıştırılamadı" #, python-format msgid "Unable to process HA router %s without HA port" msgstr "HA bağlantısı olmadan HA yönlendiricisi %s işlenemiyor" #, python-format msgid "Unable to sync network state on deleted network %s" msgstr "Silinmiş %s ağları için senkronizasyon sağlanamıyor" msgid "Unable to sync network state." msgstr "Ağ durumu senkronize edilemiyor." #, python-format msgid "Unable to undo add for %(resource)s %(id)s" msgstr "%(resource)s %(id)s için ekleme geri alınamıyor" msgid "Unexpected error." msgstr "Beklenmeyen hata." #, python-format msgid "" "Unexpected exception occurred while removing network %(net)s from agent " "%(agent)s" msgstr "" "%(net)s ağı %(agent)s ajanından kaldırılırken beklenmedik istisna oluştu" #, python-format msgid "Unexpected exception while checking supported feature via command: %s" msgstr "" "Şu komutla desteklenen özellik kontrolü yapılırken beklenmedik istisna: %s" msgid "Unexpected exception while checking supported ip link command" msgstr "Desteklenen ip bağlantısı komutu kontrol edilirken beklenmedik istisna" #, python-format msgid "Unknown network_type %(network_type)s for network %(network_id)s." msgstr "%(network_id)s ağı için bilinmeyen network_type %(network_type)s." msgid "Unrecoverable error: please check log for details." msgstr "Düzeltilemeyen hata: Lütfen detaylar için loglara bakınız." #, python-format msgid "" "Will not send event %(method)s for network %(net_id)s: no agent available. " "Payload: %(payload)s" msgstr "" "%(net_id)s ağı için %(method)s oalyı gönderilmeyecek: uygun ajan yok. " "Fayadalı yük: %(payload)s" #, python-format msgid "_bind_port_if_needed failed, deleting port '%s'" msgstr "_bind_port_if_needed başarısız, '%s' bağlantı noktası siliniyor" #, python-format msgid "_bind_port_if_needed failed. Deleting all ports from create bulk '%s'" msgstr "" "_bind_port_if_needed başarısız. '%s' toplu oluşturmasından tüm bağlantı " "noktaları siliniyor" #, python-format msgid "" "mechanism_manager.create_%(res)s_postcommit failed for %(res)s: " "'%(failed_id)s'. Deleting %(res)ss %(resource_ids)s" msgstr "" "mechanism_manager.create_%(res)s_postcommit %(res)s: '%(failed_id)s' için " "başarısız. 
%(res)ss %(resource_ids)s siliniyor" #, python-format msgid "" "mechanism_manager.create_network_postcommit failed, deleting network '%s'" msgstr "" "mechanism_manager.create_network_postcommit başarısız, '%s' ağı siliniyor" #, python-format msgid "mechanism_manager.create_port_postcommit failed, deleting port '%s'" msgstr "" "mechanism_manager.create_port_postcommit başarısız, '%s' bağlantı noktası " "siliniyor" #, python-format msgid "mechanism_manager.create_subnet_postcommit failed, deleting subnet '%s'" msgstr "" "mechanism_manager.create_subnet_postcommit başarısız, alt ağ '%s' siliniyor" msgid "mechanism_manager.delete_network_postcommit failed" msgstr "mechanism_manager.delete_network_postcommit başarısız" #, python-format msgid "mechanism_manager.delete_port_postcommit failed for port %s" msgstr "" "mechanism_manager.delete_port_postcommit %s bağlantı noktası için başarısız" msgid "mechanism_manager.delete_subnet_postcommit failed" msgstr "mechanism_manager.delete_subnet_postcommit başarısız" #, python-format msgid "tunnel_type %s not supported by agent" msgstr "tunnel_type %s ajan tarafından desteklenmiyor" neutron-8.4.0/neutron/locale/tr_TR/LC_MESSAGES/neutron-log-warning.po0000664000567000056710000003373513044372760026423 0ustar jenkinsjenkins00000000000000# Translations template for neutron. # Copyright (C) 2015 ORGANIZATION # This file is distributed under the same license as the neutron project. # # Translators: # ADİL REŞİT DURSUN , 2015 # OpenStack Infra , 2015. #zanata msgid "" msgstr "" "Project-Id-Version: neutron 8.1.3.dev11\n" "Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n" "POT-Creation-Date: 2016-06-22 18:13+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" "PO-Revision-Date: 2015-08-21 01:06+0000\n" "Last-Translator: openstackjenkins \n" "Language: tr-TR\n" "Plural-Forms: nplurals=1; plural=0;\n" "Generated-By: Babel 2.0\n" "X-Generator: Zanata 3.7.3\n" "Language-Team: Turkish (Turkey)\n" #, python-format msgid "%(agent_type)s agent %(agent_id)s is not active" msgstr "%(agent_type)s ajanı %(agent_id)s etkin değil" #, python-format msgid "" "%(port_num)d router ports found on the metadata access network. Only the " "port %(port_id)s, for router %(router_id)s will be considered" msgstr "" "Metadata erişim ağında %(port_num)d yönlendirici bağlantı noktası bulundu. 
" "Yalnızca %(port_id)s bağlantı noktası, %(router_id)s yönlendiricisi için " "değerlendirilecek" #, python-format msgid "%(type)s tunnel %(id)s not found" msgstr "%(type)s tünel %(id)s bulunamadı" msgid "A concurrent port creation has occurred" msgstr "Eş zamanlı bağlantı noktası oluşturma meydana geldi" #, python-format msgid "" "Action %(action)s for network %(net_id)s could not complete successfully: " "%(reason)s" msgstr "" "%(net_id)s ağı için %(action)s eylemi başarıyla tamamlanamadı: %(reason)s" #, python-format msgid "Action %s not supported" msgstr "%s eylemi desteklenmiyor" #, python-format msgid "Attempted to get traffic counters of chain %s which does not exist" msgstr "%s zincirinin mevcut olmayan trafik sayaçları alınmaya çalışıldı" #, python-format msgid "Attempting to bind with dead agent: %s" msgstr "Ölü ajanla bağlama deneniyor: %s" #, python-format msgid "Cannot find vf index for pci slot %s" msgstr "%s pci yuvası için vf indisi bulunamıyor" #, python-format msgid "Cannot find vfs %(vfs)s in device %(dev_name)s" msgstr "%(dev_name)s aygıtında vfs %(vfs)s bulunamıyor" #, python-format msgid "Could not expand segment %s" msgstr "Dilim %s genişletilemedi" #, python-format msgid "DHCP agent %s is not active" msgstr "DHCP ajanı %s etkin değil" msgid "DVR functionality requires a server upgrade." msgstr "DVR işlevselliği sunucu yükseltmesi gerektiriyor." #, python-format msgid "" "Device %(device)s requested by agent %(agent_id)s on network %(network_id)s " "not bound, vif_type: %(vif_type)s" msgstr "" "%(network_id)s ağı üstündeki %(agent_id)s ajanı tarafından istenen " "%(device)s aygıtı bağlı değil, vif_type: %(vif_type)s" #, python-format msgid "Did not find expected name \"%(ext_name)s\" in %(file)s" msgstr "%(file)s içinde beklenen isim \"%(ext_name)s\" bulunamadı" msgid "Driver configuration doesn't match with enable_security_group" msgstr "Sürücü yapılandırması enable_security_group ile eşleşmiyor" #, python-format msgid "Endpoint with ip %s already exists" msgstr "%s ip'sine sahip son uç zaten mevcut" #, python-format msgid "Extension %s not supported by any of loaded plugins" msgstr "Eklenti %s yüklenen hiçbir eklenti tarafından desteklenmiyor" #, python-format msgid "Extension file %(f)s wasn't loaded due to %(exception)s" msgstr "Eklenti dosyası %(f)s %(exception)s sebebiyle yüklenmedi" #, python-format msgid "Failed to delete namespace %s" msgstr "%s isim uzayı silme başarısız" #, python-format msgid "Failed trying to delete interface: %s" msgstr "Arayüzü silme denemesi başarısız: %s" #, python-format msgid "Failed trying to delete namespace: %s" msgstr "Bilinirlik alanı silme hatası: %s" #, python-format msgid "Found failed openvswitch port: %s" msgstr "Başarısız olmuş openvswitch bağlantı noktası bulundu: %s" #, python-format msgid "Found not yet ready openvswitch port: %s" msgstr "Henüz hazır olmayan openvswitch bağlantı noktası bulundu: %s" #, python-format msgid "Info for router %s was not found. Performing router cleanup" msgstr "" "%s yönlendiricisi için bilgi bulunamadı. 
Yönlendirici temizliği " "gerçekleştiriliyor" msgid "Invalid Interface ID, will lead to incorrect tap device name" msgstr "Geçersiz arayüz kimliği, geçersiz tap aygıt ismine yol açacak" msgid "Invalid Network ID, will lead to incorrect bridge name" msgstr "Geçersiz Ağ ID'si, geçersiz köprü ismine yol açacak" #, python-format msgid "Invalid Segmentation ID: %s, will lead to incorrect vxlan device name" msgstr "" "Geçersiz Dilimlendirme kimliği: %s, geçersiz vxlan aygıt ismine sebep olacak" msgid "Invalid VLAN ID, will lead to incorrect subinterface name" msgstr "Geçersiz VLAN ID'si, geçersiz alt arayüz ismine yol açacak" #, python-format msgid "Invalid remote IP: %s" msgstr "Geçersiz uzak IP: %s" #, python-format msgid "" "Invalid value for pagination_max_limit: %s. It should be an integer greater " "to 0" msgstr "" "pagination_max_limit: %s için geçersiz değer. 0'dan büyük bir tam sayı olmalı" #, python-format msgid "" "L2 agent could not get DVR MAC address from server. Retrying. Detailed " "message: %s" msgstr "" "L2 ajanı sunucudan DVR MAC adresini alamadı. Tekrar deneniyor. Detaylı " "ileti: %s" #, python-format msgid "Loaded plugins do not implement extension %s interface" msgstr "Yüklü eklentiler eklenti %s arayüzünü uygulamıyor" #, python-format msgid "" "Network %s may have been deleted and its resources may have already been " "disposed." msgstr "Ağ %s silinmiş ve kaynakları ortadan kaldırılmış olabilir." msgid "" "Neutron server does not support state report. State report for this agent " "will be disabled." msgstr "" "Neutron sunucusu durum raporu desteklemiyor. Bu ajan için durum raporu " "kapatılacak." msgid "No DHCP agents available, skipping rescheduling" msgstr "Uygun DHCP ajanı yok, yeniden zamanlama atlanıyor" #, python-format msgid "No L3 agents can host the router %s" msgstr "Hiçbir L3 ajanı %s yönlendiricisini sunamaz" msgid "No active L3 agents" msgstr "Etkin L3 ajanı yok" #, python-format msgid "No flat network found on physical network %s" msgstr "Fiziksel ağ %s üzerinde düz ağ bulunamadı" msgid "No more DHCP agents" msgstr "Daha fazla DHCP ajanı yok" #, python-format msgid "No routers compatible with L3 agent configuration on host %s" msgstr "" "Hiçbir yönlendirici %s istemcisi üzerindeki L3 ajanı yapılandırmasıyla " "uyumlu değil" #, python-format msgid "No valid gateway port on subnet %s is found for IPv6 RA" msgstr "" "IPv6 RA için %s alt ağı üzerinde geçerli ağ geçidi bağlantı noktası " "bulunamadı" #, python-format msgid "No vlan_id %(vlan_id)s found on physical network %(physical_network)s" msgstr "%(physical_network)s fiziksel ağında vlan_id %(vlan_id)s bulunamadı" #, python-format msgid "Nova event: %s returned with failed status" msgstr "Nova olayı: %s başarısız durum döndürdü" msgid "" "OVS is dead. OVSNeutronAgent will keep running and checking OVS status " "periodically." msgstr "" "OVS ölü. OVSNeutronAgent çalışmaya devam edip OVS durumunu aralıklarla " "kontrol edecek." msgid "OVS is restarted. OVSNeutronAgent will reset bridges and recover ports." msgstr "" "OVS yeniden başlatıldı. OVSNeutronAgent köprüleri sıfırlayacak ve bağlantı " "noktalarını kurtaracak." #, python-format msgid "" "Only %(active)d of %(total)d DHCP agents associated with network " "'%(net_id)s' are marked as active, so notifications may be sent to inactive " "agents." msgstr "" "'%(net_id)s' ağıyla ilişkilendirilmiş %(total)d DHCP ajanından yalnızca " "%(active)d kadarı etkin olarak işaretlenmiş, yani iletiler etkin olmayan " "ajanlara gönderilebilir." 
#, python-format msgid "" "Option \"%(option)s\" must be supported by command \"%(command)s\" to enable " "%(mode)s mode" msgstr "" "\"%(option)s\" seçeneği %(mode)s kipini etkinleştirmek için \"%(command)s\" " "komutuyla desteklenmeli" #, python-format msgid "Port %s not found during update" msgstr "%s bağlantı noktası güncelleme sırasında bulunamadı" msgid "Port ID not set! Nova will not be notified of port status change." msgstr "" "Bağlantı noktası kimliği ayarlanmamış! Nova bağlantı noktası durumu " "değişikliğinde bilgilendirilmeyecek." #, python-format msgid "" "Removing network %(network)s from agent %(agent)s because the agent did not " "report to the server in the last %(dead_time)s seconds." msgstr "" "%(network)s ağı %(agent)s ajanından çıkarılıyor çünkü ajan sunucuya son " "%(dead_time)s saniye rapor vermedi." #, python-format msgid "" "Rescheduling router %(router)s from agent %(agent)s because the agent did " "not report to the server in the last %(dead_time)s seconds." msgstr "" "Yönlendirici %(router)s %(agent)s ajanından yeniden zamanlanıyor çünkü ajan " "sunucuya son %(dead_time)s saniye rapor vermedi." msgid "" "Security group agent binding currently not set. This should be set by the " "end of the init process." msgstr "" "Güvenlik grubu ajan bağlama şu an ayarlanmış değil. Bu init sürecinin " "sonunda ayarlanmış olmalı." #, python-format msgid "" "The configured driver %(driver)s has been moved, automatically using " "%(new_driver)s instead. Please update your config files, as this automatic " "fixup will be removed in a future release." msgstr "" "Yapılandırılan sürücü %(driver)s taşınnmış, yerine otomatik olarak " "%(new_driver)s kullanılıyor. Lütfen yapılandırma dosyalarınızı güncelleyin, " "çünkü bu otomatik düzeltme ileri sürümlerde kaldırılacak." msgid "" "The remote metadata server responded with Forbidden. This response usually " "occurs when shared secrets do not match." msgstr "" "Uzak metadata sunucu Yasaklı yanıtı döndü. Bu yanıt genellikle paylaşılan " "gizler eşleşmediğinde oluşur." msgid "" "The user that is executing neutron can read the namespaces without using the " "root_helper. Disable the use_helper_for_ns_read option to avoid a " "performance impact." msgstr "" "Neutron'u çalıştıran kullanıcı root_helper kullanmadan isim uzaylarını " "okuyabilir. Performansı etkilememesi için use_helper_for_ns_read seçeneğini " "kapatın." #, python-format msgid "" "Time since last %s agent reschedule check has exceeded the interval between " "checks. Waiting before check to allow agents to send a heartbeat in case " "there was a clock adjustment." msgstr "" "Son %s ajan yeniden zamanlama kontrolünden sonra geçen zaman kontroller " "arası zaman aralığını aştı. Bir saat ayarlama yapılmış olması durumunu " "hesaba katmak için ajanların kalp atışı gönderebilmesi için kontrolden önce " "bekleniyor." #, python-format msgid "" "Tried to remove rule that was not there: %(chain)r %(rule)r %(wrap)r %(top)r" msgstr "" "Mevcut olmayan kural silinmeye çalışıldı: %(chain)r %(rule)r %(wrap)r %(top)r" msgid "Tunnel synchronization requires a server upgrade." msgstr "Tünel eş zamanlama sunucu yükseltmesi gerektiriyor." #, python-format msgid "" "Unable to %(action)s dhcp for %(net_id)s: there is a conflict with its " "current state; please check that the network and/or its subnet(s) still " "exist." msgstr "" "%(net_id)s için %(action)s dhcp yapılamadı: mevcut durumuyla ilgili bir " "çatışma var; lütfen ağ ve/veya alt ağ(lar)ının hala mevcut olduğunu kontrol " "edin." 
#, python-format msgid "Unable to configure IP address for floating IP: %s" msgstr "Değişken IP için IP adresi yapılandırılamıyor: %s" #, python-format msgid "Unable to find data type descriptor for attribute %s" msgstr "%s özniteliği için veri türü tanımlayıcısı bulunamadı" #, python-format msgid "" "Unable to schedule network %s: no agents available; will retry on subsequent " "port and subnet creation events." msgstr "" "Ağ %s zamanlanamadı: hiçbir ajan uygun değil; sonraki bağlantı noktası " "üzerinden ve alt ağ oluşturma olayları tekrar denenecek." #, python-format msgid "Updating lease expiration is now deprecated. Issued from host %s." msgstr "" "Kira sona erme tarihlerini güncelleme artık kullanılmıyor. %s istemcisinden " "yayınlandı." #, python-format msgid "" "VIF port: %s has no ofport configured, and might not be able to transmit" msgstr "" "VIF bağlantı noktası: %s'in yapılandırılmış bir ofport'u yok, aktarım " "yapamayabilir" #, python-format msgid "device pci mismatch: %(device_mac)s - %(pci_slot)s" msgstr "aygıt pci uyuşmazlığı: %(device_mac)s - %(pci_slot)s" #, python-format msgid "failed to parse vf link show line %(line)s: for %(device)s" msgstr "" "vf bağlantısı gösteri satırı %(line)s: %(device)s için ayrıştırma başarısız" #, python-format msgid "" "l3-agent cannot check service plugins enabled at the neutron server when " "startup due to RPC error. It happens when the server does not support this " "RPC API. If the error is UnsupportedVersion you can ignore this warning. " "Detail message: %s" msgstr "" "RPC hatası sebebiyle l3-agent açılışta neutron sunucusundaki neutron servis " "eklentilerinin etkinliğini kontrol edemiyor. Bu durum sunucu bu RPC API'sini " "desteklemediğinde olabilir. Hata UnsupportedVersion ise bu uyarıyı göz ardı " "edebilirsiniz. Detaylı ileti: %s" #, python-format msgid "ofport: %(ofport)s for VIF: %(vif)s is not a positive integer" msgstr "VIF: %(vif)s için ofport: %(ofport)s pozitif tam sayı değil" msgid "" "security_group_info_for_devices rpc call not supported by the server, " "falling back to old security_group_rules_for_devices which scales worse." msgstr "" "security_group_info_for_devices rpc çağrısı sunucu tarafından " "desteklenmiyor, daha kötü ölçeklenen eski security_group_rules_for_devices'e " "dönülüyor." #, python-format msgid "unable to modify mac_address of ACTIVE port %s" msgstr "%s ETKİN bağlantı noktasının mac_address'i değiştirilemiyor" neutron-8.4.0/neutron/locale/es/0000775000567000056710000000000013044373210017713 5ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/locale/es/LC_MESSAGES/0000775000567000056710000000000013044373210021500 5ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/locale/es/LC_MESSAGES/neutron.po0000664000567000056710000051374113044372760023556 0ustar jenkinsjenkins00000000000000# Translations template for neutron. # Copyright (C) 2015 ORGANIZATION # This file is distributed under the same license as the neutron project. # # Translators: # Victoria Martínez de la Cruz , 2013 # Eugènia Torrella , 2016. 
#zanata msgid "" msgstr "" "Project-Id-Version: neutron 8.2.1.dev52\n" "Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n" "POT-Creation-Date: 2016-09-01 18:10+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" "PO-Revision-Date: 2016-05-02 08:56+0000\n" "Last-Translator: Eugènia Torrella \n" "Language: es\n" "Plural-Forms: nplurals=2; plural=(n != 1);\n" "Generated-By: Babel 2.0\n" "X-Generator: Zanata 3.7.3\n" "Language-Team: Spanish\n" #, python-format msgid "" "\n" "Command: %(cmd)s\n" "Exit code: %(code)s\n" "Stdin: %(stdin)s\n" "Stdout: %(stdout)s\n" "Stderr: %(stderr)s" msgstr "" "\n" "Mandato: %(cmd)s\n" "Código de salida: %(code)s\n" "Stdin: %(stdin)s\n" "Salida estándar: %(stdout)s\n" "Error estándar: %(stderr)s" #, python-format msgid "" "%(branch)s HEAD file does not match migration timeline head, expected: " "%(head)s" msgstr "" "El archivo HEAD de %(branch)s no coincide con el head de la línea de tiempo " "de migración, se esperaba: %(head)s" #, python-format msgid "%(driver)s: Internal driver error." msgstr "%(driver)s: Error de controlador interno." #, python-format msgid "%(id)s is not a valid %(type)s identifier" msgstr "%(id)s no es un identificador %(type)s válido" #, python-format msgid "" "%(invalid_dirs)s is invalid value for sort_dirs, valid value is '%(asc)s' " "and '%(desc)s'" msgstr "" "%(invalid_dirs)s no es un valor válido para sort_dirs, los valores válidos " "son '%(asc)s' y '%(desc)s'" #, python-format msgid "%(key)s prohibited for %(tunnel)s provider network" msgstr "%(key)s prohibido para red de proveedor %(tunnel)s" #, python-format msgid "" "%(method)s called with network settings %(current)s (original settings " "%(original)s) and network segments %(segments)s" msgstr "" "%(method)s llamado con configuraciones de red %(current)s (valores " "originales %(original)s) y segmentos de red %(segments)s" #, python-format msgid "" "%(method)s called with port settings %(current)s (original settings " "%(original)s) host %(host)s (original host %(original_host)s) vif type " "%(vif_type)s (original vif type %(original_vif_type)s) vif details " "%(vif_details)s (original vif details %(original_vif_details)s) binding " "levels %(levels)s (original binding levels %(original_levels)s) on network " "%(network)s with segments to bind %(segments_to_bind)s" msgstr "" "%(method)s llamado con ajustes de puerto %(current)s (ajustes originales " "%(original)s) host %(host)s (host original %(original_host)s) tipo de VIF " "%(vif_type)s (tipo de VIF original %(original_vif_type)s) detalles de VIF " "%(vif_details)s (detalles de VIF originales %(original_vif_details)s) " "niveles de enlace %(levels)s (niveles de enlace originales " "%(original_levels)s) en la red %(network)s con segmentos a enlazar " "%(segments_to_bind)s" #, python-format msgid "" "%(method)s called with subnet settings %(current)s (original settings " "%(original)s)" msgstr "" "%(method)s llamado con ajustes de subred %(current)s (ajustes originales " "%(original)s)" #, python-format msgid "%(method)s failed." msgstr "%(method)s ha fallado." #, python-format msgid "%(name)s '%(addr)s' does not match the ip_version '%(ip_version)s'" msgstr "%(name)s '%(addr)s' no coincide con la versión de IP '%(ip_version)s'" #, python-format msgid "%(param)s must be in %(range)s range." msgstr "%(param)s debe estar dentro del rango %(range)s." 
#, python-format msgid "%s cannot be called while in offline mode" msgstr "No se puede invocar a %s en la modalidad fuera de línea" #, python-format msgid "%s is invalid attribute for sort_key" msgstr "%s es un atributo no válido para sort_key" #, python-format msgid "%s is invalid attribute for sort_keys" msgstr "%s no es un atributo válido para sort_keys" #, python-format msgid "%s is not a valid VLAN tag" msgstr "%s no es una etiqueta VLAN válida" #, python-format msgid "%s must be specified" msgstr "Se debe especificar %s" #, python-format msgid "%s must implement get_port_from_device or get_ports_from_devices." msgstr "%s debe implementar get_port_from_device o get_ports_from_devices." #, python-format msgid "%s prohibited for VLAN provider network" msgstr "%s prohibido para la red de proveedor VLAN" #, python-format msgid "%s prohibited for flat provider network" msgstr "%s prohibido para la red de proveedor simple" #, python-format msgid "%s prohibited for local provider network" msgstr "%s prohibido para la red de proveedor local" #, python-format msgid "" "'%(data)s' contains '%(length)s' characters. Adding a domain name will cause " "it to exceed the maximum length of a FQDN of '%(max_len)s'" msgstr "" "'%(data)s' contiene '%(length)s' caracteres. Si se añade un nombre de " "dominio se superará la longitud máxima de un FQDN, que es de '%(max_len)s'" #, python-format msgid "" "'%(data)s' contains '%(length)s' characters. Adding a sub-domain will cause " "it to exceed the maximum length of a FQDN of '%(max_len)s'" msgstr "" "'%(data)s' contiene '%(length)s' caracteres. Si se añade un subdominio se " "superará la longitud máxima de un FQDN, que es de '%(max_len)s'" #, python-format msgid "'%(data)s' exceeds maximum length of %(max_len)s" msgstr "'%(data)s' supera la longitud máxima de %(max_len)s" #, python-format msgid "'%(data)s' is not an accepted IP address, '%(ip)s' is recommended" msgstr "'%(data)s' no es una dirección IP aceptada, se recomienda '%(ip)s'" #, python-format msgid "'%(data)s' is not in %(valid_values)s" msgstr "'%(data)s' no está en %(valid_values)s" #, python-format msgid "'%(data)s' is too large - must be no larger than '%(limit)d'" msgstr "'%(data)s' es muy grande, no debe ser más grande que '%(limit)d'" #, python-format msgid "'%(data)s' is too small - must be at least '%(limit)d'" msgstr "'%(data)s' es muy pequeño, debe ser al menos '%(limit)d'" #, python-format msgid "'%(data)s' isn't a recognized IP subnet cidr, '%(cidr)s' is recommended" msgstr "" "'%(data)s' no es un cidr de subred IP reconocido, se recomienda '%(cidr)s'" #, python-format msgid "'%(data)s' not a valid PQDN or FQDN. Reason: %(reason)s" msgstr "'%(data)s' no es un PQDN o FQDN válido. Motivo: %(reason)s" #, python-format msgid "'%(host)s' is not a valid nameserver. %(msg)s" msgstr "'%(host)s' no es un servidor de nombres válido. %(msg)s" #, python-format msgid "'%s' Blank strings are not permitted" msgstr "No se permiten '%s' cadenas en blanco" #, python-format msgid "'%s' cannot be converted to boolean" msgstr "'%s' no se puede convertir a booleano" #, python-format msgid "'%s' cannot be converted to lowercase string" msgstr "'%s' no se puede convertir a cadena en minúsculas" #, python-format msgid "'%s' contains whitespace" msgstr "'%s' contiene espacios en blanco" #, python-format msgid "'%s' exceeds the 255 character FQDN limit" msgstr "'%s' supera el límite FQDN de 255 caracteres" #, python-format msgid "'%s' is a FQDN. It should be a relative domain name" msgstr "'%s' es un FQDN. 
Debería ser un nombre de dominio relativo" #, python-format msgid "'%s' is not a FQDN" msgstr "'%s' no es un FQDN" #, python-format msgid "'%s' is not a dictionary" msgstr "'%s' no es un diccionario" #, python-format msgid "'%s' is not a list" msgstr "'%s' no es una lista" #, python-format msgid "'%s' is not a valid IP address" msgstr "'%s' no es una dirección IP válida" #, python-format msgid "'%s' is not a valid IP subnet" msgstr "'%s' no es una subred IP válida" #, python-format msgid "'%s' is not a valid MAC address" msgstr "'%s' no es una dirección MAC válida" #, python-format msgid "'%s' is not a valid RBAC object type" msgstr "'%s' no es un tipo de objeto RBAC válido" #, python-format msgid "'%s' is not a valid UUID" msgstr "'%s' no es un UUID válido" #, python-format msgid "'%s' is not a valid boolean value" msgstr "'%s' no es un valor booleano" #, python-format msgid "'%s' is not a valid input" msgstr "'%s' no es una entrada válida" #, python-format msgid "'%s' is not a valid string" msgstr "'%s' no es una cadena válida" #, python-format msgid "'%s' is not an integer" msgstr "'%s' no es un entero" #, python-format msgid "'%s' is not an integer or uuid" msgstr "'%s' no es un entero o uuid" #, python-format msgid "'%s' is not of the form =[value]" msgstr "'%s' no tiene el formato =[valor]" #, python-format msgid "'%s' is not supported for filtering" msgstr "'%s' no se admite para filtrar" #, python-format msgid "'%s' must be a non negative decimal." msgstr "'%s' debe ser un decimal no negativo." #, python-format msgid "'%s' should be non-negative" msgstr "'%s' debe ser no negativo" msgid "'.' searches are not implemented" msgstr "Las búsquedas '.' no están implementadas" #, python-format msgid "'module' object has no attribute '%s'" msgstr "El objeto 'module' no tiene ningún atributo '%s'" msgid "'port_max' is smaller than 'port_min'" msgstr "'port_max' es menor que 'port_min'" msgid "" "(Deprecated. Use '--subproject neutron-SERVICE' instead.) The advanced " "service to execute the command against." msgstr "" "(En desuso. Utilice '--subproject neutron-SERVICE' en su lugar). Servicio " "avanzado contra el que ejecutar el mandato." msgid "0 is not allowed as CIDR prefix length" msgstr "0 no está permitido como longitud del prefijo de CIDR" msgid "" "32-bit BGP identifier, typically an IPv4 address owned by the system running " "the BGP DrAgent." msgstr "" "Identificador de BGP de 32 bits, generalmente una dirección IPv4 propiedad " "del sistema que ejecuta el DrAgent de BGP." msgid "A QoS driver must be specified" msgstr "Debe especificar un controlador de calidad de servicio" msgid "A cidr must be specified in the absence of a subnet pool" msgstr "Debe especificarse un cidr en ausencia de una agrupación de subred" msgid "" "A decimal value as Vendor's Registered Private Enterprise Number as required " "by RFC3315 DUID-EN." msgstr "" "Valor decimal como número de empresa privada registrada del proveedor según " "lo exigido en RFC3315 DUID-EN." #, python-format msgid "A default external network already exists: %(net_id)s." msgstr "Ya existe una red externa predeterminada: %(net_id)s." msgid "" "A default subnetpool for this IP family has already been set. Only one " "default may exist per IP family" msgstr "" "Ya se ha definido una agrupación de subredes predeterminada para esta " "familia de IP. 
Solo puede haber un valor predeterminado por cada familia de " "IP" msgid "A metering driver must be specified" msgstr "Se debe especificar un controlador de medición" msgid "A password must be supplied when using auth_type md5." msgstr "Se debe especificar una contraseña al utilizar auth_type md5." msgid "API for retrieving service providers for Neutron advanced services" msgstr "" "API para recuperar los proveedores de servicio para servicios avanzados de " "Neutron" msgid "Aborting periodic_sync_routers_task due to an error." msgstr "Terminando anormalmente periodic_sync_routers_task debido a un error." msgid "Access to this resource was denied." msgstr "Se ha denegado el acceso a este recurso." msgid "Action to be executed when a child process dies" msgstr "Acción a ejecutar cuando termina un proceso secundario" msgid "" "Add comments to iptables rules. Set to false to disallow the addition of " "comments to generated iptables rules that describe each rule's purpose. " "System must support the iptables comments module for addition of comments." msgstr "" "Añadir comentarios a reglas de iptables. Establézcalo en False para no " "permitir la adición, en las reglas de iptables generadas, de comentarios " "para describir el propósito de cada regla. El sistema debe admitir el módulo " "de comentarios de iptables para que se puedan añadir comentarios." msgid "Address not present on interface" msgstr "La dirección no está presente en la interfaz" #, python-format msgid "Address scope %(address_scope_id)s could not be found" msgstr "No se ha encontrado el ámbito de direcciones %(address_scope_id)s" msgid "" "Address to listen on for OpenFlow connections. Used only for 'native' driver." msgstr "" "Dirección en la que se escuchan las conexiones OpenFlow. Se utiliza sólo " "para el controlador 'native'." msgid "Adds external network attribute to network resource." msgstr "Añade el atributo de red externa al recurso de red." msgid "Adds test attributes to core resources." msgstr "Añade atributos de prueba a recursos de núcleo." #, python-format msgid "Agent %(id)s could not be found" msgstr "No se ha podido encontrar el agente %(id)s." #, python-format msgid "Agent %(id)s is not a L3 Agent or has been disabled" msgstr "El agente %(id)s no es un agente L3 válido o se ha inhabilitado" #, python-format msgid "Agent %(id)s is not a valid DHCP Agent or has been disabled" msgstr "El agente %(id)s no es un agente DHCP válido o se ha inhabilitado" msgid "Agent has just been revived" msgstr "El agente acaba de revivir" msgid "" "Agent starts with admin_state_up=False when enable_new_agents=False. In the " "case, user's resources will not be scheduled automatically to the agent " "until admin changes admin_state_up to True." msgstr "" "El agente se inicia con admin_state_up=False cuando enable_new_agents=False. " "En ese caso, los recursos del usuario no se planificarán automáticamente en " "el agente hasta que el administrador cambie admin_state_up a True." #, python-format msgid "Agent updated: %(payload)s" msgstr "El agente se ha actualizado: %(payload)s" #, python-format msgid "" "Agent with agent_type=%(agent_type)s and host=%(host)s could not be found" msgstr "" "No se ha podido encontrar el agente con agent_type=%(agent_type)s y host=" "%(host)s" msgid "Allow auto scheduling networks to DHCP agent." msgstr "Permitir la planificación automática de redes para el agente DHCP." msgid "Allow auto scheduling of routers to L3 agent." msgstr "Permitir auto programación de direccionadore al agente L3." 
msgid "" "Allow overlapping IP support in Neutron. Attention: the following parameter " "MUST be set to False if Neutron is being used in conjunction with Nova " "security groups." msgstr "" "Soporte para permitir el solapamiento de IP en Neutron. Atención: el " "siguiente parámetro se DEBE definir a False si se utiliza Neutron " "conjuntamente con los grupos de seguridad de Nova." msgid "Allow running metadata proxy." msgstr "Permitir ejecutar el proxy de metadatos." msgid "Allow sending resource operation notification to DHCP agent" msgstr "" "Notificación de la operación de permitir el envío de recursso al agente DHCP" msgid "Allow the creation of PTR records" msgstr "Permitir la creación de registros PTR" msgid "Allow the usage of the bulk API" msgstr "Permitir el uso de la API masiva" msgid "Allow the usage of the pagination" msgstr "Permitir el uso de la paginación" msgid "Allow the usage of the sorting" msgstr "Permitir el uso de la ordenación" msgid "Allow to perform insecure SSL (https) requests to nova metadata" msgstr "" "Permitir ejecutar solicitudes SSL (https) no seguras en los metadatos de Nova" msgid "Allowed address pairs must be a list." msgstr "Los pares de direcciones permitidos deben ser una lista." msgid "AllowedAddressPair must contain ip_address" msgstr "AllowedAddressPair debe contener ip_address" msgid "" "Allows for serving metadata requests coming from a dedicated metadata access " "network whose CIDR is 169.254.169.254/16 (or larger prefix), and is " "connected to a Neutron router from which the VMs send metadata:1 request. In " "this case DHCP Option 121 will not be injected in VMs, as they will be able " "to reach 169.254.169.254 through a router. This option requires " "enable_isolated_metadata = True." msgstr "" "Permite atender solicitudes de metadatos procedentes de una red de acceso a " "metadatos dedicada cuyo CIDR es 169.254.169.254/16 (o un prefijo más largo) " "y está conectada a un direccionador Neutron desde el cual las VM envían la " "solicitud metadata:1. En este caso, no se inyectará la opción DHCP 121 en " "las VM, porqué podrán alcanzar 169.254.169.254 mediante un direccionador. " "Esta opción requiere enable_isolated_metadata = True." #, python-format msgid "" "Already hosting BGP Speaker for local_as=%(current_as)d with router_id=" "%(rtid)s." msgstr "" "Ya se está alojando el hablante de BGP para local_as=%(current_as)d con el " "router_id=%(rtid)s." #, python-format msgid "" "Already hosting maximum number of BGP Speakers. Allowed scheduled count=" "%(count)d" msgstr "" "Ya se está alojando el número máximo de hablantes de BGP. Recuento " "planificado permitido=%(count)d" msgid "An RBAC policy already exists with those values." msgstr "Ya existe una política RBAC con esos valores." msgid "An identifier must be specified when updating a subnet" msgstr "Se debe especificar un identificador al actualizar una subred" msgid "An interface driver must be specified" msgstr "Se debe especificar un controlador de interfaz" msgid "" "An ordered list of extension driver entrypoints to be loaded from the " "neutron.ml2.extension_drivers namespace. For example: extension_drivers = " "port_security,qos" msgstr "" "Lista de puntos de entrada del controlador de extensión para cargar desde el " "espacio de nombres neutron.ml2.extension_drivers. Por ejemplo: " "extension_drivers = port_security,qos" msgid "" "An ordered list of networking mechanism driver entrypoints to be loaded from " "the neutron.ml2.mechanism_drivers namespace." 
msgstr "" "Una lista ordenada de puntos de entrada de controlador de mecanismo de red a " "cargar desde el espacio de nombres neutron.ml2.mechanism_drivers." msgid "An unexpected internal error occurred." msgstr "Se ha producido un error interno inesperado." msgid "An unknown error has occurred. Please try your request again." msgstr "Se ha producido un error desconocido. Vuelva a intentar la solicitud." msgid "Async process didn't respawn" msgstr "El proceso de asignación no se ha vuelto a generar" #, python-format msgid "Attribute '%s' not allowed in POST" msgstr "El atributo '%s' no está permitido en POST" #, python-format msgid "Authentication type not supported. Requested type=%(auth_type)s." msgstr "Tipo de autenticación no soportada. Tipo solicitado=%(auth_type)s." msgid "Authorization URL for connecting to designate in admin context" msgstr "" "URL de autorización para establecer conexión con el designado en el contexto " "de administración" msgid "Automatically remove networks from offline DHCP agents." msgstr "Eliminar automáticamente las redes de los agentes DHCP fuera de línea." msgid "" "Automatically reschedule routers from offline L3 agents to online L3 agents." msgstr "" "Volver a planificar automáticamente los direccionadores de los agentes L3 " "fuera de línea a los agentes L3 en línea." msgid "Availability zone of this node" msgstr "Zona de disponibilidad de este nodo" #, python-format msgid "AvailabilityZone %(availability_zone)s could not be found." msgstr "" "No se ha podido encontrar la zona de disponibilidad %(availability_zone)s ." msgid "Available commands" msgstr "Mandatos disponibles" #, python-format msgid "" "BGP Peer %(peer_ip)s for remote_as=%(remote_as)s, running for BGP Speaker " "%(speaker_as)d not added yet." msgstr "" "Todavía no se ha añadido el igual de BGP %(peer_ip)s para remote_as=" "%(remote_as)s que se ejecuta para el hablante de BGP %(speaker_as)d." #, python-format msgid "" "BGP Speaker %(bgp_speaker_id)s is already configured to peer with a BGP Peer " "at %(peer_ip)s, it cannot peer with BGP Peer %(bgp_peer_id)s." msgstr "" "El hablante de BGP %(bgp_speaker_id)s ya está configurado para emparejarse " "con un igual de BGP en %(peer_ip)s, no se puede emparejar con el igual de " "BGP %(bgp_peer_id)s." #, python-format msgid "" "BGP Speaker for local_as=%(local_as)s with router_id=%(rtid)s not added yet." msgstr "" "Todavía no se ha añadido el hablante de BGP para local_as=%(local_as)s con " "router_id=%(rtid)s." #, python-format msgid "" "BGP peer %(bgp_peer_id)s is not associated with BGP speaker " "%(bgp_speaker_id)s." msgstr "" "El igual de BGP %(bgp_peer_id)s no está asociado al hablante de BGP " "%(bgp_speaker_id)s." #, python-format msgid "BGP peer %(bgp_peer_id)s not authenticated." msgstr "El igual de BGP %(bgp_peer_id)s no está autenticado." #, python-format msgid "BGP peer %(id)s could not be found." msgstr "No se ha podido encontrar el igual de BGP %(id)s." #, python-format msgid "" "BGP speaker %(bgp_speaker_id)s is not hosted by the BgpDrAgent %(agent_id)s." msgstr "" "El hablante de BGP %(bgp_speaker_id)s no está alojado por el BgpDrAgent " "%(agent_id)s." #, python-format msgid "BGP speaker %(id)s could not be found." msgstr "No se ha podido encontrar el hablante de BGP %(id)s." msgid "BGP speaker driver class to be instantiated." msgstr "Clase de controlador de hablante de BGP a instanciar." msgid "Backend does not support VLAN Transparency." msgstr "El programa de fondo no admite la transparencia de VLAN." 
#, python-format msgid "" "Bad prefix or mac format for generating IPv6 address by EUI-64: %(prefix)s, " "%(mac)s:" msgstr "" "Prefijo o formato de mac incorrecto para generar dirección IPv6 por EUI-64: " "%(prefix)s, %(mac)s:" #, python-format msgid "Bad prefix type for generate IPv6 address by EUI-64: %s" msgstr "Tipo de prefijo incorrecto para generar dirección IPv6 por EUI-64: %s" #, python-format msgid "Base MAC: %s" msgstr "MAC base: %s" msgid "" "Base log dir for dnsmasq logging. The log contains DHCP and DNS log " "information and is useful for debugging issues with either DHCP or DNS. If " "this section is null, disable dnsmasq log." msgstr "" "Directorio de registro base para el registro dnsmasq. El registro contiene " "información de registro DHCP y DNS y es útil para problemas de depuración " "con DHCP o DNS. Si esta sección es nula, inhabilite el registro dnsmasq." #, python-format msgid "BgpDrAgent %(agent_id)s is already associated to a BGP speaker." msgstr "El BgpDrAgent %(agent_id)s ya está alsociado a un hablante de BGP." #, python-format msgid "BgpDrAgent %(id)s is invalid or has been disabled." msgstr "BgpDrAgent %(id)s no es válido o se ha deshabiltado." #, python-format msgid "BgpDrAgent updated: %s" msgstr "BgpDrAgent actualizado: %s" msgid "Body contains invalid data" msgstr "El cuerpo contiene datos no válidos" msgid "Both network_id and router_id are None. One must be provided." msgstr "Tanto network_id como router_id son None. Se debe proporcionar uno." #, python-format msgid "Bridge %(bridge)s does not exist." msgstr "El puente %(bridge)s no existe." #, python-format msgid "Bridge %s does not exist" msgstr "El puente %s no existe" msgid "Bulk operation not supported" msgstr "No se admiten operaciones masivas" msgid "CIDR to monitor" msgstr "CIDR a supervisar" #, python-format msgid "Callback for %(resource_type)s not found" msgstr "No se ha encontrado la devolución de llamada para %(resource_type)s" #, python-format msgid "Callback for %(resource_type)s returned wrong resource type" msgstr "" "La devolución de llamada para %(resource_type)s ha devuelto el tipo de " "recurso equivocado" #, python-format msgid "Cannot add floating IP to port %s that has no fixed IPv4 addresses" msgstr "" "No se puede añadir una IP flotante al puerto %s que no tiene direcciones " "IPv4 fijas" #, python-format msgid "Cannot add floating IP to port on subnet %s which has no gateway_ip" msgstr "" "No se puede añadir IP flotante al puerto en la subred %s que no tiene IP de " "pasarela" #, python-format msgid "Cannot add multiple callbacks for %(resource_type)s" msgstr "" "No se pueden añadir varias devoluciones de llamada para %(resource_type)s" #, python-format msgid "Cannot allocate IPv%(req_ver)s subnet from IPv%(pool_ver)s subnet pool" msgstr "" "No se puede asignar una subred IPv%(req_ver)s de la agrupación de subred IPv" "%(pool_ver)s" msgid "Cannot allocate requested subnet from the available set of prefixes" msgstr "" "No se puede asignar la subred solicitada a partir del conjunto disponible de " "prefijos" #, python-format msgid "" "Cannot associate floating IP %(floating_ip_address)s (%(fip_id)s) with port " "%(port_id)s using fixed IP %(fixed_ip)s, as that fixed IP already has a " "floating IP on external network %(net_id)s." msgstr "" "No se puede asociar la IP flotante %(floating_ip_address)s (%(fip_id)s) con " "el puerto %(port_id)s mediante la IP fija %(fixed_ip)s, porque esa IP fija " "ya tiene una IP flotante en la red externa %(net_id)s." 
msgid "" "Cannot change HA attribute of active routers. Please set router " "admin_state_up to False prior to upgrade." msgstr "" "No se puede cambiar el atributo HA de los direccionadores activos. Defina " "admin_state_up en el direccionador a False antes de actualizar." #, python-format msgid "" "Cannot create floating IP and bind it to %s, since that is not an IPv4 " "address." msgstr "" "No se puede crear la IP flotante y enlazarla a %s, puesto que no es una " "dirección IPv4 ." #, python-format msgid "" "Cannot create floating IP and bind it to Port %s, since that port is owned " "by a different tenant." msgstr "" "No se puede crear la IP flotante y enlazarla al puerto %s, porque ese puerto " "es propiedad de otro arrendatario distinto." msgid "Cannot create resource for another tenant" msgstr "No se puede crear el recurso para otro arrendatario" msgid "Cannot disable enable_dhcp with ipv6 attributes set" msgstr "No se puede inhabilitar enable_dhcp si hay atributos ipv6 establecidos" #, python-format msgid "Cannot find %(table)s with %(col)s=%(match)s" msgstr "No se encuentra %(table)s con %(col)s=%(match)s" #, python-format msgid "Cannot handle subnet of type %(subnet_type)s" msgstr "No se puede manejar la subred de tipo %(subnet_type)s" msgid "Cannot have multiple IPv4 subnets on router port" msgstr "No puede tener varias subredes IPv4 en el puerto del direccionador" #, python-format msgid "" "Cannot have multiple router ports with the same network id if both contain " "IPv6 subnets. Existing port %(p)s has IPv6 subnet(s) and network id %(nid)s" msgstr "" "No puede tener varios puertos de direccionador con el mismo ID de red si " "ambos contienen subredes IPv6. El puerto existente %(p)s tiene subredes IPv6 " "y un id de red %(nid)s" #, python-format msgid "" "Cannot host distributed router %(router_id)s on legacy L3 agent %(agent_id)s." msgstr "" "No se puede alojar el direccionador distribuido %(router_id)s en el agente " "L3 heredado %(agent_id)s." msgid "Cannot match priority on flow deletion or modification" msgstr "" "No se puede hacer coincidir la prioridad en la supresión o modificación de " "flujo" msgid "Cannot mix IPv4 and IPv6 prefixes in a subnet pool." msgstr "No se pueden mezclar prefijos IPv4 e IPv6 en una agrupación de subred." msgid "Cannot specify both --service and --subproject." msgstr "No se pueden especificar los dos: --service y --subproject." msgid "Cannot specify both subnet-id and port-id" msgstr "No se puede especificar el ID de subred y el ID de puerto" msgid "Cannot understand JSON" msgstr "No se puede entender JSON" #, python-format msgid "Cannot update read-only attribute %s" msgstr "No se puede actualizar el atributo de sólo lectura %s" msgid "" "Cannot upgrade active router to distributed. Please set router " "admin_state_up to False prior to upgrade." msgstr "" "No se puede actualizar el direccionador activo al distribuido. Establezca el " "direccionador admin_state_up en False antes de la actualización." msgid "Certificate Authority public key (CA cert) file for ssl" msgstr "" "Archivo de clave pública de entidad emisora de certificados (CA cert) para " "SSL" #, python-format msgid "" "Change would make usage less than 0 for the following resources: %(unders)s." msgstr "" "El cambio produciría un uso inferior a 0 para los recursos siguientes: " "%(unders)s." 
msgid "Check ebtables installation" msgstr "Comprobar la instalación de ebtables" msgid "Check for ARP header match support" msgstr "Comprobar el soporte de coincidencia de cabeceras ARP" msgid "Check for ARP responder support" msgstr "Comprobar el soporte de respondedor ARP" msgid "Check for ICMPv6 header match support" msgstr "Comprobar el soporte de coincidencia de cabeceras ICMPv6" msgid "Check for OVS Geneve support" msgstr "Comprobar el soporte de OVS Geneve" msgid "Check for OVS vxlan support" msgstr "Comprobar el soporte vxlan OVS" msgid "Check for VF management support" msgstr "Comprobar el soporte de gestión de VF" msgid "Check for iproute2 vxlan support" msgstr "Comprobar el soporte vxlan iproute2" msgid "Check for nova notification support" msgstr "Comprobar el soporte de notificación nova" msgid "Check for patch port support" msgstr "Comprobar el soporte de puerto de parche" msgid "Check ip6tables installation" msgstr "Comprobar la instalación de ip6tables" msgid "Check ipset installation" msgstr "Comprobar la instalación de ipset" msgid "Check keepalived IPv6 support" msgstr "Comprobar el soporte keepalived de IPv6" msgid "Check minimal dibbler version" msgstr "Comprobar la versión mínima de dibbler" msgid "Check minimal dnsmasq version" msgstr "Comprobar la versión mínima de dnsmasq" msgid "Check netns permission settings" msgstr "Comprobar los valores de permiso netns" msgid "Check ovs conntrack support" msgstr "Comprobar el soporte para ovs conntrack" msgid "Check ovsdb native interface support" msgstr "Comprobar el soporte de interfaz nativa ovsdb" #, python-format msgid "" "Cidr %(subnet_cidr)s of subnet %(subnet_id)s overlaps with cidr %(cidr)s of " "subnet %(sub_id)s" msgstr "" "El Cidr %(subnet_cidr)s de la subred %(subnet_id)s se solapa con el cidr " "%(cidr)s de la subred %(sub_id)s" msgid "Class not found." msgstr "No se ha encontrado la clase." msgid "Cleanup resources of a specific agent type only." msgstr "Limpiar solo los recursos de un tipo de agente específico." msgid "Client certificate for nova metadata api server." msgstr "Certificado de cliente para el servidor de la API de metadatos de Nova" msgid "" "Comma-separated list of : tuples, mapping " "network_device to the agent's node-specific list of virtual functions that " "should not be used for virtual networking. vfs_to_exclude is a semicolon-" "separated list of virtual functions to exclude from network_device. The " "network_device in the mapping should appear in the physical_device_mappings " "list." msgstr "" "Lista separada por comas de tuplas : que " "correlaciona network_device con la lista específica de nodo del agente de " "funciones virtuales que no deben utilizarse para las redes virtuales. " "vfs_to_exclude es una lista separada por punto y coma de funciones virtuales " "a excluir de network_device. El dispositivo de red (network_device) de la " "correlación debe aparecer en la lista physical_device_mappings." msgid "" "Comma-separated list of : tuples mapping physical " "network names to the agent's node-specific Open vSwitch bridge names to be " "used for flat and VLAN networks. The length of bridge names should be no " "more than 11. Each bridge must exist, and should have a physical network " "interface configured as a port. All physical networks configured on the " "server should have mappings to appropriate bridges on each agent. Note: If " "you remove a bridge from this mapping, make sure to disconnect it from the " "integration bridge as it won't be managed by the agent anymore. 
Deprecated " "for ofagent." msgstr "" "Lista separada por comas de tuplas : para " "correlacionar nombres de red física con los nombres de puente Open vSwitch " "específicos de nodo del agente a utilizar para redes simples y VLAN. Todos " "los puentes deben existir y deben tener una interfaz de red física " "configurada como puerto. Todas las redes físicas configuradas en el servidor " "deben tener correlaciones a los puentes adecuados en cada uno de los " "agentes. Nota: si elimina un puente de esta correlación, asegúrese de " "desconectarlo del puente de integración puesto que dejará de estar " "gestionado por el agente. En desuso para ofagent." msgid "" "Comma-separated list of : tuples mapping " "physical network names to the agent's node-specific physical network device " "interfaces of SR-IOV physical function to be used for VLAN networks. All " "physical networks listed in network_vlan_ranges on the server should have " "mappings to appropriate interfaces on each agent." msgstr "" "Lista separada por comas de tuplas de : " "que correlaciona nombres de red física con las interfaces de dispositivo de " "red física específica de nodo del agente de la función física SR-IOV que se " "va a utilizar para las redes VLAN. Todas las redes físicas listadas en " "network_vlan_ranges en el servidor deben tener correlaciones con las " "interfaces adecuadas en cada agente." msgid "" "Comma-separated list of : tuples " "mapping physical network names to the agent's node-specific physical network " "interfaces to be used for flat and VLAN networks. All physical networks " "listed in network_vlan_ranges on the server should have mappings to " "appropriate interfaces on each agent." msgstr "" "Lista separada por comas de tuplas : " "para correlacionar nombres de red física con las interfaces físicas " "específicas de nodo del agente a utilizar para redes simples y VLAN. Todas " "las redes físicas que se enumeran en network_vlan_ranges en el servidor " "deberían tener correlaciones a las interfaces adecuadas de cada agente." msgid "" "Comma-separated list of : tuples enumerating ranges of GRE " "tunnel IDs that are available for tenant network allocation" msgstr "" "La lista separada por comas de conjuntos de variables : " "enumera los rangos de los ID de túnel GRE que están disponibles para la " "asignación de red de arrendatario" msgid "" "Comma-separated list of : tuples enumerating ranges of " "Geneve VNI IDs that are available for tenant network allocation" msgstr "" "Lista separada por comas de conjuntos de variables : que " "enumeran los rangos de ID de Geneve VNI disponibles para la asignación de " "red de arrendatario" msgid "" "Comma-separated list of : tuples enumerating ranges of " "VXLAN VNI IDs that are available for tenant network allocation" msgstr "" "Lista separada por comas de conjuntos de variables : que " "enumeran los rangos de ID de VXLAN VNI que están disponibles para la " "asignación de red de arrendatario" msgid "" "Comma-separated list of supported PCI vendor devices, as defined by " "vendor_id:product_id according to the PCI ID Repository. Default enables " "support for Intel and Mellanox SR-IOV capable NICs." msgstr "" "Lista separada por comas de dispositivos de proveedor PCI soportados, según " "están definidos en vendor_id:product_id según el repositorio de ID de PCI. " "El valor predeterminado habilita el soporte de NIC con capacidad Intel y " "Mellanox SR-IOV" msgid "" "Comma-separated list of the DNS servers which will be used as forwarders." 
msgstr "" "Lista separada por comas de los servidores DNS que se utilizarán como " "reenviadores." msgid "Command to execute" msgstr "Mandato a ejecutar" msgid "Config file for interface driver (You may also use l3_agent.ini)" msgstr "" "Archivo de configuración para controlador de interfaz (También puede " "utilizar l3_agent.ini)" #, python-format msgid "Conflicting value ethertype %(ethertype)s for CIDR %(cidr)s" msgstr "Valor ethertype %(ethertype)s en conflicto para CIDR %(cidr)s" msgid "" "Controls whether the neutron security group API is enabled in the server. It " "should be false when using no security groups or using the nova security " "group API." msgstr "" "Controla si la API de grupo de seguridad de neutron está habilitada en el " "servidor. Debe ser false cuando no hay grupos de seguridad o se utiliza la " "API de grupo de seguridad de nova." #, python-format msgid "Could not bind to %(host)s:%(port)s after trying for %(time)d seconds" msgstr "" "No se ha podido enlazar a %(host)s:%(port)s tras intentarlo durante %(time)d " "segundos" #, python-format msgid "Could not connect to %s" msgstr "No se ha podido establecer conexión con %s" msgid "Could not deserialize data" msgstr "No se han podido deserializar los datos" #, python-format msgid "Could not retrieve schema from %(conn)s: %(err)s" msgstr "No se ha podido recuperar el esquema de %(conn)s: %(err)s" #, python-format msgid "" "Current gateway ip %(ip_address)s already in use by port %(port_id)s. Unable " "to update." msgstr "" "La IP de puerta de enlace actual %(ip_address)s ya está en uso por el puerto " "%(port_id)s. No se ha podido actualizar." msgid "Currently update of HA mode for a DVR/HA router is not supported." msgstr "" "No se admite la actualización actual de la modalidad HA para un router DVR/" "HA." msgid "Currently update of HA mode for a distributed router is not supported." msgstr "" "No se admite la actualización actual de la modalidad HA para un " "direccionador distribuido." msgid "" "Currently update of distributed mode for a DVR/HA router is not supported" msgstr "" "No se admite la actualización actual de la modalidad distribuida para un " "direccionador DVR/HA." msgid "Currently update of distributed mode for an HA router is not supported." msgstr "" "No se admite la actualización actual de la modalidad distribuida para un " "direccionador HA." msgid "" "Currently updating a router from DVR/HA to non-DVR non-HA is not supported." msgstr "" "Actualmente no se admite la actualización de un direccionador de DVR/HA a no-" "DVR, no-HA." msgid "Currently updating a router to DVR/HA is not supported." msgstr "" "Actualmente no se admite la actualización de un direccionador a DVR/HA." msgid "" "DHCP lease duration (in seconds). Use -1 to tell dnsmasq to use infinite " "lease times." msgstr "" "Duración de concesión de DHCP (en segundos). Utilice -1 para indicar a " "dnsmasq que utilice tiempos de concesión infinitos." msgid "" "DVR deployments for VXLAN/GRE/Geneve underlays require L2-pop to be enabled, " "in both the Agent and Server side." msgstr "" "Los despliegues de DVR para las subcapas VXLAN/GRE/Geneve requieren que L2-" "pop esté habilitado, tanto en el lado del agente como del servidor." msgid "" "Database engine for which script will be generated when using offline " "migration." msgstr "" "Motor de base de datos para el cual se generará el script cuando se utilice " "la migración fuera de línea." msgid "" "Default IPv4 subnet pool to be used for automatic subnet CIDR allocation. 
" "Specifies by UUID the pool to be used in case where creation of a subnet is " "being called without a subnet pool ID. If not set then no pool will be used " "unless passed explicitly to the subnet create. If no pool is used, then a " "CIDR must be passed to create a subnet and that subnet will not be allocated " "from any pool; it will be considered part of the tenant's private address " "space. This option is deprecated for removal in the N release." msgstr "" "Agrupación-subred IPv4 predeterminada que se utilizará para la asignación " "CIDR de subred automática. Especifica por UUID la agrupación a utilizar en " "caso de que se llame a la creación de una subred sin un ID de agrupación de " "subred. Si no está definido, no se utilizará ninguna agrupación a no ser que " "se pase explícitamente a la creación de subred. Si no se utiliza ninguna " "agrupación, se debe pasar un CIDR para crear una subred y esa subred no se " "asignará desde ninguna agrupación; se considerará parte del espacio de " "direcciones privado del arrendatario. Esta opción está en desuso y se " "eliminará en el release N." msgid "" "Default IPv6 subnet pool to be used for automatic subnet CIDR allocation. " "Specifies by UUID the pool to be used in case where creation of a subnet is " "being called without a subnet pool ID. See the description for " "default_ipv4_subnet_pool for more information. This option is deprecated for " "removal in the N release." msgstr "" "Agrupación-subred IPv6 predeterminada que se utilizará para la asignación " "CIDR de subred automática. Especifica por UUID la agrupación a utilizar en " "caso de que se llame a la creación de una subred sin un ID de agrupación de " "subred. Consulte la descripción de default_ipv4_subnet_pool para obtener más " "información. Esta opción está en desuso y se eliminará en el relesase N." msgid "Default driver to use for quota checks" msgstr "Controlador predeterminado a utilizar para comprobaciones de cuota" msgid "Default external networks must be shared to everyone." msgstr "" "Las redes externas predeterminadas se deben compartir con todo el mundo." msgid "" "Default network type for external networks when no provider attributes are " "specified. By default it is None, which means that if provider attributes " "are not specified while creating external networks then they will have the " "same type as tenant networks. Allowed values for external_network_type " "config option depend on the network type values configured in type_drivers " "config option." msgstr "" "Tipo de red predeterminado para redes externas si se especifican atributos " "sin proveedor. De forma predeterminada, es Ninguno, que significa que si no " "se especifican atributos de proveedor al crear redes externas, tendrán el " "mismo tipo que las redes de arrendatario. Los valores permitidos de la " "opción de configuración external_network_type dependen de los valores de " "tipo de red configurados en la opción de configuración type_drivers." msgid "" "Default number of RBAC entries allowed per tenant. A negative value means " "unlimited." msgstr "" "Número predeterminado de entradas RBAC permitidas por arrendatario. Un valor " "negativo significa ilimitados." msgid "" "Default number of resource allowed per tenant. A negative value means " "unlimited." msgstr "" "Número predeterminado de recursos permitidos por arrendatario. Un valor " "negativo significa ilimitados." msgid "Default security group" msgstr "Grupo de seguridad predeterminado" msgid "Default security group already exists." 
msgstr "El grupo de seguridad predeterminado ya existe." msgid "" "Default value of availability zone hints. The availability zone aware " "schedulers use this when the resources availability_zone_hints is empty. " "Multiple availability zones can be specified by a comma separated string. " "This value can be empty. In this case, even if availability_zone_hints for a " "resource is empty, availability zone is considered for high availability " "while scheduling the resource." msgstr "" "Valor predeteminado de las sugerencias de zonas de disponibilidad. Los " "planificadores conocedores de zonas de disponibilidad utilizan esto cuando " "availability_zone_hints de los recursos está vacío. Se pueden especificar " "múltiples zonas de disponibilidad en una cadena separada por comas. Este " "valor puede estar vacío. En este caso, aunque availability_zone_hints de un " "recurso esté vacío, se tiene en cuenta la zona de disponibilidad para la " "alta disponibilidad al planificar el recurso." msgid "" "Define the default value of enable_snat if not provided in " "external_gateway_info." msgstr "" "Defina el valor predeterminado de enable_snat si no se proporciona en " "external_gateway_info." msgid "" "Defines providers for advanced services using the format: :" ":[:default]" msgstr "" "Define proveedores para servicios avanzados con el formato: :" ":[:predeterminado]" msgid "" "Delay within which agent is expected to update existing ports whent it " "restarts" msgstr "" "Retardo dentro del cual se espera que el agente actualice los puertos " "existentes al reiniciarse" msgid "Delete the namespace by removing all devices." msgstr "Suprimir el espacio de nombres eliminando todos los dispositivos. " #, python-format msgid "Deleting port %s" msgstr "Suprimiendo el puerto %s" #, python-format msgid "Deployment error: %(reason)s." msgstr "Error de despliegue: %(reason)s." msgid "Destroy IPsets even if there is an iptables reference." msgstr "Destruir IPsets aunque haya una referencia a iptables." msgid "Destroy all IPsets." msgstr "Destruir todos los IPsets." #, python-format msgid "Device %(dev_name)s in mapping: %(mapping)s not unique" msgstr "" "El dispositivo %(dev_name)s en la correlación: %(mapping)s no es exclusivo" #, python-format msgid "Device '%(device_name)s' does not exist." msgstr "El dispositivo '%(device_name)s' no existe." msgid "Device has no virtual functions" msgstr "El dispositivo no tiene funciones virtuales" #, python-format msgid "Device name %(dev_name)s is missing from physical_device_mappings" msgstr "" "Falta el nombre de dispositivo %(dev_name)s en physical_device_mappings" msgid "Device not found" msgstr "No se ha encontrado el dispositivo" #, python-format msgid "" "Distributed Virtual Router Mac Address for host %(host)s does not exist." msgstr "" "La dirección Mac del direccionador virtual distribuido para el host %(host)s " "no existe." #, python-format msgid "Domain %(dns_domain)s not found in the external DNS service" msgstr "No se encuentra el dominio %(dns_domain)s en el servicio DNS externo" msgid "Domain to use for building the hostnames" msgstr "Dominio a utilizar para crear los nombres de host" msgid "" "Domain to use for building the hostnames. This option is deprecated. It has " "been moved to neutron.conf as dns_domain. It will be removed in a future " "release." msgstr "" "Dominio que se va a utilizar para crear los nombres de host. Esta opción " "está en desuso. Se ha trasladado a neutron.conf como dns_domain. Se " "eliminará en un release futuro." 
msgid "Downgrade no longer supported" msgstr "La degradación ya no está soportada" #, python-format msgid "Driver %s is not unique across providers" msgstr "El controlador %s no es único entre los proveedores" msgid "Driver for external DNS integration." msgstr "Controlador para la integración externa de DNS." msgid "Driver for security groups firewall in the L2 agent" msgstr "Controlador para el cortafuegos de grupos de seguridad en el agente L2" msgid "Driver to use for scheduling network to DHCP agent" msgstr "" "Controlador a utilizar para la planificación de la red para el agente DHCP" msgid "Driver to use for scheduling router to a default L3 agent" msgstr "" "Controlador a utilizar para la planificación del direccionador para un " "agente L3 predeterminado" msgid "" "Driver used for ipv6 prefix delegation. This needs to be an entry point " "defined in the neutron.agent.linux.pd_drivers namespace. See setup.cfg for " "entry points included with the neutron source." msgstr "" "Controlador utilizado para la delegación de prefijos ipv6. Debe ser un punto " "de entrada definido en el espacio de nombres neutron.agent.linux.pd_drivers. " "Consulte setup.cfg para ver los puntos de entrada incluidos con el origen de " "neutron." msgid "Driver used for scheduling BGP speakers to BGP DrAgent" msgstr "" "Controlador utilizado para planificar hablantes de BGP a un DrAgent de BGP" msgid "Drivers list to use to send the update notification" msgstr "" "Lista de controladores que se va a utilizar para enviar la notificación de " "actualización" #, python-format msgid "Duplicate IP address '%s'" msgstr "Dirección IP duplicada '%s'" #, python-format msgid "" "Duplicate L3HARouterAgentPortBinding is created for router(s) %(router)s. " "Database cannot be upgraded. Please, remove all duplicates before upgrading " "the database." msgstr "" "Se ha creado un L3HARouterAgentPortBinding duplicado para el direccionador " "%(router)s. No se puede actualizar la base de datos. Elimine todos los " "duplicados antes de actualizar la base de datos." msgid "Duplicate Metering Rule in POST." msgstr "Regla de medición duplicada en POST." msgid "Duplicate Security Group Rule in POST." msgstr "Regla de grupo de seguridad duplicada en POST." msgid "Duplicate address detected" msgstr "Se ha detectado una dirección duplicada" #, python-format msgid "Duplicate hostroute '%s'" msgstr "Ruta de host '%s' duplicada" #, python-format msgid "Duplicate items in the list: '%s'" msgstr "Elementos duplicados en la lista: '%s'" #, python-format msgid "Duplicate nameserver '%s'" msgstr "Servidor de nombres '%s' duplicado" msgid "Duplicate segment entry in request." msgstr "Entrada de segmento duplicada en la solicitud." #, python-format msgid "ERROR: %s" msgstr "ERROR: %s" msgid "" "ERROR: Unable to find configuration file via the default search paths (~/." "neutron/, ~/, /etc/neutron/, /etc/) and the '--config-file' option!" msgstr "" "ERROR: no se ha podido encontrar el archivo de configuración por medio de " "las rutas de búsqueda predeterminada (~/.neutron/, ~/, /etc/neutron/, /etc/) " "¡y la opción '--config-file'!" msgid "" "Either one of parameter network_id or router_id must be passed to _get_ports " "method." msgstr "Debe pasarse un parámetro network_id o router_id al método _get_ports." msgid "Either subnet_id or port_id must be specified" msgstr "Se debe especificar el ID de subred o el ID de puerto" msgid "Empty physical network name." msgstr "Nombre de red física vacío." msgid "Empty subnet pool prefix list." 
msgstr "Lista de prefijos de agrupación de subred vacía." msgid "Enable FWaaS" msgstr "Habilitar FWaaS" msgid "Enable HA mode for virtual routers." msgstr "Habilitar modo HA para direccionadores virtuales." msgid "Enable SSL on the API server" msgstr "Habilitar SSL en el servidor API" msgid "" "Enable VXLAN on the agent. Can be enabled when agent is managed by ml2 " "plugin using linuxbridge mechanism driver" msgstr "" "Habilitar VXLAN en el agente. Se puede habilitar cuando el agente es " "gestionado por ml2 plugin usando controlador de mecanismo linuxbridge" msgid "" "Enable local ARP responder if it is supported. Requires OVS 2.1 and ML2 " "l2population driver. Allows the switch (when supporting an overlay) to " "respond to an ARP request locally without performing a costly ARP broadcast " "into the overlay." msgstr "" "Habilite el respondedor ARP local si está soportado. Requiere OVS 2.1 y el " "controlador ML2 l2population. Permite que el conmutador (cuando da soporte a " "una superposición) responda a una solicitud ARP localmente sin realizar una " "difusión de ARP costosa en la superposición." msgid "" "Enable local ARP responder which provides local responses instead of " "performing ARP broadcast into the overlay. Enabling local ARP responder is " "not fullycompatible with the allowed-address-pairs extension." msgstr "" "Habilite el respondedor ARP local que proporciona respuestas locales en " "lugar de llevar a cabo difusión ARP en la superposición. El hecho de " "habilitar el respondedor ARP local no es completamente compatible con la " "ampliación de los pares de direcciones permitidos." msgid "" "Enable services on an agent with admin_state_up False. If this option is " "False, when admin_state_up of an agent is turned False, services on it will " "be disabled. Agents with admin_state_up False are not selected for automatic " "scheduling regardless of this option. But manual scheduling to such agents " "is available if this option is True." msgstr "" "Habilite servicios en un agente con admin_state_up False. Si esta opción es " "False, cuando el valor admin_state_up de un agente se cambia a False, se " "inhabilitan los servicios en el mismo. Los agentes con admin_state_up False " "no se seleccionan para la planificación automática independientemente del " "valor de esta opción. No obstante, si esta opción es True, sí que está " "disponible la planificación manual para estos agentes." msgid "" "Enable suppression of ARP responses that don't match an IP address that " "belongs to the port from which they originate. Note: This prevents the VMs " "attached to this agent from spoofing, it doesn't protect them from other " "devices which have the capability to spoof (e.g. bare metal or VMs attached " "to agents without this flag set to True). Spoofing rules will not be added " "to any ports that have port security disabled. For LinuxBridge, this " "requires ebtables. For OVS, it requires a version that supports matching ARP " "headers. This option will be removed in Newton so the only way to disable " "protection will be via the port security extension." msgstr "" "Habilite la supresión de respuestas ARP que no coincidan con una dirección " "IP que pertenezca al puerto donde se originan. Nota: Esto evita la " "suplantación de las VM conectadas a este agente, no los protege de otros " "dispositivos con capacidad para suplantar (por ejemplo, bare metal o VM " "conectadas a agentes sin este distintivo establecido en True). 
Las reglas de " "suplantación no se añadirán a los puertos con la seguridad de puerto " "inhabilitada. Para LinuxBridge, esto requiere ebtables. Para OVS, requiere " "una versión que dé soporte a las cabeceras ARP coincidentes. Esta opción se " "eliminará en Newton de forma que la única forma de deshabilitar la " "protección será mediante la extensión de seguridad del puerto." msgid "" "Enable/Disable log watch by metadata proxy. It should be disabled when " "metadata_proxy_user/group is not allowed to read/write its log file and " "copytruncate logrotate option must be used if logrotate is enabled on " "metadata proxy log files. Option default value is deduced from " "metadata_proxy_user: watch log is enabled if metadata_proxy_user is agent " "effective user id/name." msgstr "" "Habilitar/inhabilitar observador de registro por proxy de metadatos. Debe " "inhabilitarse cuando metadata_proxy_user/group no tenga permiso para leer/" "grabar en su archivo de registro y debe utilizarse la opción copytruncate " "logrotate si logrotate está habilitado en los archivos de registro de proxy " "de metadatos. El valor predeterminado de la opción se deduce de " "metadata_proxy_user: el registro de observador está habilitado si " "metadata_proxy_user es el id/nombre de usuario efectivo del agente." msgid "" "Enables IPv6 Prefix Delegation for automatic subnet CIDR allocation. Set to " "True to enable IPv6 Prefix Delegation for subnet allocation in a PD-capable " "environment. Users making subnet creation requests for IPv6 subnets without " "providing a CIDR or subnetpool ID will be given a CIDR via the Prefix " "Delegation mechanism. Note that enabling PD will override the behavior of " "the default IPv6 subnetpool." msgstr "" "Habilita la delegación de prefijo IPv6 para la asignación automática de CIDR " "de subred. Establézcalo en True para habilitar la delegación de prefijo IPv6 " "para la asignación de subred en un entorno que admita PD (Prefix " "Delegation). A los usuarios que realicen solicitudes de creación de subred " "para subredes IPv6 sin proporcionar un CIDR o un ID de agrupación de subred " "se les dará un CIDR mediante el mecanismo de delegación de prefijos. Tenga " "en cuenta que al habilitar PD se sobrescribirá el comportamiento de la " "agrupación de subred IPv6 predeterminada." msgid "" "Enables the dnsmasq service to provide name resolution for instances via DNS " "resolvers on the host running the DHCP agent. Effectively removes the '--no-" "resolv' option from the dnsmasq process arguments. Adding custom DNS " "resolvers to the 'dnsmasq_dns_servers' option disables this feature." msgstr "" "Permite al servicio dnsmasq proporcionar resolución de nombres para " "instancias mediante resolvedores DNS en el host donde se ejecuta el agente " "DHCP. En la práctica, elimina la opción '--no-resolv' de los argumentos del " "proceso dnsmasq. Si se añaden resolvedores DNS personalizados a la opción " "'dnsmasq_dns_servers' se deshabilita esta característica." msgid "Encountered an empty component." msgstr "Se ha encontrado un componente vacío." msgid "End of VLAN range is less than start of VLAN range" msgstr "El final del rango VLAN es menor que el inicio del rango VLAN" msgid "End of tunnel range is less than start of tunnel range" msgstr "El final del rango de túnel es menor que el inicio del rango de túnel" msgid "Enforce using split branches file structure." msgstr "Imponer el uso de la estructura de archivos de ramas divididas." msgid "" "Ensure that configured gateway is on subnet. 
For IPv6, validate only if " "gateway is not a link local address. Deprecated, to be removed during the " "Newton release, at which point the gateway will not be forced on to subnet." msgstr "" "Asegúrese de que la pasarela configurada está en la subred. Para IPv6, " "valide sólo si la pasarela no es una dirección local de enlace. En desuso, " "se eliminará durante el release de Newton, en cuyo momento la pasarela no se " "forzará en la subred." #, python-format msgid "Error %(reason)s while attempting the operation." msgstr "Error %(reason)s al intentar realizar la operación." #, python-format msgid "Error importing FWaaS device driver: %s" msgstr "Error al importar el controlador de dispositivo FWaaS: %s" #, python-format msgid "Error parsing dns address %s" msgstr "Error al analizar la dirección dns %s" #, python-format msgid "Error while reading %s" msgstr "Error al leer %s" #, python-format msgid "" "Exceeded %s second limit waiting for address to leave the tentative state." msgstr "" "Se ha superado el límite de %s segundos esperando que la dirección abandone " "el estado de tentativa." msgid "Exceeded maximum amount of fixed ips per port." msgstr "Se ha superado el número máximo de IP fijas por puerto." msgid "Existing prefixes must be a subset of the new prefixes" msgstr "" "Los prefijos existentes deben ser un subconjunto de los prefijos nuevos" #, python-format msgid "" "Exit code: %(returncode)d; Stdin: %(stdin)s; Stdout: %(stdout)s; Stderr: " "%(stderr)s" msgstr "" "Código de salida: %(returncode)d; Stdin: %(stdin)s; Stdout: %(stdout)s; " "Stderr: %(stderr)s" #, python-format msgid "Extension %(driver)s failed." msgstr "Error en la extensión %(driver)s." #, python-format msgid "" "Extension driver %(driver)s required for service plugin %(service_plugin)s " "not found." msgstr "" "No se ha encontrado el controlador de extensión %(driver)s necesario para el " "plugin de servicio %(service_plugin)s." msgid "" "Extension to use alongside ml2 plugin's l2population mechanism driver. It " "enables the plugin to populate VXLAN forwarding table." msgstr "" "Extensión para usar junto con el controlador de mecanismo l2population del " "plug-in ml2. Permite al plugin llenar la tabla de reenvío VXLAN." #, python-format msgid "Extension with alias %s does not exist" msgstr "La extensión con el alias %s no existe" msgid "Extensions list to use" msgstr "Lista de extensiones que se va a utilizar" #, python-format msgid "Extensions not found: %(extensions)s." msgstr "Extensiones no encontradas: %(extensions)s." #, python-format msgid "External DNS driver %(driver)s could not be found." msgstr "No se ha podido encontrar el controlador DNS externo %(driver)s." #, python-format msgid "External IP %s is the same as the gateway IP" msgstr "La IP externa %s es la misma que la IP de pasarela" #, python-format msgid "" "External network %(external_network_id)s is not reachable from subnet " "%(subnet_id)s. Therefore, cannot associate Port %(port_id)s with a Floating " "IP." msgstr "" "No se puede alcanzar la red externa %(external_network_id)s desde la subred " "%(subnet_id)s. Por tanto, no se puede asociar el puerto %(port_id)s con una " "IP flotante." #, python-format msgid "" "External network %(net_id)s cannot be updated to be made non-external, since " "it has existing gateway ports" msgstr "" "La red externa %(net_id)s no se puede actualizar para convertirla en no " "externa, ya que tiene puertos de pasarela existentes." 
#, python-format msgid "ExtraDhcpOpt %(id)s could not be found" msgstr "No se ha podido encontrar ExtraDhcpOpt %(id)s" msgid "" "FWaaS plugin is configured in the server side, but FWaaS is disabled in L3-" "agent." msgstr "" "El plug-in FWaaS está configurado en el lado del servidor, pero FWasS está " "inhabilitado en el agente L3." #, python-format msgid "Failed rescheduling router %(router_id)s: no eligible l3 agent found." msgstr "" "No se ha podido volver a programar el direccionador %(router_id)s: no se ha " "encontrado ningún agente l3 elegible." #, python-format msgid "Failed scheduling router %(router_id)s to the L3 Agent %(agent_id)s." msgstr "" "Se ha encontrado un error la planificación del direccionador %(router_id)s " "para el agente L3 %(agent_id)s." #, python-format msgid "" "Failed to allocate a VRID in the network %(network_id)s for the router " "%(router_id)s after %(max_tries)s tries." msgstr "" "No se ha podido asignar un VRID en la red %(network_id)s para el " "direccionador %(router_id)s después de %(max_tries)s intentos." #, python-format msgid "Failed to allocate subnet: %(reason)s." msgstr "No se ha podido asignar la subred: %(reason)s." msgid "" "Failed to associate address scope: subnetpools within an address scope must " "have unique prefixes." msgstr "" "No se ha podido asociar el ámbito de dirección: las agrupaciones de subred " "dentro de un ámbito de dirección deben tener prefijos exclusivos." #, python-format msgid "Failed to check policy %(policy)s because %(reason)s." msgstr "No se ha podido comprobar la política %(policy)s porque %(reason)s." #, python-format msgid "" "Failed to create a duplicate %(object_type)s: for attribute(s) " "%(attributes)s with value(s) %(values)s" msgstr "" "Error al crear un duplicado %(object_type)s: para los atributos " "%(attributes)s con los valores %(values)s" #, python-format msgid "" "Failed to create port on network %(network_id)s, because fixed_ips included " "invalid subnet %(subnet_id)s" msgstr "" "No se ha podido crear un puerto en la red %(network_id)s, porque fixed_ips " "incluía una subred no válida %(subnet_id)s" #, python-format msgid "Failed to init policy %(policy)s because %(reason)s." msgstr "No se ha podido iniciar la política %(policy)s porque %(reason)s." #, python-format msgid "Failed to locate source for %s." msgstr "No se ha podido buscar el origen de %s." #, python-format msgid "Failed to parse request. Parameter '%s' not specified" msgstr "" "No se ha podido analizar la solicitud. No se ha especificado el parámetro " "'%s'" #, python-format msgid "Failed to parse request. Required attribute '%s' not specified" msgstr "" "No se ha podido analizar la solicitud. 
No se ha especificado el atributo " "obligatorio '%s'" msgid "Failed to remove supplemental groups" msgstr "No se han podido eliminar los grupos suplementarios" #, python-format msgid "Failed to set gid %s" msgstr "No se ha podido establecer el gid %s" #, python-format msgid "Failed to set uid %s" msgstr "No se ha podido establecer el uid %s" #, python-format msgid "Failed to set-up %(type)s tunnel port to %(ip)s" msgstr "No se ha podido configurar el puerto de túnel %(type)s a %(ip)s" msgid "Failure applying iptables rules" msgstr "Error al aplicar las reglas de iptables" #, python-format msgid "Failure waiting for address %(address)s to become ready: %(reason)s" msgstr "" "Error al esperar que la dirección %(address)s esté preparada: %(reason)s" msgid "Flat provider networks are disabled" msgstr "Las redes de proveedor simples están deshabilitadas" #, python-format msgid "Flavor %(flavor_id)s could not be found." msgstr "No se ha podido encontrar el tipo %(flavor_id)s." #, python-format msgid "Flavor %(flavor_id)s is used by some service instance." msgstr "El tipo %(flavor_id)s lo utiliza alguna instancia de servicio." msgid "Flavor is not enabled." msgstr "El tipo no está habilitado." #, python-format msgid "Floating IP %(floatingip_id)s could not be found" msgstr "No se ha podido encontrar la IP flotante %(floatingip_id)s" #, python-format msgid "" "Floating IP %(floatingip_id)s is associated with non-IPv4 address " "%s(internal_ip)s and therefore cannot be bound." msgstr "" "La IP flotante %(floatingip_id)s está asociada con una dirección no IPv4 " "%s(internal_ip)s y, por tanto, no se puede enlazar." msgid "For TCP/UDP protocols, port_range_min must be <= port_range_max" msgstr "Para los protocolos TCP/UDP, port_range_min debe ser <= port_range_max" #, python-format msgid "For class %(object_type)s missing primary keys: %(missing_keys)s" msgstr "" "Faltan las claves primarias siguientes para la clase %(object_type)s: " "%(missing_keys)s" msgid "Force ip_lib calls to use the root helper" msgstr "Forzar llamadas ip_lib para usar el ayudante raíz" #, python-format msgid "Found duplicate extension: %(alias)s." msgstr "Se ha encontrado una extensión duplicada: %(alias)s." #, python-format msgid "" "Found overlapping allocation pools: %(pool_1)s %(pool_2)s for subnet " "%(subnet_cidr)s." msgstr "" "Se ha encontrado solapamiento de agrupaciones de asignación: %(pool_1)s " "%(pool_2)s para la subred %(subnet_cidr)s." msgid "Gateway IP version inconsistent with allocation pool version" msgstr "" "La versión de la IP de pasarela no es consistente con la versión de la " "agrupación de asignación" #, python-format msgid "" "Gateway cannot be updated for router %(router_id)s, since a gateway to " "external network %(net_id)s is required by one or more floating IPs." msgstr "" "No se puede actualizar la pasarela para el direccionador %(router_id)s, " "porque una o más IP flotantes necesitan una pasarela a la red externa " "%(net_id)s." #, python-format msgid "Gateway ip %(ip_address)s conflicts with allocation pool %(pool)s." msgstr "" "La IP de pasarela %(ip_address)s está en conflicto con la agrupación de " "asignación %(pool)s." msgid "Gateway is not valid on subnet" msgstr "La pasarela no es válida en la subred" msgid "" "Geneve encapsulation header size is dynamic, this value is used to calculate " "the maximum MTU for the driver. This is the sum of the sizes of the outer " "ETH + IP + UDP + GENEVE header sizes. 
The default size for this field is 50, " "which is the size of the Geneve header without any additional option headers." msgstr "" "El tamaño de la cabecera de encapsulación de Geneve es dinámico; este valor " "se utiliza para calcular el MTU máximo para el controlador. Es la suma de " "los tamaños de las cabeceras exteriores de ETH + IP + UDP + GENEVE. El " "tamaño predeterminado para este campo es 50, que es el tamaño de la cabecera " "de Geneve sin ninguna cabecera de opción adicional." msgid "Group (gid or name) running metadata proxy after its initialization" msgstr "" "Grupo (gid o nombre) que ejecuta el proxy de metadatos después de su " "inicialización" msgid "" "Group (gid or name) running metadata proxy after its initialization (if " "empty: agent effective group)." msgstr "" "Grupo (gid o nombre) que ejecuta el proxy de metadatos después de su " "inicialización (si está vacío: grupo efectivo del agente)." msgid "Group (gid or name) running this process after its initialization" msgstr "" "Grupo (gid o nombre) que ejecuta este proceso después de su inicialización" #, python-format msgid "HEAD file does not match migration timeline head, expected: %s" msgstr "" "El archivo HEAD no coincide con el head de la línea de tiempo de migración, " "se esperaba: %s" msgid "" "Hostname to be used by the Neutron server, agents and services running on " "this machine. All the agents and services running on this machine must use " "the same host value." msgstr "" "Nombre de host a utilizar por los agentes, los servicios y el servidor de " "Neutron que se ejecutan en esta máquina. Todos los agentes y servicios que " "se ejecutan en esta máquina deben utilizar el mismo valor de host." msgid "How many times Neutron will retry MAC generation" msgstr "Número de veces que Neutron reintentará la generación de MAC" #, python-format msgid "" "ICMP code (port-range-max) %(value)s is provided but ICMP type (port-range-" "min) is missing." msgstr "" "Se proporciona el código ICMP (port-range-max) %(value)s, pero falta el tipo " "ICMP (port-range-min)." msgid "ID of network" msgstr "ID de la red" msgid "ID of network to probe" msgstr "ID de red a analizar" msgid "ID of probe port to delete" msgstr "ID de puerto de analizador a suprimir" msgid "ID of probe port to execute command" msgstr "ID de puerto de analizador para ejecutar mandato" msgid "ID of the router" msgstr "ID del direccionador" #, python-format msgid "IP address %(ip)s already allocated in subnet %(subnet_id)s" msgstr "La dirección IP %(ip)s ya está asignada en la subred %(subnet_id)s" #, python-format msgid "IP address %(ip)s does not belong to subnet %(subnet_id)s" msgstr "La dirección IP %(ip)s no pertenece a la subred %(subnet_id)s" #, python-format msgid "" "IP address %(ip_address)s is not a valid IP for any of the subnets on the " "specified network." msgstr "" "La dirección IP %(ip_address)s no es una IP válida para las subredes en la " "red especificada." msgid "IP address used by Nova metadata server." msgstr "Dirección IP utilizada por el servidor de metadatos de Nova." msgid "IP allocation failed. Try again later." msgstr "Ha fallado la asignación de IP. Inténtelo de nuevo más tarde." 
msgid "IP allocation requires subnet_id or ip_address" msgstr "La asignación de IP necesita subnet_id o ip_address" #, python-format msgid "" "IPTablesManager.apply failed to apply the following set of iptables rules:\n" "%s" msgstr "" "IPTablesManager.apply no ha podido aplicar el siguiente conjunto de reglas " "de iptables:\n" "%s" msgid "IPtables conntrack zones exhausted, iptables rules cannot be applied." msgstr "" "Las zonas conntrack de IPtables se han agotado, no se pueden aplicar las " "reglas de iptables." msgid "IPv6 Address Mode must be SLAAC or Stateless for Prefix Delegation." msgstr "" "La modalidad de dirección IPv6 debe ser SLAAC o sin estado para la " "delegación de prefijos." msgid "IPv6 RA Mode must be SLAAC or Stateless for Prefix Delegation." msgstr "" "La modalidad de IPv6 RA debe ser SLAAC o sin estado para la delegación de " "prefijos." #, python-format msgid "" "IPv6 address %(address)s can not be directly assigned to a port on subnet " "%(id)s since the subnet is configured for automatic addresses" msgstr "" "La dirección IPv6 %(address)s no se puede asignar directamente a un puerto " "en la subred %(id)s, ya que la subred está configurada para direcciones " "automáticas" #, python-format msgid "" "IPv6 address %(ip)s cannot be directly assigned to a port on subnet " "%(subnet_id)s as the subnet is configured for automatic addresses" msgstr "" "La dirección IPv6 %(ip)s no se puede asignar directamente a un puerto en la " "subred %(subnet_id)s ya que la subred está configurada para direcciones " "automáticas" #, python-format msgid "" "IPv6 subnet %s configured to receive RAs from an external router cannot be " "added to Neutron Router." msgstr "" "La subred IPv6 %s configurada para recibir RA de un direccionador externo no " "se puede añadir al direccionador de Neutron." msgid "" "If True, advertise network MTU values if core plugin calculates them. MTU is " "advertised to running instances via DHCP and RA MTU options." msgstr "" "Si es True, anunciar los valores de MTU de red si el plugin del núcleo los " "calcula. MTU se anuncia a las instancias en ejecución a través de las " "opciones para MTU de DHCP y RA." msgid "" "If True, then allow plugins that support it to create VLAN transparent " "networks." msgstr "" "Si es True, permite a los plug-in que la soportan crear redes VLAN " "transparentes." msgid "" "If non-empty, the l3 agent can only configure a router that has the matching " "router ID." msgstr "" "Si no está vacío, el agente l3 solo puede configurar un direccionador que " "tenga el ID de direccionador correspondiente." msgid "Illegal IP version number" msgstr "Número de versión IP no permitido" #, python-format msgid "" "Illegal prefix bounds: %(prefix_type)s=%(prefixlen)s, %(base_prefix_type)s=" "%(base_prefixlen)s." msgstr "" "Límites de prefijo no permitidos: %(prefix_type)s=%(prefixlen)s, " "%(base_prefix_type)s=%(base_prefixlen)s." #, python-format msgid "" "Illegal subnetpool association: subnetpool %(subnetpool_id)s cannot " "associate with address scope %(address_scope_id)s because subnetpool " "ip_version is not %(ip_version)s." msgstr "" "Asociación de agrupación de subred no permitida: la agrupación de subred " "%(subnetpool_id)s no se puede asociar al ámbito de dirección " "%(address_scope_id)s porque la versión de IP (ip_version) de la misma no es " "%(ip_version)s." #, python-format msgid "" "Illegal subnetpool association: subnetpool %(subnetpool_id)s cannot be " "associated with address scope %(address_scope_id)s." 
msgstr "" "Asociación de agrupación de subred no permitida: la agrupación de subred " "%(subnetpool_id)s no se puede asociar al ámbito de dirección " "%(address_scope_id)s." #, python-format msgid "Illegal subnetpool update : %(reason)s." msgstr "Actualización de agrupación de subred no permitida: %(reason)s." #, python-format msgid "Illegal update to prefixes: %(msg)s." msgstr "Actualización no permitida para prefijos: %(msg)s." msgid "" "In some cases the Neutron router is not present to provide the metadata IP " "but the DHCP server can be used to provide this info. Setting this value " "will force the DHCP server to append specific host routes to the DHCP " "request. If this option is set, then the metadata service will be activated " "for all the networks." msgstr "" "En algunos casos, el direccionador Neutron no está presente para " "proporcionar el IP de los metadatos, pero se puede utilizar el servidor " "DHCP para proporcionar esta información. Si se define este valor, se forzará " "al servidor DHCP a añadir rutas de host específicas a la solicitud DHCP. Si " "no se define esta opción, el servicio de metadatos estará activado para " "todas las redes." #, python-format msgid "Incorrect pci_vendor_info: \"%s\", should be pair vendor_id:product_id" msgstr "" "pci_vendor_info incorrecto: \"%s\", debe ser el par vendor_id:product_id" msgid "" "Indicates that this L3 agent should also handle routers that do not have an " "external network gateway configured. This option should be True only for a " "single agent in a Neutron deployment, and may be False for all agents if all " "routers must have an external network gateway." msgstr "" "Indica que este agente L3 también debería manejar direccionadores que no " "tengan una pasarela de red externa configurada. Esta opción solo puede ser " "True para un único agente en un despliegue Neutron, y puede ser False para " "todos los agentes si todos los direccionadores tienen que tener una pasarela " "de red externa." #, python-format msgid "Instance of class %(module)s.%(class)s must contain _cache attribute" msgstr "" "La intancia de clase %(module)s.%(class)s debe contener el atributo _cache" #, python-format msgid "Insufficient prefix space to allocate subnet size /%s" msgstr "Espacio de prefijo insuficiente para asignar el tamaño de subred %s" msgid "Insufficient rights for removing default security group." msgstr "" "No hay derechos suficientes para eliminar el grupo de seguridad " "predeterminado." msgid "" "Integration bridge to use. Do not change this parameter unless you have a " "good reason to. This is the name of the OVS integration bridge. There is one " "per hypervisor. The integration bridge acts as a virtual 'patch bay'. All VM " "VIFs are attached to this bridge and then 'patched' according to their " "network connectivity." msgstr "" "Puente de integración a utilizar. No cambie este parámetro a menos que tenga " "un buen motivo para hacerlo. Es el nombre del puente de integración OVS. Hay " "uno por cada hipervisor. El puente de integración actúa como 'bahía de " "parche' virtual. Todas las VIF de la VM se conectan a ese puente y después " "se 'parchean' según su conectividad de red." 
msgid "Interface to monitor" msgstr "Interfaz a supervisar" msgid "" "Interval between checks of child process liveness (seconds), use 0 to disable" msgstr "" "Intervalo entre comprobaciones de vida de procesos secundarios (segundos), " "utilice 0 para inhabilitarlo" msgid "Interval between two metering measures" msgstr "Intervalo entre dos medidas de medición" msgid "Interval between two metering reports" msgstr "Intervalo entre dos informes de medición" #, python-format msgid "Invalid CIDR %(input)s given as IP prefix." msgstr "Se ha proporcionado un CIDR %(input)s no válido como prefijo de IP" #, python-format msgid "" "Invalid CIDR %s for IPv6 address mode. OpenStack uses the EUI-64 address " "format, which requires the prefix to be /64." msgstr "" "CIDR %s no válido para la modalidad de dirección IPv6. OpenStack utiliza el " "formato de dirección EUI-64, que requiere que el prefijo sea /64." #, python-format msgid "Invalid Device %(dev_name)s: %(reason)s" msgstr "Dispositivo no válido %(dev_name)s: %(reason)s" #, python-format msgid "" "Invalid action '%(action)s' for object type '%(object_type)s'. Valid " "actions: %(valid_actions)s" msgstr "" "Acción no válida '%(action)s' para el tipo de objeto '%(object_type)s'. " "Acciones válidas : %(valid_actions)s" #, python-format msgid "" "Invalid authentication type: %(auth_type)s, valid types are: " "%(valid_auth_types)s" msgstr "" "Tipo de autenticación no válida: %(auth_type)s, los tipos válidos son: " "%(valid_auth_types)s" #, python-format msgid "Invalid content type %(content_type)s." msgstr "Tipo de contenido no válido %(content_type)s." #, python-format msgid "Invalid data format for IP pool: '%s'" msgstr "Formato de datos no válido para agrupación de IP: '%s'" #, python-format msgid "Invalid data format for extra-dhcp-opt: %(data)s" msgstr "Formato de datos no válido para extra-dhcp-opt: %(data)s" #, python-format msgid "Invalid data format for fixed IP: '%s'" msgstr "Formato de datos no válido para IP fija: '%s'" #, python-format msgid "Invalid data format for hostroute: '%s'" msgstr "Formato de datos no válido para la ruta de host: '%s'" #, python-format msgid "Invalid data format for nameserver: '%s'" msgstr "Formato de datos no válido para servidor de nombres: '%s'" #, python-format msgid "Invalid ethertype %(ethertype)s for protocol %(protocol)s." msgstr "Ethertype no válido %(ethertype)s para el protocolo %(protocol)s." #, python-format msgid "Invalid extension environment: %(reason)s." msgstr "Entorno de ampliación no válido: %(reason)s" #, python-format msgid "Invalid format for routes: %(routes)s, %(reason)s" msgstr "Formato no válido: %(routes)s, %(reason)s" #, python-format msgid "Invalid format: %s" msgstr "Formato no válido: %s" #, python-format msgid "Invalid input for %(attr)s. Reason: %(reason)s." msgstr "Entrada no válida para %(attr)s. Motivo: %(reason)s." #, python-format msgid "" "Invalid input. '%(target_dict)s' must be a dictionary with keys: " "%(expected_keys)s" msgstr "" "Entrada no válida. '%(target_dict)s' debe ser un diccionario con claves: " "%(expected_keys)s" #, python-format msgid "Invalid instance state: %(state)s, valid states are: %(valid_states)s" msgstr "" "Estado de instancia no válido: %(state)s, los estados válidos son: " "%(valid_states)s" #, python-format msgid "Invalid mapping: '%s'" msgstr "Correlación no válida: '%s'" #, python-format msgid "Invalid network VLAN range: '%(vlan_range)s' - '%(error)s'." msgstr "Rango de VLAN de red no válido: '%(vlan_range)s' - '%(error)s'." 
#, python-format msgid "Invalid network VXLAN port range: '%(vxlan_range)s'." msgstr "Rango de puerto VXLAN de red no válido: '%(vxlan_range)s'." #, python-format msgid "Invalid pci slot %(pci_slot)s" msgstr "Ranura pci no válida %(pci_slot)s" #, python-format msgid "Invalid provider format. Last part should be 'default' or empty: %s" msgstr "" "Formato de proveedor no válido. La última parte debe ser 'predeterminado' o " "vacío: %s" #, python-format msgid "Invalid resource type %(resource_type)s" msgstr "Tipo de recurso %(resource_type)s no válido" #, python-format msgid "Invalid route: %s" msgstr "Ruta no válida: %s" msgid "Invalid service provider format" msgstr "Formato de proveedor de servicio no válido" #, python-format msgid "Invalid service type %(service_type)s." msgstr "Tipo de servicio no válido: %(service_type)s." #, python-format msgid "" "Invalid value for ICMP %(field)s (%(attr)s) %(value)s. It must be 0 to 255." msgstr "" "Valor no válido para ICMP %(field)s (%(attr)s) %(value)s. Debe ser 0 a 255." #, python-format msgid "Invalid value for port %(port)s" msgstr "Valor no válido para el puerto %(port)s" msgid "" "Iptables mangle mark used to mark ingress from external network. This mark " "will be masked with 0xffff so that only the lower 16 bits will be used." msgstr "" "Marca de corte de iptables utilizada para marcar la entrada desde la red " "externa. Esta marca se enmascara con 0xffff de modo que sólo se utilizarán " "los 16 bits inferiores." msgid "" "Iptables mangle mark used to mark metadata valid requests. This mark will be " "masked with 0xffff so that only the lower 16 bits will be used." msgstr "" "Marca de corte de iptables utilizada para marcar solicitudes válidas de " "metadatos. Esta marca se enmascara con 0xffff de modo que sólo se utilizarán " "los 16 bits inferiores." msgid "" "Keep in track in the database of current resourcequota usage. Plugins which " "do not leverage the neutron database should set this flag to False" msgstr "" "Realizar seguimiento en la base de datos del uso de cuota de recursos " "actual. Los plugins que no aprovechan la base de datos neutron deben " "establecer este distintivo en False" msgid "Keepalived didn't respawn" msgstr "Keepalived no se ha vuelto a generar" msgid "Keepalived didn't spawn" msgstr "Keepalived no se ha generado" #, python-format msgid "" "Kernel HZ value %(value)s is not valid. This value must be greater than 0." msgstr "El valor de Kernel HZ %(value)s no es válido. Debe ser mayor que 0." #, python-format msgid "Key %(key)s in mapping: '%(mapping)s' not unique" msgstr "La clave %(key)s de la correlación: '%(mapping)s' no es exclusiva" msgid "L3 agent failure to setup NAT for floating IPs" msgstr "El agente L3 no ha podido configurar NAT para las IP flotantes" msgid "L3 agent failure to setup floating IPs" msgstr "El agente L3 no ha podido configurar las IP flotantes" #, python-format msgid "Limit must be an integer 0 or greater and not '%d'" msgstr "El límite debe ser un entero mayor o igual a 0 y no '%d'" msgid "Limit number of leases to prevent a denial-of-service." msgstr "" "Limitar el número de arrendamientos para evitar denegación de servicio." msgid "List of :" msgstr "Lista de :" msgid "" "List of :: or " "specifying physical_network names usable for VLAN provider and tenant " "networks, as well as ranges of VLAN tags on each available for allocation to " "tenant networks." 
msgstr "" "Lista de :: o " "especificado los nombres de physical_network utilizables para redes de " "proveedor de VLAN y de arrendatario, así como rangos de etiquetas VLAN en " "cada una, disponibles para asignarlas en las redes de arrendatario." msgid "" "List of network type driver entrypoints to be loaded from the neutron.ml2." "type_drivers namespace." msgstr "" "Lista de puntos de entrada del controlador de tipo de red a cargar desde el " "espacio de nombres neutron.ml2.type_drivers." msgid "" "List of physical_network names with which flat networks can be created. Use " "default '*' to allow flat networks with arbitrary physical_network names. " "Use an empty list to disable flat networks." msgstr "" "Lista de nombres de physical_network con los cuales se pueden crear redes " "simples. Utillice el valor predeterminado '*' para permitir redes simples " "con nombres de physical_network arbitrarios. Utillice una lista vacía para " "deshabilitar las redes simples." msgid "Local IP address of the VXLAN endpoints." msgstr "Dirección IP local de puntos finales VXLAN." msgid "Location for Metadata Proxy UNIX domain socket." msgstr "Ubicación para socket de dominio UNIX de proxy de metadatos." msgid "Location of Metadata Proxy UNIX domain socket" msgstr "Ubicación de socket de dominio UNIX de proxy de metadatos" msgid "Location of pid file of this process." msgstr "Ubicación del archivo pid de este proceso." msgid "Location to store DHCP server config files." msgstr "" "Ubicación donde almacenar los archivos de configuración del servidor DHCP." msgid "Location to store IPv6 PD files." msgstr "Ubicación donde se almacenan los archivos PD de IPv6." msgid "Location to store IPv6 RA config files" msgstr "Ubicación para almacenar archivos de configuración de IPv6 RA" msgid "Location to store child pid files" msgstr "Ubicación para almacenar archivos PID hijos" msgid "Location to store keepalived/conntrackd config files" msgstr "" "Ubicación para almacenar los archivos de configuración keepalived/conntrackd" msgid "Log agent heartbeats" msgstr "Registrar señales de supervisión de agente" msgid "Loopback IP subnet is not supported if enable_dhcp is True." msgstr "" "No se da soporte para la subred de IP de bucle de retorno si enable_dhcp es " "True." msgid "MTU size of veth interfaces" msgstr "Tamaño de MTU de la interfaz de veth" msgid "Make the l2 agent run in DVR mode." msgstr "Hacer que el agente l2 se ejecute en modalidad DVR." msgid "Malformed request body" msgstr "Cuerpo de solicitud con formato incorrecto" #, python-format msgid "Malformed request body: %(reason)s." msgstr "Cuerpo de solicitud mal formado: %(reason)s." msgid "MaxRtrAdvInterval setting for radvd.conf" msgstr "Parámetro MaxRtrAdvInterval para radvd.conf" msgid "Maximum number of DNS nameservers per subnet" msgstr "Número máximo de servidores de nombres DNS por subred" msgid "" "Maximum number of L3 agents which a HA router will be scheduled on. If it is " "set to 0 then the router will be scheduled on every agent." msgstr "" "Número máximo de agentes L3 a los que se replanificará un direccionador HA. " "Si está definido a 0, se replanificará el direccionador a cada uno de los " "agentes." msgid "Maximum number of allowed address pairs" msgstr "Número máximo de pares de direcciones permitidos" msgid "" "Maximum number of fixed ips per port. This option is deprecated and will be " "removed in the N release." msgstr "" "El número máximo de IP fijas por puerto. Esta opción está en desuso y se " "eliminará en el release N." 
msgid "Maximum number of host routes per subnet" msgstr "Número máximo de rutas de host por subred" msgid "Maximum number of routes per router" msgstr "Número máximo de rutas por direccionador" msgid "" "Metadata Proxy UNIX domain socket mode, 4 values allowed: 'deduce': deduce " "mode from metadata_proxy_user/group values, 'user': set metadata proxy " "socket mode to 0o644, to use when metadata_proxy_user is agent effective " "user or root, 'group': set metadata proxy socket mode to 0o664, to use when " "metadata_proxy_group is agent effective group or root, 'all': set metadata " "proxy socket mode to 0o666, to use otherwise." msgstr "" "Modalidad de socket de dominio UNIX de proxy de metadatos, 4 valores " "permitidos: 'deduce': deducir la modalidad de los valores " "metadata_proxy_user/group; 'user': establecer la modalidad de socket de " "proxy de metadatos en 0o644 para utilizar cuando metadata_proxy_user sea un " "usuario efectivo de agente o root; 'group': establecer la modalidad de " "socket de proxy de metadatos en 0o664 para utilizar cuando " "metadata_proxy_group sea un grupo efectivo de agente o root, 'all': " "establecer la modalidad de socket de proxy de metadatos en 0o666 para " "utilizar en los demás casos." msgid "Metering driver" msgstr "Controlador de medición" #, python-format msgid "Metering label %(label_id)s does not exist" msgstr "La etiqueta de medición %(label_id)s no existe" #, python-format msgid "Metering label rule %(rule_id)s does not exist" msgstr "La regla de etiqueta de medición %(rule_id)s no existe" #, python-format msgid "" "Metering label rule with remote_ip_prefix %(remote_ip_prefix)s overlaps " "another" msgstr "" "La regla de etiqueta de medición con remote_ip_prefix %(remote_ip_prefix)s " "se solapa con otra" msgid "Method cannot be called within a transaction." msgstr "No se puede llamar a este método dentro de una transacción." msgid "Migration from distributed router to centralized is not supported" msgstr "No se admite migrar de un direccionador distribuido a uno centralizado" msgid "MinRtrAdvInterval setting for radvd.conf" msgstr "Parámetro MinRtrAdvInterval para radvd.conf" msgid "Minimize polling by monitoring ovsdb for interface changes." msgstr "Minimizar sondeo supervisando ovsdb para cambios de interfaz." #, python-format msgid "Missing key in mapping: '%s'" msgstr "Falta una clave en la correlación: '%s'" #, python-format msgid "Missing value in mapping: '%s'" msgstr "Falta un valor en la correlación: '%s'" msgid "Multicast IP subnet is not supported if enable_dhcp is True." msgstr "" "No se da soporte para la subred de IP de multidifusión si enable_dhcp es " "True." msgid "" "Multicast group for VXLAN. When configured, will enable sending all " "broadcast traffic to this multicast group. When left unconfigured, will " "disable multicast VXLAN mode." msgstr "" "Grupo de multidifusión para VXLAN. Cuando esté configurada, permitirá enviar " "todo el tráfico de difusión a este grupo de multidifusión. Si se deja sin " "configurar, se deshabilitará el modo de multidifusión VXLAN." msgid "" "Multicast group(s) for vxlan interface. A range of group addresses may be " "specified by using CIDR notation. Specifying a range allows different VNIs " "to use different group addresses, reducing or eliminating spurious broadcast " "traffic to the tunnel endpoints. To reserve a unique group for each possible " "(24-bit) VNI, use a /8 such as 239.0.0.0/8. This setting must be the same on " "all the agents." 
msgstr "" "Grupo de multidifusión para la interfaz VXLAN. Se puede especificar un rango " "de direcciones de grupo utilizando la notación CIDR. Especificar un rango " "permite que diferentes VNI utilicen diferentes direcciones de grupos, " "reduciendo o eliminando tráfico de difusión espurio a los puntos finales del " "tunel. Para reservar un grupo exclusivo para cada posible VNI (24-bit) , " "utilice un /8, como por ejemplo 239.0.0.0/8. Este ajuste debe ser el mismo " "en todos los agentes." #, python-format msgid "Multiple agents with agent_type=%(agent_type)s and host=%(host)s found" msgstr "" "Se han encontrado varios agentes con agent_type=%(agent_type)s y host=" "%(host)s" #, python-format msgid "Multiple default providers for service %s" msgstr "Múltiples proveedores predeterminados para el servicio %s" #, python-format msgid "Multiple plugins for service %s were configured" msgstr "Se han configurado varios plugins para el servicio %s" #, python-format msgid "Multiple providers specified for service %s" msgstr "Múltiples proveedores especificados para el servicio %s" msgid "Multiple tenant_ids in bulk security group rule create not allowed" msgstr "" "No se permiten varios Id de arrendatario en creación de regla de grupo de " "seguridad masiva" msgid "Must also specify protocol if port range is given." msgstr "" "Se debe especificar también el protocolo si se indica el rango de puertos." msgid "Must specify one or more actions on flow addition or modification" msgstr "" "Debe especificar una o más acciones en la adición o modificación de flujo" #, python-format msgid "Name %(dns_name)s is duplicated in the external DNS service" msgstr "El nombre %(dns_name)s está duplicado en el servicio DNS externo" #, python-format msgid "" "Name '%s' must be 1-63 characters long, each of which can only be " "alphanumeric or a hyphen." msgstr "" "El nombre '%s' debe tener 1-63 caracteres de longitud, y sólo pueden ser " "alfanuméricos o guiones." #, python-format msgid "Name '%s' must not start or end with a hyphen." msgstr "El nombre '%s' no puede comenzar o terminar con un guión." msgid "Name of Open vSwitch bridge to use" msgstr "Nombre de puente de Open vSwitch a utilizar" msgid "" "Name of nova region to use. Useful if keystone manages more than one region." msgstr "" "Nombre de región de nova a utilizar. Es útil si keystone gestiona más de una " "región." msgid "Name of the FWaaS Driver" msgstr "Nombre del controlador FWasS" msgid "Namespace of the router" msgstr "Espacio de nombres del direccionador" msgid "Native pagination depend on native sorting" msgstr "La paginación nativa depende de la ordenación nativa" #, python-format msgid "" "Need to apply migrations from %(project)s contract branch. This will require " "all Neutron server instances to be shutdown before proceeding with the " "upgrade." msgstr "" "Es necesario aplicar migraciones desde la bifurcación de contratación de " "%(project)s. Esto requerirá apagar todas las instancias de servidor de " "Neutron antes de proceder con la actualización." msgid "Negative delta (downgrade) not supported" msgstr "El delta negativo (degradación) no está soportado" msgid "Negative relative revision (downgrade) not supported" msgstr "La revisión relativa negativa (degradación) no está soportada" #, python-format msgid "" "Network %(network_id)s is already bound to BgpSpeaker %(bgp_speaker_id)s." msgstr "" "La red %(network_id)s ya está vinculada a un hablante de BGP" "%(bgp_speaker_id)s." 
#, python-format msgid "" "Network %(network_id)s is not associated with BGP speaker %(bgp_speaker_id)s." msgstr "" "La red %(network_id)s no está asociada al hablante de BGP %(bgp_speaker_id)s." #, python-format msgid "Network %(network_id)s is not bound to a BgpSpeaker." msgstr "La red %(network_id)s no está vinculada a un hablante de BGP." #, python-format msgid "Network %(network_id)s is not bound to a IPv%(ip_version)s BgpSpeaker." msgstr "" "La red %(network_id)s no está vinculada a un hablante de BGP de IPv" "%(ip_version)s." #, python-format msgid "Network %s does not contain any IPv4 subnet" msgstr "La red %s no contiene ninguna subred IPv4" #, python-format msgid "Network %s is not a valid external network" msgstr "La red %s no es una red externa válida" #, python-format msgid "Network %s is not an external network" msgstr "La red %s no es una red externa" #, python-format msgid "" "Network of size %(size)s, from IP range %(parent_range)s excluding IP ranges " "%(excluded_ranges)s was not found." msgstr "" "No se ha encontrado la red de tamaño %(size)s, de rango de IP " "%(parent_range)s, excluyendo los rangos %(excluded_ranges)s." msgid "Network that will have instance metadata proxied." msgstr "Red en la que se ejecutará un proxy en los metadatos de instancia." #, python-format msgid "Network type value '%s' not supported" msgstr "No se admite el valor de tipo de red '%s'" msgid "Network type value needed by the ML2 plugin" msgstr "El plugin ML2 necesita el valor de tipo de red" msgid "Network types supported by the agent (gre and/or vxlan)." msgstr "Tipos de red admitidos por el agente (gre o vxlan)." msgid "" "Neutron IPAM (IP address management) driver to use. If ipam_driver is not " "set (default behavior), no IPAM driver is used. In order to use the " "reference implementation of Neutron IPAM driver, use 'internal'." msgstr "" "Controlador IPAM de Neutron (gestión de direcciones IP) a utilizar. Si " "ipam_driver no está definido (comportamiento predeterminado), no se utiliza " "ningún controlador IPAM. Para utilizar la implementación de referencia del " "controlador IPAM de Neutron, utilice 'internal'." msgid "Neutron Service Type Management" msgstr "Administración del tipo de servicio Neutron" msgid "Neutron core_plugin not configured!" msgstr "core_plugin de Neutron no está configurado." msgid "Neutron plugin provider module" msgstr "Módulo de proveedor de plugin de Neutron" msgid "Neutron quota driver class" msgstr "Clase de controlador de cuota Neutron" msgid "New value for first_ip or last_ip has to be specified." msgstr "Se debe especificar un nuevo valor para first_ip o last_ip." msgid "No default router:external network" msgstr "No hay ninguna red router:external predeterminada" #, python-format msgid "No default subnetpool found for IPv%s" msgstr "" "No se ha encontrado ninguna agrupación de subredes predeterminada para IPv%s" msgid "No default subnetpools defined" msgstr "No se han definido agrupaciones de subredes predeterminadas" #, python-format msgid "No eligible l3 agent associated with external network %s found" msgstr "" "No se ha encontrado ningún agente l3 elegible asociado con la red externa %s" #, python-format msgid "No more IP addresses available for subnet %(subnet_id)s." msgstr "No hay más direcciones IP disponibles en la subred %(subnet_id)s." #, python-format msgid "" "No more Virtual Router Identifier (VRID) available when creating router " "%(router_id)s. The limit of number of HA Routers per tenant is 254." 
msgstr "" "No hay ningún identificador de direccionador virtual (VRID) al crear el " "direccionador %(router_id)s. El límite del número de direccionadores HA por " "arrendatario es 254." msgid "No offline migrations pending." msgstr "No hay migraciones fuera de línea pendientes." #, python-format msgid "No providers specified for '%s' service, exiting" msgstr "No hay proveedores especificados para el servicio '%s', saliendo" #, python-format msgid "No shared key in %s fields" msgstr "No hay ninguna clave compartida en los campos de %s" msgid "Not allowed to manually assign a router to an agent in 'dvr' mode." msgstr "" "No está permitido asignar manualmente un direccionador a un agente en modo " "'dvr'." msgid "Not allowed to manually remove a router from an agent in 'dvr' mode." msgstr "" "No está permitido eliminar manualmente un direccionador de un agente en modo " "'dvr'." #, python-format msgid "" "Not enough l3 agents available to ensure HA. Minimum required " "%(min_agents)s, available %(num_agents)s." msgstr "" "No hay suficientes agentes 13 disponibles para garantizar HA. El mínimo " "necesario es %(min_agents)s, disponibles %(num_agents)s." msgid "" "Number of DHCP agents scheduled to host a tenant network. If this number is " "greater than 1, the scheduler automatically assigns multiple DHCP agents for " "a given tenant network, providing high availability for DHCP service." msgstr "" "Número de agentes DHCP programados para alojar una red de arrendatarios. Si " "este número es mayor que 1, el planificador asigna automáticamente varios " "agentes DHCP de una red de arrendatarios dada y proporciona alta " "disponibilidad para el servicio DHCP." msgid "Number of RPC worker processes dedicated to state reports queue" msgstr "" "Número de procesos de trabajador RPC dedicados a la cola de informes de " "estado." msgid "Number of RPC worker processes for service" msgstr "Número de procesos de trabajador RPC por servicio" msgid "Number of backlog requests to configure the metadata server socket with" msgstr "" "Número de solicitudes de backlog con el que configurar el socket de servidor " "de metadatos" msgid "Number of backlog requests to configure the socket with" msgstr "" "Número de solicitudes de registro de reserva con las que configurar el socket" msgid "" "Number of bits in an ipv4 PTR zone that will be considered network prefix. " "It has to align to byte boundary. Minimum value is 8. Maximum value is 24. " "As a consequence, range of values is 8, 16 and 24" msgstr "" "Número de bits en una zona PTR ipv4 que se considerarán prefijo de red. " "Tiene que estar alineado con el límite de bytes. El valor mínimo es 8. El " "valor máximo es 24. En consecuencia, el rango de valores es 8, 16 y 24." msgid "" "Number of bits in an ipv6 PTR zone that will be considered network prefix. " "It has to align to nyble boundary. Minimum value is 4. Maximum value is 124. " "As a consequence, range of values is 4, 8, 12, 16,..., 124" msgstr "" "Número de bits en una zona PTR ipv6 que se considerarán prefijo de red. " "Tiene que estar alineado con el límite de nyble. El valor mínimo es 4. El " "valor máximo es 124. En consecuencia, el rango de valores es 4, 8, 12, " "16,..., 124." msgid "" "Number of floating IPs allowed per tenant. A negative value means unlimited." msgstr "" "Número de IP flotantes permitidas por arrendatario. Un valor negativo " "significa ilimitado." msgid "" "Number of networks allowed per tenant. A negative value means unlimited." 
msgstr "" "Número de redes permitidas por arrendatario. Un valor negativo significa " "ilimitado." msgid "Number of ports allowed per tenant. A negative value means unlimited." msgstr "" "Número de puertos permitidos por arrendatario. Un valor negativo significa " "ilimitado." msgid "Number of routers allowed per tenant. A negative value means unlimited." msgstr "" "Número de direccionadores permitidos por arrendatario. Un valor negativo " "significa ilimitado." msgid "" "Number of seconds between sending events to nova if there are any events to " "send." msgstr "" "Número de segundos entre en el envío de sucesos a nova si hay sucesos a " "enviar." msgid "Number of seconds to keep retrying to listen" msgstr "Número de segundos a continuar reintentando escuchar" msgid "" "Number of security groups allowed per tenant. A negative value means " "unlimited." msgstr "" "Número de grupos de seguridad permitidos por arrendatario. Un valor negativo " "significa ilimitados." msgid "" "Number of security rules allowed per tenant. A negative value means " "unlimited." msgstr "" "Número de reglas de seguridad permitidas por arrendatario. Un valor negativo " "significa ilimitados." msgid "" "Number of separate API worker processes for service. If not specified, the " "default is equal to the number of CPUs available for best performance." msgstr "" "Número de procesos de trabajador de API independientes por servicio. Si no " "se especifica, el valor predeterminado es igual al número de CPU disponible " "para un mejor rendimiento." msgid "" "Number of separate worker processes for metadata server (defaults to half of " "the number of CPUs)" msgstr "" "Número de procesos de trabajador independientes por servidor de metadatos " "(por omisión es la mitad del número de unidades de CPU)" msgid "Number of subnets allowed per tenant, A negative value means unlimited." msgstr "" "Número de subredes permitidas por arrendatario. Un valor negativo significa " "ilimitado." msgid "" "Number of threads to use during sync process. Should not exceed connection " "pool size configured on server." msgstr "" "Número de hebras a usar durante el proceso de sincronización. No debería " "exceder el tamaño de la agrupación de conexión configurado en el servidor." msgid "OK" msgstr "Aceptar" msgid "" "OVS datapath to use. 'system' is the default value and corresponds to the " "kernel datapath. To enable the userspace datapath set this value to 'netdev'." msgstr "" "Base de datos OVS a utilizar. 'system' es el valor predeterminado y " "corresponde a la vía de acceso a datos del kernel. Para habilitar la vía de " "acceso a datos del espacio de usuario, defina este valor a 'netdev'." msgid "OVS vhost-user socket directory." msgstr "Directorio del socket de vhost-user de OVS" #, python-format msgid "OVSDB Error: %s" msgstr "Error de OVSDB: %s" #, python-format msgid "Object action %(action)s failed because: %(reason)s." msgstr "La acción objeto %(action)s falló debido a: %(reason)s." msgid "Only admin can view or configure quota" msgstr "Solo los administradores pueden ver o configurar cuotas" msgid "Only admin is authorized to access quotas for another tenant" msgstr "" "Sólo está autorizado el administrador para acceder a cuotas para otro " "arrendatario" msgid "Only admins can manipulate policies on networks they do not own." 
msgstr "" "Sólo los administradores pueden manipular las políticas en las redes de las " "que no son propietarios" msgid "Only admins can manipulate policies on objects they do not own" msgstr "" "Sólo los admininstradores pueden manipular políticas en objetos de los que " "no son propietarios" msgid "Only allowed to update rules for one security profile at a time" msgstr "Solo se permite actualizar reglas para un perfil de seguridad a la vez" msgid "Only remote_ip_prefix or remote_group_id may be provided." msgstr "Solo se puede proporcionar remote_ip_prefix o remote_group_id." msgid "OpenFlow interface to use." msgstr "Interfaz OpenFlow que se va a utilizar." #, python-format msgid "" "Operation %(op)s is not supported for device_owner %(device_owner)s on port " "%(port_id)s." msgstr "" "No hay soporte para la operación %(op)s para device_owner %(device_owner)s " "en el puerto %(port_id)s." #, python-format msgid "Operation not supported on device %(dev_name)s" msgstr "Operación no admitida en el dispositivo %(dev_name)s" msgid "" "Ordered list of network_types to allocate as tenant networks. The default " "value 'local' is useful for single-box testing but provides no connectivity " "between hosts." msgstr "" "Lista ordenada de network_types para asignar como redes de arrendatarios. El " "valor predeterminado 'local' es útil para pruebas en un solo recuadro, pero " "no proporciona ninguna conectividad entre hosts." msgid "Override the default dnsmasq settings with this file." msgstr "Sobrescribir los valores dnsmasq predeterminados con este archivo." msgid "Owner type of the device: network/compute" msgstr "Tipo de propietario del dispositivo: red/cálculo" msgid "POST requests are not supported on this resource." msgstr "Las solicitudes de POST no son admitidas en este recurso." #, python-format msgid "Package %s not installed" msgstr "El paquete %s no se ha instalado" #, python-format msgid "Parameter %(param)s must be of %(param_type)s type." msgstr "El parámetro %(param)s debe ser de tipo %(param_type)s." #, python-format msgid "Parsing bridge_mappings failed: %s." msgstr "El análisis de bridge_mappings ha fallado: %s." msgid "Parsing supported pci_vendor_devs failed" msgstr "El análisis de los pci_vendor_devs soportados ha fallado" msgid "Password for connecting to designate in admin context" msgstr "" "Contraseña para establecer conexión con el designado en el contexto de " "administración" #, python-format msgid "Password not specified for authentication type=%(auth_type)s." msgstr "" "No se ha especificado la contraseña para el tipo de autenticación=" "%(auth_type)s." msgid "Path to PID file for this process" msgstr "Vía de acceso al archivo PID para este proceso" msgid "Path to the router directory" msgstr "Vía de acceso al directorio del direccionador" msgid "Peer patch port in integration bridge for tunnel bridge." msgstr "" "Puerto de parche de igual en puente de integración para puente de túnel." msgid "Peer patch port in tunnel bridge for integration bridge." msgstr "" "Puerto de parche de igual en puente de túnel para puente de integración." msgid "Per-tenant subnet pool prefix quota exceeded." msgstr "" "Se ha superado la cuota de prefijo de agrupación de subred por arrendatario." msgid "Phase upgrade options do not accept revision specification" msgstr "" "Las opciones de actualización de fase no aceptan la especificación de " "revisión" msgid "Ping timeout" msgstr "Tiempo de espera de ping" #, python-format msgid "Plugin '%s' not found." 
msgstr "No se ha encontrado el plugin '%s'." msgid "Plugin does not support updating provider attributes" msgstr "El plug-in no soporta la actualización de atributos de proveedor" msgid "Policy configuration policy.json could not be found." msgstr "No se ha podido encontrar la configuración de política policy.json." #, python-format msgid "Port %(id)s does not have fixed ip %(address)s" msgstr "El puerto %(id)s no tiene una IP fija %(address)s" #, python-format msgid "Port %(port)s does not exist on %(bridge)s!" msgstr "El puerto %(port)s no existe en %(bridge)s" #, python-format msgid "Port %(port_id)s is already acquired by another DHCP agent" msgstr "El puerto %(port_id)s ya ha sido adquirido por otro agente DHCP" #, python-format msgid "" "Port %(port_id)s is associated with a different tenant than Floating IP " "%(floatingip_id)s and therefore cannot be bound." msgstr "" "El puerto %(port_id)s está asociado con un arrendatario diferente a la IP " "flotante %(floatingip_id)s y, por lo tanto, no se puede enlazar." #, python-format msgid "Port %(port_id)s is not managed by this agent. " msgstr "El puerto %(port_id)s no está gestionado por este agente." #, python-format msgid "Port %s does not exist" msgstr "El puerto %s no existe" #, python-format msgid "" "Port %s has multiple fixed IPv4 addresses. Must provide a specific IPv4 " "address when assigning a floating IP" msgstr "" "El puerto %s tiene varias direcciones IPv4 fijas. Debe proporcionar una " "dirección IPv4 específica al asignar una IP flotante" msgid "" "Port Security must be enabled in order to have allowed address pairs on a " "port." msgstr "" "La seguridad de puerto debe estar habilitado para tener pares de direcciones " "permitidos en un puerto." msgid "" "Port has security group associated. Cannot disable port security or ip " "address until security group is removed" msgstr "" "El puerto tiene asociado un grupo de seguridad. No se puede inhabilitar la " "seguridad de puerto o la dirección IP hasta que se elimine el grupo de " "seguridad." msgid "" "Port security must be enabled and port must have an IP address in order to " "use security groups." msgstr "" "Se debe habilitar la seguridad de puerto y el puerto debe tener una " "dirección IP para utilizar grupos de seguridad." msgid "" "Port to listen on for OpenFlow connections. Used only for 'native' driver." msgstr "" "Puerto en el que se escuchan las conexiones OpenFlow. Se utiliza sólo para " "el controlador 'native'." #, python-format msgid "Prefix '%(prefix)s' not supported in IPv%(version)s pool." msgstr "" "El prefijo '%(prefix)s' no está soportado en la agrupación IPv%(version)s." msgid "Prefix Delegation can only be used with IPv6 subnets." msgstr "La delegación de prefijos sólo se puede utilizar con subredes IPv6." msgid "Private key of client certificate." msgstr "Clave privada del certificado de cliente." #, python-format msgid "Probe %s deleted" msgstr "Se ha suprimido el analizador %s" #, python-format msgid "Probe created : %s " msgstr "Se ha creado el analizador: %s " msgid "Process is already started" msgstr "El proceso ya se ha iniciado" msgid "Process is not running." msgstr "El proceso no se está ejecutando." 
msgid "Protocol to access nova metadata, http or https" msgstr "El protocolo para acceder a los metadatos de Nova: http o https" #, python-format msgid "Provider name %(name)s is limited by %(len)s characters" msgstr "El nombre de proveedor %(name)s está limitado a %(len)s caracteres" #, python-format msgid "QoS Policy %(policy_id)s is used by %(object_type)s %(object_id)s." msgstr "" "La política de calidad de servicio %(policy_id)s la está utilizando " "%(object_type)s %(object_id)s." #, python-format msgid "" "QoS binding for network %(net_id)s and policy %(policy_id)s could not be " "found." msgstr "" "No se ha podido encontrar el enlace QoS para la red %(net_id)s y la política " "%(policy_id)s." #, python-format msgid "" "QoS binding for port %(port_id)s and policy %(policy_id)s could not be found." msgstr "" "No se ha podido encontrar el enlace QoS para el puerto %(port_id)s y la " "política %(policy_id)s." #, python-format msgid "QoS policy %(policy_id)s could not be found." msgstr "No se ha podido encontrar la política QoS %(policy_id)s." #, python-format msgid "QoS rule %(rule_id)s for policy %(policy_id)s could not be found." msgstr "" "No se ha podido encontrar la regla QoS %(rule_id)s para la política " "%(policy_id)s." #, python-format msgid "RBAC policy of type %(object_type)s with ID %(id)s not found" msgstr "" "No se ha encontrado la política RBAC de tipo %(object_type)s con el ID %(id)s" #, python-format msgid "" "RBAC policy on object %(object_id)s cannot be removed because other objects " "depend on it.\n" "Details: %(details)s" msgstr "" "No se puede eliminar la política RBAC en el objeto %(object_id)s porque " "otros objetos dependen de ella.\n" "Detalles: %(details)s" msgid "" "Range of seconds to randomly delay when starting the periodic task scheduler " "to reduce stampeding. (Disable by setting to 0)" msgstr "" "Rango de segundos a retrasar aleatoriamente al iniciar el programador de " "tareas periódicas para reducir avalanchas. (para inhabilitarlo, establecerlo " "en 0)" msgid "Ranges must be in the same IP version" msgstr "Los rangos deben ser de la misma versión de IP." msgid "Ranges must be netaddr.IPRange" msgstr "Los rangos deben ser netaddr.IPRange" msgid "Ranges must not overlap" msgstr "Los rangos no se pueden solapar." #, python-format msgid "" "Received type '%(type)s' and value '%(value)s'. Expecting netaddr.EUI type." msgstr "" "Se ha recibido el tipo '%(type)s' y el valor '%(value)s'. Se esperaba el " "tipo netaddr.EUI." #, python-format msgid "" "Received type '%(type)s' and value '%(value)s'. Expecting netaddr.IPAddress " "type." msgstr "" "Se ha recibido el tipo '%(type)s' y el valor '%(value)s'. Se esperaba el " "tipo netaddr.IPAddress." #, python-format msgid "" "Received type '%(type)s' and value '%(value)s'. Expecting netaddr.IPNetwork " "type." msgstr "" "Se ha recibido el tipo '%(type)s' y el valor '%(value)s'. Se esperaba el " "tipo netaddr.IPNetwork." #, python-format msgid "" "Release aware branch labels (%s) are deprecated. Please switch to expand@ " "and contract@ labels." msgstr "" "Las etiquetas de rama para el release (%s) están en desuso. Cambie a las " "etiquetas expand@ y contract@." msgid "Remote metadata server experienced an internal server error." msgstr "" "El servidor de metadatos remoto ha experimentado un error de servidor " "interno. " msgid "" "Repository does not contain HEAD files for contract and expand branches." msgstr "" "El repositorio no contiene archivos HEAD para bifurcaciones de contratación " "y ampliación." 
msgid "" "Representing the resource type whose load is being reported by the agent. " "This can be \"networks\", \"subnets\" or \"ports\". When specified (Default " "is networks), the server will extract particular load sent as part of its " "agent configuration object from the agent report state, which is the number " "of resources being consumed, at every report_interval.dhcp_load_type can be " "used in combination with network_scheduler_driver = neutron.scheduler." "dhcp_agent_scheduler.WeightScheduler When the network_scheduler_driver is " "WeightScheduler, dhcp_load_type can be configured to represent the choice " "for the resource being balanced. Example: dhcp_load_type=networks" msgstr "" "Representando el tipo de recurso cuya carga está notificando el agente. " "Puede ser \"networks\", \"subnets\" o \"ports\". Cuando se especifica (el " "valor predeterminado es \"networks\"), el servidor extraerá la carga " "particular enviada como parte del objeto de configuración de agentes desde " "el estado de informe del agente, que es el número de recursos que se está " "consumiendo, en cada report_interval.dhcp_load_type que puede utilizarse " "junto con network_scheduler_driver = neutron.scheduler.dhcp_agent_scheduler." "WeightScheduler. Cuando network_scheduler_driver es WeightScheduler, se " "puede configurar dhcp_load_type para representar la opción para el recurso " "que se está equilibrando. Ejemplo: dhcp_load_type=networks" msgid "Request Failed: internal server error while processing your request." msgstr "" "Ha fallado la solicitud: error interno de servidor al procesar la solicitud." #, python-format msgid "" "Request contains duplicate address pair: mac_address %(mac_address)s " "ip_address %(ip_address)s." msgstr "" "La solicitud contiene un par de direcciones duplicado: mac_address " "%(mac_address)s ip_address %(ip_address)s." #, python-format msgid "" "Requested subnet with cidr: %(cidr)s for network: %(network_id)s overlaps " "with another subnet" msgstr "" "La subred solicitada con cidr: %(cidr)s para la red: %(network_id)s se " "solapa con otra subred" msgid "" "Reset flow table on start. Setting this to True will cause brief traffic " "interruption." msgstr "" "Restablecer tabla de flujo al iniciar. Si se establece en True, se producirá " "una interrupción breve del tráfico ." #, python-format msgid "Resource %(resource)s %(resource_id)s could not be found." msgstr "No se ha podido encontrar el recurso %(resource)s %(resource_id)s." #, python-format msgid "Resource %(resource_id)s of type %(resource_type)s not found" msgstr "" "No se ha encontrado el recurso %(resource_id)s de tipo %(resource_type)s" #, python-format msgid "" "Resource '%(resource_id)s' is already associated with provider " "'%(provider)s' for service type '%(service_type)s'" msgstr "" "El recurso '%(resource_id)s' ya está asociado con el proveedor " "'%(provider)s' para el tipo de servicio '%(service_type)s'" msgid "Resource body required" msgstr "Se necesita un cuerpo para el recurso" msgid "" "Resource name(s) that are supported in quota features. This option is now " "deprecated for removal." msgstr "" "Nombres de recursos soportados en las características de cuota. Esta opción " "está en desuso para su eliminación." msgid "Resource not found." msgstr "Recurso no encontrado." msgid "Resources required" msgstr "Recursos necesarios " msgid "" "Root helper application. Use 'sudo neutron-rootwrap /etc/neutron/rootwrap." "conf' to use the real root filter facility. 
Change to 'sudo' to skip the " "filtering and just run the command directly." msgstr "" "Aplicación de ayudante raíz. Utilice 'sudo neutron-rootwrap /etc/neutron/" "rootwrap.conf' para utilizar el recurso de filtro raíz real. Cambie a " "'sudo' para saltar el filtrado y ejecutar directamente el mandato." msgid "Root helper daemon application to use when possible." msgstr "Aplicación daemon de ayudante raíz a utilizar cuando sea posible." msgid "Root permissions are required to drop privileges." msgstr "Se necesitan permisos de root para descartar privilegios." #, python-format msgid "Route %(cidr)s not advertised for BGP Speaker %(speaker_as)d." msgstr "" "No se ha anunciado la ruta %(cidr)s para el hablante de BGP %(speaker_as)d." #, python-format msgid "Router %(router_id)s %(reason)s" msgstr "Direccionador %(router_id)s %(reason)s" #, python-format msgid "Router %(router_id)s could not be found" msgstr "No se ha podido encontrar el direccionador %(router_id)s." #, python-format msgid "Router %(router_id)s does not have an interface with id %(port_id)s" msgstr "" "El direccionador %(router_id)s no tiene una interfaz con el id %(port_id)s" #, python-format msgid "Router %(router_id)s has no interface on subnet %(subnet_id)s" msgstr "" "El direccionador %(router_id)s no tiene ninguna interfaz en la subred " "%(subnet_id)s" #, python-format msgid "Router '%(router_id)s' cannot be both DVR and HA." msgstr "El direccionador '%(router_id)s' no puede ser a la vez DVR y HA." #, python-format msgid "Router '%(router_id)s' is not compatible with this agent." msgstr "El direccionador '%(router_id)s' no es compatible con este agente." #, python-format msgid "Router already has a port on subnet %s" msgstr "El direccionador ya tiene un puerto en la subred %s" #, python-format msgid "" "Router interface for subnet %(subnet_id)s on router %(router_id)s cannot be " "deleted, as it is required by one or more floating IPs." msgstr "" "La interfaz de direccionador para la subred %(subnet_id)s en el " "direccionador %(router_id)s no se puede suprimir, porque la necesitan una o " "más IP flotantes." #, python-format msgid "" "Router interface for subnet %(subnet_id)s on router %(router_id)s cannot be " "deleted, as it is required by one or more routes." msgstr "" "La interfaz de direccionador para la subred %(subnet_id)s en el " "direccionador %(router_id)s no se puede suprimir, porque la necesitan una o " "más rutas." msgid "Router port must have at least one fixed IP" msgstr "El puerto del direccionador debe tener al menos una IP fija" msgid "Router that will have connected instances' metadata proxied." msgstr "" "Direccionador en el que se ejecutará un proxy en los metadatos de las " "instancias conectadas." #, python-format msgid "" "Row doesn't exist in the DB. Request info: Table=%(table)s. Columns=" "%(columns)s. Records=%(records)s." msgstr "" "La fila no existe en la base de datos. Información de la solicitud: Tabla=" "%(table)s. Columnas=%(columns)s. Registros=%(records)s." msgid "Run as daemon." msgstr "Ejecutar como daemon." #, python-format msgid "Running %(cmd)s (%(desc)s) for %(project)s ..." msgstr "Ejecutando %(cmd)s (%(desc)s) para %(project)s ..." #, python-format msgid "Running %(cmd)s for %(project)s ..." msgstr "Ejecutando %(cmd)s para %(project)s ..."
msgid "Running without keystone AuthN requires that tenant_id is specified" msgstr "Ejecutar sin keystone AuthN requiere que tenant_id esté especificado" msgid "" "Seconds between nodes reporting state to server; should be less than " "agent_down_time, best if it is half or less than agent_down_time." msgstr "" "Segundos entre nodos que informan del estado al servidor; debe ser menor que " "agent_down_time, mejor si es la mitad o menos que agent_down_time." msgid "Seconds between running periodic tasks" msgstr "Segundos entre tareas periódicas en ejecución" msgid "" "Seconds to regard the agent is down; should be at least twice " "report_interval, to be sure the agent is down for good." msgstr "" "Segundos para considerar que el agente está inactivo; debe ser como mínimo " "el doble de report_interval, para asegurarse de que el agente está inactivo " "definitivamente." #, python-format msgid "Security Group %(id)s %(reason)s." msgstr "Grupo de seguridad %(id)s %(reason)s." #, python-format msgid "Security Group Rule %(id)s %(reason)s." msgstr "Regla de grupo de seguridad %(id)s %(reason)s." #, python-format msgid "Security group %(id)s does not exist" msgstr "El grupo de seguridad %(id)s no existe" #, python-format msgid "Security group rule %(id)s does not exist" msgstr "La regla de grupo de seguridad %(id)s no existe" #, python-format msgid "Security group rule already exists. Rule id is %(rule_id)s." msgstr "" "La regla de grupo de seguridad ya existe. El id de regla es %(rule_id)s." #, python-format msgid "" "Security group rule for ethertype '%(ethertype)s' not supported. Allowed " "values are %(values)s." msgstr "" "No se admite la regla de grupo de seguridad para ethertype '%(ethertype)s'. " "Los valores permitidos son %(values)s." #, python-format msgid "" "Security group rule protocol %(protocol)s not supported. Only protocol " "values %(values)s and integer representations [0 to 255] are supported." msgstr "" "El protocolo de la regla del grupo de seguridad %(protocol)s no se admite. " "Solo se admiten valores de protocolo %(values)s y representaciones de " "enteros [de 0 a 255]." msgid "Segments and provider values cannot both be set." msgstr "" "Los valores segmentos y proveedor no pueden estar establecidos a la vez." msgid "Selects the Agent Type reported" msgstr "Selecciona el tipo de agente notificado" msgid "" "Send notification to nova when port data (fixed_ips/floatingip) changes so " "nova can update its cache." msgstr "" "Enviar notificación a nova cuando cambien los datos de puerto (fixed_ips/" "floatingip) para que nova pueda actualizar la memoria caché." msgid "Send notification to nova when port status changes" msgstr "Enviar notificación a nova cuando cambie el estado del puerto" msgid "" "Send this many gratuitous ARPs for HA setup, if less than or equal to 0, the " "feature is disabled" msgstr "" "Envíe todos estos ARP innecesarios para la configuración de HA, si el número " "es inferior o igual a 0, la característica se inhabilita" #, python-format msgid "Service Profile %(sp_id)s could not be found." msgstr "No se ha podido encontrar el perfil de servicio %(sp_id)s." #, python-format msgid "Service Profile %(sp_id)s is already associated with flavor %(fl_id)s." msgstr "El perfil de servicio %(sp_id)s ya está asociado al tipo %(fl_id)s." #, python-format msgid "Service Profile %(sp_id)s is not associated with flavor %(fl_id)s." msgstr "El perfil de servicio %(sp_id)s no está asociado al tipo %(fl_id)s." 
#, python-format msgid "Service Profile %(sp_id)s is used by some service instance." msgstr "" "El perfil de servicio %(sp_id)s lo utiliza alguna instancia de servicio." #, python-format msgid "Service Profile driver %(driver)s could not be found." msgstr "" "No se ha podido encontrar el controlador de perfil de servicio %(driver)s." msgid "Service Profile is not enabled." msgstr "El perfil de servicio no está habilitado." msgid "Service Profile needs either a driver or metainfo." msgstr "El perfil de servicio necesita un controlador o bien metainformación." #, python-format msgid "" "Service provider '%(provider)s' could not be found for service type " "%(service_type)s" msgstr "" "El proveedor de servicio '%(provider)s' no se ha podido encontrar para el " "tipo de servicio %(service_type)s" msgid "Service to handle DHCPv6 Prefix delegation." msgstr "Servicio que gestiona la delegación de prefijos DHCPv6." #, python-format msgid "Service type %(service_type)s does not have a default service provider" msgstr "" "El tipo de servicio %(service_type)s no tiene un proveedor de servicio " "predeterminado" msgid "" "Set new timeout in seconds for new rpc calls after agent receives SIGTERM. " "If value is set to 0, rpc timeout won't be changed" msgstr "" "Establecer el nuevo tiempo de espera en segundos para nuevas llamadas rpc " "después de que el agente reciba SIGTERM. Si el valor se establece en 0, no " "se modificará el tiempo de espera de rpc" msgid "" "Set or un-set the don't fragment (DF) bit on outgoing IP packet carrying GRE/" "VXLAN tunnel." msgstr "" "Establecer o anular el establecimiento del bit DF (don't fragment) en el " "paquete de IP saliente que lleva el túnel GRE/VXLAN." msgid "" "Set or un-set the tunnel header checksum on outgoing IP packet carrying GRE/" "VXLAN tunnel." msgstr "" "Establecer o anular el establecimiento de la suma de comprobación de " "cabecera de túnel en paquete de IP saliente que transporta el túnel GRE/" "VXLAN." msgid "Shared address scope can't be unshared" msgstr "No se puede dejar de compartir el ámbito de la dirección compartida" msgid "" "Specifying 'tenant_id' other than authenticated tenant in request requires " "admin privileges" msgstr "" "Para especificar un 'tenant_id' distinto del arrendatario autenticado en la " "solicitud se requieren privilegios administrativos" msgid "String prefix used to match IPset names." msgstr "Prefijo de serie utilizado para coincidir con nombres IPset." #, python-format msgid "Sub-project %s not installed." msgstr "El subproyecto %s no se ha instalado." msgid "Subnet for router interface must have a gateway IP" msgstr "" "La subred para la interfaz de direccionador debe tener una IP de pasarela" msgid "" "Subnet has a prefix length that is incompatible with DHCP service enabled." msgstr "" "La subred tiene una longitud de prefijo que no es compatible con el servicio " "DHCP habilitado." #, python-format msgid "Subnet pool %(subnetpool_id)s could not be found." msgstr "No se ha podido encontrar la agrupación de subred %(subnetpool_id)s." msgid "Subnet pool has existing allocations" msgstr "La agrupación de subred tiene asignaciones existentes" msgid "Subnet used for the l3 HA admin network." msgstr "Subred utilizada con la red de administradores HA l3." msgid "" "Subnets hosted on the same network must be allocated from the same subnet " "pool." msgstr "" "Las subredes alojadas en la misma red se deben asignar desde la misma " "agrupación de subredes." msgid "Suffix to append to all namespace names." 
msgstr "Sufijo a agregar a todos los nombres del espacio de nombres." msgid "" "System-wide flag to determine the type of router that tenants can create. " "Only admin can override." msgstr "" "Distintivo válido para todo el sistema para determinar el tipo de " "direccionador que pueden crear los arrendatarios. Sólo el administrador " "puede sobreescribirlo." msgid "TCP Port to listen for metadata server requests." msgstr "Puerto TCP para escuchar solicitudes de servidor de metadatos." msgid "TCP Port used by Neutron metadata namespace proxy." msgstr "" "Puerto TCP usado por el proxy de espacio de nombres de metadatos Neutron." msgid "TCP Port used by Nova metadata server." msgstr "Puerto TCP utilizado por el servidor de metadatos de Nova." #, python-format msgid "TLD '%s' must not be all numeric" msgstr "El TLD '%s' no puede ser todo numérico" msgid "TOS for vxlan interface protocol packets." msgstr "TOS para paquetes de protocolo de interfaz vxlan." msgid "TTL for vxlan interface protocol packets." msgstr "TTL para paquetes de protocolo de interfaz vxlan." #, python-format msgid "Table %s can only be queried by UUID" msgstr "La tabla %s sólo la puede consultar UUID" #, python-format msgid "Tag %(tag)s could not be found." msgstr "No se ha podido encontrar la etiqueta %(tag)s." #, python-format msgid "Tenant %(tenant_id)s not allowed to create %(resource)s on this network" msgstr "" "El arrendatario %(tenant_id)s no está autorizado a crear %(resource)s en " "esta red" msgid "Tenant id for connecting to designate in admin context" msgstr "" "ID de inquilino para establecer conexión con el designado en el contexto de " "administración" msgid "Tenant name for connecting to designate in admin context" msgstr "" "Nombre de inquilino para establecer conexión con el designado en el contexto " "de administración" msgid "Tenant network creation is not enabled." msgstr "La creación de red de arrendatario no está habilitada." msgid "Tenant-id was missing from quota request." msgstr "Faltaba el ID de arrendatario en la solicitud de cuota." msgid "" "The 'gateway_external_network_id' option must be configured for this agent " "as Neutron has more than one external network." msgstr "" "Se debe configurar la opción 'gateway_external_network_id' para este agente " "ya que Neutron tiene más de una red externa." msgid "" "The DHCP agent will resync its state with Neutron to recover from any " "transient notification or RPC errors. The interval is number of seconds " "between attempts." msgstr "" "El agente DHCP resincronizará su estado con Neutron para recuperarse de " "cualquier posible notificación transitoria o errorres de RPC. El intervalo " "es el número de segundos entre intentos." msgid "" "The DHCP server can assist with providing metadata support on isolated " "networks. Setting this value to True will cause the DHCP server to append " "specific host routes to the DHCP request. The metadata service will only be " "activated when the subnet does not contain any router port. The guest " "instance must be configured to request host routes via DHCP (Option 121). " "This option doesn't have any effect when force_metadata is set to True." msgstr "" "El servidor DHCP puede ajudar a proporcionar soporte para metadatos en " "redes aisladas. Si se define este valor a True, provocará que el servidor " "DHCP añada rutas específicas de host a la solicitud DHCP. El servicio de " "metadatos sólo se activará cuando la subred no contenga ningún puerto de " "direccionador. 
La instancia de invitado debe estar configurada para " "solicitar rutas de host vía DHCP (Opción 121). Esta opción no tiene ningún " "efecto cuando force_metadata está definido en True." #, python-format msgid "" "The HA Network CIDR specified in the configuration file isn't valid; " "%(cidr)s." msgstr "" "El CIDR de red HA especificado en el archivo de configuración no es válido; " "%(cidr)s." msgid "The UDP port to use for VXLAN tunnels." msgstr "El puerto UDP a usar para los túneles VXLAN." #, python-format msgid "" "The address allocation request could not be satisfied because: %(reason)s" msgstr "" "No se ha podido satisfacer la solicitud de asignación de dirección porque: " "%(reason)s" msgid "The advertisement interval in seconds" msgstr "Intervalo de anuncio en segundos" #, python-format msgid "The allocation pool %(pool)s is not valid." msgstr "La agrupación de asignación %(pool)s no es válida." #, python-format msgid "" "The allocation pool %(pool)s spans beyond the subnet cidr %(subnet_cidr)s." msgstr "" "La agrupación de asignación %(pool)s abarca más allá de cidr de subred " "%(subnet_cidr)s." #, python-format msgid "" "The attribute '%(attr)s' is reference to other resource, can't used by sort " "'%(resource)s'" msgstr "" "Otro recurso hace referencia al atributo '%(attr)s', la ordenación " "'%(resource)s' no puede usarlo" msgid "" "The base MAC address Neutron will use for VIFs. The first 3 octets will " "remain unchanged. If the 4th octet is not 00, it will also be used. The " "others will be randomly generated." msgstr "" "Dirección MAC base que Neutron utiliza para las VIF. Los 3 primeros octetos " "permanecerán sin cambios. Si el cuarto octeto no es 00, también se " "utilizará. Los otros se generan aleatoriamente. " msgid "" "The base mac address used for unique DVR instances by Neutron. The first 3 " "octets will remain unchanged. If the 4th octet is not 00, it will also be " "used. The others will be randomly generated. The 'dvr_base_mac' *must* be " "different from 'base_mac' to avoid mixing them up with MAC's allocated for " "tenant ports. A 4 octet example would be dvr_base_mac = fa:16:3f:4f:00:00. " "The default is 3 octet" msgstr "" "Dirección mac base que Neutron utiliza para las instancias DVR exclusivas. " "Los 3 primeros octetos permanecerán sin cambios. Si el cuarto octeto no es " "00, también se utilizará. Los otros se generan aleatoriamente. " "'dvr_base_mac' *debe* ser diferente de 'base_mac' para evitar que se mezclen " "con los MAC asignados para los puertos de arrendatario. Un ejemplo de 4 " "octetos sería dvr_base_mac = fa:16:3f:4f:00:00. El valor predeterminado es 3 " "octetos." msgid "" "The connection string for the native OVSDB backend. Requires the native " "ovsdb_interface to be enabled." msgstr "" "La serie de conexión del programa de fondo OVSDB nativo. Requiere que la " "opción nativa ovsdb_interface esté habilitada." msgid "The core plugin Neutron will use" msgstr "El core plugin que utilizará Neutron" #, python-format msgid "" "The dns_name passed is a FQDN. Its higher level labels must be equal to the " "dns_domain option in neutron.conf, that has been set to '%(dns_domain)s'. It " "must also include one or more valid DNS labels to the left of " "'%(dns_domain)s'" msgstr "" "El nombre dns_name pasado es un FQDN. Sus etiquetas de nivel superior deben " "ser iguales que la opción dns_domain en neutron.conf, que se ha establecido " "en '%(dns_domain)s'. 
También debe incluir una o más etiquetas DNS válidas a " "la izquierda de '%(dns_domain)s'" #, python-format msgid "" "The dns_name passed is a PQDN and its size is '%(dns_name_len)s'. The " "dns_domain option in neutron.conf is set to %(dns_domain)s, with a length of " "'%(higher_labels_len)s'. When the two are concatenated to form a FQDN (with " "a '.' at the end), the resulting length exceeds the maximum size of " "'%(fqdn_max_len)s'" msgstr "" "El nombre dns_name pasado es un PQDN y su tamaño es '%(dns_name_len)s'. La " "opción dns_domain en neutron.conf se ha establecido en %(dns_domain)s, con " "una longitud de '%(higher_labels_len)s'. Cuando se concatenan los dos para " "formar un FQDN (con un '.' al final), la longitud resultante excede el " "tamaño máximo de '%(fqdn_max_len)s'" msgid "The driver used to manage the DHCP server." msgstr "El controlador utilizado para gestionar el servidor DHCP." msgid "The driver used to manage the virtual interface." msgstr "El controlador utilizado para gestionar la interfaz virtual." msgid "" "The email address to be used when creating PTR zones. If not specified, the " "email address will be admin@" msgstr "" "La dirección de correo electrónico a utilizar al crear zonas PTR. Si no se " "especifica, la dirección de correo electrónico será admin@" #, python-format msgid "" "The following device_id %(device_id)s is not owned by your tenant or matches " "another tenants router." msgstr "" "El siguiente device_id %(device_id)s no es propiedad de su arrendatario o " "coincide con el direccionador de otros arrendatarios." msgid "The host IP to bind to" msgstr "El IP del host al que se desea crear el enlace" msgid "The interface for interacting with the OVSDB" msgstr "Interfaz para la interacción con la OVSDB" msgid "" "The maximum number of items returned in a single response, value was " "'infinite' or negative integer means no limit" msgstr "" "El número máximo de elementos devueltos en una única respuesta, el valor " "'infinite' o un entero negativo significa que no hay límite" #, python-format msgid "" "The network %(network_id)s has been already hosted by the DHCP Agent " "%(agent_id)s." msgstr "" "La red %(network_id)s ya está alojada por el agente de DHCP %(agent_id)s." #, python-format msgid "" "The network %(network_id)s is not hosted by the DHCP agent %(agent_id)s." msgstr "" "La red %(network_id)s no está alojada por el agente de DHCP %(agent_id)s." msgid "" "The network type to use when creating the HA network for an HA router. By " "default or if empty, the first 'tenant_network_types' is used. This is " "helpful when the VRRP traffic should use a specific network which is not the " "default one." msgstr "" "Tipo de red que se debe utilizar al crear la red de alta disponibilidad para " "un direccionador HA. De forma predeterminada o si está vacío, se utiliza el " "primer 'tenant_network_types'. Esto es útil cuando el tráfico VRRP debe " "utilizar una red específica que no sea el valor predeterminado." #, python-format msgid "The number of allowed address pair exceeds the maximum %(quota)s." msgstr "" "El número de pares de direcciones permitidos excede el máximo de %(quota)s." msgid "" "The number of seconds the agent will wait between polling for local device " "changes." msgstr "" "El número de segundos que el agente esperará entre sondeos de cambios de " "dispositivo local." msgid "" "The number of seconds to wait before respawning the ovsdb monitor after " "losing communication with it." 
msgstr "" "Número de segundos a esperar antes de volver a generar el supervisor ovsdb " "después de perder la comunicación con el mismo." msgid "The number of sort_keys and sort_dirs must be same" msgstr "El número de sort_keys y sort_dirs debe ser igual" msgid "" "The path for API extensions. Note that this can be a colon-separated list of " "paths. For example: api_extensions_path = extensions:/path/to/more/exts:/" "even/more/exts. The __path__ of neutron.extensions is appended to this, so " "if your extensions are in there you don't need to specify them here." msgstr "" "La vía de acceso para ampliaciones de API. Observe que puede ser una lista " "de vías de acceso separadas por punto y coma. Por ejemplo: " "api_extensions_path = extensions:/path/to/more/exts:/even/more/exts. Además, " "se añade __path__ of neutron.extensions, de forma que si sus extensiones " "están ahí no es necesario especificarlas aquí." msgid "The physical network name with which the HA network can be created." msgstr "Nombre de la red física con la que se puede crear la red HA." #, python-format msgid "The port '%s' was deleted" msgstr "Se ha suprimido el puerto '%s'" msgid "The port to bind to" msgstr "El puerto al que se desea crear el enlace" #, python-format msgid "The requested content type %s is invalid." msgstr "El tipo de contenido solicitado %s no es válido." msgid "The resource could not be found." msgstr "No se ha podido encontrar el recurso." #, python-format msgid "" "The router %(router_id)s has been already hosted by the L3 Agent " "%(agent_id)s." msgstr "" "El direccionador %(router_id)s ya está alojado por el agente L3 %(agent_id)s." msgid "" "The server has either erred or is incapable of performing the requested " "operation." msgstr "" "El servidor tiene un error o no puede ejecutar la operación solicitada." msgid "The service plugins Neutron will use" msgstr "Los plug-ins de servicio que utilizará Neutron" #, python-format msgid "The subnet request could not be satisfied because: %(reason)s" msgstr "No se ha podido satisfacer la solicitud de subred porque: %(reason)s" #, python-format msgid "The subproject to execute the command against. Can be one of: '%s'." msgstr "" "Subproyecto con el que ejecutar el mandato. Puede ser uno de los siguientes: " "'%s'." msgid "The type of authentication to use" msgstr "El tipo de autenticación a utilizar" #, python-format msgid "The value '%(value)s' for %(element)s is not valid." msgstr "El valor de '%(value)s' para %(element)s no es válido." msgid "" "The working mode for the agent. Allowed modes are: 'legacy' - this preserves " "the existing behavior where the L3 agent is deployed on a centralized " "networking node to provide L3 services like DNAT, and SNAT. Use this mode if " "you do not want to adopt DVR. 'dvr' - this mode enables DVR functionality " "and must be used for an L3 agent that runs on a compute host. 'dvr_snat' - " "this enables centralized SNAT support in conjunction with DVR. This mode " "must be used for an L3 agent running on a centralized node (or in single-" "host deployments, e.g. devstack)" msgstr "" "La modalidad de trabajo del agente. Las modalidades permitidas son: " "'heredada' - conserva el comportamiento existente, donde el agente L3 se " "despliega en un nodo de red centralizado para proporcionar servicios de L3 " "como DNAT y SNAT. Utilice esta modalidad si no desea adoptar DVR. 'dvr' - " "esta modalidad habilita la funcionalidad DVR y debe utilizarse para un " "agente L3 que se ejecuta en un host de cálculo. 
'dvr_snat' - habilita el " "soporte SNAT centralizado conjuntamente con DVR. Esta modalidad debe " "utilizarse para un agente L3 que se ejecuta en un nodo centralizado (o en " "despliegues de un solo host, por ejemplo, devstack)" msgid "" "There are routers attached to this network that depend on this policy for " "access." msgstr "" "Hay direccionadores conectados a esta red que dependen de esta política para " "poder acceder." msgid "" "This will choose the web framework in which to run the Neutron API server. " "'pecan' is a new experiemental rewrite of the API server." msgstr "" "Con esta opción se elegirá la infraestructura web en la que ejecutar el " "servidor de la API Neutron. 'pecan' es una nueva reescritura experimental " "del servidor de API." msgid "Timeout" msgstr "Tiempo de espera excedido" msgid "" "Timeout in seconds for ovs-vsctl commands. If the timeout expires, ovs " "commands will fail with ALARMCLOCK error." msgstr "" "Tiempo de espera en segundos para mandatos ovs-vsctl. Si se excede el tiempo " "de espera, los mandatos ovs fallarán y darán un error de ALARMCLOCK." msgid "" "Timeout in seconds to wait for a single OpenFlow request. Used only for " "'native' driver." msgstr "" "Tiempo en segundos que se espera una única solicitud OpenFlow. Se utiliza " "sólo para el controlador 'native'." msgid "" "Timeout in seconds to wait for the local switch connecting the controller. " "Used only for 'native' driver." msgstr "" "Tiempo en segundos que se esperará a que el conmutador local se conecte al " "controlador. Sólo se utiliza para el controlador 'native'." msgid "" "Too long prefix provided. New name would exceed given length for an " "interface name." msgstr "" "Se ha proporcionado un prefijo demasiado largo. El nuevo nombre superaría la " "longitud indicada para un nombre de interfaz." msgid "Too many availability_zone_hints specified" msgstr "Se han especificado demasiadas availability_zone_hints" msgid "" "True to delete all ports on all the OpenvSwitch bridges. False to delete " "ports created by Neutron on integration and external network bridges." msgstr "" "Verdadero para suprimir todos los puertos en todos los puentes OpenvSwitch. " "Falso para suprimir los puertos creados por Neutron en los puentes de " "integración y de red externa." msgid "Tunnel IP value needed by the ML2 plugin" msgstr "El plugin ML2 necesita el valor de IP de túnel" msgid "Tunnel bridge to use." msgstr "Puente de túnel a utilizar." msgid "" "Type of the nova endpoint to use. This endpoint will be looked up in the " "keystone catalog and should be one of public, internal or admin." msgstr "" "Tipo de punto final de nova a utilizar. Este punto final se consultará en el " "catálogo de keystone y debe ser uno de los siguientes: público, interno o " "administrativo." msgid "URL for connecting to designate" msgstr "URL para establecer conexión con designate" msgid "URL to database" msgstr "URL a la base de datos" #, python-format msgid "Unable to access %s" msgstr "No se puede acceder a %s" #, python-format msgid "" "Unable to allocate subnet with prefix length %(prefixlen)s, maximum allowed " "prefix is %(max_prefixlen)s." msgstr "" "No se puede asignar la subred con la longitud de prefijo %(prefixlen)s, el " "prefijo máximo permitido es %(max_prefixlen)s." #, python-format msgid "" "Unable to allocate subnet with prefix length %(prefixlen)s, minimum allowed " "prefix is %(min_prefixlen)s." 
msgstr "" "No se puede asignar la subred con la longitud de prefijo %(prefixlen)s, el " "prefijo mínimo permitido es %(min_prefixlen)s." #, python-format msgid "Unable to calculate %(address_type)s address because of:%(reason)s" msgstr "" "No se puede calcular la dirección %(address_type)s debido a: %(reason)s" #, python-format msgid "" "Unable to complete operation for %(router_id)s. The number of routes exceeds " "the maximum %(quota)s." msgstr "" "No se ha podido completar la operación para %(router_id)s. El número de " "rutas supera el máximo de %(quota)s." #, python-format msgid "" "Unable to complete operation for %(subnet_id)s. The number of DNS " "nameservers exceeds the limit %(quota)s." msgstr "" "No se ha podido completar la operación para %(subnet_id)s. El número de " "servidores de nombres de DNS supera el límite %(quota)s." #, python-format msgid "" "Unable to complete operation for %(subnet_id)s. The number of host routes " "exceeds the limit %(quota)s." msgstr "" "No se ha podido completar la operación para %(subnet_id)s. El número de " "rutas de host supera el límite %(quota)s." #, python-format msgid "" "Unable to complete operation on address scope %(address_scope_id)s. There " "are one or more subnet pools in use on the address scope" msgstr "" "No es posible completar la operación en el ámbito de direcciones " "%(address_scope_id)s. Hay una o más agrupaciones de subredes en uso en el " "ámbito de direcciones" #, python-format msgid "Unable to convert value in %s" msgstr "No se puede convertir el valor en %s " msgid "Unable to create the Agent Gateway Port" msgstr "No se puede crear el puerto de pasarela de agente" msgid "Unable to create the SNAT Interface Port" msgstr "No se puede crear el puerto de interfaz SNAT" #, python-format msgid "" "Unable to create the flat network. Physical network %(physical_network)s is " "in use." msgstr "" "No se ha podido crear la red plana. La red física %(physical_network)s está " "en uso." msgid "" "Unable to create the network. No available network found in maximum allowed " "attempts." msgstr "" "No se ha podido crear la red. No se ha encontrado ninguna red disponible en " "el máximo de intentos permitidos." #, python-format msgid "Unable to delete subnet pool: %(reason)s." msgstr "No se puede suprimir la agrupación de subred: %(reason)s." #, python-format msgid "Unable to determine mac address for %s" msgstr "No se ha podido determinar la dirección mac para %s" #, python-format msgid "Unable to find '%s' in request body" msgstr "No se puede encontrar '%s' en el cuerpo de la solicitud " #, python-format msgid "Unable to find IP address %(ip_address)s on subnet %(subnet_id)s" msgstr "" "No se ha encontrado la dirección IP %(ip_address)s en la subred %(subnet_id)s" #, python-format msgid "Unable to find resource name in %s" msgstr "No se ha podido encontrar el nombre del recurso en %s" msgid "Unable to generate IP address by EUI64 for IPv4 prefix" msgstr "No se puede generar dirección IP por EUI64 para el prefijo de IPv4" #, python-format msgid "Unable to generate unique DVR mac for host %(host)s." msgstr "No se puede generar la mac DVR exclusiva para el host %(host)s." #, python-format msgid "Unable to generate unique mac on network %(net_id)s." msgstr "No se puede generar un mac exclusivo en la red %(net_id)s. " #, python-format msgid "" "Unable to identify a target field from:%s. Match should be in the form " "%%()s" msgstr "" "No se ha podido identificar un campo destino desde: %s. 
La coincidencia debe " "tener la forma %%()s" msgid "Unable to provide external connectivity" msgstr "No se puede proporcionar conectividad externa" msgid "Unable to provide tenant private network" msgstr "No se puede proporcionar una red privada de arrendatario" #, python-format msgid "" "Unable to reconfigure sharing settings for network %(network)s. Multiple " "tenants are using it." msgstr "" "No se han podido volver a configurar los valores de compartición de la red " "%(network)s. Varios arrendatarios la están utilizando." #, python-format msgid "Unable to update address scope %(address_scope_id)s : %(reason)s" msgstr "" "No se puede actualizar el ámbito de direcciones %(address_scope_id)s : " "%(reason)s" #, python-format msgid "Unable to update the following object fields: %(fields)s" msgstr "" "No se han podido actualizar los siguientes campos de objetos: %(fields)s" #, python-format msgid "" "Unable to verify match:%(match)s as the parent resource: %(res)s was not " "found" msgstr "" "No se ha podido verificar la coincidencia:%(match)s como recurso primario: " "no se ha encontrado %(res)s" #, python-format msgid "Unexpected label for script %(script_name)s: %(labels)s" msgstr "Etiqueta inesperada para el script %(script_name)s: %(labels)s" #, python-format msgid "Unexpected number of alembic branch points: %(branchpoints)s" msgstr "Número inesperado de puntos de bifurcación de alembic: %(branchpoints)s" #, python-format msgid "Unexpected response code: %s" msgstr "Código de respuesta inesperado: %s" #, python-format msgid "Unexpected response: %s" msgstr "Respuesta inesperada: %s" #, python-format msgid "Unit name '%(unit)s' is not valid." msgstr "El nombre de unidad '%(unit)s' no es válido." msgid "Unknown API version specified" msgstr "Versión API desconocida especificada" #, python-format msgid "Unknown address type %(address_type)s" msgstr "Tipo de dirección desconocido %(address_type)s" #, python-format msgid "Unknown attribute '%s'." msgstr "Atributo desconocido '%s'." #, python-format msgid "Unknown chain: %r" msgstr "Cadena desconocida: %r" #, python-format msgid "Unknown network type %(network_type)s." msgstr "Tipo de red desconocido %(network_type)s." #, python-format msgid "Unknown quota resources %(unknown)s." msgstr "Recursos de cuota desconocidos %(unknown)s." msgid "Unmapped error" msgstr "Error no correlacionado" msgid "Unrecognized action" msgstr "Acción no reconocida" #, python-format msgid "Unrecognized attribute(s) '%s'" msgstr "Atributo(s) no reconocido(s) '%s'" msgid "Unrecognized field" msgstr "Campo no reconocido" msgid "Unspecified minimum subnet pool prefix." msgstr "Prefijo de agrupación de subred mínimo sin especificar." msgid "Unsupported Content-Type" msgstr "Tipo de contenido no soportado" #, python-format msgid "Unsupported network type %(net_type)s." msgstr "Tipo de red no soportado %(net_type)s." #, python-format msgid "Unsupported port state: %(port_state)s." msgstr "Estado de puerto no soportado: %(port_state)s." msgid "Unsupported request type" msgstr "Tipo de solicitud no soportado" msgid "Updating default security group not allowed." msgstr "Actualización del grupo de seguridad predeterminado no permitida." msgid "" "Use ML2 l2population mechanism driver to learn remote MAC and IPs and " "improve tunnel scalability." msgstr "" "Use el controlador del mecanismo ML2 l2population para aprender las MAC e " "IP remotas y mejorar la escalabilidad del túnel." msgid "Use broadcast in DHCP replies." msgstr "Utilizar la difusión en respuestas DHCP." 
msgid "Use either --delta or relative revision, not both" msgstr "Utilice --delta o la revisión relativa, pero no ambas" msgid "" "Use ipset to speed-up the iptables based security groups. Enabling ipset " "support requires that ipset is installed on L2 agent node." msgstr "" "Utilice ipset para agilizar los grupos de seguridad basados en iptables. " "Para habilitar el soporte para ipset es necesario que ipset esté instalado " "en el nodo agente L2." msgid "" "Use the root helper when listing the namespaces on a system. This may not be " "required depending on the security configuration. If the root helper is not " "required, set this to False for a performance improvement." msgstr "" "Utilice el ayudante raíz para listar los espacios de nombres en un sistema. " "Es posible que no sea necesario según la configuración de seguridad. Si el " "ayudante raíz no es obligatorio, establézcalo en False para mejorar el " "rendimiento." msgid "" "Use veths instead of patch ports to interconnect the integration bridge to " "physical networks. Support kernel without Open vSwitch patch port support so " "long as it is set to True." msgstr "" "Utilice veths en lugar de puertos de parche para interconectar el puente de " "integración con las redes físicas. Se admite kernel sin soporte de puertos " "de parche Open vSwitch siempre y cuando esté definido a True." msgid "User (uid or name) running metadata proxy after its initialization" msgstr "" "Usuario (uid o nombre) que ejecuta el proxy de metadatos después de su " "inicialización" msgid "" "User (uid or name) running metadata proxy after its initialization (if " "empty: agent effective user)." msgstr "" "Usuario (uid o nombre) que ejecuta el proxy de metadatos después de su " "inicialización (si está vacío: usuario efectivo del agente)." msgid "User (uid or name) running this process after its initialization" msgstr "" "Usuario (uid o nombre) que ejecuta este proceso después de su inicialización" msgid "Username for connecting to designate in admin context" msgstr "" "Nombre de usuario para establecer conexión con el designado en el contexto " "de administración" msgid "" "Uses veth for an OVS interface or not. Support kernels with limited " "namespace support (e.g. RHEL 6.5) so long as ovs_use_veth is set to True." msgstr "" "Si utiliza veth para una interfaz o no. Admite núcleos con soporte limitado " "para espacios de nombres (p.e. RHEL 6.5) siempre y cuando ovs_use_veth esté " "definido a True." msgid "VRRP authentication password" msgstr "Contraseña de autenticación VRRP" msgid "VRRP authentication type" msgstr "Tipo de autenticación VRRP" msgid "VXLAN network unsupported." msgstr "Red VXLAN no soportada." #, python-format msgid "" "Validation of dictionary's keys failed. Expected keys: %(expected_keys)s " "Provided keys: %(provided_keys)s" msgstr "" "La validación de las claves del diccionario ha fallado. Claves esperadas: " "%(expected_keys)s Claves proporcionadas: %(provided_keys)s" #, python-format msgid "Validator '%s' does not exist." msgstr "El validador '%s' no existe." 
#, python-format msgid "Value %(value)s in mapping: '%(mapping)s' not unique" msgstr "El valor %(value)s en la correlación: '%(mapping)s' no es exclusivo" #, python-format msgid "" "Value of %(parameter)s has to be multiple of %(number)s, with maximum value " "of %(maximum)s and minimum value of %(minimum)s" msgstr "" "El valor de %(parameter)s tiene que ser un múltiplo de %(number)s, con un " "valor máximo de %(maximum)s y un valor mínimo de %(minimum)s" msgid "" "Value of host kernel tick rate (hz) for calculating minimum burst value in " "bandwidth limit rules for a port with QoS. See kernel configuration file for " "HZ value and tc-tbf manual for more information." msgstr "" "Valor del tickrate del kernel host para calcular el valor mínimo de ráfaga " "en las reglas de limitación de ancho de banda para un puerto con QoS, " "Consulte en el archivo de configuración el valor de HZ y consulte el manual " "de tc-tbf para obtener más información." msgid "" "Value of latency (ms) for calculating size of queue for a port with QoS. See " "tc-tbf manual for more information." msgstr "" "Valor de latencia (ms) para calcular el tamaño de la cola para un puerto con " "QoS. Consulte el manual de tc-tbf para obtener más información." msgid "" "Watch file log. Log watch should be disabled when metadata_proxy_user/group " "has no read/write permissions on metadata proxy log file." msgstr "" "Registro de archivo de observador. El observador de registro debe " "inhabilitarse cuando metadata_proxy_user/group no tiene permisos de lectura-" "grabación en el archivo de registro del proxy de metadatos." msgid "" "When external_network_bridge is set, each L3 agent can be associated with no " "more than one external network. This value should be set to the UUID of that " "external network. To allow L3 agent support multiple external networks, both " "the external_network_bridge and gateway_external_network_id must be left " "empty." msgstr "" "Cuando la opción external_network_bridge está establecida, cada agente L3 se " "puede asociar con una única red externa. Este valor se debe definir con el " "UUID de la red externa. Para permitir que el agente L3 admita varias redes " "externas, tanto external_network_bridge como gateway_external_network_id " "deben estar vacías." msgid "" "When proxying metadata requests, Neutron signs the Instance-ID header with a " "shared secret to prevent spoofing. You may select any string for a secret, " "but it must match here and in the configuration used by the Nova Metadata " "Server. NOTE: Nova uses the same config key, but in [neutron] section." msgstr "" "Cuando se envían solicitudes de metadatos por proxy, Neutron firma la " "cabecera de ID de instancia con un secreto compartido para evitar la " "suplantación de identidad. Puede seleccionar cualquier cadena como secreto, " "pero debe coincidir con la que se haya utilizado en la configuración del " "servidor de metadatos de Nova. NOTA: Nova utiliza la misma clave de " "configuración, pero en la sección [neutron]." msgid "" "Where to store Neutron state files. This directory must be writable by the " "agent." msgstr "" "Dónde almacenar archivos de estado se Neutron. El agente debe tener derecho " "de escritura en este directorio." msgid "" "With IPv6, the network used for the external gateway does not need to have " "an associated subnet, since the automatically assigned link-local address " "(LLA) can be used. However, an IPv6 gateway address is needed for use as the " "next-hop for the default route. 
If no IPv6 gateway address is configured " "here, (and only then) the neutron router will be configured to get its " "default route from router advertisements (RAs) from the upstream router; in " "which case the upstream router must also be configured to send these RAs. " "The ipv6_gateway, when configured, should be the LLA of the interface on the " "upstream router. If a next-hop using a global unique address (GUA) is " "desired, it needs to be done via a subnet allocated to the network and not " "through this parameter. " msgstr "" "Con IPv6, la red utilizada para la pasarela externa no necesita tener una " "subred asociada, ya que puede utilizarse la dirección de enlace local (LLA) " "asignada automáticamente. No obstante, se necesita una dirección de pasarela " "IPv6 para utilizarla como siguiente salto para la ruta predeterminada. Si no " "se configura aquí ninguna dirección de pasarela IPv6, (y sólo entonces) se " "configurará un direccionador de Neutron para obtener su ruta predeterminada " "a partir de los avisos de direccionador (RA) procedentes del direccionador " "en sentido ascendente; en este caso, el direccionador en sentido ascendente " "también debe configurarse para enviar estos RA. Cuando ipv6_gateway esté " "configurada, debe ser la LLA de la interfaz en el direccionador en sentido " "ascendente. Si se desea realizar un siguiente salto utilizando una " "dirección exclusiva global (GUA), debe hacerse utilizando una subred " "asignada a la red, no mediante este parámetro." msgid "You must implement __call__" msgstr "Debe implementar __call__" msgid "" "You must provide a config file for bridge - either --config-file or " "env[NEUTRON_TEST_CONFIG_FILE]" msgstr "" "Debe proporcionar un archivo config para puente, ya sea --config-file o " "env[NEUTRON_TEST_CONFIG_FILE]" msgid "You must provide a revision or relative delta" msgstr "Debe proporcionar una revisión o un delta relativo" msgid "a subnetpool must be specified in the absence of a cidr" msgstr "se debe especificar una agrupación de subredes si no hay un cidr" msgid "add_ha_port cannot be called inside of a transaction." msgstr "no se puede invocar a add_ha_port dentro de una transacción." msgid "allocation_pools allowed only for specific subnet requests." msgstr "" "allocation_pools sólo se permite para solicitudes de subred específicas." msgid "allocation_pools are not in the subnet" msgstr "allocation_pools no está en la subred" msgid "allocation_pools use the wrong ip version" msgstr "allocation_pools utiliza la versión de IP incorrecta" msgid "already a synthetic attribute" msgstr "ya es un atributo sintético" msgid "binding:profile value too large" msgstr "Valor de binding:profile demasiado grande" #, python-format msgid "cannot perform %(event)s due to %(reason)s" msgstr "no se puede llevar a cabo %(event)s debido a %(reason)s" msgid "cidr and prefixlen must not be supplied together" msgstr "No se puede proporcionar cidr y prefixlen a la vez" #, python-format msgid "dhcp_agents_per_network must be >= 1. '%s' is invalid." msgstr "dhcp_agents_per_network debe ser >= 1. '%s' no es válido." 
msgid "dns_domain cannot be specified without a dns_name" msgstr "No se puede especificar dns_domain sin un dns_name" msgid "dns_name cannot be specified without a dns_domain" msgstr "No se puede especificar dns_name sin un dns_domain" msgid "fixed_ip_address cannot be specified without a port_id" msgstr "fixed_ip_address no se puede especificar sin un port_id" #, python-format msgid "gateway_ip %s is not in the subnet" msgstr "gateway_ip %s no está en la subred" #, python-format msgid "has device owner %s" msgstr "tiene el propietario de dispositivo %s" msgid "in use" msgstr "en uso" #, python-format msgid "ip command failed on device %(dev_name)s: %(reason)s" msgstr "El mandato ip ha fallado en el dispositivo %(dev_name)s: %(reason)s" #, python-format msgid "ip command failed: %(reason)s" msgstr "Ha fallado el mandato ip: %(reason)s" #, python-format msgid "ip link capability %(capability)s is not supported" msgstr "No hay soporte para la función de ip link %(capability)s" #, python-format msgid "ip link command is not supported: %(reason)s" msgstr "No hay soporte para el mandato ip link: %(reason)s" msgid "ip_version must be specified in the absence of cidr and subnetpool_id" msgstr "ip_version debe especificarse en ausencia de cidr y subnetpool_id" msgid "ipv6_address_mode is not valid when ip_version is 4" msgstr "ipv6_address_mode no es válido cuando ip_version es 4" msgid "ipv6_ra_mode is not valid when ip_version is 4" msgstr "ipv6_ra_mode no es válido cuando ip_version es 4" msgid "" "ipv6_ra_mode or ipv6_address_mode cannot be set when enable_dhcp is set to " "False." msgstr "" "ipv6_ra_mode y ipv6_address_mode no se pueden establecer cuando enable_dhcp " "está establecido en False." #, python-format msgid "" "ipv6_ra_mode set to '%(ra_mode)s' with ipv6_address_mode set to " "'%(addr_mode)s' is not valid. If both attributes are set, they must be the " "same value" msgstr "" "No es válido establecer ipv6_ra_mode en '%(ra_mode)s' e ipv6_address_mode en " "'%(addr_mode)s'. Si se establecen ambos atributos, deben tener el mismo valor" msgid "mac address update" msgstr "Actualización de la dirección MAC" #, python-format msgid "" "max_l3_agents_per_router %(max_agents)s config parameter is not valid. It " "has to be greater than or equal to min_l3_agents_per_router %(min_agents)s." msgstr "" "El parámetro de configuración max_l3_agents_per_router %(max_agents)s no es " "válido. Tiene que ser mayor o igual que min_l3_agents_per_router " "%(min_agents)s." 
msgid "must provide exactly 2 arguments - cidr and MAC" msgstr "debe proporcionar exactamente 2 argumentos: cidr y MAC" msgid "network_type required" msgstr "network_type requerido" #, python-format msgid "network_type value '%s' not supported" msgstr "valor network_type '%s' no admitido" msgid "new subnet" msgstr "nueva subred" #, python-format msgid "physical_network '%s' unknown for VLAN provider network" msgstr "physical_network '%s' desconocido para la red del proveedor VLAN" #, python-format msgid "physical_network '%s' unknown for flat provider network" msgstr "physical_network '%s' desconocida para la red de proveedor simple" msgid "physical_network required for flat provider network" msgstr "physical_network es obligatorio para una red de proveedor simple" #, python-format msgid "provider:physical_network specified for %s network" msgstr "se ha especificado provider:physical_network para la red %s" #, python-format msgid "rbac_db_model not found in %s" msgstr "No se ha encontrado rbac_db_model en %s" msgid "record" msgstr "registro" msgid "respawn_interval must be >= 0 if provided." msgstr "respawn_interval debe ser >= 0 si se proporciona." #, python-format msgid "segmentation_id out of range (%(min)s through %(max)s)" msgstr "segmentation_id fuera de rango (%(min)s a %(max)s)" msgid "segmentation_id requires physical_network for VLAN provider network" msgstr "" "segmentation_id requiere physical_network para la red de proveedor VLAN" msgid "shared attribute switching to synthetic" msgstr "atributo compartido cambiando a sintético" #, python-format msgid "" "subnetpool %(subnetpool_id)s cannot be updated when associated with shared " "address scope %(address_scope_id)s" msgstr "" "La agrupación de subred %(subnetpool_id)s no se puede actualizar cuando está " "asociada con el ámbito de dirección compartida %(address_scope_id)s" msgid "subnetpool_id and use_default_subnetpool cannot both be specified" msgstr "" "No se puede especificar a la vez subnetpool_id y use_default_subnetpool" msgid "the nexthop is not connected with router" msgstr "el siguiente salto no está conectado con el direccionador" msgid "the nexthop is used by router" msgstr "el siguiente salto lo está utilizando el direccionador" #, python-format msgid "unable to load %s" msgstr "no se puede cargar %s" msgid "" "uuid provided from the command line so external_process can track us via /" "proc/cmdline interface." msgstr "" "uuid proporcionada desde la línea de mandatos para que external_process " "puede realizar un seguimiento a través de la interfaz /proc/cmdline." neutron-8.4.0/neutron/locale/pt_BR/0000775000567000056710000000000013044373210020312 5ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/locale/pt_BR/LC_MESSAGES/0000775000567000056710000000000013044373210022077 5ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/locale/pt_BR/LC_MESSAGES/neutron.po0000664000567000056710000050537013044372760024154 0ustar jenkinsjenkins00000000000000# Translations template for neutron. # Copyright (C) 2015 ORGANIZATION # This file is distributed under the same license as the neutron project. # # Translators: # Gabriel Wainer, 2013 # Carlos Marques , 2016. 
#zanata msgid "" msgstr "" "Project-Id-Version: neutron 8.2.1.dev52\n" "Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n" "POT-Creation-Date: 2016-09-01 18:10+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" "PO-Revision-Date: 2016-05-04 12:29+0000\n" "Last-Translator: Carlos Marques \n" "Language: pt-BR\n" "Plural-Forms: nplurals=2; plural=(n > 1);\n" "Generated-By: Babel 2.0\n" "X-Generator: Zanata 3.7.3\n" "Language-Team: Portuguese (Brazil)\n" #, python-format msgid "" "\n" "Command: %(cmd)s\n" "Exit code: %(code)s\n" "Stdin: %(stdin)s\n" "Stdout: %(stdout)s\n" "Stderr: %(stderr)s" msgstr "" "\n" "Comando: %(cmd)s\n" "Código de saída: %(code)s\n" "Stdin: %(stdin)s\n" "Stdout: %(stdout)s\n" "Stderr: %(stderr)s" #, python-format msgid "" "%(branch)s HEAD file does not match migration timeline head, expected: " "%(head)s" msgstr "" "O arquivo HEAD %(branch)s não corresponde ao cabeçalho da linha de tempo de " "migração, esperado: %(head)s" #, python-format msgid "%(driver)s: Internal driver error." msgstr "%(driver)s: Erro interno de driver." #, python-format msgid "%(id)s is not a valid %(type)s identifier" msgstr "%(id)s não é um identificador %(type)s válido" #, python-format msgid "" "%(invalid_dirs)s is invalid value for sort_dirs, valid value is '%(asc)s' " "and '%(desc)s'" msgstr "" "%(invalid_dirs)s é um valor inválido para sort_dirs, o valor válido é " "'%(asc)s' e '%(desc)s'" #, python-format msgid "%(key)s prohibited for %(tunnel)s provider network" msgstr "%(key)s proibida para rede de provedor %(tunnel)s" #, python-format msgid "" "%(method)s called with network settings %(current)s (original settings " "%(original)s) and network segments %(segments)s" msgstr "" "%(method)s chamado com as configurações de rede %(current)s (configurações " "originais %(original)s) e segmentos de rede %(segments)s" #, python-format msgid "" "%(method)s called with port settings %(current)s (original settings " "%(original)s) host %(host)s (original host %(original_host)s) vif type " "%(vif_type)s (original vif type %(original_vif_type)s) vif details " "%(vif_details)s (original vif details %(original_vif_details)s) binding " "levels %(levels)s (original binding levels %(original_levels)s) on network " "%(network)s with segments to bind %(segments_to_bind)s" msgstr "" "O %(method)s chamado com as configurações de porta %(current)s " "(configurações originais %(original)s), host %(host)s (host original " "%(original_host)s), tipo vif %(vif_type)s (tipo de vif original " "%(original_vif_type)s), detalhes de vif %(vif_details)s (detalhes de vif " "originais %(original_vif_details)s) e níveis de ligação %(levels)s (níveis " "de ligação originais %(original_levels)s) na rede %(network)s com segmentos " "para ligar %(segments_to_bind)s" #, python-format msgid "" "%(method)s called with subnet settings %(current)s (original settings " "%(original)s)" msgstr "" "%(method)s chamado com as configurações de sub-rede %(current)s " "(configurações originais %(original)s)" #, python-format msgid "%(method)s failed." msgstr "%(method)s falhou." #, python-format msgid "%(name)s '%(addr)s' does not match the ip_version '%(ip_version)s'" msgstr "%(name)s '%(addr)s' não corresponde à ip_version '%(ip_version)s'" #, python-format msgid "%(param)s must be in %(range)s range." msgstr "%(param)s deve estar no intervalo %(range)s." 
#, python-format msgid "%s cannot be called while in offline mode" msgstr "%s não pode ser chamado enquanto estiver no modo off-line" #, python-format msgid "%s is invalid attribute for sort_key" msgstr "%s é um atributo inválido para sort_key" #, python-format msgid "%s is invalid attribute for sort_keys" msgstr "%s é um atributo inválido para sort_keys" #, python-format msgid "%s is not a valid VLAN tag" msgstr "%s não é um tag de VLAN válido" #, python-format msgid "%s must be specified" msgstr "%s deve ser especificado" #, python-format msgid "%s must implement get_port_from_device or get_ports_from_devices." msgstr "%s deve implementar get_port_from_device ou get_ports_from_devices." #, python-format msgid "%s prohibited for VLAN provider network" msgstr "%s proibido para rede de provedor VLAN" #, python-format msgid "%s prohibited for flat provider network" msgstr "%s proibido para rede de provedor simples" #, python-format msgid "%s prohibited for local provider network" msgstr "%s proibido para rede de provedor local" #, python-format msgid "" "'%(data)s' contains '%(length)s' characters. Adding a domain name will cause " "it to exceed the maximum length of a FQDN of '%(max_len)s'" msgstr "" "'%(data)s' contém '%(length)s' caracteres. Incluir um nome de domínio " "excederá o comprimento máximo de um FQDN de '%(max_len)s'" #, python-format msgid "" "'%(data)s' contains '%(length)s' characters. Adding a sub-domain will cause " "it to exceed the maximum length of a FQDN of '%(max_len)s'" msgstr "" "'%(data)s' contém '%(length)s' caracteres. Incluir um subdomínio excederá o " "comprimento máximo de um FQDN de '%(max_len)s'" #, python-format msgid "'%(data)s' exceeds maximum length of %(max_len)s" msgstr "'%(data)s' excede o comprimento máximo de %(max_len)s" #, python-format msgid "'%(data)s' is not an accepted IP address, '%(ip)s' is recommended" msgstr "'%(data)s' não é um endereço IP aceitável, '%(ip)s' é recomendado" #, python-format msgid "'%(data)s' is not in %(valid_values)s" msgstr "'%(data)s' não está em %(valid_values)s" #, python-format msgid "'%(data)s' is too large - must be no larger than '%(limit)d'" msgstr "'%(data)s' é muito grande - não deve ser maior que '%(limit)d'" #, python-format msgid "'%(data)s' is too small - must be at least '%(limit)d'" msgstr "'%(data)s' é muito pequeno - deve ser pelo menos '%(limit)d'" #, python-format msgid "'%(data)s' isn't a recognized IP subnet cidr, '%(cidr)s' is recommended" msgstr "" "'%(data)s' não é um IP do CIDR de sub-rede reconhecido, '%(cidr)s' é " "recomendado" #, python-format msgid "'%(data)s' not a valid PQDN or FQDN. Reason: %(reason)s" msgstr "'%(data)s' não é um PQDN ou FQDN válido. Motivo: %(reason)s" #, python-format msgid "'%(host)s' is not a valid nameserver. %(msg)s" msgstr "'%(host)s' não é um servidor de nomes válido. %(msg)s" #, python-format msgid "'%s' Blank strings are not permitted" msgstr "'%s' Sequências em branco não são permitidas" #, python-format msgid "'%s' cannot be converted to boolean" msgstr "'%s' não pode ser convertido em booleano" #, python-format msgid "'%s' cannot be converted to lowercase string" msgstr "O '%s' não pode ser convertido em sequência minúscula" #, python-format msgid "'%s' contains whitespace" msgstr "'%s' contém espaços em branco" #, python-format msgid "'%s' exceeds the 255 character FQDN limit" msgstr "'%s' excede o limite de 255 caracteres para FQDN" #, python-format msgid "'%s' is a FQDN. It should be a relative domain name" msgstr "'%s' é um FQDN. 
Ele deve ser um nome de domínio relativo." #, python-format msgid "'%s' is not a FQDN" msgstr "'%s' não é um FQDN" #, python-format msgid "'%s' is not a dictionary" msgstr "'%s' não é um dicionário" #, python-format msgid "'%s' is not a list" msgstr "'%s' não é uma lista" #, python-format msgid "'%s' is not a valid IP address" msgstr "'%s' não é um endereço IP válido" #, python-format msgid "'%s' is not a valid IP subnet" msgstr "'%s' não é uma sub-rede de IP válida" #, python-format msgid "'%s' is not a valid MAC address" msgstr "'%s' não é um endereço MAC válido" #, python-format msgid "'%s' is not a valid RBAC object type" msgstr "'%s' não é um tipo de objeto RBAC válido" #, python-format msgid "'%s' is not a valid UUID" msgstr "'%s' não é um UUID válido" #, python-format msgid "'%s' is not a valid boolean value" msgstr "'%s' não é um booleano válido" #, python-format msgid "'%s' is not a valid input" msgstr "'%s' não é uma entrada válida" #, python-format msgid "'%s' is not a valid string" msgstr "'%s' não é uma sequência válida" #, python-format msgid "'%s' is not an integer" msgstr "'%s' não é um número inteiro" #, python-format msgid "'%s' is not an integer or uuid" msgstr "'%s' não é um número inteiro ou um uuid" #, python-format msgid "'%s' is not of the form <key>=[value]" msgstr "'%s' não é do formato <key>=[value]" #, python-format msgid "'%s' is not supported for filtering" msgstr "'%s' não é suportado para filtragem" #, python-format msgid "'%s' must be a non negative decimal." msgstr "'%s' deve ser um decimal não negativo." #, python-format msgid "'%s' should be non-negative" msgstr "'%s' deve ser não negativo" msgid "'.' searches are not implemented" msgstr "Procuras de '.' não são implementadas" #, python-format msgid "'module' object has no attribute '%s'" msgstr "O objeto 'módulo' não possui nenhum atributo '%s'" msgid "'port_max' is smaller than 'port_min'" msgstr "'port_max' é menor que 'port_min'" msgid "" "(Deprecated. Use '--subproject neutron-SERVICE' instead.) The advanced " "service to execute the command against." msgstr "" "(Descontinuado. Use '--subproject neutron-SERVICE' em vez disso.) O serviço " "avançado com relação ao qual executar o comando." msgid "0 is not allowed as CIDR prefix length" msgstr "0 não é permitido como um comprimento do prefixo CIDR" msgid "" "32-bit BGP identifier, typically an IPv4 address owned by the system running " "the BGP DrAgent." msgstr "" "Identificador BGP de 32 bits, normalmente um endereço IPv4 de propriedade do " "sistema que executa o BGP DrAgent." msgid "A QoS driver must be specified" msgstr "Um driver QoS deve ser especificado" msgid "A cidr must be specified in the absence of a subnet pool" msgstr "Um cidr deve ser especificado na ausência de um conjunto de sub-rede" msgid "" "A decimal value as Vendor's Registered Private Enterprise Number as required " "by RFC3315 DUID-EN." msgstr "" "Um valor decimal, como o Número de Empresa Privada Registrada do Fornecedor, " "conforme necessário pelo RFC3315 DUID-EN." #, python-format msgid "A default external network already exists: %(net_id)s." msgstr "Uma rede externa padrão já existe: %(net_id)s." msgid "" "A default subnetpool for this IP family has already been set. Only one " "default may exist per IP family" msgstr "" "Um conjunto de sub-redes padrão para essa família de IP já foi configurado. " "Apenas um padrão pode existir por família de IP." 
msgid "A metering driver must be specified" msgstr "Um driver de medição deve ser especificado" msgid "A password must be supplied when using auth_type md5." msgstr "Uma senha deve ser fornecida ao usar auth_type md5." msgid "API for retrieving service providers for Neutron advanced services" msgstr "" "API para recuperação de provedores de serviço para serviços avançados do " "Neutron" msgid "Aborting periodic_sync_routers_task due to an error." msgstr "Interrompendo periodic_sync_routers_task devido a um erro." msgid "Access to this resource was denied." msgstr "O acesso à este recurso foi negado." msgid "Action to be executed when a child process dies" msgstr "Ação a ser executada quando um processo-filho é desativado" msgid "" "Add comments to iptables rules. Set to false to disallow the addition of " "comments to generated iptables rules that describe each rule's purpose. " "System must support the iptables comments module for addition of comments." msgstr "" "Adicionar comentários às regras do iptables. Configure como falso para " "desabilitar a adição de comentários para regras de iptables geradas que " "descrevam o propósito de cada regra. O sistema deve suportar o módulo de " "comentários do iptables para adicionar comentários." msgid "Address not present on interface" msgstr "Endereço não está presente na interface" #, python-format msgid "Address scope %(address_scope_id)s could not be found" msgstr "O escopo de endereço %(address_scope_id)s não pôde ser localizado" msgid "" "Address to listen on for OpenFlow connections. Used only for 'native' driver." msgstr "" "Endereço para atender conexões OpenFlow. Usado somente para driver 'native'." msgid "Adds external network attribute to network resource." msgstr "Inclui atributo de rede externo no recurso de rede." msgid "Adds test attributes to core resources." msgstr "Inclui atributos de teste aos recursos principais." #, python-format msgid "Agent %(id)s could not be found" msgstr "O agente %(id)s não pôde ser localizado" #, python-format msgid "Agent %(id)s is not a L3 Agent or has been disabled" msgstr "O agente %(id)s não é um agente L3 ou foi desativado" #, python-format msgid "Agent %(id)s is not a valid DHCP Agent or has been disabled" msgstr "O agente %(id)s não é um Agente DHCP válido ou foi desativado" msgid "Agent has just been revived" msgstr "O agente acabou de ser reativado" msgid "" "Agent starts with admin_state_up=False when enable_new_agents=False. In the " "case, user's resources will not be scheduled automatically to the agent " "until admin changes admin_state_up to True." msgstr "" "O agente inicia com admin_state_up=False quando enable_new_agents=False. No " "caso, os recursos do usuário não serão planejados automaticamente para o " "agente até que o administrador mude admin_state_up para True." #, python-format msgid "Agent updated: %(payload)s" msgstr "Agente atualizado:%(payload)s" #, python-format msgid "" "Agent with agent_type=%(agent_type)s and host=%(host)s could not be found" msgstr "" "O agente com agent_type=%(agent_type)s e host=%(host)s não pôde ser " "localizado" msgid "Allow auto scheduling networks to DHCP agent." msgstr "Permitir o planejamento automático de redes para o agente DHCP." msgid "Allow auto scheduling of routers to L3 agent." msgstr "Permitir planejamento automático de roteadores para agente L3." msgid "" "Allow overlapping IP support in Neutron. Attention: the following parameter " "MUST be set to False if Neutron is being used in conjunction with Nova " "security groups." 
msgstr "" "Permitir sobreposição de suporte IP no Neutron. Atenção: o parâmetro a " "seguir DEVERÁ ser configurado para False se o Neutron estiver sendo usado em " "conjunto com os grupos de segurança do Nova." msgid "Allow running metadata proxy." msgstr "Permite executar proxy de metadados." msgid "Allow sending resource operation notification to DHCP agent" msgstr "" "Permitir envio de notificação de operação de recurso para o agente DHCP" msgid "Allow the creation of PTR records" msgstr "Permitir a criação de registros PTR" msgid "Allow the usage of the bulk API" msgstr "Permitir o uso da API em massa" msgid "Allow the usage of the pagination" msgstr "Permitir o uso da paginação" msgid "Allow the usage of the sorting" msgstr "Permitir o uso da classificação" msgid "Allow to perform insecure SSL (https) requests to nova metadata" msgstr "" "Permita executar solicitações (https) de SSL inseguras para metadados do Nova" msgid "Allowed address pairs must be a list." msgstr "Pares de endereço permitidos devem ser uma lista." msgid "AllowedAddressPair must contain ip_address" msgstr "AllowedAddressPair deve conter ip_address" msgid "" "Allows for serving metadata requests coming from a dedicated metadata access " "network whose CIDR is 169.254.169.254/16 (or larger prefix), and is " "connected to a Neutron router from which the VMs send metadata:1 request. In " "this case DHCP Option 121 will not be injected in VMs, as they will be able " "to reach 169.254.169.254 through a router. This option requires " "enable_isolated_metadata = True." msgstr "" "Permite entregar solicitações de metadados provenientes de uma rede de " "acesso de metadados dedicada cujo CIDR é 169.254.169.254/16 (ou um prefixo " "maior), e é conectada a um roteador Neutron a partir do qual as MVs enviam a " "solicitação metadata:1. Nesse caso, a Opção 121 do DHCP não será injetada " "nas MVs já que elas poderão acessar 169.254.169.254 por meio de um roteador. " "Essa opção requer enable_isolated_metadata = True." #, python-format msgid "" "Already hosting BGP Speaker for local_as=%(current_as)d with router_id=" "%(rtid)s." msgstr "" "Já hospedando o Speaker BGP para local_as=%(current_as)d com router_id=" "%(rtid)s." #, python-format msgid "" "Already hosting maximum number of BGP Speakers. Allowed scheduled count=" "%(count)d" msgstr "" "Já hospedando o número máximo de Speakers BGP. Contagem planejada permitida=" "%(count)d" msgid "An RBAC policy already exists with those values." msgstr "Uma política RBAC já existe com esses valores." msgid "An identifier must be specified when updating a subnet" msgstr "Um identificador deve ser especificado ao atualizar uma sub-rede" msgid "An interface driver must be specified" msgstr "Um driver de interface deve ser especificado" msgid "" "An ordered list of extension driver entrypoints to be loaded from the " "neutron.ml2.extension_drivers namespace. For example: extension_drivers = " "port_security,qos" msgstr "" "Uma lista ordenada de pontos de entrada do driver de extensão a serem " "carregados a partir do namespace neutron.ml2.extension_drivers. Por exemplo: " "extension_drivers = port_security,qos" msgid "" "An ordered list of networking mechanism driver entrypoints to be loaded from " "the neutron.ml2.mechanism_drivers namespace." msgstr "" "Lista ordenada de pontos de entrada do driver de mecanismo de rede que " "serão carregados a partir do namespace neutron.ml2.mechanism_drivers." msgid "An unexpected internal error occurred." msgstr "Ocorreu um erro interno inesperado." 
msgid "An unknown error has occurred. Please try your request again." msgstr "Ocorreu um erro desconhecido. Tente a solicitação novamente." msgid "Async process didn't respawn" msgstr "O processo assíncrono não sofreu spawn novamente" #, python-format msgid "Attribute '%s' not allowed in POST" msgstr "Atributo '%s' não permitido no autoteste inicial" #, python-format msgid "Authentication type not supported. Requested type=%(auth_type)s." msgstr "Tipo de autenticação não suportado. Tipo solicitado=%(auth_type)s." msgid "Authorization URL for connecting to designate in admin context" msgstr "" "URL de autorização para conexão com o Designado no contexto de administrador" msgid "Automatically remove networks from offline DHCP agents." msgstr "Remover automaticamente as redes de agentes DHCP offline." msgid "" "Automatically reschedule routers from offline L3 agents to online L3 agents." msgstr "" "Reagende roteadores automaticamente de agentes L3 offline para agentes L3 " "online." msgid "Availability zone of this node" msgstr "Zona de disponibilidade deste nó" #, python-format msgid "AvailabilityZone %(availability_zone)s could not be found." msgstr "A AvailabilityZone %(availability_zone)s não pôde ser localizada." msgid "Available commands" msgstr "Comandos disponíveis" #, python-format msgid "" "BGP Peer %(peer_ip)s for remote_as=%(remote_as)s, running for BGP Speaker " "%(speaker_as)d not added yet." msgstr "" "Peer BGP %(peer_ip)s para remote_as=%(remote_as)s, em execução para Speaker " "BGP %(speaker_as)d ainda não incluído." #, python-format msgid "" "BGP Speaker %(bgp_speaker_id)s is already configured to peer with a BGP Peer " "at %(peer_ip)s, it cannot peer with BGP Peer %(bgp_peer_id)s." msgstr "" "O Speaker BGP %(bgp_speaker_id)s já está configurado para estar no mesmo " "nível de um Peer BGP em %(peer_ip)s, ele não pode estar no mesmo nível do " "Peer BGP %(bgp_peer_id)s." #, python-format msgid "" "BGP Speaker for local_as=%(local_as)s with router_id=%(rtid)s not added yet." msgstr "" "Speaker BGP para local_as=%(local_as)s com router_id=%(rtid)s ainda não " "incluído. " #, python-format msgid "" "BGP peer %(bgp_peer_id)s is not associated with BGP speaker " "%(bgp_speaker_id)s." msgstr "" "O peer BGP %(bgp_peer_id)s não está associado ao speaker BGP " "%(bgp_speaker_id)s." #, python-format msgid "BGP peer %(bgp_peer_id)s not authenticated." msgstr "O peer BGP %(bgp_peer_id)s não está autenticado." #, python-format msgid "BGP peer %(id)s could not be found." msgstr "O peer %(id)s não pôde ser localizado." #, python-format msgid "" "BGP speaker %(bgp_speaker_id)s is not hosted by the BgpDrAgent %(agent_id)s." msgstr "" "O speaker BGP %(bgp_speaker_id)s não é hospedado pelo BgpDrAgent " "%(agent_id)s." #, python-format msgid "BGP speaker %(id)s could not be found." msgstr "O speaker %(id)s não pôde ser localizado." msgid "BGP speaker driver class to be instantiated." msgstr "Classe do driver do speaker BGP a ser instanciada." msgid "Backend does not support VLAN Transparency." msgstr "O backend não suporta a Transparência da VLAN." 
#, python-format msgid "" "Bad prefix or mac format for generating IPv6 address by EUI-64: %(prefix)s, " "%(mac)s:" msgstr "" "Prefixo ou formato mac inválido para gerar endereço IPv6 por EUI-64: " "%(prefix)s, %(mac)s:" #, python-format msgid "Bad prefix type for generate IPv6 address by EUI-64: %s" msgstr "Tipo de prefixo inválido para gerar endereço IPv6 por EUI-64: %s" #, python-format msgid "Base MAC: %s" msgstr "MAC Base: %s" msgid "" "Base log dir for dnsmasq logging. The log contains DHCP and DNS log " "information and is useful for debugging issues with either DHCP or DNS. If " "this section is null, disable dnsmasq log." msgstr "" "Diretório de log base para criação de log dnsmasq. O log contém informações " "de log DHCP e DNS e é útil para depurar problemas com DHCP ou DNS. Se esta " "seção for nula, desative o log dnsmasq." #, python-format msgid "BgpDrAgent %(agent_id)s is already associated to a BGP speaker." msgstr "O BgpDrAgent %(agent_id)s já está associado a um speaker BGP." #, python-format msgid "BgpDrAgent %(id)s is invalid or has been disabled." msgstr "BgpDrAgent %(id)s é inválido ou foi desativado." #, python-format msgid "BgpDrAgent updated: %s" msgstr "BgpDrAgent atualizado: %s" msgid "Body contains invalid data" msgstr "O corpo contém dados inválidos" msgid "Both network_id and router_id are None. One must be provided." msgstr "O network_id e o router_id são Nenhum. Um deles deve ser fornecido." #, python-format msgid "Bridge %(bridge)s does not exist." msgstr "A ponte %(bridge)s não existe." #, python-format msgid "Bridge %s does not exist" msgstr "A ponte %s não existe" msgid "Bulk operation not supported" msgstr "Operação em massa não suportada" msgid "CIDR to monitor" msgstr "CIDR para monitorar" #, python-format msgid "Callback for %(resource_type)s not found" msgstr "Retorno de chamada para %(resource_type)s não localizado" #, python-format msgid "Callback for %(resource_type)s returned wrong resource type" msgstr "" "O retorno de chamada para %(resource_type)s retornou um tipo de recurso " "errado" #, python-format msgid "Cannot add floating IP to port %s that has no fixed IPv4 addresses" msgstr "" "Não é possível incluir IP flutuante na porta %s que não tem endereços IPv4 " "fixos" #, python-format msgid "Cannot add floating IP to port on subnet %s which has no gateway_ip" msgstr "" "Não é possível incluir IP flutuante na porta da sub-rede %s que não possui " "gateway_ip" #, python-format msgid "Cannot add multiple callbacks for %(resource_type)s" msgstr "" "Não é possível incluir vários retornos de chamada para %(resource_type)s" #, python-format msgid "Cannot allocate IPv%(req_ver)s subnet from IPv%(pool_ver)s subnet pool" msgstr "" "Não é possível alocar a sub-rede IPv%(req_ver)s a partir do conjunto de sub-" "redes IPv%(pool_ver)s" msgid "Cannot allocate requested subnet from the available set of prefixes" msgstr "" "Não é possível alocar a sub-rede solicitada a partir do conjunto de prefixos " "disponível." #, python-format msgid "" "Cannot associate floating IP %(floating_ip_address)s (%(fip_id)s) with port " "%(port_id)s using fixed IP %(fixed_ip)s, as that fixed IP already has a " "floating IP on external network %(net_id)s." msgstr "" "Não é possível associar o IP flutuante %(floating_ip_address)s (%(fip_id)s) " "à porta %(port_id)s usando IP fixo %(fixed_ip)s, pois esse IP fixo já possui " "um IP flutuante em uma rede externa %(net_id)s." msgid "" "Cannot change HA attribute of active routers. 
Please set router " "admin_state_up to False prior to upgrade." msgstr "" "Não é possível alterar o atributo de HA de roteadores ativos. Configure o " "roteador admin_state_up para False antes do upgrade." #, python-format msgid "" "Cannot create floating IP and bind it to %s, since that is not an IPv4 " "address." msgstr "" "Não é possível criar IP flutuante e ligá-lo a %s porque não é um endereço " "IPv4." #, python-format msgid "" "Cannot create floating IP and bind it to Port %s, since that port is owned " "by a different tenant." msgstr "" "Não é possível criar IP flutuante e ligá-lo à porta %s, uma vez que a porta " "pertence a um locatário diferente." msgid "Cannot create resource for another tenant" msgstr "Não é possível criar recurso para outro locatário" msgid "Cannot disable enable_dhcp with ipv6 attributes set" msgstr "Não é possível desativar enable_dhcp com conjunto de atributos ipv6" #, python-format msgid "Cannot find %(table)s with %(col)s=%(match)s" msgstr "Não é possível localizar %(table)s com %(col)s=%(match)s" #, python-format msgid "Cannot handle subnet of type %(subnet_type)s" msgstr "Não é possível manipular a sub-rede do tipo %(subnet_type)s" msgid "Cannot have multiple IPv4 subnets on router port" msgstr "Não é possível ter diversas sub-redes IPV4 na porta do roteador" #, python-format msgid "" "Cannot have multiple router ports with the same network id if both contain " "IPv6 subnets. Existing port %(p)s has IPv6 subnet(s) and network id %(nid)s" msgstr "" "Não é possível ter várias portas de roteador com o mesmo ID de rede se ambas " "contiverem sub-redes IPv6. A porta existente %(p)s possui sub-rede(s) IPv6 e " "o ID de rede %(nid)s" #, python-format msgid "" "Cannot host distributed router %(router_id)s on legacy L3 agent %(agent_id)s." msgstr "" "Não é possível hospedar o roteador distribuído %(router_id)s no agente L3 " "legacy %(agent_id)s." msgid "Cannot match priority on flow deletion or modification" msgstr "" "Não é possível corresponder a prioridade na exclusão ou modificação do fluxo" msgid "Cannot mix IPv4 and IPv6 prefixes in a subnet pool." msgstr "" "Não é possível combinar prefixos IPv4 e IPv6 em um conjunto de sub-rede." msgid "Cannot specify both --service and --subproject." msgstr "Não é possível especificar --service e --subproject ao mesmo tempo." msgid "Cannot specify both subnet-id and port-id" msgstr "Não é possível especificar subnet-id e port-id" msgid "Cannot understand JSON" msgstr "Não é possível entender JSON" #, python-format msgid "Cannot update read-only attribute %s" msgstr "Não é possível atualizar o atributo somente leitura %s" msgid "" "Cannot upgrade active router to distributed. Please set router " "admin_state_up to False prior to upgrade." msgstr "" "Não é possível fazer upgrade do roteador ativo para distribuído. Configure o " "roteador admin_state_up para False antes do upgrade." msgid "Certificate Authority public key (CA cert) file for ssl" msgstr "" "Arquivo de chave pública da Autoridade de Certificação (certificado CA) para " "ssl" #, python-format msgid "" "Change would make usage less than 0 for the following resources: %(unders)s." msgstr "A mudança faria com que o uso ficasse menor que 0 para os seguintes recursos: %(unders)s." 
msgid "Check ebtables installation" msgstr "Verifique a instalação de ebtables" msgid "Check for ARP header match support" msgstr "Verifique se há suporte para correspondência de cabeçalho ARP" msgid "Check for ARP responder support" msgstr "Verifique se há suporte para respondente do ARP" msgid "Check for ICMPv6 header match support" msgstr "Verifique se há suporte para correspondência de cabeçalho ICMPv6" msgid "Check for OVS Geneve support" msgstr "Verifique se há suporte para OVS Geneve" msgid "Check for OVS vxlan support" msgstr "Verifique se há suporte do vxlan do OVS" msgid "Check for VF management support" msgstr "Verifique se há suporte para gerenciamento de VF" msgid "Check for iproute2 vxlan support" msgstr "Verifique se há suporte para vxlan do iproute2" msgid "Check for nova notification support" msgstr "Verifique se há suporte para nova notificação" msgid "Check for patch port support" msgstr "Verifique se há suporte para a porta de correção" msgid "Check ip6tables installation" msgstr "Verificar instalação do ip6tables" msgid "Check ipset installation" msgstr "Verificar instalação do ipset" msgid "Check keepalived IPv6 support" msgstr "Verifique o suporte a keepalived IPv6" msgid "Check minimal dibbler version" msgstr "Verifique a versão do dibbler mínima" msgid "Check minimal dnsmasq version" msgstr "Verifique a versão dnsmasq mínima" msgid "Check netns permission settings" msgstr "Verifique as configurações de permissão netns" msgid "Check ovs conntrack support" msgstr "Verifique o suporte conntrack do OVS" msgid "Check ovsdb native interface support" msgstr "Verifique o suporte da interface nativa ovsdb" #, python-format msgid "" "Cidr %(subnet_cidr)s of subnet %(subnet_id)s overlaps with cidr %(cidr)s of " "subnet %(sub_id)s" msgstr "" "O cidr %(subnet_cidr)s de sub-rede %(subnet_id)s se sobrepõe com o cidr " "%(cidr)s da sub-rede %(sub_id)s" msgid "Class not found." msgstr "Classe não encontrada." msgid "Cleanup resources of a specific agent type only." msgstr "Limpar recursos somente de um tipo de agente específico." msgid "Client certificate for nova metadata api server." msgstr "Certificado do cliente para o servidor da API de metadados do Nova." msgid "" "Comma-separated list of : tuples, mapping " "network_device to the agent's node-specific list of virtual functions that " "should not be used for virtual networking. vfs_to_exclude is a semicolon-" "separated list of virtual functions to exclude from network_device. The " "network_device in the mapping should appear in the physical_device_mappings " "list." msgstr "" "Lista separada por vírgulas de tuplas : que " "mapeiam o network_device para a lista específica do nó do agente de funções " "virtuais que não devem ser usadas para rede virtual. vfs_to_exclude é uma " "lista separada por ponto-e-vírgula de funções virtuais para excluir do " "network_device. O network_device no mapeamento deve aparecer na lista " "physical_device_mappings." msgid "" "Comma-separated list of : tuples mapping physical " "network names to the agent's node-specific Open vSwitch bridge names to be " "used for flat and VLAN networks. The length of bridge names should be no " "more than 11. Each bridge must exist, and should have a physical network " "interface configured as a port. All physical networks configured on the " "server should have mappings to appropriate bridges on each agent. Note: If " "you remove a bridge from this mapping, make sure to disconnect it from the " "integration bridge as it won't be managed by the agent anymore. 
Deprecated " "for ofagent." msgstr "" "Lista separada por vírgula de tuplas : que mapeiam " "nomes de rede física para os nomes de ponte do Open vSwitch específicos do " "nó do agente a serem usados para redes simples e de VLAN. O comprimento dos " "nomes de ponte deve ser de no máximo 11 caracteres. Cada ponte deve existir " "e possuir uma interface de rede física configurada como uma porta. Todas as " "redes físicas configuradas no servidor devem ter mapeamentos para pontes " "apropriadas em cada agente. Nota: Se você remover uma ponte desse " "mapeamento, assegure-se de desconectá-la da ponte de integração, já que ela " "não será gerenciada mais pelo agente. Descontinuado para ofagent. " msgid "" "Comma-separated list of : tuples mapping " "physical network names to the agent's node-specific physical network device " "interfaces of SR-IOV physical function to be used for VLAN networks. All " "physical networks listed in network_vlan_ranges on the server should have " "mappings to appropriate interfaces on each agent." msgstr "" "Lista separada por vírgula de tuplas : que " "mapeiam nomes de rede física para as interfaces de dispositivo de rede " "física específicas do nó do agente da função física SR-IOV a serem usadas " "para redes VLAN. Todas as redes físicas listadas em network_vlan_ranges no " "servidor devem ter mapeamentos para as interfaces apropriadas em cada agente." msgid "" "Comma-separated list of : tuples " "mapping physical network names to the agent's node-specific physical network " "interfaces to be used for flat and VLAN networks. All physical networks " "listed in network_vlan_ranges on the server should have mappings to " "appropriate interfaces on each agent." msgstr "" "Lista separada por vírgulas de tuplas :" " que mapeiam nomes de rede física para interfaces de " "rede física específicas do nó do agente a serem usadas para redes simples e " "de VLAN. Todas as redes físicas listadas em network_vlan_ranges no servidor " "devem ter mapeamentos para as interfaces apropriadas em cada agente." msgid "" "Comma-separated list of : tuples enumerating ranges of GRE " "tunnel IDs that are available for tenant network allocation" msgstr "" "Lista separada por vírgula de tuplas : enumerando as " "faixas de IDs de túnel GRE que estão disponíveis para alocação de redes de " "locatário" msgid "" "Comma-separated list of : tuples enumerating ranges of " "Geneve VNI IDs that are available for tenant network allocation" msgstr "" "Lista separada por vírgula de tuplas : enumerando " "intervalos de IDs Geneve VNI que estão disponíveis para alocação de rede " "locatária" msgid "" "Comma-separated list of : tuples enumerating ranges of " "VXLAN VNI IDs that are available for tenant network allocation" msgstr "" "Lista de valores separados por vírgula de tuplas : " "enumerando faixas de IDs VXLAN VNI que estão disponíveis para alocação de " "redes de locatário" msgid "" "Comma-separated list of supported PCI vendor devices, as defined by " "vendor_id:product_id according to the PCI ID Repository. Default enables " "support for Intel and Mellanox SR-IOV capable NICs." msgstr "" "Lista separada por vírgula de dispositivos de fornecedor de PCI suportados, " "conforme definidos pelo vendor_id:product_id de acordo com o Repositório do " "ID do PCI. O padrão permite o suporte para NICs aptos para Intel e Mellanox " "SR-IOV" msgid "" "Comma-separated list of the DNS servers which will be used as forwarders." 
msgstr "" "Lista separada por vírgula dos servidores DNS que serão utilizados como " "encaminhadores." msgid "Command to execute" msgstr "Comando a ser executado" msgid "Config file for interface driver (You may also use l3_agent.ini)" msgstr "" "Arquivo de configuração para driver de interface (também é possível usar " "l3_agent.ini)" #, python-format msgid "Conflicting value ethertype %(ethertype)s for CIDR %(cidr)s" msgstr "O valor conflitante ethertype %(ethertype)s para CIDR %(cidr)s" msgid "" "Controls whether the neutron security group API is enabled in the server. It " "should be false when using no security groups or using the nova security " "group API." msgstr "" "Controla se a API do grupo de segurança Neutron está ativada no servidor. " "Deve ser false quando não usar nenhum grupo de segurança ou quando usar a " "API do grupo de segurança do Nova." #, python-format msgid "Could not bind to %(host)s:%(port)s after trying for %(time)d seconds" msgstr "" "Não foi possível conectar-se ao %(host)s:%(port)s após tentar por %(time)d " "segundos" #, python-format msgid "Could not connect to %s" msgstr "Não foi possível conectar-se ao %s" msgid "Could not deserialize data" msgstr "Não é possível desserializar dados" #, python-format msgid "Could not retrieve schema from %(conn)s: %(err)s" msgstr "Não foi possível recuperar esquema a partir de %(conn)s: %(err)s" #, python-format msgid "" "Current gateway ip %(ip_address)s already in use by port %(port_id)s. Unable " "to update." msgstr "" "O IP atual do gateway %(ip_address)s já está em uso pela porta %(port_id)s. " "Não é possível atualizar." msgid "Currently update of HA mode for a DVR/HA router is not supported." msgstr "" "Atualmente a atualização do modo de HA de um roteador DVR/HA não é suportada." msgid "Currently update of HA mode for a distributed router is not supported." msgstr "" "Atualmente a atualização do modo de HA para um roteador distribuído não é " "suportada." msgid "" "Currently update of distributed mode for a DVR/HA router is not supported" msgstr "" "Atualmente a atualização do modo distribuído de um roteador DVR/HA não é " "suportada." msgid "Currently update of distributed mode for an HA router is not supported." msgstr "" "Atualmente a atualização do modo distribuído de um roteador HA não é " "suportada." msgid "" "Currently updating a router from DVR/HA to non-DVR non-HA is not supported." msgstr "" "Atualmente a atualização de um roteador de DVR/HA para sem DVR ou sem HA não " "é suportada." msgid "Currently updating a router to DVR/HA is not supported." msgstr "Atualmente a atualização de um roteador para DVR/HA não é suportada." msgid "" "DHCP lease duration (in seconds). Use -1 to tell dnsmasq to use infinite " "lease times." msgstr "" "Duração de lease de DHCP (em segundos). Use -1 para dizer ao dnsmasq para " "usar lease infinitas vezes." msgid "" "DVR deployments for VXLAN/GRE/Geneve underlays require L2-pop to be enabled, " "in both the Agent and Server side." msgstr "" "Implementações de DVR para bases VXLAN/GRE/Geneve requerem que L2-pop esteja " "ativado nos lados do Agente e do Servidor." msgid "" "Database engine for which script will be generated when using offline " "migration." msgstr "" "Mecanismo de bancos de dados para o qual o script será gerado ao usar a " "migração off-line." msgid "" "Default IPv4 subnet pool to be used for automatic subnet CIDR allocation. " "Specifies by UUID the pool to be used in case where creation of a subnet is " "being called without a subnet pool ID. 
If not set then no pool will be used " "unless passed explicitly to the subnet create. If no pool is used, then a " "CIDR must be passed to create a subnet and that subnet will not be allocated " "from any pool; it will be considered part of the tenant's private address " "space. This option is deprecated for removal in the N release." msgstr "" "O conjunto de sub-rede IPv4 padrão a ser usado para alocação automática de " "CIDR de sub-rede. Especifica pelo UUID o conjunto a ser usado caso a criação " "de uma sub-rede estiver sendo chamada sem um ID de conjunto de sub-rede. Se " "não for configurado, então nenhum conjunto será usado, a menos que seja " "transmitido explicitamente para a criação da sub-rede. Se nenhum conjunto " "for usado, um CIDR deverá ser transmitido para criar uma sub-rede, que não " "será alocada a partir de nenhum conjunto; ela será considerada parte do " "espaço de endereço privado do locatário. Essa opção foi descontinuada e será " "removida na liberação N." msgid "" "Default IPv6 subnet pool to be used for automatic subnet CIDR allocation. " "Specifies by UUID the pool to be used in case where creation of a subnet is " "being called without a subnet pool ID. See the description for " "default_ipv4_subnet_pool for more information. This option is deprecated for " "removal in the N release." msgstr "" "O conjunto de sub-rede IPv6 padrão a ser usado para alocação automática de " "CIDR de sub-rede. Especifica pelo UUID o conjunto a ser usado caso a criação " "de uma sub-rede estiver sendo chamada sem um ID de conjunto de sub-rede. " "Consulte a descrição de default_ipv4_subnet_pool para obter mais " "informações. Essa opção foi descontinuada e será removida na liberação N." msgid "Default driver to use for quota checks" msgstr "Driver padrão para uso por verificações de cota" msgid "Default external networks must be shared to everyone." msgstr "Redes externas padrão devem ser compartilhadas com todos." msgid "" "Default network type for external networks when no provider attributes are " "specified. By default it is None, which means that if provider attributes " "are not specified while creating external networks then they will have the " "same type as tenant networks. Allowed values for external_network_type " "config option depend on the network type values configured in type_drivers " "config option." msgstr "" "Tipo de rede padrão para redes externas quando nenhum atributo de provedor é " "especificado. Por padrão, é Nenhum, o que significa que se os atributos de " "provedor não forem especificados durante a criação de redes externas, eles " "terão o mesmo tipo que as redes locatárias. Os valores permitidos para a " "opção de configuração external_network_type dependem dos valores de tipo de " "rede definidos na opção de configuração type_drivers." msgid "" "Default number of RBAC entries allowed per tenant. A negative value means " "unlimited." msgstr "" "Número padrão de entradas RBAC permitido por locatário. Um valor negativo " "significa ilimitado." msgid "" "Default number of resource allowed per tenant. A negative value means " "unlimited." msgstr "" "Número padrão de recurso permitido por locatário. Um valor negativo " "significa ilimitado." msgid "Default security group" msgstr "Grupo de segurança padrão" msgid "Default security group already exists." msgstr "O grupo de segurança padrão já existe." msgid "" "Default value of availability zone hints. 
The availability zone aware " "schedulers use this when the resources availability_zone_hints is empty. " "Multiple availability zones can be specified by a comma separated string. " "This value can be empty. In this case, even if availability_zone_hints for a " "resource is empty, availability zone is considered for high availability " "while scheduling the resource." msgstr "" "Valor padrão das dicas de zona de disponibilidade. Os planejadores que " "reconhecem zonas de disponibilidade utilizam esse valor quando o " "availability_zone_hints dos recursos estiver vazio. Diversas zonas de " "disponibilidades podem ser especificadas por uma sequência separada por " "vírgulas. Esse valor pode ser vazio. Nesse caso, mesmo que " "availability_zone_hints de um recurso esteja vazio, a zona de " "disponibilidade será considerada para alta disponibilidade ao planejar o " "recurso." msgid "" "Define the default value of enable_snat if not provided in " "external_gateway_info." msgstr "" "Defina o valor padrão de enable_snat se não fornecido em " "external_gateway_info." msgid "" "Defines providers for advanced services using the format: <service_type>:" "<name>:<driver>[:default]" msgstr "" "Define provedores para serviços avançados usando o formato: <service_type>:" "<name>:<driver>[:default]" msgid "" "Delay within which agent is expected to update existing ports whent it " "restarts" msgstr "" "Atraso dentro do qual é esperado que o agente atualize as portas existentes " "quando ele reinicia" msgid "Delete the namespace by removing all devices." msgstr "Excluir o namespace ao remover todos os dispositivos." #, python-format msgid "Deleting port %s" msgstr "Excluindo a porta %s" #, python-format msgid "Deployment error: %(reason)s." msgstr "Erro de implementação: %(reason)s." msgid "Destroy IPsets even if there is an iptables reference." msgstr "Destruir os IPsets mesmo se houver uma referência de iptables." msgid "Destroy all IPsets." msgstr "Destruir todos os IPsets." #, python-format msgid "Device %(dev_name)s in mapping: %(mapping)s not unique" msgstr "Dispositivo %(dev_name)s no mapeamento: %(mapping)s não exclusivo" #, python-format msgid "Device '%(device_name)s' does not exist." msgstr "O dispositivo '%(device_name)s' não existe." msgid "Device has no virtual functions" msgstr "O dispositivo não possui funções virtuais" #, python-format msgid "Device name %(dev_name)s is missing from physical_device_mappings" msgstr "" "Nome do dispositivo %(dev_name)s está ausente no physical_device_mappings" msgid "Device not found" msgstr "Dispositivo não localizado." #, python-format msgid "" "Distributed Virtual Router Mac Address for host %(host)s does not exist." msgstr "" "O endereço Mac do Roteador Virtual Distribuído para o host %(host)s não " "existe." #, python-format msgid "Domain %(dns_domain)s not found in the external DNS service" msgstr "Domínio %(dns_domain)s não localizado no serviço DNS externo" msgid "Domain to use for building the hostnames" msgstr "Domínio a ser usado para construir os nomes dos hosts" msgid "" "Domain to use for building the hostnames. This option is deprecated. It has " "been moved to neutron.conf as dns_domain. It will be removed in a future " "release." msgstr "" "Domínio utilizado para a construção dos nomes de host. Esta opção foi " "descontinuada. Ela foi movida para neutron.conf como dns_domain. Ela será " "removida em uma próxima versão." 
msgid "Downgrade no longer supported" msgstr "O downgrade não é mais suportado" #, python-format msgid "Driver %s is not unique across providers" msgstr "O driver %s não é único em todos provedores" msgid "Driver for external DNS integration." msgstr "O driver para integração do DNS externa." msgid "Driver for security groups firewall in the L2 agent" msgstr "Driver para firewall para grupos de segurança no agente L2" msgid "Driver to use for scheduling network to DHCP agent" msgstr "Driver a ser usado para planejar a rede para o agente DHCP" msgid "Driver to use for scheduling router to a default L3 agent" msgstr "O driver a ser usado para planejar o roteador para um agente L3 padrão" msgid "" "Driver used for ipv6 prefix delegation. This needs to be an entry point " "defined in the neutron.agent.linux.pd_drivers namespace. See setup.cfg for " "entry points included with the neutron source." msgstr "" "Driver usado para delegação de prefixo ipv6. Este precisa ser um ponto de " "entrada definido no namespace neutron.agent.linux.pd_drivers. Consulte setup." "cfg para obter os pontos de entrada incluídos com a origem do Neutron." msgid "Driver used for scheduling BGP speakers to BGP DrAgent" msgstr "Driver a ser usado para planejar os speakers BGP para o DrAgent BGP" msgid "Drivers list to use to send the update notification" msgstr "" "Lista de drivers a serem usados para enviar a notificação de atualização" #, python-format msgid "Duplicate IP address '%s'" msgstr "Endereço IP duplicado '%s'" #, python-format msgid "" "Duplicate L3HARouterAgentPortBinding is created for router(s) %(router)s. " "Database cannot be upgraded. Please, remove all duplicates before upgrading " "the database." msgstr "" "L3HARouterAgentPortBinding duplicado é criado para um ou mais roteadores " "%(router)s. O banco de dados não pode ser atualizado. Remova todas as " "duplicatas antes de fazer upgrade do banco de dados." msgid "Duplicate Metering Rule in POST." msgstr "Regra de Medição Duplicada no POST." msgid "Duplicate Security Group Rule in POST." msgstr "Regra do Grupo de Segurança Duplicada no Autoteste Inicial." msgid "Duplicate address detected" msgstr "Endereço duplicado detectado" #, python-format msgid "Duplicate hostroute '%s'" msgstr "Hostroute duplicado '%s'" #, python-format msgid "Duplicate items in the list: '%s'" msgstr "Itens duplicados na lista: '%s'" #, python-format msgid "Duplicate nameserver '%s'" msgstr "Servidor de nomes duplicado '%s'" msgid "Duplicate segment entry in request." msgstr "Entrada de segmento duplicada na solicitação." #, python-format msgid "ERROR: %s" msgstr "ERRO: %s" msgid "" "ERROR: Unable to find configuration file via the default search paths (~/." "neutron/, ~/, /etc/neutron/, /etc/) and the '--config-file' option!" msgstr "" "ERRO: Não é possível localizar o arquivo de configuração através dos " "caminhos de procura padrão (~/.neutron/, ~/, /etc/neutron/, /etc/) e a opção " "'--config-file'!" msgid "" "Either one of parameter network_id or router_id must be passed to _get_ports " "method." msgstr "" "Um dos parâmetros network_id ou router_id deve ser transmitido para o método " "_get_ports." msgid "Either subnet_id or port_id must be specified" msgstr "subnet_id ou port_id deve ser especificado" msgid "Empty physical network name." msgstr "Nome da rede física vazio." msgid "Empty subnet pool prefix list." msgstr "Lista do prefixo do conjunto de sub-rede vazia." msgid "Enable FWaaS" msgstr "Habilitar FWaaS" msgid "Enable HA mode for virtual routers." 
msgstr "Ative o modo de HA para roteadores virtuais." msgid "Enable SSL on the API server" msgstr "Habilite SSL no servidor de API" msgid "" "Enable VXLAN on the agent. Can be enabled when agent is managed by ml2 " "plugin using linuxbridge mechanism driver" msgstr "" "Ative o VXLAN no agente. Pode ser ativado quando o agente é gerenciado pelo " "plug-in ml2 usando o driver do mecanismo linuxbridge" msgid "" "Enable local ARP responder if it is supported. Requires OVS 2.1 and ML2 " "l2population driver. Allows the switch (when supporting an overlay) to " "respond to an ARP request locally without performing a costly ARP broadcast " "into the overlay." msgstr "" "Ative respondente ARP local se ele for suportado. Requer OVS 2.1 e o driver " "ML2 l2population. Permite que o comutador (ao suportar uma sobreposição) " "responda a uma solicitação de ARP localmente sem executar uma transmissão " "dispendiosa de ARP na sobreposição." msgid "" "Enable local ARP responder which provides local responses instead of " "performing ARP broadcast into the overlay. Enabling local ARP responder is " "not fullycompatible with the allowed-address-pairs extension." msgstr "" "Ative o respondente ARP local que fornece respostas locais em vez de " "executar a transmissão ARP na sobreposição. A ativação do respondente ARP " "local não é totalmente compatível com a extensão allowed-address-pairs. " msgid "" "Enable services on an agent with admin_state_up False. If this option is " "False, when admin_state_up of an agent is turned False, services on it will " "be disabled. Agents with admin_state_up False are not selected for automatic " "scheduling regardless of this option. But manual scheduling to such agents " "is available if this option is True." msgstr "" "Ativar os serviços em um agente com admin_state_up False. Se essa opção for " "False, quando admin_state_up de um agente tornar-se False, os serviços nele " "serão desativados. Os agentes com admin_state_up False não são selecionados " "para planejamento automático, independentemente dessa opção. Mas o " "planejamento manual para tais agentes estará disponível se essa opção for " "True." msgid "" "Enable suppression of ARP responses that don't match an IP address that " "belongs to the port from which they originate. Note: This prevents the VMs " "attached to this agent from spoofing, it doesn't protect them from other " "devices which have the capability to spoof (e.g. bare metal or VMs attached " "to agents without this flag set to True). Spoofing rules will not be added " "to any ports that have port security disabled. For LinuxBridge, this " "requires ebtables. For OVS, it requires a version that supports matching ARP " "headers. This option will be removed in Newton so the only way to disable " "protection will be via the port security extension." msgstr "" "Ative a supressão de respostas ARP que não correspondem a um endereço IP que " "pertence à porta da qual elas se originam. Nota: Isso evita que as MVs " "conectadas a esse agente realizem spoof, mas não as protegem de outros " "dispositivos que tiverem a capacidade de realizar spoof (por exemplo, bare " "metal ou MVs conectadas a agentes sem essa sinalização configurada para " "True). Regras de spoofing não serão incluídas em nenhuma porta que tenha a " "segurança de porta desativada. Para LinuxBridge, isso requer ebtables. Para " "OVS, requer uma versão que suporte cabeçalhos de ARP correspondentes. 
Essa " "opção será removida do Newton, de modo que a única maneira de desativar a " "proteção será por meio da extensão de segurança da porta. " msgid "" "Enable/Disable log watch by metadata proxy. It should be disabled when " "metadata_proxy_user/group is not allowed to read/write its log file and " "copytruncate logrotate option must be used if logrotate is enabled on " "metadata proxy log files. Option default value is deduced from " "metadata_proxy_user: watch log is enabled if metadata_proxy_user is agent " "effective user id/name." msgstr "" "Ativar/desativar a inspeção do log por proxy de metadados. Deve ser " "desativada quando metadata_proxy_user/group não tiver permissão para ler/" "gravar seu arquivo de log e a opção copytruncate logrotate deverá ser usada " "se logrotate for ativado nos arquivos de log de proxy de metadados. O valor " "padrão da opção é deduzido de metadata_proxy_user: o log de inspeção será " "ativado se metadata_proxy_user for o ID/nome do usuário efetivo do agente." msgid "" "Enables IPv6 Prefix Delegation for automatic subnet CIDR allocation. Set to " "True to enable IPv6 Prefix Delegation for subnet allocation in a PD-capable " "environment. Users making subnet creation requests for IPv6 subnets without " "providing a CIDR or subnetpool ID will be given a CIDR via the Prefix " "Delegation mechanism. Note that enabling PD will override the behavior of " "the default IPv6 subnetpool." msgstr "" "Ativa o IPv6 Prefix Delegation para alocação automática de CIDR de sub-rede. " "Configure para True para ativar o IPv6 Prefix Delegation para alocação de " "sub-rede em um ambiente apto para PD. Os usuários que fazem solicitações de " "criação de sub-rede para sub-redes IPv6 sem fornecer um CIDR ou um ID de " "conjunto de sub-redes receberão um CIDR por meio do mecanismo Prefix " "Delegation. Observe que a ativação do PD substitui o comportamento do " "conjunto de sub-redes IPv6 padrão. " msgid "" "Enables the dnsmasq service to provide name resolution for instances via DNS " "resolvers on the host running the DHCP agent. Effectively removes the '--no-" "resolv' option from the dnsmasq process arguments. Adding custom DNS " "resolvers to the 'dnsmasq_dns_servers' option disables this feature." msgstr "" "Permite que o serviço dnsmasq forneça resolução de nome para instâncias por " "meio dos resolvedores de DNS no host que executa o agente DHCP. Remove " "efetivamente a opção '--no-resolv' dos argumentos do processo dnsmasq. A " "inclusão dos resolvedores de DNS customizados na opção " "'dnsmasq_dns_servers' desativa esse recurso." msgid "Encountered an empty component." msgstr "Foi encontrado um componente vazio." msgid "End of VLAN range is less than start of VLAN range" msgstr "O final da faixa de VLAN é menor que o início da faixa de VLAN" msgid "End of tunnel range is less than start of tunnel range" msgstr "" "O término do intervalo do túnel é inferior ao início do intervalo do túnel" msgid "Enforce using split branches file structure." msgstr "Impingir usando a estrutura do arquivo de ramificações divididas." msgid "" "Ensure that configured gateway is on subnet. For IPv6, validate only if " "gateway is not a link local address. Deprecated, to be removed during the " "Newton release, at which point the gateway will not be forced on to subnet." msgstr "" "Assegure-se de que o gateway configurado esteja na sub-rede. Para IPv6, " "valide apenas se o gateway não for um endereço local do link. 
Descontinuado, " "será removido durante a liberação Newton, momento em que o gateway não será " "mais forçado na sub-rede." #, python-format msgid "Error %(reason)s while attempting the operation." msgstr "Erro %(reason)s ao tentar a operação." #, python-format msgid "Error importing FWaaS device driver: %s" msgstr "Erro ao importar o driver de dispositivo FWaaS: %s" #, python-format msgid "Error parsing dns address %s" msgstr "Erro ao analisar endereço dns %s" #, python-format msgid "Error while reading %s" msgstr "Erro ao ler %s" #, python-format msgid "" "Exceeded %s second limit waiting for address to leave the tentative state." msgstr "" "Excedido limite de %s segundos ao aguardar o endereço sair do estado de " "tentativa." msgid "Exceeded maximum amount of fixed ips per port." msgstr "Quantidade máxima de IPs fixos por porta excedida." msgid "Existing prefixes must be a subset of the new prefixes" msgstr "Prefixos existentes devem ser um subconjunto dos novos prefixos" #, python-format msgid "" "Exit code: %(returncode)d; Stdin: %(stdin)s; Stdout: %(stdout)s; Stderr: " "%(stderr)s" msgstr "" "Código de saída: %(returncode)d; Stdin: %(stdin)s; Stdout: %(stdout)s; " "Stderr: %(stderr)s" #, python-format msgid "Extension %(driver)s failed." msgstr "Extensão %(driver)s com falha." #, python-format msgid "" "Extension driver %(driver)s required for service plugin %(service_plugin)s " "not found." msgstr "" "Driver da extensão %(driver)s necessário para o plug-in de serviço " "%(service_plugin)s não localizado." msgid "" "Extension to use alongside ml2 plugin's l2population mechanism driver. It " "enables the plugin to populate VXLAN forwarding table." msgstr "" "Extensão a ser usada ao lado do driver do mecanismo l2population do plug-in " "ml2. Ela permite que o plug-in preencha a tabela de encaminhamento de VXLAN." #, python-format msgid "Extension with alias %s does not exist" msgstr "A extensão com o alias %s não existe" msgid "Extensions list to use" msgstr "Lista de extensões a serem usadas" #, python-format msgid "Extensions not found: %(extensions)s." msgstr "Extensões não localizadas: %(extensions)s." #, python-format msgid "External DNS driver %(driver)s could not be found." msgstr "O driver DNS externo %(driver)s não pôde ser localizado." #, python-format msgid "External IP %s is the same as the gateway IP" msgstr "O IP externo %s é o mesmo que o IP de gateway" #, python-format msgid "" "External network %(external_network_id)s is not reachable from subnet " "%(subnet_id)s. Therefore, cannot associate Port %(port_id)s with a Floating " "IP." msgstr "" "A rede externa %(external_network_id)s não é acessível a partir da sub-rede " "%(subnet_id)s. Portanto, não é possível associar a porta %(port_id)s a um IP " "Flutuante." #, python-format msgid "" "External network %(net_id)s cannot be updated to be made non-external, since " "it has existing gateway ports" msgstr "" "A rede externa %(net_id)s não pode ser atualizada para tornar-se não " "externa, pois ela possui portas de gateway existentes" #, python-format msgid "ExtraDhcpOpt %(id)s could not be found" msgstr "ExtraDhcpOpt %(id)s não pôde ser encontrado" msgid "" "FWaaS plugin is configured in the server side, but FWaaS is disabled in L3-" "agent." msgstr "" "O plug-in FWaaS está configurado no lado do servidor, mas o FWaaS está " "desativado no L3-agent." #, python-format msgid "Failed rescheduling router %(router_id)s: no eligible l3 agent found."
msgstr "" "Falha ao reagendar o roteador %(router_id)s: nenhum agente l3 elegível " "encontrado." #, python-format msgid "Failed scheduling router %(router_id)s to the L3 Agent %(agent_id)s." msgstr "" "Falha ao planejar o roteador %(router_id)s para o Agente L3 %(agent_id)s." #, python-format msgid "" "Failed to allocate a VRID in the network %(network_id)s for the router " "%(router_id)s after %(max_tries)s tries." msgstr "" "Falha ao alocar um VRID na rede %(network_id)s para o roteador %(router_id)s " "após %(max_tries)s tentativas." #, python-format msgid "Failed to allocate subnet: %(reason)s." msgstr "Falha ao alocar a sub-rede: %(reason)s." msgid "" "Failed to associate address scope: subnetpools within an address scope must " "have unique prefixes." msgstr "" "Falha ao associar o escopo de endereço: Os conjuntos de sub-redes dentro de " "um escopo de endereço devem possui prefixos exclusivos." #, python-format msgid "Failed to check policy %(policy)s because %(reason)s." msgstr "Falha ao verificar a política %(policy)s devido a %(reason)s." #, python-format msgid "" "Failed to create a duplicate %(object_type)s: for attribute(s) " "%(attributes)s with value(s) %(values)s" msgstr "" "Falha ao criar um %(object_type)s duplicado: para os atributos " "%(attributes)s com os valores %(values)s" #, python-format msgid "" "Failed to create port on network %(network_id)s, because fixed_ips included " "invalid subnet %(subnet_id)s" msgstr "" "Falha ao criar a porta na rede %(network_id)s porque fixed_ips incluía uma " "sub-rede inválida %(subnet_id)s" #, python-format msgid "Failed to init policy %(policy)s because %(reason)s." msgstr "Falha ao iniciar a política %(policy)s devido a %(reason)s." #, python-format msgid "Failed to locate source for %s." msgstr "Falha ao localizar origem para %s." #, python-format msgid "Failed to parse request. Parameter '%s' not specified" msgstr "Falha ao analisar a solicitação. Parâmetro '%s' não especificado" #, python-format msgid "Failed to parse request. Required attribute '%s' not specified" msgstr "" "Falha ao analisar solicitação. Atributo necessário '%s' não especificado" msgid "Failed to remove supplemental groups" msgstr "Falha ao remover grupos suplementares" #, python-format msgid "Failed to set gid %s" msgstr "Falha ao configurar gid %s" #, python-format msgid "Failed to set uid %s" msgstr "Falha ao configurar uid %s" #, python-format msgid "Failed to set-up %(type)s tunnel port to %(ip)s" msgstr "Falha ao configurar porta do túnel %(type)s para %(ip)s" msgid "Failure applying iptables rules" msgstr "Falha ao aplicar regras do iptables" #, python-format msgid "Failure waiting for address %(address)s to become ready: %(reason)s" msgstr "Falha ao aguardar o endereço %(address)s ficar pronto: %(reason)s" msgid "Flat provider networks are disabled" msgstr "Redes de provedor simples são desativadas." #, python-format msgid "Flavor %(flavor_id)s could not be found." msgstr "O tipo %(flavor_id)s não pôde ser localizado." #, python-format msgid "Flavor %(flavor_id)s is used by some service instance." msgstr "O tipo %(flavor_id)s é usado por alguma instância de serviço. " msgid "Flavor is not enabled." msgstr "O tipo não está ativado. " #, python-format msgid "Floating IP %(floatingip_id)s could not be found" msgstr "O IP flutuante %(floatingip_id)s não pôde ser localizado" #, python-format msgid "" "Floating IP %(floatingip_id)s is associated with non-IPv4 address " "%s(internal_ip)s and therefore cannot be bound." 
msgstr "" "O IP flutuante %(floatingip_id)s está associado ao endereço não IPv4 " "%s(internal_ip)s e, portanto, não pode ser ligada." msgid "For TCP/UDP protocols, port_range_min must be <= port_range_max" msgstr "" "Para protocolos TCP/UDP, port_range_min deve ser menor ou igual ao " "port_range_max" #, python-format msgid "For class %(object_type)s missing primary keys: %(missing_keys)s" msgstr "" "Para a classe %(object_type)s, chaves primárias ausentes: %(missing_keys)s" msgid "Force ip_lib calls to use the root helper" msgstr "Força chamadas ip_lib para utilizar o auxiliar raiz" #, python-format msgid "Found duplicate extension: %(alias)s." msgstr "Localizada extensão duplicada: %(alias)s." #, python-format msgid "" "Found overlapping allocation pools: %(pool_1)s %(pool_2)s for subnet " "%(subnet_cidr)s." msgstr "" "Conjuntos de alocação de sobreposição localizados:%(pool_1)s %(pool_2)s para " "a sub-rede %(subnet_cidr)s." msgid "Gateway IP version inconsistent with allocation pool version" msgstr "" "Versão de IP do gateway inconsistente com a versão do conjunto de alocações." #, python-format msgid "" "Gateway cannot be updated for router %(router_id)s, since a gateway to " "external network %(net_id)s is required by one or more floating IPs." msgstr "" "O gateway não pode ser atualizado para o roteador %(router_id)s porque um " "gateway para rede externa %(net_id)s é necessário por um ou mais IPs " "flutuantes." #, python-format msgid "Gateway ip %(ip_address)s conflicts with allocation pool %(pool)s." msgstr "" "O IP de gateway %(ip_address)s está em conflito com o conjunto de alocações " "%(pool)s." msgid "Gateway is not valid on subnet" msgstr "O gateway não é válido na sub-rede" msgid "" "Geneve encapsulation header size is dynamic, this value is used to calculate " "the maximum MTU for the driver. This is the sum of the sizes of the outer " "ETH + IP + UDP + GENEVE header sizes. The default size for this field is 50, " "which is the size of the Geneve header without any additional option headers." msgstr "" "O tamanho do cabeçalho de encapsulação Geneve é dinâmico, e esse valor é " "usado para calcular o MTU máximo para o driver. Essa é a soma dos tamanhos " "de cabeçalhos ETH + IP + UDP + GENEVE externos. O tamanho padrão para esse " "campo é 50, que é o tamanho do cabeçalho Geneve sem nenhum cabeçalho de " "opção adicional. " msgid "Group (gid or name) running metadata proxy after its initialization" msgstr "" "O grupo (gid ou nome) que executa o proxy de metadados após sua inicialização" msgid "" "Group (gid or name) running metadata proxy after its initialization (if " "empty: agent effective group)." msgstr "" "Grupo (gid ou nome) executando proxy de metadados após sua inicialização (se " "vazio: grupo efetivo do agente)." msgid "Group (gid or name) running this process after its initialization" msgstr "Grupo (gid ou nome) executando esse processo após sua inicialização" #, python-format msgid "HEAD file does not match migration timeline head, expected: %s" msgstr "" "O arquivo HEAD não corresponde ao cabeçalho da linha de tempo de migração, " "esperado: %s" msgid "" "Hostname to be used by the Neutron server, agents and services running on " "this machine. All the agents and services running on this machine must use " "the same host value." msgstr "" "O nome do host a ser usado pelo servidor, agentes e serviços do Neutron em " "execução nesta máquina. Todos os agentes e serviços em execução nesta " "máquina devem usar o mesmo valor do host." 
msgid "How many times Neutron will retry MAC generation" msgstr "Quantas vezes o Neutron tentará novamente a geração MAC" #, python-format msgid "" "ICMP code (port-range-max) %(value)s is provided but ICMP type (port-range-" "min) is missing." msgstr "" "O código do ICMP (port-range-max) %(value)s foi fornecido, mas o tipo do " "ICMP (port-range-min) está ausente." msgid "ID of network" msgstr "ID da rede" msgid "ID of network to probe" msgstr "ID da rede para análise" msgid "ID of probe port to delete" msgstr "ID da porta da análise a ser excluída" msgid "ID of probe port to execute command" msgstr "ID da porta da análise para executar comando" msgid "ID of the router" msgstr "ID do roteador" #, python-format msgid "IP address %(ip)s already allocated in subnet %(subnet_id)s" msgstr "Endereço IP %(ip)s já alocado na sub-rede %(subnet_id)s" #, python-format msgid "IP address %(ip)s does not belong to subnet %(subnet_id)s" msgstr "O endereço IP %(ip)s não pertence à sub-rede %(subnet_id)s" #, python-format msgid "" "IP address %(ip_address)s is not a valid IP for any of the subnets on the " "specified network." msgstr "" "O endereço IP %(ip_address)s não é um IP válido para nenhuma das sub-redes " "na rede especificada." msgid "IP address used by Nova metadata server." msgstr "Endereço IP usado pelo servidor de metadados Nova." msgid "IP allocation failed. Try again later." msgstr "A alocação de IP falhou. Tente novamente mais tarde" msgid "IP allocation requires subnet_id or ip_address" msgstr "A alocação de IP requer subnet_id ou ip_address" #, python-format msgid "" "IPTablesManager.apply failed to apply the following set of iptables rules:\n" "%s" msgstr "" "IPTablesManager.apply falhou ao aplicar o seguinte conjunto de regras de " "tabelas de IP: \n" "%s" msgid "IPtables conntrack zones exhausted, iptables rules cannot be applied." msgstr "" "Zonas de IPtables conntrack esgotadas; as regras de iptables não podem ser " "aplicadas." msgid "IPv6 Address Mode must be SLAAC or Stateless for Prefix Delegation." msgstr "" "O Modo de endereço IPv6 deve ser SLAAC ou Stateless para delegação de " "prefixo." msgid "IPv6 RA Mode must be SLAAC or Stateless for Prefix Delegation." msgstr "O modo IPv6 RA deve ser SLAAC ou Stateless para delegação de prefixo." #, python-format msgid "" "IPv6 address %(address)s can not be directly assigned to a port on subnet " "%(id)s since the subnet is configured for automatic addresses" msgstr "" "O endereço IPv6 %(address)s não pode ser designado diretamente a uma porta " "na sub-rede %(id)s porque a sub-rede é configurada para endereços automáticos" #, python-format msgid "" "IPv6 address %(ip)s cannot be directly assigned to a port on subnet " "%(subnet_id)s as the subnet is configured for automatic addresses" msgstr "" "O endereço IPv6 %(ip)s não pode ser designado diretamente a uma porta na sub-" "rede %(subnet_id)s porque a sub-rede está configurada para endereços " "automáticos" #, python-format msgid "" "IPv6 subnet %s configured to receive RAs from an external router cannot be " "added to Neutron Router." msgstr "" "A sub-rede IPv6 %s configurada para receber RAs de um roteador externo não " "pode ser incluída ao Neutron Router." msgid "" "If True, advertise network MTU values if core plugin calculates them. MTU is " "advertised to running instances via DHCP and RA MTU options." msgstr "" "Se True, informará valores de MTU de rede somente se o plug-in principal os " "calcular. O MTU é informado a executar instâncias por meio das opções DHCP e " "MTU RA." 
msgid "" "If True, then allow plugins that support it to create VLAN transparent " "networks." msgstr "" "Se True, então permita que plug-ins que o suportam criem redes transparentes " "da VLAN." msgid "" "If non-empty, the l3 agent can only configure a router that has the matching " "router ID." msgstr "" "Se não estiver vazio, o agente l3 poderá configurar apenas um roteador que " "tenha o ID de roteador correspondente." msgid "Illegal IP version number" msgstr "Número de versão de IP ilegal" #, python-format msgid "" "Illegal prefix bounds: %(prefix_type)s=%(prefixlen)s, %(base_prefix_type)s=" "%(base_prefixlen)s." msgstr "" "Limites de prefixo ilegal: %(prefix_type)s=%(prefixlen)s, " "%(base_prefix_type)s=%(base_prefixlen)s." #, python-format msgid "" "Illegal subnetpool association: subnetpool %(subnetpool_id)s cannot " "associate with address scope %(address_scope_id)s because subnetpool " "ip_version is not %(ip_version)s." msgstr "" "Associação do conjunto de sub-redes ilegal: O conjunto de sub-redes " "%(subnetpool_id)s não pode ser associado ao escopo de endereço " "%(address_scope_id)s porque a ip_version do conjunto de sub-redes não é " "%(ip_version)s." #, python-format msgid "" "Illegal subnetpool association: subnetpool %(subnetpool_id)s cannot be " "associated with address scope %(address_scope_id)s." msgstr "" "Associação do conjunto de sub-redes ilegal: O conjunto de sub-redes " "%(subnetpool_id)s não pode ser associado ao escopo de endereço " "%(address_scope_id)s." #, python-format msgid "Illegal subnetpool update : %(reason)s." msgstr "Atualização ilegal do conjunto de sub-redes: %(reason)s." #, python-format msgid "Illegal update to prefixes: %(msg)s." msgstr "Atualização ilegal para prefixos: %(msg)s." msgid "" "In some cases the Neutron router is not present to provide the metadata IP " "but the DHCP server can be used to provide this info. Setting this value " "will force the DHCP server to append specific host routes to the DHCP " "request. If this option is set, then the metadata service will be activated " "for all the networks." msgstr "" "Em alguns casos, o roteador Neutron não está presente para fornecer o IP de " "metadados, mas o servidor DHCP poderá ser usado para fornecer essas " "informações. A configuração desse valor forçará o servidor DHCP a anexar " "rotas de host específicas à solicitação DHCP. Se essa opção for configurada, " "o serviço de metadados será ativado para todas as redes." #, python-format msgid "Incorrect pci_vendor_info: \"%s\", should be pair vendor_id:product_id" msgstr "pci_vendor_info incorreto: \"%s\", deve ser o par vendor_id:product_id" msgid "" "Indicates that this L3 agent should also handle routers that do not have an " "external network gateway configured. This option should be True only for a " "single agent in a Neutron deployment, and may be False for all agents if all " "routers must have an external network gateway." msgstr "" "Indica que esse agente L3 também deve manipular roteadores que não possuírem " "um gateway de rede externo configurado. Essa opção deverá ser True somente " "para um agente único em uma implementação Neutron, e poderá ser False para " "todos os agentes se todos os roteadores tiverem um gateway de rede externo. 
" #, python-format msgid "Instance of class %(module)s.%(class)s must contain _cache attribute" msgstr "" "A instância da classe %(module)s.%(class)s deve conter o atributo _cache" #, python-format msgid "Insufficient prefix space to allocate subnet size /%s" msgstr "Espaço de prefixo insuficiente para alocar o tamanho da sub-rede /%s" msgid "Insufficient rights for removing default security group." msgstr "Direitos insuficientes para remover o grupo de segurança padrão." msgid "" "Integration bridge to use. Do not change this parameter unless you have a " "good reason to. This is the name of the OVS integration bridge. There is one " "per hypervisor. The integration bridge acts as a virtual 'patch bay'. All VM " "VIFs are attached to this bridge and then 'patched' according to their " "network connectivity." msgstr "" "Ponte de integração a ser utilizada. Não altere esse parâmetro, a menos que " "haja uma boa razão para isso. Esse é o nome da ponte de integração do OVS. " "Há uma por hypervisor. A ponte de integração atua como um 'compartimento de " "correção' virtual. Todos os VIFs da MV são conectados a essa ponte e, em " "seguida, 'corrigidos' de acordo com sua conectividade de rede." msgid "Interface to monitor" msgstr "Interface para monitorar" msgid "" "Interval between checks of child process liveness (seconds), use 0 to disable" msgstr "" "Intervalo entre verificações de atividade de um processo-filho (segundos), " "use 0 para desativar." msgid "Interval between two metering measures" msgstr "Intervalo entre duas medições" msgid "Interval between two metering reports" msgstr "Intervalo entre dois relatórios de medição" #, python-format msgid "Invalid CIDR %(input)s given as IP prefix." msgstr "CIDR inválido %(input)s determinado como prefixo do IP." #, python-format msgid "" "Invalid CIDR %s for IPv6 address mode. OpenStack uses the EUI-64 address " "format, which requires the prefix to be /64." msgstr "" "CIDR inválido %s para o modo de endereço IPv6. O OpenStack usa o formato de " "endereço EUI-64, que requer que o prefixo seja /64." #, python-format msgid "Invalid Device %(dev_name)s: %(reason)s" msgstr "Dispositivo Inválido %(dev_name)s:%(reason)s" #, python-format msgid "" "Invalid action '%(action)s' for object type '%(object_type)s'. Valid " "actions: %(valid_actions)s" msgstr "" "Ação inválida '%(action)s' para o tipo de objeto '%(object_type)s'. Ações " "válidas: %(valid_actions)s" #, python-format msgid "" "Invalid authentication type: %(auth_type)s, valid types are: " "%(valid_auth_types)s" msgstr "" "Tipo de autenticação inválido: %(auth_type)s, os tipos válidos são: " "%(valid_auth_types)s" #, python-format msgid "Invalid content type %(content_type)s." msgstr "Tipo de conteúdo %(content_type)s inválido." #, python-format msgid "Invalid data format for IP pool: '%s'" msgstr "Formato de dados inválido para o pool de IPs: '%s'" #, python-format msgid "Invalid data format for extra-dhcp-opt: %(data)s" msgstr "Formato de dados inválido para extra-dhcp-opt: %(data)s" #, python-format msgid "Invalid data format for fixed IP: '%s'" msgstr "Formato de dados inválido para o IP fixo: '%s'" #, python-format msgid "Invalid data format for hostroute: '%s'" msgstr "Formato de dados inválido para hostroute: '%s'" #, python-format msgid "Invalid data format for nameserver: '%s'" msgstr "Formato de dados inválido para servidor de nomes: '%s'" #, python-format msgid "Invalid ethertype %(ethertype)s for protocol %(protocol)s." 
msgstr "Ethertype %(ethertype)s inválido para o protocolo %(protocol)s." #, python-format msgid "Invalid extension environment: %(reason)s." msgstr "Ambiente de extensão inválido: %(reason)s." #, python-format msgid "Invalid format for routes: %(routes)s, %(reason)s" msgstr "Formato inválido para rotas: %(routes)s, %(reason)s" #, python-format msgid "Invalid format: %s" msgstr "Formato inválido: %s" #, python-format msgid "Invalid input for %(attr)s. Reason: %(reason)s." msgstr "Entrada inválida para %(attr)s. Motivo: %(reason)s." #, python-format msgid "" "Invalid input. '%(target_dict)s' must be a dictionary with keys: " "%(expected_keys)s" msgstr "" "Entrada inválida. '%(target_dict)s' deve ser um dicionário com chaves: " "%(expected_keys)s" #, python-format msgid "Invalid instance state: %(state)s, valid states are: %(valid_states)s" msgstr "" "Estado da instância inválido: %(state)s, os estados válidos são: " "%(valid_states)s" #, python-format msgid "Invalid mapping: '%s'" msgstr "Mapeamento inválido: '%s'" #, python-format msgid "Invalid network VLAN range: '%(vlan_range)s' - '%(error)s'." msgstr "Intervalo de VLAN de rede inválido: '%(vlan_range)s' - '%(error)s'." #, python-format msgid "Invalid network VXLAN port range: '%(vxlan_range)s'." msgstr "Intervalo de portas de VXLAN de rede inválido: '%(vxlan_range)s'." #, python-format msgid "Invalid pci slot %(pci_slot)s" msgstr "Slot pci inválido %(pci_slot)s" #, python-format msgid "Invalid provider format. Last part should be 'default' or empty: %s" msgstr "" "Formato de provedor inválido. A última parte deve ser 'default' ou vazia: %s" #, python-format msgid "Invalid resource type %(resource_type)s" msgstr "Tipo de recurso inválido %(resource_type)s" #, python-format msgid "Invalid route: %s" msgstr "Rota inválida: %s" msgid "Invalid service provider format" msgstr "Formato inválido de provedor de serviços" #, python-format msgid "Invalid service type %(service_type)s." msgstr "Tipo de serviço inválido %(service_type)s." #, python-format msgid "" "Invalid value for ICMP %(field)s (%(attr)s) %(value)s. It must be 0 to 255." msgstr "" "Valor inválido para ICMP %(field)s (%(attr)s) %(value)s. Deve ser de 0 a 255." #, python-format msgid "Invalid value for port %(port)s" msgstr "Valor inválido para a porta %(port)s" msgid "" "Iptables mangle mark used to mark ingress from external network. This mark " "will be masked with 0xffff so that only the lower 16 bits will be used." msgstr "" "Marca de tratamento de Iptables usada para marcar ingresso de rede externa. " "Essa marca será mascarada com 0xffff, de modo que apenas os 16 bits " "inferiores serão usados." msgid "" "Iptables mangle mark used to mark metadata valid requests. This mark will be " "masked with 0xffff so that only the lower 16 bits will be used." msgstr "" "Marca de tratamento de Iptables usada para marcar solicitações válidas de " "metadados. Essa marca será mascarada com 0xffff, de modo que apenas os 16 " "bits inferiores serão usados." msgid "" "Keep in track in the database of current resourcequota usage. Plugins which " "do not leverage the neutron database should set this flag to False" msgstr "" "Mantenha o controle no banco de dados do uso de cota de recursos atual. 
Plug-" "ins que não utilizam o banco de dados Neutron devem configurar essa " "sinalização como False" msgid "Keepalived didn't respawn" msgstr "Keepalived não sofreu spawn novamente" msgid "Keepalived didn't spawn" msgstr "Keepalived não sofreu spawn" #, python-format msgid "" "Kernel HZ value %(value)s is not valid. This value must be greater than 0." msgstr "" "Valor do Kernel HZ %(value)s não é válido. Esse valor deve ser maior que 0." #, python-format msgid "Key %(key)s in mapping: '%(mapping)s' not unique" msgstr "Chave %(key)s no mapeamento: '%(mapping)s' não exclusivo" msgid "L3 agent failure to setup NAT for floating IPs" msgstr "Falha do agente L3 ao configurar o NAT para IPs flutuantes" msgid "L3 agent failure to setup floating IPs" msgstr "Falha do agente L3 ao configurar IPs flutuantes" #, python-format msgid "Limit must be an integer 0 or greater and not '%d'" msgstr "O limite deve ser um número inteiro de 0 ou superior e não '%d'" msgid "Limit number of leases to prevent a denial-of-service." msgstr "Limitar o número de concessões para impedir uma negação de serviço." msgid "List of :" msgstr "Lista de :" msgid "" "List of :: or " "specifying physical_network names usable for VLAN provider and tenant " "networks, as well as ranges of VLAN tags on each available for allocation to " "tenant networks." msgstr "" "Lista de :: ou " "especificando nomes physical_network utilizáveis para provedores VLAN e " "redes de locatário, bem como faixas de tags de VLAN em cada um disponível " "para alocação pelas redes de locatário." msgid "" "List of network type driver entrypoints to be loaded from the neutron.ml2." "type_drivers namespace." msgstr "" "Lista de pontos de entrada do driver de tipo de rede a serem carregados a " "partir do namespace namespace neutron.ml2.type_drivers." msgid "" "List of physical_network names with which flat networks can be created. Use " "default '*' to allow flat networks with arbitrary physical_network names. " "Use an empty list to disable flat networks." msgstr "" "Lista de nomes de physical_network com os quais redes simples podem ser " "criadas. Utilize o padrão '*' para permitir redes simples com nomes " "physical_network arbitrários. Use uma lista vazia para desativar redes " "simples." msgid "Local IP address of the VXLAN endpoints." msgstr "Endereço IP local dos terminais VXLAN." msgid "Location for Metadata Proxy UNIX domain socket." msgstr "Local para soquete de domínio UNIX de Proxy de Metadados." msgid "Location of Metadata Proxy UNIX domain socket" msgstr "Local de soquete de domínio UNIX de Proxy de Metadados" msgid "Location of pid file of this process." msgstr "Local do arquivo pid deste processo." msgid "Location to store DHCP server config files." msgstr "Local para armazenar arquivos de configuração do servidor DHCP" msgid "Location to store IPv6 PD files." msgstr "Local para armazenar arquivos IPv6 PD." msgid "Location to store IPv6 RA config files" msgstr "Local para armazenar arquivos de configuração RA IPv6" msgid "Location to store child pid files" msgstr "Local para armazenar arquivos pid filhos" msgid "Location to store keepalived/conntrackd config files" msgstr "Local para armazenar os arquivos de configuração keepalived/conntrackd" msgid "Log agent heartbeats" msgstr "Registrar pulsações do agente" msgid "Loopback IP subnet is not supported if enable_dhcp is True." msgstr "A sub-rede de IP loopback não será suportada se enable_dhcp for True." 
msgid "MTU size of veth interfaces" msgstr "Tamanho MTU de interfaces veth" msgid "Make the l2 agent run in DVR mode." msgstr "Faça com que o agente l2 seja executado no modo DVR." msgid "Malformed request body" msgstr "Corpo da solicitação malformado" #, python-format msgid "Malformed request body: %(reason)s." msgstr "Corpo da solicitação malformado: %(reason)s." msgid "MaxRtrAdvInterval setting for radvd.conf" msgstr "Configuração de MaxRtrAdvInterval para o radvd.conf" msgid "Maximum number of DNS nameservers per subnet" msgstr "Número máximo de servidores de nomes DNS por sub-rede" msgid "" "Maximum number of L3 agents which a HA router will be scheduled on. If it is " "set to 0 then the router will be scheduled on every agent." msgstr "" "O número máximo de agentes L3 em que um roteador de HA será planejado. Se " "configurado para 0, o roteador será planejado em cada agente." msgid "Maximum number of allowed address pairs" msgstr "Número máximo de pares de endereço permitidos" msgid "" "Maximum number of fixed ips per port. This option is deprecated and will be " "removed in the N release." msgstr "" "Número máximo de IPs fixos por porta. Essa opção foi descontinuada e será " "removida na liberação N." msgid "Maximum number of host routes per subnet" msgstr "Número máximo de rotas do host por sub-rede" msgid "Maximum number of routes per router" msgstr "Número máximo de rotas por roteador" msgid "" "Metadata Proxy UNIX domain socket mode, 4 values allowed: 'deduce': deduce " "mode from metadata_proxy_user/group values, 'user': set metadata proxy " "socket mode to 0o644, to use when metadata_proxy_user is agent effective " "user or root, 'group': set metadata proxy socket mode to 0o664, to use when " "metadata_proxy_group is agent effective group or root, 'all': set metadata " "proxy socket mode to 0o666, to use otherwise." msgstr "" "Modo de soquete de domínio UNIX de proxy de metadados, 4 valores permitidos: " "'deduce': deduzir modo de valores de metadata_proxy_user/group, 'user': " "definir modo de soquete de proxy de metadados para 0o644, para uso quando " "metadata_proxy_user for usuário ou raiz de agente efetivo, 'group': definir " "modo de soquete de proxy de metadados para 0o664, para uso quando " "metadata_proxy_group for grupo ou raiz de agente efetivo, 'all': definir " "modo de soquete de proxy de metadados para 0o666, para uso de outra forma." msgid "Metering driver" msgstr "Driver de medição" #, python-format msgid "Metering label %(label_id)s does not exist" msgstr "O rótulo de marcação %(label_id)s não existe" #, python-format msgid "Metering label rule %(rule_id)s does not exist" msgstr "Uma regra para o rótulo de medição %(rule_id)s não existe" #, python-format msgid "" "Metering label rule with remote_ip_prefix %(remote_ip_prefix)s overlaps " "another" msgstr "" "A regra de medição com remote_ip_prefix %(remote_ip_prefix)s sobrepõe outra" msgid "Method cannot be called within a transaction." msgstr "O método não pode ser chamado dentro de uma transação." msgid "Migration from distributed router to centralized is not supported" msgstr "A migração do roteador distribuído para centralizado não é suportada" msgid "MinRtrAdvInterval setting for radvd.conf" msgstr "Configuração de MinRtrAdvInterval para o radvd.conf" msgid "Minimize polling by monitoring ovsdb for interface changes." msgstr "Minimizar pesquisa ao monitorar ovsdb para mudanças da interface." 
#, python-format msgid "Missing key in mapping: '%s'" msgstr "Chave ausente no mapeamento: '%s'" #, python-format msgid "Missing value in mapping: '%s'" msgstr "Valor ausente no mapeamento: '%s'" msgid "Multicast IP subnet is not supported if enable_dhcp is True." msgstr "A sub-rede de IP multicast não será suportada se enable_dhcp for True." msgid "" "Multicast group for VXLAN. When configured, will enable sending all " "broadcast traffic to this multicast group. When left unconfigured, will " "disable multicast VXLAN mode." msgstr "" "O grupo multicast para VXLAN. Quando configurado, permitirá o envio de todo " "o tráfego de transmissão para esse grupo multicast. Quando desconfigurado, " "desativa o modo VXLAN multicast." msgid "" "Multicast group(s) for vxlan interface. A range of group addresses may be " "specified by using CIDR notation. Specifying a range allows different VNIs " "to use different group addresses, reducing or eliminating spurious broadcast " "traffic to the tunnel endpoints. To reserve a unique group for each possible " "(24-bit) VNI, use a /8 such as 239.0.0.0/8. This setting must be the same on " "all the agents." msgstr "" "Um ou mais grupos multicast para a interface VXLAN. Um intervalo de " "endereços de grupo pode ser especificado usando a notação CIDR. Especificar " "um intervalo permite que diferentes VNIs utilizem diferentes endereços de " "grupo, reduzindo ou eliminando tráfego de transmissão espúrio para os " "terminais do túnel. Para reservar um grupo exclusivo para cada VNI (24 bits) " "possível, use um /8 como 239.0.0.0/8. Essa configuração deve ser a mesma em " "todos os agentes." #, python-format msgid "Multiple agents with agent_type=%(agent_type)s and host=%(host)s found" msgstr "" "Vários agentes com agent_type=%(agent_type)s e host=%(host)s localizados" #, python-format msgid "Multiple default providers for service %s" msgstr "Múltiplos provedores padrão para o serviço %s" #, python-format msgid "Multiple plugins for service %s were configured" msgstr "Vários plug-ins para o serviço %s foram configurados" #, python-format msgid "Multiple providers specified for service %s" msgstr "Diversos provedores especificados para o serviço %s" msgid "Multiple tenant_ids in bulk security group rule create not allowed" msgstr "" "Vários tenant_ids na criação da regra do grupo de segurança em massa não " "permitidos" msgid "Must also specify protocol if port range is given." msgstr "" "Deve-se também especificar o protocolo se o intervalo de portas for " "fornecido." msgid "Must specify one or more actions on flow addition or modification" msgstr "Deve especificar uma ou mais ações na adição ou modificação do fluxo" #, python-format msgid "Name %(dns_name)s is duplicated in the external DNS service" msgstr "O nome %(dns_name)s está duplicado no serviço DNS externo." #, python-format msgid "" "Name '%s' must be 1-63 characters long, each of which can only be " "alphanumeric or a hyphen." msgstr "" "O nome '%s' deve ter de 1 a 63 caracteres de comprimento, cada um dos quais " "pode ser apenas um caractere alfanumérico ou um hífen." #, python-format msgid "Name '%s' must not start or end with a hyphen." msgstr "O nome '%s' não deve começar nem terminar com um hífen." msgid "Name of Open vSwitch bridge to use" msgstr "Nome da ponte Open vSwitch a ser usada" msgid "" "Name of nova region to use. Useful if keystone manages more than one region." msgstr "" "Nome da região do Nova para utilização. Útil se o keystone gerenciar mais de " "uma região."
msgid "Name of the FWaaS Driver" msgstr "Nome do driver FWaaS" msgid "Namespace of the router" msgstr "Namespace do roteador" msgid "Native pagination depend on native sorting" msgstr "A paginação nativa depende da classificação nativa" #, python-format msgid "" "Need to apply migrations from %(project)s contract branch. This will require " "all Neutron server instances to be shutdown before proceeding with the " "upgrade." msgstr "" "É necessário aplicar migrações a partir da ramificação de contrato " "%(project)s. Isso requer que todas as instâncias do servidor Neutron sejam " "encerradas antes de continuar com o upgrade." msgid "Negative delta (downgrade) not supported" msgstr "Delta negativo (downgrade) não suportado" msgid "Negative relative revision (downgrade) not supported" msgstr "Revisão relativa negativa (downgrade) não suportada" #, python-format msgid "" "Network %(network_id)s is already bound to BgpSpeaker %(bgp_speaker_id)s." msgstr "A rede %(network_id)s já está ligada ao BgpSpeaker %(bgp_speaker_id)s." #, python-format msgid "" "Network %(network_id)s is not associated with BGP speaker %(bgp_speaker_id)s." msgstr "" "A rede %(network_id)s não está associada ao speaker BGP %(bgp_speaker_id)s." #, python-format msgid "Network %(network_id)s is not bound to a BgpSpeaker." msgstr "A rede %(network_id)s não está ligada ao BgpSpeaker." #, python-format msgid "Network %(network_id)s is not bound to a IPv%(ip_version)s BgpSpeaker." msgstr "" "A rede %(network_id)s não está ligada a um IPv%(ip_version)s BgpSpeaker ." #, python-format msgid "Network %s does not contain any IPv4 subnet" msgstr "A rede %s não contém nenhuma sub-rede IPv4" #, python-format msgid "Network %s is not a valid external network" msgstr "A rede %s não é uma rede externa válida" #, python-format msgid "Network %s is not an external network" msgstr "A rede %s não é uma rede externa" #, python-format msgid "" "Network of size %(size)s, from IP range %(parent_range)s excluding IP ranges " "%(excluded_ranges)s was not found." msgstr "" "A rede de tamanho %(size)s, de intervalo de IP %(parent_range)s, exceto " "intervalos de IP %(excluded_ranges)s, não foi localizada." msgid "Network that will have instance metadata proxied." msgstr "Rede que terá os metadados da instância eme proxy." #, python-format msgid "Network type value '%s' not supported" msgstr "Valor do tipo de rede '%s' não suportado" msgid "Network type value needed by the ML2 plugin" msgstr "Valor de tipo de rede necessário pelo plug-in ML2" msgid "Network types supported by the agent (gre and/or vxlan)." msgstr "Tipos de rede suportados pelo agente (gre e/ou vxlan)." msgid "" "Neutron IPAM (IP address management) driver to use. If ipam_driver is not " "set (default behavior), no IPAM driver is used. In order to use the " "reference implementation of Neutron IPAM driver, use 'internal'." msgstr "" "O driver IPAM (gerenciamento de endereço IP) Neutron a ser usado. Se o " "ipam_driver não for configurado (o comportamento padrão), nenhum driver IPAM " "será utilizado. Para usar a implementação de referência do driver IPAM " "Neutro, use 'internal'." msgid "Neutron Service Type Management" msgstr "Gerenciamento do Tipo de Serviço Neuron" msgid "Neutron core_plugin not configured!" msgstr "core_plugin do Neutron não configurado!" msgid "Neutron plugin provider module" msgstr "Módulo do provedor de plug-in Neutron" msgid "Neutron quota driver class" msgstr "Classe do driver de cota Neutron" msgid "New value for first_ip or last_ip has to be specified." 
msgstr "Novo valor para first_ip ur last_ip deve ser especificado." msgid "No default router:external network" msgstr "Nenhuma rede router:external padrão" #, python-format msgid "No default subnetpool found for IPv%s" msgstr "Nenhum conjunto de sub-redes padrão localizado para IPv%s" msgid "No default subnetpools defined" msgstr "Nenhum conjunto de sub-redes padrão definido" #, python-format msgid "No eligible l3 agent associated with external network %s found" msgstr "Nenhum agente l3 elegível associado com a rede externa %s localizado" #, python-format msgid "No more IP addresses available for subnet %(subnet_id)s." msgstr "Nenhum outro endereço IP disponível para a sub-rede %(subnet_id)s." #, python-format msgid "" "No more Virtual Router Identifier (VRID) available when creating router " "%(router_id)s. The limit of number of HA Routers per tenant is 254." msgstr "" "Nenhum outro Identificador de Roteador Virtual (VRID) disponível ao criar o " "roteador %(router_id)s. O limite do número de Roteadores de HA por locatário " "é de 254." msgid "No offline migrations pending." msgstr "Nenhuma migração off-line pendente." #, python-format msgid "No providers specified for '%s' service, exiting" msgstr "Nenhum provedor especificado para o serviço '%s', saindo" #, python-format msgid "No shared key in %s fields" msgstr "Nenhuma chave compartilhada nos campos %s" msgid "Not allowed to manually assign a router to an agent in 'dvr' mode." msgstr "" "Não é permitido designar manualmente um roteador para um agente no modo " "'dvr'." msgid "Not allowed to manually remove a router from an agent in 'dvr' mode." msgstr "" "Não é permitido remover manualmente um roteador de um agente no modo 'dvr'." #, python-format msgid "" "Not enough l3 agents available to ensure HA. Minimum required " "%(min_agents)s, available %(num_agents)s." msgstr "" "Não há l3 agentes disponíveis suficientes para assegurar a alta " "disponibilidade. Mínimo necessário %(min_agents)s, disponível %(num_agents)s." msgid "" "Number of DHCP agents scheduled to host a tenant network. If this number is " "greater than 1, the scheduler automatically assigns multiple DHCP agents for " "a given tenant network, providing high availability for DHCP service." msgstr "" "Número de agentes DHCP planejados para hospedar uma rede de um locatário. Se " "esse número for maior que 1, o planejador designará automaticamente vários " "agentes DHCP para uma determinada rede locatária, fornecendo alta " "disponibilidade para o serviço DHCP." msgid "Number of RPC worker processes dedicated to state reports queue" msgstr "" "Número de processos do trabalhador RPC dedicados à fila de relatórios de " "estado." msgid "Number of RPC worker processes for service" msgstr "Número de processos do trabalhador RPC para o serviço" msgid "Number of backlog requests to configure the metadata server socket with" msgstr "" "Número de solicitações de lista não processada com as quais configurar o " "soquete do servidor de metadados " msgid "Number of backlog requests to configure the socket with" msgstr "" "Número de solicitações de lista não processada com o qual configurar o " "soquete" msgid "" "Number of bits in an ipv4 PTR zone that will be considered network prefix. " "It has to align to byte boundary. Minimum value is 8. Maximum value is 24. " "As a consequence, range of values is 8, 16 and 24" msgstr "" "Número de bits em uma zona PTR IPV4 que será considerada como um prefixo de " "rede. Ele deve estar alinhado ao limite de byte. O valor mínimo é 8. 
O valor " "máximo é 24. Consequentemente, um intervalo de valores é 8, 16 e 24." msgid "" "Number of bits in an ipv6 PTR zone that will be considered network prefix. " "It has to align to nyble boundary. Minimum value is 4. Maximum value is 124. " "As a consequence, range of values is 4, 8, 12, 16,..., 124" msgstr "" "Número de bits em uma zona PTR IPV6 que será considerada como um prefixo de " "rede. Ele deve estar alinhado ao limite nyble. O valor mínimo é 4. O valor " "máximo é 124. Consequentemente, um intervalo de valores é 4, 8, 12, " "16, ...., 124." msgid "" "Number of floating IPs allowed per tenant. A negative value means unlimited." msgstr "" "Número de IPs flutuantes permitido por locatário. Um valor negativo " "significa ilimitado." msgid "" "Number of networks allowed per tenant. A negative value means unlimited." msgstr "" "Número de redes permitidas por locatário. Um valor negativo significa " "ilimitado." msgid "Number of ports allowed per tenant. A negative value means unlimited." msgstr "" "Número de portas permitidas por locatário. Um valor negativo significa " "ilimitado." msgid "Number of routers allowed per tenant. A negative value means unlimited." msgstr "" "Número de roteadores permitidos por locatário. Um valor negativo significa " "ilimitado." msgid "" "Number of seconds between sending events to nova if there are any events to " "send." msgstr "" "Número de segundos entre o envio de eventos para o Nova se houver qualquer " "evento a enviar." msgid "Number of seconds to keep retrying to listen" msgstr "Número de segundos para continuar tentando atender novamente" msgid "" "Number of security groups allowed per tenant. A negative value means " "unlimited." msgstr "" "Número de grupos de segurança permitidos por locatário. Um valor negativo " "significa ilimitado." msgid "" "Number of security rules allowed per tenant. A negative value means " "unlimited." msgstr "" "Número de regras de segurança permitidas por locatário. Um valor negativo " "significa ilimitado." msgid "" "Number of separate API worker processes for service. If not specified, the " "default is equal to the number of CPUs available for best performance." msgstr "" "O número de processos do trabalhador API separados para o serviço. Se não " "for especificado, o padrão será igual ao número de CPUs disponíveis para " "melhor desempenho." msgid "" "Number of separate worker processes for metadata server (defaults to half of " "the number of CPUs)" msgstr "" "Número de processos do trabalhador separados para o servidor de metadados " "(padronizado para metade do número de CPUs)" msgid "Number of subnets allowed per tenant, A negative value means unlimited." msgstr "" "Número de sub-redes permitidas por locatário. Um valor negativo significa " "ilimitado." msgid "" "Number of threads to use during sync process. Should not exceed connection " "pool size configured on server." msgstr "" "Número de encadeamentos utilizados durante o processo de sincronização. Não " "devem exceder o tamanho do pool de conexões configurado no servidor." msgid "OK" msgstr "OK" msgid "" "OVS datapath to use. 'system' is the default value and corresponds to the " "kernel datapath. To enable the userspace datapath set this value to 'netdev'." msgstr "" "O caminho de dados do OVS a ser utilizado. 'system' é o valor padrão e " "corresponde ao caminho de dados do kernel. Para ativar o caminho de dados do " "espaço do usuário, configure esse valor para 'netdev'." msgid "OVS vhost-user socket directory." 
msgstr "O diretório de soquete do usuário vhost do OVS." #, python-format msgid "OVSDB Error: %s" msgstr "Erro de OVSDB: %s" #, python-format msgid "Object action %(action)s failed because: %(reason)s." msgstr "A ação do objeto %(action)s falhou porque: %(reason)s." msgid "Only admin can view or configure quota" msgstr "Somente o administrador pode visualizar ou configurar a cota" msgid "Only admin is authorized to access quotas for another tenant" msgstr "" "Somente o administrador está autorizado a acessar as cotas para outro " "locatário" msgid "Only admins can manipulate policies on networks they do not own." msgstr "" "Apenas administradores podem manipular políticas em redes que não " "pertencerem a eles." msgid "Only admins can manipulate policies on objects they do not own" msgstr "" "Apenas administradores podem manipular políticas em objetos que não " "pertencerem a eles." msgid "Only allowed to update rules for one security profile at a time" msgstr "Permitido apenas atualizar regras para um perfil de segurança por vez" msgid "Only remote_ip_prefix or remote_group_id may be provided." msgstr "Apenas remote_ip_prefix ou remote_group_id pode ser fornecido." msgid "OpenFlow interface to use." msgstr "Interface OpenFlow a ser usada." #, python-format msgid "" "Operation %(op)s is not supported for device_owner %(device_owner)s on port " "%(port_id)s." msgstr "" "A operação %(op)s não é suportada para device_owner %(device_owner)s na " "porta %(port_id)s." #, python-format msgid "Operation not supported on device %(dev_name)s" msgstr "Operação não suportada no dispositivo %(dev_name)s" msgid "" "Ordered list of network_types to allocate as tenant networks. The default " "value 'local' is useful for single-box testing but provides no connectivity " "between hosts." msgstr "" "Lista ordenada de network_types a serem alocados como redes locatárias. O " "valor padrão 'local' é útil para teste single-box, mas não fornece nenhuma " "conectividade entre os hosts." msgid "Override the default dnsmasq settings with this file." msgstr "Sobrescreva as configurações padrão de dnsmasq com este arquivo." msgid "Owner type of the device: network/compute" msgstr "Tipo de proprietário do dispositivo: rede/cálculo" msgid "POST requests are not supported on this resource." msgstr "Solicitações POST não são suportadas neste recurso." #, python-format msgid "Package %s not installed" msgstr "Pacote %s não instalado" #, python-format msgid "Parameter %(param)s must be of %(param_type)s type." msgstr "O parâmetro %(param)s deve ser do tipo %(param_type)s." #, python-format msgid "Parsing bridge_mappings failed: %s." msgstr "Falha na análise de bridge_mappings: %s." msgid "Parsing supported pci_vendor_devs failed" msgstr "A análise de pci_vendor_devs suportada falhou" msgid "Password for connecting to designate in admin context" msgstr "Senha para conexão com o designado no contexto de administrador" #, python-format msgid "Password not specified for authentication type=%(auth_type)s." msgstr "Senha não especificada para o tipo de autenticação=%(auth_type)s." msgid "Path to PID file for this process" msgstr "Caminho para o arquivo PID para este processo" msgid "Path to the router directory" msgstr "Caminho para o diretório do roteador" msgid "Peer patch port in integration bridge for tunnel bridge." msgstr "" "Porta de correção do peer na ponte de integração para a ponte do túnel." msgid "Peer patch port in tunnel bridge for integration bridge." 
msgstr "" "Porta da correção do peer na ponte do túnel para a ponte de integração." msgid "Per-tenant subnet pool prefix quota exceeded." msgstr "Cota de prefixo do conjunto de sub-redes por locatário excedida." msgid "Phase upgrade options do not accept revision specification" msgstr "As opções de upgrade de fase não aceitam especificação de revisão" msgid "Ping timeout" msgstr "Tempo Limite de Ping" #, python-format msgid "Plugin '%s' not found." msgstr "Plugin '%s' não encontrado." msgid "Plugin does not support updating provider attributes" msgstr "O plug-in não suporta atualização de atributos do provedor" msgid "Policy configuration policy.json could not be found." msgstr "A configuração de política policy.json não pôde ser localizada." #, python-format msgid "Port %(id)s does not have fixed ip %(address)s" msgstr "A porta %(id)s não possui IP fixo %(address)s" #, python-format msgid "Port %(port)s does not exist on %(bridge)s!" msgstr "A porta %(port)s não existe na %(bridge)s!" #, python-format msgid "Port %(port_id)s is already acquired by another DHCP agent" msgstr "A porta %(port_id)s já foi adquirida por outro agente DHCP " #, python-format msgid "" "Port %(port_id)s is associated with a different tenant than Floating IP " "%(floatingip_id)s and therefore cannot be bound." msgstr "" "A porta %(port_id)s está associada a um locatário diferente do IP Flutuante " "%(floatingip_id)s e, portanto, não pode ser ligada." #, python-format msgid "Port %(port_id)s is not managed by this agent. " msgstr "A porta %(port_id)s não é gerenciada por esse agente." #, python-format msgid "Port %s does not exist" msgstr "A porta %s não existe" #, python-format msgid "" "Port %s has multiple fixed IPv4 addresses. Must provide a specific IPv4 " "address when assigning a floating IP" msgstr "" "A porta %s tem vários endereços IPv4 fixos. Deve-se fornecer um endereço " "IPv4 específico ao designar um IP flutuante" msgid "" "Port Security must be enabled in order to have allowed address pairs on a " "port." msgstr "" "A Segurança da Porta deve ser ativada para ter pares de endereços permitidos " "em uma porta." msgid "" "Port has security group associated. Cannot disable port security or ip " "address until security group is removed" msgstr "" "A porta possui grupo de segurança associado. Não é possível desativar a " "segurança da porta ou o endereço IP até que o grupo de segurança seja " "removido" msgid "" "Port security must be enabled and port must have an IP address in order to " "use security groups." msgstr "" "A segurança da porta deve estar ativada e a porta deve ter um endereço IP " "para usar grupos de segurança." msgid "" "Port to listen on for OpenFlow connections. Used only for 'native' driver." msgstr "" "Porta para atender conexões OpenFlow. Usada somente para o driver 'native'." #, python-format msgid "Prefix '%(prefix)s' not supported in IPv%(version)s pool." msgstr "Prefixo ‘%(prefix)s' não suportado no conjunto do IPv%(version)s." msgid "Prefix Delegation can only be used with IPv6 subnets." msgstr "A delegação de prefixo só pode ser usada com sub-redes IPv6." msgid "Private key of client certificate." msgstr "Chave privada de certificado do cliente." #, python-format msgid "Probe %s deleted" msgstr "Análise %s excluída" #, python-format msgid "Probe created : %s " msgstr "Análise criada: %s " msgid "Process is already started" msgstr "O processo já está iniciado" msgid "Process is not running." msgstr "O processo não está em execução." 
msgid "Protocol to access nova metadata, http or https" msgstr "Protocolo para acessar os metadados do Nova, http ou https" #, python-format msgid "Provider name %(name)s is limited by %(len)s characters" msgstr "O nome do provedor %(name)s é limitado a %(len)s caracteres" #, python-format msgid "QoS Policy %(policy_id)s is used by %(object_type)s %(object_id)s." msgstr "" "A política de QoS %(policy_id)s é usada por %(object_type)s %(object_id)s." #, python-format msgid "" "QoS binding for network %(net_id)s and policy %(policy_id)s could not be " "found." msgstr "" "A ligação do QoS para a rede %(net_id)s e política %(policy_id)s não pôde " "ser localizada." #, python-format msgid "" "QoS binding for port %(port_id)s and policy %(policy_id)s could not be found." msgstr "" "A ligação do QoS para a porta %(port_id)s e política %(policy_id)s não pôde " "ser localizada." #, python-format msgid "QoS policy %(policy_id)s could not be found." msgstr "A política de QoS %(policy_id)s não pôde ser localizada." #, python-format msgid "QoS rule %(rule_id)s for policy %(policy_id)s could not be found." msgstr "" "A regra do QoS %(rule_id)s para a política %(policy_id)s não pôde ser " "localizada." #, python-format msgid "RBAC policy of type %(object_type)s with ID %(id)s not found" msgstr "Política RBAC do tipo %(object_type)s com o ID %(id)s não localizada" #, python-format msgid "" "RBAC policy on object %(object_id)s cannot be removed because other objects " "depend on it.\n" "Details: %(details)s" msgstr "" "A política RBAC no objeto %(object_id)s não pode ser removida porque outros " "objetos dependem dela.\n" "Detalhes: %(details)s" msgid "" "Range of seconds to randomly delay when starting the periodic task scheduler " "to reduce stampeding. (Disable by setting to 0)" msgstr "" "Intervalo de segundos para atrasar aleatoriamente quando iniciar o " "planejador de tarefas periódicas para reduzir o registro de data e hora. " "(Desative configurando como 0)" msgid "Ranges must be in the same IP version" msgstr "Os intervalos devem estar na mesma versão do IP" msgid "Ranges must be netaddr.IPRange" msgstr "Os intervalos devem ser netaddr.IPRange" msgid "Ranges must not overlap" msgstr "Os intervalos não devem se sobrepor" #, python-format msgid "" "Received type '%(type)s' and value '%(value)s'. Expecting netaddr.EUI type." msgstr "" "Recebidos o tipo '%(type)s' e o valor '%(value)s'. Esperando o tipo netaddr." "EUI. " #, python-format msgid "" "Received type '%(type)s' and value '%(value)s'. Expecting netaddr.IPAddress " "type." msgstr "" "Recebidos o tipo '%(type)s' e o valor '%(value)s'. Esperando o tipo netaddr." "IPAddress." #, python-format msgid "" "Received type '%(type)s' and value '%(value)s'. Expecting netaddr.IPNetwork " "type." msgstr "" "Recebidos o tipo '%(type)s' e o valor '%(value)s'. Esperando o tipo netaddr." "IPNetwork." #, python-format msgid "" "Release aware branch labels (%s) are deprecated. Please switch to expand@ " "and contract@ labels." msgstr "" "Rótulos de ramificações cientes da Liberação (%s) foram descontinuados. " "Alterne para os rótulos expand@ e contract@." msgid "Remote metadata server experienced an internal server error." msgstr "" "O servidor de metadados remoto experimentou um erro de servidor interno." msgid "" "Repository does not contain HEAD files for contract and expand branches." msgstr "" "O repositório não contém arquivos HEAD para ramificações de contrato e de " "expansão." 
msgid "" "Representing the resource type whose load is being reported by the agent. " "This can be \"networks\", \"subnets\" or \"ports\". When specified (Default " "is networks), the server will extract particular load sent as part of its " "agent configuration object from the agent report state, which is the number " "of resources being consumed, at every report_interval.dhcp_load_type can be " "used in combination with network_scheduler_driver = neutron.scheduler." "dhcp_agent_scheduler.WeightScheduler When the network_scheduler_driver is " "WeightScheduler, dhcp_load_type can be configured to represent the choice " "for the resource being balanced. Example: dhcp_load_type=networks" msgstr "" "Representando o tipo de recurso cujo carregamento está sendo relatado pelo " "agente. Isso pode ser \"redes\", \"sub-redes\" ou \"portas\". Quando " "especificado (o padrão é redes), o servidor extrai o carregamento particular " "enviado como parte do seu objeto de configuração do agente do relatório de " "estado do agente, que é o número de recursos sendo consumido, em cada " "report_interval.dhcp_load_type que pode ser usado em combinação com " "network_scheduler_driver = neutron.scheduler.dhcp_agent_scheduler." "WeightScheduler. Quando o network_scheduler_driver é WeightScheduler, o " "dhcp_load_type pode ser configurado para representar a opção para o recurso " "que está sendo balanceado. Exemplo: dhcp_load_type=networks" msgid "Request Failed: internal server error while processing your request." msgstr "" "Falha de solicitação: erro do servidor interno ao processar sua solicitação." #, python-format msgid "" "Request contains duplicate address pair: mac_address %(mac_address)s " "ip_address %(ip_address)s." msgstr "" "A solicitação contém um par de endereços duplicado: mac_address " "%(mac_address)s ip_address %(ip_address)s." #, python-format msgid "" "Requested subnet with cidr: %(cidr)s for network: %(network_id)s overlaps " "with another subnet" msgstr "" "Sub-rede solicitada com cidr: %(cidr)s para rede: %(network_id)s se sobrepõe " "com outra sub-rede" msgid "" "Reset flow table on start. Setting this to True will cause brief traffic " "interruption." msgstr "" "Reconfigure a tabela de fluxo ao iniciar. Configurar isso como True causará " "uma breve interrupção do tráfego." #, python-format msgid "Resource %(resource)s %(resource_id)s could not be found." msgstr "O recurso %(resource)s %(resource_id)s não pôde ser localizado." #, python-format msgid "Resource %(resource_id)s of type %(resource_type)s not found" msgstr "Recurso %(resource_id)s do tipo %(resource_type)s não localizado" #, python-format msgid "" "Resource '%(resource_id)s' is already associated with provider " "'%(provider)s' for service type '%(service_type)s'" msgstr "" "O recurso '%(resource_id)s' já está associado com o provedor '%(provider)s' " "para o tipo de serviço '%(service_type)s'" msgid "Resource body required" msgstr "Corpo do recurso necessário" msgid "" "Resource name(s) that are supported in quota features. This option is now " "deprecated for removal." msgstr "" "Nomes de recursos que não são suportados em recursos de cota. Esta opção " "agora foi descontinuada para remoção." msgid "Resource not found." msgstr "Recurso não encontrado." msgid "Resources required" msgstr "Recursos necessários" msgid "" "Root helper application. Use 'sudo neutron-rootwrap /etc/neutron/rootwrap." "conf' to use the real root filter facility. Change to 'sudo' to skip the " "filtering and just run the command directly." 
msgstr "" "Aplicação do auxiliar de raiz. Use 'sudo neutron-rootwrap /etc/neutron/" "rootwrap.conf' para usar o recurso de filtragem de raiz real. Altere para " "'sudo' para pular a filtragem e executar apenas o comando diretamente." msgid "Root helper daemon application to use when possible." msgstr "Aplicativo do daemon auxiliar raiz a ser usado quando possível." msgid "Root permissions are required to drop privileges." msgstr "As permissões de raiz são necessárias para descartar privilégios." #, python-format msgid "Route %(cidr)s not advertised for BGP Speaker %(speaker_as)d." msgstr "Rota %(cidr)s não informada para o Speaker BGP %(speaker_as)d." #, python-format msgid "Router %(router_id)s %(reason)s" msgstr "Roteador %(router_id)s %(reason)s" #, python-format msgid "Router %(router_id)s could not be found" msgstr "O roteador %(router_id)s não pôde ser localizado" #, python-format msgid "Router %(router_id)s does not have an interface with id %(port_id)s" msgstr "O roteador %(router_id)s não possui uma interface com o id %(port_id)s" #, python-format msgid "Router %(router_id)s has no interface on subnet %(subnet_id)s" msgstr "" "O roteador %(router_id)s não possui uma interface na sub-rede %(subnet_id)s" #, python-format msgid "Router '%(router_id)s' cannot be both DVR and HA." msgstr "O roteador '%(router_id)s' não pode ser DVR e HA" #, python-format msgid "Router '%(router_id)s' is not compatible with this agent." msgstr "O roteador '%(router_id)s‘ não é compatível com este agente." #, python-format msgid "Router already has a port on subnet %s" msgstr "O roteador já possui uma porta na sub-rede %s" #, python-format msgid "" "Router interface for subnet %(subnet_id)s on router %(router_id)s cannot be " "deleted, as it is required by one or more floating IPs." msgstr "" "A interface do roteador para a sub-rede %(subnet_id)s no roteador " "%(router_id)s não pode ser excluída porque ela é necessária por um ou mais " "IPs flutuantes." #, python-format msgid "" "Router interface for subnet %(subnet_id)s on router %(router_id)s cannot be " "deleted, as it is required by one or more routes." msgstr "" "A interface do roteador para a sub-rede %(subnet_id)s no roteador " "%(router_id)s não pode ser excluída porque ela é necessária por uma ou mais " "rotas." msgid "Router port must have at least one fixed IP" msgstr "A porta do Roteador deve ter pelo menos um IP fixo" msgid "Router that will have connected instances' metadata proxied." msgstr "Roteador que terá conectado metadados de instâncias em proxy." #, python-format msgid "" "Row doesn't exist in the DB. Request info: Table=%(table)s. Columns=" "%(columns)s. Records=%(records)s." msgstr "" "A linha não existe no BD. Informações de solicitação: Table=%(table)s. " "Columns=%(columns)s. Records=%(records)s." msgid "Run as daemon." msgstr "Executar como daemon." #, python-format msgid "Running %(cmd)s (%(desc)s) for %(project)s ..." msgstr "Executando %(cmd)s (%(desc)s) para %(project)s..." #, python-format msgid "Running %(cmd)s for %(project)s ..." msgstr "Executando %(cmd)s para %(project)s..." msgid "Running without keystone AuthN requires that tenant_id is specified" msgstr "A execução sem keystyone AuthN requer que tenant_id seja especificado" msgid "" "Seconds between nodes reporting state to server; should be less than " "agent_down_time, best if it is half or less than agent_down_time." 
msgstr "" "Segundos entre os nós que relatam o estado para o servidor; deve ser menor " "que agent_down_time, será melhor se for metade ou menos do que " "agent_down_time." msgid "Seconds between running periodic tasks" msgstr "Segundos entre a execução de tarefas periódicas" msgid "" "Seconds to regard the agent is down; should be at least twice " "report_interval, to be sure the agent is down for good." msgstr "" "Segundos para considerar que o agente está inativo; deve ser no mínimo duas " "vezes o report_interval para ter certeza de que o agente está inativo." #, python-format msgid "Security Group %(id)s %(reason)s." msgstr "Grupo de Segurança %(id)s %(reason)s." #, python-format msgid "Security Group Rule %(id)s %(reason)s." msgstr "Regra de grupo de segurança %(id)s %(reason)s." #, python-format msgid "Security group %(id)s does not exist" msgstr "O grupo de segurança %(id)s não existe" #, python-format msgid "Security group rule %(id)s does not exist" msgstr "A regra do grupo de segurança %(id)s não existe" #, python-format msgid "Security group rule already exists. Rule id is %(rule_id)s." msgstr "A regra do grupo de segurança já existe. O ID de regra é %(rule_id)s." #, python-format msgid "" "Security group rule for ethertype '%(ethertype)s' not supported. Allowed " "values are %(values)s." msgstr "" "A regra do grupo de segurança para ethertype '%(ethertype)s' não é " "suportada. Os valores permitidos são %(values)s." #, python-format msgid "" "Security group rule protocol %(protocol)s not supported. Only protocol " "values %(values)s and integer representations [0 to 255] are supported." msgstr "" "Protocolo de regra do grupo de segurança %(protocol)s não suportado. Apenas " "valores de protocolo %(values)s e representações de número inteiro [0 a 255] " "são suportados." msgid "Segments and provider values cannot both be set." msgstr "Valores de segmento e provedor não podem ser configurados." msgid "Selects the Agent Type reported" msgstr "Seleciona o Tipo de agente relatado" msgid "" "Send notification to nova when port data (fixed_ips/floatingip) changes so " "nova can update its cache." msgstr "" "Enviar notificação para o Nova quando dados da porta (fixed_ips/floatingip) " "mudam de modo que o Nova possa atualizar seu cache." msgid "Send notification to nova when port status changes" msgstr "Enviar notificação para o Nova quando o status da porta muda" msgid "" "Send this many gratuitous ARPs for HA setup, if less than or equal to 0, the " "feature is disabled" msgstr "" "Enviar esses vários ARPs gratuitos para a configuração de HA, se for menor " "ou igual a 0, o recurso está desativado" #, python-format msgid "Service Profile %(sp_id)s could not be found." msgstr "O Perfil de Serviço %(sp_id)s não pôde ser localizado." #, python-format msgid "Service Profile %(sp_id)s is already associated with flavor %(fl_id)s." msgstr "O Perfil de Serviço %(sp_id)s já está associado ao tipo %(fl_id)s." #, python-format msgid "Service Profile %(sp_id)s is not associated with flavor %(fl_id)s." msgstr "O Perfil de Serviço %(sp_id)s não está associado ao tipo %(fl_id)s." #, python-format msgid "Service Profile %(sp_id)s is used by some service instance." msgstr "" "O Perfil de Serviço %(sp_id)s é usado por alguma instância de serviço. " #, python-format msgid "Service Profile driver %(driver)s could not be found." msgstr "O driver do Perfil de Serviço %(driver)s não pôde ser localizado." msgid "Service Profile is not enabled." msgstr "O Perfil de Serviço não está ativado." 
msgid "Service Profile needs either a driver or metainfo." msgstr "O Perfil de Serviço precisa de um driver ou de uma metainfo." #, python-format msgid "" "Service provider '%(provider)s' could not be found for service type " "%(service_type)s" msgstr "" "O provedor de serviços '%(provider)s' não pôde ser encontrado para o tipo de " "serviço %(service_type)s" msgid "Service to handle DHCPv6 Prefix delegation." msgstr "Serviço para manipular a delegação de Prefixo DHCPv6." #, python-format msgid "Service type %(service_type)s does not have a default service provider" msgstr "" "O tipo de serviço %(service_type)s não possui um provedor de serviços padrão" msgid "" "Set new timeout in seconds for new rpc calls after agent receives SIGTERM. " "If value is set to 0, rpc timeout won't be changed" msgstr "" "Configure novo tempo limite em segundos para novas chamadas rpc depois que o " "agente receber SIGTERM. Se o valor for configurado como 0, o tempo limite de " "rpc não será alterado" msgid "" "Set or un-set the don't fragment (DF) bit on outgoing IP packet carrying GRE/" "VXLAN tunnel." msgstr "" "Configure ou desconfigure o bit don't fragment (DF) no pacote IP de saída " "que transporta o túnel GRE/VXLAN." msgid "" "Set or un-set the tunnel header checksum on outgoing IP packet carrying GRE/" "VXLAN tunnel." msgstr "" "Configure ou desconfigure a soma de verificação do cabeçalho de túnel no " "pacote IP de saída que transporta o túnel GRE/VXLAN." msgid "Shared address scope can't be unshared" msgstr "O escopo de endereço compartilhado não pode ser descompartilhado" msgid "" "Specifying 'tenant_id' other than authenticated tenant in request requires " "admin privileges" msgstr "" "Especificar 'tenant_id' diferente do locatário autenticado na solicitação " "requer privilégios do administrador" msgid "String prefix used to match IPset names." msgstr "Prefixo de sequência usado para corresponder nomes de IPset." #, python-format msgid "Sub-project %s not installed." msgstr "O subprojeto %s não está instalado." msgid "Subnet for router interface must have a gateway IP" msgstr "A sub-rede para a interface do roteador deve ter um IP de gateway" msgid "" "Subnet has a prefix length that is incompatible with DHCP service enabled." msgstr "" "A sub-rede tem um comprimento de prefixo que é incompatível com o serviço " "DHCP ativado." #, python-format msgid "Subnet pool %(subnetpool_id)s could not be found." msgstr "O conjunto de sub-rede %(subnetpool_id)s não pôde ser localizado." msgid "Subnet pool has existing allocations" msgstr "O conjunto de sub-rede possui alocações existentes" msgid "Subnet used for the l3 HA admin network." msgstr "Sub-rede usada para a rede administrativa de HA l3." msgid "" "Subnets hosted on the same network must be allocated from the same subnet " "pool." msgstr "" "As sub-redes hospedadas na mesma rede devem ser alocadas a partir do mesmo " "conjunto de sub-redes." msgid "Suffix to append to all namespace names." msgstr "Sufixo para anexar a todos os nomes de namespace." msgid "" "System-wide flag to determine the type of router that tenants can create. " "Only admin can override." msgstr "" "Sinalizador do Sistema Inteiro para determinar o tipo de roteador que os " "locatários podem criar. Somente o administrador pode substituir." msgid "TCP Port to listen for metadata server requests." msgstr "A Porta TCP para atender às solicitações do servidor de metadados." msgid "TCP Port used by Neutron metadata namespace proxy." 
msgstr "Porta TCP usada pelo proxy de namespace de metadados Neutron." msgid "TCP Port used by Nova metadata server." msgstr "Porta TCP usada pelo servidor de metadados Nova." #, python-format msgid "TLD '%s' must not be all numeric" msgstr "O TLD '%s' não deve ser todo numérico" msgid "TOS for vxlan interface protocol packets." msgstr "TOS para pacotes de protocolo da interface vxlan." msgid "TTL for vxlan interface protocol packets." msgstr "TTL para pacotes de protocolo da interface vxlan." #, python-format msgid "Table %s can only be queried by UUID" msgstr "A tabela %s só pode ser consultada por UUID" #, python-format msgid "Tag %(tag)s could not be found." msgstr "A tag %(tag)s não pôde ser localizada." #, python-format msgid "Tenant %(tenant_id)s not allowed to create %(resource)s on this network" msgstr "" "Locatário %(tenant_id)s não permitido para criar %(resource)s nesta rede" msgid "Tenant id for connecting to designate in admin context" msgstr "" "ID de locatário para conexão com o designado no contexto de administrador" msgid "Tenant name for connecting to designate in admin context" msgstr "" "Nome do locatário para conexão com o designado no contexto de administrador" msgid "Tenant network creation is not enabled." msgstr "A criação da rede do locatário não está ativada." msgid "Tenant-id was missing from quota request." msgstr "O ID do locatário estava ausente na solicitação de cota." msgid "" "The 'gateway_external_network_id' option must be configured for this agent " "as Neutron has more than one external network." msgstr "" "A opção 'gateway_external_network_id' deve estar configurada para este " "agente pois o Neutron possui mais de uma rede externa." msgid "" "The DHCP agent will resync its state with Neutron to recover from any " "transient notification or RPC errors. The interval is number of seconds " "between attempts." msgstr "" "O agente DHCP ressincronizará seu estado com o Neutron para recuperar-se de " "quaisquer notificações ou erros de RPC temporários. O intervalo é o número " "de segundos entre as tentativas." msgid "" "The DHCP server can assist with providing metadata support on isolated " "networks. Setting this value to True will cause the DHCP server to append " "specific host routes to the DHCP request. The metadata service will only be " "activated when the subnet does not contain any router port. The guest " "instance must be configured to request host routes via DHCP (Option 121). " "This option doesn't have any effect when force_metadata is set to True." msgstr "" "O servidor DHCP pode ajudar a fornecer suporte de metadados em redes " "isoladas. Configurar esse valor para True fará com que o servidor DHCP anexe " "rotas de host específicas à solicitação DHCP. O serviço de metadados será " "ativado somente quando a sub-rede não contiver nenhuma porta do roteador. A " "instância convidada deve ser configurada para solicitar rotas de host por " "meio de DHCP (Opção 121). Essa opção não tem efeito algum quando " "force_metadata estiver configurado para True." #, python-format msgid "" "The HA Network CIDR specified in the configuration file isn't valid; " "%(cidr)s." msgstr "" "O CIDR da Rede de HA especificado no arquivo de configuração não é válido; " "%(cidr)s." msgid "The UDP port to use for VXLAN tunnels." msgstr "A porta UDP utilizada para túneis VXLAN." 
#, python-format msgid "" "The address allocation request could not be satisfied because: %(reason)s" msgstr "" "A solicitação de alocação de endereço não pôde ser satisfeita devido a: " "%(reason)s" msgid "The advertisement interval in seconds" msgstr "O intervalo de propaganda em segundos" #, python-format msgid "The allocation pool %(pool)s is not valid." msgstr "O pool de alocação %(pool)s não é válido." #, python-format msgid "" "The allocation pool %(pool)s spans beyond the subnet cidr %(subnet_cidr)s." msgstr "" "O pool de alocações %(pool)s abrange além da sub-rede CIDR %(subnet_cidr)s." #, python-format msgid "" "The attribute '%(attr)s' is reference to other resource, can't used by sort " "'%(resource)s'" msgstr "" "O atributo '%(attr)s' é uma referência a outro recurso e não pode ser usado " "pela classificação '%(resource)s'" msgid "" "The base MAC address Neutron will use for VIFs. The first 3 octets will " "remain unchanged. If the 4th octet is not 00, it will also be used. The " "others will be randomly generated." msgstr "" "O endereço MAC de base que o Neutron usará para VIFs. Os 3 primeiros octetos " "permanecerão inalterados. Se o 4º octeto não for 00, ele também será " "utilizado, Os outros serão gerados aleatoriamente." msgid "" "The base mac address used for unique DVR instances by Neutron. The first 3 " "octets will remain unchanged. If the 4th octet is not 00, it will also be " "used. The others will be randomly generated. The 'dvr_base_mac' *must* be " "different from 'base_mac' to avoid mixing them up with MAC's allocated for " "tenant ports. A 4 octet example would be dvr_base_mac = fa:16:3f:4f:00:00. " "The default is 3 octet" msgstr "" "O endereço mac base usado para instâncias DVR exclusivas pelo Neutron. Os 3 " "primeiros octetos permanecerão inalterados. Se o 4° octeto não for 00, ele " "também será usado. Os outros serão gerados aleatoriamente. O 'dvr_base_mac' " "*deve* ser diferente de 'base_mac' para evitar misturá-los com os do MAC " "alocados para portas locatárias. Um exemplo de 4 octetos seria dvr_base_mac " "= fa:16:3f:4f:00:00. O padrão é 3 octetos" msgid "" "The connection string for the native OVSDB backend. Requires the native " "ovsdb_interface to be enabled." msgstr "" "A sequência de conexões para o backend de OVSDB nativo requer que a " "ovsdb_intrface seja ativada. " msgid "The core plugin Neutron will use" msgstr "O plug-in principal que o Neutron irá utilizar." #, python-format msgid "" "The dns_name passed is a FQDN. Its higher level labels must be equal to the " "dns_domain option in neutron.conf, that has been set to '%(dns_domain)s'. It " "must also include one or more valid DNS labels to the left of " "'%(dns_domain)s'" msgstr "" "O dns_name transmitido é um FQDN. Seus rótulos de nível superior devem ser " "iguais à opção dns_domain em neutron.conf, que foi configurada para " "'%(dns_domain)s'. Ele também deve incluir um ou mais rótulos de DNS válidos " "à esquerda de '%(dns_domain)s'" #, python-format msgid "" "The dns_name passed is a PQDN and its size is '%(dns_name_len)s'. The " "dns_domain option in neutron.conf is set to %(dns_domain)s, with a length of " "'%(higher_labels_len)s'. When the two are concatenated to form a FQDN (with " "a '.' at the end), the resulting length exceeds the maximum size of " "'%(fqdn_max_len)s'" msgstr "" "O dns_name transmitido é um PQDN e seu tamanho é '%(dns_name_len)s'. A opção " "dns_domain em neutron.conf é configurada para %(dns_domain)s, com um " "comprimento de '%(higher_labels_len)s'. 
Quando os dois são concatenados para " "formar um FQDN (com um '.' no final), o comprimento resultante excede o " "tamanho máximo de '%(fqdn_max_len)s'" msgid "The driver used to manage the DHCP server." msgstr "O driver usado para gerenciar o servidor DHCP." msgid "The driver used to manage the virtual interface." msgstr "O driver usado para gerenciar a interface virtual." msgid "" "The email address to be used when creating PTR zones. If not specified, the " "email address will be admin@" msgstr "" "O endereço de e-mail a ser usado ao criar zonas PTR. Se não especificado, o " "endereço de e-mail será admin@" #, python-format msgid "" "The following device_id %(device_id)s is not owned by your tenant or matches " "another tenants router." msgstr "" "O seguinte device_id %(device_id)s não pertence ao seu locatário ou " "corresponde a outro roteador de locatários." msgid "The host IP to bind to" msgstr "O IP do host ao qual conectar-se" msgid "The interface for interacting with the OVSDB" msgstr "A interface para interação com o OVSDB" msgid "" "The maximum number of items returned in a single response, value was " "'infinite' or negative integer means no limit" msgstr "" "O número máximo de itens retornados em uma única resposta, o valor era " "'infinito' ou um número inteiro negativo significa que não há limite" #, python-format msgid "" "The network %(network_id)s has been already hosted by the DHCP Agent " "%(agent_id)s." msgstr "A rede %(network_id)s já foi hospedada pelo Agente DHCP %(agent_id)s." #, python-format msgid "" "The network %(network_id)s is not hosted by the DHCP agent %(agent_id)s." msgstr "" "A rede %(network_id)s não está hospedada pelo agente DHCP %(agent_id)s." msgid "" "The network type to use when creating the HA network for an HA router. By " "default or if empty, the first 'tenant_network_types' is used. This is " "helpful when the VRRP traffic should use a specific network which is not the " "default one." msgstr "" "O tipo de rede a ser usado ao criar a rede HA para um roteador HA. Por " "padrão ou se estiver em branco, o primeiro 'tenant_network_types' será " "usado. Isso é útil quando o tráfego VRRP tiver que usar uma rede específica " "que não é a padrão." #, python-format msgid "The number of allowed address pair exceeds the maximum %(quota)s." msgstr "" "O número de pares de endereços permitidos excede o máximo de %(quota)s." msgid "" "The number of seconds the agent will wait between polling for local device " "changes." msgstr "" "O número de segundos que o agente aguardará entre as pesquisas de mudanças " "do dispositivo local." msgid "" "The number of seconds to wait before respawning the ovsdb monitor after " "losing communication with it." msgstr "" "O número de segundos a aguardar antes de efetuar spawn novamente do monitor " "ovsdb após perder comunicação com ele." msgid "The number of sort_keys and sort_dirs must be same" msgstr "Os números de sort_keys e sort_dirs devem ser os mesmos" msgid "" "The path for API extensions. Note that this can be a colon-separated list of " "paths. For example: api_extensions_path = extensions:/path/to/more/exts:/" "even/more/exts. The __path__ of neutron.extensions is appended to this, so " "if your extensions are in there you don't need to specify them here." msgstr "" "O caminho para extensões da API. Note que isso pode ser uma lista separada " "por vírgula de caminhos. Por exemplo: api_extensions_path = extensions:/path/" "to/more/exts:/even/more/exts. 
O __path__ of neutron.extensions é anexado a " "isso, de modo que se suas extensões estiverem lá, não será necessário " "especificá-las aqui. " msgid "The physical network name with which the HA network can be created." msgstr "O nome da rede física com o qual a rede HA pode ser criada." #, python-format msgid "The port '%s' was deleted" msgstr "A porta '%s' foi excluída" msgid "The port to bind to" msgstr "A porta à qual conectar-se" #, python-format msgid "The requested content type %s is invalid." msgstr "O tipo de conteúdo solicitado %s é inválido." msgid "The resource could not be found." msgstr "O recurso não pôde ser encontrado." #, python-format msgid "" "The router %(router_id)s has been already hosted by the L3 Agent " "%(agent_id)s." msgstr "O roteador %(router_id)s já foi hospedado pelo Agente L3 %(agent_id)s." msgid "" "The server has either erred or is incapable of performing the requested " "operation." msgstr "" "O servidor possui um erro ou é incapaz de executar a operação solicitada." msgid "The service plugins Neutron will use" msgstr "Os plugins de serviço que o Neutron irá utilizar" #, python-format msgid "The subnet request could not be satisfied because: %(reason)s" msgstr "A solicitação de sub-rede não pôde ser satisfeita devido a: %(reason)s" #, python-format msgid "The subproject to execute the command against. Can be one of: '%s'." msgstr "" "O subprojeto com relação ao qual executar o comando. Pode ser um de: '%s'." msgid "The type of authentication to use" msgstr "O tipo de autenticação a ser usado" #, python-format msgid "The value '%(value)s' for %(element)s is not valid." msgstr "O valor ‘%(value)s' para %(element)s não é válido." msgid "" "The working mode for the agent. Allowed modes are: 'legacy' - this preserves " "the existing behavior where the L3 agent is deployed on a centralized " "networking node to provide L3 services like DNAT, and SNAT. Use this mode if " "you do not want to adopt DVR. 'dvr' - this mode enables DVR functionality " "and must be used for an L3 agent that runs on a compute host. 'dvr_snat' - " "this enables centralized SNAT support in conjunction with DVR. This mode " "must be used for an L3 agent running on a centralized node (or in single-" "host deployments, e.g. devstack)" msgstr "" "O modo de trabalho para o agente. Os modos permitidos são: 'legacy' - isso " "preserva o comportamento existente em que o agente L3 é implementado em um " "nó de rede centralizada para fornecer serviços L3, como DNAT e SNAT. Use " "este modo se você não desejar adotar o DVR. 'dvr' - este modo permite a " "funcionalidade de DVR e deve ser usado para um agente L3 que é executado em " "um host de cálculo. 'dvr_snat'- isto permite suporte SNAT centralizado em " "conjunto com DVR. Este modo deve ser usado para um agente L3 em execução em " "um nó centralizado (ou em implementações de host único, por exemplo, " "devstack)" msgid "" "There are routers attached to this network that depend on this policy for " "access." msgstr "" "Há roteadores conectados a essa rede que dependem dessa política para acesso." msgid "" "This will choose the web framework in which to run the Neutron API server. " "'pecan' is a new experiemental rewrite of the API server." msgstr "" "Isso escolherá a estrutura da web na qual executar o servidor da API " "Neutron. O 'pecan' é uma nova regravação experimental do servidor da API." msgid "Timeout" msgstr "Tempo limite" msgid "" "Timeout in seconds for ovs-vsctl commands. 
If the timeout expires, ovs " "commands will fail with ALARMCLOCK error." msgstr "" "Tempo limite em segundos para comandos ovs-vsctl. Se o tempo limite expirar, " "comandos ovs falharão com o erro ALARMCLOCK." msgid "" "Timeout in seconds to wait for a single OpenFlow request. Used only for " "'native' driver." msgstr "" "Tempo limite em segundos a ser aguardado para uma solicitação OpenFlow " "única. Usado somente para driver 'native'." msgid "" "Timeout in seconds to wait for the local switch connecting the controller. " "Used only for 'native' driver." msgstr "" "Tempo limite em segundos de espera para que o comutador local conecte o " "controlador. Usado somente para driver 'native'." msgid "" "Too long prefix provided. New name would exceed given length for an " "interface name." msgstr "" "Prefixo muito longo fornecido. O novo nome excede o comprimento fornecido de " "um nome de instância." msgid "Too many availability_zone_hints specified" msgstr "Muitos availability_zone_hints especificados. " msgid "" "True to delete all ports on all the OpenvSwitch bridges. False to delete " "ports created by Neutron on integration and external network bridges." msgstr "" "True para excluir todas as portas em todas as pontes OpenvSwitch. False para " "excluir portas criadas pelo Neutron na integração e pontes de rede externa." msgid "Tunnel IP value needed by the ML2 plugin" msgstr "Valor do IP do túnel necessário pelo plug-in ML2" msgid "Tunnel bridge to use." msgstr "Ponte do túnel a ser utilizada." msgid "" "Type of the nova endpoint to use. This endpoint will be looked up in the " "keystone catalog and should be one of public, internal or admin." msgstr "" "O tipo do terminal Nova a ser utilizado. Esse terminal será bloqueado no " "catálogo de keystone e deverá ser público, interno ou de administração." msgid "URL for connecting to designate" msgstr "URL para conexão com o designado" msgid "URL to database" msgstr "URL para banco de dados" #, python-format msgid "Unable to access %s" msgstr "Não é possível acessar %s" #, python-format msgid "" "Unable to allocate subnet with prefix length %(prefixlen)s, maximum allowed " "prefix is %(max_prefixlen)s." msgstr "" "Não é possível alocar a sub-rede com o comprimento de prefixo %(prefixlen)s, " "o máximo de prefixo permitido é %(max_prefixlen)s." #, python-format msgid "" "Unable to allocate subnet with prefix length %(prefixlen)s, minimum allowed " "prefix is %(min_prefixlen)s." msgstr "" "Não é possível alocar a sub-rede com o comprimento de prefixo %(prefixlen)s, " "o prefixo mínimo permitido é %(min_prefixlen)s." #, python-format msgid "Unable to calculate %(address_type)s address because of:%(reason)s" msgstr "" "Não é possível calcular o endereço %(address_type)s devido a: %(reason)s" #, python-format msgid "" "Unable to complete operation for %(router_id)s. The number of routes exceeds " "the maximum %(quota)s." msgstr "" "Não é possível concluir a operação para %(router_id)s. O número de rotas " "excede o máximo de %(quota)s." #, python-format msgid "" "Unable to complete operation for %(subnet_id)s. The number of DNS " "nameservers exceeds the limit %(quota)s." msgstr "" "Não é possível concluir a operação para %(subnet_id)s. O número de " "servidores de nomes DNS excede o limite %(quota)s." #, python-format msgid "" "Unable to complete operation for %(subnet_id)s. The number of host routes " "exceeds the limit %(quota)s." msgstr "" "Não é possível concluir a operação para %(subnet_id)s. 
O número de rotas do " "host excede o limite %(quota)s." #, python-format msgid "" "Unable to complete operation on address scope %(address_scope_id)s. There " "are one or more subnet pools in use on the address scope" msgstr "" "Não é possível concluir a operação no escopo de endereço " "%(address_scope_id)s. Há um ou mais conjuntos de sub-rede em uso no escopo " "de endereço" #, python-format msgid "Unable to convert value in %s" msgstr "Não é possível converter valor em %s" msgid "Unable to create the Agent Gateway Port" msgstr "Não é possível criar a Porta do Gateway do Agente" msgid "Unable to create the SNAT Interface Port" msgstr "Não é possível criar a Porta da Interface SNAT" #, python-format msgid "" "Unable to create the flat network. Physical network %(physical_network)s is " "in use." msgstr "" "Não é possível criar a rede simples. A rede física %(physical_network)s está " "em uso." msgid "" "Unable to create the network. No available network found in maximum allowed " "attempts." msgstr "" "Não é possível criar a rede. Nenhuma rede disponível encontrada no máximo de " "tentativas permitidas." #, python-format msgid "Unable to delete subnet pool: %(reason)s." msgstr "Não é possível excluir o conjunto de sub-redes: %(reason)s." #, python-format msgid "Unable to determine mac address for %s" msgstr "Não foi possível determinar o endereço MAC para %s" #, python-format msgid "Unable to find '%s' in request body" msgstr "Não foi possível localizar '%s' no corpo da solicitação" #, python-format msgid "Unable to find IP address %(ip_address)s on subnet %(subnet_id)s" msgstr "" "Não é possível localizar o endereço IP %(ip_address)s na sub-rede " "%(subnet_id)s" #, python-format msgid "Unable to find resource name in %s" msgstr "Não é possível encontrar o nome de recurso em %s" msgid "Unable to generate IP address by EUI64 for IPv4 prefix" msgstr "Não é possível gerar endereço IP pelo prefixo EUI64 para IPv4" #, python-format msgid "Unable to generate unique DVR mac for host %(host)s." msgstr "Não é possível gerar MAC de DVR exclusivo para o host %(host)s." #, python-format msgid "Unable to generate unique mac on network %(net_id)s." msgstr "Não é possível gerar um mac exclusivo na rede %(net_id)s." #, python-format msgid "" "Unable to identify a target field from:%s. Match should be in the form " "%%()s" msgstr "" "Não é possível identificar um campo de destino a partir do: %s. A " "correspondência deve estar no formato %%()s" msgid "Unable to provide external connectivity" msgstr "Não é possível fornecer conectividade externa" msgid "Unable to provide tenant private network" msgstr "Não é possível fornecer rede privada do locatário." #, python-format msgid "" "Unable to reconfigure sharing settings for network %(network)s. Multiple " "tenants are using it." msgstr "" "Não é possível redefinir as configurações de compartilhamento para a rede " "%(network)s. Ela está sendo usada por diversos locatários." 
#, python-format msgid "Unable to update address scope %(address_scope_id)s : %(reason)s" msgstr "" "Não é possível atualizar escopo de endereço %(address_scope_id)s: %(reason)s" #, python-format msgid "Unable to update the following object fields: %(fields)s" msgstr "Não é possível atualizar os campos de objetos a seguir: %(fields)s" #, python-format msgid "" "Unable to verify match:%(match)s as the parent resource: %(res)s was not " "found" msgstr "" "Não é possível verificar correspondência:%(match)s porque o recurso pai: " "%(res)s não foi encontrado" #, python-format msgid "Unexpected label for script %(script_name)s: %(labels)s" msgstr "Rótulo inesperado para o script %(script_name)s: %(labels)s" #, python-format msgid "Unexpected number of alembic branch points: %(branchpoints)s" msgstr "Número inesperado de pontos de ramificação alembic: %(branchpoints)s" #, python-format msgid "Unexpected response code: %s" msgstr "Código de resposta inesperado: %s" #, python-format msgid "Unexpected response: %s" msgstr "Resposta inesperada: %s" #, python-format msgid "Unit name '%(unit)s' is not valid." msgstr "O nome da unidade '%(unit)s' não é válido." msgid "Unknown API version specified" msgstr "Versão de API especificada desconhecida" #, python-format msgid "Unknown address type %(address_type)s" msgstr "Tipo de endereço desconhecido %(address_type)s" #, python-format msgid "Unknown attribute '%s'." msgstr "Atributo desconhecido '%s'." #, python-format msgid "Unknown chain: %r" msgstr "Cadeia desconhecida: %r" #, python-format msgid "Unknown network type %(network_type)s." msgstr "Tipo de rede desconhecido %(network_type)s." #, python-format msgid "Unknown quota resources %(unknown)s." msgstr "Recursos da cota desconhecidos %(unknown)s." msgid "Unmapped error" msgstr "Erro não mapeado" msgid "Unrecognized action" msgstr "Ação não reconhecida" #, python-format msgid "Unrecognized attribute(s) '%s'" msgstr "Atributo(s) não reconhecido(s) '%s'" msgid "Unrecognized field" msgstr "Campo não reconhecido" msgid "Unspecified minimum subnet pool prefix." msgstr "Prefixo do conjunto de sub-rede mínimo não especificado." msgid "Unsupported Content-Type" msgstr "Tipo de Conteúdo Não Suportado" #, python-format msgid "Unsupported network type %(net_type)s." msgstr "Tipo de rede não suportado %(net_type)s." #, python-format msgid "Unsupported port state: %(port_state)s." msgstr "Estado da porta não suportado: %(port_state)s." msgid "Unsupported request type" msgstr "Tipo de solicitação não suportado" msgid "Updating default security group not allowed." msgstr "Não é permitido atualizar o grupo de segurança padrão." msgid "" "Use ML2 l2population mechanism driver to learn remote MAC and IPs and " "improve tunnel scalability." msgstr "" "Utilize o driver de mecanismo de população ML2 l2 para aprender sobre MAC e " "IPs remotos e melhorar a escalabilidade do túnel." msgid "Use broadcast in DHCP replies." msgstr "Usar broadcast em respostas DHCP." msgid "Use either --delta or relative revision, not both" msgstr "Use --delta ou revisão relativa, não ambos" msgid "" "Use ipset to speed-up the iptables based security groups. Enabling ipset " "support requires that ipset is installed on L2 agent node." msgstr "" "Utilize ipset para acelerar os grupos de segurança baseados em iptables. " "Para habilitar o suporte a ipset, é necessário que o ipset esteja instalado " "no nó do agente L2." msgid "" "Use the root helper when listing the namespaces on a system. 
This may not be " "required depending on the security configuration. If the root helper is not " "required, set this to False for a performance improvement." msgstr "" "Use o auxiliar raiz quando listar os namespaces no sistema. Isso pode não " "ser necessário dependendo das configurações de segurança. Se o auxiliar raiz " "não for necessário, configure isto para False para melhorar o desempenho." msgid "" "Use veths instead of patch ports to interconnect the integration bridge to " "physical networks. Support kernel without Open vSwitch patch port support so " "long as it is set to True." msgstr "" "Use veths em vez de portas de correção para interconectar a ponte de " "integração a redes físicas. Suporta kernel sem o suporte da porta de " "correção Open vSwitch desde que configurado como True." msgid "User (uid or name) running metadata proxy after its initialization" msgstr "" "O usuário (uid ou nome) que executa o proxy de metadados após sua " "inicialização" msgid "" "User (uid or name) running metadata proxy after its initialization (if " "empty: agent effective user)." msgstr "" "Usuário (uid ou nome) executando proxy de metadados após sua inicialização " "(se vazio: usuário efetivo do agente)." msgid "User (uid or name) running this process after its initialization" msgstr "Usuário (uid ou nome) executando esse processo após sua inicialização" msgid "Username for connecting to designate in admin context" msgstr "" "Nome de usuário para conexão com o designado no contexto de administrador" msgid "" "Uses veth for an OVS interface or not. Support kernels with limited " "namespace support (e.g. RHEL 6.5) so long as ovs_use_veth is set to True." msgstr "" "Usa veth para uma interface OVS ou não. Os kernels de apoio com namespace " "limitado suportam (por exemplo, RHEL 6.5) enquanto ovs_use_veth estiver " "configurado para True." msgid "VRRP authentication password" msgstr "Senha de autenticação do VRRP" msgid "VRRP authentication type" msgstr "Tipo de autenticação do VRRP" msgid "VXLAN network unsupported." msgstr "Rede VXLAN não suportada." #, python-format msgid "" "Validation of dictionary's keys failed. Expected keys: %(expected_keys)s " "Provided keys: %(provided_keys)s" msgstr "" "A validação de chaves do dicionário falhou. Chaves esperadas: " "%(expected_keys)s Chaves fornecidas: %(provided_keys)s" #, python-format msgid "Validator '%s' does not exist." msgstr "O validador '%s' não existe." #, python-format msgid "Value %(value)s in mapping: '%(mapping)s' not unique" msgstr "Valor %(value)s no mapeamento: '%(mapping)s' não exclusivo" #, python-format msgid "" "Value of %(parameter)s has to be multiple of %(number)s, with maximum value " "of %(maximum)s and minimum value of %(minimum)s" msgstr "" "O valor de %(parameter)s possui um múltiplo de %(number)s, com valor máximo " "de %(maximum)s e valor mínimo de %(minimum)s" msgid "" "Value of host kernel tick rate (hz) for calculating minimum burst value in " "bandwidth limit rules for a port with QoS. See kernel configuration file for " "HZ value and tc-tbf manual for more information." msgstr "" "Valor da taxa tick (hz) do kernel do host para calcular o valor de burst " "mínimo nas regras de limite de largura da banda para uma porta com QoS. " "Consulte o arquivo de configuração do kernel para obter o valor de HZ e o " "manual tc-tbf para obter mais informações." msgid "" "Value of latency (ms) for calculating size of queue for a port with QoS. See " "tc-tbf manual for more information." 
msgstr "" "Valor de latência (ms) para calcular o tamanho da fila de uma porta com QoS. " "Consulte o manual tc-tbf para obter mais informações." msgid "" "Watch file log. Log watch should be disabled when metadata_proxy_user/group " "has no read/write permissions on metadata proxy log file." msgstr "" "Inspecionar log de arquivo. A inspeção do log deve ser desativada quando " "metadata_proxy_user/group não possuir permissões de leitura/gravação no " "arquivo de log de proxy de metadados." msgid "" "When external_network_bridge is set, each L3 agent can be associated with no " "more than one external network. This value should be set to the UUID of that " "external network. To allow L3 agent support multiple external networks, both " "the external_network_bridge and gateway_external_network_id must be left " "empty." msgstr "" "Quando external_network_bridge é configurado, cada agente L3 apode ser " "associado à, no máximo, uma rede externa. Esse valor deve ser configurado " "para o UUID dessa rede externa. Para permitir que o agente L3 suporte " "diversas redes externas, o external_network_bridge e o " "gateway_external_network_id deverão ser deixados vazios." msgid "" "When proxying metadata requests, Neutron signs the Instance-ID header with a " "shared secret to prevent spoofing. You may select any string for a secret, " "but it must match here and in the configuration used by the Nova Metadata " "Server. NOTE: Nova uses the same config key, but in [neutron] section." msgstr "" "Ao configurar o proxy de solicitações de metadados, o Neutron designa o " "cabeçalho Instance-ID com um segredo compartilhado para evitar spoofing. É " "possível selecionar qualquer sequência de um segredo, mas ela deverá " "corresponder aqui e na configurada usada pelo Nova Metadata Server. NOTA: O " "Nova usa a mesma chave de configuração, mas na seção [neutro]." msgid "" "Where to store Neutron state files. This directory must be writable by the " "agent." msgstr "" "Onde armazenar arquivos de estado Neutron. O agente deve ter permissão de " "gravaçãoa neste diretório." msgid "" "With IPv6, the network used for the external gateway does not need to have " "an associated subnet, since the automatically assigned link-local address " "(LLA) can be used. However, an IPv6 gateway address is needed for use as the " "next-hop for the default route. If no IPv6 gateway address is configured " "here, (and only then) the neutron router will be configured to get its " "default route from router advertisements (RAs) from the upstream router; in " "which case the upstream router must also be configured to send these RAs. " "The ipv6_gateway, when configured, should be the LLA of the interface on the " "upstream router. If a next-hop using a global unique address (GUA) is " "desired, it needs to be done via a subnet allocated to the network and not " "through this parameter. " msgstr "" "Com IPv6, a rede usada para o gateway externo não precisa ter uma sub-rede " "associada, pois o Link-local Address (LLA) designado automaticamente pode " "ser usado. No entanto, um endereço do gateway IPv6 precisa ser usado como o " "próximo hop para a rota padrão. Se nenhum endereço do gateway IPv6 for " "configurado aqui, (somente então) o roteador Neutron será configurado para " "obter sua rota padrão de router advertisements (RAs) a partir do roteador de " "envio de dados, caso em que o roteador de envio de dados também deve ser " "configurado para enviar esses RAs. 
O ipv6_gateway, quando configurado, deve " "ser o LLA da interface no roteador de envio de dados. Se um próximo hop " "usando um global unique address (GUA) for desejado, isso precisará ser feito " "por meio de uma sub-rede alocada para a rede e não por meio desse parâmetro. " msgid "You must implement __call__" msgstr "Você deve implementar __call__" msgid "" "You must provide a config file for bridge - either --config-file or " "env[NEUTRON_TEST_CONFIG_FILE]" msgstr "" "É necessário fornecer um arquivo de configuração para a ponte - --config-" "file ou env[NEUTRON_TEST_CONFIG_FILE]" msgid "You must provide a revision or relative delta" msgstr "Você deve fornecer uma revisão ou um delta relativo" msgid "a subnetpool must be specified in the absence of a cidr" msgstr "Um conjunto de sub-redes deve ser especificado na ausência de um CIDR" msgid "add_ha_port cannot be called inside of a transaction." msgstr "add_ha_port não pode ser chamado dentro de uma transação." msgid "allocation_pools allowed only for specific subnet requests." msgstr "" "allocation_pools permitido somente para solicitações de sub-rede específicas." msgid "allocation_pools are not in the subnet" msgstr "allocation_pools não estão na sub-rede" msgid "allocation_pools use the wrong ip version" msgstr "allocation_pools usam versão de IP errada" msgid "already a synthetic attribute" msgstr "já é um atributo sintético" msgid "binding:profile value too large" msgstr "ligação: valor de perfil muito grande" #, python-format msgid "cannot perform %(event)s due to %(reason)s" msgstr "Não é possível executar %(event)s devido a %(reason)s" msgid "cidr and prefixlen must not be supplied together" msgstr "cidr e prefixlen não devem ser fornecidos juntos" #, python-format msgid "dhcp_agents_per_network must be >= 1. '%s' is invalid." msgstr "dhcp_agents_per_network deve ser maior ou igual a 1. '%s' é inválido." msgid "dns_domain cannot be specified without a dns_name" msgstr "dns_domain não pode ser especificado sem um dns_name" msgid "dns_name cannot be specified without a dns_domain" msgstr "dns_name não pode ser especificado sem um dns_domain" msgid "fixed_ip_address cannot be specified without a port_id" msgstr "fixed_ip_address não pode ser especificado sem um port_id" #, python-format msgid "gateway_ip %s is not in the subnet" msgstr "O gateway_ip %s não está na sub-rede" #, python-format msgid "has device owner %s" msgstr "possui o proprietário do dispositivo %s" msgid "in use" msgstr "em uso" #, python-format msgid "ip command failed on device %(dev_name)s: %(reason)s" msgstr "O comando ip falhou no dispositivo %(dev_name)s:%(reason)s" #, python-format msgid "ip command failed: %(reason)s" msgstr "O comando ip falhou: %(reason)s" #, python-format msgid "ip link capability %(capability)s is not supported" msgstr "A capacidade %(capability)s de link de IP não é suportada" #, python-format msgid "ip link command is not supported: %(reason)s" msgstr "O comando de link do IP não é suportado: %(reason)s" msgid "ip_version must be specified in the absence of cidr and subnetpool_id" msgstr "" "ip_version deve ser especificado na ausência de cidr e de subnetpool_id" msgid "ipv6_address_mode is not valid when ip_version is 4" msgstr "ipv6_address_mode não é válido quando ip_version for 4" msgid "ipv6_ra_mode is not valid when ip_version is 4" msgstr "O ipv6_ra_mode não é válido quando ip_version for 4" msgid "" "ipv6_ra_mode or ipv6_address_mode cannot be set when enable_dhcp is set to " "False." 
msgstr "" "O ipv6_ra_mode ou ipv6_address_mode não pode ser configurado quando " "enable_dhcp estiver configurado para False." #, python-format msgid "" "ipv6_ra_mode set to '%(ra_mode)s' with ipv6_address_mode set to " "'%(addr_mode)s' is not valid. If both attributes are set, they must be the " "same value" msgstr "" "O ipv6_ra_mode configurado para '%(ra_mode)s' com ipv6_address_mode " "configurado para '%(addr_mode)s' não é válido. Se ambos os atributos forem " "configurados, eles devem ter o mesmo valor" msgid "mac address update" msgstr "atualização do endereço mac" #, python-format msgid "" "max_l3_agents_per_router %(max_agents)s config parameter is not valid. It " "has to be greater than or equal to min_l3_agents_per_router %(min_agents)s." msgstr "" "O parâmetro de configuração max_l3_agents_per_router %(max_agents)s não é " "válido. Ele deve ser maior ou igual a min_l3_agents_per_router " "%(min_agents)s." msgid "must provide exactly 2 arguments - cidr and MAC" msgstr "Deve-se fornece exatamente 2 argumentos - cidr e MAC" msgid "network_type required" msgstr "network_type necessário" #, python-format msgid "network_type value '%s' not supported" msgstr "Valor de network_type '%s' não suportado" msgid "new subnet" msgstr "nova sub-rede" #, python-format msgid "physical_network '%s' unknown for VLAN provider network" msgstr "physical_network '%s' desconhecida para rede de provedor VLAN" #, python-format msgid "physical_network '%s' unknown for flat provider network" msgstr "physical_network '%s' desconhecida para rede de provedor flat" msgid "physical_network required for flat provider network" msgstr "physical_network ne4cessária para rede de provedor simples" #, python-format msgid "provider:physical_network specified for %s network" msgstr "provider:physical_network especificado para a rede %s" #, python-format msgid "rbac_db_model not found in %s" msgstr "rbac_db_model não localizado em %s" msgid "record" msgstr "registro" msgid "respawn_interval must be >= 0 if provided." msgstr "respawn_interval deve ser maior ou igual a 0, se fornecido." #, python-format msgid "segmentation_id out of range (%(min)s through %(max)s)" msgstr "segmentation_id fora da faixa (%(min)s até %(max)s)" msgid "segmentation_id requires physical_network for VLAN provider network" msgstr "O segmentation_id requer physical_network para rede de provedor VLAN" msgid "shared attribute switching to synthetic" msgstr "atributo compartilhado alternando para sintético" #, python-format msgid "" "subnetpool %(subnetpool_id)s cannot be updated when associated with shared " "address scope %(address_scope_id)s" msgstr "" "O conjunto de sub-rede %(subnetpool_id)s não pode ser atualizado quando " "associado ao escopo de endereço compartilhado %(address_scope_id)s" msgid "subnetpool_id and use_default_subnetpool cannot both be specified" msgstr "" "O subnetpool_id e o use_default_subnetpool não podem ser especificados. " msgid "the nexthop is not connected with router" msgstr "o nexthop não está conectado com o roteador" msgid "the nexthop is used by router" msgstr "o nexthop é usado pelo roteador" #, python-format msgid "unable to load %s" msgstr "Não é possível carregar %s" msgid "" "uuid provided from the command line so external_process can track us via /" "proc/cmdline interface." msgstr "" "O uuid fornecido a partir da linha de comandos para que external_process " "possa nos monitorar via interface /proc/cmdline." 
neutron-8.4.0/neutron/locale/ru/0000775000567000056710000000000013044373210017732 5ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/locale/ru/LC_MESSAGES/0000775000567000056710000000000013044373210021517 5ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/locale/ru/LC_MESSAGES/neutron.po0000664000567000056710000063401613044372760023574 0ustar jenkinsjenkins00000000000000# Translations template for neutron. # Copyright (C) 2015 ORGANIZATION # This file is distributed under the same license as the neutron project. # # Translators: # Lucas Palm , 2015. #zanata # OpenStack Infra , 2015. #zanata # Grigory Mokhin , 2016. #zanata # Lucas Palm , 2016. #zanata msgid "" msgstr "" "Project-Id-Version: neutron 8.2.1.dev52\n" "Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n" "POT-Creation-Date: 2016-09-01 18:10+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" "PO-Revision-Date: 2016-03-18 09:51+0000\n" "Last-Translator: Grigory Mokhin \n" "Language: ru\n" "Plural-Forms: nplurals=4; plural=(n%10==1 && n%100!=11 ? 0 : n%10>=2 && n" "%10<=4 && (n%100<12 || n%100>14) ? 1 : n%10==0 || (n%10>=5 && n%10<=9) || (n" "%100>=11 && n%100<=14)? 2 : 3);\n" "Generated-By: Babel 2.0\n" "X-Generator: Zanata 3.7.3\n" "Language-Team: Russian\n" #, python-format msgid "" "\n" "Command: %(cmd)s\n" "Exit code: %(code)s\n" "Stdin: %(stdin)s\n" "Stdout: %(stdout)s\n" "Stderr: %(stderr)s" msgstr "" "\n" "Команда: %(cmd)s\n" "Код выхода: %(code)s\n" "Stdin: %(stdin)s\n" "Stdout: %(stdout)s\n" "Stderr: %(stderr)s" #, python-format msgid "" "%(branch)s HEAD file does not match migration timeline head, expected: " "%(head)s" msgstr "" "Файл HEAD %(branch)s отличается от ожидаемого для графика миграции: %(head)s" #, python-format msgid "%(driver)s: Internal driver error." msgstr "%(driver)s: Внутренняя ошибка драйвера." 
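# Note on the Plural-Forms header above: gettext evaluates that C expression
# for a message count n and uses the result to index msgstr[0..3] of plural
# entries. A minimal Python sketch of the same four-form Russian rule
# (illustrative only; the function name is ours, not part of gettext):
#
#     def ru_plural_index(n: int) -> int:
#         # Mirrors the Plural-Forms expression in the header above.
#         if n % 10 == 1 and n % 100 != 11:
#             return 0
#         if 2 <= n % 10 <= 4 and not (12 <= n % 100 <= 14):
#             return 1
#         if n % 10 == 0 or 5 <= n % 10 <= 9 or 11 <= n % 100 <= 14:
#             return 2
#         return 3
#
#     assert [ru_plural_index(n) for n in (1, 3, 5, 11, 21)] == [0, 1, 2, 2, 0]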
#, python-format msgid "%(id)s is not a valid %(type)s identifier" msgstr "%(id)s не является допустимым идентификатором %(type)s" #, python-format msgid "" "%(invalid_dirs)s is invalid value for sort_dirs, valid value is '%(asc)s' " "and '%(desc)s'" msgstr "" "Значение %(invalid_dirs)s недопустимо для sort_dirs; допустимое значение: " "'%(asc)s' и '%(desc)s'" #, python-format msgid "%(key)s prohibited for %(tunnel)s provider network" msgstr "%(key)s запрещен для сети поставщика %(tunnel)s" #, python-format msgid "" "%(method)s called with network settings %(current)s (original settings " "%(original)s) and network segments %(segments)s" msgstr "" "Метод %(method)s вызывался с параметрами сети %(current)s (исходные " "параметры %(original)s) и сетевыми сегментами %(segments)s" #, python-format msgid "" "%(method)s called with port settings %(current)s (original settings " "%(original)s) host %(host)s (original host %(original_host)s) vif type " "%(vif_type)s (original vif type %(original_vif_type)s) vif details " "%(vif_details)s (original vif details %(original_vif_details)s) binding " "levels %(levels)s (original binding levels %(original_levels)s) on network " "%(network)s with segments to bind %(segments_to_bind)s" msgstr "" "%(method)s вызван с параметрами порта %(current)s (исходные параметры " "%(original)s) хост %(host)s (исходный хост %(original_host)s) тип vif " "%(vif_type)s (исходный тип vif %(original_vif_type)s) сведения vif " "%(vif_details)s (исходные сведения vif %(original_vif_details)s) уровни " "связывания %(levels)s (исходные уровни связывания %(original_levels)s) в " "сети %(network)s с сегментами для связывания %(segments_to_bind)s" #, python-format msgid "" "%(method)s called with subnet settings %(current)s (original settings " "%(original)s)" msgstr "" "Метод %(method)s вызывался с параметрами подсети %(current)s (исходные " "параметры %(original)s)" #, python-format msgid "%(method)s failed." msgstr "Не удалось выполнить %(method)s." #, python-format msgid "%(name)s '%(addr)s' does not match the ip_version '%(ip_version)s'" msgstr "%(name)s '%(addr)s' не соответствует версии IP '%(ip_version)s'" #, python-format msgid "%(param)s must be in %(range)s range." msgstr "%(param)s должен быть в диапазоне %(range)s." #, python-format msgid "%s cannot be called while in offline mode" msgstr "%s нельзя вызывать в режиме без подключения" #, python-format msgid "%s is invalid attribute for sort_key" msgstr "Атрибут %s недопустим для sort_key" #, python-format msgid "%s is invalid attribute for sort_keys" msgstr "Атрибут %s недопустим для sort_keys" #, python-format msgid "%s is not a valid VLAN tag" msgstr "%s не является допустимым тегом VLAN" #, python-format msgid "%s must be specified" msgstr "Необходимо указать %s" #, python-format msgid "%s must implement get_port_from_device or get_ports_from_devices." msgstr "%s должен реализовать get_port_from_device или get_ports_from_devices." #, python-format msgid "%s prohibited for VLAN provider network" msgstr "%s запрещено для сети VLAN провайдера" #, python-format msgid "%s prohibited for flat provider network" msgstr "%s запрещено для одноуровневой сети провайдера" #, python-format msgid "%s prohibited for local provider network" msgstr "%s запрещено для локальной сети провайдера" #, python-format msgid "" "'%(data)s' contains '%(length)s' characters. Adding a domain name will cause " "it to exceed the maximum length of a FQDN of '%(max_len)s'" msgstr "" "Длина '%(data)s' составляет '%(length)s' символов. 
Добавление доменного " "имени приведет к превышению максимальной длины полного имени домена, " "'%(max_len)s'" #, python-format msgid "" "'%(data)s' contains '%(length)s' characters. Adding a sub-domain will cause " "it to exceed the maximum length of a FQDN of '%(max_len)s'" msgstr "" "Длина '%(data)s' составляет '%(length)s' символов. Добавление поддомена " "приведет к превышению максимальной длины полного имени домена, '%(max_len)s'" #, python-format msgid "'%(data)s' exceeds maximum length of %(max_len)s" msgstr "'%(data)s' превышает максимальную длину %(max_len)s" #, python-format msgid "'%(data)s' is not an accepted IP address, '%(ip)s' is recommended" msgstr "IP-адрес '%(data)s' неприемлем, рекомендуется использовать '%(ip)s'" #, python-format msgid "'%(data)s' is not in %(valid_values)s" msgstr "'%(data)s' отсутствует в %(valid_values)s" #, python-format msgid "'%(data)s' is too large - must be no larger than '%(limit)d'" msgstr "" "Слишком большое значение '%(data)s' - требуется значение не больше " "'%(limit)d'" #, python-format msgid "'%(data)s' is too small - must be at least '%(limit)d'" msgstr "" "Слишком низкое значение '%(data)s' - требуется значение не меньше '%(limit)d'" #, python-format msgid "'%(data)s' isn't a recognized IP subnet cidr, '%(cidr)s' is recommended" msgstr "" "Данные '%(data)s' не распознаются как cidr IP-подсети; рекомендуется " "'%(cidr)s'" #, python-format msgid "'%(data)s' not a valid PQDN or FQDN. Reason: %(reason)s" msgstr "'%(data)s' не является допустимым PQDN или FQDN. Причина: %(reason)s" #, python-format msgid "'%(host)s' is not a valid nameserver. %(msg)s" msgstr "'%(host)s' не является допустимым сервером имен. %(msg)s" #, python-format msgid "'%s' Blank strings are not permitted" msgstr "'%s' - Пустые строки недопустимы" #, python-format msgid "'%s' cannot be converted to boolean" msgstr "'%s' невозможно преобразовать в булевский тип" #, python-format msgid "'%s' cannot be converted to lowercase string" msgstr "'%s' невозможно преобразовать в строку в нижнем регистре" #, python-format msgid "'%s' contains whitespace" msgstr "'%s' содержит пробел, символ табуляции или пустой строки" #, python-format msgid "'%s' exceeds the 255 character FQDN limit" msgstr "Длина '%s' превышает ограничение FQDN в 255 символов" #, python-format msgid "'%s' is a FQDN. It should be a relative domain name" msgstr "" "'%s' - это полное имя домена. 
Должно быть указано относительное имя домена" #, python-format msgid "'%s' is not a FQDN" msgstr "'%s' - это не полное имя домена" #, python-format msgid "'%s' is not a dictionary" msgstr "'%s' не является словарем" #, python-format msgid "'%s' is not a list" msgstr "'%s' не является списком" #, python-format msgid "'%s' is not a valid IP address" msgstr "'%s' не является допустимым IP-адресом" #, python-format msgid "'%s' is not a valid IP subnet" msgstr "'%s' не является допустимой IP-подсетью" #, python-format msgid "'%s' is not a valid MAC address" msgstr "'%s' не является допустимым MAC-адресом" #, python-format msgid "'%s' is not a valid RBAC object type" msgstr "'%s' не является допустимым типом объекта RBAC" #, python-format msgid "'%s' is not a valid UUID" msgstr "'%s' не является допустимым UUID" #, python-format msgid "'%s' is not a valid boolean value" msgstr "'%s' не является допустимым булевским значением" #, python-format msgid "'%s' is not a valid input" msgstr "Недопустимые входные параметры: '%s'" #, python-format msgid "'%s' is not a valid string" msgstr "'%s' не является допустимой строкой" #, python-format msgid "'%s' is not an integer" msgstr "'%s' не является целым" #, python-format msgid "'%s' is not an integer or uuid" msgstr "'%s' не является целым или uuid" #, python-format msgid "'%s' is not of the form <key>=[value]" msgstr "'%s' не в форме <ключ>=[значение]" #, python-format msgid "'%s' is not supported for filtering" msgstr "'%s' не поддерживается для фильтрации" #, python-format msgid "'%s' must be a non negative decimal." msgstr "Значение '%s' должно быть неотрицательным десятичным числом." #, python-format msgid "'%s' should be non-negative" msgstr "Значение '%s' должно быть неотрицательным" msgid "'.' searches are not implemented" msgstr "Поиск '.' не реализован" #, python-format msgid "'module' object has no attribute '%s'" msgstr "Объект 'module' не содержит атрибута '%s'" msgid "'port_max' is smaller than 'port_min'" msgstr "'port_max' меньше чем 'port_min'" msgid "" "(Deprecated. Use '--subproject neutron-SERVICE' instead.) The advanced " "service to execute the command against." msgstr "" "(Устарело. Используйте параметр '--subproject neutron-SERVICE'). " "Дополнительная служба для выполнения команды." msgid "0 is not allowed as CIDR prefix length" msgstr "Нулевое значение запрещено в качестве длины префикса CIDR" msgid "" "32-bit BGP identifier, typically an IPv4 address owned by the system running " "the BGP DrAgent." msgstr "" "32-разрядный идентификатор BGP, обычно - адрес IPv4, принадлежащий системе, " "в которой работает BGP DrAgent." msgid "A QoS driver must be specified" msgstr "Должен быть указан драйвер QoS" msgid "A cidr must be specified in the absence of a subnet pool" msgstr "Значение cidr должно быть указано при отсутствии пула подсетей" msgid "" "A decimal value as Vendor's Registered Private Enterprise Number as required " "by RFC3315 DUID-EN." msgstr "" "Десятичное значение в качестве зарегистрированного номера частного " "предприятия производителя в соответствии с RFC3315 DUID-EN." #, python-format msgid "A default external network already exists: %(net_id)s." msgstr "Внешняя сеть по умолчанию уже существует: %(net_id)s." msgid "" "A default subnetpool for this IP family has already been set. Only one " "default may exist per IP family" msgstr "" "Пул подсетей по умолчанию уже задан для этой версии IP. 
Для версии IP может " "быть задан только один пул по умолчанию" msgid "A metering driver must be specified" msgstr "Необходимо указать драйвер измерений" msgid "A password must be supplied when using auth_type md5." msgstr "При использовании auth_type md5 необходимо указать пароль." msgid "API for retrieving service providers for Neutron advanced services" msgstr "API для получения поставщиков служб для расширенных служб Neutron" msgid "Aborting periodic_sync_routers_task due to an error." msgstr "Задача periodic_sync_routers_task прервана из-за ошибки." msgid "Access to this resource was denied." msgstr "Доступ к этому ресурсу запрещен." msgid "Action to be executed when a child process dies" msgstr "Действие, выполняемое при завершении дочернего процесса" msgid "" "Add comments to iptables rules. Set to false to disallow the addition of " "comments to generated iptables rules that describe each rule's purpose. " "System must support the iptables comments module for addition of comments." msgstr "" "Добавить комментарии в правила iptables. Укажите значение false, чтобы не " "добавлять комментарии в сгенерированные правила iptables с описанием каждого " "правила. Добавление комментариев возможно, только если система поддерживает " "модуль комментариев iptables." msgid "Address not present on interface" msgstr "Адрес не задан для интерфейса" #, python-format msgid "Address scope %(address_scope_id)s could not be found" msgstr "Не удалось найти адресную область %(address_scope_id)s" msgid "" "Address to listen on for OpenFlow connections. Used only for 'native' driver." msgstr "" "Адрес для обработки запросов на соединение OpenFlow. Используется только для " "'встроенного' драйвера." msgid "Adds external network attribute to network resource." msgstr "Добавляет атрибут внешней сети к сетевому ресурсу." msgid "Adds test attributes to core resources." msgstr "Добавляет атрибуты теста в базовые ресурсы." #, python-format msgid "Agent %(id)s could not be found" msgstr "Не найден агент %(id)s" #, python-format msgid "Agent %(id)s is not a L3 Agent or has been disabled" msgstr "Агент %(id)s выключен или не является агентом L3" #, python-format msgid "Agent %(id)s is not a valid DHCP Agent or has been disabled" msgstr "Агент %(id)s выключен или не является допустимым агентом DHCP" msgid "Agent has just been revived" msgstr "Агент был только что заново включен" msgid "" "Agent starts with admin_state_up=False when enable_new_agents=False. In the " "case, user's resources will not be scheduled automatically to the agent " "until admin changes admin_state_up to True." msgstr "" "Агент запускается с параметром admin_state_up=False, если задано " "enable_new_agents=False. В этом случае пользовательские ресурсы не будут " "автоматически запланированы в агенте, пока администратор не изменит " "значение параметра admin_state_up на True." #, python-format msgid "Agent updated: %(payload)s" msgstr "Агент обновлен: %(payload)s" #, python-format msgid "" "Agent with agent_type=%(agent_type)s and host=%(host)s could not be found" msgstr "Не найден агент с agent_type=%(agent_type)s и host=%(host)s" msgid "Allow auto scheduling networks to DHCP agent." msgstr "Разрешить автоматическое планирование сетей для агента DHCP." msgid "Allow auto scheduling of routers to L3 agent." msgstr "Разрешить автоматическое планирование маршрутизаторов для агента L3." msgid "" "Allow overlapping IP support in Neutron. 
Attention: the following parameter " "MUST be set to False if Neutron is being used in conjunction with Nova " "security groups." msgstr "" "Разрешить поддержку перекрывающихся IP-адресов в Neutron. Внимание: " "следующий параметр ДОЛЖЕН быть задан равным False, если Neutron используется " "совместно с группами защиты Nova." msgid "Allow running metadata proxy." msgstr "Разрешить выполнение прокси метаданных." msgid "Allow sending resource operation notification to DHCP agent" msgstr "Разрешить отправку уведомления об операции ресурса агенту DHCP" msgid "Allow the creation of PTR records" msgstr "Разрешить создание записей PTR" msgid "Allow the usage of the bulk API" msgstr "Разрешить использование Bulk API" msgid "Allow the usage of the pagination" msgstr "Разрешить использование разбиения на страницы" msgid "Allow the usage of the sorting" msgstr "Разрешить использование сортировки" msgid "Allow to perform insecure SSL (https) requests to nova metadata" msgstr "Разрешить незащищенные запросы SSL (https) метаданных nova" msgid "Allowed address pairs must be a list." msgstr "Разрешенные пары адресов должны быть указаны в виде списка." msgid "AllowedAddressPair must contain ip_address" msgstr "AllowedAddressPair должен содержать атрибут ip_address" msgid "" "Allows for serving metadata requests coming from a dedicated metadata access " "network whose CIDR is 169.254.169.254/16 (or larger prefix), and is " "connected to a Neutron router from which the VMs send metadata:1 request. In " "this case DHCP Option 121 will not be injected in VMs, as they will be able " "to reach 169.254.169.254 through a router. This option requires " "enable_isolated_metadata = True." msgstr "" "Разрешает обслуживать запросы метаданных, исходящие из выделенной сети для " "метаданных с CIDR 169.254.169.254/16 (или большим префиксом), подключенной к " "маршрутизатору Neutron, из которой VM отправляют запрос metadata:1. В этом " "случае DHCP Option 121 не будет добавляться в VM, так как они обращаются к " "сети 169.254.169.254 через маршрутизатор. Для этой опции необходимо указать " "enable_isolated_metadata = True." #, python-format msgid "" "Already hosting BGP Speaker for local_as=%(current_as)d with router_id=" "%(rtid)s." msgstr "" "Уже развернут источник BGP для local_as=%(current_as)d с router_id=%(rtid)s." #, python-format msgid "" "Already hosting maximum number of BGP Speakers. Allowed scheduled count=" "%(count)d" msgstr "" "Уже развернуто максимальное число источников BGP. Разрешено запланировать " "%(count)d" msgid "An RBAC policy already exists with those values." msgstr "Стратегия RBAC с такими параметрами уже существует." msgid "An identifier must be specified when updating a subnet" msgstr "При обновлении подсети необходимо указать идентификатор" msgid "An interface driver must be specified" msgstr "Не указан драйвер интерфейса" msgid "" "An ordered list of extension driver entrypoints to be loaded from the " "neutron.ml2.extension_drivers namespace. For example: extension_drivers = " "port_security,qos" msgstr "" "Упорядоченный список конечных точек драйверов расширения, загружаемых из " "пространства имен neutron.ml2.extension_drivers. Пример: extension_drivers = " "port_security,qos" msgid "" "An ordered list of networking mechanism driver entrypoints to be loaded from " "the neutron.ml2.mechanism_drivers namespace." msgstr "" "Упорядоченный список конечных точек драйверов механизмов создания сетей, " "загружаемых из пространства имен neutron.ml2.mechanism_drivers." 
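# Note on the multi-line entries above: as with C string literals, adjacent
# quoted segments of a msgid or msgstr are concatenated, so the trailing
# space inside each segment is significant. A minimal Python sketch of the
# equivalent implicit concatenation (illustrative only):
#
#     text = ("Разрешить поддержку перекрывающихся "
#             "IP-адресов в Neutron.")
#     assert text == "Разрешить поддержку перекрывающихся IP-адресов в Neutron."
#     # Dropping the trailing space after "перекрывающихся " would fuse the words.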
msgid "An unexpected internal error occurred." msgstr "Произошла непредвиденная внутренняя ошибка." msgid "An unknown error has occurred. Please try your request again." msgstr "" "Произошла неизвестная ошибка. Пожалуйста, попытайтесь повторить ваш запрос." msgid "Async process didn't respawn" msgstr "Асинхронный процесс не перезапустился" #, python-format msgid "Attribute '%s' not allowed in POST" msgstr "Атрибут '%s' недопустим в POST" #, python-format msgid "Authentication type not supported. Requested type=%(auth_type)s." msgstr "Тип идентификации не поддерживается. Запрошенный тип: %(auth_type)s." msgid "Authorization URL for connecting to designate in admin context" msgstr "" "URL авторизации для подключения к службе DNS Designate в административном " "контексте" msgid "Automatically remove networks from offline DHCP agents." msgstr "Автоматически удалять сети из отключенных агентов DHCP." msgid "" "Automatically reschedule routers from offline L3 agents to online L3 agents." msgstr "" "Автоматически перепланировать маршрутизаторы с отключенных агентов L3 на " "включенные агенты L3." msgid "Availability zone of this node" msgstr "Зона доступности узла" #, python-format msgid "AvailabilityZone %(availability_zone)s could not be found." msgstr "Не найдена зона доступности %(availability_zone)s." msgid "Available commands" msgstr "Доступные команды" #, python-format msgid "" "BGP Peer %(peer_ip)s for remote_as=%(remote_as)s, running for BGP Speaker " "%(speaker_as)d not added yet." msgstr "" "Партнер BGP %(peer_ip)s для remote_as=%(remote_as)s, работающий для " "источника BGP %(speaker_as)d, еще не добавлен." #, python-format msgid "" "BGP Speaker %(bgp_speaker_id)s is already configured to peer with a BGP Peer " "at %(peer_ip)s, it cannot peer with BGP Peer %(bgp_peer_id)s." msgstr "" "Источник BGP %(bgp_speaker_id)s уже настроен как партнер BGP для " "%(peer_ip)s, он не может быть партнером BGP для %(bgp_peer_id)s." #, python-format msgid "" "BGP Speaker for local_as=%(local_as)s with router_id=%(rtid)s not added yet." msgstr "" "Источник BGP для local_as=%(local_as)s с router_id=%(rtid)s еще не добавлен." #, python-format msgid "" "BGP peer %(bgp_peer_id)s is not associated with BGP speaker " "%(bgp_speaker_id)s." msgstr "" "Партнер BGP %(bgp_peer_id)s не связан с источником BGP %(bgp_speaker_id)s." #, python-format msgid "BGP peer %(bgp_peer_id)s not authenticated." msgstr "Партнер BGP %(bgp_peer_id)s не идентифицирован." #, python-format msgid "BGP peer %(id)s could not be found." msgstr "Не найден партнер BGP %(id)s." #, python-format msgid "" "BGP speaker %(bgp_speaker_id)s is not hosted by the BgpDrAgent %(agent_id)s." msgstr "Источник BGP %(bgp_speaker_id)s не работает в BgpDrAgent %(agent_id)s." #, python-format msgid "BGP speaker %(id)s could not be found." msgstr "Не найден источник BGP %(id)s." msgid "BGP speaker driver class to be instantiated." msgstr "Класс драйвера источника BGP для создания экземпляра." msgid "Backend does not support VLAN Transparency." msgstr "Базовый сервер не поддерживает прозрачный режим VLAN." 
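# Note on the "#, python-format" flags used throughout this catalog: messages
# are rendered with Python %-formatting against a keyword mapping, so every
# %(name)s placeholder must appear verbatim in the msgstr, as the entries
# above preserve %(id)s, %(host)s, etc. A minimal sketch of why a renamed
# placeholder breaks at render time (illustrative only; the msgid is taken
# from this file):
#
#     translated = "Не найден агент %(id)s"       # placeholder kept intact
#     print(translated % {"id": "42"})            # -> Не найден агент 42
#     broken = "Не найден агент %(agent)s"        # placeholder renamed
#     try:
#         broken % {"id": "42"}
#     except KeyError as exc:                     # raises KeyError: 'agent'
#         print("missing placeholder:", exc)
#
# msgfmt --check-format flags such mismatches when compiling the catalog.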
#, python-format msgid "" "Bad prefix or mac format for generating IPv6 address by EUI-64: %(prefix)s, " "%(mac)s:" msgstr "" "Неверный префикс или формат mac для генерации адреса IPv6 с помощью EUI-64: " "%(prefix)s, %(mac)s:" #, python-format msgid "Bad prefix type for generate IPv6 address by EUI-64: %s" msgstr "Неверный тип префикса для генерации адреса IPv6 с помощью EUI-64: %s" #, python-format msgid "Base MAC: %s" msgstr "Базовый MAC: %s" msgid "" "Base log dir for dnsmasq logging. The log contains DHCP and DNS log " "information and is useful for debugging issues with either DHCP or DNS. If " "this section is null, disable dnsmasq log." msgstr "" "Базовый каталог для файлов протокола dnsmasq. Протокол содержит сведения " "DHCP и DNS и используется для отладки ошибок в DHCP или DNS. Если этот " "раздел пуст, протокол dnsmasq отключен." #, python-format msgid "BgpDrAgent %(agent_id)s is already associated to a BGP speaker." msgstr "BgpDrAgent %(agent_id)s уже связан с источником BGP." #, python-format msgid "BgpDrAgent %(id)s is invalid or has been disabled." msgstr "BgpDrAgent %(id)s недопустим или выключен." #, python-format msgid "BgpDrAgent updated: %s" msgstr "BgpDrAgent обновлен: %s" msgid "Body contains invalid data" msgstr "В теле содержатся недопустимые данные" msgid "Both network_id and router_id are None. One must be provided." msgstr "" "И network_id, и router_id имеют значение None. Необходимо задать хотя бы " "один параметр." #, python-format msgid "Bridge %(bridge)s does not exist." msgstr "Мост %(bridge)s не существует." #, python-format msgid "Bridge %s does not exist" msgstr "Мост %s не существует" msgid "Bulk operation not supported" msgstr "Групповая операция не поддерживается" msgid "CIDR to monitor" msgstr "CIDR для мониторинга" #, python-format msgid "Callback for %(resource_type)s not found" msgstr "Обратный вызов для %(resource_type)s не найден" #, python-format msgid "Callback for %(resource_type)s returned wrong resource type" msgstr "Обратный вызов %(resource_type)s вернул неправильный тип ресурса" #, python-format msgid "Cannot add floating IP to port %s that has no fixed IPv4 addresses" msgstr "" "Не удается добавить нефиксированный IP-адрес в порт %s, не имеющий " "фиксированных адресов IPv4" #, python-format msgid "Cannot add floating IP to port on subnet %s which has no gateway_ip" msgstr "" "Невозможно добавить нефиксированный IP-адрес для порта в подсети %s, для " "которой не указан gateway_ip" #, python-format msgid "Cannot add multiple callbacks for %(resource_type)s" msgstr "Не удалось добавить несколько обратных вызовов для %(resource_type)s" #, python-format msgid "Cannot allocate IPv%(req_ver)s subnet from IPv%(pool_ver)s subnet pool" msgstr "" "Не удалось выделить подсеть IPv%(req_ver)s из пула подсетей IPv%(pool_ver)s" msgid "Cannot allocate requested subnet from the available set of prefixes" msgstr "Невозможно выделить запрошенную подсеть из доступного набора префиксов" #, python-format msgid "" "Cannot associate floating IP %(floating_ip_address)s (%(fip_id)s) with port " "%(port_id)s using fixed IP %(fixed_ip)s, as that fixed IP already has a " "floating IP on external network %(net_id)s." msgstr "" "Нефиксированный IP-адрес %(floating_ip_address)s (%(fip_id)s) невозможно " "связать с портом %(port_id)s, который использует фиксированный IP-адрес " "%(fixed_ip)s, так как для этого фиксированного IP-адреса уже есть " "нефиксированный IP-адрес во внешней сети %(net_id)s." msgid "" "Cannot change HA attribute of active routers. 
Please set router " "admin_state_up to False prior to upgrade." msgstr "" "Нельзя изменить атрибут HA для активного маршрутизатора. Присвойте параметру " "admin_state_up маршрутизатора значение False и повторите операцию." #, python-format msgid "" "Cannot create floating IP and bind it to %s, since that is not an IPv4 " "address." msgstr "" "Не удается создать нефиксированный IP-адрес и связать его с %s, так как он " "не является IPv4 адресом." #, python-format msgid "" "Cannot create floating IP and bind it to Port %s, since that port is owned " "by a different tenant." msgstr "" "Невозможно создать нефиксированный IP-адрес и связать его с портом %s, так " "как этот порт принадлежит другому арендатору." msgid "Cannot create resource for another tenant" msgstr "Невозможно создать ресурс для другого арендатора" msgid "Cannot disable enable_dhcp with ipv6 attributes set" msgstr "Невозможно отключить enable_dhcp, если заданы атрибуты ipv6" #, python-format msgid "Cannot find %(table)s with %(col)s=%(match)s" msgstr "Не удается найти %(table)s с %(col)s=%(match)s" #, python-format msgid "Cannot handle subnet of type %(subnet_type)s" msgstr "Не удается обработать подсеть с типом %(subnet_type)s" msgid "Cannot have multiple IPv4 subnets on router port" msgstr "С портом маршрутизатора не может быть связано несколько подсетей IPv4" #, python-format msgid "" "Cannot have multiple router ports with the same network id if both contain " "IPv6 subnets. Existing port %(p)s has IPv6 subnet(s) and network id %(nid)s" msgstr "" "Невозможно иметь несколько портов маршрутизатора с одинаковыми ИД сети, если " "оба содержат подсети IPv6. Существующий порт %(p)s имеет подсети IPv6 и ИД " "сети %(nid)s" #, python-format msgid "" "Cannot host distributed router %(router_id)s on legacy L3 agent %(agent_id)s." msgstr "" "Распределенный маршрутизатор %(router_id)s не может работать на устаревшем " "агенте L3 %(agent_id)s." msgid "Cannot match priority on flow deletion or modification" msgstr "Невозможно сравнить приоритет при удалении или изменении потока" msgid "Cannot mix IPv4 and IPv6 prefixes in a subnet pool." msgstr "Нельзя смешивать префиксы IPv4 и IPv6 в пуле подсетей." msgid "Cannot specify both --service and --subproject." msgstr "Не удается указать обе опции --service и --subproject." msgid "Cannot specify both subnet-id and port-id" msgstr "subnet-id и port-id нельзя указывать одновременно" msgid "Cannot understand JSON" msgstr "Невозможно распознать JSON" #, python-format msgid "Cannot update read-only attribute %s" msgstr "Невозможно обновить атрибут %s, доступный только для чтения" msgid "" "Cannot upgrade active router to distributed. Please set router " "admin_state_up to False prior to upgrade." msgstr "" "Не удается изменить активный маршрутизатор на распределенный. Присвойте " "параметру admin_state_up маршрутизатора значение False и повторите операцию " "изменения." msgid "Certificate Authority public key (CA cert) file for ssl" msgstr "Файл общего ключа CA (CA cert) для ssl" #, python-format msgid "" "Change would make usage less than 0 for the following resources: %(unders)s." msgstr "" "В случае изменения число использования было бы меньше 0 для следующих " "ресурсов: %(unders)s." 
msgid "Check ebtables installation" msgstr "Проверить установку ebtables" msgid "Check for ARP header match support" msgstr "Проверить наличие поддержки сопоставления заголовка ARP" msgid "Check for ARP responder support" msgstr "Проверка наличия поддержки промежуточного клиента ARP" msgid "Check for ICMPv6 header match support" msgstr "Проверить наличие поддержки сопоставления заголовка ICMPv6" msgid "Check for OVS Geneve support" msgstr "Проверить наличие поддержки OVS Geneve" msgid "Check for OVS vxlan support" msgstr "Проверить наличие поддержки OVS vxlan" msgid "Check for VF management support" msgstr "Проверить наличие поддержки управления VF" msgid "Check for iproute2 vxlan support" msgstr "Проверка наличия поддержки iproute2 vxlan" msgid "Check for nova notification support" msgstr "Проверка наличия поддержки уведомлений nova" msgid "Check for patch port support" msgstr "Проверка наличия поддержки портов исправлений" msgid "Check ip6tables installation" msgstr "Проверить установку ip6tables" msgid "Check ipset installation" msgstr "Проверить установку ipset" msgid "Check keepalived IPv6 support" msgstr "Проверить наличие поддержки IPv6 в конфигурации keepalived" msgid "Check minimal dibbler version" msgstr "Проверить минимальную версию dibbler" msgid "Check minimal dnsmasq version" msgstr "Проверить минимальную версию dnsmasq" msgid "Check netns permission settings" msgstr "Проверить параметры прав доступа netns" msgid "Check ovs conntrack support" msgstr "Проверить поддержку conntrack в ovs" msgid "Check ovsdb native interface support" msgstr "Проверить поддержку собственного интерфейса ovsdb" #, python-format msgid "" "Cidr %(subnet_cidr)s of subnet %(subnet_id)s overlaps with cidr %(cidr)s of " "subnet %(sub_id)s" msgstr "" "Cidr %(subnet_cidr)s подсети %(subnet_id)s перекрывается с cidr %(cidr)s " "подсети %(sub_id)s" msgid "Class not found." msgstr "Класс не найден." msgid "Cleanup resources of a specific agent type only." msgstr "Очистить ресурсы только для заданного типа агента." msgid "Client certificate for nova metadata api server." msgstr "Сертификат клиента для сервера API метаданных nova." msgid "" "Comma-separated list of <network_device>:<vfs_to_exclude> tuples, mapping " "network_device to the agent's node-specific list of virtual functions that " "should not be used for virtual networking. vfs_to_exclude is a semicolon-" "separated list of virtual functions to exclude from network_device. The " "network_device in the mapping should appear in the physical_device_mappings " "list." msgstr "" "Список записей, разделенных запятыми, вида <сетевое-устройство>:<исключаемые-" "vfs>, связывающих сетевое устройство со списком виртуальных функций узла " "агента, которые не должны использоваться для виртуальных сетей. исключаемые-" "vfs - это список виртуальных функций, исключаемых для сетевого устройства, " "разделенный точкой с запятой. Связанное сетевое устройство должно входить в " "список physical_device_mappings." msgid "" "Comma-separated list of <physical_network>:<bridge> tuples mapping physical " "network names to the agent's node-specific Open vSwitch bridge names to be " "used for flat and VLAN networks. The length of bridge names should be no " "more than 11. Each bridge must exist, and should have a physical network " "interface configured as a port. All physical networks configured on the " "server should have mappings to appropriate bridges on each agent. Note: If " "you remove a bridge from this mapping, make sure to disconnect it from the " "integration bridge as it won't be managed by the agent anymore. 
Deprecated " "for ofagent." msgstr "" "Список записей, разделенных запятыми, вида <физическая-сеть>:<мост>, " "связывающих имена физических сетей с именами мостов Open vSwitch узла " "агента для одноуровневых сетей и сетей VLAN. Длина имени моста не должна " "превышать 11 символов. Все мосты должны существовать, и на каждом должен " "быть настроен физический сетевой интерфейс как порт. Все физические сети, " "настроенные на сервере, должны быть связаны с соответствующими мостами в " "каждом агенте. Примечание: если мост удален из этой карты связей, то его " "необходимо отключить от моста интеграции, так как он более не будет " "управляться агентом. Устарело для ofagent." msgid "" "Comma-separated list of <physical_network>:<network_device> tuples mapping " "physical network names to the agent's node-specific physical network device " "interfaces of SR-IOV physical function to be used for VLAN networks. All " "physical networks listed in network_vlan_ranges on the server should have " "mappings to appropriate interfaces on each agent." msgstr "" "Список записей, разделенных запятыми, вида <физическая-сеть>:<сетевое-" "устройство>, связывающих имена физических сетей с физическим сетевым " "устройством узла агента физической функции SR-IOV для сетей VLAN. Все " "физические сети, перечисленные в network_vlan_ranges на сервере, должны " "иметь связи с соответствующими интерфейсами на каждом агенте." msgid "" "Comma-separated list of <physical_network>:<physical_interface> tuples " "mapping physical network names to the agent's node-specific physical network " "interfaces to be used for flat and VLAN networks. All physical networks " "listed in network_vlan_ranges on the server should have mappings to " "appropriate interfaces on each agent." msgstr "" "Список записей, разделенных запятыми, вида <физическая-сеть>:<физический-" "интерфейс>, связывающих имена физических сетей с физическими сетевыми " "интерфейсами узла агента для одноуровневых сетей и сетей VLAN. Все " "физические сети, перечисленные в network_vlan_ranges на сервере, должны " "иметь связи с соответствующими интерфейсами на каждом агенте." msgid "" "Comma-separated list of <tun_min>:<tun_max> tuples enumerating ranges of GRE " "tunnel IDs that are available for tenant network allocation" msgstr "" "Разделенный запятой список кортежей <tun_min>:<tun_max>, в котором " "перечислены диапазоны ИД туннелей GRE, доступные для выделения сети " "арендатора" msgid "" "Comma-separated list of <vni_min>:<vni_max> tuples enumerating ranges of " "Geneve VNI IDs that are available for tenant network allocation" msgstr "" "Разделенный запятой список кортежей <vni_min>:<vni_max>, в котором " "перечислены ИД VNI Geneve, доступные для выделения сети арендатора" msgid "" "Comma-separated list of <vni_min>:<vni_max> tuples enumerating ranges of " "VXLAN VNI IDs that are available for tenant network allocation" msgstr "" "Разделенный запятой список кортежей <vni_min>:<vni_max>, в котором " "перечислены идентификаторы VNI VXLAN, доступные для выделения сети арендатора" msgid "" "Comma-separated list of supported PCI vendor devices, as defined by " "vendor_id:product_id according to the PCI ID Repository. Default enables " "support for Intel and Mellanox SR-IOV capable NICs." msgstr "" "Список записей, разделенных запятыми, включающий поддерживаемые устройства " "PCI, заданные в формате ИД-поставщика:ИД-продукта в соответствии с реестром " "ИД PCI. Значение по умолчанию включает поддержку карт сетевых адаптеров " "Intel и Mellanox с поддержкой SR-IOV." msgid "" "Comma-separated list of the DNS servers which will be used as forwarders." 
msgstr "" "Разделенный запятыми список серверов DNS, которые будут использоваться для " "пересылки." msgid "Command to execute" msgstr "Выполняемая команда" msgid "Config file for interface driver (You may also use l3_agent.ini)" msgstr "" "Файл конфигурации для драйвера интерфейса (Можно также использовать l3_agent." "ini)" #, python-format msgid "Conflicting value ethertype %(ethertype)s for CIDR %(cidr)s" msgstr "Конфликтующее значение ethertype %(ethertype)s для CIDR %(cidr)s" msgid "" "Controls whether the neutron security group API is enabled in the server. It " "should be false when using no security groups or using the nova security " "group API." msgstr "" "Контролирует, включен ли API групп защиты neutron на сервере. Значение " "должно быть false, когда группы защиты не используются или используется API " "групп защиты nova." #, python-format msgid "Could not bind to %(host)s:%(port)s after trying for %(time)d seconds" msgstr "" "Не удалось выполнить привязку к %(host)s:%(port)s за %(time)d секунд" #, python-format msgid "Could not connect to %s" msgstr "Не удается подключиться к %s" msgid "Could not deserialize data" msgstr "Не удалось десериализовать данные" #, python-format msgid "Could not retrieve schema from %(conn)s: %(err)s" msgstr "Не удается получить схему из %(conn)s: %(err)s" #, python-format msgid "" "Current gateway ip %(ip_address)s already in use by port %(port_id)s. Unable " "to update." msgstr "" "Текущий IP-адрес шлюза %(ip_address)s уже используется портом %(port_id)s. " "Обновление невозможно." msgid "Currently update of HA mode for a DVR/HA router is not supported." msgstr "" "В настоящее время обновление режима высокой готовности для маршрутизатора " "DVR/HA не поддерживается." msgid "Currently update of HA mode for a distributed router is not supported." msgstr "" "В настоящее время обновление режима высокой готовности для распределенного " "маршрутизатора не поддерживается." msgid "" "Currently update of distributed mode for a DVR/HA router is not supported" msgstr "" "В настоящее время обновление распределенного режима для маршрутизатора DVR/" "HA не поддерживается." msgid "Currently update of distributed mode for an HA router is not supported." msgstr "" "В настоящее время обновление распределенного режима для маршрутизатора " "высокой готовности не поддерживается." msgid "" "Currently updating a router from DVR/HA to non-DVR non-HA is not supported." msgstr "" "В настоящее время обновление маршрутизатора DVR/HA на не DVR и не HA не " "поддерживается." msgid "Currently updating a router to DVR/HA is not supported." msgstr "В настоящее время обновление маршрутизатора до DVR/HA не поддерживается." msgid "" "DHCP lease duration (in seconds). Use -1 to tell dnsmasq to use infinite " "lease times." msgstr "" "Продолжительность выделения адреса DHCP (в секундах). Укажите -1, чтобы " "dnsmasq использовала бесконечное время выделения." msgid "" "DVR deployments for VXLAN/GRE/Geneve underlays require L2-pop to be enabled, " "in both the Agent and Server side." msgstr "" "При развертывании DVR для основных функций VXLAN/GRE/Geneve включить L2-pop " "как на стороне агента, так и на стороне сервера." msgid "" "Database engine for which script will be generated when using offline " "migration." msgstr "" "Служба базы данных, для которой будет создан сценарий при использовании " "миграции с отключением." msgid "" "Default IPv4 subnet pool to be used for automatic subnet CIDR allocation. 
" "Specifies by UUID the pool to be used in case where creation of a subnet is " "being called without a subnet pool ID. If not set then no pool will be used " "unless passed explicitly to the subnet create. If no pool is used, then a " "CIDR must be passed to create a subnet and that subnet will not be allocated " "from any pool; it will be considered part of the tenant's private address " "space. This option is deprecated for removal in the N release." msgstr "" "Пул подсетей IPv4 по умолчанию для использования при автоматическом " "выделении подсети CIDR. Задается по UUID пула в случае, если подсеть " "создается без указания ИД пула подсетей. Если параметр не задан, то пул " "будет использоваться только в том случае, если он явным образом указан при " "создании подсети. Если не используется никакой пул, то процедуре создания " "подсети необходимо передать CIDR. Такая подсеть не будет выделяться ни из " "одного пула, но будет считаться частью личного адресного пространства " "арендатора. Эта опция устарела и будет удалена в выпуске N. " msgid "" "Default IPv6 subnet pool to be used for automatic subnet CIDR allocation. " "Specifies by UUID the pool to be used in case where creation of a subnet is " "being called without a subnet pool ID. See the description for " "default_ipv4_subnet_pool for more information. This option is deprecated for " "removal in the N release." msgstr "" "Пул подсетей IPv6 по умолчанию для использования при автоматическом " "выделении подсети CIDR. Задается по UUID пула в случае, если подсеть " "создается без указания ИД пула подсетей. См. описание опции " "default_ipv4_subnet_pool. Эта опция устарела и будет удалена в выпуске N. " msgid "Default driver to use for quota checks" msgstr "Драйвер по умолчанию, применяемый для проверки квоты" msgid "Default external networks must be shared to everyone." msgstr "Внешняя сеть по умолчанию должна быть общедоступной." msgid "" "Default network type for external networks when no provider attributes are " "specified. By default it is None, which means that if provider attributes " "are not specified while creating external networks then they will have the " "same type as tenant networks. Allowed values for external_network_type " "config option depend on the network type values configured in type_drivers " "config option." msgstr "" "Тип по умолчанию для внешних сетей, если не указаны атрибуты провайдера. " "Значение по умолчанию Нет, означающее, что если во время создания внешних " "сетей не были указаны атрибуты провайдера, то их тип будет совпадать с типом " "сетей арендатора. Разрешенные значения для опции конфигурации " "external_network_type зависят от значений типа сети, настроенных в опции " "конфигурации type_drivers." msgid "" "Default number of RBAC entries allowed per tenant. A negative value means " "unlimited." msgstr "" "Количество записей RBAC по умолчанию на одного арендатора. Отрицательное " "значение - без ограничений." msgid "" "Default number of resource allowed per tenant. A negative value means " "unlimited." msgstr "" "Количество ресурсов по умолчанию на одного арендатора. Отрицательное " "значение - не ограничено." msgid "Default security group" msgstr "Группа защиты по умолчанию" msgid "Default security group already exists." msgstr "Группа защиты по умолчанию уже существует." msgid "" "Default value of availability zone hints. The availability zone aware " "schedulers use this when the resources availability_zone_hints is empty. 
"Multiple availability zones can be specified by a comma separated string. " "This value can be empty. In this case, even if availability_zone_hints for a " "resource is empty, availability zone is considered for high availability " "while scheduling the resource." msgstr "" "Значение по умолчанию параметра availability_zone_hints. Планировщики, " "учитывающие зону доступности, используют этот параметр, когда параметр " "availability_zone_hints для ресурсов пустой. Несколько зон доступности " "разделяются запятыми. Значение может быть пустым. В этом случае, даже если " "параметр availability_zone_hints для ресурса пустой, зона доступности " "считается пригодной для функций высокой готовности при планировании ресурса." msgid "" "Define the default value of enable_snat if not provided in " "external_gateway_info." msgstr "" "Определить значение по умолчанию enable_snat, если оно не указано в " "external_gateway_info." msgid "" "Defines providers for advanced services using the format: <service_type>:" "<name>:<driver>[:default]" msgstr "" "Определяет поставщиков для расширенных служб в формате: <service_type>:" "<name>:<driver>[:default]" msgid "" "Delay within which agent is expected to update existing ports whent it " "restarts" msgstr "" "Задержка, в течение которой агент, предположительно, должен обновить " "существующие порты при перезапуске" msgid "Delete the namespace by removing all devices." msgstr "Удалите пространство имен, удалив все устройства." #, python-format msgid "Deleting port %s" msgstr "Удаление порта %s" #, python-format msgid "Deployment error: %(reason)s." msgstr "Ошибка развертывания: %(reason)s." msgid "Destroy IPsets even if there is an iptables reference." msgstr "Уничтожить IPset даже при наличии ссылок iptables." msgid "Destroy all IPsets." msgstr "Уничтожить все IPset." #, python-format msgid "Device %(dev_name)s in mapping: %(mapping)s not unique" msgstr "Устройство %(dev_name)s в карте связей %(mapping)s неуникально" #, python-format msgid "Device '%(device_name)s' does not exist." msgstr "Устройство '%(device_name)s' не существует." msgid "Device has no virtual functions" msgstr "У устройства нет виртуальных функций" #, python-format msgid "Device name %(dev_name)s is missing from physical_device_mappings" msgstr "Имя устройства %(dev_name)s не указано в physical_device_mappings" msgid "Device not found" msgstr "Устройство не найдено" #, python-format msgid "" "Distributed Virtual Router Mac Address for host %(host)s does not exist." msgstr "" "MAC-адрес распределенного виртуального маршрутизатора для хоста %(host)s не " "существует." #, python-format msgid "Domain %(dns_domain)s not found in the external DNS service" msgstr "Домен %(dns_domain)s не найден во внешней службе DNS" msgid "Domain to use for building the hostnames" msgstr "Домен, используемый для компоновки имен хостов" msgid "" "Domain to use for building the hostnames. This option is deprecated. It has " "been moved to neutron.conf as dns_domain. It will be removed in a future " "release." msgstr "" "Домен для компоновки имен хостов. Эта опция устарела и перемещена в neutron." "conf как dns_domain. Будет удалена в последующих выпусках." msgid "Downgrade no longer supported" msgstr "Понижение больше не поддерживается" #, python-format msgid "Driver %s is not unique across providers" msgstr "Драйвер %s не является уникальным среди поставщиков" msgid "Driver for external DNS integration." msgstr "Драйвер для интеграции с внешним DNS." 
msgid "Driver for security groups firewall in the L2 agent" msgstr "Драйвер для брандмауэра групп защиты в агенте L2" msgid "Driver to use for scheduling network to DHCP agent" msgstr "Драйвер, используемый для планирования сети для агента DHCP" msgid "Driver to use for scheduling router to a default L3 agent" msgstr "" "Драйвер, используемый для планирования маршрутизатора для агента L3 по " "умолчанию" msgid "" "Driver used for ipv6 prefix delegation. This needs to be an entry point " "defined in the neutron.agent.linux.pd_drivers namespace. See setup.cfg for " "entry points included with the neutron source." msgstr "" "Драйвер для делегирования префикса ipv6. Должен быть точкой входа, " "определенной в пространстве имен neutron.agent.linux.pd_drivers. См. файл " "setup.cfg на наличие точек входа, включенных в исходный код neutron." msgid "Driver used for scheduling BGP speakers to BGP DrAgent" msgstr "Драйвер, используемый для планирования источников BGP для BGP DrAgent" msgid "Drivers list to use to send the update notification" msgstr "Список драйверов, применяемых для отправки уведомления об обновлении" #, python-format msgid "Duplicate IP address '%s'" msgstr "Одинаковые IP-адреса: '%s'" #, python-format msgid "" "Duplicate L3HARouterAgentPortBinding is created for router(s) %(router)s. " "Database cannot be upgraded. Please, remove all duplicates before upgrading " "the database." msgstr "" "Обнаружен повторяющийся L3HARouterAgentPortBinding для маршрутизаторов " "%(router)s. Обновление базы данных невозможно. Перед обновлением базы данных " "устраните все повторы." msgid "Duplicate Metering Rule in POST." msgstr "Дубликат правила измерения в POST." msgid "Duplicate Security Group Rule in POST." msgstr "Совпадающие правила группы защиты в POST." msgid "Duplicate address detected" msgstr "Повторяющийся адрес" #, python-format msgid "Duplicate hostroute '%s'" msgstr "Одинаковые маршруты к хосту: '%s'" #, python-format msgid "Duplicate items in the list: '%s'" msgstr "Список содержит одинаковые элементы: '%s'" #, python-format msgid "Duplicate nameserver '%s'" msgstr "Одинаковые серверы имен: '%s'" msgid "Duplicate segment entry in request." msgstr "Дубликат записи сегмента в запросе." #, python-format msgid "ERROR: %s" msgstr "Ошибка: %s" msgid "" "ERROR: Unable to find configuration file via the default search paths (~/." "neutron/, ~/, /etc/neutron/, /etc/) and the '--config-file' option!" msgstr "" "ОШИБКА: Не удалось найти файл конфигурации с использованием путей поиска по " "умолчанию (~/.neutron/, ~/, /etc/neutron/, /etc/) и опции '--config-file'!" msgid "" "Either one of parameter network_id or router_id must be passed to _get_ports " "method." msgstr "" "Один из параметров, network_id или router_id, должен быть передан в метод " "_get_ports." msgid "Either subnet_id or port_id must be specified" msgstr "Необходимо указать или subnet_id, или port_id" msgid "Empty physical network name." msgstr "Пустое имя физической сети." msgid "Empty subnet pool prefix list." msgstr "Пустой список префиксов пула подсетей." msgid "Enable FWaaS" msgstr "Включить FWaaS" msgid "Enable HA mode for virtual routers." msgstr "Включить режим высокой готовности для виртуальных маршрутизаторов." msgid "Enable SSL on the API server" msgstr "Разрешить применение SSL на сервере API" msgid "" "Enable VXLAN on the agent. Can be enabled when agent is managed by ml2 " "plugin using linuxbridge mechanism driver" msgstr "" "Активируйте VXLAN на агенте. 
Активация возможна, если агентом управляет " "модуль ml2, использующий драйвер механизма linuxbridge" msgid "" "Enable local ARP responder if it is supported. Requires OVS 2.1 and ML2 " "l2population driver. Allows the switch (when supporting an overlay) to " "respond to an ARP request locally without performing a costly ARP broadcast " "into the overlay." msgstr "" "Включить локальный промежуточный клиент ARP, если он поддерживается. " "Требуется OVS 2.1 и драйвер ML2 l2population. Позволяет коммутатору (когда " "поддерживается перекрытие) отвечать на запрос ARP локально, без выполнения " "дорогостоящего оповещения ARP в перекрытии." msgid "" "Enable local ARP responder which provides local responses instead of " "performing ARP broadcast into the overlay. Enabling local ARP responder is " "not fullycompatible with the allowed-address-pairs extension." msgstr "" "Включить локальный промежуточный клиент ARP, обеспечивающий локальные ответы " "вместо широковещательной рассылки ARP в перекрывающиеся сети. Локальный " "промежуточный клиент ARP не полностью совместим с расширением пар допустимых " "адресов." msgid "" "Enable services on an agent with admin_state_up False. If this option is " "False, when admin_state_up of an agent is turned False, services on it will " "be disabled. Agents with admin_state_up False are not selected for automatic " "scheduling regardless of this option. But manual scheduling to such agents " "is available if this option is True." msgstr "" "Включить службы на агенте с admin_state_up False. Если эта опция равна " "False, когда admin_state_up агента устанавливается False, службы на нем " "будут выключены. Агенты с admin_state_up False не выбраны для " "автоматического планирования независимо от этой опции. Но ручное " "планирование для таких агентов доступно, если опция равна True." msgid "" "Enable suppression of ARP responses that don't match an IP address that " "belongs to the port from which they originate. Note: This prevents the VMs " "attached to this agent from spoofing, it doesn't protect them from other " "devices which have the capability to spoof (e.g. bare metal or VMs attached " "to agents without this flag set to True). Spoofing rules will not be added " "to any ports that have port security disabled. For LinuxBridge, this " "requires ebtables. For OVS, it requires a version that supports matching ARP " "headers. This option will be removed in Newton so the only way to disable " "protection will be via the port security extension." msgstr "" "Включить подавление ответов ARP, которые не соответствуют IP-адресу, " "относящемуся к порту-источнику этих ответов. Примечание: эта функция не " "позволяет VM, подключенным к этому агенту, выполнять имитацию, но не " "защищает их от других устройств, способных на имитацию (например, " "компьютеров без установленной операционной системы или VM, подключенных к " "агентам, в которых этому флагу присвоено значение False). Правила защиты от " "имитации не будут добавляться для портов с отключенной защитой портов. Для " "LinuxBridge требуется ebtables. Для OVS требуется версия, поддерживающая " "сопоставление заголовков ARP. Эта опция будет удалена в Newton, поэтому " "защиту можно будет выключить только через расширение защиты портов." msgid "" "Enable/Disable log watch by metadata proxy. It should be disabled when " "metadata_proxy_user/group is not allowed to read/write its log file and " "copytruncate logrotate option must be used if logrotate is enabled on " "metadata proxy log files. 
Option default value is deduced from " "metadata_proxy_user: watch log is enabled if metadata_proxy_user is agent " "effective user id/name." msgstr "" "Включить/выключить отслеживание протокола посредством прокси метаданных. Оно " "должно быть выключено, когда metadata_proxy_user/group не разрешено читать/" "писать в файл протокола, и должна использоваться опция copytruncate " "logrotate, если опция logrotate включена для файлов протокола прокси " "метаданных. Значение опции по умолчанию выводится из metadata_proxy_user: " "протокол отслеживания включен, если metadata_proxy_user является " "действующим ИД/именем пользователя агента. " msgid "" "Enables IPv6 Prefix Delegation for automatic subnet CIDR allocation. Set to " "True to enable IPv6 Prefix Delegation for subnet allocation in a PD-capable " "environment. Users making subnet creation requests for IPv6 subnets without " "providing a CIDR or subnetpool ID will be given a CIDR via the Prefix " "Delegation mechanism. Note that enabling PD will override the behavior of " "the default IPv6 subnetpool." msgstr "" "Разрешает делегирование префикса IPv6 при автоматическом выделении CIDR " "подсети. Присвойте параметру значение True, чтобы включить делегирование " "префикса IPv6 при выделении сети в среде с поддержкой делегирования " "префикса. Пользователи, запрашивающие создание подсети IPv6 без указания " "CIDR или ИД пула подсетей, получают CIDR посредством делегирования префикса. " "Учтите, что включение делегирования префикса переопределяет стандартное " "поведение пула подсетей IPv6." msgid "" "Enables the dnsmasq service to provide name resolution for instances via DNS " "resolvers on the host running the DHCP agent. Effectively removes the '--no-" "resolv' option from the dnsmasq process arguments. Adding custom DNS " "resolvers to the 'dnsmasq_dns_servers' option disables this feature." msgstr "" "Включает службу dnsmasq для обработки запросов DNS на хосте, на котором " "работает агент DHCP. Аннулирует действие опции '--no-resolv' в аргументах " "процесса dnsmasq. Эта функция выключается, если в опцию " "'dnsmasq_dns_servers' добавляются пользовательские обработчики запросов DNS." msgid "Encountered an empty component." msgstr "Обнаружен пустой компонент." msgid "End of VLAN range is less than start of VLAN range" msgstr "Конечное значение диапазона VLAN меньше его начального значения" msgid "End of tunnel range is less than start of tunnel range" msgstr "Конечное значение диапазона туннелей меньше его начального значения" msgid "Enforce using split branches file structure." msgstr "Принудительно использовать файловую структуру с разделенными ветвями." msgid "" "Ensure that configured gateway is on subnet. For IPv6, validate only if " "gateway is not a link local address. Deprecated, to be removed during the " "Newton release, at which point the gateway will not be forced on to subnet." msgstr "" "Убедитесь, что в подсети настроен шлюз. Для IPv6 проверка выполняется, " "только если шлюз не является локальным адресом линии связи. Устарело, будет " "удалено в выпуске Newton, в котором шлюз не будет обязательным в подсети." #, python-format msgid "Error %(reason)s while attempting the operation." msgstr "Ошибка %(reason)s во время выполнения операции." 
#, python-format msgid "Error importing FWaaS device driver: %s" msgstr "Ошибка при импорте драйвера устройства FWaaS: %s" #, python-format msgid "Error parsing dns address %s" msgstr "Ошибка при анализе адреса dns %s" #, python-format msgid "Error while reading %s" msgstr "Ошибка при чтении %s" #, python-format msgid "" "Exceeded %s second limit waiting for address to leave the tentative state." msgstr "" "Превышено время ожидания выхода адреса из временного состояния (%s секунд)." msgid "Exceeded maximum amount of fixed ips per port." msgstr "" "Превышено максимально допустимое число фиксированных IP-адресов на один порт." msgid "Existing prefixes must be a subset of the new prefixes" msgstr "Существующие префиксы должны быть подмножеством новых префиксов" #, python-format msgid "" "Exit code: %(returncode)d; Stdin: %(stdin)s; Stdout: %(stdout)s; Stderr: " "%(stderr)s" msgstr "" "Код возврата: %(returncode)d; Stdin: %(stdin)s; Stdout: %(stdout)s; Stderr: " "%(stderr)s" #, python-format msgid "Extension %(driver)s failed." msgstr "Сбой расширения %(driver)s." #, python-format msgid "" "Extension driver %(driver)s required for service plugin %(service_plugin)s " "not found." msgstr "" "Не найден драйвер расширения %(driver)s, необходимый для модуля службы " "%(service_plugin)s." msgid "" "Extension to use alongside ml2 plugin's l2population mechanism driver. It " "enables the plugin to populate VXLAN forwarding table." msgstr "" "Расширение для использования наряду с драйвером механизма l2population " "модуля ml2. Оно обеспечивает заполнение модулем таблицы пересылки VXLAN." #, python-format msgid "Extension with alias %s does not exist" msgstr "Расширение с псевдонимом %s не существует" msgid "Extensions list to use" msgstr "Список используемых расширений" #, python-format msgid "Extensions not found: %(extensions)s." msgstr "Расширения не найдены: %(extensions)s." #, python-format msgid "External DNS driver %(driver)s could not be found." msgstr "Драйвер внешней службы DNS %(driver)s не найден." #, python-format msgid "External IP %s is the same as the gateway IP" msgstr "Внешний IP-адрес %s совпадает с IP-адресом шлюза" #, python-format msgid "" "External network %(external_network_id)s is not reachable from subnet " "%(subnet_id)s. Therefore, cannot associate Port %(port_id)s with a Floating " "IP." msgstr "" "Внешняя сеть %(external_network_id)s недостижима из подсети %(subnet_id)s. " "Поэтому порт %(port_id)s невозможно связать с нефиксированным IP-адресом." #, python-format msgid "" "External network %(net_id)s cannot be updated to be made non-external, since " "it has existing gateway ports" msgstr "" "Невозможно изменить внешнюю сеть %(net_id)s, сделав ее не внешней, так как в " "ней существуют порты шлюза" #, python-format msgid "ExtraDhcpOpt %(id)s could not be found" msgstr "Не удалось найти ExtraDhcpOpt %(id)s" msgid "" "FWaaS plugin is configured in the server side, but FWaaS is disabled in L3-" "agent." msgstr "" "Модуль FWaaS настроен на стороне сервера, но FWaaS выключен в агенте L3." #, python-format msgid "Failed rescheduling router %(router_id)s: no eligible l3 agent found." msgstr "" "Не удалось перепланировать маршрутизатор %(router_id)s: не найден допустимый " "агент L3." #, python-format msgid "Failed scheduling router %(router_id)s to the L3 Agent %(agent_id)s." msgstr "" "Не удалось запланировать маршрутизатор %(router_id)s для агента L3 " "%(agent_id)s." 
#, python-format msgid "" "Failed to allocate a VRID in the network %(network_id)s for the router " "%(router_id)s after %(max_tries)s tries." msgstr "" "Не удалось выделить VRID в сети %(network_id)s для маршрутизатора " "%(router_id)s за %(max_tries)s попыток." #, python-format msgid "Failed to allocate subnet: %(reason)s." msgstr "Не удалось выделить подсеть: %(reason)s." msgid "" "Failed to associate address scope: subnetpools within an address scope must " "have unique prefixes." msgstr "" "Не удалось связать область адресов: пулы подсетей в области адресов должны " "иметь уникальные префиксы." #, python-format msgid "Failed to check policy %(policy)s because %(reason)s." msgstr "Не удалось проверить стратегию %(policy)s; причина: %(reason)s." #, python-format msgid "" "Failed to create a duplicate %(object_type)s: for attribute(s) " "%(attributes)s with value(s) %(values)s" msgstr "" "Не удалось создать копию %(object_type)s для атрибутов %(attributes)s со " "значениями %(values)s" #, python-format msgid "" "Failed to create port on network %(network_id)s, because fixed_ips included " "invalid subnet %(subnet_id)s" msgstr "" "Не удалось создать порт в сети %(network_id)s, так как fixed_ips содержат " "недопустимую подсеть %(subnet_id)s" #, python-format msgid "Failed to init policy %(policy)s because %(reason)s." msgstr "Не удалось инициализировать стратегию %(policy)s; причина: %(reason)s." #, python-format msgid "Failed to locate source for %s." msgstr "Не удалось найти источник для %s." #, python-format msgid "Failed to parse request. Parameter '%s' not specified" msgstr "Не удалось проанализировать запрос. Не указан параметр '%s'" #, python-format msgid "Failed to parse request. Required attribute '%s' not specified" msgstr "" "Не удалось проанализировать запрос. Не указан обязательный атрибут '%s'" msgid "Failed to remove supplemental groups" msgstr "Не удалось удалить дополнительные группы" #, python-format msgid "Failed to set gid %s" msgstr "Не удалось получить gid %s" #, python-format msgid "Failed to set uid %s" msgstr "Не удалось задать uid %s" #, python-format msgid "Failed to set-up %(type)s tunnel port to %(ip)s" msgstr "Не удалось настроить порт туннеля %(type)s на %(ip)s" msgid "Failure applying iptables rules" msgstr "Не удалось применить правила iptables" #, python-format msgid "Failure waiting for address %(address)s to become ready: %(reason)s" msgstr "Сбой ожидания готовности адреса %(address)s: %(reason)s" msgid "Flat provider networks are disabled" msgstr "Одноуровневые сети выключены" #, python-format msgid "Flavor %(flavor_id)s could not be found." msgstr "Разновидность %(flavor_id)s не найдена." #, python-format msgid "Flavor %(flavor_id)s is used by some service instance." msgstr "Разновидность %(flavor_id)s используется одним из экземпляров службы." msgid "Flavor is not enabled." msgstr "Разновидность не включена." #, python-format msgid "Floating IP %(floatingip_id)s could not be found" msgstr "Не найден нефиксированный IP-адрес %(floatingip_id)s" #, python-format msgid "" "Floating IP %(floatingip_id)s is associated with non-IPv4 address " "%s(internal_ip)s and therefore cannot be bound." msgstr "" "Нефиксированный IP-адрес %(floatingip_id)s связан с адресом, " "%s(internal_ip)s, отличным от IPv4, и поэтому для него нельзя задать " "ограничение." 
msgid "For TCP/UDP protocols, port_range_min must be <= port_range_max" msgstr "" "Для протоколов TCP/UDP значение port_range_min должно быть <= port_range_max" #, python-format msgid "For class %(object_type)s missing primary keys: %(missing_keys)s" msgstr "" "Для класса %(object_type)s отсутствуют первичные ключи: %(missing_keys)s" msgid "Force ip_lib calls to use the root helper" msgstr "" "Использовать в вызовах ip_lib вспомогательную программу для получения прав " "доступа root" #, python-format msgid "Found duplicate extension: %(alias)s." msgstr "Найдены повторяющиеся расширения: %(alias)s." #, python-format msgid "" "Found overlapping allocation pools: %(pool_1)s %(pool_2)s for subnet " "%(subnet_cidr)s." msgstr "" "Обнаружено перекрытие пулов выделения %(pool_1)s %(pool_2)s для подсети " "%(subnet_cidr)s." msgid "Gateway IP version inconsistent with allocation pool version" msgstr "Версия IP шлюза несовместима с версией для пула выделения адресов" #, python-format msgid "" "Gateway cannot be updated for router %(router_id)s, since a gateway to " "external network %(net_id)s is required by one or more floating IPs." msgstr "" "Невозможно обновить шлюз для маршрутизатора %(router_id)s, так как шлюз к " "внешней сети %(net_id)s требуется одному или нескольким нефиксированным IP-" "адресам." #, python-format msgid "Gateway ip %(ip_address)s conflicts with allocation pool %(pool)s." msgstr "IP-адрес шлюза %(ip_address)s конфликтует с пулом выделения %(pool)s." msgid "Gateway is not valid on subnet" msgstr "Шлюз недопустим в подсети" msgid "" "Geneve encapsulation header size is dynamic, this value is used to calculate " "the maximum MTU for the driver. This is the sum of the sizes of the outer " "ETH + IP + UDP + GENEVE header sizes. The default size for this field is 50, " "which is the size of the Geneve header without any additional option headers." msgstr "" "Размер заголовка Geneve меняется динамически, и его значение используется " "при вычислении MTU для драйвера. Сюда входит сумма размеров заголовков ETH + " "IP + UDP + GENEVE. Размер поля по умолчанию равен 50, это размер заголовков " "Geneve без заголовков дополнительных опций." msgid "Group (gid or name) running metadata proxy after its initialization" msgstr "Группа (gid или имя) использует proxy метаданных после инициализации" msgid "" "Group (gid or name) running metadata proxy after its initialization (if " "empty: agent effective group)." msgstr "" "Группа (gid или имя) использует proxy метаданных после инициализации (если " "пустое, используется группа агента). " msgid "Group (gid or name) running this process after its initialization" msgstr "Группа (gid или имя) запускает этот процесс после инициализации" #, python-format msgid "HEAD file does not match migration timeline head, expected: %s" msgstr "Файл HEAD отличается от ожидаемого для графика миграции: %s" msgid "" "Hostname to be used by the Neutron server, agents and services running on " "this machine. All the agents and services running on this machine must use " "the same host value." msgstr "" "Имя хоста для использования сервером Neutron, агентами и службами, " "запущенными в этой системе. Все агенты и службы, запущенные в этой системе, " "должны использовать одно и то же значение хоста." msgid "How many times Neutron will retry MAC generation" msgstr "Число повторов генерации MAC для Neutron" #, python-format msgid "" "ICMP code (port-range-max) %(value)s is provided but ICMP type (port-range-" "min) is missing." 
msgstr "" "Код ICMP (port-range-max) %(value)s указан, но тип ICMP (port-range-min) " "отсутствует." msgid "ID of network" msgstr "ИД сети" msgid "ID of network to probe" msgstr "ИД сети для тестирования" msgid "ID of probe port to delete" msgstr "ИД удаляемого тестового порта" msgid "ID of probe port to execute command" msgstr "ИД тестового порта для выполнения команды" msgid "ID of the router" msgstr "ИД маршрутизатора" #, python-format msgid "IP address %(ip)s already allocated in subnet %(subnet_id)s" msgstr "IP-адрес %(ip)s уже выделен в подсети %(subnet_id)s" #, python-format msgid "IP address %(ip)s does not belong to subnet %(subnet_id)s" msgstr "IP-адрес %(ip)s не принадлежит подсети %(subnet_id)s" #, python-format msgid "" "IP address %(ip_address)s is not a valid IP for any of the subnets on the " "specified network." msgstr "" "IP-адрес %(ip_address)s не является допустимым IP-адресом ни для одной " "подсети в указанной сети." msgid "IP address used by Nova metadata server." msgstr "IP-адрес, используемый сервером метаданных Nova." msgid "IP allocation failed. Try again later." msgstr "Не удалось выделить IP-адрес. Повторите попытку позже." msgid "IP allocation requires subnet_id or ip_address" msgstr "Для выделения IP-адреса требуется subnet_id или ip_address" #, python-format msgid "" "IPTablesManager.apply failed to apply the following set of iptables rules:\n" "%s" msgstr "" "Функции IPTablesManager.apply не удалось применить следующий набор правил " "iptables :\n" "%s" msgid "IPtables conntrack zones exhausted, iptables rules cannot be applied." msgstr "" "Зоны контроля состояния соединений IPtables conntrack израсходованы, правила " "iptables не могут быть применены." msgid "IPv6 Address Mode must be SLAAC or Stateless for Prefix Delegation." msgstr "" "Допустимое значение режима адресов IPv6 для делегирования префикса: SLAAC " "или без сохранения состояния." msgid "IPv6 RA Mode must be SLAAC or Stateless for Prefix Delegation." msgstr "" "Допустимое значение режима RA IPv6 для делегирования префикса: SLAAC или без " "сохранения состояния." #, python-format msgid "" "IPv6 address %(address)s can not be directly assigned to a port on subnet " "%(id)s since the subnet is configured for automatic addresses" msgstr "" "Адрес IPv6 %(address)s не может быть напрямую связан с портом в подсети " "%(id)s, так как подсеть настроена для автоматических адресов" #, python-format msgid "" "IPv6 address %(ip)s cannot be directly assigned to a port on subnet " "%(subnet_id)s as the subnet is configured for automatic addresses" msgstr "" "Адрес IPv6 %(ip)s нельзя напрямую связывать с портом в подсети " "%(subnet_id)s, так как подсеть настроена для автоматического выделения " "адресов" #, python-format msgid "" "IPv6 subnet %s configured to receive RAs from an external router cannot be " "added to Neutron Router." msgstr "" "Подсеть IPv6 %s, настроенная для приема RA из внешнего маршрутизатора, не " "может быть добавлена в маршрутизатор Neutron." msgid "" "If True, advertise network MTU values if core plugin calculates them. MTU is " "advertised to running instances via DHCP and RA MTU options." msgstr "" "Если параметр задан равным True, оповещать о значениях MTU в сети, когда их " "вычисляет основной модуль. Оповещения о MTU передаются работающим " "экземплярам по DHCP и опции RA MTU." msgid "" "If True, then allow plugins that support it to create VLAN transparent " "networks." msgstr "" "Если True, разрешаются модули, поддерживающие создание прозрачных сетей VLAN." 
msgid "" "If non-empty, the l3 agent can only configure a router that has the matching " "router ID." msgstr "" "Если опция задана, то агент l3 может настроить только маршрутизатор с " "совпадающим ИД маршрутизатора." msgid "Illegal IP version number" msgstr "Запрещенный номер версии IP" #, python-format msgid "" "Illegal prefix bounds: %(prefix_type)s=%(prefixlen)s, %(base_prefix_type)s=" "%(base_prefixlen)s." msgstr "" "Недопустимые ограничения префикса: %(prefix_type)s=%(prefixlen)s, " "%(base_prefix_type)s=%(base_prefixlen)s." #, python-format msgid "" "Illegal subnetpool association: subnetpool %(subnetpool_id)s cannot " "associate with address scope %(address_scope_id)s because subnetpool " "ip_version is not %(ip_version)s." msgstr "" "Недопустимая связь пула подсетей: пул подсетей %(subnetpool_id)s не может " "быть связан с областью адресов %(address_scope_id)s, поскольку ip_version " "для пула подсетей не равен %(ip_version)s." #, python-format msgid "" "Illegal subnetpool association: subnetpool %(subnetpool_id)s cannot be " "associated with address scope %(address_scope_id)s." msgstr "" "Недопустимая связь пула подсетей: пул подсетей %(subnetpool_id)s не может " "быть связан с областью адресов %(address_scope_id)s." #, python-format msgid "Illegal subnetpool update : %(reason)s." msgstr "Недопустимое обновление пула подсетей: %(reason)s." #, python-format msgid "Illegal update to prefixes: %(msg)s." msgstr "Недопустимое обновление префиксов: %(msg)s." msgid "" "In some cases the Neutron router is not present to provide the metadata IP " "but the DHCP server can be used to provide this info. Setting this value " "will force the DHCP server to append specific host routes to the DHCP " "request. If this option is set, then the metadata service will be activated " "for all the networks." msgstr "" "В некоторых ситуациях машрутизатор Neutron отсутствует и не предоставляет " "метаданные для IP, но эту задачу решает сервер DHCP. Если задан этот " "параметр, то сервер DHCP будет добавлять маршруты к хостам в запрос DHCP. С " "этим параметром служба метаданных будет активирована для всех сетей." #, python-format msgid "Incorrect pci_vendor_info: \"%s\", should be pair vendor_id:product_id" msgstr "" "Неправильное pci_vendor_info (\"%s\"), требуется пара ИД-производителя:ИД-" "продукта" msgid "" "Indicates that this L3 agent should also handle routers that do not have an " "external network gateway configured. This option should be True only for a " "single agent in a Neutron deployment, and may be False for all agents if all " "routers must have an external network gateway." msgstr "" "Указывает, что агент L3 должен также обрабатывать маршрутизаторы, для " "которых не настроен шлюз во внешнюю сеть. Опции следует присвоить значение " "True, если в развертывании Neutron участвует только один агент. Опции можно " "присвоить значение False для всех агентов, если все маршрутизаторы должны " "иметь шлюз во внешнюю сеть." #, python-format msgid "Instance of class %(module)s.%(class)s must contain _cache attribute" msgstr "" "Экземпляр класса %(module)s.%(class)s должен содержать _cache attribute" #, python-format msgid "Insufficient prefix space to allocate subnet size /%s" msgstr "Недостаточное пространство префиксов для выделения размера сети /%s" msgid "Insufficient rights for removing default security group." msgstr "" "Отсутствуют требуемые права доступа для удаления группы защиты по умолчанию." msgid "" "Integration bridge to use. 
Do not change this parameter unless you have a " "good reason to. This is the name of the OVS integration bridge. There is one " "per hypervisor. The integration bridge acts as a virtual 'patch bay'. All VM " "VIFs are attached to this bridge and then 'patched' according to their " "network connectivity." msgstr "" "Используемый мост интеграции. Не изменяйте этот параметр без серьезных " "причин. Это имя моста интеграции OVS. Для каждого гипервизора предусмотрен " "один мост. Мост интеграции работает как виртуальная коммутационная панель. " "Все виртуальные интерфейсы VM подключаются к этому мосту и затем " "коммутируются согласно топологии сети." msgid "Interface to monitor" msgstr "Интерфейс для монитора" msgid "" "Interval between checks of child process liveness (seconds), use 0 to disable" msgstr "" "Интервал между проверками работы дочернего процесса (в секундах), 0 для " "отключения" msgid "Interval between two metering measures" msgstr "Интервал между двумя показателями измерений" msgid "Interval between two metering reports" msgstr "Интервал между двумя отчетами измерений" #, python-format msgid "Invalid CIDR %(input)s given as IP prefix." msgstr "Указан недопустимый CIDR %(input)s как префикс IP-адреса." #, python-format msgid "" "Invalid CIDR %s for IPv6 address mode. OpenStack uses the EUI-64 address " "format, which requires the prefix to be /64." msgstr "" "Недопустимый CIDR %s для режима адресации IPv6. OpenStack использует формат " "адреса EUI-64, для которого требуется префикс /64." #, python-format msgid "Invalid Device %(dev_name)s: %(reason)s" msgstr "Недопустимое устройство %(dev_name)s: %(reason)s" #, python-format msgid "" "Invalid action '%(action)s' for object type '%(object_type)s'. Valid " "actions: %(valid_actions)s" msgstr "" "Недопустимое действие '%(action)s' для типа объекта '%(object_type)s'. " "Допустимые действия: %(valid_actions)s" #, python-format msgid "" "Invalid authentication type: %(auth_type)s, valid types are: " "%(valid_auth_types)s" msgstr "" "Недопустимый тип идентификации: %(auth_type)s. Допустимые типы: " "%(valid_auth_types)s" #, python-format msgid "Invalid content type %(content_type)s." msgstr "Недопустимый тип содержимого %(content_type)s." #, python-format msgid "Invalid data format for IP pool: '%s'" msgstr "Недопустимый формат данных для пула IP: '%s'" #, python-format msgid "Invalid data format for extra-dhcp-opt: %(data)s" msgstr "Недопустимый формат данных для extra-dhcp-opt: %(data)s" #, python-format msgid "Invalid data format for fixed IP: '%s'" msgstr "Недопустимый формат данных для фиксированного IP: '%s'" #, python-format msgid "Invalid data format for hostroute: '%s'" msgstr "Недопустимый формат данных для маршрута к хосту: '%s'" #, python-format msgid "Invalid data format for nameserver: '%s'" msgstr "Недопустимый формат данных сервера имен: '%s'" #, python-format msgid "Invalid ethertype %(ethertype)s for protocol %(protocol)s." msgstr "Недопустимый тип %(ethertype)s для протокола %(protocol)s." #, python-format msgid "Invalid extension environment: %(reason)s." msgstr "Недопустимая среда расширения: %(reason)s." #, python-format msgid "Invalid format for routes: %(routes)s, %(reason)s" msgstr "Недопустимый формат маршрутизаторов: %(routes)s, %(reason)s" #, python-format msgid "Invalid format: %s" msgstr "Неправильный формат: %s" #, python-format msgid "Invalid input for %(attr)s. Reason: %(reason)s." msgstr "Недопустимые входные данные для %(attr)s. Причина: %(reason)s." #, python-format msgid "" "Invalid input. 
'%(target_dict)s' must be a dictionary with keys: " "%(expected_keys)s" msgstr "" "Недопустимые входные параметры. '%(target_dict)s' должен быть словарем с " "ключами %(expected_keys)s" #, python-format msgid "Invalid instance state: %(state)s, valid states are: %(valid_states)s" msgstr "" "Недопустимое состояние экземпляра: %(state)s. Допустимые состояния: " "%(valid_states)s" #, python-format msgid "Invalid mapping: '%s'" msgstr "Недопустимое отображение: '%s'" #, python-format msgid "Invalid network VLAN range: '%(vlan_range)s' - '%(error)s'." msgstr "Недопустимый диапазон VLAN сети: '%(vlan_range)s' - '%(error)s'." #, python-format msgid "Invalid network VXLAN port range: '%(vxlan_range)s'." msgstr "Недопустимый диапазон портов сети VXLAN: '%(vxlan_range)s'." #, python-format msgid "Invalid pci slot %(pci_slot)s" msgstr "Недопустимый разъем pci %(pci_slot)s" #, python-format msgid "Invalid provider format. Last part should be 'default' or empty: %s" msgstr "" "Недопустимый формат поставщика. Последняя часть должна иметь вид 'default' " "или быть пустой: %s" #, python-format msgid "Invalid resource type %(resource_type)s" msgstr "Недопустимый тип ресурса %(resource_type)s" #, python-format msgid "Invalid route: %s" msgstr "Недопустимый маршрут: %s" msgid "Invalid service provider format" msgstr "Недопустимый формат поставщика службы" #, python-format msgid "Invalid service type %(service_type)s." msgstr "Недопустимый тип службы %(service_type)s." #, python-format msgid "" "Invalid value for ICMP %(field)s (%(attr)s) %(value)s. It must be 0 to 255." msgstr "" "Недопустимое значение для ICMP %(field)s (%(attr)s) %(value)s. Значение " "должно лежать в диапазоне от 0 до 255." #, python-format msgid "Invalid value for port %(port)s" msgstr "Недопустимое значение для порта %(port)s" msgid "" "Iptables mangle mark used to mark ingress from external network. This mark " "will be masked with 0xffff so that only the lower 16 bits will be used." msgstr "" "Пометка mangle в iptables применяется для пометки входа из внешней сети. Эта " "пометка будет применяться с маской 0xffff для использования только младших " "16 бит." msgid "" "Iptables mangle mark used to mark metadata valid requests. This mark will be " "masked with 0xffff so that only the lower 16 bits will be used." msgstr "" "Пометка mangle в Iptables, используемая для пометки допустимых запросов " "метаданных. Эта пометка будет применяться с маской 0xffff для использования " "только младших 16 бит." msgid "" "Keep in track in the database of current resourcequota usage. Plugins which " "do not leverage the neutron database should set this flag to False" msgstr "" "Отслеживает в базе данных текущее использование квоты ресурсов. Модули, " "которые не используют базу данных neutron, должны присваивать этому флагу " "значение False" msgid "Keepalived didn't respawn" msgstr "Демон keepalived не выполнил повторное порождение" msgid "Keepalived didn't spawn" msgstr "Демон keepalived не запустился" #, python-format msgid "" "Kernel HZ value %(value)s is not valid. This value must be greater than 0." msgstr "" "Недопустимое значение HZ ядра %(value)s. Значение должно быть больше 0." 
#, python-format msgid "Key %(key)s in mapping: '%(mapping)s' not unique" msgstr "Ключ %(key)s в отображении '%(mapping)s' не уникален" msgid "L3 agent failure to setup NAT for floating IPs" msgstr "Ошибка агента L3 при настройке NAT для нефиксированных IP" msgid "L3 agent failure to setup floating IPs" msgstr "Ошибка агента L3 при настройке нефиксированных IP" #, python-format msgid "Limit must be an integer 0 or greater and not '%d'" msgstr "Ограничение должно быть неотрицательным целым и не равно '%d'" msgid "Limit number of leases to prevent a denial-of-service." msgstr "Ограничить число выделений во избежание отказа в обслуживании." msgid "List of :" msgstr "Список <физическая-сеть>:<физический-мост>" msgid "" "List of :: or " "specifying physical_network names usable for VLAN provider and tenant " "networks, as well as ranges of VLAN tags on each available for allocation to " "tenant networks." msgstr "" "Список :: или , " "содержащий имена физических сетей, которые могут использоваться для сетей " "VLAN провайдера и арендатора, а также диапазоны тегов VLAN для каждой сети, " "доступной для выделения арендаторам." msgid "" "List of network type driver entrypoints to be loaded from the neutron.ml2." "type_drivers namespace." msgstr "" "Список конечных точек драйвера типа сети, загружаемых из пространства имен " "neutron.ml2.type_drivers." msgid "" "List of physical_network names with which flat networks can be created. Use " "default '*' to allow flat networks with arbitrary physical_network names. " "Use an empty list to disable flat networks." msgstr "" "Список имен физических сетей, с которыми можно создавать одноуровневые сети. " "Для создания одноуровневых сетей с произвольными именами физических сетей " "используйте символ *. Пустой список запрещает создание одноуровневых сетей." msgid "Local IP address of the VXLAN endpoints." msgstr "Локальный IP-адрес конечных точек VXLAN." msgid "Location for Metadata Proxy UNIX domain socket." msgstr "Расположение сокета домена UNIX прокси метаданных. " msgid "Location of Metadata Proxy UNIX domain socket" msgstr "Расположение сокета домена UNIX прокси метаданных" msgid "Location of pid file of this process." msgstr "Расположение файла pid этого процесса." msgid "Location to store DHCP server config files." msgstr "Расположение для хранения файлов конфигурации сервера DHCP." msgid "Location to store IPv6 PD files." msgstr "Расположение для хранения файлов PD IPv6." msgid "Location to store IPv6 RA config files" msgstr "Расположение для хранения файлов конфигурации RA IPv6" msgid "Location to store child pid files" msgstr "Расположение для хранения дочерних файлов pid" msgid "Location to store keepalived/conntrackd config files" msgstr "Расположение для хранения файлов конфигурации keepalived/conntrackd" msgid "Log agent heartbeats" msgstr "Вести протокол периодических сигналов агента" msgid "Loopback IP subnet is not supported if enable_dhcp is True." msgstr "Петлевая подсеть IP не поддерживается, если enable_dhcp равен True." msgid "MTU size of veth interfaces" msgstr "Размер MTU интерфейсов veth" msgid "Make the l2 agent run in DVR mode." msgstr "Создать агент L2, выполняемый в режиме DVR." msgid "Malformed request body" msgstr "Неправильное тело запроса" #, python-format msgid "Malformed request body: %(reason)s." msgstr "Неправильно сформированное тело запроса: %(reason)s." 
msgid "MaxRtrAdvInterval setting for radvd.conf" msgstr "Параметр MaxRtrAdvInterval для radvd.conf" msgid "Maximum number of DNS nameservers per subnet" msgstr "Максимальное количество серверов DNS для подсети" msgid "" "Maximum number of L3 agents which a HA router will be scheduled on. If it is " "set to 0 then the router will be scheduled on every agent." msgstr "" "Максимальное число агентов L3, где будет запланирован маршрутизатор высокой " "готовности. Если параметр равен 0, то маршрутизатор будет запланирован на " "каждом агенте." msgid "Maximum number of allowed address pairs" msgstr "Максимальное число разрешенных пар адресов" msgid "" "Maximum number of fixed ips per port. This option is deprecated and will be " "removed in the N release." msgstr "" "Максимальное количество фиксированных IP-адресов на один порт. Эта опция " "устарела и будет удалена в выпуске N. " msgid "Maximum number of host routes per subnet" msgstr "Максимальное количество маршрутов хоста на подсеть" msgid "Maximum number of routes per router" msgstr "Максимальное количество маршрутов на маршрутизатор" msgid "" "Metadata Proxy UNIX domain socket mode, 4 values allowed: 'deduce': deduce " "mode from metadata_proxy_user/group values, 'user': set metadata proxy " "socket mode to 0o644, to use when metadata_proxy_user is agent effective " "user or root, 'group': set metadata proxy socket mode to 0o664, to use when " "metadata_proxy_group is agent effective group or root, 'all': set metadata " "proxy socket mode to 0o666, to use otherwise." msgstr "" "Режим сокета домена UNIX Proxy метаданных, допускается 4 значения: 'deduce': " "получать режим из значений metadata_proxy_user/group, 'user': присвоить " "режиму сокета proxy метаданных значение 0o644 для применения в случае, если " "значением metadata_proxy_user является пользователь root или эффективный " "пользователь агента, 'group': присвоить режиму сокета proxy метаданных " "значение 0o664 для применения в случае, если значением metadata_proxy_group " "является root или эффективная группа агента, 'all': присвоить режиму сокета " "proxy метаданных значение 0o666 для использования в остальных случаях." msgid "Metering driver" msgstr "Драйвер измерения" #, python-format msgid "Metering label %(label_id)s does not exist" msgstr "Метка измерения %(label_id)s не существует" #, python-format msgid "Metering label rule %(rule_id)s does not exist" msgstr "Правило метки измерения %(rule_id)s не существует" #, python-format msgid "" "Metering label rule with remote_ip_prefix %(remote_ip_prefix)s overlaps " "another" msgstr "" "Правило метки измерения с remote_ip_prefix %(remote_ip_prefix)s " "перекрывается другим правилом" msgid "Method cannot be called within a transaction." msgstr "Метод нельзя вызывать внутри транзакции." msgid "Migration from distributed router to centralized is not supported" msgstr "" "Миграция с распределенных маршрутизаторов на централизованный не " "поддерживается" msgid "MinRtrAdvInterval setting for radvd.conf" msgstr "Параметр MinRtrAdvInterval для radvd.conf" msgid "Minimize polling by monitoring ovsdb for interface changes." msgstr "" "Минимизировать опрос путем мониторинга ovsdb на предмет изменений интерфейса." #, python-format msgid "Missing key in mapping: '%s'" msgstr "Отсутствует ключ в отображении: '%s'" #, python-format msgid "Missing value in mapping: '%s'" msgstr "Отсутствует значение в отображении: '%s'" msgid "Multicast IP subnet is not supported if enable_dhcp is True." 
msgstr "" "Мультикастовая подсеть IP не поддерживается, если enable_dhcp равен True." msgid "" "Multicast group for VXLAN. When configured, will enable sending all " "broadcast traffic to this multicast group. When left unconfigured, will " "disable multicast VXLAN mode." msgstr "" "Многоадресная группа для VXLAN. Если она настроена, то весь " "широковещательный трафик направляется в эту группу. Если она не задана, то " "режим многоадресной передачи VXLAN выключен." msgid "" "Multicast group(s) for vxlan interface. A range of group addresses may be " "specified by using CIDR notation. Specifying a range allows different VNIs " "to use different group addresses, reducing or eliminating spurious broadcast " "traffic to the tunnel endpoints. To reserve a unique group for each possible " "(24-bit) VNI, use a /8 such as 239.0.0.0/8. This setting must be the same on " "all the agents." msgstr "" "Многоадресные группы для интерфейса vxlan. Диапазон адресов группы можно " "указать в нотации CIDR. Если указан диапазон, то различные VNI смогут " "использовать разные адреса группы, что снижает или даже исключает " "интенсивный широковещательный трафик для конечных точек туннеля. Для того " "чтобы зарезервировать уникальную группу для каждого возможного VNI (24 " "бита), используйте формат /8, например, 239.0.0.0/8. Этот параметр должен " "быть одинаковым во всех агентах." #, python-format msgid "Multiple agents with agent_type=%(agent_type)s and host=%(host)s found" msgstr "Найдено несколько агентов с agent_type=%(agent_type)s и host=%(host)s" #, python-format msgid "Multiple default providers for service %s" msgstr "Несколько поставщиков по умолчанию для службы %s" #, python-format msgid "Multiple plugins for service %s were configured" msgstr "Для службы %s настроено несколько модулей" #, python-format msgid "Multiple providers specified for service %s" msgstr "Несколько поставщиков задано для службы %s" msgid "Multiple tenant_ids in bulk security group rule create not allowed" msgstr "" "Групповая операция создания нескольких tenant_ids в правиле группы защиты не " "разрешена" msgid "Must also specify protocol if port range is given." msgstr "При указании диапазона портов необходимо задать протокол." msgid "Must specify one or more actions on flow addition or modification" msgstr "" "Необходимо указать одно или несколько действий добавления или изменения " "потока" #, python-format msgid "Name %(dns_name)s is duplicated in the external DNS service" msgstr "Имя %(dns_name)s повторяется во внешней службе DNS" #, python-format msgid "" "Name '%s' must be 1-63 characters long, each of which can only be " "alphanumeric or a hyphen." msgstr "" "Длина имени '%s' должна находиться в диапазоне от 1 до 63 алфавитно-цифровых " "символов или дефисов." #, python-format msgid "Name '%s' must not start or end with a hyphen." msgstr "Имя '%s' не должно начинаться дефисом или оканчиваться им." msgid "Name of Open vSwitch bridge to use" msgstr "Имя используемого моста Open vSwitch" msgid "" "Name of nova region to use. Useful if keystone manages more than one region." msgstr "" "Имя используемого региона nova. Необходимо, если keystone управляет " "несколькими регионами." msgid "Name of the FWaaS Driver" msgstr "Имя драйвера FWaaS" msgid "Namespace of the router" msgstr "Пространство имен маршрутизатора" msgid "Native pagination depend on native sorting" msgstr "Внутреннее разбиение на страницы зависит от внутренней сортировки" #, python-format msgid "" "Need to apply migrations from %(project)s contract branch. 
This will require " "all Neutron server instances to be shutdown before proceeding with the " "upgrade." msgstr "" "Требуется применить миграцию из ветви contract %(project)s. При этом " "необходимо выключить все серверы Neutron перед началом обновления." msgid "Negative delta (downgrade) not supported" msgstr "Отрицательная дельта (понижение) не поддерживается" msgid "Negative relative revision (downgrade) not supported" msgstr "Отрицательная относительная ревизия (понижение) не поддерживается" #, python-format msgid "" "Network %(network_id)s is already bound to BgpSpeaker %(bgp_speaker_id)s." msgstr "Сеть %(network_id)s уже связана с источником BGP %(bgp_speaker_id)s." #, python-format msgid "" "Network %(network_id)s is not associated with BGP speaker %(bgp_speaker_id)s." msgstr "Сеть %(network_id)s не связана с источником BGP %(bgp_speaker_id)s." #, python-format msgid "Network %(network_id)s is not bound to a BgpSpeaker." msgstr "Сеть %(network_id)s не связана с источником BGP." #, python-format msgid "Network %(network_id)s is not bound to a IPv%(ip_version)s BgpSpeaker." msgstr "Сеть %(network_id)s не связана с источником BGP IPv%(ip_version)s." #, python-format msgid "Network %s does not contain any IPv4 subnet" msgstr "Сеть %s не содержит подсетей IPv4" #, python-format msgid "Network %s is not a valid external network" msgstr "Сеть %s не является допустимой внешней сетью" #, python-format msgid "Network %s is not an external network" msgstr "Сеть %s не является внешней" #, python-format msgid "" "Network of size %(size)s, from IP range %(parent_range)s excluding IP ranges " "%(excluded_ranges)s was not found." msgstr "" "Сеть размера %(size)s из диапазона IP-адресов %(parent_range)s, кроме " "диапазонов IP-адресов %(excluded_ranges)s, не найдена." msgid "Network that will have instance metadata proxied." msgstr "Сеть, у которой метаданные экземпляра будут доступны через посредника." #, python-format msgid "Network type value '%s' not supported" msgstr "Значение типа сети '%s' не поддерживается" msgid "Network type value needed by the ML2 plugin" msgstr "Для модуля ML2 требуется значение типа сети" msgid "Network types supported by the agent (gre and/or vxlan)." msgstr "Типы сетей, поддерживаемые агентом (gre и/или vxlan)." msgid "" "Neutron IPAM (IP address management) driver to use. If ipam_driver is not " "set (default behavior), no IPAM driver is used. In order to use the " "reference implementation of Neutron IPAM driver, use 'internal'." msgstr "" "Используемый драйвер Neutron IPAM (управление IP-адресами). Если параметр " "ipam_driver не задан (значение по умолчанию), то никакой драйвер IPAM не " "используется. Для применения стандартной реализации драйвера IPAM Neutron " "укажите значение 'internal'." msgid "Neutron Service Type Management" msgstr "Управление типами служб Neutron" msgid "Neutron core_plugin not configured!" msgstr "Не настроен core_plugin Neutron!" msgid "Neutron plugin provider module" msgstr "Модуль провайдера модулей Neutron" msgid "Neutron quota driver class" msgstr "Класс драйвера квоты Neutron" msgid "New value for first_ip or last_ip has to be specified." msgstr "Необходимо указать новое значение для first_ip или last_ip." 
msgid "No default router:external network" msgstr "Отсутствует сеть router:external по умолчанию" #, python-format msgid "No default subnetpool found for IPv%s" msgstr "Не найден пул подсетей по умолчанию для IPv%s" msgid "No default subnetpools defined" msgstr "Не определены пулы подсетей по умолчанию" #, python-format msgid "No eligible l3 agent associated with external network %s found" msgstr "Не найдены допустимые агенты l3, связанные с внешней сетью %s" #, python-format msgid "No more IP addresses available for subnet %(subnet_id)s." msgstr "В подсети %(subnet_id)s больше нет доступных IP-адресов." #, python-format msgid "" "No more Virtual Router Identifier (VRID) available when creating router " "%(router_id)s. The limit of number of HA Routers per tenant is 254." msgstr "" "Не осталось доступных ИД виртуального маршрутизатора (VRID) при создании " "маршрутизатора %(router_id)s. Ограничение числа маршрутизаторов высокой " "готовности на арендатора составляет 254." msgid "No offline migrations pending." msgstr "Нет ожидающих автономных миграций." #, python-format msgid "No providers specified for '%s' service, exiting" msgstr "Не заданы поставщики для службы '%s', выход" #, python-format msgid "No shared key in %s fields" msgstr "Нет общего ключа в полях %s" msgid "Not allowed to manually assign a router to an agent in 'dvr' mode." msgstr "Не разрешено вручную назначать маршрутизатор агенту в режиме 'dvr'." msgid "Not allowed to manually remove a router from an agent in 'dvr' mode." msgstr "Не разрешено вручную удалять маршрутизатор из агента в режиме 'dvr'." #, python-format msgid "" "Not enough l3 agents available to ensure HA. Minimum required " "%(min_agents)s, available %(num_agents)s." msgstr "" "Недостаточно агентов L3 для обеспечения высокой готовности. Требуется " "минимум %(min_agents)s, доступно %(num_agents)s." msgid "" "Number of DHCP agents scheduled to host a tenant network. If this number is " "greater than 1, the scheduler automatically assigns multiple DHCP agents for " "a given tenant network, providing high availability for DHCP service." msgstr "" "Число агентов DHCP, запланированных для обслуживания сети арендатора. Если " "это значение больше 1, планировщик автоматически присваивает несколько " "агентов DHCP для заданной сети арендатора, обеспечивая высокую готовность " "службы DHCP." msgid "Number of RPC worker processes dedicated to state reports queue" msgstr "Количество процессов обработчика RPC для очереди отчетов о состоянии" msgid "Number of RPC worker processes for service" msgstr "Количество процессов обработчика RPC для службы" msgid "Number of backlog requests to configure the metadata server socket with" msgstr "" "Количество непереданных запросов для настройки сокета сервера метаданных" msgid "Number of backlog requests to configure the socket with" msgstr "Количество непереданных запросов для настройки сокета" msgid "" "Number of bits in an ipv4 PTR zone that will be considered network prefix. " "It has to align to byte boundary. Minimum value is 8. Maximum value is 24. " "As a consequence, range of values is 8, 16 and 24" msgstr "" "Число разрядов в зоне PTR ipv4, которые будут обрабатываться как префикс " "сети. Должно быть выровнено на границу байта. Минимальное значение: 8. " "Максимальное значение: 24. Допустимые значения: 8, 16 и 24" msgid "" "Number of bits in an ipv6 PTR zone that will be considered network prefix. " "It has to align to nyble boundary. Minimum value is 4. Maximum value is 124. " "As a consequence, range of values is 4, 8, 12, 16,..., 124" msgstr "" "Число разрядов в зоне PTR ipv6, которые будут обрабатываться как префикс " "сети. Должно быть выровнено на границу полубайта. Минимальное значение: 4. " "Максимальное значение: 124. Допустимые значения: 4, 8, 12, 16,..., 124" msgid "" "Number of floating IPs allowed per tenant. A negative value means unlimited." msgstr "" "Количество нефиксированных IP-адресов на одного арендатора. Отрицательное " "значение - не ограничено." msgid "" "Number of networks allowed per tenant. A negative value means unlimited." msgstr "" "Число разрешенных сетей на одного арендатора. Отрицательное значение " "означает отсутствие ограничений." msgid "Number of ports allowed per tenant. A negative value means unlimited." msgstr "" "Количество портов на одного арендатора. Отрицательное значение - не " "ограничено." msgid "Number of routers allowed per tenant. A negative value means unlimited." msgstr "" "Количество маршрутизаторов на одного арендатора. Отрицательное значение - не " "ограничено." msgid "" "Number of seconds between sending events to nova if there are any events to " "send." msgstr "" "Интервал, в секундах, между отправкой событий nova, если имеются события, " "требующие отправки." msgid "Number of seconds to keep retrying to listen" msgstr "Интервал (в секундах) для продолжения попыток приема" msgid "" "Number of security groups allowed per tenant. A negative value means " "unlimited." msgstr "" "Количество групп защиты на одного арендатора. Отрицательное значение - не " "ограничено." msgid "" "Number of security rules allowed per tenant. A negative value means " "unlimited." msgstr "" "Количество правил защиты на одного арендатора. Отрицательное значение - не " "ограничено." msgid "" "Number of separate API worker processes for service. If not specified, the " "default is equal to the number of CPUs available for best performance." msgstr "" "Число отдельных процессов обработчиков API для службы. Если не указано, по " "умолчанию равно числу доступных процессоров для достижения максимальной " "производительности." msgid "" "Number of separate worker processes for metadata server (defaults to half of " "the number of CPUs)" msgstr "" "Количество отдельных процессов обработчика для сервера метаданных (значение " "по умолчанию: половина от количества процессоров)" msgid "Number of subnets allowed per tenant, A negative value means unlimited." msgstr "" "Количество подсетей на одного арендатора. Отрицательное значение - не " "ограничено." msgid "" "Number of threads to use during sync process. Should not exceed connection " "pool size configured on server." msgstr "" "Число нитей, используемых в процессе синхронизации. Оно не должно превышать " "размер пула соединений, настроенный на сервере." msgid "OK" msgstr "OK" msgid "" "OVS datapath to use. 'system' is the default value and corresponds to the " "kernel datapath. To enable the userspace datapath set this value to 'netdev'." msgstr "" "Путь к данным OVS. Значение по умолчанию 'system' соответствует пути к " "данным, задаваемому ядром. Для того чтобы использовать пользовательский путь " "к данным, укажите значение 'netdev'." msgid "OVS vhost-user socket directory." msgstr "Каталог сокетов OVS vhost-user." #, python-format msgid "OVSDB Error: %s" msgstr "Ошибка OVSDB: %s" #, python-format msgid "Object action %(action)s failed because: %(reason)s." msgstr "Действие объекта %(action)s не выполнено, причина: %(reason)s." 
msgid "Only admin can view or configure quota" msgstr "Только администратор может просматривать и настраивать квоту" msgid "Only admin is authorized to access quotas for another tenant" msgstr "Только администратор имеет доступ к квотам других арендаторов" msgid "Only admins can manipulate policies on networks they do not own." msgstr "" "Только администраторы могут управлять стратегиями в сетях, владельцами " "которых они не являются." msgid "Only admins can manipulate policies on objects they do not own" msgstr "" "Только администраторы могут управлять стратегиями объектов, владельцами " "которых они не являются." msgid "Only allowed to update rules for one security profile at a time" msgstr "" "Разрешено обновлять правила одновременно только для одного профайла защиты" msgid "Only remote_ip_prefix or remote_group_id may be provided." msgstr "Можно задать только remote_ip_prefix или remote_group_id." msgid "OpenFlow interface to use." msgstr "Интерфейс OpenFlow для использования." #, python-format msgid "" "Operation %(op)s is not supported for device_owner %(device_owner)s on port " "%(port_id)s." msgstr "" "Операция %(op)s не поддерживается для device_owner %(device_owner)s, порт: " "%(port_id)s." #, python-format msgid "Operation not supported on device %(dev_name)s" msgstr "Операция не поддерживается в устройстве %(dev_name)s" msgid "" "Ordered list of network_types to allocate as tenant networks. The default " "value 'local' is useful for single-box testing but provides no connectivity " "between hosts." msgstr "" "Упорядоченный список типов сетей для выделения в качестве сетей арендатора. " "Значение по умолчанию 'local' полезно для тестирования автономной системы, " "но не позволяет связать хосты." msgid "Override the default dnsmasq settings with this file." msgstr "" "Переопределите параметры по умолчанию для dnsmasq с помощью этого файла." msgid "Owner type of the device: network/compute" msgstr "Тип владельца устройства: network/compute" msgid "POST requests are not supported on this resource." msgstr "Запросы POST не поддерживаются этим ресурсом." #, python-format msgid "Package %s not installed" msgstr "Пакет %s не установлен" #, python-format msgid "Parameter %(param)s must be of %(param_type)s type." msgstr "Параметр %(param)s должен иметь тип %(param_type)s." #, python-format msgid "Parsing bridge_mappings failed: %s." msgstr "Синтаксический анализ bridge_mappings не выполнен: %s." msgid "Parsing supported pci_vendor_devs failed" msgstr "Ошибка анализа поддерживаемых pci_vendor_devs" msgid "Password for connecting to designate in admin context" msgstr "" "Пароль для подключения к назначенному объекту в административном контексте" #, python-format msgid "Password not specified for authentication type=%(auth_type)s." msgstr "Пароль для идентификации типа %(auth_type)s не указан." msgid "Path to PID file for this process" msgstr "Путь к файлу PID для этого процесса" msgid "Path to the router directory" msgstr "Путь к каталогу маршрутизатора" msgid "Peer patch port in integration bridge for tunnel bridge." msgstr "Равноправный порт исправлений в мосте интеграции для моста туннеля." msgid "Peer patch port in tunnel bridge for integration bridge." msgstr "Равноправный порт исправлений в мосте туннеля для моста интеграции." msgid "Per-tenant subnet pool prefix quota exceeded." msgstr "Превышена квота префикса подсети для арендатора." 
msgid "Phase upgrade options do not accept revision specification" msgstr "Опции обновления фазы не принимают спецификацию ревизии" msgid "Ping timeout" msgstr "Тайм-аут проверки связи" #, python-format msgid "Plugin '%s' not found." msgstr "Модуль %s не найден." msgid "Plugin does not support updating provider attributes" msgstr "Модуль не поддерживает обновление атрибутов поставщика" msgid "Policy configuration policy.json could not be found." msgstr "Не найдена конфигурация стратегии policy.json." #, python-format msgid "Port %(id)s does not have fixed ip %(address)s" msgstr "Порт %(id)s не имеет фиксированного IP-адреса %(address)s" #, python-format msgid "Port %(port)s does not exist on %(bridge)s!" msgstr "Порт %(port)s не существует в %(bridge)s." #, python-format msgid "Port %(port_id)s is already acquired by another DHCP agent" msgstr "Порт %(port_id)s уже занят другим агентом DHCP. " #, python-format msgid "" "Port %(port_id)s is associated with a different tenant than Floating IP " "%(floatingip_id)s and therefore cannot be bound." msgstr "" "Порт %(port_id)s ассоциируется с арендатором, отличным от нефиксированного " "IP %(floatingip_id)s, поэтому его нельзя связать." #, python-format msgid "Port %(port_id)s is not managed by this agent. " msgstr "Порт %(port_id)s не управляется этим агентом. " #, python-format msgid "Port %s does not exist" msgstr "Порт %s не существует" #, python-format msgid "" "Port %s has multiple fixed IPv4 addresses. Must provide a specific IPv4 " "address when assigning a floating IP" msgstr "" "Порт %s содержит несколько фиксированных адресов IPv4. При назначении " "нефиксированного IP-адреса необходимо указать конкретный адрес IPv4" msgid "" "Port Security must be enabled in order to have allowed address pairs on a " "port." msgstr "" "Необходимо включить защиту порта для получения разрешенных пар адресов на " "порту." msgid "" "Port has security group associated. Cannot disable port security or ip " "address until security group is removed" msgstr "" "С портом связана группа защиты. Пока группа защиты не удалена, невозможно " "выключить защиту порта или IP-адрес" msgid "" "Port security must be enabled and port must have an IP address in order to " "use security groups." msgstr "" "Для использования групп защиты необходимо включить защиту порта и присвоить " "ему IP-адрес." msgid "" "Port to listen on for OpenFlow connections. Used only for 'native' driver." msgstr "" "Порт для обработки запросов на соединение OpenFlow. Используется только для " "'встроенного' драйвера." #, python-format msgid "Prefix '%(prefix)s' not supported in IPv%(version)s pool." msgstr "Префикс %(prefix)s не поддерживается в пуле IPv%(version)s." msgid "Prefix Delegation can only be used with IPv6 subnets." msgstr "Делегирование префикса можно использовать только в подсетях IPv6." msgid "Private key of client certificate." msgstr "Личный ключ сертификата клиента." #, python-format msgid "Probe %s deleted" msgstr "Тест %s удален" #, python-format msgid "Probe created : %s " msgstr "Создан тест %s " msgid "Process is already started" msgstr "Процесс уже запущен" msgid "Process is not running." msgstr "Процесс не запущен." msgid "Protocol to access nova metadata, http or https" msgstr "Протокол для доступа к метаданным nova (http или https)" #, python-format msgid "Provider name %(name)s is limited by %(len)s characters" msgstr "Имя поставщика %(name)s, не более %(len)s символов" #, python-format msgid "QoS Policy %(policy_id)s is used by %(object_type)s %(object_id)s." 
msgstr "" "Стратегия QoS %(policy_id)s используется %(object_type)s %(object_id)s." #, python-format msgid "" "QoS binding for network %(net_id)s and policy %(policy_id)s could not be " "found." msgstr "" "Не найдено связывание QoS для сети %(net_id)s и стратегии %(policy_id)s." #, python-format msgid "" "QoS binding for port %(port_id)s and policy %(policy_id)s could not be found." msgstr "" "Не найдено связывание QoS для порта %(port_id)s и стратегии %(policy_id)s." #, python-format msgid "QoS policy %(policy_id)s could not be found." msgstr "Не найдена стратегия QoS %(policy_id)s." #, python-format msgid "QoS rule %(rule_id)s for policy %(policy_id)s could not be found." msgstr "Не найдено правило QoS %(rule_id)s для стратегии %(policy_id)s." #, python-format msgid "RBAC policy of type %(object_type)s with ID %(id)s not found" msgstr "Не найдена стратегия RBAC с типом %(object_type)s и ИД %(id)s" #, python-format msgid "" "RBAC policy on object %(object_id)s cannot be removed because other objects " "depend on it.\n" "Details: %(details)s" msgstr "" "Не удается удалить стратегию RBAC для объекта %(object_id)s, так как от нее " "зависят другие объекты.\n" "Сведения: %(details)s" msgid "" "Range of seconds to randomly delay when starting the periodic task scheduler " "to reduce stampeding. (Disable by setting to 0)" msgstr "" "Диапазон случайных задержек (в секундах) при запуске планировщика " "периодических задач во избежание взрывного запуска. (Для выключения задайте " "0)" msgid "Ranges must be in the same IP version" msgstr "Версия IP для диапазонов должна совпадать" msgid "Ranges must be netaddr.IPRange" msgstr "Формат диапазонов: netaddr.IPRange" msgid "Ranges must not overlap" msgstr "Диапазоны не должны перекрываться" #, python-format msgid "" "Received type '%(type)s' and value '%(value)s'. Expecting netaddr.EUI type." msgstr "" "Получено: тип '%(type)s', значение '%(value)s'. Ожидался тип netaddr.EUI." #, python-format msgid "" "Received type '%(type)s' and value '%(value)s'. Expecting netaddr.IPAddress " "type." msgstr "" "Получено: тип '%(type)s', значение '%(value)s'. Ожидался тип netaddr." "IPAddress." #, python-format msgid "" "Received type '%(type)s' and value '%(value)s'. Expecting netaddr.IPNetwork " "type." msgstr "" "Получено: тип '%(type)s', значение '%(value)s'. Ожидался тип netaddr." "IPNetwork." #, python-format msgid "" "Release aware branch labels (%s) are deprecated. Please switch to expand@ " "and contract@ labels." msgstr "" "Метки ветви информации о выпуске (%s) устарели. Перейдите на использование " "меток expand@ и contract@." msgid "Remote metadata server experienced an internal server error." msgstr "Внутренняя ошибка удаленного сервера метаданных." msgid "" "Repository does not contain HEAD files for contract and expand branches." msgstr "Хранилище не содержит файлы HEAD для ветвей contract и expand." msgid "" "Representing the resource type whose load is being reported by the agent. " "This can be \"networks\", \"subnets\" or \"ports\". When specified (Default " "is networks), the server will extract particular load sent as part of its " "agent configuration object from the agent report state, which is the number " "of resources being consumed, at every report_interval.dhcp_load_type can be " "used in combination with network_scheduler_driver = neutron.scheduler." "dhcp_agent_scheduler.WeightScheduler When the network_scheduler_driver is " "WeightScheduler, dhcp_load_type can be configured to represent the choice " "for the resource being balanced. 
Example: dhcp_load_type=networks" msgstr "" "Представление типа ресурса, о чьей загрузке сообщает агент. Это может быть " "\"networks\", \"subnets\" или \"ports\". Когда указано (по умолчанию " "networks), сервер извлекает определенную загрузку, отправленную как часть " "его объекта конфигурации агента из состояния отчета агента, который содержит " "количество потребленных ресурсов за каждый интервал report_interval. " "dhcp_load_type можно использовать в сочетании с network_scheduler_driver = " "neutron.scheduler.dhcp_agent_scheduler.WeightScheduler Когда " "network_scheduler_driver - WeightScheduler, dhcp_load_type можно настроить " "для представления выбора балансируемого ресурса. Пример: " "dhcp_load_type=networks" msgid "Request Failed: internal server error while processing your request." msgstr "" "Запрос не выполнен: при обработке запроса произошла внутренняя ошибка " "сервера." #, python-format msgid "" "Request contains duplicate address pair: mac_address %(mac_address)s " "ip_address %(ip_address)s." msgstr "" "В запросе содержится копия пары адресов: mac_address %(mac_address)s " "ip_address %(ip_address)s." #, python-format msgid "" "Requested subnet with cidr: %(cidr)s for network: %(network_id)s overlaps " "with another subnet" msgstr "" "Запрошенная подсеть с cidr %(cidr)s для сети %(network_id)s перекрывается с " "другой подсетью" msgid "" "Reset flow table on start. Setting this to True will cause brief traffic " "interruption." msgstr "" "Выполнить сброс таблицы потоков при старте. При значении True вызовет " "кратковременное прерывание трафика." #, python-format msgid "Resource %(resource)s %(resource_id)s could not be found." msgstr "Ресурс %(resource)s %(resource_id)s не найден." #, python-format msgid "Resource %(resource_id)s of type %(resource_type)s not found" msgstr "Ресурс %(resource_id)s с типом %(resource_type)s не найден" #, python-format msgid "" "Resource '%(resource_id)s' is already associated with provider " "'%(provider)s' for service type '%(service_type)s'" msgstr "" "Ресурс '%(resource_id)s' уже связан с поставщиком '%(provider)s' для типа " "службы '%(service_type)s'" msgid "Resource body required" msgstr "Требуется тело ресурса" msgid "" "Resource name(s) that are supported in quota features. This option is now " "deprecated for removal." msgstr "" "Имена ресурсов, которые поддерживаются в функциях квоты. Эта опция устарела " "и будет удалена." msgid "Resource not found." msgstr "Ресурс не найден." msgid "Resources required" msgstr "Требуются ресурсы" msgid "" "Root helper application. Use 'sudo neutron-rootwrap /etc/neutron/rootwrap." "conf' to use the real root filter facility. Change to 'sudo' to skip the " "filtering and just run the command directly." msgstr "" "Вспомогательное приложение для получения прав root. Команда 'sudo neutron-" "rootwrap /etc/neutron/rootwrap.conf' вызывает утилиту фильтрации с правами " "root. Вызовите 'sudo', чтобы пропустить фильтрацию и выполнить команду " "непосредственно." msgid "Root helper daemon application to use when possible." msgstr "" "Приложение вспомогательного демона для получения прав доступа root " "(используется, когда это возможно)." msgid "Root permissions are required to drop privileges." msgstr "Для сброса прав доступа требуются права доступа пользователя Root." #, python-format msgid "Route %(cidr)s not advertised for BGP Speaker %(speaker_as)d." msgstr "Маршрут %(cidr)s не анонсируется для источника BGP %(speaker_as)d." 
#, python-format msgid "Router %(router_id)s %(reason)s" msgstr "Маршрутизатор %(router_id)s %(reason)s" #, python-format msgid "Router %(router_id)s could not be found" msgstr "Не найден маршрутизатор %(router_id)s" #, python-format msgid "Router %(router_id)s does not have an interface with id %(port_id)s" msgstr "У маршрутизатора %(router_id)s нет интерфейса с ИД %(port_id)s" #, python-format msgid "Router %(router_id)s has no interface on subnet %(subnet_id)s" msgstr "У маршрутизатора %(router_id)s нет интерфейса в подсети %(subnet_id)s" #, python-format msgid "Router '%(router_id)s' cannot be both DVR and HA." msgstr "Маршрутизатор '%(router_id)s' не может быть одновременно DVR и HA." #, python-format msgid "Router '%(router_id)s' is not compatible with this agent." msgstr "Маршрутизатор '%(router_id)s' несовместим с этим агентом." #, python-format msgid "Router already has a port on subnet %s" msgstr "У маршрутизатора уже есть порт в подсети %s" #, python-format msgid "" "Router interface for subnet %(subnet_id)s on router %(router_id)s cannot be " "deleted, as it is required by one or more floating IPs." msgstr "" "Невозможно удалить интерфейс маршрутизатора для подсети %(subnet_id)s для " "маршрутизатора %(router_id)s, так как он требуется одному или нескольким " "нефиксированным IP-адресам." #, python-format msgid "" "Router interface for subnet %(subnet_id)s on router %(router_id)s cannot be " "deleted, as it is required by one or more routes." msgstr "" "Невозможно удалить интерфейс маршрутизатора для подсети %(subnet_id)s для " "маршрутизатора %(router_id)s, так как он требуется одному или нескольким " "маршрутам." msgid "Router port must have at least one fixed IP" msgstr "Порт маршрутизатора должен иметь хотя бы один фиксированный IP-адрес" msgid "Router that will have connected instances' metadata proxied." msgstr "" "Маршрутизатор, у которого метаданные подключенных экземпляров будут доступны " "через посредника." #, python-format msgid "" "Row doesn't exist in the DB. Request info: Table=%(table)s. Columns=" "%(columns)s. Records=%(records)s." msgstr "" "Строка не существует в базе данных. Запрос: Table=%(table)s. Columns=" "%(columns)s. Records=%(records)s." msgid "Run as daemon." msgstr "Выполнить как демон." #, python-format msgid "Running %(cmd)s (%(desc)s) for %(project)s ..." msgstr "Выполняется %(cmd)s (%(desc)s) для %(project)s ..." #, python-format msgid "Running %(cmd)s for %(project)s ..." msgstr "Выполняется %(cmd)s для %(project)s ..." msgid "Running without keystone AuthN requires that tenant_id is specified" msgstr "Для выполнения без AuthN Keystone следует указать tenant_id" msgid "" "Seconds between nodes reporting state to server; should be less than " "agent_down_time, best if it is half or less than agent_down_time." msgstr "" "Интервал отправки сообщений о состоянии узлов на сервер (в секундах). " "Значение должно быть меньше, чем agent_down_time, оптимально - не больше " "половины значения agent_down_time." msgid "Seconds between running periodic tasks" msgstr "Интервал запуска периодических задач (в секундах)" msgid "" "Seconds to regard the agent is down; should be at least twice " "report_interval, to be sure the agent is down for good." msgstr "" "Интервал (в секундах), по истечении которого агент считается выключенным; " "должен по меньшей мере вдвое превышать значение report_interval, чтобы " "гарантированно считать, что агент не работает." #, python-format msgid "Security Group %(id)s %(reason)s." msgstr "Группа защиты %(id)s %(reason)s." 
#, python-format msgid "Security Group Rule %(id)s %(reason)s." msgstr "Правило группы защиты %(id)s %(reason)s." #, python-format msgid "Security group %(id)s does not exist" msgstr "Группа защиты %(id)s не существует" #, python-format msgid "Security group rule %(id)s does not exist" msgstr "Правило группы защиты %(id)s не существует" #, python-format msgid "Security group rule already exists. Rule id is %(rule_id)s." msgstr "Правило группы защиты уже существует. ИД правила: %(rule_id)s." #, python-format msgid "" "Security group rule for ethertype '%(ethertype)s' not supported. Allowed " "values are %(values)s." msgstr "" "Правило группы защиты для типа '%(ethertype)s' не поддерживается. Допустимые " "значения: %(values)s." #, python-format msgid "" "Security group rule protocol %(protocol)s not supported. Only protocol " "values %(values)s and integer representations [0 to 255] are supported." msgstr "" "Протокол правил группы защиты %(protocol)s не поддерживается. Поддерживаются " "значения %(values)s и целочисленные представления [0 - 255]." msgid "Segments and provider values cannot both be set." msgstr "Нельзя одновременно задавать значения сегментов и поставщика." msgid "Selects the Agent Type reported" msgstr "Выбирает указанный Тип агента" msgid "" "Send notification to nova when port data (fixed_ips/floatingip) changes so " "nova can update its cache." msgstr "" "Отправить уведомление nova в случае изменения данных порта (fixed_ips/" "floatingip), чтобы обеспечить обновление кэша nova." msgid "Send notification to nova when port status changes" msgstr "Отправить уведомление nova в случае изменения состояния порта" msgid "" "Send this many gratuitous ARPs for HA setup, if less than or equal to 0, the " "feature is disabled" msgstr "" "Отправить указанное количество уведомлений ARP для настройки высокой " "готовности. Нулевое или отрицательное значение выключает эту функцию" #, python-format msgid "Service Profile %(sp_id)s could not be found." msgstr "Не найден профайл службы %(sp_id)s." #, python-format msgid "Service Profile %(sp_id)s is already associated with flavor %(fl_id)s." msgstr "Профайл службы %(sp_id)s уже связан с разновидностью %(fl_id)s." #, python-format msgid "Service Profile %(sp_id)s is not associated with flavor %(fl_id)s." msgstr "Профайл службы %(sp_id)s не связан с разновидностью %(fl_id)s." #, python-format msgid "Service Profile %(sp_id)s is used by some service instance." msgstr "Профайл службы %(sp_id)s используется одним из экземпляров службы." #, python-format msgid "Service Profile driver %(driver)s could not be found." msgstr "Драйвер профайла службы %(driver)s не найден." msgid "Service Profile is not enabled." msgstr "Профайл службы не включен." msgid "Service Profile needs either a driver or metainfo." msgstr "Для профайла службы требуется драйвер или мета-информация." #, python-format msgid "" "Service provider '%(provider)s' could not be found for service type " "%(service_type)s" msgstr "" "Поставщик службы '%(provider)s' не найден для типа службы %(service_type)s" msgid "Service to handle DHCPv6 Prefix delegation." msgstr "Служба для обработки делегирования префикса DHCPv6." #, python-format msgid "Service type %(service_type)s does not have a default service provider" msgstr "Тип службы %(service_type)s не содержит поставщика службы по умолчанию" msgid "" "Set new timeout in seconds for new rpc calls after agent receives SIGTERM. 
" "If value is set to 0, rpc timeout won't be changed" msgstr "" "Задать новый тайм-аут (в секундах) для новых вызовов rpc после получения " "агентом сигнала SIGTERM. При значении 0 тайм-аут rpc не может быть изменен" msgid "" "Set or un-set the don't fragment (DF) bit on outgoing IP packet carrying GRE/" "VXLAN tunnel." msgstr "" "Установка/сброс бита Не разбивать на фрагменты (DF) в исходящем пакете IP, " "несущем туннель GRE/VXLAN." msgid "" "Set or un-set the tunnel header checksum on outgoing IP packet carrying GRE/" "VXLAN tunnel." msgstr "" "Устанавливает или сбрасывает контрольную сумму заголовка туннеля в исходящем " "IP-пакете, поддерживающем туннель GRE/VXLAN." msgid "Shared address scope can't be unshared" msgstr "Для общей адресной области нельзя отменить совместное использование" msgid "" "Specifying 'tenant_id' other than authenticated tenant in request requires " "admin privileges" msgstr "" "Указание 'tenant_id', отличного от идентифицированного арендатора в запросе, " "требует прав доступа администратора" msgid "String prefix used to match IPset names." msgstr "Префикс строки для сопоставления имен IPset." #, python-format msgid "Sub-project %s not installed." msgstr "Подпроект %s не установлен." msgid "Subnet for router interface must have a gateway IP" msgstr "" "Маска подсети для интерфейса маршрутизатора должна иметь IP-адрес шлюза" msgid "" "Subnet has a prefix length that is incompatible with DHCP service enabled." msgstr "Длина префикса подсети несовместима с включенной службой DHCP." #, python-format msgid "Subnet pool %(subnetpool_id)s could not be found." msgstr "Не найден пул подсетей %(subnetpool_id)s." msgid "Subnet pool has existing allocations" msgstr "Пул подсетей имеет существующие выделения" msgid "Subnet used for the l3 HA admin network." msgstr "" "Подсеть, используемая для сети администрирования высокой готовности L3." msgid "" "Subnets hosted on the same network must be allocated from the same subnet " "pool." msgstr "" "Подсети в одной и той же сети должны выделяться из одного пула подсетей." msgid "Suffix to append to all namespace names." msgstr "Суффикс, добавляемый ко всем пространствам имен." msgid "" "System-wide flag to determine the type of router that tenants can create. " "Only admin can override." msgstr "" "Общесистемный флаг для определения типа маршрутизаторов, которые арендаторы " "могут создавать. Может быть переопределен только администратором." msgid "TCP Port to listen for metadata server requests." msgstr "Порт TCP для приема запросов сервера метаданных." msgid "TCP Port used by Neutron metadata namespace proxy." msgstr "Порт TCP, применяемый прокси пространства имен метаданных." msgid "TCP Port used by Nova metadata server." msgstr "Порт TCP, используемый сервером метаданных Nova." #, python-format msgid "TLD '%s' must not be all numeric" msgstr "TLD '%s' не должен быть полностью числовым" msgid "TOS for vxlan interface protocol packets." msgstr "TOS для пакетов протокола интерфейса vxlan." msgid "TTL for vxlan interface protocol packets." msgstr "TTL для пакетов протокола интерфейса vxlan." #, python-format msgid "Table %s can only be queried by UUID" msgstr "Запрос таблицы %s может выполняться только по UUID" #, python-format msgid "Tag %(tag)s could not be found." msgstr "Тег %(tag)s не найден." 
#, python-format msgid "Tenant %(tenant_id)s not allowed to create %(resource)s on this network" msgstr "" "Арендатору %(tenant_id)s не разрешено создание ресурса %(resource)s в этой " "сети" msgid "Tenant id for connecting to designate in admin context" msgstr "" "ИД арендатора для подключения к назначенному объекту в административном " "контексте" msgid "Tenant name for connecting to designate in admin context" msgstr "" "Имя арендатора для подключения к назначенному объекту в административном " "контексте" msgid "Tenant network creation is not enabled." msgstr "Создание сети арендатора не разрешено." msgid "Tenant-id was missing from quota request." msgstr "В запросе квоты отсутствует ИД арендатора." msgid "" "The 'gateway_external_network_id' option must be configured for this agent " "as Neutron has more than one external network." msgstr "" "Для этого агента необходимо настроить опцию 'gateway_external_network_id', " "так как Neutron имеет несколько внешних сетей." msgid "" "The DHCP agent will resync its state with Neutron to recover from any " "transient notification or RPC errors. The interval is number of seconds " "between attempts." msgstr "" "Агент DHCP будет заново синхронизировать свое состояние с Neutron для " "восстановления после временных ошибок RPC. Интервал между попытками задается " "в секундах." msgid "" "The DHCP server can assist with providing metadata support on isolated " "networks. Setting this value to True will cause the DHCP server to append " "specific host routes to the DHCP request. The metadata service will only be " "activated when the subnet does not contain any router port. The guest " "instance must be configured to request host routes via DHCP (Option 121). " "This option doesn't have any effect when force_metadata is set to True." msgstr "" "Сервер DHCP может помогать в получении метаданных в изолированных сетях. " "Если параметру присвоено значение True, то сервер DHCP будет добавлять " "маршруты к хостам в запрос DHCP. Служба метаданных активируется, только " "когда подсеть не содержит портов маршрутизатора. Гостевой экземпляр должен " "быть настроен для запросов маршрутов к хостам через DHCP (Option 121). Этот " "параметр ни на что не влияет, если force_metadata задан равным True." #, python-format msgid "" "The HA Network CIDR specified in the configuration file isn't valid; " "%(cidr)s." msgstr "" "В файле конфигурации указан недопустимый адрес CIDR сети высокой готовности. " "%(cidr)s." msgid "The UDP port to use for VXLAN tunnels." msgstr "Порт UDP, применяемый для туннелей VXLAN." #, python-format msgid "" "The address allocation request could not be satisfied because: %(reason)s" msgstr "Не удается выполнить запрос на выделение адреса, причина: %(reason)s" msgid "The advertisement interval in seconds" msgstr "Интервал объявления в секундах" #, python-format msgid "The allocation pool %(pool)s is not valid." msgstr "Пул выделения %(pool)s недопустим." #, python-format msgid "" "The allocation pool %(pool)s spans beyond the subnet cidr %(subnet_cidr)s." msgstr "" "Пул выделения %(pool)s выходит за пределы cidr подсети %(subnet_cidr)s." #, python-format msgid "" "The attribute '%(attr)s' is reference to other resource, can't used by sort " "'%(resource)s'" msgstr "" "Атрибут '%(attr)s' является ссылкой на другой ресурс и не может " "использоваться для сортировки '%(resource)s'" msgid "" "The base MAC address Neutron will use for VIFs. The first 3 octets will " "remain unchanged. If the 4th octet is not 00, it will also be used. 
The " "others will be randomly generated." msgstr "" "Базовый mac-адрес, используемый в Neutron для VIF. Первые 3 октета не будут " "изменены. Если 4-й октет не равен 00, он тоже будет использоваться. " "Остальные будут созданы случайным образом." msgid "" "The base mac address used for unique DVR instances by Neutron. The first 3 " "octets will remain unchanged. If the 4th octet is not 00, it will also be " "used. The others will be randomly generated. The 'dvr_base_mac' *must* be " "different from 'base_mac' to avoid mixing them up with MAC's allocated for " "tenant ports. A 4 octet example would be dvr_base_mac = fa:16:3f:4f:00:00. " "The default is 3 octet" msgstr "" "Базовый mac-адрес, используемый в Neutron для уникальных экземпляров DVR. " "Первые 3 октета не будут изменены. Если 4-й октет не равен 00, он тоже будет " "использоваться. Остальные будут созданы случайным образом. Параметр " "'dvr_base_mac' *должен* отличаться от 'base_mac' для предотвращения " "смешивания их с MAC-адресами, выделенными для портов арендатора. Пример 4 " "октетов: dvr_base_mac = fa:16:3f:4f:00:00. ПО умолчанию используется 3 октета" msgid "" "The connection string for the native OVSDB backend. Requires the native " "ovsdb_interface to be enabled." msgstr "" "Строка соединения для встроенной OVSDB. Должен быть включен встроенный " "ovsdb_interface." msgid "The core plugin Neutron will use" msgstr "Будет использоваться базовый модуль Neutron" #, python-format msgid "" "The dns_name passed is a FQDN. Its higher level labels must be equal to the " "dns_domain option in neutron.conf, that has been set to '%(dns_domain)s'. It " "must also include one or more valid DNS labels to the left of " "'%(dns_domain)s'" msgstr "" "Переданное dns_name является FQDN. Его метки верхнего уровня должны быть " "равны опции dns_domain в файле neutron.conf со значением '%(dns_domain)s'. " "Он должен также включать одну или несколько допустимых меток DNS слева от " "'%(dns_domain)s'" #, python-format msgid "" "The dns_name passed is a PQDN and its size is '%(dns_name_len)s'. The " "dns_domain option in neutron.conf is set to %(dns_domain)s, with a length of " "'%(higher_labels_len)s'. When the two are concatenated to form a FQDN (with " "a '.' at the end), the resulting length exceeds the maximum size of " "'%(fqdn_max_len)s'" msgstr "" "Переданное dns_name является PQDN с размером '%(dns_name_len)s'. Опции " "dns_domain в файле neutron.conf присвоено значение %(dns_domain)s с длиной " "'%(higher_labels_len)s'. При объединении двух имен для формирования FQDN (с " "символом '.' на конце) длина строки результата превысит максимально " "допустимую '%(fqdn_max_len)s'" msgid "The driver used to manage the DHCP server." msgstr "драйвер, используемый для управления сервером DHCP." msgid "The driver used to manage the virtual interface." msgstr "Драйвер, используемый для управления виртуальным интерфейсом." msgid "" "The email address to be used when creating PTR zones. If not specified, the " "email address will be admin@" msgstr "" "Адрес электронной почты для создания записей PTR. Если не указан, будет " "использоваться admin@" #, python-format msgid "" "The following device_id %(device_id)s is not owned by your tenant or matches " "another tenants router." msgstr "" "Следующий device_id %(device_id)s не принадлежит вашему арендатору или " "соответствует маршрутизатору другого арендатора." 
msgid "The host IP to bind to" msgstr "IP-адрес хоста для подключения к" msgid "The interface for interacting with the OVSDB" msgstr "Интерфейс для взаимодействия с OVSDB" msgid "" "The maximum number of items returned in a single response, value was " "'infinite' or negative integer means no limit" msgstr "" "Максимальное количество элементов, возвращаемых в одном ответе; значение " "было 'infinite' или отрицательным целым, что означает бесконечное число" #, python-format msgid "" "The network %(network_id)s has been already hosted by the DHCP Agent " "%(agent_id)s." msgstr "Сеть %(network_id)s уже была размещена агентом DHCP %(agent_id)s." #, python-format msgid "" "The network %(network_id)s is not hosted by the DHCP agent %(agent_id)s." msgstr "Сеть %(network_id)s не размещена агентом DHCP %(agent_id)s." msgid "" "The network type to use when creating the HA network for an HA router. By " "default or if empty, the first 'tenant_network_types' is used. This is " "helpful when the VRRP traffic should use a specific network which is not the " "default one." msgstr "" "Тип сети при создании сети HA для маршрутизатора HA. По умолчанию (или при " "пустом значении) используется первое значение 'tenant_network_types'. Такой " "подход помогает, если поток данных VRRP должен использовать сеть, не " "являющуюся стандартной." #, python-format msgid "The number of allowed address pair exceeds the maximum %(quota)s." msgstr "Число разрешенных пар адресов превышает максимальное %(quota)s." msgid "" "The number of seconds the agent will wait between polling for local device " "changes." msgstr "" "Интервал опроса агентом локальных устройств на предмет наличия изменений." msgid "" "The number of seconds to wait before respawning the ovsdb monitor after " "losing communication with it." msgstr "" "Время ожидания, в секундах, повторного порождения монитора ovsdb после " "потери соединения с ним." msgid "The number of sort_keys and sort_dirs must be same" msgstr "Количество sort_keys и sort_dirs должно быть одинаковым" msgid "" "The path for API extensions. Note that this can be a colon-separated list of " "paths. For example: api_extensions_path = extensions:/path/to/more/exts:/" "even/more/exts. The __path__ of neutron.extensions is appended to this, so " "if your extensions are in there you don't need to specify them here." msgstr "" "Путь для расширений API. Пути разделяются точкой с запятой. Пример: " "api_extensions_path = extensions:/path/to/more/exts:/even/more/exts. " "__path__ для расширений neutron добавляется автоматически, и если расширения " "содержатся там, их не требуется указывать здесь." msgid "The physical network name with which the HA network can be created." msgstr "Имя физической сети для создания сети HA." #, python-format msgid "The port '%s' was deleted" msgstr "Порт '%s' был удален" msgid "The port to bind to" msgstr "Порт для подключения к" #, python-format msgid "The requested content type %s is invalid." msgstr "Запрашиваемый тип содержимого %s является недопустимым." msgid "The resource could not be found." msgstr "Ресурс не найден." #, python-format msgid "" "The router %(router_id)s has been already hosted by the L3 Agent " "%(agent_id)s." msgstr "Маршрутизатор %(router_id)s уже был размещен агентом L3 %(agent_id)s." msgid "" "The server has either erred or is incapable of performing the requested " "operation." msgstr "" "На сервере возникла ошибка, или он не поддерживает выполнение запрошенной " "операции." 
msgid "The service plugins Neutron will use" msgstr "Будут использоваться модули служб Neutron" #, python-format msgid "The subnet request could not be satisfied because: %(reason)s" msgstr "Запрос подсети не удается выполнить, причина: %(reason)s" #, python-format msgid "The subproject to execute the command against. Can be one of: '%s'." msgstr "Подпроект для выполнения команды. Допустимые значения: '%s'." msgid "The type of authentication to use" msgstr "Применяемый тип идентификации" #, python-format msgid "The value '%(value)s' for %(element)s is not valid." msgstr "Значение %(value)s для %(element)s недопустимо." msgid "" "The working mode for the agent. Allowed modes are: 'legacy' - this preserves " "the existing behavior where the L3 agent is deployed on a centralized " "networking node to provide L3 services like DNAT, and SNAT. Use this mode if " "you do not want to adopt DVR. 'dvr' - this mode enables DVR functionality " "and must be used for an L3 agent that runs on a compute host. 'dvr_snat' - " "this enables centralized SNAT support in conjunction with DVR. This mode " "must be used for an L3 agent running on a centralized node (or in single-" "host deployments, e.g. devstack)" msgstr "" "Режим работы агента. Допустимые режимы: 'legacy' - сохраняет поведение, при " "котором агент L3 развернут на централизованном сетевом узле для " "предоставления служб L3, таких как DNAT и SNAT. Этот режим используется, " "если внедрять DVR не целесообразно. 'dvr' - этот режим включает " "функциональность DVR и должен использоваться для агентов L3, работающих на " "вычислительном хосте. 'dvr_snat' - этот режим включает поддержку " "централизованного SNAT в дополнение к DVR. Данный режим должен " "использоваться для агентов L3, работающих на централизованном узле (или в " "однохостовых развертываниях, таких как devstack)" msgid "" "There are routers attached to this network that depend on this policy for " "access." msgstr "" "К сети подключены маршрутизаторы, доступ к которым зависит от этой стратегии." msgid "" "This will choose the web framework in which to run the Neutron API server. " "'pecan' is a new experiemental rewrite of the API server." msgstr "" "Укажите веб-среду, а которой работает сервер API Neutron. 'pecan' - это " "новый экспериментальный вариант сервера API." msgid "Timeout" msgstr "Таймаут" msgid "" "Timeout in seconds for ovs-vsctl commands. If the timeout expires, ovs " "commands will fail with ALARMCLOCK error." msgstr "" "Тайм-аут команд ovs-vsctl в секундах. По истечении этого времени команды ovs " "завершаются с ошибкой ALARMCLOCK." msgid "" "Timeout in seconds to wait for a single OpenFlow request. Used only for " "'native' driver." msgstr "" "Тайм-аут (в секундах) ожидания одиночного запроса OpenFlow. Используется " "только для 'встроенного' драйвера." msgid "" "Timeout in seconds to wait for the local switch connecting the controller. " "Used only for 'native' driver." msgstr "" "Тайм-аут (в секундах) ожидания соединения локального переключателя с " "контроллером. Используется только для 'встроенного' драйвера." msgid "" "Too long prefix provided. New name would exceed given length for an " "interface name." msgstr "" "Слишком длинный префикс. Новое имя превысило бы заданную длину для имени " "интерфейса." msgid "Too many availability_zone_hints specified" msgstr "Слишком много availability_zone_hints" msgid "" "True to delete all ports on all the OpenvSwitch bridges. False to delete " "ports created by Neutron on integration and external network bridges." 
msgstr "" "True - удалить все порты для всех мостов OpenvSwitch. False - удалить порты, " "созданные Neutron для мостов интеграции и внешних сетей." msgid "Tunnel IP value needed by the ML2 plugin" msgstr "Для модуля ML2 требуется значение IP-адреса туннеля" msgid "Tunnel bridge to use." msgstr "Используемый мост туннеля." msgid "" "Type of the nova endpoint to use. This endpoint will be looked up in the " "keystone catalog and should be one of public, internal or admin." msgstr "" "Тип используемой конечной точки nova. Поиск конечной точки выполняется в " "каталоге keystone, конечная точка может быть общедоступной, внутренней или " "административной." msgid "URL for connecting to designate" msgstr "URL для подключения к назначенному объекту" msgid "URL to database" msgstr "URL базы данных" #, python-format msgid "Unable to access %s" msgstr "Ошибка доступа к %s" #, python-format msgid "" "Unable to allocate subnet with prefix length %(prefixlen)s, maximum allowed " "prefix is %(max_prefixlen)s." msgstr "" "Невозможно выделить подсеть с длиной префикса %(prefixlen)s, максимальный " "разрешенный префикс - %(max_prefixlen)s." #, python-format msgid "" "Unable to allocate subnet with prefix length %(prefixlen)s, minimum allowed " "prefix is %(min_prefixlen)s." msgstr "" "Невозможно выделить подсеть с длиной префикса %(prefixlen)s, минимальный " "разрешенный префикс - %(min_prefixlen)s." #, python-format msgid "Unable to calculate %(address_type)s address because of:%(reason)s" msgstr "Не удалось вычислить адрес %(address_type)s, причина:%(reason)s" #, python-format msgid "" "Unable to complete operation for %(router_id)s. The number of routes exceeds " "the maximum %(quota)s." msgstr "" "Не удалось выполнить операцию для %(router_id)s. Число маршрутизаторов " "превышает допустимый максимум, равный %(quota)s." #, python-format msgid "" "Unable to complete operation for %(subnet_id)s. The number of DNS " "nameservers exceeds the limit %(quota)s." msgstr "" "Невозможно выполнить операцию для %(subnet_id)s. Число серверов имен DNS " "превышает допустимый максимум %(quota)s." #, python-format msgid "" "Unable to complete operation for %(subnet_id)s. The number of host routes " "exceeds the limit %(quota)s." msgstr "" "Невозможно выполнить операцию для %(subnet_id)s. Число маршрутов хоста " "превышает допустимый максимум %(quota)s." #, python-format msgid "" "Unable to complete operation on address scope %(address_scope_id)s. There " "are one or more subnet pools in use on the address scope" msgstr "" "Не удалось выполнить операцию в адресной области %(address_scope_id)s. В " "адресной области существует один или несколько используемых пулов подсетей" #, python-format msgid "Unable to convert value in %s" msgstr "Невозможно преобразовать значение в %s" msgid "Unable to create the Agent Gateway Port" msgstr "Не удалось создать порт шлюза агента" msgid "Unable to create the SNAT Interface Port" msgstr "Не удалось создать порт интерфейса SNAT" #, python-format msgid "" "Unable to create the flat network. Physical network %(physical_network)s is " "in use." msgstr "" "Невозможно создать одноуровневую сеть. Физическая сеть %(physical_network)s " "занята." msgid "" "Unable to create the network. No available network found in maximum allowed " "attempts." msgstr "" "Не удалось создать сеть. Не найдена доступная сеть за максимальное число " "попыток." #, python-format msgid "Unable to delete subnet pool: %(reason)s." msgstr "Невозможно удалить пул подсетей: %(reason)s." 
#, python-format msgid "Unable to determine mac address for %s" msgstr "Невозможно определить mac-адрес для %s" #, python-format msgid "Unable to find '%s' in request body" msgstr "Отсутствует '%s' в теле запроса" #, python-format msgid "Unable to find IP address %(ip_address)s on subnet %(subnet_id)s" msgstr "Не удалось найти IP-адрес %(ip_address)s в подсети %(subnet_id)s" #, python-format msgid "Unable to find resource name in %s" msgstr "В %s не найдено имя ресурса" msgid "Unable to generate IP address by EUI64 for IPv4 prefix" msgstr "Невозможно сгенерировать IP-адрес с помощью EUI64 для префикса IPv4" #, python-format msgid "Unable to generate unique DVR mac for host %(host)s." msgstr "Не удалось создать уникальный MAC-адрес DVR для хоста %(host)s." #, python-format msgid "Unable to generate unique mac on network %(net_id)s." msgstr "Невозможно сгенерировать уникальный mac в сети %(net_id)s." #, python-format msgid "" "Unable to identify a target field from:%s. Match should be in the form " "%%()s" msgstr "" "Невозможно идентифицировать целевое поле из %s. Совпадение должно быть в " "форме %%()s" msgid "Unable to provide external connectivity" msgstr "Не удалось предоставить связь со внешней сетью" msgid "Unable to provide tenant private network" msgstr "Не удалось предоставить частную сеть арендатора" #, python-format msgid "" "Unable to reconfigure sharing settings for network %(network)s. Multiple " "tenants are using it." msgstr "" "Не удается изменить конфигурацию общих параметров для сети %(network)s. Она " "используется несколькими арендаторами." #, python-format msgid "Unable to update address scope %(address_scope_id)s : %(reason)s" msgstr "Не удалось изменить адресную область %(address_scope_id)s : %(reason)s" #, python-format msgid "Unable to update the following object fields: %(fields)s" msgstr "Не удалось обновить следующие поля объекта: %(fields)s" #, python-format msgid "" "Unable to verify match:%(match)s as the parent resource: %(res)s was not " "found" msgstr "" "Невозможно проверить совпадение %(match)s, так как родительский ресурс " "%(res)s не найдено" #, python-format msgid "Unexpected label for script %(script_name)s: %(labels)s" msgstr "Непредвиденная метка для сценария %(script_name)s: %(labels)s" #, python-format msgid "Unexpected number of alembic branch points: %(branchpoints)s" msgstr "Непредвиденное число переходных точек ветвления: %(branchpoints)s" #, python-format msgid "Unexpected response code: %s" msgstr "Непредвиденный код ответа: %s" #, python-format msgid "Unexpected response: %s" msgstr "Непредвиденный ответ: %s" #, python-format msgid "Unit name '%(unit)s' is not valid." msgstr "Недопустимое имя модуля '%(unit)s'." msgid "Unknown API version specified" msgstr "Указана неизвестная версия API" #, python-format msgid "Unknown address type %(address_type)s" msgstr "Неизвестный тип адреса %(address_type)s" #, python-format msgid "Unknown attribute '%s'." msgstr "Неизвестный атрибут '%s'." #, python-format msgid "Unknown chain: %r" msgstr "Неизвестная цепочка: %r" #, python-format msgid "Unknown network type %(network_type)s." msgstr "Неизвестный тип сети %(network_type)s." #, python-format msgid "Unknown quota resources %(unknown)s." msgstr "Неизвестные ресурсы квоты: %(unknown)s." 
msgid "Unmapped error" msgstr "Ошибка без преобразования" msgid "Unrecognized action" msgstr "Неизвестное действие" #, python-format msgid "Unrecognized attribute(s) '%s'" msgstr "Нераспознаваемые атрибуты '%s'" msgid "Unrecognized field" msgstr "Неизвестное поле" msgid "Unspecified minimum subnet pool prefix." msgstr "Не указан минимальный префикс пула подсетей." msgid "Unsupported Content-Type" msgstr "Не поддерживаемый тип содержимого" #, python-format msgid "Unsupported network type %(net_type)s." msgstr "Неподдерживаемый тип сети %(net_type)s." #, python-format msgid "Unsupported port state: %(port_state)s." msgstr "Неподдерживаемое состояние порта: %(port_state)s." msgid "Unsupported request type" msgstr "Неподдерживаемый тип запроса" msgid "Updating default security group not allowed." msgstr "Обновление группы защиты по умолчанию не разрешено." msgid "" "Use ML2 l2population mechanism driver to learn remote MAC and IPs and " "improve tunnel scalability." msgstr "" "Использовать драйвер механизма ML2 l2population для определения удаленных " "MAC- и IP-адресов и улучшения масштабируемости туннеля." msgid "Use broadcast in DHCP replies." msgstr "Использовать широковещательные пакеты в ответах DHCP." msgid "Use either --delta or relative revision, not both" msgstr "Используйте или --delta, или относительную ревизию, но не оба" msgid "" "Use ipset to speed-up the iptables based security groups. Enabling ipset " "support requires that ipset is installed on L2 agent node." msgstr "" "Использовать ipset для ускорения обработки групп защиты на основе iptables. " "Поддержка ipset требует, чтобы ipset был установлен в узле агента L2." msgid "" "Use the root helper when listing the namespaces on a system. This may not be " "required depending on the security configuration. If the root helper is not " "required, set this to False for a performance improvement." msgstr "" "Использовать вспомогательное приложение для получения прав root для чтения " "пространств имен в системе. Это может не потребоваться при соответствующим " "образом настроенной конфигурации защиты. Если вспомогательное приложение для " "получения прав root не используется, присвойте параметру значение false для " "повышения производительности." msgid "" "Use veths instead of patch ports to interconnect the integration bridge to " "physical networks. Support kernel without Open vSwitch patch port support so " "long as it is set to True." msgstr "" "Использовать интерфейсы veth вместо коммутационных портов для связи моста " "интеграции с физическими мостами. Если параметр равен True, то может " "использоваться ядро без поддержки коммутационных портов Open vSwitch." msgid "User (uid or name) running metadata proxy after its initialization" msgstr "" "Пользователь (uid или имя) использует proxy метаданных после инициализации" msgid "" "User (uid or name) running metadata proxy after its initialization (if " "empty: agent effective user)." msgstr "" "Пользователь (uid или имя) использует proxy метаданных после инициализации " "(если пустое, используется пользователь агента). " msgid "User (uid or name) running this process after its initialization" msgstr "Пользователь (uid или имя) запускает этот процесс после инициализации" msgid "Username for connecting to designate in admin context" msgstr "" "Имя пользователя для подключения к назначенному объекту в административном " "контексте" msgid "" "Uses veth for an OVS interface or not. Support kernels with limited " "namespace support (e.g. 
RHEL 6.5) so long as ovs_use_veth is set to True." msgstr "" "Указывает, следует ли использовать интерфейс veth для интерфейса OVS. " "Обеспечивает работу с ядрами с ограниченной поддержкой пространств имен, " "например, в RHEL 6.5, если ovs_use_veth задан равным True." msgid "VRRP authentication password" msgstr "Пароль идентификации VRRP" msgid "VRRP authentication type" msgstr "Тип идентификации VRRP" msgid "VXLAN network unsupported." msgstr "Сеть VXLAN не поддерживается." #, python-format msgid "" "Validation of dictionary's keys failed. Expected keys: %(expected_keys)s " "Provided keys: %(provided_keys)s" msgstr "" "Проверка ключей словаря не выполнена. Ожидаемые ключи: %(expected_keys)s " "Заданные ключи: %(provided_keys)s" #, python-format msgid "Validator '%s' does not exist." msgstr "Валидатор '%s' не существует." #, python-format msgid "Value %(value)s in mapping: '%(mapping)s' not unique" msgstr "Значение %(value)s в отображении '%(mapping)s' не уникально" #, python-format msgid "" "Value of %(parameter)s has to be multiple of %(number)s, with maximum value " "of %(maximum)s and minimum value of %(minimum)s" msgstr "" "Значение %(parameter)s должно быть кратным %(number)s, при этом максимальное " "значение - %(maximum)s и минимальное - %(minimum)s" msgid "" "Value of host kernel tick rate (hz) for calculating minimum burst value in " "bandwidth limit rules for a port with QoS. See kernel configuration file for " "HZ value and tc-tbf manual for more information." msgstr "" "Частота отсчетов времени в ядре (в Гц) для вычисления минимального значения " "пакета в правилах ограничения пропускной способности канала для порта с QoS. " "За дополнительной информацией обратитесь к описанию параметра ядра HZ и " "руководству по tc-tbf." msgid "" "Value of latency (ms) for calculating size of queue for a port with QoS. See " "tc-tbf manual for more information." msgstr "" "Задержка в миллисекундах для вычисления размера очереди для порта с QoS. За " "дополнительной информацией обратитесь к руководству по tc-tbf." msgid "" "Watch file log. Log watch should be disabled when metadata_proxy_user/group " "has no read/write permissions on metadata proxy log file." msgstr "" "Отслеживать протокол. Отслеживание протокола должно быть выключено, когда " "metadata_proxy_user/group не имеет прав доступа на чтение/запись файла " "протокола прокси метаданных. " msgid "" "When external_network_bridge is set, each L3 agent can be associated with no " "more than one external network. This value should be set to the UUID of that " "external network. To allow L3 agent support multiple external networks, both " "the external_network_bridge and gateway_external_network_id must be left " "empty." msgstr "" "Если задана опция external_network_bridge, то каждый агент L3 может быть " "связан не более чем с одной внешней сетью. Присвойте опции значение UUID " "этой внешней сети. Для того чтобы агент L3 поддерживал несколько внешних " "сетей, оба параметра, external_network_bridge и gateway_external_network_id, " "должны быть пустыми." msgid "" "When proxying metadata requests, Neutron signs the Instance-ID header with a " "shared secret to prevent spoofing. You may select any string for a secret, " "but it must match here and in the configuration used by the Nova Metadata " "Server. NOTE: Nova uses the same config key, but in [neutron] section." msgstr "" "При проксировании запросов метаданных Neutron подписывает заголовок Instance-" "ID общим секретным ключом для предотвращения подмены. 
Ключом может быть " "любая строка, но она должна совпадать с указанной в конфигурации для сервера " "метаданных Nova. Примечание: Nova использует тот же ключ конфигурации, но в " "разделе [neutron]." msgid "" "Where to store Neutron state files. This directory must be writable by the " "agent." msgstr "" "Расположение хранения файлов состояния Neutron. Этот каталог должен быть " "доступен для записи агентом." msgid "" "With IPv6, the network used for the external gateway does not need to have " "an associated subnet, since the automatically assigned link-local address " "(LLA) can be used. However, an IPv6 gateway address is needed for use as the " "next-hop for the default route. If no IPv6 gateway address is configured " "here, (and only then) the neutron router will be configured to get its " "default route from router advertisements (RAs) from the upstream router; in " "which case the upstream router must also be configured to send these RAs. " "The ipv6_gateway, when configured, should be the LLA of the interface on the " "upstream router. If a next-hop using a global unique address (GUA) is " "desired, it needs to be done via a subnet allocated to the network and not " "through this parameter. " msgstr "" "При использовании IPv6 применяемой для внешнего шлюза сети не обязательно " "иметь связанную подсеть, так как может быть использован автоматически " "назначаемый адрес link-local (LLA). Однако адрес шлюза IPv6 необходим в " "качестве следующего узла для маршрута по умолчанию. Если адрес шлюза IPv6 не " "указан здесь (и только в этом случае), маршрутизатор Neutron будет настроен " "для получения маршрута по умолчанию из объявлений маршрутизатора (RA) от " "вышестоящего маршрутизатора. В этом случае вышестоящий маршрутизатор " "должен быть также настроен для отправки этих RA. Когда указано значение " "ipv6_gateway, оно должно указывать на LLA интерфейса вышестоящего " "маршрутизатора. Если следующий узел, использующий глобальный уникальный адрес (GUA), " "является предпочтительным, это необходимо обеспечить посредством подсети, " "выделенной для сети, а не с помощью этого параметра. " msgid "You must implement __call__" msgstr "Отсутствует реализация __call__" msgid "" "You must provide a config file for bridge - either --config-file or " "env[NEUTRON_TEST_CONFIG_FILE]" msgstr "" "Необходимо задать файл конфигурации для моста, или --config-file, или " "env[NEUTRON_TEST_CONFIG_FILE]" msgid "You must provide a revision or relative delta" msgstr "Необходимо указать ревизию или относительную дельту" msgid "a subnetpool must be specified in the absence of a cidr" msgstr "пул подсетей должен быть указан в отсутствие cidr" msgid "add_ha_port cannot be called inside of a transaction." msgstr "add_ha_port нельзя вызывать внутри транзакции." msgid "allocation_pools allowed only for specific subnet requests." msgstr "allocation_pools разрешено только для определенных запросов подсетей. 
" msgid "allocation_pools are not in the subnet" msgstr "allocation_pools не принадлежит подсети" msgid "allocation_pools use the wrong ip version" msgstr "Недопустимая версия IP для allocation_pools" msgid "already a synthetic attribute" msgstr "атрибут уже является синтетическим" msgid "binding:profile value too large" msgstr "Слишком большое значение binding:profile" #, python-format msgid "cannot perform %(event)s due to %(reason)s" msgstr "не удается выполнить %(event)s, причина: %(reason)s" msgid "cidr and prefixlen must not be supplied together" msgstr "cidr и prefixlen не должны быть указаны вместе" #, python-format msgid "dhcp_agents_per_network must be >= 1. '%s' is invalid." msgstr "" "Значение dhcp_agents_per_network должно быть >= 1. Значение '%s' недопустимо." msgid "dns_domain cannot be specified without a dns_name" msgstr "dns_domain должен указываться вместе с dns_name" msgid "dns_name cannot be specified without a dns_domain" msgstr "dns_name должен указываться вместе с dns_domain" msgid "fixed_ip_address cannot be specified without a port_id" msgstr "fixed_ip_address должен указываться вместе с port_id" #, python-format msgid "gateway_ip %s is not in the subnet" msgstr "IP шлюза %s не принадлежит подсети" #, python-format msgid "has device owner %s" msgstr "имеет владельца устройства %s" msgid "in use" msgstr "Используется" #, python-format msgid "ip command failed on device %(dev_name)s: %(reason)s" msgstr "Не удалось выполнить команду ip на устройстве %(dev_name)s: %(reason)s" #, python-format msgid "ip command failed: %(reason)s" msgstr "Команда ip не выполнена: %(reason)s" #, python-format msgid "ip link capability %(capability)s is not supported" msgstr "Функция ip link %(capability)s не поддерживается" #, python-format msgid "ip link command is not supported: %(reason)s" msgstr "Команда ip link не поддерживается: %(reason)s" msgid "ip_version must be specified in the absence of cidr and subnetpool_id" msgstr "ip_version должно быть указано при отсутствии cidr and subnetpool_id" msgid "ipv6_address_mode is not valid when ip_version is 4" msgstr "ipv6_address_mode недопустим, когда ip_version - 4" msgid "ipv6_ra_mode is not valid when ip_version is 4" msgstr "ipv6_ra_mode недопустим, когда ip_version - 4" msgid "" "ipv6_ra_mode or ipv6_address_mode cannot be set when enable_dhcp is set to " "False." msgstr "" "Невозможно задать ipv6_ra_mode или ipv6_address_mode, если для enable_dhcp " "задано значение False." #, python-format msgid "" "ipv6_ra_mode set to '%(ra_mode)s' with ipv6_address_mode set to " "'%(addr_mode)s' is not valid. If both attributes are set, they must be the " "same value" msgstr "" "Для ipv6_ra_mode задано значение '%(ra_mode)s', а значение " "ipv6_address_mode, заданное как '%(addr_mode)s', является недопустимым. Если " "указаны оба атрибута, их значения должны совпадать" msgid "mac address update" msgstr "Обновление mac-адреса" #, python-format msgid "" "max_l3_agents_per_router %(max_agents)s config parameter is not valid. It " "has to be greater than or equal to min_l3_agents_per_router %(min_agents)s." msgstr "" "Недопустимый параметр конфигурации max_l3_agents_per_router %(max_agents)s. " "Он должен быть больше либо равен min_l3_agents_per_router %(min_agents)s." 
msgid "must provide exactly 2 arguments - cidr and MAC" msgstr "Необходимо задать ровно 2 аргумента - cidr и MAC" msgid "network_type required" msgstr "Требуется network_type" #, python-format msgid "network_type value '%s' not supported" msgstr "Для network_type не поддерживается значение '%s'" msgid "new subnet" msgstr "новая подсеть" #, python-format msgid "physical_network '%s' unknown for VLAN provider network" msgstr "Неизвестная физическая сеть '%s' для сети VLAN провайдера" #, python-format msgid "physical_network '%s' unknown for flat provider network" msgstr "Неизвестная физическая сеть '%s' для одноуровневой сети провайдера" msgid "physical_network required for flat provider network" msgstr "Для одноуровневой сети провайдера требуется physical_network" #, python-format msgid "provider:physical_network specified for %s network" msgstr "Для сети %s указан provider:physical_network" #, python-format msgid "rbac_db_model not found in %s" msgstr "rbac_db_model не найден в %s" msgid "record" msgstr "запись" msgid "respawn_interval must be >= 0 if provided." msgstr "Значение respawn_interval, если оно указано, должно быть >= 0." #, python-format msgid "segmentation_id out of range (%(min)s through %(max)s)" msgstr "segmentation_id вне диапазона (%(min)s - %(max)s)" msgid "segmentation_id requires physical_network for VLAN provider network" msgstr "" "Для segmentation_id требуется physical_network для сети VLAN провайдера" msgid "shared attribute switching to synthetic" msgstr "общий атрибут изменен на синтетический" #, python-format msgid "" "subnetpool %(subnetpool_id)s cannot be updated when associated with shared " "address scope %(address_scope_id)s" msgstr "" "Пул подсетей %(subnetpool_id)s нельзя изменять, если он связан с " "общедоступной адресной областью %(address_scope_id)s" msgid "subnetpool_id and use_default_subnetpool cannot both be specified" msgstr "" "Нельзя одновременно задавать значения subnetpool_id и use_default_subnetpool" msgid "the nexthop is not connected with router" msgstr "следующий узел не соединен с маршрутизатором" msgid "the nexthop is used by router" msgstr "следующий узел используется маршрутизатором" #, python-format msgid "unable to load %s" msgstr "не удалось загрузить %s" msgid "" "uuid provided from the command line so external_process can track us via /" "proc/cmdline interface." msgstr "" "UUID передан из командной строки. Это позволяет внешнему процессу " "отслеживать через интерфейс /proc/cmdline." neutron-8.4.0/neutron/locale/de/0000775000567000056710000000000013044373210017674 5ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/locale/de/LC_MESSAGES/0000775000567000056710000000000013044373210021461 5ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/locale/de/LC_MESSAGES/neutron.po0000664000567000056710000051730313044372760023535 0ustar jenkinsjenkins00000000000000# Translations template for neutron. # Copyright (C) 2015 ORGANIZATION # This file is distributed under the same license as the neutron project. # # Translators: # Ettore Atalan , 2014 # Frank Kloeker , 2016. 
#zanata msgid "" msgstr "" "Project-Id-Version: neutron 8.2.1.dev52\n" "Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n" "POT-Creation-Date: 2016-09-01 18:10+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" "PO-Revision-Date: 2016-03-24 05:41+0000\n" "Last-Translator: Monika Wolf \n" "Language: de\n" "Plural-Forms: nplurals=2; plural=(n != 1);\n" "Generated-By: Babel 2.0\n" "X-Generator: Zanata 3.7.3\n" "Language-Team: German\n" #, python-format msgid "" "\n" "Command: %(cmd)s\n" "Exit code: %(code)s\n" "Stdin: %(stdin)s\n" "Stdout: %(stdout)s\n" "Stderr: %(stderr)s" msgstr "" "\n" "Befehl: %(cmd)s\n" "Beendigungscode: %(code)s\n" "Standardeingabe: %(stdin)s\n" "Standardausgabe: %(stdout)s\n" "Standardfehler: %(stderr)s" #, python-format msgid "" "%(branch)s HEAD file does not match migration timeline head, expected: " "%(head)s" msgstr "" "%(branch)s-HEAD-Datei stimmt nicht mit Migrationszeitplan für HEAD überein. " "Erwartet: %(head)s" #, python-format msgid "%(driver)s: Internal driver error." msgstr "%(driver)s: Interner Treiberfehler." #, python-format msgid "%(id)s is not a valid %(type)s identifier" msgstr "%(id)s ist keine gültige ID für %(type)s" #, python-format msgid "" "%(invalid_dirs)s is invalid value for sort_dirs, valid value is '%(asc)s' " "and '%(desc)s'" msgstr "" "%(invalid_dirs)s ist ein ungültiger Wert für 'sort_dirs'; gültige Werte sind " "'%(asc)s' und '%(desc)s'" #, python-format msgid "%(key)s prohibited for %(tunnel)s provider network" msgstr "%(key)s untersagt für %(tunnel)s-Anbieter-Netz" #, python-format msgid "" "%(method)s called with network settings %(current)s (original settings " "%(original)s) and network segments %(segments)s" msgstr "" "%(method)s aufgerufen mit den Netzeinstellungen %(current)s (ursprüngliche " "Einstellungen %(original)s) und Netzsegmente %(segments)s" #, python-format msgid "" "%(method)s called with port settings %(current)s (original settings " "%(original)s) host %(host)s (original host %(original_host)s) vif type " "%(vif_type)s (original vif type %(original_vif_type)s) vif details " "%(vif_details)s (original vif details %(original_vif_details)s) binding " "levels %(levels)s (original binding levels %(original_levels)s) on network " "%(network)s with segments to bind %(segments_to_bind)s" msgstr "" "%(method)s aufgerufen mit den Porteinstellungen %(current)s (ursprüngliche " "Einstellungen %(original)s) Host %(host)s (ursprünglicher Host " "%(original_host)s) VIF-Typ %(vif_type)s (ursprünglicher VIF-Typ " "%(original_vif_type)s) VIF-Details %(vif_details)s (ursprüngliche VIF-" "Details %(original_vif_details)s) Bindungsebenen %(levels)s (ursprüngliche " "Bindungsebenen %(original_levels)s) im Netz %(network)s mit zu bindenden " "Segmenten %(segments_to_bind)s." #, python-format msgid "" "%(method)s called with subnet settings %(current)s (original settings " "%(original)s)" msgstr "" "%(method)s aufgerufen mit den Teilnetzeinstellungen %(current)s " "(ursprüngliche Einstellungen '%(original)s')" #, python-format msgid "%(method)s failed." msgstr "%(method)s fehlgeschlagen." #, python-format msgid "%(name)s '%(addr)s' does not match the ip_version '%(ip_version)s'" msgstr "" "%(name)s '%(addr)s' stimmt nicht mit 'ip_version' '%(ip_version)s' überein" #, python-format msgid "%(param)s must be in %(range)s range." msgstr "%(param)s muss im Bereich %(range)s liegen." 
#, python-format msgid "%s cannot be called while in offline mode" msgstr "%s kann nicht im Offlinemodus aufgerufen werden" #, python-format msgid "%s is invalid attribute for sort_key" msgstr "%s ist ein ungültiges Attribut für 'sort_key'" #, python-format msgid "%s is invalid attribute for sort_keys" msgstr "%s ist ein ungültiges Attribut für 'sort_keys'" #, python-format msgid "%s is not a valid VLAN tag" msgstr "%s ist kein gültiger VLAN-Tag" #, python-format msgid "%s must be specified" msgstr "%s muss angegeben werden." #, python-format msgid "%s must implement get_port_from_device or get_ports_from_devices." msgstr "" "%s muss get_port_from_device oder get_ports_from_devices implementieren." #, python-format msgid "%s prohibited for VLAN provider network" msgstr "%s untersagt für VLAN-Provider-Netz" #, python-format msgid "%s prohibited for flat provider network" msgstr "%s untersagt für einfaches Anbieternetzwerk" #, python-format msgid "%s prohibited for local provider network" msgstr "%s untersagt für lokales Anbieternetzwerk" #, python-format msgid "" "'%(data)s' contains '%(length)s' characters. Adding a domain name will cause " "it to exceed the maximum length of a FQDN of '%(max_len)s'" msgstr "" "'%(data)s' enthält '%(length)s' Zeichen. Wenn Sie einen Domänennamen " "hinzufügen, wird die maximal zulässige Länge für einen FQDN von " "'%(max_len)s' überschritten." #, python-format msgid "" "'%(data)s' contains '%(length)s' characters. Adding a sub-domain will cause " "it to exceed the maximum length of a FQDN of '%(max_len)s'" msgstr "" "'%(data)s' enthält '%(length)s' Zeichen. Wenn Sie eine Subdomäne hinzufügen, " "wird die maximal zulässige Länge für einen FQDN von '%(max_len)s' " "überschritten. " #, python-format msgid "'%(data)s' exceeds maximum length of %(max_len)s" msgstr "'%(data)s' überschreitet die Höchstlänge von %(max_len)s" #, python-format msgid "'%(data)s' is not an accepted IP address, '%(ip)s' is recommended" msgstr "'%(data)s' ist keine akzeptierte IP-Adresse. '%(ip)s' wird empfohlen." #, python-format msgid "'%(data)s' is not in %(valid_values)s" msgstr "'%(data)s' befindet sich nicht in %(valid_values)s" #, python-format msgid "'%(data)s' is too large - must be no larger than '%(limit)d'" msgstr "'%(data)s' ist zu groß - darf höchstens '%(limit)d' sein" #, python-format msgid "'%(data)s' is too small - must be at least '%(limit)d'" msgstr "'%(data)s' ist zu klein - muss mindestens '%(limit)d' sein" #, python-format msgid "'%(data)s' isn't a recognized IP subnet cidr, '%(cidr)s' is recommended" msgstr "" "'%(data)s' ist keine erkannte IP-Teilnetz-CIDR, '%(cidr)s' wird empfohlen" #, python-format msgid "'%(data)s' not a valid PQDN or FQDN. Reason: %(reason)s" msgstr "'%(data)s' ist kein gültiger PQDN oder FQDN. Grund: %(reason)s" #, python-format msgid "'%(host)s' is not a valid nameserver. %(msg)s" msgstr "'%(host)s' ist kein gültiger Namensserver. 
%(msg)s" #, python-format msgid "'%s' Blank strings are not permitted" msgstr "'%s' leere Zeichenfolgen sind nicht zulässig" #, python-format msgid "'%s' cannot be converted to boolean" msgstr "'%s' kann nicht in boolesche Zahl umgewandelt werden" #, python-format msgid "'%s' cannot be converted to lowercase string" msgstr "" "'%s' kann nicht in eine Zeichenfolge in Kleinschreibung konvertiert werden" #, python-format msgid "'%s' contains whitespace" msgstr "'%s' enthält Leerzeichen" #, python-format msgid "'%s' exceeds the 255 character FQDN limit" msgstr "'%s' überschreitet den FQDN-Grenzwert von 255 Zeichen" #, python-format msgid "'%s' is a FQDN. It should be a relative domain name" msgstr "'%s' ist ein FQDN. Es muss ein relativer Domänenname sein." #, python-format msgid "'%s' is not a FQDN" msgstr "'%s' ist kein FQDN." #, python-format msgid "'%s' is not a dictionary" msgstr "%s ist kein Verzeichnis" #, python-format msgid "'%s' is not a list" msgstr "'%s' ist keine Liste" #, python-format msgid "'%s' is not a valid IP address" msgstr "'%s' ist keine gültige IP-Adresse" #, python-format msgid "'%s' is not a valid IP subnet" msgstr "'%s' ist kein gültiges IP-Teilnetz" #, python-format msgid "'%s' is not a valid MAC address" msgstr "'%s' ist keine gültige MAC-Adresse" #, python-format msgid "'%s' is not a valid RBAC object type" msgstr "'%s' ist kein gültiger RBAC-Objekttyp" #, python-format msgid "'%s' is not a valid UUID" msgstr "'%s' ist keine gültige UUID" #, python-format msgid "'%s' is not a valid boolean value" msgstr "'%s' ist kein gültiger boolescher Wert" #, python-format msgid "'%s' is not a valid input" msgstr "'%s' ist keine gültige Eingabe" #, python-format msgid "'%s' is not a valid string" msgstr "'%s' ist keine gültige Zeichenfolge" #, python-format msgid "'%s' is not an integer" msgstr "'%s' ist keine Ganzzahl" #, python-format msgid "'%s' is not an integer or uuid" msgstr "'%s' ist keine Ganzzahl und keine UUID" #, python-format msgid "'%s' is not of the form =[value]" msgstr "'%s' ist nicht in der Form =[value]" #, python-format msgid "'%s' is not supported for filtering" msgstr "'%s' wird für die Filterung nicht unterstützt" #, python-format msgid "'%s' must be a non negative decimal." msgstr "'%s' muss eine nicht negative Dezimalzahl sein." #, python-format msgid "'%s' should be non-negative" msgstr "'%s' sollte nicht negativ sein" msgid "'.' searches are not implemented" msgstr "'.'-Suchen sind nicht implementiert" #, python-format msgid "'module' object has no attribute '%s'" msgstr "Das 'module'-Objekt hat kein Attribut '%s'." msgid "'port_max' is smaller than 'port_min'" msgstr "'port_max' ist kleiner als 'port_min'" msgid "" "(Deprecated. Use '--subproject neutron-SERVICE' instead.) The advanced " "service to execute the command against." msgstr "" "(Veraltet. Verwenden Sie stattdessen '--subproject neutron-SERVICE'.) Der " "erweiterte Dienst für den der Befehl ausgeführt werden soll." msgid "0 is not allowed as CIDR prefix length" msgstr "0 ist als Länge für CIDR-Präfix nicht zulässig" msgid "" "32-bit BGP identifier, typically an IPv4 address owned by the system running " "the BGP DrAgent." msgstr "" "32-Bit-BGP-Bezeichner, in der Regel eine IPv4-Adresse, deren Eigner das " "System ist, das den BGP-DrAgent ausführt." 
msgid "A QoS driver must be specified" msgstr "Ein QoS-Treiber muss angegeben werden" msgid "A cidr must be specified in the absence of a subnet pool" msgstr "Ein cidr muss angegeben werden, wenn kein Teilnetzpool vorhanden ist" msgid "" "A decimal value as Vendor's Registered Private Enterprise Number as required " "by RFC3315 DUID-EN." msgstr "" "Ein Dezimalwert als Registered Private Enterprise Number des Anbieters, wie " "es durch RFC3315 DUID-EN gefordert wird." #, python-format msgid "A default external network already exists: %(net_id)s." msgstr "Es ist bereits ein externes Standardnetz vorhanden: %(net_id)s." msgid "" "A default subnetpool for this IP family has already been set. Only one " "default may exist per IP family" msgstr "" "Es wurde bereits ein Standardsubnetzpool für diese IP-Familie definiert. Pro " "IP-Familie darf nur ein Standardpool vorhanden sein. " msgid "A metering driver must be specified" msgstr "Ein Messungstreiber muss angegeben sein" msgid "A password must be supplied when using auth_type md5." msgstr "Bei Verwendung von 'auth_type md5' muss ein Kennwort angegeben werden." msgid "API for retrieving service providers for Neutron advanced services" msgstr "API zum Abrufen von Diensteanbietern für erweiterte Neutron-Dienste" msgid "Aborting periodic_sync_routers_task due to an error." msgstr "periodic_sync_routers_task wird aufgrund eines Fehlers abgebrochen." msgid "Access to this resource was denied." msgstr "Zugriff auf diese Ressource wurde verweigert." msgid "Action to be executed when a child process dies" msgstr "" "Aktion, die ausgeführt werden soll, wenn ein untergeordneter Prozess " "abgebrochen wird" msgid "" "Add comments to iptables rules. Set to false to disallow the addition of " "comments to generated iptables rules that describe each rule's purpose. " "System must support the iptables comments module for addition of comments." msgstr "" "Fügen Sie Kommentare zu iptables-Regeln hinzu. Setzen Sie den Wert auf " "'false', um das Hinzufügen von Kommentaren zu generierten iptables-Regeln, " "die den Zweck der einzelnen Regeln beschreiben, zu unterbinden. Das System " "muss das Modul für iptables-Kommentare zum Hinzufügen von Kommentaren " "unterstützen. " msgid "Address not present on interface" msgstr "Adresse an der Schnittstelle nicht vorhanden." #, python-format msgid "Address scope %(address_scope_id)s could not be found" msgstr "Adressbereich %(address_scope_id)s wurde nicht gefunden" msgid "" "Address to listen on for OpenFlow connections. Used only for 'native' driver." msgstr "" "Adresse, die auf OpenFlow-Verbindungen überwacht werden soll. Wird nur für " "'native' Treiber verwendet." msgid "Adds external network attribute to network resource." msgstr "Fügt ein externes Netzattribut zur Netzressource hinzu." msgid "Adds test attributes to core resources." msgstr "Fügt Testattribute zu Kernressourcen hinzu." #, python-format msgid "Agent %(id)s could not be found" msgstr "Agent %(id)s konnte nicht gefunden werden" #, python-format msgid "Agent %(id)s is not a L3 Agent or has been disabled" msgstr "Agent %(id)s ist kein L3-Agent oder wurde inaktiviert" #, python-format msgid "Agent %(id)s is not a valid DHCP Agent or has been disabled" msgstr "Agent %(id)s ist kein gültiger DHCP-Agent oder wurde inaktiviert" msgid "Agent has just been revived" msgstr "Der Agent wurde gerade erneut aktiviert." msgid "" "Agent starts with admin_state_up=False when enable_new_agents=False. 
In the " "case, user's resources will not be scheduled automatically to the agent " "until admin changes admin_state_up to True." msgstr "" "Agent startet mit admin_state_up=False, wenn enable_new_agents=False. In " "diesem Fall werden die Ressourcen eines Benutzers nur dann automatisch für " "den Agenten geplant, wenn der Administrator admin_state_up auf True festlegt." #, python-format msgid "Agent updated: %(payload)s" msgstr "Agent aktualisiert: %(payload)s" #, python-format msgid "" "Agent with agent_type=%(agent_type)s and host=%(host)s could not be found" msgstr "" "Agent mit 'agent_type=%(agent_type)s' und 'host=%(host)s' konnte nicht " "gefunden werden" msgid "Allow auto scheduling networks to DHCP agent." msgstr "Automatische Netzzuordnung zum DHCP-Agenten zulassen." msgid "Allow auto scheduling of routers to L3 agent." msgstr "Automatische Routerzuordnung zum L3-Agenten zulassen." msgid "" "Allow overlapping IP support in Neutron. Attention: the following parameter " "MUST be set to False if Neutron is being used in conjunction with Nova " "security groups." msgstr "" "Überschneidung bei IP-Support in Neutron zulassen. Achtung: Die folgenden " "Parameter müssen auf 'False' gesetzt werden, wenn Neutron zusammen mit Nova-" "Sicherheitsgruppen verwendet wird." msgid "Allow running metadata proxy." msgstr "Aktiven Metadaten-Proxy zulassen." msgid "Allow sending resource operation notification to DHCP agent" msgstr "" "Senden von Benachrichtigungen zu Ressourcenoperationen an den DHCP-Agenten " "zulassen" msgid "Allow the creation of PTR records" msgstr "Erstellen von PTR-Datensätzen zulassen" msgid "Allow the usage of the bulk API" msgstr "Nutzung der Massenzuweisungs-API zulassen" msgid "Allow the usage of the pagination" msgstr "Nutzung der Paginierung zulassen" msgid "Allow the usage of the sorting" msgstr "Nutzung der Sortierung zulassen" msgid "Allow to perform insecure SSL (https) requests to nova metadata" msgstr "" "Durchführung von unsicheren SSL-Anforderungen (HTTPS) an Nova-Metadaten" msgid "Allowed address pairs must be a list." msgstr "Zulässige Adresspaare müssen als Liste angegeben werden." msgid "AllowedAddressPair must contain ip_address" msgstr "AllowedAddressPair muss ip_address enthalten" msgid "" "Allows for serving metadata requests coming from a dedicated metadata access " "network whose CIDR is 169.254.169.254/16 (or larger prefix), and is " "connected to a Neutron router from which the VMs send metadata:1 request. In " "this case DHCP Option 121 will not be injected in VMs, as they will be able " "to reach 169.254.169.254 through a router. This option requires " "enable_isolated_metadata = True." msgstr "" "Ermöglicht die Bereitstellung von Metadatenanforderungen aus einem " "dedizierten Metadatenzugriffsnetz mit der CIDR 169.254.169.254/16 (oder " "einem längeren Präfix), das mit einem Neutron-Router verbunden ist, über den " "die VMs Anforderungen vom Typ metadata:1 senden. In diesem Fall wird die " "DHCP-Option 121 nicht in die VMs injiziert, da sie 169.254.169.254 über " "einen Router erreichen können. Diese Option setzt die Einstellung " "'enable_isolated_metadata = True' voraus." #, python-format msgid "" "Already hosting BGP Speaker for local_as=%(current_as)d with router_id=" "%(rtid)s." msgstr "" "Es wird bereits ein BGP-Speaker für local_as=%(current_as)d mit router_id=" "%(rtid)s gehostet." #, python-format msgid "" "Already hosting maximum number of BGP Speakers. 
Allowed scheduled count=" "%(count)d" msgstr "" "Maximale Anzahl an BGP-Speakern wird bereits gehostet. Zulässige geplante " "Anzahl=%(count)d" msgid "An RBAC policy already exists with those values." msgstr "Es ist bereits eine RBAC-Richtlinie mit diesen Werten vorhanden." msgid "An identifier must be specified when updating a subnet" msgstr "" "Bei der Aktualisierung eines Subnetzes muss ein Bezeichner angegeben werden." msgid "An interface driver must be specified" msgstr "Ein Schnittstellentreiber muss angegeben sein" msgid "" "An ordered list of extension driver entrypoints to be loaded from the " "neutron.ml2.extension_drivers namespace. For example: extension_drivers = " "port_security,qos" msgstr "" "Sortierte Liste von Eingangspunkten für Erweiterungstreiber, die aus dem " "Namensraum neutron.ml2.extension_drivers geladen werden sollen. Beispiel: " "extension_drivers = port_security,qos" msgid "" "An ordered list of networking mechanism driver entrypoints to be loaded from " "the neutron.ml2.mechanism_drivers namespace." msgstr "" "Sortierte Liste der Eingangspunkte für Netzmechanismustreiber die aus dem " "Namensbereich neutron.ml2.mechanism_drivers geladen werden." msgid "An unexpected internal error occurred." msgstr "Ein unerwarteter interner Fehler ist aufgetreten." msgid "An unknown error has occurred. Please try your request again." msgstr "" "Ein unbekannter Fehler ist aufgetreten. Stellen Sie Ihre Anforderung erneut." msgid "Async process didn't respawn" msgstr "Der asynchrone Prozess hat keinen erneuten Prozess erstellt." #, python-format msgid "Attribute '%s' not allowed in POST" msgstr "Attribut %s in POST nicht zulässig" #, python-format msgid "Authentication type not supported. Requested type=%(auth_type)s." msgstr "" "Authentifizierungstyp wird nicht unterstützt. Angeforderter Typ " "%(auth_type)s." msgid "Authorization URL for connecting to designate in admin context" msgstr "" "Autorisierungs-URL zum Herstellen einer Verbindung zu Designate im " "Administratorkontext." msgid "Automatically remove networks from offline DHCP agents." msgstr "Netze automatisch von DHCP-Agenten, die offline sind, entfernen." msgid "" "Automatically reschedule routers from offline L3 agents to online L3 agents." msgstr "" "Automatische Neuterminierung für Router von Offline-L3-Agenten zu Online-L3-" "Agenten." msgid "Availability zone of this node" msgstr "Verfügbarkeitszone dieses Knotens" #, python-format msgid "AvailabilityZone %(availability_zone)s could not be found." msgstr "" "Die Verfügbarkeitszone %(availability_zone)s konnte nicht gefunden werden." msgid "Available commands" msgstr "Verfügbare Befehle" #, python-format msgid "" "BGP Peer %(peer_ip)s for remote_as=%(remote_as)s, running for BGP Speaker " "%(speaker_as)d not added yet." msgstr "" "Der BGP-Peer %(peer_ip)s für remote_as=%(remote_as)s, ausgeführt für BGP-" "Speaker %(speaker_as)d wurde noch nicht hinzugefügt." #, python-format msgid "" "BGP Speaker %(bgp_speaker_id)s is already configured to peer with a BGP Peer " "at %(peer_ip)s, it cannot peer with BGP Peer %(bgp_peer_id)s." msgstr "" "BGP-Speaker %(bgp_speaker_id)s ist bereits als Peer für einen BGP-Peer an " "%(peer_ip)s konfiguriert, daher kann er nicht als Peer für den BGP-Peer " "%(bgp_peer_id)s fungieren." #, python-format msgid "" "BGP Speaker for local_as=%(local_as)s with router_id=%(rtid)s not added yet." msgstr "" "Der BGP-Speaker für local_as=%(local_as)s mit router_id=%(rtid)s wurde noch " "nicht hinzugefügt." 
#, python-format msgid "" "BGP peer %(bgp_peer_id)s is not associated with BGP speaker " "%(bgp_speaker_id)s." msgstr "" "BGP-Peer %(bgp_peer_id)s ist dem BGP-Speaker %(bgp_speaker_id)s nicht " "zugeordnet." #, python-format msgid "BGP peer %(bgp_peer_id)s not authenticated." msgstr "BGP-Peer %(bgp_peer_id)s ist nicht authentifiziert." #, python-format msgid "BGP peer %(id)s could not be found." msgstr "BGP-Peer %(id)s konnte nicht gefunden werden." #, python-format msgid "" "BGP speaker %(bgp_speaker_id)s is not hosted by the BgpDrAgent %(agent_id)s." msgstr "" "BGP-Speaker %(bgp_speaker_id)s wird nicht von BgpDrAgent %(agent_id)s " "gehostet." #, python-format msgid "BGP speaker %(id)s could not be found." msgstr "BGP-Speaker %(id)s konnte nicht gefunden werden." msgid "BGP speaker driver class to be instantiated." msgstr "Die zu instanziierende Treiberklasse des BGP-Speakers." msgid "Backend does not support VLAN Transparency." msgstr "Backend unterstützt keine VLAN-Transparenz." #, python-format msgid "" "Bad prefix or mac format for generating IPv6 address by EUI-64: %(prefix)s, " "%(mac)s:" msgstr "" "Falsches Präfix- oder MAC-Format für das Generieren der IPv6-Adresse durch " "EUI-64: %(prefix)s, %(mac)s:" #, python-format msgid "Bad prefix type for generate IPv6 address by EUI-64: %s" msgstr "" "Falscher Präfixtyp für das Generieren der IPv6-Adresse durch EUI-64: %s" #, python-format msgid "Base MAC: %s" msgstr "Basis-MAC-Adresse: %s" msgid "" "Base log dir for dnsmasq logging. The log contains DHCP and DNS log " "information and is useful for debugging issues with either DHCP or DNS. If " "this section is null, disable dnsmasq log." msgstr "" "Basisprotokollverzeichnis für dnsmasq-Protokollierung. Das Protokoll enthält " "DHCP- und DNS-Protokollinformationen und ist für das Debugging von Problemen " "mit DHCP oder DNS nützlich. Wenn dieser Abschnitt null ist, dann " "deaktivieren Sie das dnsmasq-Protokoll." #, python-format msgid "BgpDrAgent %(agent_id)s is already associated to a BGP speaker." msgstr "BgpDrAgent %(agent_id)s ist bereits einem BGP-Speaker zugeordnet." #, python-format msgid "BgpDrAgent %(id)s is invalid or has been disabled." msgstr "BgpDrAgent %(id)s ist ungültig oder wurde inaktiviert." #, python-format msgid "BgpDrAgent updated: %s" msgstr "BgpDrAgent aktualisiert: %s" msgid "Body contains invalid data" msgstr "Hauptteil enthält ungültige Daten" msgid "Both network_id and router_id are None. One must be provided." msgstr "" "Sowohl 'network_id' als auch 'router_id' sind 'None'. Ein Wert muss " "angegeben sein." #, python-format msgid "Bridge %(bridge)s does not exist." msgstr "Brücke %(bridge)s ist nicht vorhanden." #, python-format msgid "Bridge %s does not exist" msgstr "Die Brücke %s ist nicht vorhanden." 
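# Illustrative sketch (not part of the upstream catalog): the EUI-64 messages
# above concern deriving an IPv6 interface identifier from a MAC address.
# Neutron relies on the netaddr library for this; a minimal example (the MAC
# value is an arbitrary placeholder):
#
#   from netaddr import EUI
#   mac = EUI('00:16:3e:33:44:55')
#   print(mac.ipv6_link_local())
#   # -> fe80::216:3eff:fe33:4455
#   # (universal/local bit of the first octet flipped, ff:fe inserted)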
msgid "Bulk operation not supported" msgstr "Massenoperation nicht unterstützt" msgid "CIDR to monitor" msgstr "Zu überwachendes CIDR" #, python-format msgid "Callback for %(resource_type)s not found" msgstr "Callback nach %(resource_type)s nicht gefunden" #, python-format msgid "Callback for %(resource_type)s returned wrong resource type" msgstr "" "Callback nach %(resource_type)s hat den falschen Ressourcentyp zurückgegeben" #, python-format msgid "Cannot add floating IP to port %s that has no fixed IPv4 addresses" msgstr "" "Zu Port %s, der keine statischen IPv4-Adressen besitzt, kann keine " "dynamische IP hinzugefügt werden" #, python-format msgid "Cannot add floating IP to port on subnet %s which has no gateway_ip" msgstr "" "Dynamische IP-Adresse kann nicht zu Port auf Teilnetz %s ohne 'gateway_ip' " "hinzugefügt werden" #, python-format msgid "Cannot add multiple callbacks for %(resource_type)s" msgstr "" "Es können nicht mehrere Callbacks nach %(resource_type)s hinzugefügt werden" #, python-format msgid "Cannot allocate IPv%(req_ver)s subnet from IPv%(pool_ver)s subnet pool" msgstr "" "IPv%(req_ver)s-Teilnetz kann nicht aus IPv%(pool_ver)s-Teilnetzpool " "zugeordnet werden" msgid "Cannot allocate requested subnet from the available set of prefixes" msgstr "" "Das angeforderte Teilnetz kann nicht aus der verfügbaren Gruppe mit Präfixen " "zugeordnet werden" #, python-format msgid "" "Cannot associate floating IP %(floating_ip_address)s (%(fip_id)s) with port " "%(port_id)s using fixed IP %(fixed_ip)s, as that fixed IP already has a " "floating IP on external network %(net_id)s." msgstr "" "Dynamische IP-Adresse %(floating_ip_address)s (%(fip_id)s) kann Port " "%(port_id)s nicht über statische IP-Adresse %(fixed_ip)s zugeordnet werden, " "da diese statische IP-Adresse bereits über eine dynamische IP-Adresse im " "externen Netz %(net_id)s verfügt." msgid "" "Cannot change HA attribute of active routers. Please set router " "admin_state_up to False prior to upgrade." msgstr "" "HA-Attribut der aktiven Router kann nicht geändert werden. Setzen Sie die " "Routereigenschaft 'admin_state_up' vor der Aktualisierung auf 'False'. " #, python-format msgid "" "Cannot create floating IP and bind it to %s, since that is not an IPv4 " "address." msgstr "" "Dynamische IP-Adresse kann nicht erstellt und an %s gebunden werden, da sie " "keine IPv4-Adresse ist." #, python-format msgid "" "Cannot create floating IP and bind it to Port %s, since that port is owned " "by a different tenant." msgstr "" "Dynamische IP-Adresse kann nicht erstellt und an Port %s gebunden werden, da " "dieser Port einem anderen Nutzer gehört." msgid "Cannot create resource for another tenant" msgstr "Erstellen von Ressource für einen weiteren Nutzer nicht möglich" msgid "Cannot disable enable_dhcp with ipv6 attributes set" msgstr "" "enable_dhcp kann nicht inaktiviert werden, wenn ipv6-Attribute gesetzt sind" #, python-format msgid "Cannot find %(table)s with %(col)s=%(match)s" msgstr "%(table)s mit %(col)s=%(match)s nicht gefunden" #, python-format msgid "Cannot handle subnet of type %(subnet_type)s" msgstr "Teilnetz des Typs %(subnet_type)s kann nicht behandelt werden" msgid "Cannot have multiple IPv4 subnets on router port" msgstr "Mehrere IPv4-Subnetze an Router-Port nicht möglich" #, python-format msgid "" "Cannot have multiple router ports with the same network id if both contain " "IPv6 subnets. 
Existing port %(p)s has IPv6 subnet(s) and network id %(nid)s" msgstr "" "Mehrere Routerports können nicht dieselbe Netz-ID verwenden, wenn beide IPv6-" "Teilnetze enthalten. Der vorhandene Port %(p)s verfügt über das IPv6-" "Teilnetz und die Netz-ID %(nid)s" #, python-format msgid "" "Cannot host distributed router %(router_id)s on legacy L3 agent %(agent_id)s." msgstr "" "Der verteilte Router %(router_id)s kann am traditionellen L3-Agenten " "%(agent_id)s nicht gehostet werden." msgid "Cannot match priority on flow deletion or modification" msgstr "" "Abgleichen von Priorität bei Ablauflöschung oder Änderung nicht möglich" msgid "Cannot mix IPv4 and IPv6 prefixes in a subnet pool." msgstr "" "IPv4- und IPv6-Präfixe können in einem Subnetzpool nicht gemischt werden." msgid "Cannot specify both --service and --subproject." msgstr "--service und --subproject können nicht gemeinsam angegeben werden." msgid "Cannot specify both subnet-id and port-id" msgstr "Angabe sowohl von Teilnetz-ID als auch von Port-ID nicht möglich" msgid "Cannot understand JSON" msgstr "Kann JSON nicht verstehen" #, python-format msgid "Cannot update read-only attribute %s" msgstr "Schreibgeschütztes Attribut %s kann nicht aktualisiert werden" msgid "" "Cannot upgrade active router to distributed. Please set router " "admin_state_up to False prior to upgrade." msgstr "" "Das Upgrade eines aktiven Routers zu einem dezentralen Router ist nicht " "möglich. Legen Sie für den Router admin_state_up auf False fest, bevor Sie " "die Aktualisierung durchführen." msgid "Certificate Authority public key (CA cert) file for ssl" msgstr "Öffentliche Schlüsseldatei der Zertifizierungsstelle für SSL" #, python-format msgid "" "Change would make usage less than 0 for the following resources: %(unders)s." msgstr "" "Durch die Änderung wäre die Nutzung kleiner als 0 für die folgenden " "Ressourcen: %(unders)s." msgid "Check ebtables installation" msgstr "Installation von ebtables überprüfen" msgid "Check for ARP header match support" msgstr "Auf Unterstützung des Vergleichs von ARP-Headern überprüfen" msgid "Check for ARP responder support" msgstr "Überprüfen Sie, ob ARP-Responder unterstützt werden" msgid "Check for ICMPv6 header match support" msgstr "Auf Unterstützung des Vergleichs von ICMPv6-Headern überprüfen" msgid "Check for OVS Geneve support" msgstr "Auf OVS-Geneve-Unterstützung überprüfen" msgid "Check for OVS vxlan support" msgstr "Überprüfen Sie, ob OVS-VXLAN-Unterstützung vorliegt" msgid "Check for VF management support" msgstr "Überprüfen Sie, ob VF-Management unterstützt wird" msgid "Check for iproute2 vxlan support" msgstr "Überprüfen Sie, ob iproute2-VXLAN-Unterstützung vorliegt" msgid "Check for nova notification support" msgstr "Überprüfen Sie, ob Nova-Benachrichtigungen unterstützt werden" msgid "Check for patch port support" msgstr "Überprüfen Sie, ob Patch-Ports unterstützt werden" msgid "Check ip6tables installation" msgstr "Überprüfen Sie die ip6tables-Installation." msgid "Check ipset installation" msgstr "Überprüfen Sie die ipset-Installation." 
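# Illustrative note (not part of the upstream catalog): the "Check ..."
# strings above and below are the descriptions of individual probes run by
# the neutron-sanity-check tool; a typical invocation might look like the
# following (the config path is an assumption for this example):
#
#   $ neutron-sanity-check --config-file /etc/neutron/neutron.conf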
msgid "Check keepalived IPv6 support" msgstr "IPv6-Unterstützung von keepalived überprüfen" msgid "Check minimal dibbler version" msgstr "Mindestversion von dibbler überprüfen" msgid "Check minimal dnsmasq version" msgstr "Überprüfen Sie die Mindestversion für dnsmasq" msgid "Check netns permission settings" msgstr "Überprüfen Sie die netns-Berechtigungseinstellungen" msgid "Check ovs conntrack support" msgstr "Überprüfen Sie, ob OVS-Conntrack-Unterstützung vorhanden ist." msgid "Check ovsdb native interface support" msgstr "Unterstützung für native ovsdb-Schnittstelle überprüfen" #, python-format msgid "" "Cidr %(subnet_cidr)s of subnet %(subnet_id)s overlaps with cidr %(cidr)s of " "subnet %(sub_id)s" msgstr "" "Überschneidungen zwischen CIDR %(subnet_cidr)s von Teilnetz %(subnet_id)s " "und CIDR %(cidr)s von Teilnetz %(sub_id)s" msgid "Class not found." msgstr "Klassen nicht gefunden." msgid "Cleanup resources of a specific agent type only." msgstr "Bereinigen Sie nur Ressourcen mit einem bestimmten Agententyp." msgid "Client certificate for nova metadata api server." msgstr "Clientzertifikat zu API-Server für Nova-Metadaten." msgid "" "Comma-separated list of : tuples, mapping " "network_device to the agent's node-specific list of virtual functions that " "should not be used for virtual networking. vfs_to_exclude is a semicolon-" "separated list of virtual functions to exclude from network_device. The " "network_device in the mapping should appear in the physical_device_mappings " "list." msgstr "" "Liste mit durch Kommas voneinander getrennten Tupeln vom Typ " ":, in der eine Netzeinheit der " "knotenspezifischen Liste virtueller Funktionen des Agenten zugeordnet wird, " "die nicht für den virtuellen Netzbetrieb verwendet werden sollen. Bei " "'Auszuschließende_VFS' handelt es sich um eine durch Semikolons getrennte " "Liste virtueller Funktionen, die aus der Netzeinheit auszuschließen sind. " "Die Netzeinheit in der Zuordnung sollte in der Liste " "'physical_device_mappings' angezeigt werden." msgid "" "Comma-separated list of : tuples mapping physical " "network names to the agent's node-specific Open vSwitch bridge names to be " "used for flat and VLAN networks. The length of bridge names should be no " "more than 11. Each bridge must exist, and should have a physical network " "interface configured as a port. All physical networks configured on the " "server should have mappings to appropriate bridges on each agent. Note: If " "you remove a bridge from this mapping, make sure to disconnect it from the " "integration bridge as it won't be managed by the agent anymore. Deprecated " "for ofagent." msgstr "" "Liste mit durch Kommas voneinander getrennten Tuplen vom Typ " ":, in der die physischen Netznamen den " "knotenspezifischen Open vSwitch-Brückennamen des Agenten zugeordnet sind, " "die für einfache und VLAN-Netze verwendet werden sollen. Die Länge der " "Brückennamen darf 11 Zeichen nicht überschreiten. Jede Brücke muss vorhanden " "sein und eine als Port konfigurierte physische Netzschnittstelle haben. Alle " "auf dem Server konfigurierten physischen Netze müssen Zuordnungen zu den " "entsprechenden Brücken in jedem Agenten haben. Hinweis: Wenn Sie aus dieser " "Zuordnung eine Brücke entfernen, stellen Sie sicher, dass Sie die Verbindung " "der Brücke zur Integrationsbrücke unterbrechen, da sie nicht mehr vom " "Agenten verwaltet wird. Veraltet für ofagent." 
msgid "" "Comma-separated list of : tuples mapping " "physical network names to the agent's node-specific physical network device " "interfaces of SR-IOV physical function to be used for VLAN networks. All " "physical networks listed in network_vlan_ranges on the server should have " "mappings to appropriate interfaces on each agent." msgstr "" "Liste mit durch Kommas voneinander getrennten Tupeln vom Typ " ":, in der die Namen physischer Netze den " "knotenspezifischen Netzeinheitenschnittstellen des Agenten mit der " "physischen SR-IOV-Funktion zugeordnet wird, die für VLAN-Netze verwendet " "werden soll. Alle physischen Netze, die in 'network_vlan_ranges' auf dem " "Server aufgeführt sind, sollten entsprechenden Schnittstellen in jedem " "Agenten zugeordnet werden." msgid "" "Comma-separated list of : tuples " "mapping physical network names to the agent's node-specific physical network " "interfaces to be used for flat and VLAN networks. All physical networks " "listed in network_vlan_ranges on the server should have mappings to " "appropriate interfaces on each agent." msgstr "" "Liste mit durch Kommas voneinander getrennten Tupeln vom Typ " ":, in der die physischen Netznamen den " "knotenspezifischen, physischen Netzschnittstellen des Agenten zugeordnet " "sind, die für einfache und VLNA-Netze verwendet werden sollen. Alle " "physischen Netze, die über die Eigenschaft 'network_vlan_ranges' auf dem " "Server aufgelistet sind, müssen Zuordnungen zu den entsprechenden " "Schnittstellen in jedem Agenten haben." msgid "" "Comma-separated list of : tuples enumerating ranges of GRE " "tunnel IDs that are available for tenant network allocation" msgstr "" "Durch Kommas getrennte Liste von : Tupeln, die Bereiche " "von GRE-Tunnel-IDs aufzählen, die für eine Nutzernetzzuordnung verfügbar sind" msgid "" "Comma-separated list of : tuples enumerating ranges of " "Geneve VNI IDs that are available for tenant network allocation" msgstr "" "Durch Kommas getrennte Liste mit den :-Tupeln, die die " "Bereiche der Geneve-VNI-IDs aufzählen, die für eine Nutzernetzzuordnung " "verfügbar sind" msgid "" "Comma-separated list of : tuples enumerating ranges of " "VXLAN VNI IDs that are available for tenant network allocation" msgstr "" "Durch Kommas getrennte Liste von : Tupeln, die Bereiche " "von VXLAN-VNI-IDs aufzählen, die für eine Nutzernetzzuordnung verfügbar sind" msgid "" "Comma-separated list of supported PCI vendor devices, as defined by " "vendor_id:product_id according to the PCI ID Repository. Default enables " "support for Intel and Mellanox SR-IOV capable NICs." msgstr "" "Liste mit durch Kommas voneinander getrennten PCI-Anbietergeräten, definiert " "durch vendor_id:product_id entsprechend dem PCI-ID-Repository. Die " "Standardeinstellung aktiviert die Unterstützung für SR-IOV-fähige " "Netzschnittstellencontroller von Intel und Mellanox." msgid "" "Comma-separated list of the DNS servers which will be used as forwarders." msgstr "" "Durch Kommas getrennte Liste der DNS-Server, die künftig als " "Weiterleitungsserver verwendet werden." msgid "Command to execute" msgstr "Auszuführender Befehl" msgid "Config file for interface driver (You may also use l3_agent.ini)" msgstr "" "Konfigurationsdatei für Schnittstellentreiber (Sie können auch 'l3_agent." 
"ini' verwenden)" #, python-format msgid "Conflicting value ethertype %(ethertype)s for CIDR %(cidr)s" msgstr "Kollidierender Wert bei Ethernet-Typ %(ethertype)s für CIDR %(cidr)s" msgid "" "Controls whether the neutron security group API is enabled in the server. It " "should be false when using no security groups or using the nova security " "group API." msgstr "" "Steuert, ob die Neutron-Sicherheitsgruppen-API im Server aktiviert ist. " "Sollte 'false' sein, wenn keine Sicherheitsgruppen verwendet werden oder " "wenn die Nova-Sicherheitsgruppen-API verwendet wird." #, python-format msgid "Could not bind to %(host)s:%(port)s after trying for %(time)d seconds" msgstr "" "Keine Bindung an %(host)s:%(port)s möglich nach Versuch über %(time)d " "Sekunden" #, python-format msgid "Could not connect to %s" msgstr "Es konnte keine Verbindung zu %s hergestellt werden." msgid "Could not deserialize data" msgstr "Daten konnten nicht deserialisiert werden" #, python-format msgid "Could not retrieve schema from %(conn)s: %(err)s" msgstr "Das Schema konnte nicht aus %(conn)s abgerufen werden: %(err)s" #, python-format msgid "" "Current gateway ip %(ip_address)s already in use by port %(port_id)s. Unable " "to update." msgstr "" "Aktuelle Gateway-IP-Adresse %(ip_address)s wird bereits verwendet von Port " "%(port_id)s. Aktualisierung nicht möglich." msgid "Currently update of HA mode for a DVR/HA router is not supported." msgstr "" "Die Aktualisierung des HA-Modus für einen DVT/HA-Router wird momentan nicht " "unterstützt." msgid "Currently update of HA mode for a distributed router is not supported." msgstr "" "Die Aktualisierung des HA-Modus für einen verteilten Router wird momentan " "nicht unterstützt. " msgid "" "Currently update of distributed mode for a DVR/HA router is not supported" msgstr "" "Die Aktualisierung des verteilten Modus für einen DVR-/HA-Router wird " "momentan nicht unterstützt." msgid "Currently update of distributed mode for an HA router is not supported." msgstr "" "Die Aktualisierung des verteilten Modus für einen HA-Router wird momentan " "nicht unterstützt." msgid "" "Currently updating a router from DVR/HA to non-DVR non-HA is not supported." msgstr "" "Die Aktualisierung eines Routers von DVR/HA auf einen Router ohne DVR und " "ohne Hochverfügbarkeit wird momentan nicht unterstützt." msgid "Currently updating a router to DVR/HA is not supported." msgstr "" "Die Aktualisierung eines Routers auf DVR/HA wird momentan nicht unterstützt." msgid "" "DHCP lease duration (in seconds). Use -1 to tell dnsmasq to use infinite " "lease times." msgstr "" "DHCP-Leasedauer (in Sekunden). Verwenden Sie -1, damit dnsmasq unbegrenzte " "Leasedauern verwendet." msgid "" "DVR deployments for VXLAN/GRE/Geneve underlays require L2-pop to be enabled, " "in both the Agent and Server side." msgstr "" "DVR-Implementierungen für VXLAN/GRE/Geneve-Underlays erfordern die " "Aktivierung von L2-pop sowohl auf der Agenten- als auch auf der Serverseite." msgid "" "Database engine for which script will be generated when using offline " "migration." msgstr "" "Datenbankengine, für die bei Verwendung der Offline-Migration ein Script " "generiert wird." msgid "" "Default IPv4 subnet pool to be used for automatic subnet CIDR allocation. " "Specifies by UUID the pool to be used in case where creation of a subnet is " "being called without a subnet pool ID. If not set then no pool will be used " "unless passed explicitly to the subnet create. 
"If no pool is used, then a CIDR must be passed to create a subnet and that "
"subnet will not be allocated from any pool; it will be considered part of "
"the tenant's private address space. This option is deprecated for removal in "
"the N release."
msgstr ""
"Der IPv4-Standardsubnetzpool, der für die automatische Subnetz-CIDR-"
"Zuordnung verwendet werden soll. Über die UUID wird der Pool angegeben, der "
"in den Fällen verwendet werden soll, wenn die Erstellung eines Subnetzes "
"ohne Subnetz-Pool-ID angefordert wird. Wenn diese Einstellung nicht "
"definiert ist, wird kein Pool verwendet, es sei denn, er wird explizit für "
"die Subnetzerstellung übergeben. Wenn kein Pool verwendet wird, muss eine "
"CIDR für die Erstellung eines Subnetzes übergeben werden und dieses Subnetz "
"wird nicht aus einem beliebigen Pool zugeordnet. Es wird als Teil des "
"privaten Mandantenadressraums betrachtet. Diese Option ist veraltet und wird "
"im N-Release entfernt."

msgid ""
"Default IPv6 subnet pool to be used for automatic subnet CIDR allocation. "
"Specifies by UUID the pool to be used in case where creation of a subnet is "
"being called without a subnet pool ID. See the description for "
"default_ipv4_subnet_pool for more information. This option is deprecated for "
"removal in the N release."
msgstr ""
"Der IPv6-Standardsubnetzpool, der für die automatische Subnetz-CIDR-"
"Zuordnung verwendet werden soll. Über die UUID wird der Pool angegeben, der "
"in den Fällen verwendet werden soll, wenn die Erstellung eines Subnetzes "
"ohne Subnetz-Pool-ID angefordert wird. Weitere Informationen finden Sie in "
"der Beschreibung für die Einstellung 'default_ipv4_subnet_pool'. Diese "
"Option ist veraltet und wird im N-Release entfernt."

msgid "Default driver to use for quota checks"
msgstr "Standardtreiber zur Verwendung für Kontingentprüfungen"

msgid "Default external networks must be shared to everyone."
msgstr "Externe Standardnetze müssen für alle freigegeben werden."

msgid ""
"Default network type for external networks when no provider attributes are "
"specified. By default it is None, which means that if provider attributes "
"are not specified while creating external networks then they will have the "
"same type as tenant networks. Allowed values for external_network_type "
"config option depend on the network type values configured in type_drivers "
"config option."
msgstr ""
"Standardnetztyp für externe Netze, wenn keine Anbieterattribute angegeben "
"wurden. Der Standardwert None bedeutet, dass, wenn keine Anbieterattribute "
"beim Erstellen von externen Netzen angegeben werden, derselbe Typ wie bei "
"Nutzernetzen verwendet wird. Die zulässigen Werte für die "
"Konfigurationsoption external_network_type hängen von den konfigurierten "
"Netztypwerten in der Konfigurationsoption type_drivers ab."

msgid ""
"Default number of RBAC entries allowed per tenant. A negative value means "
"unlimited."
msgstr ""
"Standardanzahl an zulässigen RBAC-Einträgen pro Nutzer. Ein negativer Wert "
"bedeutet unbegrenzt."

msgid ""
"Default number of resource allowed per tenant. A negative value means "
"unlimited."
msgstr ""
"Standardanzahl an zulässigen Ressourcen pro Nutzer. Ein negativer Wert "
"bedeutet unbegrenzt."

msgid "Default security group"
msgstr "Standardsicherheitsgruppe"

msgid "Default security group already exists."
msgstr "Standardsicherheitsgruppe ist bereits vorhanden."

msgid ""
"Default value of availability zone hints. "
"The availability zone aware schedulers use this when the resources "
"availability_zone_hints is empty. Multiple availability zones can be "
"specified by a comma separated string. This value can be empty. In this "
"case, even if availability_zone_hints for a resource is empty, availability "
"zone is considered for high availability while scheduling the resource."
msgstr ""
"Der Standardwert für die Eigenschaft 'availability_zone_hints'. Die mit "
"Verfügbarkeitszonen kompatiblen Scheduler verwenden diesen Wert, wenn der "
"Wert für 'availability_zone_hints' der Ressourcen leer ist. Mehrere "
"Verfügbarkeitszonen können als Zeichenfolge, durch Kommas getrennt, "
"angegeben werden. Dieser Wert kann leer sein. In diesem Fall wird die "
"Verfügbarkeitszone bei der Ressourcenplanung als hoch verfügbar betrachtet, "
"auch dann, wenn die Eigenschaft 'availability_zone_hints' für eine Ressource "
"leer ist."

msgid ""
"Define the default value of enable_snat if not provided in "
"external_gateway_info."
msgstr ""
"Definieren Sie den Standardwert von enable_snat, falls in "
"external_gateway_info nichts angegeben ist."

msgid ""
"Defines providers for advanced services using the format: <service_type>:"
"<name>:<driver>[:default]"
msgstr ""
"Definiert Provider für erweiterte Services mit dem folgenden Format: "
"<service_type>:<name>:<driver>[:default]"

msgid ""
"Delay within which agent is expected to update existing ports whent it "
"restarts"
msgstr ""
"Verzögerung, in der der Agent die vorhandenen Ports aktualisieren soll, wenn "
"ein Neustart erfolgt"

msgid "Delete the namespace by removing all devices."
msgstr "Löschen Sie den Namensbereich durch Entfernen aller Geräte."

#, python-format
msgid "Deleting port %s"
msgstr "Port %s wird gelöscht"

#, python-format
msgid "Deployment error: %(reason)s."
msgstr "Implementierungsfehler: %(reason)s."

msgid "Destroy IPsets even if there is an iptables reference."
msgstr "IPsets löschen, auch wenn eine iptables-Referenz vorhanden ist."

msgid "Destroy all IPsets."
msgstr "Alle IPsets löschen."

#, python-format
msgid "Device %(dev_name)s in mapping: %(mapping)s not unique"
msgstr "Einheit %(dev_name)s in Zuordnung %(mapping)s nicht eindeutig"

#, python-format
msgid "Device '%(device_name)s' does not exist."
msgstr "Das Gerät '%(device_name)s' ist nicht vorhanden."

msgid "Device has no virtual functions"
msgstr "Einheit verfügt über keine virtuellen Funktionen"

#, python-format
msgid "Device name %(dev_name)s is missing from physical_device_mappings"
msgstr "Einheitenname %(dev_name)s fehlt in physical_device_mappings"

msgid "Device not found"
msgstr "Einheit nicht gefunden"

#, python-format
msgid ""
"Distributed Virtual Router Mac Address for host %(host)s does not exist."
msgstr ""
"MAC-Adresse von verteiltem virtuellem Router für Host %(host)s ist nicht "
"vorhanden."

#, python-format
msgid "Domain %(dns_domain)s not found in the external DNS service"
msgstr "Die Domäne %(dns_domain)s wurde im externen DNS-Dienst nicht gefunden."

msgid "Domain to use for building the hostnames"
msgstr "Für das Erstellen von Hostnamen zu verwendende Domäne"

msgid ""
"Domain to use for building the hostnames. This option is deprecated. It has "
"been moved to neutron.conf as dns_domain. It will be removed in a future "
"release."
msgstr ""
"Für das Erstellen von Hostnamen zu verwendende Domäne. Diese Option ist "
"veraltet. Sie wurde als 'dns_domain' nach 'neutron.conf' verschoben. Sie "
"wird in einem zukünftigen Release entfernt."
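# Illustrative sketch (not part of the upstream catalog): an example of the
# <service_type>:<name>:<driver>[:default] provider format described above,
# as it could appear in a service_providers section; the driver path shown is
# a placeholder, not a recommendation:
#
#   [service_providers]
#   service_provider = LOADBALANCERV2:Haproxy:neutron_lbaas.drivers.haproxy.plugin_driver.HaproxyOnHostPluginDriver:default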
msgid "Downgrade no longer supported" msgstr "Herabstufung wird nicht mehr unterstützt" #, python-format msgid "Driver %s is not unique across providers" msgstr "Treiber %s ist für Anbieter nicht eindeutig" msgid "Driver for external DNS integration." msgstr "Treiber für externe DNS-Integration." msgid "Driver for security groups firewall in the L2 agent" msgstr "Treiber für Sicherheitsgruppen-Firewall im L2-Agenten" msgid "Driver to use for scheduling network to DHCP agent" msgstr "Zu verwendender Treiber bei Netzzuordnung zum DHCP-Agenten" msgid "Driver to use for scheduling router to a default L3 agent" msgstr "Zu verwendender Treiber bei Routerzuordnung zum Standard-L3-Agenten" msgid "" "Driver used for ipv6 prefix delegation. This needs to be an entry point " "defined in the neutron.agent.linux.pd_drivers namespace. See setup.cfg for " "entry points included with the neutron source." msgstr "" "Treiber, der für die IPv6-Präfixdelegierung verwendet wird. Dies muss ein " "Einstiegspunkt sein, der im Namensbereich neutron.agent.linux.pd_drivers " "definiert ist. In setup.cfg finden Sie die Einstiegspunkte, die in der " "Neutron-Quelle enthalten sind." msgid "Driver used for scheduling BGP speakers to BGP DrAgent" msgstr "Treiber, der verwendet wird, um BGP-Speaker für BGP DrAgent zu planen." msgid "Drivers list to use to send the update notification" msgstr "" "Liste der Treiber, die zum Senden der Benachrichtigung zur Aktualisierung " "verwendet werden soll" #, python-format msgid "Duplicate IP address '%s'" msgstr "Doppelte IP-Adresse '%s'" #, python-format msgid "" "Duplicate L3HARouterAgentPortBinding is created for router(s) %(router)s. " "Database cannot be upgraded. Please, remove all duplicates before upgrading " "the database." msgstr "" "Doppelte L3HARouterAgentPortBinding wird für Router %(router)s erstellt. Es " "konnte kein Aktualisierung für die Datenbank durchgeführt werden. Entfernen " "Sie alle Duplikate, bevor Sie die Aktualisierung der Datenbank durchführen." msgid "Duplicate Metering Rule in POST." msgstr "Doppelte Messungsregel in POST." msgid "Duplicate Security Group Rule in POST." msgstr "Doppelte Sicherheitsgruppenregel in POST." msgid "Duplicate address detected" msgstr "Doppelte Adresse erkannt." #, python-format msgid "Duplicate hostroute '%s'" msgstr "Doppelte Hostroute '%s'" #, python-format msgid "Duplicate items in the list: '%s'" msgstr "Doppelte Elemente in der Liste: '%s'" #, python-format msgid "Duplicate nameserver '%s'" msgstr "Doppelter Namensserver '%s'" msgid "Duplicate segment entry in request." msgstr "Doppelter Segmenteintrag in Anforderung." #, python-format msgid "ERROR: %s" msgstr "FEHLER: %s" msgid "" "ERROR: Unable to find configuration file via the default search paths (~/." "neutron/, ~/, /etc/neutron/, /etc/) and the '--config-file' option!" msgstr "" "FEHLER: Konfigurationsdatei kann über die Standardsuchpfade (~/.neutron/, " "~/, /etc/neutron/, /etc/) und über die Option '--config-file' nicht gefunden " "werden!" msgid "" "Either one of parameter network_id or router_id must be passed to _get_ports " "method." msgstr "" "Einer der Parameter network_id und router_id muss an die Methode _get_ports " "übergeben werden." msgid "Either subnet_id or port_id must be specified" msgstr "Entweder 'subnet_id' oder 'port_id' muss angegeben sein" msgid "Empty physical network name." msgstr "Leerer Name für physisches Netz." msgid "Empty subnet pool prefix list." 
msgstr "Leere Präfixliste für Subnetzpool" msgid "Enable FWaaS" msgstr "FWaaS aktivieren" msgid "Enable HA mode for virtual routers." msgstr "Hochverfügbarkeitsmodus für virtuelle Router aktivieren." msgid "Enable SSL on the API server" msgstr "SSL auf dem API-Server aktivieren" msgid "" "Enable VXLAN on the agent. Can be enabled when agent is managed by ml2 " "plugin using linuxbridge mechanism driver" msgstr "" "VXLAN auf dem Agenten aktivieren. Kann aktiviert werden, wenn der Agent vom " "ml2-Plug-in mithilfe eines Linuxbridge-Mechanismus-Treibers verwaltet wird" msgid "" "Enable local ARP responder if it is supported. Requires OVS 2.1 and ML2 " "l2population driver. Allows the switch (when supporting an overlay) to " "respond to an ARP request locally without performing a costly ARP broadcast " "into the overlay." msgstr "" "Aktivieren Sie den lokalen ARP-Responder, wenn dies unterstützt wird. Dies " "erfordert OVS 2.1 und einen ML2-l2population-Treiber. Dadurch wird es dem " "Switch (bei Unterstützung eines Overlay) ermöglicht, lokal auf eine ARP-" "Anforderung zu reagieren, ohne einen aufwändigen ARP-Broadcast in das " "Overlay durchzuführen." msgid "" "Enable local ARP responder which provides local responses instead of " "performing ARP broadcast into the overlay. Enabling local ARP responder is " "not fullycompatible with the allowed-address-pairs extension." msgstr "" "Aktivieren Sie den lokalen ARP-Responder, der lokale Antworten bereitstellt " "anstatt ARP-Broadcasts im Overlay durchzuführen. Die Aktivierung eines " "lokalen ARP-Responders ist nicht vollständig kompatibel mit der Erweiterung " "zulässiger Adresspaare." msgid "" "Enable services on an agent with admin_state_up False. If this option is " "False, when admin_state_up of an agent is turned False, services on it will " "be disabled. Agents with admin_state_up False are not selected for automatic " "scheduling regardless of this option. But manual scheduling to such agents " "is available if this option is True." msgstr "" "Aktivieren Sie Services auf einem Agenten mit admin_state_up False. Wenn " "diese Option 'False' lautet und wenn admin_state_up eines Agenten auf " "'False' gesetzt wird, werden die Dienste darauf deaktiviert. Agenten mit " "admin_state_up False werden, unabhängig von dieser Option, nicht für die " "automatische Planung ausgewählt. Die manuelle Planung ist für solche Agenten " "jedoch verfügbar, wenn diese Option auf 'True' gesetzt ist." msgid "" "Enable suppression of ARP responses that don't match an IP address that " "belongs to the port from which they originate. Note: This prevents the VMs " "attached to this agent from spoofing, it doesn't protect them from other " "devices which have the capability to spoof (e.g. bare metal or VMs attached " "to agents without this flag set to True). Spoofing rules will not be added " "to any ports that have port security disabled. For LinuxBridge, this " "requires ebtables. For OVS, it requires a version that supports matching ARP " "headers. This option will be removed in Newton so the only way to disable " "protection will be via the port security extension." msgstr "" "Aktivieren Sie die Unterdrückung von ARP-Antworten, die nicht mit einer IP-" "Adresse übereinstimmen, die zu dem Port gehört, von dem sie stammen. " "Hinweis: Dies verhindert das Spoofing durch die VMs, die an diesen Agenten " "angehängt sind, bietet aber keinen Schutz vor anderen Einheiten, die die " "Möglichkeit des Spoofing haben (z. B. 
Bare-Metal-Maschinen oder VMs, die an "
"Agenten angehängt sind, ohne dass dieses Flag auf True festgelegt wurde). "
"Spoofing-Regeln werden Ports, bei denen die Portsicherheit inaktiviert "
"wurde, nicht hinzugefügt. Bei LinuxBridge erfordert dies ebtables. Bei OVS "
"erfordert dies eine Version, die übereinstimmende ARP-Header unterstützt. "
"Diese Option wird in Newton entfernt, sodass der Schutz nur über die "
"Portsicherheitserweiterung inaktiviert werden kann."

msgid ""
"Enable/Disable log watch by metadata proxy. It should be disabled when "
"metadata_proxy_user/group is not allowed to read/write its log file and "
"copytruncate logrotate option must be used if logrotate is enabled on "
"metadata proxy log files. Option default value is deduced from "
"metadata_proxy_user: watch log is enabled if metadata_proxy_user is agent "
"effective user id/name."
msgstr ""
"Protokollüberwachung über Metadaten-Proxy aktivieren/deaktivieren. Sie "
"sollte deaktiviert werden, wenn metadata_proxy_user/group keine Lese- und "
"Schreibberechtigung für die Protokolldatei hat. Außerdem muss die Option "
"'copytruncate logrotate' verwendet werden, wenn 'logrotate' für "
"Protokolldateien des Metadaten-Proxy aktiviert ist. Der Standardwert für die "
"Option wird von metadata_proxy_user abgeleitet: 'watch log' ist aktiviert, "
"wenn metadata_proxy_user die Agent-ausführende Benutzer-ID/-Name ist."

msgid ""
"Enables IPv6 Prefix Delegation for automatic subnet CIDR allocation. Set to "
"True to enable IPv6 Prefix Delegation for subnet allocation in a PD-capable "
"environment. Users making subnet creation requests for IPv6 subnets without "
"providing a CIDR or subnetpool ID will be given a CIDR via the Prefix "
"Delegation mechanism. Note that enabling PD will override the behavior of "
"the default IPv6 subnetpool."
msgstr ""
"Aktiviert die Delegierung von IPv6-Präfixen für die automatische Subnetz-"
"CIDR-Zuordnung. Setzen Sie den Wert auf 'True', um die Delegierung von IPv6-"
"Präfixen für die Subnetzzuordnung in einer für die Präfixdelegierung "
"geeigneten Umgebung zu aktivieren. Benutzer, die "
"Subnetzerstellungsanforderungen für IPv6-Subnetze ohne Angabe einer CIDR "
"oder Subnetzpool-ID stellen, erhalten eine CIDR über den "
"Präfixdelegierungsmechanismus. Beachten Sie, dass die Aktivierung der "
"Präfixdelegierung das Verhalten für den IPv6-Standardsubnetzpool außer Kraft "
"setzt."

msgid ""
"Enables the dnsmasq service to provide name resolution for instances via DNS "
"resolvers on the host running the DHCP agent. Effectively removes the '--no-"
"resolv' option from the dnsmasq process arguments. Adding custom DNS "
"resolvers to the 'dnsmasq_dns_servers' option disables this feature."
msgstr ""
"Aktiviert den dnsmasq-Dienst zur Bereitstellung von Namensauflösungen für "
"Instanzen mithilfe von DNS-Resolvern auf dem Host, auf dem der DHCP-Agent "
"ausgeführt wird. Entfernt die Option '--no-resolv' aus den dnsmasq-"
"Verarbeitungsargumenten. Dieses Feature wird deaktiviert, wenn angepasste "
"DNS-Resolver zur Option 'dnsmasq_dns_servers' hinzugefügt werden."

msgid "Encountered an empty component."
msgstr "Leere Komponente gefunden."

msgid "End of VLAN range is less than start of VLAN range"
msgstr "Ende des VLAN-Bereichs ist kleiner als Anfang des VLAN-Bereichs"

msgid "End of tunnel range is less than start of tunnel range"
msgstr "Ende des Tunnelbereichs ist kleiner als Anfang des Tunnelbereichs"

msgid "Enforce using split branches file structure."
msgstr "Verwendung einer Dateistruktur mit getrennten Zweigen erzwingen." msgid "" "Ensure that configured gateway is on subnet. For IPv6, validate only if " "gateway is not a link local address. Deprecated, to be removed during the " "Newton release, at which point the gateway will not be forced on to subnet." msgstr "" "Stellen Sie sicher, dass sich das konfigurierte Gateway in einem Subnetz " "befindet. Bei IPv6 muss nur dann geprüft werden, wenn das Gateway keine " "Link-Lokal-Adresse ist. Veraltet, soll beim Newton-Release entfernt werden. " "Zu dem Zeitpunkt muss sich das Gateway nicht mehr zwingend im Subnetz " "befinden." #, python-format msgid "Error %(reason)s while attempting the operation." msgstr "Fehler %(reason)s beim Ausführen der Operation." #, python-format msgid "Error importing FWaaS device driver: %s" msgstr "Fehler beim Importieren von FWaas-Treiber: %s" #, python-format msgid "Error parsing dns address %s" msgstr "Fehler bei Auswertung der DNS-Adresse %s" #, python-format msgid "Error while reading %s" msgstr "Fehler beim Lesen von %s" #, python-format msgid "" "Exceeded %s second limit waiting for address to leave the tentative state." msgstr "" "Der Grenzwert von %s Sekunde(n) wurde überschritten, als darauf gewartet " "wurde, dass sich der vorläufige Status der Adresse ändert." msgid "Exceeded maximum amount of fixed ips per port." msgstr "Maximale Anzahl an festen IP-Adressen pro Port überschritten." msgid "Existing prefixes must be a subset of the new prefixes" msgstr "Vorhandene Präfixe müssen eine Untergruppe der neuen Präfixe sein" #, python-format msgid "" "Exit code: %(returncode)d; Stdin: %(stdin)s; Stdout: %(stdout)s; Stderr: " "%(stderr)s" msgstr "" "Exit-Code: %(returncode)d; Standardeingabe: %(stdin)s; Standardausgabe: " "%(stdout)s; Standardfehler: %(stderr)s" #, python-format msgid "Extension %(driver)s failed." msgstr "Erweiterungs-%(driver)s fehlgeschlagen." #, python-format msgid "" "Extension driver %(driver)s required for service plugin %(service_plugin)s " "not found." msgstr "" "Der Erweiterungstreiber %(driver)s, der für das Dienste-Plugin " "%(service_plugin)s erforderlich ist, wurde nicht gefunden." msgid "" "Extension to use alongside ml2 plugin's l2population mechanism driver. It " "enables the plugin to populate VXLAN forwarding table." msgstr "" "Erweiterung zur Verwendung mit dem l2population-Mechanismus-Treiber des ml2-" "Plug-ins. Sie ermöglicht dem Plug-in das Belegen der VXLAN-" "Weiterleitungstabelle." #, python-format msgid "Extension with alias %s does not exist" msgstr "Erweiterung mit Alias %s ist nicht vorhanden" msgid "Extensions list to use" msgstr "Zur verwendende Liste der Erweiterungen" #, python-format msgid "Extensions not found: %(extensions)s." msgstr "Erweiterungen nicht gefunden: %(extensions)s." #, python-format msgid "External DNS driver %(driver)s could not be found." msgstr "Der externe DNS-Treiber %(driver)s konnte nicht gefunden werden." #, python-format msgid "External IP %s is the same as the gateway IP" msgstr "Externe IP %s entspricht der Gateway-IP" #, python-format msgid "" "External network %(external_network_id)s is not reachable from subnet " "%(subnet_id)s. Therefore, cannot associate Port %(port_id)s with a Floating " "IP." msgstr "" "Externes Netz %(external_network_id)s ist von Teilnetz %(subnet_id)s aus " "nicht erreichbar. Daher kann Port %(port_id)s keiner dynamischen IP-Adresse " "zugeordnet werden." 
#, python-format msgid "" "External network %(net_id)s cannot be updated to be made non-external, since " "it has existing gateway ports" msgstr "" "Externes Netz %(net_id)s kann nicht so aktualisiert werden, dass es nicht " "mehr extern ist, da es über Gateway-Ports verfügt" #, python-format msgid "ExtraDhcpOpt %(id)s could not be found" msgstr "ExtraDhcpOpt %(id)s konnte nicht gefunden werden" msgid "" "FWaaS plugin is configured in the server side, but FWaaS is disabled in L3-" "agent." msgstr "" "FWaaS-Plug-in ist auf der Serverseite konfiguriert, aber FWaaS ist für L3-" "Agent inaktiviert." #, python-format msgid "Failed rescheduling router %(router_id)s: no eligible l3 agent found." msgstr "" "Fehler bei Neuterminierung von Router %(router_id)s: kein auswählbarer L3-" "Agent gefunden." #, python-format msgid "Failed scheduling router %(router_id)s to the L3 Agent %(agent_id)s." msgstr "" "Zuordnung des Routers %(router_id)s zum L3-Agenten %(agent_id)s ist " "fehlgeschlagen." #, python-format msgid "" "Failed to allocate a VRID in the network %(network_id)s for the router " "%(router_id)s after %(max_tries)s tries." msgstr "" "Das Zuordnen der ID eines virtuellen Routers im Netz %(network_id)s für den " "Router %(router_id)s ist nach %(max_tries)s Versuchen fehlgeschlagen." #, python-format msgid "Failed to allocate subnet: %(reason)s." msgstr "Fehler beim Zuordnen von Subnetz: %(reason)s." msgid "" "Failed to associate address scope: subnetpools within an address scope must " "have unique prefixes." msgstr "" "Der Adressbereich konnte nicht zugeordnet werden. Subnetzpools innerhalb " "eines Adressbereichs müssen eindeutige Präfixe haben." #, python-format msgid "Failed to check policy %(policy)s because %(reason)s." msgstr "" "Die Richtlinie %(policy)s konnte nicht überprüft werden. Ursache: %(reason)s" #, python-format msgid "" "Failed to create a duplicate %(object_type)s: for attribute(s) " "%(attributes)s with value(s) %(values)s" msgstr "" "Fehler beim Erstellen eines Duplikats von %(object_type)s: für Attribut(e) " "%(attributes)s mit Wert(en) %(values)s" #, python-format msgid "" "Failed to create port on network %(network_id)s, because fixed_ips included " "invalid subnet %(subnet_id)s" msgstr "" "Port auf Netz %(network_id)s wurde nicht erstellt, da 'fixed_ips' ungültiges " "Teilnetz %(subnet_id)s enthielt" #, python-format msgid "Failed to init policy %(policy)s because %(reason)s." msgstr "" "Die Richtlinie %(policy)s konnte nicht initialisiert werden. Ursache: " "%(reason)s" #, python-format msgid "Failed to locate source for %s." msgstr "Quelle für %s nicht gefunden." #, python-format msgid "Failed to parse request. Parameter '%s' not specified" msgstr "Anforderung wurde nicht analysiert. Parameter '%s' nicht angegeben" #, python-format msgid "Failed to parse request. Required attribute '%s' not specified" msgstr "" "Anforderung wurde nicht analysiert. Erforderliches Attribut %s nicht " "angegeben" msgid "Failed to remove supplemental groups" msgstr "Fehler beim Entfernen zusätzlicher Gruppen" #, python-format msgid "Failed to set gid %s" msgstr "Fehler beim Festlegen von GID %s" #, python-format msgid "Failed to set uid %s" msgstr "Fehler beim Festlegen von Benutzer-ID %s" #, python-format msgid "Failed to set-up %(type)s tunnel port to %(ip)s" msgstr "Fehler bei der Konfiguration eines %(type)s-Tunnel-Ports auf %(ip)s" msgid "Failure applying iptables rules" msgstr "Fehler beim Anwenden von iptables-Regeln." 
#, python-format msgid "Failure waiting for address %(address)s to become ready: %(reason)s" msgstr "" "Fehler beim Warten darauf, dass Adresse %(address)s bereit ist: %(reason)s" msgid "Flat provider networks are disabled" msgstr "Einfache Anbieternetzwerke sind deaktiviert." #, python-format msgid "Flavor %(flavor_id)s could not be found." msgstr "Die Variante %(flavor_id)s konnte nicht gefunden werden." #, python-format msgid "Flavor %(flavor_id)s is used by some service instance." msgstr "" "Die Variante %(flavor_id)s wird von einigen Diensteinstanzen verwendet. " msgid "Flavor is not enabled." msgstr "Die Variante ist nicht aktiviert." #, python-format msgid "Floating IP %(floatingip_id)s could not be found" msgstr "Dynamische IP-Adresse %(floatingip_id)s konnte nicht gefunden werden" #, python-format msgid "" "Floating IP %(floatingip_id)s is associated with non-IPv4 address " "%s(internal_ip)s and therefore cannot be bound." msgstr "" "Dynamische IP-Adresse %(floatingip_id)s wurde der Nicht-IPv4-Adresse " "%s(internal_ip)s zugeordnet und kann daher nicht gebunden werden." msgid "For TCP/UDP protocols, port_range_min must be <= port_range_max" msgstr "Für TCP/UDP-Protokolle muss 'port_range_min' '<= port_range_max' sein" #, python-format msgid "For class %(object_type)s missing primary keys: %(missing_keys)s" msgstr "" "Für die Klasse %(object_type)s fehlen primäre Schlüssel: %(missing_keys)s" msgid "Force ip_lib calls to use the root helper" msgstr "ip_lib-Aufrufe erzwingen, um Roothilfeprogramm zu verwenden" #, python-format msgid "Found duplicate extension: %(alias)s." msgstr "Doppelte Erweiterung gefunden: %(alias)s." #, python-format msgid "" "Found overlapping allocation pools: %(pool_1)s %(pool_2)s for subnet " "%(subnet_cidr)s." msgstr "" "Überschneidung bei Zuordnungspools %(pool_1)s %(pool_2)s für Teilnetz " "%(subnet_cidr)s gefunden." msgid "Gateway IP version inconsistent with allocation pool version" msgstr "" "Die Version der Gateway-IP stimmt nicht mit der Version des Zuordnungspools " "überein." #, python-format msgid "" "Gateway cannot be updated for router %(router_id)s, since a gateway to " "external network %(net_id)s is required by one or more floating IPs." msgstr "" "Gateway kann nicht für Router %(router_id)s aktualisiert werden, da ein " "Gateway zum externen Netz %(net_id)s für eine oder mehrere dynamische IP-" "Adressen erforderlich ist. " #, python-format msgid "Gateway ip %(ip_address)s conflicts with allocation pool %(pool)s." msgstr "" "Gateway-IP '%(ip_address)s' steht im Konflikt mit Zuordnungspool %(pool)s." msgid "Gateway is not valid on subnet" msgstr "Gateway ist auf Teilnetz nicht gültig" msgid "" "Geneve encapsulation header size is dynamic, this value is used to calculate " "the maximum MTU for the driver. This is the sum of the sizes of the outer " "ETH + IP + UDP + GENEVE header sizes. The default size for this field is 50, " "which is the size of the Geneve header without any additional option headers." msgstr "" "Die Größe des Geneve-Kapselungsheaders ist dynamisch. Dieser Wert wird " "verwendet, um den maximalen MTU-Wert für den Treiber zu berechnen. Dies ist " "die Summe aus den Größen der äußeren Headergrößen für ETH + IP + UDP + " "GENEVE. Die Standardgröße für dieses Feld ist 50 und entspricht der Größe " "des Geneve-Headers ohne zusätzliche Optionsheader." 
msgid "Group (gid or name) running metadata proxy after its initialization" msgstr "" "Gruppe (Gruppen-ID oder Name), die Metadaten-Proxy nach der Initialisierung " "ausführt" msgid "" "Group (gid or name) running metadata proxy after its initialization (if " "empty: agent effective group)." msgstr "" "Gruppe (Gruppen-ID oder Name), die Metadaten-Proxy nach der Initialisierung " "ausführt (falls leer: Agent-ausführende Gruppe)." msgid "Group (gid or name) running this process after its initialization" msgstr "" "Gruppe (Gruppen-ID oder Name), die diesen Prozess nach der Initialisierung " "ausführt" #, python-format msgid "HEAD file does not match migration timeline head, expected: %s" msgstr "" "HEAD-Datei stimmt nicht mit Migrationszeitplan für HEAD überein. Erwartet: %s" msgid "" "Hostname to be used by the Neutron server, agents and services running on " "this machine. All the agents and services running on this machine must use " "the same host value." msgstr "" "Hostname, der vom Neutron-Server, von Agenten und Services auf dieser " "Maschine verwendet werden soll. Alle auf dieser Maschine ausgeführten " "Agenten und Services müssen denselben Hostwert verwenden." msgid "How many times Neutron will retry MAC generation" msgstr "Wie oft Neutron die MAC-Adressenerstellung erneut versuchen wird" #, python-format msgid "" "ICMP code (port-range-max) %(value)s is provided but ICMP type (port-range-" "min) is missing." msgstr "" "ICMP-Code (port-range-max) %(value)s ist angegeben, aber ICMP-Typ (port-" "range-min) fehlt." msgid "ID of network" msgstr "Netz-ID" msgid "ID of network to probe" msgstr "ID von Netz das überprüft werden soll" msgid "ID of probe port to delete" msgstr "ID von Überprüfungsport der gelöscht werden soll" msgid "ID of probe port to execute command" msgstr "ID von Überprüfungsport zum Ausführen des Befehls" msgid "ID of the router" msgstr "ID des Routers" #, python-format msgid "IP address %(ip)s already allocated in subnet %(subnet_id)s" msgstr "IP-Adresse %(ip)s bereits in Teilnetz %(subnet_id)s zugeordnet" #, python-format msgid "IP address %(ip)s does not belong to subnet %(subnet_id)s" msgstr "IP-Adresse %(ip)s gehört nicht zu Teilnetz %(subnet_id)s" #, python-format msgid "" "IP address %(ip_address)s is not a valid IP for any of the subnets on the " "specified network." msgstr "" "IP-Adresse %(ip_address)s ist keine gültige IP für die Teilnetze im " "angegebenen Netz." msgid "IP address used by Nova metadata server." msgstr "Von Nova-Metadatenserver verwendete IP-Adresse." msgid "IP allocation failed. Try again later." msgstr "IP-Zuordnung fehlgeschlagen. Versuchen Sie es später noch einmal." msgid "IP allocation requires subnet_id or ip_address" msgstr "'subnet_id' oder 'ip_address' für IP-Zuordnung erforderlich" #, python-format msgid "" "IPTablesManager.apply failed to apply the following set of iptables rules:\n" "%s" msgstr "" "IPTablesManager.apply hat den folgenden Satz an iptables-Regeln nicht " "angewendet:\n" "%s" msgid "IPtables conntrack zones exhausted, iptables rules cannot be applied." msgstr "" "IPtables-conntrack-Zonen erschöpft. iptables-Regeln können nicht angewendet " "werden." msgid "IPv6 Address Mode must be SLAAC or Stateless for Prefix Delegation." msgstr "" "Für Präfixdelegierung muss der IPv6-Adressmodus SLAAC oder Stateless sein." msgid "IPv6 RA Mode must be SLAAC or Stateless for Prefix Delegation." msgstr "" "Für Präfixdelegierung muss der IPv6-RA-Modus SLAAC oder Stateless sein." 
#, python-format msgid "" "IPv6 address %(address)s can not be directly assigned to a port on subnet " "%(id)s since the subnet is configured for automatic addresses" msgstr "" "IPv6-Adresse %(address)s kann einem Port im Teilnetz %(id)s nicht direkt " "zugeordnet werden, da das Teilnetz für automatische Adressen konfiguriert " "wurde" #, python-format msgid "" "IPv6 address %(ip)s cannot be directly assigned to a port on subnet " "%(subnet_id)s as the subnet is configured for automatic addresses" msgstr "" "IPv6-Adresse %(ip)s kann nicht direkt einem Port im Teilnetz %(subnet_id)s " "zugeordnet werden, da das Teilnetz für automatische Adressen konfiguriert " "wurde" #, python-format msgid "" "IPv6 subnet %s configured to receive RAs from an external router cannot be " "added to Neutron Router." msgstr "" "IPv6-Teilnetz %s, das für den Empfang von RAs von einem externen Router " "konfiguriert ist, kann nicht zum Neutron-Router hinzugefügt werden." msgid "" "If True, advertise network MTU values if core plugin calculates them. MTU is " "advertised to running instances via DHCP and RA MTU options." msgstr "" "Bei 'True' werden MTU-Netzwerte zugänglich gemacht, wenn das Kern-Plugin sie " "berechnet hat. MTU wird den aktiven Instanzen über DHCP und RA-MTU-Optionen " "zugänglich gemacht." msgid "" "If True, then allow plugins that support it to create VLAN transparent " "networks." msgstr "" "Bei 'True' sollen Plugins, die dies unterstützen, VLAN-transparente Netze " "erstellen dürfen." msgid "" "If non-empty, the l3 agent can only configure a router that has the matching " "router ID." msgstr "" "Wenn 'namespaces' inaktiviert ist, kann der Agent der Ebene 3 nur einen " "Router konfigurieren, der über die entsprechende Router-ID verfügt." msgid "Illegal IP version number" msgstr "Illegale IP-Versionsnummer" #, python-format msgid "" "Illegal prefix bounds: %(prefix_type)s=%(prefixlen)s, %(base_prefix_type)s=" "%(base_prefixlen)s." msgstr "" "Unzulässige Präfix-Bindungen: %(prefix_type)s=%(prefixlen)s, " "%(base_prefix_type)s=%(base_prefixlen)s." #, python-format msgid "" "Illegal subnetpool association: subnetpool %(subnetpool_id)s cannot " "associate with address scope %(address_scope_id)s because subnetpool " "ip_version is not %(ip_version)s." msgstr "" "Unzulässige Subnetzpoolzuordnung: Subnetzpool %(subnetpool_id)s kann dem " "Addressbereich %(address_scope_id)s nicht zugeordnet werden, da 'ip_version' " "für den Subnetzpool nicht %(ip_version)s ist." #, python-format msgid "" "Illegal subnetpool association: subnetpool %(subnetpool_id)s cannot be " "associated with address scope %(address_scope_id)s." msgstr "" "Unzulässige Subnetzpoolzuordnung: Subnetzpool %(subnetpool_id)s kann nicht " "dem Adressbereich %(address_scope_id)s zugeorndet werden." #, python-format msgid "Illegal subnetpool update : %(reason)s." msgstr "Unzulässige Aktualisierung von Subnetzen : %(reason)s." #, python-format msgid "Illegal update to prefixes: %(msg)s." msgstr "Unzulässige Aktualisierung von Präfixen: %(msg)s." msgid "" "In some cases the Neutron router is not present to provide the metadata IP " "but the DHCP server can be used to provide this info. Setting this value " "will force the DHCP server to append specific host routes to the DHCP " "request. If this option is set, then the metadata service will be activated " "for all the networks." msgstr "" "In einigen Fällen ist kein Neutron-Router vorhanden, um die Metadaten-IP " "bereitzustellen. 
Der DHCP-Server kann jedoch für die Bereitstellung dieser " "Informationen verwendet werden. Das Setzen dieses Werts bewirkt, dass der " "DHCP-Server bestimmte Hostrouten an die DHCP-Anforderung anhängt. Wenn diese " "Option gesetzt ist, wird der Metadatendienst für alle Netze aktiviert." #, python-format msgid "Incorrect pci_vendor_info: \"%s\", should be pair vendor_id:product_id" msgstr "" "Falsche pci_vendor_info: \"%s\", muss als Paar vendor_id:product_id " "angegeben werden" msgid "" "Indicates that this L3 agent should also handle routers that do not have an " "external network gateway configured. This option should be True only for a " "single agent in a Neutron deployment, and may be False for all agents if all " "routers must have an external network gateway." msgstr "" "Gibt an, dass dieser L3-Agent auch Router ohne ein konfiguriertes externes " "Netzgateway verarbeiten soll. Diese Option sollte nur für einen einzelnen " "Agenten in einer Neutron-Implementierung auf 'True' gesetzt werden und kann " "für alle Agenten auf 'False' gesetzt werden, wenn alle Router ein externes " "Netzgateway erfordern." #, python-format msgid "Instance of class %(module)s.%(class)s must contain _cache attribute" msgstr "" "Die Instanz der Klasse %(module)s.%(class)s muss das Attribut '_cache' " "enthalten." #, python-format msgid "Insufficient prefix space to allocate subnet size /%s" msgstr "" "Unzureichender Präfixraum für die Zuordnung von Teilnetzgröße /%s" msgid "Insufficient rights for removing default security group." msgstr "" "Berechtigungen sind für das Entfernen der Standardsicherheitsgruppe nicht " "ausreichend." msgid "" "Integration bridge to use. Do not change this parameter unless you have a " "good reason to. This is the name of the OVS integration bridge. There is one " "per hypervisor. The integration bridge acts as a virtual 'patch bay'. All VM " "VIFs are attached to this bridge and then 'patched' according to their " "network connectivity." msgstr "" "Zu verwendende Integrationsbrücke. Ändern Sie diesen Parameter nur, wenn Sie " "gute Gründe dafür haben. Dies ist der Name der OVS-Integrationsbrücke. Es " "gibt eine pro Hypervisor. Die Integrationsbrücke agiert als virtuelle Patch-" "Bay. Alle VM-VIFs werden an diese Brücke angehängt und anschließend " "entsprechend ihrer Netzkonnektivität gepatcht." msgid "Interface to monitor" msgstr "Zu überwachende Schnittstelle" msgid "" "Interval between checks of child process liveness (seconds), use 0 to disable" msgstr "" "Intervall zwischen Überprüfungen der Aktivität von untergeordneten Prozessen " "(Sekunden), verwenden Sie zum Deaktivieren '0'" msgid "Interval between two metering measures" msgstr "Intervall zwischen zwei Messungsmaßnahmen" msgid "Interval between two metering reports" msgstr "Intervall zwischen zwei Messungsberichten" #, python-format msgid "Invalid CIDR %(input)s given as IP prefix." msgstr "Ungültiges CIDR %(input)s als IP-Präfix angegeben." #, python-format msgid "" "Invalid CIDR %s for IPv6 address mode. OpenStack uses the EUI-64 address " "format, which requires the prefix to be /64." msgstr "" "Ungültiges CIDR %s für IPv6-Adressenmodus. OpenStack verwendet das EUI-64-" "Adressformat, für das das Präfix /64 lauten muss." #, python-format msgid "Invalid Device %(dev_name)s: %(reason)s" msgstr "Ungültige Einheit %(dev_name)s: %(reason)s" #, python-format msgid "" "Invalid action '%(action)s' for object type '%(object_type)s'. 
Valid " "actions: %(valid_actions)s" msgstr "" "Ungültige Aktion '%(action)s' für Objekttyp '%(object_type)s'. Gültige " "Aktionen: %(valid_actions)s" #, python-format msgid "" "Invalid authentication type: %(auth_type)s, valid types are: " "%(valid_auth_types)s" msgstr "" "Ungültiger Authentifizierungstyp: %(auth_type)s, gültige Typen sind: " "%(valid_auth_types)s" #, python-format msgid "Invalid content type %(content_type)s." msgstr "Ungültiger Inhaltstyp %(content_type)s." #, python-format msgid "Invalid data format for IP pool: '%s'" msgstr "Ungültiges Datenformat für IP-Pool: '%s'" #, python-format msgid "Invalid data format for extra-dhcp-opt: %(data)s" msgstr "Ungültiges Datenformat für extra-dhcp-opt: %(data)s" #, python-format msgid "Invalid data format for fixed IP: '%s'" msgstr "Ungültiges Datenformat für statische IP: '%s'" #, python-format msgid "Invalid data format for hostroute: '%s'" msgstr "Ungültiges Datenformat für Hostroute: '%s'" #, python-format msgid "Invalid data format for nameserver: '%s'" msgstr "Ungültiges Datenformat für Namensserver: '%s'" #, python-format msgid "Invalid ethertype %(ethertype)s for protocol %(protocol)s." msgstr "Ungültiger Ethernet-Typ %(ethertype)s für Protokoll %(protocol)s." #, python-format msgid "Invalid extension environment: %(reason)s." msgstr "Ungültige Erweiterungsumgebung: %(reason)s." #, python-format msgid "Invalid format for routes: %(routes)s, %(reason)s" msgstr "Ungültiges Format für Routen: %(routes)s, %(reason)s" #, python-format msgid "Invalid format: %s" msgstr "Ungültiges Format: %s" #, python-format msgid "Invalid input for %(attr)s. Reason: %(reason)s." msgstr "Ungültige Eingabe für %(attr)s. Grund: %(reason)s." #, python-format msgid "" "Invalid input. '%(target_dict)s' must be a dictionary with keys: " "%(expected_keys)s" msgstr "" "Ungültige Eingabe. '%(target_dict)s' muss ein Verzeichnis mit Schlüsseln " "sein: %(expected_keys)s" #, python-format msgid "Invalid instance state: %(state)s, valid states are: %(valid_states)s" msgstr "" "Ungültiger Instanzstatus: %(state)s, gültige Status sind: %(valid_states)s" #, python-format msgid "Invalid mapping: '%s'" msgstr "Ungültige Zuordnung: '%s'" #, python-format msgid "Invalid network VLAN range: '%(vlan_range)s' - '%(error)s'." msgstr "Ungültiger Bereich für Netz-VLAN: '%(vlan_range)s' - '%(error)s'." #, python-format msgid "Invalid network VXLAN port range: '%(vxlan_range)s'." msgstr "Ungültiger Netz-VXLAN-Portbereich: '%(vxlan_range)s'." #, python-format msgid "Invalid pci slot %(pci_slot)s" msgstr "Ungültiger PCI-Steckplatz %(pci_slot)s" #, python-format msgid "Invalid provider format. Last part should be 'default' or empty: %s" msgstr "" "Ungültiges Anbieterformat. Letzter Teil sollte 'default' oder leer sein: %s" #, python-format msgid "Invalid resource type %(resource_type)s" msgstr "Ungültiger Ressourcentyp %(resource_type)s" #, python-format msgid "Invalid route: %s" msgstr "Ungültige Route: %s" msgid "Invalid service provider format" msgstr "Ungültiges Diensteanbieterformat" #, python-format msgid "Invalid service type %(service_type)s." msgstr "Ungültiger Dienstetyp %(service_type)s." #, python-format msgid "" "Invalid value for ICMP %(field)s (%(attr)s) %(value)s. It must be 0 to 255." msgstr "" "Ungültiger Wert für ICMP %(field)s (%(attr)s) %(value)s. Er muss zwischen 0 " "und 255 liegen." 
#, python-format msgid "Invalid value for port %(port)s" msgstr "Ungültiger Wert für Port %(port)s" msgid "" "Iptables mangle mark used to mark ingress from external network. This mark " "will be masked with 0xffff so that only the lower 16 bits will be used." msgstr "" "Iptables-Mangling-Markierung zum Markieren des Eingangs vom externen Netz. " "Diese Markierung wird mit 0xffff maskiert, sodass nur die unteren 16 Bits " "verwendet werden." msgid "" "Iptables mangle mark used to mark metadata valid requests. This mark will be " "masked with 0xffff so that only the lower 16 bits will be used." msgstr "" "Iptables-Mangling-Markierung zum Markieren von Metadaten gültiger " "Anforderungen. Diese Markierung wird mit 0xffff maskiert, sodass nur die " "unteren 16 Bits verwendet werden." msgid "" "Keep in track in the database of current resourcequota usage. Plugins which " "do not leverage the neutron database should set this flag to False" msgstr "" "Überwachen der aktuellen Kontingentnutzung in der Datenbank. Bei Plugins, " "die die Neutron-Datenbank nicht verwenden, sollte dieses Flag auf False " "festgelegt werden" msgid "Keepalived didn't respawn" msgstr "Keepalived wurde nicht generiert" msgid "Keepalived didn't spawn" msgstr "Keepalived hat keinen Prozess erstellt." #, python-format msgid "" "Kernel HZ value %(value)s is not valid. This value must be greater than 0." msgstr "" "Der Kernel-HZ-Wert %(value)s ist nicht gültig. Dieser Wert muss größer als 0 " "sein." #, python-format msgid "Key %(key)s in mapping: '%(mapping)s' not unique" msgstr "Schlüssel %(key)s in Zuordnung: '%(mapping)s' nicht eindeutig" msgid "L3 agent failure to setup NAT for floating IPs" msgstr "L3-Agentenfehler bei der NAT-Konfiguration für Floating IPs." msgid "L3 agent failure to setup floating IPs" msgstr "L3-Agentenfehler bei der Konfiguration von Floating IPs." #, python-format msgid "Limit must be an integer 0 or greater and not '%d'" msgstr "Der Grenzwert muss eine Ganzzahl größer 0 sein und nicht '%d'" msgid "Limit number of leases to prevent a denial-of-service." msgstr "Anzahl von Leases begrenzen, um eine Dienstverweigerung zu verhindern." msgid "List of :" msgstr "Liste mit den Elementen :" msgid "" "List of :: or " "specifying physical_network names usable for VLAN provider and tenant " "networks, as well as ranges of VLAN tags on each available for allocation to " "tenant networks." msgstr "" "Liste mit :: oder , " "die physical_network-Namen angeben, die für VLAN-Provider- und Nutzer-Netze " "verwendet werden können, wie auch als Bereiche von VLAN-Tags für jedes " "verfügbare Netz für die Zuordnung zu Nutzernetzen." msgid "" "List of network type driver entrypoints to be loaded from the neutron.ml2." "type_drivers namespace." msgstr "" "Liste der Netztypentreibereingangspunkte, die aus dem Namensbereich neutron." "ml2.type_drivers geladen werden." msgid "" "List of physical_network names with which flat networks can be created. Use " "default '*' to allow flat networks with arbitrary physical_network names. " "Use an empty list to disable flat networks." msgstr "" "Liste von physical_network-Namen, mit denen einfache Netze erstellt werden " "können. Verwenden Sie den Standardwert '*', um einfache Netze mit beliebigen " "physical_network-Namen zuzulassen. Verwenden Sie eine leere Liste, um " "einfache Netze zu inaktivieren." msgid "Local IP address of the VXLAN endpoints." msgstr "Lokale IP-Adresse von VXLAN-Endpunkten." msgid "Location for Metadata Proxy UNIX domain socket." 
msgstr "Position für UNIX-Domänensocket von Metadaten-Proxy." msgid "Location of Metadata Proxy UNIX domain socket" msgstr "Position von UNIX-Domänensocket von Metadatenproxy" msgid "Location of pid file of this process." msgstr "Position der PID-Datei für diesen Prozess." msgid "Location to store DHCP server config files." msgstr "Position zum Speichern von Konfigurationsdateien des DHCP-Servers." msgid "Location to store IPv6 PD files." msgstr "Position zum Speichern von IPv6-PD-Dateien." msgid "Location to store IPv6 RA config files" msgstr "Position zum Speichern von IPv6-RA-Konfigurationsdateien" msgid "Location to store child pid files" msgstr "Position zum Speichern von untergeordneten PID-Dateien" msgid "Location to store keepalived/conntrackd config files" msgstr "Position zum Speichern von keepalived/conntrackd-Konfigurationsdateien" msgid "Log agent heartbeats" msgstr "Überwachungssignale von Agenten protokollieren" msgid "Loopback IP subnet is not supported if enable_dhcp is True." msgstr "" "Loopback-IP-Subnetz wird nicht unterstützt, wenn 'enable_dhcp' auf 'True' " "gesetzt ist." msgid "MTU size of veth interfaces" msgstr "MTU-Größe von Veth-Schnittstellen" msgid "Make the l2 agent run in DVR mode." msgstr "L2-Agent im DVR-Modus ausführen." msgid "Malformed request body" msgstr "Fehlerhafter Anforderungshauptteil" #, python-format msgid "Malformed request body: %(reason)s." msgstr "Fehlerhafter Anforderungshauptteil: %(reason)s." msgid "MaxRtrAdvInterval setting for radvd.conf" msgstr "MaxRtrAdvInterval-Einstellung für radvd.conf" msgid "Maximum number of DNS nameservers per subnet" msgstr "Maximale Anzahl an DNS-Namensservern pro Subnetz" msgid "" "Maximum number of L3 agents which a HA router will be scheduled on. If it is " "set to 0 then the router will be scheduled on every agent." msgstr "" "Maximale Anzahl an L3-Agenten, für die ein HA-Router geplant wird. Bei " "Angabe von 0 wird der Router für jeden Agenten geplant." msgid "Maximum number of allowed address pairs" msgstr "Maximale Anzahl an zulässigen Adresspaaren" msgid "" "Maximum number of fixed ips per port. This option is deprecated and will be " "removed in the N release." msgstr "" "Maximale Anzahl an festen IPs pro Port. Diese Option ist veraltet und wird " "im N-Release entfernt." msgid "Maximum number of host routes per subnet" msgstr "Maximale Anzahl an Hostroutes pro Subnetz" msgid "Maximum number of routes per router" msgstr "Maximale Anzahl an Routen pro Router" msgid "" "Metadata Proxy UNIX domain socket mode, 4 values allowed: 'deduce': deduce " "mode from metadata_proxy_user/group values, 'user': set metadata proxy " "socket mode to 0o644, to use when metadata_proxy_user is agent effective " "user or root, 'group': set metadata proxy socket mode to 0o664, to use when " "metadata_proxy_group is agent effective group or root, 'all': set metadata " "proxy socket mode to 0o666, to use otherwise." msgstr "" "Modus von UNIX-Domänensocket für Metadaten-Proxy, 4 Werte zulässig: " "'deduce': Modus aus Werten von metadata_proxy_user/group ableiten, 'user': " "Modus von Metadaten-Proxy-Socket auf 0o644 festlegen, zur Verwendung, wenn " "metadata_proxy_user Agent-ausführender Benutzer oder Root ist, 'group': " "Modus von Metadaten-Proxy-Socket auf 0o664 festlegen, zur Verwendung, wenn " "metadata_proxy_group Agent-ausführende Gruppe oder Root ist, 'all': Modus " "von Metadaten-Proxy-Socket auf 0o666 festlegen, zur anderweitigen Verwendung." 
msgid "Metering driver" msgstr "Messungstreiber" #, python-format msgid "Metering label %(label_id)s does not exist" msgstr "Messungsbezeichnung %(label_id)s ist nicht vorhanden" #, python-format msgid "Metering label rule %(rule_id)s does not exist" msgstr "Messungsbezeichnungsregel %(rule_id)s ist nicht vorhanden" #, python-format msgid "" "Metering label rule with remote_ip_prefix %(remote_ip_prefix)s overlaps " "another" msgstr "" "Messungsbezeichnungsregel mit remote_ip_prefix %(remote_ip_prefix)s weist " "eine Überschneidung mit einer anderen auf" msgid "Method cannot be called within a transaction." msgstr "Die Methode kann nicht aus einer Transaktion heraus aufgerufen werden." msgid "Migration from distributed router to centralized is not supported" msgstr "" "Die Migration von verteiltem Router zu zentralisiertem Router wird nicht " "unterstützt. " msgid "MinRtrAdvInterval setting for radvd.conf" msgstr "MinRtrAdvInterval-Einstellung für radvd.conf" msgid "Minimize polling by monitoring ovsdb for interface changes." msgstr "" "Abfrage minimieren durch Überwachung von ovsdb auf Schnittstellenänderungen." #, python-format msgid "Missing key in mapping: '%s'" msgstr "Fehlender Schlüssel in Zuordnung: '%s'" #, python-format msgid "Missing value in mapping: '%s'" msgstr "Fehlender Wert in Zuordnung: '%s'" msgid "Multicast IP subnet is not supported if enable_dhcp is True." msgstr "" "Multicast-IP-Subnetz wird nicht unterstützt, wenn 'enable_dhcp' auf 'True' " "gesetzt ist." msgid "" "Multicast group for VXLAN. When configured, will enable sending all " "broadcast traffic to this multicast group. When left unconfigured, will " "disable multicast VXLAN mode." msgstr "" "Multicastgruppe für VXLAN. Wenn sie konfiguriert ist, kann der gesamte " "Broadcastverkehr an diese Multicastgruppe gesendet werden. Ohne " "Konfiguration ist der Multicast-VXLAN-Modus inaktiviert." msgid "" "Multicast group(s) for vxlan interface. A range of group addresses may be " "specified by using CIDR notation. Specifying a range allows different VNIs " "to use different group addresses, reducing or eliminating spurious broadcast " "traffic to the tunnel endpoints. To reserve a unique group for each possible " "(24-bit) VNI, use a /8 such as 239.0.0.0/8. This setting must be the same on " "all the agents." msgstr "" "Multicastgruppe(n) für VXLAN-Schnittstelle. Ein Gruppenadressbereich, der " "mit der CIDR-Notation angegeben werden kann. Durch die Angabe eines Bereichs " "können unterschiedliche VNIs verschiedene Gruppenadressen verwenden und so " "fehlerhaften Broadcastverkehr an Tunnelendpunkt senkden oder entfernen. Wenn " "Sie eine eindeutige Gruppe für jede mögliche VNI (24 Bit) reservieren " "möchten, verwenden Sie die Einstellung /8, wie z. B. 239.0.0.0/8. Diese " "Einstellung muss für alle Agenten gleich sein." 
#, python-format msgid "Multiple agents with agent_type=%(agent_type)s and host=%(host)s found" msgstr "" "Mehrere Agenten mit 'agent_type=%(agent_type)s' und 'host=%(host)s' wurden " "gefunden" #, python-format msgid "Multiple default providers for service %s" msgstr "Mehrere Standardanbieter für Dienst %s" #, python-format msgid "Multiple plugins for service %s were configured" msgstr "Mehrere Plugins für Dienst %s wurden konfiguriert" #, python-format msgid "Multiple providers specified for service %s" msgstr "Mehrere Anbieter angegeben für Dienst %s" msgid "Multiple tenant_ids in bulk security group rule create not allowed" msgstr "" "Mehrere 'tenant_ids' bei Erstellung von Sicherheitsgruppenregel für " "Massenerstellung nicht zulässig" msgid "Must also specify protocol if port range is given." msgstr "" "Bei angegebenem Portbereich muss ebenfalls ein Protokoll angegeben werden." msgid "Must specify one or more actions on flow addition or modification" msgstr "" "Angabe von einer oder mehreren Aktionen für Ablaufhinzufügung oder Änderung " "erforderlich" #, python-format msgid "Name %(dns_name)s is duplicated in the external DNS service" msgstr "Der Name %(dns_name)s ist im externen DNS-Dienst doppelt vorhanden." #, python-format msgid "" "Name '%s' must be 1-63 characters long, each of which can only be " "alphanumeric or a hyphen." msgstr "" "Der Name '%s' muss eine Länge von 1 - 63 Zeichen haben, die nur " "alphanumerisch oder ein Bindestrich sein dürfen." #, python-format msgid "Name '%s' must not start or end with a hyphen." msgstr "Der Name '%s' darf nicht mit einem Bindestrich beginnen oder enden." msgid "Name of Open vSwitch bridge to use" msgstr "Name der zu verwendenden Open vSwitch-Brücke" msgid "" "Name of nova region to use. Useful if keystone manages more than one region." msgstr "" "Name der zu verwendenden Nova-Region. Nützlich, wenn Keystone mehrere " "Regionen verwaltet. " msgid "Name of the FWaaS Driver" msgstr "Name des FWaaS-Treibers" msgid "Namespace of the router" msgstr "Namensbereich des Routers" msgid "Native pagination depend on native sorting" msgstr "Die native Paginierung ist von der nativen Sortierung abhängig" #, python-format msgid "" "Need to apply migrations from %(project)s contract branch. This will require " "all Neutron server instances to be shutdown before proceeding with the " "upgrade." msgstr "" "Es müssen Migrationen aus dem %(project)s-Contract-Branch angewendet werden. " "Hierfür müssen alle Neutron-Serverinstanzen heruntergefahren werden, bevor " "die Aktualisierung fortgesetzt werden kann. " msgid "Negative delta (downgrade) not supported" msgstr "Negatives Delta (Herabstufung) nicht unterstützt" msgid "Negative relative revision (downgrade) not supported" msgstr "Negative relative Revision (Herabstufung) nicht unterstützt" #, python-format msgid "" "Network %(network_id)s is already bound to BgpSpeaker %(bgp_speaker_id)s." msgstr "" "Das Netz %(network_id)s ist bereits an den BgpSpeaker %(bgp_speaker_id)s " "gebunden." #, python-format msgid "" "Network %(network_id)s is not associated with BGP speaker %(bgp_speaker_id)s." msgstr "" "Das Netz %(network_id)s ist dem BGP-Speaker %(bgp_speaker_id)s nicht " "zugeordnet." #, python-format msgid "Network %(network_id)s is not bound to a BgpSpeaker." msgstr "Das Netz %(network_id)s ist nicht an einen BgpSpeaker gebunden." #, python-format msgid "Network %(network_id)s is not bound to a IPv%(ip_version)s BgpSpeaker." 
msgstr "" "Das Netz %(network_id)s ist nicht an einen IPv%(ip_version)s-BgpSpeaker " "gebunden." #, python-format msgid "Network %s does not contain any IPv4 subnet" msgstr "Netz %s enthält kein IPv4-Teilnetz" #, python-format msgid "Network %s is not a valid external network" msgstr "Netz %s ist kein gültiges externes Netz" #, python-format msgid "Network %s is not an external network" msgstr "Netz %s ist kein externes Netz" #, python-format msgid "" "Network of size %(size)s, from IP range %(parent_range)s excluding IP ranges " "%(excluded_ranges)s was not found." msgstr "" "Netz der Größe %(size)s, aus IP-Bereich %(parent_range)s ausschließlich der " "IP-Bereiche %(excluded_ranges)s wurde nicht gefunden." msgid "Network that will have instance metadata proxied." msgstr "Netz, das über Proxy mit den Instanzmetadaten verbunden sein wird." #, python-format msgid "Network type value '%s' not supported" msgstr "Netztypwert '%s' wird nicht unterstützt" msgid "Network type value needed by the ML2 plugin" msgstr "Netztypwert für ML2-Plug-in erforderlich" msgid "Network types supported by the agent (gre and/or vxlan)." msgstr "" "Netztypen, die vom Agenten unterstützt werden ('gre' und/oder 'vxlan')." msgid "" "Neutron IPAM (IP address management) driver to use. If ipam_driver is not " "set (default behavior), no IPAM driver is used. In order to use the " "reference implementation of Neutron IPAM driver, use 'internal'." msgstr "" "Zu verwendender Neutron-IPAM-Treiber (Verwaltung von IP-Adressen). Wenn für " "'ipam_driver' kein Wert gesetzt ist (Standardverhalten), wird kein IPAM-" "Treiber verwendet. Verwenden Sie 'internal', um die Referenzimplementierung " "des Neutron-IPAM-Treibers zu verwenden." msgid "Neutron Service Type Management" msgstr "Neutron-Dienstetypverwaltung" msgid "Neutron core_plugin not configured!" msgstr "Neutron-'core_plugin' nicht konfiguriert!" msgid "Neutron plugin provider module" msgstr "Provider-Modul für Neutron-Plugin" msgid "Neutron quota driver class" msgstr "Neutron-Quotentreiberklasse" msgid "New value for first_ip or last_ip has to be specified." msgstr "Der neue Wert für 'first_ip' oder 'last_ip' muss angegeben werden." msgid "No default router:external network" msgstr "Kein router:external-Standardnetz" #, python-format msgid "No default subnetpool found for IPv%s" msgstr "Kein Standardsubnetzpool für IPv%s gefunden." msgid "No default subnetpools defined" msgstr "Es wurden keine Subnetzpools definiert." #, python-format msgid "No eligible l3 agent associated with external network %s found" msgstr "Kein auswählbarer dem externen Netz %s zugeordneter L3-Agent gefunden" #, python-format msgid "No more IP addresses available for subnet %(subnet_id)s." msgstr "Keine weiteren IP-Adressen für Teilnetz %(subnet_id)s verfügbar." #, python-format msgid "" "No more Virtual Router Identifier (VRID) available when creating router " "%(router_id)s. The limit of number of HA Routers per tenant is 254." msgstr "" "Es war keine ID für virtuelle Router (VRID - Virtual Router Identifier) " "verfügbar beim Erstellen von Router %(router_id)s. Der Grenzwert für die " "Anzahl an Hochverfügbarkeitsroutern pro Nutzer ist 254." msgid "No offline migrations pending." msgstr "Keine Offline-Migrationen anstehend." 
#, python-format msgid "No providers specified for '%s' service, exiting" msgstr "Keine Anbieter angegeben für Dienste '%s', wird beendet" #, python-format msgid "No shared key in %s fields" msgstr "Kein gemeinsam genutzter Schlüssel in %s-Feldern" msgid "Not allowed to manually assign a router to an agent in 'dvr' mode." msgstr "" "Es ist nicht zulässig, einem Agenten im Modus 'dvr' manuell einen Router " "zuzuordnen." msgid "Not allowed to manually remove a router from an agent in 'dvr' mode." msgstr "" "Es ist nicht zulässig, einen Router aus einem Agenten im Modus 'dvr' manuell " "zu entfernen." #, python-format msgid "" "Not enough l3 agents available to ensure HA. Minimum required " "%(min_agents)s, available %(num_agents)s." msgstr "" "Es sind nicht genügend L3-Agenten zum Sicherstellen der hohen Verfügbarkeit " "verfügbar. Die erforderliche Mindestanzahl ist %(min_agents)s, verfügbar " "sind %(num_agents)s." msgid "" "Number of DHCP agents scheduled to host a tenant network. If this number is " "greater than 1, the scheduler automatically assigns multiple DHCP agents for " "a given tenant network, providing high availability for DHCP service." msgstr "" "Anzahl der DHCP-Agenten, die zum Hosten eines Mandatennetzwerkes geplant " "werden. Wenn diese Zahl größer als 1 ist, weist der Scheduler automatisch " "mehrere DHCP-Agenten für ein angegebenes Nutzernetz zu, wodurch " "Hochverfügbarkeit für den DHCP-Service erreicht wird." msgid "Number of RPC worker processes dedicated to state reports queue" msgstr "" "Anzahl der RPC-Worker-Prozesse, die der Statusberichtswarteschlange " "zugewiesen ist." msgid "Number of RPC worker processes for service" msgstr "Anzahl der RPC-Verarbeitungsprozesse für den Dienst" msgid "Number of backlog requests to configure the metadata server socket with" msgstr "" "Anzahl der Rückstandanforderungen, mit denen der Metadatenserver-Socket " "konfiguriert werden soll" msgid "Number of backlog requests to configure the socket with" msgstr "" "Anzahl der Rückstandanforderungen, mit denen der Socket konfiguriert werden " "soll" msgid "" "Number of bits in an ipv4 PTR zone that will be considered network prefix. " "It has to align to byte boundary. Minimum value is 8. Maximum value is 24. " "As a consequence, range of values is 8, 16 and 24" msgstr "" "Anzahl von Bits in einer ipv4-PTR-Zone, die als Netzpräfix betrachtet wird. " "Es muss an der Bytegrenze ausgerichtet werden. Der Mindestwert ist 8. Der " "maximal zulässige Wert ist 24. Daraus ergibt sich ein Wertebereich von 8, 16 " "und 24. " msgid "" "Number of bits in an ipv6 PTR zone that will be considered network prefix. " "It has to align to nyble boundary. Minimum value is 4. Maximum value is 124. " "As a consequence, range of values is 4, 8, 12, 16,..., 124" msgstr "" "Anzahl von Bits in einer ipv6-PTR-Zone, die als Netzpräfix betrachtet wird. " "Es muss an der nyble-Grenze ausgerichtet werden. Der Mindestwert ist 4. Der " "maximal zulässige Wert ist 124. Daraus ergibt sich ein Wertebereich von 4, " "8, 12, 16,..., 124." msgid "" "Number of floating IPs allowed per tenant. A negative value means unlimited." msgstr "" "Anzahl an zulässigen dynamischen IPs pro Nutzer. Ein negativer Wert bedeutet " "unbegrenzt." msgid "" "Number of networks allowed per tenant. A negative value means unlimited." msgstr "" "Anzahl an zulässigen Netzen pro Nutzer. Ein negativer Wert bedeutet " "unbegrenzt." msgid "Number of ports allowed per tenant. A negative value means unlimited." 
msgstr "" "Anzahl an zulässigen Ports pro Nutzer. Ein negativer Wert bedeutet " "unbegrenzt." msgid "Number of routers allowed per tenant. A negative value means unlimited." msgstr "" "Anzahl an zulässigen Routern pro Nutzer. Ein negativer Wert bedeutet " "unbegrenzt." msgid "" "Number of seconds between sending events to nova if there are any events to " "send." msgstr "" "Anzahl der Sekunden zwischen dem Senden von Ereignissen an Nova, wenn " "Ereignisse zum Senden vorhanden sind. " msgid "Number of seconds to keep retrying to listen" msgstr "" "Anzahl der Sekunden, in denen wiederholt versucht wird, empfangsbereit zu " "sein" msgid "" "Number of security groups allowed per tenant. A negative value means " "unlimited." msgstr "" "Anzahl an zulässigen Sicherheitsgruppen pro Nutzer. Ein negativer Wert " "bedeutet unbegrenzt." msgid "" "Number of security rules allowed per tenant. A negative value means " "unlimited." msgstr "" "Anzahl an zulässigen Sicherheitsregeln pro Nutzer. Ein negativer Wert " "bedeutet unbegrenzt." msgid "" "Number of separate API worker processes for service. If not specified, the " "default is equal to the number of CPUs available for best performance." msgstr "" "Anzahl der separaten API-Worker-Prozesse für Dienst. Ohne Angabe wird als " "Standardwert die Anzahl der verfügbaren CPUs verwendet, damit die beste " "Leistung erzielt werden kann." msgid "" "Number of separate worker processes for metadata server (defaults to half of " "the number of CPUs)" msgstr "" "Anzahl der separaten Worker-Prozesse für Metadatenserver (wird standardmäßig " "auf die Hälfte der Anzahl der CPUs festgelegt)" msgid "Number of subnets allowed per tenant, A negative value means unlimited." msgstr "" "Anzahl an zulässigen Teilnetzen pro Nutzer. Ein negativer Wert bedeutet " "unbegrenzt." msgid "" "Number of threads to use during sync process. Should not exceed connection " "pool size configured on server." msgstr "" "Die Anzahl der während des Synchronisationsprozesses zu verwendenden " "Threads. Die Größe des auf dem Server konfigurierten Verbindungspools darf " "nicht überschritten werden." msgid "OK" msgstr "OK" msgid "" "OVS datapath to use. 'system' is the default value and corresponds to the " "kernel datapath. To enable the userspace datapath set this value to 'netdev'." msgstr "" "Zu verwendender OVS-Datenpfad. 'system' ist der Standardwert und entspricht " "dem Kernel-Datenpfad. Setzen Sie diesen Wert auf 'netdev', wenn Sie den " "Benutzerbereichsdatenpfad aktivieren möchten." msgid "OVS vhost-user socket directory." msgstr "OVS-vhost-user-Socketverzeichnis." #, python-format msgid "OVSDB Error: %s" msgstr "OVSDB-Fehler: %s" #, python-format msgid "Object action %(action)s failed because: %(reason)s." msgstr "Objektaktion %(action)s fehlgeschlagen, weil: %(reason)s." msgid "Only admin can view or configure quota" msgstr "Nur Admins können Kontingente anzeigen oder konfigurieren" msgid "Only admin is authorized to access quotas for another tenant" msgstr "" "Nur Administratoren sind dazu berechtigt, auf Kontingente für andere Nutzer " "zuzugreifen" msgid "Only admins can manipulate policies on networks they do not own." msgstr "" "Nur Administratoren können Richtlinien für Netze bearbeiten, deren Eigner " "sie nicht sind." msgid "Only admins can manipulate policies on objects they do not own" msgstr "" "Nur Administratoren können Richtlinien an Objekten bearbeiten, deren Eigner " "sie nicht sind." 
msgid "Only allowed to update rules for one security profile at a time" msgstr "" "Aktualisierung von Regeln nicht für mehrere Sicherheitsprofile gleichzeitig " "zulässig" msgid "Only remote_ip_prefix or remote_group_id may be provided." msgstr "Nur Angabe von 'remote_ip_prefix' oder 'remote_group_id' ist zulässig." msgid "OpenFlow interface to use." msgstr "Zu verwendende OpenFlow-Schnittstelle." #, python-format msgid "" "Operation %(op)s is not supported for device_owner %(device_owner)s on port " "%(port_id)s." msgstr "" "Operation %(op)s wird nicht unterstützt für device_owner %(device_owner)s " "auf Port %(port_id)s." #, python-format msgid "Operation not supported on device %(dev_name)s" msgstr "Operation auf Einheit %(dev_name)s nicht unterstützt" msgid "" "Ordered list of network_types to allocate as tenant networks. The default " "value 'local' is useful for single-box testing but provides no connectivity " "between hosts." msgstr "" "Sortierte Liste der network_types für die Zuordnung als Mandantennetze. Der " "Standardwert 'local' ist hilfreich für Einzeltests, bietet jedoch keine " "Konnektivität zwischen Hosts." msgid "Override the default dnsmasq settings with this file." msgstr "Standard-'dnsmasq'-Einstellungen mit dieser Datei außer Kraft setzen." msgid "Owner type of the device: network/compute" msgstr "Eigentümertyp des Geräts: Netz/Rechenknoten" msgid "POST requests are not supported on this resource." msgstr "POST-Anforderungen werden auf dieser Ressource nicht unterstützt." #, python-format msgid "Package %s not installed" msgstr "Paket %s nicht installiert" #, python-format msgid "Parameter %(param)s must be of %(param_type)s type." msgstr "Der Parameter %(param)s muss vom Typ %(param_type)s sein." #, python-format msgid "Parsing bridge_mappings failed: %s." msgstr "Analysieren von 'bridge_mappings' fehlgeschlagen: %s." msgid "Parsing supported pci_vendor_devs failed" msgstr "Die Analyse von unterstützten pci_vendor_devs ist fehlgeschlagen" msgid "Password for connecting to designate in admin context" msgstr "" "Kennwort zum Herstellen einer Verbindung zu Designate im " "Administratorkontext." #, python-format msgid "Password not specified for authentication type=%(auth_type)s." msgstr "" "Es wurde kein Kennwort für den Authentifizierungsstyp %(auth_type)s " "angegeben." msgid "Path to PID file for this process" msgstr "Pfad zur PID-Datei für diesen Prozess" msgid "Path to the router directory" msgstr "Pfad zum Routerverzeichnis" msgid "Peer patch port in integration bridge for tunnel bridge." msgstr "Peer-Patch-Port in Integrationsbrücke für Tunnelbrücke." msgid "Peer patch port in tunnel bridge for integration bridge." msgstr "Peer-Patch-Port in Tunnelbrücke für Integrationsbrücke." msgid "Per-tenant subnet pool prefix quota exceeded." msgstr "Kontingent für Subnetzpoolpräfix pro Mandant überschritten." msgid "Phase upgrade options do not accept revision specification" msgstr "Phasenupgradeoptionen akzeptieren keine Revisionsspezifikation" msgid "Ping timeout" msgstr "Ping-Zeitlimitüberschreitung" #, python-format msgid "Plugin '%s' not found." msgstr "Plugin '%s' nicht gefunden." msgid "Plugin does not support updating provider attributes" msgstr "" "Aktualisieren von Provider-Attributen wird von Plugin nicht unterstützt" msgid "Policy configuration policy.json could not be found." msgstr "" "Die Richtlinienkonfiguration 'policy.json' konnte nicht gefunden werden." 
#, python-format msgid "Port %(id)s does not have fixed ip %(address)s" msgstr "Port %(id)s verfügt nicht über statische IP-Adresse %(address)s" #, python-format msgid "Port %(port)s does not exist on %(bridge)s!" msgstr "Der Port %(port)s ist an %(bridge)s nicht vorhanden!" #, python-format msgid "Port %(port_id)s is already acquired by another DHCP agent" msgstr "" "Der Port %(port_id)s wird bereits von einem anderen DHCP-Agenten verwendet." #, python-format msgid "" "Port %(port_id)s is associated with a different tenant than Floating IP " "%(floatingip_id)s and therefore cannot be bound." msgstr "" "Port %(port_id)s ist einem anderen Nutzer zugeordnet als die dynamische IP-" "Adresse %(floatingip_id)s und kann daher nicht gebunden werden." #, python-format msgid "Port %(port_id)s is not managed by this agent. " msgstr "Der Port %(port_id)s wird nicht von diesem Agenten verwaltet." #, python-format msgid "Port %s does not exist" msgstr "Der Port %s ist nicht vorhanden." #, python-format msgid "" "Port %s has multiple fixed IPv4 addresses. Must provide a specific IPv4 " "address when assigning a floating IP" msgstr "" "Port %s besitzt mehrere statische IPv4-Adressen. Es muss eine bestimmte IPv4-" "Adresse angegeben werden, wenn eine dynamische IP zugewiesen wird" msgid "" "Port Security must be enabled in order to have allowed address pairs on a " "port." msgstr "" "Portsicherheit muss aktiviert werden, damit zulässige Adresspaare für einen " "Port vorhanden sind." msgid "" "Port has security group associated. Cannot disable port security or ip " "address until security group is removed" msgstr "" "Dem Port ist eine Sicherheitsgruppe zugeordnet. Inaktivieren von " "Portsicherheit oder IP-Adresse nur nach Entfernen der Sicherheitsgruppe " "möglich" msgid "" "Port security must be enabled and port must have an IP address in order to " "use security groups." msgstr "" "Portsicherheit muss aktiviert sein und Port muss über eine IP-Adresse " "verfügen, damit Sicherheitsgruppen verwendet werden können." msgid "" "Port to listen on for OpenFlow connections. Used only for 'native' driver." msgstr "" "Port, der auf OpenFlow-Verbindungen überwacht werden soll. Wird nur für " "'native' Treiber verwendet." #, python-format msgid "Prefix '%(prefix)s' not supported in IPv%(version)s pool." msgstr "Präfix '%(prefix)s' wird in IPv%(version)s-Pool nicht unterstützt." msgid "Prefix Delegation can only be used with IPv6 subnets." msgstr "Präfixdelegierung kann nur bei IPv6-Teilnetzen verwendet werden." msgid "Private key of client certificate." msgstr "Privater Schlüssel für Clientzertifikat." #, python-format msgid "Probe %s deleted" msgstr "Stichprobe %s gelöscht" #, python-format msgid "Probe created : %s " msgstr "Stichprobe erstellt: %s " msgid "Process is already started" msgstr "Prozess wurde bereits gestartet" msgid "Process is not running." msgstr "Prozess läuft nicht." msgid "Protocol to access nova metadata, http or https" msgstr "Protokoll für den Zugriff auf Nova-Metadaten, HTTP oder HTTPS" #, python-format msgid "Provider name %(name)s is limited by %(len)s characters" msgstr "Der Providername %(name)s ist auf %(len)s Zeichen begrenzt." #, python-format msgid "QoS Policy %(policy_id)s is used by %(object_type)s %(object_id)s." msgstr "" "QoS-Richtlinie %(policy_id)s wird durch %(object_type)s %(object_id)s " "verwendet." #, python-format msgid "" "QoS binding for network %(net_id)s and policy %(policy_id)s could not be " "found." 
msgstr "" "Die QoS-Bindung für das Netz %(net_id)s und die Richtlinie %(policy_id)s " "konnten nicht gefunden werden." #, python-format msgid "" "QoS binding for port %(port_id)s and policy %(policy_id)s could not be found." msgstr "" "Die QoS-Bindung für den Port %(port_id)s und die Richtlinie %(policy_id)s " "konnten nicht gefunden werden." #, python-format msgid "QoS policy %(policy_id)s could not be found." msgstr "Die QoS-Richtlinie %(policy_id)s konnte nicht gefunden werden. " #, python-format msgid "QoS rule %(rule_id)s for policy %(policy_id)s could not be found." msgstr "" "Die QoS-Regel %(rule_id)s für die Richtlinie %(policy_id)s konnte nicht " "gefunden werden." #, python-format msgid "RBAC policy of type %(object_type)s with ID %(id)s not found" msgstr "RBAC-Richtlinie des Typs %(object_type)s mit ID %(id)s nicht gefunden" #, python-format msgid "" "RBAC policy on object %(object_id)s cannot be removed because other objects " "depend on it.\n" "Details: %(details)s" msgstr "" "RBAC-Richtlinie für Objekt %(object_id)s kann nicht entfernt werden, da " "weitere Objekte von ihr abhängen.\n" "Details: %(details)s" msgid "" "Range of seconds to randomly delay when starting the periodic task scheduler " "to reduce stampeding. (Disable by setting to 0)" msgstr "" "Dauer in Sekunden, für die zufallsgeneriert beim Starten des Schedulers für " "regelmäßige Tasks gewartet werden soll, um die Belastung zu reduzieren. " "(Inaktivierung durch Festlegen auf 0)" msgid "Ranges must be in the same IP version" msgstr "Bereiche müssen dieselbe IP-Version haben." msgid "Ranges must be netaddr.IPRange" msgstr "Bereiche müssen 'netaddr.IPRange' sein." msgid "Ranges must not overlap" msgstr "Bereiche dürfen nicht überlappen." #, python-format msgid "" "Received type '%(type)s' and value '%(value)s'. Expecting netaddr.EUI type." msgstr "Typ '%(type)s' und Wert '%(value)s'. Erwartet wurde netaddr.EUI-Typ." #, python-format msgid "" "Received type '%(type)s' and value '%(value)s'. Expecting netaddr.IPAddress " "type." msgstr "" "Typ '%(type)s' und Wert '%(value)s' empfangen. Erwartet wurde netaddr." "IPAddress-Typ." #, python-format msgid "" "Received type '%(type)s' and value '%(value)s'. Expecting netaddr.IPNetwork " "type." msgstr "" "Typ '%(type)s' und Wert '%(value)s' empfangen. Erwartet wurde netaddr." "IPNetwork-Typ." #, python-format msgid "" "Release aware branch labels (%s) are deprecated. Please switch to expand@ " "and contract@ labels." msgstr "" "Versionssensitive Zweigbezeichnungen (%s) werden nicht weiter unterstützt. " "Wechseln Sie zu expand@- und contract@-Bezeichnungen." msgid "Remote metadata server experienced an internal server error." msgstr "Interner Serverfehler bei fernem Metadatenserver." msgid "" "Repository does not contain HEAD files for contract and expand branches." msgstr "" "Das Repository enthält keine HEAD-Dateien für Contract- und Expand-Branches. " msgid "" "Representing the resource type whose load is being reported by the agent. " "This can be \"networks\", \"subnets\" or \"ports\". When specified (Default " "is networks), the server will extract particular load sent as part of its " "agent configuration object from the agent report state, which is the number " "of resources being consumed, at every report_interval.dhcp_load_type can be " "used in combination with network_scheduler_driver = neutron.scheduler." 
"dhcp_agent_scheduler.WeightScheduler When the network_scheduler_driver is " "WeightScheduler, dhcp_load_type can be configured to represent the choice " "for the resource being balanced. Example: dhcp_load_type=networks" msgstr "" "Darstellung des Ressourcentyps, zu dessen Arbeitslast vom Agenten Bericht " "erstattet wird. Dies kann \"networks\", \"subnets\" oder \"ports\" sein. Bei " "Angabe (Standardwert ist 'networks') extrahiert der Server bei jedem " "report_interval eine bestimmte Arbeitslast, die als Teil des " "Agentenkonfigurationsobjekts vom Agentenberichtsstatus, der der Anzahl der " "konsumierten Ressourcen entspricht, gesendet wird. dhcp_load_type kann in " "Verbindung mit network_scheduler_driver = neutron.scheduler." "dhcp_agent_scheduler.WeightScheduler verwendet werden. Wenn der " "network_scheduler_driver WeightScheduler ist, kann dhcp_load_type so " "konfiguriert werden, dass die Auswahl für die Ressource mit Lastausgleich " "dargestellt wird. Beispiel: dhcp_load_type=networks" msgid "Request Failed: internal server error while processing your request." msgstr "" "Anforderung fehlgeschlagen: interner Serverfehler bei Verarbeitung Ihrer " "Anforderung." #, python-format msgid "" "Request contains duplicate address pair: mac_address %(mac_address)s " "ip_address %(ip_address)s." msgstr "" "Anforderung enthält doppeltes Adresspaar: mac_address %(mac_address)s " "ip_address %(ip_address)s." #, python-format msgid "" "Requested subnet with cidr: %(cidr)s for network: %(network_id)s overlaps " "with another subnet" msgstr "" "Angefordertes Teilnetz mit CIDR: %(cidr)s für Netz: %(network_id)s enthält " "Überschneidungen mit einem anderen Teilnetz" msgid "" "Reset flow table on start. Setting this to True will cause brief traffic " "interruption." msgstr "" "Zurücksetzen der Ablauftabelle beim Start. Bei der Einstellung True erfolgt " "eine kurze Unterbrechung des Datenverkehrs." #, python-format msgid "Resource %(resource)s %(resource_id)s could not be found." msgstr "Ressource %(resource)s %(resource_id)s konnte nicht gefunden werden." #, python-format msgid "Resource %(resource_id)s of type %(resource_type)s not found" msgstr "Ressource %(resource_id)s des Typs %(resource_type)s nicht gefunden" #, python-format msgid "" "Resource '%(resource_id)s' is already associated with provider " "'%(provider)s' for service type '%(service_type)s'" msgstr "" "Ressource '%(resource_id)s' ist bereits Anbieter '%(provider)s' für " "Dienstetyp '%(service_type)s' zugeordnet" msgid "Resource body required" msgstr "Ressourcen-Nachrichtentext erforderlich" msgid "" "Resource name(s) that are supported in quota features. This option is now " "deprecated for removal." msgstr "" "Resourcenname(n), die in Quotenfunktionen unterstützt werden. Diese Option " "wird jetzt nicht weiter unterstützt und kann später entfernt werden." msgid "Resource not found." msgstr "Ressource nicht gefunden." msgid "Resources required" msgstr "Ressourcen erforderlich" msgid "" "Root helper application. Use 'sudo neutron-rootwrap /etc/neutron/rootwrap." "conf' to use the real root filter facility. Change to 'sudo' to skip the " "filtering and just run the command directly." msgstr "" "Roothilfeprogramm. Setzen Sie 'sudo neutron-rootwrap /etc/neutron/rootwrap." "conf' ab, um die echte Rootfilterfunktion zu verwenden. Wechseln Sie zu " "'sudo', um das Filtern zu überspringen und den Befehl direkt auszuführen. " msgid "Root helper daemon application to use when possible." 
msgstr "Wenn möglich, Dämonanwendung für Roothilfeprogramm verwenden." msgid "Root permissions are required to drop privileges." msgstr "Rootberechtigungen sind zum Löschen von Berechtigungen erforderlich." #, python-format msgid "Route %(cidr)s not advertised for BGP Speaker %(speaker_as)d." msgstr "" "Route %(cidr)s wurde BGP-Speaker %(speaker_as)d nicht zugänglich gemacht." #, python-format msgid "Router %(router_id)s %(reason)s" msgstr "Router %(router_id)s %(reason)s" #, python-format msgid "Router %(router_id)s could not be found" msgstr "Router %(router_id)s konnte nicht gefunden werden" #, python-format msgid "Router %(router_id)s does not have an interface with id %(port_id)s" msgstr "" "Router %(router_id)s verfügt über keine Schnittstelle mit ID %(port_id)s" #, python-format msgid "Router %(router_id)s has no interface on subnet %(subnet_id)s" msgstr "" "Router %(router_id)s verfügt über keine Schnittstelle auf Teilnetz " "%(subnet_id)s" #, python-format msgid "Router '%(router_id)s' cannot be both DVR and HA." msgstr "Router '%(router_id)s' kann nicht gleichzeitig DVR und HA sein." #, python-format msgid "Router '%(router_id)s' is not compatible with this agent." msgstr "Router '%(router_id)s' ist mit diesem Agenten nicht kompatibel." #, python-format msgid "Router already has a port on subnet %s" msgstr "Router verfügt bereits über einen Port auf Teilnetz %s" #, python-format msgid "" "Router interface for subnet %(subnet_id)s on router %(router_id)s cannot be " "deleted, as it is required by one or more floating IPs." msgstr "" "Routerschnittstelle für Teilnetz %(subnet_id)s auf Router %(router_id)s kann " "nicht gelöscht werden, da sie für eine oder mehrere dynamische IP-Adressen " "erforderlich ist." #, python-format msgid "" "Router interface for subnet %(subnet_id)s on router %(router_id)s cannot be " "deleted, as it is required by one or more routes." msgstr "" "Routerschnittstelle für Teilnetz %(subnet_id)s auf Router %(router_id)s kann " "nicht gelöscht werden, da sie für eine oder mehrere Routen erforderlich ist." msgid "Router port must have at least one fixed IP" msgstr "Der Router-Port muss mindestens eine feste IP-Adresse haben." msgid "Router that will have connected instances' metadata proxied." msgstr "" "Router, mit dem die Metadaten der verbundenen Instanzen über Proxy verbunden " "sein werden." #, python-format msgid "" "Row doesn't exist in the DB. Request info: Table=%(table)s. Columns=" "%(columns)s. Records=%(records)s." msgstr "" "Die Zeile ist in der Datenbank nicht vorhanden. Angeforderte Informationen: " "Tabellen=%(table)s. Spalten=%(columns)s. Datensätze=%(records)s." msgid "Run as daemon." msgstr "Als Dämon ausführen." #, python-format msgid "Running %(cmd)s (%(desc)s) for %(project)s ..." msgstr "Ausführen von %(cmd)s (%(desc)s) für %(project)s ..." #, python-format msgid "Running %(cmd)s for %(project)s ..." msgstr "Ausführen von %(cmd)s für %(project)s ..." msgid "Running without keystone AuthN requires that tenant_id is specified" msgstr "" "Zum Ausführen ohne Keystone-Authentifizierung muss 'tenant_id' angegeben sein" msgid "" "Seconds between nodes reporting state to server; should be less than " "agent_down_time, best if it is half or less than agent_down_time." msgstr "" "Sekunden zwischen Status-Berichten von Knoten an Server; sollte geringer " "sein als agent_down_time; am besten sollte es die Hälfte oder weniger von " "agent_down_time betragen." 
msgid "Seconds between running periodic tasks" msgstr "Sekunden zwischen Ausführungen regelmäßiger Tasks" msgid "" "Seconds to regard the agent is down; should be at least twice " "report_interval, to be sure the agent is down for good." msgstr "" "Sekunden bis zur Annahme, dass der Agent inaktiv ist; sollte mindestens " "doppelt so hoch sein wie report_interval, damit sichergestellt ist, dass der " "Agent wirklich inaktiv ist." #, python-format msgid "Security Group %(id)s %(reason)s." msgstr "Sicherheitsgruppe %(id)s %(reason)s." #, python-format msgid "Security Group Rule %(id)s %(reason)s." msgstr "Sicherheitsgruppenregel %(id)s %(reason)s." #, python-format msgid "Security group %(id)s does not exist" msgstr "Sicherheitsgruppe %(id)s ist nicht vorhanden" #, python-format msgid "Security group rule %(id)s does not exist" msgstr "Sicherheitsgruppenregel %(id)s ist nicht vorhanden" #, python-format msgid "Security group rule already exists. Rule id is %(rule_id)s." msgstr "" "Die Sicherheitsgruppenregel ist bereits vorhanden. Die Regel-ID ist " "%(rule_id)s." #, python-format msgid "" "Security group rule for ethertype '%(ethertype)s' not supported. Allowed " "values are %(values)s." msgstr "" "Sicherheitsgruppenregel für Ethernet-Typ '%(ethertype)s' wird nicht " "unterstützt. Zulässige Werte: %(values)s." #, python-format msgid "" "Security group rule protocol %(protocol)s not supported. Only protocol " "values %(values)s and integer representations [0 to 255] are supported." msgstr "" "Regelprotokoll %(protocol)s für Sicherheitsgruppe nicht unterstützt. Nur " "Protokollwerte %(values)s und ganzzahlige Darstellungen [0 bis 255] werden " "unterstützt." msgid "Segments and provider values cannot both be set." msgstr "" "Es können nicht Segment- und Providerwerte gleichzeitig festgelegt werden." msgid "Selects the Agent Type reported" msgstr "Wählt den gemeldeten Agententyp aus" msgid "" "Send notification to nova when port data (fixed_ips/floatingip) changes so " "nova can update its cache." msgstr "" "Benachrichtigung an Nova senden, wenn sich die Portdaten (fixed_ips/" "floatingip) ändern, damit Nova den Zwischenspeicher aktualisieren kann. " msgid "Send notification to nova when port status changes" msgstr "Benachrichtigung an Nova senden, wenn sich der Portstatus ändert" msgid "" "Send this many gratuitous ARPs for HA setup, if less than or equal to 0, the " "feature is disabled" msgstr "" "Senden Sie diese Anzahl an Gratuitous ARPs für " "Hochverfügbarkeitskonfiguration; wenn der Wert kleiner oder gleich 0 ist, " "wird die Funktion inaktiviert " #, python-format msgid "Service Profile %(sp_id)s could not be found." msgstr "Das Diensteprofil %(sp_id)s konnte nicht gefunden werden." #, python-format msgid "Service Profile %(sp_id)s is already associated with flavor %(fl_id)s." msgstr "" "Das Diensteprofil %(sp_id)s ist bereits der Variante %(fl_id)s zugeordnet." #, python-format msgid "Service Profile %(sp_id)s is not associated with flavor %(fl_id)s." msgstr "" "Das Serviceprofil %(sp_id)s ist der Variante %(fl_id)s noch nicht zugeordnet." #, python-format msgid "Service Profile %(sp_id)s is used by some service instance." msgstr "" "Das Diensteprofil %(sp_id)s wird von einigen Diensteinstanzen verwendet." #, python-format msgid "Service Profile driver %(driver)s could not be found." msgstr "Der Serviceprofiltreiber %(driver)s konnte nicht gefunden werden." msgid "Service Profile is not enabled." msgstr "Das Diensteprofil ist nicht aktiviert." 
msgid "Service Profile needs either a driver or metainfo." msgstr "" "Das Serviceprofil erfordert entweder die Angabe eines Treibers oder " "Metainformationen." #, python-format msgid "" "Service provider '%(provider)s' could not be found for service type " "%(service_type)s" msgstr "" "Diensteanbieter '%(provider)s' konnte nicht für Dienstetyp %(service_type)s " "gefunden werden " msgid "Service to handle DHCPv6 Prefix delegation." msgstr "Service zum Behandeln der DHCPv6-Präfixdelegierung." #, python-format msgid "Service type %(service_type)s does not have a default service provider" msgstr "Dienstetyp %(service_type)s weist keinen Standard-Diensteanbieter auf" msgid "" "Set new timeout in seconds for new rpc calls after agent receives SIGTERM. " "If value is set to 0, rpc timeout won't be changed" msgstr "" "Neues Zeitlimit in Sekunden für neue RPC-Aufrufe festlegen, nachdem Agent " "SIGTERM empfängt. Wenn der Wert auf 0 gesetzt ist, wird das RPC-Zeitlimit " "nicht geändert" msgid "" "Set or un-set the don't fragment (DF) bit on outgoing IP packet carrying GRE/" "VXLAN tunnel." msgstr "" "DF-Bit (Don't Fragment) auf GRE/VXLAN-Tunnel für abgehende IP-Pakete " "festlegen oder die Festlegung aufheben." msgid "" "Set or un-set the tunnel header checksum on outgoing IP packet carrying GRE/" "VXLAN tunnel." msgstr "" "Kontrollsumme für Tunnelheader auf GRE/VXLAN-Tunnel für abgehende IP-Pakete " "festlegen oder die Festlegung aufheben." msgid "Shared address scope can't be unshared" msgstr "" "Freigabe des gemeinsam genutzten Adressbereichs kann nicht aufgehoben werden" msgid "" "Specifying 'tenant_id' other than authenticated tenant in request requires " "admin privileges" msgstr "" "Um für 'tenant_id' einen anderen Wert als die in der Anforderung " "authentifizierte Nutzer-ID anzugeben, sind Administratorberechtigungen " "erforderlich" msgid "String prefix used to match IPset names." msgstr "Zeichenfolgepräfix zum Abgleichen von IPset-Namen." #, python-format msgid "Sub-project %s not installed." msgstr "Unterprojekt %s nicht installiert." msgid "Subnet for router interface must have a gateway IP" msgstr "" "Teilnetz für Routerschnittstelle muss über eine Gateway-IP-Adresse verfügen" msgid "" "Subnet has a prefix length that is incompatible with DHCP service enabled." msgstr "" "Präfixlänge des Teilnetzes ist mit aktiviertem DHCP-Service nicht kompatibel." #, python-format msgid "Subnet pool %(subnetpool_id)s could not be found." msgstr "Subnetzpool %(subnetpool_id)s konnte nicht gefunden werden." msgid "Subnet pool has existing allocations" msgstr "Der Teilnetzpool verfügt über vorhandene Zuordnungen" msgid "Subnet used for the l3 HA admin network." msgstr "" "Teilnetz, das für das L3-Verwaltungsnetz für hohe Verfügbarkeit verwendet " "wird." msgid "" "Subnets hosted on the same network must be allocated from the same subnet " "pool." msgstr "" "Subnetze, die in demselben Netz gehostet werden, müssen aus demselben " "Subnetzpool zugeordnet werden." msgid "Suffix to append to all namespace names." msgstr "Der an alle Namensraumnamen anzuhängende Suffix." msgid "" "System-wide flag to determine the type of router that tenants can create. " "Only admin can override." msgstr "" "Systemweites Flag zum Bestimmen des Routertyps, den Nutzer erstellen können. " "Kann nur vom Administrator überschrieben werden." msgid "TCP Port to listen for metadata server requests." msgstr "TCP-Port zum Empfangen von Anforderungen des Metadatenservers." msgid "TCP Port used by Neutron metadata namespace proxy." 
msgstr "Von Neutron-Metadaten-Namensbereichsproxy verwendeter TCP-Port." msgid "TCP Port used by Nova metadata server." msgstr "Von Nova-Metadatenserver verwendeter TCP-Port." #, python-format msgid "TLD '%s' must not be all numeric" msgstr "TLD '%s' darf nicht ausschließlich numerisch sein" msgid "TOS for vxlan interface protocol packets." msgstr "TOS für VXLAN-Schnittstellenprotokollpakete." msgid "TTL for vxlan interface protocol packets." msgstr "TTL für VXLAN-Schnittstellenprotokollpakete." #, python-format msgid "Table %s can only be queried by UUID" msgstr "Tabelle %s kann nur über UUID abgefragt werden" #, python-format msgid "Tag %(tag)s could not be found." msgstr "Schlagwort %(tag)s konnte nicht gefunden werden." #, python-format msgid "Tenant %(tenant_id)s not allowed to create %(resource)s on this network" msgstr "Nutzer %(tenant_id)s darf %(resource)s auf diesem Netz nicht erstellen" msgid "Tenant id for connecting to designate in admin context" msgstr "" "Mandanten-ID zum Herstellen einer Verbindung zu Designate im " "Administratorkontext." msgid "Tenant name for connecting to designate in admin context" msgstr "" "Mandantenname zum Herstellen einer Verbindung zu Designate im " "Administratorkontext." msgid "Tenant network creation is not enabled." msgstr "Erstellung von Mandantennetzwerken ist nicht aktiviert." msgid "Tenant-id was missing from quota request." msgstr "Fehlende Mandanten-ID in der Kontigentanforderung." msgid "" "The 'gateway_external_network_id' option must be configured for this agent " "as Neutron has more than one external network." msgstr "" "Die Option 'gateway_external_network_id' muss für diesen Agenten " "konfiguriert werden, da Neutron über mehr als ein externes Netz verfügt." msgid "" "The DHCP agent will resync its state with Neutron to recover from any " "transient notification or RPC errors. The interval is number of seconds " "between attempts." msgstr "" "Der DHCP-Agent resynchronisiert seinen Status mit Neutron zur " "Wiederherstellung nach temporären Benachrichtigungen oder RPC-Fehlern. Das " "Intervall ist die Anzahl der Sekunden zwischen den " "Wiederherstellungsversuchen." msgid "" "The DHCP server can assist with providing metadata support on isolated " "networks. Setting this value to True will cause the DHCP server to append " "specific host routes to the DHCP request. The metadata service will only be " "activated when the subnet does not contain any router port. The guest " "instance must be configured to request host routes via DHCP (Option 121). " "This option doesn't have any effect when force_metadata is set to True." msgstr "" "Der DHCP-Server kann zur Bereitstellung von Metadatenunterstützung für " "isolierte Netze beitragen. Wenn Sie diesen Wert auf 'True' setzen, hängt der " "DHCP-Server bestimmte Hostrouten an die DHCP-Anforderung an. Der " "Metadatendienst wird nur aktiviet, wenn das Subnetz keinen Router-Port " "enthält. Die Gastinstanz muss so konfiguriert sein, dass Hostrouten über " "DHCP (Option 121) angefordert werden. Diese Option ist wirkungslos, wenn " "'force_metadata' auf 'True' gesetzt wird." #, python-format msgid "" "The HA Network CIDR specified in the configuration file isn't valid; " "%(cidr)s." msgstr "" "Das in der Konfigurationsdatei angegebene CIDR für das " "Hochverfügbarkeitsnetz ist nicht gültig; %(cidr)s." msgid "The UDP port to use for VXLAN tunnels." msgstr "UDP-Port für VXLAN-Tunnel." 
#, python-format msgid "" "The address allocation request could not be satisfied because: %(reason)s" msgstr "" "Die Adresszuordnungsanforderung konnte nicht erfüllt werden: %(reason)s" msgid "The advertisement interval in seconds" msgstr "Ankündigungsintervall in Sekunden" #, python-format msgid "The allocation pool %(pool)s is not valid." msgstr "Der Zuordnungspool %(pool)s ist nicht gültig." #, python-format msgid "" "The allocation pool %(pool)s spans beyond the subnet cidr %(subnet_cidr)s." msgstr "" "Der Zuordnungspool %(pool)s geht über das Teilnetz-CIDR %(subnet_cidr)s " "hinaus." #, python-format msgid "" "The attribute '%(attr)s' is reference to other resource, can't used by sort " "'%(resource)s'" msgstr "" "Das Attribut %(attr)s ist ein Verweis auf eine andere Ressource und kann bei " "der Sortierung von %(resource)s nicht verwendet werden" msgid "" "The base MAC address Neutron will use for VIFs. The first 3 octets will " "remain unchanged. If the 4th octet is not 00, it will also be used. The " "others will be randomly generated." msgstr "" "Die MAC-Basisadresse, die Neutron für VIFs verwendet. Die ersten drei " "Oktetts bleiben unverändert. Wenn das vierte Oktett nicht 00 ist, wird es " "ebenfalls verwendet. Die anderen werden zufällig generiert. " msgid "" "The base mac address used for unique DVR instances by Neutron. The first 3 " "octets will remain unchanged. If the 4th octet is not 00, it will also be " "used. The others will be randomly generated. The 'dvr_base_mac' *must* be " "different from 'base_mac' to avoid mixing them up with MAC's allocated for " "tenant ports. A 4 octet example would be dvr_base_mac = fa:16:3f:4f:00:00. " "The default is 3 octet" msgstr "" "Die MAC-Basisadresse, die durch Neutron für eindeutige DVR-Instanzen " "verwendet wird. Die ersten 3 Oktetts bleiben unverändert. Wenn das 4. Oktett " "nicht 00 ist, wird es ebenfalls verwendet. Die anderen werden zufällig " "generiert. Die 'dvr_base_mac' *muss* sich von 'base_mac' unterscheiden, um " "eine Vermischung mit zugeordneten MACs für Nutzerports zu vermeiden. " "Beispiel mit 4 Oktetts: dvr_base_mac = fa:16:3f:4f:00:00. Standardmäßig " "werden 3 Oktetts verwendet" msgid "" "The connection string for the native OVSDB backend. Requires the native " "ovsdb_interface to be enabled." msgstr "" "Die Verbindungszeichenfolge für das native OVSDB-Backend. Erfordert, dass " "die native ovsdb_interface-Einstellung aktiviert ist." msgid "The core plugin Neutron will use" msgstr "Core-Plugin, das Neutron verwenden wird" #, python-format msgid "" "The dns_name passed is a FQDN. Its higher level labels must be equal to the " "dns_domain option in neutron.conf, that has been set to '%(dns_domain)s'. It " "must also include one or more valid DNS labels to the left of " "'%(dns_domain)s'" msgstr "" "Der übergebene dns_name ist ein FQDN. Seine Bezeichnungen auf höherer Ebene " "müssen mit der Option dns_domain in neutron.conf übereinstimmen, die auf " "'%(dns_domain)s' festgelegt wurde. Enthalten sein muss auch mindestens eine " "gültige DNS-Bezeichnung links von '%(dns_domain)s'" #, python-format msgid "" "The dns_name passed is a PQDN and its size is '%(dns_name_len)s'. The " "dns_domain option in neutron.conf is set to %(dns_domain)s, with a length of " "'%(higher_labels_len)s'. When the two are concatenated to form a FQDN (with " "a '.' at the end), the resulting length exceeds the maximum size of " "'%(fqdn_max_len)s'" msgstr "" "Der übergebene dns_name ist ein PQDN mit einer Größe von '%(dns_name_len)s'. 
" "Die Option dns_domain in neutron.conf wurde auf %(dns_domain)s festgelegt, " "bei einer Länge von '%(higher_labels_len)s'. Wenn beide verkettet werden, um " "einen FQDN zu bilden (mit einem '.' am Ende), überschreitet die Gesamtlänge " "die maximale Größe von '%(fqdn_max_len)s'" msgid "The driver used to manage the DHCP server." msgstr "Der für die Verwaltung des DHCP-Servers verwendete Treiber." msgid "The driver used to manage the virtual interface." msgstr "" "Der für die Verwaltung der virtuellen Schnittstelle verwendete Treiber." msgid "" "The email address to be used when creating PTR zones. If not specified, the " "email address will be admin@" msgstr "" "Die beim Erstellen PTR-Zoenen zu verwendende E-Mail-Adresse. Ohne Angabe " "einer E-Mail-Adresse wird die E-Mail-Adresse admin@ verwendet." #, python-format msgid "" "The following device_id %(device_id)s is not owned by your tenant or matches " "another tenants router." msgstr "" "Die folgende device_id %(device_id)s gehört weder Ihrem Nutzer, noch " "entspricht sie dem Router eines anderen Nutzers." msgid "The host IP to bind to" msgstr "Die Host-IP an die gebunden werden soll" msgid "The interface for interacting with the OVSDB" msgstr "Die Schnittstelle zur Kommunikation mit OVSDB" msgid "" "The maximum number of items returned in a single response, value was " "'infinite' or negative integer means no limit" msgstr "" "Maximale Anzahl an in einer einzelnen Antwort zurückgegebenen Elementen. Der " "Wert 'infinite' oder eine negative Ganzzahl bedeuten, dass es keine " "Begrenzung gibt." #, python-format msgid "" "The network %(network_id)s has been already hosted by the DHCP Agent " "%(agent_id)s." msgstr "" "Das Netz %(network_id)s wurde bereits vom DHCP-Agenten %(agent_id)s gehostet." #, python-format msgid "" "The network %(network_id)s is not hosted by the DHCP agent %(agent_id)s." msgstr "" "Das Netz %(network_id)s wird nicht vom DHCP-Agenten %(agent_id)s gehostet." msgid "" "The network type to use when creating the HA network for an HA router. By " "default or if empty, the first 'tenant_network_types' is used. This is " "helpful when the VRRP traffic should use a specific network which is not the " "default one." msgstr "" "Der Netztyp, der beim Erstellen des HA-Netzes für einen HA-Router verwendet " "werden soll. Standardmäßig oder bei fehlender Angabe wird das erste " "'tenant_network_types' verwendet. Dies ist hilfreich, wenn der VRRP-" "Datenverkehr ein bestimmtes Netz verwenden soll, das nicht das Standardnetz " "ist." #, python-format msgid "The number of allowed address pair exceeds the maximum %(quota)s." msgstr "" "Die Anzahl an zulässigen Adresspaaren überschreitet das Maximum %(quota)s." msgid "" "The number of seconds the agent will wait between polling for local device " "changes." msgstr "" "Die Anzahl an Sekunden, die der Agent zwischen Abfragen lokaler " "Geräteänderungen wartet." msgid "" "The number of seconds to wait before respawning the ovsdb monitor after " "losing communication with it." msgstr "" "Die Anzahl an Sekunden, die gewartet werden soll, bevor die ovsdb-" "Überwachung nach einer Kommunikationsunterbrechung erneut generiert wird." msgid "The number of sort_keys and sort_dirs must be same" msgstr "Die Anzahl an 'sort_keys' und 'sort_dirs' muss gleich sein" msgid "" "The path for API extensions. Note that this can be a colon-separated list of " "paths. For example: api_extensions_path = extensions:/path/to/more/exts:/" "even/more/exts. 
The __path__ of neutron.extensions is appended to this, so " "if your extensions are in there you don't need to specify them here." msgstr "" "Der Pfad für API-Erweiterungen. Beachten Sie, dass dies eine durch Doppelpunkte " "getrennte Liste von Pfaden sein kann. Beispiel: api_extensions_path = " "extensions:/path/to/more/exts:/even/more/exts. An diesen Pfad wird '__path__ " "of neutron.extensions' angehängt, sodass Sie Ihre Erweiterungen hier nicht " "mehr angeben müssen, wenn sie dort bereits angegeben wurden." msgid "The physical network name with which the HA network can be created." msgstr "" "Der Name des physischen Netzes, mit dem das HA-Netz erstellt werden kann." #, python-format msgid "The port '%s' was deleted" msgstr "Der Port '%s' wurde gelöscht" msgid "The port to bind to" msgstr "Der Port, an den gebunden werden soll" #, python-format msgid "The requested content type %s is invalid." msgstr "Der angeforderte Inhaltstyp %s ist ungültig." msgid "The resource could not be found." msgstr "Die Ressource konnte nicht gefunden werden." #, python-format msgid "" "The router %(router_id)s has been already hosted by the L3 Agent " "%(agent_id)s." msgstr "" "Der Router %(router_id)s wurde bereits vom L3-Agenten %(agent_id)s gehostet." msgid "" "The server has either erred or is incapable of performing the requested " "operation." msgstr "" "Auf dem Server ist entweder ein Fehler aufgetreten oder der Server kann die " "angeforderte Operation nicht ausführen." msgid "The service plugins Neutron will use" msgstr "Service-Plugins, die Neutron verwenden wird" #, python-format msgid "The subnet request could not be satisfied because: %(reason)s" msgstr "Die Teilnetzanforderung konnte nicht erfüllt werden: %(reason)s" #, python-format msgid "The subproject to execute the command against. Can be one of: '%s'." msgstr "" "Das Unterprojekt, für das der Befehl ausgeführt werden soll. Mögliche Werte: " "'%s'." msgid "The type of authentication to use" msgstr "Der zu verwendende Authentifizierungstyp" #, python-format msgid "The value '%(value)s' for %(element)s is not valid." msgstr "Der Wert '%(value)s' für %(element)s ist ungültig." msgid "" "The working mode for the agent. Allowed modes are: 'legacy' - this preserves " "the existing behavior where the L3 agent is deployed on a centralized " "networking node to provide L3 services like DNAT, and SNAT. Use this mode if " "you do not want to adopt DVR. 'dvr' - this mode enables DVR functionality " "and must be used for an L3 agent that runs on a compute host. 'dvr_snat' - " "this enables centralized SNAT support in conjunction with DVR. This mode " "must be used for an L3 agent running on a centralized node (or in single-" "host deployments, e.g. devstack)" msgstr "" "Der Betriebsmodus für den Agenten. Zulässige Modi sind: 'legacy' - Hierbei " "wird das aktuelle Verhalten beibehalten, bei dem der Agent der Ebene 3 (L3 - " "Level 3) auf einem zentralisierten Netzknoten implementiert wird, um L3-" "Services wie DNAT und SNAT bereitzustellen. Verwenden Sie diesen Modus, wenn " "Sie DVR nicht annehmen möchten. 'dvr' - Mit diesem Modus wird die DVR-" "Funktionalität aktiviert. Er muss für L3-Agenten verwendet werden, die auf " "einem Rechenhost ausgeführt werden. 'dvr_snat' - Hiermit wird die " "zentralisierte SNAT-Unterstützung in Kombination mit DVR aktiviert. Dieser " "Modus muss für L3-Agenten verwendet werden, die auf einem zentralisierten " "Knoten (oder in Implementierungen mit einem einzelnen Host, z. B. devstack) " "ausgeführt werden." 
msgid "" "There are routers attached to this network that depend on this policy for " "access." msgstr "" "Diesem Netz sind Router zugeordnet, die für den Zugriff von dieser " "Richtlinie abhängig sind." msgid "" "This will choose the web framework in which to run the Neutron API server. " "'pecan' is a new experiemental rewrite of the API server." msgstr "" "Dies legt das Web-Framework fest, in dem der Neutron-API-Server ausgeführt " "werden soll. 'pecan' ist eine neu programmierte experimentelle Version des " "API-Servers." msgid "Timeout" msgstr "Zeitüberschreitung" msgid "" "Timeout in seconds for ovs-vsctl commands. If the timeout expires, ovs " "commands will fail with ALARMCLOCK error." msgstr "" "Zeitlimit in Sekunden für ovs-vsctl-Befehle. Wenn das Zeitlimit abgelaufen " "ist, schlagen ovs-Befehle mit einem Fehler vom Typ ALARMCLOCK fehl." msgid "" "Timeout in seconds to wait for a single OpenFlow request. Used only for " "'native' driver." msgstr "" "Zeitlimit in Sekunden für die Wartezeit auf eine einzelne OpenFlow-" "Anforderung. Wird nur für 'native' Treiber verwendet." msgid "" "Timeout in seconds to wait for the local switch connecting the controller. " "Used only for 'native' driver." msgstr "" "Zeitlimit in Sekunden für die Wartezeit, in der der lokale Switch die " "Verbindung mit dem Controller hergestellt haben muss. Wird nur für 'native' " "Treiber verwendet." msgid "" "Too long prefix provided. New name would exceed given length for an " "interface name." msgstr "" "Es wurde ein Präfix angegeben, das zu lang ist. Der neue Name überschreitet " "damit die für einen Schnittstellennamen vorgegebene Länge." msgid "Too many availability_zone_hints specified" msgstr "Es wurden zu viele 'availability_zone_hints' angegeben." msgid "" "True to delete all ports on all the OpenvSwitch bridges. False to delete " "ports created by Neutron on integration and external network bridges." msgstr "" "'True' zum Löschen aller Ports auf den OpenvSwitch-Brücken. 'False' zum " "Löschen von Ports, die von Neutron auf Integrationsbrücken und externen " "Netzbrücken erstellt wurden." msgid "Tunnel IP value needed by the ML2 plugin" msgstr "Tunnel-IP-Wert für ML2-Plug-in erforderlich" msgid "Tunnel bridge to use." msgstr "Zu verwendende Tunnelbrücke." msgid "" "Type of the nova endpoint to use. This endpoint will be looked up in the " "keystone catalog and should be one of public, internal or admin." msgstr "" "Typ des zu verwendenden Nova-Endpunkts. Dieser Endpunkt wird im Keystone-" "Katalog gesucht und muss vom Typ 'public', 'internal' oder 'admin' sein." msgid "URL for connecting to designate" msgstr "URL zum Herstellen einer Verbindung zu Designate. " msgid "URL to database" msgstr "URL an Datenbank" #, python-format msgid "Unable to access %s" msgstr "Kein Zugriff auf %s möglich" #, python-format msgid "" "Unable to allocate subnet with prefix length %(prefixlen)s, maximum allowed " "prefix is %(max_prefixlen)s." msgstr "" "Das Subnetz mit der Präfixlänge %(prefixlen)s kann nicht zugeordnet werden. " "Die zulässige Maximalpräfixlänge ist %(max_prefixlen)s." #, python-format msgid "" "Unable to allocate subnet with prefix length %(prefixlen)s, minimum allowed " "prefix is %(min_prefixlen)s." msgstr "" "Das Subnetz mit der Präfixlänge %(prefixlen)s kann nicht zugeordnet werden. " "Die zulässige Mindestpräfixlänge ist %(min_prefixlen)s." #, python-format msgid "Unable to calculate %(address_type)s address because of:%(reason)s" msgstr "Fehler beim Berechnen der %(address_type)s-Adresse. 
Grund: %(reason)s" #, python-format msgid "" "Unable to complete operation for %(router_id)s. The number of routes exceeds " "the maximum %(quota)s." msgstr "" "Operation kann für %(router_id)s nicht abgeschlossen werden. Die Anzahl an " "Routen überschreitet den maximalen Wert %(quota)s." #, python-format msgid "" "Unable to complete operation for %(subnet_id)s. The number of DNS " "nameservers exceeds the limit %(quota)s." msgstr "" "Operation kann für %(subnet_id)s nicht abgeschlossen werden. Die Anzahl an " "DNS-Namensservern überschreitet den Grenzwert %(quota)s." #, python-format msgid "" "Unable to complete operation for %(subnet_id)s. The number of host routes " "exceeds the limit %(quota)s." msgstr "" "Operation kann für %(subnet_id)s nicht abgeschlossen werden. Die Anzahl an " "Hostroutes überschreitet den Grenzwert %(quota)s." #, python-format msgid "" "Unable to complete operation on address scope %(address_scope_id)s. There " "are one or more subnet pools in use on the address scope" msgstr "" "Operation auf Adressbereich %(address_scope_id)s kann nicht abgeschlossen " "werden. Mindestens ein Teilnetzpool wird gerade im Adressbereich verwendet" #, python-format msgid "Unable to convert value in %s" msgstr "Wert in %s kann nicht konvertiert werden" msgid "Unable to create the Agent Gateway Port" msgstr "Agent-Gateway-Port kann nicht erstellt werden" msgid "Unable to create the SNAT Interface Port" msgstr "SNAT-Schnittstellenport kann nicht erstellt werden" #, python-format msgid "" "Unable to create the flat network. Physical network %(physical_network)s is " "in use." msgstr "" "Das einfache Netz kann nicht erstellt werden. Das physische Netz " "%(physical_network)s ist belegt." msgid "" "Unable to create the network. No available network found in maximum allowed " "attempts." msgstr "" "Das Netz kann nicht erstellt werden. Es wurde bei den maximal zulässigen " "Versuchen kein verfügbares Netz gefunden." #, python-format msgid "Unable to delete subnet pool: %(reason)s." msgstr "Löschen von Subnetzpool nicht möglich: %(reason)s" #, python-format msgid "Unable to determine mac address for %s" msgstr "MAC-Adresse für %s kann nicht bestimmt werden" #, python-format msgid "Unable to find '%s' in request body" msgstr "'%s' kann in Anforderungshauptteil nicht gefunden werden" #, python-format msgid "Unable to find IP address %(ip_address)s on subnet %(subnet_id)s" msgstr "" "IP-Adresse %(ip_address)s auf Teilnetz %(subnet_id)s wurde nicht gefunden" #, python-format msgid "Unable to find resource name in %s" msgstr "Ressourcenname kann nicht in %s gefunden werden" msgid "Unable to generate IP address by EUI64 for IPv4 prefix" msgstr "" "IP-Adresse kann nicht mithilfe von EUI64 mit dem IPv4-Präfix generiert werden" #, python-format msgid "Unable to generate unique DVR mac for host %(host)s." msgstr "" "Eindeutige DVR-MAC-Adresse for Host %(host)s kann nicht generiert werden." #, python-format msgid "Unable to generate unique mac on network %(net_id)s." msgstr "" "Eindeutige MAC-Adresse kann auf Netz %(net_id)s nicht generiert werden." #, python-format msgid "" "Unable to identify a target field from:%s. Match should be in the form " "%%()s" msgstr "" "Zielfeld kann nicht aus %s identifiziert werden. Übereinstimmung sollte im " "Format %%()s vorliegen" msgid "Unable to provide external connectivity" msgstr "Externe Konnektivität kann nicht bereitgestellt werden." msgid "Unable to provide tenant private network" msgstr "Das private Mandantennetz kann nicht bereitgestellt werden." 
#, python-format msgid "" "Unable to reconfigure sharing settings for network %(network)s. Multiple " "tenants are using it." msgstr "" "Freigabeeinstellungen für Netz %(network)s können nicht rekonfiguriert " "werden. Mehrere Mandanten verwenden es." #, python-format msgid "Unable to update address scope %(address_scope_id)s : %(reason)s" msgstr "" "Adressbereich %(address_scope_id)s konnte nicht aktualisiert werden: " "%(reason)s" #, python-format msgid "Unable to update the following object fields: %(fields)s" msgstr "" "Die folgenden Objektfelder konnten nicht aktualisiert werden: %(fields)s" #, python-format msgid "" "Unable to verify match:%(match)s as the parent resource: %(res)s was not " "found" msgstr "" "Übereinstimmung %(match)s kann nicht als übergeordnete Ressource bestätigt " "werden: %(res)s wurde nicht gefunden" #, python-format msgid "Unexpected label for script %(script_name)s: %(labels)s" msgstr "Nicht erwartete Bezeichnung für Script %(script_name)s: %(labels)s" #, python-format msgid "Unexpected number of alembic branch points: %(branchpoints)s" msgstr "Unerwartete Anzahl an Alembic-Verzweigungspunkten: %(branchpoints)s" #, python-format msgid "Unexpected response code: %s" msgstr "Unerwarteter Antwortcode: %s" #, python-format msgid "Unexpected response: %s" msgstr "Unerwartete Antwort: %s" #, python-format msgid "Unit name '%(unit)s' is not valid." msgstr "Einheitenname '%(unit)s' ist nicht gültig." msgid "Unknown API version specified" msgstr "Unbekannte API-Version angegeben" #, python-format msgid "Unknown address type %(address_type)s" msgstr "Unbekannter Adresstyp %(address_type)s" #, python-format msgid "Unknown attribute '%s'." msgstr "Unbekanntes Attribut '%s'." #, python-format msgid "Unknown chain: %r" msgstr "Unbekannte Kette: %r" #, python-format msgid "Unknown network type %(network_type)s." msgstr "Unerwarteter Netztyp %(network_type)s." #, python-format msgid "Unknown quota resources %(unknown)s." msgstr "Unbekannte Quotenressourcen %(unknown)s." msgid "Unmapped error" msgstr "Nicht zugeordneter Fehler" msgid "Unrecognized action" msgstr "Nicht erkannte Aktion" #, python-format msgid "Unrecognized attribute(s) '%s'" msgstr "Nicht erkannte(s) Attribut(e) '%s'" msgid "Unrecognized field" msgstr "Nicht erkanntes Feld" msgid "Unspecified minimum subnet pool prefix." msgstr "Nicht angegebenes Minimum für Präfix für Subnetzpool." msgid "Unsupported Content-Type" msgstr "Nicht unterstützter Inhaltstyp" #, python-format msgid "Unsupported network type %(net_type)s." msgstr "Nicht unterstützter Netztyp %(net_type)s." #, python-format msgid "Unsupported port state: %(port_state)s." msgstr "Nicht unterstützter Portstatus: %(port_state)s." msgid "Unsupported request type" msgstr "Nicht unterstützter Anforderungstyp" msgid "Updating default security group not allowed." msgstr "Aktualisieren von Standardsicherheitsgruppe nicht zulässig." msgid "" "Use ML2 l2population mechanism driver to learn remote MAC and IPs and " "improve tunnel scalability." msgstr "" "ML2-l2population-Mechanismus-Treiber verwenden, um ferne MAC- und IP-" "Adressen abzurufen und die Tunnelskalierbarkeit zu verbessern." msgid "Use broadcast in DHCP replies." msgstr "Verwenden Sie Broadcast in DHCP-Antworten." msgid "Use either --delta or relative revision, not both" msgstr "" "Verwenden Sie entweder --delta oder relative Revision, nicht beides gemeinsam" msgid "" "Use ipset to speed-up the iptables based security groups. 
Enabling ipset " "support requires that ipset is installed on L2 agent node." msgstr "" "Verwenden Sie ipset, um die Geschwindigkeit der auf iptables basierenden " "Sicherheitsgruppen zu verbessern. Für die Aktivierung der ipset-" "Unterstützung ist es erforderlich, dass ipset auf einem L2-Agentenknoten " "installiert ist." msgid "" "Use the root helper when listing the namespaces on a system. This may not be " "required depending on the security configuration. If the root helper is not " "required, set this to False for a performance improvement." msgstr "" "Verwenden Sie das Roothilfeprogramm beim Auflisten von Namensbereichen in " "einem System. Dies ist möglicherweise je nach Sicherheitskonfiguration nicht " "erforderlich. Wenn das Roothilfeprogramm nicht erforderlich ist, setzen Sie " "es zugunsten einer Leistungsverbesserung auf 'False'." msgid "" "Use veths instead of patch ports to interconnect the integration bridge to " "physical networks. Support kernel without Open vSwitch patch port support so " "long as it is set to True." msgstr "" "Verwenden Sie virtuelles Ethernet anstelle von Patch-Ports, um die " "Integrationsbrücke mit physischen Netzen zu verbinden. Kernels ohne Patch-" "Port-Unterstützung durch Open vSwitch werden unterstützt, vorausgesetzt der " "Wert ist auf 'True' gesetzt." msgid "User (uid or name) running metadata proxy after its initialization" msgstr "" "Benutzer (Benutzer-ID oder Name), der Metadaten-Proxy nach der " "Initialisierung ausführt" msgid "" "User (uid or name) running metadata proxy after its initialization (if " "empty: agent effective user)." msgstr "" "Benutzer (Benutzer-ID oder Name), der Metadaten-Proxy nach der " "Initialisierung ausführt (falls leer: Agent-ausführender Benutzer)." msgid "User (uid or name) running this process after its initialization" msgstr "" "Benutzer (Benutzer-ID oder Name), der diesen Prozess nach der " "Initialisierung ausführt" msgid "Username for connecting to designate in admin context" msgstr "" "Benutzername zum Herstellen einer Verbindung zu Designate im " "Administratorkontext." msgid "" "Uses veth for an OVS interface or not. Support kernels with limited " "namespace support (e.g. RHEL 6.5) so long as ovs_use_veth is set to True." msgstr "" "Gibt an, ob virtuelles Ethernet für eine OVS-Schnittstelle verwendet wird " "oder nicht. Kernels mit eingeschränkter Namensraumunterstützung (z. B. RHEL " "6.5) werden unterstützt, vorausgesetzt ovs_use_veth ist auf 'True' gesetzt." msgid "VRRP authentication password" msgstr "VRRP-Authentifizierungskennwort" msgid "VRRP authentication type" msgstr "VRRP-Authentifizierungstyp" msgid "VXLAN network unsupported." msgstr "VXLAN-Netzwerk nicht unterstützt." #, python-format msgid "" "Validation of dictionary's keys failed. Expected keys: %(expected_keys)s " "Provided keys: %(provided_keys)s" msgstr "" "Überprüfung der Schlüssel für das Verzeichnis ist fehlgeschlagen. Erwartete " "Schlüssel: %(expected_keys)s Angegebene Schlüssel: %(provided_keys)s" #, python-format msgid "Validator '%s' does not exist." msgstr "Der Validator '%s' ist nicht vorhanden." 
#, python-format msgid "Value %(value)s in mapping: '%(mapping)s' not unique" msgstr "Wert %(value)s in Zuordnung: '%(mapping)s' nicht eindeutig" #, python-format msgid "" "Value of %(parameter)s has to be multiple of %(number)s, with maximum value " "of %(maximum)s and minimum value of %(minimum)s" msgstr "" "Der Wert von %(parameter)s muss ein Vielfaches von %(number)s sein bei einem " "Maximalwert von %(maximum)s und einem Mindestwert von %(minimum)s" msgid "" "Value of host kernel tick rate (hz) for calculating minimum burst value in " "bandwidth limit rules for a port with QoS. See kernel configuration file for " "HZ value and tc-tbf manual for more information." msgstr "" "Der Wert der Host-Kernel-Aktualisierungsrate (hz) für die Berechnung des " "Mindest-Burst-Werts für Bandbreitengrenzwertregeln für einen Port mit QoS. " "Informationen zum HZ-Wert finden Sie in der Kernel-Konfigurationsdatei und " "im tc-tbf-Handbuch." msgid "" "Value of latency (ms) for calculating size of queue for a port with QoS. See " "tc-tbf manual for more information." msgstr "" "Wert der Latenzzeit (ms) für die Berechnung der Warteschlangengröße für " "einen Port mit QoS. Weitere Informationen finden Sie im tc-tbf-Handbuch." msgid "" "Watch file log. Log watch should be disabled when metadata_proxy_user/group " "has no read/write permissions on metadata proxy log file." msgstr "" "Überwachungsdateiprotokoll. Protokollüberwachung sollte deaktiviert sein, " "wenn metadata_proxy_user/group über keine Lese- und Schreibberechtigung für " "die Protokolldatei des Metadaten-Proxys verfügt." msgid "" "When external_network_bridge is set, each L3 agent can be associated with no " "more than one external network. This value should be set to the UUID of that " "external network. To allow L3 agent support multiple external networks, both " "the external_network_bridge and gateway_external_network_id must be left " "empty." msgstr "" "Wenn 'external_network_bridge' definiert ist, kann jeder L3-Agent maximal " "einem externen Netz zugeordnet werden. Dieser Wert sollte für dieses externe " "Netz auf 'UUID' gesetzt werden. Wenn L3-Agenten mehrere externe Netze " "unterstützen können sollen, müssen sowohl der Wert für " "'external_network_bridge' als auch der Wert für " "'gateway_external_network_id' leer bleiben. " msgid "" "When proxying metadata requests, Neutron signs the Instance-ID header with a " "shared secret to prevent spoofing. You may select any string for a secret, " "but it must match here and in the configuration used by the Nova Metadata " "Server. NOTE: Nova uses the same config key, but in [neutron] section." msgstr "" "Beim Proxy-Vorgang von Metadatenanforderungen unterzeichnet Neutron den " "Instanz-ID-Header mit einem geheimen Schlüssel für gemeinsame Nutzung, um " "Spoofing zu verhindern. Sie können für einen geheimen Schlüssel eine " "beliebige Zeichenfolge auswählen. Sie muss jedoch hier und in der vom Nova-" "Metadatenserver verwendeten Konfiguration identisch sein. Hinweis: Nova " "verwendet denselben Konfigurationsschlüssel, allerdings im Abschnitt " "[neutron]." msgid "" "Where to store Neutron state files. This directory must be writable by the " "agent." msgstr "" "Position zum Speichern von Neutron-Statusdateien. Dieses Verzeichnis muss " "für den Agenten beschreibbar sein." msgid "" "With IPv6, the network used for the external gateway does not need to have " "an associated subnet, since the automatically assigned link-local address " "(LLA) can be used. 
However, an IPv6 gateway address is needed for use as the " "next-hop for the default route. If no IPv6 gateway address is configured " "here, (and only then) the neutron router will be configured to get its " "default route from router advertisements (RAs) from the upstream router; in " "which case the upstream router must also be configured to send these RAs. " "The ipv6_gateway, when configured, should be the LLA of the interface on the " "upstream router. If a next-hop using a global unique address (GUA) is " "desired, it needs to be done via a subnet allocated to the network and not " "through this parameter. " msgstr "" "Mit IPv6 benötigt das Netz, das für das externe Gateway verwendet wird, kein " "zugehöriges Teilnetz, da die automatisch zugewiesene LLA (Link-Local " "Address) verwendet werden kann. Eine IPv6-Gateway-Adresse ist jedoch für die " "Verwendung als Next-Hop für die Standardroute erforderlich. Ist hier keine " "IPv6-Gateway-Adresse konfiguriert (und nur dann), wird der Neutron-Router so " "konfiguriert, dass er die Standardroute von RAs (Router Advertisements) vom " "vorgeschalteten Router erhält; in diesem Fall muss der vorgeschaltete Router " "ebenfalls zum Senden dieser RAs konfiguriert sein. Wenn das ipv6_gateway " "konfiguriert ist, sollte es die LLA der Schnittstelle auf dem " "vorgeschalteten Router sein. Wenn ein Next-Hop benötigt wird, der eine GUA " "(Global Unique Address) verwendet, muss dies über ein Teilnetz geschehen, " "das dem Netz zugeordnet ist, nicht über diesen Parameter. " msgid "You must implement __call__" msgstr "Sie müssen '__call__' implementieren" msgid "" "You must provide a config file for bridge - either --config-file or " "env[NEUTRON_TEST_CONFIG_FILE]" msgstr "" "Sie müssen eine Konfigurationsdatei für die Brücke angeben: entweder '--" "config-file' oder env[NEUTRON_TEST_CONFIG_FILE]" msgid "You must provide a revision or relative delta" msgstr "Sie müssen eine Revision oder ein relatives Delta bereitstellen" msgid "a subnetpool must be specified in the absence of a cidr" msgstr "Ein Subnetzpool muss angegeben werden, wenn cidr nicht angegeben ist." msgid "add_ha_port cannot be called inside of a transaction." msgstr "" "'add_ha_port' kann nicht aus einer Transaktion heraus aufgerufen werden." msgid "allocation_pools allowed only for specific subnet requests." msgstr "" "allocation_pools sind nur für bestimmte Teilnetzanforderungen zulässig." msgid "allocation_pools are not in the subnet" msgstr "allocation_pools sind nicht im Subnetz." msgid "allocation_pools use the wrong ip version" msgstr "allocation_pools verwenden die falsche IP-Version." msgid "already a synthetic attribute" msgstr "Ist bereits ein synthetisches Attribut" msgid "binding:profile value too large" msgstr "Wert für binding:profile zu groß" #, python-format msgid "cannot perform %(event)s due to %(reason)s" msgstr "Ausführen von %(event)s nicht möglich. Ursache: %(reason)s" msgid "cidr and prefixlen must not be supplied together" msgstr "cidr und prefixlen dürfen nicht gemeinsam angegeben werden" #, python-format msgid "dhcp_agents_per_network must be >= 1. '%s' is invalid." msgstr "dhcp_agents_per_network muss >= 1 sein. '%s' ist ungültig." msgid "dns_domain cannot be specified without a dns_name" msgstr "'dns_domain' kann nicht ohne 'dns_name' angegeben werden." msgid "dns_name cannot be specified without a dns_domain" msgstr "'dns_name' kann nicht ohne 'dns_domain' angegeben werden." 
msgid "fixed_ip_address cannot be specified without a port_id" msgstr "'fixed_ip_address' kann nicht ohne 'port_id' angegeben werden" #, python-format msgid "gateway_ip %s is not in the subnet" msgstr "gateway_ip %s ist nicht im Subnetz." #, python-format msgid "has device owner %s" msgstr "hat Geräteeigentümer %s" msgid "in use" msgstr "im Gebrauch" #, python-format msgid "ip command failed on device %(dev_name)s: %(reason)s" msgstr "IP-Befehl fehlgeschlagen auf Einheit %(dev_name)s: %(reason)s" #, python-format msgid "ip command failed: %(reason)s" msgstr "IP-Befehl fehlgeschlagen: %(reason)s" #, python-format msgid "ip link capability %(capability)s is not supported" msgstr "IP-Link-Fähigkeit %(capability)s wird nicht unterstützt" #, python-format msgid "ip link command is not supported: %(reason)s" msgstr "IP-Link-Befehl wird nicht unterstützt: %(reason)s" msgid "ip_version must be specified in the absence of cidr and subnetpool_id" msgstr "" "ip_version muss angegeben werden, wenn cidr und subnetpool_id nicht " "angegeben sind" msgid "ipv6_address_mode is not valid when ip_version is 4" msgstr "ipv6_address_mode ist nicht gültig, wenn ip_version 4 ist" msgid "ipv6_ra_mode is not valid when ip_version is 4" msgstr "ipv6_ra_mode ist nicht gültig, wenn ip_version 4 ist" msgid "" "ipv6_ra_mode or ipv6_address_mode cannot be set when enable_dhcp is set to " "False." msgstr "" "ipv6_ra_mode oder ipv6_address_mode darf nicht gesetzt sein, wenn " "enable_dhcp auf 'False' gesetzt ist." #, python-format msgid "" "ipv6_ra_mode set to '%(ra_mode)s' with ipv6_address_mode set to " "'%(addr_mode)s' is not valid. If both attributes are set, they must be the " "same value" msgstr "" "ipv6_ra_mode kann nicht auf '%(ra_mode)s' gesetzt sein, wenn " "ipv6_address_mode auf '%(addr_mode)s' gesetzt ist. Sind beide Attribute " "gesetzt, müssen sie denselben Wert aufweisen" msgid "mac address update" msgstr "MAC-Adressaktualisierung" #, python-format msgid "" "max_l3_agents_per_router %(max_agents)s config parameter is not valid. It " "has to be greater than or equal to min_l3_agents_per_router %(min_agents)s." msgstr "" "Der Konfigurationsparameter max_l3_agents_per_router %(max_agents)s ist " "ungültig. Ermuss größer-gleich min_l3_agents_per_router %(min_agents)s sein." msgid "must provide exactly 2 arguments - cidr and MAC" msgstr "Es müssen exakt 2 Argumente angegeben werden: cidr und MAC." msgid "network_type required" msgstr "network_type erforderlich" #, python-format msgid "network_type value '%s' not supported" msgstr "network_type-Wert '%s' wird nicht unterstützt" msgid "new subnet" msgstr "Neues Teilnetz" #, python-format msgid "physical_network '%s' unknown for VLAN provider network" msgstr "physical_network '%s' unbekannt für VLAN-Provider-Netz" #, python-format msgid "physical_network '%s' unknown for flat provider network" msgstr "physical_network '%s' unbekannt für einfaches Anbieternetzwerk" msgid "physical_network required for flat provider network" msgstr "physical_network erforderlich für einfaches Anbieternetzwerk" #, python-format msgid "provider:physical_network specified for %s network" msgstr "'provider:physical_network' für %s-Netz angegeben" #, python-format msgid "rbac_db_model not found in %s" msgstr "rbac_db_model nicht in %s gefunden." msgid "record" msgstr "Datensatz" msgid "respawn_interval must be >= 0 if provided." msgstr "respawn_interval muss >= 0 sein, falls angegeben." 
#, python-format msgid "segmentation_id out of range (%(min)s through %(max)s)" msgstr "" "'segmentation_id' außerhalb des gültigen Bereichs (%(min)s bis %(max)s)" msgid "segmentation_id requires physical_network for VLAN provider network" msgstr "segmentation_id erfordert physical_network für VLAN-Provider-Netz" msgid "shared attribute switching to synthetic" msgstr "Gemeinsam genutztes Attribut wird in synthetisches Attribut geändert." #, python-format msgid "" "subnetpool %(subnetpool_id)s cannot be updated when associated with shared " "address scope %(address_scope_id)s" msgstr "" "Teilnetzpool %(subnetpool_id)s kann nach Zuordnung eines gemeinsam genutzten " "Adressbereichs %(address_scope_id)s nicht aktualisiert werden" msgid "subnetpool_id and use_default_subnetpool cannot both be specified" msgstr "" "Es können nicht subnetpool_id und use_default_subnetpool gleichzeitig " "festgelegt werden." msgid "the nexthop is not connected with router" msgstr "Der nächste Hop ist nicht mit dem Router verbunden" msgid "the nexthop is used by router" msgstr "Der nächste Hop wird vom Router verwendet" #, python-format msgid "unable to load %s" msgstr "%s kann nicht geladen werden." msgid "" "uuid provided from the command line so external_process can track us via /" "proc/cmdline interface." msgstr "" "UUID von der Befehlszeile angegeben, damit external_process uns über /proc/" "cmdline-Schnittstelle verfolgen kann." neutron-8.4.0/neutron/locale/it/0000775000567000056710000000000013044373210017720 5ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/locale/it/LC_MESSAGES/0000775000567000056710000000000013044373210021505 5ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/locale/it/LC_MESSAGES/neutron.po0000664000567000056710000050405513044372760023561 0ustar jenkinsjenkins00000000000000# Translations template for neutron. # Copyright (C) 2015 ORGANIZATION # This file is distributed under the same license as the neutron project. # # Translators: # Remo Mattei , 2016. #zanata msgid "" msgstr "" "Project-Id-Version: neutron 8.2.1.dev52\n" "Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n" "POT-Creation-Date: 2016-09-01 18:10+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" "PO-Revision-Date: 2016-04-22 03:55+0000\n" "Last-Translator: Andreas Jaeger \n" "Language: it\n" "Plural-Forms: nplurals=2; plural=(n != 1);\n" "Generated-By: Babel 2.0\n" "X-Generator: Zanata 3.7.3\n" "Language-Team: Italian\n" #, python-format msgid "" "\n" "Command: %(cmd)s\n" "Exit code: %(code)s\n" "Stdin: %(stdin)s\n" "Stdout: %(stdout)s\n" "Stderr: %(stderr)s" msgstr "" "\n" "Comando: %(cmd)s\n" "Codice di uscita: %(code)s\n" "Stdin: %(stdin)s\n" "Stdout: %(stdout)s\n" "Stderr: %(stderr)s" #, python-format msgid "" "%(branch)s HEAD file does not match migration timeline head, expected: " "%(head)s" msgstr "" "Il file HEAD %(branch)s non corrisponde all'head di durata della migrazione, " "previsto: %(head)s" #, python-format msgid "%(driver)s: Internal driver error." msgstr "%(driver)s: errore di driver interno." 
#, python-format msgid "%(id)s is not a valid %(type)s identifier" msgstr "%(id)s non è un identificativo %(type)s valido" #, python-format msgid "" "%(invalid_dirs)s is invalid value for sort_dirs, valid value is '%(asc)s' " "and '%(desc)s'" msgstr "" "%(invalid_dirs)s non è un valore valido per sort_dirs, il valore valido è " "'%(asc)s' e '%(desc)s'" #, python-format msgid "%(key)s prohibited for %(tunnel)s provider network" msgstr "%(key)s non consentito per la rete del provider %(tunnel)s" #, python-format msgid "" "%(method)s called with network settings %(current)s (original settings " "%(original)s) and network segments %(segments)s" msgstr "" "%(method)s è stato chiamato con le impostazioni di rete %(current)s " "(impostazioni originali %(original)s) e segmenti di rete %(segments)s" #, python-format msgid "" "%(method)s called with port settings %(current)s (original settings " "%(original)s) host %(host)s (original host %(original_host)s) vif type " "%(vif_type)s (original vif type %(original_vif_type)s) vif details " "%(vif_details)s (original vif details %(original_vif_details)s) binding " "levels %(levels)s (original binding levels %(original_levels)s) on network " "%(network)s with segments to bind %(segments_to_bind)s" msgstr "" "%(method)s chiamato con le impostazioni di porta %(current)s (impostazioni " "originali %(original)s) host %(host)s (host originale %(original_host)s) " "tipo vif %(vif_type)s (tipo vif originale %(original_vif_type)s) dettagli " "vif %(vif_details)s (dettagli vif originale %(original_vif_details)s) " "livelli di collegamento %(levels)s (livelli di collegamento originali " "%(original_levels)s) sulla rete %(network)s con segmenti da collegare " "%(segments_to_bind)s" #, python-format msgid "" "%(method)s called with subnet settings %(current)s (original settings " "%(original)s)" msgstr "" "%(method)s è stato chiamato con le impostazioni di sottorete %(current)s " "(impostazioni originali %(original)s)" #, python-format msgid "%(method)s failed." msgstr "%(method)s non riuscito." #, python-format msgid "%(name)s '%(addr)s' does not match the ip_version '%(ip_version)s'" msgstr "%(name)s '%(addr)s' non corrisponde alla ip_version '%(ip_version)s'" #, python-format msgid "%(param)s must be in %(range)s range." msgstr "%(param)s deve essere nell'intervallo %(range)s." #, python-format msgid "%s cannot be called while in offline mode" msgstr "%s Impossibile chiamare durante la modalità offline" #, python-format msgid "%s is invalid attribute for sort_key" msgstr "%s è un attributo non valido per sort_key" #, python-format msgid "%s is invalid attribute for sort_keys" msgstr "%s è un attributo non valido per sort_keys" #, python-format msgid "%s is not a valid VLAN tag" msgstr "%s non un tag VLAN valido" #, python-format msgid "%s must be specified" msgstr "%s deve essere specificato" #, python-format msgid "%s must implement get_port_from_device or get_ports_from_devices." msgstr "%s deve implementare get_port_from_device o get_ports_from_devices." #, python-format msgid "%s prohibited for VLAN provider network" msgstr "%s vietato per la rete del provider VLAN" #, python-format msgid "%s prohibited for flat provider network" msgstr "%s vietato per rete flat del provider" #, python-format msgid "%s prohibited for local provider network" msgstr "%s è vietato per la rete del provider locale" #, python-format msgid "" "'%(data)s' contains '%(length)s' characters. 
Adding a domain name will cause " "it to exceed the maximum length of a FQDN of '%(max_len)s'" msgstr "" "'%(data)s' contiene '%(length)s' caratteri. Con l'aggiunta di un nome " "dominio la lunghezza massima di un FQDN di '%(max_len)s' verrà superata" #, python-format msgid "" "'%(data)s' contains '%(length)s' characters. Adding a sub-domain will cause " "it to exceed the maximum length of a FQDN of '%(max_len)s'" msgstr "" "'%(data)s' contiene '%(length)s' caratteri. Con l'aggiunta di un " "sottodominio la lunghezza massima di un FQDN di '%(max_len)s' verrà superata" #, python-format msgid "'%(data)s' exceeds maximum length of %(max_len)s" msgstr "'%(data)s' supera la lunghezza massima di %(max_len)s" #, python-format msgid "'%(data)s' is not an accepted IP address, '%(ip)s' is recommended" msgstr "'%(data)s' non è un indirizzo IP accettato, è consigliato '%(ip)s'" #, python-format msgid "'%(data)s' is not in %(valid_values)s" msgstr "'%(data)s' non è in %(valid_values)s" #, python-format msgid "'%(data)s' is too large - must be no larger than '%(limit)d'" msgstr "'%(data)s' è troppo esteso - non deve superare '%(limit)d'" #, python-format msgid "'%(data)s' is too small - must be at least '%(limit)d'" msgstr "'%(data)s' è troppo piccolo - deve essere almeno '%(limit)d'" #, python-format msgid "'%(data)s' isn't a recognized IP subnet cidr, '%(cidr)s' is recommended" msgstr "" "'%(data)s' non è un cidr della sottorete IP riconosciuto, si consiglia " "'%(cidr)s'" #, python-format msgid "'%(data)s' not a valid PQDN or FQDN. Reason: %(reason)s" msgstr "'%(data)s' non è un PQDN o FQDN valido. Motivo: %(reason)s" #, python-format msgid "'%(host)s' is not a valid nameserver. %(msg)s" msgstr "'%(host)s' non è un nameserver valido. %(msg)s" #, python-format msgid "'%s' Blank strings are not permitted" msgstr "'%s' stringhe vuote non consentite" #, python-format msgid "'%s' cannot be converted to boolean" msgstr "'%s' non può essere convertito in booleano" #, python-format msgid "'%s' cannot be converted to lowercase string" msgstr "'%s' non può essere convertito in stringa di caratteri minuscoli" #, python-format msgid "'%s' contains whitespace" msgstr "'%s' contiene spazi vuoti" #, python-format msgid "'%s' exceeds the 255 character FQDN limit" msgstr "'%s' supera il limite FQDN di 255 caratteri" #, python-format msgid "'%s' is a FQDN. It should be a relative domain name" msgstr "'%s' è un FQDN. 
Deve essere un nome dominio relativo" #, python-format msgid "'%s' is not a FQDN" msgstr "'%s' non è un FQDN" #, python-format msgid "'%s' is not a dictionary" msgstr "'%s' non è un dizionario" #, python-format msgid "'%s' is not a list" msgstr "'%s' non è un elenco" #, python-format msgid "'%s' is not a valid IP address" msgstr "'%s' non è un indirizzo IP valido" #, python-format msgid "'%s' is not a valid IP subnet" msgstr "'%s' non è una sottorete IP valida" #, python-format msgid "'%s' is not a valid MAC address" msgstr "'%s' non è un indirizzo MAC valido" #, python-format msgid "'%s' is not a valid RBAC object type" msgstr "'%s' non è un tipo di oggetto RBAC valido" #, python-format msgid "'%s' is not a valid UUID" msgstr "'%s' non è un UUID valido" #, python-format msgid "'%s' is not a valid boolean value" msgstr "'%s' non è un valore booleano valido" #, python-format msgid "'%s' is not a valid input" msgstr "'%s' non è un input valido" #, python-format msgid "'%s' is not a valid string" msgstr "'%s' non è una stringa valida" #, python-format msgid "'%s' is not an integer" msgstr "'%s' non è un numero intero" #, python-format msgid "'%s' is not an integer or uuid" msgstr "'%s' non è un numero intero o uuid" #, python-format msgid "'%s' is not of the form =[value]" msgstr "'%s' non è nel formato =[value]" #, python-format msgid "'%s' is not supported for filtering" msgstr "'%s' non è supportato per il filtro" #, python-format msgid "'%s' must be a non negative decimal." msgstr "'%s' deve essere un decimale non negativo." #, python-format msgid "'%s' should be non-negative" msgstr "'%s' non dovrebbe essere negativo" msgid "'.' searches are not implemented" msgstr "Le ricerche '.' non sono implementate" #, python-format msgid "'module' object has no attribute '%s'" msgstr "L'oggetto 'module' non ha un attributo '%s'" msgid "'port_max' is smaller than 'port_min'" msgstr "'port_max' è più piccolo di 'port_min'" msgid "" "(Deprecated. Use '--subproject neutron-SERVICE' instead.) The advanced " "service to execute the command against." msgstr "" "(Obsoleto; utilizzare '--subproject neutron-SERVICE'). Il servizio avanzato " "su cui eseguire il comando." msgid "0 is not allowed as CIDR prefix length" msgstr "0 non è consentito come lunghezza del prefisso CIDR" msgid "" "32-bit BGP identifier, typically an IPv4 address owned by the system running " "the BGP DrAgent." msgstr "" "Identificativo BGP da 32-bit, in genere un indirizzo IPv4 di proprietà del " "sistema su cui è in esecuzione BGP DrAgent." msgid "A QoS driver must be specified" msgstr "È necessario specificare un driver QoS" msgid "A cidr must be specified in the absence of a subnet pool" msgstr "È necessario specificare un cidr in assenza di un pool di sottoreti" msgid "" "A decimal value as Vendor's Registered Private Enterprise Number as required " "by RFC3315 DUID-EN." msgstr "" "Un valore decimale come il numero dell'azienda privata registrato dal " "fornitore come richiesto da RFC3315 DUID-EN." #, python-format msgid "A default external network already exists: %(net_id)s." msgstr "Una rete esterna predefinita esiste già: %(net_id)s." msgid "" "A default subnetpool for this IP family has already been set. Only one " "default may exist per IP family" msgstr "" "Un pool di sottorete predefinito per questa famiglia IP è già stato " "impostato. 
Solo un valore predefinito può esistere per famiglia IP" msgid "A metering driver must be specified" msgstr "Specificare un driver di misurazione" msgid "A password must be supplied when using auth_type md5." msgstr "Una password deve essere fornita quando si utilizza auth_type md5." msgid "API for retrieving service providers for Neutron advanced services" msgstr "" "API per il richiamo dei provider del servizio per i servizi Neutron avanzati" msgid "Aborting periodic_sync_routers_task due to an error." msgstr "Interruzione di periodic_sync_routers_task a causa di un errore." msgid "Access to this resource was denied." msgstr "L'accesso a questa risorsa è stato negato." msgid "Action to be executed when a child process dies" msgstr "Azione da eseguire quando termina un processo child" msgid "" "Add comments to iptables rules. Set to false to disallow the addition of " "comments to generated iptables rules that describe each rule's purpose. " "System must support the iptables comments module for addition of comments." msgstr "" "Aggiungere commenti alle regole iptables. Impostare su false per non " "consentire l'aggiunta di commenti alle regole iptables generate che " "descrivono lo scopo di ciascuna regola. Il sistema deve supportare il modulo " "di commenti iptables per l'aggiunta di commenti." msgid "Address not present on interface" msgstr "Indirizzo non presente sull'interfaccia" #, python-format msgid "Address scope %(address_scope_id)s could not be found" msgstr "Impossibile trovare l'ambito indirizzo %(address_scope_id)s" msgid "" "Address to listen on for OpenFlow connections. Used only for 'native' driver." msgstr "" "Indirizzo di ascolto per le connessioni OpenFlow. Utilizzato solo per driver " "'native'." msgid "Adds external network attribute to network resource." msgstr "Aggiunge l'attributo della rete esterna alla risorsa di rete." msgid "Adds test attributes to core resources." msgstr "Aggiunge gli attributi di test alle risorse principali." #, python-format msgid "Agent %(id)s could not be found" msgstr "Impossibile trovare l'agent %(id)s" #, python-format msgid "Agent %(id)s is not a L3 Agent or has been disabled" msgstr "L'agent %(id)s non è un agent L3 oppure è stato disabilitato" #, python-format msgid "Agent %(id)s is not a valid DHCP Agent or has been disabled" msgstr "Agent %(id)s non è un agent DHCP valido oppure è stato disabilitato" msgid "Agent has just been revived" msgstr "L'agent è stato appena ripristinato" msgid "" "Agent starts with admin_state_up=False when enable_new_agents=False. In the " "case, user's resources will not be scheduled automatically to the agent " "until admin changes admin_state_up to True." msgstr "" "L'agent inizia con admin_state_up=False quando enable_new_agents=False. In " "tal caso, le risorse dell'utente non saranno pianificate automaticamente per " "l'agent finché l'admin non modifica admin_state_up in True." #, python-format msgid "Agent updated: %(payload)s" msgstr "Agent aggiornato: %(payload)s" #, python-format msgid "" "Agent with agent_type=%(agent_type)s and host=%(host)s could not be found" msgstr "" "Impossibile trovare l'agent con agent_type=%(agent_type)s e host=%(host)s" msgid "Allow auto scheduling networks to DHCP agent." msgstr "Consenti pianificazione automatica delle reti nell'agent DHCP." msgid "Allow auto scheduling of routers to L3 agent." msgstr "Consenti pianificazione automatica dei router nell'agent L3." msgid "" "Allow overlapping IP support in Neutron. 
Attention: the following parameter " "MUST be set to False if Neutron is being used in conjunction with Nova " "security groups." msgstr "" "Consentire il supporto di IP sovrapposti in Neutron. Attenzione: il " "seguente parametro DEVE essere impostato su False se Neutron viene " "utilizzato insieme ai gruppi di sicurezza Nova." msgid "Allow running metadata proxy." msgstr "Consenti l'esecuzione del proxy di metadati." msgid "Allow sending resource operation notification to DHCP agent" msgstr "Consenti l'invio della notifica delle operazioni sulle risorse all'agent DHCP" msgid "Allow the creation of PTR records" msgstr "Consenti la creazione di record PTR" msgid "Allow the usage of the bulk API" msgstr "Consenti l'utilizzo dell'API bulk" msgid "Allow the usage of the pagination" msgstr "Consenti utilizzo paginazione" msgid "Allow the usage of the sorting" msgstr "Consenti utilizzo ordinamento" msgid "Allow to perform insecure SSL (https) requests to nova metadata" msgstr "" "Consentire l'esecuzione di richieste SSL (https) non protette sui metadati " "nova" msgid "Allowed address pairs must be a list." msgstr "Le coppie di indirizzi consentite devono essere un elenco." msgid "AllowedAddressPair must contain ip_address" msgstr "AllowedAddressPair deve contenere ip_address" msgid "" "Allows for serving metadata requests coming from a dedicated metadata access " "network whose CIDR is 169.254.169.254/16 (or larger prefix), and is " "connected to a Neutron router from which the VMs send metadata:1 request. In " "this case DHCP Option 121 will not be injected in VMs, as they will be able " "to reach 169.254.169.254 through a router. This option requires " "enable_isolated_metadata = True." msgstr "" "Consente di servire le richieste di metadati da una rete di accesso ai " "metadati dedicata il cui CIDR è 169.254.169.254/16 (o un prefisso più " "esteso) ed è connessa a un router Neutron da cui le VM inviano i metadati:1 " "richiesta. In questo caso, l'opzione DHCP 121 non sarà inserita nelle VM in " "quanto saranno in grado di raggiungere 169.254.169.254 tramite un " "router. Questa opzione richiede enable_isolated_metadata = True." #, python-format msgid "" "Already hosting BGP Speaker for local_as=%(current_as)d with router_id=" "%(rtid)s." msgstr "" "Speaker BGP per local_as=%(current_as)d con router_id=%(rtid)s già " "ospitato." #, python-format msgid "" "Already hosting maximum number of BGP Speakers. Allowed scheduled count=" "%(count)d" msgstr "" "Numero massimo di speaker BGP già ospitato. Numero pianificato consentito=" "%(count)d" msgid "An RBAC policy already exists with those values." msgstr "Una politica RBAC esiste già con questi valori." msgid "An identifier must be specified when updating a subnet" msgstr "" "Un identificativo deve essere specificato durante l'aggiornamento di una " "sottorete" msgid "An interface driver must be specified" msgstr "È necessario specificare un driver di interfaccia" msgid "" "An ordered list of extension driver entrypoints to be loaded from the " "neutron.ml2.extension_drivers namespace. For example: extension_drivers = " "port_security,qos" msgstr "" "Un elenco ordinato di punti di ingresso del driver di estensione da caricare " "dallo spazio dei nomi neutron.ml2.extension_drivers. Ad esempio: " "extension_drivers = port_security,qos" msgid "" "An ordered list of networking mechanism driver entrypoints to be loaded from " "the neutron.ml2.mechanism_drivers namespace." 
msgstr "" "Un elenco ordinato dei punti di ingresso del driver del meccanismo di rete " "da caricare dallo spazio dei nomi neutron.ml2.mechanism_drivers." msgid "An unexpected internal error occurred." msgstr "Si è verificato un errore interno imprevisto." msgid "An unknown error has occurred. Please try your request again." msgstr "Si è verificato un errore sconosciuto. Ritentare la richiesta." msgid "Async process didn't respawn" msgstr "Il processo async non ha eseguito la nuova generazione" #, python-format msgid "Attribute '%s' not allowed in POST" msgstr "Attributo '%s' non consentito in POST" #, python-format msgid "Authentication type not supported. Requested type=%(auth_type)s." msgstr "Tipo di autenticazione non supportato. Tipo richiesto=%(auth_type)s." msgid "Authorization URL for connecting to designate in admin context" msgstr "URL autorizzazione per la connessione da designare nel contesto admin" msgid "Automatically remove networks from offline DHCP agents." msgstr "Rimuove automaticamente le reti dagli agent DHCP offline." msgid "" "Automatically reschedule routers from offline L3 agents to online L3 agents." msgstr "" "Ripianifica automaticamente i router dagli agent L3 offline agli agent L3 " "online." msgid "Availability zone of this node" msgstr "Zona di disponibilità di questo nodo" #, python-format msgid "AvailabilityZone %(availability_zone)s could not be found." msgstr "Impossibile trovare la zona di disponibilità %(availability_zone)s." msgid "Available commands" msgstr "Comandi disponibili" #, python-format msgid "" "BGP Peer %(peer_ip)s for remote_as=%(remote_as)s, running for BGP Speaker " "%(speaker_as)d not added yet." msgstr "" "Peer BGP %(peer_ip)s per remote_as=%(remote_as)s, in esecuzione per speaker " "BGP %(speaker_as)d non ancora aggiunto." #, python-format msgid "" "BGP Speaker %(bgp_speaker_id)s is already configured to peer with a BGP Peer " "at %(peer_ip)s, it cannot peer with BGP Peer %(bgp_peer_id)s." msgstr "" "Speaker BGP %(bgp_speaker_id)s già configurato per l'associazione con un " "peer BGP su %(peer_ip)s, non può essere associato al peer BGP " "%(bgp_peer_id)s." #, python-format msgid "" "BGP Speaker for local_as=%(local_as)s with router_id=%(rtid)s not added yet." msgstr "" "Speaker BGP per local_as=%(local_as)s con router_id=%(rtid)s non ancora " "aggiunto." #, python-format msgid "" "BGP peer %(bgp_peer_id)s is not associated with BGP speaker " "%(bgp_speaker_id)s." msgstr "" "Peer BGP %(bgp_peer_id)s non associato allo speaker BGP %(bgp_speaker_id)s." #, python-format msgid "BGP peer %(bgp_peer_id)s not authenticated." msgstr "Peer BGP %(bgp_peer_id)s non autenticato." #, python-format msgid "BGP peer %(id)s could not be found." msgstr "Impossibile trovare il peer BGP %(id)s." #, python-format msgid "" "BGP speaker %(bgp_speaker_id)s is not hosted by the BgpDrAgent %(agent_id)s." msgstr "" "Lo speaker BGP %(bgp_speaker_id)s non è ospitato dal BgpDrAgent %(agent_id)s." #, python-format msgid "BGP speaker %(id)s could not be found." msgstr "Impossibile trovare lo speaker BGP %(id)s." msgid "BGP speaker driver class to be instantiated." msgstr "Classe del driver dello speaker BGP da istanziare." msgid "Backend does not support VLAN Transparency." msgstr "Il backend non supporta la trasparenza VLAN." 
#, python-format msgid "" "Bad prefix or mac format for generating IPv6 address by EUI-64: %(prefix)s, " "%(mac)s:" msgstr "" "Prefisso errato o formato rac per la generazione dell'indirizzo IPv6 da " "EUI-64: %(prefix)s, %(mac)s:" #, python-format msgid "Bad prefix type for generate IPv6 address by EUI-64: %s" msgstr "" "Tipo di prefisso errato per la generazione dell'indirizzo IPv6 da EUI-64: %s" #, python-format msgid "Base MAC: %s" msgstr "MAC base: %s" msgid "" "Base log dir for dnsmasq logging. The log contains DHCP and DNS log " "information and is useful for debugging issues with either DHCP or DNS. If " "this section is null, disable dnsmasq log." msgstr "" "Directory log di base per la registrazione dnsmasq. Il log contiene info di " "log DHCP e DNS ed è utile per il debug dei problemi con DHCP o DNS. Se " "questa sezione è null, disabilitare il log dnsmasq." #, python-format msgid "BgpDrAgent %(agent_id)s is already associated to a BGP speaker." msgstr "BgpDrAgent %(agent_id)s già associato a uno speaker BGP." #, python-format msgid "BgpDrAgent %(id)s is invalid or has been disabled." msgstr "BgpDrAgent %(id)s non è valido o è stato disabilitato." #, python-format msgid "BgpDrAgent updated: %s" msgstr "BgpDrAgent aggiornato: %s" msgid "Body contains invalid data" msgstr "Il corpo contiene dati non validi" msgid "Both network_id and router_id are None. One must be provided." msgstr "network_id e router_id non esistono. È necessario fornirne uno." #, python-format msgid "Bridge %(bridge)s does not exist." msgstr "Il bridge %(bridge)s non esiste." #, python-format msgid "Bridge %s does not exist" msgstr "Il bridge %s non esiste" msgid "Bulk operation not supported" msgstr "Operazione massiccia non supportata" msgid "CIDR to monitor" msgstr "CIDR da monitorare" #, python-format msgid "Callback for %(resource_type)s not found" msgstr "Callback per %(resource_type)s non trovata" #, python-format msgid "Callback for %(resource_type)s returned wrong resource type" msgstr "La callback per %(resource_type)s ha restituito un tipo risorsa errato" #, python-format msgid "Cannot add floating IP to port %s that has no fixed IPv4 addresses" msgstr "" "Impossibile aggiungere l'IP mobile alla porta %s che non ha indirizzi IPv4 " "fissi" #, python-format msgid "Cannot add floating IP to port on subnet %s which has no gateway_ip" msgstr "" "Impossibile aggiungere un IP mobile alla porta sulla sottorete %s che non " "dispone di un gateway_ip" #, python-format msgid "Cannot add multiple callbacks for %(resource_type)s" msgstr "Impossibile aggiungere più callback per %(resource_type)s" #, python-format msgid "Cannot allocate IPv%(req_ver)s subnet from IPv%(pool_ver)s subnet pool" msgstr "" "Impossibile assegnare la sottorete IPv%(req_ver)s dal pool di sottoreti IPv" "%(pool_ver)s" msgid "Cannot allocate requested subnet from the available set of prefixes" msgstr "" "Impossibile assegnare la sottorete richiesta dall'insieme di prefissi " "disponibili" #, python-format msgid "" "Cannot associate floating IP %(floating_ip_address)s (%(fip_id)s) with port " "%(port_id)s using fixed IP %(fixed_ip)s, as that fixed IP already has a " "floating IP on external network %(net_id)s." msgstr "" "Impossibile associare un IP mobile %(floating_ip_address)s (%(fip_id)s) alla " "porta %(port_id)s utilizzando un IP fisso %(fixed_ip)s, in quanto quell'IP " "fisso ha già un IP mobile nella rete esterna %(net_id)s." msgid "" "Cannot change HA attribute of active routers. 
Please set router " "admin_state_up to False prior to upgrade." msgstr "" "Impossibile modificare l'attributo HA di router attivi. Impostare " "admin_state_up del router su False prima dell'aggiornamento." #, python-format msgid "" "Cannot create floating IP and bind it to %s, since that is not an IPv4 " "address." msgstr "" "Impossibile creare l'IP mobile e collegarlo a %s, poiché non è un indirizzo " "IPv4." #, python-format msgid "" "Cannot create floating IP and bind it to Port %s, since that port is owned " "by a different tenant." msgstr "" "Impossibile creare l'IP mobile e collegarlo alla porta %s, poiché tale porta " "è di proprietà di un tenant differente." msgid "Cannot create resource for another tenant" msgstr "Impossibile creare la risorsa per un altro tenant" msgid "Cannot disable enable_dhcp with ipv6 attributes set" msgstr "Impossibile disabilitare enable_dhcp con gli attributi ipv6 impostati" #, python-format msgid "Cannot find %(table)s with %(col)s=%(match)s" msgstr "Impossibile trovare %(table)s con %(col)s=%(match)s" #, python-format msgid "Cannot handle subnet of type %(subnet_type)s" msgstr "Impossibile gestire la sottorete di tipo %(subnet_type)s" msgid "Cannot have multiple IPv4 subnets on router port" msgstr "Impossibile avere più sottoreti IPv4 sulla porta del router" #, python-format msgid "" "Cannot have multiple router ports with the same network id if both contain " "IPv6 subnets. Existing port %(p)s has IPv6 subnet(s) and network id %(nid)s" msgstr "" "Impossibile avere più porte router con lo stesso ID di rete se entrambe " "contengono sottoreti IPv6. La porta esistente %(p)s ha sottoreti IPv6 e ID " "di rete %(nid)s" #, python-format msgid "" "Cannot host distributed router %(router_id)s on legacy L3 agent %(agent_id)s." msgstr "" "Impossibile ospitare il router distribuito %(router_id)s sull'agent legacy " "L3 %(agent_id)s." msgid "Cannot match priority on flow deletion or modification" msgstr "" "Impossibile far corrispondere la priorità nell'eliminazione o modifica del " "flusso" msgid "Cannot mix IPv4 and IPv6 prefixes in a subnet pool." msgstr "Impossibile combinare i prefissi IPv4 e IPv6 in un pool di sottorete." msgid "Cannot specify both --service and --subproject." msgstr "Impossibile specificare entrambi --service e --subproject." msgid "Cannot specify both subnet-id and port-id" msgstr "Impossibile specificare entrambi subnet-id e port-id" msgid "Cannot understand JSON" msgstr "Impossibile riconoscere JSON" #, python-format msgid "Cannot update read-only attribute %s" msgstr "Impossibile aggiornare l'attributo di sola lettura %s" msgid "" "Cannot upgrade active router to distributed. Please set router " "admin_state_up to False prior to upgrade." msgstr "" "Impossibile aggiornare il router attivo a distribuito. Impostare " "admin_state_up del router su False prima di aggiornare." msgid "Certificate Authority public key (CA cert) file for ssl" msgstr "File di chiave pubblica Certificate Authority (CA cert) per ssl" #, python-format msgid "" "Change would make usage less than 0 for the following resources: %(unders)s." msgstr "" "La modifica renderebbe l'utilizzo inferiore a 0 per le seguenti risorse: " "%(unders)s." 
msgid "Check ebtables installation" msgstr "Controlla installazione di ebtables" msgid "Check for ARP header match support" msgstr "Verifica il supporto di corrispondenza intestazione ARP" msgid "Check for ARP responder support" msgstr "Verifica il supporto responder ARP" msgid "Check for ICMPv6 header match support" msgstr "Verifica il supporto di corrispondenza intestazione ICMPv6" msgid "Check for OVS Geneve support" msgstr "Verifica il supporto OVS Geneve" msgid "Check for OVS vxlan support" msgstr "Verifica il supporto OVS vxlan" msgid "Check for VF management support" msgstr "Verifica il supporto di gestione VF management" msgid "Check for iproute2 vxlan support" msgstr "Verifica il supporto iproute2 vxlan" msgid "Check for nova notification support" msgstr "Verifica il supporto di notifica nova" msgid "Check for patch port support" msgstr "Verifica il supporto porta patch" msgid "Check ip6tables installation" msgstr "Controlla installazione di ip6tables" msgid "Check ipset installation" msgstr "Controlla installazione di ipset" msgid "Check keepalived IPv6 support" msgstr "Controlla supporto IPv6 con keepalive" msgid "Check minimal dibbler version" msgstr "Controlla versione dibbler minima" msgid "Check minimal dnsmasq version" msgstr "Verifica versione dnsmasq minima" msgid "Check netns permission settings" msgstr "Verifica le impostazioni di autorizzazione netns" msgid "Check ovs conntrack support" msgstr "Verifica il supporto OVS conntrack" msgid "Check ovsdb native interface support" msgstr "Verifica supporto interfaccia nativa ovsdb" #, python-format msgid "" "Cidr %(subnet_cidr)s of subnet %(subnet_id)s overlaps with cidr %(cidr)s of " "subnet %(sub_id)s" msgstr "" "Cidr %(subnet_cidr)s della sottorete %(subnet_id)s si sovrappone con il cidr " "%(cidr)s della sottorete %(sub_id)s" msgid "Class not found." msgstr "Classe non trovata." msgid "Cleanup resources of a specific agent type only." msgstr "Ripulire solo le risorse di un tipo di agent specifico." msgid "Client certificate for nova metadata api server." msgstr "Certificato client per il server api dei metadati nova" msgid "" "Comma-separated list of : tuples, mapping " "network_device to the agent's node-specific list of virtual functions that " "should not be used for virtual networking. vfs_to_exclude is a semicolon-" "separated list of virtual functions to exclude from network_device. The " "network_device in the mapping should appear in the physical_device_mappings " "list." msgstr "" "Elenco di tuple : che associano " "network_device all'elenco specifico del nodo dell'agent delle funzioni " "virtuali che non devono essere utilizzate per la rete virtuale. " "vfs_to_exclude è un elenco separato da punto e virgola delle funzioni " "virtuali da escludere da network_device. Il network_device nell'associazione " "deve essere presente nell'elenco physical_device_mappings." msgid "" "Comma-separated list of : tuples mapping physical " "network names to the agent's node-specific Open vSwitch bridge names to be " "used for flat and VLAN networks. The length of bridge names should be no " "more than 11. Each bridge must exist, and should have a physical network " "interface configured as a port. All physical networks configured on the " "server should have mappings to appropriate bridges on each agent. Note: If " "you remove a bridge from this mapping, make sure to disconnect it from the " "integration bridge as it won't be managed by the agent anymore. Deprecated " "for ofagent." 
msgstr "" "Elenco di tuple : separate da virgole che " "associano i nomi della rete fisica ai nomi dei bridge Open vSwitch specifici " "del nodo dell'agent da utilizzare per le reti flat e VLAN. La lunghezza dei " "nomi dei bridge non deve essere superiore a 11. Ciascun bridge deve esistere " "e deve avere un'interfaccia di rete fisica configurata come una porta. Tutte " "le reti fisiche configurate sul server devono avere associazioni ai bridge " "appropriati su ciascun agent. Nota: se si rimuove un bridge da questa " "associazione, disconnetterlo dal bridge di integrazione in quanto non verrà " "gestito dall'agent. Obsoleto per ofagent." msgid "" "Comma-separated list of : tuples mapping " "physical network names to the agent's node-specific physical network device " "interfaces of SR-IOV physical function to be used for VLAN networks. All " "physical networks listed in network_vlan_ranges on the server should have " "mappings to appropriate interfaces on each agent." msgstr "" "Elenco di tuple : separate di virgole che " "associano i nomi della rete fisica alle interfacce del dispositivo di rete " "fisico specifico del nodo dell'agent della funzione fisica SR-IOV da " "utilizzare per le reti VLAN. Tutte le reti fisiche elencate in " "network_vlan_ranges sul server devono avere associazioni alle interfacce " "appropriate su ogni agent." msgid "" "Comma-separated list of : tuples " "mapping physical network names to the agent's node-specific physical network " "interfaces to be used for flat and VLAN networks. All physical networks " "listed in network_vlan_ranges on the server should have mappings to " "appropriate interfaces on each agent." msgstr "" "Elenco di tuple : che associano i nomi " "della rete fisica all'interfaccia di rete fisica specifica del nodo " "dell'agent da utilizzare per le reti VLAN. Tutte le reti fisiche elencate in " "network_vlan_ranges sul server devono avere associazioni alle interfacce " "appropriate su ogni agent" msgid "" "Comma-separated list of : tuples enumerating ranges of GRE " "tunnel IDs that are available for tenant network allocation" msgstr "" "Elenco separato da virgole di intervalli di enumerazione tuple :" " ID tunnel GRE disponibili per l'assegnazione di rete tenant" msgid "" "Comma-separated list of : tuples enumerating ranges of " "Geneve VNI IDs that are available for tenant network allocation" msgstr "" "Elenco separato da virgole di intervalli di enumerazione tuple :" " di ID VNI Geneve disponibili per l'assegnazione della rete titolare" msgid "" "Comma-separated list of : tuples enumerating ranges of " "VXLAN VNI IDs that are available for tenant network allocation" msgstr "" "Elenco separato da virgole di intervalli di enumerazione tuple :" " di VXLAN VNI ID disponibili per l'assegnazione della rete tenant" msgid "" "Comma-separated list of supported PCI vendor devices, as defined by " "vendor_id:product_id according to the PCI ID Repository. Default enables " "support for Intel and Mellanox SR-IOV capable NICs." msgstr "" "Elenco separato da virgole di dispositivi vendor PCI supportati, definiti da " "vendor_id:product_id in base al repository ID PCI. L'impostazione " "predefinita abilita il supporto per NIC con capacità Intel e Mellanox." msgid "" "Comma-separated list of the DNS servers which will be used as forwarders." msgstr "" "Elenco separato da virgole dei server DNS che verranno utilizzati come " "server di inoltro." 
msgid "Command to execute" msgstr "Comando da eseguire" msgid "Config file for interface driver (You may also use l3_agent.ini)" msgstr "" "File di configurazione per il driver di interfaccia (È possibile utilizzare " "anche l3_agent.ini)" #, python-format msgid "Conflicting value ethertype %(ethertype)s for CIDR %(cidr)s" msgstr "Valore ethertype %(ethertype)s in conflitto per CIDR %(cidr)s" msgid "" "Controls whether the neutron security group API is enabled in the server. It " "should be false when using no security groups or using the nova security " "group API." msgstr "" "Controlla se l'API del gruppo di sicurezza neutron è abilitata sul server. " "Dovrebbe essere impostata su false quando non si utilizzano gruppi di " "sicurezza o si utilizza l'API del gruppo di sicurezza nova." #, python-format msgid "Could not bind to %(host)s:%(port)s after trying for %(time)d seconds" msgstr "" "Impossibile effettuare il bind a %(host)s:%(port)s dopo aver provato per " "%(time)d secondi" #, python-format msgid "Could not connect to %s" msgstr "Impossibile connettersi a %s" msgid "Could not deserialize data" msgstr "Impossibile deserializzare i dati" #, python-format msgid "Could not retrieve schema from %(conn)s: %(err)s" msgstr "Impossibile richiamare lo schema da %(conn)s: %(err)s" #, python-format msgid "" "Current gateway ip %(ip_address)s already in use by port %(port_id)s. Unable " "to update." msgstr "" "L'ip gateway corrente %(ip_address)s è già in uso dalla porta %(port_id)s. " "Impossibile effettuare l'aggiornamento." msgid "Currently update of HA mode for a DVR/HA router is not supported." msgstr "" "Attualmente l'aggiornamento della modalità HA per un router DVR/HA non è " "supportato." msgid "Currently update of HA mode for a distributed router is not supported." msgstr "" "Attualmente l'aggiornamento della modalità HA per un router distribuito non " "è supportato." msgid "" "Currently update of distributed mode for a DVR/HA router is not supported" msgstr "" "Attualmente l'aggiornamento della modalità distribuita per un router DVR/HA " "non è supportato." msgid "Currently update of distributed mode for an HA router is not supported." msgstr "" "Attualmente l'aggiornamento della modalità distribuita per un router " "distribuito non è supportato." msgid "" "Currently updating a router from DVR/HA to non-DVR non-HA is not supported." msgstr "" "Attualmente l'aggiornamento di un router da DVR/HA a non DVR non HA non è " "supportato." msgid "Currently updating a router to DVR/HA is not supported." msgstr "Attualmente l'aggiornamento di un router a DVR/HA non è supportato." msgid "" "DHCP lease duration (in seconds). Use -1 to tell dnsmasq to use infinite " "lease times." msgstr "" "Durata rilascio DHCP (in secondi). Utilizzare -1 per informare dnsmasq di " "utilizzare infinite volte il rilascio." msgid "" "DVR deployments for VXLAN/GRE/Geneve underlays require L2-pop to be enabled, " "in both the Agent and Server side." msgstr "" "Le distribuzioni DVR per VXLAN/GRE/Geneve sottostanti richiedono che sia " "abilitato L2-pop, sia sul lato agent che server." msgid "" "Database engine for which script will be generated when using offline " "migration." msgstr "" "Motore di database per cui verrà generato lo script quando si utilizza la " "migrazione offline." msgid "" "Default IPv4 subnet pool to be used for automatic subnet CIDR allocation. " "Specifies by UUID the pool to be used in case where creation of a subnet is " "being called without a subnet pool ID. 
If not set then no pool will be used " "unless passed explicitly to the subnet create. If no pool is used, then a " "CIDR must be passed to create a subnet and that subnet will not be allocated " "from any pool; it will be considered part of the tenant's private address " "space. This option is deprecated for removal in the N release." msgstr "" "Il pool della sottorete IPv4 predefinito da utilizzare per l'allocazione " "CIDR della sottorete automatica. Specifica mediante UUID il pool da " "utilizzare nel caso in cui la creazione di una sottorete sia richiamata " "senza un ID pool di sottorete. Se non impostato, nessun pool verrà " "utilizzato, a meno che non sia trasmesso esplicitamente per la creazione " "della sottorete. Se non viene utilizzato nessun pool, un CIDR deve essere " "trasmesso per creare una sottorete e quella sottorete non sarà allocata da " "nessun pool; sarà considerata parte dello spazio degli indirizzi privato del " "tenant. Questa opzione è obsoleta per la rimozione nella release N." msgid "" "Default IPv6 subnet pool to be used for automatic subnet CIDR allocation. " "Specifies by UUID the pool to be used in case where creation of a subnet is " "being called without a subnet pool ID. See the description for " "default_ipv4_subnet_pool for more information. This option is deprecated for " "removal in the N release." msgstr "" "Il pool della sottorete IPv6 predefinito da utilizzare per l'allocazione " "CIDR della sottorete automatica. Specifica mediante UUID il pool da " "utilizzare nel caso in cui la creazione di una sottorete sia richiamata " "senza un ID pool di sottorete. Per ulteriori informazioni, vedere la " "descrizione per default_ipv4_subnet_pool. Questa opzione è obsoleta per la " "rimozione nella release N." msgid "Default driver to use for quota checks" msgstr "Driver predefinito da utilizzare per i controlli di quota" msgid "Default external networks must be shared to everyone." msgstr "Le reti esterne predefinite devono essere condivise con chiunque." msgid "" "Default network type for external networks when no provider attributes are " "specified. By default it is None, which means that if provider attributes " "are not specified while creating external networks then they will have the " "same type as tenant networks. Allowed values for external_network_type " "config option depend on the network type values configured in type_drivers " "config option." msgstr "" "Il tipo di rete predefinito per le reti esterne quando non si specificano " "attributi provider. Per impostazione predefinita è Nessuno, che indica che " "se gli attributi provider non sono stati specificati durante la creazione di " "reti esterne, avranno lo stesso tipo delle reti tenant. I valori " "consentiti per l'opzione config external_network_type dipendono dai valori " "del tipo di rete configurati nell'opzione config type_drivers." msgid "" "Default number of RBAC entries allowed per tenant. A negative value means " "unlimited." msgstr "" "Numero predefinito di voci RBAC consentite per tenant. Un valore negativo " "indica un numero illimitato." msgid "" "Default number of resource allowed per tenant. A negative value means " "unlimited." msgstr "" "Numero predefinito di risorse consentite per tenant. Un valore negativo " "indica un numero illimitato." msgid "Default security group" msgstr "Gruppo di sicurezza predefinito" msgid "Default security group already exists." msgstr "Il gruppo di sicurezza predefinito esiste già." msgid "" "Default value of availability zone hints. 
The availability zone aware " "schedulers use this when the resources availability_zone_hints is empty. " "Multiple availability zones can be specified by a comma separated string. " "This value can be empty. In this case, even if availability_zone_hints for a " "resource is empty, availability zone is considered for high availability " "while scheduling the resource." msgstr "" "Il valore predefinito dei suggerimenti per la zona di disponibilità. Gli " "scheduler che riconoscono la zona di disponibilità utilizzano questo valore " "quando le risorse availability_zone_hints sono vuote. Più zone di " "disponibilità possono essere specificate mediante una stringa separata da " "virgole. Questo valore può essere vuoto. In questo caso, anche se " "availability_zone_hints per una risorsa è vuoto, la zona di disponibilità " "viene considerata per l'elevata disponibilità durante la pianificazione " "della risorsa." msgid "" "Define the default value of enable_snat if not provided in " "external_gateway_info." msgstr "" "Definire il valore predefinito di enable_snat se non fornito in " "external_gateway_info." msgid "" "Defines providers for advanced services using the format: <service_type>:" "<name>:<driver>[:default]" msgstr "" "Definisce i provider per i servizi avanzati utilizzando il formato: " "<service_type>:<name>:<driver>[:default]" msgid "" "Delay within which agent is expected to update existing ports whent it " "restarts" msgstr "" "Ritardo in cui è previsto che l'agent aggiorni le porte esistenti quando " "viene riavviato" msgid "Delete the namespace by removing all devices." msgstr "Elimina lo spazio dei nomi rimuovendo tutti i dispositivi." #, python-format msgid "Deleting port %s" msgstr "Eliminazione della porta %s" #, python-format msgid "Deployment error: %(reason)s." msgstr "Errore di distribuzione: %(reason)s." msgid "Destroy IPsets even if there is an iptables reference." msgstr "Distruggere gli IPset anche se c'è un riferimento iptables." msgid "Destroy all IPsets." msgstr "Distruggere tutti gli IPset." #, python-format msgid "Device %(dev_name)s in mapping: %(mapping)s not unique" msgstr "Dispositivo %(dev_name)s nell'associazione: %(mapping)s non univoco" #, python-format msgid "Device '%(device_name)s' does not exist." msgstr "Il dispositivo '%(device_name)s' non esiste." msgid "Device has no virtual functions" msgstr "Il dispositivo non ha funzioni virtuali" #, python-format msgid "Device name %(dev_name)s is missing from physical_device_mappings" msgstr "Il nome dispositivo %(dev_name)s manca da physical_device_mappings" msgid "Device not found" msgstr "Dispositivo non trovato" #, python-format msgid "" "Distributed Virtual Router Mac Address for host %(host)s does not exist." msgstr "" "L'indirizzo MAC del router virtuale distribuito per l'host %(host)s non " "esiste." #, python-format msgid "Domain %(dns_domain)s not found in the external DNS service" msgstr "Dominio %(dns_domain)s non trovato nel servizio DNS esterno" msgid "Domain to use for building the hostnames" msgstr "Dominio da utilizzare per creare i nomi host" msgid "" "Domain to use for building the hostnames. This option is deprecated. It has " "been moved to neutron.conf as dns_domain. It will be removed in a future " "release." msgstr "" "Dominio da utilizzare per creare i nomi host. Questa opzione è obsoleta. È " "stata spostata in neutron.conf come dns_domain. Verrà rimossa in una futura " "release." 
msgid "Downgrade no longer supported" msgstr "Riduzione non più supportata" #, python-format msgid "Driver %s is not unique across providers" msgstr "Il driver %s non è univoco tra i provider" msgid "Driver for external DNS integration." msgstr "Driver per l'integrazione DNS esterna." msgid "Driver for security groups firewall in the L2 agent" msgstr "Driver per il firewall dei gruppi di sicurezza nell'agent L2" msgid "Driver to use for scheduling network to DHCP agent" msgstr "Driver da utilizzare per la pianificazione della rete nell'agent DHCP" msgid "Driver to use for scheduling router to a default L3 agent" msgstr "" "Driver da utilizzare per la pianificazione del router nell'agent L3 " "predefinito" msgid "" "Driver used for ipv6 prefix delegation. This needs to be an entry point " "defined in the neutron.agent.linux.pd_drivers namespace. See setup.cfg for " "entry points included with the neutron source." msgstr "" "Il driver utilizzato per la delega prefisso ipv6. Deve essere un punto di " "immissione definito nello spazio dei nomi neutron.agent.linux.pd_drivers. " "Consultare setup.cfg per i punti di immissione inclusi con l'origine neutron." msgid "Driver used for scheduling BGP speakers to BGP DrAgent" msgstr "Driver utilizzato per la pianificazione di speaker BGP su BGP DrAgent" msgid "Drivers list to use to send the update notification" msgstr "" "Elenco di driver da utilizzare per inviare la notifica di aggiornamento" #, python-format msgid "Duplicate IP address '%s'" msgstr "Indirizzo IP duplicato '%s'" #, python-format msgid "" "Duplicate L3HARouterAgentPortBinding is created for router(s) %(router)s. " "Database cannot be upgraded. Please, remove all duplicates before upgrading " "the database." msgstr "" "L3HARouterAgentPortBinding duplicato viene creato per i router %(router)s. " "Il database non può essere aggiornato. Rimuovere tutti i duplicati prima di " "aggiornare il database." msgid "Duplicate Metering Rule in POST." msgstr "Regola di misurazione duplicata in POST." msgid "Duplicate Security Group Rule in POST." msgstr "Regola del gruppo di sicurezza duplicata in POST." msgid "Duplicate address detected" msgstr "Rilevato indirizzo duplicato" #, python-format msgid "Duplicate hostroute '%s'" msgstr "Hostroute duplicato '%s'" #, python-format msgid "Duplicate items in the list: '%s'" msgstr "Elementi duplicati nell'elenco: '%s'" #, python-format msgid "Duplicate nameserver '%s'" msgstr "Nameserver duplicato '%s'" msgid "Duplicate segment entry in request." msgstr "Voce del segmento duplicata nella richiesta." #, python-format msgid "ERROR: %s" msgstr "ERRORE: %s" msgid "" "ERROR: Unable to find configuration file via the default search paths (~/." "neutron/, ~/, /etc/neutron/, /etc/) and the '--config-file' option!" msgstr "" "ERRORE: Impossibile trovare il file di configurazione utilizzando i percorsi " "di ricerca predefiniti (~/.neutron/, ~/, /etc/neutron/, /etc/) e l'opzione " "'--config-file'!" msgid "" "Either one of parameter network_id or router_id must be passed to _get_ports " "method." msgstr "" "Uno dei parametri network_id o router_id deve essere passato al metodo " "_get_ports." msgid "Either subnet_id or port_id must be specified" msgstr "È necessario specificare subnet_id o port_id" msgid "Empty physical network name." msgstr "Nome rete fisica vuoto." msgid "Empty subnet pool prefix list." msgstr "Elenco prefisso pool di sottorete vuoto." msgid "Enable FWaaS" msgstr "Abilita FWaaS" msgid "Enable HA mode for virtual routers." 
msgstr "Abilitare la modalità HA per i router virtuali." msgid "Enable SSL on the API server" msgstr "Abilitazione di SSL sul server API" msgid "" "Enable VXLAN on the agent. Can be enabled when agent is managed by ml2 " "plugin using linuxbridge mechanism driver" msgstr "" "Abilitare VXLAN sull'agent. Può essere abilitata quando l'agent è gestito " "dal plugin ml2 utilizzando il driver del meccanismo linuxbridge" msgid "" "Enable local ARP responder if it is supported. Requires OVS 2.1 and ML2 " "l2population driver. Allows the switch (when supporting an overlay) to " "respond to an ARP request locally without performing a costly ARP broadcast " "into the overlay." msgstr "" "Abilitare il responder ARP locale se è supportato. Richiede il driver OVS " "2.1 e ML2 l2population. Consentire allo switch (quando supporta una " "sovrapposizione) di rispondere ad una richiesta ARP in locale senza eseguire " "un broadcast ARP oneroso nella sovrapposizione." msgid "" "Enable local ARP responder which provides local responses instead of " "performing ARP broadcast into the overlay. Enabling local ARP responder is " "not fullycompatible with the allowed-address-pairs extension." msgstr "" "Abilitare il responder ARP locale che fornisce le risposte locali anziché " "eseguire il broadcast ARP nella sovrapposizione. L'abilitazione del " "responder ARP locale non è completamente compatibile con l'estensione " "allowed-address-pairs." msgid "" "Enable services on an agent with admin_state_up False. If this option is " "False, when admin_state_up of an agent is turned False, services on it will " "be disabled. Agents with admin_state_up False are not selected for automatic " "scheduling regardless of this option. But manual scheduling to such agents " "is available if this option is True." msgstr "" "Abilitare i servizi sull'agent con admin_state_up False. Se questa opzione è " "False, quando admin_state_up di un agent è su False, verranno disabilitati i " "servizi su tale agent. Gli agent con admin_state_up False non vengono " "selezionati per la pianificazione automatica indipendentemente da questa " "opzione. Ma è disponibile la pianificazione manuale di tali agent se questa " "opzione è impostata su True." msgid "" "Enable suppression of ARP responses that don't match an IP address that " "belongs to the port from which they originate. Note: This prevents the VMs " "attached to this agent from spoofing, it doesn't protect them from other " "devices which have the capability to spoof (e.g. bare metal or VMs attached " "to agents without this flag set to True). Spoofing rules will not be added " "to any ports that have port security disabled. For LinuxBridge, this " "requires ebtables. For OVS, it requires a version that supports matching ARP " "headers. This option will be removed in Newton so the only way to disable " "protection will be via the port security extension." msgstr "" "Abilitare la soppressione delle risposte ARP che non corrispondono a un " "indirizzo IP che appartiene alla porta da cui provengono. Nota: ciò previene " "lo spoofing sulle VM collegate a questo agent, non le protegge da altri " "dispositivi che hanno la capacità di spoofing (ad es. baremetal o VM " "collegate ad agent senza questo indicatore impostato su True). Le regole di " "spoofing non verranno aggiunte alle porte che hanno la sicurezza " "disabilitata. Per LinuxBridge, ciò richiede ebtables. Per OVS, richiede una " "versione che supporti la corrispondenza con intestazioni ARP. 
Questa opzione " "verrà rimossa in Newton, per cui l'unico modo per disabilitare la protezione " "sarà mediante l'estensione di sicurezza della porta." msgid "" "Enable/Disable log watch by metadata proxy. It should be disabled when " "metadata_proxy_user/group is not allowed to read/write its log file and " "copytruncate logrotate option must be used if logrotate is enabled on " "metadata proxy log files. Option default value is deduced from " "metadata_proxy_user: watch log is enabled if metadata_proxy_user is agent " "effective user id/name." msgstr "" "Abilita/disabilita logwatch mediante il proxy di metadati. Dovrebbe essere " "disabilitato quando non è consentita l'opzione metadata_proxy_user/group per " "leggere/scrivere il relativo file di log e deve essere utilizzata l'opzione " "copytruncate logrotate se è abilitata logrotate sui file di log del proxy di " "metadati. Il valore predefinito dell'opzione viene dedotto da " "metadata_proxy_user: il logwatch è abilitato se metadata_proxy_user è l'ID/" "nome dell'utente operativo dell'agent." msgid "" "Enables IPv6 Prefix Delegation for automatic subnet CIDR allocation. Set to " "True to enable IPv6 Prefix Delegation for subnet allocation in a PD-capable " "environment. Users making subnet creation requests for IPv6 subnets without " "providing a CIDR or subnetpool ID will be given a CIDR via the Prefix " "Delegation mechanism. Note that enabling PD will override the behavior of " "the default IPv6 subnetpool." msgstr "" "Abilita la delegazione del prefisso IPv6 per l'allocazione CIDR della " "sottorete automatica. Impostare su True per abilitare la delegazione del " "prefisso IPv6 per l'allocazione della sottorete in un ambiente con capacità " "PD. Gli utenti che effettuano richieste di creazione di una sottorete per le " "sottoreti IPv6 senza fornire un CIDR o un ID pool di sottorete riceveranno " "un CIDR mediante il meccanismo di delegazione del prefisso. Notare che " "l'abilitazione di PD sostituirà il comportamento del pool di sottorete IPv6 " "predefinito." msgid "" "Enables the dnsmasq service to provide name resolution for instances via DNS " "resolvers on the host running the DHCP agent. Effectively removes the '--no-" "resolv' option from the dnsmasq process arguments. Adding custom DNS " "resolvers to the 'dnsmasq_dns_servers' option disables this feature." msgstr "" "Abilita il servizio dnsmasq a fornire la risoluzione dei nomi per le istanze " "mediante i resolver DNS sull'host che esegue l'agent DHCP. In realtà rimuove " "l'opzione '--no-resolv' dagli argomenti del processo dnsmasq. L'aggiunta di " "resolver DNS personalizzati all'opzione 'dnsmasq_dns_servers' disabilita " "questa funzione." msgid "Encountered an empty component." msgstr "È stato rilevato un componente vuoto." msgid "End of VLAN range is less than start of VLAN range" msgstr "La fine dell'intervallo VLAN è minore dell'inizio dell'intervallo VLAN" msgid "End of tunnel range is less than start of tunnel range" msgstr "" "L'intervallo finale del tunnel è inferiore all'intervallo iniziale del " "tunnel." msgid "Enforce using split branches file structure." msgstr "Applicare l'uso di struttura file con rami suddivisi." msgid "" "Ensure that configured gateway is on subnet. For IPv6, validate only if " "gateway is not a link local address. Deprecated, to be removed during the " "Newton release, at which point the gateway will not be forced on to subnet." msgstr "" "Accertarsi che il gateway configurato sia sulla sottorete. 
Per IPv6, " "convalidare solo se il gateway non è un indirizzo locale di collegamento. " "Obsoleto, da rimuovere durante la release Newton, a quel punto il gateway " "non verrà forzato sulla sottorete." #, python-format msgid "Error %(reason)s while attempting the operation." msgstr "Errore %(reason)s durante l'operazione." #, python-format msgid "Error importing FWaaS device driver: %s" msgstr "Errore durante l'importazione del driver dell'unità FWaaS: %s" #, python-format msgid "Error parsing dns address %s" msgstr "Errore durante l'analisi dell'indirizzo dns %s" #, python-format msgid "Error while reading %s" msgstr "Errore durante la lettura di %s" #, python-format msgid "" "Exceeded %s second limit waiting for address to leave the tentative state." msgstr "" "Superato il limite di %s secondi in attesa che l'indirizzo lasci lo stato " "tentative." msgid "Exceeded maximum amount of fixed ips per port." msgstr "Superata la quantità massima di ip fissi per porta." msgid "Existing prefixes must be a subset of the new prefixes" msgstr "I prefissi esistenti devono essere un sottoinsieme dei nuovi prefissi" #, python-format msgid "" "Exit code: %(returncode)d; Stdin: %(stdin)s; Stdout: %(stdout)s; Stderr: " "%(stderr)s" msgstr "" "Codice uscita: %(returncode)d; Stdin: %(stdin)s; Stdout: %(stdout)s; Stderr: " "%(stderr)s" #, python-format msgid "Extension %(driver)s failed." msgstr "Estensione %(driver)s non riuscita." #, python-format msgid "" "Extension driver %(driver)s required for service plugin %(service_plugin)s " "not found." msgstr "" "Driver di estensione %(driver)s richiesto per il plugin di servizio " "%(service_plugin)s non trovato." msgid "" "Extension to use alongside ml2 plugin's l2population mechanism driver. It " "enables the plugin to populate VXLAN forwarding table." msgstr "" "Estensione da utilizzare insieme al driver del meccanismo l2population del " "plugin ml2. Abilita il plugin a popolare la tabella di inoltro VXLAN." #, python-format msgid "Extension with alias %s does not exist" msgstr "L'estensione con alias %s non esiste" msgid "Extensions list to use" msgstr "Elenco estensioni da utilizzare" #, python-format msgid "Extensions not found: %(extensions)s." msgstr "Estensioni non trovate: %(extensions)s." #, python-format msgid "External DNS driver %(driver)s could not be found." msgstr "Impossibile trovare il driver DNS esterno %(driver)s." #, python-format msgid "External IP %s is the same as the gateway IP" msgstr "L'IP esterno %s è uguale all'IP gateway" #, python-format msgid "" "External network %(external_network_id)s is not reachable from subnet " "%(subnet_id)s. Therefore, cannot associate Port %(port_id)s with a Floating " "IP." msgstr "" "La rete esterna %(external_network_id)s non è raggiungibile dalla sottorete " "%(subnet_id)s. Pertanto, non è possibile associare la porta %(port_id)s a " "un IP mobile." #, python-format msgid "" "External network %(net_id)s cannot be updated to be made non-external, since " "it has existing gateway ports" msgstr "" "La rete esterna %(net_id)s non può essere aggiornata per diventare una rete " "non esterna in quanto ha già le porte gateway esistenti" #, python-format msgid "ExtraDhcpOpt %(id)s could not be found" msgstr "Impossibile trovare ExtraDhcpOpt %(id)s" msgid "" "FWaaS plugin is configured in the server side, but FWaaS is disabled in L3-" "agent." msgstr "" "Il plugin FWaaS è configurato sul lato server, ma FWaaS è disabilitato " "nell'agent L3." 
#, python-format msgid "Failed rescheduling router %(router_id)s: no eligible l3 agent found." msgstr "" "Impossibile ripianificare il router %(router_id)s: non è stato trovato " "nessun agent L3 adatto." #, python-format msgid "Failed scheduling router %(router_id)s to the L3 Agent %(agent_id)s." msgstr "" "Impossibile pianificare il router %(router_id)s per l'agent L3 %(agent_id)s." #, python-format msgid "" "Failed to allocate a VRID in the network %(network_id)s for the router " "%(router_id)s after %(max_tries)s tries." msgstr "" "Impossibile allocare un VRID nella rete %(network_id)s per il router " "%(router_id)s dopo %(max_tries)s tentativi." #, python-format msgid "Failed to allocate subnet: %(reason)s." msgstr "Impossibile assegnare la sottorete: %(reason)s." msgid "" "Failed to associate address scope: subnetpools within an address scope must " "have unique prefixes." msgstr "" "Impossibile associare l'ambito di indirizzo: i pool di sottorete in un " "ambito di indirizzo devono avere prefissi univoci." #, python-format msgid "Failed to check policy %(policy)s because %(reason)s." msgstr "Impossibile verificare la politica %(policy)s perché %(reason)s." #, python-format msgid "" "Failed to create a duplicate %(object_type)s: for attribute(s) " "%(attributes)s with value(s) %(values)s" msgstr "" "Impossibile creare un duplicato %(object_type)s: per gli attributi " "%(attributes)s con valori %(values)s" #, python-format msgid "" "Failed to create port on network %(network_id)s, because fixed_ips included " "invalid subnet %(subnet_id)s" msgstr "" "Impossibile creare la porta nella rete %(network_id)s perché fixed_ips ha " "incluso una sottorete %(subnet_id)s non valida" #, python-format msgid "Failed to init policy %(policy)s because %(reason)s." msgstr "Impossibile inizializzare la politica %(policy)s perché %(reason)s." #, python-format msgid "Failed to locate source for %s." msgstr "Impossibile individuare l'origine per %s." #, python-format msgid "Failed to parse request. Parameter '%s' not specified" msgstr "" "Impossibile analizzare la richiesta. Il parametro '%s' non è specificato" #, python-format msgid "Failed to parse request. Required attribute '%s' not specified" msgstr "" "Impossibile analizzare la richiesta. È necessario l'attributo '%s' non " "specificato" msgid "Failed to remove supplemental groups" msgstr "Impossibile rimuovere i gruppi supplementari" #, python-format msgid "Failed to set gid %s" msgstr "Impossibile impostare il gid %s" #, python-format msgid "Failed to set uid %s" msgstr "Impossibile impostare l'uid %s" #, python-format msgid "Failed to set-up %(type)s tunnel port to %(ip)s" msgstr "Impossibile impostare la porta tunnel %(type)s su %(ip)s" msgid "Failure applying iptables rules" msgstr "Errore nell'applicazione di regole iptables" #, python-format msgid "Failure waiting for address %(address)s to become ready: %(reason)s" msgstr "" "Errore durante l'attesa della disponibilità dell'indirizzo %(address)s: " "%(reason)s" msgid "Flat provider networks are disabled" msgstr "Le reti flat del provider sono disabilitate" #, python-format msgid "Flavor %(flavor_id)s could not be found." msgstr "Impossibile trovare il flavor %(flavor_id)s." #, python-format msgid "Flavor %(flavor_id)s is used by some service instance." msgstr "" "Il flavor %(flavor_id)s viene utilizzato dall'istanza di alcuni servizi." msgid "Flavor is not enabled." msgstr "Flavor non è abilitato." 
#, python-format msgid "Floating IP %(floatingip_id)s could not be found" msgstr "Impossibile trovare l'IP mobile %(floatingip_id)s" #, python-format msgid "" "Floating IP %(floatingip_id)s is associated with non-IPv4 address " "%s(internal_ip)s and therefore cannot be bound." msgstr "" "L'IP mobile %(floatingip_id)s è associato all'indirizzo non IPv4 " "%s(internal_ip)s e pertanto non è possibile collegarlo." msgid "For TCP/UDP protocols, port_range_min must be <= port_range_max" msgstr "Per i protocolli TCP/UDP, port_range_min deve essere <= port_range_max" #, python-format msgid "For class %(object_type)s missing primary keys: %(missing_keys)s" msgstr "" "Per la classe %(object_type)s mancano le chiavi primarie: %(missing_keys)s" msgid "Force ip_lib calls to use the root helper" msgstr "Forzare le chiamate ip_lib ad utilizzare root helper" #, python-format msgid "Found duplicate extension: %(alias)s." msgstr "Trovata estensione duplicata: %(alias)s." #, python-format msgid "" "Found overlapping allocation pools: %(pool_1)s %(pool_2)s for subnet " "%(subnet_cidr)s." msgstr "" "Trovati pool di allocazione di sovrapposizione:%(pool_1)s %(pool_2)s per la " "sottorete %(subnet_cidr)s." msgid "Gateway IP version inconsistent with allocation pool version" msgstr "Versione IP gateway incoerente con la versione del pool di allocazione" #, python-format msgid "" "Gateway cannot be updated for router %(router_id)s, since a gateway to " "external network %(net_id)s is required by one or more floating IPs." msgstr "" "Non è possibile aggiornare il gateway per il router %(router_id)s, in quanto " "un gateway per la rete esterna %(net_id)s è richiesto da uno o più IP mobili." #, python-format msgid "Gateway ip %(ip_address)s conflicts with allocation pool %(pool)s." msgstr "" "L'ip gateway %(ip_address)s è in conflitto con il pool di allocazione " "%(pool)s." msgid "Gateway is not valid on subnet" msgstr "Il gateway non è valido sulla sottorete" msgid "" "Geneve encapsulation header size is dynamic, this value is used to calculate " "the maximum MTU for the driver. This is the sum of the sizes of the outer " "ETH + IP + UDP + GENEVE header sizes. The default size for this field is 50, " "which is the size of the Geneve header without any additional option headers." msgstr "" "La dimensione dell'intestazione di incapsulamento Geneve è dinamica, questo " "valore viene utilizzato per calcolare la MTU massima per il driver. " "Rappresenta la somma delle dimensioni delle intestazioni ETH + IP + UDP + " "GENEVE esterne. La dimensione predefinita per questo campo è 50, che " "rappresenta la dimensione dell'intestazione Geneve senza intestazioni di " "opzioni aggiuntive." msgid "Group (gid or name) running metadata proxy after its initialization" msgstr "" "Gruppo (gid o nome) che esegue il proxy di metadati dopo la relativa " "inizializzazione" msgid "" "Group (gid or name) running metadata proxy after its initialization (if " "empty: agent effective group)." msgstr "" "Gruppo (gid o nome) che esegue il proxy di metadati dopo la relativa " "inizializzazione (se vuoto: gruppo operativo dell'agent)." msgid "Group (gid or name) running this process after its initialization" msgstr "" "Gruppo (gid o name) che esegue questo processo dopo la relativa " "inizializzazione" #, python-format msgid "HEAD file does not match migration timeline head, expected: %s" msgstr "" "Il file HEAD non corrisponde all'head di durata della migrazione, previsto: " "%s." 
msgid "" "Hostname to be used by the Neutron server, agents and services running on " "this machine. All the agents and services running on this machine must use " "the same host value." msgstr "" "Il nome host da utilizzare dal server Neutron, gli agent e servizi in " "esecuzione su questa macchina. Tutti gli agent ed i servizi in esecuzione su " "questa macchina devono utilizzare lo stesso valore host." msgid "How many times Neutron will retry MAC generation" msgstr "Quante volte Neutron richiamerà la generazione MAC" #, python-format msgid "" "ICMP code (port-range-max) %(value)s is provided but ICMP type (port-range-" "min) is missing." msgstr "" "Il codice ICMP (port-range-max) %(value)s è stato fornito, ma il tipo ICMP " "(port-range-min) manca." msgid "ID of network" msgstr "ID della rete" msgid "ID of network to probe" msgstr "ID di rete per probe" msgid "ID of probe port to delete" msgstr "ID della porta probe da eliminare" msgid "ID of probe port to execute command" msgstr "ID della porta probe per eseguire il comando" msgid "ID of the router" msgstr "ID del router" #, python-format msgid "IP address %(ip)s already allocated in subnet %(subnet_id)s" msgstr "Indirizzo IP %(ip)s già assegnato nella sottorete %(subnet_id)s" #, python-format msgid "IP address %(ip)s does not belong to subnet %(subnet_id)s" msgstr "L'indirizzo IP %(ip)s non appartiene alla sottorete %(subnet_id)s" #, python-format msgid "" "IP address %(ip_address)s is not a valid IP for any of the subnets on the " "specified network." msgstr "" "L'indirizzo IP %(ip_address)s non è un IP valido per nessuna delle sottoreti " "sulla rete specificata." msgid "IP address used by Nova metadata server." msgstr "Indirizzo IP utilizzato dal server di metadati Nova." msgid "IP allocation failed. Try again later." msgstr "Allocazione IP non riuscita. Provare successivamente." msgid "IP allocation requires subnet_id or ip_address" msgstr "L'assegnazione IP richiede subnet_id o ip_address" #, python-format msgid "" "IPTablesManager.apply failed to apply the following set of iptables rules:\n" "%s" msgstr "" "IPTablesManager.apply non è riuscito ad applicare la seguente serie di " "regole iptables:\n" "%s" msgid "IPtables conntrack zones exhausted, iptables rules cannot be applied." msgstr "" "Zone IPtables conntrack esaurite, impossibile applicare le regole iptables." msgid "IPv6 Address Mode must be SLAAC or Stateless for Prefix Delegation." msgstr "" "La modalità indirizzo IPv6 deve essere SLAAC o stateless per la delega " "prefisso." msgid "IPv6 RA Mode must be SLAAC or Stateless for Prefix Delegation." msgstr "" "La modalità RA IPv6 deve essere SLAAC o stateless per la delega prefisso." #, python-format msgid "" "IPv6 address %(address)s can not be directly assigned to a port on subnet " "%(id)s since the subnet is configured for automatic addresses" msgstr "" "L'indirizzo IPv6 %(address)s non può essere assegnato direttamente ad una " "porta sulla sottorete %(id)s perché la sottorete è configurata per gli " "indirizzi automatici" #, python-format msgid "" "IPv6 address %(ip)s cannot be directly assigned to a port on subnet " "%(subnet_id)s as the subnet is configured for automatic addresses" msgstr "" "L'indirizzo IPv6 %(ip)s non può essere assegnato direttamente ad una porta " "sulla sottorete %(subnet_id)s perché la sottorete è configurata per gli " "indirizzi automatici" #, python-format msgid "" "IPv6 subnet %s configured to receive RAs from an external router cannot be " "added to Neutron Router." 
msgstr "" "La sottorete IPv6 %s configurata per ricevere RA da un router esterno non " "può essere aggiunta a Neutron Router." msgid "" "If True, advertise network MTU values if core plugin calculates them. MTU is " "advertised to running instances via DHCP and RA MTU options." msgstr "" "Se True, annunciare i valori MTU della rete se calcolati dal plugin di base. " "MTU annuncia l'esecuzione delle istanze mediante le opzioni DHCP e RA MTU." msgid "" "If True, then allow plugins that support it to create VLAN transparent " "networks." msgstr "" "Se True, consentire ai plugin che lo supportano di creare reti VLAN " "trasparenti." msgid "" "If non-empty, the l3 agent can only configure a router that has the matching " "router ID." msgstr "" "Se non è vuoto, l'agent L3 può solo configurare un router che dispone " "dell'ID router corrispondente." msgid "Illegal IP version number" msgstr "Numero della versione IP non valido" #, python-format msgid "" "Illegal prefix bounds: %(prefix_type)s=%(prefixlen)s, %(base_prefix_type)s=" "%(base_prefixlen)s." msgstr "" "Limiti di prefisso non consentiti: %(prefix_type)s=%(prefixlen)s, " "%(base_prefix_type)s=%(base_prefixlen)s." #, python-format msgid "" "Illegal subnetpool association: subnetpool %(subnetpool_id)s cannot " "associate with address scope %(address_scope_id)s because subnetpool " "ip_version is not %(ip_version)s." msgstr "" "Associazione pool di sottorete non valida: il pool di sottorete " "%(subnetpool_id)s non può essere associato all'ambito di indirizzo " "%(address_scope_id)s perché la versione IP del pool di sottorete non è " "%(ip_version)s." #, python-format msgid "" "Illegal subnetpool association: subnetpool %(subnetpool_id)s cannot be " "associated with address scope %(address_scope_id)s." msgstr "" "Associazione pool di sottorete non valida: il pool di sottorete " "%(subnetpool_id)s non può essere associato all'ambito di indirizzo " "%(address_scope_id)s." #, python-format msgid "Illegal subnetpool update : %(reason)s." msgstr "Aggiornamento pool di sottorete non valido: %(reason)s." #, python-format msgid "Illegal update to prefixes: %(msg)s." msgstr "Aggiornamento non valido dei prefissi: %(msg)s." msgid "" "In some cases the Neutron router is not present to provide the metadata IP " "but the DHCP server can be used to provide this info. Setting this value " "will force the DHCP server to append specific host routes to the DHCP " "request. If this option is set, then the metadata service will be activated " "for all the networks." msgstr "" "In alcuni casi il router Neutron non è presente per fornire l'IP dei " "metadati ma il server DHCP può essere utilizzato per fornire queste " "informazioni. L'impostazione di questo valore su True farà in modo che il " "server DHCP aggiunga instradamenti host specifici alla richiesta DHCP. Se " "questa opzione è impostata, il servizio di metadati verrà attivato per tutte " "le reti." #, python-format msgid "Incorrect pci_vendor_info: \"%s\", should be pair vendor_id:product_id" msgstr "" "pci_vendor_info non corretto: \"%s\", deve essere una coppia di vendor_id:" "product_id" msgid "" "Indicates that this L3 agent should also handle routers that do not have an " "external network gateway configured. This option should be True only for a " "single agent in a Neutron deployment, and may be False for all agents if all " "routers must have an external network gateway." msgstr "" "Indica che questo agent L3 deve anche gestire i router che non hanno un " "gateway di rete esterna configurato. 
Questa opzione deve essere True solo " "per un singolo agent di una distribuzione Neutron e può essere False per " "tutti gli agent se tutti i router devono avere un gateway di rete esterna." #, python-format msgid "Instance of class %(module)s.%(class)s must contain _cache attribute" msgstr "" "L'istanza di classe %(module)s.%(class)s deve contenere l'attributo _cache" #, python-format msgid "Insufficient prefix space to allocate subnet size /%s" msgstr "" "Spazio prefisso insufficiente per assegnare la dimensione della sottorete /%s" msgid "Insufficient rights for removing default security group." msgstr "" "Diritti non sufficienti per rimuovere il gruppo di sicurezza predefinito." msgid "" "Integration bridge to use. Do not change this parameter unless you have a " "good reason to. This is the name of the OVS integration bridge. There is one " "per hypervisor. The integration bridge acts as a virtual 'patch bay'. All VM " "VIFs are attached to this bridge and then 'patched' according to their " "network connectivity." msgstr "" "Il bridge di integrazione da utilizzare. Non modificare questo parametro a " "meno che non si abbia una buona ragione per farlo. Questo è il nome del " "bridge di integrazione OVS. Esiste un bridge per ciascun hypervisor. Il " "bridge di integrazione agisce come un 'patch bay' virtuale. Tutti i VIF VM " "sono collegati a questo bridge e quindi 'patched' in base alla rispettiva " "connettività di rete." msgid "Interface to monitor" msgstr "Interfaccia da monitorare" msgid "" "Interval between checks of child process liveness (seconds), use 0 to disable" msgstr "" "Intervallo tra i controlli dell'attività del processo child (secondi), " "utilizzare 0 per disabilitare" msgid "Interval between two metering measures" msgstr "Intervallo tra due misurazioni" msgid "Interval between two metering reports" msgstr "Intervallo tra due report di misurazione" #, python-format msgid "Invalid CIDR %(input)s given as IP prefix." msgstr "CIDR non valido %(input)s fornito come prefisso IP." #, python-format msgid "" "Invalid CIDR %s for IPv6 address mode. OpenStack uses the EUI-64 address " "format, which requires the prefix to be /64." msgstr "" "CIDR %s non valido per la modalità indirizzi IPv6. OpenStack utilizza il " "formato di indirizzi EUI-64, che richiede che il prefisso sia /64." #, python-format msgid "Invalid Device %(dev_name)s: %(reason)s" msgstr "Dispositivo non valido %(dev_name)s: %(reason)s" #, python-format msgid "" "Invalid action '%(action)s' for object type '%(object_type)s'. Valid " "actions: %(valid_actions)s" msgstr "" "Azione non valida '%(action)s' per tipo di oggetto '%(object_type)s'. Azioni " "valide: %(valid_actions)s" #, python-format msgid "" "Invalid authentication type: %(auth_type)s, valid types are: " "%(valid_auth_types)s" msgstr "" "Tipo di autenticazione non valido: %(auth_type)s, i tipi validi sono: " "%(valid_auth_types)s" #, python-format msgid "Invalid content type %(content_type)s." msgstr "Tipo di contenuto non valido %(content_type)s." 
#, python-format msgid "Invalid data format for IP pool: '%s'" msgstr "Formato dati invalido per il pool IP: '%s'" #, python-format msgid "Invalid data format for extra-dhcp-opt: %(data)s" msgstr "Formato di dati non valido per extra-dhcp-opt: %(data)s" #, python-format msgid "Invalid data format for fixed IP: '%s'" msgstr "Formato dati invalido per l'IP fisso: '%s'" #, python-format msgid "Invalid data format for hostroute: '%s'" msgstr "Formato dati invalido per hostroute: '%s'" #, python-format msgid "Invalid data format for nameserver: '%s'" msgstr "Formato dati invalido per il nameserver: '%s'" #, python-format msgid "Invalid ethertype %(ethertype)s for protocol %(protocol)s." msgstr "ethertype %(ethertype)s non valido per il protocollo %(protocol)s." #, python-format msgid "Invalid extension environment: %(reason)s." msgstr "Ambiente di estensione non valido: %(reason)s." #, python-format msgid "Invalid format for routes: %(routes)s, %(reason)s" msgstr "Formato non valido per gli instradamenti: %(routes)s, %(reason)s" #, python-format msgid "Invalid format: %s" msgstr "Formato non valido: %s" #, python-format msgid "Invalid input for %(attr)s. Reason: %(reason)s." msgstr "Input non valido per %(attr)s. Motivo: %(reason)s." #, python-format msgid "" "Invalid input. '%(target_dict)s' must be a dictionary with keys: " "%(expected_keys)s" msgstr "" "Input non valido. '%(target_dict)s' deve essere un dizionario con chiavi: " "%(expected_keys)s" #, python-format msgid "Invalid instance state: %(state)s, valid states are: %(valid_states)s" msgstr "" "Stato istanza non valido: %(state)s, gli stati validi sono: %(valid_states)s" #, python-format msgid "Invalid mapping: '%s'" msgstr "Associazione non valida: '%s'" #, python-format msgid "Invalid network VLAN range: '%(vlan_range)s' - '%(error)s'." msgstr "Intervallo VLAN della rete non valido: '%(vlan_range)s' - '%(error)s'." #, python-format msgid "Invalid network VXLAN port range: '%(vxlan_range)s'." msgstr "Intervallo porta VXLAN di rete non valida: '%(vxlan_range)s'." #, python-format msgid "Invalid pci slot %(pci_slot)s" msgstr "pci slot non valido %(pci_slot)s" #, python-format msgid "Invalid provider format. Last part should be 'default' or empty: %s" msgstr "" "Formato del provider non valido. L'ultima parte deve essere 'default' o " "vuota: %s" #, python-format msgid "Invalid resource type %(resource_type)s" msgstr "Tipo di risorsa non valido %(resource_type)s" #, python-format msgid "Invalid route: %s" msgstr "Route invalido: %s" msgid "Invalid service provider format" msgstr "Formato del provider del servizio non valido" #, python-format msgid "Invalid service type %(service_type)s." msgstr "Tipo di servizio non valido %(service_type)s." #, python-format msgid "" "Invalid value for ICMP %(field)s (%(attr)s) %(value)s. It must be 0 to 255." msgstr "" "Valore non valido per ICMP %(field)s (%(attr)s) %(value)s. Deve essere " "compreso tra 0 e 255." #, python-format msgid "Invalid value for port %(port)s" msgstr "Valore invalido per la porta %(port)s" msgid "" "Iptables mangle mark used to mark ingress from external network. This mark " "will be masked with 0xffff so that only the lower 16 bits will be used." msgstr "" "Indicatore mangle iptables per contrassegnare l'ingresso dalla rete esterna. " "Tale indicatoreverrà mascherato con 0xffff in modo che verranno utilizzati " "solo i 16 bit inferiori. " msgid "" "Iptables mangle mark used to mark metadata valid requests. 
This mark will be " "masked with 0xffff so that only the lower 16 bits will be used." msgstr "" "Indicatore mangle iptables per contrassegnare le richieste valide di " "metadati. Tale indicatore verrà mascherato con 0xffff in modo che vengano " "utilizzati solo i 16 bit inferiori." msgid "" "Keep in track in the database of current resourcequota usage. Plugins which " "do not leverage the neutron database should set this flag to False" msgstr "" "Tenere traccia nel database dell'utilizzo corrente della quota di risorsa. I " "plugin che non usano il database neutron devono impostare questo indicatore " "su False" msgid "Keepalived didn't respawn" msgstr "Keepalived non è stato rigenerato" msgid "Keepalived didn't spawn" msgstr "Keepalived non è stato generato" #, python-format msgid "" "Kernel HZ value %(value)s is not valid. This value must be greater than 0." msgstr "" "Il valore Kernel HZ %(value)s non è valido. Questo valore deve essere " "maggiore di 0." #, python-format msgid "Key %(key)s in mapping: '%(mapping)s' not unique" msgstr "Chiave %(key)s nell'associazione: '%(mapping)s' non univoca" msgid "L3 agent failure to setup NAT for floating IPs" msgstr "Errore dell'agent L3 durante la configurazione di NAT per IP mobili" msgid "L3 agent failure to setup floating IPs" msgstr "Errore dell'agent L3 durante la configurazione di IP mobili" #, python-format msgid "Limit must be an integer 0 or greater and not '%d'" msgstr "Il limite deve essere un numero intero 0 o superiore e non '%d'" msgid "Limit number of leases to prevent a denial-of-service." msgstr "Limitare il numero di lease per evitare un denial-of-service." msgid "List of :" msgstr "Elenco di :" msgid "" "List of <physical_network>:<vlan_min>:<vlan_max> or <physical_network> " "specifying physical_network names usable for VLAN provider and tenant " "networks, as well as ranges of VLAN tags on each available for allocation to " "tenant networks." msgstr "" "Elenco di <physical_network>:<vlan_min>:<vlan_max> o <physical_network> che " "specificano nomi physical_network utilizzabili per le reti tenant e provider " "VLAN, come anche gli intervalli di tag VLAN su ciascuno disponibile per " "l'assegnazione alle reti tenant." msgid "" "List of network type driver entrypoints to be loaded from the neutron.ml2." "type_drivers namespace." msgstr "" "Elenco dei punti di ingresso del driver del tipo di rete da caricare dallo " "spazio dei nomi neutron.ml2.type_drivers." msgid "" "List of physical_network names with which flat networks can be created. Use " "default '*' to allow flat networks with arbitrary physical_network names. " "Use an empty list to disable flat networks." msgstr "" "Elenco di nomi physical_network con cui possono essere create reti flat. " "Utilizzare il valore '*' predefinito per consentire reti flat con nomi " "physical_network arbitrari. Utilizzare un elenco vuoto per disabilitare le " "reti flat." msgid "Local IP address of the VXLAN endpoints." msgstr "Indirizzo IP locale degli endpoint VXLAN." msgid "Location for Metadata Proxy UNIX domain socket." msgstr "Ubicazione per il socket del dominio UNIX del proxy di metadati." msgid "Location of Metadata Proxy UNIX domain socket" msgstr "Ubicazione del socket del dominio UNIX del proxy di metadati" msgid "Location of pid file of this process." msgstr "Ubicazione del file pid di questo processo." msgid "Location to store DHCP server config files." msgstr "Ubicazione per archiviare i file di configurazione del server DHCP." msgid "Location to store IPv6 PD files." msgstr "Ubicazione per archiviare i file PD IPv6." 
msgid "Location to store IPv6 RA config files" msgstr "Ubicazione per memorizzare i file di configurazione IPv6 RA" msgid "Location to store child pid files" msgstr "Ubicazione per archiviare i file pid dell'elemento child" msgid "Location to store keepalived/conntrackd config files" msgstr "" "Ubicazione per archiviare i file di configurazione keepalived/conntrackd" msgid "Log agent heartbeats" msgstr "Registra gli heartbeat dell'agent" msgid "Loopback IP subnet is not supported if enable_dhcp is True." msgstr "La sottorete IP Loopback non è supportata se enable_dhcp è True." msgid "MTU size of veth interfaces" msgstr "Dimensione MTU delle interfacce veth" msgid "Make the l2 agent run in DVR mode." msgstr "Eseguire l'agent L2 in modalità DVR." msgid "Malformed request body" msgstr "Corpo richiesta non corretto" #, python-format msgid "Malformed request body: %(reason)s." msgstr "Corpo richiesta non corretto: %(reason)s." msgid "MaxRtrAdvInterval setting for radvd.conf" msgstr "Impostazione MaxRtrAdvInterval per radvd.conf" msgid "Maximum number of DNS nameservers per subnet" msgstr "Numero massimo di server dei nomi DNS per la sottorete" msgid "" "Maximum number of L3 agents which a HA router will be scheduled on. If it is " "set to 0 then the router will be scheduled on every agent." msgstr "" "Numero massimo di agent L3 su cui verrà pianificato un router HA. Se è " "impostato su 0, il router verrà pianificato su ciascun agent." msgid "Maximum number of allowed address pairs" msgstr "Numero massimo di coppie di indirizzi consentito" msgid "" "Maximum number of fixed ips per port. This option is deprecated and will be " "removed in the N release." msgstr "" "Numero massimo di IP fissi per porta. Questa opzione è obsoleta e sarà " "rimossa nella release N." msgid "Maximum number of host routes per subnet" msgstr "Numero massimo di route host per la sottorete" msgid "Maximum number of routes per router" msgstr "Numero massimo di instradamenti per router" msgid "" "Metadata Proxy UNIX domain socket mode, 4 values allowed: 'deduce': deduce " "mode from metadata_proxy_user/group values, 'user': set metadata proxy " "socket mode to 0o644, to use when metadata_proxy_user is agent effective " "user or root, 'group': set metadata proxy socket mode to 0o664, to use when " "metadata_proxy_group is agent effective group or root, 'all': set metadata " "proxy socket mode to 0o666, to use otherwise." msgstr "" "Modalità socket del dominio UNIX del proxy di metadati, 4 valori consentiti: " "'deduce': modalità deduzione da valori metadata_proxy_user/group, 'user': " "impostare modalità socket proxy metadati su 0o644, da usare quando " "metadata_proxy_user è l'utente effettivo agent o root, 'group': impostare " "modalità socket proxy metadati su 0o664, da usare quando " "metadata_proxy_group è gruppo effettivo agent o root, 'all': impostare " "modalità socket proxy metadati su 0o666, per usare altrimenti." msgid "Metering driver" msgstr "Driver di misurazione" #, python-format msgid "Metering label %(label_id)s does not exist" msgstr "L'etichetta di misurazione %(label_id)s non esiste" #, python-format msgid "Metering label rule %(rule_id)s does not exist" msgstr "La regola di etichetta di misurazione %(rule_id)s non esiste" #, python-format msgid "" "Metering label rule with remote_ip_prefix %(remote_ip_prefix)s overlaps " "another" msgstr "" "La regola di etichetta di misurazione remote_ip_prefix %(remote_ip_prefix)s " "si sovrappone ad un'altra" msgid "Method cannot be called within a transaction." 
msgstr "Il metodo non può essere richiamato all'interno di una transazione." msgid "Migration from distributed router to centralized is not supported" msgstr "" "La migrazione dal router distribuito al router centralizzato non è " "supportata " msgid "MinRtrAdvInterval setting for radvd.conf" msgstr "Impostazione MinRtrAdvInterval per radvd.conf" msgid "Minimize polling by monitoring ovsdb for interface changes." msgstr "" "Ridurre al minimo il polling controllando ovsdb per le modifiche " "all'interfaccia." #, python-format msgid "Missing key in mapping: '%s'" msgstr "Chiave mancante nell'associazione: '%s'" #, python-format msgid "Missing value in mapping: '%s'" msgstr "Valore mancante nell'associazione: '%s'" msgid "Multicast IP subnet is not supported if enable_dhcp is True." msgstr "La sottorete IP Multicast non è supportata se enable_dhcp è True." msgid "" "Multicast group for VXLAN. When configured, will enable sending all " "broadcast traffic to this multicast group. When left unconfigured, will " "disable multicast VXLAN mode." msgstr "" "Gruppo multicast per VXLAN. Quando configurato, abilita l'invio di tutto il " "traffico broadcast a questo gruppo multicast. Quando non configurato, " "disabilita la modalità multicast VXLAN." msgid "" "Multicast group(s) for vxlan interface. A range of group addresses may be " "specified by using CIDR notation. Specifying a range allows different VNIs " "to use different group addresses, reducing or eliminating spurious broadcast " "traffic to the tunnel endpoints. To reserve a unique group for each possible " "(24-bit) VNI, use a /8 such as 239.0.0.0/8. This setting must be the same on " "all the agents." msgstr "" "Gruppo multicast per l'interfaccia vxlan. Un intervallo di indirizzi di " "gruppo può essere specificato utilizzando la notazione CIDR. La definizione " "di un intervallo consente a VNI diversi di utilizzare indirizzi di gruppo " "diversi, riducendo o eliminando il traffico di broadcast spurio agli " "endpoint del tunnel. Per riservare un gruppo univoco per ciascun VNI " "possibile (24-bit), utilizzare /8, ad esempio, 239.0.0.0/8. Questa " "impostazione deve essere la stessa su tutti gli agent." #, python-format msgid "Multiple agents with agent_type=%(agent_type)s and host=%(host)s found" msgstr "Trovati più agent con agent_type=%(agent_type)s e host=%(host)s" #, python-format msgid "Multiple default providers for service %s" msgstr "Più provider predefiniti per il servizio %s" #, python-format msgid "Multiple plugins for service %s were configured" msgstr "Sono stati configurati più plugin per il servizio %s" #, python-format msgid "Multiple providers specified for service %s" msgstr "Più provider specificati per il servizio %s" msgid "Multiple tenant_ids in bulk security group rule create not allowed" msgstr "" "La creazione in massa di più tenant_id nella regola del gruppo di sicurezza " "non è consentita" msgid "Must also specify protocol if port range is given." msgstr "" "È necessario anche specificare il protocollo se è fornito l'intervallo di " "porta." msgid "Must specify one or more actions on flow addition or modification" msgstr "" "È necessario specificare una o più azioni nell'aggiunta o modifica del flusso" #, python-format msgid "Name %(dns_name)s is duplicated in the external DNS service" msgstr "Nome %(dns_name)s duplicato nel servizio DNS esterno" #, python-format msgid "" "Name '%s' must be 1-63 characters long, each of which can only be " "alphanumeric or a hyphen." 
msgstr "" "Il nome '%s' deve contenere da 1 a 63 caratteri, ciascuno dei quali può " "essere solo alfanumerico o un trattino." #, python-format msgid "Name '%s' must not start or end with a hyphen." msgstr "Il nome '%s' non deve iniziare o terminare con un trattino." msgid "Name of Open vSwitch bridge to use" msgstr "Nome del bridge Open vSwitch da utilizzare" msgid "" "Name of nova region to use. Useful if keystone manages more than one region." msgstr "" "Nome della regione nova da utilizzare. Utile nel caso in cui keystone " "gestisce più di una regione." msgid "Name of the FWaaS Driver" msgstr "Nome del driver FWaaS" msgid "Namespace of the router" msgstr "Spazio dei nomi del router" msgid "Native pagination depend on native sorting" msgstr "La paginazione nativa deipende dall'ordinamento nativo" #, python-format msgid "" "Need to apply migrations from %(project)s contract branch. This will require " "all Neutron server instances to be shutdown before proceeding with the " "upgrade." msgstr "" "È necessario applicare le migrazioni dal ramo di contratto %(project)s. " "Prima di procedere con l'aggiornamento è necessario che tutte le istanze del " "server Neutron vengano chiuse." msgid "Negative delta (downgrade) not supported" msgstr "Delta negativo (riduzione) non supportato" msgid "Negative relative revision (downgrade) not supported" msgstr "Revisione relativa negativa (riduzione) non suportata" #, python-format msgid "" "Network %(network_id)s is already bound to BgpSpeaker %(bgp_speaker_id)s." msgstr "Rete %(network_id)s già associata a BgpSpeaker %(bgp_speaker_id)s." #, python-format msgid "" "Network %(network_id)s is not associated with BGP speaker %(bgp_speaker_id)s." msgstr "Rete %(network_id)s non associata allo speaker BGP %(bgp_speaker_id)s" #, python-format msgid "Network %(network_id)s is not bound to a BgpSpeaker." msgstr "Rete %(network_id)s non associata a un BgpSpeaker." #, python-format msgid "Network %(network_id)s is not bound to a IPv%(ip_version)s BgpSpeaker." msgstr "Rete %(network_id)s non associata a un BgpSpeaker IPv%(ip_version)s." #, python-format msgid "Network %s does not contain any IPv4 subnet" msgstr "La rete %s non contiene alcuna sottorete IPv4" #, python-format msgid "Network %s is not a valid external network" msgstr "La rete %s non è una rete esterna valida" #, python-format msgid "Network %s is not an external network" msgstr "La rete %s non è una rete esterna" #, python-format msgid "" "Network of size %(size)s, from IP range %(parent_range)s excluding IP ranges " "%(excluded_ranges)s was not found." msgstr "" "Rete di dimensione %(size)s, dall'intervallo IP %(parent_range)s esclusi gli " "intervalli IP %(excluded_ranges)s non trovata." msgid "Network that will have instance metadata proxied." msgstr "Rete che avrà i metadati dell'istanza con proxy." #, python-format msgid "Network type value '%s' not supported" msgstr "Valore del tipo di rete '%s' non supportato" msgid "Network type value needed by the ML2 plugin" msgstr "Valore Tipo di rete richiesto dal plugin ML2" msgid "Network types supported by the agent (gre and/or vxlan)." msgstr "Tipi di reti supportati dall'agent (gre e/o vxlan)." msgid "" "Neutron IPAM (IP address management) driver to use. If ipam_driver is not " "set (default behavior), no IPAM driver is used. In order to use the " "reference implementation of Neutron IPAM driver, use 'internal'." msgstr "" "Il driver Neutron IPAM (IP address management) da utilizzare. 
Se ipam_driver " "non è impostato (comportamento predefinito) non verrà utilizzato nessun " "driver IPAM. Per utilizzare l'implementazione di riferimento del driver " "Neutron IPAM, utilizzare 'internal'." msgid "Neutron Service Type Management" msgstr "Gestione tipo servizio Neutron" msgid "Neutron core_plugin not configured!" msgstr "Neutron core_plugin non configurato!" msgid "Neutron plugin provider module" msgstr "Modulo del provider di plugin Neutron" msgid "Neutron quota driver class" msgstr "Classe driver quota Neutron" msgid "New value for first_ip or last_ip has to be specified." msgstr "È necessario specificare un nuovo valore per first_ip o last_ip." msgid "No default router:external network" msgstr "Nessuna rete router:external predefinita" #, python-format msgid "No default subnetpool found for IPv%s" msgstr "Nessun pool di sottorete predefinito trovato per IPv%s" msgid "No default subnetpools defined" msgstr "Nessun pool di sottorete predefinito definito" #, python-format msgid "No eligible l3 agent associated with external network %s found" msgstr "" "Non è stato trovato nessun agent L3 adatto associato alla rete esterna %s" #, python-format msgid "No more IP addresses available for subnet %(subnet_id)s." msgstr "Indirizzi IP non più disponibili per la sottorete %(subnet_id)s." #, python-format msgid "" "No more Virtual Router Identifier (VRID) available when creating router " "%(router_id)s. The limit of number of HA Routers per tenant is 254." msgstr "" "Nessun altro VRID (Virtual Router Identifier) disponibile durante la " "creazione del router %(router_id)s. Il limite del numero di router HA per " "tenant è 254." msgid "No offline migrations pending." msgstr "Nessuna migrazione offline in sospeso." #, python-format msgid "No providers specified for '%s' service, exiting" msgstr "Nessun provider specificato per il servizio '%s', uscita in corso" #, python-format msgid "No shared key in %s fields" msgstr "Nessuna chiave condivisa nei campi %s" msgid "Not allowed to manually assign a router to an agent in 'dvr' mode." msgstr "" "Non è consentito assegnare manualmente un router ad un agent in modalità " "'dvr'." msgid "Not allowed to manually remove a router from an agent in 'dvr' mode." msgstr "" "Non è consentito rimuovere manualmente un router da un agent in modalità " "'dvr'." #, python-format msgid "" "Not enough l3 agents available to ensure HA. Minimum required " "%(min_agents)s, available %(num_agents)s." msgstr "" "Non sono presenti agent L3 sufficienti per garantire HA. Sono richiesti " "minimo %(min_agents)s, disponibili %(num_agents)s." msgid "" "Number of DHCP agents scheduled to host a tenant network. If this number is " "greater than 1, the scheduler automatically assigns multiple DHCP agents for " "a given tenant network, providing high availability for DHCP service." msgstr "" "Numero di agent DHCP pianificati per ospitare una rete tenant. Se questo " "numero è maggiore di 1, lo scheduler assegna automaticamente più agent DHCP " "per una data rete tenant, fornendo l'alta disponibilità per il servizio " "DHCP." 
msgid "Number of RPC worker processes dedicated to state reports queue" msgstr "Numero di processi RPC worker dedicati per indicare la coda di report" msgid "Number of RPC worker processes for service" msgstr "Numero di processi RPC worker per servizio" msgid "Number of backlog requests to configure the metadata server socket with" msgstr "" "Numero di richieste di backlog con cui configurare il socket server dei " "metadati" msgid "Number of backlog requests to configure the socket with" msgstr "Numero di richieste di backlog per configurare il socket con" msgid "" "Number of bits in an ipv4 PTR zone that will be considered network prefix. " "It has to align to byte boundary. Minimum value is 8. Maximum value is 24. " "As a consequence, range of values is 8, 16 and 24" msgstr "" "Il numero di bit in una zona PTR ipv4 che verrà considerato prefisso di " "rete. Deve allinearsi al limite di byte. Il valore minimo è 8. Il valore " "massimo è 24. Di conseguenza, l'intervallo di valori è 8, 16 e 24" msgid "" "Number of bits in an ipv6 PTR zone that will be considered network prefix. " "It has to align to nyble boundary. Minimum value is 4. Maximum value is 124. " "As a consequence, range of values is 4, 8, 12, 16,..., 124" msgstr "" "Il numero di bit in una zona PTR ipv6 che verrà considerato prefisso di " "rete. Deve allinearsi al limite nyble. Il valore minimo è 4. Il valore " "massimo è 124. Di conseguenza, l'intervallo di valori è 4, 8, 12, 16, ...., " "124" msgid "" "Number of floating IPs allowed per tenant. A negative value means unlimited." msgstr "" "Numero di IP mobili consentiti per tenant. Un valore negativo indica un " "numero illimitato." msgid "" "Number of networks allowed per tenant. A negative value means unlimited." msgstr "" "Numero di reti consentite per tenant. Un valore negativo indica un numero " "illimitato." msgid "Number of ports allowed per tenant. A negative value means unlimited." msgstr "" "Numero di porte consentite per tenant. Un valore negativo indica un numero " "illimitato." msgid "Number of routers allowed per tenant. A negative value means unlimited." msgstr "" "Numero di router consentiti per tenant. Un valore negativo indica un numero " "illimitato." msgid "" "Number of seconds between sending events to nova if there are any events to " "send." msgstr "" "Numero di secondi tra l'invio di eventi a nova se vi sono eventuali eventi " "da inviare." msgid "Number of seconds to keep retrying to listen" msgstr "Numero di secondi per trattenere i nuovi tentativi di ascolto" msgid "" "Number of security groups allowed per tenant. A negative value means " "unlimited." msgstr "" "Numero di gruppi di sicurezza consentiti per tenant. Un valore negativo " "indica un numero illimitato." msgid "" "Number of security rules allowed per tenant. A negative value means " "unlimited." msgstr "" "Numero di regole di sicurezza consentite per tenant. Un valore negativo " "indica un numero illimitato." msgid "" "Number of separate API worker processes for service. If not specified, the " "default is equal to the number of CPUs available for best performance." msgstr "" "Il numero di processi worker API separati per il servizio. Se non " "specificato, il valore predefinito è uguale al numero di CPU disponibili per " "prestazioni ottimali." 
msgid "" "Number of separate worker processes for metadata server (defaults to half of " "the number of CPUs)" msgstr "" "Numero di processi worker separati per server di metadati (il valore " "predefinito è metà del numero di CPU)" msgid "Number of subnets allowed per tenant, A negative value means unlimited." msgstr "" "Numero di sottoreti consentite per tenant. Un valore negativo indica un " "numero illimitato." msgid "" "Number of threads to use during sync process. Should not exceed connection " "pool size configured on server." msgstr "" "Numero di thread da utilizzare durante il processo di sincronizzazione. Non " "deve superare la dimensione del pool di connessione configurata sul server." msgid "OK" msgstr "OK" msgid "" "OVS datapath to use. 'system' is the default value and corresponds to the " "kernel datapath. To enable the userspace datapath set this value to 'netdev'." msgstr "" "Datapath OVS da utilizzare. 'system' è il valore predefinito e corrisponde " "al datapath del kernel. Per abilitare il datapath dello spazio dei nomi, " "impostare questo valore su 'netdev'." msgid "OVS vhost-user socket directory." msgstr "Directory socket vhost-user OVS." #, python-format msgid "OVSDB Error: %s" msgstr "Errore OVSDB: %s" #, python-format msgid "Object action %(action)s failed because: %(reason)s." msgstr "Azione dell'oggetto %(action)s non riuscita perché: %(reason)s" msgid "Only admin can view or configure quota" msgstr "Solo admin può visualizzare o configurare una quota" msgid "Only admin is authorized to access quotas for another tenant" msgstr "Solo l'admin è autorizzato ad accedere alle quote per un altro tenant" msgid "Only admins can manipulate policies on networks they do not own." msgstr "" "Solo gli admin possono gestire le politiche su reti che non possiedono." msgid "Only admins can manipulate policies on objects they do not own" msgstr "" "Solo gli admin possono gestire le politiche su oggetti che non possiedono" msgid "Only allowed to update rules for one security profile at a time" msgstr "" "Al momento è consentito solo aggiornare le regole per un profilo di " "sicurezza." msgid "Only remote_ip_prefix or remote_group_id may be provided." msgstr "È possibile fornire solo remote_ip_prefix o remote_group_id." msgid "OpenFlow interface to use." msgstr "L'interfaccia OpenFlow da utilizzare." #, python-format msgid "" "Operation %(op)s is not supported for device_owner %(device_owner)s on port " "%(port_id)s." msgstr "" "Operazione %(op)s non supportata per device_owner %(device_owner)s sulla " "porta %(port_id)s." #, python-format msgid "Operation not supported on device %(dev_name)s" msgstr "Operazione non supportata sul dispositivo %(dev_name)s" msgid "" "Ordered list of network_types to allocate as tenant networks. The default " "value 'local' is useful for single-box testing but provides no connectivity " "between hosts." msgstr "" "Elenco ordinato di network_types da assegnare come reti tenant. Il valore " "predefinito 'local' è utile per la verifica single-box ma non fornisce " "alcuna connettività tra host." msgid "Override the default dnsmasq settings with this file." msgstr "" "Sostituire le impostazioni dnsmasq predefinite utilizzando questo file." msgid "Owner type of the device: network/compute" msgstr "Tipo proprietario dell'unità: rete/compute" msgid "POST requests are not supported on this resource." msgstr "Le richieste POST non sono supportate su questa risorsa." 
#, python-format msgid "Package %s not installed" msgstr "Pacchetto %s non installato" #, python-format msgid "Parameter %(param)s must be of %(param_type)s type." msgstr "Il parametro %(param)s deve essere di tipo %(param_type)s." #, python-format msgid "Parsing bridge_mappings failed: %s." msgstr "Analisi bridge_mappings non riuscita: %s." msgid "Parsing supported pci_vendor_devs failed" msgstr "Analisi pci_vendor_devs supportati non riuscita" msgid "Password for connecting to designate in admin context" msgstr "Password per la connessione da designare nel contesto admin" #, python-format msgid "Password not specified for authentication type=%(auth_type)s." msgstr "Password non specificata per il tipo di autenticazione=%(auth_type)s." msgid "Path to PID file for this process" msgstr "Percorso per il file PID per questo processo" msgid "Path to the router directory" msgstr "Percorso per la directory del router" msgid "Peer patch port in integration bridge for tunnel bridge." msgstr "Porta patch peer nel bridge di integrazione per il bridge tunnel." msgid "Peer patch port in tunnel bridge for integration bridge." msgstr "Porta patch peer nel bridge tunnel per il bridge di integrazione." msgid "Per-tenant subnet pool prefix quota exceeded." msgstr "Quota prefisso pool di sottorete per-tenant superata." msgid "Phase upgrade options do not accept revision specification" msgstr "" "Le opzioni di aggiornamento fase non accettano la specifica di revisione" msgid "Ping timeout" msgstr "Timeout di ping" #, python-format msgid "Plugin '%s' not found." msgstr "Plugin '%s' non trovato." msgid "Plugin does not support updating provider attributes" msgstr "Il plugin non supporta l'aggiornamento degli attributi provider" msgid "Policy configuration policy.json could not be found." msgstr "Impossibile trovare la configurazione policy.json della politica." #, python-format msgid "Port %(id)s does not have fixed ip %(address)s" msgstr "La porta %(id)s non dispone di un ip fisso %(address)s" #, python-format msgid "Port %(port)s does not exist on %(bridge)s!" msgstr "La porta %(port)s non esiste su %(bridge)s!" #, python-format msgid "Port %(port_id)s is already acquired by another DHCP agent" msgstr "La porta %(port_id)s è già acquisita da un altro agent DHCP " #, python-format msgid "" "Port %(port_id)s is associated with a different tenant than Floating IP " "%(floatingip_id)s and therefore cannot be bound." msgstr "" "La porta %(port_id)s è associata ad un diverso tenant rispetto all'IP mobile " "%(floatingip_id)s e pertanto non è possibile unirlo." #, python-format msgid "Port %(port_id)s is not managed by this agent. " msgstr "La porta %(port_id)s non è gestita da questo agent. " #, python-format msgid "Port %s does not exist" msgstr "La porta %s non esiste" #, python-format msgid "" "Port %s has multiple fixed IPv4 addresses. Must provide a specific IPv4 " "address when assigning a floating IP" msgstr "" "La porta %s dispone di più indirizzi IPv4 fissi. È necessario fornirne uno " "specifico durante l'assegnazione di un IP mobile" msgid "" "Port Security must be enabled in order to have allowed address pairs on a " "port." msgstr "" "Abilitare la sicurezza della porta per disporre di coppie di indirizzo " "consentite ad una porta." msgid "" "Port has security group associated. Cannot disable port security or ip " "address until security group is removed" msgstr "" "La porta ha un gruppo sicurezza associato. 
Impossibile disabilitare la " "sicurezza della porta o l'indirizzo ip finché il gruppo di sicurezza non " "viene rimosso" msgid "" "Port security must be enabled and port must have an IP address in order to " "use security groups." msgstr "" "La sicurezza della porta deve essere abilitata e la porta deve avere un " "indirizzo IP per utilizzare i gruppi di sicurezza." msgid "" "Port to listen on for OpenFlow connections. Used only for 'native' driver." msgstr "" "Porta di ascolto per le connessioni OpenFlow. Utilizzata solo per driver " "'native'." #, python-format msgid "Prefix '%(prefix)s' not supported in IPv%(version)s pool." msgstr "Il prefisso '%(prefix)s' non è supportato nel pool IPv%(version)s." msgid "Prefix Delegation can only be used with IPv6 subnets." msgstr "La delega prefisso può essere utilizzata solo con sottoreti IPv6." msgid "Private key of client certificate." msgstr "Chiave privata del certificato client." #, python-format msgid "Probe %s deleted" msgstr "Probe %s eliminato" #, python-format msgid "Probe created : %s " msgstr "Probe creato : %s " msgid "Process is already started" msgstr "Processo già avviato" msgid "Process is not running." msgstr "Il processo non è in esecuzione." msgid "Protocol to access nova metadata, http or https" msgstr "Protocollo per accedere ai metadati nova, http o https" #, python-format msgid "Provider name %(name)s is limited by %(len)s characters" msgstr "Il nome del provider %(name)s è limitato a %(len)s caratteri" #, python-format msgid "QoS Policy %(policy_id)s is used by %(object_type)s %(object_id)s." msgstr "" "La politica QoS %(policy_id)s è utilizzata da %(object_type)s %(object_id)s." #, python-format msgid "" "QoS binding for network %(net_id)s and policy %(policy_id)s could not be " "found." msgstr "" "Impossibile trovare il collegamento QoS per la rete %(net_id)s e la politica " "%(policy_id)s." #, python-format msgid "" "QoS binding for port %(port_id)s and policy %(policy_id)s could not be found." msgstr "" "Impossibile trovare il collegamento QoS per la porta %(port_id)s e la " "politica %(policy_id)s." #, python-format msgid "QoS policy %(policy_id)s could not be found." msgstr "Impossibile trovare la politica QoS %(policy_id)s." #, python-format msgid "QoS rule %(rule_id)s for policy %(policy_id)s could not be found." msgstr "" "Impossibile trovare la regola QoS %(rule_id)s per la politica %(policy_id)s." #, python-format msgid "RBAC policy of type %(object_type)s with ID %(id)s not found" msgstr "Politica RBAC di tipo %(object_type)s con ID %(id)s non trovata" #, python-format msgid "" "RBAC policy on object %(object_id)s cannot be removed because other objects " "depend on it.\n" "Details: %(details)s" msgstr "" "La politica RBAC sull'oggetto %(object_id)s non può essere rimossa perché " "altri oggetti dipendono da essa.\n" "Dettagli: %(details)s" msgid "" "Range of seconds to randomly delay when starting the periodic task scheduler " "to reduce stampeding. (Disable by setting to 0)" msgstr "" "Intervallo di secondi di ritardo casuale all'avvio dello scheduler delle " "attività periodiche, per ridurre gli avvii simultanei (stampeding). 
" "(Disabilitare impostando questa opzione a 0)" msgid "Ranges must be in the same IP version" msgstr "Gli intervalli devono essere nella stessa versione IP" msgid "Ranges must be netaddr.IPRange" msgstr "Gli intervalli devono essere netaddr.IPRange" msgid "Ranges must not overlap" msgstr "Gli intervalli non devono sovrapporsi" #, python-format msgid "" "Received type '%(type)s' and value '%(value)s'. Expecting netaddr.EUI type." msgstr "" "Ricevuto tipo '%(type)s' e valore '%(value)s'. Previsto il tipo netaddr.EUI." #, python-format msgid "" "Received type '%(type)s' and value '%(value)s'. Expecting netaddr.IPAddress " "type." msgstr "" "Ricevuto tipo '%(type)s' e valore '%(value)s'. Previsto il tipo netaddr." "IPAddress." #, python-format msgid "" "Received type '%(type)s' and value '%(value)s'. Expecting netaddr.IPNetwork " "type." msgstr "" "Ricevuto tipo '%(type)s' e valore '%(value)s'. Previsto il tipo netaddr." "IPNetwork." #, python-format msgid "" "Release aware branch labels (%s) are deprecated. Please switch to expand@ " "and contract@ labels." msgstr "" "Le etichette ramo che riconoscono la release (%s) sono sconsigliate. Passare " "alle etichette expand@ e contract@." msgid "Remote metadata server experienced an internal server error." msgstr "Il server di metadati remoto ha rilevato un errore di server interno." msgid "" "Repository does not contain HEAD files for contract and expand branches." msgstr "" "Il repository non contiene i file HEAD per i rami di contratto ed espansione." msgid "" "Representing the resource type whose load is being reported by the agent. " "This can be \"networks\", \"subnets\" or \"ports\". When specified (Default " "is networks), the server will extract particular load sent as part of its " "agent configuration object from the agent report state, which is the number " "of resources being consumed, at every report_interval.dhcp_load_type can be " "used in combination with network_scheduler_driver = neutron.scheduler." "dhcp_agent_scheduler.WeightScheduler When the network_scheduler_driver is " "WeightScheduler, dhcp_load_type can be configured to represent the choice " "for the resource being balanced. Example: dhcp_load_type=networks" msgstr "" "Rappresentazione del tipo di risorsa il cui carico è segnalato dall'agent. " "Può essere \"networks\", \"subnets\" o \"ports\". Quando specificato " "(L'impostazione predefinita è networks), il server estrarrà il carico " "particolare inviato come parte del relativo oggetto di configurazione agent " "dallo stato del report agent, il quale rappresenta il numero di risorse " "utilizzate, ad ogni report_interval. dhcp_load_type può essere utilizzato in " "combinazione con network_scheduler_driver = neutron.scheduler." "dhcp_agent_scheduler.WeightScheduler Quando network_scheduler_driver è " "WeightScheduler, dhcp_load_type può essere configurato per rappresentare la " "scelta per la risorsa in fase di bilanciamento. Esempio: " "dhcp_load_type=networks" msgid "Request Failed: internal server error while processing your request." msgstr "" "Richiesta non riuscita: errore server interno durante l'elaborazione della " "richiesta." #, python-format msgid "" "Request contains duplicate address pair: mac_address %(mac_address)s " "ip_address %(ip_address)s." msgstr "" "La richiesta contiene una coppia di indirizzo duplicata: mac_address " "%(mac_address)s ip_address %(ip_address)s." 
#, python-format msgid "" "Requested subnet with cidr: %(cidr)s for network: %(network_id)s overlaps " "with another subnet" msgstr "" "Sottorete richiesta con cidr: %(cidr)s per la rete: %(network_id)s si " "sovrappone con un'altra sottorete" msgid "" "Reset flow table on start. Setting this to True will cause brief traffic " "interruption." msgstr "" "Reimpostare tabella flusso all'avvio. Impostandolo su True si provoca una " "breve interruzione del traffico." #, python-format msgid "Resource %(resource)s %(resource_id)s could not be found." msgstr "Impossibile trovare la risorsa %(resource)s %(resource_id)s." #, python-format msgid "Resource %(resource_id)s of type %(resource_type)s not found" msgstr "Risorsa %(resource_id)s di tipo %(resource_type)s non trovata" #, python-format msgid "" "Resource '%(resource_id)s' is already associated with provider " "'%(provider)s' for service type '%(service_type)s'" msgstr "" "La risorsa '%(resource_id)s' è già associata al provider '%(provider)s' per " "il tipo di servizio '%(service_type)s'" msgid "Resource body required" msgstr "Corpo risorsa richiesto" msgid "" "Resource name(s) that are supported in quota features. This option is now " "deprecated for removal." msgstr "" "Nomi risorsa supportati nelle funzioni quota. Questa opzione è ora obsoleta " "per la rimozione." msgid "Resource not found." msgstr "Risorsa non trovata." msgid "Resources required" msgstr "Risorse richieste" msgid "" "Root helper application. Use 'sudo neutron-rootwrap /etc/neutron/rootwrap." "conf' to use the real root filter facility. Change to 'sudo' to skip the " "filtering and just run the command directly." msgstr "" "Applicazione root helper. Utilizzare 'sudo neutron-rootwrap /etc/neutron/" "rootwrap.conf' per utilizzare la funzione di filtro root reale. Passare su " "'sudo' per ignorare il filtro e semplicemente eseguire il comando " "direttamente." msgid "Root helper daemon application to use when possible." msgstr "Applicazione daemon root helper da utilizzare quando possibile." msgid "Root permissions are required to drop privileges." msgstr "Per rilasciare i privilegi sono necessarie le autorizzazioni root." #, python-format msgid "Route %(cidr)s not advertised for BGP Speaker %(speaker_as)d." msgstr "" "Instradamento %(cidr)s non annunciato per lo speaker BGP %(speaker_as)d." #, python-format msgid "Router %(router_id)s %(reason)s" msgstr "Router %(router_id)s %(reason)s" #, python-format msgid "Router %(router_id)s could not be found" msgstr "Impossibile trovare il router %(router_id)s" #, python-format msgid "Router %(router_id)s does not have an interface with id %(port_id)s" msgstr "" "Il router %(router_id)s non dispone di un interfaccia con id %(port_id)s" #, python-format msgid "Router %(router_id)s has no interface on subnet %(subnet_id)s" msgstr "" "Il router %(router_id)s non dispone di un'interfaccia sulla sottorete " "%(subnet_id)s" #, python-format msgid "Router '%(router_id)s' cannot be both DVR and HA." msgstr "Il router '%(router_id)s' non può essere contemporaneamente DVR e HA." #, python-format msgid "Router '%(router_id)s' is not compatible with this agent." msgstr "Il router '%(router_id)s' non è compatibile con questo agent." #, python-format msgid "Router already has a port on subnet %s" msgstr "Il router dispone già di una porta sulla sottorete %s" #, python-format msgid "" "Router interface for subnet %(subnet_id)s on router %(router_id)s cannot be " "deleted, as it is required by one or more floating IPs." 
msgstr "" "L'interfaccia del router per la sottorete %(subnet_id)s nel router " "%(router_id)s non può essere eliminata, in quanto è richiesta da uno o più " "IP mobili." #, python-format msgid "" "Router interface for subnet %(subnet_id)s on router %(router_id)s cannot be " "deleted, as it is required by one or more routes." msgstr "" "L'interfaccia del router per la sottorete %(subnet_id)s nel router " "%(router_id)s non può essere eliminata, in quanto è richiesta da uno o più " "instradamenti." msgid "Router port must have at least one fixed IP" msgstr "La porta del router deve avere almeno un IP fisso" msgid "Router that will have connected instances' metadata proxied." msgstr "Router che avrà i metadati dell'istanza connessi con proxy." #, python-format msgid "" "Row doesn't exist in the DB. Request info: Table=%(table)s. Columns=" "%(columns)s. Records=%(records)s." msgstr "" "La riga non esiste nel DB. Informazioni richiesta: Tabella=%(table)s. " "Colonne=%(columns)s. Record=%(records)s." msgid "Run as daemon." msgstr "Esegui come daemon." #, python-format msgid "Running %(cmd)s (%(desc)s) for %(project)s ..." msgstr "Esecuzione di %(cmd)s (%(desc)s) per %(project)s ..." #, python-format msgid "Running %(cmd)s for %(project)s ..." msgstr "Esecuzione di %(cmd)s per %(project)s ..." msgid "Running without keystone AuthN requires that tenant_id is specified" msgstr "" "L'esecuzione senza keystone AuthN richiede che id_tenant sia specificato" msgid "" "Seconds between nodes reporting state to server; should be less than " "agent_down_time, best if it is half or less than agent_down_time." msgstr "" "Secondi tra lo stato riportato dai nodi al server; deve essere inferiore di " "agent_down_time, è preferibile che sia la metà o meno di agent_down_time." msgid "Seconds between running periodic tasks" msgstr "Secondi tra l'esecuzione delle attività periodiche" msgid "" "Seconds to regard the agent is down; should be at least twice " "report_interval, to be sure the agent is down for good." msgstr "" "Secondi per considerare che l'agent è inattivo; deve essere almeno il doppio " "di report_interval, per essere sicuri che l'agent è definitivamente inattivo." #, python-format msgid "Security Group %(id)s %(reason)s." msgstr "Gruppo di sicurezza %(id)s %(reason)s." #, python-format msgid "Security Group Rule %(id)s %(reason)s." msgstr "Regola gruppo di sicurezza %(id)s %(reason)s." #, python-format msgid "Security group %(id)s does not exist" msgstr "Il gruppo di sicurezza %(id)s non esiste" #, python-format msgid "Security group rule %(id)s does not exist" msgstr "La regola del gruppo di sicurezza %(id)s non esiste" #, python-format msgid "Security group rule already exists. Rule id is %(rule_id)s." msgstr "" "La regola del gruppo di sicurezza già esiste. L'ID regola è %(rule_id)s." #, python-format msgid "" "Security group rule for ethertype '%(ethertype)s' not supported. Allowed " "values are %(values)s." msgstr "" "Regola del gruppo di sicurezza per ethertype '%(ethertype)s' non supportata. " "I valori consentiti sono %(values)s." #, python-format msgid "" "Security group rule protocol %(protocol)s not supported. Only protocol " "values %(values)s and integer representations [0 to 255] are supported." msgstr "" "Il protocollo della regole del gruppo di sicurezza %(protocol)s non è " "supportato. Solo i valori del protocollo %(values)s e le rappresentazioni " "numeri interi [0-255] sono supportati." msgid "Segments and provider values cannot both be set." 
msgstr "Impossibile impostare i segmenti e i valori del provider." msgid "Selects the Agent Type reported" msgstr "Seleziona il tipo di agent riportato" msgid "" "Send notification to nova when port data (fixed_ips/floatingip) changes so " "nova can update its cache." msgstr "" "Invia una notifica a nova quando i dati porta (fixed_ips/floatingip) vengono " "modificati e in tal modo nova può aggiornare la propria cache." msgid "Send notification to nova when port status changes" msgstr "Invia una notifica a nova quando lo stato della porta cambia" msgid "" "Send this many gratuitous ARPs for HA setup, if less than or equal to 0, the " "feature is disabled" msgstr "" "Inviare questi ARPs gratuiti per la configurazione HA, se inferiore o uguale " "a 0, la funzione è disabilitata" #, python-format msgid "Service Profile %(sp_id)s could not be found." msgstr "Impossibile trovare il profilo di servizio %(sp_id)s." #, python-format msgid "Service Profile %(sp_id)s is already associated with flavor %(fl_id)s." msgstr "Il profilo di servizio %(sp_id)s è già associato al flavor %(fl_id)s." #, python-format msgid "Service Profile %(sp_id)s is not associated with flavor %(fl_id)s." msgstr "Il profilo di servizio %(sp_id)s non è associato al flavor %(fl_id)s." #, python-format msgid "Service Profile %(sp_id)s is used by some service instance." msgstr "" "Il profilo di servizio %(sp_id)s viene utilizzato dall'istanza di alcuni " "servizi." #, python-format msgid "Service Profile driver %(driver)s could not be found." msgstr "Impossibile trovare il driver %(driver)s del profilo di servizio." msgid "Service Profile is not enabled." msgstr "Il profilo di servizio non è abilitato." msgid "Service Profile needs either a driver or metainfo." msgstr "Il profilo di servizio necessita di un driver o di meta informazioni." #, python-format msgid "" "Service provider '%(provider)s' could not be found for service type " "%(service_type)s" msgstr "" "Provider del servizio '%(provider)s' non trovato per il tipo di servizio " "%(service_type)s" msgid "Service to handle DHCPv6 Prefix delegation." msgstr "Il servizio per gestire la delega prefisso DHCPv6." #, python-format msgid "Service type %(service_type)s does not have a default service provider" msgstr "" "Il tipo del servizio %(service_type)s non ha un provider del servizio " "predefinito" msgid "" "Set new timeout in seconds for new rpc calls after agent receives SIGTERM. " "If value is set to 0, rpc timeout won't be changed" msgstr "" "Impostare il nuovo timeout in secondi per le nuove chiamate rpc dopo che " "l'agent riceve SIGTERM. Se il valore è impostato su 0, il timeout rpc non " "verrà modificato" msgid "" "Set or un-set the don't fragment (DF) bit on outgoing IP packet carrying GRE/" "VXLAN tunnel." msgstr "" "Impostare o annullare l'impostazione del bit del frammento non DF sul " "pacchetto IP in uscita che trasporta il tunnel GRE/VXLAN." msgid "" "Set or un-set the tunnel header checksum on outgoing IP packet carrying GRE/" "VXLAN tunnel." msgstr "" "Impostare o annullare l'impostazione del checksum intestazione tunnel sul " "pacchetto IP in uscita che usail tunnel GRE/VXLAN." 
msgid "Shared address scope can't be unshared" msgstr "Impossibile annullare la condivisione di un ambito indirizzo condiviso" msgid "" "Specifying 'tenant_id' other than authenticated tenant in request requires " "admin privileges" msgstr "" "La specifica di 'tenant_id' diverso da quello autenticato nella richiesta, " "richiede i privilegi admin" msgid "String prefix used to match IPset names." msgstr "Prefisso stringa utilizzato per la corrispondenza con i nomi IPset." #, python-format msgid "Sub-project %s not installed." msgstr "Sottoprogetto %s non installato." msgid "Subnet for router interface must have a gateway IP" msgstr "La sottorete per l'interfaccia del router deve avere un IP gateway" msgid "" "Subnet has a prefix length that is incompatible with DHCP service enabled." msgstr "" "La sottorete ha una lunghezza prefisso incompatibile con il servizio DHCP " "abilitato." #, python-format msgid "Subnet pool %(subnetpool_id)s could not be found." msgstr "Impossibile trovare il pool di sottorete %(subnetpool_id)s" msgid "Subnet pool has existing allocations" msgstr "Il pool di sottoreti ha assegnazioni esistenti" msgid "Subnet used for the l3 HA admin network." msgstr "Sottorete utilizzata per la rete admin HA L3" msgid "" "Subnets hosted on the same network must be allocated from the same subnet " "pool." msgstr "" "Le sottoreti ospitate sulla stessa rete devono essere allocate dallo stesso " "pool di sottoreti." msgid "Suffix to append to all namespace names." msgstr "Suffisso da aggiungere a tutti i nomi dello spazio dei nomi." msgid "" "System-wide flag to determine the type of router that tenants can create. " "Only admin can override." msgstr "" "L'indicatore lato sistema per determinare il tipo di router che i tenant " "possono creare. Solo l'Admin può sovrascrivere." msgid "TCP Port to listen for metadata server requests." msgstr "Porta TCP in ascolto per le richieste del server di metadati." msgid "TCP Port used by Neutron metadata namespace proxy." msgstr "Porta TCP utilizzata dal proxy spazio dei nomi dei metadati Neutron." msgid "TCP Port used by Nova metadata server." msgstr "Porta TCP utilizzata dal server di metadati Nova." #, python-format msgid "TLD '%s' must not be all numeric" msgstr "TLD '%s' non deve contenere tutti caratteri numerici" msgid "TOS for vxlan interface protocol packets." msgstr "Pacchetti del protocollo dell'interfaccia TOS per vxlan." msgid "TTL for vxlan interface protocol packets." msgstr "Pacchetti del protocollo dell'interfaccia TTL per vxlan." #, python-format msgid "Table %s can only be queried by UUID" msgstr "La tabella %s può essere interrogata solo per UUID" #, python-format msgid "Tag %(tag)s could not be found." msgstr "Impossibile trovare il tag %(tag)s." #, python-format msgid "Tenant %(tenant_id)s not allowed to create %(resource)s on this network" msgstr "" "Tenant %(tenant_id)s non consentito per creare %(resource)s su questa rete" msgid "Tenant id for connecting to designate in admin context" msgstr "ID tenant per la connessione da designare nel contesto admin" msgid "Tenant name for connecting to designate in admin context" msgstr "Nome tenant per la connessione da designare nel contesto admin" msgid "Tenant network creation is not enabled." msgstr "La creazione della rete tenant non è consentita." msgid "Tenant-id was missing from quota request." msgstr "Tenant-id mancante dalla richiesta della quota." 
msgid "" "The 'gateway_external_network_id' option must be configured for this agent " "as Neutron has more than one external network." msgstr "" "L'opzione 'gateway_external_network_id' deve essere configurata per questo " "agent poiché Neutron ha più di una rete esterna." msgid "" "The DHCP agent will resync its state with Neutron to recover from any " "transient notification or RPC errors. The interval is number of seconds " "between attempts." msgstr "" "L'agent DHCP risincronizzerà il suo stato con Neutron per il ripristino da " "qualsiasi notifica transitoria o errore RPC. L'intervallo è il numero di " "secondi tra i tentativi." msgid "" "The DHCP server can assist with providing metadata support on isolated " "networks. Setting this value to True will cause the DHCP server to append " "specific host routes to the DHCP request. The metadata service will only be " "activated when the subnet does not contain any router port. The guest " "instance must be configured to request host routes via DHCP (Option 121). " "This option doesn't have any effect when force_metadata is set to True." msgstr "" "Il server DHCP può fornire il supporto di metadati nelle reti isolate. " "L'impostazione di questo valore su True farà in modo che il server DHCP " "aggiunga instradamenti host specifici alla richiesta DHCP. Il servizio di " "metadati verrà attivato solo quando la sottorete non contiene porte del " "router. L'istanza guest deve essere configurata per richiedere gli " "instradamenti host mediante DHCP (Opzione 121). Questa opzione non ha alcun " "effetto quando force_metadata è impostato su True." #, python-format msgid "" "The HA Network CIDR specified in the configuration file isn't valid; " "%(cidr)s." msgstr "" "Il CIDR della rete HA specificato nel file di configurazione non è valido; " "%(cidr)s." msgid "The UDP port to use for VXLAN tunnels." msgstr "La porta UDP da utilizzare per i tunnel VXLAN." #, python-format msgid "" "The address allocation request could not be satisfied because: %(reason)s" msgstr "" "La richiesta di assegnazione dell'indirizzo non può essere soddisfatta " "perché: %(reason)s" msgid "The advertisement interval in seconds" msgstr "L'intervallo di annuncio in secondi" #, python-format msgid "The allocation pool %(pool)s is not valid." msgstr "Il pool di allocazione %(pool)s non è valido." #, python-format msgid "" "The allocation pool %(pool)s spans beyond the subnet cidr %(subnet_cidr)s." msgstr "" "Il pool di allocazione %(pool)s si estende oltre il cidr della sottorete " "%(subnet_cidr)s." #, python-format msgid "" "The attribute '%(attr)s' is reference to other resource, can't used by sort " "'%(resource)s'" msgstr "" "L'attributo '%(attr)s' è di riferimento ad altre risorse, non può essere " "utilizzato dall'ordinamento '%(resource)s'" msgid "" "The base MAC address Neutron will use for VIFs. The first 3 octets will " "remain unchanged. If the 4th octet is not 00, it will also be used. The " "others will be randomly generated." msgstr "" "L'indirizzo MAC di base utilizzato da Neutron per i VIF. I primi 3 ottetti " "rimangono inalterati. Se il quarto ottetto non è 00, potrà anche essere " "utilizzato. Gli altri vengono generati casualmente. " msgid "" "The base mac address used for unique DVR instances by Neutron. The first 3 " "octets will remain unchanged. If the 4th octet is not 00, it will also be " "used. The others will be randomly generated. 
The 'dvr_base_mac' *must* be " "different from 'base_mac' to avoid mixing them up with MAC's allocated for " "tenant ports. A 4 octet example would be dvr_base_mac = fa:16:3f:4f:00:00. " "The default is 3 octet" msgstr "" "L'indirizzo mac di base utilizzato per istanze DVR univoche da Neutron. I " "primi 3 ottetti rimangono inalterati. Se il quarto ottetto non è 00, potrà " "anche essere utilizzato. Gli altri vengono generati casualmente. " "'dvr_base_mac' *deve* essere diverso da 'base_mac' per evitare la confusione " "con i MAC assegnati per le porte tenant. Un esempio di 4 ottetti è " "dvr_base_mac = fa:16:3f:4f:00:00. Il valore predefinito è 3 ottetti" msgid "" "The connection string for the native OVSDB backend. Requires the native " "ovsdb_interface to be enabled." msgstr "" "La stringa di connessione per il backend OVSDB nativo. Richiede che " "ovsdb_interface nativa sia abilitata." msgid "The core plugin Neutron will use" msgstr "Il plugin principale che Neutron utilizzerà" #, python-format msgid "" "The dns_name passed is a FQDN. Its higher level labels must be equal to the " "dns_domain option in neutron.conf, that has been set to '%(dns_domain)s'. It " "must also include one or more valid DNS labels to the left of " "'%(dns_domain)s'" msgstr "" "Il dns_name passato è un FQDN. Le sue etichette di livello superiore devono " "essere uguali all'opzione dns_domain in neutron.conf, che è stata impostata " "su '%(dns_domain)s'. Deve anche includere una o più etichette DNS valide a " "sinistra di '%(dns_domain)s'" #, python-format msgid "" "The dns_name passed is a PQDN and its size is '%(dns_name_len)s'. The " "dns_domain option in neutron.conf is set to %(dns_domain)s, with a length of " "'%(higher_labels_len)s'. When the two are concatenated to form a FQDN (with " "a '.' at the end), the resulting length exceeds the maximum size of " "'%(fqdn_max_len)s'" msgstr "" "Il dns_name passato è un PQDN e la sua dimensione è '%(dns_name_len)s'. " "L'opzione dns_domain in neutron.conf è impostata su %(dns_domain)s, con " "lunghezza '%(higher_labels_len)s'. Quando i due sono concatenati per formare " "un FQDN (con un '.' alla fine), la lunghezza risultante supera la dimensione " "massima di '%(fqdn_max_len)s'" msgid "The driver used to manage the DHCP server." msgstr "Il driver utilizzato per gestire il server DHCP." msgid "The driver used to manage the virtual interface." msgstr "Il driver utilizzato per gestire l'interfaccia virtuale." msgid "" "The email address to be used when creating PTR zones. If not specified, the " "email address will be admin@" msgstr "" "L'indirizzo email da utilizzare durante la creazione di zone PTR. Se non " "specificato, l'indirizzo email sarà admin@" #, python-format msgid "" "The following device_id %(device_id)s is not owned by your tenant or matches " "another tenants router." msgstr "" "Il seguente device_id %(device_id)s non è posseduto dal proprio tenant o " "corrisponde al router di un altro tenant." 
msgid "The host IP to bind to" msgstr "IP host per collegarsi a" msgid "The interface for interacting with the OVSDB" msgstr "L'interfaccia per l'interazione con OVSDB" msgid "" "The maximum number of items returned in a single response, value was " "'infinite' or negative integer means no limit" msgstr "" "Il numero massimo di elementi restituiti in una singola risposta, il valore " "era 'infinite' oppure un numero intero negativo che indica nessun limite" #, python-format msgid "" "The network %(network_id)s has been already hosted by the DHCP Agent " "%(agent_id)s." msgstr "" "La rete %(network_id)s è stata già ospitata dall'agent DHCP %(agent_id)s." #, python-format msgid "" "The network %(network_id)s is not hosted by the DHCP agent %(agent_id)s." msgstr "" "La rete %(network_id)s non è stata ospitata dall'agent DHCP %(agent_id)s." msgid "" "The network type to use when creating the HA network for an HA router. By " "default or if empty, the first 'tenant_network_types' is used. This is " "helpful when the VRRP traffic should use a specific network which is not the " "default one." msgstr "" "Il tipo di rete da utilizzare quando si crea la rete HA per un router HA. " "Per impostazione predefinita o se vuoto, è utilizzato il primo " "'tenant_network_types'. Ciò è utile quando il traffico VRRP deve utilizzare " "una rete specifica che non è quella predefinita." #, python-format msgid "The number of allowed address pair exceeds the maximum %(quota)s." msgstr "" "Il numero di coppie di indirizzi consentite supera quello massimo %(quota)s." msgid "" "The number of seconds the agent will wait between polling for local device " "changes." msgstr "" "Il numero di secondi in l'agent attenderà tra i polling per le modifiche " "dell'unità locale." msgid "" "The number of seconds to wait before respawning the ovsdb monitor after " "losing communication with it." msgstr "" "Il numero di secondi di attesa prima di generare nuovamente il monitor ovsdb " "dopo la perdita di comunicazione." msgid "The number of sort_keys and sort_dirs must be same" msgstr "Il numero di sort_keys e sort_dirs deve essere uguale" msgid "" "The path for API extensions. Note that this can be a colon-separated list of " "paths. For example: api_extensions_path = extensions:/path/to/more/exts:/" "even/more/exts. The __path__ of neutron.extensions is appended to this, so " "if your extensions are in there you don't need to specify them here." msgstr "" "Il percorso per le estensioni API. Può essere un elenco di percorsi separato " "dai due punti. Ad esempio: api_extensions_path = extensions:/path/to/more/" "exts:/even/more/exts. Il __percorso__ di neutron.extensions è aggiunto a " "tale percorso, per cui, se le estensioni si trovano nel percorso non è " "necessario specificarle." msgid "The physical network name with which the HA network can be created." msgstr "Il nome della rete fisica con cui può essere creata la rete HA." #, python-format msgid "The port '%s' was deleted" msgstr "La porta '%s' è stata eliminata" msgid "The port to bind to" msgstr "La porta a cui collegarsi" #, python-format msgid "The requested content type %s is invalid." msgstr "Il tipo di contenuto richiesto %s non è valido." msgid "The resource could not be found." msgstr "Impossibile trovare la risorsa." #, python-format msgid "" "The router %(router_id)s has been already hosted by the L3 Agent " "%(agent_id)s." msgstr "" "Il router %(router_id)s è stato già ospitato dall'agent L3 %(agent_id)s." 
msgid "" "The server has either erred or is incapable of performing the requested " "operation." msgstr "" "Il server è in errore o non è capace di eseguire l'operazione richiesta." msgid "The service plugins Neutron will use" msgstr "Il plugin del servizio che Neutron utilizzerà" #, python-format msgid "The subnet request could not be satisfied because: %(reason)s" msgstr "" "La richiesta della sottorete non può essere soddisfatta perché: %(reason)s" #, python-format msgid "The subproject to execute the command against. Can be one of: '%s'." msgstr "Il sottoprogetto su cui eseguire il comando. Può essere uno di: '%s'." msgid "The type of authentication to use" msgstr "Il tipo di autenticazione da utilizzare" #, python-format msgid "The value '%(value)s' for %(element)s is not valid." msgstr "Il valore '%(value)s' per %(element)s non è valido." msgid "" "The working mode for the agent. Allowed modes are: 'legacy' - this preserves " "the existing behavior where the L3 agent is deployed on a centralized " "networking node to provide L3 services like DNAT, and SNAT. Use this mode if " "you do not want to adopt DVR. 'dvr' - this mode enables DVR functionality " "and must be used for an L3 agent that runs on a compute host. 'dvr_snat' - " "this enables centralized SNAT support in conjunction with DVR. This mode " "must be used for an L3 agent running on a centralized node (or in single-" "host deployments, e.g. devstack)" msgstr "" "Modalità di funzionamento per l'agent. le modalità consentite sono: 'legacy' " "- questa conserva il comportamento esistente in cui l'agent L3 viene " "distribuito in un nodo di rete centralizzato per fornire i servizi L3 come " "DNAT e SNAT. Utilizzare questa modalità se non si desidera adottare DVR. " "'dvr' - questa modalità consente la funzionalità DVR e deve essere " "utilizzata per un agent L3 che viene eseguito su un host di elaborazione. " "'dvr_snat' - questa consente il supporto SNAT centralizzato insieme a DVR. " "Questa modalità deve essere utilizzata per un agent L3 in esecuzione su un " "nodo centralizzato (o in distribuzioni a singolo host, ad esempio devstack)" msgid "" "There are routers attached to this network that depend on this policy for " "access." msgstr "" "Sono presenti router collegati a questa rete che dipendono da questa " "politica per l'accesso." msgid "" "This will choose the web framework in which to run the Neutron API server. " "'pecan' is a new experiemental rewrite of the API server." msgstr "" "Verrà scelto il framework web in cui eseguire il server Neutron API. 'pecan' " "è una nuova scrittura sperimentale del server API." msgid "Timeout" msgstr "Timeout" msgid "" "Timeout in seconds for ovs-vsctl commands. If the timeout expires, ovs " "commands will fail with ALARMCLOCK error." msgstr "" "Timeout in secondi per i comandi ovs-vsctl. Se il timeout scade, i comandi " "ovs non riescono e restituiscono l'errore ALARMCLOCK." msgid "" "Timeout in seconds to wait for a single OpenFlow request. Used only for " "'native' driver." msgstr "" "Timeout in secondi da attendere per una singola richiesta OpenFlow. " "Utilizzato solo per driver 'native'." msgid "" "Timeout in seconds to wait for the local switch connecting the controller. " "Used only for 'native' driver." msgstr "" "Timeout in secondi da attendere per la connessione dello switch locale al " "controller. Utilizzato solo per driver 'native'." msgid "" "Too long prefix provided. New name would exceed given length for an " "interface name." 
msgstr "" "Fornito prefisso troppo lungo. Il nuovo nome supererebbe la lunghezza " "specificata per un nome di interfaccia." msgid "Too many availability_zone_hints specified" msgstr "Troppi availability_zone_hints specificati" msgid "" "True to delete all ports on all the OpenvSwitch bridges. False to delete " "ports created by Neutron on integration and external network bridges." msgstr "" "True per eliminare tutte le porte su tutti i bridge OpenvSwitch. False per " "eliminare le porte create da Neutron nell'integrazione e i bridge di reti " "esterne." msgid "Tunnel IP value needed by the ML2 plugin" msgstr "Valore IP tunnel IP richiesto dal plugin ML2" msgid "Tunnel bridge to use." msgstr "Bridge del tunnel da utilizzare." msgid "" "Type of the nova endpoint to use. This endpoint will be looked up in the " "keystone catalog and should be one of public, internal or admin." msgstr "" "Tipo di endpoint nova da utilizzare. Questo endpoint verrà ricercato nel " "catalogo keystone e deve essere public, internal o admin." msgid "URL for connecting to designate" msgstr "URL per la connessione da designare" msgid "URL to database" msgstr "URL per il database" #, python-format msgid "Unable to access %s" msgstr "Impossibile accedere a %s" #, python-format msgid "" "Unable to allocate subnet with prefix length %(prefixlen)s, maximum allowed " "prefix is %(max_prefixlen)s." msgstr "" "Impossibile assegnare la sottorete con lunghezza del prefisso %(prefixlen)s, " "il prefisso massimo consentito è %(max_prefixlen)s." #, python-format msgid "" "Unable to allocate subnet with prefix length %(prefixlen)s, minimum allowed " "prefix is %(min_prefixlen)s." msgstr "" "Impossibile assegnare la sottorete con lunghezza del prefisso %(prefixlen)s, " "il prefisso minimo consentito è %(min_prefixlen)s." #, python-format msgid "Unable to calculate %(address_type)s address because of:%(reason)s" msgstr "" "Impossibile calcolare l'indirizzo %(address_type)s a causa di: %(reason)s" #, python-format msgid "" "Unable to complete operation for %(router_id)s. The number of routes exceeds " "the maximum %(quota)s." msgstr "" "Impossibile completare l'operazione per %(router_id)s. Il numero di " "instradamenti supera la quota massima %(quota)s." #, python-format msgid "" "Unable to complete operation for %(subnet_id)s. The number of DNS " "nameservers exceeds the limit %(quota)s." msgstr "" "Impossibile completare l'operazione per %(subnet_id)s. Il numero di server " "nome DNS supera il limite %(quota)s." #, python-format msgid "" "Unable to complete operation for %(subnet_id)s. The number of host routes " "exceeds the limit %(quota)s." msgstr "" "Impossibile completare l'operazione per %(subnet_id)s. Il numero di route " "host supera il limite %(quota)s." #, python-format msgid "" "Unable to complete operation on address scope %(address_scope_id)s. There " "are one or more subnet pools in use on the address scope" msgstr "" "Impossibile completare l'operazione nell'ambito indirizzo " "%(address_scope_id)s. Esiste uno o più pool di sottoreti in uso nell'ambito " "indirizzo" #, python-format msgid "Unable to convert value in %s" msgstr "Impossibile convertire il valore in %s" msgid "Unable to create the Agent Gateway Port" msgstr "Impossibile creare la porta gateway agent" msgid "Unable to create the SNAT Interface Port" msgstr "Impossibile creare la porta dell'interfaccia SNAT" #, python-format msgid "" "Unable to create the flat network. Physical network %(physical_network)s is " "in use." 
msgstr "" "Impossibile creare la rete flat. La rete fisica %(physical_network)s è in " "uso." msgid "" "Unable to create the network. No available network found in maximum allowed " "attempts." msgstr "" "Impossibile creare la rete. Non è stata trovata alcuna rete nel numero " "massimo di tentativi consentiti." #, python-format msgid "Unable to delete subnet pool: %(reason)s." msgstr "Impossibile eliminare il pool di sottorete: %(reason)s." #, python-format msgid "Unable to determine mac address for %s" msgstr "Impossibile determinare l'indirizzo mac per %s" #, python-format msgid "Unable to find '%s' in request body" msgstr "Impossibile trovare '%s' nel corpo della richiesta" #, python-format msgid "Unable to find IP address %(ip_address)s on subnet %(subnet_id)s" msgstr "" "Impossibile trovare l'indirizzo IP %(ip_address)s nella sottorete " "%(subnet_id)s" #, python-format msgid "Unable to find resource name in %s" msgstr "Impossibile trovare il nome risorsa in %s" msgid "Unable to generate IP address by EUI64 for IPv4 prefix" msgstr "Impossibile generare l'indirizzo IP da EUI64 per il prefisso IPv4" #, python-format msgid "Unable to generate unique DVR mac for host %(host)s." msgstr "Impossibile generare mac DVR univoco per l'host %(host)s." #, python-format msgid "Unable to generate unique mac on network %(net_id)s." msgstr "Impossibile generare mac univoco sulla rete %(net_id)s." #, python-format msgid "" "Unable to identify a target field from:%s. Match should be in the form " "%%()s" msgstr "" "Impossibile identificare un campo di destinazione da:%s. La corrispondenza " "deve essere presente nel modulo %%()s" msgid "Unable to provide external connectivity" msgstr "Impossibile fornire la connettività esterna" msgid "Unable to provide tenant private network" msgstr "Impossibile fornire la rete privata del tenant" #, python-format msgid "" "Unable to reconfigure sharing settings for network %(network)s. Multiple " "tenants are using it." msgstr "" "Impossibile riconfigurare le impostazioni di condivisione per la rete " "%(network)s. Più tenants la stanno utilizzando." #, python-format msgid "Unable to update address scope %(address_scope_id)s : %(reason)s" msgstr "" "Impossibile aggiornare l'ambito indirizzo %(address_scope_id)s : %(reason)s" #, python-format msgid "Unable to update the following object fields: %(fields)s" msgstr "Impossibile aggiornare i seguenti campi oggetto: %(fields)s" #, python-format msgid "" "Unable to verify match:%(match)s as the parent resource: %(res)s was not " "found" msgstr "" "Impossibile verificare la corrispondenza:%(match)s come risorsa parent: " "%(res)s non è stata trovata" #, python-format msgid "Unexpected label for script %(script_name)s: %(labels)s" msgstr "Etichetta imprevista per lo script %(script_name)s: %(labels)s" #, python-format msgid "Unexpected number of alembic branch points: %(branchpoints)s" msgstr "Numero non previsto di punti di ramo alembic: %(branchpoints)s" #, python-format msgid "Unexpected response code: %s" msgstr "Imprevisto codice di risposta: %s" #, python-format msgid "Unexpected response: %s" msgstr "Risposta imprevista: %s" #, python-format msgid "Unit name '%(unit)s' is not valid." msgstr "Il nome unità '%(unit)s' non è valido." msgid "Unknown API version specified" msgstr "Specificata versione API sconosciuta" #, python-format msgid "Unknown address type %(address_type)s" msgstr "Tipo di indirizzo sconosciuto %(address_type)s" #, python-format msgid "Unknown attribute '%s'." msgstr "Attributo sconosciuto '%s'." 
#, python-format msgid "Unknown chain: %r" msgstr "Catena sconosciuta: %r" #, python-format msgid "Unknown network type %(network_type)s." msgstr "Tipo di rete %(network_type)s sconosciuto." #, python-format msgid "Unknown quota resources %(unknown)s." msgstr "Risorse quota sconosciute %(unknown)s." msgid "Unmapped error" msgstr "Errore non associato" msgid "Unrecognized action" msgstr "Azione non riconosciuta" #, python-format msgid "Unrecognized attribute(s) '%s'" msgstr "Attributi non riconosciuti '%s'" msgid "Unrecognized field" msgstr "Campo non riconosciuto" msgid "Unspecified minimum subnet pool prefix." msgstr "Prefisso minimo pool di sottorete non specificato." msgid "Unsupported Content-Type" msgstr "Tipo-contenuto non supportato" #, python-format msgid "Unsupported network type %(net_type)s." msgstr "Tipo di rete non supportato %(net_type)s." #, python-format msgid "Unsupported port state: %(port_state)s." msgstr "Stato porta non supportato: %(port_state)s." msgid "Unsupported request type" msgstr "Tipo di richiesta non supportato" msgid "Updating default security group not allowed." msgstr "L'aggiornamento del gruppo di sicurezza predefinito non è consentito." msgid "" "Use ML2 l2population mechanism driver to learn remote MAC and IPs and " "improve tunnel scalability." msgstr "" "utilizzare il driver del meccanismo ML2 l2population per conoscere MAC e IP " "remoti e migliorare la scalabilità del tunnel." msgid "Use broadcast in DHCP replies." msgstr "Utilizzare broadcast nelle risposte DHCP." msgid "Use either --delta or relative revision, not both" msgstr "Utilizzare --revisione delta o relativa, non entrambe" msgid "" "Use ipset to speed-up the iptables based security groups. Enabling ipset " "support requires that ipset is installed on L2 agent node." msgstr "" "Utilizzare ipset per velocizzare i gruppi di sicurezza basati su iptable. " "L'abilitazione del supporto ipset richiede che ipset sia installato sul nodo " "dell'agent L2." msgid "" "Use the root helper when listing the namespaces on a system. This may not be " "required depending on the security configuration. If the root helper is not " "required, set this to False for a performance improvement." msgstr "" "Utilizzare il root helper per visualizzare gli spazi dei nomi in un sistema " "operativo. Ciò potrebbe non essere richiesto in base alla configurazione di " "sicurezza. Se il root helper non è richiesto, impostare su False per un " "miglioramento delle prestazioni." msgid "" "Use veths instead of patch ports to interconnect the integration bridge to " "physical networks. Support kernel without Open vSwitch patch port support so " "long as it is set to True." msgstr "" "Utilizzare veths invece delle porte patch per interconnettere il bridge di " "integrazione alle reti fisiche. Supporta kernel senza supporto per porta " "patch Open vSwitch se impostato su True." msgid "User (uid or name) running metadata proxy after its initialization" msgstr "" "Utente (uid o nome) che esegue il proxy di metadati dopo la relativa " "inizializzazione" msgid "" "User (uid or name) running metadata proxy after its initialization (if " "empty: agent effective user)." msgstr "" "Utente (uid o nome) che esegue il proxy di metadati dopo la relativa " "inizializzazione (se vuoto: utente operativo dell'agent)." 
msgid "User (uid or name) running this process after its initialization" msgstr "" "Utente (uid o name) che esegue questo processo dopo la relativa " "inizializzazione" msgid "Username for connecting to designate in admin context" msgstr "Nome utente per la connessione da designare nel contesto admin" msgid "" "Uses veth for an OVS interface or not. Support kernels with limited " "namespace support (e.g. RHEL 6.5) so long as ovs_use_veth is set to True." msgstr "" "Utilizza o meno veth per un'interfaccia OVS. Supporta kernel con supporto " "dello spazio dei nomi limitato (ad esempio RHEL 6.5) se ovs_use_veth è " "impostato su True." msgid "VRRP authentication password" msgstr "Password di autenticazione VRRP" msgid "VRRP authentication type" msgstr "Tipo di autenticazione VRRP" msgid "VXLAN network unsupported." msgstr "Rete VXLAN non supportata." #, python-format msgid "" "Validation of dictionary's keys failed. Expected keys: %(expected_keys)s " "Provided keys: %(provided_keys)s" msgstr "" "La convalida delle chiavi del dizionario non è riuscita. Chiavi previste: " "%(expected_keys)s Chiavi fornite: %(provided_keys)s" #, python-format msgid "Validator '%s' does not exist." msgstr "Il programma di convalida '%s' non eiste." #, python-format msgid "Value %(value)s in mapping: '%(mapping)s' not unique" msgstr "Valore %(value)s nell'associazione: '%(mapping)s' non univoco" #, python-format msgid "" "Value of %(parameter)s has to be multiple of %(number)s, with maximum value " "of %(maximum)s and minimum value of %(minimum)s" msgstr "" "Il valore di %(parameter)s deve essere multiplo di %(number)s, con valore " "massimo di %(maximum)s e valore minimo di %(minimum)s" msgid "" "Value of host kernel tick rate (hz) for calculating minimum burst value in " "bandwidth limit rules for a port with QoS. See kernel configuration file for " "HZ value and tc-tbf manual for more information." msgstr "" "Il valore della velocità di tick (hz) del kernel host per il calcolo del " "valore burst minimo nelle regole del limite di larghezza di banda per una " "porta con QoS. Vedere il file di configurazione kernel per il valore HZ e il " "manuale tc-tbf per ulteriori informazioni." msgid "" "Value of latency (ms) for calculating size of queue for a port with QoS. See " "tc-tbf manual for more information." msgstr "" "Il valore di latenza (ms) per il calcolo della dimensione della coda per una " "porta con QoS. Per ulteriori informazioni, vedere il manuale tc-tbf." msgid "" "Watch file log. Log watch should be disabled when metadata_proxy_user/group " "has no read/write permissions on metadata proxy log file." msgstr "" "Osserva log file. Logwatch deve essere disabilitato quando " "metadata_proxy_user/group non dispone delle autorizzazioni di lettura/" "scrittura sul file di logdel proxy di metadati." msgid "" "When external_network_bridge is set, each L3 agent can be associated with no " "more than one external network. This value should be set to the UUID of that " "external network. To allow L3 agent support multiple external networks, both " "the external_network_bridge and gateway_external_network_id must be left " "empty." msgstr "" "Quando external_network_bridge è impostato, ciascun agent L3 può essere " "associato con non più di una rete esterna. Questo valore non deve essere " "impostato sull'UUID della rete esterna. Per consentire all'agent L3 di " "supportare più reti esterne, external_network_bridge e " "gateway_external_network_id devono essere lasciati vuoti." 
msgid "" "When proxying metadata requests, Neutron signs the Instance-ID header with a " "shared secret to prevent spoofing. You may select any string for a secret, " "but it must match here and in the configuration used by the Nova Metadata " "Server. NOTE: Nova uses the same config key, but in [neutron] section." msgstr "" "Quando si trasferiscono richieste di metadati, Neutron firma l'intestazione " "Instance-ID con un segreto condiviso per evitare lo spoofing. È possibile " "selezionare una qualsiasi stringa per un segreto ma deve corrispondere qui e " "nella configurazione utilizzata da Nova Metadata Server. NOTA: Nova utilizza " "la stessa chiave di configurazione, ma nella sezione [neutron]." msgid "" "Where to store Neutron state files. This directory must be writable by the " "agent." msgstr "" "Dove memorizzare i file di stato Neutron. Questa directory deve essere " "scrivibile dall'agent." msgid "" "With IPv6, the network used for the external gateway does not need to have " "an associated subnet, since the automatically assigned link-local address " "(LLA) can be used. However, an IPv6 gateway address is needed for use as the " "next-hop for the default route. If no IPv6 gateway address is configured " "here, (and only then) the neutron router will be configured to get its " "default route from router advertisements (RAs) from the upstream router; in " "which case the upstream router must also be configured to send these RAs. " "The ipv6_gateway, when configured, should be the LLA of the interface on the " "upstream router. If a next-hop using a global unique address (GUA) is " "desired, it needs to be done via a subnet allocated to the network and not " "through this parameter. " msgstr "" "Con IPv6, non è necessario che la rete utilizzata per il gateway esterno " "disponga di una sottorete associata, poiché verrà utilizzato il LLA (link-" "local address) assegnato automaticamente. Tuttavia, è necessario un " "indirizzo gateway IPv6 per l'utilizzo come successivo hop per " "l'instradamento predefinito. Se qui non è configuratonessun indirizzo " "gateway Ipv6 (e solo poi) verrà configurato il router Neutron per ottenere " "il relativo instradamento predefinito da RA (Router Advertisement) dal " "router upstream; in tal caso il router upstream deve essere anche " "configuratoper inviare questi RA. Ipv6_gateway, quando configurato, " "deveessere il LLA dell'interfaccia sul router upstream. Se si desidera un " "hop successivo che utilizzi un GUA (Global Uunique Address) è necessario " "ottenerlo mediante una sottorete assegnata alla rete e non attraverso questo " "parametro." msgid "You must implement __call__" msgstr "È necessario implementare __call__" msgid "" "You must provide a config file for bridge - either --config-file or " "env[NEUTRON_TEST_CONFIG_FILE]" msgstr "" "È necessario fornire un file di configurazione per il bridge - --config-file " "o env[NEUTRON_TEST_CONFIG_FILE]" msgid "You must provide a revision or relative delta" msgstr "È necessario fornire una revisione o delta relativo" msgid "a subnetpool must be specified in the absence of a cidr" msgstr "un pool di sottorete deve essere specificato in assenza di un cidr" msgid "add_ha_port cannot be called inside of a transaction." msgstr "add_ha_port non può essere richiamato all'interno di una transazione." msgid "allocation_pools allowed only for specific subnet requests." msgstr "" "allocation_pools consentita solo per specifiche richieste della sottorete." 
msgid "allocation_pools are not in the subnet" msgstr "allocation_pools non presenti nella sottorete" msgid "allocation_pools use the wrong ip version" msgstr "allocation_pools utilizzano la versione IP errata" msgid "already a synthetic attribute" msgstr "è già presente un attributo synthetic" msgid "binding:profile value too large" msgstr "valore binding:profile troppo esteso" #, python-format msgid "cannot perform %(event)s due to %(reason)s" msgstr "impossibile esegure %(event)s a causa di %(reason)s" msgid "cidr and prefixlen must not be supplied together" msgstr "non devono essere forniti insieme cidr e prefixlen" #, python-format msgid "dhcp_agents_per_network must be >= 1. '%s' is invalid." msgstr "dhcp_agents_per_network deve essere >= 1. '%s' non è valido." msgid "dns_domain cannot be specified without a dns_name" msgstr "dns_domain non può essere specificato senza un dns_name" msgid "dns_name cannot be specified without a dns_domain" msgstr "dns_name non può essere specificato senza un dns_domain" msgid "fixed_ip_address cannot be specified without a port_id" msgstr "Impossibile specificare un fixed_ip_address senza un porta_id" #, python-format msgid "gateway_ip %s is not in the subnet" msgstr "gateway_ip %s non presente nella sottorete" #, python-format msgid "has device owner %s" msgstr "ha il proprietario del dispositivo %s" msgid "in use" msgstr "in uso" #, python-format msgid "ip command failed on device %(dev_name)s: %(reason)s" msgstr "comando ip non riuscito sul dispositivo %(dev_name)s: %(reason)s" #, python-format msgid "ip command failed: %(reason)s" msgstr "Comando IP non riuscito: %(reason)s" #, python-format msgid "ip link capability %(capability)s is not supported" msgstr "La funzione ip link %(capability)s non è supportata" #, python-format msgid "ip link command is not supported: %(reason)s" msgstr "Il comando ip link non è supportato: %(reason)s" msgid "ip_version must be specified in the absence of cidr and subnetpool_id" msgstr "è necessario specificare ip_version in assenza di cidr e subnetpool_id" msgid "ipv6_address_mode is not valid when ip_version is 4" msgstr "ipv6_address_mode non è valida quando ip_version è 4" msgid "ipv6_ra_mode is not valid when ip_version is 4" msgstr "ipv6_ra_mode non è valida quando ip_version è 4" msgid "" "ipv6_ra_mode or ipv6_address_mode cannot be set when enable_dhcp is set to " "False." msgstr "" "ipv6_ra_mode o ipv6_address_mode non possono essere impostati quando " "enable_dhcp è impostato su False." #, python-format msgid "" "ipv6_ra_mode set to '%(ra_mode)s' with ipv6_address_mode set to " "'%(addr_mode)s' is not valid. If both attributes are set, they must be the " "same value" msgstr "" "ipv6_ra_mode impostato su '%(ra_mode)s' con ipv6_address_mode impostato su " "'%(addr_mode)s' non è valido. Se sono impostati entrambi gli attributi, essi " "devono avere lo stesso valore" msgid "mac address update" msgstr "aggiornamento indirizzo mac" #, python-format msgid "" "max_l3_agents_per_router %(max_agents)s config parameter is not valid. It " "has to be greater than or equal to min_l3_agents_per_router %(min_agents)s." msgstr "" "Il parametro di configurazione max_l3_agents_per_router %(max_agents)s non è " "valido. Deve essere maggiore o uguale a min_l3_agents_per_router " "%(min_agents)s." 
msgid "must provide exactly 2 arguments - cidr and MAC" msgstr "è necessario fornire esattamente 2 argomenti - cidr e MAC" msgid "network_type required" msgstr "network_type obbligatorio" #, python-format msgid "network_type value '%s' not supported" msgstr "Valore network_type '%s' non supportato" msgid "new subnet" msgstr "nuova sottorete" #, python-format msgid "physical_network '%s' unknown for VLAN provider network" msgstr "physical_network '%s' sconosciuta per la rete del provider VLAN" #, python-format msgid "physical_network '%s' unknown for flat provider network" msgstr "physical_network '%s' sconosciuta per rete flat del provider" msgid "physical_network required for flat provider network" msgstr "physical_network richiesta per rete flat del provider" #, python-format msgid "provider:physical_network specified for %s network" msgstr "provider:physical_network specificata per la rete %s" #, python-format msgid "rbac_db_model not found in %s" msgstr "rbac_db_model non trovato in %s" msgid "record" msgstr "record" msgid "respawn_interval must be >= 0 if provided." msgstr "respawn_interval deve essere >= 0 se fornito." #, python-format msgid "segmentation_id out of range (%(min)s through %(max)s)" msgstr "segmentation_id fuori dall'intervallo (da %(min)s a %(max)s)" msgid "segmentation_id requires physical_network for VLAN provider network" msgstr "" "segmentation_id richiede physical_network per la rete del provider VLAN" msgid "shared attribute switching to synthetic" msgstr "passaggio dell'attributo condiviso su synthetic" #, python-format msgid "" "subnetpool %(subnetpool_id)s cannot be updated when associated with shared " "address scope %(address_scope_id)s" msgstr "" "Il pool di sottorete %(subnetpool_id)s non può essere aggiornato quando " "associato all'ambito indirizzo condiviso %(address_scope_id)s" msgid "subnetpool_id and use_default_subnetpool cannot both be specified" msgstr "" "subnetpool_id e use_default_subnetpool non possono essere entrambi " "specificati" msgid "the nexthop is not connected with router" msgstr "l'hop successivo non è connesso al router" msgid "the nexthop is used by router" msgstr "l'hop successivo è utilizzato dal router" #, python-format msgid "unable to load %s" msgstr "impossibile caricare %s" msgid "" "uuid provided from the command line so external_process can track us via /" "proc/cmdline interface." msgstr "" "uuid fornito da riga comandi pertanto external_process può tenere traccia " "dell'utente mediante l'interfaccia /proc/cmdline." neutron-8.4.0/neutron/locale/ja/0000775000567000056710000000000013044373210017676 5ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/locale/ja/LC_MESSAGES/0000775000567000056710000000000013044373210021463 5ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/locale/ja/LC_MESSAGES/neutron.po0000664000567000056710000056411513044372760023542 0ustar jenkinsjenkins00000000000000# Translations template for neutron. # Copyright (C) 2015 ORGANIZATION # This file is distributed under the same license as the neutron project. # # Translators: # Akihiro Motoki , 2013 # Akihiro Motoki , 2013 # Sasuke(Kyohei MORIYAMA) <>, 2015 # NachiUeno , 2013 # NachiUeno , 2013 # Sasuke(Kyohei MORIYAMA) <>, 2015 # Tomoyuki KATO , 2013 # Akihiro Motoki , 2015. #zanata # OpenStack Infra , 2015. #zanata # Tom Cocozzello , 2015. #zanata # Akihiro Motoki , 2016. #zanata # Tsutomu Kimura , 2016. #zanata # Yuta Hono , 2016. #zanata # 笹原 昌美 , 2016. 
#zanata msgid "" msgstr "" "Project-Id-Version: neutron 8.2.1.dev52\n" "Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n" "POT-Creation-Date: 2016-09-01 18:10+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" "PO-Revision-Date: 2016-06-21 12:39+0000\n" "Last-Translator: Yuta Hono \n" "Language: ja\n" "Plural-Forms: nplurals=1; plural=0;\n" "Generated-By: Babel 2.0\n" "X-Generator: Zanata 3.7.3\n" "Language-Team: Japanese\n" #, python-format msgid "" "\n" "Command: %(cmd)s\n" "Exit code: %(code)s\n" "Stdin: %(stdin)s\n" "Stdout: %(stdout)s\n" "Stderr: %(stderr)s" msgstr "" "\n" "コマンド: %(cmd)s\n" "終了コード: %(code)s\n" "Stdin: %(stdin)s\n" "Stdout: %(stdout)s\n" "Stderr: %(stderr)s" #, python-format msgid "" "%(branch)s HEAD file does not match migration timeline head, expected: " "%(head)s" msgstr "" "%(branch)s の HEAD ファイルが予期される移行のタイムラインヘッドと合致しませ" "ん: %(head)s" #, python-format msgid "%(driver)s: Internal driver error." msgstr "%(driver)s: 内部ドライバーエラー。" #, python-format msgid "%(id)s is not a valid %(type)s identifier" msgstr "%(id)s は、有効な %(type)s ID ではありません" #, python-format msgid "" "%(invalid_dirs)s is invalid value for sort_dirs, valid value is '%(asc)s' " "and '%(desc)s'" msgstr "" "%(invalid_dirs)s は sort_dirs には無効な値です。有効な値は '%(asc)s' および " "'%(desc)s' です" #, python-format msgid "%(key)s prohibited for %(tunnel)s provider network" msgstr "%(key)s は %(tunnel)s プロバイダーネットワークで禁止されています" #, python-format msgid "" "%(method)s called with network settings %(current)s (original settings " "%(original)s) and network segments %(segments)s" msgstr "" "%(method)s が、ネットワーク設定 %(current)s (元の設定 %(original)s) および" "ネットワークセグメント %(segments)s を使用して呼び出されました" #, python-format msgid "" "%(method)s called with port settings %(current)s (original settings " "%(original)s) host %(host)s (original host %(original_host)s) vif type " "%(vif_type)s (original vif type %(original_vif_type)s) vif details " "%(vif_details)s (original vif details %(original_vif_details)s) binding " "levels %(levels)s (original binding levels %(original_levels)s) on network " "%(network)s with segments to bind %(segments_to_bind)s" msgstr "" "ネットワーク %(network)s 上で、バインド対象のセグメント %(segments_to_bind)s " "とともに、 ポート設定 %(current)s (original settings %(original)s)、ホスト " "%(host)s (original host %(original_host)s)、VIF タイプ %(vif_type)s " "(original vif type %(original_vif_type)s)、VIF 詳細 %(vif_details)s " "(original vif details %(original_vif_details)s)、バインドレベル %(levels)s " "(original binding levels %(original_levels)s) を指定して呼び出した %(method)s" #, python-format msgid "" "%(method)s called with subnet settings %(current)s (original settings " "%(original)s)" msgstr "" "サブネット設定 %(current)s を使用して %(method)s が呼び出されました (元の設" "定: %(original)s)" #, python-format msgid "%(method)s failed." msgstr "%(method)s が失敗しました。" #, python-format msgid "%(name)s '%(addr)s' does not match the ip_version '%(ip_version)s'" msgstr "%(name)s '%(addr)s' が ip_version '%(ip_version)s' と一致しません" #, python-format msgid "%(param)s must be in %(range)s range." 
msgstr "%(param)s は %(range)s の範囲内である必要があります。" #, python-format msgid "%s cannot be called while in offline mode" msgstr "オフラインモードでは、%s を呼び出せません" #, python-format msgid "%s is invalid attribute for sort_key" msgstr "%s は、sort_key には無効な属性です" #, python-format msgid "%s is invalid attribute for sort_keys" msgstr "%sは、sort_keys には無効な属性です" #, python-format msgid "%s is not a valid VLAN tag" msgstr "%s は有効な VLAN タグではありません" #, python-format msgid "%s must be specified" msgstr "%s を指定する必要があります" #, python-format msgid "%s must implement get_port_from_device or get_ports_from_devices." msgstr "" "%s は get_port_from_device または get_ports_from_devices を実装していなければ" "なりません。" #, python-format msgid "%s prohibited for VLAN provider network" msgstr "%s は VLAN プロバイダーネットワークで禁止されています" #, python-format msgid "%s prohibited for flat provider network" msgstr "%s は flat プロバイダーネットワークで禁止されています" #, python-format msgid "%s prohibited for local provider network" msgstr "%s は local プロバイダーネットワークで禁止されています" #, python-format msgid "" "'%(data)s' contains '%(length)s' characters. Adding a domain name will cause " "it to exceed the maximum length of a FQDN of '%(max_len)s'" msgstr "" "'%(data)s' には '%(length)s' 文字が含まれます。ドメイン名を追加すると、FQDN " "の最大長である '%(max_len)s' を超えます" #, python-format msgid "" "'%(data)s' contains '%(length)s' characters. Adding a sub-domain will cause " "it to exceed the maximum length of a FQDN of '%(max_len)s'" msgstr "" "'%(data)s' には '%(length)s' 文字が含まれます。サブドメインを追加すると、" "FQDN の最大長である '%(max_len)s' を超えます" #, python-format msgid "'%(data)s' exceeds maximum length of %(max_len)s" msgstr "'%(data)s が最大長 %(max_len)s を超えています" #, python-format msgid "'%(data)s' is not an accepted IP address, '%(ip)s' is recommended" msgstr "" "'%(data)s' は許容可能な IP アドレスではありません。'%(ip)s' を推奨します。" #, python-format msgid "'%(data)s' is not in %(valid_values)s" msgstr "'%(data)s が %(valid_values)s の中にありません" #, python-format msgid "'%(data)s' is too large - must be no larger than '%(limit)d'" msgstr "'%(data)s' は大きすぎます。'%(limit)d' を超えてはなりません" #, python-format msgid "'%(data)s' is too small - must be at least '%(limit)d'" msgstr "'%(data)s' は小さすぎます。少なくとも '%(limit)d' でなければなりません" #, python-format msgid "'%(data)s' isn't a recognized IP subnet cidr, '%(cidr)s' is recommended" msgstr "" "'%(data)s' は認識される IP サブネット cidr ではありません。'%(cidr)s' が推奨" "されます" #, python-format msgid "'%(data)s' not a valid PQDN or FQDN. Reason: %(reason)s" msgstr "'%(data)s' は有効な PQDN でも FQDN でもありません。理由: %(reason)s" #, python-format msgid "'%(host)s' is not a valid nameserver. %(msg)s" msgstr "'%(host)s' は有効なネームサーバーではありません。%(msg)s" #, python-format msgid "'%s' Blank strings are not permitted" msgstr "'%s' のブランク文字列は許可されていません" #, python-format msgid "'%s' cannot be converted to boolean" msgstr "'%s' はブール値に変換できません" #, python-format msgid "'%s' cannot be converted to lowercase string" msgstr "'%s' は小文字の文字列に変換することはできません" #, python-format msgid "'%s' contains whitespace" msgstr "'%s' に空白が含まれています" #, python-format msgid "'%s' exceeds the 255 character FQDN limit" msgstr "'%s' は FQDN の最大長である 255 文字を超えます" #, python-format msgid "'%s' is a FQDN. 
It should be a relative domain name" msgstr "'%s' は FQDN です。相対ドメイン名である必要があります" #, python-format msgid "'%s' is not a FQDN" msgstr "'%s' は FQDN ではありません" #, python-format msgid "'%s' is not a dictionary" msgstr "'%s' はディクショナリーではありません" #, python-format msgid "'%s' is not a list" msgstr "'%s' はリストではありません" #, python-format msgid "'%s' is not a valid IP address" msgstr "'%s' が有効な IP アドレスではありません" #, python-format msgid "'%s' is not a valid IP subnet" msgstr "'%s' は有効な IP サブネットではありません" #, python-format msgid "'%s' is not a valid MAC address" msgstr "'%s' が有効な MAC アドレスではありません" #, python-format msgid "'%s' is not a valid RBAC object type" msgstr "'%s' は RBAC の有効なオブジェクトタイプではありません" #, python-format msgid "'%s' is not a valid UUID" msgstr "'%s' は有効な UUID ではありません" #, python-format msgid "'%s' is not a valid boolean value" msgstr "'%s' は有効なブール値ではありません" #, python-format msgid "'%s' is not a valid input" msgstr "'%s' は有効な入力ではありません" #, python-format msgid "'%s' is not a valid string" msgstr "'%s' が有効な文字列ではありません" #, python-format msgid "'%s' is not an integer" msgstr "'%s' は整数ではありません" #, python-format msgid "'%s' is not an integer or uuid" msgstr "'%s' は整数または UUID ではありません" #, python-format msgid "'%s' is not of the form =[value]" msgstr "'%s' は =[value] 形式ではありません" #, python-format msgid "'%s' is not supported for filtering" msgstr "'%s' はフィルタリングではサポートされません" #, python-format msgid "'%s' must be a non negative decimal." msgstr "'%s' は負の値ではない 10 進数値である必要があります。" #, python-format msgid "'%s' should be non-negative" msgstr "'%s' は負の値以外でなければなりません" msgid "'.' searches are not implemented" msgstr "'.' 検索は実装されていません" #, python-format msgid "'module' object has no attribute '%s'" msgstr "'module' オブジェクトに属性 '%s' がありません" msgid "'port_max' is smaller than 'port_min'" msgstr "'port_max' が 'port_min' よりも小さくなっています" msgid "" "(Deprecated. Use '--subproject neutron-SERVICE' instead.) The advanced " "service to execute the command against." msgstr "" "(提供を終了しています。代わりに '--subproject neutron-SERVICE' を使用してくだ" "さい。) コマンドの実行の対象となる拡張サービス。" msgid "0 is not allowed as CIDR prefix length" msgstr "0 は CIDR プレフィックス長として許可されていません" msgid "" "32-bit BGP identifier, typically an IPv4 address owned by the system running " "the BGP DrAgent." msgstr "" "32 ビットの BGP ID。通常は、BGP DrAgent を実行するシステムが所有する IPv4 ア" "ドレスを指します。" msgid "A QoS driver must be specified" msgstr "QoS ドライバーを指定する必要があります" msgid "A cidr must be specified in the absence of a subnet pool" msgstr "サブネットプールがない場合、cidr の指定は必須です" msgid "" "A decimal value as Vendor's Registered Private Enterprise Number as required " "by RFC3315 DUID-EN." msgstr "RFC3315 DUID-EN が必要とするベンダーが登録した私企業番号の 10 進数値。" #, python-format msgid "A default external network already exists: %(net_id)s." msgstr "デフォルトの外部ネットワークが既に存在します: %(net_id)s" msgid "" "A default subnetpool for this IP family has already been set. Only one " "default may exist per IP family" msgstr "" "この IP ファミリーに対するデフォルトのサブネットプールが既に設定されていま" "す。IP ファミリーごとにデフォルトは 1 つしか設定できません。" msgid "A metering driver must be specified" msgstr "計測ドライバーを指定する必要があります" msgid "A password must be supplied when using auth_type md5." msgstr "auth_type md5 を使用する場合は、パスワードを提供する必要があります。" msgid "API for retrieving service providers for Neutron advanced services" msgstr "Neutron 拡張サービス用のサービスプロバイダーを取得するための API" msgid "Aborting periodic_sync_routers_task due to an error." msgstr "エラーのため periodic_sync_routers_task を中止します。" msgid "Access to this resource was denied." 
msgstr "このリソースへのアクセスは拒否されました。" msgid "Action to be executed when a child process dies" msgstr "子プロセスが異常終了したときに実行されるアクション" msgid "" "Add comments to iptables rules. Set to false to disallow the addition of " "comments to generated iptables rules that describe each rule's purpose. " "System must support the iptables comments module for addition of comments." msgstr "" "iptables ルールにコメントを追加します。この値を False に設定すると、生成され" "る iptalbes ルールにルールの目的を説明するコメントを追加しなくなります。シス" "テムでは、コメントの追加用に iptables コメントモジュールがサポートされている" "必要があります。" msgid "Address not present on interface" msgstr "インターフェース上に存在しないアドレス" #, python-format msgid "Address scope %(address_scope_id)s could not be found" msgstr "アドレススコープ %(address_scope_id)s が見つかりませんでした" msgid "" "Address to listen on for OpenFlow connections. Used only for 'native' driver." msgstr "" "OpenFlow 接続をリッスンするアドレス。「ネイティブ」のドライバーでのみ使用でき" "ます。" msgid "Adds external network attribute to network resource." msgstr "外部ネットワーク属性がネットワークリソースに追加されます。" msgid "Adds test attributes to core resources." msgstr "テスト属性をコアリソースに追加します。" #, python-format msgid "Agent %(id)s could not be found" msgstr "エージェント %(id)s が見つかりませんでした" #, python-format msgid "Agent %(id)s is not a L3 Agent or has been disabled" msgstr "" "エージェント %(id)s は、L3 エージェントでないか、使用不可になっています" #, python-format msgid "Agent %(id)s is not a valid DHCP Agent or has been disabled" msgstr "" "エージェント %(id)s は、有効な DHCP エージェントでないか、使用不可になってい" "ます" msgid "Agent has just been revived" msgstr "エージェントが復活したばかりです" msgid "" "Agent starts with admin_state_up=False when enable_new_agents=False. In the " "case, user's resources will not be scheduled automatically to the agent " "until admin changes admin_state_up to True." msgstr "" "enable_new_agents=False の場合、エージェントは admin_state_up=False の状態で" "処理を開始します。この場合、 管理者が admin_state_up を True に変更するまで、" "ユーザーのリソースが自動的にエージェントにスケジュール設定されることはありま" "せん。" #, python-format msgid "Agent updated: %(payload)s" msgstr "エージェントが更新されました: %(payload)s" #, python-format msgid "" "Agent with agent_type=%(agent_type)s and host=%(host)s could not be found" msgstr "" "agent_type=%(agent_type)s および host=%(host)s のエージェントが見つかりません" "でした" msgid "Allow auto scheduling networks to DHCP agent." msgstr "DHCP エージェントに対するネットワークの自動スケジューリングを許可" msgid "Allow auto scheduling of routers to L3 agent." msgstr "" "L3 エージェントに対するルーターの自動スケジューリングを許可してください。" msgid "" "Allow overlapping IP support in Neutron. Attention: the following parameter " "MUST be set to False if Neutron is being used in conjunction with Nova " "security groups." msgstr "" "Neutron で重複する IP のサポートを許容します。注意: Nova のセキュリティーグ" "ループとともに Neutron を使用する場合は、以下のパラメーターを必ず False に設" "定する必要があります。" msgid "Allow running metadata proxy." msgstr "メタデータプロキシーの実行を許可します。" msgid "Allow sending resource operation notification to DHCP agent" msgstr "DHCP エージェントへのリソース操作通知の送信の許可" msgid "Allow the creation of PTR records" msgstr "PTR レコードの作成の許可" msgid "Allow the usage of the bulk API" msgstr "Bulk API の使用を許可" msgid "Allow the usage of the pagination" msgstr "ページ編集の使用を許可" msgid "Allow the usage of the sorting" msgstr "ソートの使用を許可" msgid "Allow to perform insecure SSL (https) requests to nova metadata" msgstr "" "Nova メタデータに対する非セキュアな SSL (https) 要求を実行することを許可しま" "す" msgid "Allowed address pairs must be a list." 
msgstr "許可されたアドレスペアはリストである必要があります。" msgid "AllowedAddressPair must contain ip_address" msgstr "AllowedAddressPair には ip_address が含まれていなければなりません" msgid "" "Allows for serving metadata requests coming from a dedicated metadata access " "network whose CIDR is 169.254.169.254/16 (or larger prefix), and is " "connected to a Neutron router from which the VMs send metadata:1 request. In " "this case DHCP Option 121 will not be injected in VMs, as they will be able " "to reach 169.254.169.254 through a router. This option requires " "enable_isolated_metadata = True." msgstr "" "CIDR が 169.254.169.254/16 (またはこれより大きなプレフィックス) で VM が " "metadata:1 要求を送信する Neuron ルーターに接続している、特定のメタデータアク" "セスネットワークから出されるメタデータ要求に対応します。この場合、DHCP オプ" "ション 121 はルーターを経由して 169.254.169.254 に到達できるため、VM に挿入さ" "れません。このオプションを設定するには enable_isolated_metadata = True と設定" "する必要があります。" #, python-format msgid "" "Already hosting BGP Speaker for local_as=%(current_as)d with router_id=" "%(rtid)s." msgstr "" "既に router_id=%(rtid)s で local_as=%(current_as)d の BGP スピーカーをホスト" "しています。" #, python-format msgid "" "Already hosting maximum number of BGP Speakers. Allowed scheduled count=" "%(count)d" msgstr "" "既に最大数の BGP スピーカーをホストしています。スケジュール済みの count=" "%(count)d が許容されます" msgid "An RBAC policy already exists with those values." msgstr "これらの値に関して RBAC ポリシーが既に存在します。" msgid "An identifier must be specified when updating a subnet" msgstr "サブネットを更新する際には ID を指定する必要があります" msgid "An interface driver must be specified" msgstr "インターフェースドライバーを指定してください" msgid "" "An ordered list of extension driver entrypoints to be loaded from the " "neutron.ml2.extension_drivers namespace. For example: extension_drivers = " "port_security,qos" msgstr "" "neutron.ml2.extension_drivers の名前空間からロードされる拡張ドライバーのエン" "トリーポイントを一定の順序に並べたリスト。例: extension_drivers = " "port_security,qos" msgid "" "An ordered list of networking mechanism driver entrypoints to be loaded from " "the neutron.ml2.mechanism_drivers namespace." msgstr "" "neutron.ml2.mechanism_drivers 名前空間からロードされるネットワーキングメカニ" "ズムドライバーのエンドポイントの順序付きリスト。" msgid "An unexpected internal error occurred." msgstr "予期しない内部エラーが発生しました。" msgid "An unknown error has occurred. Please try your request again." msgstr "不明なエラーが発生しました。要求を再試行してください。" msgid "Async process didn't respawn" msgstr "非同期プロセスが再生成されませんでした" #, python-format msgid "Attribute '%s' not allowed in POST" msgstr "属性 '%s' は POST では許可されません" #, python-format msgid "Authentication type not supported. Requested type=%(auth_type)s." msgstr "認証タイプがサポートされません。type=%(auth_type)s が要求されました。" msgid "Authorization URL for connecting to designate in admin context" msgstr "管理者のコンテキストにおける designate への接続用認証 URL" msgid "Automatically remove networks from offline DHCP agents." msgstr "ネットワークをオフライン DHCP エージェントから自動的に削除します。" msgid "" "Automatically reschedule routers from offline L3 agents to online L3 agents." msgstr "" "ルーターのスケジュールをオフライン L3 エージェントからオンライン L3 エージェ" "ントに自動的に変更します。" msgid "Availability zone of this node" msgstr "このノードのアベイラビリティーゾーン" #, python-format msgid "AvailabilityZone %(availability_zone)s could not be found." msgstr "" "アベイラビリティーゾーン %(availability_zone)s が見つかりませんでした。" msgid "Available commands" msgstr "使用可能なコマンド" #, python-format msgid "" "BGP Peer %(peer_ip)s for remote_as=%(remote_as)s, running for BGP Speaker " "%(speaker_as)d not added yet." 
msgstr "" "まだ追加されていない BGP スピーカー %(speaker_as)d のために実行する、" "remote_as=%(remote_as)s の BGP ピア %(peer_ip)s。" #, python-format msgid "" "BGP Speaker %(bgp_speaker_id)s is already configured to peer with a BGP Peer " "at %(peer_ip)s, it cannot peer with BGP Peer %(bgp_peer_id)s." msgstr "" "BGP スピーカー %(bgp_speaker_id)s は %(peer_ip)s で BGP ピアと連携するよう設" "定されているため、BGP ピア %(bgp_peer_id)s と連携することはできません。" #, python-format msgid "" "BGP Speaker for local_as=%(local_as)s with router_id=%(rtid)s not added yet." msgstr "" "router_id=%(rtid)s がまだ追加されていない local_as=%(local_as)s のBGP スピー" "カー" #, python-format msgid "" "BGP peer %(bgp_peer_id)s is not associated with BGP speaker " "%(bgp_speaker_id)s." msgstr "" "BGP ピア %(bgp_peer_id)s が BGP スピーカー %(bgp_speaker_id)s と関連付けられ" "ていません。" #, python-format msgid "BGP peer %(bgp_peer_id)s not authenticated." msgstr "BGP ピア %(bgp_peer_id)s が認証されていません。" #, python-format msgid "BGP peer %(id)s could not be found." msgstr "BGP ピア %(id)s が見つかりませんでした。" #, python-format msgid "" "BGP speaker %(bgp_speaker_id)s is not hosted by the BgpDrAgent %(agent_id)s." msgstr "" "BGP スピーカー %(bgp_speaker_id)s は BgpDrAgent %(agent_id)s によってホストさ" "れていません。" #, python-format msgid "BGP speaker %(id)s could not be found." msgstr "BGP スピーカー %(id)s が見つかりませんでした。" msgid "BGP speaker driver class to be instantiated." msgstr "インスタンス化される BGP スピーカーのドライバークラス。" msgid "Backend does not support VLAN Transparency." msgstr "バックエンドでは VLAN Transparency はサポートされていません。" #, python-format msgid "" "Bad prefix or mac format for generating IPv6 address by EUI-64: %(prefix)s, " "%(mac)s:" msgstr "" "EUI-64 による IPv6 アドレス生成用のプレフィックスまたは mac の形式が正しくあ" "りません: %(prefix)s、%(mac)s:" #, python-format msgid "Bad prefix type for generate IPv6 address by EUI-64: %s" msgstr "" "EUI-64 による IPv6 アドレス生成用のプレフィックスタイプが正しくありません: %s" #, python-format msgid "Base MAC: %s" msgstr "ベース MAC: %s" msgid "" "Base log dir for dnsmasq logging. The log contains DHCP and DNS log " "information and is useful for debugging issues with either DHCP or DNS. If " "this section is null, disable dnsmasq log." msgstr "" "dnsmasq のログを保存する基本となるログディレクトリー。ログには DHCP と DNS の" "ログ情報が含まれ、DHCP または DNS のデバッグを行うために役立ちます。このセク" "ションに何の値も設定しない場合は、dnsmasq ログを無効化します。" #, python-format msgid "BgpDrAgent %(agent_id)s is already associated to a BGP speaker." msgstr "BgpDrAgent %(agent_id)s は既に BGP スピーカーと関連付けられています。" #, python-format msgid "BgpDrAgent %(id)s is invalid or has been disabled." msgstr "BgpDrAgent %(id)s が無効であるか、使用不可にされています。" #, python-format msgid "BgpDrAgent updated: %s" msgstr "更新された BgpDrAgent : %s" msgid "Body contains invalid data" msgstr "本文に無効なデータが含まれています" msgid "Both network_id and router_id are None. One must be provided." msgstr "" "network_id と router_id の両方が None になっています。このうちの 1 つを提供す" "る必要があります。" #, python-format msgid "Bridge %(bridge)s does not exist." 
msgstr "ブリッジ %(bridge)s は存在しません。" #, python-format msgid "Bridge %s does not exist" msgstr "ブリッジ %s は存在しません" msgid "Bulk operation not supported" msgstr "バルク操作はサポートされていません" msgid "CA certificate file to use to verify connecting clients" msgstr "接続クライアントを検査するために使用される CA 証明書ファイル" msgid "CIDR to monitor" msgstr "モニター対象の CIDR" #, python-format msgid "Callback for %(resource_type)s not found" msgstr "%(resource_type)s のコールバックが見つかりません" #, python-format msgid "Callback for %(resource_type)s returned wrong resource type" msgstr "%(resource_type)s のコールバックが間違ったリソースタイプを返しました" #, python-format msgid "Cannot add floating IP to port %s that has no fixed IPv4 addresses" msgstr "" "固定の IPv4 アドレスを持たないポート %s に Floating IP を追加することはできま" "せん" #, python-format msgid "Cannot add floating IP to port on subnet %s which has no gateway_ip" msgstr "" "gateway_ip のないサブネット %s 上のポートには Floating IP を追加できません" #, python-format msgid "Cannot add multiple callbacks for %(resource_type)s" msgstr "%(resource_type)s に複数のコールバックを追加することはできません" #, python-format msgid "Cannot allocate IPv%(req_ver)s subnet from IPv%(pool_ver)s subnet pool" msgstr "" "IPv%(pool_ver)s サブネットプールから IPv%(req_ver)s サブネットを割り当てるこ" "とはできません" msgid "Cannot allocate requested subnet from the available set of prefixes" msgstr "" "要求されたサブネットを使用可能なプレフィックスのセットから割り振ることができ" "ません" #, python-format msgid "" "Cannot associate floating IP %(floating_ip_address)s (%(fip_id)s) with port " "%(port_id)s using fixed IP %(fixed_ip)s, as that fixed IP already has a " "floating IP on external network %(net_id)s." msgstr "" "Fixed IP %(fixed_ip)s には、既に外部ネットワーク %(net_id)s 上の Floating IP " "があるため、その Fixed IP を使用して Floating IP %(floating_ip_address)s " "(%(fip_id)s) をポート %(port_id)s と関連付けることはできません。" msgid "" "Cannot change HA attribute of active routers. Please set router " "admin_state_up to False prior to upgrade." msgstr "" "アクティブなルーターの HA 属性を変更することはできません。アップグレードを行" "う前に、ルーターの admin_state_up を False に設定してください。" #, python-format msgid "" "Cannot create floating IP and bind it to %s, since that is not an IPv4 " "address." msgstr "" "Floating IP は IPv4 アドレスではないため、Floating IP を作成して %s とバイン" "ドすることはできません。" #, python-format msgid "" "Cannot create floating IP and bind it to Port %s, since that port is owned " "by a different tenant." msgstr "" "ポート %s は別のテナントによって所有されているため、 Floating IP を作成して、" "そのポートにバインドすることはできません。" msgid "Cannot create resource for another tenant" msgstr "別のテナントのリソースを作成できません" msgid "Cannot disable enable_dhcp with ipv6 attributes set" msgstr "ipv6 属性が設定された状態で enable_dhcp を無効にすることはできません" #, python-format msgid "Cannot find %(table)s with %(col)s=%(match)s" msgstr "%(col)s が %(match)s に一致する %(table)s が見つかりません" #, python-format msgid "Cannot handle subnet of type %(subnet_type)s" msgstr "タイプ %(subnet_type)s のサブネットを処理できません" msgid "Cannot have multiple IPv4 subnets on router port" msgstr "ルーターポートには複数の IPv4 サブネットは設定できません" #, python-format msgid "" "Cannot have multiple router ports with the same network id if both contain " "IPv6 subnets. Existing port %(p)s has IPv6 subnet(s) and network id %(nid)s" msgstr "" "同じネットワーク ID を持つ複数のルーターポートのいずれにも IPv6 サブネットが" "含まれる場合、これらのポートは使用できません。既存のポート %(p)s には IPv6 サ" "ブネットがあり、ネットワーク ID は %(nid)s です" #, python-format msgid "" "Cannot host distributed router %(router_id)s on legacy L3 agent %(agent_id)s." msgstr "" "レガシーの L3 エージェント %(agent_id)s で分散ルーター %(router_id)s をホスト" "できません。" msgid "Cannot match priority on flow deletion or modification" msgstr "flow の削除または変更時に、優先度の一致が見つかりません" msgid "Cannot mix IPv4 and IPv6 prefixes in a subnet pool." 
msgstr "" "1 つのサブネットプールで IPv4 のプレフィックスと IPv6 のプレフィックスを混用" "することはできません。" msgid "Cannot specify both --service and --subproject." msgstr "--service と --subproject の両方を指定することはできません。" msgid "Cannot specify both subnet-id and port-id" msgstr "subnet-id と port-id の両方を指定することはできません" msgid "Cannot understand JSON" msgstr "JSON を解釈できません" #, python-format msgid "Cannot update read-only attribute %s" msgstr "読み取り専用属性 %s を更新できません" msgid "" "Cannot upgrade active router to distributed. Please set router " "admin_state_up to False prior to upgrade." msgstr "" "有効なルーターを分散ルーターにアップグレードすることはできません。アップグ" "レードするには、ルーターの admin_state_up を False に設定してください。" msgid "Certificate Authority public key (CA cert) file for ssl" msgstr "ssl の認証局公開鍵 (CA cert) ファイル" #, python-format msgid "" "Change would make usage less than 0 for the following resources: %(unders)s." msgstr "変更によって、以下のリソースの使用量が 0 未満になります: %(unders)s。" msgid "Check ebtables installation" msgstr "ebtables のインストールの検査" msgid "Check for ARP header match support" msgstr "ARP ヘッダーマッチのサポートの検査" msgid "Check for ARP responder support" msgstr "ARP 応答側サポートの検査" msgid "Check for ICMPv6 header match support" msgstr "ICMPv6 ヘッダーマッチのサポートの検査" msgid "Check for OVS Geneve support" msgstr "OVS Geneve サポートの検査" msgid "Check for OVS vxlan support" msgstr "OVS vxlan サポートの検査" msgid "Check for VF management support" msgstr "VF 管理サポートの検査" msgid "Check for iproute2 vxlan support" msgstr "iproute2 vxlan サポートの検査" msgid "Check for nova notification support" msgstr "Nova 通知サポートの検査" msgid "Check for patch port support" msgstr "パッチポートのサポートの検査" msgid "Check ip6tables installation" msgstr "ip6tables のインストールの検査" msgid "Check ipset installation" msgstr "ipset のインストールの検査" msgid "Check keepalived IPv6 support" msgstr "Keepalived の IPv6 サポートの検査" msgid "Check minimal dibbler version" msgstr "Dibbler の最小バージョンの検査" msgid "Check minimal dnsmasq version" msgstr "最小 dnsmasq バージョンの検査" msgid "Check netns permission settings" msgstr "netns 許可設定の検査" msgid "Check ovs conntrack support" msgstr "ovs の conntrack サポートの検査" msgid "Check ovsdb native interface support" msgstr "ovsdb ネイティブインターフェースのサポートの検査" #, python-format msgid "" "Cidr %(subnet_cidr)s of subnet %(subnet_id)s overlaps with cidr %(cidr)s of " "subnet %(sub_id)s" msgstr "" "サブネット %(subnet_id)s の CIDR %(subnet_cidr)s がサブネット %(sub_id)s の " "CIDR %(cidr)s とオーバーラップしています" msgid "Class not found." msgstr "クラスが見つかりません。" msgid "Cleanup resources of a specific agent type only." msgstr "特定のエージェントタイプのみのリソースをクリーンアップします。" msgid "Client certificate for nova metadata api server." msgstr "Nova メタデータ API サーバー用のクライアント証明書。" msgid "" "Comma-separated list of : tuples, mapping " "network_device to the agent's node-specific list of virtual functions that " "should not be used for virtual networking. vfs_to_exclude is a semicolon-" "separated list of virtual functions to exclude from network_device. The " "network_device in the mapping should appear in the physical_device_mappings " "list." msgstr "" "コンマで区切られた のリスト: タプルは、仮想" "ネットワーキングに使用してはならない仮想機能のエージェントのノード固有のリス" "トに network_device をマッピングします。vfs_to_exclude は、セミコロンで区切ら" "れた network_device から除外される仮想機能のリストです。マッピングに含まれる " "network_device は、physical_device_mappings リストに表示されます。" msgid "" "Comma-separated list of : tuples mapping physical " "network names to the agent's node-specific Open vSwitch bridge names to be " "used for flat and VLAN networks. The length of bridge names should be no " "more than 11. Each bridge must exist, and should have a physical network " "interface configured as a port. 
msgid "" "Comma-separated list of <physical_network>:<bridge> tuples mapping physical " "network names to the agent's node-specific Open vSwitch bridge names to be " "used for flat and VLAN networks. The length of bridge names should be no " "more than 11. Each bridge must exist, and should have a physical network " "interface configured as a port. All physical networks configured on the " "server should have mappings to appropriate bridges on each agent. Note: If " "you remove a bridge from this mapping, make sure to disconnect it from the " "integration bridge as it won't be managed by the agent anymore. Deprecated " "for ofagent." msgstr "" "コンマで区切られた <physical_network>:<bridge> タプルのリスト。物理ネットワーク名を、フラットネットワークと VLAN ネットワークで使用される、エージェントのノード固有の Open vSwitch ブリッジ名にマッピングします。ブリッジ名の長さは 11 を超えることはできません。各ブリッジが存在する必要があり、ポートとして設定された物理ネットワークインターフェースを持つ必要があります。サーバー上で設定されたすべての物理ネットワークは、各エージェントの適切なブリッジにマッピングされる必要があります。注意: このマッピングからブリッジを削除した場合、そのブリッジはエージェントによって管理されなくなるため、必ず統合ブリッジから切断してください。ofagent では廃止予定です。" msgid "" "Comma-separated list of <physical_network>:<network_device> tuples mapping " "physical network names to the agent's node-specific physical network device " "interfaces of SR-IOV physical function to be used for VLAN networks. All " "physical networks listed in network_vlan_ranges on the server should have " "mappings to appropriate interfaces on each agent." msgstr "" "コンマで区切られた <physical_network>:<network_device> タプルのリスト。物理ネットワーク名を、VLAN ネットワークで使用される、エージェントのノード固有の SR-IOV 物理機能の物理ネットワークデバイスインターフェースにマッピングします。サーバー上の network_vlan_ranges にリストされるすべての物理ネットワークは、各エージェントの適切なインターフェースにマッピングされる必要があります。" msgid "" "Comma-separated list of <physical_network>:<physical_interface> tuples " "mapping physical network names to the agent's node-specific physical network " "interfaces to be used for flat and VLAN networks. All physical networks " "listed in network_vlan_ranges on the server should have mappings to " "appropriate interfaces on each agent." msgstr "" "コンマで区切られた <physical_network>:<physical_interface> タプルのリスト。物理ネットワーク名を、フラットネットワークと VLAN ネットワークで使用される、エージェントのノード固有の物理ネットワークインターフェースにマッピングします。サーバー上の network_vlan_ranges にリストされるすべての物理ネットワークは、各エージェントの適切なインターフェースにマッピングされる必要があります。" msgid "" "Comma-separated list of <tun_min>:<tun_max> tuples enumerating ranges of GRE " "tunnel IDs that are available for tenant network allocation" msgstr "" "テナントネットワークの割り当てに使用可能な GRE トンネル ID の範囲を列挙する <tun_min>:<tun_max> タプルのコンマ区切りリスト" msgid "" "Comma-separated list of <vni_min>:<vni_max> tuples enumerating ranges of " "Geneve VNI IDs that are available for tenant network allocation" msgstr "" "テナントネットワークの割り当てに使用可能な Geneve VNI ID の範囲を列挙する、コンマで区切った <vni_min>:<vni_max> タプルのリスト。" msgid "" "Comma-separated list of <vni_min>:<vni_max> tuples enumerating ranges of " "VXLAN VNI IDs that are available for tenant network allocation" msgstr "" "テナントネットワークの割り当てに使用可能な VXLAN VNI ID の範囲を列挙する <vni_min>:<vni_max> タプルのコンマ区切りリスト" msgid "" "Comma-separated list of supported PCI vendor devices, as defined by " "vendor_id:product_id according to the PCI ID Repository. Default enables " "support for Intel and Mellanox SR-IOV capable NICs." msgstr "" "サポート対象の PCI ベンダーのデバイスに関するコンマで区切られたリスト (PCI " "ID のリポジトリーに基づいて、vendor_id:product_id で定義されます)。デフォルト" "では、 Intel と Mellanox の SR-IOV に対応した NIC をサポートします。" msgid "" "Comma-separated list of the DNS servers which will be used as forwarders." msgstr "フォワーダーとして使用される DNS サーバーのカンマ区切りのリスト。" msgid "Command to execute" msgstr "実行するコマンド" msgid "Config file for interface driver (You may also use l3_agent.ini)" msgstr "" "インターフェースドライバーの構成ファイル (l3_agent.ini を使用することもできま" "す)" #, python-format msgid "Conflicting value ethertype %(ethertype)s for CIDR %(cidr)s" msgstr "CIDR %(cidr)s とイーサネットタイプ値 %(ethertype)s が競合しています"
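# Editor's note: illustrative example only, not part of the message catalog.
# The range and mapping options described above are typically set in the ml2
# and agent configuration files; the names and ranges below are hypothetical:
#
#   [ml2_type_gre]
#   tunnel_id_ranges = 1:1000
#
#   [ml2_type_vxlan]
#   vni_ranges = 1001:2000
#
#   [ovs]
#   bridge_mappings = physnet1:br-eth1,physnet2:br-ex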
msgstr "" "Neutron セキュリティーグループ API をサーバーで有効化するかどうかを制御しま" "す。セキュリティーグループを使用しない場合、または Nova セキュリティーグルー" "プ API を使用する場合には、False にする必要があります。" #, python-format msgid "Could not bind to %(host)s:%(port)s after trying for %(time)d seconds" msgstr "" "%(time)d 秒間試行しましたが %(host)s:%(port)s にバインドできませんでした" #, python-format msgid "Could not connect to %s" msgstr "%s に接続できませんでした" msgid "Could not deserialize data" msgstr "シリアライズされたデータを復元することができませんでした" #, python-format msgid "Could not retrieve schema from %(conn)s: %(err)s" msgstr "%(conn)s からスキーマを取得できませんでした: %(err)s" #, python-format msgid "" "Current gateway ip %(ip_address)s already in use by port %(port_id)s. Unable " "to update." msgstr "" "現在のゲートウェイ IP %(ip_address)s はポート %(port_id)s によって既に使用さ" "れています。更新できません。" msgid "Currently update of HA mode for a DVR/HA router is not supported." msgstr "現在、DVR/HA ルーターの HA モードの更新はサポートされません。" msgid "Currently update of HA mode for a distributed router is not supported." msgstr "現在、分散ルーターの HA モードの更新はサポートされません。" msgid "" "Currently update of distributed mode for a DVR/HA router is not supported" msgstr "現在、DVR/HA ルーターの分散モードの更新はサポートされません。" msgid "Currently update of distributed mode for an HA router is not supported." msgstr "現在、HA ルーターの分散モードの更新はサポートされません。" msgid "" "Currently updating a router from DVR/HA to non-DVR non-HA is not supported." msgstr "" "現在、DVR/HA から DVR 以外/HA 以外へのルーターの更新はサポートされません。" msgid "Currently updating a router to DVR/HA is not supported." msgstr "現在、DVR/HA へのルーターの更新はサポートされません。" msgid "" "DHCP lease duration (in seconds). Use -1 to tell dnsmasq to use infinite " "lease times." msgstr "" "DHCP リース期間 (秒)。dnsmasq に無制限のリース時間の使用を指示するには、-1 を" "使用します。" msgid "" "DVR deployments for VXLAN/GRE/Geneve underlays require L2-pop to be enabled, " "in both the Agent and Server side." msgstr "" "VXLAN、GRE、Geneve のアンダーレイの DVR 実装環境では、エージェント側とサー" "バー側で L2-pop を有効化する必要があります。" msgid "" "Database engine for which script will be generated when using offline " "migration." msgstr "オフラインで移行を行う際にスクリプトが生成されるデータベースエンジン。" msgid "" "Default IPv4 subnet pool to be used for automatic subnet CIDR allocation. " "Specifies by UUID the pool to be used in case where creation of a subnet is " "being called without a subnet pool ID. If not set then no pool will be used " "unless passed explicitly to the subnet create. If no pool is used, then a " "CIDR must be passed to create a subnet and that subnet will not be allocated " "from any pool; it will be considered part of the tenant's private address " "space. This option is deprecated for removal in the N release." msgstr "" "自動的に サブネットの CIDR を割り当てるために使用されるデフォルトの IPv4 のサ" "ブネットプール。サブネットプールの ID を指定せずにサブネットの作成が呼び出さ" "れた場合に使用されるプールを、 UUID によって指定します。指定されなかった場" "合、サブネットの作成にプールを明示的に渡さない限り、プールは使用されません。" "プールが使用されなかった場合、サブネットを作成するために CIDR を渡す必要があ" "り、いずれのプールからもこのサブネットの割り当ては行われません。これはテナン" "トのプライベートアドレススペースの一部と考えられます。このオプションは N リ" "リースでは削除されます。" msgid "" "Default IPv6 subnet pool to be used for automatic subnet CIDR allocation. " "Specifies by UUID the pool to be used in case where creation of a subnet is " "being called without a subnet pool ID. See the description for " "default_ipv4_subnet_pool for more information. This option is deprecated for " "removal in the N release." 
msgstr "" "自動的に サブネットの CIDR を割り当てるために使用されるデフォルトの IPv6 のサ" "ブネットプール。サブネットプールの ID を指定せずにサブネットの作成が呼び出さ" "れた場合に使用されるプールを、UUID によって指定します。詳細情報については、" "default_ipv4_subnet_pool の説明を参照してください。このオプションは N リリー" "スでは削除されます。" msgid "Default driver to use for quota checks" msgstr "割り当て量の検査に使用するデフォルトのドライバー" msgid "Default external networks must be shared to everyone." msgstr "デフォルトの外部ネットワークは全メンバーに共有する必要があります" msgid "" "Default network type for external networks when no provider attributes are " "specified. By default it is None, which means that if provider attributes " "are not specified while creating external networks then they will have the " "same type as tenant networks. Allowed values for external_network_type " "config option depend on the network type values configured in type_drivers " "config option." msgstr "" "プロバイダーの属性が指定されない場合の外部ネットワーク用のデフォルトのネット" "ワークタイプ。デフォルト値は None です。これは、外部ネットワークの作成時にプ" "ロバイダーの属性が指定されない場合に、テナントネットワークと同じネットワーク" "タイプを使用することを意味します。external_network_type の設定オプションとし" "て許容される値は、type_drivers の設定オプションで設定されたネットワークタイプ" "値によって決まります。" msgid "" "Default number of RBAC entries allowed per tenant. A negative value means " "unlimited." msgstr "" "テナントごとの RBAC 項目のデフォルト数。負の値がある場合、制限が設定されてい" "ないことを指します。" msgid "" "Default number of resource allowed per tenant. A negative value means " "unlimited." msgstr "" "テナント当たりに許可されるリソースのデフォルト数。負の値は無制限を意味しま" "す。" msgid "Default security group" msgstr "デフォルトセキュリティグループ" msgid "Default security group already exists." msgstr "デフォルトのセキュリティーグループが既に存在します。" msgid "" "Default value of availability zone hints. The availability zone aware " "schedulers use this when the resources availability_zone_hints is empty. " "Multiple availability zones can be specified by a comma separated string. " "This value can be empty. In this case, even if availability_zone_hints for a " "resource is empty, availability zone is considered for high availability " "while scheduling the resource." msgstr "" "アベイラビリティーゾーンのヒントのデフォルト値。リソースの " "availability_zone_hints が空の場合、アベイラビリティーゾーンを参照するスケ" "ジューラーがこの値を使用します。コンマで区切られた文字列によって複数のアベイ" "ラビリティーゾーンを指定できます。この値は空である場合があります。その場合、" "リソースの availability_zone_hints が空であっても、リソースのスケジューリング" "を行う際に、高可用性を実現するようアベイラビリティーゾーンの検討が行われま" "す。" msgid "" "Define the default value of enable_snat if not provided in " "external_gateway_info." msgstr "" "enable_snat のデフォルト値が external_gateway_info で提供されていない場合は、" "定義してください。" msgid "" "Defines providers for advanced services using the format: :" ":[:default]" msgstr "" "次のフォーマットを使用して拡張サービスのプロバイダーが定義されます: " "::[:default]" msgid "" "Delay within which agent is expected to update existing ports whent it " "restarts" msgstr "" "エージェントが再始動時に既存のポートを更新することが期待される遅延の期間" msgid "Delete the namespace by removing all devices." msgstr "すべてのデバイスを削除して、名前空間を削除します。" #, python-format msgid "Deleting port %s" msgstr "ポート %s を削除しています" #, python-format msgid "Deployment error: %(reason)s." msgstr "デプロイメントエラー: %(reason)s" msgid "Destroy IPsets even if there is an iptables reference." msgstr "iptables の参照がある場合でも IPset を破棄します。" msgid "Destroy all IPsets." msgstr "すべての IPset を破棄します。" #, python-format msgid "Device %(dev_name)s in mapping: %(mapping)s not unique" msgstr "マッピング: %(mapping)s 内のデバイス %(dev_name)s が一意ではありません" #, python-format msgid "Device '%(device_name)s' does not exist." 
msgstr "デバイス '%(device_name)s' が存在しません。" msgid "Device has no virtual functions" msgstr "デバイスに仮想関数が含まれていません" #, python-format msgid "Device name %(dev_name)s is missing from physical_device_mappings" msgstr "デバイス名 %(dev_name)s が physical_device_mappings にありません" msgid "Device not found" msgstr "デバイスが見つかりません" #, python-format msgid "" "Distributed Virtual Router Mac Address for host %(host)s does not exist." msgstr "ホスト %(host)s の分散仮想ルーター MAC アドレスが存在しません。" #, python-format msgid "Domain %(dns_domain)s not found in the external DNS service" msgstr "ドメイン %(dns_domain)s が外部の DNS サービス内で見つかりません" msgid "Domain to use for building the hostnames" msgstr "ホスト名の作成に使用するドメイン" msgid "" "Domain to use for building the hostnames. This option is deprecated. It has " "been moved to neutron.conf as dns_domain. It will be removed in a future " "release." msgstr "" "ホスト名を作成するために使用するドメイン。このオプションは提供を終了していま" "す。本オプションは、「neutron.conf as dns_domain」で提供します。将来のリリー" "スには本オプションは含まれません。" msgid "Downgrade no longer supported" msgstr "ダウングレードは現在ではサポートされていません" #, python-format msgid "Driver %s is not unique across providers" msgstr "ドライバー %s はプロバイダー全体で固有ではありません" msgid "Driver for external DNS integration." msgstr "外部 DNS の連携のためのドライバー。" msgid "Driver for security groups firewall in the L2 agent" msgstr "L2 エージェントのセキュリティーグループのファイアウォールのドライバー" msgid "Driver to use for scheduling network to DHCP agent" msgstr "" "DHCP エージェントに対するネットワークのスケジューリングに使用するドライバー" msgid "Driver to use for scheduling router to a default L3 agent" msgstr "" "デフォルトの L3 エージェントに対するルーターのスケジューリングに使用するドラ" "イバー" msgid "" "Driver used for ipv6 prefix delegation. This needs to be an entry point " "defined in the neutron.agent.linux.pd_drivers namespace. See setup.cfg for " "entry points included with the neutron source." msgstr "" "ipv6 のプレフィックスデリゲーションを行うためのドライバー。neutron.agent." "linux.pd_drivers の名前空間で定義したエントリーポイントである必要があります。" "neutron のソースに含まれるエントリーポイントについては、setup.cfg を参照して" "ください。" msgid "Driver used for scheduling BGP speakers to BGP DrAgent" msgstr "" "BGP DrAgent に対する BGP スピーカーのスケジューリングに使用するドライバー" msgid "Drivers list to use to send the update notification" msgstr "更新の通知を送信するために使用するドライバーリスト" #, python-format msgid "Duplicate IP address '%s'" msgstr "重複 IP アドレス '%s'" #, python-format msgid "" "Duplicate L3HARouterAgentPortBinding is created for router(s) %(router)s. " "Database cannot be upgraded. Please, remove all duplicates before upgrading " "the database." msgstr "" "ルーター %(router)s に関して、重複する L3HARouterAgentPortBinding が作成され" "ます。データベースはアップグレードできません。データベースをアップグレードす" "る前にすべての重複を削除してください。" msgid "Duplicate Metering Rule in POST." msgstr "POST で計測規則が重複しています。" msgid "Duplicate Security Group Rule in POST." msgstr "POST に重複するセキュリティーグループルールがあります。" msgid "Duplicate address detected" msgstr "検出された重複アドレス" #, python-format msgid "Duplicate hostroute '%s'" msgstr "重複するホスト経路 '%s'" #, python-format msgid "Duplicate items in the list: '%s'" msgstr "リスト内で重複する項目: '%s'" #, python-format msgid "Duplicate nameserver '%s'" msgstr "重複するネームサーバー '%s'" msgid "Duplicate segment entry in request." msgstr "重複するセグメントエントリーが要求に含まれています。" #, python-format msgid "ERROR: %s" msgstr "エラー: %s" msgid "" "ERROR: Unable to find configuration file via the default search paths (~/." "neutron/, ~/, /etc/neutron/, /etc/) and the '--config-file' option!" msgstr "" "エラー: デフォルトの検索パス (~/.neutron/, ~/, /etc/neutron/, /etc/) および " "'--config-file' オプションを使用して、構成ファイルが見つかりません。" msgid "" "Either one of parameter network_id or router_id must be passed to _get_ports " "method." 
msgstr "" "パラメーター network_id または router_id のいずれかを _get_ports メソッドに渡" "す必要があります。" msgid "Either subnet_id or port_id must be specified" msgstr "subnet_id または port_id のいずれかを指定する必要があります" msgid "Empty physical network name." msgstr "物理ネットワーク名が空です。" msgid "Empty subnet pool prefix list." msgstr "サブネットプールのプレフィックスリストが空です。" msgid "Enable FWaaS" msgstr "FWaaS を有効にします" msgid "Enable HA mode for virtual routers." msgstr "仮想ルーターのために HA モードを有効化します。" msgid "Enable SSL on the API server" msgstr "API サーバー上で SSL を有効にします" msgid "" "Enable VXLAN on the agent. Can be enabled when agent is managed by ml2 " "plugin using linuxbridge mechanism driver" msgstr "" "エージェントで VXLAN を有効にしてください。linuxbridge メカニズムドライバーを" "使用してエージェントが ml2 プラグインによって管理されているときに、VXLAN を有" "効にできます" msgid "" "Enable local ARP responder if it is supported. Requires OVS 2.1 and ML2 " "l2population driver. Allows the switch (when supporting an overlay) to " "respond to an ARP request locally without performing a costly ARP broadcast " "into the overlay." msgstr "" "ローカルでの ARP 応答がサポートされている場合、これを有効にします。OVS 2.1 お" "よび ML2 l2population ドライバーが必要です。スイッチが、コストのかかる、オー" "バーレイへの ARP ブロードキャストを実行せずに、ARP 要求にローカルで応答するよ" "うにします (オーバーレイがサポートされている場合)。" msgid "" "Enable local ARP responder which provides local responses instead of " "performing ARP broadcast into the overlay. Enabling local ARP responder is " "not fullycompatible with the allowed-address-pairs extension." msgstr "" "オーバーレイに対して ARP ブロードキャストを実行するのではなく、ローカルレスポ" "ンスを提供するローカルの ARP レスポンダーを有効化します。ローカルの ARP レス" "ポンダーの有効化は、許容されるアドレスペアの拡張と完全な互換性があるわけでは" "ありません。" msgid "" "Enable services on an agent with admin_state_up False. If this option is " "False, when admin_state_up of an agent is turned False, services on it will " "be disabled. Agents with admin_state_up False are not selected for automatic " "scheduling regardless of this option. But manual scheduling to such agents " "is available if this option is True." msgstr "" "admin_state_up が False のエージェントでサービスを有効化します。このオプショ" "ンが False の場合、エージェントの admin_state_up が False に変更されると、そ" "のエージェントでのサービスは無効化されます。admin_state_up が False のエー" "ジェントは、このオプションとは無関係に、自動スケジューリング用には選択されま" "せん。ただし、このオプションが True の場合、このようなエージェントに対しては" "手動スケジューリングが使用できます。" msgid "" "Enable suppression of ARP responses that don't match an IP address that " "belongs to the port from which they originate. Note: This prevents the VMs " "attached to this agent from spoofing, it doesn't protect them from other " "devices which have the capability to spoof (e.g. bare metal or VMs attached " "to agents without this flag set to True). Spoofing rules will not be added " "to any ports that have port security disabled. For LinuxBridge, this " "requires ebtables. For OVS, it requires a version that supports matching ARP " "headers. This option will be removed in Newton so the only way to disable " "protection will be via the port security extension." msgstr "" "ARP レスポンスが最初に発生したポートに関連するIP アドレスにマッチしない ARP " "レスポンスを消去します。注意: この結果、このエージェントに追加されたする VM " "はスプーフィングを行うことができなくなるものの、スプーフィングを行うことがで" "きる他のデバイス (このフラグを Trueに設定せずにエージェントに追加されたベアメ" "タルや VM など) からこれらの VM を保護することはできません。スプーフィング" "ルールは、ポートセキュリティーを無効化したポートには追加できません。" "LinuxBridge の場合、このためには ebtables が必要です。OVS の場合は、マッチす" "る ARP ヘッダーをサポートするバージョンが必要です。このオプションは Newtron " "では削除されるため、保護を無効化するにはポートセキュリティーの拡張機能を必ず" "使用する必要があります。" msgid "" "Enable/Disable log watch by metadata proxy. 
msgid "" "Enable/Disable log watch by metadata proxy. It should be disabled when " "metadata_proxy_user/group is not allowed to read/write its log file and " "copytruncate logrotate option must be used if logrotate is enabled on " "metadata proxy log files. Option default value is deduced from " "metadata_proxy_user: watch log is enabled if metadata_proxy_user is agent " "effective user id/name." msgstr "" "メタデータプロキシーによるログ監視を有効/無効にします。metadata_proxy_user/" "group がログファイルの読み取り/書き込みを許可されていない場合は無効にする必要" "があり、logrotate がメタデータプロキシーのログファイルで有効になっている場合" "は copytruncate logrotate オプションを使用する必要があります。オプションのデ" "フォルト値は metadata_proxy_user から推測されます。監視ログは、" "metadata_proxy_user がエージェント有効ユーザーの ID または名前である場合に有" "効になります。" msgid "" "Enables IPv6 Prefix Delegation for automatic subnet CIDR allocation. Set to " "True to enable IPv6 Prefix Delegation for subnet allocation in a PD-capable " "environment. Users making subnet creation requests for IPv6 subnets without " "providing a CIDR or subnetpool ID will be given a CIDR via the Prefix " "Delegation mechanism. Note that enabling PD will override the behavior of " "the default IPv6 subnetpool." msgstr "" "自動的にサブネットの CIDR を割り当てるために IPv6 のプレフィックスデリゲー" "ションを有効化します。True に設定した場合、PD に対応した環境でのサブネットの" "割り当てのために、IPv6 のプレフィックスデリゲーションを有効化します。CIDR ま" "たはサブネットプールの ID を指定せずに IPv6 のサブネットの作成要求を行うユー" "ザーには、プレフィックスデリゲーションのメカニズム経由で CIDR が提供されま" "す。PD を有効化するとデフォルトの IPv6 サブネットプールの挙動がオーバーライド" "されることに注意してください。" msgid "" "Enables the dnsmasq service to provide name resolution for instances via DNS " "resolvers on the host running the DHCP agent. Effectively removes the '--no-" "resolv' option from the dnsmasq process arguments. Adding custom DNS " "resolvers to the 'dnsmasq_dns_servers' option disables this feature." msgstr "" "dnsmasq サービスを有効化すると、DHCP エージェントを実行するホスト上で DNS リ" "ゾルバー経由でインスタンスの名前解決を行うことができます。dnsmasq プロセスの" "引数から '--no-resolv' オプションを事実上削除します。'dnsmasq_dns_servers' " "オプションにカスタムの DNS リゾルバーを追加すると、この機能を無効化できます。" msgid "Encountered an empty component." msgstr "空のコンポーネントが検出されました。" msgid "End of VLAN range is less than start of VLAN range" msgstr "VLAN 範囲の終わりが VLAN 範囲の開始より小さくなっています" msgid "End of tunnel range is less than start of tunnel range" msgstr "トンネル範囲の終わりが、トンネル範囲の開始より小さくなっています" msgid "Enforce using split branches file structure." msgstr "分割されたブランチのファイル構造を必ず使用するようにします。" msgid "" "Ensure that configured gateway is on subnet. For IPv6, validate only if " "gateway is not a link local address. Deprecated, to be removed during the " "Newton release, at which point the gateway will not be forced on to subnet." msgstr "" "設定済みのゲートウェイが必ずサブネット上にあるようにします。IPv6 の場合、ゲートウェイがリンクローカルアドレスでない場合にのみ、検証を行います。この機能は廃止予定で、Newton リリースで削除される予定です。削除された時点で、ゲートウェイがサブネット上にあることは強制されなくなります。" #, python-format msgid "Error %(reason)s while attempting the operation." msgstr "操作の試行中に発生したエラー %(reason)s。" #, python-format msgid "Error importing FWaaS device driver: %s" msgstr "FWaaS デバイスドライバーのインポート中にエラーが発生しました: %s" #, python-format msgid "Error parsing dns address %s" msgstr "DNS アドレス %s の解析中にエラーが発生しました" #, python-format msgid "Error while reading %s" msgstr "%s の読み取り中にエラーが発生しました" #, python-format msgid "" "Exceeded %s second limit waiting for address to leave the tentative state." msgstr "" "アドレスが一時的な状態を終了するまでの待機時間の上限の %s 秒を超過しました。" msgid "Exceeded maximum amount of fixed ips per port."
msgstr "ポートごとの Fixed IP の最大数を超えました" msgid "Existing prefixes must be a subset of the new prefixes" msgstr "" "既存のプレフィックスは新規プレフィックスのサブセットでなければなりません" #, python-format msgid "" "Exit code: %(returncode)d; Stdin: %(stdin)s; Stdout: %(stdout)s; Stderr: " "%(stderr)s" msgstr "" "終了コード: %(returncode)d、Stdin: %(stdin)s、Stdout: %(stdout)s、Stderr: " "%(stderr)s" #, python-format msgid "Extension %(driver)s failed." msgstr "拡張 %(driver)s が失敗しました。" #, python-format msgid "" "Extension driver %(driver)s required for service plugin %(service_plugin)s " "not found." msgstr "" "サービスプラグイン %(service_plugin)s に必要な拡張ドライバー %(driver)s が見" "つかりませんでした。" msgid "" "Extension to use alongside ml2 plugin's l2population mechanism driver. It " "enables the plugin to populate VXLAN forwarding table." msgstr "" "ml2 プラグインの l2population メカニズムドライバーとともに使用する拡張機能。" "これにより、このプラグインは VXLAN 転送テーブルにデータを追加できるようになり" "ます。" #, python-format msgid "Extension with alias %s does not exist" msgstr "エイリアス %s を持つ拡張は存在しません" msgid "Extensions list to use" msgstr "使用する拡張機能リスト" #, python-format msgid "Extensions not found: %(extensions)s." msgstr "拡張が見つかりません: %(extensions)s。" #, python-format msgid "External DNS driver %(driver)s could not be found." msgstr "外部の DNS ドライバー %(driver)s が見つかりませんでした。" #, python-format msgid "External IP %s is the same as the gateway IP" msgstr "外部 IP %s はゲートウェイ IP と同一です" #, python-format msgid "" "External network %(external_network_id)s is not reachable from subnet " "%(subnet_id)s. Therefore, cannot associate Port %(port_id)s with a Floating " "IP." msgstr "" "外部ネットワーク %(external_network_id)s は、サブネット %(subnet_id)s から到" "達可能ではありません。そのため、ポート %(port_id)s を Floating IP と関連付け" "ることができません。" #, python-format msgid "" "External network %(net_id)s cannot be updated to be made non-external, since " "it has existing gateway ports" msgstr "" "外部ネットワーク %(net_id)s は、既存のゲートウェイポートを保持しているため、" "このネットワークを外部以外にするための更新は実行できません" #, python-format msgid "ExtraDhcpOpt %(id)s could not be found" msgstr "ExtraDhcpOpt %(id)s が見つかりませんでした" msgid "" "FWaaS plugin is configured in the server side, but FWaaS is disabled in L3-" "agent." msgstr "" "FWaaS プラグインがサーバー側で設定されていますが、 L3 エージェントでは FWaaS " "は無効になっています。" #, python-format msgid "Failed rescheduling router %(router_id)s: no eligible l3 agent found." msgstr "" "ルーター %(router_id)s のスケジュール変更に失敗しました: 適格な L3 エージェン" "トが見つかりません。" #, python-format msgid "Failed scheduling router %(router_id)s to the L3 Agent %(agent_id)s." msgstr "" "L3 エージェント %(agent_id)s に対するルーター %(router_id)s のスケジューリン" "グに失敗しました。" #, python-format msgid "" "Failed to allocate a VRID in the network %(network_id)s for the router " "%(router_id)s after %(max_tries)s tries." msgstr "" "%(max_tries)s 回試行しましたが、ルーター %(router_id)s のネットワーク " "%(network_id)s で VRID を割り振ることができませんでした。" #, python-format msgid "Failed to allocate subnet: %(reason)s." msgstr "サブネットの割り当てに失敗しました: %(reason)s。" msgid "" "Failed to associate address scope: subnetpools within an address scope must " "have unique prefixes." msgstr "" "アドレススコープの関連付けに失敗しました: アドレススコープ内のサブネットプー" "ルは固有のプレフィックスを持つ必要があります。" #, python-format msgid "Failed to check policy %(policy)s because %(reason)s." 
msgstr "%(reason)s のため、ポリシー %(policy)s の検査が失敗しました。" #, python-format msgid "" "Failed to create a duplicate %(object_type)s: for attribute(s) " "%(attributes)s with value(s) %(values)s" msgstr "" "以下について重複する %(object_type)s の作成に失敗しました: 値 %(values)s を持" "つ属性 %(attributes)s" #, python-format msgid "" "Failed to create port on network %(network_id)s, because fixed_ips included " "invalid subnet %(subnet_id)s" msgstr "" "fixed_ips が無効なサブネット %(subnet_id)s に含まれていたため、ネットワーク " "%(network_id)s でポートを作成できませんでした" #, python-format msgid "Failed to init policy %(policy)s because %(reason)s." msgstr "%(reason)s のため、ポリシー %(policy)s の初期化が失敗しました。" #, python-format msgid "Failed to locate source for %s." msgstr "%s のソースの特定に失敗しました。" #, python-format msgid "Failed to parse request. Parameter '%s' not specified" msgstr "要求を解析できません。パラメーター '%s' が指定されていません" #, python-format msgid "Failed to parse request. Required attribute '%s' not specified" msgstr "要求を解析できません。必須属性 '%s' が指定されていません" msgid "Failed to remove supplemental groups" msgstr "補足グループの削除に失敗しました" #, python-format msgid "Failed to set gid %s" msgstr "gid %s の設定に失敗しました。" #, python-format msgid "Failed to set uid %s" msgstr "uid %s の設定に失敗しました" #, python-format msgid "Failed to set-up %(type)s tunnel port to %(ip)s" msgstr "%(ip)s に対する %(type)s トンネルポートをセットアップできませんでした" msgid "Failure applying iptables rules" msgstr "iptables ルール適用の失敗" #, python-format msgid "Failure waiting for address %(address)s to become ready: %(reason)s" msgstr "アドレス %(address)s の準備ができるまでの待機の失敗: %(reason)s" msgid "Flat provider networks are disabled" msgstr "flat プロバイダーネットワークが無効化されています" #, python-format msgid "Flavor %(flavor_id)s could not be found." msgstr "フレーバー %(flavor_id)s が見つかりませんでした。" #, python-format msgid "Flavor %(flavor_id)s is used by some service instance." msgstr "あるサービスインスタンスがフレーバー %(flavor_id)s を使用しています。" msgid "Flavor is not enabled." msgstr "フレーバーが有効化されていません。" #, python-format msgid "Floating IP %(floatingip_id)s could not be found" msgstr "Floating IP %(floatingip_id)s が見つかりませんでした" #, python-format msgid "" "Floating IP %(floatingip_id)s is associated with non-IPv4 address " "%s(internal_ip)s and therefore cannot be bound." msgstr "" "Floating IP %(floatingip_id)s は IPv4 ではないアドレス %s(internal_ip)s と関" "連付けられているため、バインドできません。" msgid "For TCP/UDP protocols, port_range_min must be <= port_range_max" msgstr "" "TCP/UDP プロトコルの場合、port_range_min は port_range_max 以下でなければなり" "ません" #, python-format msgid "For class %(object_type)s missing primary keys: %(missing_keys)s" msgstr "" "クラス %(object_type)s に関して欠損しているプライマリーキー: %(missing_keys)s" msgid "Force ip_lib calls to use the root helper" msgstr "ip_lib 呼び出しでルートヘルパーを強制的に使用します" #, python-format msgid "Found duplicate extension: %(alias)s." msgstr "重複する拡張が見つかりました: %(alias)s。" #, python-format msgid "" "Found overlapping allocation pools: %(pool_1)s %(pool_2)s for subnet " "%(subnet_cidr)s." msgstr "" "サブネットの重なり合った割り当てプール %(pool_1)s %(pool_2)s が見つかりまし" "た%(subnet_cidr)s。" msgid "Gateway IP version inconsistent with allocation pool version" msgstr "" "ゲートウェイの IP バージョンは割り当てプールのバージョンと一致する必要があり" "ます" #, python-format msgid "" "Gateway cannot be updated for router %(router_id)s, since a gateway to " "external network %(net_id)s is required by one or more floating IPs." msgstr "" "外部ネットワーク %(net_id)s へのゲートウェイは、1 つ以上の Floating IP で必要" "なため、ルーター %(router_id)s のゲートウェイを更新できません。" #, python-format msgid "Gateway ip %(ip_address)s conflicts with allocation pool %(pool)s." 
msgstr "" "ゲートウェイ IP %(ip_address)s が割り当てプール %(pool)s と競合しています。" msgid "Gateway is not valid on subnet" msgstr "ゲートウェイがサブネット上で無効です" msgid "" "Geneve encapsulation header size is dynamic, this value is used to calculate " "the maximum MTU for the driver. This is the sum of the sizes of the outer " "ETH + IP + UDP + GENEVE header sizes. The default size for this field is 50, " "which is the size of the Geneve header without any additional option headers." msgstr "" "Geneve のカプセル化のヘッダーサイズは動的なため、この値を使用してドライバー" "の MTU を計算することができます。この値は外部の ETH、IP、UDP、GENEVE のヘッ" "ダーサイズの合計です。このフィールドのデフォルトのサイズは 50 であり、これは" "追加のオプションヘッダーを使用しない Geneve のヘッダーのサイズです。" msgid "Group (gid or name) running metadata proxy after its initialization" msgstr "メタデータプロキシーを初期化後に実行しているグループ (gid または名前)" msgid "" "Group (gid or name) running metadata proxy after its initialization (if " "empty: agent effective group)." msgstr "" "初期化後にメタデータプロキシーを実行しているグループ (gid または名前) (空の場" "合: エージェント有効グループ)。" msgid "Group (gid or name) running this process after its initialization" msgstr "初期化後にこのプロセスを実行するグループ (gid または名前)" #, python-format msgid "HEAD file does not match migration timeline head, expected: %s" msgstr "HEAD ファイルが予期される移行のタイムラインヘッドと合致しません: %s" msgid "" "Hostname to be used by the Neutron server, agents and services running on " "this machine. All the agents and services running on this machine must use " "the same host value." msgstr "" "このマシン上で稼働する Neutron のサーバー、エージェント、サービスが使用するホ" "スト名。このマシン上で稼働するすべてのエージェントとサービスは同じホスト値を" "使用する必要があります。" msgid "How many times Neutron will retry MAC generation" msgstr "Neutron が MAC の生成を再試行する回数" #, python-format msgid "" "ICMP code (port-range-max) %(value)s is provided but ICMP type (port-range-" "min) is missing." msgstr "" "ICMP コード (port-range-max) %(value)s が指定されましたが、ICMP タイプ (port-" "range-min) がありません。" msgid "ID of network" msgstr "ネットワークの ID" msgid "ID of network to probe" msgstr "プローブするネットワークの ID" msgid "ID of probe port to delete" msgstr "削除するプローブポートの ID" msgid "ID of probe port to execute command" msgstr "コマンドを実行するプローブ ポートの ID" msgid "ID of the router" msgstr "ルーターの ID" #, python-format msgid "IP address %(ip)s already allocated in subnet %(subnet_id)s" msgstr "" "IP アドレス %(ip)s が既にサブネット %(subnet_id)s に割り当てられています" #, python-format msgid "IP address %(ip)s does not belong to subnet %(subnet_id)s" msgstr "IP アドレス %(ip)s がサブネット %(subnet_id)s に属していません" #, python-format msgid "" "IP address %(ip_address)s is not a valid IP for any of the subnets on the " "specified network." msgstr "" "IP アドレス %(ip_address)s は、指定されたネットワーク上のどのサブネットに対し" "ても有効な IP ではありません。" msgid "IP address used by Nova metadata server." msgstr "Nova メタデータサーバーによって使用される IP アドレス。" msgid "IP allocation failed. Try again later." msgstr "IP の割り当てが失敗しました。後で再び割り当ててください。" msgid "IP allocation requires subnet_id or ip_address" msgstr "IP を割り当てるには subnet_id または ip_address が必要です" #, python-format msgid "" "IPTablesManager.apply failed to apply the following set of iptables rules:\n" "%s" msgstr "" "IPTablesManager.apply が、次の一連の iptables 規則の適用に失敗しました: \n" "%s" msgid "IPtables conntrack zones exhausted, iptables rules cannot be applied." msgstr "" "iptables のすべての conntrack ゾーンが使用されたため、iptables ルールを適用で" "きません。" msgid "IPv6 Address Mode must be SLAAC or Stateless for Prefix Delegation." msgstr "" "プレフィックスデリゲーションを行うには、IPv6 アドレスモードは SLAAC または " "Stateless である必要があります。" msgid "IPv6 RA Mode must be SLAAC or Stateless for Prefix Delegation." 
msgstr "" "プレフィックスデリゲーションを行うには、IPv6 RA モードは SLAAC または " "Stateless である必要があります。" #, python-format msgid "" "IPv6 address %(address)s can not be directly assigned to a port on subnet " "%(id)s since the subnet is configured for automatic addresses" msgstr "" "サブネットは自動アドレス用に構成されているため、IPv6 アドレス %(address)s を" "サブネット %(id)s 上のポートに直接割り当てることはできません" #, python-format msgid "" "IPv6 address %(ip)s cannot be directly assigned to a port on subnet " "%(subnet_id)s as the subnet is configured for automatic addresses" msgstr "" "サブネット %(subnet_id)s は自動アドレス用に設定されているため、IPv6 アドレス " "%(ip)s をこのサブネット上のポートに直接割り当てることはできません" #, python-format msgid "" "IPv6 subnet %s configured to receive RAs from an external router cannot be " "added to Neutron Router." msgstr "" "外部ルーターから RA を受け取るように構成された IPv6 サブネット %s をNeutron " "ルーターに追加することはできません。" msgid "" "If True, advertise network MTU values if core plugin calculates them. MTU is " "advertised to running instances via DHCP and RA MTU options." msgstr "" "True が設定されている場合、コアプラグインが MTU 値を計算すると、ネットワーク" "の MTU 値を提供します。DHCP と RA の MTU オプション経由で、MTU は稼働中のイン" "スタンスに提供されます。" msgid "" "If True, then allow plugins that support it to create VLAN transparent " "networks." msgstr "" "True の場合、IPAM ドライバーをサポートするプラグインに VLAN トランスペアレン" "トネットワークの作成を許可します。" msgid "" "If non-empty, the l3 agent can only configure a router that has the matching " "router ID." msgstr "" "この値が空でない場合、L3 エージェントは合致するルーター ID を持つルーターのみ" "を設定することができます。" msgid "Illegal IP version number" msgstr "IP バージョン番号が正しくありません" #, python-format msgid "" "Illegal prefix bounds: %(prefix_type)s=%(prefixlen)s, %(base_prefix_type)s=" "%(base_prefixlen)s." msgstr "" "正しくないプレフィックス境界: %(prefix_type)s=" "%(prefixlen)s、%(base_prefix_type)s=%(base_prefixlen)s。" #, python-format msgid "" "Illegal subnetpool association: subnetpool %(subnetpool_id)s cannot " "associate with address scope %(address_scope_id)s because subnetpool " "ip_version is not %(ip_version)s." msgstr "" "サブネットプールの正しくない関連付け: サブネットプールの ip_version が " "%(ip_version)s でないため、サブネットプール %(subnetpool_id)s をアドレスス" "コープ %(address_scope_id)s と関連付けることはできません。" #, python-format msgid "" "Illegal subnetpool association: subnetpool %(subnetpool_id)s cannot be " "associated with address scope %(address_scope_id)s." msgstr "" "サブネットプールの正しくない関連付け: サブネットプール %(subnetpool_id)s を" "アドレススコープ %(address_scope_id)s と関連付けることはできません。" #, python-format msgid "Illegal subnetpool update : %(reason)s." msgstr "正しくないサブネットプールの更新: %(reason)s。" #, python-format msgid "Illegal update to prefixes: %(msg)s." msgstr "プレフィックスに対する正しくない更新: %(msg)s。" msgid "" "In some cases the Neutron router is not present to provide the metadata IP " "but the DHCP server can be used to provide this info. Setting this value " "will force the DHCP server to append specific host routes to the DHCP " "request. If this option is set, then the metadata service will be activated " "for all the networks." msgstr "" "Neutron ルーターが存在せず、メタデータ IP を提供できない場合に、DHCP サーバー" "を使用してこの情報を提供することができます。この値を設定すると、DHCP サーバー" "は DHCP 要求に対して特定のホストの経路を追加します。このオプションを設定する" "と、すべてのネットワークに対してメタデータサービスが有効化されます。" #, python-format msgid "Incorrect pci_vendor_info: \"%s\", should be pair vendor_id:product_id" msgstr "" "不正確な pci_vendor_info: \"%s\" は vendor_id:product_id のペアである必要があ" "ります" msgid "" "Indicates that this L3 agent should also handle routers that do not have an " "external network gateway configured. 
msgid "" "Indicates that this L3 agent should also handle routers that do not have an " "external network gateway configured. This option should be True only for a " "single agent in a Neutron deployment, and may be False for all agents if all " "routers must have an external network gateway." msgstr "" "この L3 エージェントが、外部ネットワークのゲートウェイが設定されていないルーターにも対応する必要があることを示します。Neutron のデプロイメント内の 1 つのエージェントに対してのみ、このオプションを True に設定する必要があります。すべてのルーターが外部ネットワークのゲートウェイを持つ必要がある場合は、すべてのエージェントで False に設定できます。" #, python-format msgid "Instance of class %(module)s.%(class)s must contain _cache attribute" msgstr "" "クラス %(module)s.%(class)s のインスタンスは _cache 属性を含む必要があります" #, python-format msgid "Insufficient prefix space to allocate subnet size /%s" msgstr "サブネットサイズ /%s を割り振るためのプレフィックス空間が不十分です" msgid "Insufficient rights for removing default security group." msgstr "デフォルトのセキュリティーグループを削除するための権限が不十分です。" msgid "" "Integration bridge to use. Do not change this parameter unless you have a " "good reason to. This is the name of the OVS integration bridge. There is one " "per hypervisor. The integration bridge acts as a virtual 'patch bay'. All VM " "VIFs are attached to this bridge and then 'patched' according to their " "network connectivity." msgstr "" "使用する統合ブリッジ。適切な理由がない限り、このパラメーターを変更しないでく" "ださい。これは OVS の統合ブリッジの名前となります。ハイパーバイザーごとに 1 " "つのブリッジが存在します。この統合ブリッジは仮想の「パッチベイ」として機能し" "ます。すべての VM の VIF はこのブリッジに接続し、ネットワーク接続に基づいて" "パッチが適用されます。" msgid "Interface to monitor" msgstr "モニター対象のインターフェース" msgid "" "Interval between checks of child process liveness (seconds), use 0 to disable" msgstr "子プロセスの動作状況を確認する間隔 (秒)、無効にするには 0 を指定します" msgid "Interval between two metering measures" msgstr "2 つの計測間の間隔" msgid "Interval between two metering reports" msgstr "2 つの計測レポート間の間隔" #, python-format msgid "Invalid CIDR %(input)s given as IP prefix." msgstr "IP のプレフィックスとして指定された CIDR %(input)s が無効です。" #, python-format msgid "" "Invalid CIDR %s for IPv6 address mode. OpenStack uses the EUI-64 address " "format, which requires the prefix to be /64." msgstr "" "IPv6 アドレスモードの CIDR %s が無効です。OpenStack ではプレフィックス /64 を" "必要とする EUI-64 アドレス形式が使用されます。" #, python-format msgid "Invalid Device %(dev_name)s: %(reason)s" msgstr "無効なデバイス %(dev_name)s: %(reason)s" #, python-format msgid "" "Invalid action '%(action)s' for object type '%(object_type)s'. Valid " "actions: %(valid_actions)s" msgstr "" "オブジェクトタイプ '%(object_type)s' の無効なアクション '%(action)s'。有効な" "アクション: %(valid_actions)s" #, python-format msgid "" "Invalid authentication type: %(auth_type)s, valid types are: " "%(valid_auth_types)s" msgstr "" "認証タイプ %(auth_type)s は無効です。有効なタイプは %(valid_auth_types)s です" #, python-format msgid "Invalid content type %(content_type)s." msgstr "無効なコンテンツタイプ %(content_type)s。" #, python-format msgid "Invalid data format for IP pool: '%s'" msgstr "IP プールに無効なデータ形式: '%s'" #, python-format msgid "Invalid data format for extra-dhcp-opt: %(data)s" msgstr "extra-dhcp-opt のデータ形式が無効です: %(data)s" #, python-format msgid "Invalid data format for fixed IP: '%s'" msgstr "Fixed IP に無効なデータ形式: '%s'" #, python-format msgid "Invalid data format for hostroute: '%s'" msgstr "ホスト経路の無効なデータ形式: '%s'" #, python-format msgid "Invalid data format for nameserver: '%s'" msgstr "ネームサーバーに無効なデータ形式: '%s'" #, python-format msgid "Invalid ethertype %(ethertype)s for protocol %(protocol)s." msgstr "プロトコル %(protocol)s に関する無効なイーサタイプ %(ethertype)s。"
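# Editor's note: illustrative example only, not part of the message catalog.
# The L3 agent behaviour described above would be configured in l3_agent.ini
# (assuming the option name handle_internal_only_routers):
#
#   [DEFAULT]
#   handle_internal_only_routers = True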
msgstr "無効な拡張環境: %(reason)s。" #, python-format msgid "Invalid format for routes: %(routes)s, %(reason)s" msgstr "ルートの形式が無効です: %(routes)s、%(reason)s" #, python-format msgid "Invalid format: %s" msgstr "無効な形式: %s" #, python-format msgid "Invalid input for %(attr)s. Reason: %(reason)s." msgstr "%(attr)s に無効な入力です。理由: %(reason)s。" #, python-format msgid "" "Invalid input. '%(target_dict)s' must be a dictionary with keys: " "%(expected_keys)s" msgstr "" "無効な入力です。'%(target_dict)s' は、キー %(expected_keys)s を持つディクショ" "ナリーでなければなりません。" #, python-format msgid "Invalid instance state: %(state)s, valid states are: %(valid_states)s" msgstr "" "インスタンス状態 %(state)s は無効です。有効な状態は %(valid_states)s です" #, python-format msgid "Invalid mapping: '%s'" msgstr "無効なマッピング: '%s'" #, python-format msgid "Invalid network VLAN range: '%(vlan_range)s' - '%(error)s'." msgstr "無効なネットワーク VLAN の範囲: '%(vlan_range)s' - '%(error)s'" #, python-format msgid "Invalid network VXLAN port range: '%(vxlan_range)s'." msgstr "無効なネットワーク VXLAN ポートの範囲: '%(vxlan_range)s'" #, python-format msgid "Invalid pci slot %(pci_slot)s" msgstr "無効な PCI スロット %(pci_slot)s" #, python-format msgid "Invalid provider format. Last part should be 'default' or empty: %s" msgstr "" "プロバイダーの指定形式が無効です。最後の部分は 'default' または空にしてくださ" "い: %s" #, python-format msgid "Invalid resource type %(resource_type)s" msgstr "無効なリソースタイプ %(resource_type)s" #, python-format msgid "Invalid route: %s" msgstr "無効な経路: %s" msgid "Invalid service provider format" msgstr "サービスプロバイダーの指定形式が無効です" #, python-format msgid "Invalid service type %(service_type)s." msgstr "無効なサービスタイプ %(service_type)s。" #, python-format msgid "" "Invalid value for ICMP %(field)s (%(attr)s) %(value)s. It must be 0 to 255." msgstr "" "ICMP %(field)s (%(attr)s) %(value)s に無効な値です。これは 0 から 255 までで" "なければなりません。" #, python-format msgid "Invalid value for port %(port)s" msgstr "ポート %(port)s の無効値" msgid "" "Iptables mangle mark used to mark ingress from external network. This mark " "will be masked with 0xffff so that only the lower 16 bits will be used." msgstr "" "Iptables は外部ネットワークからの進入を示すために使用されるマークを分割しま" "す。このマークを 0xffff でマスキングすることで、後半の 16 ビットのみを使用し" "ます。" msgid "" "Iptables mangle mark used to mark metadata valid requests. This mark will be " "masked with 0xffff so that only the lower 16 bits will be used." msgstr "" "Iptables はメタデータの適切な要求を示すために使用されるマークを分割します。こ" "のマークを 0xffff でマスキングすることで、後半の 16 ビットのみを使用します。" msgid "" "Keep in track in the database of current resourcequota usage. Plugins which " "do not leverage the neutron database should set this flag to False" msgstr "" "現在のリソースのクォータの使用状況に関するデータベースを管理します。neutron " "データベースを使用しないプラグインはこのフラグを False に設定します。" msgid "Keepalived didn't respawn" msgstr "keepalived が再作成されませんでした" msgid "Keepalived didn't spawn" msgstr "Keepalived が生成されませんでした" #, python-format msgid "" "Kernel HZ value %(value)s is not valid. This value must be greater than 0." msgstr "" "カーネル HZ の値 %(value)s が無効です。0 より大きい値にしなければなりません。" #, python-format msgid "Key %(key)s in mapping: '%(mapping)s' not unique" msgstr "マッピング '%(mapping)s' 内のキー %(key)s が固有ではありません" msgid "L3 agent failure to setup NAT for floating IPs" msgstr "L3 エージェントによる Floating IP への NAT のセットアップの失敗" msgid "L3 agent failure to setup floating IPs" msgstr "L3 エージェントによる Floating IP のセットアップの失敗" #, python-format msgid "Limit must be an integer 0 or greater and not '%d'" msgstr "limit は整数 0 以上でなければならず、'%d' にはしないようにしてください" msgid "Limit number of leases to prevent a denial-of-service." 
msgstr "Denial-of-Service を防ぐためにリースの数を制限してください。" msgid "List of :" msgstr " のリスト: " msgid "" "List of :: or " "specifying physical_network names usable for VLAN provider and tenant " "networks, as well as ranges of VLAN tags on each available for allocation to " "tenant networks." msgstr "" ":: または のリスト。" "このリストには、VLAN プロバイダー/テナントネットワークに使用できる " "physical_network 名が指定されるだけでなく、テナントネットワークに割り振ること" "ができる各物理ネットワークの VLAN タグの範囲も指定されます。" msgid "" "List of network type driver entrypoints to be loaded from the neutron.ml2." "type_drivers namespace." msgstr "" "neutron.ml2.type_drivers 名前空間からロードするネットワークタイプドライバーの" "エントリーポイントのリスト。" msgid "" "List of physical_network names with which flat networks can be created. Use " "default '*' to allow flat networks with arbitrary physical_network names. " "Use an empty list to disable flat networks." msgstr "" "フラットネットワークの作成が可能な physical_network 名のリスト。デフォルト値" "の '*' を使用すると、任意の physical_network 名を持つフラットネットワークを作" "成できます。空のリストを使用すると、フラットネットワークが無効化されます。" msgid "Local IP address of the VXLAN endpoints." msgstr "VXLAN エンドポイントのローカル IP アドレス。" msgid "Location for Metadata Proxy UNIX domain socket." msgstr "メタデータプロキシー UNIX ドメインソケットのロケーション。" msgid "Location of Metadata Proxy UNIX domain socket" msgstr "メタデータプロキシーの UNIX ドメインソケットの場所" msgid "Location of pid file of this process." msgstr "このプロセスの pid ファイルのロケーション。" msgid "Location to store DHCP server config files." msgstr "DHCP サーバーの設定ファイルを保存するロケーション。" msgid "Location to store IPv6 PD files." msgstr "IPv6 PD ファイルを保存するロケーション。" msgid "Location to store IPv6 RA config files" msgstr "IPv6 RA 設定ファイルを保存する場所" msgid "Location to store child pid files" msgstr "子プロセスの PID ファイルを保持する場所" msgid "Location to store keepalived/conntrackd config files" msgstr "keepalived/conntrackd 構成ファイルを保存する場所" msgid "Log agent heartbeats" msgstr "エージェントのハートビートを記録します" msgid "Loopback IP subnet is not supported if enable_dhcp is True." msgstr "" "enable_dhcp が True の場合は、ループバック IP サブネットはサポートされませ" "ん。" msgid "MTU size of veth interfaces" msgstr "veth インターフェースの MTU サイズ" msgid "Make the l2 agent run in DVR mode." msgstr "L2 エージェントを DVR モードで実行します。" msgid "Malformed request body" msgstr "誤った形式のリクエスト本文" #, python-format msgid "Malformed request body: %(reason)s." msgstr "誤った形式の要求本文: %(reason)s。" msgid "MaxRtrAdvInterval setting for radvd.conf" msgstr "radvd.conf の MaxRtrAdvInterval 設定" msgid "Maximum number of DNS nameservers per subnet" msgstr "サブネットごとの DNS ネームサーバーの最大数" msgid "" "Maximum number of L3 agents which a HA router will be scheduled on. If it is " "set to 0 then the router will be scheduled on every agent." msgstr "" "HA ルーターがスケジュール設定される L3 エージェントの最大数。この値を 0 に設" "定すると、ルーターはすべてのエージェントに対してスケジュール設定されます。" msgid "Maximum number of allowed address pairs" msgstr "許可されたアドレスペアの最大数" msgid "" "Maximum number of fixed ips per port. This option is deprecated and will be " "removed in the N release." msgstr "" "ポートごとの 固定 IP の最大数。このオプションは提供を終了しており、N リリース" "では削除されます。" msgid "Maximum number of host routes per subnet" msgstr "サブネットごとのホスト経路の最大数" msgid "Maximum number of routes per router" msgstr "ルーターごとに設定可能な経路の最大数" msgid "" "Metadata Proxy UNIX domain socket mode, 4 values allowed: 'deduce': deduce " "mode from metadata_proxy_user/group values, 'user': set metadata proxy " "socket mode to 0o644, to use when metadata_proxy_user is agent effective " "user or root, 'group': set metadata proxy socket mode to 0o664, to use when " "metadata_proxy_group is agent effective group or root, 'all': set metadata " "proxy socket mode to 0o666, to use otherwise." 
msgstr "" "メタデータプロキシーの UNIX ドメインのソケットモードでは 4 つの値を使用できま" "す。'deduce' は metadata_proxy_user/group の値からモードを推測します。'user' " "はメタデータプロキシーのソケットモードを 0o644 に設定します " "(metadata_proxy_user がエージェントの有効なユーザーまたはルートである場合に使" "用)。'group' はメタデータプロキシーのソケットモードを 0o664 に設定します " "(metadata_proxy_group がエージェントの有効なグループまたはルートである場合に" "使用)。'all' はメタデータプロキシーのソケットモードを 0o666 に設定します (そ" "の他の場合に使用)。" msgid "Metering driver" msgstr "計測ドライバー" #, python-format msgid "Metering label %(label_id)s does not exist" msgstr "計測ラベル %(label_id)s は存在しません" #, python-format msgid "Metering label rule %(rule_id)s does not exist" msgstr "計測ラベル規則 %(rule_id)s は存在しません" #, python-format msgid "" "Metering label rule with remote_ip_prefix %(remote_ip_prefix)s overlaps " "another" msgstr "" "remote_ip_prefix %(remote_ip_prefix)s を持つ計測ラベル規則が他の計測ラベル規" "則と重なり合っています" msgid "Method cannot be called within a transaction." msgstr "トランザクション内にメソッドを呼び出すことができません" msgid "Migration from distributed router to centralized is not supported" msgstr "分散ルーターから集中ルーターへの移行はサポートされません" msgid "MinRtrAdvInterval setting for radvd.conf" msgstr " radvd.conf の MinRtrAdvInterval 設定" msgid "Minimize polling by monitoring ovsdb for interface changes." msgstr "" "インターフェース変更の検出に関して ovsdb をモニターすることでポーリングが最小" "化されます。" #, python-format msgid "Missing key in mapping: '%s'" msgstr "マッピングにキーがありません: '%s'" #, python-format msgid "Missing value in mapping: '%s'" msgstr "マッピングに値がありません: '%s'" msgid "Multicast IP subnet is not supported if enable_dhcp is True." msgstr "" "enable_dhcp が True の場合は、マルチキャスト IP サブネットはサポートされませ" "ん。" msgid "" "Multicast group for VXLAN. When configured, will enable sending all " "broadcast traffic to this multicast group. When left unconfigured, will " "disable multicast VXLAN mode." msgstr "" "VXLAN のマルチキャストグループ。設定されると、このマルチキャストグループにす" "べてのブロードキャストトラフィックを送信できます。設定されないままにすると、" "マルチキャスト VXLAN モードを無効化します。" msgid "" "Multicast group(s) for vxlan interface. A range of group addresses may be " "specified by using CIDR notation. Specifying a range allows different VNIs " "to use different group addresses, reducing or eliminating spurious broadcast " "traffic to the tunnel endpoints. To reserve a unique group for each possible " "(24-bit) VNI, use a /8 such as 239.0.0.0/8. This setting must be the same on " "all the agents." msgstr "" "vxlan インターフェースのマルチキャストグループ。CIDR 表記を使用することで、一" "定の範囲のグループアドレスを指定できます。範囲を指定するとさまざまな VNI がさ" "まざまなグループアドレスを使用できるため、トンネルのエンドポイントへの不適切" "なブロードキャストトラフィックを削減または排除できます。使用する可能性のある" "各 (24 ビットの) VNI に独自のグループを予約するには、 /8 (239.0.0.0/8 など) " "を使用します。この設定はすべてのエージェントで同じである必要があります。" #, python-format msgid "Multiple agents with agent_type=%(agent_type)s and host=%(host)s found" msgstr "" "agent_type=%(agent_type)s および host=%(host)s のエージェントが複数見つかりま" "した" #, python-format msgid "Multiple default providers for service %s" msgstr "サービス %s のデフォルトのプロバイダーが複数あります" #, python-format msgid "Multiple plugins for service %s were configured" msgstr "サービス %s に対して複数のプラグインが構成されていました" #, python-format msgid "Multiple providers specified for service %s" msgstr "複数のプロバイダーがサービス %s に対して指定されました" msgid "Multiple tenant_ids in bulk security group rule create not allowed" msgstr "" "バルクセキュリティーグループルールの作成で複数の tenant_id は許可されません" msgid "Must also specify protocol if port range is given." 
msgstr "" "ポートの範囲が提供されている場合は、プロトコルも指定する必要があります。" msgid "Must specify one or more actions on flow addition or modification" msgstr "フローの追加または変更について、1 つ以上のアクションを指定してください" #, python-format msgid "Name %(dns_name)s is duplicated in the external DNS service" msgstr "名前 %(dns_name)s が外部の DNS サービス内で重複しています" #, python-format msgid "" "Name '%s' must be 1-63 characters long, each of which can only be " "alphanumeric or a hyphen." msgstr "" "名前 '%s' は 1 文字から 63 文字の長さでなければなりません。それぞれの文字には" "英数字またはハイフンを使用できます。" #, python-format msgid "Name '%s' must not start or end with a hyphen." msgstr "名前 '%s の先頭または末尾にハイフンを使用してはなりません。" msgid "Name of Open vSwitch bridge to use" msgstr "使用する Open vSwitch ブリッジの名前" msgid "" "Name of nova region to use. Useful if keystone manages more than one region." msgstr "" "使用する nova リージョンの名前。Keystone で複数のリージョンを管理する場合に役" "立ちます。" msgid "Name of the FWaaS Driver" msgstr "FWaaS ドライバーの名前" msgid "Namespace of the router" msgstr "ルーターの名前空間" msgid "Native pagination depend on native sorting" msgstr "ネイティブページ編集はネイティブソートに依存します" #, python-format msgid "" "Need to apply migrations from %(project)s contract branch. This will require " "all Neutron server instances to be shutdown before proceeding with the " "upgrade." msgstr "" "%(project)s の収縮枝からの移行を適用する必要があります。アップグレードを行う" "前に、すべての Neutron サーバーのインスタンスをシャットダウンする必要がありま" "す。" msgid "Negative delta (downgrade) not supported" msgstr "負のデルタ (ダウングレード) はサポートされていません" msgid "Negative relative revision (downgrade) not supported" msgstr "負の相対的な変更 (ダウングレード) はサポートされていません" #, python-format msgid "" "Network %(network_id)s is already bound to BgpSpeaker %(bgp_speaker_id)s." msgstr "" "ネットワーク %(network_id)s は既に BGP スピーカー %(bgp_speaker_id)s とバイン" "ドされています。" #, python-format msgid "" "Network %(network_id)s is not associated with BGP speaker %(bgp_speaker_id)s." msgstr "" "ネットワーク %(network_id)s が BGP スピーカー %(bgp_speaker_id)s と関連付けら" "れていません。" #, python-format msgid "Network %(network_id)s is not bound to a BgpSpeaker." msgstr "" "ネットワーク %(network_id)s が BGP スピーカーとバインドされていません。" #, python-format msgid "Network %(network_id)s is not bound to a IPv%(ip_version)s BgpSpeaker." msgstr "" "ネットワーク %(network_id)s は IPv%(ip_version)s の BGP スピーカーにバインド" "されていません。" #, python-format msgid "Network %s does not contain any IPv4 subnet" msgstr "ネットワーク %s には IPv4 サブネットが含まれません" #, python-format msgid "Network %s is not a valid external network" msgstr "ネットワーク %s は有効な外部ネットワークではありません" #, python-format msgid "Network %s is not an external network" msgstr "ネットワーク %s は外部ネットワークではありません" #, python-format msgid "" "Network of size %(size)s, from IP range %(parent_range)s excluding IP ranges " "%(excluded_ranges)s was not found." msgstr "" "IP 範囲 %(parent_range)s (IP 範囲 %(excluded_ranges)s を除く) からのサイズ " "%(size)s のネットワークが見つかりませんでした。" msgid "Network that will have instance metadata proxied." msgstr "インスタンスメタデータがプロキシー処理されるネットワーク。" #, python-format msgid "Network type value '%s' not supported" msgstr "ネットワークタイプ値 '%s' はサポートされていません" msgid "Network type value needed by the ML2 plugin" msgstr "ネットワークタイプ値が ML2 プラグインに必要です" msgid "Network types supported by the agent (gre and/or vxlan)." msgstr "" "エージェントによってサポートされるネットワークタイプ (gre と vxlan のいずれ" "か、または両方)。" msgid "" "Neutron IPAM (IP address management) driver to use. If ipam_driver is not " "set (default behavior), no IPAM driver is used. In order to use the " "reference implementation of Neutron IPAM driver, use 'internal'." 
msgstr "" "使用する Netron の IPAM (IP アドレス管理) ドライバー。ipam_driver が設定され" "ない場合 (デフォルトの挙動)、IPAM ドライバーは使用されません。Neutron の " "IPAM ドライバーのリファレンス実装を使用するには、 'internal' を使用してくださ" "い。" msgid "Neutron Service Type Management" msgstr "Neutron サービスタイプ管理" msgid "Neutron core_plugin not configured!" msgstr "Neutron の core_plugin が設定されていません。" msgid "Neutron plugin provider module" msgstr "Neutron プラグインプロバイダーモジュール" msgid "Neutron quota driver class" msgstr "Neutron 割り当て量ドライバークラス" msgid "New value for first_ip or last_ip has to be specified." msgstr "first_ip または last_ip に対して新規の値を指定する必要があります。" msgid "No default router:external network" msgstr "デフォルトの router:external ネットワークがありません" #, python-format msgid "No default subnetpool found for IPv%s" msgstr "IPv%s に関するデフォルトのサブネットプールが見つかりません" msgid "No default subnetpools defined" msgstr "定義されたデフォルトのサブネットプールがありません" #, python-format msgid "No eligible l3 agent associated with external network %s found" msgstr "" "外部ネットワーク %s に関連付けられる適格な L3 エージェントが見つかりません" #, python-format msgid "No more IP addresses available for subnet %(subnet_id)s." msgstr "" "サブネット %(subnet_id)s ではこれ以上使用可能な IP アドレスがありません。" #, python-format msgid "" "No more Virtual Router Identifier (VRID) available when creating router " "%(router_id)s. The limit of number of HA Routers per tenant is 254." msgstr "" "ルーター %(router_id)s の作成の際に使用可能な仮想ルーター ID (VRID) がもう存" "在しません。テナントごとの HA ルーター数の上限は 254 です。" msgid "No offline migrations pending." msgstr "オフラインで実行中の移行はありません。" #, python-format msgid "No providers specified for '%s' service, exiting" msgstr "'%s' サービスに対して指定されたプロバイダーはありません。終了します" #, python-format msgid "No shared key in %s fields" msgstr "%s フィールドに共有鍵が存在しません" msgid "Not allowed to manually assign a router to an agent in 'dvr' mode." msgstr "" "'dvr' モードのエージェントへの手動でのルーター割り当ては許可されません。" msgid "Not allowed to manually remove a router from an agent in 'dvr' mode." msgstr "'dvr' モードのエージェントからの手動でのルーター削除は許可されません。" #, python-format msgid "" "Not enough l3 agents available to ensure HA. Minimum required " "%(min_agents)s, available %(num_agents)s." msgstr "" "HA を確実にするためには、l3 エージェントが十分ではありません。必要な最小数 " "%(min_agents)s、使用可能 %(num_agents)s。" msgid "" "Number of DHCP agents scheduled to host a tenant network. If this number is " "greater than 1, the scheduler automatically assigns multiple DHCP agents for " "a given tenant network, providing high availability for DHCP service." msgstr "" "テナントネットワークをホストするようにスケジュール設定された DHCP エージェン" "トの数。この数が 1 より大きい場合、スケジューラーが自動的に特定のテナントネッ" "トワークに複数の DHCP エージェントを割り当てるため、DHCP サービスの高可用性が" "実現します。" msgid "Number of RPC worker processes dedicated to state reports queue" msgstr "レポートのキューを報告するためにのみ機能する RPC ワーカープロセスの数" msgid "Number of RPC worker processes for service" msgstr "RPC サービスのワーカープロセス数" msgid "Number of backlog requests to configure the metadata server socket with" msgstr "メタデータサーバーソケットの構成に使用されるバックログ要求の数" msgid "Number of backlog requests to configure the socket with" msgstr "ソケットに設定するリクエストのバックログ数" msgid "" "Number of bits in an ipv4 PTR zone that will be considered network prefix. " "It has to align to byte boundary. Minimum value is 8. Maximum value is 24. " "As a consequence, range of values is 8, 16 and 24" msgstr "" "ネットワークのプレフィックスとみなされる Ipv4 PTR ゾーン内のビット数。バイト" "境界と同じ桁数である必要があります。最小値は 8 で、最大値は 24 です。そのた" "め、使用できる値は 8、16、および 24 です。" msgid "" "Number of bits in an ipv6 PTR zone that will be considered network prefix. " "It has to align to nyble boundary. Minimum value is 4. Maximum value is 124. 
" "As a consequence, range of values is 4, 8, 12, 16,..., 124" msgstr "" "ネットワークのプレフィックスとみなされる Ipv6 PTR ゾーン内のビット数。nyble " "境界と同じ桁数である必要があります。最小値は 4 で、最大値は 124 です。そのた" "め、使用できる値は 4、 8、12、16、... 124 です。" msgid "" "Number of floating IPs allowed per tenant. A negative value means unlimited." msgstr "" "テナント当たりに許可される Floating IP 数。負の値は無制限を意味します。 " msgid "" "Number of networks allowed per tenant. A negative value means unlimited." msgstr "テナント当たりに許可されるネットワーク数。負の値は無制限を意味します。" msgid "Number of ports allowed per tenant. A negative value means unlimited." msgstr "テナント当たりに許可されるポート数。負の値は無制限を意味します。" msgid "Number of routers allowed per tenant. A negative value means unlimited." msgstr "テナント当たりに許可されるルーター数。負の値は無制限を意味します。" msgid "" "Number of seconds between sending events to nova if there are any events to " "send." msgstr "送信するイベントがある場合の nova へのイベント送信間の秒数。" msgid "Number of seconds to keep retrying to listen" msgstr "リッスンを試行し続ける秒数" msgid "" "Number of security groups allowed per tenant. A negative value means " "unlimited." msgstr "" "テナント当たりに許可されるセキュリティーグループ数。負の値は無制限を意味しま" "す。 " msgid "" "Number of security rules allowed per tenant. A negative value means " "unlimited." msgstr "" "テナント当たりに許可されるセキュリティールール数。負の値は無制限を意味しま" "す。 " msgid "" "Number of separate API worker processes for service. If not specified, the " "default is equal to the number of CPUs available for best performance." msgstr "" "API サービスに使用する個別ワーカープロセスの数。指定されなかった場合、デフォ" "ルト値は性能を最大限得るために使用可能な CPU の数と一致します。" msgid "" "Number of separate worker processes for metadata server (defaults to half of " "the number of CPUs)" msgstr "" "メタデータサーバーの個別のワーカープロセスの数 (デフォルト値は CPU 数の半数)" msgid "Number of subnets allowed per tenant, A negative value means unlimited." msgstr "テナント当たりに許可されるサブネット数。負の値は無制限を意味します。" msgid "" "Number of threads to use during sync process. Should not exceed connection " "pool size configured on server." msgstr "" "同期プロセス中に使用するスレッドの数。サーバーで設定した接続プールサイズの値" "を超えてはなりません。" msgid "OK" msgstr "OK" msgid "" "OVS datapath to use. 'system' is the default value and corresponds to the " "kernel datapath. To enable the userspace datapath set this value to 'netdev'." msgstr "" "使用する OVS データパス。デフォルト値は 'system' であり、これはカーネルのデー" "タパスに該当します。ユーザースペースのデータパスを有効化するには、この値を " "'netdev' に設定します。" msgid "OVS vhost-user socket directory." msgstr "OVS の vhost-user ソケットディレクトリー。" #, python-format msgid "OVSDB Error: %s" msgstr "OVSDB エラー: %s" #, python-format msgid "Object action %(action)s failed because: %(reason)s." msgstr "" "オブジェクトのアクション %(action)s は次の原因で失敗しました: %(reason)s。" msgid "Only admin can view or configure quota" msgstr "admin のみが割り当て量を表示または構成できます" msgid "Only admin is authorized to access quotas for another tenant" msgstr "別のテナントの割り当て量へのアクセスが許可されているのは管理者のみです" msgid "Only admins can manipulate policies on networks they do not own." msgstr "" "自分がオーナーではないネットワークのポリシーを操作できるのは管理者に限られま" "す。" msgid "Only admins can manipulate policies on objects they do not own" msgstr "" "自分がオーナーではないオブジェクトのポリシーを操作できるのは管理者に限られま" "す" msgid "Only allowed to update rules for one security profile at a time" msgstr "一度に 1 つのセキュリティープロファイルのルールのみを更新できます" msgid "Only remote_ip_prefix or remote_group_id may be provided." msgstr "remote_ip_prefix または remote_group_id のみを指定できます。" msgid "OpenFlow interface to use." msgstr "使用する OpenFlow インターフェース。" #, python-format msgid "" "Operation %(op)s is not supported for device_owner %(device_owner)s on port " "%(port_id)s." 
msgstr "" "操作 %(op)s はポート %(port_id)s の device_owner %(device_owner)s ではサポー" "トされていません。" #, python-format msgid "Operation not supported on device %(dev_name)s" msgstr "デバイス %(dev_name)s でサポートされない処理" msgid "" "Ordered list of network_types to allocate as tenant networks. The default " "value 'local' is useful for single-box testing but provides no connectivity " "between hosts." msgstr "" "テナントネットワークとして割り当てる network_types を一定の順序に並べたリス" "ト。デフォルト値の 'local' はシングルボックステストに役立つものの、ホスト間の" "接続は提供しません。" msgid "Override the default dnsmasq settings with this file." msgstr "" "このファイルを使用して、デフォルトの dnsmasq 設定をオーバーライドします。" msgid "Owner type of the device: network/compute" msgstr "デバイスの所有者タイプ: network/compute" msgid "POST requests are not supported on this resource." msgstr "POST 要求は、このリソースではサポートされていません。" #, python-format msgid "Package %s not installed" msgstr "パッケージ %s はインストールされていません" #, python-format msgid "Parameter %(param)s must be of %(param_type)s type." msgstr "パラメーター %(param)s は %(param_type)s タイプである必要があります。" #, python-format msgid "Parsing bridge_mappings failed: %s." msgstr "bridge_mappings の解析に失敗しました: %s。" msgid "Parsing supported pci_vendor_devs failed" msgstr "サポートされている pci_vendor_devs の解析に失敗しました" msgid "Password for connecting to designate in admin context" msgstr "管理者のコンテキストにおける designate への接続用パスワード" #, python-format msgid "Password not specified for authentication type=%(auth_type)s." msgstr "パスワードは認証の type=%(auth_type)s で指定されていません。" msgid "Path to PID file for this process" msgstr "このプロセスの PID ファイルのパス" msgid "Path to the router directory" msgstr "ルーターディレクトリーのパス" msgid "Peer patch port in integration bridge for tunnel bridge." msgstr "トンネルブリッジの統合ブリッジ内のピアパッチポート。" msgid "Peer patch port in tunnel bridge for integration bridge." msgstr "統合ブリッジのトンネルブリッジ内のピアパッチポート。" msgid "Per-tenant subnet pool prefix quota exceeded." msgstr "" "テナントごとのサブネットプールのプレフィックスのクォータを超過しました。" msgid "Phase upgrade options do not accept revision specification" msgstr "" "フェーズのアップグレードオプションでは、変更の指定を行うことはできません" msgid "Ping timeout" msgstr "ping タイムアウト" #, python-format msgid "Plugin '%s' not found." msgstr "プラグイン '%s' が見つかりません。" msgid "Plugin does not support updating provider attributes" msgstr "プラグインでは、プロバイダー属性の更新はサポートされていません" msgid "Policy configuration policy.json could not be found." msgstr "ポリシー設定 policy.json が見つかりませんでした。" #, python-format msgid "Port %(id)s does not have fixed ip %(address)s" msgstr "ポート %(id)s に Fixed IP %(address)s がありません" #, python-format msgid "Port %(port)s does not exist on %(bridge)s!" msgstr "ポート %(port)s は %(bridge)s に存在しません" #, python-format msgid "Port %(port_id)s is already acquired by another DHCP agent" msgstr "ポート %(port_id)s は既に別の DHCP エージェントに取得されています。" #, python-format msgid "" "Port %(port_id)s is associated with a different tenant than Floating IP " "%(floatingip_id)s and therefore cannot be bound." msgstr "" "ポート %(port_id)s は、Floating IP %(floatingip_id)s とは異なるテナントに関連" "付けられているため、バインドできません。" #, python-format msgid "Port %(port_id)s is not managed by this agent. " msgstr "このエージェントはポート %(port_id)s を管理していません。" #, python-format msgid "Port %s does not exist" msgstr "ポート %s は存在しません" #, python-format msgid "" "Port %s has multiple fixed IPv4 addresses. Must provide a specific IPv4 " "address when assigning a floating IP" msgstr "" "ポート %s には複数の固定 IPv4 アドレスがあります。Floating IP を割り当てる際" "には、特定の IPv4 アドレスを提供する必要があります" msgid "" "Port Security must be enabled in order to have allowed address pairs on a " "port." 
msgstr "" "ポートセキュリティーは、ポート上で許可されたアドレスペアを持つために有効にす" "る必要があります。" msgid "" "Port has security group associated. Cannot disable port security or ip " "address until security group is removed" msgstr "" "ポートにセキュリティーグループが関連付けられています。セキュリティーグループ" "を削除するまで、ポートセキュリティーおよび IP アドレスを使用不可にすることは" "できません" msgid "" "Port security must be enabled and port must have an IP address in order to " "use security groups." msgstr "" "セキュリティーグループを使用するには、ポートセキュリティーを使用可能にする必" "要があり、ポートには IP アドレスが必要です。" msgid "" "Port to listen on for OpenFlow connections. Used only for 'native' driver." msgstr "" "OpenFlow 接続をリッスンするポート。「ネイティブ」のドライバーでのみ使用できま" "す。" #, python-format msgid "Prefix '%(prefix)s' not supported in IPv%(version)s pool." msgstr "" "プレフィックス '%(prefix)s' は IPv%(version)s プールではサポートされません。" msgid "Prefix Delegation can only be used with IPv6 subnets." msgstr "" "プレフィックスデリゲーションが使用できるのは IPv6 のサブネットに限られます。" msgid "Private key of client certificate." msgstr "クライアント証明書の秘密鍵。" #, python-format msgid "Probe %s deleted" msgstr "プローブ %s が削除されました" #, python-format msgid "Probe created : %s " msgstr "作成されたプローブ: %s " msgid "Process is already started" msgstr "プロセスが既に実行されています" msgid "Process is not running." msgstr "プロセスが実行されていません" msgid "Protocol to access nova metadata, http or https" msgstr "Nova メタデータ、http、または https にアクセスするためのプロトコル" #, python-format msgid "Provider name %(name)s is limited by %(len)s characters" msgstr "プロバイダー名 %(name)s の制限は %(len)s 文字までです。" #, python-format msgid "QoS Policy %(policy_id)s is used by %(object_type)s %(object_id)s." msgstr "" "QoS ポリシー %(policy_id)s は %(object_type)s %(object_id)s によって使用され" "ています。" #, python-format msgid "" "QoS binding for network %(net_id)s and policy %(policy_id)s could not be " "found." msgstr "" "ネットワーク %(net_id)s とポリシー %(policy_id)s にバインドする QoS が見つか" "りませんでした。" #, python-format msgid "" "QoS binding for port %(port_id)s and policy %(policy_id)s could not be found." msgstr "" "ポート %(port_id)s とポリシー %(policy_id)s にバインドする QoS が見つかりませ" "んでした。" #, python-format msgid "QoS policy %(policy_id)s could not be found." msgstr "QoS ポリシー %(policy_id)s が見つかりませんでした。" #, python-format msgid "QoS rule %(rule_id)s for policy %(policy_id)s could not be found." msgstr "" "ポリシー %(policy_id)s の QoS ルール %(rule_id)s が見つかりませんでした。" #, python-format msgid "RBAC policy of type %(object_type)s with ID %(id)s not found" msgstr "" "ID %(id)s を持つタイプ %(object_type)s の RBAC ポリシーが見つかりません" #, python-format msgid "" "RBAC policy on object %(object_id)s cannot be removed because other objects " "depend on it.\n" "Details: %(details)s" msgstr "" "他のオブジェクトがこの RBAC ポリシーに依存しているため、オブジェクト " "%(object_id)s に対する RBAC ポリシーを削除できません。\n" "詳細: %(details)s" msgid "" "Range of seconds to randomly delay when starting the periodic task scheduler " "to reduce stampeding. (Disable by setting to 0)" msgstr "" "集中状態を緩和するため、定期タスクスケジューラーの開始時に挿入するランダムな" "遅延時間 (秒) の範囲。(無効にするには 0 に設定)" msgid "Ranges must be in the same IP version" msgstr "範囲は同じ IP バージョンである必要があります" msgid "Ranges must be netaddr.IPRange" msgstr "範囲は netaddr.IPRange である必要があります" msgid "Ranges must not overlap" msgstr "範囲は重複することはできません" #, python-format msgid "" "Received type '%(type)s' and value '%(value)s'. Expecting netaddr.EUI type." msgstr "" "タイプ '%(type)s' と値 '%(value)s' を受領しましたが、予期していたのは " "netaddr.EUI タイプです" #, python-format msgid "" "Received type '%(type)s' and value '%(value)s'. Expecting netaddr.IPAddress " "type." 
msgstr "" "タイプ '%(type)s' と値 '%(value)s' を受領しましたが、予期していたのは " "netaddr.IPAddress タイプです。" #, python-format msgid "" "Received type '%(type)s' and value '%(value)s'. Expecting netaddr.IPNetwork " "type." msgstr "" "タイプ '%(type)s' と値 '%(value)s' を受領しましたが、予期していたのは " "netaddr.IPNetwork タイプです。" #, python-format msgid "" "Release aware branch labels (%s) are deprecated. Please switch to expand@ " "and contract@ labels." msgstr "" "リリースを認識する分岐ラベル (%s) は提供を終了しています。expand@ ラベルと " "contract@ ラベルに変更してください。" msgid "Remote metadata server experienced an internal server error." msgstr "リモートメタデータサーバーで内部サーバーエラーが発生しました。" msgid "" "Repository does not contain HEAD files for contract and expand branches." msgstr "リポジトリーに縮小分岐と拡張分岐の HEAD ファイルが含まれていません。" msgid "" "Representing the resource type whose load is being reported by the agent. " "This can be \"networks\", \"subnets\" or \"ports\". When specified (Default " "is networks), the server will extract particular load sent as part of its " "agent configuration object from the agent report state, which is the number " "of resources being consumed, at every report_interval.dhcp_load_type can be " "used in combination with network_scheduler_driver = neutron.scheduler." "dhcp_agent_scheduler.WeightScheduler When the network_scheduler_driver is " "WeightScheduler, dhcp_load_type can be configured to represent the choice " "for the resource being balanced. Example: dhcp_load_type=networks" msgstr "" "負荷がエージェントによって報告されているリソースタイプを表します。このタイプ" "には、\"networks\"、\"subnets\"、または \"ports\" があります。指定した場合 " "(デフォルトは networks)、サーバーは、エージェントレポート状態 " "(report_interval ごとに消費されるリソース数) からそのエージェント構成オブジェ" "クトの一部として送信された特定の負荷を抽出します。dhcp_load_type は " "network_scheduler_driver = neutron.scheduler.dhcp_agent_scheduler." "WeightScheduler と組み合わせて使用できます。network_scheduler_driver が " "WeightScheduler の場合、dhcp_load_type は平衡を取るリソースの選択肢を表すよう" "に構成することができます。例: dhcp_load_type=networks" msgid "Request Failed: internal server error while processing your request." msgstr "要求が失敗しました。要求の処理中に内部サーバーエラーが発生しました。" #, python-format msgid "" "Request contains duplicate address pair: mac_address %(mac_address)s " "ip_address %(ip_address)s." msgstr "" "重複するアドレスペアが要求に含まれています: mac_address %(mac_address)s " "ip_address %(ip_address)s" #, python-format msgid "" "Requested subnet with cidr: %(cidr)s for network: %(network_id)s overlaps " "with another subnet" msgstr "" "ネットワーク %(network_id)s の CIDR %(cidr)s を持つ要求されたサブネットは、別" "のサブネットとオーバーラップしています" msgid "" "Reset flow table on start. Setting this to True will cause brief traffic " "interruption." msgstr "" "起動時にフローテーブルをリセットします。この値を True に設定すると、一時的に" "トラフィックが中断します。" #, python-format msgid "Resource %(resource)s %(resource_id)s could not be found." msgstr "リソース %(resource)s %(resource_id)s が見つかりませんでした。" #, python-format msgid "Resource %(resource_id)s of type %(resource_type)s not found" msgstr "タイプ %(resource_type)s のリソース %(resource_id)s は見つかりません" #, python-format msgid "" "Resource '%(resource_id)s' is already associated with provider " "'%(provider)s' for service type '%(service_type)s'" msgstr "" "リソース '%(resource_id)s' は既にサービスタイプ '%(service_type)s' のプロバイ" "ダー '%(provider)s' に関連付けられています" msgid "Resource body required" msgstr "リソース本文が必要です" msgid "" "Resource name(s) that are supported in quota features. This option is now " "deprecated for removal." msgstr "" "クォータ機能でサポートされるリソースの名前。現在、このオプションは提供を終了" "しています。" msgid "Resource not found." msgstr "リソースが見つかりません。" msgid "Resources required" msgstr "リソースが必要です" msgid "" "Root helper application. 
Use 'sudo neutron-rootwrap /etc/neutron/rootwrap." "conf' to use the real root filter facility. Change to 'sudo' to skip the " "filtering and just run the command directly." msgstr "" "ルートヘルパーのアプリケーション。実際のルートフィルターの機能を使用するため" "には、'sudo neutron-rootwrap /etc/neutron/rootwrap.conf' を使用します。フィル" "タリングをスキップして、直接コマンドを実行するには、'sudo' を使用します。" msgid "Root helper daemon application to use when possible." msgstr "" "ルートヘルパーのデーモンアプリケーション。このアプリケーションは利用可能な場" "合に使用します。" msgid "Root permissions are required to drop privileges." msgstr "特権を除去するにはルート許可が必要です。" #, python-format msgid "Route %(cidr)s not advertised for BGP Speaker %(speaker_as)d." msgstr "" "BGP スピーカー %(speaker_as)d のためにアドバタイズされない経路 %(cidr)s。" #, python-format msgid "Router %(router_id)s %(reason)s" msgstr "ルーター %(router_id)s %(reason)s" #, python-format msgid "Router %(router_id)s could not be found" msgstr "ルーター %(router_id)s が見つかりませんでした" #, python-format msgid "Router %(router_id)s does not have an interface with id %(port_id)s" msgstr "" "ルーター %(router_id)s に、ID %(port_id)s のインターフェースがありません" #, python-format msgid "Router %(router_id)s has no interface on subnet %(subnet_id)s" msgstr "" "ルーター %(router_id)s に、サブネット %(subnet_id)s 上のインターフェースがあ" "りません" #, python-format msgid "Router '%(router_id)s' cannot be both DVR and HA." msgstr "ルーター '%(router_id)s' を DVR と HA の両方にすることはできません。" #, python-format msgid "Router '%(router_id)s' is not compatible with this agent." msgstr "ルーター '%(router_id)s' はこのエージェントと互換性がありません。" #, python-format msgid "Router already has a port on subnet %s" msgstr "ルーターに、既にサブネット %s 上のポートがあります" #, python-format msgid "" "Router interface for subnet %(subnet_id)s on router %(router_id)s cannot be " "deleted, as it is required by one or more floating IPs." msgstr "" "ルーター %(router_id)s 上のサブネット %(subnet_id)s のルーターインターフェー" "スは、1 つ以上の Floating IP で必要なため削除できません。" #, python-format msgid "" "Router interface for subnet %(subnet_id)s on router %(router_id)s cannot be " "deleted, as it is required by one or more routes." msgstr "" "ルーター %(router_id)s 上のサブネット %(subnet_id)s のルーターインターフェー" "スは、1 つ以上のルートで必要なため削除できません。" msgid "Router port must have at least one fixed IP" msgstr "ルーターポートには 1 つ以上の Fixed IP を設定する必要があります" msgid "Router that will have connected instances' metadata proxied." msgstr "接続済みインスタンスのメタデータがプロキシー処理されるルーター。" #, python-format msgid "" "Row doesn't exist in the DB. Request info: Table=%(table)s. Columns=" "%(columns)s. Records=%(records)s." msgstr "" "行がデータベースに存在しません。要求情報: テーブル =%(table)s、列 =" "%(columns)s、レコード =%(records)s。" msgid "Run as daemon." msgstr "デーモンとして実行します。" #, python-format msgid "Running %(cmd)s (%(desc)s) for %(project)s ..." msgstr "%(project)s に対して %(cmd)s (%(desc)s) を実行中です..." #, python-format msgid "Running %(cmd)s for %(project)s ..." msgstr "%(project)s に対して %(cmd)s を実行中です..." msgid "Running without keystone AuthN requires that tenant_id is specified" msgstr "" "keystone 認証を使用せずに実行するには、tenant_id を指定する必要があります" msgid "" "Seconds between nodes reporting state to server; should be less than " "agent_down_time, best if it is half or less than agent_down_time." msgstr "" "ノード状態をサーバーに報告する間隔 (秒)。agent_down_time 未満である必要があ" "ります。agent_down_time の半分以下であれば最適です。" msgid "Seconds between running periodic tasks" msgstr "定期タスクの実行間隔 (秒)" msgid "" "Seconds to regard the agent is down; should be at least twice " "report_interval, to be sure the agent is down for good." 
msgstr "" "エージェントがダウンしていると見なすまでの時間 (秒)。エージェントが完全にダウ" "ンしていることを確実にするには、この値を少なくとも report_interval の 2 倍に" "してください。" #, python-format msgid "Security Group %(id)s %(reason)s." msgstr "セキュリティーグループ %(id)s %(reason)s。" #, python-format msgid "Security Group Rule %(id)s %(reason)s." msgstr "セキュリティーグループルール %(id)s %(reason)s。" #, python-format msgid "Security group %(id)s does not exist" msgstr "セキュリティーグループ %(id)s は存在しません" #, python-format msgid "Security group rule %(id)s does not exist" msgstr "セキュリティーグループルール %(id)s は存在しません" #, python-format msgid "Security group rule already exists. Rule id is %(rule_id)s." msgstr "" "セキュリティーグループルールが既に存在しています。ルール ID は %(rule_id)s で" "す。" #, python-format msgid "" "Security group rule for ethertype '%(ethertype)s' not supported. Allowed " "values are %(values)s." msgstr "" "イーサネットタイプ '%(ethertype)s' に関するセキュリティーグループルールが対応" "していません。許容される値は %(values)s です。" #, python-format msgid "" "Security group rule protocol %(protocol)s not supported. Only protocol " "values %(values)s and integer representations [0 to 255] are supported." msgstr "" "セキュリティーグループルールのプロトコル %(protocol)s が対応していません。使" "用できるのは、プロトコルの値 %(values)s と整数値 [0 から 255 まで] のみです。" msgid "Segments and provider values cannot both be set." msgstr "セグメントとプロバイダーの両方を設定することはできません。" msgid "Selects the Agent Type reported" msgstr "報告される Agent Type の選択" msgid "" "Send notification to nova when port data (fixed_ips/floatingip) changes so " "nova can update its cache." msgstr "" "nova がそのキャッシュを更新できるように、ポートデータ (fixed_ips/floatingip) " "が変更されたときに通知を nova に送信します。" msgid "Send notification to nova when port status changes" msgstr "ポート状態の変更時の nova への通知送信" msgid "" "Send this many gratuitous ARPs for HA setup, if less than or equal to 0, the " "feature is disabled" msgstr "" "HA セットアップ用に指定数の Gratuitous ARP を送信します。値が 0 以下の場合、" "この機能は無効です" #, python-format msgid "Service Profile %(sp_id)s could not be found." msgstr "サービスプロファイル %(sp_id)s が見つかりませんでした。" #, python-format msgid "Service Profile %(sp_id)s is already associated with flavor %(fl_id)s." msgstr "" "サービスプロファイル %(sp_id)s は既にフレーバー %(fl_id)s と関連付けられてい" "ます。" #, python-format msgid "Service Profile %(sp_id)s is not associated with flavor %(fl_id)s." msgstr "" "サービスプロファイル %(sp_id)s はフレーバー %(fl_id)s と関連付けられていませ" "ん。" #, python-format msgid "Service Profile %(sp_id)s is used by some service instance." msgstr "" "あるサービスインスタンスがサービスプロファイル %(sp_id)s を使用しています。" #, python-format msgid "Service Profile driver %(driver)s could not be found." msgstr "サービスプロファイルドライバー %(driver)s が見つかりませんでした。" msgid "Service Profile is not enabled." msgstr "サービスプロファイルが有効化されていません。" msgid "Service Profile needs either a driver or metainfo." msgstr "サービスプロファイルにはドライバーかメタ情報が必要です。" #, python-format msgid "" "Service provider '%(provider)s' could not be found for service type " "%(service_type)s" msgstr "" "サービスタイプ %(service_type)s のサービスプロバイダー '%(provider)s' は見つ" "かりませんでした" msgid "Service to handle DHCPv6 Prefix delegation." msgstr "DHCPv6 のプレフィックスデリゲーションを処理するサービス。" #, python-format msgid "Service type %(service_type)s does not have a default service provider" msgstr "" "サービスタイプ %(service_type)s にはデフォルトのサービスプロバイダーがありま" "せん" msgid "" "Set new timeout in seconds for new rpc calls after agent receives SIGTERM. " "If value is set to 0, rpc timeout won't be changed" msgstr "" "エージェントによる SIGTERM 受信後の新規 rpc 呼び出しの新規タイムアウト(秒) を" "設定します。値を 0 に設定すると、rpc タイムアウトは変更されません" msgid "" "Set or un-set the don't fragment (DF) bit on outgoing IP packet carrying GRE/" "VXLAN tunnel." 
msgstr "" "GRE/VXLAN トンネルを構成した発信 IP パケットで、フラグメント禁止 (DF) ビット" "を設定または設定解除します。" msgid "" "Set or un-set the tunnel header checksum on outgoing IP packet carrying GRE/" "VXLAN tunnel." msgstr "" "GRE/VXLAN トンネル を使用する現在有効な IP パケットに対するトンネルヘッダーの" "チェックサムの設定または設定解除を行います。" msgid "Shared address scope can't be unshared" msgstr "共有アドレススコープは共有を解除できません" msgid "" "Specifying 'tenant_id' other than authenticated tenant in request requires " "admin privileges" msgstr "" "認証されているテナント以外の 'tenant_id' を要求で指定するには、管理者特権が必" "要です" msgid "String prefix used to match IPset names." msgstr "IPset 名とのマッチングを行うために使用する文字列のプレフィックス。" #, python-format msgid "Sub-project %s not installed." msgstr "サブプロジェクト %s はインストールされていません" msgid "Subnet for router interface must have a gateway IP" msgstr "ルーターインターフェースのサブネットにはゲートウェイ IP が必要です" msgid "" "Subnet has a prefix length that is incompatible with DHCP service enabled." msgstr "" "サブネットに、有効化された DHCP サービスと互換性のないプレフィックス長が設定" "されています。" #, python-format msgid "Subnet pool %(subnetpool_id)s could not be found." msgstr "サブネットプール %(subnetpool_id)s が見つかりませんでした。" msgid "Subnet pool has existing allocations" msgstr "サブネットプールに既存の割り当てがあります" msgid "Subnet used for the l3 HA admin network." msgstr "l3 HA 管理ネットワークに使用されるサブネット。" msgid "" "Subnets hosted on the same network must be allocated from the same subnet " "pool." msgstr "" "同じネットワーク上でホストされるサブネットは、同じサブネットプールから割り当" "てられる必要があります。" msgid "Suffix to append to all namespace names." msgstr "すべての名前空間の名前に追加するサフィックス。" msgid "" "System-wide flag to determine the type of router that tenants can create. " "Only admin can override." msgstr "" "テナントで作成可能なルーターのタイプを判別するためのシステム全体のフラグ。管" "理者のみがオーバーライドできます。" msgid "TCP Port to listen for metadata server requests." msgstr "メタデータサーバー要求をリッスンするための TCP ポート。" msgid "TCP Port used by Neutron metadata namespace proxy." msgstr "Neutron メタデータ名前空間プロキシーが使用する TCP Port" msgid "TCP Port used by Nova metadata server." msgstr "Nova メタデータサーバーによって使用される TCP ポート。" #, python-format msgid "TLD '%s' must not be all numeric" msgstr "TLD '%s' をすべて数値にすることはできません" msgid "TOS for vxlan interface protocol packets." msgstr "vxlan インターフェースプロトコルパケットの TOS。" msgid "TTL for vxlan interface protocol packets." msgstr "vxlan インターフェースプロトコルパケットの TTL。" #, python-format msgid "Table %s can only be queried by UUID" msgstr "UUID のみがテーブル %s を照会できます" #, python-format msgid "Tag %(tag)s could not be found." msgstr "タグ %(tag)s が見つかりませんでした。" #, python-format msgid "Tenant %(tenant_id)s not allowed to create %(resource)s on this network" msgstr "" "テナント %(tenant_id)s は、このネットワークでの %(resource)s の作成を許可され" "ていません" msgid "Tenant id for connecting to designate in admin context" msgstr "管理者のコンテキストにおける designate への接続用テナント ID" msgid "Tenant name for connecting to designate in admin context" msgstr "管理者のコンテキストにおける designate への接続用テナント名" msgid "Tenant network creation is not enabled." msgstr "テナントネットワークの作成は使用できません。" msgid "Tenant-id was missing from quota request." msgstr "テナント ID がクォータ要求にありませんでした。" msgid "" "The 'gateway_external_network_id' option must be configured for this agent " "as Neutron has more than one external network." msgstr "" "Neutron に複数の外部ネットワークがあるため、このエージェントに対して " "'gateway_external_network_id' オプションを設定する必要があります。" msgid "" "The DHCP agent will resync its state with Neutron to recover from any " "transient notification or RPC errors. The interval is number of seconds " "between attempts." 
msgstr "" "DHCP エージェントが Neutron との間で状態の同期を取ることにより、一時的な通知" "や RPC エラーからのリカバリーを行います。間隔は同期の試行終了から次の試行まで" "の秒数です。" msgid "" "The DHCP server can assist with providing metadata support on isolated " "networks. Setting this value to True will cause the DHCP server to append " "specific host routes to the DHCP request. The metadata service will only be " "activated when the subnet does not contain any router port. The guest " "instance must be configured to request host routes via DHCP (Option 121). " "This option doesn't have any effect when force_metadata is set to True." msgstr "" "DHCP サーバーは孤立したネットワークに対してメタデータサポートを提供することが" "できます。この値を True に設定すると、DHCP サーバーは DHCP 要求に対する特定の" "ホストへの経路を追加できます。このメタデータサービスが有効化されるのは、サブ" "ネットにルーターのポートが含まれない場合に限られます。ゲストインスタンスに" "は、DHCP (オプション 121) 経由でホストの経路を要求するよう設定を行う必要があ" "ります。force_metadata を True に設定する場合、このオプションは機能しません。" #, python-format msgid "" "The HA Network CIDR specified in the configuration file isn't valid; " "%(cidr)s." msgstr "設定ファイルに指定されている HA ネットワーク CIDR が無効です: %(cidr)s" msgid "The UDP port to use for VXLAN tunnels." msgstr "VXLAN トンネルで使用する UDP ポート。" #, python-format msgid "" "The address allocation request could not be satisfied because: %(reason)s" msgstr "%(reason)s のため、アドレスの割り当て要求に対応できませんでした" msgid "The advertisement interval in seconds" msgstr "通知間隔 (秒)" #, python-format msgid "The allocation pool %(pool)s is not valid." msgstr "割り当てプール %(pool)s が無効です。" #, python-format msgid "" "The allocation pool %(pool)s spans beyond the subnet cidr %(subnet_cidr)s." msgstr "" "割り当てプール %(pool)s がサブネット CIDR %(subnet_cidr)s を越えています。" #, python-format msgid "" "The attribute '%(attr)s' is reference to other resource, can't used by sort " "'%(resource)s'" msgstr "" "属性 '%(attr)s' は他のリソースへの参照であり、ソート '%(resource)s' によって" "使用できません" msgid "" "The base MAC address Neutron will use for VIFs. The first 3 octets will " "remain unchanged. If the 4th octet is not 00, it will also be used. The " "others will be randomly generated." msgstr "" "Neutron が VIF 用に使用する基本の MAC アドレス。最初の 3 つのオクテットは変更" "しません。4 つ目のオクテットが 00 の場合、これも使用できます。その他のオク" "テットはランダムに生成されます。" msgid "" "The base mac address used for unique DVR instances by Neutron. The first 3 " "octets will remain unchanged. If the 4th octet is not 00, it will also be " "used. The others will be randomly generated. The 'dvr_base_mac' *must* be " "different from 'base_mac' to avoid mixing them up with MAC's allocated for " "tenant ports. A 4 octet example would be dvr_base_mac = fa:16:3f:4f:00:00. " "The default is 3 octet" msgstr "" "Neutron によって独自の DVR インスタンスに使用される基本の MAC アドレス。最初" "の 3 つのオクテットは変更されません。4 つ目のオクテットが 00 でない場合は、" "このオクテットも使用できます。その他のオクテットはランダムに生成されます。 テ" "ナントポートに割り当てられた MAC アドレスとの混同を避けるために、 " "'dvr_base_mac' は 'base_mac' とは違う値である必要があります。4 つ目のオクテッ" "トの例としては、dvr_base_mac = fa:16:3f:4f:00:00 があります。デフォルトは、3 " "つ目のオクテットです。" msgid "" "The connection string for the native OVSDB backend. Requires the native " "ovsdb_interface to be enabled." msgstr "" "ネイティブの OVSDB バックエンドへの接続文字列。ネイティブの ovsdb_interface " "を有効化する必要があります。" msgid "The core plugin Neutron will use" msgstr "Neutron が使用するコアプラグイン" #, python-format msgid "" "The dns_name passed is a FQDN. Its higher level labels must be equal to the " "dns_domain option in neutron.conf, that has been set to '%(dns_domain)s'. It " "must also include one or more valid DNS labels to the left of " "'%(dns_domain)s'" msgstr "" "渡された dns_name は FQDN です。このドメイン名の上位のラベルは、neutron." 
"conf 内の dns_domain オプション('%(dns_domain)s' に設定済み) と一致する必要" "があります。また、'%(dns_domain)s' の左に 1 つ以上の有効な DNS ラベルが含まれ" "る必要があります。" #, python-format msgid "" "The dns_name passed is a PQDN and its size is '%(dns_name_len)s'. The " "dns_domain option in neutron.conf is set to %(dns_domain)s, with a length of " "'%(higher_labels_len)s'. When the two are concatenated to form a FQDN (with " "a '.' at the end), the resulting length exceeds the maximum size of " "'%(fqdn_max_len)s'" msgstr "" "渡された dns_name は PQDN であり、そのサイズは '%(dns_name_len)s' です。" "neutron.conf 内の dns_domain オプションは %(dns_domain)s に設定さ" "れ、'%(higher_labels_len)s' の文字長が指定されています。この 2 つを連携して " "FQDN を作成すると (末尾に '.' を付ける)、文字長は最大サイズの " "'%(fqdn_max_len)s' を超えます。" msgid "The driver used to manage the DHCP server." msgstr "DHCP サーバーの管理に使用されるドライバー。" msgid "The driver used to manage the virtual interface." msgstr "仮想インターフェースの管理に使用されるドライバー。" msgid "" "The email address to be used when creating PTR zones. If not specified, the " "email address will be admin@" msgstr "" "PTR ゾーンの作成時に使用する E メールアドレス。指定しない場合、E メールアドレ" "スは admin@ になります。" #, python-format msgid "" "The following device_id %(device_id)s is not owned by your tenant or matches " "another tenants router." msgstr "" "次の device_id %(device_id)s はユーザーのテナントによって所有されていないか、" "または別のテナントルーターと一致します。" msgid "The host IP to bind to" msgstr "バインド先のホスト IP" msgid "The interface for interacting with the OVSDB" msgstr "OVSDB と相互作用するためのインターフェース" msgid "" "The maximum number of items returned in a single response, value was " "'infinite' or negative integer means no limit" msgstr "" "1 回の応答で最大数の項目が返されました。値は 'infinite' または (無制限を意味" "する) 負の整数でした" #, python-format msgid "" "The network %(network_id)s has been already hosted by the DHCP Agent " "%(agent_id)s." msgstr "" "ネットワーク %(network_id)s は、既に DHCP エージェント %(agent_id)s によって" "ホストされています。" #, python-format msgid "" "The network %(network_id)s is not hosted by the DHCP agent %(agent_id)s." msgstr "" "ネットワーク %(network_id)s は DHCP エージェント %(agent_id)s によってホスト" "されていません。" msgid "" "The network type to use when creating the HA network for an HA router. By " "default or if empty, the first 'tenant_network_types' is used. This is " "helpful when the VRRP traffic should use a specific network which is not the " "default one." msgstr "" "HA ルーター用に HA ネットワークを作成する際に使用するネットワークタイプ。デ" "フォルトまたはこの値が空の場合、最初の 'tenant_network_types' が使用されま" "す。VRRP トラフィックがデフォルトではない特定のネットワークを使用しなければな" "らない場合には、この設定が役立ちます。" #, python-format msgid "The number of allowed address pair exceeds the maximum %(quota)s." msgstr "許可されたアドレスペアの数が最大の %(quota)s を超えています。" msgid "" "The number of seconds the agent will wait between polling for local device " "changes." msgstr "" "ローカルデバイスの変更のポーリング間にエージェントが待機する間隔 (秒)。" msgid "" "The number of seconds to wait before respawning the ovsdb monitor after " "losing communication with it." msgstr "" "ovsdb モニターとの通信が途絶えた後で ovsdb モニターを再作成する前に待機する時" "間 (秒)" msgid "The number of sort_keys and sort_dirs must be same" msgstr "sort_keys と sort_dirs の数は同じでなければなりません" msgid "" "The path for API extensions. Note that this can be a colon-separated list of " "paths. For example: api_extensions_path = extensions:/path/to/more/exts:/" "even/more/exts. The __path__ of neutron.extensions is appended to this, so " "if your extensions are in there you don't need to specify them here." 
msgstr "" "API 強化機能のパス。このパスはコロンで区切られたパスのリストであることに注意" "してください。例としては、api_extensions_path = extensions:/path/to/more/" "exts:/even/more/exts があります。このパスには neutron.extensions の __path__ " "が付いているため、強化機能がこのパスにある場合、ここで指定する必要はありませ" "ん。" msgid "The physical network name with which the HA network can be created." msgstr "HA ネットワークを作成可能な物理ネットワーク名。" #, python-format msgid "The port '%s' was deleted" msgstr "ポート '%s' が削除されました" msgid "The port to bind to" msgstr "バインド先のポート" #, python-format msgid "The requested content type %s is invalid." msgstr "要求されたコンテンツタイプ %s は無効です。" msgid "The resource could not be found." msgstr "リソースが見つかりませんでした。" #, python-format msgid "" "The router %(router_id)s has been already hosted by the L3 Agent " "%(agent_id)s." msgstr "" "ルーター %(router_id)s は、既に L3 エージェント %(agent_id)s によってホストさ" "れています。" msgid "" "The server has either erred or is incapable of performing the requested " "operation." msgstr "" "サーバーに誤りがあるか、または要求された操作を実行することができません。" msgid "The service plugins Neutron will use" msgstr "Neutron が使用するサービスプラグイン" #, python-format msgid "The subnet request could not be satisfied because: %(reason)s" msgstr "%(reason)s のため、サブネットの要求に対応できませんでした" #, python-format msgid "The subproject to execute the command against. Can be one of: '%s'." msgstr "" "コマンドの実行の対象となるサブコマンド。'%s' のうちのいずれかにすることができ" "ます。" msgid "The type of authentication to use" msgstr "使用する認証のタイプ" #, python-format msgid "The value '%(value)s' for %(element)s is not valid." msgstr "%(element)s の値 %(value)s は無効です。" msgid "" "The working mode for the agent. Allowed modes are: 'legacy' - this preserves " "the existing behavior where the L3 agent is deployed on a centralized " "networking node to provide L3 services like DNAT, and SNAT. Use this mode if " "you do not want to adopt DVR. 'dvr' - this mode enables DVR functionality " "and must be used for an L3 agent that runs on a compute host. 'dvr_snat' - " "this enables centralized SNAT support in conjunction with DVR. This mode " "must be used for an L3 agent running on a centralized node (or in single-" "host deployments, e.g. devstack)" msgstr "" "エージェントの処理モード。許可されるモードは次のとおりです。'legacy' - この" "モードは、L3 エージェントを中央ネットワーキングノードにデプロイして L3 サービ" "ス (DNAT や SNAT など) を提供する、既存の動作を保持します。DVR を採用しない場" "合、このモードを使用します。'dvr' - このモードは、DVR 機能を有効にします。計" "算ホスト上で実行される L3 エージェントの場合、このモードを使用する必要があり" "ます。'dvr_snat' - このモードは、DVR とともに中央 SNAT サポートを有効にしま" "す。中央ノード (または devstack などの単一ホストでのデプロイメント) 上で実行" "中の L3 の場合、このモードを使用する必要があります。" msgid "" "There are routers attached to this network that depend on this policy for " "access." msgstr "" "このネットワークにはルーターが存在し、ルーターはアクセスの際にこのポリシーを" "使用します。" msgid "" "This will choose the web framework in which to run the Neutron API server. " "'pecan' is a new experiemental rewrite of the API server." msgstr "" "これにより、Neutron の API サーバーを起動する Web フレームワークを選択しま" "す。'pecan' とは API サーバーを新規に実験的に再作成したものです。" msgid "Timeout" msgstr "タイムアウト" msgid "" "Timeout in seconds for ovs-vsctl commands. If the timeout expires, ovs " "commands will fail with ALARMCLOCK error." msgstr "" "ovs-vsctl コマンドのタイムアウト時間 (秒)。タイムアウトが発生すると、ovs コマ" "ンドは ALARMCLOCK エラーで失敗します。" msgid "" "Timeout in seconds to wait for a single OpenFlow request. Used only for " "'native' driver." msgstr "" "単一の OpenFlow リクエストを待機するタイムアウト時間 (秒)。「ネイティブ」のド" "ライバーでのみ使用できます。" msgid "" "Timeout in seconds to wait for the local switch connecting the controller. " "Used only for 'native' driver." msgstr "" "コントローラーに接続するローカルスイッチを待機するタイムアウト時間 (秒)。「ネ" "イティブ」のドライバーでのみ使用できます。" msgid "" "Too long prefix provided. 
New name would exceed given length for an " "interface name." msgstr "" "提供されたプレフィックスが長すぎます。新規の名前がインターフェース名に設定さ" "れた長さを超えます。" msgid "Too many availability_zone_hints specified" msgstr "指定された availability_zone_hints が多すぎます" msgid "" "True to delete all ports on all the OpenvSwitch bridges. False to delete " "ports created by Neutron on integration and external network bridges." msgstr "" "すべての OpenvSwitch ブリッジですべてのポートを削除する場合は True。統合およ" "び外部ネットワークブリッジで Neutron によって作成されたポートを削除する場合" "は False。" msgid "Tunnel IP value needed by the ML2 plugin" msgstr "トンネル IP 値が ML2 プラグインに必要です" msgid "Tunnel bridge to use." msgstr "使用するトンネルブリッジ。" msgid "" "Type of the nova endpoint to use. This endpoint will be looked up in the " "keystone catalog and should be one of public, internal or admin." msgstr "" "使用する nova のエンドポイントのタイプ。このエンドポイントは Keystone のカタ" "ログで参照され、public、internal、または admin のいずれかである必要がありま" "す。" msgid "URL for connecting to designate" msgstr "designate への接続用 URL" msgid "URL to database" msgstr "データベースへの URL" #, python-format msgid "Unable to access %s" msgstr "%s にアクセスできません" #, python-format msgid "" "Unable to allocate subnet with prefix length %(prefixlen)s, maximum allowed " "prefix is %(max_prefixlen)s." msgstr "" "長さが %(prefixlen)s のプレフィックスを持つサブネットを割り当てることはできま" "せん。許可されるプレフィックス長の最大値は %(max_prefixlen)s です。" #, python-format msgid "" "Unable to allocate subnet with prefix length %(prefixlen)s, minimum allowed " "prefix is %(min_prefixlen)s." msgstr "" "長さが %(prefixlen)s のプレフィックスを持つサブネットを割り当てることはできま" "せん。許可されるプレフィックス長の最小値は %(min_prefixlen)s です。" #, python-format msgid "Unable to calculate %(address_type)s address because of:%(reason)s" msgstr "%(reason)s のため %(address_type)s アドレスを計算できません" #, python-format msgid "" "Unable to complete operation for %(router_id)s. The number of routes exceeds " "the maximum %(quota)s." msgstr "" "%(router_id)s の操作を完了できません。ルートの数が最大数 %(quota)s を超えてい" "ます。" #, python-format msgid "" "Unable to complete operation for %(subnet_id)s. The number of DNS " "nameservers exceeds the limit %(quota)s." msgstr "" "%(subnet_id)s の操作を完了できません。DNS ネームサーバーの数が制限 %(quota)s " "を超えています。" #, python-format msgid "" "Unable to complete operation for %(subnet_id)s. The number of host routes " "exceeds the limit %(quota)s." msgstr "" "%(subnet_id)s の操作を完了できません。ホスト経路の数が制限 %(quota)s を超えて" "います。" #, python-format msgid "" "Unable to complete operation on address scope %(address_scope_id)s. There " "are one or more subnet pools in use on the address scope" msgstr "" "アドレススコープ %(address_scope_id)s に関する操作を完了できません。アドレス" "スコープに関して 1 つ以上のサブネットプールが使用されています。" #, python-format msgid "Unable to convert value in %s" msgstr "%s で値を変換できません" msgid "Unable to create the Agent Gateway Port" msgstr "エージェントゲートウェイポートの作成ができません" msgid "Unable to create the SNAT Interface Port" msgstr "SNAT インターフェースポートの作成ができません" #, python-format msgid "" "Unable to create the flat network. Physical network %(physical_network)s is " "in use." msgstr "" "フラットネットワークを作成できません。物理ネットワーク %(physical_network)s " "は使用中です。" msgid "" "Unable to create the network. No available network found in maximum allowed " "attempts." msgstr "" "ネットワークを作成できません。許可される最大試行回数で、使用可能なネットワー" "クが見つかりません。" #, python-format msgid "Unable to delete subnet pool: %(reason)s." 
msgstr "サブネットプールを削除できません: %(reason)s。" #, python-format msgid "Unable to determine mac address for %s" msgstr "%s の MAC アドレスを決定できません" #, python-format msgid "Unable to find '%s' in request body" msgstr "要求本体で '%s' が見つかりません" #, python-format msgid "Unable to find IP address %(ip_address)s on subnet %(subnet_id)s" msgstr "" "サブネット %(subnet_id)s 上で IP アドレス %(ip_address)s が見つかりません" #, python-format msgid "Unable to find resource name in %s" msgstr "%s にリソース名を見つけることはできません" msgid "Unable to generate IP address by EUI64 for IPv4 prefix" msgstr "EUI64 によって IPv4 プレフィックスの IP アドレスを生成できません" #, python-format msgid "Unable to generate unique DVR mac for host %(host)s." msgstr "ホスト %(host)s に固有の DVR MAC を生成できません。" #, python-format msgid "Unable to generate unique mac on network %(net_id)s." msgstr "ネットワーク %(net_id)s で固有の MAC を生成できません。" #, python-format msgid "" "Unable to identify a target field from:%s. Match should be in the form " "%%()s" msgstr "" "%s からターゲットフィールドを特定できません。一致の形式は %%()s " "でなければなりません" msgid "Unable to provide external connectivity" msgstr "外部接続を提供できません" msgid "Unable to provide tenant private network" msgstr "テナントのプライベートネットワークを提供できません" #, python-format msgid "" "Unable to reconfigure sharing settings for network %(network)s. Multiple " "tenants are using it." msgstr "" "ネットワーク %(network)s の共有設定を再設定できません。複数のテナントがこの設" "定を使用しています。" #, python-format msgid "Unable to update address scope %(address_scope_id)s : %(reason)s" msgstr "アドレススコープ %(address_scope_id)s を更新できません: %(reason)s" #, python-format msgid "Unable to update the following object fields: %(fields)s" msgstr "以下のオブジェクトのフィールドを更新できません: %(fields)s" #, python-format msgid "" "Unable to verify match:%(match)s as the parent resource: %(res)s was not " "found" msgstr "" "match:%(match)s を親リソースとして検査できません: %(res)s が見つかりませんで" "した" #, python-format msgid "Unexpected label for script %(script_name)s: %(labels)s" msgstr "スクリプト %(script_name)s に関する予期しないラベル: %(labels)s" #, python-format msgid "Unexpected number of alembic branch points: %(branchpoints)s" msgstr "alembic の分岐点の予期しない数: %(branchpoints)s" #, python-format msgid "Unexpected response code: %s" msgstr "予期しない応答コード: %s" #, python-format msgid "Unexpected response: %s" msgstr "予期しない応答: %s" #, python-format msgid "Unit name '%(unit)s' is not valid." msgstr "ユニット名 '%(unit)s' が無効です。" msgid "Unknown API version specified" msgstr "不明な API バージョンが指定されました" #, python-format msgid "Unknown address type %(address_type)s" msgstr "不明なアドレスタイプ %(address_type)s" #, python-format msgid "Unknown attribute '%s'." msgstr "属性 '%s' が不明です。" #, python-format msgid "Unknown chain: %r" msgstr "不明なチェーン: %r" #, python-format msgid "Unknown network type %(network_type)s." msgstr "不明なネットワークタイプ %(network_type)s。" #, python-format msgid "Unknown quota resources %(unknown)s." msgstr "不明なクォータリソース %(unknown)s。" msgid "Unmapped error" msgstr "マップ解除エラー" msgid "Unrecognized action" msgstr "認識されないアクション" #, python-format msgid "Unrecognized attribute(s) '%s'" msgstr "認識されない属性 '%s'" msgid "Unrecognized field" msgstr "認識されないフィールド" msgid "Unspecified minimum subnet pool prefix." msgstr "最小サブネットプールのプレフィックスが指定されていません。" msgid "Unsupported Content-Type" msgstr "サポートされない Content-Type" #, python-format msgid "Unsupported network type %(net_type)s." msgstr "サポートされないネットワークタイプ %(net_type)s" #, python-format msgid "Unsupported port state: %(port_state)s." msgstr "サポートされないポートの状態: %(port_state)s。" msgid "Unsupported request type" msgstr "サポートされない要求タイプです" msgid "Updating default security group not allowed." 
msgstr "デフォルトのセキュリティーグループの更新は許可されません。" msgid "" "Use ML2 l2population mechanism driver to learn remote MAC and IPs and " "improve tunnel scalability." msgstr "" "リモート MAC および IP を認識してトンネルのスケーラビリティーを向上させるに" "は、ML2 l2population メカニズムドライバーを使用してください。" msgid "Use broadcast in DHCP replies." msgstr "DHCP の応答で ブロードキャストを使用します。" msgid "Use either --delta or relative revision, not both" msgstr "--delta と相対的な変更の両方ではなく、どちらか一方を使用してください" msgid "" "Use ipset to speed-up the iptables based security groups. Enabling ipset " "support requires that ipset is installed on L2 agent node." msgstr "" "iptables ベースのセキュリティーグループの速度を向上させるために、ipset を使用" "します。ipset へのサポートを有効にするには、L2 のエージェントノードに ipset " "をインストールする必要があります。" msgid "" "Use the root helper when listing the namespaces on a system. This may not be " "required depending on the security configuration. If the root helper is not " "required, set this to False for a performance improvement." msgstr "" "システム上の名前空間を一覧表示する際にはルートヘルパーを使用します。セキュリ" "ティー設定によっては、有効にする必要がない場合があります。ルートヘルパーが必" "要ない場合は、パフォーマンスを高めるためにこの値を False に設定します。" msgid "" "Use veths instead of patch ports to interconnect the integration bridge to " "physical networks. Support kernel without Open vSwitch patch port support so " "long as it is set to True." msgstr "" "patch port の代わりに veths を使用して、統合ブリッジを物理ネットワークに相互" "接続します。Open vSwitch の patch port のサポートが True に設定されている限" "り、当該サポートが存在しない場合でもカーネルをサポートします。" msgid "User (uid or name) running metadata proxy after its initialization" msgstr "メタデータプロキシーを初期化後に実行しているユーザー (uid または名前)" msgid "" "User (uid or name) running metadata proxy after its initialization (if " "empty: agent effective user)." msgstr "" "初期化後にメタデータプロキシーを実行しているユーザー (uid または名前) (空の場" "合: エージェント有効ユーザー)。" msgid "User (uid or name) running this process after its initialization" msgstr "初期化後にこのプロセスを実行するユーザー (uid または名前)" msgid "Username for connecting to designate in admin context" msgstr "管理者のコンテキストにおける designate への接続用ユーザー名" msgid "" "Uses veth for an OVS interface or not. Support kernels with limited " "namespace support (e.g. RHEL 6.5) so long as ovs_use_veth is set to True." msgstr "" "OVS インターフェースに veth を使用するかどうか。ovs_use_veth が True に設定" "されている場合は、限定された名前空間のサポート機能を持つカーネル (RHEL 6.5 な" "ど) をサポートします。" msgid "VRRP authentication password" msgstr "VRRP 認証パスワード" msgid "VRRP authentication type" msgstr "VRRP 認証タイプ" msgid "VXLAN network unsupported." msgstr "VXLAN ネットワークはサポートされていません。" #, python-format msgid "" "Validation of dictionary's keys failed. Expected keys: %(expected_keys)s " "Provided keys: %(provided_keys)s" msgstr "" "ディクショナリーのキーの検証に失敗しました。想定されたキー: " "%(expected_keys)s、提供されたキー: %(provided_keys)s" #, python-format msgid "Validator '%s' does not exist." msgstr "バリデーター '%s' は存在しません。" #, python-format msgid "Value %(value)s in mapping: '%(mapping)s' not unique" msgstr "マッピング '%(mapping)s' 内の値 %(value)s が固有ではありません" #, python-format msgid "" "Value of %(parameter)s has to be multiple of %(number)s, with maximum value " "of %(maximum)s and minimum value of %(minimum)s" msgstr "" "%(parameter)s の値は %(number)s の倍数であり、その最大値は %(maximum)s、最小" "値は %(minimum)s である必要があります" msgid "" "Value of host kernel tick rate (hz) for calculating minimum burst value in " "bandwidth limit rules for a port with QoS. See kernel configuration file for " "HZ value and tc-tbf manual for more information." msgstr "" "QoS を設定したポートについて、帯域幅の制限ルールに基づいて最小バースト値を計" "算するためのホストのカーネルのチックレート値 (Hz)。詳細情報については、Hz 値" "のカーネル設定ファイルと tc-tbf マニュアルを参照してください。" msgid "" "Value of latency (ms) for calculating size of queue for a port with QoS. 
See " "tc-tbf manual for more information." msgstr "" "QoS を設定したポートについて、キューのサイズを検索するたためのレイテンシー値 " "(ミリ秒)。詳細情報については、tc-tbf マニュアルを参照してください。" msgid "" "Watch file log. Log watch should be disabled when metadata_proxy_user/group " "has no read/write permissions on metadata proxy log file." msgstr "" "ファイルログを監視します。metadata_proxy_user/group にメタデータプロキシーの" "ログファイルに対する読み取り/書き込み許可がない場合は、ログ監視を無効にする必" "要があります。" msgid "" "When external_network_bridge is set, each L3 agent can be associated with no " "more than one external network. This value should be set to the UUID of that " "external network. To allow L3 agent support multiple external networks, both " "the external_network_bridge and gateway_external_network_id must be left " "empty." msgstr "" "external_network_bridge を設定すると、各 L3 エージェントには 1 つの外部ネット" "ワークしか割り当てることができなくなります。この値として該当する外部ネット" "ワークの UUID を設定する必要があります。L3 エージェントが複数の外部ネットワー" "クをサポートできるようにするには、external_network_bridge と " "gateway_external_network_id の両方を空の値に設定する必要があります。" msgid "" "When proxying metadata requests, Neutron signs the Instance-ID header with a " "shared secret to prevent spoofing. You may select any string for a secret, " "but it must match here and in the configuration used by the Nova Metadata " "Server. NOTE: Nova uses the same config key, but in [neutron] section." msgstr "" "メタデータ要求のプロキシーを実行する際に、Neutron はスプーフィングを防止する" "ために共有秘密鍵を使用して インスタンス ID ヘッダーに署名します。秘密鍵として" "任意の文字列を選択できるものの、その値はここと Nova Metadata Server が使用す" "る設定で一致する必要があります。注意: Nova は同じ設定鍵を使用するものの、その" "値は [neutron] セクションにあります。" msgid "" "Where to store Neutron state files. This directory must be writable by the " "agent." msgstr "" "Neutron 状態ファイルの保管場所。このディレクトリーは、エージェントが書き込み" "を行える場所でなければなりません。" msgid "" "With IPv6, the network used for the external gateway does not need to have " "an associated subnet, since the automatically assigned link-local address " "(LLA) can be used. However, an IPv6 gateway address is needed for use as the " "next-hop for the default route. If no IPv6 gateway address is configured " "here, (and only then) the neutron router will be configured to get its " "default route from router advertisements (RAs) from the upstream router; in " "which case the upstream router must also be configured to send these RAs. " "The ipv6_gateway, when configured, should be the LLA of the interface on the " "upstream router. If a next-hop using a global unique address (GUA) is " "desired, it needs to be done via a subnet allocated to the network and not " "through this parameter. " msgstr "" "IPv6 では、自動的に割り当てられたリンクローカルアドレス (LLA) を使用できるた" "め、外部ゲートウェイに使用するネットワークにはサブネットを関連付ける必要はあ" "りません。ただし、IPv6 ゲートウェイアドレスはデフォルト経路のネクストホップと" "して使用するために必要です。IPv6 ゲートウェイアドレスをここで構成しない場合に" "のみ、上流ルーターのルーター広告 (RA) からデフォルト経路を取得するように " "Neutron ルーターが構成されます。この場合、これらの RA を送信するように上流" "ルーターを構成することも必要です。ipv6_gateway を構成する場合、これは上流ルー" "ター上のインターフェースの LLA でなければなりません。グローバルユニークアドレ" "ス (GUA) を使用したネクストホップが必要な場合は、このパラメーターを使用するの" "ではなく、ネットワークに割り振られたサブネットを介してこれを行う必要がありま" "す。" msgid "You must implement __call__" msgstr "__call__ を実装する必要があります" msgid "" "You must provide a config file for bridge - either --config-file or " "env[NEUTRON_TEST_CONFIG_FILE]" msgstr "" "ブリッジの構成ファイルとして --config-file または " "env[NEUTRON_TEST_CONFIG_FILE] のいずれかを指定する必要があります" msgid "You must provide a revision or relative delta" msgstr "変更または相対デルタを指定する必要があります" msgid "a subnetpool must be specified in the absence of a cidr" msgstr "cidr がない場合、サブネットプールの指定は必須です" msgid "add_ha_port cannot be called inside of a transaction." 
msgstr "トランザクション内に add_ha_port を呼び出すことはできません。" msgid "allocation_pools allowed only for specific subnet requests." msgstr "allocation_pools は特定のサブネット要求にのみ許可されます。" msgid "allocation_pools are not in the subnet" msgstr "allocation_pools がサブネット内に存在しません" msgid "allocation_pools use the wrong ip version" msgstr "allocation_pools が間違ったIP バージョンを使用しています" msgid "already a synthetic attribute" msgstr "既に synthetic 属性を使用" msgid "binding:profile value too large" msgstr "binding:profile 値が大きすぎます" #, python-format msgid "cannot perform %(event)s due to %(reason)s" msgstr "%(reason)s のため %(event)s を実行できません" msgid "cidr and prefixlen must not be supplied together" msgstr "cidr と prefixlen を同時に指定してはなりません" #, python-format msgid "dhcp_agents_per_network must be >= 1. '%s' is invalid." msgstr "" "dhcp_agents_per_network は 1 以上でなければなりません。'%s' は無効です。" msgid "dns_domain cannot be specified without a dns_name" msgstr "dns_domain は dns_name なしでは指定できません" msgid "dns_name cannot be specified without a dns_domain" msgstr "dns_name は dns_domain なしでは指定できません" msgid "fixed_ip_address cannot be specified without a port_id" msgstr "fixed_ip_address は、port_id なしでは指定できません" #, python-format msgid "gateway_ip %s is not in the subnet" msgstr "gateway_ip %s がサブネット内に存在しません" #, python-format msgid "has device owner %s" msgstr "デバイス所有者 %s" msgid "in use" msgstr "使用されています" #, python-format msgid "ip command failed on device %(dev_name)s: %(reason)s" msgstr "ip コマンドがデバイス %(dev_name)s で失敗しました: %(reason)s" #, python-format msgid "ip command failed: %(reason)s" msgstr "IP コマンドが失敗しました: %(reason)s" #, python-format msgid "ip link capability %(capability)s is not supported" msgstr "ip リンク機能 %(capability)s はサポートされていません" #, python-format msgid "ip link command is not supported: %(reason)s" msgstr "ip リンクコマンドはサポートされていません: %(reason)s" msgid "ip_version must be specified in the absence of cidr and subnetpool_id" msgstr "cidr および subnetpool_id がない場合、ip_version の指定は必須です" msgid "ipv6_address_mode is not valid when ip_version is 4" msgstr "ip_version が 4 の場合、ipv6_address_mode は無効です" msgid "ipv6_ra_mode is not valid when ip_version is 4" msgstr "ip_version が 4 の場合、ipv6_ra_mode は無効です" msgid "" "ipv6_ra_mode or ipv6_address_mode cannot be set when enable_dhcp is set to " "False." msgstr "" "enable_dhcp が False に設定されている場合、ipv6_ra_mode または " "ipv6_address_mode を設定することはできません。" #, python-format msgid "" "ipv6_ra_mode set to '%(ra_mode)s' with ipv6_address_mode set to " "'%(addr_mode)s' is not valid. If both attributes are set, they must be the " "same value" msgstr "" "ipv6_ra_mode が '%(ra_mode)s' に、ipv6_address_mode が '%(addr_mode)s' に設定" "されていますが、これは無効です。両方の属性を設定する場合、これらは同じ値でな" "ければなりません" msgid "mac address update" msgstr "mac アドレス更新" #, python-format msgid "" "max_l3_agents_per_router %(max_agents)s config parameter is not valid. It " "has to be greater than or equal to min_l3_agents_per_router %(min_agents)s." 
msgstr "" "max_l3_agents_per_router %(max_agents)s 構成パラメーターが無効です。" "min_l3_agents_per_router %(min_agents)s 以上でなければなりません。" msgid "must provide exactly 2 arguments - cidr and MAC" msgstr "必ず 2 つの引数 (cidr および MAC) を提供する必要があります" msgid "network_type required" msgstr "network_type が必要です" #, python-format msgid "network_type value '%s' not supported" msgstr "network_type 値 '%s' はサポートされていません" msgid "new subnet" msgstr "新規サブネット" #, python-format msgid "physical_network '%s' unknown for VLAN provider network" msgstr "VLAN プロバイダーネットワークの physical_network '%s' が不明です" #, python-format msgid "physical_network '%s' unknown for flat provider network" msgstr "flat プロバイダーネットワークの physical_network '%s' が不明です" msgid "physical_network required for flat provider network" msgstr "flat プロバイダーネットワークには physical_network が必要です" #, python-format msgid "provider:physical_network specified for %s network" msgstr "%s ネットワークに provider:physical_network が指定されました" #, python-format msgid "rbac_db_model not found in %s" msgstr "%s で rbac_db_model が見つかりません" msgid "record" msgstr "レコード" msgid "respawn_interval must be >= 0 if provided." msgstr "respawn_interval は、指定する場合は 0 以上にする必要があります。" #, python-format msgid "segmentation_id out of range (%(min)s through %(max)s)" msgstr "segmentation_id が範囲 (%(min)s から %(max)s) 外です" msgid "segmentation_id requires physical_network for VLAN provider network" msgstr "" "segmentation_id には、VLAN プロバイダーネットワークの physical_network が必要" "です" msgid "shared attribute switching to synthetic" msgstr "共有属性を synthetic に変更します" #, python-format msgid "" "subnetpool %(subnetpool_id)s cannot be updated when associated with shared " "address scope %(address_scope_id)s" msgstr "" "サブネットプール %(subnetpool_id)s が共有アドレススコープ " "%(address_scope_id)s と関連付けられている場合は、サブネットプールを更新するこ" "とはできません" msgid "subnetpool_id and use_default_subnetpool cannot both be specified" msgstr "" "subnetpool_id と use_default_subnetpool の両方を指定することはできません" msgid "the nexthop is not connected with router" msgstr "ルーターによってネクストホップが接続されていません" msgid "the nexthop is used by router" msgstr "ネクストホップがルーターによって使用されています" #, python-format msgid "unable to load %s" msgstr "%s をロードできません" msgid "" "uuid provided from the command line so external_process can track us via /" "proc/cmdline interface." msgstr "" "UUID がコマンドラインに指定されたため、external_process で /proc/cmdline イン" "ターフェースを追跡できます。" neutron-8.4.0/neutron/locale/ko_KR/0000775000567000056710000000000013044373210020311 5ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/locale/ko_KR/LC_MESSAGES/0000775000567000056710000000000013044373210022076 5ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/locale/ko_KR/LC_MESSAGES/neutron.po0000664000567000056710000052332313044372760024151 0ustar jenkinsjenkins00000000000000# Translations template for neutron. # Copyright (C) 2015 ORGANIZATION # This file is distributed under the same license as the neutron project. # # Translators: # Seong-ho Cho , 2013 # Seong-ho Cho , 2013 # Sungjin Kang , 2013 # Sungjin Kang , 2013 # Sungjin Kang , 2013 # Jun-Sik Shin , 2016. #zanata # Sungjin Kang , 2016. 
#zanata msgid "" msgstr "" "Project-Id-Version: neutron 8.2.1.dev52\n" "Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n" "POT-Creation-Date: 2016-09-01 18:10+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" "PO-Revision-Date: 2016-09-01 01:22+0000\n" "Last-Translator: Jun-Sik Shin \n" "Language: ko-KR\n" "Plural-Forms: nplurals=1; plural=0;\n" "Generated-By: Babel 2.0\n" "X-Generator: Zanata 3.7.3\n" "Language-Team: Korean (South Korea)\n" #, python-format msgid "" "\n" "Command: %(cmd)s\n" "Exit code: %(code)s\n" "Stdin: %(stdin)s\n" "Stdout: %(stdout)s\n" "Stderr: %(stderr)s" msgstr "" "\n" "명령: %(cmd)s\n" "종료 코드: %(code)s\n" "Stdin: %(stdin)s\n" "Stdout: %(stdout)s\n" "Stderr: %(stderr)s" #, python-format msgid "" "%(branch)s HEAD file does not match migration timeline head, expected: " "%(head)s" msgstr "" "%(branch)s HEAD 파일이 마이그레이션 타임라인 헤드와 일치하지 않음, 예상값: " "%(head)s" #, python-format msgid "%(driver)s: Internal driver error." msgstr "%(driver)s: 내부 드라이버 오류." #, python-format msgid "%(id)s is not a valid %(type)s identifier" msgstr "%(id)s이(가) 올바른 %(type)s ID가 아님" #, python-format msgid "" "%(invalid_dirs)s is invalid value for sort_dirs, valid value is '%(asc)s' " "and '%(desc)s'" msgstr "" "%(invalid_dirs)s은(는) sort_dirs에 대해 올바르지 않은 값이며, 올바른 값은 " "'%(asc)s' 및 '%(desc)s'입니다. " #, python-format msgid "%(key)s prohibited for %(tunnel)s provider network" msgstr "%(tunnel)s 제공자 네트워크에 대해 %(key)s이(가) 금지됨" #, python-format msgid "" "%(method)s called with network settings %(current)s (original settings " "%(original)s) and network segments %(segments)s" msgstr "" "네트워크 설정 %(current)s과(와) 함께 %(method)s이(가) 호출됨(원래 설정 " "%(original)s) 및 네트워크 세그먼트 %(segments)s" #, python-format msgid "" "%(method)s called with port settings %(current)s (original settings " "%(original)s) host %(host)s (original host %(original_host)s) vif type " "%(vif_type)s (original vif type %(original_vif_type)s) vif details " "%(vif_details)s (original vif details %(original_vif_details)s) binding " "levels %(levels)s (original binding levels %(original_levels)s) on network " "%(network)s with segments to bind %(segments_to_bind)s" msgstr "" "다음으로 호출된 %(method)s 메소드. 포트 설정 %(current)s(원래 설정 " "%(original)s) 호스트 %(host)s(원래 호스트 %(original_host)s) vif 유형 " "%(vif_type)s (원래 vif 유형 %(original_vif_type)s) vif 세부 사항 " "%(vif_details)s(원래 vif 세부 사항 %(original_vif_details)s) 바인딩 레벨 " "%(levels)s(원래 바인딩 레벨 %(original_levels)s) - %(segments_to_bind)s을" "(를) 바인드하기 위한 세그먼트가 있는 네트워크 %(network)s에서 호출" #, python-format msgid "" "%(method)s called with subnet settings %(current)s (original settings " "%(original)s)" msgstr "" "%(method)s이(가) 서브넷 설정 %(current)s과(와) 함께 호출됨(원래 설정 " "%(original)s)" #, python-format msgid "%(method)s failed." msgstr "%(method)s이(가) 실패함" #, python-format msgid "%(name)s '%(addr)s' does not match the ip_version '%(ip_version)s'" msgstr "" "%(name)s '%(addr)s'이(가) ip_version '%(ip_version)s'과(와) 일치하지 않음" #, python-format msgid "%(param)s must be in %(range)s range." msgstr "%(param)s의 범위는 %(range)s이어야 합니다." #, python-format msgid "%s cannot be called while in offline mode" msgstr "%s은(는) 오프라인 모드 중 호출할 수 없습니다. " #, python-format msgid "%s is invalid attribute for sort_key" msgstr "%s이(는) sort_keys에 대해 올바르지 않은 속성입니다. " #, python-format msgid "%s is invalid attribute for sort_keys" msgstr "%s이(는) sort_keys에 대해 올바르지 않은 속성입니다. 
" #, python-format msgid "%s is not a valid VLAN tag" msgstr "%s이(가) 올바른 VLAN 태그가 아님" #, python-format msgid "%s must be specified" msgstr "%s을(를) 지정해야 함" #, python-format msgid "%s must implement get_port_from_device or get_ports_from_devices." msgstr "" "%s은(는) get_port_from_device 또는 get_ports_from_devices를 구현해야 합니다." #, python-format msgid "%s prohibited for VLAN provider network" msgstr "VLAN 제공자 네트워크에 대해 %s이(가) 금지됨" #, python-format msgid "%s prohibited for flat provider network" msgstr "플랫 제공자 네트워크에 대해 %s이(가) 금지됨" #, python-format msgid "%s prohibited for local provider network" msgstr "로컬 제공자 네트워크에 대해 %s이(가) 금지됨" #, python-format msgid "" "'%(data)s' contains '%(length)s' characters. Adding a domain name will cause " "it to exceed the maximum length of a FQDN of '%(max_len)s'" msgstr "" "'%(data)s'에 '%(length)s'개의 문자가 포함됩니다. 도메인 이름을 추가하면 FQDN" "의 최대 길이인 '%(max_len)s'이(가) 초과됩니다." #, python-format msgid "" "'%(data)s' contains '%(length)s' characters. Adding a sub-domain will cause " "it to exceed the maximum length of a FQDN of '%(max_len)s'" msgstr "" "'%(data)s'에 '%(length)s'개의 문자가 포함됩니다. 하위 도메인을 추가하면 FQDN" "의 최대 길이인 '%(max_len)s'이(가) 초과됩니다." #, python-format msgid "'%(data)s' exceeds maximum length of %(max_len)s" msgstr "'%(data)s'이(가) %(max_len)s의 최대 길이를 초과함" #, python-format msgid "'%(data)s' is not an accepted IP address, '%(ip)s' is recommended" msgstr "'%(data)s'은(는) 허용되는 IP 주소가 아님, '%(ip)s'이(가) 권장됨" #, python-format msgid "'%(data)s' is not in %(valid_values)s" msgstr "'%(data)s'이(가) %(valid_values)s에 없음" #, python-format msgid "'%(data)s' is too large - must be no larger than '%(limit)d'" msgstr "'%(data)s'이(가) 너무 큼 - '%(limit)d' 이하여야 함" #, python-format msgid "'%(data)s' is too small - must be at least '%(limit)d'" msgstr "'%(data)s'이(가) 너무 작음 - 최소 '%(limit)d'이어야 함 " #, python-format msgid "'%(data)s' isn't a recognized IP subnet cidr, '%(cidr)s' is recommended" msgstr "" "'%(data)s'이(가) 인식된 IP 서브넷 cidr이 아닙니다. '%(cidr)s'이(가) 권장됩니" "다. " #, python-format msgid "'%(data)s' not a valid PQDN or FQDN. Reason: %(reason)s" msgstr "'%(data)s'이(가) 올바른 PQDN 또는 FQDN이 아님, 이유: %(reason)s" #, python-format msgid "'%(host)s' is not a valid nameserver. %(msg)s" msgstr "'%(host)s'은(는) 올바른 이름 서버가 아닙니다. %(msg)s" #, python-format msgid "'%s' Blank strings are not permitted" msgstr "'%s' 공백 문자열이 허용되지 않음" #, python-format msgid "'%s' cannot be converted to boolean" msgstr "'%s'은(는) boolean으로 변환될 수 없음" #, python-format msgid "'%s' cannot be converted to lowercase string" msgstr "'%s'은(는) 소문자 문자열로 변환할 수 없음" #, python-format msgid "'%s' contains whitespace" msgstr "'%s'에 공백 문자가 있습니다" #, python-format msgid "'%s' exceeds the 255 character FQDN limit" msgstr "'%s'이(가) 255자 FQDN 한계를 초과함" #, python-format msgid "'%s' is a FQDN. It should be a relative domain name" msgstr "'%s'이(가) FQDN입니다. 상대적 도메인 이름이어야 합니다." 
#, python-format msgid "'%s' is not a FQDN" msgstr "'%s'이(가) FQDN이 아님" #, python-format msgid "'%s' is not a dictionary" msgstr "'%s'이(가) dictianry가 아님" #, python-format msgid "'%s' is not a list" msgstr "'%s'이(가) 목록이 아님" #, python-format msgid "'%s' is not a valid IP address" msgstr "'%s'이(가) 올바른 IP 주소가 아님" #, python-format msgid "'%s' is not a valid IP subnet" msgstr "'%s'이(가) 올바른 IP 서브넷이 아님" #, python-format msgid "'%s' is not a valid MAC address" msgstr "'%s'이(가) 올바른 MAC 주소가 아님" #, python-format msgid "'%s' is not a valid RBAC object type" msgstr "'%s'은(는) 올바른 RBAC 오브젝트 유형이 아님" #, python-format msgid "'%s' is not a valid UUID" msgstr "'%s'이(가) 올바른 UUID가 아님" #, python-format msgid "'%s' is not a valid boolean value" msgstr "'%s'은(는) 올바른 부울린 값이 아닙니다" #, python-format msgid "'%s' is not a valid input" msgstr "'%s'이(가) 올바른 입력이 아님" #, python-format msgid "'%s' is not a valid string" msgstr "'%s'이(가) 올바른 문자열이 아님" #, python-format msgid "'%s' is not an integer" msgstr "'%s'이(가) 정수가 아님" #, python-format msgid "'%s' is not an integer or uuid" msgstr "'%s'이(가) 정수 또는 uuid가 아님" #, python-format msgid "'%s' is not of the form =[value]" msgstr "'%s'의 양식이 =[value]가 아님" #, python-format msgid "'%s' is not supported for filtering" msgstr "'%s'은(는) 필터링을 위해 지원되지 않음" #, python-format msgid "'%s' must be a non negative decimal." msgstr "'%s'은(는) 음수가 아닌 10진수여야 합니다. " #, python-format msgid "'%s' should be non-negative" msgstr "'%s'은(는) 음수가 아니어야 함" msgid "'.' searches are not implemented" msgstr "'.' 검색이 구현되지 않음" #, python-format msgid "'module' object has no attribute '%s'" msgstr "'module' 오브젝트에 '%s' 속성이 없음" msgid "'port_max' is smaller than 'port_min'" msgstr "'port_max'가 'port_min'보다 작음" msgid "" "(Deprecated. Use '--subproject neutron-SERVICE' instead.) The advanced " "service to execute the command against." msgstr "" "(더 이상 사용되지 않습니다. '--subproject neutron-SERVICE'를 대신 사용하십시" "오.) 명령을 실행할 고급 서비스입니다. " msgid "0 is not allowed as CIDR prefix length" msgstr "0은 CIDR 접두부 길이로 허용되지 않음" msgid "" "32-bit BGP identifier, typically an IPv4 address owned by the system running " "the BGP DrAgent." msgstr "" "32비트 BGP 식별자로서, 일반적으로 BGP DrAgent를 실행 중인 시스템에서 소유한 " "IPv4 주소입니다." msgid "A QoS driver must be specified" msgstr "QoS 드라이버를 지정해야 함" msgid "A cidr must be specified in the absence of a subnet pool" msgstr "서브넷 풀이 없는 경우 cidr을 지정해야 함" msgid "" "A decimal value as Vendor's Registered Private Enterprise Number as required " "by RFC3315 DUID-EN." msgstr "" "RFC3315 DUID-EN에서 요구하는 대로 벤더의 등록된 개인용 엔터프라이즈 번호로서" "의 10진수 값입니다. " #, python-format msgid "A default external network already exists: %(net_id)s." msgstr "기본 외부 네트워크가 이미 있음: %(net_id)s." msgid "" "A default subnetpool for this IP family has already been set. Only one " "default may exist per IP family" msgstr "" "이 IP 제품군의 기본 subnetpool이 이미 설정되었습니다. IP 제품군당 기본값은 하" "나만 있을 수 있습니다." msgid "A metering driver must be specified" msgstr "측정 드라이버를 지정해야 함" msgid "A password must be supplied when using auth_type md5." msgstr "auth_type md5를 사용할 때 암호를 제공해야 합니다." msgid "API for retrieving service providers for Neutron advanced services" msgstr "Neutron 고급 서비스에 대한 서비스 제공자를 검색하기 위한 API" msgid "Aborting periodic_sync_routers_task due to an error." msgstr "오류로 인해 periodic_sync_routers_task를 중단합니다." msgid "Access to this resource was denied." msgstr "이 자원에 대한 액세스가 거부되었습니다." msgid "Action to be executed when a child process dies" msgstr "하위 프로세스가 정지될 때 조치가 실행됨" msgid "" "Add comments to iptables rules. 
Set to false to disallow the addition of " "comments to generated iptables rules that describe each rule's purpose. " "System must support the iptables comments module for addition of comments." msgstr "" " Iptables 규칙에 주석을 추가하십시오. False로 설정하면 각 규칙의 용도를 설명" "하는 생성된 iptables 규칙에 주석을 추가할 수 없습니다. 시스템에서 주석을 추가" "하기 위한 iptables 주석 모듈을 지원해야 합니다." msgid "Address not present on interface" msgstr "인터페이스에 주소가 없음" #, python-format msgid "Address scope %(address_scope_id)s could not be found" msgstr "주소 범위 %(address_scope_id)s을(를) 찾을 수 없음" msgid "" "Address to listen on for OpenFlow connections. Used only for 'native' driver." msgstr "" "OpenFlow 연결을 위해 청취할 주소입니다. 'native' 드라이버에만 사용됩니다. " msgid "Adds external network attribute to network resource." msgstr "외부 네트워크 속성을 네트워크 자원에 추가합니다." msgid "Adds test attributes to core resources." msgstr "코어 자원에 테스트 속성을 추가합니다." #, python-format msgid "Agent %(id)s could not be found" msgstr "%(id)s 에이전트를 찾을 수 없음" #, python-format msgid "Agent %(id)s is not a L3 Agent or has been disabled" msgstr "%(id)s 에이전트가 L3 에이전트가 아니거나 사용 안함 상태임" #, python-format msgid "Agent %(id)s is not a valid DHCP Agent or has been disabled" msgstr "%(id)s 에이전트가 올바른 DHCP 에이전트가 아니거나 사용 안함 상태임" msgid "Agent has just been revived" msgstr "에이전트가 다시 활성화됨" msgid "" "Agent starts with admin_state_up=False when enable_new_agents=False. In the " "case, user's resources will not be scheduled automatically to the agent " "until admin changes admin_state_up to True." msgstr "" "enable_new_agents=False인 경우 에이전트는 admin_state_up=False로 시작합니다. " "이 경우 사용자의 자원은 관리자가 admin_state_up을 True로 변경할 때까지 에이전" "트에 대해 자동으로 스케줄되지 않습니다. " #, python-format msgid "Agent updated: %(payload)s" msgstr "업데이트된 에이전트: %(payload)s" #, python-format msgid "" "Agent with agent_type=%(agent_type)s and host=%(host)s could not be found" msgstr "agent_type=%(agent_type)s 및 host=%(host)s인 에이전트를 찾을 수 없음" msgid "Allow auto scheduling networks to DHCP agent." msgstr "DHCP 에이전트에 대한 네트워크 자동 스케줄링을 허용합니다. " msgid "Allow auto scheduling of routers to L3 agent." msgstr "L3 에이전트에 대한 라우터 자동 스케줄링을 허용합니다." msgid "" "Allow overlapping IP support in Neutron. Attention: the following parameter " "MUST be set to False if Neutron is being used in conjunction with Nova " "security groups." msgstr "" "Neutron에서 중복 IP 지원을 허용합니다. 주의: Neutron을 Nova 보안 그룹과 함께 " "사용하는 경우 다음 매개변수를 False로 설정해야 합니다." msgid "Allow running metadata proxy." msgstr "메타데이터 프록시 실행을 허용합니다." msgid "Allow sending resource operation notification to DHCP agent" msgstr "DHCP 에이전트에 자원 조작 알림 전송 허용" msgid "Allow the creation of PTR records" msgstr "PTR 레코드의 작성 허용" msgid "Allow the usage of the bulk API" msgstr "벌크 API 사용 허용" msgid "Allow the usage of the pagination" msgstr "페이지 번호 매기기 사용 허용" msgid "Allow the usage of the sorting" msgstr "정렬 사용 허용" msgid "Allow to perform insecure SSL (https) requests to nova metadata" msgstr "Nova 메타데이터에 대한 비보안 SSL(https) 요청 수행 허용" msgid "Allowed address pairs must be a list." msgstr "허용되는 주소 쌍은 목록이어야 합니다. " msgid "AllowedAddressPair must contain ip_address" msgstr "AllowedAddressPair에 ip_address가 포함되어야 함" msgid "" "Allows for serving metadata requests coming from a dedicated metadata access " "network whose CIDR is 169.254.169.254/16 (or larger prefix), and is " "connected to a Neutron router from which the VMs send metadata:1 request. In " "this case DHCP Option 121 will not be injected in VMs, as they will be able " "to reach 169.254.169.254 through a router. This option requires " "enable_isolated_metadata = True." 
msgstr "" "CIDR이 169.254.169.254/16(또는 더 큰 접두사)이며 VM에서 metadata:1 요청을 보" "내는 Neutron 라우터에 연결된 전용 메타데이터 액세스 네트워크에서 전송되는 메" "타데이터 요청에 대해 서비스를 제공할 수 있습니다. 이 경우 라우터를 통해 " "169.254.169.254에 연결할 수 있으므로 DHCP 옵션 121은 VM에 삽입되지 않습니다. " "이 옵션에는 enable_isolated_metadata = True가 필요합니다." #, python-format msgid "" "Already hosting BGP Speaker for local_as=%(current_as)d with router_id=" "%(rtid)s." msgstr "" "router_id=%(rtid)s인 local_as=%(current_as)d의 BGP 스피커를 이미 호스트 중입" "니다." #, python-format msgid "" "Already hosting maximum number of BGP Speakers. Allowed scheduled count=" "%(count)d" msgstr "" "이미 BGP 스피커의 최대 수를 호스팅 중입니다. 허용된 스케줄 수=%(count)d" msgid "An RBAC policy already exists with those values." msgstr "해당 값의 RBAC 정책이 이미 있습니다." msgid "An identifier must be specified when updating a subnet" msgstr "서브넷을 업데이트할 때 ID를 지정해야 함" msgid "An interface driver must be specified" msgstr "인터페이스 드라이버가 지정되어야 함" msgid "" "An ordered list of extension driver entrypoints to be loaded from the " "neutron.ml2.extension_drivers namespace. For example: extension_drivers = " "port_security,qos" msgstr "" "neutron.ml2.mechanism_drivers 네임스페이스로부터 로드할 확장 드라이버 " "endpoint의 정렬된 목록입니다. 예: extension_drivers = port_security,qos" msgid "" "An ordered list of networking mechanism driver entrypoints to be loaded from " "the neutron.ml2.mechanism_drivers namespace." msgstr "" "neutron.ml2.mechanism_drivers 네임스페이스로부터 로드할 네트워킹 메커니즘 드" "라이버 시작점의 정렬된 목록입니다." msgid "An unexpected internal error occurred." msgstr "예상치 않은 내부 오류가 발생했습니다." msgid "An unknown error has occurred. Please try your request again." msgstr "알 수 없는 오류가 발생했습니다. 요청을 다시 시도하십시오. " msgid "Async process didn't respawn" msgstr "비동기 프로세스가 다시 파생되지 않음" #, python-format msgid "Attribute '%s' not allowed in POST" msgstr "'%s' 속성은 POST에서 허용되지 않음" #, python-format msgid "Authentication type not supported. Requested type=%(auth_type)s." msgstr "인증 유형이 지원되지 않습니다. 요청된 유형=%(auth_type)s입니다." msgid "Authorization URL for connecting to designate in admin context" msgstr "관리 컨텍스트에서 지정하기 위해 연결할 인증 URL" msgid "Automatically remove networks from offline DHCP agents." msgstr "오프라인 DHCP 에이전트에서 네트워크를 자동으로 제거합니다." msgid "" "Automatically reschedule routers from offline L3 agents to online L3 agents." msgstr "" "오프라인 L3 에이전트부터 온라인 L3 에이전트까지 라우트를 자동으로 다시 스케줄" "합니다." msgid "Availability zone of this node" msgstr "노드에대한 가용 구역" #, python-format msgid "AvailabilityZone %(availability_zone)s could not be found." msgstr "AvailabilityZone %(availability_zone)s을(를) 찾을 수 없습니다." msgid "Available commands" msgstr "사용 가능한 명령" #, python-format msgid "" "BGP Peer %(peer_ip)s for remote_as=%(remote_as)s, running for BGP Speaker " "%(speaker_as)d not added yet." msgstr "" "BGP 스피커 %(speaker_as)d에 대해 실행 중인 remote_as=%(remote_as)s의 BGP 피" "어 %(peer_ip)s이(가) 아직 추가되지 않았습니다." #, python-format msgid "" "BGP Speaker %(bgp_speaker_id)s is already configured to peer with a BGP Peer " "at %(peer_ip)s, it cannot peer with BGP Peer %(bgp_peer_id)s." msgstr "" "%(peer_ip)s의 BGP 피어로 감시하도록 BGP 스피커 %(bgp_speaker_id)s이(가) 이" "미 구성되어 있습니다. BGP 피어 %(bgp_peer_id)s(으)로 감시할 수 없습니다." #, python-format msgid "" "BGP Speaker for local_as=%(local_as)s with router_id=%(rtid)s not added yet." msgstr "" "router_id=%(rtid)s인 local_as=%(local_as)s의 BGP 스피커가 아직 추가되지 않았" "습니다." #, python-format msgid "" "BGP peer %(bgp_peer_id)s is not associated with BGP speaker " "%(bgp_speaker_id)s." msgstr "" "BGP 피어 %(bgp_peer_id)s이(가) BGP 스피커 %(bgp_speaker_id)s과(와) 연관되지 " "않았습니다." #, python-format msgid "BGP peer %(bgp_peer_id)s not authenticated." 
msgstr "BGP 피어 %(bgp_peer_id)s이(가) 인증되지 않았습니다." #, python-format msgid "BGP peer %(id)s could not be found." msgstr "BGP 피어 %(id)s을(를) 찾을 수 없습니다." #, python-format msgid "" "BGP speaker %(bgp_speaker_id)s is not hosted by the BgpDrAgent %(agent_id)s." msgstr "" "BgpDrAgent %(agent_id)s에서 BGP 스피커 %(bgp_speaker_id)s을(를) 호스트하지 않" "습니다." #, python-format msgid "BGP speaker %(id)s could not be found." msgstr "BGP 스피커 %(id)s을(를) 찾을 수 없습니다." msgid "BGP speaker driver class to be instantiated." msgstr "인스턴스화할 BGP 스피커 드라이버 클래스." msgid "Backend does not support VLAN Transparency." msgstr "백엔드는 VLAN 투명도를 지원하지 않습니다." #, python-format msgid "" "Bad prefix or mac format for generating IPv6 address by EUI-64: %(prefix)s, " "%(mac)s:" msgstr "" "EUI-64에 의해 IPv6 주소를 생성하기 위한 prefix 또는 mac 형식이 잘못되었습니" "다. %(prefix)s, %(mac)s:" #, python-format msgid "Bad prefix type for generate IPv6 address by EUI-64: %s" msgstr "EUI-64에 의해 IPv6 주소를 생성하기 위한 prefix 유형이 잘못됨: %s" #, python-format msgid "Base MAC: %s" msgstr "기본 MAC: %s" msgid "" "Base log dir for dnsmasq logging. The log contains DHCP and DNS log " "information and is useful for debugging issues with either DHCP or DNS. If " "this section is null, disable dnsmasq log." msgstr "" "dnsmasq 로깅을 위한 기본 로그 디렉토리입니다. 이 로그는 DHCP와 DNS 로그 정보" "를 포함하고 있으며 DHCP이나 DNS에 대한 문제를 디버깅하는 데 유용합니다. 이 섹" "션이 널인 경우에는 dnsmasq 로그를 사용 안함으로 설정하십시오. " #, python-format msgid "BgpDrAgent %(agent_id)s is already associated to a BGP speaker." msgstr "" "BgpDrAgent %(agent_id)s이(가) 이미 BGP 스피커에 이미 연관되어 있습니다." #, python-format msgid "BgpDrAgent %(id)s is invalid or has been disabled." msgstr "BgpDrAgent %(id)s이(가) 올바르지 않거나 비활성화되었습니다." #, python-format msgid "BgpDrAgent updated: %s" msgstr "BgpDrAgent가 업데이트됨: %s" msgid "Body contains invalid data" msgstr "본문에 올바르지 않은 데이터가 포함되어 있음" msgid "Both network_id and router_id are None. One must be provided." msgstr "network_id 및 router_id가 모두 None입니다. 하나가 제공되어야 합니다. " #, python-format msgid "Bridge %(bridge)s does not exist." msgstr "%(bridge)s 브릿지가 존재하지 않습니다. " #, python-format msgid "Bridge %s does not exist" msgstr "브릿지 %s이(가) 없음" msgid "Bulk operation not supported" msgstr "Bulk 오퍼레이션은 지원되지 않음" msgid "CA certificate file to use to verify connecting clients" msgstr "클라이언트 연결을 확인하기 위해 사용하는 CA 인증 파일" msgid "CIDR to monitor" msgstr "모니터할 CIDR" #, python-format msgid "Callback for %(resource_type)s not found" msgstr "%(resource_type)s에 대한 콜백을 찾을 수 없음" #, python-format msgid "Callback for %(resource_type)s returned wrong resource type" msgstr "%(resource_type)s에 대한 콜백에서 잘못된 자원 유형을 리턴함" #, python-format msgid "Cannot add floating IP to port %s that has no fixed IPv4 addresses" msgstr "고정 IPv4 주소가 없는 포트 %s에 floating IP를 추가할 수 없음" #, python-format msgid "Cannot add floating IP to port on subnet %s which has no gateway_ip" msgstr "gateway_ip를 갖지 않는 %s 서브넷의 포트에 floating IP를 추가할 수 없음" #, python-format msgid "Cannot add multiple callbacks for %(resource_type)s" msgstr "%(resource_type)s에 대한 다중 콜백을 추가할 수 없음" #, python-format msgid "Cannot allocate IPv%(req_ver)s subnet from IPv%(pool_ver)s subnet pool" msgstr "IPv%(pool_ver)s 서브넷 풀에서 IPv%(req_ver)s 서브넷을 할당할 수 없음" msgid "Cannot allocate requested subnet from the available set of prefixes" msgstr "사용 가능한 prefix 세트에서 요청한 서브넷을 할당할 수 없음" #, python-format msgid "" "Cannot associate floating IP %(floating_ip_address)s (%(fip_id)s) with port " "%(port_id)s using fixed IP %(fixed_ip)s, as that fixed IP already has a " "floating IP on external network %(net_id)s." 
msgstr "" "Fixed IP는 외부 네트워크 %(net_id)s에서 floating IP를 가지고 있기 때문에 고" "정 IP %(fixed_ip)s을(를) 사용하여 floating IP " "%(floating_ip_address)s(%(fip_id)s)을(를) 포트 %(port_id)s과(와) 연관시킬 수 " "없습니다. " msgid "" "Cannot change HA attribute of active routers. Please set router " "admin_state_up to False prior to upgrade." msgstr "" "활성 라우터의 HA 속성을 변경할 수 없습니다. 업그레이드 전에 라우터 " "admin_state_up을 False로 설정하십시오." #, python-format msgid "" "Cannot create floating IP and bind it to %s, since that is not an IPv4 " "address." msgstr "" "IPv4 주소가 아니므로 floating IP를 작성하여 %s에 바인드할 수 없습니다. " #, python-format msgid "" "Cannot create floating IP and bind it to Port %s, since that port is owned " "by a different tenant." msgstr "" "Floating IP를 작성하여 포트 %s에 바인드할 수 없습니다. 해당 포트를 다른 " "tenant가 소유하기 때문입니다. " msgid "Cannot create resource for another tenant" msgstr "다른 테넌트에 대한 자원을 작성할 수 없음" msgid "Cannot disable enable_dhcp with ipv6 attributes set" msgstr "ipv6 속성이 설정된 enable_dhcp를 사용할 수 없음" #, python-format msgid "Cannot find %(table)s with %(col)s=%(match)s" msgstr "%(col)s=%(match)s인 %(table)s을(를) 찾을 수 없음" #, python-format msgid "Cannot handle subnet of type %(subnet_type)s" msgstr "%(subnet_type)s 유형의 서브넷을 처리할 수 없음" msgid "Cannot have multiple IPv4 subnets on router port" msgstr "라우터 포트에 IPv4 서브넷이 여러 개일 수 없음" #, python-format msgid "" "Cannot have multiple router ports with the same network id if both contain " "IPv6 subnets. Existing port %(p)s has IPv6 subnet(s) and network id %(nid)s" msgstr "" "모두 IPv6 서브넷이 있는 경우 같은 네트워크 ID를 사용하는 라우터 포트를 여러 " "개 사용할 수 없음. 기존 포트 %(p)s에 IPv6 서브넷 및 네트워크 ID %(nid)s이" "(가) 있음." #, python-format msgid "" "Cannot host distributed router %(router_id)s on legacy L3 agent %(agent_id)s." msgstr "" "레거시 L3 에이전트 %(agent_id)s에서 분산된 라우터 %(router_id)s을(를) 호스팅" "할 수 업습니다." msgid "Cannot match priority on flow deletion or modification" msgstr "플로우 삭제나 수정시 우선순위와 일치할 수 없음" msgid "Cannot mix IPv4 and IPv6 prefixes in a subnet pool." msgstr "서브넷 풀에서 IPv4 및 IPv6 prefix를 혼합하여 사용할 수 없습니다." msgid "Cannot specify both --service and --subproject." msgstr "--service와 --subproject를 모두 지정할 수 없습니다. " msgid "Cannot specify both subnet-id and port-id" msgstr "subnet-id와 port-id를 둘 다 지정할 수 없음" msgid "Cannot understand JSON" msgstr "JSON을 이해할 수 없음" #, python-format msgid "Cannot update read-only attribute %s" msgstr "읽기 전용 속성 %s을(를) 업데이트할 수 없음" msgid "" "Cannot upgrade active router to distributed. Please set router " "admin_state_up to False prior to upgrade." msgstr "" "활성 라우터를 분산 라우터로 업그레이드할 수 없습니다. 업그레이드 전에 라우터 " "admin_state_up을 False로 설정하십시오. " msgid "Certificate Authority public key (CA cert) file for ssl" msgstr "SSL용 인증 기관 공개 키(CA cert) 파일 " #, python-format msgid "" "Change would make usage less than 0 for the following resources: %(unders)s." msgstr "변경하면 다음 자원에 대한 사용량이 0보다 작아짐: %(unders)s." 
msgid "Check ebtables installation" msgstr "Ebtables 설치 확인" msgid "Check for ARP header match support" msgstr "ARP 헤더 일치 지원 확인" msgid "Check for ARP responder support" msgstr "ARP 응답기 지원 확인" msgid "Check for ICMPv6 header match support" msgstr "ICMPv6 헤더 일치 지원 확인" msgid "Check for OVS Geneve support" msgstr "OVS Geneve 지원 확인" msgid "Check for OVS vxlan support" msgstr "OVS vxlan 지원 확인" msgid "Check for VF management support" msgstr "VF 관리 지원 확인" msgid "Check for iproute2 vxlan support" msgstr "IProute2 vxlan 지원 확인" msgid "Check for nova notification support" msgstr "Nova 알림 지원 확인" msgid "Check for patch port support" msgstr "패치 포트 지원 확인" msgid "Check ip6tables installation" msgstr "IP6tables 설치 확인" msgid "Check ipset installation" msgstr "IPSet 설치 확인" msgid "Check keepalived IPv6 support" msgstr "Keepalived IPv6 지원 확인" msgid "Check minimal dibbler version" msgstr "최소 dibbler 버전 확인" msgid "Check minimal dnsmasq version" msgstr "최소 dnsmasq 버전 확인" msgid "Check netns permission settings" msgstr "NetNS 권한 설정 확인" msgid "Check ovs conntrack support" msgstr "OVS conntrack 지원 확인" msgid "Check ovsdb native interface support" msgstr "Ovsdb 네이티브 인터페이스 지원 확인" #, python-format msgid "" "Cidr %(subnet_cidr)s of subnet %(subnet_id)s overlaps with cidr %(cidr)s of " "subnet %(sub_id)s" msgstr "" "서브넷 %(subnet_id)s의 cidr %(subnet_cidr)s이(가) 서브넷 %(sub_id)s의 cidr " "%(cidr)s과(와) 겹침" msgid "Class not found." msgstr "클래스을 찾을 수 없습니다." msgid "Cleanup resources of a specific agent type only." msgstr "특정 에이전트 유형의 자원만 정리합니다." msgid "Client certificate for nova metadata api server." msgstr "Nova 메타데이터 api 서버에 대한 클라이언트 인증서입니다." msgid "" "Comma-separated list of : tuples, mapping " "network_device to the agent's node-specific list of virtual functions that " "should not be used for virtual networking. vfs_to_exclude is a semicolon-" "separated list of virtual functions to exclude from network_device. The " "network_device in the mapping should appear in the physical_device_mappings " "list." msgstr "" "가상 네트워킹에 사용해서는 안되는 가상 함수의 에이전트 노드별 목록에 " "network_device를 맵핑하는 쉼표로 구분된 : 튜" "플 목록입니다. vfs_to_exclude는 network_device에서 제외시킬 세미콜론으로 구분" "된 가상 함수 목록입니다. 맵핑에 사용된 network_device는 " "physical_device_mappings 목록에 표시되어야 합니다." msgid "" "Comma-separated list of : tuples mapping physical " "network names to the agent's node-specific Open vSwitch bridge names to be " "used for flat and VLAN networks. The length of bridge names should be no " "more than 11. Each bridge must exist, and should have a physical network " "interface configured as a port. All physical networks configured on the " "server should have mappings to appropriate bridges on each agent. Note: If " "you remove a bridge from this mapping, make sure to disconnect it from the " "integration bridge as it won't be managed by the agent anymore. Deprecated " "for ofagent." msgstr "" "플랫 및 VLAN 네트워크에 사용할 에이전트 노드별 Open vSwitch 브리짓 이름에 실" "제 네트워크 이름을 맵핑하는 쉼표로 구분된 : 튜플 목" "록입니다. 브릿지 이름의 길이는 11보다 커야 합니다. 각 브릿지가 있어야 하며, " "실제 네트워크 인터페이스가 포트로 구성되어야 합니다. 서버에 구성된 모든 실제 " "네트워크는 각 에이전트의 적절한 브릿지에 맵핑되어야 합니다. 참고: 이 맵핑에" "서 브릿지를 제거하면 더 이상 에이전트에서 관리하지 않으므로 통합 브릿지에서 " "연결을 끊으십시오. ofagent에서는 더 이상 사용되지 않습니다." msgid "" "Comma-separated list of : tuples mapping " "physical network names to the agent's node-specific physical network device " "interfaces of SR-IOV physical function to be used for VLAN networks. All " "physical networks listed in network_vlan_ranges on the server should have " "mappings to appropriate interfaces on each agent." 
msgstr "" "VLAN 네트워크에 사용할 SR-IOV 실제 기능의 에이전트 노드별 실제 네트워크 디바" "이스 인터페이스에 실제 네트워크 이름을 맵핑하는 쉼표로 구분된 " ": 튜플 목록입니다. 서버의 " "network_vlan_ranges에 나열된 모든 실제 네트워크는 각 에이전트의 해당 인터페이" "스에 대한 맵핑이 있어야 합니다." msgid "" "Comma-separated list of : tuples " "mapping physical network names to the agent's node-specific physical network " "interfaces to be used for flat and VLAN networks. All physical networks " "listed in network_vlan_ranges on the server should have mappings to " "appropriate interfaces on each agent." msgstr "" "플랫 및 VLAN 네트워크에 사용할 에이전트 노드별 실제 네트워크 인터페이스에 실" "제 네트워크 이름을 맵핑하는 쉼표로 구분된 :" " 튜플 목록입니다. 서버의 network_vlan_ranges에 나열된 모" "든 실제 네트워크는 각 에이전트의 해당 인터페이스에 대한 맵핑이 있어야 합니다." msgid "" "Comma-separated list of : tuples enumerating ranges of GRE " "tunnel IDs that are available for tenant network allocation" msgstr "" "Tenant 네트워크 할당에 사용 가능한 GRE 터널 ID의 범위를 열거한 :" " 튜플을 쉼표로 구분한 목록입니다." msgid "" "Comma-separated list of : tuples enumerating ranges of " "Geneve VNI IDs that are available for tenant network allocation" msgstr "" "Tenant 네트워크 할당에 사용 가능한 Geneve VNI ID의 범위를 열거하는 :" " 튜플의 쉼표로 구분된 목록입니다. " msgid "" "Comma-separated list of : tuples enumerating ranges of " "VXLAN VNI IDs that are available for tenant network allocation" msgstr "" "Tenant 네트워크 할당에 사용 가능한 VXLAN VNI ID의 범위를 열거한 :" " 튜플의 쉼표로 구분된 목록입니다. " msgid "" "Comma-separated list of supported PCI vendor devices, as defined by " "vendor_id:product_id according to the PCI ID Repository. Default enables " "support for Intel and Mellanox SR-IOV capable NICs." msgstr "" "PCI ID 저장소에 따라 vendor_id:product_id로 정의된 지원되는 PCI 벤더 장치의 " "쉼표로 구분된 목록입니다. 기본값은 Intel 및 Mellanox SR-IOV 사용 가능 NIC에 " "대한 지원을 사용으로 설정합니다." msgid "" "Comma-separated list of the DNS servers which will be used as forwarders." msgstr "쉼표로 분리된 DNS 서버의 목록이며 전달자로 사용됩니다." msgid "Command to execute" msgstr "실행할 명령" #, python-format msgid "Commands %(commands)s exceeded timeout %(timeout)d seconds" msgstr "명령 %(commands)s 이(가) 제한시간 %(timeout)d 를 초과하였습니다." msgid "Config file for interface driver (You may also use l3_agent.ini)" msgstr "인터페이스 드라이버에 대한 구성 파일(l3_agent.ini도 사용할 수 있음)" #, python-format msgid "Conflicting value ethertype %(ethertype)s for CIDR %(cidr)s" msgstr "CIDR %(cidr)s에 대한 충돌하는 값 ethertype %(ethertype)s" msgid "" "Controls whether the neutron security group API is enabled in the server. It " "should be false when using no security groups or using the nova security " "group API." msgstr "" "서버에서 neutron 보안 그룹 API가 사용되는지 여부를 제어합니다.보안 그룹을 사" "용하지 않거나 nova 보안 그룹 API를 사용할 때는 false이어야 합니다." #, python-format msgid "Could not bind to %(host)s:%(port)s after trying for %(time)d seconds" msgstr "%(time)d후 시도한 다음 %(host)s:%(port)s에 바인딩할 수 없습니다" #, python-format msgid "Could not connect to %s" msgstr "%s에 연결할 수 없음" msgid "Could not deserialize data" msgstr "데이터를 직렬화 해제할 수 없음" #, python-format msgid "Could not retrieve schema from %(conn)s: %(err)s" msgstr "%(conn)s에서 스키마를 검색할 수 없음: %(err)s" #, python-format msgid "" "Current gateway ip %(ip_address)s already in use by port %(port_id)s. Unable " "to update." msgstr "" "현재 게이트웨이 ip %(ip_address)s을(를) 포트 %(port_id)s에서 이미 사용하고 있" "습니다.업데이트할 수 없습니다." msgid "Currently update of HA mode for a DVR/HA router is not supported." msgstr "DVR/HA 라우터의 HA 모드 업데이트는 현재 지원되지 않습니다." msgid "Currently update of HA mode for a distributed router is not supported." msgstr "분산된 라우터의 HA 모드 업데이트는 현재 지원되지 않습니다." msgid "" "Currently update of distributed mode for a DVR/HA router is not supported" msgstr "DVR/HA 라우터의 분산 모드 업데이트는 현재 지원되지 않습니다." 
msgid "Currently update of distributed mode for an HA router is not supported." msgstr "HA 라우터의 분산 모드 업데이트는 현재 지원되지 않습니다." msgid "" "Currently updating a router from DVR/HA to non-DVR non-HA is not supported." msgstr "" "DVR/HA에서 비DVR 비HA로 라우터를 업데이트하는 기능은 현재 지원되지 않습니다." msgid "Currently updating a router to DVR/HA is not supported." msgstr "DVR/HA로 라우터를 업데이트하는 기능은 현재 지원되지 않습니다." msgid "" "DHCP lease duration (in seconds). Use -1 to tell dnsmasq to use infinite " "lease times." msgstr "" "DHCP 리스 기간(초)입니다. dnsmasq에 무한 리스 시간을 사용하도록 지시하려면 -1" "을 사용하십시오." msgid "" "DVR deployments for VXLAN/GRE/Geneve underlays require L2-pop to be enabled, " "in both the Agent and Server side." msgstr "" "VXLAN/GRE/Geneve 기초를 위한 DVR 배치를 수행하려면 에이전트 측과 서버 측 모두" "에서 L2-pop을 사용으로 설정해야 합니다. " msgid "" "Database engine for which script will be generated when using offline " "migration." msgstr "" "오프라인 마이그레이션을 사용할 때 스크립트가 생성될 데이터베이스 엔진입니다." msgid "" "Default IPv4 subnet pool to be used for automatic subnet CIDR allocation. " "Specifies by UUID the pool to be used in case where creation of a subnet is " "being called without a subnet pool ID. If not set then no pool will be used " "unless passed explicitly to the subnet create. If no pool is used, then a " "CIDR must be passed to create a subnet and that subnet will not be allocated " "from any pool; it will be considered part of the tenant's private address " "space. This option is deprecated for removal in the N release." msgstr "" "자동 서브넷 CIDR 할당에 사용할 기본 IPv4 서브넷 풀입니다. 서브넷 풀 ID 없이 " "서브넷 작성을 호출하는 경우 UUID별로 사용할 풀을 지정합니다.설정하지 않은 경" "우, 서브넷 작성에 명시적으로 전달하지 않으면 풀이 사용되지 않습니다. 풀을 사" "용하지 않으면 서브넷을 작성하기 위해 CIDR을 전달해야 하며 모든 풀에서 서브넷" "이 할당되지 않습니다. 서브넷은 테넌트의 개인 주소 공간의 일부로 간주됩니다. " "이 옵션은 N 릴리스에서 제거되기 위해 더 이상 사용되지 않습니다." msgid "" "Default IPv6 subnet pool to be used for automatic subnet CIDR allocation. " "Specifies by UUID the pool to be used in case where creation of a subnet is " "being called without a subnet pool ID. See the description for " "default_ipv4_subnet_pool for more information. This option is deprecated for " "removal in the N release." msgstr "" "자동 서브넷 CIDR 할당에 사용할 기본 IPv6 서브넷 풀입니다. 서브넷 풀 ID 없이 " "서브넷 작성이 호출되는 경우 UUID별로 풀을 지정합니다. 자세한 내용은 " "default_ipv4_subnet_pool의 설명을 참조하십시오. 이 옵션은 N 릴리스에서 제거" "를 위해 더 이상 사용되지 않습니다." msgid "Default driver to use for quota checks" msgstr "할당량 검사에 사용할 기본 드라이버" msgid "Default external networks must be shared to everyone." msgstr "기본 외부 네트워크를 모든 사용자와 공유해야 합니다." msgid "" "Default network type for external networks when no provider attributes are " "specified. By default it is None, which means that if provider attributes " "are not specified while creating external networks then they will have the " "same type as tenant networks. Allowed values for external_network_type " "config option depend on the network type values configured in type_drivers " "config option." msgstr "" "제공자 속성이 지정되지 않은 경우 외부 네트워크의 기본 네트워크 유형입니다. 기" "본적으로 이 유형은 None이며 이는 외부 네트워크를 작성하는 중에 제공자 속성이 " "지정되지 않은 경우 해당 제공자 속성은 테넌트 네트워크와 동일한 유형을 가진다" "는 것을 의미합니다. external_network_type 구성 옵션에 대해 허용되는 값은 " "type_drivers 구성 옵션에서 구성된 네트워크 유형 값에 따라 다릅니다. " msgid "" "Default number of RBAC entries allowed per tenant. A negative value means " "unlimited." msgstr "" "Tenant당 허용되는 기본 RBAC 항목 수입니다. 음수 값은 무제한을 의미합니다. " msgid "" "Default number of resource allowed per tenant. A negative value means " "unlimited." msgstr "Tenant당 허용되는 기본 자원 수입니다. 음수 값은 무제한을 의미합니다." msgid "Default security group" msgstr "기본 보안 그룹" msgid "Default security group already exists." 
msgstr "기본 보안 그룹이 이미 존재합니다. " msgid "" "Default value of availability zone hints. The availability zone aware " "schedulers use this when the resources availability_zone_hints is empty. " "Multiple availability zones can be specified by a comma separated string. " "This value can be empty. In this case, even if availability_zone_hints for a " "resource is empty, availability zone is considered for high availability " "while scheduling the resource." msgstr "" "가용 구역 힌트의 기본값입니다. 리소스 availability_zone_hints가 비어 있으면 " "가용 구역 인식 스케줄러에서 이 값을 사용합니다. 쉼표로 구분된 문자열을 사용하" "여 여러 가용 구역을 지정할 수 있습니다. 이 값은 비어있을 수 있습니다. 이 경" "우 자원의 availability_zone_hints가 비어 있어도 자원을 스케줄링하는 동안 고가" "용성을 위해 가용 구역을 고려합니다." msgid "" "Define the default value of enable_snat if not provided in " "external_gateway_info." msgstr "" "external_gateway_info에 제공되지 않은 경우 enable_snat의 기본값을 정의하십시" "오. " msgid "" "Defines providers for advanced services using the format: :" ":[:default]" msgstr "" "다음 형식을 사용하여 고급 서비스에 대한 제공자 정의: ::" "[:default]" msgid "" "Delay within which agent is expected to update existing ports whent it " "restarts" msgstr "" "에이전트를 다시 시작할 경우 에이전트가 기존 포트를 업데이트할 것으로 예상되" "는 지연 시간" msgid "Delete the namespace by removing all devices." msgstr "모든 디바이스를 제거하여 네임스페이스를 삭제하십시오. " #, python-format msgid "Deleting port %s" msgstr "포트 %s 삭제 중" #, python-format msgid "Deployment error: %(reason)s." msgstr "배포 오류: %(reason)s." msgid "Destroy IPsets even if there is an iptables reference." msgstr "IPtables 참조가 있는 경우에도 IPSet를 영구 삭제하십시오. " msgid "Destroy all IPsets." msgstr "모든 IPSet를 영구 삭제하십시오. " #, python-format msgid "Device %(dev_name)s in mapping: %(mapping)s not unique" msgstr "%(mapping)s 맵핑의 %(dev_name)s 디바이스가 고유하지 않음" #, python-format msgid "Device '%(device_name)s' does not exist." msgstr "'%(device_name)s' 장치가 없습니다." msgid "Device has no virtual functions" msgstr "디바이스에 가상 기능이 없음" #, python-format msgid "Device name %(dev_name)s is missing from physical_device_mappings" msgstr "physical_device_mappings에서 디바이스 이름 %(dev_name)s이(가) 누락됨" msgid "Device not found" msgstr "디바이스를 찾을 수 없음" #, python-format msgid "" "Distributed Virtual Router Mac Address for host %(host)s does not exist." msgstr "%(host)s 호스트의 분산 가상 라우터 Mac 주소가 없습니다." #, python-format msgid "Domain %(dns_domain)s not found in the external DNS service" msgstr "외부 DNS 서비스에서 도메인 %(dns_domain)s을(를) 찾을 수 없음" msgid "Domain to use for building the hostnames" msgstr "호스트 이름 빌드에 사용할 도메인" msgid "" "Domain to use for building the hostnames. This option is deprecated. It has " "been moved to neutron.conf as dns_domain. It will be removed in a future " "release." msgstr "" "호스트 이름 빌드에 사용할 도메인입니다. 이 옵션은 더 이상 사용되지 않습니다. " "이 도메인은 dns_domain으로 neutron.conf에 이동되었습니다. 향후 릴리스에서는 " "제거됩니다. " msgid "Downgrade no longer supported" msgstr "다운그레이드는 현재 지원하지 않음" #, python-format msgid "Driver %s is not unique across providers" msgstr "%s 드라이버가 제공자에서 고유하지 않음" msgid "Driver for external DNS integration." msgstr "외부 DNS 통합을 위한 드라이버." msgid "Driver for security groups firewall in the L2 agent" msgstr "L2 에이전트의 보안 그룹 방화벽에 대한 드라이버" msgid "Driver to use for scheduling network to DHCP agent" msgstr "DHCP 에이전트에 대한 네트워크 스케줄링에 사용할 드라이버" msgid "Driver to use for scheduling router to a default L3 agent" msgstr "기본 L3 에이전트에 대한 라우터 스케줄링에 사용할 드라이버" msgid "" "Driver used for ipv6 prefix delegation. This needs to be an entry point " "defined in the neutron.agent.linux.pd_drivers namespace. See setup.cfg for " "entry points included with the neutron source." msgstr "" "IPv6 prefix 위임에 사용되는 드라이버입니다. 
이는 neutron.agent.linux." "pd_drivers 네임스페이스에서 정의된 시작점이어야 합니다. neutron 소스와 함께 " "포함된 시작점은 setup.cfg를 참조하십시오. " msgid "Driver used for scheduling BGP speakers to BGP DrAgent" msgstr "BGP 스피커를 BGP DrAgent로 스케줄링하는 데 사용하는 드라이버" msgid "Drivers list to use to send the update notification" msgstr "업데이트 알림을 전송하기 위해 사용할 드라이버 목록" #, python-format msgid "Duplicate IP address '%s'" msgstr "중복 IP 주소 '%s'" #, python-format msgid "" "Duplicate L3HARouterAgentPortBinding is created for router(s) %(router)s. " "Database cannot be upgraded. Please, remove all duplicates before upgrading " "the database." msgstr "" "라우터 %(router)s의 중복 L3HARouterAgentPortBinding이 작성되었습니다. 데이터" "베이스를 업그레이드할 수 없습니다. 데이터베이스를 업그레이드하기 전에 모든 중" "복을 제거하십시오." msgid "Duplicate Metering Rule in POST." msgstr "POST에 중복 측정 규칙이 있음." msgid "Duplicate Security Group Rule in POST." msgstr "POST에 중복 보안 그룹 규칙이 있습니다. " msgid "Duplicate address detected" msgstr "중복 주소 발견" #, python-format msgid "Duplicate hostroute '%s'" msgstr "중복 호스트 라우트 '%s'" #, python-format msgid "Duplicate items in the list: '%s'" msgstr "목록의 중복 항목: '%s'" #, python-format msgid "Duplicate nameserver '%s'" msgstr "중복 이름 서버 '%s'" msgid "Duplicate segment entry in request." msgstr "요청에 중복되는 세그먼트 항목이 있음." #, python-format msgid "ERROR: %s" msgstr "오류: %s" msgid "" "ERROR: Unable to find configuration file via the default search paths (~/." "neutron/, ~/, /etc/neutron/, /etc/) and the '--config-file' option!" msgstr "" "오류: 기본 검색 경로(~/.neutron/, ~/, /etc/neutron/, /etc/) 및 '--config-" "file' 옵션을 통해 구성 파일을 찾을 수 없습니다!" msgid "" "Either one of parameter network_id or router_id must be passed to _get_ports " "method." msgstr "" "매개변수 network_id 및 router_id 중 하나를 _get_ports 메소드에 전달해야 합니" "다." msgid "Either subnet_id or port_id must be specified" msgstr "subnet_id 또는 port_id 중 하나를 지정해야 함" msgid "Empty physical network name." msgstr "실제 네트워크 이름이 비어 있습니다." msgid "Empty subnet pool prefix list." msgstr "서브넷 풀 접두부 목록이 비어 있습니다." msgid "Enable FWaaS" msgstr "FWaaS 사용" msgid "Enable HA mode for virtual routers." msgstr "가상 라우터에 대해 HA 모드를 사용합니다." msgid "Enable SSL on the API server" msgstr "API 서버에서 SSL 연결 활성화" msgid "" "Enable VXLAN on the agent. Can be enabled when agent is managed by ml2 " "plugin using linuxbridge mechanism driver" msgstr "" "에이전트에서 VXLAN을 사용 가능하게 설정하십시오. linuxbridge 메커니즘 드라이" "버를 사용하여 ml2 플러그인이 에이전트를 관리할 경우 사용할 수 있습니다." msgid "" "Enable local ARP responder if it is supported. Requires OVS 2.1 and ML2 " "l2population driver. Allows the switch (when supporting an overlay) to " "respond to an ARP request locally without performing a costly ARP broadcast " "into the overlay." msgstr "" "로컬 ARP 응답기가 지원되는 경우 이를 사용합니다. OVS 2.1 및 ML2 l2population " "드라이버가 필요합니다. 스위치(오버레이를 지원하는 경우)가 오버레이로 비용이 " "많이 드는 ARP 브로드캐스트를 수행하지 않고 로컬로 ARP 요청에 응답할 수 있도" "록 합니다." msgid "" "Enable local ARP responder which provides local responses instead of " "performing ARP broadcast into the overlay. Enabling local ARP responder is " "not fully compatible with the allowed-address-pairs extension." msgstr "" "오버레이에 ARP 브로드캐스트를 수행하는 대신 로컬 응답을 제공하는 로컬 ARP 응" "답기를 사용하십시오. 로컬 ARP 응답기를 사용하는 기능은 허용된 주소 쌍 확장과 " "완벽하게 호환되지 않습니다." msgid "" "Enable services on an agent with admin_state_up False. If this option is " "False, when admin_state_up of an agent is turned False, services on it will " "be disabled. Agents with admin_state_up False are not selected for automatic " "scheduling regardless of this option. 
But manual scheduling to such agents " "is available if this option is True." msgstr "" "admin_state_up False인 에이전트의 서비스 사용. 이 옵션이 False이면 에이전트" "의 admin_state_up이 False가 될 때 해당 서비스가 사용 안함으로 설정됩니다. " "admin_state_up False인 에이전트는 이 옵션과 관계 없이 자동 스케줄링에 사용하" "도록 선택하지 않습니다. 그러나 이 옵션이 True이면 이러한 에이전트에 수동 스케" "줄링을 사용할 수 있습니다." msgid "" "Enable suppression of ARP responses that don't match an IP address that " "belongs to the port from which they originate. Note: This prevents the VMs " "attached to this agent from spoofing, it doesn't protect them from other " "devices which have the capability to spoof (e.g. bare metal or VMs attached " "to agents without this flag set to True). Spoofing rules will not be added " "to any ports that have port security disabled. For LinuxBridge, this " "requires ebtables. For OVS, it requires a version that supports matching ARP " "headers. This option will be removed in Newton so the only way to disable " "protection will be via the port security extension." msgstr "" "시작되는 포트에 속하는 IP 주소와 일치하지 않는 ARP 응답의 억제를 사용으로 설" "정하십시오. 참고: 이는 이 에이전트에 연결된 VM의 위조를 방지하지만 위조 기능" "을 가진 다른 디바이스(예: 이 플래그가 True로 설정되지 않는 에이전트에 연결된 " "베어메탈 또는 VM)로부터 보호하지는 않습니다. 위조 규칙은 포트 보안이 사용 안" "함으로 설정된 포트에는 추가되지 않습니다. LinuxBridge의 경우에는 ebtables가 " "필요합니다. OVS의 경우에는 일치하는 ARP 헤더를 지원하는 버전이 필요합니다. " "이 옵션은 Newton에서 제거되므로, 포트 보안 확장을 통해서만 보호를 사용하지 않" "게 설정할 수 있습니다." msgid "" "Enable/Disable log watch by metadata proxy. It should be disabled when " "metadata_proxy_user/group is not allowed to read/write its log file and " "copytruncate logrotate option must be used if logrotate is enabled on " "metadata proxy log files. Option default value is deduced from " "metadata_proxy_user: watch log is enabled if metadata_proxy_user is agent " "effective user id/name." msgstr "" "메타데이터 프록시별로 로그 감시 사용/사용 안함. metadata_proxy_user/group에" "서 해당 로그 파일을 읽거나 쓸 수 없는 경우 사용 안함으로 설정해야 하며 메타데" "이터 프록시 로그 파일에서 logrotate를 사용하는 경우 copytruncate logrotate 옵" "션을 사용해야 합니다. 옵션 기본값은 metadata_proxy_user에서 도출된 값입니다. " "감시 로그는 metadata_proxy_user가 에이전트 유효 사용자 ID/이름인 경우 사용 설" "정됩니다." msgid "" "Enables IPv6 Prefix Delegation for automatic subnet CIDR allocation. Set to " "True to enable IPv6 Prefix Delegation for subnet allocation in a PD-capable " "environment. Users making subnet creation requests for IPv6 subnets without " "providing a CIDR or subnetpool ID will be given a CIDR via the Prefix " "Delegation mechanism. Note that enabling PD will override the behavior of " "the default IPv6 subnetpool." msgstr "" "자동 서브넷 CIDR 할당을 위해 IPv6 접두어 위임을 사용합니다. PD 가능 환경에서 " "서브넷 할당을 위해 IPv6 접두어 위임을 사용하려면 True로 설정합니다. 사용자가 " "CIDR 또는 subnetpool ID를 제공하지 않고 IPv6 서브넷의 서브넷 작성을 요청하면 " "접두어 위임 메커니즘을 통해 CIDR이 제공됩니다. PD를 사용하면 기본 IPv6 " "subnetpool의 동작이 재정의됩니다." msgid "" "Enables the dnsmasq service to provide name resolution for instances via DNS " "resolvers on the host running the DHCP agent. Effectively removes the '--no-" "resolv' option from the dnsmasq process arguments. Adding custom DNS " "resolvers to the 'dnsmasq_dns_servers' option disables this feature." msgstr "" "DHCP 에이전트를 실행 중인 호스트에서 DNS 분석기를 통해 인스턴스의 이름 분석" "을 제공하는 dnsmasq 서비스를 사용합니다. dnsmasq 프로세스 인수에서 '--no-" "resolv' 옵션을 효과적으로 제거합니다. 사용자 정의 DNS 분석기를 " "'dnsmasq_dns_servers' 옵션에 추가하면 이 기능이 사용되지 않습니다." msgid "Encountered an empty component." msgstr "비어 있는 컴포넌트가 발생했습니다." msgid "End of VLAN range is less than start of VLAN range" msgstr "VLAN 범위의 끝이 VLAN 범위의 시작보다 작습니다. 
" msgid "End of tunnel range is less than start of tunnel range" msgstr "터널 범위의 끝이 터널 범위의 시작보다 작음" msgid "Enforce using split branches file structure." msgstr "분할 분기 파일 구조 사용을 적용하십시오. " msgid "" "Ensure that configured gateway is on subnet. For IPv6, validate only if " "gateway is not a link local address. Deprecated, to be removed during the " "Newton release, at which point the gateway will not be forced on to subnet." msgstr "" "구성된 게이트웨이가 서브넷에 있는지 확인합니다. IPv6의 경우, 게이트웨이가 링" "크 로컬 주소가 아닌 경우에만 유효성 검증을 수행합니다. 더 이상 사용되지 않고 " "Newton 릴리스 중에 제거되므로, 서브넷에서 게이트웨이가 적용되지 않습니다." #, python-format msgid "Error %(reason)s while attempting the operation." msgstr "조작 시도 중 오류 %(reason)s이(가) 발생했습니다. " #, python-format msgid "Error importing FWaaS device driver: %s" msgstr "FWaaS 디바이스 드라이버를 가져오는 중에 오류 발생: %s" #, python-format msgid "Error parsing dns address %s" msgstr "DNS 주소 %s 구문 분석 오류" #, python-format msgid "Error while reading %s" msgstr "%s을(를) 읽는 중에 오류 발생" #, python-format msgid "" "Exceeded %s second limit waiting for address to leave the tentative state." msgstr "" "주소가 임시 상태를 벗어날 때까지 대기하는 동안 %s초 제한이 초과되었습니다." msgid "Exceeded maximum amount of fixed ips per port." msgstr "포트당 최대 Fixed IP 수를 초과했습니다." msgid "Existing prefixes must be a subset of the new prefixes" msgstr "기존 접두부는 새 접두부의 서브넷이어야 함" #, python-format msgid "" "Exit code: %(returncode)d; Stdin: %(stdin)s; Stdout: %(stdout)s; Stderr: " "%(stderr)s" msgstr "" "Exit code: %(returncode)d; Stdin: %(stdin)s; Stdout: %(stdout)s; Stderr: " "%(stderr)s" #, python-format msgid "Extension %(driver)s failed." msgstr "확장 %(driver)s이(가) 실패했습니다. " #, python-format msgid "" "Extension driver %(driver)s required for service plugin %(service_plugin)s " "not found." msgstr "" "서비스 플러그인 %(service_plugin)s에 필요한 확장 드라이버 %(driver)s을(를) 찾" "을 수 없음" msgid "" "Extension to use alongside ml2 plugin's l2population mechanism driver. It " "enables the plugin to populate VXLAN forwarding table." msgstr "" "ml2 플러그인의 l2population 메커니즘 드라이버와 함께 사용할 확장기능. 이를 통" "해플러그인이 VXLAN 전달 테이블을 채울 수 있습니다." #, python-format msgid "Extension with alias %s does not exist" msgstr "별명이 %s인 확장이 존재하지 않음" msgid "Extensions list to use" msgstr "사용할 확장 목록" #, python-format msgid "Extensions not found: %(extensions)s." msgstr "확장기능을 찾을 수 없음: %(extensions)s." #, python-format msgid "External DNS driver %(driver)s could not be found." msgstr "외부 DNS 드라이버 %(driver)s을(를) 찾을 수 없습니다." #, python-format msgid "External IP %s is the same as the gateway IP" msgstr "외부 IP %s이(가) 게이트웨이 IP와 같음" #, python-format msgid "" "External network %(external_network_id)s is not reachable from subnet " "%(subnet_id)s. Therefore, cannot associate Port %(port_id)s with a Floating " "IP." msgstr "" "서브넷 %(subnet_id)s에서 외부 네트워크 %(external_network_id)s에 도달할 수 없" "습니다. 따라서 포트 %(port_id)s을(를) floating IP와 연관시킬 수 없습니다. " #, python-format msgid "" "External network %(net_id)s cannot be updated to be made non-external, since " "it has existing gateway ports" msgstr "" "기존 게이트웨이 포트가 있어서 기존 네트워크 %(net_id)s이(가) 비외부 상태가 되" "도록 업데이트할 수 없습니다. " #, python-format msgid "ExtraDhcpOpt %(id)s could not be found" msgstr "ExtraDhcpOpt %(id)s을(를) 찾을 수 없음" msgid "" "FWaaS plugin is configured in the server side, but FWaaS is disabled in L3-" "agent." msgstr "" "서버측에 FWaaS 플러그인이 구성되어 있지만 L3-agent에서 FWaaS가 사용되지 않습" "니다." #, python-format msgid "Failed rescheduling router %(router_id)s: no eligible l3 agent found." msgstr "" "%(router_id)s 라우터를 다시 스케줄하지 못함: 적합한 l3 에이전트를 찾을 수 없" "습니다." 
#, python-format msgid "Failed scheduling router %(router_id)s to the L3 Agent %(agent_id)s." msgstr "" "L3 에이전트 %(agent_id)s에 대한 %(router_id)s 라우터를 스케줄링하지 못했습니" "다. " #, python-format msgid "Failed to add interfaces: %(ifaces)s" msgstr "인터페이스 추가 실패: %(ifaces)s" #, python-format msgid "" "Failed to allocate a VRID in the network %(network_id)s for the router " "%(router_id)s after %(max_tries)s tries." msgstr "" "%(max_tries)s 번 시도한 후에 %(router_id)s 라우터의 %(network_id)s 네트워크에" "서 VRID를 할당하는 데 실패했습니다." #, python-format msgid "Failed to allocate subnet: %(reason)s." msgstr "서브넷 할당 실패: %(reason)s." msgid "" "Failed to associate address scope: subnetpools within an address scope must " "have unique prefixes." msgstr "" "주소 범위를 연관시키는 데 실패: 주소 범위의 subnetpool에는 고유한 prefix가 있" "어야 합니다." #, python-format msgid "Failed to check policy %(policy)s because %(reason)s." msgstr "%(reason)s 때문에 정책 %(policy)s을(를) 확인하지 못했습니다. " #, python-format msgid "" "Failed to create a duplicate %(object_type)s: for attribute(s) " "%(attributes)s with value(s) %(values)s" msgstr "" "값이 %(values)s인 속성 %(attributes)s에 대해 중복 %(object_type)s 작성 실패" #, python-format msgid "" "Failed to create port on network %(network_id)s, because fixed_ips included " "invalid subnet %(subnet_id)s" msgstr "" "fixed_ips에 올바르지 않은 서브넷 %(subnet_id)s이(가) 포함되어 있어서 네트워" "크 %(network_id)s에서 포트를 작성하지 못했습니다. " #, python-format msgid "Failed to init policy %(policy)s because %(reason)s." msgstr "%(reason)s 때문에 정책 %(policy)s을(를) 초기화하지 못했습니다. " #, python-format msgid "Failed to locate source for %s." msgstr "%s에 대한 소스를 찾지 못했습니다. " #, python-format msgid "Failed to parse request. Parameter '%s' not specified" msgstr "요청을 구문 분석하지 못했습니다. '%s' 매개변수가 지정되지 않았음" #, python-format msgid "Failed to parse request. Required attribute '%s' not specified" msgstr "요청을 구문 분석하지 못했습니다. 필수 속성 '%s'이(가) 지정되지 않음" msgid "Failed to remove supplemental groups" msgstr "보조 그룹을 제거하지 못함" #, python-format msgid "Failed to set gid %s" msgstr "GID %s을(를) 설정하지 못함" #, python-format msgid "Failed to set uid %s" msgstr "uid %s을(를) 설정하지 못함" #, python-format msgid "Failed to set-up %(type)s tunnel port to %(ip)s" msgstr "%(type)s 터널 포트를 %(ip)s(으)로 설정하지 못함" msgid "Failure applying iptables rules" msgstr "Iptables 규칙 적용 실패" #, python-format msgid "Failure waiting for address %(address)s to become ready: %(reason)s" msgstr "주소 %(address)s이(가) 준비될 때까지 기다리는 데 실패함: %(reason)s" msgid "Flat provider networks are disabled" msgstr "플랫 제공자 네트워크가 사용되지 않음" #, python-format msgid "Flavor %(flavor_id)s could not be found." msgstr "%(flavor_id)s 플레이버를 찾을 수 없습니다. " #, python-format msgid "Flavor %(flavor_id)s is used by some service instance." msgstr "일부 서비스 인스턴스에서 Flavor %(flavor_id)s을(를) 사용합니다." msgid "Flavor is not enabled." msgstr "Flavor가 사용되지 않습니다." #, python-format msgid "Floating IP %(floatingip_id)s could not be found" msgstr "%(floatingip_id)s floating IP를 찾을 수 없음" #, python-format msgid "" "Floating IP %(floatingip_id)s is associated with non-IPv4 address " "%s(internal_ip)s and therefore cannot be bound." msgstr "" "Floating IP %(floatingip_id)s이(가) 비IPv4 주소 %s(internal_ip)s과(와) 연관되" "어 있으므로 바인드할 수 없습니다. 
" msgid "For TCP/UDP protocols, port_range_min must be <= port_range_max" msgstr "TCP/UDP 프로토콜의 경우 port_range_min은 port_range_max 이하여야 함" #, python-format msgid "For class %(object_type)s missing primary keys: %(missing_keys)s" msgstr "%(object_type)s 클래스의 누락된 기본 키: %(missing_keys)s" msgid "Force ip_lib calls to use the root helper" msgstr "루트 헬퍼를 사용하는 ip_lib 호출을 강제합니다" #, python-format msgid "Found duplicate extension: %(alias)s." msgstr "중복 확장 발견: %(alias)s." #, python-format msgid "" "Found overlapping allocation pools: %(pool_1)s %(pool_2)s for subnet " "%(subnet_cidr)s." msgstr "겹치는 할당 풀 발견: 서브넷 %(subnet_cidr)s의 %(pool_1)s %(pool_2)s." msgid "Gateway IP version inconsistent with allocation pool version" msgstr "게이트웨이 IP 버전이 할당 풀 버전과 일치하지 않음" #, python-format msgid "" "Gateway cannot be updated for router %(router_id)s, since a gateway to " "external network %(net_id)s is required by one or more floating IPs." msgstr "" "외부 네트워크 %(net_id)s에 대한 게이트웨이가 하나 이상의 floating IP에서 필요" "로 하기 때문에 라우터 %(router_id)s에 대한 게이트웨이를 업데이트할 수 없습니" "다. " #, python-format msgid "Gateway ip %(ip_address)s conflicts with allocation pool %(pool)s." msgstr "게이트웨이 IP %(ip_address)s이(가) 할당 풀 %(pool)s과(와) 충돌합니다. " msgid "Gateway is not valid on subnet" msgstr "게이트웨이가 서브넷에서 올바르지 않음" msgid "" "Geneve encapsulation header size is dynamic, this value is used to calculate " "the maximum MTU for the driver. This is the sum of the sizes of the outer " "ETH + IP + UDP + GENEVE header sizes. The default size for this field is 50, " "which is the size of the Geneve header without any additional option headers." msgstr "" "Geneve 캡슐화 헤더 크기가 동적입니다. 이 값은 드라이버의 최대 MTU를 계산하는 " "데 사용합니다. 이 값은 외부 ETH + IP + UDP + GENEVE 헤더 크기의 합계입니다. " "이 필드이 기본 크기는 50으로서, 추가 옵션 헤더가 없는 Geneve 헤더의 크기입니" "다." msgid "Group (gid or name) running metadata proxy after its initialization" msgstr "초기화 후에 메타데이터 프록시를 실행하는 그룹(gid 또는 이름)" msgid "" "Group (gid or name) running metadata proxy after its initialization (if " "empty: agent effective group)." msgstr "" "초기화 후에 메타데이터 프록시를 실행하는 그룹(gid 또는 이름)(비어 있는 경우: " "에이전트 유효 그룹)." msgid "Group (gid or name) running this process after its initialization" msgstr "초기화 이후 이 프로세스를 실행하는 그룹(gid 이름)" #, python-format msgid "HEAD file does not match migration timeline head, expected: %s" msgstr "HEAD 파일이 마이그레이션 타임라인 헤드와 일치하지 않음, 예상값: %s" msgid "" "Hostname to be used by the Neutron server, agents and services running on " "this machine. All the agents and services running on this machine must use " "the same host value." msgstr "" "이 시스템에서 실행 중인 neutron 서버, 에이전트 및 서비스에서 사용할 호스트 이" "름입니다. 이 시스템에서 실행 중인 모든 에이전트 및 서비스는 같은 호스트 값" "을 사용해야 합니다." msgid "How many times Neutron will retry MAC generation" msgstr "Neutron이 MAC 생성을 재시도할 횟수" #, python-format msgid "" "ICMP code (port-range-max) %(value)s is provided but ICMP type (port-range-" "min) is missing." msgstr "" "ICMP 코드 (port-range-max) %(value)s이(가) 제공되지만 ICMP 유형(port-range-" "min)이 누락되었습니다." 
msgid "ID of network" msgstr "네트워크 ID" msgid "ID of network to probe" msgstr "프로브할 네트워크의 ID" msgid "ID of probe port to delete" msgstr "삭제할 프로브 포트의 ID" msgid "ID of probe port to execute command" msgstr "명령을 실행할 프로브 포트의 ID" msgid "ID of the router" msgstr "라우터 ID" #, python-format msgid "IP address %(ip)s already allocated in subnet %(subnet_id)s" msgstr "IP 주소 %(ip)s이(가) 이미 서브넷 %(subnet_id)s에서 할당되어 있음" #, python-format msgid "IP address %(ip)s does not belong to subnet %(subnet_id)s" msgstr "IP 주소 %(ip)s이(가) 서브넷 %(subnet_id)s에 속하지 않음" #, python-format msgid "" "IP address %(ip_address)s is not a valid IP for any of the subnets on the " "specified network." msgstr "" "IP 주소 %(ip_address)s이(가) 지정된 네트워크의 서브넷에 대해 올바른 IP가아닙" "니다. " msgid "IP address used by Nova metadata server." msgstr "Nova 메타데이터 서버가 사용한 IP 주소입니다. " msgid "IP allocation failed. Try again later." msgstr "IP 할당에 실패했습니다. 나중에 다시 시도하십시오." msgid "IP allocation requires subnet_id or ip_address" msgstr "IP 할당은 subnet_id 또는 ip_address가 필요함" #, python-format msgid "" "IPTablesManager.apply failed to apply the following set of iptables rules:\n" "%s" msgstr "" "IPTablesManager.apply가 다음 iptables 규칙 세트를 적용하지 못함:\n" "%s" msgid "IPtables conntrack zones exhausted, iptables rules cannot be applied." msgstr "" "IPtables conntrack 구역이 소진되었습니다. IPtables 규칙을 적용할 수 없습니" "다. " msgid "IPv6 Address Mode must be SLAAC or Stateless for Prefix Delegation." msgstr "IPv6 주소 모드는 prefix 위임에 대해 Stateless 또는 SLAAC여야 합니다. " msgid "IPv6 RA Mode must be SLAAC or Stateless for Prefix Delegation." msgstr "IPv6 RA 모드는 prefix 위임에 대해 Stateless 또는 SLAAC여야 합니다. " #, python-format msgid "" "IPv6 address %(address)s can not be directly assigned to a port on subnet " "%(id)s since the subnet is configured for automatic addresses" msgstr "" "서브넷을 자동 주소용으로 구성했으므로 IPv6 주소 %(address)s은(는) 서브넷 " "%(id)s의 포트에 직접 지정할 수 없습니다." #, python-format msgid "" "IPv6 address %(ip)s cannot be directly assigned to a port on subnet " "%(subnet_id)s as the subnet is configured for automatic addresses" msgstr "" "서브넷이 자동 주소에 대해 구성되어 있기 때문에 IPv6 주소 %(ip)s을(를) 서브넷 " "%(subnet_id)s의 포트에 직접 지정할 수 없음" #, python-format msgid "" "IPv6 subnet %s configured to receive RAs from an external router cannot be " "added to Neutron Router." msgstr "" "외부 라우터에서 RA를 수신하도록 구성된 IPv6 서브넷 %s을(를) Neutron 라우터에 " "추가할 수 없습니다." msgid "" "If True, advertise network MTU values if core plugin calculates them. MTU is " "advertised to running instances via DHCP and RA MTU options." msgstr "" "True인 경우 핵심 플러그인이 네트워크 MTU 값을 계산하면 이 값을 광고합니다. " "MTU는 DHCP와 RA MTU 옵션을 통해 실행 중인 인스턴스에 광고됩니다." msgid "" "If True, then allow plugins that support it to create VLAN transparent " "networks." msgstr "" "True인 경우 이를 지원하는 플러그인을 사용하여 VLAN 투명 네트워크를 작성할 수 " "있습니다." msgid "" "If non-empty, the l3 agent can only configure a router that has the matching " "router ID." msgstr "" "비어있지 않으면 l3 에이전트가 일치하는 라우터 ID가 있는 라우터만 구성할 수 있" "습니다." msgid "Illegal IP version number" msgstr "올바르지 않은 IP 버전 번호" #, python-format msgid "" "Illegal prefix bounds: %(prefix_type)s=%(prefixlen)s, %(base_prefix_type)s=" "%(base_prefixlen)s." msgstr "" "잘못된 prefix 바운드: %(prefix_type)s=%(prefixlen)s, %(base_prefix_type)s=" "%(base_prefixlen)s." #, python-format msgid "" "Illegal subnetpool association: subnetpool %(subnetpool_id)s cannot " "associate with address scope %(address_scope_id)s because subnetpool " "ip_version is not %(ip_version)s." 
msgstr "" "잘못된 subnetpool 연관: subnetpool ip_version이 %(ip_version)s이(가) 아니므" "로 subnetpool %(subnetpool_id)s을(를) 주소 범위 %(address_scope_id)s과(와) 연" "관시킬 수 없습니다." #, python-format msgid "" "Illegal subnetpool association: subnetpool %(subnetpool_id)s cannot be " "associated with address scope %(address_scope_id)s." msgstr "" "잘못된 subnetpool 연관: subnetpool %(subnetpool_id)s을(를) 주소 범위 " "%(address_scope_id)s과(와) 연관시킬 수 없습니다." #, python-format msgid "Illegal subnetpool update : %(reason)s." msgstr "잘못된 subnetpool 업데이트 : %(reason)s." #, python-format msgid "Illegal update to prefixes: %(msg)s." msgstr "잘못된 prefix 업데이트: %(msg)s." msgid "" "In some cases the Neutron router is not present to provide the metadata IP " "but the DHCP server can be used to provide this info. Setting this value " "will force the DHCP server to append specific host routes to the DHCP " "request. If this option is set, then the metadata service will be activated " "for all the networks." msgstr "" "경우에 따라 메타데이터 IP를 제공하는 Neutron 라우터는 없지만 DHCP 서버를 사용" "하여 이 정보를 제공할 수 있습니다. 이 값을 설정하면 DHCP 서버가 특정 호스트 " "경로를 DHCP 요청에 강제로 추가합니다. 이 옵션이 설정되면 모든 네트워크의 메타" "데이터 서비스가 활성화됩니다." #, python-format msgid "Incorrect pci_vendor_info: \"%s\", should be pair vendor_id:product_id" msgstr "" "올바르지 않은 pci_vendor_info: \"%s\", vendor_id:product_id와 쌍을 이루어야 " "함" msgid "" "Indicates that this L3 agent should also handle routers that do not have an " "external network gateway configured. This option should be True only for a " "single agent in a Neutron deployment, and may be False for all agents if all " "routers must have an external network gateway." msgstr "" "이 L3 에이전트에서 외부 네트워크 게이트웨이가 구성되지 않은 라우터도 처리해" "야 함을 나타냅니다. 이 옵션은 Neutron 배포의 단일 에이전트에만 True여야 하" "며, 라우터에 외부 네트워크 게이트웨이가 있어야 하는 경우에는 모든 에이전트에 " "False일 수 있습니다." #, python-format msgid "Instance of class %(module)s.%(class)s must contain _cache attribute" msgstr "" "클래스 %(module)s.%(class)s의 인스턴스에 _cache 속성이 포함되어야 합니다." #, python-format msgid "Insufficient prefix space to allocate subnet size /%s" msgstr "Prefix 공간이 부족하여 서브넷 크기 /%s을(를) 할당할 수 없음" msgid "Insufficient rights for removing default security group." msgstr "기본 보안 그룹을 제거할 수 있는 권한이 없습니다." msgid "" "Integration bridge to use. Do not change this parameter unless you have a " "good reason to. This is the name of the OVS integration bridge. There is one " "per hypervisor. The integration bridge acts as a virtual 'patch bay'. All VM " "VIFs are attached to this bridge and then 'patched' according to their " "network connectivity." msgstr "" "사용할 통합 브릿지입니다. 합당한 이유가 없으면 이 매개변수를 변경하지 마십시" "오. 이 매개변수는 OVS 통합 브릿지의 이름입니다. 하이퍼바이저당 한 개가 있습니" "다. 통합 브릿지는 가상 '패치 베이'의 역할을 수행합니다. 모든 VM VIF가 이 브릿" "지에 연결된 다음 네트워크 연결성에 따라 \"패치\"됩니다." msgid "Interface to monitor" msgstr "모니터할 인터페이스" msgid "" "Interval between checks of child process liveness (seconds), use 0 to disable" msgstr "" "하위 프로세스 활동 확인 간격(초), 사용 안함으로 설정하려면 0을 지정하십시오." msgid "Interval between two metering measures" msgstr "2개의 측정 조치 간의 간격" msgid "Interval between two metering reports" msgstr "2개의 측정 보고서 간의 간격" #, python-format msgid "Invalid CIDR %(input)s given as IP prefix." msgstr "IP 접두부로 지정된 CIDR %(input)s이(가) 올바르지 않습니다." #, python-format msgid "" "Invalid CIDR %s for IPv6 address mode. OpenStack uses the EUI-64 address " "format, which requires the prefix to be /64." msgstr "" "CIDR %s은(는) IPv6 주소 모드에 올바르지 않습니다. OpenStack에서는 접두부 /64" "를 사용하는 EUI-64 주소 형식을 사용합니다." 
#, python-format msgid "Invalid Device %(dev_name)s: %(reason)s" msgstr "올바르지 않은 디바이스 %(dev_name)s: %(reason)s" #, python-format msgid "" "Invalid action '%(action)s' for object type '%(object_type)s'. Valid " "actions: %(valid_actions)s" msgstr "" "오브젝트 유형 '%(object_type)s'에 대한 조치 '%(action)s'이(가) 올바르지 않습" "니다. 올바른 조치: %(valid_actions)s" #, python-format msgid "" "Invalid authentication type: %(auth_type)s, valid types are: " "%(valid_auth_types)s" msgstr "" "올바르지 않은 인증 유형임: %(auth_type)s, 올바른 유형은 다음과 같음: " "%(valid_auth_types)s" #, python-format msgid "Invalid content type %(content_type)s." msgstr "올바르지 않은 컨텐츠 유형 %(content_type)s." #, python-format msgid "Invalid data format for IP pool: '%s'" msgstr "IP 풀에 대한 올바르지 않은 데이터 형식: '%s'" #, python-format msgid "Invalid data format for extra-dhcp-opt: %(data)s" msgstr "extra-dhcp-opt의 올바르지 않은 데이터 형식: %(data)s" #, python-format msgid "Invalid data format for fixed IP: '%s'" msgstr "고정 IP에 대한 올바르지 않은 데이터 형식: '%s'" #, python-format msgid "Invalid data format for hostroute: '%s'" msgstr "호스트 라우트에 대한 올바르지 않은 데이터 형식: '%s'" #, python-format msgid "Invalid data format for nameserver: '%s'" msgstr "이름 서버에 대한 올바르지 않은 데이터 형식: '%s'" #, python-format msgid "Invalid ethertype %(ethertype)s for protocol %(protocol)s." msgstr "" "프로토콜 %(protocol)s의 ethertype %(ethertype)s이(가) 올바르지 않습니다." #, python-format msgid "Invalid extension environment: %(reason)s." msgstr "올바르지 않은 확장 환경: %(reason)s." #, python-format msgid "Invalid format for routes: %(routes)s, %(reason)s" msgstr "라우터의 형식이 올바르지 않음: %(routes)s, %(reason)s" #, python-format msgid "Invalid format: %s" msgstr "올바르지 않은 형식: %s" #, python-format msgid "Invalid input for %(attr)s. Reason: %(reason)s." msgstr "%(attr)s에 대한 올바르지 않은 입력입니다. 이유: %(reason)s." #, python-format msgid "" "Invalid input. '%(target_dict)s' must be a dictionary with keys: " "%(expected_keys)s" msgstr "" "올바르지 않은 입력. '%(target_dict)s'은(는) %(expected_keys)s 키가 있는 사전" "이어야 함" #, python-format msgid "Invalid instance state: %(state)s, valid states are: %(valid_states)s" msgstr "" "올바르지 않은 인스턴스 상태: %(state)s, 올바른 상태는 %(valid_states)s임" #, python-format msgid "Invalid mapping: '%s'" msgstr "올바르지 않은 맵핑: '%s'" #, python-format msgid "Invalid network VLAN range: '%(vlan_range)s' - '%(error)s'." msgstr "올바르지 않은 네트워크 VLAN 범위: '%(vlan_range)s' - '%(error)s'." #, python-format msgid "Invalid network VXLAN port range: '%(vxlan_range)s'." msgstr "올바르지 않은 네트워크 VXLAN 포트 범위: '%(vxlan_range)s'." #, python-format msgid "Invalid pci slot %(pci_slot)s" msgstr "올바르지 않은 pci 슬롯 %(pci_slot)s" #, python-format msgid "Invalid provider format. Last part should be 'default' or empty: %s" msgstr "" "올바르지 않은 제공자 형식. 마지막 부분이 '기본값'이거나 비어 있어야 함: %s" #, python-format msgid "Invalid resource type %(resource_type)s" msgstr "올바르지 않은 자원 유형 %(resource_type)s" #, python-format msgid "Invalid route: %s" msgstr "올바르지 않은 라우트: %s" msgid "Invalid service provider format" msgstr "올바르지 않은 서비스 제공자 형식" #, python-format msgid "Invalid service type %(service_type)s." msgstr "올바르지 않은 서비스 유형 %(service_type)s입니다." #, python-format msgid "" "Invalid value for ICMP %(field)s (%(attr)s) %(value)s. It must be 0 to 255." msgstr "" "ICMP %(field)s (%(attr)s) %(value)s의 값이 올바르지 않음. 이 값은 0에서 255 " "사이여야 합니다. " #, python-format msgid "Invalid value for port %(port)s" msgstr "%(port)s 포트에 대한 올바르지 않은 값" msgid "" "Iptables mangle mark used to mark ingress from external network. This mark " "will be masked with 0xffff so that only the lower 16 bits will be used." 
msgstr "" "외부 네트워크의 입구를 표시하는 데 사용되는 Iptables mangle 표시입니다. 이 표" "시는 하위 16비트만 사용되도록 0xffff로 마스크됩니다. " msgid "" "Iptables mangle mark used to mark metadata valid requests. This mark will be " "masked with 0xffff so that only the lower 16 bits will be used." msgstr "" "메타데이터 올바른 요청을 표시하는 데 사용되는 Iptables mangle 표시입니다. 이 " "표시는 하위 16비트만 사용되도록 0xffff로 마스크됩니다. " msgid "" "Keep in track in the database of current resourcequota usage. Plugins which " "do not leverage the neutron database should set this flag to False" msgstr "" "현재 자원 할당량 사용량의 데이터베이스를 추적하십시오. Neutron 데이터베이스" "를 활용하지 않는 플러그인은 이 플래그를 False로 설정해야 합니다. " msgid "Keepalived didn't respawn" msgstr "유휴되면 다시 파생되지 않음" msgid "Keepalived didn't spawn" msgstr "Keepalive가 파생되지 않음" #, python-format msgid "" "Kernel HZ value %(value)s is not valid. This value must be greater than 0." msgstr "" "커널 HZ 값 %(value)s이(가) 올바르지 않습니다. 이 값은 0보다 커야 합니다." #, python-format msgid "Key %(key)s in mapping: '%(mapping)s' not unique" msgstr "'%(mapping)s' 맵핑의 %(key)s 키가 고유하지 않음" msgid "L3 agent failure to setup NAT for floating IPs" msgstr "L3 에이전트에서 Floating IP에대한 NAT 설정 실패" msgid "L3 agent failure to setup floating IPs" msgstr "L3 에이전트에서 Floating IP 설정 실패" #, python-format msgid "Limit must be an integer 0 or greater and not '%d'" msgstr "한계는 정수 0이상 및 '%d'이(가) 아닌 수여야 함" msgid "Limit number of leases to prevent a denial-of-service." msgstr "서비스 거부(DoS)를 막기 위해 리스 수를 제한합니다." msgid "List of :" msgstr ":의 목록" msgid "" "List of :: or " "specifying physical_network names usable for VLAN provider and tenant " "networks, as well as ranges of VLAN tags on each available for allocation to " "tenant networks." msgstr "" "Tenant 네트워크에 대한 할당에 사용할 수 있는 각 VLAN 태그의 범위 및VLAN 제공" "자와 tenant 네트워크에 사용할 수 있는 실제 네트워크 이름을 지정하는 " ":: 또는 의 목록입니" "다." msgid "" "List of network type driver entrypoints to be loaded from the neutron.ml2." "type_drivers namespace." msgstr "" "neutron.ml2.type_drivers 네임스페이스에서 로드할네트워크 유형 드라이버 시작점" "의 목록입니다. " msgid "" "List of physical_network names with which flat networks can be created. Use " "default '*' to allow flat networks with arbitrary physical_network names. " "Use an empty list to disable flat networks." msgstr "" "플랫 네트워크를 작성할 수 있는 실제 네트워크 이름의 목록입니다. 플랫 네트워크" "에 임의의 physical_network 이름을 사용하려면 기본값 '*'를 사용하십시오. 빈 목" "록을 사용하여 플랫 네트워크를 비활성화합니다." msgid "Local IP address of the VXLAN endpoints." msgstr "VXLAN 엔드포인트의 로컬 IP 주소." msgid "" "Local IP address of tunnel endpoint. Can be either an IPv4 or IPv6 address." msgstr "" "터널 엔드 포인트의 로컬 IP 주소입니다. IPv4 또는 IPv6 주소가 가능합니다." msgid "Location for Metadata Proxy UNIX domain socket." msgstr "메타데이터 프록시 UNIX 도메인 소켓 위치입니다." msgid "Location of Metadata Proxy UNIX domain socket" msgstr "메타데이터 프록시 UNIX 도메인 소켓 위치" msgid "Location of pid file of this process." msgstr "이 프로세스 pid 파일 위치입니다." msgid "Location to store DHCP server config files." msgstr "DHCP 서버 구성 파일을 저장할 위치." msgid "Location to store IPv6 PD files." msgstr "IPv6 PD 파일을 저장할 위치입니다. " msgid "Location to store IPv6 RA config files" msgstr "IPv6 RA 구성 파일을 저장할 위치" msgid "Location to store child pid files" msgstr "하위 pid 파일을 저장할 위치" msgid "Location to store keepalived/conntrackd config files" msgstr "keepalived/conntrackd 구성 파일을 저장할 위치" msgid "Log agent heartbeats" msgstr "로그 에이전트 하트비트" msgid "Loopback IP subnet is not supported if enable_dhcp is True." msgstr "enable_dhcp가 True인 경우 루프백 IP 서브넷이 지원되지 않습니다." msgid "MTU size of veth interfaces" msgstr "veth 인터페이스의 MTU 크기" msgid "Make the l2 agent run in DVR mode." msgstr "l2 에이전트를 DVR 모드에서 실행하십시오." 
msgid "Malformed request body" msgstr "형식이 틀린 요청 본문" #, python-format msgid "Malformed request body: %(reason)s." msgstr "형식이 잘못된 요청 본문: %(reason)s." msgid "MaxRtrAdvInterval setting for radvd.conf" msgstr "radvd.conf의 MaxRtrAdvInterval 설정" msgid "Maximum number of DNS nameservers per subnet" msgstr "서브넷당 최대 DNS 네임스페이스 수" msgid "" "Maximum number of L3 agents which a HA router will be scheduled on. If it is " "set to 0 then the router will be scheduled on every agent." msgstr "" "HA 라우터가 스케줄될 최대 L3 에이전트 수입니다. 이 수가 0으로 설정되면 라우터" "가 모든 에이전트에서 스케줄됩니다." msgid "Maximum number of allowed address pairs" msgstr "허용되는 주소 쌍 최대 수" msgid "" "Maximum number of fixed ips per port. This option is deprecated and will be " "removed in the N release." msgstr "" "포트당 최대 Fixed IP 수. 이 옵션은 더 이상 사용되지 않으므로 N 릴리스에서 제" "거됩니다." msgid "Maximum number of host routes per subnet" msgstr "서브넷당 호스트 라우트의 최대 수" msgid "Maximum number of routes per router" msgstr "라우터당 최대 경로 수" msgid "" "Metadata Proxy UNIX domain socket mode, 4 values allowed: 'deduce': deduce " "mode from metadata_proxy_user/group values, 'user': set metadata proxy " "socket mode to 0o644, to use when metadata_proxy_user is agent effective " "user or root, 'group': set metadata proxy socket mode to 0o664, to use when " "metadata_proxy_group is agent effective group or root, 'all': set metadata " "proxy socket mode to 0o666, to use otherwise." msgstr "" "메타데이터 프록시 UNIX 도메인 소켓 모드, 4개의 값이 허용됨: 'deduce': " "metadata_proxy_user/group 값의 추론 모드, 'user': 메타데이터 프록시 소켓 모드" "를 0o644로 설정, metadata_proxy_user가 에이전트 유효 사용자 또는 루트인 경우 " "사용, 'group': 메타데이터 프록시 소켓 모드를 0o664로 설정, " "metadata_proxy_group이 에이전트 유효 그룹 또는 루트인 경우 사용, 'all': 메타" "데이터 프록시 소켓 모드를 0o666으로 설정, 기타 경우에 사용" msgid "Metering driver" msgstr "측정 드라이버" #, python-format msgid "Metering label %(label_id)s does not exist" msgstr "측정 레이블 %(label_id)s이(가) 존재하지 않음" #, python-format msgid "Metering label rule %(rule_id)s does not exist" msgstr "측정 레이블 규칙 %(rule_id)s이(가) 존재하지 않음" #, python-format msgid "" "Metering label rule with remote_ip_prefix %(remote_ip_prefix)s overlaps " "another" msgstr "" "remote_ip_prefix %(remote_ip_prefix)s을(를) 가진 측정 레이블 규칙이 다른 항목" "과 겹침" msgid "Method cannot be called within a transaction." msgstr "트랜잭션 내에서 메소드를 호출할 수 없습니다." msgid "Migration from distributed router to centralized is not supported" msgstr "분산 라우터에서 중앙으로 마이그레이션하는 작업은 지원되지 않음" msgid "MinRtrAdvInterval setting for radvd.conf" msgstr "radvd.conf의 MinRtrAdvInterval 설정" msgid "Minimize polling by monitoring ovsdb for interface changes." msgstr "인터페이스 변경사항에 대한 ovsdb를 모니터링하여 폴링을 최소화합니다." #, python-format msgid "Missing key in mapping: '%s'" msgstr "맵핑에서 키 누락: '%s'" #, python-format msgid "Missing value in mapping: '%s'" msgstr "맵핑에서 값 누락: '%s'" msgid "Multicast IP subnet is not supported if enable_dhcp is True." msgstr "enable_dhcp가 True인 경우 멀티캐스트 IP 서브넷이 지원되지 않습니다." msgid "" "Multicast group for VXLAN. When configured, will enable sending all " "broadcast traffic to this multicast group. When left unconfigured, will " "disable multicast VXLAN mode." msgstr "" "VXLAN의 멀티캐스트 그룹입니다. 이 그룹이 구성되면 모든 브로드캐스트 트래픽을 " "이 멀티캐스트 그룹에 보낼 수 있습니다. 구성되지 않은 상태로 두면 멀티캐스트 " "VXLAN 모드가 사용되지 않습니다." msgid "" "Multicast group(s) for vxlan interface. A range of group addresses may be " "specified by using CIDR notation. Specifying a range allows different VNIs " "to use different group addresses, reducing or eliminating spurious broadcast " "traffic to the tunnel endpoints. 
To reserve a unique group for each possible " "(24-bit) VNI, use a /8 such as 239.0.0.0/8. This setting must be the same on " "all the agents." msgstr "" "Vxlan 인터페이스의 멀티캐스트 그룹입니다. 그룹 주소의 범위는 CIDR 표기법을 사" "용하여 지정할 수 있습니다. 범위를 지정하면 여러 다른 VNI에서 여러 다른 그룹 " "주소를 사용할 수 있으므로, 터널 endpoint에 대한 의사 브로드캐스트 트래픽이 감" "소하거나 제거됩니다. 가능한 각 (24비트) VNI의 고유 그룹을 유지하려면 /8을 사" "용하십시오(예: 239.0.0.0/8). 이 설정은 모든 에이전트에서 같아야 합니다." #, python-format msgid "Multiple agents with agent_type=%(agent_type)s and host=%(host)s found" msgstr "agent_type=%(agent_type)s 및 host=%(host)s인 에이전트를 여러 개 찾음" #, python-format msgid "Multiple default providers for service %s" msgstr "%s 서비스에 대한 다중 기본 제공자 " #, python-format msgid "Multiple plugins for service %s were configured" msgstr "%s 서비스에 대한 다중 플러그인이 구성되었음" #, python-format msgid "Multiple providers specified for service %s" msgstr "%s 서비스에 대해 다중 제공자가 지정됨" msgid "Multiple tenant_ids in bulk security group rule create not allowed" msgstr "벌크 보안 그룹 규칙 작성의 다중 tenant_id는 허용되지 않음" msgid "Must also specify protocol if port range is given." msgstr "포트 범위가 제공되는 경우 프로토콜도 지정해야 합니다. " msgid "Must specify one or more actions on flow addition or modification" msgstr "플로우 추가나 수정시 하나 이상의 조치를 지정해야 함" #, python-format msgid "Name %(dns_name)s is duplicated in the external DNS service" msgstr "외부 DNS 서비스에서 이름 %(dns_name)s이(가) 중복되어 있음" #, python-format msgid "" "Name '%s' must be 1-63 characters long, each of which can only be " "alphanumeric or a hyphen." msgstr "이름 '%s'의 길이는 1-63자 여야 하며 각각 영숫자나 하이픈이어야 합니다." #, python-format msgid "Name '%s' must not start or end with a hyphen." msgstr "이름 '%s'은(는) 하이픈으로 시작하거나 끝날 수 없습니다." msgid "Name of Open vSwitch bridge to use" msgstr "사용할 Open vSwitch 브릿지 이름" msgid "" "Name of bridge used for external network traffic. This should be set to an " "empty value for the Linux Bridge. When this parameter is set, each L3 agent " "can be associated with no more than one external network." msgstr "" "외부 네트워크 트래픽에 사용되는 브릿지 이름입니다. Linux Bridge는 빈 값으로 " "설정해야 합니다. 이 매개변수가 설정되면 각 L3 에이전트를 두 개 이상의 외부 네" "트워크에 연관시킬수 없습니다." msgid "" "Name of nova region to use. Useful if keystone manages more than one region." msgstr "" "사용할 nova 리젼의 이름입니다. 키스톤이 둘 이상의 리젼을 관리할 경우 유용합니" "다." msgid "Name of the FWaaS Driver" msgstr "FWaaS 드라이버 이름" msgid "Namespace of the router" msgstr "라우터에대한 네임스페이스" msgid "Native pagination depend on native sorting" msgstr "네이티브 페이지 번호 매기기는 네이티브 정렬에 따라 다름" #, python-format msgid "" "Need to apply migrations from %(project)s contract branch. This will require " "all Neutron server instances to be shutdown before proceeding with the " "upgrade." msgstr "" "%(project)s 계약 분기에서 마이그레이션을 적용해야 합니다. 업그레이드를 계속하" "기 전에 모든 Neutron 서버 인스턴스를 종료해야 합니다." msgid "Negative delta (downgrade) not supported" msgstr "음수의 delta (downgrade)는 지원하지 않음" msgid "Negative relative revision (downgrade) not supported" msgstr "음수의 상대적 개정판(다운그레이드)은 지원하지 않음" #, python-format msgid "" "Network %(network_id)s is already bound to BgpSpeaker %(bgp_speaker_id)s." msgstr "" "네트워크 %(network_id)s이(가) 이미 BgpSpeaker %(bgp_speaker_id)s에 바인드되" "어 있습니다." #, python-format msgid "" "Network %(network_id)s is not associated with BGP speaker %(bgp_speaker_id)s." msgstr "" "네트워크 %(network_id)s이(가) BGP 스피커 %(bgp_speaker_id)s과(와) 연관되지 않" "았습니다." #, python-format msgid "Network %(network_id)s is not bound to a BgpSpeaker." msgstr "네트워크 %(network_id)s이(가) BgpSpeaker에 바인드되지 않습니다." #, python-format msgid "Network %(network_id)s is not bound to a IPv%(ip_version)s BgpSpeaker." 
msgstr "" "네트워크 %(network_id)s이(가) IPv%(ip_version)s BgpSpeaker에 바인드되지 않습" "니다." #, python-format msgid "Network %s does not contain any IPv4 subnet" msgstr "네트워크 %s에 IPv4 서브넷이 포함되어 있지 않음" #, python-format msgid "Network %s is not a valid external network" msgstr "%s 네트워크가 올바른 외부 네트워크가 아님" #, python-format msgid "Network %s is not an external network" msgstr "%s 네트워크가 외부 네트워크가 아님" #, python-format msgid "" "Network of size %(size)s, from IP range %(parent_range)s excluding IP ranges " "%(excluded_ranges)s was not found." msgstr "" "IP 범위가 %(parent_range)s이고 크기가 %(size)s인(IP 범위 %(excluded_ranges)s " "제외) 네트워크를 발견하지 못했습니다." msgid "Network that will have instance metadata proxied." msgstr "인스턴스 메타데이터가 프록시되는 네트워크입니다." #, python-format msgid "Network type value '%s' not supported" msgstr "네트워크 유형 값 '%s'이(가) 지원되지 않음" msgid "Network type value needed by the ML2 plugin" msgstr "ML2 플러그인에 네트워크 유형 값이 필요함" msgid "Network types supported by the agent (gre and/or vxlan)." msgstr "에이전트에서 지원하는 네트워크 유형(gre 및/또는 vxlan)입니다." msgid "" "Neutron IPAM (IP address management) driver to use. If ipam_driver is not " "set (default behavior), no IPAM driver is used. In order to use the " "reference implementation of Neutron IPAM driver, use 'internal'." msgstr "" "사용할 Neutron IPAM(IP 주소 관리) 드라이버입니다. ipam_driver가 설정되지 않" "은 경우(기본 동작) IPAM 드라이버가 사용됩니다. Neutron 드라이버의 참조 구현" "을 사용하려면 'internal'을 사용하십시오." msgid "Neutron Service Type Management" msgstr "Neutron 서비스 유형 관리" msgid "Neutron core_plugin not configured!" msgstr "Neutron core_plugin이 구성되지 않았습니다!" msgid "Neutron plugin provider module" msgstr "Neutron 플러그인 제공자 모듈" msgid "Neutron quota driver class" msgstr "Neutron 할당량 드라이버 클래스" msgid "New value for first_ip or last_ip has to be specified." msgstr "first_ip 또는 last_ip의 새 값을 지정해야 합니다." msgid "No default router:external network" msgstr "기본 router:external 네트워크가 없음" #, python-format msgid "No default subnetpool found for IPv%s" msgstr "IPv%s의 기본 subnetpool이 없음" msgid "No default subnetpools defined" msgstr "기본 subnetpools가 정의되지 않음" #, python-format msgid "No eligible l3 agent associated with external network %s found" msgstr "외부 네트워크 %s과(와) 연관된 적합한 l3 에이전트를 찾을 수 없음" #, python-format msgid "No more IP addresses available for subnet %(subnet_id)s." msgstr "서브넷 %(subnet_id)s에 대해 사용 가능한 IP 주소가 더 이상 없습니다. " #, python-format msgid "" "No more Virtual Router Identifier (VRID) available when creating router " "%(router_id)s. The limit of number of HA Routers per tenant is 254." msgstr "" "%(router_id)s 라우터 작성 시 VRID(Virtual Router Identifier)를 더 이상 사용" "할 수 없습니다.테넌트당 HA 라우터 수의 한계는 254입니다." msgid "No offline migrations pending." msgstr "보류 중인 오프라인 마이그레이션이 없습니다." #, python-format msgid "No providers specified for '%s' service, exiting" msgstr "'%s' 서비스에 대해 제공자가 지정되지 않음, 종료하는 중" #, python-format msgid "No shared key in %s fields" msgstr "%s 필드의 공유 키가 없음" msgid "Not allowed to manually assign a router to an agent in 'dvr' mode." msgstr "'dvr' 모드에서 수동으로 에이전트에 라우터를 지정할 수 없습니다." msgid "Not allowed to manually remove a router from an agent in 'dvr' mode." msgstr "'dvr' 모드에서 수동으로 에이전트에서 라우터를 제거할 수 없습니다." #, python-format msgid "" "Not enough l3 agents available to ensure HA. Minimum required " "%(min_agents)s, available %(num_agents)s." msgstr "" "HA를 확인하기 위해 사용 가능한 13개의 에이전트가 충분하지 않습니다. 최소 " "%(min_agents)s, 사용 가능한 %(num_agents)s이(가) 필요합니다." msgid "" "Number of DHCP agents scheduled to host a tenant network. 
If this number is " "greater than 1, the scheduler automatically assigns multiple DHCP agents for " "a given tenant network, providing high availability for DHCP service." msgstr "" "테넌트 네트워크를 호스팅하기 위해 스케줄된 DHCP 에이전트의 수입니다. 이 숫자" "가 1보다 크면 스케줄러는 지정된 테넌트 네트워크에 대해 다중 DHCP 에이전트를 " "자동으로 지정하여 DHCP 서비스에 대한 고가용성을 제공합니다. " msgid "Number of RPC worker processes dedicated to state reports queue" msgstr "상태 보고서 큐 전용 RPC 작업자 프로세스 수" msgid "Number of RPC worker processes for service" msgstr "서비스에 대한 RPC 작업자 프로세스 수" msgid "Number of backlog requests to configure the metadata server socket with" msgstr "메타데이터 서버 소켓을 구성하기 위한 백로그 요청 수" msgid "Number of backlog requests to configure the socket with" msgstr "소켓을 설정하려는 백로그 요청 횟수" msgid "" "Number of bits in an ipv4 PTR zone that will be considered network prefix. " "It has to align to byte boundary. Minimum value is 8. Maximum value is 24. " "As a consequence, range of values is 8, 16 and 24" msgstr "" "네트워크 접두어로 고려될 ipv4 PTR 구역의 비트 수입니다. 바이트 경계에 맞게 정" "렬해야 합니다. 최소값은 8이고 최대값은 24입니다. 결과적으로 값의 범위는 8, " "16 및 24입니다." msgid "" "Number of bits in an ipv6 PTR zone that will be considered network prefix. " "It has to align to nyble boundary. Minimum value is 4. Maximum value is 124. " "As a consequence, range of values is 4, 8, 12, 16,..., 124" msgstr "" "네트워크 접두어로 고려될 ipv6 PTR 구역의 비트 수입니다. nyble 경계에 맞게 정" "렬해야 합니다. 최소값은 4이고 최대값은 124입니다. 결과적으로 값의 범위는 4, " "8, 12, 16,..., 124입니다." msgid "" "Number of floating IPs allowed per tenant. A negative value means unlimited." msgstr "Tenant당 허용된 부동 IP 수입니다. 음수 값은 무제한을 의미합니다." msgid "" "Number of networks allowed per tenant. A negative value means unlimited." msgstr "Tenant당 허용되는 네트워크 수입니다. 음수 값은 무제한을 의미합니다." msgid "Number of ports allowed per tenant. A negative value means unlimited." msgstr "Tenant당 허용되는 포트 수입니다. 음수 값은 무제한을 의미합니다." msgid "Number of routers allowed per tenant. A negative value means unlimited." msgstr "Tenant당 허용된 라우터 수입니다. 음수 값은 무제한을 의미합니다." msgid "" "Number of seconds between sending events to nova if there are any events to " "send." msgstr "보낼 이벤트가 있는 경우 nova에 전송하는 이벤트 간의 시간(초)입니다." msgid "Number of seconds to keep retrying to listen" msgstr "Listen 재시도 계속할 시간" msgid "" "Number of security groups allowed per tenant. A negative value means " "unlimited." msgstr "테넌트당 허용된 보안 그룹 수입니다. 음수 값은 무제한을 의미합니다." msgid "" "Number of security rules allowed per tenant. A negative value means " "unlimited." msgstr "테넌트당 허용된 보안 규칙 수입니다. 음수 값은 무제한을 의미합니다." msgid "" "Number of separate API worker processes for service. If not specified, the " "default is equal to the number of CPUs available for best performance." msgstr "" "서비스에 대한 별도의 API 작업자 프로세스 수입니다. 지정되지 않은 경우 기본값" "은 최적 성능을 위해 사용 가능한 CPU 수와 동일합니다. " msgid "" "Number of separate worker processes for metadata server (defaults to half of " "the number of CPUs)" msgstr "" "메타데이터 서버에 대한 별도의 작업자 프로세스 수(기본값은 CPU 수의 절반으로 " "지정됨)" msgid "Number of subnets allowed per tenant, A negative value means unlimited." msgstr "Tenant당 허용되는 서브넷 수입니다. 음수 값은 무제한을 의미합니다." msgid "" "Number of threads to use during sync process. Should not exceed connection " "pool size configured on server." msgstr "" "동기화 프로세스 중에 사용할 스레드 수입니다. 서버에 구성된 연결 풀 크기를 초" "과하지 않아야 합니다." msgid "OK" msgstr "OK" msgid "" "OVS datapath to use. 'system' is the default value and corresponds to the " "kernel datapath. To enable the userspace datapath set this value to 'netdev'." msgstr "" "사용할 OVS 데이터 경로입니다. '시스템'은 기본값이며 커널 데이터 경로에 해당합" "니다. 사용자 공간 데이터 경로를 사용하려면 이 값을 'netdev'로 설정하십시오." 
msgid "OVS vhost-user socket directory." msgstr "OVS vhost-사용자 소켓 디렉토리." #, python-format msgid "OVSDB Error: %s" msgstr "OVSDB 오류: %s" #, python-format msgid "Object action %(action)s failed because: %(reason)s." msgstr "%(action)s 오브젝트 조치가 실패함. 이유: %(reason)s." msgid "Only admin can view or configure quota" msgstr "관리자만이 할당량을 보거나 구성할 수 있습니다. " msgid "Only admin is authorized to access quotas for another tenant" msgstr "관리자만 다른 테넌트의 할당량에 액세스할 수 있는 권한이 있음" msgid "Only admins can manipulate policies on networks they do not own." msgstr "소유하지 않은 네트워크의 정책은 관리자만 조작할 수 있습니다. " msgid "Only admins can manipulate policies on objects they do not own" msgstr "소유하지 않은 오브젝트의 정책은 관리자만 조작할 수 있음" msgid "Only allowed to update rules for one security profile at a time" msgstr "한 번에 하나의 보안 프로파일에 대한 규칙만 업데이트하도록 허용됨" msgid "Only remote_ip_prefix or remote_group_id may be provided." msgstr "remote_ip_prefix 또는 remote_group_id만이 제공될 수 있습니다. " msgid "OpenFlow interface to use." msgstr "사용할 OpenFlow 인터페이스입니다. " #, python-format msgid "" "Operation %(op)s is not supported for device_owner %(device_owner)s on port " "%(port_id)s." msgstr "" "다음 포트의 device_owner %(device_owner)s에 대해 조작 %(op)s이(가) 지원되지 " "않음. 포트: %(port_id)s." #, python-format msgid "Operation not supported on device %(dev_name)s" msgstr "디바이스 %(dev_name)s에 대한 조작이 지원되지 않음" msgid "" "Ordered list of network_types to allocate as tenant networks. The default " "value 'local' is useful for single-box testing but provides no connectivity " "between hosts." msgstr "" "Tenant 네트워크로 할당할 network_types의 정렬된 목록입니다. 기본값 'local'은 " "단일 상자 테스트에 유용하지만 호스트 간 연결을 제공하지 않습니다." msgid "Override the default dnsmasq settings with this file." msgstr "기본 dnsmasq 설정을 이 파일로 대체합니다." msgid "Owner type of the device: network/compute" msgstr "디바이스의 소유자 유형: network/compute" msgid "POST requests are not supported on this resource." msgstr "이 자원에서 POST 요청이 지원되지 않습니다." #, python-format msgid "Package %s not installed" msgstr "패키지 %s이(가) 설치되지 않음" #, python-format msgid "Parameter %(param)s must be of %(param_type)s type." msgstr "%(param)s 매개변수의 유형은 %(param_type)s이어야 합니다." #, python-format msgid "Parsing bridge_mappings failed: %s." msgstr "bridge_mappings 구문 분석 실패: %s." msgid "Parsing supported pci_vendor_devs failed" msgstr "지원되는 pci_vendor_devs 구문 분석 실패" msgid "Password for connecting to designate in admin context" msgstr "관리 컨텍스트에서 지정하기 위해 연결할 암호" #, python-format msgid "Password not specified for authentication type=%(auth_type)s." msgstr "인증 유형=%(auth_type)s에 암호가 지정되지 않았습니다." msgid "Path to PID file for this process" msgstr "이 프로세스에 대한 PID 파일의 경로" msgid "Path to the router directory" msgstr "라우터 디렉토리 경로" msgid "Peer patch port in integration bridge for tunnel bridge." msgstr "터널 브릿지에 대한 통합 브릿지에 있는 피어 패치 포트입니다." msgid "Peer patch port in tunnel bridge for integration bridge." msgstr "통합 브릿지에 대한 터널 브릿지에 있는 피어 패치 포트입니다." msgid "Per-tenant subnet pool prefix quota exceeded." msgstr "Tenant당 서브넷 풀 Prefix 할당량이 초과되었습니다." msgid "Phase upgrade options do not accept revision specification" msgstr "단계 업그레이드 옵션이 개정 스펙을 승인하지 않음" msgid "Ping timeout" msgstr "Ping 제한시간 초과" #, python-format msgid "Plugin '%s' not found." msgstr "플러그인 '%s'를 찾을 수 없습니다." msgid "Plugin does not support updating provider attributes" msgstr "플러그인이 제공자 속성 업데이트를 지원하지 않음" msgid "Policy configuration policy.json could not be found." msgstr "정책 구성 policy.json을 찾을 수 없습니다." 
#, python-format msgid "Port %(id)s does not have fixed ip %(address)s" msgstr "%(id)s 포트가 고정 IP %(address)s을(를) 갖지 않음" #, python-format msgid "Port %(port)s does not exist on %(bridge)s!" msgstr "%(port)s 포트가 %(bridge)s에 없습니다!" #, python-format msgid "Port %(port_id)s is already acquired by another DHCP agent" msgstr "다른 DHCP 에이전트에가 이미 포트 %(port_id)s을(를) 확보했습니다." #, python-format msgid "" "Port %(port_id)s is associated with a different tenant than Floating IP " "%(floatingip_id)s and therefore cannot be bound." msgstr "" "포트 %(port_id)s이(가) Floating IP %(floatingip_id)s과(와) 다른 tenant와 연관" "되어 있어서 바운드할 수 없습니다. " #, python-format msgid "Port %(port_id)s is not managed by this agent. " msgstr "이 에이전트에서 %(port_id)s 포트를 관리하지 않습니다." #, python-format msgid "Port %s does not exist" msgstr "%s 포트가 없음" #, python-format msgid "" "Port %s has multiple fixed IPv4 addresses. Must provide a specific IPv4 " "address when assigning a floating IP" msgstr "" "포트 %s에 다중 fixed IPv4 주소가 있습니다. Floating IP를 지정하는 경우에는 특" "정 IPv4 주소를 제공해야 합니다. " msgid "" "Port Security must be enabled in order to have allowed address pairs on a " "port." msgstr "포트에서 주소 쌍을 허용하려면 포트 보안을 사용 가능하게 해야 합니다." msgid "" "Port has security group associated. Cannot disable port security or ip " "address until security group is removed" msgstr "" "포트에 보안 그룹이 연관되어 있습니다. 보안 그룹이 제거될 때까지 포트 보안 또" "는 IP 주소를 사용 안함으로 설정할 수 없습니다. " msgid "" "Port security must be enabled and port must have an IP address in order to " "use security groups." msgstr "" "보안 그룹을 사용하려면 포트 보안이 사용으로 설정되고 포트에 IP 주소가 있어야 " "합니다. " msgid "" "Port to listen on for OpenFlow connections. Used only for 'native' driver." msgstr "" "OpenFlow 연결을 위해 청취할 포트입니다. 'native' 드라이버에만 사용됩니다. " #, python-format msgid "Prefix '%(prefix)s' not supported in IPv%(version)s pool." msgstr "IPv%(version)s 풀에서 prefix '%(prefix)s'이(가) 지원되지 않습니다." msgid "Prefix Delegation can only be used with IPv6 subnets." msgstr "Prefix 위임은 IPv6 서브넷에만 사용할 수 있습니다. " msgid "Private key of client certificate." msgstr "클라이언트 인증서 개인 키입니다." #, python-format msgid "Probe %s deleted" msgstr "%s 프로브가 삭제되었음" #, python-format msgid "Probe created : %s " msgstr "프로브 작성: %s " msgid "Process is already started" msgstr "프로세스가 이미 시작됨" msgid "Process is not running." msgstr "프로세스가 실행 중이지 않습니다." msgid "Protocol to access nova metadata, http or https" msgstr "Nova 메타데이터에 접근하기 위한 프로토콜, http 또는 https" #, python-format msgid "Provider name %(name)s is limited by %(len)s characters" msgstr "제공자 이름 %(name)s은(는) %(len)s자로 제한됨" #, python-format msgid "QoS Policy %(policy_id)s is used by %(object_type)s %(object_id)s." msgstr "" "QoS 정책 %(policy_id)s이(가) %(object_type)s %(object_id)s에 의해 사용됩니" "다. " #, python-format msgid "" "QoS binding for network %(net_id)s and policy %(policy_id)s could not be " "found." msgstr "" "네트워크 %(net_id)s 및 정책 %(policy_id)s의 QoS 바인딩을 찾을 수 없습니다." #, python-format msgid "" "QoS binding for port %(port_id)s and policy %(policy_id)s could not be found." msgstr "" "포트 %(port_id)s 및 정책 %(policy_id)s의 QoS 바인딩을 찾을 수 없습니다." #, python-format msgid "QoS policy %(policy_id)s could not be found." msgstr "QoS 정책 %(policy_id)s을(를) 찾을 수 없습니다." #, python-format msgid "QoS rule %(rule_id)s for policy %(policy_id)s could not be found." msgstr "정책 %(policy_id)s의 QoS 규칙 %(rule_id)s을(를) 찾을 수 없습니다." 
#, python-format msgid "RBAC policy of type %(object_type)s with ID %(id)s not found" msgstr "ID가 %(id)s인 %(object_type)s 유형의 RBAC 정책을 찾을 수 없음" #, python-format msgid "" "RBAC policy on object %(object_id)s cannot be removed because other objects " "depend on it.\n" "Details: %(details)s" msgstr "" "다른 오브젝트가 의존하고 있기 때문에 오브젝트 %(object_id)s에 대한 RBAC 정책" "을 제거할 수 없습니다. \n" "세부사항: %(details)s" msgid "" "Range of seconds to randomly delay when starting the periodic task scheduler " "to reduce stampeding. (Disable by setting to 0)" msgstr "" "몰리지 않도록 주기적 태스크 스케줄러를 시작할 때 무작위로 지연할 시간의 범위" "(초)입니다. (0으로 설정하여 사용 안함) " msgid "Ranges must be in the same IP version" msgstr "범위가 동일한 IP 버전에 있어야 함" msgid "Ranges must be netaddr.IPRange" msgstr "범위는 netaddr.IPRange여야 함" msgid "Ranges must not overlap" msgstr "범위는 중첩되지 않아야 함" #, python-format msgid "" "Received type '%(type)s' and value '%(value)s'. Expecting netaddr.EUI type." msgstr "" "유형 '%(type)s' 및 값 '%(value)s'을(를) 수신했습니다. netaddr.EUI 유형이 필요" "합니다." #, python-format msgid "" "Received type '%(type)s' and value '%(value)s'. Expecting netaddr.IPAddress " "type." msgstr "" "유형 '%(type)s' 및 값 '%(value)s'을(를) 수신했습니다. netaddr.IPAddress 유형" "이 필요합니다." #, python-format msgid "" "Received type '%(type)s' and value '%(value)s'. Expecting netaddr.IPNetwork " "type." msgstr "" "유형 '%(type)s' 및 값 '%(value)s'을(를) 수신했습니다. netaddr.IPNetwork 유형" "이 필요합니다." #, python-format msgid "" "Release aware branch labels (%s) are deprecated. Please switch to expand@ " "and contract@ labels." msgstr "" "분기 레이블(%s)은 더 이상 사용되지 않습니다. expand@ 및 contract@ 레이블로 전" "환하십시오. " msgid "Remote metadata server experienced an internal server error." msgstr "원격 메타데이터 서버에서 내부 서버 오류가 발생했습니다. " msgid "" "Repository does not contain HEAD files for contract and expand branches." msgstr "저장소에 계약 및 확장 분기의 HEAD 파일이 포함되지 않습니다." msgid "" "Representing the resource type whose load is being reported by the agent. " "This can be \"networks\", \"subnets\" or \"ports\". When specified (Default " "is networks), the server will extract particular load sent as part of its " "agent configuration object from the agent report state, which is the number " "of resources being consumed, at every report_interval.dhcp_load_type can be " "used in combination with network_scheduler_driver = neutron.scheduler." "dhcp_agent_scheduler.WeightScheduler When the network_scheduler_driver is " "WeightScheduler, dhcp_load_type can be configured to represent the choice " "for the resource being balanced. Example: dhcp_load_type=networks" msgstr "" "에이전트에서 로드를 보고하는 자원 유형을 나타냅니다. 이는 \"네트워크\", \"서" "브넷\" 또는 \"포트\"입니다. 이를 지정하는 경우 (기본값은 네트워크임) 서버는 " "network_scheduler_driver = neutron.scheduler.dhcp_agent_scheduler." "WeightScheduler 와의 조합에서 report_interval.dhcp_load_type 을 사용할 수 있" "을 때마다 에이전트 보고 상태에서 에이전트 구성 오브젝트의 일부로 보낸 특정 로" "드를 추출하는데, 이는 이용 중인 자원 수입니다. network_scheduler_driver 가 " "WeightScheduler 인 경우 dhcp_load_type을 구성하여 밸런스 조정 중인 자원에 대" "한 선택을 표시할 수 있습니다. 예: dhcp_load_type=networks" msgid "Request Failed: internal server error while processing your request." msgstr "요청 실패: 요청을 처리하는 중에 내부 서버 오류가 발생했습니다. " msgid "Request body is not supported in DELETE." msgstr "DELETE에서 요청 본문을 지원하지 않습니다." #, python-format msgid "" "Request contains duplicate address pair: mac_address %(mac_address)s " "ip_address %(ip_address)s." msgstr "" "요청에 중복되는 주소 쌍이 포함됨: mac_address %(mac_address)s ip_address " "%(ip_address)s." 
#, python-format msgid "" "Requested subnet with cidr: %(cidr)s for network: %(network_id)s overlaps " "with another subnet" msgstr "" "요청된 서브넷(%(network_id)s 네트워크의 cidr: %(cidr)s)이 다른 서브넷과 겹침" msgid "" "Reset flow table on start. Setting this to True will cause brief traffic " "interruption." msgstr "" "시작 시 플로우 테이블을 재설정하십시오. 이를 True로 설정하면 짧은 트래픽 인터" "럽트가 발생합니다. " #, python-format msgid "Resource %(resource)s %(resource_id)s could not be found." msgstr "자원 %(resource)s %(resource_id)s을(를) 찾을 수 없습니다." #, python-format msgid "Resource %(resource_id)s of type %(resource_type)s not found" msgstr "유형 %(resource_type)s의 자원 %(resource_id)s을(를) 찾을 수 없음" #, python-format msgid "" "Resource '%(resource_id)s' is already associated with provider " "'%(provider)s' for service type '%(service_type)s'" msgstr "" "'%(resource_id)s' 자원이 이미 '%(service_type)s' 서비스 유형에 대한 " "'%(provider)s' 제공자와 연관되어 있음" msgid "Resource body required" msgstr "자원 본문 필수" msgid "" "Resource name(s) that are supported in quota features. This option is now " "deprecated for removal." msgstr "" "할당량 기능에서 지원되는 자원 이름입니다. 이 옵션은 이제 제거를 위해더 이상 " "사용되지 않습니다. " msgid "Resource not found." msgstr "자원을 찾을 수 없습니다." msgid "Resources required" msgstr "자원 필수" msgid "" "Root helper application. Use 'sudo neutron-rootwrap /etc/neutron/rootwrap." "conf' to use the real root filter facility. Change to 'sudo' to skip the " "filtering and just run the command directly." msgstr "" "루트 헬퍼 애플리케이션. 'sudo neutron-rootwrap /etc/neutron/rootwrap.conf'를 " "사용하여 실제 루트 필터 기능을 사용합니다. 'sudo'로 변경하여 필터링을 건너뛰" "고 명령을 직접 실행하기만 하면 됩니다." msgid "Root helper daemon application to use when possible." msgstr "가능한 경우 사용할 루트 헬퍼 데몬 애플리케이션." msgid "Root permissions are required to drop privileges." msgstr "권한을 삭제하려면 루트 권한이 필요합니다." #, python-format msgid "Route %(cidr)s not advertised for BGP Speaker %(speaker_as)d." msgstr "" "BGP 스피커 %(speaker_as)d에 대해 경로 %(cidr)s이(가) 광고되지 않았습니다." #, python-format msgid "Router %(router_id)s %(reason)s" msgstr "라우터 %(router_id)s %(reason)s" #, python-format msgid "Router %(router_id)s could not be found" msgstr "%(router_id)s 라우터를 찾을 수 없음" #, python-format msgid "Router %(router_id)s does not have an interface with id %(port_id)s" msgstr "%(router_id)s 라우터에 ID가 %(port_id)s인 인터페이스가 없음" #, python-format msgid "Router %(router_id)s has no interface on subnet %(subnet_id)s" msgstr "%(router_id)s 라우터에 %(subnet_id)s 서브넷의 인터페이스가 없음" #, python-format msgid "Router '%(router_id)s' cannot be both DVR and HA." msgstr "라우터 '%(router_id)s'은(는) 동시에 DVR 및 HA가 될 수 없습니다." #, python-format msgid "Router '%(router_id)s' is not compatible with this agent." msgstr "라우터 '%(router_id)s'이(가) 이 에이전트와 호환되지 않습니다." #, python-format msgid "Router already has a port on subnet %s" msgstr "라우터가 이미 %s 서브넷에 포트를 갖고 있음" #, python-format msgid "" "Router interface for subnet %(subnet_id)s on router %(router_id)s cannot be " "deleted, as it is required by one or more floating IPs." msgstr "" "하나 이상의 floating IP에서 필요로 하므로 %(router_id)s 라우터의 " "%(subnet_id)s 서브넷에 대한 라우터 인터페이스를 삭제할 수 없습니다. " #, python-format msgid "" "Router interface for subnet %(subnet_id)s on router %(router_id)s cannot be " "deleted, as it is required by one or more routes." msgstr "" "하나 이상의 라우터에서 필요로 하므로 %(router_id)s 라우터의 %(subnet_id)s 서" "브넷에 대한 라우터 인터페이스를 삭제할 수 없습니다. " msgid "Router port must have at least one fixed IP" msgstr "라우터 포트에는 하나 이상의 Fixed IP가 있어야 함" msgid "Router that will have connected instances' metadata proxied." msgstr "연결된 인스턴스의 메타데이터가 프록시되는 라우터입니다." 
#, python-format msgid "" "Row doesn't exist in the DB. Request info: Table=%(table)s. Columns=" "%(columns)s. Records=%(records)s." msgstr "" "행이 DB에 없습니다. 요청 정보: 테이블=%(table)s. 열=%(columns)s. 레코드=" "%(records)s." msgid "Run as daemon." msgstr "데몬으로 실행됩니다." #, python-format msgid "Running %(cmd)s (%(desc)s) for %(project)s ..." msgstr "%(project)s에 대한 %(cmd)s(%(desc)s) 실행 중..." #, python-format msgid "Running %(cmd)s for %(project)s ..." msgstr "%(project)s에 대한 %(cmd)s 실행 중..." msgid "Running without keystone AuthN requires that tenant_id is specified" msgstr "키스톤 AuthN 없이 실행하려면 tenant ID를 지정해야 합니다. " msgid "" "Seconds between nodes reporting state to server; should be less than " "agent_down_time, best if it is half or less than agent_down_time." msgstr "" "서버에 대한 상태를 보고하는 노드 사이의 시간(초)이며 agent_down_time보다 짧아" "야 하며 절반이거나 agent_down_time보다 짧은 경우 최적입니다." msgid "Seconds between running periodic tasks" msgstr "주기적 태스크 실행 사이의 시간(초)" msgid "" "Seconds to regard the agent is down; should be at least twice " "report_interval, to be sure the agent is down for good." msgstr "" "에이전트가 작동 중지되었다고 간주되는 시간(초)이며 에이전트가 계속 작동 중지 " "상태인지 확인할 수 있도록 report_interval의 두 배 이상이어야 합니다." #, python-format msgid "Security Group %(id)s %(reason)s." msgstr "보안 그룹 %(id)s %(reason)s입니다. " #, python-format msgid "Security Group Rule %(id)s %(reason)s." msgstr "보안 그룹 규칙 %(id)s %(reason)s." #, python-format msgid "Security group %(id)s does not exist" msgstr "%(id)s 보안 그룹이 존재하지 않음" #, python-format msgid "Security group rule %(id)s does not exist" msgstr "보안 그룹 규칙 %(id)s이(가) 존재하지 않음" #, python-format msgid "Security group rule already exists. Rule id is %(rule_id)s." msgstr "보안 그룹 규칙이 이미 있습니다. 규칙 ID는 %(rule_id)s입니다." #, python-format msgid "" "Security group rule for ethertype '%(ethertype)s' not supported. Allowed " "values are %(values)s." msgstr "" "ethertype '%(ethertype)s'의 보안 그룹 규칙이 지원되지 않습니다. 허용된 값은 " "%(values)s입니다." #, python-format msgid "" "Security group rule protocol %(protocol)s not supported. Only protocol " "values %(values)s and integer representations [0 to 255] are supported." msgstr "" "보안 그룹 규칙 프로토콜 %(protocol)s이(가) 지원되지 않습니다. 프로토콜 값 " "%(values)s 및 정수 표시 [0 - 255]만 지원됩니다. " msgid "Segments and provider values cannot both be set." msgstr "세그먼트 및 제공자 값을 모두 설정할 수 없습니다." msgid "Selects the Agent Type reported" msgstr "보고된 에이전트 유형을 선택함" msgid "" "Send notification to nova when port data (fixed_ips/floatingip) changes so " "nova can update its cache." msgstr "" "포트 데이터(fixed_ips/floatingip)가 변경되면 알림을 nova에 보냅니다. 이에 따" "라 nova는 해당 캐시를 업데이트할 수 있습니다." msgid "Send notification to nova when port status changes" msgstr "포트 상태가 변경되면 알림을 nova에 보냅니다." msgid "" "Send this many gratuitous ARPs for HA setup, if less than or equal to 0, the " "feature is disabled" msgstr "" "HA 설정을 위해 불필요한 다수의 ARP를 전송합니다. 0 이하인 경우 기능이 사용 안" "함으로 설정됩니다." #, python-format msgid "Service Profile %(sp_id)s could not be found." msgstr "서비스 프로파일 %(sp_id)s을(를) 찾을 수 없습니다." #, python-format msgid "Service Profile %(sp_id)s is already associated with flavor %(fl_id)s." msgstr "" "서비스 프로파일 %(sp_id)s이(가) 이미 Flavor %(fl_id)s과(와) 연관되어 있습니" "다." #, python-format msgid "Service Profile %(sp_id)s is not associated with flavor %(fl_id)s." msgstr "" "서비스 프로파일 %(sp_id)s이(가) Flavor %(fl_id)s과(와) 연관되지 않습니다." #, python-format msgid "Service Profile %(sp_id)s is used by some service instance." msgstr "일부 서비스 인스턴스에서 서비스 프로파일 %(sp_id)s을(를) 사용합니다." #, python-format msgid "Service Profile driver %(driver)s could not be found." 
msgstr "서비스 프로파일 드라이버 %(driver)s을(를) 찾을 수 없습니다." msgid "Service Profile is not enabled." msgstr "서비스 프로파일이 사용되지 않습니다." msgid "Service Profile needs either a driver or metainfo." msgstr "서비스 프로파일에 드라이버나 metainfo가 필요합니다." #, python-format msgid "" "Service provider '%(provider)s' could not be found for service type " "%(service_type)s" msgstr "" "서비스 유형에 대한 '%(provider)s' 서비스 제공자를 찾을 수 없음: " "%(service_type)s" msgid "Service to handle DHCPv6 Prefix delegation." msgstr "DHCPv6 접두부 위임을 처리할 서비스입니다. " #, python-format msgid "Service type %(service_type)s does not have a default service provider" msgstr "%(service_type)s 서비스 유형에 기본 서비스 제공자가 없음" msgid "" "Set new timeout in seconds for new rpc calls after agent receives SIGTERM. " "If value is set to 0, rpc timeout won't be changed" msgstr "" "에이전트에서 SIGTERM을 수신한 후에 새 rpc 호출에 대한 새 제한시간(초)을 설정" "합니다. 값을 0으로 설정하면 rpc 제한시간이 변경되지 않습니다." msgid "" "Set or un-set the don't fragment (DF) bit on outgoing IP packet carrying GRE/" "VXLAN tunnel." msgstr "" "GRE/VXLAN 터널을 전송하는 발신 IP 패킷에 DF(Don't Fragment) 비트를 설정하거" "나 설정 해제하십시오." msgid "" "Set or un-set the tunnel header checksum on outgoing IP packet carrying GRE/" "VXLAN tunnel." msgstr "" "GRE/VXLAN 터널을 전달하는 발신 IP 패킷에서 터널 헤더 체크섬을 설정하거나 설" "정 해제하십시오. " msgid "Shared address scope can't be unshared" msgstr "공유 주소 범위는 공유 해제할 수 없음" msgid "" "Specifying 'tenant_id' other than authenticated tenant in request requires " "admin privileges" msgstr "" "요청에서 인증된 tenant가 아닌 'tenant_id'를 지정하려면 admin 권한이 필요함" msgid "String prefix used to match IPset names." msgstr "IPSet 이름을 일치시키는 데 사용되는 문자열 prefix입니다. " #, python-format msgid "Sub-project %s not installed." msgstr "하위 프로젝트 %s이(가) 설치되지 않았습니다. " msgid "Subnet for router interface must have a gateway IP" msgstr "라우터 인터페이스에 대한 서브넷은 게이트웨이 IP를 가져야 함" msgid "" "Subnet has a prefix length that is incompatible with DHCP service enabled." msgstr "" "서브넷이 DHCP 서비스가 사용으로 설정된 호환 불가능한 접두부 길이를 가지고 있" "습니다. " #, python-format msgid "Subnet pool %(subnetpool_id)s could not be found." msgstr "서브넷 풀 %(subnetpool_id)s을(를) 찾을 수 없습니다." msgid "Subnet pool has existing allocations" msgstr "서브넷 풀에 기존 할당이 있음" msgid "Subnet used for the l3 HA admin network." msgstr "l3 HA 관리 네트워크에 사용된 서브넷입니다." msgid "" "Subnets hosted on the same network must be allocated from the same subnet " "pool." msgstr "" "동일한 네트워크에서 호스트되는 서브넷을 동일한 서브넷 풀에서 할당해야 합니다." msgid "Suffix to append to all namespace names." msgstr "모든 네임스페이스 이름에 추가될 suffix입니다." msgid "" "System-wide flag to determine the type of router that tenants can create. " "Only admin can override." msgstr "" "Tenant가 작성할 수 있는 라우터 유형을 판별하는 시스템 범위 플래그입니다. 관리" "자만 대체할 수 있습니다." msgid "TCP Port to listen for metadata server requests." msgstr "메타데이터 서버 요청을 listen TCP 포트입니다. " msgid "TCP Port used by Neutron metadata namespace proxy." msgstr "Neutron 메타데이터 네임스페이스 프록시가 사용하는 TCP 포트입니다. " msgid "TCP Port used by Nova metadata server." msgstr "Nova 메타데이터 서버가 사용한 TCP 포트입니다. " #, python-format msgid "TLD '%s' must not be all numeric" msgstr "TLD '%s'에 숫자만 사용할 수 없음" msgid "TOS for vxlan interface protocol packets." msgstr "vxlan 인터페이스 프로토콜 패킷용 TOS." msgid "TTL for vxlan interface protocol packets." msgstr "vxlan 인터페이스 프로토콜 패킷용 TTL." #, python-format msgid "Table %s can only be queried by UUID" msgstr "테이블 %s은(는) UUID로만 조회할 수 있음" #, python-format msgid "Tag %(tag)s could not be found." msgstr "%(tag)s 태그를 찾을 수 없습니다." 
#, python-format msgid "Tenant %(tenant_id)s not allowed to create %(resource)s on this network" msgstr "" "%(tenant_id)s 테넌트는 이 네트워크에 %(resource)s을(를) 작성하도록 허용되지 " "않음" msgid "Tenant id for connecting to designate in admin context" msgstr "관리 컨텍스트에서 지정하기 위해 연결할 테넌트 ID" msgid "Tenant name for connecting to designate in admin context" msgstr "관리 컨텍스트에서 지정하기 위해 연결할 테넌트 이름" msgid "Tenant network creation is not enabled." msgstr "Tenant 네트워크 작성은 사용되지 않습니다. " msgid "Tenant-id was missing from quota request." msgstr "Teanant ID가 할당량 요청에서 누락되었습니다. " msgid "" "The 'gateway_external_network_id' option must be configured for this agent " "as Neutron has more than one external network." msgstr "" "'gateway_external_network_id' 옵션은 Neutron이 두 개 이상의 외부 네트워크를 " "가지므로 이 에이전트에 대해구성되어야 합니다. " msgid "" "The DHCP agent will resync its state with Neutron to recover from any " "transient notification or RPC errors. The interval is number of seconds " "between attempts." msgstr "" "DHCP 에이전트가 임시 알림이나 RPC 오류로부터 복구하기 위해 해당 상태를 " "Neutron과 다시 동기화합니다. 간격은 시도 사이 시간(초) 횟수입니다." msgid "" "The DHCP server can assist with providing metadata support on isolated " "networks. Setting this value to True will cause the DHCP server to append " "specific host routes to the DHCP request. The metadata service will only be " "activated when the subnet does not contain any router port. The guest " "instance must be configured to request host routes via DHCP (Option 121). " "This option doesn't have any effect when force_metadata is set to True." msgstr "" "DHCP 서버는 격리된 네트워크에서 메타데이터 지원을 제공하도록 지원할 수 있습니" "다. 이 값을 True로 설정하면 DHCP 서버가 특정 호스트 경로를 DHCP 요청에 추가합" "니다. 메타데이터 서비스는 서브넷에 라우터 포트가 포함되지 않은 경우에만 활성" "화됩니다. DHCP를 통해 호스트 경로를 요청하려면 게스트 인스턴스가 구성되어야 " "합니다(옵션 121). 이 옵션은 force_metadata가 True로 설정된 경우 적용되지 않습" "니다." #, python-format msgid "" "The HA Network CIDR specified in the configuration file isn't valid; " "%(cidr)s." msgstr "" "구성 파일에 지정된 HA 네트워크 CIDR이 올바르지 않습니다.%(cidr)s과(와) 연관되" "어 있습니다." msgid "The UDP port to use for VXLAN tunnels." msgstr "VXLAN 터널에 사용하는 UDP 포트" #, python-format msgid "" "The address allocation request could not be satisfied because: %(reason)s" msgstr "다음 원인으로 인해 주소 할당 요청을 충족할 수 없음: %(reason)s" msgid "The advertisement interval in seconds" msgstr "Advertisement 간격(초)" #, python-format msgid "The allocation pool %(pool)s is not valid." msgstr "할당 풀 %(pool)s이(가) 올바르지 않습니다. " #, python-format msgid "" "The allocation pool %(pool)s spans beyond the subnet cidr %(subnet_cidr)s." msgstr "" "할당 풀 %(pool)s이(가) 서브넷 cidr %(subnet_cidr)s 이상으로 확장합니다. " #, python-format msgid "" "The attribute '%(attr)s' is reference to other resource, can't used by sort " "'%(resource)s'" msgstr "" "속성 '%(attr)s'은(는) 다른 자원에 대한 참조이지만 정렬 '%(resource)s'에서 사" "용될 수는 없습니다." msgid "" "The base MAC address Neutron will use for VIFs. The first 3 octets will " "remain unchanged. If the 4th octet is not 00, it will also be used. The " "others will be randomly generated." msgstr "" "VIF에 기본 MAC 주소 Neutron을 사용합니다. 처음 세 개의 octet은 변경되지 않은 " "상태로 남습니다. 네 번째 octet이 00이 아니면 이 octet도 사용됩니다. 다른 " "octet은 임의로 생성됩니다." msgid "" "The base mac address used for unique DVR instances by Neutron. The first 3 " "octets will remain unchanged. If the 4th octet is not 00, it will also be " "used. The others will be randomly generated. The 'dvr_base_mac' *must* be " "different from 'base_mac' to avoid mixing them up with MAC's allocated for " "tenant ports. A 4 octet example would be dvr_base_mac = fa:16:3f:4f:00:00. 
" "The default is 3 octet" msgstr "" "Neutron에 의해 고유 DVR 인스턴스에 사용되는 기본 mac 주소입니다. 처음 세 개 " "octet은 변경되지 않고 남아 있습니다. 네 번째 octet이 00이 아니면 이 octet도 " "사용됩니다. 다른 octet은 무작위로 생성됩니다. Tenant 포트에 대해 할당된 MAC과" "의 혼합을 방지하기 위해 'dvr_base_mac'은 'base_mac'과 달라야 *합니다*. 4 " "octet 예제는 dvr_base_mac = fa:16:3f:4f:00:00입니다. 기본값은 3 octet입니다. " msgid "" "The connection string for the native OVSDB backend. Requires the native " "ovsdb_interface to be enabled." msgstr "" "네이티브 OVSDB 백엔드용 연결 문자열. 네이티브 ovsdb_interface를 사용하도록 설" "정해야 합니다." msgid "The core plugin Neutron will use" msgstr "Neutron이 사용할 코어 플러그인" #, python-format msgid "" "The dns_name passed is a FQDN. Its higher level labels must be equal to the " "dns_domain option in neutron.conf, that has been set to '%(dns_domain)s'. It " "must also include one or more valid DNS labels to the left of " "'%(dns_domain)s'" msgstr "" "전달된 dns_name은 FQDN입니다. 상위 레벨 레이블은 '%(dns_domain)s'(으)로 설정" "된 neutron.conf의 dns_domain 옵션과 동일해야 합니다. 또한 '%(dns_domain)s' 왼" "쪽에 하나 이상의 올바른 DNS 레이블을 포함해야 합니다. " #, python-format msgid "" "The dns_name passed is a PQDN and its size is '%(dns_name_len)s'. The " "dns_domain option in neutron.conf is set to %(dns_domain)s, with a length of " "'%(higher_labels_len)s'. When the two are concatenated to form a FQDN (with " "a '.' at the end), the resulting length exceeds the maximum size of " "'%(fqdn_max_len)s'" msgstr "" "전달된 dns_name은 PQDN이고 크기는 '%(dns_name_len)s'입니다. neutron.conf의 " "dns_domain 옵션은 %(dns_domain)s(으)로 설정되고 길이는 " "'%(higher_labels_len)s'입니다. 끝에 '.'를 사용하여 FQDN을 형성하기 위해 이 둘" "을 연결하면 길이가 최대 크기인 '%(fqdn_max_len)s'을(를) 초과합니다. " msgid "The driver used to manage the DHCP server." msgstr "DHCP 서버를 관리하는 데 사용되는 드라이버입니다. " msgid "The driver used to manage the virtual interface." msgstr "가상 인터페이스를 관리하는 데 사용되는 드라이버입니다. " msgid "" "The email address to be used when creating PTR zones. If not specified, the " "email address will be admin@" msgstr "" "PTR 구역을 작성할 때 사용할 이메일 주소입니다. 지정되지 않은 경우 이메일 주소" "는 admin@입니다." #, python-format msgid "" "The following device_id %(device_id)s is not owned by your tenant or matches " "another tenants router." msgstr "" "device_id %(device_id)s이(가) 사용자 tenant의 소유가 아니거나 다른 tenant 라" "우터와 일치합니다." msgid "The host IP to bind to" msgstr "바인드할 호스트 IP" msgid "The interface for interacting with the OVSDB" msgstr "OVSDB와 상호작용하는 데 필요한 인터페이스" msgid "" "The maximum number of items returned in a single response, value was " "'infinite' or negative integer means no limit" msgstr "" "단일 응답으로 최대 항목 수가 리턴되었습니다. 값이 'infinite' 또는 음수인 경" "우 제한이 없다는 의미입니다. " #, python-format msgid "" "The network %(network_id)s has been already hosted by the DHCP Agent " "%(agent_id)s." msgstr "" "DHCP 에이전트 %(agent_id)s에서 %(network_id)s 네트워크를 이미 호스트하고 있습" "니다. " #, python-format msgid "" "The network %(network_id)s is not hosted by the DHCP agent %(agent_id)s." msgstr "" "DHCP 에이전트 %(agent_id)s에서 %(network_id)s 네트워크를 호스트하지 않습니" "다. " msgid "" "The network type to use when creating the HA network for an HA router. By " "default or if empty, the first 'tenant_network_types' is used. This is " "helpful when the VRRP traffic should use a specific network which is not the " "default one." msgstr "" "HA 라우터에 대한 HA 네트워크 작성 시 사용할 네트워크 유형입니다. 기본적으로 " "또는 비어 있는 경우 첫 번째 'tenant_network_types'가 사용됩니다. 이는 VRRP 트" "래픽이 기본값이 아닌 특정 네트워크를 사용해야 하는 경우에 유용합니다. " #, python-format msgid "The number of allowed address pair exceeds the maximum %(quota)s." msgstr "허용되는 주소 쌍 수가 최대값 %(quota)s을(를) 초과합니다." 
msgid "" "The number of seconds the agent will wait between polling for local device " "changes." msgstr "에이전트가 로컬 디바이스 변경을 폴링하는 사이에 대기하는 시간(초). " msgid "" "The number of seconds to wait before respawning the ovsdb monitor after " "losing communication with it." msgstr "" "통신이 유실된 후에 ovsdb 모니터를 재파생하기 전에 대기할 시간(초)입니다." msgid "The number of sort_keys and sort_dirs must be same" msgstr "sort_keys 및 sort_dirs의 수가 같아야 함" msgid "" "The path for API extensions. Note that this can be a colon-separated list of " "paths. For example: api_extensions_path = extensions:/path/to/more/exts:/" "even/more/exts. The __path__ of neutron.extensions is appended to this, so " "if your extensions are in there you don't need to specify them here." msgstr "" "API 확장의 경로입니다. 이 경로는 콜론으로 구분된 경로 목록일 수 있습니다. " "예: api_extensions_path = extensions:/path/to/more/exts:/even/more/exts. " "neutron.extensions의 __path__가 이 경로에 추가되므로, 해당 위치에 확장이 있으" "면 여기에 지정하지 않아도 됩니다." msgid "The physical network name with which the HA network can be created." msgstr "HA 네트워크를 작성하는 데 사용할 수 있는 실제 네트워크 이름입니다. " #, python-format msgid "The port '%s' was deleted" msgstr "포트 '%s'이(가) 삭제됨" msgid "The port to bind to" msgstr "바인드할 포트" #, python-format msgid "The requested content type %s is invalid." msgstr "요청한 컨텐츠 유형 %s이(가) 올바르지 않습니다." msgid "The resource could not be found." msgstr "자원을 찾을 수 없습니다. " #, python-format msgid "" "The router %(router_id)s has been already hosted by the L3 Agent " "%(agent_id)s." msgstr "" "L3 에이전트 %(agent_id)s에서 %(router_id)s 라우터를 이미 호스트하고 있습니" "다. " msgid "" "The server has either erred or is incapable of performing the requested " "operation." msgstr "서버에 오류가 있거나 서버가 요청된 조작을 수행할 수 없습니다." msgid "The service plugins Neutron will use" msgstr "Neutron이 사용할 서비스 플러그인" #, python-format msgid "The subnet request could not be satisfied because: %(reason)s" msgstr "다음 이유로 인해 서브넷 요청을 충족할 수 없음: %(reason)s" #, python-format msgid "The subproject to execute the command against. Can be one of: '%s'." msgstr "명령을 실행할 하위 프로젝트입니다. 다음 중 하나가 될 수 있음: '%s'." msgid "The type of authentication to use" msgstr "사용할 인증 유형" #, python-format msgid "The value '%(value)s' for %(element)s is not valid." msgstr "%(element)s의 '%(value)s' 값이 올바르지 않습니다." msgid "" "The working mode for the agent. Allowed modes are: 'legacy' - this preserves " "the existing behavior where the L3 agent is deployed on a centralized " "networking node to provide L3 services like DNAT, and SNAT. Use this mode if " "you do not want to adopt DVR. 'dvr' - this mode enables DVR functionality " "and must be used for an L3 agent that runs on a compute host. 'dvr_snat' - " "this enables centralized SNAT support in conjunction with DVR. This mode " "must be used for an L3 agent running on a centralized node (or in single-" "host deployments, e.g. devstack)" msgstr "" "에이전트에 대한 작업 모드입니다. 허용되는 모드는 다음과 같습니다. '레거시' - " "이 모드는 L3 에이전트가 중앙 네트워킹 노드에 배치되어 SNAT와 DNAT 같은 L3 서" "비스를 제공하는 기존 동작을 유지합니다. DVR을 채택하지 않으려면 이 모드를 사" "용하십시오. 'dvr' - 이 모드는 DVR 기능을 사용하며 컴퓨터 호스트에서 실행되는 " "L3 에이전트에 사용해야 합니다. 'dvr_snat' - 이 모드는 DVR과 함께 중앙 SNAT 지" "원을 사용합니다. 중앙 노드에서(또는 devstack과 같은 단일 호스트 배치에서) 실" "행 중인 L3 에이전트에는 이 모드를 사용해야 합니다." msgid "" "There are routers attached to this network that depend on this policy for " "access." msgstr "" "이 네트워크에 연결된 라우터가 있으며, 해당 라우터에는 액세스를 위해 이 정책" "이 필요합니다." msgid "" "This will choose the web framework in which to run the Neutron API server. " "'pecan' is a new experiemental rewrite of the API server." msgstr "" "그러면 Neutron API 서버를 실행할 웹 프레임워크를 선택합니다. 
'pecan'은 API 서" "버를 실험적으로 새로 재작성합니다." msgid "Timeout" msgstr "제한시간" msgid "" "Timeout in seconds for ovs-vsctl commands. If the timeout expires, ovs " "commands will fail with ALARMCLOCK error." msgstr "" "ovs-vsctl 명령의 제한시간(초)입니다. 제한시간이 초과하면 ALARMCLOCK 오류로 인" "해 ovs 명령이 실패합니다." msgid "" "Timeout in seconds to wait for a single OpenFlow request. Used only for " "'native' driver." msgstr "" "단일 OpenFlow 요청을 기다리는 제한시간(초)입니다. 'native' 드라이버에만 사용" "됩니다. " msgid "" "Timeout in seconds to wait for the local switch connecting the controller. " "Used only for 'native' driver." msgstr "" "제어기에 연결되는 로컬 스위치를 기다리는 제한시간(초)입니다. 'native' 드라이" "버에만 사용됩니다. " msgid "" "Too long prefix provided. New name would exceed given length for an " "interface name." msgstr "" "너무 긴 prefix가 지정되었습니다. 새 이름이 인터페이스 이름에 지정된 길이를 초" "과합니다." msgid "Too many availability_zone_hints specified" msgstr "availability_zone_hints가 너무 많이 지정됨" msgid "" "True to delete all ports on all the OpenvSwitch bridges. False to delete " "ports created by Neutron on integration and external network bridges." msgstr "" "모든 OpenvSwitch 브릿지의 모든 포트를 삭제하려면 true입니다. 통합 및 외부 네" "트워크 브릿지에 Neutron이 작성한 포트를 삭제하려면 false입니다. " msgid "Tunnel IP value needed by the ML2 plugin" msgstr "ML2 플러그인에 터널 IP 값이 필요함" msgid "Tunnel bridge to use." msgstr "사용할 터널 브릿지입니다." msgid "" "Type of the nova endpoint to use. This endpoint will be looked up in the " "keystone catalog and should be one of public, internal or admin." msgstr "" "사용할 nova 엔드포인트의 유형입니다. 이 엔드포인트는 keystone 카탈로그에서 검" "색하며 공용, 내부 또는 관리 중 하나여야 합니다." msgid "URL for connecting to designate" msgstr "지정하기 위해 연결할 URL" msgid "URL to database" msgstr "데이터베이스에 대한 URL" #, python-format msgid "Unable to access %s" msgstr "%s에 접근할 수 없음" #, python-format msgid "" "Unable to allocate subnet with prefix length %(prefixlen)s, maximum allowed " "prefix is %(max_prefixlen)s." msgstr "" "Prefix 길이가 %(prefixlen)s인 서브넷을 할당할 수 없습니다. 허용되는 최대 " "Prefix 길이는 %(max_prefixlen)s입니다. " #, python-format msgid "" "Unable to allocate subnet with prefix length %(prefixlen)s, minimum allowed " "prefix is %(min_prefixlen)s." msgstr "" "Prefix 길이가 %(prefixlen)s인 서브넷을 할당할 수 없습니다. 허용되는 최소 " "prefix 길이는 %(min_prefixlen)s입니다. " #, python-format msgid "Unable to calculate %(address_type)s address because of:%(reason)s" msgstr "다음 원인으로 인해 %(address_type)s 주소를 계산할 수 없음: %(reason)s" #, python-format msgid "" "Unable to complete operation for %(router_id)s. The number of routes exceeds " "the maximum %(quota)s." msgstr "" "%(router_id)s에 대한 조작을 완료할 수 없습니다. 라우트 수가 최대 %(quota)s을" "(를) 초과했습니다. " #, python-format msgid "" "Unable to complete operation for %(subnet_id)s. The number of DNS " "nameservers exceeds the limit %(quota)s." msgstr "" "%(subnet_id)s에 대한 조작을 완료할 수 없습니다. DNS 네임서버 수가 %(quota)s " "한계를 초과했습니다. " #, python-format msgid "" "Unable to complete operation for %(subnet_id)s. The number of host routes " "exceeds the limit %(quota)s." msgstr "" "%(subnet_id)s에 대한 조작을 완료할 수 없습니다. 호스트 라우트 수가 %(quota)s " "한계를 초과했습니다. " #, python-format msgid "" "Unable to complete operation on address scope %(address_scope_id)s. There " "are one or more subnet pools in use on the address scope" msgstr "" "주소 범위 %(address_scope_id)s에 대한 조작을 완료할 수 없습니다. 주소 범위에 " "사용 중인 서브넷 풀이 하나 이상 있습니다. 
" #, python-format msgid "Unable to convert value in %s" msgstr "%s 값을 변환할 수 없음" msgid "Unable to create the Agent Gateway Port" msgstr "에이전트 게이트웨이 포트를 작성할 수 없음" msgid "Unable to create the SNAT Interface Port" msgstr "SNAT 인터페이스 포트를 작성할 수 없음" #, python-format msgid "" "Unable to create the flat network. Physical network %(physical_network)s is " "in use." msgstr "" "일반 네트워크를 작성할 수 없습니다. 실제 네트워크 %(physical_network)s이(가) " "사용 중입니다. " msgid "" "Unable to create the network. No available network found in maximum allowed " "attempts." msgstr "" "네트워크를 작성할 수 없습니다. 허용되는 최대 시도 수만큼 시도한 후 사용 가능" "한 네트워크를 찾을 수 없습니다." #, python-format msgid "Unable to delete subnet pool: %(reason)s." msgstr "서브넷 풀을 삭제할 수 없음: %(reason)s." #, python-format msgid "Unable to determine mac address for %s" msgstr "%s에대한 맥 주소를 확인할 수 없습니다" #, python-format msgid "Unable to find '%s' in request body" msgstr "요청 본문에서 '%s'을(를) 찾을 수 없음" #, python-format msgid "Unable to find IP address %(ip_address)s on subnet %(subnet_id)s" msgstr "서브넷 %(subnet_id)s에서 IP 주소 %(ip_address)s을(를) 찾을 수 없음" #, python-format msgid "Unable to find resource name in %s" msgstr "%s에서 자원 이름을 찾을 수 없음" msgid "Unable to generate IP address by EUI64 for IPv4 prefix" msgstr "IPv4 prefix에 대해 EUI64에 의해 IP 주소를 생성할 수 없습니다." #, python-format msgid "Unable to generate unique DVR mac for host %(host)s." msgstr "%(host)s 호스트에 대한 고유 DVR mac을 생성할 수 없습니다." #, python-format msgid "Unable to generate unique mac on network %(net_id)s." msgstr "%(net_id)s 네트워크에 고유 MAC을 생성할 수 없습니다. " #, python-format msgid "" "Unable to identify a target field from:%s. Match should be in the form " "%%()s" msgstr "" "%s에서 대상 필드를 식별할 수 없음. 일치가 다음 양식이어야 함." "%%()s" msgid "Unable to provide external connectivity" msgstr "외부 연결을 제공할 수 없음" msgid "Unable to provide tenant private network" msgstr "Tenant 개인 네트워크를 제공할 수 없음" #, python-format msgid "" "Unable to reconfigure sharing settings for network %(network)s. Multiple " "tenants are using it." msgstr "" "%(network)s 네트워크에 대한 공유 설정을 재구성할 수 없습니다. 여러 개의 " "tenant가 이를 사용 중입니다. " #, python-format msgid "Unable to update address scope %(address_scope_id)s : %(reason)s" msgstr "주소 범위 %(address_scope_id)s을(를) 업데이트할 수 없음: %(reason)s" #, python-format msgid "Unable to update the following object fields: %(fields)s" msgstr "다음 오브젝트 필드를 업데이트할 수 없음: %(fields)s" #, python-format msgid "" "Unable to verify match:%(match)s as the parent resource: %(res)s was not " "found" msgstr "" "상위 소스로서 일치 %(match)s을(를) 확인할 수 없음. %(res)s을(를) 찾을 수 없음" #, python-format msgid "Unexpected label for script %(script_name)s: %(labels)s" msgstr "스크립트 %(script_name)s에 대해 예상치 않은 레이블: %(labels)s" #, python-format msgid "Unexpected number of alembic branch points: %(branchpoints)s" msgstr "예상치 못한 수의 변형 장치(alembic) 분기점: %(branchpoints)s" #, python-format msgid "Unexpected response code: %s" msgstr "예기치 않은 응답 코드: %s" #, python-format msgid "Unexpected response: %s" msgstr "예상치 않은 응답: %s" #, python-format msgid "Unit name '%(unit)s' is not valid." msgstr "단위 이름 '%(unit)s'이(가) 올바르지 않습니다." msgid "Unknown API version specified" msgstr "알 수 없는 API 버전이 지정됨" #, python-format msgid "Unknown address type %(address_type)s" msgstr "알 수 없는 주소 유형 %(address_type)s" #, python-format msgid "Unknown attribute '%s'." msgstr "알 수 없는 속성 '%s'입니다." #, python-format msgid "Unknown chain: %r" msgstr "알 수 없는 체인: %r" #, python-format msgid "Unknown network type %(network_type)s." msgstr "알 수 없는 네트워크 유형 %(network_type)s." #, python-format msgid "Unknown quota resources %(unknown)s." 
msgstr "알 수 없는 할당량 자원 %(unknown)s." msgid "Unmapped error" msgstr "맵핑되지 않은 오류" msgid "Unrecognized action" msgstr "인식되지 않는 조치" #, python-format msgid "Unrecognized attribute(s) '%s'" msgstr "인식되지 않는 속성 '%s'" msgid "Unrecognized field" msgstr "인식되지 않는 필드" msgid "Unspecified minimum subnet pool prefix." msgstr "최소 서브넷 풀 접두부가 지정되지 않았습니다." msgid "Unsupported Content-Type" msgstr "지원되지 않는 Content-Type" #, python-format msgid "Unsupported network type %(net_type)s." msgstr "지원되지 않는 네트워크 유형 %(net_type)s입니다." #, python-format msgid "Unsupported port state: %(port_state)s." msgstr "지원되지 않는 포트 상태: %(port_state)s" msgid "Unsupported request type" msgstr "지원되지 않는 요청 유형" msgid "Updating default security group not allowed." msgstr "기본 보안 그룹 업데이트가 허용되지 않습니다. " msgid "" "Use ML2 l2population mechanism driver to learn remote MAC and IPs and " "improve tunnel scalability." msgstr "" "원격 MAC 및 IP를 학습하고 터널 확장성을 개선하려면 ML2 l2population 메커니즘 " "드라이버를 사용하십시오." msgid "Use broadcast in DHCP replies." msgstr "DHCP 복제본에서 브로드캐스팅을 사용하십시오." msgid "Use either --delta or relative revision, not both" msgstr "--delta 또는 relative revision 중 하나 사용" msgid "" "Use ipset to speed-up the iptables based security groups. Enabling ipset " "support requires that ipset is installed on L2 agent node." msgstr "" "IPset을 사용하여 iptables 기반 보안 그룹의 속도를 높입니다. IPset 지원을 사용" "하려면 IPset이 L2 에이전트 노드에 설치되어야 합니다." msgid "" "Use the root helper when listing the namespaces on a system. This may not be " "required depending on the security configuration. If the root helper is not " "required, set this to False for a performance improvement." msgstr "" "시스템에 네임스페이스를 나열할 때 루트 헬퍼를 사용하십시오. 보안 구성에 따라 " "이 작업은 필요하지 않을 수 있습니다. 루트 헬퍼가 필요하지 않으면 성능이 향상" "되도록 False로 설정하십시오." msgid "" "Use veths instead of patch ports to interconnect the integration bridge to " "physical networks. Support kernel without Open vSwitch patch port support so " "long as it is set to True." msgstr "" "패치 포트 대신 veth를 사용하여 통합 브릿지와 실제 브릿지를 상호연결하십시오. " "True로 설정된 경우에 한해 Open vSwitch 패치 포트가 없는 커널이 지원됩니다." msgid "User (uid or name) running metadata proxy after its initialization" msgstr "초기화 후에 메타데이터 프록시를 실행하는 사용자(uid 또는 이름)" msgid "" "User (uid or name) running metadata proxy after its initialization (if " "empty: agent effective user)." msgstr "" "초기화 후에 메타데이터 프록시를 실행하는 사용자(uid 또는 이름)(비어 있는 경" "우: 에이전트 유효 사용자)." msgid "User (uid or name) running this process after its initialization" msgstr "초기화 이후 이 프로세스를 실행하는 사용자(uid나 이름)" msgid "Username for connecting to designate in admin context" msgstr "관리 컨텍스트에서 지정하기 위해 연결할 사용자 이름" msgid "" "Uses veth for an OVS interface or not. Support kernels with limited " "namespace support (e.g. RHEL 6.5) so long as ovs_use_veth is set to True." msgstr "" "OVS 인터페이스에 veth를 사용하거나 사용하지 않습니다. ovs_use_veth가 True로 " "설정된 경우 네임스페이스 지원이 제한된 커널을 지원합니다(예: RHEL 6.5)." msgid "VRRP authentication password" msgstr "VRRP 인증 비밀번호" msgid "VRRP authentication type" msgstr "VRRP 인증 유형" msgid "VXLAN network unsupported." msgstr "VXLAN 네트워크가 지원되지 않습니다." #, python-format msgid "" "Validation of dictionary's keys failed. Expected keys: %(expected_keys)s " "Provided keys: %(provided_keys)s" msgstr "" "Dictionary 키에대한 유효성 검증을 하지 못했습니다. 만료된 키: " "%(expected_keys)s 제공된 키: %(provided_keys)s" #, python-format msgid "Validator '%s' does not exist." msgstr "Validator '%s'이(가) 없습니다. 
" #, python-format msgid "Value %(value)s in mapping: '%(mapping)s' not unique" msgstr "'%(mapping)s' 맵핑의 %(value)s 값이 고유하지 않음" #, python-format msgid "" "Value of %(parameter)s has to be multiple of %(number)s, with maximum value " "of %(maximum)s and minimum value of %(minimum)s" msgstr "" "%(parameter)s의 값은 %(number)s의 배수여야 하며, 최대값은 %(maximum)s이고 최" "소값은 %(minimum)s입니다." msgid "" "Value of host kernel tick rate (hz) for calculating minimum burst value in " "bandwidth limit rules for a port with QoS. See kernel configuration file for " "HZ value and tc-tbf manual for more information." msgstr "" "QoS가 있는 포트의 대역폭 한계 규칙에서 최소 버스트 값을 계산하기 위한 호스트 " "커널 틱(tick) 속도(hz) 값입니다. 자세한 내용은 HZ 값 및 tc-tbf 매뉴얼의 커널 " "구성 파일을 참조하십시오." msgid "" "Value of latency (ms) for calculating size of queue for a port with QoS. See " "tc-tbf manual for more information." msgstr "" "QoS가 있는 포트의 큐 크기를 계산하기 위한 지연 시간 값(ms)입니다. 자세한 정보" "는 tc-tbf 매뉴얼을 참조하십시오." msgid "" "Watch file log. Log watch should be disabled when metadata_proxy_user/group " "has no read/write permissions on metadata proxy log file." msgstr "" "감시 파일 로그. metadata_proxy_user/group에 메타데이터 프록시 로그 파일에 대" "한 읽기/쓰기 권한이 없는 경우 로그 감시를 사용 안함으로 설정해야 합니다. " msgid "" "When external_network_bridge is set, each L3 agent can be associated with no " "more than one external network. This value should be set to the UUID of that " "external network. To allow L3 agent support multiple external networks, both " "the external_network_bridge and gateway_external_network_id must be left " "empty." msgstr "" "external_network_bridge가 설정되면 각 L3 에이전트가 두 개 이상의 외부 네트워" "크와 연결될 수 없습니다. 이 값은 외부 네트워크의 UUID로 설정되어야 합니다. " "L3 에이전트에서 여러 외부 네트워크를 지원할 수 있으려면 " "external_network_bridge와 gateway_external_network_id가 비어 있어야 합니다." msgid "" "When proxying metadata requests, Neutron signs the Instance-ID header with a " "shared secret to prevent spoofing. You may select any string for a secret, " "but it must match here and in the configuration used by the Nova Metadata " "Server. NOTE: Nova uses the same config key, but in [neutron] section." msgstr "" "메타데이터 요청의 프록시 역할을 수행할 때 Neutron이 위조를 방지하기 위해 공" "유 시크릿으로 Instance-ID 헤더에 서명합니다. Secret으로 임의의 문자열을 선택" "할 수 있지만 여기에 있는 문자열와 Nova Metadata Server에서 사용하는 구성과 일" "치해야 합니다. 참고: Nova에서는 [neutron] 섹션에 있는 동일한 구성 키를 사용합" "니다." msgid "" "Where to store Neutron state files. This directory must be writable by the " "agent." msgstr "" "Neutron 상태 파일을 저장할 위치. 에이전트가 이 디렉토리에 쓸 수 있어야 합니" "다." msgid "" "With IPv6, the network used for the external gateway does not need to have " "an associated subnet, since the automatically assigned link-local address " "(LLA) can be used. However, an IPv6 gateway address is needed for use as the " "next-hop for the default route. If no IPv6 gateway address is configured " "here, (and only then) the neutron router will be configured to get its " "default route from router advertisements (RAs) from the upstream router; in " "which case the upstream router must also be configured to send these RAs. " "The ipv6_gateway, when configured, should be the LLA of the interface on the " "upstream router. If a next-hop using a global unique address (GUA) is " "desired, it needs to be done via a subnet allocated to the network and not " "through this parameter. " msgstr "" "IPv6를 사용하면 자동으로 지정되는 링크 로컬 주소(LLA)를 사용할 수 있으므로 외" "부 게이트웨이에 사용한 네트워크에 연관 서브넷이 필요하지 않습니다. 그러나 기" "본 라우트의 다음 홉으로 사용할 IPv6 게이트웨이 주소가 필요합니다. 여기서 " "IPv6 게이트웨이 주소를 구성하지 않으면(또한 이 경우에만)상위 라우터의 " "RA(Router Advertisement)에서 해당 기본 라우트를 가져오도록 Neutron 라우터를 " "구성할 수 있습니다. 
이 경우 이러한 RA를 보내도록 상위 라우터도 구성해야 합" "니다. ipv6_gateway를 구성한 경우, 이 게이트웨이가 상위 라우터의 인터페이스" "에 대한 LLA여야 합니다. 글로벌 고유 주소(GUA)를 사용하는 다음 홉이 필요한 경" "우, 이 매개변수가 아닌 네트워크에 할당된 서브넷을 통해 수행해야 합니다. " msgid "You must implement __call__" msgstr "__call__을 구현해야 합니다. " msgid "" "You must provide a config file for bridge - either --config-file or " "env[NEUTRON_TEST_CONFIG_FILE]" msgstr "" "브릿지에 대한 구성 파일, 즉 --config-file 또는 " "env[NEUTRON_TEST_CONFIG_FILE]을(를) 제공해야 함" msgid "You must provide a revision or relative delta" msgstr "개정판 또는 상대적 델타를 제공해야 함" msgid "a subnetpool must be specified in the absence of a cidr" msgstr "cidr이 없는 경우 subnetpool을 지정해야 함" msgid "add_ha_port cannot be called inside of a transaction." msgstr "트랜잭션 내에서 add_ha_port를 호출할 수 없습니다." msgid "allocation_pools allowed only for specific subnet requests." msgstr "allocation_pools는 특정 서브넷 요청에만 사용할 수 있습니다." msgid "allocation_pools are not in the subnet" msgstr "allocation_pools가 서브넷에 없음" msgid "allocation_pools use the wrong ip version" msgstr "allocation_pools에서 잘못된 ip 버전을 사용함" msgid "already a synthetic attribute" msgstr "이미 합성 속성임" msgid "binding:profile value too large" msgstr "binding:profile 값이 너무 김" #, python-format msgid "cannot perform %(event)s due to %(reason)s" msgstr "%(reason)s(으)로 인해 %(event)s을(를) 수행할 수 없음" msgid "cidr and prefixlen must not be supplied together" msgstr "cidr 및 prefixlen을 함께 입력하지 않아야 함" #, python-format msgid "dhcp_agents_per_network must be >= 1. '%s' is invalid." msgstr "" "dhcp_agents_per_network는 1 이상이어야 합니다. '%s'은(는) 올바르지 않습니다. " msgid "dns_domain cannot be specified without a dns_name" msgstr "dns_name 없이 dns_domain을 지정할 수 없음" msgid "dns_name cannot be specified without a dns_domain" msgstr "dns_domain 없이 dns_name을 지정할 수 없음" msgid "fixed_ip_address cannot be specified without a port_id" msgstr "fixed_ip_address는 port_id 없이 지정할 수 없음" #, python-format msgid "gateway_ip %s is not in the subnet" msgstr "gateway_ip %s이(가) 서브넷에 없음" #, python-format msgid "has device owner %s" msgstr "디바이스 소유자 %s이(가) 있음" msgid "in use" msgstr "사용 중" #, python-format msgid "ip command failed on device %(dev_name)s: %(reason)s" msgstr "%(dev_name)s 디바이스에 대한 ip 명령 실패: %(reason)s" #, python-format msgid "ip command failed: %(reason)s" msgstr "IP 명령 실패: %(reason)s" #, python-format msgid "ip link capability %(capability)s is not supported" msgstr "ip 링크 기능 %(capability)s이(가) 지원되지 않음" #, python-format msgid "ip link command is not supported: %(reason)s" msgstr "ip 링크 명령이 지원되지 않음: %(reason)s" msgid "ip_version must be specified in the absence of cidr and subnetpool_id" msgstr "cidr 및 subnetpool_id가 없는 경우 ip_version을 지정해야 함" msgid "ipv6_address_mode is not valid when ip_version is 4" msgstr "ip_version이 4인 경우 ipv6_address_mode가 올바르지 않음" msgid "ipv6_ra_mode is not valid when ip_version is 4" msgstr "ip_version이 4인 경우 ipv6_ra_mode가 올바르지 않음" msgid "" "ipv6_ra_mode or ipv6_address_mode cannot be set when enable_dhcp is set to " "False." msgstr "" "enable_dhcp가 False로 설정된 경우 ipv6_ra_mode 또는 ipv6_address_mode를 설정" "할 수 없습니다." #, python-format msgid "" "ipv6_ra_mode set to '%(ra_mode)s' with ipv6_address_mode set to " "'%(addr_mode)s' is not valid. If both attributes are set, they must be the " "same value" msgstr "" "'%(ra_mode)s'(으)로 설정된 ipv6_ra_mode('%(addr_mode)s'(으)로 설정된 " "ipv6_address_mode 포함)가 올바르지 않습니다. 두 속성이 모두 설정된 경우 동일" "한 값이어야 합니다." msgid "mac address update" msgstr "MAC 주소 업데이트" #, python-format msgid "" "max_l3_agents_per_router %(max_agents)s config parameter is not valid. 
It " "has to be greater than or equal to min_l3_agents_per_router %(min_agents)s." msgstr "" "max_l3_agents_per_router %(max_agents)s 구성 매개변수가 올바르지 않습니다. " "min_l3_agents_per_router와 같거나 이보다 커야 합니다.%(min_agents)s과(와) 연" "관되어 있습니다." msgid "must provide exactly 2 arguments - cidr and MAC" msgstr "정확히 두 개의 인수 - cidr 및 MAC를 제공해야 함" msgid "network_type required" msgstr "network_type이 필요함" #, python-format msgid "network_type value '%s' not supported" msgstr "network_type에서 '%s' 값을 지원하지 않습니다" msgid "new subnet" msgstr "새 서브넷" #, python-format msgid "physical_network '%s' unknown for VLAN provider network" msgstr "VLAN 제공자 네트워크에 대해 실제 네트워크 '%s'을(를) 알 수 없음. " #, python-format msgid "physical_network '%s' unknown for flat provider network" msgstr "플랫 제공자 네트워크에 대해 실제 네트워크 '%s'을(를) 알 수 없음. " msgid "physical_network required for flat provider network" msgstr "플랫 제공자 네트워크에 실제 네트워크 필요" #, python-format msgid "provider:physical_network specified for %s network" msgstr "%s 네트워크에 대해 지정된 provider:physical_network 입니다" #, python-format msgid "rbac_db_model not found in %s" msgstr "%s에서 rbac_db_model을 찾을 수 없음" msgid "record" msgstr "레코드" msgid "respawn_interval must be >= 0 if provided." msgstr "respawn_interval은 >= 0이어야 합니다(제공된 경우)." #, python-format msgid "segmentation_id out of range (%(min)s through %(max)s)" msgstr "segmentation_id가 범위(%(min)s - %(max)s)를 벗어남" msgid "segmentation_id requires physical_network for VLAN provider network" msgstr "segmentation_id는 VLAN 제공자 네트워크의 physical_network가 필요함" msgid "shared attribute switching to synthetic" msgstr "공유 속성을 합성으로 전환" #, python-format msgid "" "subnetpool %(subnetpool_id)s cannot be updated when associated with shared " "address scope %(address_scope_id)s" msgstr "" "공유 주소 범위 %(address_scope_id)s과(와) 연관된 경우 서브넷 풀 " "%(subnetpool_id)s을(를) 업데이트할 수 없음" msgid "subnetpool_id and use_default_subnetpool cannot both be specified" msgstr "subnetpool_id 및 use_default_subnetpool을 모두 지정할 수 없음" msgid "the nexthop is not connected with router" msgstr "Nexthop이 라우터와 연결되지 않음" msgid "the nexthop is used by router" msgstr "라우터가 nexthop을 사용함" #, python-format msgid "unable to load %s" msgstr "%s을(를) 로드할 수 없음" msgid "" "uuid provided from the command line so external_process can track us via /" "proc/cmdline interface." msgstr "" "external_process가 /proc/cmdline 인터페이스를 통해 추적할 수 있도록 명령행에" "서 제공된 uuid입니다." neutron-8.4.0/neutron/locale/ko_KR/LC_MESSAGES/neutron-log-info.po0000664000567000056710000006617313044372760025666 0ustar jenkinsjenkins00000000000000# Translations template for neutron. # Copyright (C) 2015 ORGANIZATION # This file is distributed under the same license as the neutron project. # # Translators: # Sungjin Kang , 2016. 
#zanata msgid "" msgstr "" "Project-Id-Version: neutron 8.0.1.dev68\n" "Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n" "POT-Creation-Date: 2016-04-18 20:06+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" "PO-Revision-Date: 2016-04-08 12:16+0000\n" "Last-Translator: SeYeon Lee \n" "Language: ko-KR\n" "Plural-Forms: nplurals=1; plural=0;\n" "Generated-By: Babel 2.0\n" "X-Generator: Zanata 3.7.3\n" "Language-Team: Korean (South Korea)\n" #, python-format msgid "%(action)s failed (client error): %(exc)s" msgstr "%(action)s 실패(클라이언트 오류): %(exc)s" #, python-format msgid "%(method)s %(url)s" msgstr "%(method)s %(url)s" #, python-format msgid "%(prog)s version %(version)s" msgstr "%(prog)s 버전 %(version)s" #, python-format msgid "" "%(rule_types)s rule types disabled for ml2 because %(driver)s does not " "support them" msgstr "" "%(driver)s에서 지원하지 않으므로 ml2의 %(rule_types)s 규칙 유형이 사용되지 않" "음" #, python-format msgid "%(type)s ID ranges: %(range)s" msgstr "%(type)s ID 범위: %(range)s" #, python-format msgid "%(url)s returned a fault: %(exception)s" msgstr "%(url)s이(가) 결함을 리턴함: %(exception)s" #, python-format msgid "%(url)s returned with HTTP %(status)d" msgstr "%(url)s이(가) HTTP %(status)d(으)로 리턴되었음" #, python-format msgid "%d probe(s) deleted" msgstr "%d 프로브가 발견됨" #, python-format msgid "%s Agent RPC Daemon Started!" msgstr "%s 에이전트 RPC 데몬이 시작되었습니다!" #, python-format msgid "%s Agent has just been revived. Doing a full sync." msgstr "%s 에이전트가 다시 활성화되었습니다. 전체 동기화를 수행합니다." #, python-format msgid "%s Agent out of sync with plugin!" msgstr "%s 에이전트가 플러그인과 동기화되지 않았습니다!" #, python-format msgid "" "Added BGP Peer %(peer)s for remote_as=%(as)d to BGP Speaker running for " "local_as=%(local_as)d." msgstr "" "local_as=%(local_as)d에 대해 실행 중인 BGP 스피커에 remote_as=%(as)d의 BGP 피" "어 %(peer)s이(가) 추가되었습니다." #, python-format msgid "Added BGP Speaker for local_as=%(as)d with router_id= %(rtid)s." msgstr "router_id= %(rtid)s인 local_as=%(as)d의 BGP 스피커가 추가되었습니다." #, python-format msgid "" "Added controller for resource %(resource)s via URI path segment:" "%(collection)s" msgstr "" "URI 경로 세그먼트를 통해 자원 %(resource)s의 컨트롤러 추가: %(collection)s" #, python-format msgid "" "Added segment %(id)s of type %(network_type)s for network %(network_id)s" msgstr "" "네트워크 %(network_id)s에 대한 유형 %(network_type)s의 세그먼트 %(id)s이(가) " "추가됨" #, python-format msgid "Adding %s to list of bridges." msgstr "브릿지 목록에 %s 추가." #, python-format msgid "Adding network %(net)s to agent %(agent)s on host %(host)s" msgstr "호스트 %(host)s에서 네트워크 %(net)s을(를) 에이전트 %(agent)s 추가" #, python-format msgid "Agent %s already present" msgstr "%s 에이전트가 이미 있음" #, python-format msgid "Agent Gateway port does not exist, so create one: %s" msgstr "에이전트 게이트워이 포트가 없으므로 하나 작성: %s" msgid "Agent caught SIGHUP, resetting." msgstr "에이전트에서 SIGHUP을 발견하여, 재설정합니다." msgid "Agent caught SIGTERM, quitting daemon loop." msgstr "에이전트에서 SIGTERM을 발견하여 데몬 루프를 중단합니다." msgid "Agent has just been revived. Doing a full sync." msgstr "에이전트가 다시 활성화되었습니다. 전체 동기화를 수행합니다." msgid "Agent has just been revived. Scheduling full sync" msgstr "에이전트가 다시 활성화되었습니다. 전체 동기화를 스케줄링합니다." msgid "Agent initialized successfully, now running... " msgstr "에이전트가 초기화되었으며, 지금 실행 중... " msgid "Agent out of sync with plugin!" msgstr "에이전트가 플러그인과 동기화되지 않았습니다!" msgid "All active networks have been fetched through RPC." msgstr "RPC를 통해 활성 네트워크를 모두 가져왔습니다." 
msgid "" "Allow sorting is enabled because native pagination requires native sorting" msgstr "" "네이티브 페이지 번호 매기기에 네이티브 정렬이 필요하므로 정렬을 사용할 수 있" "음" #, python-format msgid "Allowable flat physical_network names: %s" msgstr "허용 가능한 플랫 physical_network 이름: %s" #, python-format msgid "Ancillary Ports %(added)s added, failed devices %(failed)s" msgstr "추가된 보조 포트 %(added)s, 실패한 장치 %(failed)s" #, python-format msgid "Ancillary ports %s removed" msgstr "보조 포트 %s이(가) 제거됨" msgid "Arbitrary flat physical_network names allowed" msgstr "임의의 플랫 physical_network 이름이 허용됨" #, python-format msgid "Assigning %(vlan_id)s as local vlan for net-id=%(net_uuid)s" msgstr "%(vlan_id)s을(를) net-id=%(net_uuid)s에 대한 로컬 vlan으로 지정 중" #, python-format msgid "Attachment %s removed" msgstr "첨부 %s이(가) 제거됨" #, python-format msgid "" "Attempt %(count)s to allocate a VRID in the network %(network)s for the " "router %(router)s" msgstr "" "라우터 %(router)s의 네트워크 %(network)s에서 VRID를 할당하려고 %(count)s번 시" "도" #, python-format msgid "Attempt %(count)s to bind port %(port)s" msgstr "포트 %(port)s를 바인드하기 위해 %(count)s번 시도" #, python-format msgid "Attempted to remove port filter which is not filtered %r" msgstr "필터링된 %r이(가) 아닌 포트 필터를 제거하려고 시도함" #, python-format msgid "Attempted to update port filter which is not filtered %s" msgstr "필터링된 %s이(가) 아닌 포트 필터를 업데이트하려고 시도함" msgid "BGP Dynamic Routing agent started" msgstr "BGP 동적 라우팅 에이전트가 시작됨" #, python-format msgid "BGP Peer %(peer_ip)s for remote_as=%(peer_as)d is UP." msgstr "remote_as=%(peer_as)d의 BGP 피어 %(peer_ip)s이(가) 작동됩니다." #, python-format msgid "BGP Peer %(peer_ip)s for remote_as=%(peer_as)d went DOWN." msgstr "" "remote_as=%(peer_as)d의 BGP 피어 %(peer_ip)s이(가) 작동 중단되었습니다." msgid "BGP dynamic routing agent started" msgstr "BGP 동적 라우팅 에이전트가 시작됨" #, python-format msgid "" "Best path change observed. cidr=%(prefix)s, nexthop=%(nexthop)s, remote_as=" "%(remote_as)d, is_withdraw=%(is_withdraw)s" msgstr "" "최적의 패치 변경사항이 관찰되었습니다. cidr=%(prefix)s, nexthop=%(nexthop)s, " "remote_as=%(remote_as)d, is_withdraw=%(is_withdraw)s" #, python-format msgid "BgpDrAgent %s already present" msgstr "BgpDrAgent %s이(가) 이미 있음" #, python-format msgid "" "Binding info for port %s was not found, it might have been deleted already." msgstr "포트 %s의 바인딩 정보를 찾을 수 없음, 이미 삭제되었을 수 있습니다." #, python-format msgid "Bridge %(br_name)s changed its datapath-ID from %(old)s to %(new)s" msgstr "브릿지 %(br_name)s의 datapath-ID가 %(old)s에서 %(new)s(으)로 변경됨" #, python-format msgid "Bridge %(br_name)s has datapath-ID %(dpid)s" msgstr "브릿지 %(br_name)s에 datapath-ID %(dpid)s이(가) 있음" #, python-format msgid "Bridge mappings: %s" msgstr "브릿지 맵핑: %s" #, python-format msgid "Bridge mappings: %s." msgstr "브릿지 맵핑: %s." 
#, python-format msgid "" "Cannot apply dhcp option %(opt)s because it's ip_version %(version)d is not " "in port's address IP versions" msgstr "" "ip_version %(version)d이(가) 포트의 주소 IP 버전이 아니므로 dhcp 옵션 %(opt)s" "을(를) 적용할 수 없음" #, python-format msgid "Centralizing distributed router %s is not supported" msgstr "분산 라우터 %s을(를) 중앙화하는 기능은 지원되지 않음" #, python-format msgid "Cleaning bridge: %s" msgstr "브릿지 정리: %s" #, python-format msgid "Cleaning stale %s flows" msgstr "시간 경과된 %s 플로우 정리" #, python-format msgid "Clearing orphaned ARP spoofing entries for devices %s" msgstr "장치 %s의 orphan ARP 위조 항목 지우기" #, python-format msgid "" "Collecting BGP Peer statistics for peer_ip=%(peer)s, running in speaker_as=" "%(speaker_as)d " msgstr "" "speaker_as=%(speaker_as)d에서 실행 중인 peer_ip=%(peer)s의 BGP 피어 통계 수집" #, python-format msgid "Collecting BGP Speaker statistics for local_as=%d." msgstr "local_as=%d의 BGP 스피커 통계를 수집합니다." msgid "" "ConfDriver is used as quota_driver because the loaded plugin does not " "support 'quotas' table." msgstr "" "로드된 플러그인에서 '할당량' 테이블을 지원하지 않으므로 ConfDriver를 " "quota_driver로 사용합니다." #, python-format msgid "" "Configuration for devices up %(up)s and devices down %(down)s completed." msgstr "장치 작동 %(up)s 및 장치 작동 해제 %(down)s 구성이 완료되었습니다." #, python-format msgid "Configured extension driver names: %s" msgstr "구성된 확장 드라이버 이름: %s" #, python-format msgid "Configured mechanism driver names: %s" msgstr "매커니즘 드라이버 이름을 설정했습니다: %s" #, python-format msgid "Configured type driver names: %s" msgstr "형식 드라이버 이름을 설정했습니다: %s" msgid "Configuring tunnel endpoints to other OVS agents" msgstr "다른 OVS 에이전트에대한 커널 엔드포인트 구성" #, python-format msgid "Creating instance of CountableResource for resource:%s" msgstr "자원에대한 CountableResource 인스턴스 작성:%s" #, python-format msgid "Creating instance of TrackedResource for resource:%s" msgstr "자원에대한 TrackedResource 인스턴스 작성:%s" msgid "DHCP agent started" msgstr "DHCP 에이전트가 시작됨" msgid "DNSExtensionDriverML2 initialization complete" msgstr "DNSExtensionDriverML2 초기화 완료" #, python-format msgid "Default provider is not specified for service type %s" msgstr "서비스 유형 %s의 기본 제공자가 지정되지 않음" #, python-format msgid "Deleting port: %s" msgstr "포트 삭제: %s" #, python-format msgid "Destroying IPset: %s" msgstr "IPset 영구 삭제: %s" #, python-format msgid "Destroying IPsets with prefix: %s" msgstr "접두어가 있는 IPset 영구 삭제: %s" #, python-format msgid "Device %(device)s spoofcheck %(spoofcheck)s" msgstr "장치 %(device)s 위조 확인 %(spoofcheck)s" #, python-format msgid "Device %s already exists" msgstr "%s 디바이스가 이미 존재함" #, python-format msgid "Device %s not defined on plugin" msgstr "%s 디바이스가 플러그인에서 정의되지 않음" #, python-format msgid "Device with MAC %s not defined on plugin" msgstr "MAC가 %s인 디바이스가 플러그인에서 정의되지 않음" #, python-format msgid "Devices down %s " msgstr "장치 작동 중단 %s" msgid "Disabled allowed-address-pairs extension." msgstr "allowed-address-pairs 확장을 사용하지 않습니다." msgid "Disabled security-group extension." msgstr "보안 그룹 확장을 사용하지 않습니다. " msgid "Disabled vlantransparent extension." msgstr "vlantransparent 확장을 사용하지 않습니다." msgid "Eventlet based AMQP RPC server starting..." msgstr "Eventlet 기반 AMQP RPC 서버를 시작 중..." #, python-format msgid "Exclude Devices: %s" msgstr "장치 제외: %s" #, python-format msgid "Extension %s is pecan-aware. Fetching resources and controllers" msgstr "확장 %s이(가) pecan을 인지합니다. 
자원 및 컨트롤러를 가져오는 중" #, python-format msgid "Extension driver '%(name)s' failed in %(method)s" msgstr "확장 드라이버 '%(name)s'이(가) %(method)s에서 실패" #, python-format msgid "" "Failed to schedule network %s, no eligible agents or it might be already " "scheduled by another server" msgstr "" "네트워크 %s을(를) 스케줄링하는 데 실패. 적합한 에이전트가 없거나 다른 서" "버에서 이미 스케줄되었을 수 있음" #, python-format msgid "Finished network %s dhcp configuration" msgstr "네트워크 %s dhcp 구성이 완료됨" msgid "Flat networks are disabled" msgstr "플랫 네트워크가 사용되지 않음" #, python-format msgid "Found invalid IP address in pool: %(start)s - %(end)s:" msgstr "풀에서 올바르지 않은 IP 주소 발견: %(start)s - %(end)s:" #, python-format msgid "Found overlapping ranges: %(l_range)s and %(r_range)s" msgstr "겹치는 범위 발견: %(l_range)s 및 %(r_range)s" #, python-format msgid "Found pool larger than subnet CIDR:%(start)s - %(end)s" msgstr "서브넷 CIDR보다 큰 풀 발견: %(start)s - %(end)s" #, python-format msgid "" "Found port (%(port_id)s, %(ip)s) having IP allocation on subnet %(subnet)s, " "cannot delete" msgstr "" "서브넷 %(subnet)s에서 IP가 할당된 포트(%(port_id)s, %(ip)s)를 찾아, 삭제할 " "수 없음" #, python-format msgid "Got %(alias)s extension from driver '%(drv)s'" msgstr "드라이버 '%(drv)s'에서 %(alias)s 확장 가져오기" #, python-format msgid "" "HA network %(network)s was deleted as no HA routers are present in tenant " "%(tenant)s." msgstr "" "Tenant %(tenant)s에 HA 라우터가 없으므로 HA 네트워크 %(network)s이(가) 삭제되" "었습니다." #, python-format msgid "HA network %s can not be deleted." msgstr "HA 네트워크 %s을(를) 삭제할 수 없습니다." #, python-format msgid "HTTP exception thrown: %s" msgstr "HTTP 예외 처리: %s" #, python-format msgid "" "Heartbeat received from %(type)s agent on host %(host)s, uuid %(uuid)s after " "%(delta)s" msgstr "" "%(delta)s 후에 호스트 %(host)s, uuid %(uuid)s의 %(type)s 에이전트에서 하트비" "트 수신" msgid "IPset cleanup completed successfully" msgstr "IPset 정리가 완료됨" msgid "IPv6 is not enabled on this system." msgstr "이 시스템에서 IPv6이 사용되지 않습니다." #, python-format msgid "Initialized Ryu BGP Speaker driver interface with bgp_router_id=%s" msgstr "bgp_router_id=%s인 Ryu BGP 스피커 드라이버 인터페이스가 초기화됨" msgid "Initializing Linux bridge QoS extension" msgstr "Linux 브릿지 QoS 확장 초기화" msgid "Initializing Ryu driver for BGP Speaker functionality." msgstr "BGP 스피커 기능에서 Ryu 드라이버를 초기화합니다." #, python-format msgid "Initializing agent extension '%s'" msgstr "에이전트 확장 '%s' 초기화" #, python-format msgid "Initializing driver for type '%s'" msgstr "'%s' 형식 드라이버 초기화 중" #, python-format msgid "Initializing extension driver '%s'" msgstr "확장 드라이버 '%s' 초기화" msgid "Initializing extension manager." msgstr "확장기능 관리자를 초기화 중입니다. " #, python-format msgid "Initializing mechanism driver '%s'" msgstr "메커니즘 드라이버 '%s' 초기화" #, python-format msgid "Interface mappings: %s" msgstr "인터페이스 맵핑: %s" #, python-format msgid "Interface mappings: %s." msgstr "인터페이스 맵핑: %s." #, python-format msgid "L2 Agent operating in DVR Mode with MAC %s" msgstr "MAC가 %s인 DVR 모드에서 작동하는 L2 에이전트 " msgid "L3 agent started" msgstr "L3 에이전트가 시작됨" #, python-format msgid "Linux bridge %s deleted" msgstr "Linux 브릿지 %s이(가) 삭제됨" msgid "Linux bridge cleanup completed successfully" msgstr "Linux 브릿지 정리가 완료됨" #, python-format msgid "Loaded agent extensions: %s" msgstr "에이전트 확장이 로드됨: %s" #, python-format msgid "Loaded extension driver names: %s" msgstr "로드된 확장 드라이버 이름: %s" #, python-format msgid "Loaded extension: %s" msgstr "로드된 확장: %s" #, python-format msgid "Loaded mechanism driver names: %s" msgstr "메커니즘 드라이버 이름을 불러왔습니다: %s" #, python-format msgid "Loaded quota_driver: %s." msgstr "로드된 quota_driver: %s." 
#, python-format msgid "Loaded type driver names: %s" msgstr "형식 드라이버 이름을 불러왔습니다: %s" #, python-format msgid "Loading %(name)s (%(description)s) notification driver for QoS plugin" msgstr "QoS 플러그인의 %(name)s(%(description)s) 알림 드라이버 로드" #, python-format msgid "Loading Metering driver %s" msgstr "측정 드라이버 %s 로드" #, python-format msgid "Loading Plugin: %s" msgstr "로딩 플러그인: %s" #, python-format msgid "Loading core plugin: %s" msgstr "코어 플러그인 로드: %s" #, python-format msgid "Loading interface driver %s" msgstr "인터페이스 드라이버 %s 로드" msgid "Logging enabled!" msgstr "로깅 사용!" msgid "ML2 FlatTypeDriver initialization complete" msgstr "ML2 FlatTypeDriver 초기화 완료" msgid "ML2 LocalTypeDriver initialization complete" msgstr "ML2 LocalTypeDriver 초기화 완료" #, python-format msgid "Mapping physical network %(physical_network)s to bridge %(bridge)s" msgstr "실제 네트워크 %(physical_network)s을(를) 브릿지 %(bridge)s에 맵핑 중" msgid "Modular L2 Plugin initialization complete" msgstr "모듈러 L2 플러그인 초기화를 완료했습니다" #, python-format msgid "Network VLAN ranges: %s" msgstr "네트워크 VLAN 범위: %s" #, python-format msgid "Neutron service started, listening on %(host)s:%(port)s" msgstr "Neutron 서비스가 시작되었으며 %(host)s:%(port)s에서 청취 중임" #, python-format msgid "No device with MAC %s defined on agent." msgstr "MAC가 %s인 장치가 에이전트에 정의되지 않았습니다." #, python-format msgid "No ports have port_id starting with %s" msgstr "포트에 %s(으)로 시작하는 port_id가 없음" msgid "No ports here to refresh firewall" msgstr "방화벽을 새로 고칠 포트가 여기에 없음" #, python-format msgid "Nova event response: %s" msgstr "Nova 이벤트 응답: %s" #, python-format msgid "" "Number of active agents lower than max_l3_agents_per_router. L3 agents " "available: %s" msgstr "" "max_l3_agents_per_router 이하의 활성 에이전트 수. L3 에이전트를 사용할 수 있" "음: %s" msgid "OVS cleanup completed successfully" msgstr "OVS 정리가 완료됨" msgid "Pecan WSGI server starting..." msgstr "Pecan WSGI 서버 시작 중..." #, python-format msgid "Physical Devices mappings: %s" msgstr "실제 장치 맵핑: %s" #, python-format msgid "" "Physical network %s is defined in bridge_mappings and cannot be deleted." msgstr "" "실제 네트워크 %s이(가) bridge_mappings에 정의되어 있으므로 삭제할 수 없습니" "다." #, python-format msgid "Port %(device)s updated. Details: %(details)s" msgstr "%(device)s 포트가 업데이트되었습니다. 세부사항: %(details)s" #, python-format msgid "Port %(port_id)s not present in bridge %(br_name)s" msgstr "브릿지 %(br_name)s에 포트 %(port_id)s이(가) 없음" #, python-format msgid "Port %s updated." msgstr "%s 포트가 업데이트되었습니다. " #, python-format msgid "Port %s was deleted concurrently" msgstr "포트 %s이(가) 동시에 삭제됨" #, python-format msgid "" "Port %s was not found on the integration bridge and will therefore not be " "processed" msgstr "%s 포트를 통합 브릿지에서 찾을 수 없으므로 처리되지 않음" #, python-format msgid "Port '%(port_name)s' has lost its vlan tag '%(vlan_tag)d'!" msgstr "" "포트 '%(port_name)s'에서 vlan 태그 '%(vlan_tag)d'이(가) 유실되었습니다!" #, python-format msgid "Port with MAC %(mac)s and PCI slot %(pci_slot)s updated." msgstr "MAC가 %(mac)s이고 PCI 슬롯이 %(pci_slot)s인 포트가 업데이트되었습니다." msgid "PortSecurityExtensionDriver initialization complete" msgstr "PortSecurityExtensionDriver 초기화 완료" #, python-format msgid "Ports %s removed" msgstr "제거된 포트 %s" #, python-format msgid "Preparing filters for devices %s" msgstr "%s 디바이스에 대한 필터 준비" #, python-format msgid "Process runs with uid/gid: %(uid)s/%(gid)s" msgstr "UID/GID가 %(uid)s/%(gid)s인 프로세스 실행" msgid "Provider rule updated" msgstr "제공자 규칙이 업데이트됨" #, python-format msgid "" "QoS extension did have no information about the port %s that we were trying " "to reset" msgstr "QoS 확장에 재설정하려는 포트 %s에 대한 정보가 없습니다." 
#, python-format msgid "" "QoS policy %(qos_policy_id)s applied to port %(port_id)s is not available on " "server, it has been deleted. Skipping." msgstr "" "포트 %(port_id)s에 적용된 QoS 정책 %(qos_policy_id)s을(를) 서버에서 사용할 " "수 없어 삭제되었습니다. 건너뜁니다." #, python-format msgid "RPC agent_id: %s" msgstr "RPC agent_id: %s" msgid "RPC was already started in parent process by plugin." msgstr "플러그인에서 상위 프로세스의 RPC를 이미 시작했습니다." #, python-format msgid "Reclaiming vlan = %(vlan_id)s from net-id = %(net_uuid)s" msgstr "net-id = %(net_uuid)s에서 vlan = %(vlan_id)s 재확보 중" msgid "Refresh firewall rules" msgstr "방화벽 규칙 새로 고치기" #, python-format msgid "Registered extension drivers: %s" msgstr "등록된 확장 드라이버: %s" #, python-format msgid "Registered mechanism drivers: %s" msgstr "등록된 메커니즘 드라이버: %s" #, python-format msgid "Registered types: %s" msgstr "등록된 형식: %s" #, python-format msgid "Remove device filter for %r" msgstr "%r 에대한 디바이스 필터 제거" #, python-format msgid "" "Removed BGP Peer %(peer)s from BGP Speaker running for local_as=%(local_as)d." msgstr "" "local_as=%(local_as)d에 대해 실행 중인 BGP 스피커에서 BGP 피어 %(peer)s을" "(를) 제거했습니다." #, python-format msgid "Removed BGP Speaker for local_as=%(as)d with router_id=%(rtid)s." msgstr "router_id=%(rtid)s인 local_as=%(as)d의 BGP 스피커가 제거되었습니다." #, python-format msgid "Removing device with MAC address %(mac)s and PCI slot %(pci_slot)s" msgstr "MAC 주소가 %(mac)s이고 PCI 슬롯이 %(pci_slot)s인 장치 제거" #, python-format msgid "Removing iptables rule for IPset: %s" msgstr "IPset의 iptables 규칙 제거: %s" #, python-format msgid "" "Route cidr=%(prefix)s is withdrawn from BGP Speaker running for local_as=" "%(local_as)d." msgstr "" "local_as=%(local_as)d에 대해 실행 중인 BGP 스피커에서 경로 cidr=%(prefix)s이 " "취소되었습니다." #, python-format msgid "" "Route cidr=%(prefix)s, nexthop=%(nexthop)s is advertised for BGP Speaker " "running for local_as=%(local_as)d." msgstr "" "local_as=%(local_as)d에 대해 실행 중인 BGP 스피커의 경로 cidr=%(prefix)s, " "nexthop=%(nexthop)s이(가) 광고됩니다." #, python-format msgid "Router %(router_id)s transitioned to %(state)s" msgstr "라우터 %(router_id)s이(가) %(state)s(으)로 전이됨" #, python-format msgid "" "Router %s is not managed by this agent. It was possibly deleted concurrently." msgstr "" "이 에이전트에서 라우터 %s을(를) 관리하지 않습니다. 동시에 삭제되었을 가능성" "이 있습니다." #, python-format msgid "SNAT interface port list does not exist, so create one: %s" msgstr "SNAT 인터페이스 포트 목록이 없으므로, 하나 작성: %s" msgid "SRIOV NIC Agent RPC Daemon Started!" msgstr "SRIOV NIC 에이전트 RPC 데몬이 시작되었습니다!" #, python-format msgid "Scheduling unhosted network %s" msgstr "호스트되지 않은 네트워크 %s 스케줄링" #, python-format msgid "Security group member updated %r" msgstr "보안 그룹 멤버가 %r을(를) 업데이트함" #, python-format msgid "Security group rule updated %r" msgstr "보안 그룹 규칙이 %r을(를) 업데이트함" #, python-format msgid "Service %s is supported by the core plugin" msgstr "서비스 %s은(는) 코어 플러그인에서 지원하지 않음" #, python-format msgid "" "Skipping ARP spoofing rules for port '%s' because it has port security " "disabled" msgstr "포트 보안을 사용하지 않으므로 포트 '%s'의 ARP 위조 규칙을 건너뜀" #, python-format msgid "Skipping DHCP port %s as it is already in use" msgstr "DHCP 포트 %s이(가) 이미 사용 중이므로 건너뛰기" #, python-format msgid "" "Skipping method %s as firewall is disabled or configured as " "NoopFirewallDriver." msgstr "" "방화벽이 사용되지 않거나 NoopFirewallDriver로 구성되었으므로 %s 메소드를 건너" "뜁니다." msgid "" "Skipping period L3 agent status check because automatic router rescheduling " "is disabled." msgstr "" "자동 라우터 재스케줄링을 사용하지 않으므로 주기적 L3 에이전트 상태 확인을 건" "너뜁니다." 
msgid "" "Skipping periodic DHCP agent status check because automatic network " "rescheduling is disabled." msgstr "" "자동 네트워크 재스케줄링을 사용하지 않으므로 주기적 DHCP 에이전트 상태 확인" "을 건너뜁니다." #, python-format msgid "Skipping port %s as no IP is configure on it" msgstr "구성된 IP가 없어서 포트 %s을(를) 건너뜀" msgid "Specified IP addresses do not match the subnet IP version" msgstr "지정된 IP 주소가 서브넷 IP 버전과 일치하지 않음" #, python-format msgid "Starting network %s dhcp configuration" msgstr "네트워크 %s dhcp 구성 시작" #, python-format msgid "Stopping %s agent." msgstr "%s 에이전트를 중지합니다." #, python-format msgid "Subnet %s was deleted concurrently" msgstr "서브넷 %s이(가) 동시에 삭제됨" msgid "Synchronizing state" msgstr "상태 동기화 중" msgid "Synchronizing state complete" msgstr "상태 동기화 완료" #, python-format msgid "Tenant network_types: %s" msgstr "Tenant network_types: %s" #, python-format msgid "" "The requested interface name %(requested_name)s exceeds the %(limit)d " "character limitation. It was shortened to %(new_name)s to fit." msgstr "" "요청된 인터페이스 이름 %(requested_name)s이(가) 최대치인 %(limit)d자 이상입니" "다. 이 제한에 맞게 %(new_name)s(으)로 단축됩니다." #, python-format msgid "Trigger reload_allocations for port %s" msgstr "포트 %s 에대한 reload_allocations 트리거" #, python-format msgid "" "Tunnel IP %(ip)s was used by host %(host)s and will be assigned to " "%(new_host)s" msgstr "" "호스트 %(host)s에서 터널 IP %(ip)s을(를) 사용했으며 %(new_host)s에 할당됨" #, python-format msgid "VIF port: %s admin state up disabled, putting on the dead VLAN" msgstr "VIF 포트: %s 관리 설정을 사용하지 않음, 작동하지 않는 VLAN에 둠" #, python-format msgid "" "Validation for CIDR: %(new_cidr)s failed - overlaps with subnet " "%(subnet_id)s (CIDR: %(cidr)s)" msgstr "" "CIDR %(new_cidr)s 유효성 검증 실패 - 서브넷 %(subnet_id)s(CIDR: %(cidr)s)과" "(와) 겹침" msgid "VlanTypeDriver initialization complete" msgstr "VlanTypeDriver 초기화 완료" #, python-format msgid "agent_updated by server side %s!" msgstr "서버측 %s!에 의한 agent_updated" #, python-format msgid "port_unbound(): net_uuid %s not in local_vlan_map" msgstr "port_unbound(): net_uuid %s이(가) local_vlan_map에 없음" msgid "rpc_loop doing a full sync." msgstr "전체 동기화를 수행하는 rpc_loop." neutron-8.4.0/neutron/locale/ko_KR/LC_MESSAGES/neutron-log-error.po0000664000567000056710000013245113044372760026055 0ustar jenkinsjenkins00000000000000# Translations template for neutron. # Copyright (C) 2015 ORGANIZATION # This file is distributed under the same license as the neutron project. # # Translators: # Sungjin Kang , 2016. #zanata msgid "" msgstr "" "Project-Id-Version: neutron 8.1.3.dev113\n" "Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n" "POT-Creation-Date: 2016-08-13 08:46+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" "PO-Revision-Date: 2016-04-08 12:52+0000\n" "Last-Translator: SeYeon Lee \n" "Language: ko-KR\n" "Plural-Forms: nplurals=1; plural=0;\n" "Generated-By: Babel 2.0\n" "X-Generator: Zanata 3.7.3\n" "Language-Team: Korean (South Korea)\n" #, python-format msgid "" "%(service)s for %(resource_type)s with uuid %(uuid)s not found. The process " "should not have died" msgstr "" "UUID가 %(uuid)s인 %(resource_type)s의 %(service)s를 찾을 수 없습니다. 프로세" "스가 종료되지 않았어야 합니다." #, python-format msgid "%s Agent terminated!" msgstr "%s 에이전트가 종료됩니다!" #, python-format msgid "%s failed" msgstr "%s 실패" #, python-format msgid "" "%s used in config as ipv6_gateway is not a valid IPv6 link-local address." msgstr "" "구성에서 ipv6_gateway로 사용하는 %s이(가) 올바른 IPv6 링크 로컬 주소가 아닙니" "다." 
#, python-format msgid "" "'rpc_workers = %d' ignored because start_rpc_listeners is not implemented." msgstr "" "start_rpc_listeners가 구현되지 않았으므로 'rpc_workers = %d'이(가) 무시되었습" "니다." #, python-format msgid "Action %(action)s is not defined on resource %(resource)s" msgstr "작업 %(action)s이(가) 자원 %(resource)s에 정의되지 않음" #, python-format msgid "Agent Extension '%(name)s' failed while handling port deletion" msgstr "포트 삭제를 처리하는 중에 에이전트 확장 '%(name)s'에 실패" #, python-format msgid "Agent Extension '%(name)s' failed while handling port update" msgstr "포트 업데이트를 처리하는 중에 에이전트 확장 '%(name)s'에 실패" msgid "Agent Initialization Failed" msgstr "에이전트 초기화 실패" msgid "Alias or class name is not set" msgstr "별명이나 클래스 이름이 설정되지 않음" #, python-format msgid "An error occurred while communicating with async process [%s]." msgstr "비동기 프로세스 [%s]과(와) 통신하는 중에 오류가 발생했습니다." #, python-format msgid "An error occurred while killing [%s]." msgstr "[%s]을(를) 강제 종료하는 중에 오류가 발생했습니다." #, python-format msgid "An exception occurred while creating the %(resource)s:%(item)s" msgstr "%(resource)s:%(item)s을(를) 작성하는 중에 예외 발생" msgid "An interface driver must be specified" msgstr "인터페이스 드라이버가 지정되어야 함" #, python-format msgid "An unexpected exception was caught: %s" msgstr "예상치 못한 예외가 발생함: %s" #, python-format msgid "BGP Speaker %(bgp_speaker)s info call failed with reason=%(e)s." msgstr "BGP스피커 %(bgp_speaker)s 정보 호출에 실패했으며, 이유=%(e)s입니다." #, python-format msgid "BGP peer %(bgp_peer)s info call failed with reason=%(e)s." msgstr "BGP 피어 %(bgp_peer)s 정보 호출에 실패했으며, 이유=%(e)s입니다." msgid "BGP router-id MUST be specified for the correct functional working." msgstr "기능적으로 올바르게 작동하도록 BGP router-id를 지정해야 합니다." #, python-format msgid "Binding info for DVR port %s not found" msgstr "DVR 포트 %s에대한 바인딩 정보를 찾을 수 없음" #, python-format msgid "" "Bridge %(bridge)s for physical network %(physical_network)s does not exist. " "Agent terminated!" msgstr "" "실제 네트워크 %(physical_network)s에 대한 브릿지 %(bridge)s이(가) 없습니다. " "에이전트가 종료되었습니다! " #, python-format msgid "" "Bridge %(brq)s for physical network %(net)s does not exist. Agent terminated!" msgstr "" "실제 네트워크 %(net)s에 브릿지 %(brq)s이(가) 없습니다. 에이전트가 종료되었습" "니다!" #, python-format msgid "" "Call to driver for BGP Speaker %(bgp_speaker)s %(method)s has failed with " "exception %(driver_exec)s." msgstr "" "BGP 스피커 %(bgp_speaker)s %(method)s 드라이버를 호출했지만 예외 " "%(driver_exec)s이(가) 발생하며 실패했습니다." #, python-format msgid "Cannot clean up created object %(obj)s. Exception: %(exc)s" msgstr "작성된 오브젝트 %(obj)s을(를) 정리할 수 없습니다. 
예외: %(exc)s" #, python-format msgid "" "Cannot provision %(network_type)s network for net-id=%(net_uuid)s - " "tunneling disabled" msgstr "" "net-id=%(net_uuid)s에 대해 %(network_type)s 네트워크를 제공할 수 없습니다 - " "터널링을 비활성화했습니다" #, python-format msgid "" "Cannot provision VLAN network for net-id=%(net_uuid)s - no bridge for " "physical_network %(physical_network)s" msgstr "" "net-id=%(net_uuid)s에 대한 VLAN 네트워크를 프로비저닝할 수 없음 - " "physical_network %(physical_network)s에 대한 브릿지가 없음" #, python-format msgid "" "Cannot provision flat network for net-id=%(net_uuid)s - no bridge for " "physical_network %(physical_network)s" msgstr "" "net-id=%(net_uuid)s에 대한 일반 네트워크를 프로비저닝할 수 없음 - " "physical_network %(physical_network)s에 대한 브릿지가 없음" #, python-format msgid "" "Cannot provision unknown network type %(network_type)s for net-id=" "%(net_uuid)s" msgstr "" "net-id=%(net_uuid)s에 대한 알 수 없는 네트워크 유형 %(network_type)s을(를) 프" "로비저닝할 수 없음" #, python-format msgid "" "Cannot reclaim unknown network type %(network_type)s for net-id=%(net_uuid)s" msgstr "" "net-id=%(net_uuid)s에 대한 알 수 없는 네트워크 유형 %(network_type)s을(를) 재" "확보할 수 없음" msgid "Cannot run ebtables. Please ensure that it is installed." msgstr "Ebtables를 실행할 수 없습니다. 설치되었는지 확인하십시오." msgid "Cannot run ip6tables. Please ensure that it is installed." msgstr "IP6tables를 실행할 수 없습니다. 설치되었는지 확인하십시오." msgid "Cannot run ipset. Please ensure that it is installed." msgstr "IPset을 실행할 수 없습니다. 설치되었는지 확인하십시오." #, python-format msgid "" "Centralized-SNAT port %(port)s on subnet %(port_subnet)s already seen on a " "different subnet %(orig_subnet)s" msgstr "" "서브넷 %(port_subnet)s 중앙 집중식 SNAT 포트 %(port)s이(가) 다른 서브넷 " "%(orig_subnet)s에 이미 표시됨" msgid "" "Check for Open vSwitch ARP responder support failed. Please ensure that the " "version of openvswitch being used has ARP flows support." msgstr "" "Open vSwitch ARP 응답자 지원을 확인하는 데 실패했습니다. 사용 중인 " "openvswitch 버전에서 ARP 플로우를 지원하는지 확인하십시오." msgid "" "Check for Open vSwitch Geneve support failed. Please ensure that the version " "of openvswitch and kernel being used has Geneve support." msgstr "" "Open vSwitch Geneve 지원 확인에 실패했습니다. 사용 중인 openvswitch 및 커널 " "버전에서 Geneve를 지원하는지 확인하십시오." msgid "" "Check for Open vSwitch VXLAN support failed. Please ensure that the version " "of openvswitch being used has VXLAN support." msgstr "" "Open vSwitch VXLAN 지원을 확인하는 데 실패했습니다. 사용 중인 openvswitch 버" "전에서 VXLAN을 지원하는지 확인하십시오." msgid "" "Check for Open vSwitch patch port support failed. Please ensure that the " "version of openvswitch being used has patch port support or disable features " "requiring patch ports (gre/vxlan, etc.)." msgstr "" "Open vSwitch VSwitch 패치 포트 지원을 확인하는 데 실패했습니다. 사용 중인 " "openvswitch 버전에서 패치 포트를 지원하는지 확인하거나 패치 포트가 필요한 기" "능을 사용하지 않게 설정하십시오(gre/vxlan 등)." msgid "" "Check for Open vSwitch support of ARP header matching failed. ARP spoofing " "suppression will not work. A newer version of OVS is required." msgstr "" "Open vSwitch에서 ARP 헤더 일치를 지원하는지 확인하는 데 실패했습니다. ARP 위" "조 억제가 작동하지 않습니다. 새 버전의 OVS가 필요합니다." msgid "" "Check for Open vSwitch support of ICMPv6 header matching failed. ICMPv6 " "Neighbor Advt spoofing (part of arp spoofing) suppression will not work. A " "newer version of OVS is required." msgstr "" "Open vSwitch에서 ICMPv6 헤더 일치를 지원하는지 확인하는 데 실패했습니다. " "ICMPv6 Neighbor Advt 위조(arp 위조의 일부) 억제가 작동하지 않습니다. 새 버전" "의 OVS가 필요합니다." msgid "" "Check for Open vSwitch support of conntrack support failed. OVS/CT firewall " "will not work. A newer version of OVS (2.5+) and linux kernel (4.3+) are " "required. 
See https://github.com/openvswitch/ovs/blob/master/FAQ.mdfor more " "information." msgstr "" "Open vSwitch에서 conntrack을 지원하는지 확인하는 데 실패했습니다. OVS/CT 방화" "벽이 작동하지 않습니다. 새 버전의 OVS(2.5+) 및 linux 커널(4.3+)이 필요합니" "다. 자세한 내용은 https://github.com/openvswitch/ovs/blob/master/FAQ.md를 참" "조하십시오." msgid "" "Check for VF management support failed. Please ensure that the version of ip " "link being used has VF support." msgstr "" "VF 관리 지원을 확인하는 데 실패했습니다. 사용 중인 ip 링크 버전에 VF 지원이 " "있는지 확인하십시오." msgid "" "Check for iproute2 VXLAN support failed. Please ensure that the iproute2 has " "VXLAN support." msgstr "" "Iproute2 VXLAN 지원을 확인하는 데 실패했습니다. Iproute2에서 VXLAN을 지원하는" "지 확인하십시오." msgid "Check for native OVSDB support failed." msgstr "네이티브 OVSDB 지원을 확인하는 데 실패했습니다." #, python-format msgid "Configuration for devices %s failed!" msgstr "장치 %s 에대한 구성을 실패했습니다!" #, python-format msgid "Could not delete %(res)s %(id)s." msgstr "%(res)s %(id)s을(를) 삭제할 수 없습니다." #, python-format msgid "Could not find %s to delete." msgstr "삭제할 %s을(를) 찾을 수 없습니다." #, python-format msgid "Could not parse: %(raw_result)s. Exception: %(exception)s" msgstr "구문 분석할 수 없음: %(raw_result)s. 예외: %(exception)s" #, python-format msgid "Could not retrieve gateway port for subnet %s" msgstr "서브넷 %s에서 게이트웨이 포트를 검색할 수 없음" #, python-format msgid "DVR: Duplicate DVR router interface detected for subnet %s" msgstr "DVR: 서브넷 %s에 대해 중복 DVR 라우터 인터페이스가 발견됨" msgid "DVR: Failed to obtain a valid local DVR MAC address" msgstr "DVR: 올바른 로컬 DVR MAC 주소를 가져오는 데 실패" msgid "DVR: Failed updating arp entry" msgstr "DVR: arp 항목을 업데이트하는 데 실패" #, python-format msgid "" "DVR: SNAT port not found in the list %(snat_list)s for the given router " "internal port %(int_p)s" msgstr "" "DVR: 지정된 라우터 내부 포트 %(int_p)s 목록 %(snat_list)s에서 SNAT 포트를 찾" "을 수 없음" msgid "DVR: error adding redirection logic" msgstr "DVR: 리디렉션 로직을 추가하는 중 오류 발생" msgid "DVR: snat remove failed to clear the rule and device" msgstr "DVR: 규칙과 장치를 지우기 위한 snat 제거에 실패" #, python-format msgid "Driver %(driver)s does not implement %(func)s" msgstr "드라이버 %(driver)s에서 %(func)s을(를) 구현하지 않음" #, python-format msgid "Driver %(driver)s:%(func)s runtime error" msgstr "드라이버 %(driver)s:%(func)s 런타임 오류" #, python-format msgid "" "Error deleting Floating IP data from external DNS service. Name: '%(name)s'. " "Domain: '%(domain)s'. IP addresses '%(ips)s'. DNS service driver message " "'%(message)s'" msgstr "" "외부 DNS 서비스에서 Floating IP 데이터를 삭제하는 중에 오류 발생. 이름: " "'%(name)s'. 도메인: '%(domain)s'. IP 주소 '%(ips)s'. DNS 서비스 드라이버 메시" "지 '%(message)s'" #, python-format msgid "" "Error deleting port data from external DNS service. Name: '%(name)s'. " "Domain: '%(domain)s'. IP addresses '%(ips)s'. DNS service driver message " "'%(message)s'" msgstr "" "외부 DNS 서비스에서 포트 데이터를 삭제하는 중에 오류 발생. 이름: '%(name)s'. " "도메인: '%(domain)s'. IP 주소 '%(ips)s'. DNS 서비스 드라이버 메시지 " "'%(message)s'" #, python-format msgid "Error during notification for %(callback)s %(resource)s, %(event)s" msgstr "%(callback)s %(resource)s에 %(event)s을(를) 알리는 중에 오류 발생" msgid "Error executing command" msgstr "명령 실행 오류" #, python-format msgid "Error in agent loop. Devices info: %s" msgstr "에이전트 루프에서 오류 발생. 
디바이스 정보: %s" msgid "Error loading class by alias" msgstr "별명으로 클래스를 로드하는 중 오류 발생" msgid "Error loading class by class name" msgstr "클래스 이름으로 클래스를 로드하는 중에 오류 발생" #, python-format msgid "Error loading interface driver '%s'" msgstr "인터페이스 드라이버 '%s'을(를) 로드하는 중 오류 발생" #, python-format msgid "Error loading provider '%(provider)s' for service %(service_type)s" msgstr "" "서비스 %(service_type)s의 제공자 '%(provider)s'을(를) 로드하는 중 오류 발생" #, python-format msgid "Error occurred while removing port %s" msgstr "포트 %s을(를) 제거하는 중에 오류 발생" #, python-format msgid "" "Error publishing floating IP data in external DNS service. Name: '%(name)s'. " "Domain: '%(domain)s'. DNS service driver message '%(message)s'" msgstr "" "외부 DNS 서비스에서 Floating IP 데이터를 공개하는 중에 오류 발생. 이름: " "'%(name)s'. 도메인: '%(domain)s'. DNS 서비스 드라이버 메시지 '%(message)s'" #, python-format msgid "" "Error publishing port data in external DNS service. Name: '%(name)s'. " "Domain: '%(domain)s'. DNS service driver message '%(message)s'" msgstr "" "외부 DNS 서비스에서 포트 데이터를 공개하는 중에 오류 발생. 이름: '%(name)s'. " "도메인: '%(domain)s'. DNS 서비스 드라이버 메시지 '%(message)s'" #, python-format msgid "Error received from [%(cmd)s]: %(err)s" msgstr "[%(cmd)s]에서 오류 수신: %(err)s" #, python-format msgid "Error response returned from nova: %s" msgstr "Nova에서 오류 응답을 반환함: %s" #, python-format msgid "Error unable to destroy namespace: %s" msgstr "네임스페이스 영구 삭제 불가능 오류: %s" msgid "Error while configuring tunnel endpoints" msgstr "터널 엔드포인트를 구성하는 중에 오류 발생" #, python-format msgid "Error while create dnsmasq log dir: %s" msgstr "Dnsmasq 로그 디렉토리를 작성하는 중에 오류 발생: %s" #, python-format msgid "Error while deleting router %s" msgstr "라우터 %s을(를) 삭제하는 중에 오류 발생" #, python-format msgid "Error while handling pidfile: %s" msgstr "Pidfile을 처리하는 중에 오류 발생: %s" #, python-format msgid "Error while importing BGP speaker driver %s" msgstr "BGP 스피커 드라이버 %s을(를) 가져오는 중 오류 발생" msgid "Error while processing VIF ports" msgstr "VIF 포트를 처리하는 중에 오류 발생" #, python-format msgid "Error while writing HA state for %s" msgstr "%s에대해 HA 상태를 쓰는 중 오류 발생" #, python-format msgid "Error, unable to destroy IPset: %s" msgstr "오류, IPSet을 영구 삭제할 수 없음: %s" #, python-format msgid "Error, unable to remove iptables rule for IPset: %s" msgstr "오류, IPset에서 iptables 규칙을 제거할 수 없음: %s" #, python-format msgid "" "Exceeded maximum binding levels attempting to bind port %(port)s on host " "%(host)s" msgstr "" "호스트 %(host)s에서 포트 %(port)s을(를) 바인드하는 중에 최대 바인딩 레벨을 초" "과함" #, python-format msgid "Exception auto-deleting port %s" msgstr "포트 %s 자동 삭제 중 예외 발생" #, python-format msgid "Exception auto-deleting subnet %s" msgstr "서브넷 %s 자동 삭제 중 예외 발생" #, python-format msgid "Exception deleting fixed_ip from port %s" msgstr "포트 %s에서 fixed_ip 삭제 중 예외 발생" msgid "Exception during stale dhcp device cleanup" msgstr "시간이 경과된 dhcp 장치 정리 중 예외 발생" msgid "Exception encountered during network rescheduling" msgstr "네트워크 재스케줄링 중에 예외 발생" msgid "Exception encountered during router rescheduling." msgstr "라우터 재스케줄링 중에 예외가 발생했습니다." msgid "Exception loading extension" msgstr "확장을 로드하는 중에 예외 발생" msgid "Exception occurs when timer stops" msgstr "타이머가 중지할 때 예외가 발생합니다. " msgid "Exception occurs when waiting for timer" msgstr "타이머가 대기 중일 때 예외가 발생합니다. 
" msgid "Exiting agent as programmed in check_child_processes_actions" msgstr "check_child_processes_actions에 프로그래밍된 에이전트 종료" #, python-format msgid "" "Exiting agent because of a malfunction with the %(service)s process " "identified by uuid %(uuid)s" msgstr "" "UUID %(uuid)s(으)로 식별된 %(service)s 프로세스 오동작으로 인해 에이전트 종료" #, python-format msgid "Expected port %s not found" msgstr "예상 포트 %s을(를) 찾을 수 없음" #, python-format msgid "Extension driver '%(name)s' failed in %(method)s" msgstr "확장 드라이버 '%(name)s'이(가) %(method)s에서 실패" #, python-format msgid "Extension path '%s' doesn't exist!" msgstr "확장 경로 '%s'이(가) 존재하지 않습니다!" #, python-format msgid "FWaaS RPC failure in %(func_name)s for fw: %(fwid)s" msgstr "FW %(fwid)s의 %(func_name)s에서 FWaaS RPC 실패" #, python-format msgid "FWaaS RPC info call failed for '%s'." msgstr "'%s' FWaaS RPC 정보 호출에 실패했습니다." #, python-format msgid "Failed creating vxlan interface for %(segmentation_id)s" msgstr "%(segmentation_id)s에서 vxlan 인터페이스를 작성하는 데 실패" #, python-format msgid "Failed deleting egress connection state of floatingip %s" msgstr "Floatingip %s의 출구 연결 상태를 삭제하는 데 실패" #, python-format msgid "Failed deleting ingress connection state of floatingip %s" msgstr "Floatingip %s의 진입 연결 상태를 삭제하는 데 실패" #, python-format msgid "Failed execute conntrack command %s" msgstr "Conntrack 명령 %s(으)로 인해 실행 실패" msgid "Failed executing ip command" msgstr "IP 명령 실행 실패" #, python-format msgid "Failed executing ip command: %s" msgstr "IP 명령 실행 실패: %s" msgid "Failed fwaas process services sync" msgstr "FWaaS 프로세스 서비스 동기화에 실패" msgid "Failed on Agent configuration parse. Agent terminated!" msgstr "에이전트 구성 구문 분석에 실패했습니다. 에이전트가 종료되었습니다!" msgid "Failed reporting state!" msgstr "상태 보고 실패!" #, python-format msgid "Failed running %s" msgstr "%s 실행 실패" #, python-format msgid "" "Failed sending gratuitous ARP to %(addr)s on %(iface)s in namespace %(ns)s" msgstr "" "네임스페이스 %(ns)s의 %(iface)s에서 %(addr)s에 불필요한 ARP를 보내는 데 실패" msgid "Failed synchronizing routers" msgstr "라우터 동기화 실패" msgid "Failed synchronizing routers due to RPC error" msgstr "RPC 오류로 인해 라우터를 동기화하는 데 실패" #, python-format msgid "" "Failed to bind port %(port)s on host %(host)s for vnic_type %(vnic_type)s " "using segments %(segments)s" msgstr "" "세그먼트 %(segments)s을(를) 사용하여 vnic_type %(vnic_type)s의 호스트 " "%(host)s에서 포트 %(port)s을(를) 바인드하는 데 실패" #, python-format msgid "Failed to commit binding results for %(port)s after %(max)s tries" msgstr " %(max)s번의 시도 후에 %(port)s의 바인딩 결과를 커밋하는 데 실패" msgid "Failed to communicate with the switch" msgstr "스위치와 통신하는 데 실패" msgid "" "Failed to create OVS patch port. Cannot have tunneling enabled on this " "agent, since this version of OVS does not support tunnels or patch ports. " "Agent terminated!" msgstr "" "OVS 패치 포트를 작성하지 못했습니다. 이 버전 OVS가 터널이나 패치 포트를 지원" "하지 않으므로 이 에이전트에서 터널링을 사용할 수 없습니다. 에이전트가 종료되" "었습니다! " #, python-format msgid "Failed to destroy stale namespace %s" msgstr "시간이 경과된 네임스페이스 %s 영구 삭제 실패" #, python-format msgid "Failed to fetch router information for '%s'" msgstr "'%s'에대한 라우터 정보를 가져오는 데 실패" #, python-format msgid "Failed to get details for device %s" msgstr "장치 %s에대한 세부 사항을 가져오는 데 실패" #, python-format msgid "Failed to get devices for %s" msgstr "%s 장치를 가져오는 데 실패" #, python-format msgid "Failed to get ip addresses for interface: %s." msgstr "인터페이스에대한 ip 주소를 가져오는 데 실패: %s." msgid "Failed to get network interfaces." msgstr "네트워크 인터페이스를 가져오는 데 실패했습니다." 
#, python-format msgid "Failed to get traffic counters, router: %s" msgstr "트래픽 카운터를 가져오는 데 실패, 라우터: %s" #, python-format msgid "" "Failed to import required modules. Ensure that the python-openvswitch " "package is installed. Error: %s" msgstr "" "필수 모듈을 가져오는 데 실패했습니다. python-openswitch 패키지가 설치되었는" "지 확인하십시오. 오류: %s" #, python-format msgid "Failed to notify nova on events: %s" msgstr "Nova에 이벤트에 대해 알리는 데 실패: %s" msgid "Failed to parse network_vlan_ranges. Service terminated!" msgstr "" "network_vlan_ranges를 구문 분석하지 못했습니다. 서비스가 종료되었습니다!" msgid "Failed to parse supported PCI vendor devices" msgstr "지원되는 PCI 벤더 장치를 구문 분석하는 데 실패" msgid "Failed to parse tunnel_id_ranges. Service terminated!" msgstr "tunnel_id_ranges를 구문 분석하지 못했습니다. 서비스가 종료되었습니다!" msgid "Failed to parse vni_ranges. Service terminated!" msgstr "vni_ranges를 구문 분석하지 못했습니다. 서비스가 종료되었습니다!" #, python-format msgid "Failed to process compatible router '%s'" msgstr "호환 가능한 라우터 '%s'을(를) 처리하는 데 실패" msgid "Failed to process floating IPs." msgstr "Floating IP를 처리하는 데 실패했습니다." #, python-format msgid "Failed to process or handle event for line %s" msgstr "%s 라인의 이벤트를 프로세스하거나 처리할 수 없음" #, python-format msgid "Failed to release segment '%s' because network type is not supported." msgstr "" "네트워크 유형이 지원되지 않으므로 세그먼트 '%s'을(를) 해제하지 못했습니다." #, python-format msgid "Failed to reschedule router %s" msgstr "라우터 %s을(를) 재스케줄링하지 못했습니다." #, python-format msgid "Failed to schedule network %s" msgstr "네트워크 %s을(를) 스케줄하지 못했습니다." #, python-format msgid "Failed to set device %s max rate" msgstr "장치 %s 최대 속도를 설정하는 데 실패" #, python-format msgid "Failed to set-up %(type)s tunnel port to %(ip)s" msgstr "%(type)s 터널 포트를 %(ip)s(으)로 설정하지 못함" #, python-format msgid "Failed to update device %s down" msgstr "장치 %s을(를) 작동 해제하도록 업데이트하는 데 실패" #, python-format msgid "Failed to update device %s up" msgstr "장치 %s을(를) 작동하도록 업데이트하는 데 실패" #, python-format msgid "Failed trying to delete namespace: %s" msgstr "네임스페이스 삭제 실패: %s" #, python-format msgid "Failed unplugging interface '%s'" msgstr "'%s' 인터페이스 연결 해제 실패" #, python-format msgid "Firewall Driver Error for %(func_name)s for fw: %(fwid)s" msgstr "FW %(fwid)s의 %(func_name)s에서 방화벽 드라이버 오류 발생" #, python-format msgid "Firewall Driver Error on fw state %(fwmsg)s for fw: %(fwid)s" msgstr "FW %(fwid)s의 fw 상태 %(fwmsg)s에서 방화벽 드라이버 오류 발생" msgid "Fork failed" msgstr "포크 실패" #, python-format msgid "IP allocation failed on external system for %s" msgstr "%s 외부 시스템에서 IP 할당 실패" #, python-format msgid "IP deallocation failed on external system for %s" msgstr "%s 외부 시스템에서 IP 할당 해제 실패" #, python-format msgid "IPAM subnet referenced to Neutron subnet %s does not exist" msgstr "Neutron 서브넷 %s을(를) 참조한 IPAM 서브넷이 없습니다." #, python-format msgid "" "IPTablesManager.apply failed to apply the following set of iptables rules:\n" "%s" msgstr "" "IPTablesManager.apply가 다음 iptables 규칙 세트를 적용하지 못함:\n" "%s" #, python-format msgid "" "IPv6 protocol requires a minimum MTU of %(min_mtu)s, while the configured " "value is %(current_mtu)s" msgstr "" "IPv6 프로토콜에는 최소 %(min_mtu)s MTU가 필요한 반면 구성된 값은 " "%(current_mtu)s입니다." msgid "" "ImportError exception occurred while loading the external DNS service driver" msgstr "외부 DNS 서비스 드라이버를 로드하는 중에 ImportError 예외 발생" #, python-format msgid "Initializing port %s that was already initialized." msgstr "이미 초기화된 %s 포트를 초기화 중입니다." #, python-format msgid "" "Interface %(intf)s for physical network %(net)s does not exist. Agent " "terminated!" msgstr "" "실제 네트워크 %(net)s에 인터페이스 %(intf)s이(가) 없습니다. 에이전트가 종료되" "었습니다!" 
msgid "Interface monitor is not active" msgstr "인터페이스 모니터가 활성이 아님" msgid "Internal error" msgstr "내부 오류" #, python-format msgid "" "Invalid VXLAN Group: %(group)s, must be an address or network (in CIDR " "notation) in a multicast range of the same address family as local_ip: %(ip)s" msgstr "" "올바르지 않은 VXLAN 그룹: %(group)s이(가) local_ip: %(ip)s과(와) 동일한 주소" "군의 멀티캐스트 범위에 있는 주소 또는 네트워크(CIDR 표기법)여야 합니다." #, python-format msgid "Invalid tunnel type specified: %s" msgstr "지정된 터널 유형이 올바르지 않음: %s" #, python-format msgid "InvalidContentType: %s" msgstr "InvalidContentType: %s" #, python-format msgid "" "L2 agent could not get DVR MAC address at startup due to RPC error. It " "happens when the server does not support this RPC API. Detailed message: %s" msgstr "" "RPC 오류로 인해 시작 시 L2 에이전트에서 DVR MAC 주소를 가져올 수 없습니다. 서" "버에서 이 RPC API를 지원하지 않을 때 이 문제가 발생합니다. 자세한 메시지: %s" #, python-format msgid "Linux bridge %s delete failed" msgstr "Linux 브릿지 %s 삭제 실패" #, python-format msgid "MAC generation error after %s attempts" msgstr "%s번의 시도 후에 MAC 생성 오류" #, python-format msgid "MalformedRequestBody: %s" msgstr "MalformedRequestBody: %s" msgid "" "Manager class must inherit from CommonAgentManagerBase to ensure CommonAgent " "works properly." msgstr "" "CommonAgent가 제대로 작동하도록 CommonAgentManagerBase에서 관리자 클래스를 상" "속해야 합니다." #, python-format msgid "Mechanism driver %s failed in bind_port" msgstr "메커니즘 드라이버 %s이(가) bind_port에서 실패" #, python-format msgid "Mechanism driver '%(name)s' failed in %(method)s" msgstr "메커니즘 드라이버 '%(name)s'이(가) %(method)s에서 실패함" #, python-format msgid "" "Message received from the host: %(host)s during the registration of " "%(agent_name)s has a timestamp: %(agent_time)s. This differs from the " "current server timestamp: %(serv_time)s by %(diff)s seconds, which is more " "than the threshold agent downtime: %(threshold)s." msgstr "" "%(agent_name)s 등록 중에 호스트: %(host)s에서 받은 메시지에 timestamp: " "%(agent_time)s이(가) 있습니다. 이 timestamp는 현재 서버 timestamp: " "%(serv_time)s과(와) %(diff)s초만큼 차이가 납니다. 이 시간은 임계값 에이전트 " "작동 중단 시간: %(threshold)s보다 큽니다." msgid "Missing subnet/agent_gateway_port" msgstr "서브넷/agent_gateway_port 누락" #, python-format msgid "Model class %s does not have a tenant_id attribute" msgstr "모델 클래스 %s에 tenant_id 속성이 없음" #, python-format msgid "" "Multiple auto-allocated networks detected for tenant %(tenant)s. Attempting " "clean up for network %(network)s and router %(router)s" msgstr "" "Tenant %(tenant)s에 대해 자동으로 할당된 여러 네트워크를 발견했습니다. 네트워" "크 %(network)s 나 라우터 %(router)s을(를) 정리합니다." #, python-format msgid "" "Multiple external default networks detected. Network %s is true 'default'." msgstr "" "여러 외부 기본 네트워크를 발견했습니다. 네트워크 %s이(가) 진정한 '기본값'입니" "다." #, python-format msgid "Multiple ports have port_id starting with %s" msgstr "다중 포트가 %s로 시작하는 port_id를 가지고 있습니다" #, python-format msgid "Network %s info call failed." msgstr "네트워크 %s 정보 호출에 실패했습니다." #, python-format msgid "Network %s is not available." msgstr "네트워크 %s을(를) 사용할 수 없습니다." #, python-format msgid "" "No FloatingIP agent gateway port returned from server for 'network-id': %s" msgstr "" "'network-id'의 서버에서 FloatingIP 에이전트 게이트웨이 포트가 리턴되지 않음: " "%s" #, python-format msgid "No Host supplied to bind DVR Port %s" msgstr "DVR 포트 %s을(를) 바인드하기 위해 제공된 호스트가 없음" #, python-format msgid "No bridge or interface mappings for physical network %s" msgstr "실제 네트워크 %s에서 브릿지나 인터페이스 맵핑이 없음" msgid "No default pools available" msgstr "기본 풀을 사용할 수 없음" msgid "No known API applications configured." msgstr "알려진 API 애플리케이션이 구성되지 않았습니다. 
" #, python-format msgid "No local VLAN available for net-id=%s" msgstr "net-id=%s에 대해 사용 가능한 로컬 VLAN이 없음" msgid "No plugin for BGP routing registered" msgstr "BGP 라우팅에 플러그인이 등록되지 않음" msgid "No plugin for L3 routing registered to handle router scheduling" msgstr "" "라우터 재스케줄링을 처리하기 위한 L3 라우팅의 플러그인이 등록되지 않았습니다." #, python-format msgid "" "No plugin for L3 routing registered. Cannot notify agents with the message %s" msgstr "" "L3 라우팅에 대한 플러그인이 등록되지 않았습니다. %s 메시지를 에이전트에 알릴 " "수 없음" msgid "No tunnel_ip specified, cannot delete tunnels" msgstr "tunnel_ip가 지정되지 않음, 터널을 삭제할 수 없음" msgid "No tunnel_type specified, cannot create tunnels" msgstr "tunnel_type이 지정되지 않음, 터널을 작성할 수 없음" msgid "No tunnel_type specified, cannot delete tunnels" msgstr "tunnel_type이 지정되지 않음, 터널을 삭제할 수 없음" #, python-format msgid "No type driver for external network_type: %s. Service terminated!" msgstr "외부 network_type의 드라이버 형식이 없음: %s. 서비스를 중단했습니다!" #, python-format msgid "No type driver for tenant network_type: %s. Service terminated!" msgstr "" "network_type 임대자에 대한 드라이버 형식이 없습니다: %s. 서비스를 중단했습니" "다!" msgid "No valid Segmentation ID to perform UCAST test." msgstr "UCAST 테스트를 수행하는 데 올바른 구분 ID가 없습니다." #, python-format msgid "Not enough candidates, a HA router needs at least %s agents" msgstr "후보가 없음, HA 라우터에는 최소 %s개의 에이전트가 필요함" msgid "" "Nova notifications are enabled, but novaclient is not installed. Either " "disable nova notifications or install python-novaclient." msgstr "" "Nova 알림이 사용되지만 novaclient가 설치되지 않았습니다. Nova 알림을 사용하" "지 않거나 python-novaclient를 설치하십시오." #, python-format msgid "OVS flows could not be applied on bridge %s" msgstr "브릿지 %s에서 OVS 플로우를 적용할 수 없음" #, python-format msgid "PCI slot %(pci_slot)s has no mapping to Embedded Switch; skipping" msgstr "PCI 슬롯 %(pci_slot)s에 임베드된 스위치에 대한 맵핑이 없음. 건너뛰기" #, python-format msgid "Parsing bridge_mappings failed: %s." msgstr "bridge_mappings 구문 분석 실패: %s." #, python-format msgid "Parsing bridge_mappings failed: %s. Agent terminated!" msgstr "bridge_mappings 구문 분석 실패: %s. 에이전트가 종료되었습니다!" #, python-format msgid "Parsing physical_interface_mappings failed: %s." msgstr "physical_interface_mappings 구문 분석 실패: %s." #, python-format msgid "Parsing physical_interface_mappings failed: %s. Agent terminated!" msgstr "" "physical_interface_mappings 구문 분석 실패: %s. 에이전트가 종료되었습니다!" #, python-format msgid "Pidfile %s already exist. Daemon already running?" msgstr "Pidfile %s이(가) 이미 존재합니다. 디먼이 이미 실행 중입니까?" #, python-format msgid "Policy check error while calling %s!" msgstr "%s 호출 중 정책 확인 오류!" #, python-format msgid "Process [%(cmd)s] dies due to the error: %(err)s" msgstr "다음 오류로 인해 [%(cmd)s] 프로세스가 종료됨: %(err)s" msgid "" "RPC Callback class must inherit from CommonAgentManagerRpcCallBackBase to " "ensure CommonAgent works properly." msgstr "" "CommonAgent가 제대로 작동하도록 CommonAgentManagerRpcCallBackBase에서 RPC " "Callback 클래스를 상속해야 합니다." #, python-format msgid "Removing incompatible router '%s'" msgstr "호환되지 않는 라우터 '%s' 제거" msgid "RuntimeError in obtaining namespace list for namespace cleanup." msgstr "" "네임스페이스 정리를 위한 네임스페이스 목록을 가져오는 중 RuntimeError 발생" #, python-format msgid "Serialized profile DB value '%(value)s' for port %(port)s is invalid" msgstr "" "%(port)s 포트의 직렬화된 프로파일 DB 값 '%(value)s'이(가) 올바르지 않음" #, python-format msgid "" "Serialized vif_details DB value '%(value)s' for port %(port)s is invalid" msgstr "" "포트 %(port)s의 직렬화된 vif_details DB 값 '%(value)s'이(가) 올바르지 않음" #, python-format msgid "" "Server failed to return info for routers in required time even with min " "chunk size: %s. 
It might be under very high load or just inoperable" msgstr "" "청크 크기가 최소여도 서버가 필수 시간 내에 라우터의 정보를 리턴하지 못함: " "%s. 로드가 매우 많거나 서버가 작동하지 않을 수 있습니다." #, python-format msgid "" "Server failed to return info for routers in required time, decreasing chunk " "size to: %s" msgstr "" "서버에서 필수 시간 내에 라우터의 정보를 리턴하지 못하여, 청크 크기가 %s(으)" "로 감소됨" msgid "Switch connection timeout" msgstr "스위치 연결 제한시간 초과" #, python-format msgid "The SNAT namespace %s does not exist for the router." msgstr "라우터에서 SNAT 네임스페이스 %s이(가) 없습니다." #, python-format msgid "The external network bridge '%s' does not exist" msgstr "외부 네트워크 브릿지 '%s'이(가) 존재하지 않음" #, python-format msgid "" "The installed version of dibbler-client is too old. Please update to at " "least version %s." msgstr "" "설치된 dibbler-client 버전이 너무 오래되었습니다. 최소 %s 버전으로 업데이트하" "십시오." #, python-format msgid "" "The installed version of dnsmasq is too old. Please update to at least " "version %s." msgstr "" "설치된 dnsmasq 버전이 너무 오래되었습니다. 최소 %s 버전으로 업데이트하십시오." msgid "" "The installed version of keepalived does not support IPv6. Please update to " "at least version 1.2.10 for IPv6 support." msgstr "" "활성 유지된(keepalived) 설치 버전에서 IPv6을 지원하지 않습니다. IPv6 지원을 " "위해 최소 1.2.10 버전으로 업데이트하십시오." msgid "The resource could not be found." msgstr "자원을 찾을 수 없습니다. " msgid "" "The user that is executing neutron does not have permissions to read the " "namespaces. Enable the use_helper_for_ns_read configuration option." msgstr "" "Neutron을 실행하는 사용자는 네임스페이스를 읽을 권한이 없습니다. " "use_helper_for_ns_read 구성 옵션을 사용하십시오." #, python-format msgid "Timed out retrieving ofport on port %s." msgstr "포트 %s에서 ofport를 검색하는 중에 제한시간이 초과되었습니다." #, python-format msgid "" "Tunneling can't be enabled with invalid local_ip '%s'. IP couldn't be found " "on this host's interfaces." msgstr "" "올바르지 않은 local_ip '%s'(으)로 터널링을 사용할 수 없습니다. 이 호스트 인터" "페이스에서 IP를 찾을 수 없습니다." #, python-format msgid "" "Tunneling cannot be enabled without the local_ip bound to an interface on " "the host. Please configure local_ip %s on the host interface to be used for " "tunneling and restart the agent." msgstr "" "호스트의 인터페이스에 바인드된 local_ip가 없으면 터널링을 사용할 수 없습니" "다. 터널링에 사용한 호스트 인터페이스에서 local_ip %s을(를) 구성하고 에이전트" "를 다시 시작하십시오." #, python-format msgid "" "Type driver '%(new_driver)s' ignored because type driver '%(old_driver)s' is " "already registered for type '%(type)s'" msgstr "" " '%(type)s'에 대한 '%(old_driver)s'이(가) 이미 등록되어 있어 " "'%(new_driver)s' 형식 드라이버를 무시했습니다" #, python-format msgid "Unable to %(action)s dhcp for %(net_id)s." msgstr "%(net_id)s의 %(action)s dhcp를 사용할 수 없습니다." #, python-format msgid "Unable to add %(interface)s to %(bridge_name)s! Exception: %(e)s" msgstr "%(interface)s을(를) %(bridge_name)s에 추가할 수 없습니다! 예외: %(e)s" #, python-format msgid "Unable to add vxlan interface for network %s" msgstr "네트워크 %s에서 vxlan 인터페이스를 추가할 수 없음" #, python-format msgid "" "Unable to auto allocate topology for tenant %s because of router errors." msgstr "라우터 오류로 인해 tenant %s의 토폴로지를 자동으로 할당할 수 없습니다." #, python-format msgid "" "Unable to auto allocate topology for tenant %s due to missing requirements, " "e.g. default or shared subnetpools" msgstr "" "요구 사항이 누락되어 tenant %s 토폴로지를 자동으로 할당할 수 없습니다(예: 기" "본 또는 공유 subnetpool)" #, python-format msgid "Unable to convert value in %s" msgstr "%s의 값을 변환할 수 없음" #, python-format msgid "" "Unable to create VLAN interface for VLAN ID %s because it is in use by " "another interface." msgstr "" "다른 인터페이스에서 사용 중이므로 VLAN ID %s에 대한 VLAN 인터페이스를 작성할 " "수 없습니다." 
#, python-format msgid "" "Unable to create VXLAN interface for VNI %s because it is in use by another " "interface." msgstr "" "다른 인터페이스에서 사용 중이므로 VNI %s에대하한 VXLAN인터페이스를 작성할 수 " "없습니다." #, python-format msgid "Unable to execute %(cmd)s. Exception: %(exception)s" msgstr "%(cmd)s을(를) 실행할 수 없습니다. 예외: %(exception)s" #, python-format msgid "Unable to find agent %s." msgstr "에이전트 %s을(를) 찾을 수 없습니다." msgid "" "Unable to find default external network for deployment, please create/assign " "one to allow auto-allocation to work correctly." msgstr "" "배포에 사용할 기본 외부 네트워크를 찾을 수 없습니다. 자동 할당이 올바르게 작" "동할 수 있도록 하나를 작성/할당하십시오." #, python-format msgid "Unable to generate mac address after %s attempts" msgstr "%s 시도 후 MAC 주소를 생성할 수 없음" #, python-format msgid "Unable to get port details for %s" msgstr "%s에대한 포트 세부 사항을 가져올 수 없음" #, python-format msgid "Unable to listen on %(host)s:%(port)s" msgstr "%(host)s:%(port)s에서 listen 할 수 없음" msgid "Unable to obtain MAC address for unique ID. Agent terminated!" msgstr "고유 ID에 대한 MAC 주소를 얻을 수 없습니다. 에이전트가 종료됩니다!" #, python-format msgid "Unable to parse route \"%s\"" msgstr "경로 \"%s\"을(를) 구문 분석할 수 없음" #, python-format msgid "Unable to plug DHCP port for network %s. Releasing port." msgstr "네트워크 %s의 DHCP 포트 플러그를 연결할 수 없습니다. 포트 해제 중." #, python-format msgid "Unable to process HA router %s without HA port" msgstr "HA 포트가 없는 HA 라우터 %s을(를) 처리할 수 없음" #, python-format msgid "" "Unable to process extensions (%s) because the configured plugins do not " "satisfy their requirements. Some features will not work as expected." msgstr "" "구성된 플러그인이 요구 사항을 만족하지 않으므로 확장(%s)을 처리할 수 없습니" "다. 일부 기능이 제대로 작동하지 않습니다." msgid "Unable to sync BGP speaker state." msgstr "BGP 스피커 상태를 동기화할 수 없습니다." #, python-format msgid "Unable to sync network state on deleted network %s" msgstr "삭제된 네트워크 %s에서 네트워크 상태를 동기화할 수 없음" msgid "Unable to sync network state." msgstr "네트워크 상태를 동기화할 수 없습니다. " #, python-format msgid "Unable to undo add for %(resource)s %(id)s" msgstr "%(resource)s %(id)s에 대한 추가를 실행 취소할 수 없음" msgid "Unexpected error." msgstr "예기치 않은 오류가 발생했습니다. " #, python-format msgid "" "Unexpected exception occurred while removing network %(net)s from agent " "%(agent)s" msgstr "" "에이전트 %(agent)s에서 네트워크 %(net)s을(를) 제거하는 중에 예상치 못한 예외 " "발생" msgid "Unexpected exception occurred." msgstr "예상치 못한 예외가 발생했습니다." #, python-format msgid "Unexpected exception while checking supported feature via command: %s" msgstr "명령을 통해 지원되는 기능을 확인하는 중에 예상치 못한 예외 발생: %s" msgid "Unexpected exception while checking supported ip link command" msgstr "지원되는 ip 링크 명령을 확인하는 중에 예상치 못한 예외 발생" #, python-format msgid "Unknown network_type %(network_type)s for network %(network_id)s." msgstr "네트워크 %(network_id)s에서 알 수 없는 network_type %(network_type)s." msgid "Unrecoverable error: please check log for details." msgstr "복구할 수 없는 오류: 세부 사항은 로그를 확인하십시오." #, python-format msgid "" "Will not send event %(method)s for network %(net_id)s: no agent available. " "Payload: %(payload)s" msgstr "" "네트워크 %(net_id)s 이벤트 %(method)s을(를) 보내지 않음: 사용할 수 있는 에이" "전트가 없습니다. 페이로드: %(payload)s" #, python-format msgid "_bind_port_if_needed failed, deleting port '%s'" msgstr "_bind_port_if_needed 삭제, 포트 '%s' 삭제" #, python-format msgid "_bind_port_if_needed failed. Deleting all ports from create bulk '%s'" msgstr "_bind_port_if_needed 실패, 작성 벌크 '%s'에서 모든 포트 삭제" msgid "done with wait" msgstr "지연된 후 완료" #, python-format msgid "" "mechanism_manager.create_%(res)s_postcommit failed for %(res)s: " "'%(failed_id)s'. 
Deleting %(res)ss %(resource_ids)s" msgstr "" "%(res)s의 mechanism_manager.create_%(res)s_postcommit에 실패: " "'%(failed_id)s'. %(res)ss %(resource_ids)s 삭제" #, python-format msgid "" "mechanism_manager.create_network_postcommit failed, deleting network '%s'" msgstr "mechanism_manager.create_network_postcommit에 실패, 네트워크 '%s' 삭제" #, python-format msgid "mechanism_manager.create_port_postcommit failed, deleting port '%s'" msgstr "mechanism_manager.create_port_postcommit 실패, 포트 '%s' 삭제" #, python-format msgid "mechanism_manager.create_subnet_postcommit failed, deleting subnet '%s'" msgstr "mechanism_manager.create_subnet_postcommit 실패, 서브넷 '%s' 삭제" msgid "mechanism_manager.delete_network_postcommit failed" msgstr "mechanism_manager.delete_network_postcommit 실패" #, python-format msgid "mechanism_manager.delete_port_postcommit failed for port %s" msgstr "포트 %s의 mechanism_manager.delete_port_postcommit 실패" msgid "mechanism_manager.delete_subnet_postcommit failed" msgstr "mechanism_manager.delete_subnet_postcommit 실패" #, python-format msgid "mechanism_manager.update_port_postcommit failed for port %s" msgstr "포트 %s의 mechanism_manager.update_port_postcommit 실패" #, python-format msgid "ofctl request %(request)s error %(error)s" msgstr "Ofctl 요청 %(request)s 오류 %(error)s" #, python-format msgid "ofctl request %(request)s timed out" msgstr "Ofctl 요청 %(request)s 제한시간 초과" #, python-format msgid "tunnel_type %s not supported by agent" msgstr "에이전트에서 지원되지 않는 tunnel_type %s" neutron-8.4.0/neutron/locale/ko_KR/LC_MESSAGES/neutron-log-warning.po0000664000567000056710000005351213044372760026371 0ustar jenkinsjenkins00000000000000# Translations template for neutron. # Copyright (C) 2015 ORGANIZATION # This file is distributed under the same license as the neutron project. # # Translators: # Sungjin Kang , 2016. #zanata msgid "" msgstr "" "Project-Id-Version: neutron 8.1.3.dev11\n" "Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n" "POT-Creation-Date: 2016-06-22 18:13+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" "PO-Revision-Date: 2016-04-07 07:06+0000\n" "Last-Translator: SeYeon Lee \n" "Language: ko-KR\n" "Plural-Forms: nplurals=1; plural=0;\n" "Generated-By: Babel 2.0\n" "X-Generator: Zanata 3.7.3\n" "Language-Team: Korean (South Korea)\n" #, python-format msgid "%(agent_type)s agent %(agent_id)s is not active" msgstr "%(agent_type)s 에이전트 %(agent_id)s이(가) 활성이 아님" #, python-format msgid "" "%(port_num)d router ports found on the metadata access network. Only the " "port %(port_id)s, for router %(router_id)s will be considered" msgstr "" "메타데이터 액세스 네트워크에서 %(port_num)d 라우터 포트를 찾았습니다. " "%(router_id)s 라우터의 %(port_id)s 포트만 고려됩니다. " #, python-format msgid "%(type)s tunnel %(id)s not found" msgstr "%(type)s 터널 %(id)s을(를) 찾을 수 없음" #, python-format msgid "%s does not support QoS; no rule types available" msgstr "%s에서 QoS를 지원하지 않습니다. 사용 가능한 규칙 유형이 없습니다." 
#, python-format msgid "%s is already registered" msgstr "%s이(가) 이미 등록됨" msgid "A concurrent port creation has occurred" msgstr "동시 포트 작성이 발생함" #, python-format msgid "" "Action %(action)s for network %(net_id)s could not complete successfully: " "%(reason)s" msgstr "" "네트워크 %(net_id)s의 작업 %(action)s을 성공적으로 완료할 수 없음: %(reason)s" #, python-format msgid "Action %s not supported" msgstr "작업 %s이(가) 지원되지 않음" #, python-format msgid "" "Agent healthcheck: found %(count)s dead agents out of %(total)s:\n" "%(data)s" msgstr "" "에이전트 상태 확인: %(total)s 중 %(count)s개의 에이전트가 작동하지 않음:\n" "%(data)s" #, python-format msgid "Attempted to get traffic counters of chain %s which does not exist" msgstr "존재하지 않는 %s 체인의 트래픽 카운터를 가져오려고 시도" #, python-format msgid "Attempting to bind with dead agent: %s" msgstr "작동하지 않는 에이전트와 바인드하려고 시도: %s" #, python-format msgid "BGP Peer %s has been deleted." msgstr "BGP 피어 %s이(가) 삭제되었습니다." #, python-format msgid "BGP Speaker %s has been deleted." msgstr "BGP 스피커 %s이(가) 삭제되었습니다." #, python-format msgid "" "BGP speaker %s may have been deleted and its resources may have already been " "disposed." msgstr "BGP 스피커 %s이(가) 삭제되었으며 해당 자원이 이미 삭제되었습니다." #, python-format msgid "BgpDrAgent %s is down" msgstr "BgpDrAgent %s이(가) 작동 중지됨" #, python-format msgid "Cannot find vf index for pci slot %s" msgstr "PCI 슬롯 %s에대한 vf 인덱스를 찾을 수 없음" #, python-format msgid "Cannot find vfs %(vfs)s in device %(dev_name)s" msgstr "장치 %(dev_name)s에서 vfs %(vfs)s을(를) 찾을 수 없음" msgid "Cannot schedule BgpSpeaker to DrAgent. Reason: No scheduler registered." msgstr "" "DrAgent에 대해 BgpSpeaker를 스케줄링할 수 없습니다. 이유: 스케줄러가 등록되" "지 않았습니다." #, python-format msgid "Clearing cache of registered ports, retries to resync were > %s" msgstr "등록된 포트의 캐시 삭제, 재동기화 시도 수가 %s보다 큼" #, python-format msgid "Could not expand segment %s" msgstr "%s 세그먼트를 확장할 수 없음" #, python-format msgid "DHCP agent %s is not active" msgstr "DHCP 에이전트 %s이(가) 활성 상태가 아님" msgid "DVR functionality requires a server upgrade." msgstr "DVR 기능을 사용하려면 서버를 업그레이드해야 합니다." #, python-format msgid "" "DVR: Unable to retrieve subnet information for subnet_id %s. The subnet or " "the gateway may have already been deleted" msgstr "" "DVR: subnet_id %s의 서브넷 정보를 검색할 수 없습니다. 서브넷이나 게이트웨이" "가 이미 삭제되었을 수 있습니다." #, python-format msgid "Deleting flow %s" msgstr "플로우 %s 삭제" #, python-format msgid "Deleting flow with cookie 0x%(cookie)x" msgstr "쿠키가 0x%(cookie)x인 플로우 삭제" #, python-format msgid "Device %(dev)s failed for %(times)s times and won't be retried anymore" msgstr "%(dev)s 장치가 %(times)s번 실패했으므로 더 이상 재시도하지 않음" #, python-format msgid "" "Device %(device)s requested by agent %(agent_id)s on network %(network_id)s " "not bound, vif_type: %(vif_type)s" msgstr "" "네트워크 %(network_id)s에서 에이전트 %(agent_id)s이(가) 요청한 장치 " "%(device)s이(가) 바인드되지 않음, vif_type: %(vif_type)s" #, python-format msgid "" "Device %s does not exist so ARP entry cannot be updated, will cache " "information to be applied later when the device exists" msgstr "" "장치 %s이(가) 없으므로 ARP 항목을 업데이트할 수 없습니다. 장치가 있으면 나중" "에 적용될 정보를 캐시합니다." #, python-format msgid "Device %s does not support state change" msgstr "장치 %s에서 상태 변경을 지원하지 않음" #, python-format msgid "Device %s not defined on plugin or binding failed" msgstr "%s 장치가 플러그인에서 정의되지 않았거나 바인딩에 실패" #, python-format msgid "" "Dictionary %(dict_name)s for agent %(agent_type)s on host %(host)s is " "invalid." msgstr "" "호스트 %(host)s에 있는 에이전트 %(agent_type)s 사전 %(dict_name)s이(가) 올바" "르지 않습니다." 
#, python-format msgid "Did not find expected name \"%(ext_name)s\" in %(file)s" msgstr "%(file)s에서 예상된 이름 \"%(ext_name)s\"을(를) 찾지 못했음" msgid "Driver configuration doesn't match with enable_security_group" msgstr "드라이버 구성이 enable_security_group과 일치하지 않음" #, python-format msgid "" "Duplicate iptables %(thing)s detected. This may indicate a bug in the the " "iptables %(thing)s generation code. Line: %(line)s" msgstr "" "중복 iptables %(thing)s이(가) 발견되었습니다. iptables %(thing)s 생성 코드에 " "버그가 있음을 나타낼 수 있습니다. 행: %(line)s" #, python-format msgid "Endpoint with ip %s already exists" msgstr "IP가 %s인 엔드포인트가 이미 있음" #, python-format msgid "Extension %s not supported by any of loaded plugins" msgstr "%s 확장이 로드된 어떤 플러그인에서도 지원되지 않음" #, python-format msgid "Extension file %(f)s wasn't loaded due to %(exception)s" msgstr "%(exception)s(으)로 인해 %(f)s 확장 파일이 로드되지 않았음" #, python-format msgid "Failed to bind port %(port)s on host %(host)s at level %(lvl)s" msgstr "" "레벨 %(lvl)s에서 호스트 %(host)s의 포트 %(port)s을(를) 바인드하는 데 실패" #, python-format msgid "Failed to delete namespace %s" msgstr "%s 네임스페이스 삭제 실패" #, python-format msgid "" "Failed to notify L3 agent on host %(host)s about added router. Attempt " "%(attempt)d out of %(max_attempts)d" msgstr "" "호스트 %(host)s에서 L3 에이전트에 추가된 라우터에 대해 알리는 데 실패했습니" "다. %(max_attempts)d 중 %(attempt)d번 시도했습니다." #, python-format msgid "Failed to set device %s state" msgstr "장치 %s 상태를 설정하는 데 실패" #, python-format msgid "Failed to set spoofcheck for device %s" msgstr "장치 %s의 위조 검사를 설정하지 못함" #, python-format msgid "Failed trying to delete interface: %s" msgstr "인터페이스 삭제 실패: %s" #, python-format msgid "Failed trying to delete namespace: %s" msgstr "네임스페이스 삭제 실패: %s" #, python-format msgid "Found failed openvswitch port: %s" msgstr "실패한 openvswitch 포트 발견: %s" #, python-format msgid "Found not yet ready openvswitch port: %s" msgstr "아직 준비되지 않은 openvswitch 포트 발견: %s" #, python-format msgid "Info for router %s was not found. Performing router cleanup" msgstr "라우터 %s 정보를 찾지 못했습니다. 라우터 정리 수행" msgid "Invalid Interface ID, will lead to incorrect tap device name" msgstr "" "올바르지 않은 인터페이스 ID로 인해 올바르지 않은 테이프 디바이스 이름이 만들" "어짐" msgid "Invalid Network ID, will lead to incorrect bridge name" msgstr "올바르지 않은 네트워크 ID로 인해 올바르지 않은 브릿지 이름이 생성됨" #, python-format msgid "Invalid Segmentation ID: %s, will lead to incorrect vxlan device name" msgstr "" "올바르지 구분 ID %s(으)로 인해 올바르지 않은 vxlan 디바이스 이름이 생성됨" msgid "Invalid VLAN ID, will lead to incorrect subinterface name" msgstr "" "올바르지 않은 VLAN ID로 인해 올바르지 않은 하위 인터페이스 이름이 만들어짐" #, python-format msgid "Invalid remote IP: %s" msgstr "올바르지 않은 원격 IP: %s" #, python-format msgid "" "Invalid value for pagination_max_limit: %s. It should be an integer greater " "to 0" msgstr "" "pagination_max_limit의 올바르지 않은 값: %s. 이는 0보다 큰 정수여야 합니다. " #, python-format msgid "" "L2 agent could not get DVR MAC address from server. Retrying. Detailed " "message: %s" msgstr "" "L2 에이전트가 서버에서 DVR MAC 주소를 가져올 수 없습니다. 재시도 중입니다. 자" "세한 메시지: %s" #, python-format msgid "Loaded plugins do not implement extension %s interface" msgstr "로드된 플러그인이 확장 %s 인터페이스를 구현하지 않음" #, python-format msgid "" "Network %s may have been deleted and its resources may have already been " "disposed." msgstr "네트워크 %s이(가) 삭제되었으며 해당 자원이 이미 삭제되었습니다." msgid "" "Neutron server does not support state report. State report for this agent " "will be disabled." msgstr "" "Neutron 서버에서 상태 보고서를 지원하지 않습니다. 이 에이전트의 상태 보고서" "를 사용할 수 없습니다." 
msgid "No DHCP agents available, skipping rescheduling" msgstr "DHCP 에이전트를 사용할 수 없음, 재스케줄링 건너뛰기" #, python-format msgid "No L3 agents can host the router %s" msgstr "L3 에이전트가 라우터 %s을(를) 호스트할 수 없음" #, python-format msgid "No MTU configured for port %s" msgstr "포트 %s에 구성된 MTU가 없음" msgid "No active L3 agents" msgstr "활성 L3 에이전트가 없음" #, python-format msgid "No controller found for: %s - returning response code 404" msgstr "%s 컨트롤러를 찾을 수 없음 - 응답 코드 404 반환됨" #, python-format msgid "No flat network found on physical network %s" msgstr "실제 네트워크 %s에서 플랫 네트워크를 찾을 수 없음" msgid "No more DHCP agents" msgstr "추가 DHCP 에이전트가 없음" #, python-format msgid "" "No plugin found for resource:%s. API calls may not be correctly dispatched" msgstr "" "%s 자원 플러그인을 찾을 수 없습니다. API 호출이 올바르게 디스패치되지 않을 " "수 있습니다." #, python-format msgid "No plugin found for: %s" msgstr "%s 플러그인을 찾을 수 없음" #, python-format msgid "No routers compatible with L3 agent configuration on host %s" msgstr "호스트 %s에서 L3 에이전트 구성과 호환 가능한 라우터가 없음" #, python-format msgid "No sqlalchemy event for resource %s found" msgstr "자원 %s sqlalchemy 이벤트를 찾을 수 없음" #, python-format msgid "No valid gateway port on subnet %s is found for IPv6 RA" msgstr "서브넷 %s에 IPv6 RA의 올바른 게이트웨이 포트가 없음" #, python-format msgid "No vlan_id %(vlan_id)s found on physical network %(physical_network)s" msgstr "" "실제 네트워크 %(physical_network)s에서 vlan_id %(vlan_id)s을(를) 찾을 수 없음" #, python-format msgid "Nova event: %s returned with failed status" msgstr "Nova 이벤트: %s이(가) 실패한 상태로 반환됨" msgid "" "OVS is dead. OVSNeutronAgent will keep running and checking OVS status " "periodically." msgstr "" "OVS가 작동하지 않습니다. OVSNeutronAgent가 계속 실행되며 정기적으로 OVS 상태" "를 확인합니다." msgid "OVS is restarted. OVSNeutronAgent will reset bridges and recover ports." msgstr "" "OVS가 다시 시작됩니다. OVSNeutronAgent가 브릿지를 재설정하고 포트를 복구합니" "다." #, python-format msgid "" "Only %(active)d of %(total)d DHCP agents associated with network " "'%(net_id)s' are marked as active, so notifications may be sent to inactive " "agents." msgstr "" "네트워크 '%(net_id)s'와 연관된 %(total)d DHCP 에이전트 중 %(active)d만 활성으" "로 표시되므로 비활성 에이전트에 알림이 전송될 수 있습니다." #, python-format msgid "" "Option \"%(option)s\" must be supported by command \"%(command)s\" to enable " "%(mode)s mode" msgstr "" "%(mode)s 모드를 사용하려면 명령 \"%(command)s\"에서 옵션 \"%(option)s\"을" "(를) 지원해야 함" #, python-format msgid "" "Port %(pid)s on network %(network)s not bound, no agent registered on host " "%(host)s" msgstr "" "네트워크 %(network)s의 포트 %(pid)s이(가) 바인드되지 않음, 호스트 %(host)s에 " "등록된 에이전트가 없음" #, python-format msgid "Port %s not found during update" msgstr "업데이트 중에 포트 %s을(를) 찾을 수 없음" msgid "Port ID not set! Nova will not be notified of port status change." msgstr "" "포트 ID를 설정하지 않았습니다! Nova에 포트 상태 변경을 알리지 않습니다." #, python-format msgid "Received %(resource)s %(policy_id)s without context" msgstr "컨텍스트 없이 %(resource)s %(policy_id)s을(를) 수신" #, python-format msgid "Refusing to bind port %(pid)s to dead agent: %(agent)s" msgstr "포트 %(pid)s을(를) 작동하지 않는 에이전트 %(agent)s에 바인드 거부" #, python-format msgid "" "Removing network %(network)s from agent %(agent)s because the agent did not " "report to the server in the last %(dead_time)s seconds." msgstr "" "마지막 %(dead_time)s초에 에이전트가 서버에 보고하지 않았으므로 에이전트 " "%(agent)s에서 네트워크 %(network)s 제거" #, python-format msgid "" "Rescheduling router %(router)s from agent %(agent)s because the agent did " "not report to the server in the last %(dead_time)s seconds." msgstr "" "마지막 %(dead_time)s초에 에이전트가 서버에 보고하지 않았으므로 에이전트 " "%(agent)s에서 라우터 %(router)s 재스케줄링." 
#, python-format msgid "Respawning %(service)s for uuid %(uuid)s" msgstr "uuid %(uuid)s %(service)s 다시 파생" #, python-format msgid "Router %s was not found. Skipping agent notification." msgstr "라우터 %s을(를) 찾을 수 없습니다. 에이전트 알림을 건너뜁니다." msgid "" "Security group agent binding currently not set. This should be set by the " "end of the init process." msgstr "" "보안 그룹 에이전트 바인딩이 현재 설정되지 않았습니다. init 프로세스 종료 시 " "설정해야 합니다." #, python-format msgid "" "The configured driver %(driver)s has been moved, automatically using " "%(new_driver)s instead. Please update your config files, as this automatic " "fixup will be removed in a future release." msgstr "" "구성된 드라이버 %(driver)s이(가) 이동되었습니다. 대신 %(new_driver)s을(를) 자" "동으로 사용합니다. 이 자동 수정은 향후 릴리스에서 제거되므로 구성 파일을 업데" "이트하십시오." msgid "" "The input changed_since must be in the following format: YYYY-MM-DDTHH:MM:SS" msgstr "input changed_since 형식은 YYYY-MM-DDTHH:MM:SS이어야 함" msgid "" "The quota driver neutron.quota.ConfDriver is deprecated as of Liberty. " "neutron.db.quota.driver.DbQuotaDriver should be used in its place" msgstr "" "Liberty에서 quota driver neutron.quota.ConfDriver는 더 이상 사용되지 않습니" "다. 대신 neutron.db.quota.driver.DbQuotaDriver를 사용해야 합니다." msgid "" "The remote metadata server responded with Forbidden. This response usually " "occurs when shared secrets do not match." msgstr "" "원격 메타데이터 서버가 Forbidden으로 응답했습니다. 이 응답은 대개 공유 본인확" "인정보가 일치하지 않을 때 발생합니다. " msgid "" "The user that is executing neutron can read the namespaces without using the " "root_helper. Disable the use_helper_for_ns_read option to avoid a " "performance impact." msgstr "" "Neutron을 실행하는 사용자가 root_helper를 사용하지 않고 네임스페이스를 읽을 " "수 있습니다. 성능에 영향을 미치지 않도록 use_helper_for_ns_read 옵션을 사용하" "지 않게 설정하십시오." #, python-format msgid "" "Time since last %s agent reschedule check has exceeded the interval between " "checks. Waiting before check to allow agents to send a heartbeat in case " "there was a clock adjustment." msgstr "" "마지막 %s 에이전트 재스케줄링을 확인한 이후 시간이 확인 사이의 간격을 초과했" "습니다. 확인 전에 대기하면 클럭 조정이 있는 경우 에이전트가 하트비트를 보낼 " "수 있습니다." #, python-format msgid "" "Tried to remove rule that was not there: %(chain)r %(rule)r %(wrap)r %(top)r" msgstr "없는 규칙을 제거하려 했음: %(chain)r %(rule)r %(wrap)r %(top)r " msgid "Tunnel synchronization requires a server upgrade." msgstr "터널 동기화를 사용하려면 서버를 업그레이드해야 합니다." #, python-format msgid "" "Unable to %(action)s dhcp for %(net_id)s: there is a conflict with its " "current state; please check that the network and/or its subnet(s) still " "exist." msgstr "" "%(net_id)s dhcp에 대해 %(action)s을(를) 수행할 수 없음: 현재 상태와 충돌됩니" "다. 네트워크 및/또는 서브넷이 여전히 있는지 확인하십시오." #, python-format msgid "Unable to configure IP address for floating IP: %s" msgstr "Floating IP에대한 IP 주소를 구성할 수 없음: %s" #, python-format msgid "Unable to find data type descriptor for attribute %s" msgstr "속성 %s 데이터 유형 descriptor를 찾을 수 없음" #, python-format msgid "Unable to retrieve active L2 agent on host %s" msgstr "호스트 %s에서 활성 L2 에이전트를 검색할 수 없음" #, python-format msgid "" "Unable to schedule network %s: no agents available; will retry on subsequent " "port and subnet creation events." msgstr "" "네트워크 %s을(를) 스케줄링할 수 없음: 에이전트를 사용할 수 없으므로, 후속 포" "트와 서브넷 작성 이벤트에서 재시도합니다." #, python-format msgid "Unsupported QoS rule type for %(rule_id)s: %(rule_type)s; skipping" msgstr "%(rule_id)s 지원되지 않는 QoS 규칙 유형: %(rule_type)s. 건너뜁니다." #, python-format msgid "Updating lease expiration is now deprecated. Issued from host %s." msgstr "" "임대 만기 업데이트는 더 이상 사용되지 않습니다. 호스트 %s에서 실행됩니다." 
#, python-format msgid "" "VF with PCI slot %(pci_slot)s is already assigned; skipping reset maximum " "rate" msgstr "" "PCI 슬롯 %(pci_slot)s이(가) 있는 VF가 이미 할당되어 있습니다. 최대 속도 재설" "정 건너뛰기" #, python-format msgid "" "VIF port: %s has no ofport configured, and might not be able to transmit" msgstr "VIF 포트 %s에 ofport가 구성되지 않았으므로 전송할 수 없음" msgid "" "VXLAN muticast group(s) must be provided in vxlan_group option to enable " "VXLAN MCAST mode" msgstr "" "VXLAN MCAST 모드를 사용하려면 vxlan_group 옵션에 VXLAN 멀티캐스트 그룹을 제공" "해야 함" #, python-format msgid "" "You are using the deprecated firewall driver: %(deprecated)s.Use the " "recommended driver %(new)s instead." msgstr "" "더 이상 사용되지 않는 방화벽 드라이버를 사용 중임: %(deprecated)s. 권장 드라" "이버 %(new)s을(를) 대신 사용하십시오." #, python-format msgid "device pci mismatch: %(device_mac)s - %(pci_slot)s" msgstr "장치 PCI 불일치: %(device_mac)s - %(pci_slot)s" #, python-format msgid "failed to parse vf link show line %(line)s: for %(device)s" msgstr "%(device)s의 vf 링크 표시 행 %(line)s 구문 분석 실패" #, python-format msgid "" "l3-agent cannot check service plugins enabled at the neutron server when " "startup due to RPC error. It happens when the server does not support this " "RPC API. If the error is UnsupportedVersion you can ignore this warning. " "Detail message: %s" msgstr "" "RPC 오류로 인해 시작 시 l3 에이전트가 neutron 서버에서 사용된 서비스 플러그인" "을 확인할 수 없습니다. 서버에서 이 RPC API를 지원하지 않을 때 이 문제가 발생" "합니다. 오류가 UnsupportedVersion인 경우 이 경고를 무시할 수 있습니다. 자세" "한 메시지: %s" #, python-format msgid "ofport: %(ofport)s for VIF: %(vif)s is not a positive integer" msgstr "VIF: %(vif)s fport: %(ofport)s이(가) 양수가 아님" #, python-format msgid "port_id to device with MAC %s not found" msgstr "MAC가 %s인 장치의 port_id를 찾을 수 없음" msgid "" "security_group_info_for_devices rpc call not supported by the server, " "falling back to old security_group_rules_for_devices which scales worse." msgstr "" "서버에서 security_group_info_for_devices rpc 호출을 지원하지 않으므로, 확장 " "기능이 더욱 취약한 이전 security_group_rules_for_devices로 장애 복구됩니다." #, python-format msgid "unable to modify mac_address of ACTIVE port %s" msgstr "활성 포트 %s에대한 mac_address를 수정할 수 없음" neutron-8.4.0/neutron/locale/zh_CN/0000775000567000056710000000000013044373210020305 5ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/locale/zh_CN/LC_MESSAGES/0000775000567000056710000000000013044373210022072 5ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/locale/zh_CN/LC_MESSAGES/neutron.po0000664000567000056710000045347713044372760024161 0ustar jenkinsjenkins00000000000000# Translations template for neutron. # Copyright (C) 2015 ORGANIZATION # This file is distributed under the same license as the neutron project. # # Translators: # Allerson Yao, 2015 # Zhong Chaoliang , 2013 # lyndon zhang , 2014 # MA QIANG , 2014 # MA QIANG , 2014 # Zhong Chaoliang , 2013 # 汪军 , 2015 # 陈展奇 , 2013-2014 # 颜海峰 , 2014 # Eric Lei , 2016. #zanata # Jimmy Li , 2016. #zanata # Linda , 2016. #zanata # maoshuai , 2016. #zanata # yan , 2016. #zanata # zzxwill , 2016. 
#zanata msgid "" msgstr "" "Project-Id-Version: neutron 8.2.1.dev52\n" "Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n" "POT-Creation-Date: 2016-09-01 18:10+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" "PO-Revision-Date: 2016-09-07 09:06+0000\n" "Last-Translator: Eric Lei \n" "Language: zh-CN\n" "Plural-Forms: nplurals=1; plural=0;\n" "Generated-By: Babel 2.0\n" "X-Generator: Zanata 3.7.3\n" "Language-Team: Chinese (China)\n" #, python-format msgid "" "\n" "Command: %(cmd)s\n" "Exit code: %(code)s\n" "Stdin: %(stdin)s\n" "Stdout: %(stdout)s\n" "Stderr: %(stderr)s" msgstr "" "\n" "命令:%(cmd)s\n" "退出代码:%(code)s\n" "标准输入:%(stdin)s\n" "标准输出:%(stdout)s\n" "标准错误:%(stderr)s" #, python-format msgid "" "%(branch)s HEAD file does not match migration timeline head, expected: " "%(head)s" msgstr "%(branch)s HEAD 文件与迁移时间线头不匹配,需要:%(head)s" #, python-format msgid "%(driver)s: Internal driver error." msgstr "%(driver)s:内部驱动错误。" #, python-format msgid "%(id)s is not a valid %(type)s identifier" msgstr "%(id)s 是无效 %(type)s 标识" #, python-format msgid "" "%(invalid_dirs)s is invalid value for sort_dirs, valid value is '%(asc)s' " "and '%(desc)s'" msgstr "" "%(invalid_dirs)s 对于 sort_dirs 是无效值,有效值是“%(asc)s”和“%(desc)s”" #, python-format msgid "%(key)s prohibited for %(tunnel)s provider network" msgstr "对于 %(tunnel)s 供应商网络,已禁止 %(key)s" #, python-format msgid "" "%(method)s called with network settings %(current)s (original settings " "%(original)s) and network segments %(segments)s" msgstr "" "已使用网络设置 %(current)s(原始设置 %(original)s)和网络段 %(segments)s 调" "用 %(method)s" #, python-format msgid "" "%(method)s called with port settings %(current)s (original settings " "%(original)s) host %(host)s (original host %(original_host)s) vif type " "%(vif_type)s (original vif type %(original_vif_type)s) vif details " "%(vif_details)s (original vif details %(original_vif_details)s) binding " "levels %(levels)s (original binding levels %(original_levels)s) on network " "%(network)s with segments to bind %(segments_to_bind)s" msgstr "" "在带有要绑定的分段 %(segments_to_bind)s 的网络 %(network)s 上使用以下设置调用" "了 %(method)s:端口设置 %(current)s(原始设置 %(original)s),主机 " "%(host)s(原始主机 %(original_host)s),vif 类型 %(vif_type)s(原始 vif 类型 " "%(original_vif_type)s)vif 详细信息 %(vif_details)s(原始 vif 详细信息 " "%(original_vif_details)s),绑定级别 %(levels)s(原始绑定级别 " "%(original_levels)s)" #, python-format msgid "" "%(method)s called with subnet settings %(current)s (original settings " "%(original)s)" msgstr "已使用子网设置 %(current)s 调用 %(method)s(原始设置 %(original)s)" #, python-format msgid "%(method)s failed." msgstr "%(method)s 失败。" #, python-format msgid "%(name)s '%(addr)s' does not match the ip_version '%(ip_version)s'" msgstr "%(name)s“%(addr)s”与 ip_version“%(ip_version)s”不匹配" #, python-format msgid "%(param)s must be in %(range)s range." msgstr "%(param)s 必须在 %(range)s 范围内。" #, python-format msgid "%s cannot be called while in offline mode" msgstr "在 %s 处于脱机方式时,无法对其进行调用" #, python-format msgid "%s is invalid attribute for sort_key" msgstr "%s 对于 sort_key 是无效属性" #, python-format msgid "%s is invalid attribute for sort_keys" msgstr "%s 对于 sort_keys 是无效属性" #, python-format msgid "%s is not a valid VLAN tag" msgstr "%s 是无效 VLAN 标记" #, python-format msgid "%s must be specified" msgstr "必须指定 %s" #, python-format msgid "%s must implement get_port_from_device or get_ports_from_devices." 
msgstr "%s 必须实现 get_port_from_device 或 get_ports_from_devices。" #, python-format msgid "%s prohibited for VLAN provider network" msgstr "VLAN 供应商网络中禁止 %s" #, python-format msgid "%s prohibited for flat provider network" msgstr "在平面供应商网络中禁止 %s" #, python-format msgid "%s prohibited for local provider network" msgstr "在本地供应商网络中禁止 %s" #, python-format msgid "" "'%(data)s' contains '%(length)s' characters. Adding a domain name will cause " "it to exceed the maximum length of a FQDN of '%(max_len)s'" msgstr "" "“%(data)s”包含“%(length)s”个字符。添加域名将导致它超出 FQDN 的最大长" "度“%(max_len)s”" #, python-format msgid "" "'%(data)s' contains '%(length)s' characters. Adding a sub-domain will cause " "it to exceed the maximum length of a FQDN of '%(max_len)s'" msgstr "" "“%(data)s”包含“%(length)s”个字符。添加子域将导致它超出 FQDN 的最大长" "度“%(max_len)s”" #, python-format msgid "'%(data)s' exceeds maximum length of %(max_len)s" msgstr "“%(data)s”超过最大长度 %(max_len)s" #, python-format msgid "'%(data)s' is not an accepted IP address, '%(ip)s' is recommended" msgstr "“%(data)s”并非已接受 IP 地址。建议使用“%(ip)s”" #, python-format msgid "'%(data)s' is not in %(valid_values)s" msgstr "“%(data)s”不在 %(valid_values)s 中" #, python-format msgid "'%(data)s' is too large - must be no larger than '%(limit)d'" msgstr "“%(data)s”太大 – 不能大于“%(limit)d”" #, python-format msgid "'%(data)s' is too small - must be at least '%(limit)d'" msgstr "“%(data)s”太小 - 必须至少为“%(limit)d”" #, python-format msgid "'%(data)s' isn't a recognized IP subnet cidr, '%(cidr)s' is recommended" msgstr "“%(data)s”是不可识别的 IP子网 CIDR,建议使用“%(cidr)s”" #, python-format msgid "'%(data)s' not a valid PQDN or FQDN. Reason: %(reason)s" msgstr "“%(data)s”是无效 PQDN 或 FQDN。原因:%(reason)s" #, python-format msgid "'%(host)s' is not a valid nameserver. %(msg)s" msgstr "“%(host)s”是无效名称服务器。%(msg)s" #, python-format msgid "'%s' Blank strings are not permitted" msgstr "“%s”不允许空白字符串" #, python-format msgid "'%s' cannot be converted to boolean" msgstr "无法将“%s”转换为布尔值" #, python-format msgid "'%s' cannot be converted to lowercase string" msgstr "无法将“%s”转换为小写字符串" #, python-format msgid "'%s' contains whitespace" msgstr "“%s”包含空格" #, python-format msgid "'%s' exceeds the 255 character FQDN limit" msgstr "“%s”超过了 255 个字符这一 FQDN 限制" #, python-format msgid "'%s' is a FQDN. It should be a relative domain name" msgstr "“%s”为 FQDN。它应该是相对域名" #, python-format msgid "'%s' is not a FQDN" msgstr "“%s”并非 FQDN" #, python-format msgid "'%s' is not a dictionary" msgstr "“%s”不是字典" #, python-format msgid "'%s' is not a list" msgstr "“%s”不是列表" #, python-format msgid "'%s' is not a valid IP address" msgstr "“%s”是无效 IP 地址" #, python-format msgid "'%s' is not a valid IP subnet" msgstr "“%s”是无效 IP 子网" #, python-format msgid "'%s' is not a valid MAC address" msgstr "“%s”是无效 MAC 地址" #, python-format msgid "'%s' is not a valid RBAC object type" msgstr "“%s”是无效 RBAC 对象类型" #, python-format msgid "'%s' is not a valid UUID" msgstr "“%s”是无效 UUID" #, python-format msgid "'%s' is not a valid boolean value" msgstr "“%s”是无效布尔值" #, python-format msgid "'%s' is not a valid input" msgstr "“%s”是无效输入" #, python-format msgid "'%s' is not a valid string" msgstr "“%s”是无效字符串" #, python-format msgid "'%s' is not an integer" msgstr "“%s”不是整数" #, python-format msgid "'%s' is not an integer or uuid" msgstr "“%s”不是整数或 uuid" #, python-format msgid "'%s' is not of the form =[value]" msgstr "“%s”没有采用 =[value] 格式" #, python-format msgid "'%s' is not supported for filtering" msgstr "“%s”不支持进行过滤" #, python-format msgid "'%s' must be a non negative decimal." 
msgstr "“%s”必须为非负十进制数。" #, python-format msgid "'%s' should be non-negative" msgstr "“%s”应该为非负" msgid "'.' searches are not implemented" msgstr "未实现“.”搜索" #, python-format msgid "'module' object has no attribute '%s'" msgstr "“module”对象没有属性“%s”" msgid "'port_max' is smaller than 'port_min'" msgstr "“port_max”小于“port_min”" msgid "" "(Deprecated. Use '--subproject neutron-SERVICE' instead.) The advanced " "service to execute the command against." msgstr "" "(不推荐使用。请改为使用“--subproject neutron-SERVICE”。)要对其执行该命令的" "高级服务。" msgid "0 is not allowed as CIDR prefix length" msgstr "CIDR 前缀长度不允许为 0" msgid "" "32-bit BGP identifier, typically an IPv4 address owned by the system running " "the BGP DrAgent." msgstr "32 位 BGP 标识,通常是运行 BGP DrAgent 的系统所拥有的 IPv4 地址。" msgid "A QoS driver must be specified" msgstr "必须指定 QoS 驱动程序" msgid "A cidr must be specified in the absence of a subnet pool" msgstr "在缺少子网池的情况下,必须指定 cidr" msgid "" "A decimal value as Vendor's Registered Private Enterprise Number as required " "by RFC3315 DUID-EN." msgstr "作为 RFC3315 DUID-EN 所需的供应商注册私营企业号的十进制值。" #, python-format msgid "A default external network already exists: %(net_id)s." msgstr "缺省外部网络已存在:%(net_id)s。" msgid "" "A default subnetpool for this IP family has already been set. Only one " "default may exist per IP family" msgstr "已对此 IP 系列设置缺省子网池。对于每个 IP 系列,只能有一个缺省子网池。" msgid "A metering driver must be specified" msgstr "必须指定测量驱动程序" msgid "A password must be supplied when using auth_type md5." msgstr "使用 auth_type md5 时,必须提供密码。" msgid "API for retrieving service providers for Neutron advanced services" msgstr "用于为 Neutron 高级服务检索服务提供程序的 API" msgid "Aborting periodic_sync_routers_task due to an error." msgstr "正在异常中止 periodic_sync_routers_task,因为发生了错误。" msgid "Access to this resource was denied." msgstr "访问该资源被拒绝。" msgid "Action to be executed when a child process dies" msgstr "当子进程终止时要执行的操作" msgid "" "Add comments to iptables rules. Set to false to disallow the addition of " "comments to generated iptables rules that describe each rule's purpose. " "System must support the iptables comments module for addition of comments." msgstr "" "向 iptable 规则添加注释。设置为 false 以禁止向描述每个规则用途的已生成 " "iptable 添加注释。系统必须支持 iptable 注释模块以添加注释。" msgid "Address not present on interface" msgstr "接口上没有地址" #, python-format msgid "Address scope %(address_scope_id)s could not be found" msgstr "找不到地址范围 %(address_scope_id)s" msgid "" "Address to listen on for OpenFlow connections. Used only for 'native' driver." msgstr "对于 OpenFlow 连接,要侦听的地址。仅用于“本机”驱动程序。" msgid "Adds external network attribute to network resource." msgstr "请对网络资源添加外部网络属性。" msgid "Adds test attributes to core resources." msgstr "将测试属性添加至核心资源。" #, python-format msgid "Agent %(id)s could not be found" msgstr "找不到代理 %(id)s" #, python-format msgid "Agent %(id)s is not a L3 Agent or has been disabled" msgstr "代理 %(id)s 不是 L3 代理或已禁用" #, python-format msgid "Agent %(id)s is not a valid DHCP Agent or has been disabled" msgstr "代理 %(id)s 是无效 DHCP 代理或已禁用" msgid "Agent has just been revived" msgstr "代理程序已苏醒" msgid "" "Agent starts with admin_state_up=False when enable_new_agents=False. In the " "case, user's resources will not be scheduled automatically to the agent " "until admin changes admin_state_up to True." 
msgstr "" "当 enable_new_agents=False 时,代理将使用 admin_state_up=False 启动。在这种情" "况下,将不会自动对代理调度用户的资源,直到管理员将 admin_state_up 更改为 " "True 为止。" #, python-format msgid "Agent updated: %(payload)s" msgstr "代理程序已更新:%(payload)s" #, python-format msgid "" "Agent with agent_type=%(agent_type)s and host=%(host)s could not be found" msgstr "找不到符合以下条件的代理:agent_type=%(agent_type)s 且 host=%(host)s" msgid "Allow auto scheduling networks to DHCP agent." msgstr "允许自动对 DHCP 代理调度网络。" msgid "Allow auto scheduling of routers to L3 agent." msgstr "允许自动对 L3 代理安排路由器。" msgid "" "Allow overlapping IP support in Neutron. Attention: the following parameter " "MUST be set to False if Neutron is being used in conjunction with Nova " "security groups." msgstr "" "允许在 Neutron 中重叠 IP 支持。注意:如果将 Neutron 与 Nova 安全组配合使用," "那么以下参数必须设置为 False。" msgid "Allow running metadata proxy." msgstr "允许运行元数据代理" msgid "Allow sending resource operation notification to DHCP agent" msgstr "允许将资源操作通知发送至 DHCP 代理" msgid "Allow the creation of PTR records" msgstr "允许创建 PTR 记录" msgid "Allow the usage of the bulk API" msgstr "允许使用成批 API" msgid "Allow the usage of the pagination" msgstr "允许使用分页" msgid "Allow the usage of the sorting" msgstr "允许使用排序" msgid "Allow to perform insecure SSL (https) requests to nova metadata" msgstr "允许对 nova 元数据执行非安全 SSL (HTTPS) 请求" msgid "Allowed address pairs must be a list." msgstr "允许的地址对必须是列表。" msgid "AllowedAddressPair must contain ip_address" msgstr "AllowedAddressPair 必须包含 ip_address" msgid "" "Allows for serving metadata requests coming from a dedicated metadata access " "network whose CIDR is 169.254.169.254/16 (or larger prefix), and is " "connected to a Neutron router from which the VMs send metadata:1 request. In " "this case DHCP Option 121 will not be injected in VMs, as they will be able " "to reach 169.254.169.254 through a router. This option requires " "enable_isolated_metadata = True." msgstr "" "允许处理来自专用元数据访问网络的元数据请求,此网络的 CIDR 为 " "169.254.169.254/16(或更大前缀),并且连接至 VM 从其发送元数据(1 个请求)的 " "Neutron 路由器。此情况下,不会在 VM 中插入 DHCP 选项 121,因为它们能够通过路" "由器到达 169.254.169.254。此选项要求 enable_isolated_metadata = True。" #, python-format msgid "" "Already hosting BGP Speaker for local_as=%(current_as)d with router_id=" "%(rtid)s." msgstr "" "已托管对应 local_as=%(current_as)d 和 router_id=%(rtid)s 的 BGP Speaker。" #, python-format msgid "" "Already hosting maximum number of BGP Speakers. Allowed scheduled count=" "%(count)d" msgstr "托管的 BGP Speaker 已达到最大数目。允许安排数为 %(count)d" msgid "An RBAC policy already exists with those values." msgstr "已存在带有这些值的 RBAC 策略。" msgid "An identifier must be specified when updating a subnet" msgstr "更新子网时,必须指定标识" msgid "An interface driver must be specified" msgstr "必须指定接口驱动程序" msgid "" "An ordered list of extension driver entrypoints to be loaded from the " "neutron.ml2.extension_drivers namespace. For example: extension_drivers = " "port_security,qos" msgstr "" "要从 neutron.ml2.extension_drivers 名称空间装入的扩展驱动程序入口点的有序列" "表。例如:extension_drivers = port_security,qos" msgid "" "An ordered list of networking mechanism driver entrypoints to be loaded from " "the neutron.ml2.mechanism_drivers namespace." msgstr "" "要从 neutron.ml2.mechanism_drivers 名称空间装入的联网机制驱动程序入口点的有序" "列表。" msgid "An unexpected internal error occurred." msgstr "发生了意外内部错误。" msgid "An unknown error has occurred. Please try your request again." 
msgstr "发生未知错误。请再次尝试您的请求。" msgid "Async process didn't respawn" msgstr "同步进程未重新衍生" #, python-format msgid "Attribute '%s' not allowed in POST" msgstr "在 POST 中,不允许属性“%s”" #, python-format msgid "Authentication type not supported. Requested type=%(auth_type)s." msgstr "不受支持的认证类型。请求的类型为 %(auth_type)s。" msgid "Authorization URL for connecting to designate in admin context" msgstr "管理员上下文中要指定的连接授权 URL" msgid "Automatically remove networks from offline DHCP agents." msgstr "自动从脱机 DHCP 代理移除网络。" msgid "" "Automatically reschedule routers from offline L3 agents to online L3 agents." msgstr "将路由器从脱机 L3 代理自动重新安排至联机 L3 代理程序。" msgid "Availability zone of this node" msgstr "此节点的可用区域" #, python-format msgid "AvailabilityZone %(availability_zone)s could not be found." msgstr "找不到 AvailabilityZone %(availability_zone)s。" msgid "Available commands" msgstr "可用的命令" #, python-format msgid "" "BGP Peer %(peer_ip)s for remote_as=%(remote_as)s, running for BGP Speaker " "%(speaker_as)d not added yet." msgstr "" "未添加对应 remote_as=%(remote_as)s 且正针对 BGP Speaker %(speaker_as)d 运行" "的 BGP Peer %(peer_ip)s。" #, python-format msgid "" "BGP Speaker %(bgp_speaker_id)s is already configured to peer with a BGP Peer " "at %(peer_ip)s, it cannot peer with BGP Peer %(bgp_peer_id)s." msgstr "" "BGP Speaker %(bgp_speaker_id)s 已配置为与 BGP Peer(位于 %(peer_ip)s)同级。" "它不能与 BGP Peer %(bgp_peer_id)s 同级。" #, python-format msgid "" "BGP Speaker for local_as=%(local_as)s with router_id=%(rtid)s not added yet." msgstr "" "尚未添加对应 local_as=%(local_as)s 和 router_id=%(rtid)s 的 BGP Speaker。" #, python-format msgid "" "BGP peer %(bgp_peer_id)s is not associated with BGP speaker " "%(bgp_speaker_id)s." msgstr "BGP Peer %(bgp_peer_id)s 未与 BGP Speaker %(bgp_speaker_id)s 关联。" #, python-format msgid "BGP peer %(bgp_peer_id)s not authenticated." msgstr "未认证 BGP Peer %(bgp_peer_id)s。" #, python-format msgid "BGP peer %(id)s could not be found." msgstr "找不到 BGP Peer %(id)s。" #, python-format msgid "" "BGP speaker %(bgp_speaker_id)s is not hosted by the BgpDrAgent %(agent_id)s." msgstr "BGP Speaker %(bgp_speaker_id)s 未由 BgpDrAgent %(agent_id)s 托管。" #, python-format msgid "BGP speaker %(id)s could not be found." msgstr "找不到 BGP Speaker %(id)s。" msgid "BGP speaker driver class to be instantiated." msgstr "要实例化的 BGP Speaker 驱动程序类。" msgid "Backend does not support VLAN Transparency." msgstr "后端不支持 VLAN 透明。" #, python-format msgid "" "Bad prefix or mac format for generating IPv6 address by EUI-64: %(prefix)s, " "%(mac)s:" msgstr "" "以下前缀或 mac 的格式不正确,无法通过 EUI-64 生成 IPv6 地址:%(prefix)s 和 " "%(mac)s:" #, python-format msgid "Bad prefix type for generate IPv6 address by EUI-64: %s" msgstr "前缀类型不正确,无法通过 EUI-64 生成 IPv6 地址:%s" #, python-format msgid "Base MAC: %s" msgstr "基本 MAC:%s" msgid "" "Base log dir for dnsmasq logging. The log contains DHCP and DNS log " "information and is useful for debugging issues with either DHCP or DNS. If " "this section is null, disable dnsmasq log." msgstr "" "用于 dnsmasq 日志记录的基本日志目录。日志包含 DHCP 和 DNS 日志信息,对于调试 " "DHCP 或 DNS 存在的问题很有用。如果此部分为空,请禁用 dnsmasq 日志。" #, python-format msgid "BgpDrAgent %(agent_id)s is already associated to a BGP speaker." msgstr "BgpDrAgent %(agent_id)s 已与 BGP Speaker 相关联。" #, python-format msgid "BgpDrAgent %(id)s is invalid or has been disabled." msgstr "BgpDrAgent %(id)s 无效或已禁用。" #, python-format msgid "BgpDrAgent updated: %s" msgstr "BgpDrAgent 已更新:%s" msgid "Body contains invalid data" msgstr "主体中包含无效数据" msgid "Both network_id and router_id are None. One must be provided." 
msgstr "network_id 和 router_id 都为 None。必须提供其中一项。" #, python-format msgid "Bridge %(bridge)s does not exist." msgstr "网桥 %(bridge)s 不存在。" #, python-format msgid "Bridge %s does not exist" msgstr "网桥 %s 不存在" msgid "Bulk operation not supported" msgstr "成批操作不受支持" msgid "CA certificate file to use to verify connecting clients" msgstr "要用于验证连接客户机的 CA 证书文件。" msgid "CIDR to monitor" msgstr "要监视的 CIDR" #, python-format msgid "Callback for %(resource_type)s not found" msgstr "找不到针对 %(resource_type)s 的回调" #, python-format msgid "Callback for %(resource_type)s returned wrong resource type" msgstr "针对 %(resource_type)s 的回调返回了错误的资源类型" #, python-format msgid "Cannot add floating IP to port %s that has no fixed IPv4 addresses" msgstr "无法将浮动 IP 添加至没有固定 IPv4 地址的端口 %s" #, python-format msgid "Cannot add floating IP to port on subnet %s which has no gateway_ip" msgstr "无法将浮动 IP 添加至子网 %s(不具有任何 gateway_ip)上的端口" #, python-format msgid "Cannot add multiple callbacks for %(resource_type)s" msgstr "无法添加针对 %(resource_type)s 的多个回调" #, python-format msgid "Cannot allocate IPv%(req_ver)s subnet from IPv%(pool_ver)s subnet pool" msgstr "无法从 IPv%(pool_ver)s 子网池分配 IPv%(req_ver)s 子网" msgid "Cannot allocate requested subnet from the available set of prefixes" msgstr "无法从可用前缀集分配所请求的子网" #, python-format msgid "" "Cannot associate floating IP %(floating_ip_address)s (%(fip_id)s) with port " "%(port_id)s using fixed IP %(fixed_ip)s, as that fixed IP already has a " "floating IP on external network %(net_id)s." msgstr "" "无法使浮动 IP %(floating_ip_address)s (%(fip_id)s) 与使用固定 IP " "%(fixed_ip)s 的端口 %(port_id)s 关联,因为该固定 IP 已具有外部网络 " "%(net_id)s 上的浮动 IP。" msgid "" "Cannot change HA attribute of active routers. Please set router " "admin_state_up to False prior to upgrade." msgstr "" "不能更改活动路由器的 HA 属性。升级之前,请将路由器 admin_state_up 设置为 " "False。" #, python-format msgid "" "Cannot create floating IP and bind it to %s, since that is not an IPv4 " "address." msgstr "无法创建浮动 IP 并将它绑定至 %s,因为它不是 IPv4 地址。" #, python-format msgid "" "Cannot create floating IP and bind it to Port %s, since that port is owned " "by a different tenant." msgstr "无法创建浮动 IP 并将它绑定至端口 %s,因为该端口由另一租户拥有。" msgid "Cannot create resource for another tenant" msgstr "无法为另一租户创建资源" msgid "Cannot disable enable_dhcp with ipv6 attributes set" msgstr "在设置了 ipv6 属性的情况下,无法禁用 enable_dhcp" #, python-format msgid "Cannot find %(table)s with %(col)s=%(match)s" msgstr "找不到具有 %(col)s=%(match)s 的 %(table)s" #, python-format msgid "Cannot handle subnet of type %(subnet_type)s" msgstr "无法处理类型为 %(subnet_type)s 的子网" msgid "Cannot have multiple IPv4 subnets on router port" msgstr "路由器端口上不能有多个 IPv4 子网" #, python-format msgid "" "Cannot have multiple router ports with the same network id if both contain " "IPv6 subnets. Existing port %(p)s has IPv6 subnet(s) and network id %(nid)s" msgstr "" "不得存在多个具有相同网络标识的路由器端口(如果它们都包含 IPv6 子网)。现有端" "口 %(p)s 具有 IPv6 子网和网络标识 %(nid)s" #, python-format msgid "" "Cannot host distributed router %(router_id)s on legacy L3 agent %(agent_id)s." msgstr "无法在传统 L3 代理程序 %(agent_id)s 上托管分布式路由器 %(router_id)s。" msgid "Cannot match priority on flow deletion or modification" msgstr "无法匹配删除或修改流时的优先级" msgid "Cannot mix IPv4 and IPv6 prefixes in a subnet pool." msgstr "不能在子网池中同时使用 IPv4 前缀和 IPv6 前缀。" msgid "Cannot specify both --service and --subproject." 
msgstr "无法同时指定 --service 和 --subproject。" msgid "Cannot specify both subnet-id and port-id" msgstr "无法同时指定 subnet-id 和 port-id" msgid "Cannot understand JSON" msgstr "无法理解 JSON" #, python-format msgid "Cannot update read-only attribute %s" msgstr "无法更新只读属性 %s" msgid "" "Cannot upgrade active router to distributed. Please set router " "admin_state_up to False prior to upgrade." msgstr "" "无法将活动路由器升级为分布式路由器。在升级之前,请将路由器 admin_state_up 设" "置为 False。" msgid "Certificate Authority public key (CA cert) file for ssl" msgstr "用于 SSL 的认证中心公用密钥(CA 证书)文件" #, python-format msgid "" "Change would make usage less than 0 for the following resources: %(unders)s." msgstr "更改导致以下资源的使用率低于 0:%(unders)s。" msgid "Check ebtables installation" msgstr "检查 ebtables 安装" msgid "Check for ARP header match support" msgstr "检查 ARP 头匹配支持" msgid "Check for ARP responder support" msgstr "检查 ARP 响应程序支持" msgid "Check for ICMPv6 header match support" msgstr "检查 ICMPv6 头匹配支持" msgid "Check for OVS Geneve support" msgstr "检查 OVS Geneve 支持" msgid "Check for OVS vxlan support" msgstr "检查OVS vxlan支持" msgid "Check for VF management support" msgstr "检查 VF 管理支持" msgid "Check for iproute2 vxlan support" msgstr "检查 iproute2 vxlan 支持" msgid "Check for nova notification support" msgstr "检查 nova 通知支持" msgid "Check for patch port support" msgstr "检查补丁端口支持" msgid "Check ip6tables installation" msgstr "检查 ip6tables 安装" msgid "Check ipset installation" msgstr "检查 ipset 安装" msgid "Check keepalived IPv6 support" msgstr "检查 keepalived IPv6 支持" msgid "Check minimal dibbler version" msgstr "检查最低点播器版本" msgid "Check minimal dnsmasq version" msgstr "检查最小 dnsmasq 版本" msgid "Check netns permission settings" msgstr "检查 netns 许可权设置" msgid "Check ovs conntrack support" msgstr "检查 ovs conntrack 支持" msgid "Check ovsdb native interface support" msgstr "检查 ovsdb 本机接口支持" #, python-format msgid "" "Cidr %(subnet_cidr)s of subnet %(subnet_id)s overlaps with cidr %(cidr)s of " "subnet %(sub_id)s" msgstr "" "子网 %(subnet_id)s 的 cidr %(subnet_cidr)s 与子网 %(sub_id)s 的 cidr " "%(cidr)s 重叠" msgid "Class not found." msgstr "找不到类。" msgid "Cleanup resources of a specific agent type only." msgstr "仅清除特定代理程序类型的资源。" msgid "Client certificate for nova metadata api server." msgstr "nova 元数据 API 服务器的客户机证书。" msgid "" "Comma-separated list of : tuples, mapping " "network_device to the agent's node-specific list of virtual functions that " "should not be used for virtual networking. vfs_to_exclude is a semicolon-" "separated list of virtual functions to exclude from network_device. The " "network_device in the mapping should appear in the physical_device_mappings " "list." msgstr "" ": 元组的逗号分隔列表,这些元组将 " "network_device 映射至代理程序的特定于节点的不应用于虚拟联网的功能列表。" "vfs_to_exclude 是要从 network_device 中排除的虚拟功能的分号分隔列表。映射中" "的 network_vlan_ranges 应出现在 physical_device_mappings 列表中。" msgid "" "Comma-separated list of : tuples mapping physical " "network names to the agent's node-specific Open vSwitch bridge names to be " "used for flat and VLAN networks. The length of bridge names should be no " "more than 11. Each bridge must exist, and should have a physical network " "interface configured as a port. All physical networks configured on the " "server should have mappings to appropriate bridges on each agent. Note: If " "you remove a bridge from this mapping, make sure to disconnect it from the " "integration bridge as it won't be managed by the agent anymore. Deprecated " "for ofagent." 
msgstr "" ": 元组的逗号分隔列表,这些元组将物理网络名称映射至" "代理程序的特定于节点的 Open vSwitch 网桥名(将用于平面网络和 VLAN 网络)。网" "桥名称的长度不应超过 11。每个网桥必须存在,并且应具有配置为端口的物理网络接" "口。服务器上配置的所有物理网络在每个代理程序上应具有指向相应网桥的映射。注" "意:如果从此映射中移除网桥,请确保断开它与集成网桥的连接,因为它不再由代理程" "序管理。已不推荐对代理程序使用。" msgid "" "Comma-separated list of : tuples mapping " "physical network names to the agent's node-specific physical network device " "interfaces of SR-IOV physical function to be used for VLAN networks. All " "physical networks listed in network_vlan_ranges on the server should have " "mappings to appropriate interfaces on each agent." msgstr "" " : 元组的逗号分隔列表,这些元组将物理网络名" "称映射至代理程序的 SR-IOV 物理功能的特定于节点的物理网络设备接口(将用于 " "VLAN 网络)。服务器上的 network_vlan_ranges 中列示的所有物理网络在每个代理程" "序上应具有指向相应接口的映射。" msgid "" "Comma-separated list of : tuples " "mapping physical network names to the agent's node-specific physical network " "interfaces to be used for flat and VLAN networks. All physical networks " "listed in network_vlan_ranges on the server should have mappings to " "appropriate interfaces on each agent." msgstr "" ": 元组的逗号分隔列表,这些元组将物理网" "络名称映射至代理程序的特定于节点的物理网络接口(将用于平面网络和 VLAN 网" "络)。服务器上的 network_vlan_ranges 中列示的所有物理网络在每个代理程序上应具" "有指向相应接口的映射。" msgid "" "Comma-separated list of : tuples enumerating ranges of GRE " "tunnel IDs that are available for tenant network allocation" msgstr "" "可用于租户网络分配的 GRE 隧道标识的 : 元组枚举范围的逗号分" "隔列表" msgid "" "Comma-separated list of : tuples enumerating ranges of " "Geneve VNI IDs that are available for tenant network allocation" msgstr "" "枚举可用于租户网络分配的 Geneve VNI 标识的范围的: 元组的逗" "号分隔列表" msgid "" "Comma-separated list of : tuples enumerating ranges of " "VXLAN VNI IDs that are available for tenant network allocation" msgstr "" "可用于租户网络分配的 VXLAN VNI 标识的 : 元组枚举范围的逗号" "分隔列表" msgid "" "Comma-separated list of supported PCI vendor devices, as defined by " "vendor_id:product_id according to the PCI ID Repository. Default enables " "support for Intel and Mellanox SR-IOV capable NICs." msgstr "" "根据 PCI 标识存储库,vendor_id:product_id 定义的受支持的 PCI 供应商设备的逗号" "分隔列表。缺省值启用对支持 Intel 和 Mellanox SR-IOV 的 NIC 的支持。" msgid "" "Comma-separated list of the DNS servers which will be used as forwarders." msgstr "将用作转发器的 DNS 服务器的逗号分隔列表。" msgid "Command to execute" msgstr "要执行的命令" msgid "Config file for interface driver (You may also use l3_agent.ini)" msgstr "用于接口驱动程序的配置文件(还可使用 l3_agent.ini)" #, python-format msgid "Conflicting value ethertype %(ethertype)s for CIDR %(cidr)s" msgstr "CIDR %(cidr)s 具有冲突值 ethertype %(ethertype)s " msgid "" "Controls whether the neutron security group API is enabled in the server. It " "should be false when using no security groups or using the nova security " "group API." msgstr "" "控制是否在服务器中启用 neutron 安全组 API。未使用安全组或使用 nova 安全组 " "API 时,这个配置应该为 false。" #, python-format msgid "Could not bind to %(host)s:%(port)s after trying for %(time)d seconds" msgstr "在尝试 %(time)d 秒之后无法绑定 %(host)s:%(port)s" #, python-format msgid "Could not connect to %s" msgstr "无法连接至 %s" msgid "Could not deserialize data" msgstr "未能对数据进行反序列化" #, python-format msgid "Could not retrieve schema from %(conn)s: %(err)s" msgstr "无法从 %(conn)s 检索模式:%(err)s" #, python-format msgid "" "Current gateway ip %(ip_address)s already in use by port %(port_id)s. Unable " "to update." msgstr "当前网关 IP %(ip_address)s 已由端口 %(port_id)s 使用。无法更新。" msgid "Currently update of HA mode for a DVR/HA router is not supported." msgstr "当前不支持更新 DVR/HA 路由器的 HA 方式。" msgid "Currently update of HA mode for a distributed router is not supported." 
msgstr "当前不支持更新分布式路由器的 HA 方式。" msgid "" "Currently update of distributed mode for a DVR/HA router is not supported" msgstr "当前不支持更新 DVR/HA 路由器的分布式方式。" msgid "Currently update of distributed mode for an HA router is not supported." msgstr "当前不支持更新 HA 路由器的分布式方式。" msgid "" "Currently updating a router from DVR/HA to non-DVR non-HA is not supported." msgstr "当前不支持将路由器从 DVR/HA 路由器更新为非 DVR 或非 HA 路由器。" msgid "Currently updating a router to DVR/HA is not supported." msgstr "当前不支持将路由器更新为 DVR/HA 路由器。" msgid "" "DHCP lease duration (in seconds). Use -1 to tell dnsmasq to use infinite " "lease times." msgstr "DHCP 租赁持续时间(以秒计)。使用 -1 告诉 dnsmasq 使用无限租赁时间。" msgid "" "DVR deployments for VXLAN/GRE/Geneve underlays require L2-pop to be enabled, " "in both the Agent and Server side." msgstr "VXLAN/GRE/Geneve 底层的 DVR 部署需要在代理端和服务器端都启用 L2-pop。" msgid "" "Database engine for which script will be generated when using offline " "migration." msgstr "使用脱机迁移时将对其生成脚本的数据库引擎。" msgid "" "Default IPv4 subnet pool to be used for automatic subnet CIDR allocation. " "Specifies by UUID the pool to be used in case where creation of a subnet is " "being called without a subnet pool ID. If not set then no pool will be used " "unless passed explicitly to the subnet create. If no pool is used, then a " "CIDR must be passed to create a subnet and that subnet will not be allocated " "from any pool; it will be considered part of the tenant's private address " "space. This option is deprecated for removal in the N release." msgstr "" "要用于自动子网 CIDR 分配的缺省 IPv4 子网池。通过 UUID 指定要在没有子网池标识" "的情况下调用创建子网操作时使用的池。如果未设置,那么不会使用任何池(除非已将 " "CIDR 显式传递至创建子网操作)。如果未使用任何池,那么必须传递 CIDR 以创建子" "网,并且系统不会从任何池分配该子网;它将被视为租户的专用地址空间的一部分。此" "选项已不推荐使用,将在 N 发行版中移除。" msgid "" "Default IPv6 subnet pool to be used for automatic subnet CIDR allocation. " "Specifies by UUID the pool to be used in case where creation of a subnet is " "being called without a subnet pool ID. See the description for " "default_ipv4_subnet_pool for more information. This option is deprecated for " "removal in the N release." msgstr "" "要用于自动子网 CIDR 分配的缺省 IPv6 子网池。通过 UUID 指定要在没有子网池标识" "的情况下调用创建子网操作时使用的池。请参阅 default_ipv4_subnet_pool 的描述以" "获取更多信息。此选项已不推荐使用,将在 N 发行版中移除。" msgid "Default driver to use for quota checks" msgstr "存在要用于配额检查的缺省驱动程序" msgid "Default external networks must be shared to everyone." msgstr "缺省外部网络必须共享给每个人。" msgid "" "Default network type for external networks when no provider attributes are " "specified. By default it is None, which means that if provider attributes " "are not specified while creating external networks then they will have the " "same type as tenant networks. Allowed values for external_network_type " "config option depend on the network type values configured in type_drivers " "config option." msgstr "" "在未指定供应商属性时,外部网络的缺省网络类型。缺省情况下为 None,这意味着如果" "在创建外部网络时未指定供应商属性,那么它们将与租户网络具有相同类型。" "external_network_type 配置选项的允许值取决于在 type_drivers 配置选项中所配置" "的网络类型值。" msgid "" "Default number of RBAC entries allowed per tenant. A negative value means " "unlimited." msgstr "每个租户允许的缺省 RBAC 条目数。负值表示无限制。" msgid "" "Default number of resource allowed per tenant. A negative value means " "unlimited." msgstr "每个租户允许的缺省资源数。负值表示无限。" msgid "Default security group" msgstr "缺省安全组" msgid "Default security group already exists." msgstr "缺省安全组已存在。" msgid "" "Default value of availability zone hints. The availability zone aware " "schedulers use this when the resources availability_zone_hints is empty. " "Multiple availability zones can be specified by a comma separated string. 
" "This value can be empty. In this case, even if availability_zone_hints for a " "resource is empty, availability zone is considered for high availability " "while scheduling the resource." msgstr "" "可用区域提示的缺省值。如果资源的 availability_zone_hints 为空,那么可用区域感" "知调度程序使用此项。可通过逗号分隔的字符串来指定多个可用区域。此值可为空。在" "此情况下,即使资源的 availability_zone_hints 为空,那么调度该资源时,可用区域" "仍被视为具备高可用性。" msgid "" "Define the default value of enable_snat if not provided in " "external_gateway_info." msgstr "" "如果未提供 enable_snat 的值,请在 external_gateway_info 中定义 enable_snat 的" "缺省值。" msgid "" "Defines providers for advanced services using the format: :" ":[:default]" msgstr "" "会使用以下格式为高级服务定义供应商:::[:default]" msgid "" "Delay within which agent is expected to update existing ports whent it " "restarts" msgstr "延迟时间,当代理重新启动时,在该段时间内,代理应该更新现有端口" msgid "Delete the namespace by removing all devices." msgstr "请通过除去所有设备来删除名称空间。" #, python-format msgid "Deleting port %s" msgstr "正在删除端口 %s" #, python-format msgid "Deployment error: %(reason)s." msgstr "部署错误:%(reason)s。" msgid "Destroy IPsets even if there is an iptables reference." msgstr "即使存在 iptables 引用,也会破坏 IPset。" msgid "Destroy all IPsets." msgstr "破坏所有 IPset。" #, python-format msgid "Device %(dev_name)s in mapping: %(mapping)s not unique" msgstr "映射 %(mapping)s 中的设备 %(dev_name)s 并非唯一" #, python-format msgid "Device '%(device_name)s' does not exist." msgstr "设备“%(device_name)s”不存在。" msgid "Device has no virtual functions" msgstr "设备没有虚拟功能" #, python-format msgid "Device name %(dev_name)s is missing from physical_device_mappings" msgstr "physical_device_mappings 中缺少设备名称 %(dev_name)s" msgid "Device not found" msgstr "找不到设备" #, python-format msgid "" "Distributed Virtual Router Mac Address for host %(host)s does not exist." msgstr "主机 %(host)s 的分布式虚拟路由器 MAC 地址不存在。" #, python-format msgid "Domain %(dns_domain)s not found in the external DNS service" msgstr "在外部 DNS 服务中找不到域 %(dns_domain)s" msgid "Domain to use for building the hostnames" msgstr "要用于构建主机名的域" msgid "" "Domain to use for building the hostnames. This option is deprecated. It has " "been moved to neutron.conf as dns_domain. It will be removed in a future " "release." msgstr "" "要用于构建主机名的域。此选项已不推荐使用。它已作为 dns_domain 移至 neutron." "conf。未来发行版会将其移除。" msgid "Downgrade no longer supported" msgstr "不再支持降级" #, python-format msgid "Driver %s is not unique across providers" msgstr "驱动程序 %s 在提供程序中不唯一" msgid "Driver for external DNS integration." msgstr "外部 DNS 集成的驱动程序。" msgid "Driver for security groups firewall in the L2 agent" msgstr "Driver for security groups firewall in the L2 agent" msgid "Driver to use for scheduling network to DHCP agent" msgstr "要用于对 DHCP 代理调度网络的驱动程序" msgid "Driver to use for scheduling router to a default L3 agent" msgstr "要用于对缺省 L3 代理安排路由器的驱动程序" msgid "" "Driver used for ipv6 prefix delegation. This needs to be an entry point " "defined in the neutron.agent.linux.pd_drivers namespace. See setup.cfg for " "entry points included with the neutron source." msgstr "" "用于 IPv6 前缀授权的驱动程序。这需要是 neutron.agent.linux.pd_drivers 名称空" "间中定义的入口点。请参阅 setup.cfg 以了解 Neutron 源随附的入口点。" msgid "Driver used for scheduling BGP speakers to BGP DrAgent" msgstr "用于对 BGP DrAgent 调度 BGP Speaker 的驱动程序" msgid "Drivers list to use to send the update notification" msgstr "要用来发送更新通知的驱动程序列表" #, python-format msgid "Duplicate IP address '%s'" msgstr "IP 地址“%s”重复" #, python-format msgid "" "Duplicate L3HARouterAgentPortBinding is created for router(s) %(router)s. " "Database cannot be upgraded. 
Please, remove all duplicates before upgrading " "the database." msgstr "" "对路由器 %(router)s 创建了重复 L3HARouterAgentPortBinding。无法升级数据库。请" "移除所有重复项,然后升级数据库。" msgid "Duplicate Metering Rule in POST." msgstr "POST 中的测量规则重复。" msgid "Duplicate Security Group Rule in POST." msgstr "POST 中的安全组规则重复。" msgid "Duplicate address detected" msgstr "检测到重复地址。" #, python-format msgid "Duplicate hostroute '%s'" msgstr "主机路由“%s”重复" #, python-format msgid "Duplicate items in the list: '%s'" msgstr "列表“%s”中的项重复" #, python-format msgid "Duplicate nameserver '%s'" msgstr "名称服务器“%s”重复" msgid "Duplicate segment entry in request." msgstr "请求中的分段条目重复。" #, python-format msgid "ERROR: %s" msgstr "错误:%s" msgid "" "ERROR: Unable to find configuration file via the default search paths (~/." "neutron/, ~/, /etc/neutron/, /etc/) and the '--config-file' option!" msgstr "" "错误:无法通过缺省搜索路径(~/.neutron/、~/、/etc/neutron/ 和 /etc/)以及“--" "config-file”选项找到配置文件!" msgid "" "Either one of parameter network_id or router_id must be passed to _get_ports " "method." msgstr "参数 network_id 或 router_id 的其中之一必须传递至 _get_ports 方法。" msgid "Either subnet_id or port_id must be specified" msgstr "必须指定 subnet_id 或 port_id" msgid "Empty physical network name." msgstr "空的物理网络名。" msgid "Empty subnet pool prefix list." msgstr "子网池前缀列表为空。" msgid "Enable FWaaS" msgstr "启用 FWaaS" msgid "Enable HA mode for virtual routers." msgstr "对虚拟路由器启用 HA 方式。" msgid "Enable SSL on the API server" msgstr "在 API 服务器上启用 SSL" msgid "" "Enable VXLAN on the agent. Can be enabled when agent is managed by ml2 " "plugin using linuxbridge mechanism driver" msgstr "" "在代理上启用 VXLAN。可在通过使用 linuxbridge 机制驱动程序由 ml2 插件管理代" "理时启用" msgid "" "Enable local ARP responder if it is supported. Requires OVS 2.1 and ML2 " "l2population driver. Allows the switch (when supporting an overlay) to " "respond to an ARP request locally without performing a costly ARP broadcast " "into the overlay." msgstr "" "启用本地 ARP 响应程序(如果它受支持)。需要 OVS 2.1 和 ML2 l2population 驱动" "程序。允许交换机(支持重叠部分时)在本地响应 ARP 请求而不必执行成本高昂的 " "ARP 广播到重叠部分中。" msgid "" "Enable local ARP responder which provides local responses instead of " "performing ARP broadcast into the overlay. Enabling local ARP responder is " "not fully compatible with the allowed-address-pairs extension." msgstr "" "启用本地 ARP 响应程序,这将提供本地响应而不是对重叠部分执行 ARP 广播。启用本" "地 ARP 响应程序并未与 allowed-address-pairs 扩展完全兼容。" msgid "" "Enable services on an agent with admin_state_up False. If this option is " "False, when admin_state_up of an agent is turned False, services on it will " "be disabled. Agents with admin_state_up False are not selected for automatic " "scheduling regardless of this option. But manual scheduling to such agents " "is available if this option is True." msgstr "" "在 admin_state_up 为 False 的代理上启用服务。如果此选项为 False,那么当代理的 " "admin_state_up 变为 False 时,将禁用该代理上的服务。无论此选项如何,都不会选" "择 admin_state_up 为 False 的代理进行自动调度。但是,如果此选项为 True,那么" "可以手动调度这样的代理。" msgid "" "Enable suppression of ARP responses that don't match an IP address that " "belongs to the port from which they originate. Note: This prevents the VMs " "attached to this agent from spoofing, it doesn't protect them from other " "devices which have the capability to spoof (e.g. bare metal or VMs attached " "to agents without this flag set to True). Spoofing rules will not be added " "to any ports that have port security disabled. For LinuxBridge, this " "requires ebtables. For OVS, it requires a version that supports matching ARP " "headers. 
This option will be removed in Newton so the only way to disable " "protection will be via the port security extension." msgstr "" "启用阻止符合以下条件的 ARP 响应的功能:这些响应与属于发出这些响应的端口的 IP " "地址不匹配。 注意:这可避免附加至此代理程序的 VM 遭受电子诈骗,它无法保护 VM " "抵御来自能够进行电子诈骗的其他设备(例如,裸机或附加至未将此标记设置为 True " "的代理程序的 VM)的攻击。系统不会将电子诈骗规则添加至已禁用端口安全性的任何端" "口。对于 LinuxBridge,这需要 ebtables。对于 OVS,它需要支持匹配 ARP 头的版" "本。此选项在 Newton 中将被移除,所以禁用保护的唯一方法将是通过端口安全性扩展" "进行。" msgid "" "Enable/Disable log watch by metadata proxy. It should be disabled when " "metadata_proxy_user/group is not allowed to read/write its log file and " "copytruncate logrotate option must be used if logrotate is enabled on " "metadata proxy log files. Option default value is deduced from " "metadata_proxy_user: watch log is enabled if metadata_proxy_user is agent " "effective user id/name." msgstr "" "由元数据代理启用/禁用日志监控。当不允许 metadata_proxy_user/group 读/写日志文" "件时,应当禁用日志监控,如果对元数据代理日志文件启用了 logrotate,那么必须使" "用 copytruncate logrotate 选项。从 metadata_proxy_user 推论出选项缺省值:如" "果 metadata_proxy_user 是代理有效用户标识/名称,那么启用了监控日志。" msgid "" "Enables IPv6 Prefix Delegation for automatic subnet CIDR allocation. Set to " "True to enable IPv6 Prefix Delegation for subnet allocation in a PD-capable " "environment. Users making subnet creation requests for IPv6 subnets without " "providing a CIDR or subnetpool ID will be given a CIDR via the Prefix " "Delegation mechanism. Note that enabling PD will override the behavior of " "the default IPv6 subnetpool." msgstr "" "对自动子网 CIDR 分配启用 IPv6 前缀代理。设置为 True 将在支持 PD 的环境中对子" "网分配启用 IPv6 前缀代理。如果用户对 IPv6 子网发出创建子网请求但未提供 CIDR " "或子网池标识,那么系统将通过前缀代理机制为该用户提供 CIDR。请注意,启用 PD 将" "覆盖缺省 IPv6 子网池的行为。" msgid "" "Enables the dnsmasq service to provide name resolution for instances via DNS " "resolvers on the host running the DHCP agent. Effectively removes the '--no-" "resolv' option from the dnsmasq process arguments. Adding custom DNS " "resolvers to the 'dnsmasq_dns_servers' option disables this feature." msgstr "" "启用 dnsmasq 服务,以在运行 DHCP 代理程序的主机上通过 DNS 解析器提供实例的名称" "解析。实际上会从 dnsmasq 进程自变量中移除“--no-resolv”选项。将定制 DNS 解析器" "添加至“dnsmasq_dns_servers”选项会禁用此功能部件。" msgid "Encountered an empty component." msgstr "遇到空组件。" msgid "End of VLAN range is less than start of VLAN range" msgstr "VLAN 范围结束值比开始值小" msgid "End of tunnel range is less than start of tunnel range" msgstr "隧道范围的结束小于隧道范围的起始" msgid "Enforce using split branches file structure." msgstr "强制使用拆分分支文件结构。" msgid "" "Ensure that configured gateway is on subnet. For IPv6, validate only if " "gateway is not a link local address. Deprecated, to be removed during the " "Newton release, at which point the gateway will not be forced on to subnet." msgstr "" "确保所配置的网关位于子网上。对于 IPv6,仅当网关并非链路本地地址时,才应进行验" "证。已不推荐使用,将在 Newton 发行版中移除,到时该网关不必强制位于子网。" #, python-format msgid "Error %(reason)s while attempting the operation." msgstr "尝试执行该操作时发生错误 %(reason)s。" #, python-format msgid "Error importing FWaaS device driver: %s" msgstr "导入 FWaaS 设备驱动程序时出错:%s" #, python-format msgid "Error parsing dns address %s" msgstr "解析 dns 地址 %s 时出错" #, python-format msgid "Error while reading %s" msgstr "读取 %s 时出错" #, python-format msgid "" "Exceeded %s second limit waiting for address to leave the tentative state." msgstr "等待地址离开暂定状态时超过 %s 秒限制。" msgid "Exceeded maximum amount of fixed ips per port."
msgstr "已超出每个端口的固定 IP 的最大数目。" msgid "Existing prefixes must be a subset of the new prefixes" msgstr "现有前缀必须是新前缀的子集" #, python-format msgid "" "Exit code: %(returncode)d; Stdin: %(stdin)s; Stdout: %(stdout)s; Stderr: " "%(stderr)s" msgstr "" "退出代码:%(returncode)d;标准输入:%(stdin)s;标准输出:%(stdout)s;标准错" "误:%(stderr)s" #, python-format msgid "Extension %(driver)s failed." msgstr "扩展 %(driver)s 失败。" #, python-format msgid "" "Extension driver %(driver)s required for service plugin %(service_plugin)s " "not found." msgstr "找不到服务插件 %(service_plugin)s 所需的扩展驱动程序 %(driver)s。" msgid "" "Extension to use alongside ml2 plugin's l2population mechanism driver. It " "enables the plugin to populate VXLAN forwarding table." msgstr "" "要与 ml2 插件的 l2population 机制驱动程序一起使用的扩展。它使该插件能够填充 " "VXLAN 转发表。" #, python-format msgid "Extension with alias %s does not exist" msgstr "具有别名 %s 的扩展不存在" msgid "Extensions list to use" msgstr "要使用的扩展列表" #, python-format msgid "Extensions not found: %(extensions)s." msgstr "找不到扩展:%(extensions)s。" #, python-format msgid "External DNS driver %(driver)s could not be found." msgstr "找不到外部 DNS 驱动程序 %(driver)s。" #, python-format msgid "External IP %s is the same as the gateway IP" msgstr "外部 IP %s 和网关 IP 相同" #, python-format msgid "" "External network %(external_network_id)s is not reachable from subnet " "%(subnet_id)s. Therefore, cannot associate Port %(port_id)s with a Floating " "IP." msgstr "" "无法从子网 %(subnet_id)s 访问外部网络 %(external_network_id)s。因此,无法使端" "口 %(port_id)s 与浮动 IP 关联。" #, python-format msgid "" "External network %(net_id)s cannot be updated to be made non-external, since " "it has existing gateway ports" msgstr "无法将外部网络 %(net_id)s 更新为非外部网络,因为它包含现有的网关端口" #, python-format msgid "ExtraDhcpOpt %(id)s could not be found" msgstr "找不到 ExtraDhcpOpt %(id)s" msgid "" "FWaaS plugin is configured in the server side, but FWaaS is disabled in L3-" "agent." msgstr "FWaaS 插件是在服务器端配置的,但 FWaaS 在 L3 代理中被禁用。" #, python-format msgid "Failed rescheduling router %(router_id)s: no eligible l3 agent found." msgstr "重新安排路由器 %(router_id)s 失败:找不到合格 L3 代理。" #, python-format msgid "Failed scheduling router %(router_id)s to the L3 Agent %(agent_id)s." msgstr "将路由器 %(router_id)s 安排至 L3 代理 %(agent_id)s 失败。" #, python-format msgid "Failed to add interfaces: %(ifaces)s" msgstr "添加接口失败: %(ifaces)s" #, python-format msgid "" "Failed to allocate a VRID in the network %(network_id)s for the router " "%(router_id)s after %(max_tries)s tries." msgstr "" "在 %(max_tries)s 次尝试之后,未能在网络 %(network_id)s 中为路由器 " "%(router_id)s 分配 VRID。" #, python-format msgid "Failed to allocate subnet: %(reason)s." msgstr "无法分配子网:%(reason)s。" msgid "" "Failed to associate address scope: subnetpools within an address scope must " "have unique prefixes." msgstr "无法关联地址范围:地址范围中的子网池必须具有唯一前缀。" #, python-format msgid "Failed to check policy %(policy)s because %(reason)s." msgstr "无法检查策略 %(policy)s,因为 %(reason)s。" #, python-format msgid "" "Failed to create a duplicate %(object_type)s: for attribute(s) " "%(attributes)s with value(s) %(values)s" msgstr "" "对于具有值 %(values)s 的属性 %(attributes)s,未能创建重复的 %(object_type)s" #, python-format msgid "" "Failed to create port on network %(network_id)s, because fixed_ips included " "invalid subnet %(subnet_id)s" msgstr "" "未能在网络 %(network_id)s 上创建端口,因为 fixed_ips 包括了无效子网 " "%(subnet_id)s" #, python-format msgid "Failed to init policy %(policy)s because %(reason)s." msgstr "无法初始化策略 %(policy)s,因为 %(reason)s。" #, python-format msgid "Failed to locate source for %s." 
msgstr "未能找到 %s 的源。" #, python-format msgid "Failed to parse request. Parameter '%s' not specified" msgstr "未能解析请求。未指定参数“%s”" #, python-format msgid "Failed to parse request. Required attribute '%s' not specified" msgstr "未能解析请求。未指定必需属性“%s”" msgid "Failed to remove supplemental groups" msgstr "未能移除补充组" #, python-format msgid "Failed to set gid %s" msgstr "设置 gid %s 失败" #, python-format msgid "Failed to set uid %s" msgstr "设置 uid %s 失败" #, python-format msgid "Failed to set-up %(type)s tunnel port to %(ip)s" msgstr "未能将 %(type)s 隧道端口设置为 %(ip)s" msgid "Failure applying iptables rules" msgstr "应用 iptable 规则时失败" #, python-format msgid "Failure waiting for address %(address)s to become ready: %(reason)s" msgstr "等待地址 %(address)s 变得就绪时发生故障:%(reason)s" msgid "Flat provider networks are disabled" msgstr "平面供应商网络被禁用" #, python-format msgid "Flavor %(flavor_id)s could not be found." msgstr "找不到类型 %(flavor_id)s。" #, python-format msgid "Flavor %(flavor_id)s is used by some service instance." msgstr "类型 %(flavor_id)s 已被某个服务实例使用。" msgid "Flavor is not enabled." msgstr "类型未启用。" #, python-format msgid "Floating IP %(floatingip_id)s could not be found" msgstr "找不到浮动 IP %(floatingip_id)s" #, python-format msgid "" "Floating IP %(floatingip_id)s is associated with non-IPv4 address " "%s(internal_ip)s and therefore cannot be bound." msgstr "" "浮动 IP %(floatingip_id)s 与非 IPv4 地址%s(internal_ip)s 相关联,因此无法绑" "定。" msgid "For TCP/UDP protocols, port_range_min must be <= port_range_max" msgstr "对于 TCP/UDP 协议,port_range_min 必须小于或等于 port_range_max" #, python-format msgid "For class %(object_type)s missing primary keys: %(missing_keys)s" msgstr "对于类 %(object_type)s,缺少主键:%(missing_keys)s" msgid "Force ip_lib calls to use the root helper" msgstr "强制 ip_lib 调用使用 root helper" #, python-format msgid "Found duplicate extension: %(alias)s." msgstr "发现重复扩展:%(alias)s。" #, python-format msgid "" "Found overlapping allocation pools: %(pool_1)s %(pool_2)s for subnet " "%(subnet_cidr)s." msgstr "对于子网 %(subnet_cidr)s,找到重叠的分配池:%(pool_1)s %(pool_2)s" msgid "Gateway IP version inconsistent with allocation pool version" msgstr "网关 IP 版本与分配池版本不一致" #, python-format msgid "" "Gateway cannot be updated for router %(router_id)s, since a gateway to " "external network %(net_id)s is required by one or more floating IPs." msgstr "" "无法为路由器 %(router_id)s 更新网关,因为一个或多个浮动 IP 需要指向外部网络 " "%(net_id)s 的网关。" #, python-format msgid "Gateway ip %(ip_address)s conflicts with allocation pool %(pool)s." msgstr "网关 IP %(ip_address)s 与分配池 %(pool)s 冲突。" msgid "Gateway is not valid on subnet" msgstr "网关在子网上无效" msgid "" "Geneve encapsulation header size is dynamic, this value is used to calculate " "the maximum MTU for the driver. This is the sum of the sizes of the outer " "ETH + IP + UDP + GENEVE header sizes. The default size for this field is 50, " "which is the size of the Geneve header without any additional option headers." msgstr "" "Geneve 封装头大小是动态的,此值用于计算驱动程序的最大 MTU。这是外部 ETH + IP " "+ UDP + GENEVE 头大小的总和。此字段的缺省大小为 50,这是没有任何附加选项头的 " "Geneve 头的大小。" msgid "Group (gid or name) running metadata proxy after its initialization" msgstr "在元数据代理的初始化之后,运行该代理的组(gid 或名称)" msgid "" "Group (gid or name) running metadata proxy after its initialization (if " "empty: agent effective group)." 
msgstr "" "在元数据代理的初始化之后,运行该代理的组(gid 或名称),(如果此组为空,那么" "这是代理有效组)。" msgid "Group (gid or name) running this process after its initialization" msgstr "在此进程的初始化之后,运行此进程的组(gid 或名称)" #, python-format msgid "HEAD file does not match migration timeline head, expected: %s" msgstr "HEAD 文件与迁移时间线头不匹配,需要:%s" msgid "" "Hostname to be used by the Neutron server, agents and services running on " "this machine. All the agents and services running on this machine must use " "the same host value." msgstr "" "Neutron 服务器以及此机器上运行的代理程序和服务要使用的主机名。此机器上运行的" "所有代理程序和服务必须使用同一主机值。" msgid "How many times Neutron will retry MAC generation" msgstr "Neutron 将重试 MAC 生成的次数" #, python-format msgid "" "ICMP code (port-range-max) %(value)s is provided but ICMP type (port-range-" "min) is missing." msgstr "" "已提供 ICMP 代码 (port-range-max) %(value)s,但缺少 ICMP 类型 (port-range-" "min)。" msgid "ID of network" msgstr "网络的标识" msgid "ID of network to probe" msgstr "要探测的网络的标识" msgid "ID of probe port to delete" msgstr "要删除的探测器端口的标识" msgid "ID of probe port to execute command" msgstr "用于执行命令的探测器端口的标识" msgid "ID of the router" msgstr "路由器标识" #, python-format msgid "IP address %(ip)s already allocated in subnet %(subnet_id)s" msgstr "在子网 %(subnet_id)s 中已分配 IP 地址 %(ip)s" #, python-format msgid "IP address %(ip)s does not belong to subnet %(subnet_id)s" msgstr "IP 地址 %(ip)s 不属于子网 %(subnet_id)s" #, python-format msgid "" "IP address %(ip_address)s is not a valid IP for any of the subnets on the " "specified network." msgstr "对于所指定网络上的任何子网,IP 地址 %(ip_address)s 是无效 IP。" msgid "IP address used by Nova metadata server." msgstr "Nova 元数据服务器使用的 IP 地址。" msgid "IP allocation failed. Try again later." msgstr "IP 分配失败。请稍后重试。" msgid "IP allocation requires subnet_id or ip_address" msgstr "IP 分配需要 subnet_id 或 ip_address" #, python-format msgid "" "IP version mismatch, cannot create tunnel: local_ip=%(lip)s remote_ip=%(rip)s" msgstr "IP版本不匹配,不能创建通道:local_ip=%(lip)s remote_ip=%(rip)s" #, python-format msgid "" "IPTablesManager.apply failed to apply the following set of iptables rules:\n" "%s" msgstr "" "IPTablesManager.apply 无法应用以下 iptables 规则集:\n" "%s" msgid "IPtables conntrack zones exhausted, iptables rules cannot be applied." msgstr "IPtables 连接跟踪区域已用完,无法应用 iptables 规则。" msgid "IPv6 Address Mode must be SLAAC or Stateless for Prefix Delegation." msgstr "对于前缀授权,IPv6 寻址方式必须为 SLAAC 或者“无状态”。" msgid "IPv6 RA Mode must be SLAAC or Stateless for Prefix Delegation." msgstr "对于前缀授权,IPv6 RA 方式必须为 SLAAC 或者“无状态”。" #, python-format msgid "" "IPv6 address %(address)s can not be directly assigned to a port on subnet " "%(id)s since the subnet is configured for automatic addresses" msgstr "" "IPv6 地址 %(address)s 无法直接分配给子网 %(id)s 上的端口,因为该子网是针对自" "动地址配置的" #, python-format msgid "" "IPv6 address %(ip)s cannot be directly assigned to a port on subnet " "%(subnet_id)s as the subnet is configured for automatic addresses" msgstr "" "无法直接将 IPv6 地址 %(ip)s 分配给子网%(subnet_id)s,因为针对自动地址配置了该" "子网" #, python-format msgid "" "IPv6 subnet %s configured to receive RAs from an external router cannot be " "added to Neutron Router." msgstr "" "无法将已配置为从外部路由器接收 RA 的 IPv6 子网 %s 添加至 Neutron 路由器。" msgid "" "If True, advertise network MTU values if core plugin calculates them. MTU is " "advertised to running instances via DHCP and RA MTU options." msgstr "" "如果为 True,那么核心插件计算网络 MTU 值时会进行通告。MTU 将通过 DHCP 和 RA " "MTU 选项向正在运行的实例通告。" msgid "" "If True, then allow plugins that support it to create VLAN transparent " "networks." 
msgstr "如果为 True,那么允许那些支持它的插件创建 VLAN 透明网络。" msgid "" "If non-empty, the l3 agent can only configure a router that has the matching " "router ID." msgstr "如果非空,那么 L3 代理程序只能配置具有匹配路由器标识的路由器。" msgid "Illegal IP version number" msgstr "IP 版本号不合法" #, python-format msgid "" "Illegal prefix bounds: %(prefix_type)s=%(prefixlen)s, %(base_prefix_type)s=" "%(base_prefixlen)s." msgstr "" "非法前缀界限:%(prefix_type)s=%(prefixlen)s,%(base_prefix_type)s=" "%(base_prefixlen)s。" #, python-format msgid "" "Illegal subnetpool association: subnetpool %(subnetpool_id)s cannot " "associate with address scope %(address_scope_id)s because subnetpool " "ip_version is not %(ip_version)s." msgstr "" "非法子网池关联:子网池 %(subnetpool_id)s 不能与地址范围 %(address_scope_id)s " "关联,因为 subnetpool ip_version 并非 %(ip_version)s。" #, python-format msgid "" "Illegal subnetpool association: subnetpool %(subnetpool_id)s cannot be " "associated with address scope %(address_scope_id)s." msgstr "" "非法子网池关联:子网池 %(subnetpool_id)s 不能与地址范围 %(address_scope_id)s " "关联。" #, python-format msgid "Illegal subnetpool update : %(reason)s." msgstr "非法子网池更新:%(reason)s。" #, python-format msgid "Illegal update to prefixes: %(msg)s." msgstr "对前缀进行的更新非法:%(msg)s。" msgid "" "In some cases the Neutron router is not present to provide the metadata IP " "but the DHCP server can be used to provide this info. Setting this value " "will force the DHCP server to append specific host routes to the DHCP " "request. If this option is set, then the metadata service will be activated " "for all the networks." msgstr "" "在某些情况下,没有 Neutron 路由器可提供元数据 IP,但 DHCP 服务器可用于提供此" "信息。设置此值将强制 DHCP 服务器将特定主机路由追加至 DHCP 请求。如果设置了此" "选项,那么将对所有网络激活此元数据服务。" #, python-format msgid "Incorrect pci_vendor_info: \"%s\", should be pair vendor_id:product_id" msgstr "pci_vendor_info“%s”不正确,应该是 vendor_id:product_id 对" msgid "" "Indicates that this L3 agent should also handle routers that do not have an " "external network gateway configured. This option should be True only for a " "single agent in a Neutron deployment, and may be False for all agents if all " "routers must have an external network gateway." msgstr "" "指示此 L3 代理程序还应处理未配置外部网络网关的路由器。此选项只有在用于 " "Neutron 部署中的单个代理程序时才应为 True,在所有路由器必须具有外部网络网关的" "情况下用于所有代理程序时可为 False。" #, python-format msgid "Instance of class %(module)s.%(class)s must contain _cache attribute" msgstr "类 %(module)s.%(class)s 的实例必须包含 _cache 属性" #, python-format msgid "Insufficient prefix space to allocate subnet size /%s" msgstr "没有足够的前缀空间来分配子网大小 /%s" msgid "Insufficient rights for removing default security group." msgstr "权限不足,无法移除缺省安全组。" msgid "" "Integration bridge to use. Do not change this parameter unless you have a " "good reason to. This is the name of the OVS integration bridge. There is one " "per hypervisor. The integration bridge acts as a virtual 'patch bay'. All VM " "VIFs are attached to this bridge and then 'patched' according to their " "network connectivity." msgstr "" "要使用的集成网桥。不要更改此参数,除非您有正当原因。这是 OVS 集成网桥的名称。" "每个 hypervisor 都有一个集成网桥。此集成网桥充当虚拟“接线架”。所有 VM VIF 附" "加至此网桥,然后根据其网络连接进行“接线”。" msgid "Interface to monitor" msgstr "要监视的接口" msgid "" "Interval between checks of child process liveness (seconds), use 0 to disable" msgstr "进行子进程活性检查的时间间隔(秒),使用 0 表示禁用" msgid "Interval between two metering measures" msgstr "采取两种测量措施的时间间隔" msgid "Interval between two metering reports" msgstr "生成两个测量报告的时间间隔" #, python-format msgid "Invalid CIDR %(input)s given as IP prefix." msgstr "作为 IP 前缀提供的 CIDR %(input)s 无效。" #, python-format msgid "" "Invalid CIDR %s for IPv6 address mode. 
OpenStack uses the EUI-64 address " "format, which requires the prefix to be /64." msgstr "" "CIDR %s 对于 IPv6 地址方式无效。OpenStack 使用 EUI-64 地址格式,该格式要求前" "缀为 /64。" #, python-format msgid "Invalid Device %(dev_name)s: %(reason)s" msgstr "无效设备 %(dev_name)s:%(reason)s" #, python-format msgid "" "Invalid action '%(action)s' for object type '%(object_type)s'. Valid " "actions: %(valid_actions)s" msgstr "" "针对对象类型“%(object_type)s”的操作“%(action)s”无效。有效操作为:" "%(valid_actions)s" #, python-format msgid "" "Invalid authentication type: %(auth_type)s, valid types are: " "%(valid_auth_types)s" msgstr "认证类型 %(auth_type)s 无效,以下是有效类型:%(valid_auth_types)s" #, python-format msgid "Invalid content type %(content_type)s." msgstr "无效内容类型:%(content_type)s。" #, python-format msgid "Invalid data format for IP pool: '%s'" msgstr "IP 池的数据格式无效:“%s”" #, python-format msgid "Invalid data format for extra-dhcp-opt: %(data)s" msgstr "extra-dhcp-opt 的数据格式无效:%(data)s" #, python-format msgid "Invalid data format for fixed IP: '%s'" msgstr "固定 IP 的数据格式无效:“%s”" #, python-format msgid "Invalid data format for hostroute: '%s'" msgstr "主机路由“%s”的数据格式无效" #, python-format msgid "Invalid data format for nameserver: '%s'" msgstr "名称服务器“%s”的数据格式无效" #, python-format msgid "Invalid ethertype %(ethertype)s for protocol %(protocol)s." msgstr "ethertype %(ethertype)s 对协议 %(protocol)s 无效。" #, python-format msgid "Invalid extension environment: %(reason)s." msgstr "无效扩展环境:%(reason)s。" #, python-format msgid "Invalid format for routes: %(routes)s, %(reason)s" msgstr "路由 %(routes)s 的格式无效,%(reason)s" #, python-format msgid "Invalid format: %s" msgstr "格式无效:%s" #, python-format msgid "Invalid input for %(attr)s. Reason: %(reason)s." msgstr "输入对于 %(attr)s 无效。原因:%(reason)s。" #, python-format msgid "" "Invalid input. '%(target_dict)s' must be a dictionary with keys: " "%(expected_keys)s" msgstr "输入无效。“%(target_dict)s”必须是具有以下键的字典:%(expected_keys)s" #, python-format msgid "Invalid instance state: %(state)s, valid states are: %(valid_states)s" msgstr "实例状态 %(state)s 无效,以下是有效状态:%(valid_states)s" #, python-format msgid "" "Invalid local or remote IP, cannot create tunnel: local_ip=%(lip)s remote_ip=" "%(rip)s" msgstr "本地或远程 IP 无效,无法创建隧道:local_ip=%(lip)s remote_ip=%(rip)s" #, python-format msgid "Invalid mapping: '%s'" msgstr "映射无效:“%s”" #, python-format msgid "Invalid network VLAN range: '%(vlan_range)s' - '%(error)s'." msgstr "无效网络 VLAN 范围:“%(vlan_range)s”-“%(error)s”。" #, python-format msgid "Invalid network VXLAN port range: '%(vxlan_range)s'." msgstr "无效网络 VXLAN 端口范围:“%(vxlan_range)s”。" #, python-format msgid "Invalid pci slot %(pci_slot)s" msgstr "无效 PCI 插槽 %(pci_slot)s" #, python-format msgid "Invalid provider format. Last part should be 'default' or empty: %s" msgstr "提供程序格式无效。最后部分应该为“default”或空:%s" #, python-format msgid "Invalid resource type %(resource_type)s" msgstr "无效资源类型 %(resource_type)s" #, python-format msgid "Invalid route: %s" msgstr "路由无效:%s" msgid "Invalid service provider format" msgstr "服务提供程序格式无效" #, python-format msgid "Invalid service type %(service_type)s." msgstr "服务类型 %(service_type)s 无效。" #, python-format msgid "" "Invalid value for ICMP %(field)s (%(attr)s) %(value)s. It must be 0 to 255." msgstr "" "ICMP %(field)s (%(attr)s) 的值 %(value)s 无效。它必须为 0 到 255 之间的值。" #, python-format msgid "Invalid value for port %(port)s" msgstr "端口 %(port)s 的值无效" msgid "" "Iptables mangle mark used to mark ingress from external network. This mark " "will be masked with 0xffff so that only the lower 16 bits will be used."
msgstr "" "用来标记外部网络中的入口的 Iptables mangle 标记。将使用 0xffff 来屏蔽此标记," "以便将仅使用低位的 16 位。" msgid "" "Iptables mangle mark used to mark metadata valid requests. This mark will be " "masked with 0xffff so that only the lower 16 bits will be used." msgstr "" "用来标记元数据有效请求的 Iptables mangle 标记。将使用 0xffff 来屏蔽此标记,以" "便将仅使用低位的 16 位。" msgid "" "Keep in track in the database of current resourcequota usage. Plugins which " "do not leverage the neutron database should set this flag to False" msgstr "" "跟踪数据库的当前资源配额使用量。那些未利用 Neutron 数据库的插件应将此标志设置" "为 False" msgid "Keepalived didn't respawn" msgstr "Keepalived 未重新衍生" msgid "Keepalived didn't spawn" msgstr "Keepalived 未衍生" #, python-format msgid "" "Kernel HZ value %(value)s is not valid. This value must be greater than 0." msgstr "内核 HZ 值 %(value)s 无效。此值必须大于 0。" #, python-format msgid "Key %(key)s in mapping: '%(mapping)s' not unique" msgstr "映射“%(mapping)s”中的键 %(key)s 不唯一" msgid "L3 agent failure to setup NAT for floating IPs" msgstr "L3 代理程序无法为浮动 IP 设置 NAT" msgid "L3 agent failure to setup floating IPs" msgstr "L3 代理程序无法设置浮动 IP" #, python-format msgid "Limit must be an integer 0 or greater and not '%d'" msgstr "limit 必须是整数 0 或更大整数,而不是“%d”" msgid "Limit number of leases to prevent a denial-of-service." msgstr "请对租赁数进行限制,以防止拒绝服务。" msgid "List of :" msgstr ": 的列表" msgid "" "List of :: or " "specifying physical_network names usable for VLAN provider and tenant " "networks, as well as ranges of VLAN tags on each available for allocation to " "tenant networks." msgstr "" ":: 列表,用于指定" "对 VLAN 供应商和租户网络可用的物理网络名称,以及可分配至租户网络的每项上的 " "VLAN 标记。" msgid "" "List of network type driver entrypoints to be loaded from the neutron.ml2." "type_drivers namespace." msgstr "" "要从 neutron.ml2.type_drivers 名称空间装入的网络类型驱动程序入口点的列表。" msgid "" "List of physical_network names with which flat networks can be created. Use " "default '*' to allow flat networks with arbitrary physical_network names. " "Use an empty list to disable flat networks." msgstr "" "可通过其创建平面网络的 physical_network 名称的列表。使用缺省值“*”将允许平面网" "络使用任意 physical_network 名称。使用空列表将禁用平面网络。" msgid "Local IP address of the VXLAN endpoints." msgstr "VXLAN 端点的本地 IP 地址。" msgid "" "Local IP address of tunnel endpoint. Can be either an IPv4 or IPv6 address." msgstr "通道端点的本地 IP 地址。应该是一个IPv4或IPv6地址。" msgid "Location for Metadata Proxy UNIX domain socket." msgstr "元数据代理 UNIX 域套接字的位置。" msgid "Location of Metadata Proxy UNIX domain socket" msgstr "元数据代理 UNIX 域套接字的位置" msgid "Location of pid file of this process." msgstr "此进程的 pid 文件的位置。" msgid "Location to store DHCP server config files." msgstr "用于存储 DHCP 服务器配置文件的位置。" msgid "Location to store IPv6 PD files." msgstr "用来存储 IPv6 PD 文件的位置。" msgid "Location to store IPv6 RA config files" msgstr "用于存储 IPv6 RA 配置文件的位置" msgid "Location to store child pid files" msgstr "用于存储子 pid 文件的位置" msgid "Location to store keepalived/conntrackd config files" msgstr "用于存储保持活动的/连接跟踪的配置文件的位置" msgid "Log agent heartbeats" msgstr "日志代理程序脉动信号" msgid "Loopback IP subnet is not supported if enable_dhcp is True." msgstr "如果 enable_dhcp 为 True,那么回送 IP 子网不受支持。" msgid "MTU size of veth interfaces" msgstr "veth 接口的 MTU 大小" msgid "Make the l2 agent run in DVR mode." msgstr "使 l2 代理在 DVR 方式下运行。" msgid "Malformed request body" msgstr "请求主体的格式不正确" #, python-format msgid "Malformed request body: %(reason)s." 
msgstr "请求主体格式不正确:%(reason)s。" msgid "MaxRtrAdvInterval setting for radvd.conf" msgstr "radvd.conf 的 MaxRtrAdvInterval 设置" msgid "Maximum number of DNS nameservers per subnet" msgstr "每个子网的 DNS 名称服务器的最大数目" msgid "" "Maximum number of L3 agents which a HA router will be scheduled on. If it is " "set to 0 then the router will be scheduled on every agent." msgstr "" "将对其调度 HA 路由器的 L3 代理程序的最大数目。如果设置为 0,那么将对每个代理" "程序调度该路由器。" msgid "Maximum number of allowed address pairs" msgstr "允许的最大地址对数" msgid "" "Maximum number of fixed ips per port. This option is deprecated and will be " "removed in the N release." msgstr "" "每个端口的固定 IP 的最大数目。此选项已不推荐使用,将在 N 发行版中移除。" msgid "Maximum number of host routes per subnet" msgstr "每个子网的最大主机路由数" msgid "Maximum number of routes per router" msgstr "每个路由器的最大路由数目" msgid "" "Metadata Proxy UNIX domain socket mode, 4 values allowed: 'deduce': deduce " "mode from metadata_proxy_user/group values, 'user': set metadata proxy " "socket mode to 0o644, to use when metadata_proxy_user is agent effective " "user or root, 'group': set metadata proxy socket mode to 0o664, to use when " "metadata_proxy_group is agent effective group or root, 'all': set metadata " "proxy socket mode to 0o666, to use otherwise." msgstr "" "元数据代理 UNIX 域套接字方式,允许使用以下 4 个值:“deduce”:根据 " "metadata_proxy_user/group 值进行推论的推论方式;“user”:将元数据代理套接字方" "式设置为 0o644,以在 metadata_proxy_user 为代理有效用户或者 root 用户时使" "用;“group”:将元数据代理套接字方式设置为 0o664,以在 metadata_proxy_group 为" "代理有效组或者 root 用户时使用;“all”:设置元数据代理套接字方式为 0o666,以在" "其他情况下使用。" msgid "Metering driver" msgstr "测量驱动程序" #, python-format msgid "Metering label %(label_id)s does not exist" msgstr "测量标签 %(label_id)s 不存在" #, python-format msgid "Metering label rule %(rule_id)s does not exist" msgstr "测量标签规则 %(rule_id)s 不存在" #, python-format msgid "" "Metering label rule with remote_ip_prefix %(remote_ip_prefix)s overlaps " "another" msgstr "" "带有 remote_ip_prefix %(remote_ip_prefix)s 的测量标签规则与另一测量标签规则重" "叠" msgid "Method cannot be called within a transaction." msgstr "不能在事务中调用方法。" msgid "Migration from distributed router to centralized is not supported" msgstr "不支持从分布式路由器迁移至集中路由器" msgid "MinRtrAdvInterval setting for radvd.conf" msgstr "radvd.conf 的 MinRtrAdvInterval 设置" msgid "Minimize polling by monitoring ovsdb for interface changes." msgstr "请通过监视 ovsdb 以获取接口更改来最大程度地减少轮询。" msgid "" "Minimum number of L3 agents that have to be available in order to allow a " "new HA router to be scheduled." msgstr "为了允许一个HA路由器可调度,最小数量的L3代理必须要可用。" #, python-format msgid "Missing key in mapping: '%s'" msgstr "映射中缺少键:“%s”" #, python-format msgid "Missing value in mapping: '%s'" msgstr "映射中缺少值:“%s”" msgid "Multicast IP subnet is not supported if enable_dhcp is True." msgstr "如果 enable_dhcp 为 True,那么多点广播 IP 子网不受支持。" msgid "" "Multicast group for VXLAN. When configured, will enable sending all " "broadcast traffic to this multicast group. When left unconfigured, will " "disable multicast VXLAN mode." msgstr "" "VXLAN 的多点广播组。如果配置了此项,那么系统允许将所有广播流量发送至此多点广" "播组。如果保留为未配置,那么系统将禁用多点广播 VXLAN 方式。" msgid "" "Multicast group(s) for vxlan interface. A range of group addresses may be " "specified by using CIDR notation. Specifying a range allows different VNIs " "to use different group addresses, reducing or eliminating spurious broadcast " "traffic to the tunnel endpoints. To reserve a unique group for each possible " "(24-bit) VNI, use a /8 such as 239.0.0.0/8. This setting must be the same on " "all the agents." 
msgstr "" "VXLAN 接口的多点广播组。必须使用 CIDR 注释指定一定范围的组地址。指定范围允许" "不同 VNI 使用不同组地址,从而减少或消除发送至通道端点的伪造广播流量。为对每个" "可能的(24 位)VNI 保留唯一组,请使用 /8,例如,239.0.0.0/8。此设置在所有代理" "程序上必须相同。" #, python-format msgid "Multiple agents with agent_type=%(agent_type)s and host=%(host)s found" msgstr "找到多个符合以下条件的代理:agent_type=%(agent_type)s 且 host=%(host)s" #, python-format msgid "Multiple default providers for service %s" msgstr "对于服务 %s,存在多个缺省提供程序" #, python-format msgid "Multiple plugins for service %s were configured" msgstr "已配置多个用于服务 %s 的插件" #, python-format msgid "Multiple providers specified for service %s" msgstr "对于服务 %s,已指定多个提供程序" msgid "Multiple tenant_ids in bulk security group rule create not allowed" msgstr "不允许在成批安全组规则创建中使用多个 tenant_id" msgid "Must also specify protocol if port range is given." msgstr "如果指定了端口范围,那么还必须指定协议。" msgid "Must specify one or more actions on flow addition or modification" msgstr "必须在添加或删除流时指定一个或多个操作" #, python-format msgid "Name %(dns_name)s is duplicated in the external DNS service" msgstr "名称 %(dns_name)s 在外部 DNS 服务中重复" #, python-format msgid "" "Name '%s' must be 1-63 characters long, each of which can only be " "alphanumeric or a hyphen." msgstr "" "名称“%s”的长度必须在 1 到 63 个字符之间,其中每个字符只能是字母数字或连字符。" #, python-format msgid "Name '%s' must not start or end with a hyphen." msgstr "名称“%s”不能以连字符开头或结尾。" msgid "Name of Open vSwitch bridge to use" msgstr "要使用的已打开 vSwitch 网桥的名称" msgid "" "Name of bridge used for external network traffic. This should be set to an " "empty value for the Linux Bridge. When this parameter is set, each L3 agent " "can be associated with no more than one external network." msgstr "" "用于外部网络流量的网桥名称。对于 Linux 网桥应设置为空值。如果设置了此参数,那" "么每个 L3 代理程序最多可与一个外部网络相关联。" msgid "" "Name of nova region to use. Useful if keystone manages more than one region." msgstr "要使用的 nova 区域的名称。如果 keystone 管理多个区域,那么这很有用。" msgid "Name of the FWaaS Driver" msgstr "FWaaS 驱动程序的名称" msgid "Namespace of the router" msgstr "路由器名称空间" msgid "Native pagination depend on native sorting" msgstr "本机分页依赖于本机排序" #, python-format msgid "" "Need to apply migrations from %(project)s contract branch. This will require " "all Neutron server instances to be shutdown before proceeding with the " "upgrade." msgstr "" "需要从 %(project)s 合同分支应用迁移。这要求所有 Neutron 服务器实例关闭,然后" "继续升级。" msgid "Negative delta (downgrade) not supported" msgstr "不支持为负变化量(降级)" msgid "Negative relative revision (downgrade) not supported" msgstr "不支持为负数的相关修订版(降级)" #, python-format msgid "" "Network %(network_id)s is already bound to BgpSpeaker %(bgp_speaker_id)s." msgstr "网络 %(network_id)s 已绑定至 BgpSpeaker %(bgp_speaker_id)s。" #, python-format msgid "" "Network %(network_id)s is not associated with BGP speaker %(bgp_speaker_id)s." msgstr "网络 %(network_id)s 未与 BGP Speaker %(bgp_speaker_id)s 关联。" #, python-format msgid "Network %(network_id)s is not bound to a BgpSpeaker." msgstr "网络 %(network_id)s 未绑定至 BgpSpeaker。" #, python-format msgid "Network %(network_id)s is not bound to a IPv%(ip_version)s BgpSpeaker." msgstr "网络 %(network_id)s 未绑定至 IPv%(ip_version)s BgpSpeaker。" #, python-format msgid "Network %s does not contain any IPv4 subnet" msgstr "网络 %s 中不包含任何 IPv4 子网" #, python-format msgid "Network %s is not a valid external network" msgstr "网络 %s 是无效外部网络" #, python-format msgid "Network %s is not an external network" msgstr "网络 %s 不是外部网络" #, python-format msgid "" "Network of size %(size)s, from IP range %(parent_range)s excluding IP ranges " "%(excluded_ranges)s was not found." 
msgstr "" "从 IP 范围 %(parent_range)s(排除 IP 范围%(excluded_ranges)s)中找不到大小为 " "%(size)s 的网络。" msgid "Network that will have instance metadata proxied." msgstr "将代理实例元数据的网络。" #, python-format msgid "Network type value '%s' not supported" msgstr "网络类型值“%s”不受支持" msgid "Network type value needed by the ML2 plugin" msgstr "ML2 插件需要网络类型值" msgid "Network types supported by the agent (gre and/or vxlan)." msgstr "代理支持的网络类型(gre 和/或 vxlan)。" msgid "" "Neutron IPAM (IP address management) driver to use. If ipam_driver is not " "set (default behavior), no IPAM driver is used. In order to use the " "reference implementation of Neutron IPAM driver, use 'internal'." msgstr "" "要使用的 Neutron IPAM(IP 地址管理)驱动程序。如果未设置 ipam_driver(缺省行" "为),那么不会使用任何 IPAM 驱动程序。为使用 Neutron IPAM 驱动程序的引用实" "现,请使用“internal”。" msgid "Neutron Service Type Management" msgstr "Neutron 服务类型管理" msgid "Neutron core_plugin not configured!" msgstr "未配置 Neutron core_plugin!" msgid "Neutron plugin provider module" msgstr "Neutron 插件提供程序模块" msgid "Neutron quota driver class" msgstr "Neutron 配额驱动程序类" msgid "New value for first_ip or last_ip has to be specified." msgstr "必须对 first_ip 或 last_ip 指定新值。" msgid "No default router:external network" msgstr "没有缺省路由器:外部网络" #, python-format msgid "No default subnetpool found for IPv%s" msgstr "找不到对应 IPv%s 的缺省子网池" msgid "No default subnetpools defined" msgstr "未定义缺省子网池" #, python-format msgid "No eligible l3 agent associated with external network %s found" msgstr "找不到合格的与外部网络 %s 关联的 L3 代理" #, python-format msgid "No more IP addresses available for subnet %(subnet_id)s." msgstr "没有更多 IP 地址可用于子网 %(subnet_id)s。" #, python-format msgid "" "No more Virtual Router Identifier (VRID) available when creating router " "%(router_id)s. The limit of number of HA Routers per tenant is 254." msgstr "" "当创建路由器 %(router_id)s 时,没有更多虚拟路由器标识 (VRID) 可用。每个租户" "的 HA 路由器数的限制为 254。" msgid "No offline migrations pending." msgstr "没有脱机迁移处于暂挂状态。" #, python-format msgid "No providers specified for '%s' service, exiting" msgstr "没有为“%s”服务指定任何提供程序,正在退出" #, python-format msgid "No shared key in %s fields" msgstr "%s 字段中没有共享键" msgid "Not allowed to manually assign a router to an agent in 'dvr' mode." msgstr "不允许以“dvr”方式将路由器手动分配给代理程序。" msgid "Not allowed to manually remove a router from an agent in 'dvr' mode." msgstr "不允许以“dvr”方式从代理程序手动移除路由器。" #, python-format msgid "" "Not enough l3 agents available to ensure HA. Minimum required " "%(min_agents)s, available %(num_agents)s." msgstr "" "l3 个代理并非足够可用于确保 HA。需要的最小数目为 %(min_agents)s,可用的数目" "为 %(num_agents)s。" msgid "" "Number of DHCP agents scheduled to host a tenant network. If this number is " "greater than 1, the scheduler automatically assigns multiple DHCP agents for " "a given tenant network, providing high availability for DHCP service." msgstr "" "安排托管租户网络的 DHCP 代理数。如果此数目大于 1,那么调度程序会自动为所给定" "的租户网络分配多个 DHCP 代理,从而为 DHCP 服务提供高可用性。" msgid "Number of RPC worker processes dedicated to state reports queue" msgstr "专用于状态报告队列的 RPC 工作程序进程数" msgid "Number of RPC worker processes for service" msgstr "服务的 RPC 执行程序编号。" msgid "Number of backlog requests to configure the metadata server socket with" msgstr "用于配置元数据服务器套接字的储备请求数" msgid "Number of backlog requests to configure the socket with" msgstr "用于配置套接字的储备请求数" msgid "" "Number of bits in an ipv4 PTR zone that will be considered network prefix. " "It has to align to byte boundary. Minimum value is 8. Maximum value is 24. 
" "As a consequence, range of values is 8, 16 and 24" msgstr "" "ipv4 PTR 区域中将被视为网络前缀的位数。它必须符合字节限制。最小值为 8。最大值" "为 24。因此,有效值包括:8、16 和 24。" msgid "" "Number of bits in an ipv6 PTR zone that will be considered network prefix. " "It has to align to nyble boundary. Minimum value is 4. Maximum value is 124. " "As a consequence, range of values is 4, 8, 12, 16,..., 124" msgstr "" "ipv6 PTR 区域中将被视为网络前缀的位数。它必须符合字节限制。最小值为 4。最大值" "为 124。因此,有效值包括:4,8,12,16,...,124。" msgid "" "Number of floating IPs allowed per tenant. A negative value means unlimited." msgstr "每个租户允许的浮动 IP 数。负值表示无限。" msgid "" "Number of networks allowed per tenant. A negative value means unlimited." msgstr "每个租户允许的网络数。负值表示无限。" msgid "Number of ports allowed per tenant. A negative value means unlimited." msgstr "每个租户允许的端口数。负值表示无限。" msgid "Number of routers allowed per tenant. A negative value means unlimited." msgstr "每个租户允许的路由器数。负值表示无限。" msgid "" "Number of seconds between sending events to nova if there are any events to " "send." msgstr "前后两次将事件发送至 nova 的间隔秒数(如果有事件要发送)。" msgid "Number of seconds to keep retrying to listen" msgstr "保持重试监听的秒数" msgid "" "Number of security groups allowed per tenant. A negative value means " "unlimited." msgstr "每个租户允许的安全组数。负值表示无限。" msgid "" "Number of security rules allowed per tenant. A negative value means " "unlimited." msgstr "每个租户允许的安全性规则数。负值表示无限。" msgid "" "Number of separate API worker processes for service. If not specified, the " "default is equal to the number of CPUs available for best performance." msgstr "" "针对服务的不同 API 执行程序的编号。如果没有指定,缺省为最佳性能下的可用 CPU " "数" msgid "" "Number of separate worker processes for metadata server (defaults to half of " "the number of CPUs)" msgstr "元数据服务器的单独工作程序进程数(缺省设置为 CPU 数目的一半)" msgid "Number of subnets allowed per tenant, A negative value means unlimited." msgstr "每个租户允许的子网数。负值表示无限。" msgid "" "Number of threads to use during sync process. Should not exceed connection " "pool size configured on server." msgstr "同步过程中要使用的线程数。不应超过在服务器上配置的连接池大小。" msgid "OK" msgstr "确定" msgid "" "OVS datapath to use. 'system' is the default value and corresponds to the " "kernel datapath. To enable the userspace datapath set this value to 'netdev'." msgstr "" "要使用的 OVS 数据路径。“system”是缺省值,对应内核数据路径,要启用用户空间数据" "路径,请将此值设置为“netdev”。" msgid "OVS vhost-user socket directory." msgstr "OVS vhost-user 套接字目录。" #, python-format msgid "OVSDB Error: %s" msgstr "OVSDB 错误:%s" #, python-format msgid "Object action %(action)s failed because: %(reason)s." msgstr "对象操作 %(action)s 失败,因为:%(reason)s。" msgid "Only admin can view or configure quota" msgstr "只有管理员才能查看或配置配额" msgid "Only admin is authorized to access quotas for another tenant" msgstr "只有管理员才有权访问另一租户的配额" msgid "Only admins can manipulate policies on networks they do not own." msgstr "只有管理员可以处理他们并不拥有的网络上的策略。" msgid "Only admins can manipulate policies on objects they do not own" msgstr "只有管理员才能处理针对并非他们所有的对象的策略" msgid "Only allowed to update rules for one security profile at a time" msgstr "一次仅允许更新一个安全概要文件的规则" msgid "Only remote_ip_prefix or remote_group_id may be provided." msgstr "只能提供 remote_ip_prefix 或 remote_group_id。" msgid "OpenFlow interface to use." msgstr "要使用的 OpenFlow 接口。" #, python-format msgid "" "Operation %(op)s is not supported for device_owner %(device_owner)s on port " "%(port_id)s." 
msgstr "" "端口 %(port_id)s 上的 device_owner %(device_owner)s 不支持操作 %(op)s。" #, python-format msgid "Operation not supported on device %(dev_name)s" msgstr "操作在设备 %(dev_name)s 上不受支持" msgid "" "Ordered list of network_types to allocate as tenant networks. The default " "value 'local' is useful for single-box testing but provides no connectivity " "between hosts." msgstr "" "要作为租户网络分配的 network_type 的有序列表。缺省值“local”对单框测试很有用," "但不会在主机之间提供连接。" msgid "Override the default dnsmasq settings with this file." msgstr "使用此文件覆盖缺省 dnsmasq 设置。" msgid "Owner type of the device: network/compute" msgstr "设备的所有者类型如下:网络/计算" msgid "POST requests are not supported on this resource." msgstr "POST 请求在此资源上不受支持。" #, python-format msgid "Package %s not installed" msgstr "未安装软件包 %s" #, python-format msgid "Parameter %(param)s must be of %(param_type)s type." msgstr "参数 %(param)s 必须为 %(param_type)s 类型。" #, python-format msgid "Parsing bridge_mappings failed: %s." msgstr "解析 bridge_mappings 失败:%s。" msgid "Parsing supported pci_vendor_devs failed" msgstr "解析受支持的 pci_vendor_devs 失败" msgid "Password for connecting to designate in admin context" msgstr "管理员上下文中要指定的连接密码" #, python-format msgid "Password not specified for authentication type=%(auth_type)s." msgstr "未对认证类型 %(auth_type)s 指定密码。" msgid "Path to PID file for this process" msgstr "此进程的 PID 文件的路径" msgid "Path to the router directory" msgstr "路由器目录的路径" msgid "Peer patch port in integration bridge for tunnel bridge." msgstr "集成网桥中的同级补丁端口(对于隧道网桥)。" msgid "Peer patch port in tunnel bridge for integration bridge." msgstr "隧道网桥中的同级补丁端口(对于集成网桥)。" msgid "Per-tenant subnet pool prefix quota exceeded." msgstr "超出每个租户的子网池前缀配额。" msgid "Phase upgrade options do not accept revision specification" msgstr "阶段升级选项不接受修订规范" msgid "Ping timeout" msgstr "Ping 超时" #, python-format msgid "Plugin '%s' not found." msgstr "找不到插件“%s”。" msgid "Plugin does not support updating provider attributes" msgstr "插件不支持更新提供程序属性" msgid "Policy configuration policy.json could not be found." msgstr "找不到策略配置 policy.json。" #, python-format msgid "Port %(id)s does not have fixed ip %(address)s" msgstr "端口 %(id)s 没有固定 ip %(address)s" #, python-format msgid "Port %(port)s does not exist on %(bridge)s!" msgstr "端口 %(port)s 在 %(bridge)s 上不存在!" #, python-format msgid "Port %(port_id)s is already acquired by another DHCP agent" msgstr "另一 DHCP 代理程序尚未获取端口 %(port_id)s" #, python-format msgid "" "Port %(port_id)s is associated with a different tenant than Floating IP " "%(floatingip_id)s and therefore cannot be bound." msgstr "" "端口 %(port_id)s 和浮动 IP %(floatingip_id)s 不是与同一租户关联,因此找不到该" "端口。" #, python-format msgid "Port %(port_id)s is not managed by this agent. " msgstr "端口 %(port_id)s 并非由此代理程序管理。" #, python-format msgid "Port %s does not exist" msgstr "端口 %s 不存在" #, python-format msgid "" "Port %s has multiple fixed IPv4 addresses. Must provide a specific IPv4 " "address when assigning a floating IP" msgstr "" "端口 %s 具有多个固定 IPv4 地址。当分配浮动 IP 时,必须提供特定 IPv4 地址" msgid "" "Port Security must be enabled in order to have allowed address pairs on a " "port." msgstr "必须启用端口安全性,以便在端口上具有所允许的地址对。" msgid "" "Port has security group associated. Cannot disable port security or ip " "address until security group is removed" msgstr "端口已使安全组关联。直到除去安全组,才能禁用端口安全性或 IP 地址" msgid "" "Port security must be enabled and port must have an IP address in order to " "use security groups." msgstr "必须启用端口安全性,并且端口必须具有 IP 地址以使用安全组。" msgid "" "Port to listen on for OpenFlow connections. Used only for 'native' driver." 
msgstr "对于 OpenFlow 连接,要侦听的端口。仅用于“本机”驱动程序。" #, python-format msgid "Prefix '%(prefix)s' not supported in IPv%(version)s pool." msgstr "前缀“%(prefix)s”在 IPv%(version)s 池中不受支持。" msgid "Prefix Delegation can only be used with IPv6 subnets." msgstr "前缀授权只能用于 IPv6 子网。" msgid "Private key of client certificate." msgstr "客户机证书的专用密钥。" #, python-format msgid "Probe %s deleted" msgstr "已删除探测器 %s" #, python-format msgid "Probe created : %s " msgstr "已创建探测器:%s " msgid "Process is already started" msgstr "进程已经启动" msgid "Process is not running." msgstr "进程未在运行" msgid "Protocol to access nova metadata, http or https" msgstr "用于访问 nova 元数据的协议(HTTP 或 HTTPS)" #, python-format msgid "Provider name %(name)s is limited by %(len)s characters" msgstr "供应商名称 %(name)s 被限制为不超过 %(len)s 个字符" #, python-format msgid "QoS Policy %(policy_id)s is used by %(object_type)s %(object_id)s." msgstr "%(object_type)s %(object_id)s 使用了 QoS 策略 %(policy_id)s。" #, python-format msgid "" "QoS binding for network %(net_id)s and policy %(policy_id)s could not be " "found." msgstr "找不到网络 %(net_id)s 和策略 %(policy_id)s 的 QoS 绑定。" #, python-format msgid "" "QoS binding for port %(port_id)s and policy %(policy_id)s could not be found." msgstr "找不到端口 %(port_id)s 和策略 %(policy_id)s 的 QoS 绑定。" #, python-format msgid "QoS policy %(policy_id)s could not be found." msgstr "找不到 QoS 策略 %(policy_id)s。" #, python-format msgid "QoS rule %(rule_id)s for policy %(policy_id)s could not be found." msgstr "找不到策略 %(policy_id)s 的 QoS 规则 %(rule_id)s。" #, python-format msgid "RBAC policy of type %(object_type)s with ID %(id)s not found" msgstr "找不到标识为 %(id)s 的 %(object_type)s 类型的 RBAC 策略" #, python-format msgid "" "RBAC policy on object %(object_id)s cannot be removed because other objects " "depend on it.\n" "Details: %(details)s" msgstr "" "无法移除对象 %(object_id)s 的 RBAC 策略,因为其他对象依赖于它。\n" "详细信息:%(details)s" msgid "" "Range of seconds to randomly delay when starting the periodic task scheduler " "to reduce stampeding. (Disable by setting to 0)" msgstr "" "当启动定期任务调度程序以减少拥堵时要随机延迟的秒数范围。(通过设为 0 来禁用)" msgid "Ranges must be in the same IP version" msgstr "范围必须为同一 IP 版本" msgid "Ranges must be netaddr.IPRange" msgstr "范围必须为 netaddr.IPRange" msgid "Ranges must not overlap" msgstr "范围不能重叠" #, python-format msgid "" "Received type '%(type)s' and value '%(value)s'. Expecting netaddr.EUI type." msgstr "接收到类型“%(type)s”和值“%(value)s”。需要 netaddr.EUI 类型。" #, python-format msgid "" "Received type '%(type)s' and value '%(value)s'. Expecting netaddr.IPAddress " "type." msgstr "接收到类型“%(type)s”和值“%(value)s”。需要 netaddr.IPAddress 类型。" #, python-format msgid "" "Received type '%(type)s' and value '%(value)s'. Expecting netaddr.IPNetwork " "type." msgstr "接收到类型“%(type)s”和值“%(value)s”。需要 netaddr.IPNetwork 类型。" #, python-format msgid "" "Release aware branch labels (%s) are deprecated. Please switch to expand@ " "and contract@ labels." msgstr "" "不推荐使用区分发行版的分支标签 (%s)。请切换到 expand@ 和 contract@ 标签。" msgid "Remote metadata server experienced an internal server error." msgstr "远程元数据服务器遇到内部服务器错误。" msgid "" "Repository does not contain HEAD files for contract and expand branches." msgstr "存储库未包含用于合同分支和扩展分支的 HEAD 文件。" msgid "" "Representing the resource type whose load is being reported by the agent. " "This can be \"networks\", \"subnets\" or \"ports\". 
When specified (Default " "is networks), the server will extract particular load sent as part of its " "agent configuration object from the agent report state, which is the number " "of resources being consumed, at every report_interval.dhcp_load_type can be " "used in combination with network_scheduler_driver = neutron.scheduler." "dhcp_agent_scheduler.WeightScheduler When the network_scheduler_driver is " "WeightScheduler, dhcp_load_type can be configured to represent the choice " "for the resource being balanced. Example: dhcp_load_type=networks" msgstr "" "表示其负载要由代理报告的资源类型。可以是“networks”、“subnets”或“ports”。如果" "已指定(缺省值为“networks”),那么服务器将根据代理报告状态抽取特定负载(作为" "其代理配置对象的一部分发送),这是在每个 report_interval 要消耗的资源数。" "dhcp_load_type 可与 network_scheduler_driver = neutron.scheduler." "dhcp_agent_scheduler.WeightScheduler 一起使用。当 network_scheduler_driver " "为 WeightScheduler 时,dhcp_load_type 可配置为表示针对要均衡的资源的选择。示" "例:dhcp_load_type=networks" msgid "Request Failed: internal server error while processing your request." msgstr "请求失败:在处理请求时,发生内部服务器错误。" #, python-format msgid "" "Request contains duplicate address pair: mac_address %(mac_address)s " "ip_address %(ip_address)s." msgstr "" "请求包含重复地址对:mac_address %(mac_address)s ip_address %(ip_address)s。" #, python-format msgid "" "Requested subnet with cidr: %(cidr)s for network: %(network_id)s overlaps " "with another subnet" msgstr "" "所请求子网(具有 cidr %(cidr)s,对于网络 %(network_id)s)与另一子网重叠" msgid "" "Reset flow table on start. Setting this to True will cause brief traffic " "interruption." msgstr "在启动时重置流表。将此项设置为 True 将导致短暂的通信中断。" #, python-format msgid "Resource %(resource)s %(resource_id)s could not be found." msgstr "找不到资源 %(resource)s %(resource_id)s。" #, python-format msgid "Resource %(resource_id)s of type %(resource_type)s not found" msgstr "找不到类型为 %(resource_type)s 的资源 %(resource_id)s" #, python-format msgid "" "Resource '%(resource_id)s' is already associated with provider " "'%(provider)s' for service type '%(service_type)s'" msgstr "" "对于服务类型“%(service_type)s”,资源“%(resource_id)s”已经与提供程" "序“%(provider)s”关联" msgid "Resource body required" msgstr "需要资源主体" msgid "" "Resource name(s) that are supported in quota features. This option is now " "deprecated for removal." msgstr "配额功能部件中受支持的资源名称。不推荐使用此选项,即将移除。" msgid "Resource not found." msgstr "找不到资源。" msgid "Resources required" msgstr "需要资源" msgid "" "Root helper application. Use 'sudo neutron-rootwrap /etc/neutron/rootwrap." "conf' to use the real root filter facility. Change to 'sudo' to skip the " "filtering and just run the command directly." msgstr "" "root helper 应用程序。使用“sudo neutron-rootwrap /etc/neutron/rootwrap." "conf”以使用真实根过滤工具。更改为“sudo”将跳过过滤并且直接运行该命令。" msgid "Root helper daemon application to use when possible." msgstr "在可能的情况下,要使用的 root helper 守护程序应用程序。" msgid "Root permissions are required to drop privileges." msgstr "删除特权需要 root 用户许可权。" #, python-format msgid "Route %(cidr)s not advertised for BGP Speaker %(speaker_as)d." 
msgstr "未针对 BGP Speaker %(speaker_as)d 通告路由 %(cidr)s。" #, python-format msgid "Router %(router_id)s %(reason)s" msgstr "路由器 %(router_id)s %(reason)s" #, python-format msgid "Router %(router_id)s could not be found" msgstr "找不到路由器 %(router_id)s" #, python-format msgid "Router %(router_id)s does not have an interface with id %(port_id)s" msgstr "路由器 %(router_id)s 没有具有标识 %(port_id)s 的接口" #, python-format msgid "Router %(router_id)s has no interface on subnet %(subnet_id)s" msgstr "路由器 %(router_id)s 在子网 %(subnet_id)s 上没有任何接口" #, python-format msgid "Router '%(router_id)s' cannot be both DVR and HA." msgstr "路由器“%(router_id)s”不能同时为 DVR 和 HA。" #, python-format msgid "Router '%(router_id)s' is not compatible with this agent." msgstr "路由器“%(router_id)s”与此代理程序不兼容。" #, python-format msgid "Router already has a port on subnet %s" msgstr "路由器已在子网 %s 上具有端口" #, python-format msgid "" "Router interface for subnet %(subnet_id)s on router %(router_id)s cannot be " "deleted, as it is required by one or more floating IPs." msgstr "" "无法删除路由器 %(router_id)s 上用于子网 %(subnet_id)s 的路由器接口,因为一个" "或多个浮动 IP 需要该接口。" #, python-format msgid "" "Router interface for subnet %(subnet_id)s on router %(router_id)s cannot be " "deleted, as it is required by one or more routes." msgstr "" "无法删除路由器 %(router_id)s 上用于子网 %(subnet_id)s 的路由器接口,因为一个" "或多个路由需要该接口。" msgid "Router port must have at least one fixed IP" msgstr "路由器端口必须具有至少一个固定 IP" msgid "Router that will have connected instances' metadata proxied." msgstr "将代理相连实例元数据的路由器。" #, python-format msgid "" "Row doesn't exist in the DB. Request info: Table=%(table)s. Columns=" "%(columns)s. Records=%(records)s." msgstr "" "该行在数据库中不存在。请求信息:Table=%(table)s。Columns=%(columns)s。" "Records=%(records)s。" msgid "Run as daemon." msgstr "作为守护程序运行。" #, python-format msgid "Running %(cmd)s (%(desc)s) for %(project)s ..." msgstr "正在对 %(project)s 运行 %(cmd)s (%(desc)s)..." #, python-format msgid "Running %(cmd)s for %(project)s ..." msgstr "正在对 %(project)s 运行 %(cmd)s..." msgid "Running without keystone AuthN requires that tenant_id is specified" msgstr "在没有 keystone AuthN 的情况下运行要求指定 tenant_id" msgid "" "Seconds between nodes reporting state to server; should be less than " "agent_down_time, best if it is half or less than agent_down_time." msgstr "" "节点向服务器报告状态需要间隔秒数;应该小于 agent_down_time,最好小于 " "agent_down_time 或是它的一半。" msgid "Seconds between running periodic tasks" msgstr "运行定期任务之间的秒数" msgid "" "Seconds to regard the agent is down; should be at least twice " "report_interval, to be sure the agent is down for good." msgstr "" "认为代理已关闭的秒数;应该至少为 report_interval 的两倍,以确保代理已正常关" "闭。" #, python-format msgid "Security Group %(id)s %(reason)s." msgstr "安全组 %(id)s %(reason)s。" #, python-format msgid "Security Group Rule %(id)s %(reason)s." msgstr "安全组规则 %(id)s %(reason)s。" #, python-format msgid "Security group %(id)s does not exist" msgstr "安全组 %(id)s 不存在" #, python-format msgid "Security group rule %(id)s does not exist" msgstr "安全组规则 %(id)s 不存在" #, python-format msgid "Security group rule already exists. Rule id is %(rule_id)s." msgstr "安全组规则已存在,规则标识为 %(rule_id)s。" #, python-format msgid "" "Security group rule for ethertype '%(ethertype)s' not supported. Allowed " "values are %(values)s." msgstr "" "ethertype “%(ethertype)s” 的安全组规则不受支持。允许的值为 %(values)s。" #, python-format msgid "" "Security group rule protocol %(protocol)s not supported. Only protocol " "values %(values)s and integer representations [0 to 255] are supported." 
msgstr "" "安全组规则协议 %(protocol)s 不受支持。只有协议值 %(values)s 和整数表示 [0 到 " "255] 受支持。" msgid "Segments and provider values cannot both be set." msgstr "无法同时设置段和提供程序值。" msgid "Selects the Agent Type reported" msgstr "选择所报告的代理类型" msgid "" "Send notification to nova when port data (fixed_ips/floatingip) changes so " "nova can update its cache." msgstr "" "当端口数据(固定 IP/floatingip)更改时,将通知发送至 nova,以便 nova 可更新其" "高速缓存。" msgid "Send notification to nova when port status changes" msgstr "当端口状态更改时,将通知发送至 nova" msgid "" "Send this many gratuitous ARPs for HA setup, if less than or equal to 0, the " "feature is disabled" msgstr "针对 HA 设置,发送此大量免费 ARP,如果小于或等于 0,那么会禁用该功能" #, python-format msgid "Service Profile %(sp_id)s could not be found." msgstr "找不到服务概要文件 %(sp_id)s。" #, python-format msgid "Service Profile %(sp_id)s is already associated with flavor %(fl_id)s." msgstr "服务概要文件 %(sp_id)s 已与类型 %(fl_id)s 相关联。" #, python-format msgid "Service Profile %(sp_id)s is not associated with flavor %(fl_id)s." msgstr "服务概要文件 %(sp_id)s 未与类型 %(fl_id)s 相关联。" #, python-format msgid "Service Profile %(sp_id)s is used by some service instance." msgstr "服务概要文件 %(sp_id)s 已被某个服务实例使用。" #, python-format msgid "Service Profile driver %(driver)s could not be found." msgstr "找不到服务概要文件驱动程序 %(driver)s。" msgid "Service Profile is not enabled." msgstr "服务概要文件未启用。" msgid "Service Profile needs either a driver or metainfo." msgstr "服务概要文件需要驱动程序或元信息。" #, python-format msgid "" "Service provider '%(provider)s' could not be found for service type " "%(service_type)s" msgstr "对于以下服务类型,找不到服务提供程序“%(provider)s”:%(service_type)s" msgid "Service to handle DHCPv6 Prefix delegation." msgstr "用来处理 DHCPv6 前缀授权的服务。" #, python-format msgid "Service type %(service_type)s does not have a default service provider" msgstr "服务类型 %(service_type)s 没有缺省服务提供程序" msgid "" "Set new timeout in seconds for new rpc calls after agent receives SIGTERM. " "If value is set to 0, rpc timeout won't be changed" msgstr "" "在代理接收到 SIGTERM 之后,为新的 RPC 调用设置新超时(以秒计)。如果值设置为 " "0,那么 RPC 超时将不更改" msgid "" "Set or un-set the don't fragment (DF) bit on outgoing IP packet carrying GRE/" "VXLAN tunnel." msgstr "在承载 GRE/VXLAN 隧道的出局 IP 包上设置或取消设置不分段 (DF) 位。" msgid "" "Set or un-set the tunnel header checksum on outgoing IP packet carrying GRE/" "VXLAN tunnel." msgstr "对承载 GRE/VXLAN 隧道的出局 IP 包设置或取消设置隧道头校验和。" msgid "Shared address scope can't be unshared" msgstr "无法取消共享已共享的地址范围" msgid "" "Specifying 'tenant_id' other than authenticated tenant in request requires " "admin privileges" msgstr "在请求中指定除了已认证租户之外的“tenant_id”需要管理特权" msgid "String prefix used to match IPset names." msgstr "用来匹配 IPset 名称的字符串前缀。" #, python-format msgid "Sub-project %s not installed." msgstr "未安装子项目 %s。" msgid "Subnet for router interface must have a gateway IP" msgstr "路由器接口的子网必须具有网关 IP" msgid "" "Subnet has a prefix length that is incompatible with DHCP service enabled." msgstr "子网具有与已启用的 DHCP 服务不兼容的前缀长度。" #, python-format msgid "Subnet pool %(subnetpool_id)s could not be found." msgstr "找不到子网协议 %(subnetpool_id)s。" msgid "Subnet pool has existing allocations" msgstr "子网池已有分配" msgid "Subnet used for the l3 HA admin network." msgstr "用于 l3 HA 管理网络的子网。" msgid "" "Subnets hosted on the same network must be allocated from the same subnet " "pool." msgstr "同一网络上的子网必须分配自同一子网池。" msgid "Suffix to append to all namespace names." msgstr "要附加至所有名称空间名称的后缀。" msgid "" "System-wide flag to determine the type of router that tenants can create. " "Only admin can override." 
msgstr "系统范围标记,用于确定租户可创建的路由器类型。仅管理员可以覆盖。" msgid "TCP Port to listen for metadata server requests." msgstr "用于侦听元数据服务器请求的 TCP 端口。" msgid "TCP Port used by Neutron metadata namespace proxy." msgstr "TCP 端口已由 Neutron 元数据名称空间代理使用。" msgid "TCP Port used by Nova metadata server." msgstr "Nova 元数据服务器使用的 TCP 端口。" #, python-format msgid "TLD '%s' must not be all numeric" msgstr "TLD“%s”不能全部为数字" msgid "TOS for vxlan interface protocol packets." msgstr "用于 vxlan 接口协议包的 TOS。" msgid "TTL for vxlan interface protocol packets." msgstr "用于 vxlan 接口协议包的 TTL。" #, python-format msgid "Table %s can only be queried by UUID" msgstr "只能按 UUID 来查询表 %s" #, python-format msgid "Tag %(tag)s could not be found." msgstr "找不到标记 %(tag)s。" #, python-format msgid "Tenant %(tenant_id)s not allowed to create %(resource)s on this network" msgstr "不允许租户 %(tenant_id)s 在此网络上创建 %(resource)s" msgid "Tenant id for connecting to designate in admin context" msgstr "管理员上下文中要指定的连接租户标识" msgid "Tenant name for connecting to designate in admin context" msgstr "管理员上下文中要指定的连接租户名" msgid "Tenant network creation is not enabled." msgstr "未启用租户网络创建。" msgid "Tenant-id was missing from quota request." msgstr "配额请求中缺少 tenant-id。" msgid "" "The 'gateway_external_network_id' option must be configured for this agent " "as Neutron has more than one external network." msgstr "" "必须为此代理配置“gateway_external_network_id”选项,因为 Neutron 具有多个外部" "网络。" msgid "" "The DHCP agent will resync its state with Neutron to recover from any " "transient notification or RPC errors. The interval is number of seconds " "between attempts." msgstr "" "DHCP 代理程序将其状态重新同步至 Neutron,以从任何瞬时通知或 RPC 错误恢复。时" "间间隔为两次尝试之间的秒数。" msgid "" "The DHCP server can assist with providing metadata support on isolated " "networks. Setting this value to True will cause the DHCP server to append " "specific host routes to the DHCP request. The metadata service will only be " "activated when the subnet does not contain any router port. The guest " "instance must be configured to request host routes via DHCP (Option 121). " "This option doesn't have any effect when force_metadata is set to True." msgstr "" "DHCP 服务器可帮助在隔离网络上提供元数据支持。将此值设置为 True 将导致 DHCP 服" "务器将特定主机路由追加至 DHCP 请求。仅当子网未包含任何路由器端口时,才会激活" "元数据服务。访客实例必须配置为通过 DHCP 请求主机路由(选项 121)。如果 " "force_metadata 设置 True,那么此选项没有任何效果。" #, python-format msgid "" "The HA Network CIDR specified in the configuration file isn't valid; " "%(cidr)s." msgstr "配置文件中指定的 HA 网络 CIDR 无效;%(cidr)s。" msgid "The UDP port to use for VXLAN tunnels." msgstr "要用于 VXLAN 通道的 UDP 端口" #, python-format msgid "" "The address allocation request could not be satisfied because: %(reason)s" msgstr "未能满足地址分配请求,原因:%(reason)s" msgid "The advertisement interval in seconds" msgstr "通告间隔(以秒计)" #, python-format msgid "The allocation pool %(pool)s is not valid." msgstr "分配池 %(pool)s 无效。" #, python-format msgid "" "The allocation pool %(pool)s spans beyond the subnet cidr %(subnet_cidr)s." msgstr "分配池 %(pool)s 范围超出子网 cidr %(subnet_cidr)s。" #, python-format msgid "" "The attribute '%(attr)s' is reference to other resource, can't used by sort " "'%(resource)s'" msgstr "属性“%(attr)s”是对其他资源的引用,无法由排序“%(resource)s”使用" msgid "" "The base MAC address Neutron will use for VIFs. The first 3 octets will " "remain unchanged. If the 4th octet is not 00, it will also be used. The " "others will be randomly generated." msgstr "" "基本 MAC 地址 Neutron 将用于 VIF。前 3 个八位元将保持不变。如果第 4 个八位元" "并非 00,那么也将使用该八位元。将随机生成其他八位元。" msgid "" "The base mac address used for unique DVR instances by Neutron. 
The first 3 " "octets will remain unchanged. If the 4th octet is not 00, it will also be " "used. The others will be randomly generated. The 'dvr_base_mac' *must* be " "different from 'base_mac' to avoid mixing them up with MAC's allocated for " "tenant ports. A 4 octet example would be dvr_base_mac = fa:16:3f:4f:00:00. " "The default is 3 octet" msgstr "" "Neutron 用于唯一 DVR 实例的基本 MAC 地址。前三个八位元将保持不变。如果第四个" "八位元不为 00,那么也将使用该八位元。将随机生成其他八位元。“dvr_base_mac”必须" "不同于“base_mac”,以避免将它们与为租户端口分配的 MAC 混合使用。以下是一个具" "有 4 个八位元的示例:dvr_base_mac = fa:16:3f:4f:00:00。缺省值为 3 个八位元" msgid "" "The connection string for the native OVSDB backend. Requires the native " "ovsdb_interface to be enabled." msgstr "本机 OVSDB 后端的连接字符串。需要启用本机 ovsdb_interface。" msgid "The core plugin Neutron will use" msgstr "Neutron 将使用的核心插件" #, python-format msgid "" "The dns_name passed is a FQDN. Its higher level labels must be equal to the " "dns_domain option in neutron.conf, that has been set to '%(dns_domain)s'. It " "must also include one or more valid DNS labels to the left of " "'%(dns_domain)s'" msgstr "" "所传递的 dns_name 为 FQDN。它的更高级别的标签必须与neutron.conf 中的 " "dns_domain 选项相同,该选项已设置为“%(dns_domain)s”。它还必须" "在“%(dns_domain)s”的左边包括一个或多个有效 DNS 标签" #, python-format msgid "" "The dns_name passed is a PQDN and its size is '%(dns_name_len)s'. The " "dns_domain option in neutron.conf is set to %(dns_domain)s, with a length of " "'%(higher_labels_len)s'. When the two are concatenated to form a FQDN (with " "a '.' at the end), the resulting length exceeds the maximum size of " "'%(fqdn_max_len)s'" msgstr "" "所传递的 dns_name 为 PQDN,其大小为“%(dns_name_len)s”。neutron.conf 中的 " "dns_domain 选项设置为 %(dns_domain)s,长度为“%(higher_labels_len)s”。当这两者" "合并以组成 FQDN 时(末尾为“.”),最终获得的长度超过了最大大" "小“%(fqdn_max_len)s”" msgid "The driver used to manage the DHCP server." msgstr "用于管理 DHCP 服务器的驱动程序。" msgid "The driver used to manage the virtual interface." msgstr "用于管理虚拟接口的驱动程序。" msgid "" "The email address to be used when creating PTR zones. If not specified, the " "email address will be admin@" msgstr "" "创建 PTR 区域时要使用的电子邮件地址。如果未指定,那么电子邮件地址将为 " "admin@" #, python-format msgid "" "The following device_id %(device_id)s is not owned by your tenant or matches " "another tenants router." msgstr "以下 device_id %(device_id)s 不属于您的租户或与另一租户路由器匹配。" msgid "The host IP to bind to" msgstr "要绑定至的主机 IP" msgid "The interface for interacting with the OVSDB" msgstr "用于与 OVSDB 进行交互的接口" msgid "" "The maximum number of items returned in a single response, value was " "'infinite' or negative integer means no limit" msgstr "在单个响应中返回的最大项数,值为“无限”或负整数表示无限制" #, python-format msgid "" "The network %(network_id)s has been already hosted by the DHCP Agent " "%(agent_id)s." msgstr "网络 %(network_id)s 已由 DHCP 代理 %(agent_id)s 托管。" #, python-format msgid "" "The network %(network_id)s is not hosted by the DHCP agent %(agent_id)s." msgstr "网络 %(network_id)s 未由 DHCP 代理 %(agent_id)s 托管。" msgid "" "The network type to use when creating the HA network for an HA router. By " "default or if empty, the first 'tenant_network_types' is used. This is " "helpful when the VRRP traffic should use a specific network which is not the " "default one." msgstr "" "为 HA 路由器创建 HA 网络时要使用的网络类型。缺省情况下,或者如果网络类型为" "空,那么将使用第一个“tenant_network_types”。这在 VRRP 流量应使用特定网络(该" "网络不是缺省网络)时很有帮助。" #, python-format msgid "The number of allowed address pair exceeds the maximum %(quota)s." msgstr "允许的地址对数超过最大值 %(quota)s。" msgid "" "The number of seconds the agent will wait between polling for local device " "changes." 
msgstr "在轮询本地设备更改之间,代理将等待的秒数。" msgid "" "The number of seconds to wait before respawning the ovsdb monitor after " "losing communication with it." msgstr "在与 ovsdb 监视器失去通信联系之后重新衍生该监视器之前要等待的秒数。" msgid "The number of sort_keys and sort_dirs must be same" msgstr "sort_keys 数与 sort_dirs 数必须相同" msgid "" "The path for API extensions. Note that this can be a colon-separated list of " "paths. For example: api_extensions_path = extensions:/path/to/more/exts:/" "even/more/exts. The __path__ of neutron.extensions is appended to this, so " "if your extensions are in there you don't need to specify them here." msgstr "" "API 扩展的路径。请注意,它可能是路径的冒号分隔列表。例如:" "api_extensions_path = extensions:/path/to/more/exts:/even/more/exts。neutron." "extensions 的 __path__ 将追加至此项之后,所以,如果扩展位于该处,那么不需要在" "此处指定它们。" msgid "The physical network name with which the HA network can be created." msgstr "可以用来创建 HA 网络的物理网络名称。" #, python-format msgid "The port '%s' was deleted" msgstr "已删除端口“%s”" msgid "The port to bind to" msgstr "要绑定至的端口" #, python-format msgid "The requested content type %s is invalid." msgstr "请求的内容类型 %s 无效。" msgid "The resource could not be found." msgstr "找不到该资源。" #, python-format msgid "" "The router %(router_id)s has been already hosted by the L3 Agent " "%(agent_id)s." msgstr "路由器 %(router_id)s 已由 L3 代理 %(agent_id)s 主管。" msgid "" "The server has either erred or is incapable of performing the requested " "operation." msgstr "服务器已出错或无法执行所请求操作。" msgid "The service plugins Neutron will use" msgstr "Neutron 将使用的服务插件" #, python-format msgid "The subnet request could not be satisfied because: %(reason)s" msgstr "未能满足子网请求,原因:%(reason)s" #, python-format msgid "The subproject to execute the command against. Can be one of: '%s'." msgstr "要对其执行命令的子项目。可以是“%s”的其中一项。" msgid "The type of authentication to use" msgstr "要使用的认证的类型" #, python-format msgid "The value '%(value)s' for %(element)s is not valid." msgstr "%(element)s 的值“%(value)s”无效。" msgid "" "The working mode for the agent. Allowed modes are: 'legacy' - this preserves " "the existing behavior where the L3 agent is deployed on a centralized " "networking node to provide L3 services like DNAT, and SNAT. Use this mode if " "you do not want to adopt DVR. 'dvr' - this mode enables DVR functionality " "and must be used for an L3 agent that runs on a compute host. 'dvr_snat' - " "this enables centralized SNAT support in conjunction with DVR. This mode " "must be used for an L3 agent running on a centralized node (or in single-" "host deployments, e.g. devstack)" msgstr "" "代理程序的工作方式。允许的方式为:“legacy”- 它会保留现有行为,即,L3 代理部署" "在中央联网节点上,以提供 DNAT 和 SNAT 之类的 L3 服务。如果不想采用 DVR,请使" "用此方式。“dvr”- 此方法启用 DVR 功能,并且必须用于计算主机上运行的 L3 代" "理。“dvr_snat”- 它允许中央 SNAT 支持与 DVR 配合使用。此方法必须用于中央节点" "(或单主机部署例如,devstack)上运行的 L3代理程序" msgid "" "There are routers attached to this network that depend on this policy for " "access." msgstr "根据此策略,有一些路由器连接至此网络以用于访问。" msgid "" "This will choose the web framework in which to run the Neutron API server. " "'pecan' is a new experiemental rewrite of the API server." msgstr "" "这将选择要运行 Neutron API 服务器的 Web 框架。“pecan”是 API 服务器的新的试验" "性改写。" msgid "Timeout" msgstr "超时" msgid "" "Timeout in seconds for ovs-vsctl commands. If the timeout expires, ovs " "commands will fail with ALARMCLOCK error." msgstr "" "ovs-vsctl 命令的超时(以秒计)。如果此超时到期,那么 ovs 命令将失败,并且发" "生 ALARMCLOCK 错误。" msgid "" "Timeout in seconds to wait for a single OpenFlow request. Used only for " "'native' driver." 
msgstr "等待单个 OpenFlow 请求时的超时(秒)。仅用于“本机”驱动程序。" msgid "" "Timeout in seconds to wait for the local switch connecting the controller. " "Used only for 'native' driver." msgstr "等待用于连接控制器的本地交换机时的超时(秒)。仅用于“本机”驱动程序。" msgid "" "Too long prefix provided. New name would exceed given length for an " "interface name." msgstr "提供的前缀太长。新名称将超出接口名称的给定长度。" msgid "Too many availability_zone_hints specified" msgstr "指定了过多 availability_zone_hints" msgid "" "True to delete all ports on all the OpenvSwitch bridges. False to delete " "ports created by Neutron on integration and external network bridges." msgstr "" "True 表示删除所有 OpenvSwitch 网桥上的所有端口。False 表示删除集成和外部网络" "网桥上由 Neutron 创建的端口。" msgid "Tunnel IP value needed by the ML2 plugin" msgstr "ML2 插件需要隧道 IP 值" msgid "Tunnel bridge to use." msgstr "要使用的隧道网桥。" msgid "" "Type of the nova endpoint to use. This endpoint will be looked up in the " "keystone catalog and should be one of public, internal or admin." msgstr "" "要使用的 nova 端点的类型。系统将在 keystone 目录中查找此端点,值应该为 " "public、internal 或 admin 的其中之一。" msgid "URL for connecting to designate" msgstr "要指定的连接 URL" msgid "URL to database" msgstr "指向数据库的 URL" #, python-format msgid "Unable to access %s" msgstr "无法访问 %s" #, python-format msgid "" "Unable to allocate subnet with prefix length %(prefixlen)s, maximum allowed " "prefix is %(max_prefixlen)s." msgstr "" "无法分配前缀长度为 %(prefixlen)s 的子网,允许的最大前缀长度为 " "%(max_prefixlen)s" #, python-format msgid "" "Unable to allocate subnet with prefix length %(prefixlen)s, minimum allowed " "prefix is %(min_prefixlen)s." msgstr "" "无法分配前缀长度为 %(prefixlen)s 的子网,允许的最低前缀长度为 " "%(min_prefixlen)s。" #, python-format msgid "Unable to calculate %(address_type)s address because of:%(reason)s" msgstr "无法计算 %(address_type)s 地址,原因:%(reason)s" #, python-format msgid "" "Unable to complete operation for %(router_id)s. The number of routes exceeds " "the maximum %(quota)s." msgstr "对于 %(router_id)s,无法完成操作。路由数超过最大值 %(quota)s。" #, python-format msgid "" "Unable to complete operation for %(subnet_id)s. The number of DNS " "nameservers exceeds the limit %(quota)s." msgstr "对于 %(subnet_id)s,无法完成操作。DNS 名称服务器数超过限制 %(quota)s。" #, python-format msgid "" "Unable to complete operation for %(subnet_id)s. The number of host routes " "exceeds the limit %(quota)s." msgstr "对于 %(subnet_id)s,无法完成操作。主机路由数超过限制 %(quota)s。" #, python-format msgid "" "Unable to complete operation on address scope %(address_scope_id)s. There " "are one or more subnet pools in use on the address scope" msgstr "" "无法对地址范围 %(address_scope_id)s 完成操作。在该地址范围内,正在使用一个或" "多个子网池" #, python-format msgid "Unable to convert value in %s" msgstr "无法转换 %s 中的值" msgid "Unable to create the Agent Gateway Port" msgstr "无法创建代理网关端口" msgid "Unable to create the SNAT Interface Port" msgstr "无法创建 SNAT 接口端口" #, python-format msgid "" "Unable to create the flat network. Physical network %(physical_network)s is " "in use." msgstr "无法创建该平面网络。物理网络 %(physical_network)s 在使用中。" msgid "" "Unable to create the network. No available network found in maximum allowed " "attempts." msgstr "无法创建网络。未在最大允许尝试次数中发现任何可用网络。" #, python-format msgid "Unable to delete subnet pool: %(reason)s." 
msgstr "无法删除子网池:%(reason)s。" #, python-format msgid "Unable to determine mac address for %s" msgstr "无法确定 %s 的 MAC 地址" #, python-format msgid "Unable to find '%s' in request body" msgstr "在请求主体中找不到“%s”" #, python-format msgid "Unable to find IP address %(ip_address)s on subnet %(subnet_id)s" msgstr "在子网 %(subnet_id)s 上找不到 IP 地址 %(ip_address)s" #, python-format msgid "Unable to find resource name in %s" msgstr "在 %s 中找不到资源的名称" msgid "Unable to generate IP address by EUI64 for IPv4 prefix" msgstr "对于 IPv4 前缀,无法通过 EUI64 生成 IP 地址" #, python-format msgid "Unable to generate unique DVR mac for host %(host)s." msgstr "无法为主机 %(host)s 生成唯一 DVR MAC。" #, python-format msgid "Unable to generate unique mac on network %(net_id)s." msgstr "无法在网络 %(net_id)s 上生成唯一 MAC。" #, python-format msgid "" "Unable to identify a target field from:%s. Match should be in the form " "%%()s" msgstr "无法通过 %s 标识目标字段。匹配项必须为以下格式:%%()s" msgid "Unable to provide external connectivity" msgstr "无法提供外部连接" msgid "Unable to provide tenant private network" msgstr "无法提供租户专用网络" #, python-format msgid "" "Unable to reconfigure sharing settings for network %(network)s. Multiple " "tenants are using it." msgstr "无法重新配置网络 %(network)s 的共享设置。多个租户正在使用该网络。" #, python-format msgid "Unable to update address scope %(address_scope_id)s : %(reason)s" msgstr "无法更新地址范围 %(address_scope_id)s:%(reason)s" #, python-format msgid "Unable to update the following object fields: %(fields)s" msgstr "无法更新下列对象字段:%(fields)s" #, python-format msgid "" "Unable to verify match:%(match)s as the parent resource: %(res)s was not " "found" msgstr "无法验证该匹配项 %(match)s 为父资源:找不到 %(res)s " #, python-format msgid "Unexpected label for script %(script_name)s: %(labels)s" msgstr "脚本 %(script_name)s 的意外标签:%(labels)s" #, python-format msgid "Unexpected number of alembic branch points: %(branchpoints)s" msgstr "净化器分支点的数目异常:%(branchpoints)s" #, python-format msgid "Unexpected response code: %s" msgstr "意外响应代码:%s" #, python-format msgid "Unexpected response: %s" msgstr "意外响应:%s" #, python-format msgid "Unit name '%(unit)s' is not valid." msgstr "单元名称“%(unit)s”无效。" msgid "Unknown API version specified" msgstr "指定的 API 版本未知" #, python-format msgid "Unknown address type %(address_type)s" msgstr "未知地址类型 %(address_type)s" #, python-format msgid "Unknown attribute '%s'." msgstr "属性“%s”未知。" #, python-format msgid "Unknown chain: %r" msgstr "链未知:%r" #, python-format msgid "Unknown network type %(network_type)s." msgstr "未知网络类型 %(network_type)s。" #, python-format msgid "Unknown quota resources %(unknown)s." msgstr "配额资源 %(unknown)s 未知。" msgid "Unmapped error" msgstr "未映射错误" msgid "Unrecognized action" msgstr "无法识别动作" #, python-format msgid "Unrecognized attribute(s) '%s'" msgstr "无法识别属性“%s”" msgid "Unrecognized field" msgstr "无法识别字段" msgid "Unspecified minimum subnet pool prefix." msgstr "未指定最低子网池前缀。" msgid "Unsupported Content-Type" msgstr "Content-Type 不受支持" #, python-format msgid "Unsupported network type %(net_type)s." msgstr "网络类型 %(net_type)s 不受支持。" #, python-format msgid "Unsupported port state: %(port_state)s." msgstr "不受支持的端口状态:%(port_state)s。" msgid "Unsupported request type" msgstr "未支持请求类型" msgid "Updating default security group not allowed." msgstr "正在更新的缺省安全组内容不被允许" msgid "" "Use ML2 l2population mechanism driver to learn remote MAC and IPs and " "improve tunnel scalability." msgstr "" "请使用 ML2 l2population 机制驱动程序以了解远程 MAC 和 IP 并提高隧道可伸缩性。" msgid "Use broadcast in DHCP replies." 
msgstr "在 DHCP 应答中使用广播。" msgid "Use either --delta or relative revision, not both" msgstr "请使用 --delta 或者相关修订版,但是不能同时指定这两者" msgid "" "Use ipset to speed-up the iptables based security groups. Enabling ipset " "support requires that ipset is installed on L2 agent node." msgstr "" "使用 ipset 以加速基于安全组的 iptable。启用 ipset 支持要求该 ipset 安装在 L2 " "代理程序节点上。" msgid "" "Use the root helper when listing the namespaces on a system. This may not be " "required depending on the security configuration. If the root helper is not " "required, set this to False for a performance improvement." msgstr "" "查询系统上的名字空间时使用 root helper。根据安全配置,这可能不是必需的。如果 " "root helper 不是必需的,请将其设置为 False 以改进性能。" msgid "" "Use veths instead of patch ports to interconnect the integration bridge to " "physical networks. Support kernel without Open vSwitch patch port support so " "long as it is set to True." msgstr "" "使用 veth 而不是接线端口以使集成网桥与物理网络互连。设置为 True 时支持不具备 " "Open vSwitch 接线端口支持的内核。" msgid "User (uid or name) running metadata proxy after its initialization" msgstr "在元数据代理的初始化之后,运行该代理的用户(uid 或名称)" msgid "" "User (uid or name) running metadata proxy after its initialization (if " "empty: agent effective user)." msgstr "" "在元数据代理的初始化之后,运行该代理的用户(uid 或名称),(如果此用户为空," "那么这是代理有效用户)。" msgid "User (uid or name) running this process after its initialization" msgstr "在此进程的初始化之后,运行此进程的用户(uid 或名称)" msgid "Username for connecting to designate in admin context" msgstr "管理员上下文中要指定的连接用户名" msgid "" "Uses veth for an OVS interface or not. Support kernels with limited " "namespace support (e.g. RHEL 6.5) so long as ovs_use_veth is set to True." msgstr "" "对 OVS 接口使用或不使用 veth。如果 ovs_use_veth 设置为 True,那么支持具备有限" "名称空间支持(例如,RHEL 6.5)的内核。" msgid "VRRP authentication password" msgstr "VRRP 认证密码" msgid "VRRP authentication type" msgstr "VRRP 认证类型" msgid "VXLAN network unsupported." msgstr "VXLAN 网络不受支持。" #, python-format msgid "" "Validation of dictionary's keys failed. Expected keys: %(expected_keys)s " "Provided keys: %(provided_keys)s" msgstr "" "对字典的键进行的验证失败。期望的键是 %(expected_keys)s,提供的键是 " "%(provided_keys)s" #, python-format msgid "Validator '%s' does not exist." msgstr "验证器“%s”不存在。" #, python-format msgid "Value %(value)s in mapping: '%(mapping)s' not unique" msgstr "映射“%(mapping)s”中的值 %(value)s 不唯一" #, python-format msgid "" "Value of %(parameter)s has to be multiple of %(number)s, with maximum value " "of %(maximum)s and minimum value of %(minimum)s" msgstr "" "%(parameter)s 的值必须是 %(number)s 的倍数,最大值为 %(maximum)s,最小值为 " "%(minimum)s" msgid "" "Value of host kernel tick rate (hz) for calculating minimum burst value in " "bandwidth limit rules for a port with QoS. See kernel configuration file for " "HZ value and tc-tbf manual for more information." msgstr "" "主机内核节拍率的值 (hz),用于计算带有 QoS 的端口的带宽限制规则中的最小脉冲" "值。请参阅内核配置文件以获取 HZ 值,并参阅 tc-tbf 手册以获取更多信息。" msgid "" "Value of latency (ms) for calculating size of queue for a port with QoS. See " "tc-tbf manual for more information." msgstr "" "延迟值 (ms),用于计算带有 QoS 的端口的队列的大小。请参阅 tc-tbf 手册以了解更" "多信息。" msgid "" "Watch file log. Log watch should be disabled when metadata_proxy_user/group " "has no read/write permissions on metadata proxy log file." msgstr "" "监控文件日志。当 metadata_proxy_user/group 对元数据代理日志文件不具备读/写许" "可权时,应当禁用日志监控。" msgid "" "When external_network_bridge is set, each L3 agent can be associated with no " "more than one external network. This value should be set to the UUID of that " "external network. 
To allow L3 agent support multiple external networks, both " "the external_network_bridge and gateway_external_network_id must be left " "empty." msgstr "" "如果设置了 external_network_bridge,那么每个 L3 代理最多可与一个外部网络相" "关联。此值应设置为该外部网络的 UUID。为允许 L3 代理程序支持多个外部网络," "external_network_bridge 和 gateway_external_network_id 必须留空。" msgid "" "When proxying metadata requests, Neutron signs the Instance-ID header with a " "shared secret to prevent spoofing. You may select any string for a secret, " "but it must match here and in the configuration used by the Nova Metadata " "Server. NOTE: Nova uses the same config key, but in [neutron] section." msgstr "" "代理元数据请求时,Neutron 会使用共享密钥签署 Instance-ID 头以避免电子欺骗。可" "选择任何字符串作为密钥,但此处的密钥必须与 Nova Metadata Server 使用的配置中" "的密钥相匹配。注意:Nova 使用同一配置密钥,但在 [neutron] 一节中。" msgid "" "Where to store Neutron state files. This directory must be writable by the " "agent." msgstr "用于存储 Neutron 状态文件的位置。此目录对于代理必须为可写。" msgid "" "With IPv6, the network used for the external gateway does not need to have " "an associated subnet, since the automatically assigned link-local address " "(LLA) can be used. However, an IPv6 gateway address is needed for use as the " "next-hop for the default route. If no IPv6 gateway address is configured " "here, (and only then) the neutron router will be configured to get its " "default route from router advertisements (RAs) from the upstream router; in " "which case the upstream router must also be configured to send these RAs. " "The ipv6_gateway, when configured, should be the LLA of the interface on the " "upstream router. If a next-hop using a global unique address (GUA) is " "desired, it needs to be done via a subnet allocated to the network and not " "through this parameter. " msgstr "" "对于 IPv6,外部网关的网络不需要具备相关联的子网,因为可以使用自动指定的链路本" "地地址 (LLA)。但是,需要将 IPv6 网关地址用作缺省路由的下一中继段。如果此处未" "配置 IPv6 网关地址,那么将配置 Neutron 路由器,以从上游路由器的路由器通告 " "(RA) 中获取缺省路由;在这种情况下,还必须配置上游路由器以发送这些 RA。配置" "了 ipv6_gateway 时,ipv6_gateway 应为上游路由器上的接口的 LLA。如果需要使用全" "局唯一地址 (GUA) 作为下一中继段,那么需要通过分配给该网络的子网来实现,而不" "是通过此参数来实现。" msgid "You must implement __call__" msgstr "必须实现 __call__" msgid "" "You must provide a config file for bridge - either --config-file or " "env[NEUTRON_TEST_CONFIG_FILE]" msgstr "" "必须为网桥提供配置文件 - --config-file 或 env[NEUTRON_TEST_CONFIG_FILE]" msgid "You must provide a revision or relative delta" msgstr "必须提供修订或相对变化量" msgid "a subnetpool must be specified in the absence of a cidr" msgstr "缺少 CIDR 时必须指定子网池" msgid "add_ha_port cannot be called inside of a transaction." msgstr "不能在事务内部调用 add_ha_port。" msgid "allocation_pools allowed only for specific subnet requests." msgstr "仅允许将 allocation_pools 用于特定子网请求。" msgid "allocation_pools are not in the subnet" msgstr "allocation_pools 不在子网内" msgid "allocation_pools use the wrong ip version" msgstr "allocation_pools 使用错误的 IP 版本" msgid "already a synthetic attribute" msgstr "已是综合属性" msgid "binding:profile value too large" msgstr "binding:profile 值太大" #, python-format msgid "cannot perform %(event)s due to %(reason)s" msgstr "无法执行 %(event)s,因为 %(reason)s" msgid "cidr and prefixlen must not be supplied together" msgstr "不得同时指定 cidr 和 prefixlen" #, python-format msgid "dhcp_agents_per_network must be >= 1. '%s' is invalid." 
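# NOTE (illustrative, not an upstream message): the metadata proxy secret
# described above must be configured identically on both services; the value
# below is a placeholder:
#
#   # /etc/neutron/metadata_agent.ini
#   [DEFAULT]
#   metadata_proxy_shared_secret = REPLACE_WITH_SHARED_SECRET
#
#   # /etc/nova/nova.conf
#   [neutron]
#   metadata_proxy_shared_secret = REPLACE_WITH_SHARED_SECRET
#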
msgstr "dhcp_agents_per_network 必须大于等于 1。“%s”无效。" msgid "dns_domain cannot be specified without a dns_name" msgstr "不能在没有 dns_name 的情况下指定 dns_domain" msgid "dns_name cannot be specified without a dns_domain" msgstr "不能在没有 dns_domain 的情况下指定 dns_name" msgid "fixed_ip_address cannot be specified without a port_id" msgstr "在没有 port_id 的情况下,无法指定 fixed_ip_address" #, python-format msgid "gateway_ip %s is not in the subnet" msgstr "gateway_ip %s 不在子网内" #, python-format msgid "has device owner %s" msgstr "具有设备所有者 %s" msgid "in use" msgstr "正在使用" #, python-format msgid "ip command failed on device %(dev_name)s: %(reason)s" msgstr "对设备 %(dev_name)s 执行 IP 命令失败:%(reason)s" #, python-format msgid "ip command failed: %(reason)s" msgstr "ip 命令失败:%(reason)s" #, python-format msgid "ip link capability %(capability)s is not supported" msgstr "IP 链接功能 %(capability)s 不受支持" #, python-format msgid "ip link command is not supported: %(reason)s" msgstr "ip 链路命令不受支持:%(reason)s" msgid "ip_version must be specified in the absence of cidr and subnetpool_id" msgstr "在缺少 cidr 和 subnetpool_id 的情况下,必须指定 ip_version" msgid "ipv6_address_mode is not valid when ip_version is 4" msgstr "ip_version 为 4 时,ipv6_address_mode 无效" msgid "ipv6_ra_mode is not valid when ip_version is 4" msgstr "ip_version 为 4 时,ipv6_ra_mode 无效" msgid "" "ipv6_ra_mode or ipv6_address_mode cannot be set when enable_dhcp is set to " "False." msgstr "" "当 enable_dhcp 设置为 False 时,无法设置 ipv6_ra_mode 或 ipv6_address_mode。" #, python-format msgid "" "ipv6_ra_mode set to '%(ra_mode)s' with ipv6_address_mode set to " "'%(addr_mode)s' is not valid. If both attributes are set, they must be the " "same value" msgstr "" "设置为“%(ra_mode)s”的 ipv6_ra_mode(在 ipv6_address_mode 设置" "为“%(addr_mode)s”的情况下)无效。如果设置了这两个属性,那么它们必须为同一个值" msgid "mac address update" msgstr "MAC 地址更新" #, python-format msgid "" "max_l3_agents_per_router %(max_agents)s config parameter is not valid. It " "has to be greater than or equal to min_l3_agents_per_router %(min_agents)s." msgstr "" "max_l3_agents_per_router %(max_agents)s 配置参数无效。它必须大于或等于 " "min_l3_agents_per_router %(min_agents)s。" #, python-format msgid "" "min_l3_agents_per_router config parameter is not valid. It has to be greater " "than or equal to %s for HA." msgstr "" "min_l3_agents_per_router 配置参加不是有效的。它必须大于或等于%s才能确保 HA。" msgid "must provide exactly 2 arguments - cidr and MAC" msgstr "必须提供正好 2 个自变量:cidr 和 MAC" msgid "network_type required" msgstr "需要 network_type" #, python-format msgid "network_type value '%s' not supported" msgstr "网络类型值“%s”不受支持" msgid "new subnet" msgstr "新子网" #, python-format msgid "physical_network '%s' unknown for VLAN provider network" msgstr "对于 VLAN 供应商网络,physical_network“%s”未知" #, python-format msgid "physical_network '%s' unknown for flat provider network" msgstr "平面供应商网络的物理网络“%s”为未知状态" msgid "physical_network required for flat provider network" msgstr "平面供应商网络需要的物理网络" #, python-format msgid "provider:physical_network specified for %s network" msgstr "对 %s 网络指定的 provider:physical_network" #, python-format msgid "rbac_db_model not found in %s" msgstr "在 %s 中找不到 rbac_db_model" msgid "record" msgstr "记录" msgid "respawn_interval must be >= 0 if provided." 
msgstr "respawn_interval 必须大于或等于 0(如果已提供此项)。" #, python-format msgid "segmentation_id out of range (%(min)s through %(max)s)" msgstr "segmentation_id 超出从 (%(min)s 到 %(max)s) 的范围" msgid "segmentation_id requires physical_network for VLAN provider network" msgstr "segmentation_id 需要 VLAN 供应商网络的 physical_network" msgid "shared attribute switching to synthetic" msgstr "共享属性正切换为综合属性" #, python-format msgid "" "subnetpool %(subnetpool_id)s cannot be updated when associated with shared " "address scope %(address_scope_id)s" msgstr "" "当子网池 %(subnetpool_id)s 与共享地址范围 %(address_scope_id)s 相关联时,将无" "法更新该子网池" msgid "subnetpool_id and use_default_subnetpool cannot both be specified" msgstr "不能同时指定 subnetpool_id 和 use_default_subnetpool" msgid "the nexthop is not connected with router" msgstr "下一中继段未与路由器连接" msgid "the nexthop is used by router" msgstr "路由器已使用下一中继段" #, python-format msgid "unable to load %s" msgstr "无法装入 %s" msgid "" "uuid provided from the command line so external_process can track us via /" "proc/cmdline interface." msgstr "" "在命令行中提供了 uuid,以便 external_process 可通过 /proc/cmdline 接口跟踪我" "们。" neutron-8.4.0/neutron/extensions/0000775000567000056710000000000013044373210020244 5ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/extensions/extra_dhcp_opt.py0000664000567000056710000000646413044372760023644 0ustar jenkinsjenkins00000000000000# Copyright (c) 2013 OpenStack Foundation. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
from neutron._i18n import _ from neutron.api import extensions from neutron.api.v2 import attributes as attr from neutron.common import exceptions # ExtraDHcpOpts Exceptions class ExtraDhcpOptNotFound(exceptions.NotFound): message = _("ExtraDhcpOpt %(id)s could not be found") class ExtraDhcpOptBadData(exceptions.InvalidInput): message = _("Invalid data format for extra-dhcp-opt: %(data)s") # Valid blank extra dhcp opts VALID_BLANK_EXTRA_DHCP_OPTS = ('router', 'classless-static-route') # Common definitions for maximum string field length DHCP_OPT_NAME_MAX_LEN = 64 DHCP_OPT_VALUE_MAX_LEN = 255 EXTRA_DHCP_OPT_KEY_SPECS = { 'id': {'type:uuid': None, 'required': False}, 'opt_name': {'type:not_empty_string': DHCP_OPT_NAME_MAX_LEN, 'required': True}, 'opt_value': {'type:not_empty_string_or_none': DHCP_OPT_VALUE_MAX_LEN, 'required': True}, 'ip_version': {'convert_to': attr.convert_to_int, 'type:values': [4, 6], 'required': False} } def _validate_extra_dhcp_opt(data, key_specs=None): if data is not None: if not isinstance(data, list): raise ExtraDhcpOptBadData(data=data) for d in data: if d['opt_name'] in VALID_BLANK_EXTRA_DHCP_OPTS: msg = attr._validate_string_or_none(d['opt_value'], DHCP_OPT_VALUE_MAX_LEN) else: msg = attr._validate_dict(d, key_specs) if msg: raise ExtraDhcpOptBadData(data=msg) attr.validators['type:list_of_extra_dhcp_opts'] = _validate_extra_dhcp_opt # Attribute Map EXTRADHCPOPTS = 'extra_dhcp_opts' CLIENT_ID = "client-id" EXTENDED_ATTRIBUTES_2_0 = { 'ports': { EXTRADHCPOPTS: { 'allow_post': True, 'allow_put': True, 'is_visible': True, 'default': None, 'validate': { 'type:list_of_extra_dhcp_opts': EXTRA_DHCP_OPT_KEY_SPECS } } } } class Extra_dhcp_opt(extensions.ExtensionDescriptor): @classmethod def get_name(cls): return "Neutron Extra DHCP opts" @classmethod def get_alias(cls): return "extra_dhcp_opt" @classmethod def get_description(cls): return ("Extra options configuration for DHCP. " "For example PXE boot options to DHCP clients can " "be specified (e.g. tftp-server, server-ip-address, " "bootfile-name)") @classmethod def get_updated(cls): return "2013-03-17T12:00:00-00:00" def get_extended_resources(self, version): if version == "2.0": return EXTENDED_ATTRIBUTES_2_0 else: return {} neutron-8.4.0/neutron/extensions/dvr.py0000664000567000056710000000506313044372760021426 0ustar jenkinsjenkins00000000000000# Copyright (c) 2014 OpenStack Foundation. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
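# Illustrative sketch, not part of the upstream module: this extension adds a
# 'distributed' flag to routers. A create request exercising it could carry a
# body like the following (the router name is a hypothetical example):
_EXAMPLE_DVR_ROUTER_BODY = {
    'router': {'name': 'router1', 'admin_state_up': True, 'distributed': True}
}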
import abc import six from neutron._i18n import _ from neutron.api import extensions from neutron.api.v2 import attributes from neutron.common import constants from neutron.common import exceptions DISTRIBUTED = 'distributed' EXTENDED_ATTRIBUTES_2_0 = { 'routers': { DISTRIBUTED: {'allow_post': True, 'allow_put': True, 'is_visible': True, 'default': attributes.ATTR_NOT_SPECIFIED, 'convert_to': attributes.convert_to_boolean_if_not_none, 'enforce_policy': True}, } } class DVRMacAddressNotFound(exceptions.NotFound): message = _("Distributed Virtual Router Mac Address for " "host %(host)s does not exist.") class MacAddressGenerationFailure(exceptions.ServiceUnavailable): message = _("Unable to generate unique DVR mac for host %(host)s.") class Dvr(extensions.ExtensionDescriptor): """Extension class supporting distributed virtual router.""" @classmethod def get_name(cls): return "Distributed Virtual Router" @classmethod def get_alias(cls): return constants.L3_DISTRIBUTED_EXT_ALIAS @classmethod def get_description(cls): return "Enables configuration of Distributed Virtual Routers." @classmethod def get_updated(cls): return "2014-06-01T10:00:00-00:00" def get_required_extensions(self): return ["router"] @classmethod def get_resources(cls): """Returns Ext Resources.""" return [] def get_extended_resources(self, version): if version == "2.0": return EXTENDED_ATTRIBUTES_2_0 else: return {} @six.add_metaclass(abc.ABCMeta) class DVRMacAddressPluginBase(object): @abc.abstractmethod def get_dvr_mac_address_list(self, context): pass @abc.abstractmethod def get_dvr_mac_address_by_host(self, context, host): pass neutron-8.4.0/neutron/extensions/l3_ext_ha_mode.py0000664000567000056710000001002413044372760023476 0ustar jenkinsjenkins00000000000000# Copyright (C) 2014 eNovance SAS # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
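# Illustrative sketch, not part of the upstream module: this extension adds an
# 'ha' flag to routers, analogous to the 'distributed' flag in dvr.py. A create
# request exercising it could carry (the router name is a hypothetical example):
_EXAMPLE_HA_ROUTER_BODY = {'router': {'name': 'router1', 'ha': True}}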
# from neutron._i18n import _ from neutron.api import extensions from neutron.api.v2 import attributes from neutron.common import constants from neutron.common import exceptions HA_INFO = 'ha' EXTENDED_ATTRIBUTES_2_0 = { 'routers': { HA_INFO: {'allow_post': True, 'allow_put': True, 'default': attributes.ATTR_NOT_SPECIFIED, 'is_visible': True, 'enforce_policy': True, 'convert_to': attributes.convert_to_boolean_if_not_none} } } class HAmodeUpdateOfDvrNotSupported(NotImplementedError): message = _("Currently update of HA mode for a distributed router is " "not supported.") class DVRmodeUpdateOfHaNotSupported(NotImplementedError): message = _("Currently update of distributed mode for an HA router is " "not supported.") class HAmodeUpdateOfDvrHaNotSupported(NotImplementedError): message = _("Currently update of HA mode for a DVR/HA router is " "not supported.") class DVRmodeUpdateOfDvrHaNotSupported(NotImplementedError): message = _("Currently update of distributed mode for a DVR/HA router " "is not supported") class UpdateToDvrHamodeNotSupported(NotImplementedError): message = _("Currently updating a router to DVR/HA is not supported.") class UpdateToNonDvrHamodeNotSupported(NotImplementedError): message = _("Currently updating a router from DVR/HA to non-DVR " " non-HA is not supported.") class MaxVRIDAllocationTriesReached(exceptions.NeutronException): message = _("Failed to allocate a VRID in the network %(network_id)s " "for the router %(router_id)s after %(max_tries)s tries.") class NoVRIDAvailable(exceptions.Conflict): message = _("No more Virtual Router Identifier (VRID) available when " "creating router %(router_id)s. The limit of number " "of HA Routers per tenant is 254.") class HANetworkCIDRNotValid(exceptions.NeutronException): message = _("The HA Network CIDR specified in the configuration file " "isn't valid; %(cidr)s.") class HANotEnoughAvailableAgents(exceptions.NeutronException): message = _("Not enough l3 agents available to ensure HA. Minimum " "required %(min_agents)s, available %(num_agents)s.") class HAMaximumAgentsNumberNotValid(exceptions.NeutronException): message = _("max_l3_agents_per_router %(max_agents)s config parameter " "is not valid. It has to be greater than or equal to " "min_l3_agents_per_router %(min_agents)s.") class HAMinimumAgentsNumberNotValid(exceptions.NeutronException): message = (_("min_l3_agents_per_router config parameter is not valid. " "It has to be greater than or equal to %s for HA.") % constants.MINIMUM_MINIMUM_AGENTS_FOR_HA) class L3_ext_ha_mode(extensions.ExtensionDescriptor): """Extension class supporting virtual router in HA mode.""" @classmethod def get_name(cls): return "HA Router extension" @classmethod def get_alias(cls): return constants.L3_HA_MODE_EXT_ALIAS @classmethod def get_description(cls): return "Add HA capability to routers." @classmethod def get_updated(cls): return "2014-04-26T00:00:00-00:00" def get_extended_resources(self, version): if version == "2.0": return EXTENDED_ATTRIBUTES_2_0 else: return {} neutron-8.4.0/neutron/extensions/timestamp_core.py0000664000567000056710000000372013044372760023644 0ustar jenkinsjenkins00000000000000# Copyright 2015 HuaWei Technologies. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron.api import extensions # Attribute Map CREATED = 'created_at' UPDATED = 'updated_at' TIMESTAMP_BODY = { CREATED: {'allow_post': False, 'allow_put': False, 'is_visible': True, 'default': None }, UPDATED: {'allow_post': False, 'allow_put': False, 'is_visible': True, 'default': None }, } EXTENDED_ATTRIBUTES_2_0 = { 'networks': TIMESTAMP_BODY, 'subnets': TIMESTAMP_BODY, 'ports': TIMESTAMP_BODY, 'subnetpools': TIMESTAMP_BODY, } class Timestamp_core(extensions.ExtensionDescriptor): """Extension class supporting timestamp. This class is used by neutron's extension framework for adding timestamp to neutron core resources. """ @classmethod def get_name(cls): return "Time Stamp Fields addition for core resources" @classmethod def get_alias(cls): return "timestamp_core" @classmethod def get_description(cls): return ("This extension can be used for recording " "create/update timestamps for core resources " "like port/subnet/network/subnetpools.") @classmethod def get_updated(cls): return "2016-03-01T10:00:00-00:00" def get_extended_resources(self, version): if version == "2.0": return EXTENDED_ATTRIBUTES_2_0 else: return {} neutron-8.4.0/neutron/extensions/portbindings.py0000664000567000056710000001435713044372760023343 0ustar jenkinsjenkins00000000000000# Copyright (c) 2012 OpenStack Foundation. # All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron.api import extensions from neutron.api.v2 import attributes # The type of vnic that this port should be attached to VNIC_TYPE = 'binding:vnic_type' # The service will return the vif type for the specific port. VIF_TYPE = 'binding:vif_type' # The service may return a dictionary containing additional # information needed by the interface driver. The set of items # returned may depend on the value of VIF_TYPE. VIF_DETAILS = 'binding:vif_details' # In some cases different implementations may be run on different hosts. # The host on which the port will be allocated. HOST_ID = 'binding:host_id' # The profile will be a dictionary that enables the application running # on the specific host to pass and receive vif port specific information to # the plugin. PROFILE = 'binding:profile' # The keys below are used in the VIF_DETAILS attribute to convey # information to the VIF driver. # TODO(rkukura): Replace CAP_PORT_FILTER, which nova no longer # understands, with the new set of VIF security details to be used in # the VIF_DETAILS attribute. 
# # - port_filter : Boolean value indicating Neutron provides port filtering # features such as security group and anti MAC/IP spoofing # - ovs_hybrid_plug: Boolean used to inform Nova that the hybrid plugging # strategy for OVS should be used CAP_PORT_FILTER = 'port_filter' OVS_HYBRID_PLUG = 'ovs_hybrid_plug' VIF_DETAILS_VLAN = 'vlan' VIF_DETAILS_MACVTAP_SOURCE = 'macvtap_source' VIF_DETAILS_MACVTAP_MODE = 'macvtap_mode' VIF_DETAILS_PHYSICAL_INTERFACE = 'physical_interface' # The keys below are used in the VIF_DETAILS attribute to convey # information related to the configuration of the vhost-user VIF driver. # - vhost_user_mode: String value used to declare the mode of a # vhost-user socket VHOST_USER_MODE = 'vhostuser_mode' # - server: socket created by hypervisor VHOST_USER_MODE_SERVER = 'server' # - client: socket created by vswitch VHOST_USER_MODE_CLIENT = 'client' # - vhostuser_socket String value used to declare the vhostuser socket name VHOST_USER_SOCKET = 'vhostuser_socket' # - vhost_user_ovs_plug: Boolean used to inform Nova that the ovs plug # method should be used when binding the # vhost-user vif. VHOST_USER_OVS_PLUG = 'vhostuser_ovs_plug' # VIF_TYPE: vif_types are required by Nova to determine which vif_driver to # use to attach a virtual server to the network # - vhost-user: The vhost-user interface type is a standard virtio interface # provided by qemu 2.1+. This constant defines the neutron side # of the vif binding type to provide a common definition # to enable reuse in multiple agents and drivers. VIF_TYPE_VHOST_USER = 'vhostuser' VIF_TYPE_UNBOUND = 'unbound' VIF_TYPE_BINDING_FAILED = 'binding_failed' VIF_TYPE_DISTRIBUTED = 'distributed' VIF_TYPE_OVS = 'ovs' VIF_TYPE_BRIDGE = 'bridge' VIF_TYPE_OTHER = 'other' # vif_type_macvtap: Tells Nova that the macvtap vif_driver should be used to # create a vif. It does not require the VNIC_TYPE_MACVTAP, # which is defined further below. E.g. Macvtap agent uses # vnic_type 'normal'. VIF_TYPE_MACVTAP = 'macvtap' # VNIC_TYPE: It's used to determine which mechanism driver to use to bind a # port. It can be specified via the Neutron API. Default is normal, # used by OVS and LinuxBridge agent. VNIC_NORMAL = 'normal' VNIC_DIRECT = 'direct' VNIC_MACVTAP = 'macvtap' VNIC_BAREMETAL = 'baremetal' VNIC_DIRECT_PHYSICAL = 'direct-physical' VNIC_TYPES = [VNIC_NORMAL, VNIC_DIRECT, VNIC_MACVTAP, VNIC_BAREMETAL, VNIC_DIRECT_PHYSICAL] EXTENDED_ATTRIBUTES_2_0 = { 'ports': { VIF_TYPE: {'allow_post': False, 'allow_put': False, 'default': attributes.ATTR_NOT_SPECIFIED, 'enforce_policy': True, 'is_visible': True}, VIF_DETAILS: {'allow_post': False, 'allow_put': False, 'default': attributes.ATTR_NOT_SPECIFIED, 'enforce_policy': True, 'is_visible': True}, VNIC_TYPE: {'allow_post': True, 'allow_put': True, 'default': VNIC_NORMAL, 'is_visible': True, 'validate': {'type:values': VNIC_TYPES}, 'enforce_policy': True}, HOST_ID: {'allow_post': True, 'allow_put': True, 'default': attributes.ATTR_NOT_SPECIFIED, 'is_visible': True, 'enforce_policy': True}, PROFILE: {'allow_post': True, 'allow_put': True, 'default': attributes.ATTR_NOT_SPECIFIED, 'enforce_policy': True, 'validate': {'type:dict_or_none': None}, 'is_visible': True}, } } class Portbindings(extensions.ExtensionDescriptor): """Extension class supporting port bindings. This class is used by neutron's extension framework to make metadata about the port bindings available to external applications. With admin rights one will be able to update and read the values. 
""" @classmethod def get_name(cls): return "Port Binding" @classmethod def get_alias(cls): return "binding" @classmethod def get_description(cls): return "Expose port bindings of a virtual port to external application" @classmethod def get_updated(cls): return "2014-02-03T10:00:00-00:00" def get_extended_resources(self, version): if version == "2.0": return EXTENDED_ATTRIBUTES_2_0 else: return {} neutron-8.4.0/neutron/extensions/network_availability_zone.py0000664000567000056710000000367313044372760026116 0ustar jenkinsjenkins00000000000000# # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import abc import six from neutron.api import extensions from neutron.extensions import availability_zone as az_ext EXTENDED_ATTRIBUTES_2_0 = { 'networks': { az_ext.AVAILABILITY_ZONES: {'allow_post': False, 'allow_put': False, 'is_visible': True}, az_ext.AZ_HINTS: { 'allow_post': True, 'allow_put': False, 'is_visible': True, 'validate': {'type:availability_zone_hints': None}, 'default': []}}, } class Network_availability_zone(extensions.ExtensionDescriptor): """Network availability zone extension.""" @classmethod def get_name(cls): return "Network Availability Zone" @classmethod def get_alias(cls): return "network_availability_zone" @classmethod def get_description(cls): return "Availability zone support for network." @classmethod def get_updated(cls): return "2015-01-01T10:00:00-00:00" def get_required_extensions(self): return ["availability_zone"] def get_extended_resources(self, version): if version == "2.0": return EXTENDED_ATTRIBUTES_2_0 else: return {} @six.add_metaclass(abc.ABCMeta) class NetworkAvailabilityZonePluginBase(object): @abc.abstractmethod def get_network_availability_zones(self, network): """Return availability zones which a network belongs to""" neutron-8.4.0/neutron/extensions/l3.py0000664000567000056710000002457213044372760021157 0ustar jenkinsjenkins00000000000000# Copyright 2012 VMware, Inc. # All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import abc from oslo_config import cfg from neutron._i18n import _ from neutron.api import extensions from neutron.api.v2 import attributes as attr from neutron.api.v2 import resource_helper from neutron.common import exceptions as nexception from neutron.pecan_wsgi import controllers from neutron.plugins.common import constants # L3 Exceptions class RouterNotFound(nexception.NotFound): message = _("Router %(router_id)s could not be found") class RouterInUse(nexception.InUse): message = _("Router %(router_id)s %(reason)s") def __init__(self, **kwargs): if 'reason' not in kwargs: kwargs['reason'] = "still has ports" super(RouterInUse, self).__init__(**kwargs) class RouterInterfaceNotFound(nexception.NotFound): message = _("Router %(router_id)s does not have " "an interface with id %(port_id)s") class RouterInterfaceNotFoundForSubnet(nexception.NotFound): message = _("Router %(router_id)s has no interface " "on subnet %(subnet_id)s") class RouterInterfaceInUseByFloatingIP(nexception.InUse): message = _("Router interface for subnet %(subnet_id)s on router " "%(router_id)s cannot be deleted, as it is required " "by one or more floating IPs.") class FloatingIPNotFound(nexception.NotFound): message = _("Floating IP %(floatingip_id)s could not be found") class ExternalGatewayForFloatingIPNotFound(nexception.NotFound): message = _("External network %(external_network_id)s is not reachable " "from subnet %(subnet_id)s. Therefore, cannot associate " "Port %(port_id)s with a Floating IP.") class FloatingIPPortAlreadyAssociated(nexception.InUse): message = _("Cannot associate floating IP %(floating_ip_address)s " "(%(fip_id)s) with port %(port_id)s " "using fixed IP %(fixed_ip)s, as that fixed IP already " "has a floating IP on external network %(net_id)s.") class RouterExternalGatewayInUseByFloatingIp(nexception.InUse): message = _("Gateway cannot be updated for router %(router_id)s, since a " "gateway to external network %(net_id)s is required by one or " "more floating IPs.") ROUTERS = 'routers' FLOATINGIP = 'floatingip' FLOATINGIPS = '%ss' % FLOATINGIP EXTERNAL_GW_INFO = 'external_gateway_info' FLOATINGIPS = 'floatingips' RESOURCE_ATTRIBUTE_MAP = { ROUTERS: { 'id': {'allow_post': False, 'allow_put': False, 'validate': {'type:uuid': None}, 'is_visible': True, 'primary_key': True}, 'name': {'allow_post': True, 'allow_put': True, 'validate': {'type:string': attr.NAME_MAX_LEN}, 'is_visible': True, 'default': ''}, 'admin_state_up': {'allow_post': True, 'allow_put': True, 'default': True, 'convert_to': attr.convert_to_boolean, 'is_visible': True}, 'status': {'allow_post': False, 'allow_put': False, 'is_visible': True}, 'tenant_id': {'allow_post': True, 'allow_put': False, 'required_by_policy': True, 'validate': {'type:string': attr.TENANT_ID_MAX_LEN}, 'is_visible': True}, EXTERNAL_GW_INFO: {'allow_post': True, 'allow_put': True, 'is_visible': True, 'default': None, 'enforce_policy': True, 'validate': { 'type:dict_or_nodata': { 'network_id': {'type:uuid': None, 'required': True}, 'external_fixed_ips': { 'convert_list_to': attr.convert_kvp_list_to_dict, 'type:fixed_ips': None, 'default': None, 'required': False, } } }} }, FLOATINGIPS: { 'id': {'allow_post': False, 'allow_put': False, 'validate': {'type:uuid': None}, 'is_visible': True, 'primary_key': True}, 'floating_ip_address': {'allow_post': True, 'allow_put': False, 'validate': {'type:ip_address_or_none': None}, 'is_visible': True, 'default': None, 'enforce_policy': True}, 'subnet_id': {'allow_post': True, 'allow_put': False, 'validate': 
{'type:uuid_or_none': None}, 'is_visible': False, # Use False for input only attr 'default': None}, 'floating_network_id': {'allow_post': True, 'allow_put': False, 'validate': {'type:uuid': None}, 'is_visible': True}, 'router_id': {'allow_post': False, 'allow_put': False, 'validate': {'type:uuid_or_none': None}, 'is_visible': True, 'default': None}, 'port_id': {'allow_post': True, 'allow_put': True, 'validate': {'type:uuid_or_none': None}, 'is_visible': True, 'default': None, 'required_by_policy': True}, 'fixed_ip_address': {'allow_post': True, 'allow_put': True, 'validate': {'type:ip_address_or_none': None}, 'is_visible': True, 'default': None}, 'tenant_id': {'allow_post': True, 'allow_put': False, 'required_by_policy': True, 'validate': {'type:string': attr.TENANT_ID_MAX_LEN}, 'is_visible': True}, 'status': {'allow_post': False, 'allow_put': False, 'is_visible': True}, }, } l3_quota_opts = [ cfg.IntOpt('quota_router', default=10, help=_('Number of routers allowed per tenant. ' 'A negative value means unlimited.')), cfg.IntOpt('quota_floatingip', default=50, help=_('Number of floating IPs allowed per tenant. ' 'A negative value means unlimited.')), ] cfg.CONF.register_opts(l3_quota_opts, 'QUOTAS') class L3(extensions.ExtensionDescriptor): @classmethod def get_name(cls): return "Neutron L3 Router" @classmethod def get_alias(cls): return "router" @classmethod def get_description(cls): return ("Router abstraction for basic L3 forwarding" " between L2 Neutron networks and access to external" " networks via a NAT gateway.") @classmethod def get_updated(cls): return "2012-07-20T10:00:00-00:00" @classmethod def get_resources(cls): """Returns Ext Resources.""" plural_mappings = resource_helper.build_plural_mappings( {}, RESOURCE_ATTRIBUTE_MAP) plural_mappings['external_fixed_ips'] = 'external_fixed_ip' attr.PLURALS.update(plural_mappings) action_map = {'router': {'add_router_interface': 'PUT', 'remove_router_interface': 'PUT'}} return resource_helper.build_resource_info(plural_mappings, RESOURCE_ATTRIBUTE_MAP, constants.L3_ROUTER_NAT, action_map=action_map, register_quota=True) def update_attributes_map(self, attributes): super(L3, self).update_attributes_map( attributes, extension_attrs_map=RESOURCE_ATTRIBUTE_MAP) @classmethod def get_pecan_controllers(cls): return ((ROUTERS, controllers.RoutersController()), (FLOATINGIPS, controllers.CollectionsController(FLOATINGIPS, FLOATINGIP))) def get_extended_resources(self, version): if version == "2.0": return RESOURCE_ATTRIBUTE_MAP else: return {} class RouterPluginBase(object): @abc.abstractmethod def create_router(self, context, router): pass @abc.abstractmethod def update_router(self, context, id, router): pass @abc.abstractmethod def get_router(self, context, id, fields=None): pass @abc.abstractmethod def delete_router(self, context, id): pass @abc.abstractmethod def get_routers(self, context, filters=None, fields=None, sorts=None, limit=None, marker=None, page_reverse=False): pass @abc.abstractmethod def add_router_interface(self, context, router_id, interface_info): pass @abc.abstractmethod def remove_router_interface(self, context, router_id, interface_info): pass @abc.abstractmethod def create_floatingip(self, context, floatingip): pass @abc.abstractmethod def update_floatingip(self, context, id, floatingip): pass @abc.abstractmethod def get_floatingip(self, context, id, fields=None): pass @abc.abstractmethod def delete_floatingip(self, context, id): pass @abc.abstractmethod def get_floatingips(self, context, filters=None, fields=None, 
sorts=None, limit=None, marker=None, page_reverse=False): pass def get_routers_count(self, context, filters=None): raise NotImplementedError() def get_floatingips_count(self, context, filters=None): raise NotImplementedError() neutron-8.4.0/neutron/extensions/standardattrdescription.py0000664000567000056710000000344113044372760025570 0ustar jenkinsjenkins00000000000000# Copyright 2016 OpenStack Foundation # All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron.api import extensions from neutron.api.v2 import attributes as attr EXTENDED_ATTRIBUTES_2_0 = {} for resource in ('security_group_rules', 'security_groups', 'ports', 'subnets', 'networks', 'routers', 'floatingips', 'subnetpools'): EXTENDED_ATTRIBUTES_2_0[resource] = { 'description': {'allow_post': True, 'allow_put': True, 'validate': {'type:string': attr.DESCRIPTION_MAX_LEN}, 'is_visible': True, 'default': ''}, } class Standardattrdescription(extensions.ExtensionDescriptor): @classmethod def get_name(cls): return "standard-attr-description" @classmethod def get_alias(cls): return "standard-attr-description" @classmethod def get_description(cls): return "Extension to add descriptions to standard attributes" @classmethod def get_updated(cls): return "2016-02-10T10:00:00-00:00" def get_optional_extensions(self): return ['security-group', 'router'] def get_extended_resources(self, version): if version == "2.0": return dict(EXTENDED_ATTRIBUTES_2_0.items()) return {} neutron-8.4.0/neutron/extensions/extraroute.py0000664000567000056710000000437513044372760023042 0ustar jenkinsjenkins00000000000000# Copyright 2013, Nachi Ueno, NTT MCL, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron._i18n import _ from neutron.api import extensions from neutron.api.v2 import attributes as attr from neutron.common import exceptions as nexception # Extra Routes Exceptions class InvalidRoutes(nexception.InvalidInput): message = _("Invalid format for routes: %(routes)s, %(reason)s") class RouterInterfaceInUseByRoute(nexception.InUse): message = _("Router interface for subnet %(subnet_id)s on router " "%(router_id)s cannot be deleted, as it is required " "by one or more routes.") class RoutesExhausted(nexception.BadRequest): message = _("Unable to complete operation for %(router_id)s. 
" "The number of routes exceeds the maximum %(quota)s.") # Attribute Map EXTENDED_ATTRIBUTES_2_0 = { 'routers': { 'routes': {'allow_post': False, 'allow_put': True, 'validate': {'type:hostroutes': None}, 'convert_to': attr.convert_none_to_empty_list, 'is_visible': True, 'default': attr.ATTR_NOT_SPECIFIED}, } } class Extraroute(extensions.ExtensionDescriptor): @classmethod def get_name(cls): return "Neutron Extra Route" @classmethod def get_alias(cls): return "extraroute" @classmethod def get_description(cls): return "Extra routes configuration for L3 router" @classmethod def get_updated(cls): return "2013-02-01T10:00:00-00:00" def get_extended_resources(self, version): if version == "2.0": attr.PLURALS.update({'routes': 'route'}) return EXTENDED_ATTRIBUTES_2_0 else: return {} neutron-8.4.0/neutron/extensions/external_net.py0000664000567000056710000000400313044372760023314 0ustar jenkinsjenkins00000000000000# Copyright (c) 2013 OpenStack Foundation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron._i18n import _ from neutron.api import extensions from neutron.api.v2 import attributes as attr from neutron.common import exceptions as nexception class ExternalNetworkInUse(nexception.InUse): message = _("External network %(net_id)s cannot be updated to be made " "non-external, since it has existing gateway ports") # For backward compatibility the 'router' prefix is kept. EXTERNAL = 'router:external' EXTENDED_ATTRIBUTES_2_0 = { 'networks': {EXTERNAL: {'allow_post': True, 'allow_put': True, 'default': False, 'is_visible': True, 'convert_to': attr.convert_to_boolean, 'enforce_policy': True, 'required_by_policy': True}}} class External_net(extensions.ExtensionDescriptor): @classmethod def get_name(cls): return "Neutron external network" @classmethod def get_alias(cls): return "external-net" @classmethod def get_description(cls): return _("Adds external network attribute to network resource.") @classmethod def get_updated(cls): return "2013-01-14T10:00:00-00:00" def get_extended_resources(self, version): if version == "2.0": return EXTENDED_ATTRIBUTES_2_0 else: return {} neutron-8.4.0/neutron/extensions/dns.py0000664000567000056710000002244013044372760021415 0ustar jenkinsjenkins00000000000000# Copyright (c) 2015 Rackspace # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import re from oslo_config import cfg import six from neutron._i18n import _ from neutron.api import extensions from neutron.api.v2 import attributes as attr from neutron.common import exceptions as n_exc from neutron.extensions import l3 DNS_LABEL_MAX_LEN = 63 DNS_LABEL_REGEX = "[a-z0-9-]{1,%d}$" % DNS_LABEL_MAX_LEN FQDN_MAX_LEN = 255 DNS_DOMAIN_DEFAULT = 'openstacklocal.' class DNSDomainNotFound(n_exc.NotFound): message = _("Domain %(dns_domain)s not found in the external DNS service") class DuplicateRecordSet(n_exc.Conflict): message = _("Name %(dns_name)s is duplicated in the external DNS service") class ExternalDNSDriverNotFound(n_exc.NotFound): message = _("External DNS driver %(driver)s could not be found.") class InvalidPTRZoneConfiguration(n_exc.Conflict): message = _("Value of %(parameter)s has to be multiple of %(number)s, " "with maximum value of %(maximum)s and minimum value of " "%(minimum)s") def _validate_dns_name(data, max_len=FQDN_MAX_LEN): msg = _validate_dns_format(data, max_len) if msg: return msg request_dns_name = _get_request_dns_name(data) if request_dns_name: msg = _validate_dns_name_with_dns_domain(request_dns_name) if msg: return msg def _validate_fip_dns_name(data, max_len=FQDN_MAX_LEN): msg = attr._validate_string(data) if msg: return msg if not data: return if data.endswith('.'): msg = _("'%s' is a FQDN. It should be a relative domain name") % data return msg msg = _validate_dns_format(data, max_len) if msg: return msg length = len(data) if length > max_len - 3: msg = _("'%(data)s' contains '%(length)s' characters. Adding a " "domain name will cause it to exceed the maximum length " "of a FQDN of '%(max_len)s'") % {"data": data, "length": length, "max_len": max_len} return msg def _validate_dns_domain(data, max_len=FQDN_MAX_LEN): msg = attr._validate_string(data) if msg: return msg if not data: return if not data.endswith('.'): msg = _("'%s' is not a FQDN") % data return msg msg = _validate_dns_format(data, max_len) if msg: return msg length = len(data) if length > max_len - 2: msg = _("'%(data)s' contains '%(length)s' characters. Adding a " "sub-domain will cause it to exceed the maximum length of a " "FQDN of '%(max_len)s'") % {"data": data, "length": length, "max_len": max_len} return msg def _validate_dns_format(data, max_len=FQDN_MAX_LEN): # NOTE: An individual name regex instead of an entire FQDN was used # because its easier to make correct. The logic should validate that the # dns_name matches RFC 1123 (section 2.1) and RFC 952. if not data: return try: # Trailing periods are allowed to indicate that a name is fully # qualified per RFC 1034 (page 7). trimmed = data if not data.endswith('.') else data[:-1] if len(trimmed) > 255: raise TypeError( _("'%s' exceeds the 255 character FQDN limit") % trimmed) names = trimmed.split('.') for name in names: if not name: raise TypeError(_("Encountered an empty component.")) if name.endswith('-') or name[0] == '-': raise TypeError( _("Name '%s' must not start or end with a hyphen.") % name) if not re.match(DNS_LABEL_REGEX, name): raise TypeError( _("Name '%s' must be 1-63 characters long, each of " "which can only be alphanumeric or a hyphen.") % name) # RFC 1123 hints that a TLD can't be all numeric. last is a TLD if # it's an FQDN. if len(names) > 1 and re.match("^[0-9]+$", names[-1]): raise TypeError(_("TLD '%s' must not be all numeric") % names[-1]) except TypeError as e: msg = _("'%(data)s' not a valid PQDN or FQDN. 
Reason: %(reason)s") % { 'data': data, 'reason': str(e)} return msg def _validate_dns_name_with_dns_domain(request_dns_name): # If a PQDN was passed, make sure the FQDN that will be generated is of # legal size dns_domain = _get_dns_domain() higher_labels = dns_domain if dns_domain: higher_labels = '.%s' % dns_domain higher_labels_len = len(higher_labels) dns_name_len = len(request_dns_name) if not request_dns_name.endswith('.'): if dns_name_len + higher_labels_len > FQDN_MAX_LEN: msg = _("The dns_name passed is a PQDN and its size is " "'%(dns_name_len)s'. The dns_domain option in " "neutron.conf is set to %(dns_domain)s, with a " "length of '%(higher_labels_len)s'. When the two are " "concatenated to form a FQDN (with a '.' at the end), " "the resulting length exceeds the maximum size " "of '%(fqdn_max_len)s'" ) % {'dns_name_len': dns_name_len, 'dns_domain': cfg.CONF.dns_domain, 'higher_labels_len': higher_labels_len, 'fqdn_max_len': FQDN_MAX_LEN} return msg return # A FQDN was passed if (dns_name_len <= higher_labels_len or not request_dns_name.endswith(higher_labels)): msg = _("The dns_name passed is a FQDN. Its higher level labels " "must be equal to the dns_domain option in neutron.conf, " "that has been set to '%(dns_domain)s'. It must also " "include one or more valid DNS labels to the left " "of '%(dns_domain)s'") % {'dns_domain': cfg.CONF.dns_domain} return msg def _get_dns_domain(): if not cfg.CONF.dns_domain: return '' if cfg.CONF.dns_domain.endswith('.'): return cfg.CONF.dns_domain return '%s.' % cfg.CONF.dns_domain def _get_request_dns_name(data): dns_domain = _get_dns_domain() if ((dns_domain and dns_domain != DNS_DOMAIN_DEFAULT)): return data return '' def convert_to_lowercase(data): if isinstance(data, six.string_types): return data.lower() msg = _("'%s' cannot be converted to lowercase string") % data raise n_exc.InvalidInput(error_message=msg) attr.validators['type:dns_name'] = (_validate_dns_name) attr.validators['type:fip_dns_name'] = (_validate_fip_dns_name) attr.validators['type:dns_domain'] = (_validate_dns_domain) DNSNAME = 'dns_name' DNSDOMAIN = 'dns_domain' DNSASSIGNMENT = 'dns_assignment' EXTENDED_ATTRIBUTES_2_0 = { 'ports': { DNSNAME: {'allow_post': True, 'allow_put': True, 'default': '', 'convert_to': convert_to_lowercase, 'validate': {'type:dns_name': FQDN_MAX_LEN}, 'is_visible': True}, DNSASSIGNMENT: {'allow_post': False, 'allow_put': False, 'is_visible': True}, }, l3.FLOATINGIPS: { DNSNAME: {'allow_post': True, 'allow_put': False, 'default': '', 'convert_to': convert_to_lowercase, 'validate': {'type:fip_dns_name': FQDN_MAX_LEN}, 'is_visible': True}, DNSDOMAIN: {'allow_post': True, 'allow_put': False, 'default': '', 'convert_to': convert_to_lowercase, 'validate': {'type:dns_domain': FQDN_MAX_LEN}, 'is_visible': True}, }, attr.NETWORKS: { DNSDOMAIN: {'allow_post': True, 'allow_put': True, 'default': '', 'convert_to': convert_to_lowercase, 'validate': {'type:dns_domain': FQDN_MAX_LEN}, 'is_visible': True}, }, } class Dns(extensions.ExtensionDescriptor): """Extension class supporting DNS Integration.""" @classmethod def get_name(cls): return "DNS Integration" @classmethod def get_alias(cls): return "dns-integration" @classmethod def get_description(cls): return "Provides integration with DNS." 
    @classmethod
    def get_updated(cls):
        return "2015-08-15T18:00:00-00:00"

    def get_required_extensions(self):
        return ["router"]

    def get_extended_resources(self, version):
        if version == "2.0":
            return EXTENDED_ATTRIBUTES_2_0
        else:
            return {}
neutron-8.4.0/neutron/extensions/multiprovidernet.py0000664000567000056710000001021613044372760024243 0ustar jenkinsjenkins00000000000000# Copyright (c) 2013 OpenStack Foundation.
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import webob.exc

from neutron._i18n import _
from neutron.api import extensions
from neutron.api.v2 import attributes as attr
from neutron.common import exceptions as nexception
from neutron.extensions import providernet as pnet

SEGMENTS = 'segments'


class SegmentsSetInConjunctionWithProviders(nexception.InvalidInput):
    message = _("Segments and provider values cannot both be set.")


class SegmentsContainDuplicateEntry(nexception.InvalidInput):
    message = _("Duplicate segment entry in request.")


def _convert_and_validate_segments(segments, valid_values=None):
    for segment in segments:
        segment.setdefault(pnet.NETWORK_TYPE, attr.ATTR_NOT_SPECIFIED)
        segment.setdefault(pnet.PHYSICAL_NETWORK, attr.ATTR_NOT_SPECIFIED)
        segmentation_id = segment.get(pnet.SEGMENTATION_ID)
        if segmentation_id:
            segment[pnet.SEGMENTATION_ID] = attr.convert_to_int(
                segmentation_id)
        else:
            segment[pnet.SEGMENTATION_ID] = attr.ATTR_NOT_SPECIFIED
        if len(segment.keys()) != 3:
            msg = (_("Unrecognized attribute(s) '%s'") %
                   ', '.join(set(segment.keys()) -
                             set([pnet.NETWORK_TYPE, pnet.PHYSICAL_NETWORK,
                                  pnet.SEGMENTATION_ID])))
            raise webob.exc.HTTPBadRequest(msg)


def check_duplicate_segments(segments, is_partial_func=None):
    """Helper function checking duplicate segments.

    If is_partial_func is specified and not None, then
    SegmentsContainDuplicateEntry is raised if two segments are identical
    and not partially defined (is_partial_func(segment) == False).
    Otherwise SegmentsContainDuplicateEntry is raised if two segments are
    identical.
    """
    if is_partial_func is not None:
        segments = [s for s in segments if not is_partial_func(s)]
    fully_specifieds = [tuple(sorted(s.items())) for s in segments]
    if len(set(fully_specifieds)) != len(fully_specifieds):
        raise SegmentsContainDuplicateEntry()


attr.validators['type:convert_segments'] = (
    _convert_and_validate_segments)


EXTENDED_ATTRIBUTES_2_0 = {
    'networks': {
        SEGMENTS: {'allow_post': True, 'allow_put': True,
                   'validate': {'type:convert_segments': None},
                   'convert_list_to': attr.convert_kvp_list_to_dict,
                   'default': attr.ATTR_NOT_SPECIFIED,
                   'enforce_policy': True,
                   'is_visible': True},
    }
}


class Multiprovidernet(extensions.ExtensionDescriptor):
    """Extension class supporting multiple provider networks.

    This class is used by neutron's extension framework to make
    metadata about the multiple provider network extension available to
    clients. No new resources are defined by this extension. Instead,
    the existing network resource's request and response messages are
    extended with 'segments' attribute.
With admin rights, network dictionaries returned will also include 'segments' attribute. """ @classmethod def get_name(cls): return "Multi Provider Network" @classmethod def get_alias(cls): return "multi-provider" @classmethod def get_description(cls): return ("Expose mapping of virtual networks to multiple physical " "networks") @classmethod def get_updated(cls): return "2013-06-27T10:00:00-00:00" def get_extended_resources(self, version): if version == "2.0": return EXTENDED_ATTRIBUTES_2_0 else: return {} neutron-8.4.0/neutron/extensions/agent.py0000664000567000056710000001254313044372760021732 0ustar jenkinsjenkins00000000000000# Copyright (c) 2013 OpenStack Foundation. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import abc from neutron._i18n import _ from neutron.api import extensions from neutron.api.v2 import attributes as attr from neutron.api.v2 import base from neutron.common import exceptions from neutron import manager # Attribute Map RESOURCE_NAME = 'agent' RESOURCE_ATTRIBUTE_MAP = { RESOURCE_NAME + 's': { 'id': {'allow_post': False, 'allow_put': False, 'validate': {'type:uuid': None}, 'is_visible': True}, 'agent_type': {'allow_post': False, 'allow_put': False, 'is_visible': True}, 'binary': {'allow_post': False, 'allow_put': False, 'is_visible': True}, 'topic': {'allow_post': False, 'allow_put': False, 'is_visible': True}, 'host': {'allow_post': False, 'allow_put': False, 'is_visible': True}, 'admin_state_up': {'allow_post': False, 'allow_put': True, 'convert_to': attr.convert_to_boolean, 'is_visible': True}, 'created_at': {'allow_post': False, 'allow_put': False, 'is_visible': True}, 'started_at': {'allow_post': False, 'allow_put': False, 'is_visible': True}, 'heartbeat_timestamp': {'allow_post': False, 'allow_put': False, 'is_visible': True}, 'alive': {'allow_post': False, 'allow_put': False, 'is_visible': True}, 'configurations': {'allow_post': False, 'allow_put': False, 'is_visible': True}, 'description': {'allow_post': False, 'allow_put': True, 'is_visible': True, 'validate': { 'type:string_or_none': attr.DESCRIPTION_MAX_LEN}}, }, } class AgentNotFound(exceptions.NotFound): message = _("Agent %(id)s could not be found") class AgentNotFoundByTypeHost(exceptions.NotFound): message = _("Agent with agent_type=%(agent_type)s and host=%(host)s " "could not be found") class MultipleAgentFoundByTypeHost(exceptions.Conflict): message = _("Multiple agents with agent_type=%(agent_type)s and " "host=%(host)s found") class Agent(extensions.ExtensionDescriptor): """Agent management extension.""" @classmethod def get_name(cls): return "agent" @classmethod def get_alias(cls): return "agent" @classmethod def get_description(cls): return "The agent management extension." 
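# ---------------------------------------------------------------------------
# Illustrative sketch, not part of the original module: how
# check_duplicate_segments() in multiprovidernet.py above detects
# duplicates. Every fully specified segment dict is normalized to a
# sorted item tuple, so identical segments collapse to one set entry
# regardless of key order:
segments = [
    {'provider:network_type': 'vlan',
     'provider:physical_network': 'physnet1',
     'provider:segmentation_id': 100},
    {'provider:segmentation_id': 100,
     'provider:physical_network': 'physnet1',
     'provider:network_type': 'vlan'},
]
fully_specifieds = [tuple(sorted(s.items())) for s in segments]
# The set is shorter than the list, so a duplicate would be reported.
assert len(fully_specifieds) == 2 and len(set(fully_specifieds)) == 1
# ---------------------------------------------------------------------------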
    @classmethod
    def get_updated(cls):
        return "2013-02-03T10:00:00-00:00"

    @classmethod
    def get_resources(cls):
        """Returns Ext Resources."""
        my_plurals = [(key, key[:-1]) for key in
                      RESOURCE_ATTRIBUTE_MAP.keys()]
        attr.PLURALS.update(dict(my_plurals))
        plugin = manager.NeutronManager.get_plugin()
        params = RESOURCE_ATTRIBUTE_MAP.get(RESOURCE_NAME + 's')
        controller = base.create_resource(RESOURCE_NAME + 's',
                                          RESOURCE_NAME,
                                          plugin, params
                                          )
        ex = extensions.ResourceExtension(RESOURCE_NAME + 's',
                                          controller)
        return [ex]

    def update_attributes_map(self, attributes):
        super(Agent, self).update_attributes_map(
            attributes, extension_attrs_map=RESOURCE_ATTRIBUTE_MAP)

    def get_extended_resources(self, version):
        if version == "2.0":
            return RESOURCE_ATTRIBUTE_MAP
        else:
            return {}


class AgentPluginBase(object):
    """REST API to operate the Agent.

    All methods must be called within an admin context.
    """

    def create_agent(self, context, agent):
        """Create agent.

        This operation is not allowed via the REST API.
        @raise exceptions.BadRequest:
        """
        raise exceptions.BadRequest()

    @abc.abstractmethod
    def delete_agent(self, context, id):
        """Delete agent.

        Agents register themselves on reporting state. But if an agent
        does not report its status for a long time (for example, because
        it is permanently dead), the admin can remove it. Agents must be
        disabled before being removed.
        """
        pass

    @abc.abstractmethod
    def update_agent(self, context, agent):
        """Disable or enable the agent.

        The description can also be updated. Some agents cannot be
        disabled, such as plugins or services; an error code should be
        reported in this case.
        @raise exceptions.BadRequest:
        """
        pass

    @abc.abstractmethod
    def get_agents(self, context, filters=None, fields=None):
        pass

    @abc.abstractmethod
    def get_agent(self, context, id, fields=None):
        pass
neutron-8.4.0/neutron/extensions/l3agentscheduler.py0000664000567000056710000001536413044372760024072 0ustar jenkinsjenkins00000000000000# Copyright (c) 2013 OpenStack Foundation.
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
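# ---------------------------------------------------------------------------
# Illustrative sketch, not part of the original tree: a plugin exposing
# the agent extension implements AgentPluginBase from agent.py above.
# The class below is a hypothetical skeleton; a real plugin backs these
# methods with the agents DB.
from neutron.extensions.agent import AgentNotFound, AgentPluginBase


class _SketchAgentPlugin(AgentPluginBase):  # hypothetical name

    def delete_agent(self, context, id):
        pass  # would remove the (previously disabled) agent record

    def update_agent(self, context, agent):
        pass  # would toggle admin_state_up and/or update the description

    def get_agents(self, context, filters=None, fields=None):
        return []

    def get_agent(self, context, id, fields=None):
        raise AgentNotFound(id=id)

# create_agent() is deliberately not overridden: agents self-register
# through RPC state reports, so POST /v2.0/agents stays a BadRequest.
# ---------------------------------------------------------------------------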
import abc from oslo_log import log as logging import webob.exc from neutron._i18n import _, _LE from neutron.api import extensions from neutron.api.v2 import base from neutron.api.v2 import resource from neutron.common import constants from neutron.common import exceptions from neutron.common import rpc as n_rpc from neutron.extensions import agent from neutron import manager from neutron.plugins.common import constants as service_constants from neutron import policy from neutron import wsgi LOG = logging.getLogger(__name__) L3_ROUTER = 'l3-router' L3_ROUTERS = L3_ROUTER + 's' L3_AGENT = 'l3-agent' L3_AGENTS = L3_AGENT + 's' class RouterSchedulerController(wsgi.Controller): def get_plugin(self): plugin = manager.NeutronManager.get_service_plugins().get( service_constants.L3_ROUTER_NAT) if not plugin: LOG.error(_LE('No plugin for L3 routing registered to handle ' 'router scheduling')) msg = _('The resource could not be found.') raise webob.exc.HTTPNotFound(msg) return plugin def index(self, request, **kwargs): plugin = self.get_plugin() policy.enforce(request.context, "get_%s" % L3_ROUTERS, {}) return plugin.list_routers_on_l3_agent( request.context, kwargs['agent_id']) def create(self, request, body, **kwargs): plugin = self.get_plugin() policy.enforce(request.context, "create_%s" % L3_ROUTER, {}) agent_id = kwargs['agent_id'] router_id = body['router_id'] result = plugin.add_router_to_l3_agent(request.context, agent_id, router_id) notify(request.context, 'l3_agent.router.add', router_id, agent_id) return result def delete(self, request, id, **kwargs): plugin = self.get_plugin() policy.enforce(request.context, "delete_%s" % L3_ROUTER, {}) agent_id = kwargs['agent_id'] result = plugin.remove_router_from_l3_agent(request.context, agent_id, id) notify(request.context, 'l3_agent.router.remove', id, agent_id) return result class L3AgentsHostingRouterController(wsgi.Controller): def get_plugin(self): plugin = manager.NeutronManager.get_service_plugins().get( service_constants.L3_ROUTER_NAT) if not plugin: LOG.error(_LE('No plugin for L3 routing registered to handle ' 'router scheduling')) msg = _('The resource could not be found.') raise webob.exc.HTTPNotFound(msg) return plugin def index(self, request, **kwargs): plugin = self.get_plugin() policy.enforce(request.context, "get_%s" % L3_AGENTS, {}) return plugin.list_l3_agents_hosting_router( request.context, kwargs['router_id']) class L3agentscheduler(extensions.ExtensionDescriptor): """Extension class supporting l3 agent scheduler. 
""" @classmethod def get_name(cls): return "L3 Agent Scheduler" @classmethod def get_alias(cls): return constants.L3_AGENT_SCHEDULER_EXT_ALIAS @classmethod def get_description(cls): return "Schedule routers among l3 agents" @classmethod def get_updated(cls): return "2013-02-07T10:00:00-00:00" @classmethod def get_resources(cls): """Returns Ext Resources.""" exts = [] parent = dict(member_name="agent", collection_name="agents") controller = resource.Resource(RouterSchedulerController(), base.FAULT_MAP) exts.append(extensions.ResourceExtension( L3_ROUTERS, controller, parent)) parent = dict(member_name="router", collection_name="routers") controller = resource.Resource(L3AgentsHostingRouterController(), base.FAULT_MAP) exts.append(extensions.ResourceExtension( L3_AGENTS, controller, parent)) return exts def get_extended_resources(self, version): return {} class InvalidL3Agent(agent.AgentNotFound): message = _("Agent %(id)s is not a L3 Agent or has been disabled") class RouterHostedByL3Agent(exceptions.Conflict): message = _("The router %(router_id)s has been already hosted " "by the L3 Agent %(agent_id)s.") class RouterSchedulingFailed(exceptions.Conflict): message = _("Failed scheduling router %(router_id)s to " "the L3 Agent %(agent_id)s.") class RouterReschedulingFailed(exceptions.Conflict): message = _("Failed rescheduling router %(router_id)s: " "no eligible l3 agent found.") class RouterL3AgentMismatch(exceptions.Conflict): message = _("Cannot host distributed router %(router_id)s " "on legacy L3 agent %(agent_id)s.") class DVRL3CannotAssignToDvrAgent(exceptions.Conflict): message = _("Not allowed to manually assign a router to an " "agent in 'dvr' mode.") class DVRL3CannotRemoveFromDvrAgent(exceptions.Conflict): message = _("Not allowed to manually remove a router from " "an agent in 'dvr' mode.") class L3AgentSchedulerPluginBase(object): """REST API to operate the l3 agent scheduler. All of method must be in an admin context. """ @abc.abstractmethod def add_router_to_l3_agent(self, context, id, router_id): pass @abc.abstractmethod def remove_router_from_l3_agent(self, context, id, router_id): pass @abc.abstractmethod def list_routers_on_l3_agent(self, context, id): pass @abc.abstractmethod def list_l3_agents_hosting_router(self, context, router_id): pass def notify(context, action, router_id, agent_id): info = {'id': agent_id, 'router_id': router_id} notifier = n_rpc.get_notifier('router') notifier.info(context, action, {'agent': info}) neutron-8.4.0/neutron/extensions/availability_zone.py0000664000567000056710000001026613044372760024341 0ustar jenkinsjenkins00000000000000# # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
import abc from oslo_serialization import jsonutils from neutron._i18n import _ from neutron.api import extensions from neutron.api.v2 import attributes as attr from neutron.api.v2 import base from neutron.common import exceptions from neutron import manager AZ_HINTS_DB_LEN = 255 # resource independent common methods def convert_az_list_to_string(az_list): return jsonutils.dumps(az_list) def convert_az_string_to_list(az_string): return jsonutils.loads(az_string) if az_string else [] def _validate_availability_zone_hints(data, valid_value=None): # syntax check only here. existence of az will be checked later. msg = attr.validate_list_of_unique_strings(data) if msg: return msg az_string = convert_az_list_to_string(data) if len(az_string) > AZ_HINTS_DB_LEN: msg = _("Too many availability_zone_hints specified") raise exceptions.InvalidInput(error_message=msg) attr.validators['type:availability_zone_hints'] = ( _validate_availability_zone_hints) # Attribute Map RESOURCE_NAME = 'availability_zone' AVAILABILITY_ZONES = 'availability_zones' AZ_HINTS = 'availability_zone_hints' # name: name of availability zone (string) # resource: type of resource: 'network' or 'router' # state: state of availability zone: 'available' or 'unavailable' # It means whether users can use the availability zone. RESOURCE_ATTRIBUTE_MAP = { AVAILABILITY_ZONES: { 'name': {'is_visible': True}, 'resource': {'is_visible': True}, 'state': {'is_visible': True} } } EXTENDED_ATTRIBUTES_2_0 = { 'agents': { RESOURCE_NAME: {'allow_post': False, 'allow_put': False, 'is_visible': True} } } class AvailabilityZoneNotFound(exceptions.NotFound): message = _("AvailabilityZone %(availability_zone)s could not be found.") class Availability_zone(extensions.ExtensionDescriptor): """Availability zone extension.""" @classmethod def get_name(cls): return "Availability Zone" @classmethod def get_alias(cls): return "availability_zone" @classmethod def get_description(cls): return "The availability zone extension." @classmethod def get_updated(cls): return "2015-01-01T10:00:00-00:00" def get_required_extensions(self): return ["agent"] @classmethod def get_resources(cls): """Returns Ext Resources.""" my_plurals = [(key, key[:-1]) for key in RESOURCE_ATTRIBUTE_MAP.keys()] attr.PLURALS.update(dict(my_plurals)) plugin = manager.NeutronManager.get_plugin() params = RESOURCE_ATTRIBUTE_MAP.get(AVAILABILITY_ZONES) controller = base.create_resource(AVAILABILITY_ZONES, RESOURCE_NAME, plugin, params) ex = extensions.ResourceExtension(AVAILABILITY_ZONES, controller) return [ex] def get_extended_resources(self, version): if version == "2.0": return dict(list(EXTENDED_ATTRIBUTES_2_0.items()) + list(RESOURCE_ATTRIBUTE_MAP.items())) else: return {} class AvailabilityZonePluginBase(object): """REST API to operate the Availability Zone.""" @abc.abstractmethod def get_availability_zones(self, context, filters=None, fields=None, sorts=None, limit=None, marker=None, page_reverse=False): """Return availability zones which a resource belongs to""" @abc.abstractmethod def validate_availability_zones(self, context, resource_type, availability_zones): """Verify that the availability zones exist.""" neutron-8.4.0/neutron/extensions/__init__.py0000664000567000056710000000000013044372736022357 0ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/extensions/allowedaddresspairs.py0000664000567000056710000001154313044372760024667 0ustar jenkinsjenkins00000000000000# Copyright 2013 VMware, Inc. All rights reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import cfg import webob.exc from neutron._i18n import _ from neutron.api import extensions from neutron.api.v2 import attributes as attr from neutron.common import exceptions as nexception allowed_address_pair_opts = [ #TODO(limao): use quota framework when it support quota for attributes cfg.IntOpt('max_allowed_address_pair', default=10, help=_("Maximum number of allowed address pairs")), ] cfg.CONF.register_opts(allowed_address_pair_opts) class AllowedAddressPairsMissingIP(nexception.InvalidInput): message = _("AllowedAddressPair must contain ip_address") class AddressPairAndPortSecurityRequired(nexception.Conflict): message = _("Port Security must be enabled in order to have allowed " "address pairs on a port.") class DuplicateAddressPairInRequest(nexception.InvalidInput): message = _("Request contains duplicate address pair: " "mac_address %(mac_address)s ip_address %(ip_address)s.") class AllowedAddressPairExhausted(nexception.BadRequest): message = _("The number of allowed address pair " "exceeds the maximum %(quota)s.") def _validate_allowed_address_pairs(address_pairs, valid_values=None): unique_check = {} try: if len(address_pairs) > cfg.CONF.max_allowed_address_pair: raise AllowedAddressPairExhausted( quota=cfg.CONF.max_allowed_address_pair) except TypeError: raise webob.exc.HTTPBadRequest( _("Allowed address pairs must be a list.")) for address_pair in address_pairs: # mac_address is optional, if not set we use the mac on the port if 'mac_address' in address_pair: msg = attr._validate_mac_address(address_pair['mac_address']) if msg: raise webob.exc.HTTPBadRequest(msg) if 'ip_address' not in address_pair: raise AllowedAddressPairsMissingIP() mac = address_pair.get('mac_address') ip_address = address_pair['ip_address'] if (mac, ip_address) not in unique_check: unique_check[(mac, ip_address)] = None else: raise DuplicateAddressPairInRequest(mac_address=mac, ip_address=ip_address) invalid_attrs = set(address_pair.keys()) - set(['mac_address', 'ip_address']) if invalid_attrs: msg = (_("Unrecognized attribute(s) '%s'") % ', '.join(set(address_pair.keys()) - set(['mac_address', 'ip_address']))) raise webob.exc.HTTPBadRequest(msg) if '/' in ip_address: msg = attr._validate_subnet(ip_address) else: msg = attr._validate_ip_address(ip_address) if msg: raise webob.exc.HTTPBadRequest(msg) attr.validators['type:validate_allowed_address_pairs'] = ( _validate_allowed_address_pairs) ADDRESS_PAIRS = 'allowed_address_pairs' EXTENDED_ATTRIBUTES_2_0 = { 'ports': { ADDRESS_PAIRS: {'allow_post': True, 'allow_put': True, 'convert_to': attr.convert_none_to_empty_list, 'convert_list_to': attr.convert_kvp_list_to_dict, 'validate': {'type:validate_allowed_address_pairs': None}, 'enforce_policy': True, 'default': attr.ATTR_NOT_SPECIFIED, 'is_visible': True}, } } class Allowedaddresspairs(extensions.ExtensionDescriptor): """Extension class supporting allowed address pairs.""" @classmethod def get_name(cls): return "Allowed Address Pairs" @classmethod def 
get_alias(cls): return "allowed-address-pairs" @classmethod def get_description(cls): return "Provides allowed address pairs" @classmethod def get_updated(cls): return "2013-07-23T10:00:00-00:00" def get_extended_resources(self, version): if version == "2.0": attr.PLURALS.update({'allowed_address_pairs': 'allowed_address_pair'}) return EXTENDED_ATTRIBUTES_2_0 else: return {} neutron-8.4.0/neutron/extensions/tag.py0000664000567000056710000001474113044372760021411 0ustar jenkinsjenkins00000000000000# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import abc import six from oslo_log import log as logging import webob.exc from neutron._i18n import _ from neutron.api import extensions from neutron.api.v2 import attributes from neutron.api.v2 import base from neutron.api.v2 import resource as api_resource from neutron.common import exceptions from neutron import manager from neutron.services import service_base LOG = logging.getLogger(__name__) TAG = 'tag' TAGS = TAG + 's' MAX_TAG_LEN = 60 TAG_PLUGIN_TYPE = 'TAG' TAG_SUPPORTED_RESOURCES = { attributes.NETWORKS: attributes.NETWORK, # other resources can be added } TAG_ATTRIBUTE_MAP = { TAGS: {'allow_post': False, 'allow_put': False, 'is_visible': True} } class TagResourceNotFound(exceptions.NotFound): message = _("Resource %(resource)s %(resource_id)s could not be found.") class TagNotFound(exceptions.NotFound): message = _("Tag %(tag)s could not be found.") def get_parent_resource_and_id(kwargs): for key in kwargs: for resource in TAG_SUPPORTED_RESOURCES: if key == TAG_SUPPORTED_RESOURCES[resource] + '_id': return resource, kwargs[key] return None, None def validate_tag(tag): msg = attributes._validate_string(tag, MAX_TAG_LEN) if msg: raise exceptions.InvalidInput(error_message=msg) def validate_tags(body): if 'tags' not in body: raise exceptions.InvalidInput(error_message="Invalid tags body.") msg = attributes.validate_list_of_unique_strings(body['tags'], MAX_TAG_LEN) if msg: raise exceptions.InvalidInput(error_message=msg) class TagController(object): def __init__(self): self.plugin = (manager.NeutronManager.get_service_plugins() [TAG_PLUGIN_TYPE]) def index(self, request, **kwargs): # GET /v2.0/networks/{network_id}/tags parent, parent_id = get_parent_resource_and_id(kwargs) return self.plugin.get_tags(request.context, parent, parent_id) def show(self, request, id, **kwargs): # GET /v2.0/networks/{network_id}/tags/{tag} # id == tag validate_tag(id) parent, parent_id = get_parent_resource_and_id(kwargs) return self.plugin.get_tag(request.context, parent, parent_id, id) def create(self, request, **kwargs): # not supported # POST /v2.0/networks/{network_id}/tags raise webob.exc.HTTPNotFound("not supported") def update(self, request, id, **kwargs): # PUT /v2.0/networks/{network_id}/tags/{tag} # id == tag validate_tag(id) parent, parent_id = get_parent_resource_and_id(kwargs) return self.plugin.update_tag(request.context, parent, parent_id, id) def update_all(self, request, body, **kwargs): # PUT /v2.0/networks/{network_id}/tags # body: {"tags": ["aaa", "bbb"]} 
validate_tags(body) parent, parent_id = get_parent_resource_and_id(kwargs) return self.plugin.update_tags(request.context, parent, parent_id, body) def delete(self, request, id, **kwargs): # DELETE /v2.0/networks/{network_id}/tags/{tag} # id == tag validate_tag(id) parent, parent_id = get_parent_resource_and_id(kwargs) return self.plugin.delete_tag(request.context, parent, parent_id, id) def delete_all(self, request, **kwargs): # DELETE /v2.0/networks/{network_id}/tags parent, parent_id = get_parent_resource_and_id(kwargs) return self.plugin.delete_tags(request.context, parent, parent_id) class Tag(extensions.ExtensionDescriptor): """Extension class supporting tags.""" @classmethod def get_name(cls): return "Tag support" @classmethod def get_alias(cls): return "tag" @classmethod def get_description(cls): return "Enables to set tag on resources." @classmethod def get_updated(cls): return "2016-01-01T00:00:00-00:00" @classmethod def get_resources(cls): """Returns Ext Resources.""" exts = [] action_status = {'index': 200, 'show': 204, 'update': 201, 'update_all': 200, 'delete': 204, 'delete_all': 204} controller = api_resource.Resource(TagController(), base.FAULT_MAP, action_status=action_status) collection_methods = {"delete_all": "DELETE", "update_all": "PUT"} exts = [] for collection_name, member_name in TAG_SUPPORTED_RESOURCES.items(): parent = {'member_name': member_name, 'collection_name': collection_name} exts.append(extensions.ResourceExtension( TAGS, controller, parent, collection_methods=collection_methods)) return exts def get_extended_resources(self, version): if version != "2.0": return {} EXTENDED_ATTRIBUTES_2_0 = {} for collection_name in TAG_SUPPORTED_RESOURCES: EXTENDED_ATTRIBUTES_2_0[collection_name] = TAG_ATTRIBUTE_MAP return EXTENDED_ATTRIBUTES_2_0 @six.add_metaclass(abc.ABCMeta) class TagPluginBase(service_base.ServicePluginBase): """REST API to operate the Tag.""" def get_plugin_description(self): return "Tag support" def get_plugin_type(self): return TAG_PLUGIN_TYPE @abc.abstractmethod def get_tags(self, context, resource, resource_id): pass @abc.abstractmethod def get_tag(self, context, resource, resource_id, tag): pass @abc.abstractmethod def update_tags(self, context, resource, resource_id, body): pass @abc.abstractmethod def update_tag(self, context, resource, resource_id, tag): pass @abc.abstractmethod def delete_tags(self, context, resource, resource_id): pass @abc.abstractmethod def delete_tag(self, context, resource, resource_id, tag): pass neutron-8.4.0/neutron/extensions/dhcpagentscheduler.py0000664000567000056710000001247513044372760024474 0ustar jenkinsjenkins00000000000000# Copyright (c) 2013 OpenStack Foundation. # All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
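# ---------------------------------------------------------------------------
# Illustrative sketch, not part of the original tree: TagController above
# maps tag operations onto sub-resources of every entry in
# TAG_SUPPORTED_RESOURCES (currently only networks). Replacing the whole
# tag set goes through update_all(); endpoint and token are placeholders.
import requests


def replace_network_tags(neutron_url, token, network_id, tags):
    """PUT /v2.0/networks/{network_id}/tags with body {"tags": [...]}."""
    return requests.put(
        '%s/networks/%s/tags' % (neutron_url, network_id),
        json={'tags': tags}, headers={'X-Auth-Token': token})

# A single tag is addressed by value: PUT (update()) on
# /v2.0/networks/{network_id}/tags/{tag} adds it with no request body,
# and DELETE (delete()) on the same URL removes it.
# ---------------------------------------------------------------------------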
import abc

from neutron._i18n import _
from neutron.api import extensions
from neutron.api.v2 import base
from neutron.api.v2 import resource
from neutron.common import constants
from neutron.common import exceptions
from neutron.common import rpc as n_rpc
from neutron.extensions import agent
from neutron import manager
from neutron import policy
from neutron import wsgi

DHCP_NET = 'dhcp-network'
DHCP_NETS = DHCP_NET + 's'
DHCP_AGENT = 'dhcp-agent'
DHCP_AGENTS = DHCP_AGENT + 's'


class NetworkSchedulerController(wsgi.Controller):
    def index(self, request, **kwargs):
        plugin = manager.NeutronManager.get_plugin()
        policy.enforce(request.context,
                       "get_%s" % DHCP_NETS,
                       {})
        return plugin.list_networks_on_dhcp_agent(
            request.context, kwargs['agent_id'])

    def create(self, request, body, **kwargs):
        plugin = manager.NeutronManager.get_plugin()
        policy.enforce(request.context,
                       "create_%s" % DHCP_NET,
                       {})
        agent_id = kwargs['agent_id']
        network_id = body['network_id']
        result = plugin.add_network_to_dhcp_agent(request.context, agent_id,
                                                  network_id)
        notify(request.context, 'dhcp_agent.network.add', network_id,
               agent_id)
        return result

    def delete(self, request, id, **kwargs):
        plugin = manager.NeutronManager.get_plugin()
        policy.enforce(request.context,
                       "delete_%s" % DHCP_NET,
                       {})
        agent_id = kwargs['agent_id']
        result = plugin.remove_network_from_dhcp_agent(request.context,
                                                       agent_id, id)
        notify(request.context, 'dhcp_agent.network.remove', id, agent_id)
        return result


class DhcpAgentsHostingNetworkController(wsgi.Controller):
    def index(self, request, **kwargs):
        plugin = manager.NeutronManager.get_plugin()
        policy.enforce(request.context,
                       "get_%s" % DHCP_AGENTS,
                       {})
        return plugin.list_dhcp_agents_hosting_network(
            request.context, kwargs['network_id'])


class Dhcpagentscheduler(extensions.ExtensionDescriptor):
    """Extension class supporting dhcp agent scheduler.
    """

    @classmethod
    def get_name(cls):
        return "DHCP Agent Scheduler"

    @classmethod
    def get_alias(cls):
        return constants.DHCP_AGENT_SCHEDULER_EXT_ALIAS

    @classmethod
    def get_description(cls):
        return "Schedule networks among dhcp agents"

    @classmethod
    def get_updated(cls):
        return "2013-02-07T10:00:00-00:00"

    @classmethod
    def get_resources(cls):
        """Returns Ext Resources."""
        exts = []
        parent = dict(member_name="agent",
                      collection_name="agents")
        controller = resource.Resource(NetworkSchedulerController(),
                                       base.FAULT_MAP)
        exts.append(extensions.ResourceExtension(
            DHCP_NETS, controller, parent))

        parent = dict(member_name="network",
                      collection_name="networks")
        controller = resource.Resource(DhcpAgentsHostingNetworkController(),
                                       base.FAULT_MAP)
        exts.append(extensions.ResourceExtension(
            DHCP_AGENTS, controller, parent))
        return exts

    def get_extended_resources(self, version):
        return {}


class InvalidDHCPAgent(agent.AgentNotFound):
    message = _("Agent %(id)s is not a valid DHCP Agent or has been disabled")


class NetworkHostedByDHCPAgent(exceptions.Conflict):
    message = _("The network %(network_id)s is already hosted"
                " by the DHCP Agent %(agent_id)s.")


class NetworkNotHostedByDhcpAgent(exceptions.Conflict):
    message = _("The network %(network_id)s is not hosted"
                " by the DHCP agent %(agent_id)s.")


class DhcpAgentSchedulerPluginBase(object):
    """REST API to operate the DHCP agent scheduler.

    All methods must be called within an admin context.
""" @abc.abstractmethod def add_network_to_dhcp_agent(self, context, id, network_id): pass @abc.abstractmethod def remove_network_from_dhcp_agent(self, context, id, network_id): pass @abc.abstractmethod def list_networks_on_dhcp_agent(self, context, id): pass @abc.abstractmethod def list_dhcp_agents_hosting_network(self, context, network_id): pass def notify(context, action, network_id, agent_id): info = {'id': agent_id, 'network_id': network_id} notifier = n_rpc.get_notifier('network') notifier.info(context, action, {'agent': info}) neutron-8.4.0/neutron/extensions/subnetallocation.py0000664000567000056710000000255213044372760024201 0ustar jenkinsjenkins00000000000000# Copyright (c) 2015 Hewlett-Packard Development Company, L.P. # All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron.api import extensions from neutron.common import constants class Subnetallocation(extensions.ExtensionDescriptor): """Extension class supporting subnet allocation.""" @classmethod def get_name(cls): return "Subnet Allocation" @classmethod def get_alias(cls): return constants.SUBNET_ALLOCATION_EXT_ALIAS @classmethod def get_description(cls): return "Enables allocation of subnets from a subnet pool" @classmethod def get_updated(cls): return "2015-03-30T10:00:00-00:00" @classmethod def get_resources(cls): """Returns Ext Resources.""" return [] def get_extended_resources(self, version): return {} neutron-8.4.0/neutron/extensions/vlantransparent.py0000664000567000056710000000471113044372760024054 0ustar jenkinsjenkins00000000000000# Copyright (c) 2015 Cisco Systems, Inc. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from oslo_config import cfg from oslo_log import log as logging from neutron._i18n import _, _LI from neutron.api import extensions from neutron.api.v2 import attributes from neutron.common import exceptions as nexception LOG = logging.getLogger(__name__) class VlanTransparencyDriverError(nexception.NeutronException): """Vlan Transparency not supported by all mechanism drivers.""" message = _("Backend does not support VLAN Transparency.") VLANTRANSPARENT = 'vlan_transparent' EXTENDED_ATTRIBUTES_2_0 = { 'networks': { VLANTRANSPARENT: {'allow_post': True, 'allow_put': False, 'convert_to': attributes.convert_to_boolean, 'default': attributes.ATTR_NOT_SPECIFIED, 'is_visible': True}, }, } def disable_extension_by_config(aliases): if not cfg.CONF.vlan_transparent: if 'vlan-transparent' in aliases: aliases.remove('vlan-transparent') LOG.info(_LI('Disabled vlantransparent extension.')) def get_vlan_transparent(network): return (network['vlan_transparent'] if ('vlan_transparent' in network and attributes.is_attr_set(network['vlan_transparent'])) else False) class Vlantransparent(extensions.ExtensionDescriptor): """Extension class supporting vlan transparent networks.""" @classmethod def get_name(cls): return "Vlantransparent" @classmethod def get_alias(cls): return "vlan-transparent" @classmethod def get_description(cls): return "Provides Vlan Transparent Networks" @classmethod def get_updated(cls): return "2015-03-23T09:00:00-00:00" def get_extended_resources(self, version): if version == "2.0": return EXTENDED_ATTRIBUTES_2_0 else: return {} neutron-8.4.0/neutron/extensions/routerservicetype.py0000664000567000056710000000303013044372760024426 0ustar jenkinsjenkins00000000000000# Copyright 2013 VMware, Inc. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron.api import extensions SERVICE_TYPE_ID = 'service_type_id' EXTENDED_ATTRIBUTES_2_0 = { 'routers': { SERVICE_TYPE_ID: {'allow_post': True, 'allow_put': False, 'validate': {'type:uuid_or_none': None}, 'default': None, 'is_visible': True}, } } class Routerservicetype(extensions.ExtensionDescriptor): """Extension class supporting router service type.""" @classmethod def get_name(cls): return "Router Service Type" @classmethod def get_alias(cls): return "router-service-type" @classmethod def get_description(cls): return "Provides router service type" @classmethod def get_updated(cls): return "2013-01-29T00:00:00-00:00" def get_extended_resources(self, version): if version == "2.0": return EXTENDED_ATTRIBUTES_2_0 else: return {} neutron-8.4.0/neutron/extensions/router_availability_zone.py0000664000567000056710000000370713044372760025743 0ustar jenkinsjenkins00000000000000# # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import abc import six from neutron.api import extensions from neutron.extensions import availability_zone as az_ext EXTENDED_ATTRIBUTES_2_0 = { 'routers': { az_ext.AVAILABILITY_ZONES: {'allow_post': False, 'allow_put': False, 'is_visible': True}, az_ext.AZ_HINTS: { 'allow_post': True, 'allow_put': False, 'is_visible': True, 'validate': {'type:availability_zone_hints': None}, 'default': []}} } class Router_availability_zone(extensions.ExtensionDescriptor): """Router availability zone extension.""" @classmethod def get_name(cls): return "Router Availability Zone" @classmethod def get_alias(cls): return "router_availability_zone" @classmethod def get_description(cls): return "Availability zone support for router." @classmethod def get_updated(cls): return "2015-01-01T10:00:00-00:00" def get_required_extensions(self): return ["router", "availability_zone"] def get_extended_resources(self, version): if version == "2.0": return EXTENDED_ATTRIBUTES_2_0 else: return {} @six.add_metaclass(abc.ABCMeta) class RouterAvailabilityZonePluginBase(object): @abc.abstractmethod def get_router_availability_zones(self, router): """Return availability zones which a router belongs to.""" neutron-8.4.0/neutron/extensions/bgp_dragentscheduler.py0000664000567000056710000001334413044372760025007 0ustar jenkinsjenkins00000000000000# Copyright 2016 Huawei Technologies India Pvt. Ltd. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
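# ---------------------------------------------------------------------------
# Illustrative sketch, not part of the original tree: availability zone
# hints (availability_zone.py above, reused by router_availability_zone)
# are persisted as a JSON string of at most AZ_HINTS_DB_LEN (255)
# characters and round-trip through the two converters:
from oslo_serialization import jsonutils

hints = ['az1', 'az2']
az_string = jsonutils.dumps(hints)            # convert_az_list_to_string
assert jsonutils.loads(az_string) == hints    # convert_az_string_to_list
assert len(az_string) <= 255                  # checked at validation time
# An unset DB value converts back to an empty list rather than failing:
empty = ''
assert (jsonutils.loads(empty) if empty else []) == []
# ---------------------------------------------------------------------------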
import abc

import six
import webob.exc

from oslo_log import log as logging

from neutron.api import extensions
from neutron.api.v2 import base
from neutron.api.v2 import resource
from neutron.common import exceptions
from neutron.extensions import agent
from neutron.extensions import bgp as bgp_ext
from neutron._i18n import _, _LE
from neutron import manager
from neutron import wsgi

LOG = logging.getLogger(__name__)

BGP_DRAGENT_SCHEDULER_EXT_ALIAS = 'bgp_dragent_scheduler'
BGP_DRINSTANCE = 'bgp-drinstance'
BGP_DRINSTANCES = BGP_DRINSTANCE + 's'
BGP_DRAGENT = 'bgp-dragent'
BGP_DRAGENTS = BGP_DRAGENT + 's'


class DrAgentInvalid(agent.AgentNotFound):
    message = _("BgpDrAgent %(id)s is invalid or has been disabled.")


class DrAgentNotHostingBgpSpeaker(exceptions.NotFound):
    message = _("BGP speaker %(bgp_speaker_id)s is not hosted "
                "by the BgpDrAgent %(agent_id)s.")


class DrAgentAssociationError(exceptions.Conflict):
    message = _("BgpDrAgent %(agent_id)s is already associated "
                "to a BGP speaker.")


class BgpDrSchedulerController(wsgi.Controller):
    """Schedule BgpSpeaker for a BgpDrAgent"""

    def get_plugin(self):
        plugin = manager.NeutronManager.get_service_plugins().get(
            bgp_ext.BGP_EXT_ALIAS)
        if not plugin:
            LOG.error(_LE('No plugin for BGP routing registered'))
            msg = _('The resource could not be found.')
            raise webob.exc.HTTPNotFound(msg)
        return plugin

    def index(self, request, **kwargs):
        plugin = self.get_plugin()
        return plugin.list_bgp_speaker_on_dragent(
            request.context, kwargs['agent_id'])

    def create(self, request, body, **kwargs):
        plugin = self.get_plugin()
        return plugin.add_bgp_speaker_to_dragent(
            request.context, kwargs['agent_id'], body['bgp_speaker_id'])

    def delete(self, request, id, **kwargs):
        plugin = self.get_plugin()
        return plugin.remove_bgp_speaker_from_dragent(
            request.context, kwargs['agent_id'], id)


class BgpDrAgentController(wsgi.Controller):
    def get_plugin(self):
        plugin = manager.NeutronManager.get_service_plugins().get(
            bgp_ext.BGP_EXT_ALIAS)
        if not plugin:
            LOG.error(_LE('No plugin for BGP routing registered'))
            msg = _('The resource could not be found.')
            raise webob.exc.HTTPNotFound(msg)
        return plugin

    def index(self, request, **kwargs):
        plugin = self.get_plugin()
        return plugin.list_dragent_hosting_bgp_speaker(
            request.context, kwargs['bgp_speaker_id'])


class Bgp_dragentscheduler(extensions.ExtensionDescriptor):
    """Extension class supporting Dynamic Routing scheduler.
    """

    @classmethod
    def get_name(cls):
        return "BGP Dynamic Routing Agent Scheduler"

    @classmethod
    def get_alias(cls):
        return BGP_DRAGENT_SCHEDULER_EXT_ALIAS

    @classmethod
    def get_description(cls):
        return "Schedules BgpSpeakers on BgpDrAgent"

    @classmethod
    def get_updated(cls):
        return "2015-07-30T10:00:00-00:00"

    @classmethod
    def get_resources(cls):
        """Returns Ext Resources."""
        exts = []
        parent = dict(member_name="agent",
                      collection_name="agents")

        controller = resource.Resource(BgpDrSchedulerController(),
                                       base.FAULT_MAP)
        exts.append(extensions.ResourceExtension(BGP_DRINSTANCES,
                                                 controller, parent))

        parent = dict(member_name="bgp_speaker",
                      collection_name="bgp-speakers")
        controller = resource.Resource(BgpDrAgentController(),
                                       base.FAULT_MAP)
        exts.append(extensions.ResourceExtension(BGP_DRAGENTS,
                                                 controller, parent))
        return exts

    def get_extended_resources(self, version):
        return {}


@six.add_metaclass(abc.ABCMeta)
class BgpDrSchedulerPluginBase(object):
    """REST API to operate BGP dynamic routing agent scheduler.

    All the methods must be executed in an admin context.
""" def get_plugin_description(self): return "Neutron BGP dynamic routing scheduler Plugin" def get_plugin_type(self): return bgp_ext.BGP_EXT_ALIAS @abc.abstractmethod def add_bgp_speaker_to_dragent(self, context, agent_id, speaker_id): pass @abc.abstractmethod def remove_bgp_speaker_from_dragent(self, context, agent_id, speaker_id): pass @abc.abstractmethod def list_dragent_hosting_bgp_speaker(self, context, speaker_id): pass @abc.abstractmethod def list_bgp_speaker_on_dragent(self, context, agent_id): pass @abc.abstractmethod def get_bgp_speakers_for_agent_host(self, context, host): pass @abc.abstractmethod def get_bgp_speaker_by_speaker_id(self, context, speaker_id): pass @abc.abstractmethod def get_bgp_peer_by_peer_id(self, context, bgp_peer_id): pass neutron-8.4.0/neutron/extensions/flavors.py0000664000567000056710000002217713044372760022314 0ustar jenkinsjenkins00000000000000# All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron._i18n import _ from neutron.api import extensions from neutron.api.v2 import attributes as attr from neutron.api.v2 import base from neutron.api.v2 import resource_helper from neutron.common import exceptions as nexception from neutron import manager from neutron.plugins.common import constants # Flavor Exceptions class FlavorNotFound(nexception.NotFound): message = _("Flavor %(flavor_id)s could not be found.") class FlavorInUse(nexception.InUse): message = _("Flavor %(flavor_id)s is used by some service instance.") class ServiceProfileNotFound(nexception.NotFound): message = _("Service Profile %(sp_id)s could not be found.") class ServiceProfileInUse(nexception.InUse): message = _("Service Profile %(sp_id)s is used by some service instance.") class FlavorServiceProfileBindingExists(nexception.Conflict): message = _("Service Profile %(sp_id)s is already associated " "with flavor %(fl_id)s.") class FlavorServiceProfileBindingNotFound(nexception.NotFound): message = _("Service Profile %(sp_id)s is not associated " "with flavor %(fl_id)s.") class ServiceProfileDriverNotFound(nexception.NotFound): message = _("Service Profile driver %(driver)s could not be found.") class ServiceProfileEmpty(nexception.InvalidInput): message = _("Service Profile needs either a driver or metainfo.") class FlavorDisabled(nexception.ServiceUnavailable): message = _("Flavor is not enabled.") class ServiceProfileDisabled(nexception.ServiceUnavailable): message = _("Service Profile is not enabled.") class InvalidFlavorServiceType(nexception.InvalidInput): message = _("Invalid service type %(service_type)s.") def _validate_flavor_service_type(validate_type, valid_values=None): """Ensure requested flavor service type plugin is loaded.""" plugins = manager.NeutronManager.get_service_plugins() if validate_type not in plugins: raise InvalidFlavorServiceType(service_type=validate_type) attr.validators['type:validate_flavor_service_type'] = ( _validate_flavor_service_type) FLAVORS = 'flavors' SERVICE_PROFILES = 'service_profiles' FLAVORS_PREFIX = "" RESOURCE_ATTRIBUTE_MAP = { 
FLAVORS: { 'id': {'allow_post': False, 'allow_put': False, 'validate': {'type:uuid': None}, 'is_visible': True, 'primary_key': True}, 'name': {'allow_post': True, 'allow_put': True, 'validate': {'type:string': attr.NAME_MAX_LEN}, 'is_visible': True, 'default': ''}, 'description': {'allow_post': True, 'allow_put': True, 'validate': {'type:string_or_none': attr.LONG_DESCRIPTION_MAX_LEN}, 'is_visible': True, 'default': ''}, 'service_type': {'allow_post': True, 'allow_put': False, 'validate': {'type:validate_flavor_service_type': None}, 'is_visible': True}, 'tenant_id': {'allow_post': True, 'allow_put': False, 'required_by_policy': True, 'validate': {'type:string': attr.TENANT_ID_MAX_LEN}, 'is_visible': True}, 'service_profiles': {'allow_post': True, 'allow_put': True, 'validate': {'type:uuid_list': None}, 'is_visible': True, 'default': []}, 'enabled': {'allow_post': True, 'allow_put': True, 'convert_to': attr.convert_to_boolean_if_not_none, 'default': True, 'is_visible': True}, }, SERVICE_PROFILES: { 'id': {'allow_post': False, 'allow_put': False, 'validate': {'type:uuid': None}, 'is_visible': True, 'primary_key': True}, 'description': {'allow_post': True, 'allow_put': True, 'validate': {'type:string_or_none': attr.LONG_DESCRIPTION_MAX_LEN}, 'is_visible': True, 'default': ''}, 'driver': {'allow_post': True, 'allow_put': True, 'validate': {'type:string': attr.LONG_DESCRIPTION_MAX_LEN}, 'is_visible': True, 'default': ''}, 'metainfo': {'allow_post': True, 'allow_put': True, 'is_visible': True, 'default': ''}, 'tenant_id': {'allow_post': True, 'allow_put': False, 'required_by_policy': True, 'validate': {'type:string': attr.TENANT_ID_MAX_LEN}, 'is_visible': True}, 'enabled': {'allow_post': True, 'allow_put': True, 'convert_to': attr.convert_to_boolean_if_not_none, 'is_visible': True, 'default': True}, }, } SUB_RESOURCE_ATTRIBUTE_MAP = { 'next_providers': { 'parent': {'collection_name': 'flavors', 'member_name': 'flavor'}, 'parameters': {'provider': {'allow_post': False, 'allow_put': False, 'is_visible': True}, 'driver': {'allow_post': False, 'allow_put': False, 'is_visible': True}, 'metainfo': {'allow_post': False, 'allow_put': False, 'is_visible': True}, 'tenant_id': {'allow_post': True, 'allow_put': False, 'required_by_policy': True, 'validate': {'type:string': attr.TENANT_ID_MAX_LEN}, 'is_visible': True}} }, 'service_profiles': { 'parent': {'collection_name': 'flavors', 'member_name': 'flavor'}, 'parameters': {'id': {'allow_post': True, 'allow_put': False, 'validate': {'type:uuid': None}, 'is_visible': True}, 'tenant_id': {'allow_post': True, 'allow_put': False, 'required_by_policy': True, 'validate': {'type:string': attr.TENANT_ID_MAX_LEN}, 'is_visible': True}} } } class Flavors(extensions.ExtensionDescriptor): @classmethod def get_name(cls): return "Neutron Service Flavors" @classmethod def get_alias(cls): return "flavors" @classmethod def get_description(cls): return "Flavor specification for Neutron advanced services" @classmethod def get_updated(cls): return "2015-09-17T10:00:00-00:00" @classmethod def get_resources(cls): """Returns Ext Resources.""" plural_mappings = resource_helper.build_plural_mappings( {}, RESOURCE_ATTRIBUTE_MAP) attr.PLURALS.update(plural_mappings) resources = resource_helper.build_resource_info( plural_mappings, RESOURCE_ATTRIBUTE_MAP, constants.FLAVORS) plugin = manager.NeutronManager.get_service_plugins()[ constants.FLAVORS] for collection_name in SUB_RESOURCE_ATTRIBUTE_MAP: # Special handling needed for sub-resources with 'y' ending # (e.g. 
proxies -> proxy) resource_name = collection_name[:-1] parent = SUB_RESOURCE_ATTRIBUTE_MAP[collection_name].get('parent') params = SUB_RESOURCE_ATTRIBUTE_MAP[collection_name].get( 'parameters') controller = base.create_resource(collection_name, resource_name, plugin, params, allow_bulk=True, parent=parent) resource = extensions.ResourceExtension( collection_name, controller, parent, path_prefix=FLAVORS_PREFIX, attr_map=params) resources.append(resource) return resources def update_attributes_map(self, attributes): super(Flavors, self).update_attributes_map( attributes, extension_attrs_map=RESOURCE_ATTRIBUTE_MAP) def get_extended_resources(self, version): if version == "2.0": return RESOURCE_ATTRIBUTE_MAP else: return {} neutron-8.4.0/neutron/extensions/rbac.py0000664000567000056710000001161713044372760021544 0ustar jenkinsjenkins00000000000000# Copyright (c) 2015 Mirantis, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import cfg from neutron._i18n import _ from neutron.api import extensions from neutron.api.v2 import attributes as attr from neutron.api.v2 import base from neutron.common import exceptions as n_exc from neutron.db import rbac_db_models from neutron import manager from neutron.quota import resource_registry class RbacPolicyNotFound(n_exc.NotFound): message = _("RBAC policy of type %(object_type)s with ID %(id)s not found") class RbacPolicyInUse(n_exc.Conflict): message = _("RBAC policy on object %(object_id)s cannot be removed " "because other objects depend on it.\nDetails: %(details)s") class DuplicateRbacPolicy(n_exc.Conflict): message = _("An RBAC policy already exists with those values.") def convert_valid_object_type(otype): normalized = otype.strip().lower() if normalized in rbac_db_models.get_type_model_map(): return normalized msg = _("'%s' is not a valid RBAC object type") % otype raise n_exc.InvalidInput(error_message=msg) RESOURCE_NAME = 'rbac_policy' RESOURCE_COLLECTION = 'rbac_policies' RESOURCE_ATTRIBUTE_MAP = { RESOURCE_COLLECTION: { 'id': {'allow_post': False, 'allow_put': False, 'validate': {'type:uuid': None}, 'is_visible': True, 'primary_key': True}, 'object_type': {'allow_post': True, 'allow_put': False, 'convert_to': convert_valid_object_type, 'is_visible': True, 'default': None, 'enforce_policy': True}, 'object_id': {'allow_post': True, 'allow_put': False, 'validate': {'type:uuid': None}, 'is_visible': True, 'enforce_policy': True}, 'target_tenant': {'allow_post': True, 'allow_put': True, 'validate': {'type:string': attr.TENANT_ID_MAX_LEN}, 'is_visible': True, 'enforce_policy': True}, 'tenant_id': {'allow_post': True, 'allow_put': False, 'validate': {'type:string': attr.TENANT_ID_MAX_LEN}, 'required_by_policy': True, 'is_visible': True}, 'action': {'allow_post': True, 'allow_put': False, # action depends on type so validation has to occur in # the extension 'validate': {'type:string': attr.DESCRIPTION_MAX_LEN}, # we set enforce_policy so operators can define policies # that restrict actions 'is_visible': True, 
'enforce_policy': True} } } rbac_quota_opts = [ cfg.IntOpt('quota_rbac_policy', default=10, deprecated_name='quota_rbac_entry', help=_('Default number of RBAC entries allowed per tenant. ' 'A negative value means unlimited.')) ] cfg.CONF.register_opts(rbac_quota_opts, 'QUOTAS') class Rbac(extensions.ExtensionDescriptor): """RBAC policy support.""" @classmethod def get_name(cls): return "RBAC Policies" @classmethod def get_alias(cls): return 'rbac-policies' @classmethod def get_description(cls): return ("Allows creation and modification of policies that control " "tenant access to resources.") @classmethod def get_updated(cls): return "2015-06-17T12:15:12-00:00" @classmethod def get_resources(cls): """Returns Ext Resources.""" plural_mappings = {'rbac_policies': 'rbac_policy'} attr.PLURALS.update(plural_mappings) plugin = manager.NeutronManager.get_plugin() params = RESOURCE_ATTRIBUTE_MAP['rbac_policies'] collection_name = 'rbac-policies' resource_name = 'rbac_policy' resource_registry.register_resource_by_name(resource_name) controller = base.create_resource(collection_name, resource_name, plugin, params, allow_bulk=True, allow_pagination=False, allow_sorting=True) return [extensions.ResourceExtension(collection_name, controller, attr_map=params)] def get_extended_resources(self, version): if version == "2.0": return RESOURCE_ATTRIBUTE_MAP return {} neutron-8.4.0/neutron/extensions/quotasv2.py0000664000567000056710000001257613044372760022426 0ustar jenkinsjenkins00000000000000# Copyright 2011 OpenStack Foundation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
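# Illustrative sketch (standalone, not part of the tree): the
# normalize-then-validate converter pattern behind convert_valid_object_type()
# in rbac.py above. The two-entry table is a hypothetical stand-in for
# rbac_db_models.get_type_model_map(), and ValueError stands in for
# n_exc.InvalidInput to keep the demo free of Neutron imports.
EXAMPLE_TYPE_MAP = {'network': object(), 'qos_policy': object()}


def example_convert_valid_object_type(otype):
    normalized = otype.strip().lower()
    if normalized in EXAMPLE_TYPE_MAP:
        return normalized
    raise ValueError("'%s' is not a valid RBAC object type" % otype)

# example_convert_valid_object_type('  Network ')  -> 'network'
# example_convert_valid_object_type('port')        -> ValueError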
from oslo_config import cfg from oslo_utils import importutils import webob from neutron._i18n import _ from neutron.api import extensions from neutron.api.v2 import attributes from neutron.api.v2 import base from neutron.api.v2 import resource from neutron.common import constants as const from neutron.common import exceptions as n_exc from neutron import manager from neutron.pecan_wsgi import controllers from neutron import quota from neutron.quota import resource_registry from neutron import wsgi RESOURCE_NAME = 'quota' RESOURCE_COLLECTION = RESOURCE_NAME + "s" QUOTAS = quota.QUOTAS DB_QUOTA_DRIVER = 'neutron.db.quota.driver.DbQuotaDriver' EXTENDED_ATTRIBUTES_2_0 = { RESOURCE_COLLECTION: {} } class QuotaSetsController(wsgi.Controller): def __init__(self, plugin): self._resource_name = RESOURCE_NAME self._plugin = plugin self._driver = importutils.import_class( cfg.CONF.QUOTAS.quota_driver ) self._update_extended_attributes = True def _update_attributes(self): for quota_resource in resource_registry.get_all_resources().keys(): attr_dict = EXTENDED_ATTRIBUTES_2_0[RESOURCE_COLLECTION] attr_dict[quota_resource] = { 'allow_post': False, 'allow_put': True, 'convert_to': attributes.convert_to_int, 'validate': {'type:range': [-1, const.DB_INTEGER_MAX_VALUE]}, 'is_visible': True} self._update_extended_attributes = False def _get_quotas(self, request, tenant_id): return self._driver.get_tenant_quotas( request.context, resource_registry.get_all_resources(), tenant_id) def create(self, request, body=None): msg = _('POST requests are not supported on this resource.') raise webob.exc.HTTPNotImplemented(msg) def index(self, request): context = request.context self._check_admin(context) return {self._resource_name + "s": self._driver.get_all_quotas( context, resource_registry.get_all_resources())} def tenant(self, request): """Retrieve the tenant info in context.""" context = request.context if not context.tenant_id: raise n_exc.QuotaMissingTenant() return {'tenant': {'tenant_id': context.tenant_id}} def show(self, request, id): if id != request.context.tenant_id: self._check_admin(request.context, reason=_("Only admin is authorized " "to access quotas for another tenant")) return {self._resource_name: self._get_quotas(request, id)} def _check_admin(self, context, reason=_("Only admin can view or configure quota")): if not context.is_admin: raise n_exc.AdminRequired(reason=reason) def delete(self, request, id): self._check_admin(request.context) self._driver.delete_tenant_quota(request.context, id) def update(self, request, id, body=None): self._check_admin(request.context) if self._update_extended_attributes: self._update_attributes() body = base.Controller.prepare_request_body( request.context, body, False, self._resource_name, EXTENDED_ATTRIBUTES_2_0[RESOURCE_COLLECTION]) for key, value in body[self._resource_name].items(): self._driver.update_quota_limit(request.context, id, key, value) return {self._resource_name: self._get_quotas(request, id)} class Quotasv2(extensions.ExtensionDescriptor): """Quotas management support.""" @classmethod def get_name(cls): return "Quota management support" @classmethod def get_alias(cls): return RESOURCE_COLLECTION @classmethod def get_description(cls): description = 'Expose functions for quotas management' if cfg.CONF.QUOTAS.quota_driver == DB_QUOTA_DRIVER: description += ' per tenant' return description @classmethod def get_updated(cls): return "2012-07-29T10:00:00-00:00" @classmethod def get_resources(cls): """Returns Ext Resources.""" controller = 
resource.Resource( QuotaSetsController(manager.NeutronManager.get_plugin()), faults=base.FAULT_MAP) return [extensions.ResourceExtension( Quotasv2.get_alias(), controller, collection_actions={'tenant': 'GET'})] @classmethod def get_pecan_controllers(cls): return ((RESOURCE_COLLECTION, controllers.QuotasController()), ) def get_extended_resources(self, version): if version == "2.0": return EXTENDED_ATTRIBUTES_2_0 else: return {} neutron-8.4.0/neutron/extensions/auto_allocated_topology.py0000664000567000056710000000542713044372760025553 0ustar jenkinsjenkins00000000000000# Copyright 2015-2016 Hewlett Packard Enterprise Development Company, LP # # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron.api import extensions from neutron.api.v2 import attributes as attr from neutron.api.v2 import base from neutron.services.auto_allocate import plugin RESOURCE_NAME = "auto_allocated_topology" COLLECTION_NAME = "auto_allocated_topologies" IS_DEFAULT = "is_default" EXT_ALIAS = RESOURCE_NAME.replace('_', '-') RESOURCE_ATTRIBUTE_MAP = { COLLECTION_NAME: { 'id': {'allow_post': False, 'allow_put': False, 'validate': {'type:uuid': None}, 'is_visible': True}, 'tenant_id': {'allow_post': False, 'allow_put': False, 'validate': {'type:uuid': None}, 'is_visible': True}, }, 'networks': {IS_DEFAULT: {'allow_post': True, 'allow_put': True, 'default': False, 'is_visible': True, 'convert_to': attr.convert_to_boolean, 'enforce_policy': True, 'required_by_policy': True}}, } class Auto_allocated_topology(extensions.ExtensionDescriptor): @classmethod def get_name(cls): return "Auto Allocated Topology Services" @classmethod def get_alias(cls): return EXT_ALIAS @classmethod def get_description(cls): return "Auto Allocated Topology Services." @classmethod def get_updated(cls): return "2016-01-01T00:00:00-00:00" @classmethod def get_resources(cls): params = RESOURCE_ATTRIBUTE_MAP.get(COLLECTION_NAME, dict()) controller = base.create_resource(COLLECTION_NAME, EXT_ALIAS, plugin.Plugin.get_instance(), params, allow_bulk=False) return [extensions.ResourceExtension(EXT_ALIAS, controller)] def get_required_extensions(self): return ["subnet_allocation", "external-net", "router"] def get_extended_resources(self, version): if version == "2.0": return RESOURCE_ATTRIBUTE_MAP else: return {} neutron-8.4.0/neutron/extensions/bgp.py0000664000567000056710000002065013044372760021402 0ustar jenkinsjenkins00000000000000# Copyright 2016 Hewlett Packard Development Coompany LP # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
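# Illustrative sketch (standalone, not part of the tree): the three driver
# calls QuotaSetsController in quotasv2.py above depends on --
# get_tenant_quotas(), update_quota_limit() and delete_tenant_quota(). This
# in-memory driver is hypothetical; the real default is
# neutron.db.quota.driver.DbQuotaDriver, and real drivers also take a request
# context and the registered resources as arguments (elided here).


class ExampleQuotaDriver(object):
    def __init__(self, defaults):
        self._defaults = dict(defaults)  # resource name -> default limit
        self._overrides = {}             # tenant id -> {resource: limit}

    def get_tenant_quotas(self, tenant_id):
        quotas = dict(self._defaults)
        quotas.update(self._overrides.get(tenant_id, {}))
        return quotas

    def update_quota_limit(self, tenant_id, resource, limit):
        self._overrides.setdefault(tenant_id, {})[resource] = limit

    def delete_tenant_quota(self, tenant_id):
        # Revert to the configured defaults, as DELETE on a quota set does.
        self._overrides.pop(tenant_id, None)

# driver = ExampleQuotaDriver({'network': 10, 'port': 50})
# driver.update_quota_limit('t1', 'network', 20)
# driver.get_tenant_quotas('t1')  -> {'network': 20, 'port': 50}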
# from neutron._i18n import _ from neutron.api import extensions from neutron.api.v2 import attributes as attr from neutron.api.v2 import resource_helper as rh from neutron.common import exceptions from neutron.services.bgp.common import constants as bgp_consts BGP_EXT_ALIAS = 'bgp' BGP_SPEAKER_RESOURCE_NAME = 'bgp-speaker' BGP_SPEAKER_BODY_KEY_NAME = 'bgp_speaker' BGP_PEER_BODY_KEY_NAME = 'bgp_peer' RESOURCE_ATTRIBUTE_MAP = { BGP_SPEAKER_RESOURCE_NAME + 's': { 'id': {'allow_post': False, 'allow_put': False, 'validate': {'type:uuid': None}, 'is_visible': True, 'primary_key': True}, 'name': {'allow_post': True, 'allow_put': True, 'validate': {'type:string': attr.NAME_MAX_LEN}, 'is_visible': True, 'default': ''}, 'local_as': {'allow_post': True, 'allow_put': False, 'validate': {'type:range': (bgp_consts.MIN_ASNUM, bgp_consts.MAX_ASNUM)}, 'is_visible': True, 'default': None, 'required_by_policy': False, 'enforce_policy': False}, 'ip_version': {'allow_post': True, 'allow_put': False, 'validate': {'type:values': [4, 6]}, 'is_visible': True, 'default': None, 'required_by_policy': False, 'enforce_policy': False}, 'tenant_id': {'allow_post': True, 'allow_put': False, 'required_by_policy': False, 'validate': {'type:string': attr.TENANT_ID_MAX_LEN}, 'is_visible': True}, 'peers': {'allow_post': False, 'allow_put': False, 'validate': {'type:uuid_list': None}, 'is_visible': True, 'default': [], 'required_by_policy': False, 'enforce_policy': True}, 'networks': {'allow_post': False, 'allow_put': False, 'validate': {'type:uuid_list': None}, 'is_visible': True, 'default': [], 'required_by_policy': False, 'enforce_policy': True}, 'advertise_floating_ip_host_routes': { 'allow_post': True, 'allow_put': True, 'convert_to': attr.convert_to_boolean, 'validate': {'type:boolean': None}, 'is_visible': True, 'default': True, 'required_by_policy': False, 'enforce_policy': True}, 'advertise_tenant_networks': { 'allow_post': True, 'allow_put': True, 'convert_to': attr.convert_to_boolean, 'validate': {'type:boolean': None}, 'is_visible': True, 'default': True, 'required_by_policy': False, 'enforce_policy': True}, }, 'bgp-peers': { 'id': {'allow_post': False, 'allow_put': False, 'validate': {'type:uuid': None}, 'is_visible': True, 'primary_key': True}, 'name': {'allow_post': True, 'allow_put': True, 'validate': {'type:string': attr.NAME_MAX_LEN}, 'is_visible': True, 'default': ''}, 'peer_ip': {'allow_post': True, 'allow_put': False, 'required_by_policy': True, 'validate': {'type:ip_address': None}, 'is_visible': True}, 'remote_as': {'allow_post': True, 'allow_put': False, 'validate': {'type:range': (bgp_consts.MIN_ASNUM, bgp_consts.MAX_ASNUM)}, 'is_visible': True, 'default': None, 'required_by_policy': False, 'enforce_policy': False}, 'auth_type': {'allow_post': True, 'allow_put': False, 'required_by_policy': True, 'validate': {'type:values': bgp_consts.SUPPORTED_AUTH_TYPES}, 'is_visible': True}, 'password': {'allow_post': True, 'allow_put': True, 'required_by_policy': True, 'validate': {'type:string_or_none': None}, 'is_visible': False, 'default': None}, 'tenant_id': {'allow_post': True, 'allow_put': False, 'required_by_policy': False, 'validate': {'type:string': attr.TENANT_ID_MAX_LEN}, 'is_visible': True} } } # Dynamic Routing Exceptions class BgpSpeakerNotFound(exceptions.NotFound): message = _("BGP speaker %(id)s could not be found.") class BgpPeerNotFound(exceptions.NotFound): message = _("BGP peer %(id)s could not be found.") class BgpPeerNotAuthenticated(exceptions.NotFound): message = _("BGP peer %(bgp_peer_id)s 
not authenticated.") class BgpSpeakerPeerNotAssociated(exceptions.NotFound): message = _("BGP peer %(bgp_peer_id)s is not associated with " "BGP speaker %(bgp_speaker_id)s.") class BgpSpeakerNetworkNotAssociated(exceptions.NotFound): message = _("Network %(network_id)s is not associated with " "BGP speaker %(bgp_speaker_id)s.") class BgpSpeakerNetworkBindingError(exceptions.Conflict): message = _("Network %(network_id)s is already bound to BgpSpeaker " "%(bgp_speaker_id)s.") class NetworkNotBound(exceptions.NotFound): message = _("Network %(network_id)s is not bound to a BgpSpeaker.") class DuplicateBgpPeerIpException(exceptions.Conflict): _message = _("BGP Speaker %(bgp_speaker_id)s is already configured to " "peer with a BGP Peer at %(peer_ip)s, it cannot peer with " "BGP Peer %(bgp_peer_id)s.") class InvalidBgpPeerMd5Authentication(exceptions.BadRequest): message = _("A password must be supplied when using auth_type md5.") class NetworkNotBoundForIpVersion(NetworkNotBound): message = _("Network %(network_id)s is not bound to a IPv%(ip_version)s " "BgpSpeaker.") class Bgp(extensions.ExtensionDescriptor): @classmethod def get_name(cls): return "Neutron BGP Dynamic Routing Extension" @classmethod def get_alias(cls): return BGP_EXT_ALIAS @classmethod def get_description(cls): return("Discover and advertise routes for Neutron prefixes " "dynamically via BGP") @classmethod def get_updated(cls): return "2014-07-01T15:37:00-00:00" @classmethod def get_resources(cls): plural_mappings = rh.build_plural_mappings( {}, RESOURCE_ATTRIBUTE_MAP) attr.PLURALS.update(plural_mappings) action_map = {BGP_SPEAKER_RESOURCE_NAME: {'add_bgp_peer': 'PUT', 'remove_bgp_peer': 'PUT', 'add_gateway_network': 'PUT', 'remove_gateway_network': 'PUT', 'get_advertised_routes': 'GET'}} exts = rh.build_resource_info(plural_mappings, RESOURCE_ATTRIBUTE_MAP, BGP_EXT_ALIAS, action_map=action_map) return exts def get_extended_resources(self, version): if version == "2.0": return RESOURCE_ATTRIBUTE_MAP else: return {} def update_attributes_map(self, attributes): super(Bgp, self).update_attributes_map( attributes, extension_attrs_map=RESOURCE_ATTRIBUTE_MAP) neutron-8.4.0/neutron/extensions/default_subnetpools.py0000664000567000056710000000340013044372760024705 0ustar jenkinsjenkins00000000000000# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from neutron.api import extensions from neutron.api.v2 import attributes from neutron.common import constants EXTENDED_ATTRIBUTES_2_0 = { attributes.SUBNETS: { 'use_default_subnetpool': {'allow_post': True, 'allow_put': False, 'default': False, 'convert_to': attributes.convert_to_boolean, 'is_visible': False, }, }, } class Default_subnetpools(extensions.ExtensionDescriptor): """Extension class supporting default subnetpools.""" @classmethod def get_name(cls): return "Default Subnetpools" @classmethod def get_alias(cls): return "default-subnetpools" @classmethod def get_description(cls): return "Provides ability to mark and use a subnetpool as the default" @classmethod def get_updated(cls): return "2016-02-18T18:00:00-00:00" def get_required_extensions(self): return [constants.SUBNET_ALLOCATION_EXT_ALIAS] def get_extended_resources(self, version): if version == "2.0": return EXTENDED_ATTRIBUTES_2_0 else: return {} neutron-8.4.0/neutron/extensions/securitygroup.py0000664000567000056710000003424213044372760023560 0ustar jenkinsjenkins00000000000000# Copyright (c) 2012 OpenStack Foundation. # All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import abc import netaddr from oslo_config import cfg from oslo_utils import uuidutils import six from neutron._i18n import _ from neutron.api import extensions from neutron.api.v2 import attributes as attr from neutron.api.v2 import base from neutron.common import constants as const from neutron.common import exceptions as nexception from neutron import manager from neutron.quota import resource_registry # Security group Exceptions class SecurityGroupInvalidPortRange(nexception.InvalidInput): message = _("For TCP/UDP protocols, port_range_min must be " "<= port_range_max") class SecurityGroupInvalidPortValue(nexception.InvalidInput): message = _("Invalid value for port %(port)s") class SecurityGroupInvalidIcmpValue(nexception.InvalidInput): message = _("Invalid value for ICMP %(field)s (%(attr)s) " "%(value)s. 
It must be 0 to 255.") class SecurityGroupEthertypeConflictWithProtocol(nexception.InvalidInput): message = _("Invalid ethertype %(ethertype)s for protocol " "%(protocol)s.") class SecurityGroupMissingIcmpType(nexception.InvalidInput): message = _("ICMP code (port-range-max) %(value)s is provided" " but ICMP type (port-range-min) is missing.") class SecurityGroupInUse(nexception.InUse): message = _("Security Group %(id)s %(reason)s.") def __init__(self, **kwargs): if 'reason' not in kwargs: kwargs['reason'] = _("in use") super(SecurityGroupInUse, self).__init__(**kwargs) class SecurityGroupCannotRemoveDefault(nexception.InUse): message = _("Insufficient rights for removing default security group.") class SecurityGroupCannotUpdateDefault(nexception.InUse): message = _("Updating default security group not allowed.") class SecurityGroupDefaultAlreadyExists(nexception.InUse): message = _("Default security group already exists.") class SecurityGroupRuleInvalidProtocol(nexception.InvalidInput): message = _("Security group rule protocol %(protocol)s not supported. " "Only protocol values %(values)s and integer representations " "[0 to 255] are supported.") class SecurityGroupRulesNotSingleTenant(nexception.InvalidInput): message = _("Multiple tenant_ids in bulk security group rule create" " not allowed") class SecurityGroupRemoteGroupAndRemoteIpPrefix(nexception.InvalidInput): message = _("Only remote_ip_prefix or remote_group_id may " "be provided.") class SecurityGroupProtocolRequiredWithPorts(nexception.InvalidInput): message = _("Must also specify protocol if port range is given.") class SecurityGroupNotSingleGroupRules(nexception.InvalidInput): message = _("Only allowed to update rules for " "one security profile at a time") class SecurityGroupNotFound(nexception.NotFound): message = _("Security group %(id)s does not exist") class SecurityGroupRuleNotFound(nexception.NotFound): message = _("Security group rule %(id)s does not exist") class DuplicateSecurityGroupRuleInPost(nexception.InUse): message = _("Duplicate Security Group Rule in POST.") class SecurityGroupRuleExists(nexception.InUse): message = _("Security group rule already exists. Rule id is %(rule_id)s.") class SecurityGroupRuleInUse(nexception.InUse): message = _("Security Group Rule %(id)s %(reason)s.") def __init__(self, **kwargs): if 'reason' not in kwargs: kwargs['reason'] = _("in use") super(SecurityGroupRuleInUse, self).__init__(**kwargs) class SecurityGroupRuleParameterConflict(nexception.InvalidInput): message = _("Conflicting value ethertype %(ethertype)s for CIDR %(cidr)s") class SecurityGroupConflict(nexception.Conflict): message = _("Error %(reason)s while attempting the operation.") class SecurityGroupRuleInvalidEtherType(nexception.InvalidInput): message = _("Security group rule for ethertype '%(ethertype)s' not " "supported. Allowed values are %(values)s.") def convert_protocol(value): if value is None: return try: val = int(value) if val >= 0 and val <= 255: # Set value of protocol number to string due to bug 1381379, # PostgreSQL fails when it tries to compare integer with string, # that exists in db. 
return str(value) raise SecurityGroupRuleInvalidProtocol( protocol=value, values=sg_supported_protocols) except (ValueError, TypeError): if value.lower() in sg_supported_protocols: return value.lower() raise SecurityGroupRuleInvalidProtocol( protocol=value, values=sg_supported_protocols) except AttributeError: raise SecurityGroupRuleInvalidProtocol( protocol=value, values=sg_supported_protocols) def convert_ethertype_to_case_insensitive(value): if isinstance(value, six.string_types): for ethertype in sg_supported_ethertypes: if ethertype.lower() == value.lower(): return ethertype raise SecurityGroupRuleInvalidEtherType( ethertype=value, values=sg_supported_ethertypes) def convert_validate_port_value(port): if port is None: return port try: val = int(port) except (ValueError, TypeError): raise SecurityGroupInvalidPortValue(port=port) if val >= 0 and val <= 65535: return val else: raise SecurityGroupInvalidPortValue(port=port) def convert_to_uuid_list_or_none(value_list): if value_list is None: return for sg_id in value_list: if not uuidutils.is_uuid_like(sg_id): msg = _("'%s' is not an integer or uuid") % sg_id raise nexception.InvalidInput(error_message=msg) return value_list def convert_ip_prefix_to_cidr(ip_prefix): if not ip_prefix: return try: cidr = netaddr.IPNetwork(ip_prefix) return str(cidr) except (ValueError, TypeError, netaddr.AddrFormatError): raise nexception.InvalidCIDR(input=ip_prefix) def _validate_name_not_default(data, valid_values=None): if data.lower() == "default": raise SecurityGroupDefaultAlreadyExists() attr.validators['type:name_not_default'] = _validate_name_not_default # TODO(amotoki): const.IP_PROTOCOL_MAP now comes from neutron-lib, # so we cannot add PROTO_NAME_IPV6_ICMP_LEGACY to const.IP_PROTOCOL_MAP # in neutron.common.constants. IP_PROTOCOL_MAP in neutron-lib should # be updated and neutron should consume it once Mitaka backport is done. 
sg_supported_protocols = ([None] + list(const.IP_PROTOCOL_MAP.keys()) + list(const.IP_PROTOCOL_NAME_ALIASES.keys())) sg_supported_ethertypes = ['IPv4', 'IPv6'] SECURITYGROUPS = 'security_groups' SECURITYGROUPRULES = 'security_group_rules' # Attribute Map RESOURCE_ATTRIBUTE_MAP = { SECURITYGROUPS: { 'id': {'allow_post': False, 'allow_put': False, 'validate': {'type:uuid': None}, 'is_visible': True, 'primary_key': True}, 'name': {'allow_post': True, 'allow_put': True, 'is_visible': True, 'default': '', 'validate': {'type:name_not_default': attr.NAME_MAX_LEN}}, 'description': {'allow_post': True, 'allow_put': True, 'validate': {'type:string': attr.DESCRIPTION_MAX_LEN}, 'is_visible': True, 'default': ''}, 'tenant_id': {'allow_post': True, 'allow_put': False, 'required_by_policy': True, 'validate': {'type:string': attr.TENANT_ID_MAX_LEN}, 'is_visible': True}, SECURITYGROUPRULES: {'allow_post': False, 'allow_put': False, 'is_visible': True}, }, SECURITYGROUPRULES: { 'id': {'allow_post': False, 'allow_put': False, 'validate': {'type:uuid': None}, 'is_visible': True, 'primary_key': True}, 'security_group_id': {'allow_post': True, 'allow_put': False, 'is_visible': True, 'required_by_policy': True}, 'remote_group_id': {'allow_post': True, 'allow_put': False, 'default': None, 'is_visible': True}, 'direction': {'allow_post': True, 'allow_put': False, 'is_visible': True, 'validate': {'type:values': ['ingress', 'egress']}}, 'protocol': {'allow_post': True, 'allow_put': False, 'is_visible': True, 'default': None, 'convert_to': convert_protocol}, 'port_range_min': {'allow_post': True, 'allow_put': False, 'convert_to': convert_validate_port_value, 'default': None, 'is_visible': True}, 'port_range_max': {'allow_post': True, 'allow_put': False, 'convert_to': convert_validate_port_value, 'default': None, 'is_visible': True}, 'ethertype': {'allow_post': True, 'allow_put': False, 'is_visible': True, 'default': 'IPv4', 'convert_to': convert_ethertype_to_case_insensitive, 'validate': {'type:values': sg_supported_ethertypes}}, 'remote_ip_prefix': {'allow_post': True, 'allow_put': False, 'default': None, 'is_visible': True, 'convert_to': convert_ip_prefix_to_cidr}, 'tenant_id': {'allow_post': True, 'allow_put': False, 'required_by_policy': True, 'validate': {'type:string': attr.TENANT_ID_MAX_LEN}, 'is_visible': True}, } } EXTENDED_ATTRIBUTES_2_0 = { 'ports': {SECURITYGROUPS: {'allow_post': True, 'allow_put': True, 'is_visible': True, 'convert_to': convert_to_uuid_list_or_none, 'default': attr.ATTR_NOT_SPECIFIED}}} security_group_quota_opts = [ cfg.IntOpt('quota_security_group', default=10, help=_('Number of security groups allowed per tenant. ' 'A negative value means unlimited.')), cfg.IntOpt('quota_security_group_rule', default=100, help=_('Number of security rules allowed per tenant. ' 'A negative value means unlimited.')), ] cfg.CONF.register_opts(security_group_quota_opts, 'QUOTAS') class Securitygroup(extensions.ExtensionDescriptor): """Security group extension.""" @classmethod def get_name(cls): return "security-group" @classmethod def get_alias(cls): return "security-group" @classmethod def get_description(cls): return "The security groups extension." 
@classmethod def get_updated(cls): return "2012-10-05T10:00:00-00:00" @classmethod def get_resources(cls): """Returns Ext Resources.""" my_plurals = [(key, key[:-1]) for key in RESOURCE_ATTRIBUTE_MAP.keys()] attr.PLURALS.update(dict(my_plurals)) exts = [] plugin = manager.NeutronManager.get_plugin() for resource_name in ['security_group', 'security_group_rule']: collection_name = resource_name.replace('_', '-') + "s" params = RESOURCE_ATTRIBUTE_MAP.get(resource_name + "s", dict()) resource_registry.register_resource_by_name(resource_name) controller = base.create_resource(collection_name, resource_name, plugin, params, allow_bulk=True, allow_pagination=True, allow_sorting=True) ex = extensions.ResourceExtension(collection_name, controller, attr_map=params) exts.append(ex) return exts def update_attributes_map(self, attributes): super(Securitygroup, self).update_attributes_map( attributes, extension_attrs_map=RESOURCE_ATTRIBUTE_MAP) def get_extended_resources(self, version): if version == "2.0": return dict(list(EXTENDED_ATTRIBUTES_2_0.items()) + list(RESOURCE_ATTRIBUTE_MAP.items())) else: return {} @six.add_metaclass(abc.ABCMeta) class SecurityGroupPluginBase(object): @abc.abstractmethod def create_security_group(self, context, security_group): pass @abc.abstractmethod def update_security_group(self, context, id, security_group): pass @abc.abstractmethod def delete_security_group(self, context, id): pass @abc.abstractmethod def get_security_groups(self, context, filters=None, fields=None, sorts=None, limit=None, marker=None, page_reverse=False): pass @abc.abstractmethod def get_security_group(self, context, id, fields=None): pass @abc.abstractmethod def create_security_group_rule(self, context, security_group_rule): pass @abc.abstractmethod def delete_security_group_rule(self, context, id): pass @abc.abstractmethod def get_security_group_rules(self, context, filters=None, fields=None, sorts=None, limit=None, marker=None, page_reverse=False): pass @abc.abstractmethod def get_security_group_rule(self, context, id, fields=None): pass neutron-8.4.0/neutron/extensions/providernet.py0000664000567000056710000000672313044372760023200 0ustar jenkinsjenkins00000000000000# Copyright (c) 2012 OpenStack Foundation. # All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
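# Illustrative sketch (standalone, not part of the tree): the accepted inputs
# of convert_protocol() in securitygroup.py above. Numbers 0..255 come back as
# *strings* (bug 1381379: PostgreSQL cannot compare an integer against the
# string values already stored in the database), while names are lower-cased
# and checked against the supported list. The three-name list is a shortened
# stand-in for sg_supported_protocols, and ValueError stands in for
# SecurityGroupRuleInvalidProtocol.
EXAMPLE_PROTOCOLS = ['tcp', 'udp', 'icmp']


def example_convert_protocol(value):
    if value is None:
        return None
    try:
        num = int(value)
    except (ValueError, TypeError):
        name = str(value).lower()
        if name in EXAMPLE_PROTOCOLS:
            return name
        raise ValueError('unsupported protocol %r' % (value,))
    if 0 <= num <= 255:
        return str(value)  # '6', not 6
    raise ValueError('protocol number %r is out of range' % (value,))

# example_convert_protocol(6)      -> '6'
# example_convert_protocol('TCP')  -> 'tcp'
# example_convert_protocol('sctp') -> ValueError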
from neutron._i18n import _ from neutron.api import extensions from neutron.api.v2 import attributes from neutron.common import exceptions as n_exc NETWORK_TYPE = 'provider:network_type' PHYSICAL_NETWORK = 'provider:physical_network' SEGMENTATION_ID = 'provider:segmentation_id' ATTRIBUTES = (NETWORK_TYPE, PHYSICAL_NETWORK, SEGMENTATION_ID) # Common definitions for maximum string field length NETWORK_TYPE_MAX_LEN = 32 PHYSICAL_NETWORK_MAX_LEN = 64 EXTENDED_ATTRIBUTES_2_0 = { 'networks': { NETWORK_TYPE: {'allow_post': True, 'allow_put': True, 'validate': {'type:string': NETWORK_TYPE_MAX_LEN}, 'default': attributes.ATTR_NOT_SPECIFIED, 'enforce_policy': True, 'is_visible': True}, PHYSICAL_NETWORK: {'allow_post': True, 'allow_put': True, 'validate': {'type:string': PHYSICAL_NETWORK_MAX_LEN}, 'default': attributes.ATTR_NOT_SPECIFIED, 'enforce_policy': True, 'is_visible': True}, SEGMENTATION_ID: {'allow_post': True, 'allow_put': True, 'convert_to': attributes.convert_to_int, 'enforce_policy': True, 'default': attributes.ATTR_NOT_SPECIFIED, 'is_visible': True}, } } def _raise_if_updates_provider_attributes(attrs): """Raise exception if provider attributes are present. This method is used for plugins that do not support updating provider networks. """ if any(attributes.is_attr_set(attrs.get(a)) for a in ATTRIBUTES): msg = _("Plugin does not support updating provider attributes") raise n_exc.InvalidInput(error_message=msg) class Providernet(extensions.ExtensionDescriptor): """Extension class supporting provider networks. This class is used by neutron's extension framework to make metadata about the provider network extension available to clients. No new resources are defined by this extension. Instead, the existing network resource's request and response messages are extended with attributes in the provider namespace. With admin rights, network dictionaries returned will also include provider attributes. """ @classmethod def get_name(cls): return "Provider Network" @classmethod def get_alias(cls): return "provider" @classmethod def get_description(cls): return "Expose mapping of virtual networks to physical networks" @classmethod def get_updated(cls): return "2012-09-07T10:00:00-00:00" def get_extended_resources(self, version): if version == "2.0": return EXTENDED_ATTRIBUTES_2_0 else: return {} neutron-8.4.0/neutron/extensions/qos.py0000664000567000056710000002044013044372760021431 0ustar jenkinsjenkins00000000000000# Copyright (c) 2015 Red Hat Inc. # All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
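# Illustrative sketch (standalone, not part of the tree): the guard pattern
# behind _raise_if_updates_provider_attributes() in providernet.py above --
# reject an update whenever any provider:* attribute is actually set. This
# simplified version treats mere key presence as "set"; the real helper
# compares each value against the attributes.ATTR_NOT_SPECIFIED sentinel,
# which the UNSET object below stands in for.
UNSET = object()
EXAMPLE_PROVIDER_ATTRS = ('provider:network_type',
                          'provider:physical_network',
                          'provider:segmentation_id')


def example_reject_provider_updates(attrs):
    if any(attrs.get(a, UNSET) is not UNSET for a in EXAMPLE_PROVIDER_ATTRS):
        raise ValueError('Plugin does not support updating provider '
                         'attributes')

# example_reject_provider_updates({'name': 'net1'})                   # ok
# example_reject_provider_updates({'provider:network_type': 'vlan'})  # raises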
import abc import itertools import six from neutron.api import extensions from neutron.api.v2 import attributes as attr from neutron.api.v2 import base from neutron.api.v2 import resource_helper from neutron import manager from neutron.plugins.common import constants from neutron.services.qos import qos_consts from neutron.services import service_base QOS_PREFIX = "/qos" # Attribute Map QOS_RULE_COMMON_FIELDS = { 'id': {'allow_post': False, 'allow_put': False, 'validate': {'type:uuid': None}, 'is_visible': True, 'primary_key': True}, 'tenant_id': {'allow_post': True, 'allow_put': False, 'required_by_policy': True, 'is_visible': True}, } RESOURCE_ATTRIBUTE_MAP = { 'policies': { 'id': {'allow_post': False, 'allow_put': False, 'validate': {'type:uuid': None}, 'is_visible': True, 'primary_key': True}, 'name': {'allow_post': True, 'allow_put': True, 'is_visible': True, 'default': '', 'validate': {'type:string': None}}, 'description': {'allow_post': True, 'allow_put': True, 'is_visible': True, 'default': '', 'validate': {'type:string': None}}, 'shared': {'allow_post': True, 'allow_put': True, 'is_visible': True, 'default': False, 'convert_to': attr.convert_to_boolean}, 'tenant_id': {'allow_post': True, 'allow_put': False, 'required_by_policy': True, 'is_visible': True}, 'rules': {'allow_post': False, 'allow_put': False, 'is_visible': True}, }, 'rule_types': { 'type': {'allow_post': False, 'allow_put': False, 'is_visible': True} } } SUB_RESOURCE_ATTRIBUTE_MAP = { 'bandwidth_limit_rules': { 'parent': {'collection_name': 'policies', 'member_name': 'policy'}, 'parameters': dict(QOS_RULE_COMMON_FIELDS, **{'max_kbps': { 'allow_post': True, 'allow_put': True, 'is_visible': True, 'default': None, 'validate': {'type:non_negative': None}}, 'max_burst_kbps': { 'allow_post': True, 'allow_put': True, 'is_visible': True, 'default': 0, 'validate': {'type:non_negative': None}}}) } } EXTENDED_ATTRIBUTES_2_0 = { 'ports': {qos_consts.QOS_POLICY_ID: { 'allow_post': True, 'allow_put': True, 'is_visible': True, 'default': None, 'validate': {'type:uuid_or_none': None}}}, 'networks': {qos_consts.QOS_POLICY_ID: { 'allow_post': True, 'allow_put': True, 'is_visible': True, 'default': None, 'validate': {'type:uuid_or_none': None}}}} class Qos(extensions.ExtensionDescriptor): """Quality of Service API extension.""" @classmethod def get_name(cls): return "Quality of Service" @classmethod def get_alias(cls): return "qos" @classmethod def get_description(cls): return "The Quality of Service extension." 
@classmethod def get_updated(cls): return "2015-06-08T10:00:00-00:00" @classmethod def get_plugin_interface(cls): return QoSPluginBase @classmethod def get_resources(cls): """Returns Ext Resources.""" special_mappings = {'policies': 'policy'} plural_mappings = resource_helper.build_plural_mappings( special_mappings, itertools.chain(RESOURCE_ATTRIBUTE_MAP, SUB_RESOURCE_ATTRIBUTE_MAP)) attr.PLURALS.update(plural_mappings) resources = resource_helper.build_resource_info( plural_mappings, RESOURCE_ATTRIBUTE_MAP, constants.QOS, translate_name=True, allow_bulk=True) plugin = manager.NeutronManager.get_service_plugins()[constants.QOS] for collection_name in SUB_RESOURCE_ATTRIBUTE_MAP: resource_name = collection_name[:-1] parent = SUB_RESOURCE_ATTRIBUTE_MAP[collection_name].get('parent') params = SUB_RESOURCE_ATTRIBUTE_MAP[collection_name].get( 'parameters') controller = base.create_resource(collection_name, resource_name, plugin, params, allow_bulk=True, parent=parent, allow_pagination=True, allow_sorting=True) resource = extensions.ResourceExtension( collection_name, controller, parent, path_prefix=QOS_PREFIX, attr_map=params) resources.append(resource) return resources def update_attributes_map(self, attributes, extension_attrs_map=None): super(Qos, self).update_attributes_map( attributes, extension_attrs_map=RESOURCE_ATTRIBUTE_MAP) def get_extended_resources(self, version): if version == "2.0": return dict(list(EXTENDED_ATTRIBUTES_2_0.items()) + list(RESOURCE_ATTRIBUTE_MAP.items())) else: return {} @six.add_metaclass(abc.ABCMeta) class QoSPluginBase(service_base.ServicePluginBase): path_prefix = QOS_PREFIX def get_plugin_description(self): return "QoS Service Plugin for ports and networks" def get_plugin_type(self): return constants.QOS @abc.abstractmethod def get_policy(self, context, policy_id, fields=None): pass @abc.abstractmethod def get_policies(self, context, filters=None, fields=None, sorts=None, limit=None, marker=None, page_reverse=False): pass @abc.abstractmethod def create_policy(self, context, policy): pass @abc.abstractmethod def update_policy(self, context, policy_id, policy): pass @abc.abstractmethod def delete_policy(self, context, policy_id): pass @abc.abstractmethod def get_policy_bandwidth_limit_rule(self, context, rule_id, policy_id, fields=None): pass @abc.abstractmethod def get_policy_bandwidth_limit_rules(self, context, policy_id, filters=None, fields=None, sorts=None, limit=None, marker=None, page_reverse=False): pass @abc.abstractmethod def create_policy_bandwidth_limit_rule(self, context, policy_id, bandwidth_limit_rule): pass @abc.abstractmethod def update_policy_bandwidth_limit_rule(self, context, rule_id, policy_id, bandwidth_limit_rule): pass @abc.abstractmethod def delete_policy_bandwidth_limit_rule(self, context, rule_id, policy_id): pass @abc.abstractmethod def get_rule_types(self, context, filters=None, fields=None, sorts=None, limit=None, marker=None, page_reverse=False): pass neutron-8.4.0/neutron/extensions/netmtu.py0000664000567000056710000000256613044372760022154 0ustar jenkinsjenkins00000000000000# Copyright 2015 OpenStack Foundation. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron.api import extensions MTU = 'mtu' EXTENDED_ATTRIBUTES_2_0 = { 'networks': { MTU: {'allow_post': False, 'allow_put': False, 'is_visible': True}, }, } class Netmtu(extensions.ExtensionDescriptor): """Extension class supporting network MTU.""" @classmethod def get_name(cls): return "Network MTU" @classmethod def get_alias(cls): return "net-mtu" @classmethod def get_description(cls): return "Provides MTU attribute for a network resource." @classmethod def get_updated(cls): return "2015-03-25T10:00:00-00:00" def get_extended_resources(self, version): if version == "2.0": return EXTENDED_ATTRIBUTES_2_0 else: return {} neutron-8.4.0/neutron/extensions/network_ip_availability.py0000664000567000056710000000574313044372760025553 0ustar jenkinsjenkins00000000000000# Copyright 2016 GoDaddy. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import neutron.api.extensions as extensions import neutron.api.v2.attributes as attr import neutron.api.v2.base as base import neutron.services.network_ip_availability.plugin as plugin RESOURCE_NAME = "network_ip_availability" RESOURCE_PLURAL = "network_ip_availabilities" COLLECTION_NAME = RESOURCE_PLURAL.replace('_', '-') EXT_ALIAS = RESOURCE_NAME.replace('_', '-') RESOURCE_ATTRIBUTE_MAP = { RESOURCE_PLURAL: { 'network_id': {'allow_post': False, 'allow_put': False, 'is_visible': True}, 'network_name': {'allow_post': False, 'allow_put': False, 'is_visible': True}, 'tenant_id': {'allow_post': False, 'allow_put': False, 'is_visible': True}, 'total_ips': {'allow_post': False, 'allow_put': False, 'is_visible': True}, 'used_ips': {'allow_post': False, 'allow_put': False, 'is_visible': True}, 'subnet_ip_availability': {'allow_post': False, 'allow_put': False, 'is_visible': True}, # TODO(wwriverrat) Make composite attribute for subnet_ip_availability } } class Network_ip_availability(extensions.ExtensionDescriptor): """Extension class supporting network ip availability information.""" @classmethod def get_name(cls): return "Network IP Availability" @classmethod def get_alias(cls): return EXT_ALIAS @classmethod def get_description(cls): return "Provides IP availability data for each network and subnet." 
@classmethod def get_updated(cls): return "2015-09-24T00:00:00-00:00" @classmethod def get_resources(cls): """Returns Extended Resource for service type management.""" attr.PLURALS[RESOURCE_PLURAL] = RESOURCE_NAME resource_attributes = RESOURCE_ATTRIBUTE_MAP[RESOURCE_PLURAL] controller = base.create_resource( RESOURCE_PLURAL, RESOURCE_NAME, plugin.NetworkIPAvailabilityPlugin.get_instance(), resource_attributes) return [extensions.ResourceExtension(COLLECTION_NAME, controller, attr_map=resource_attributes)] def get_extended_resources(self, version): if version == "2.0": return RESOURCE_ATTRIBUTE_MAP else: return {} neutron-8.4.0/neutron/extensions/l3_ext_gw_mode.py0000664000567000056710000000442113044372760023527 0ustar jenkinsjenkins00000000000000# Copyright 2013 VMware, Inc. # All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron.api import extensions from neutron.api.v2 import attributes as attrs from neutron.extensions import l3 EXTENDED_ATTRIBUTES_2_0 = { 'routers': {l3.EXTERNAL_GW_INFO: {'allow_post': True, 'allow_put': True, 'is_visible': True, 'default': None, 'enforce_policy': True, 'validate': {'type:dict_or_nodata': {'network_id': {'type:uuid': None, 'required': True}, 'enable_snat': {'type:boolean': None, 'required': False, 'convert_to': attrs.convert_to_boolean}, 'external_fixed_ips': { 'convert_list_to': attrs.convert_kvp_list_to_dict, 'type:fixed_ips': None, 'default': None, 'required': False} } }}}} class L3_ext_gw_mode(extensions.ExtensionDescriptor): @classmethod def get_name(cls): return "Neutron L3 Configurable external gateway mode" @classmethod def get_alias(cls): return "ext-gw-mode" @classmethod def get_description(cls): return ("Extension of the router abstraction for specifying whether " "SNAT should occur on the external gateway") @classmethod def get_updated(cls): return "2013-03-28T10:00:00-00:00" def get_required_extensions(self): return ["router"] def get_extended_resources(self, version): if version == "2.0": return dict(EXTENDED_ATTRIBUTES_2_0.items()) else: return {} neutron-8.4.0/neutron/extensions/address_scope.py0000664000567000056710000001311613044372760023447 0ustar jenkinsjenkins00000000000000# Copyright (c) 2015 Huawei Technologies Co.,LTD. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
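# Illustrative sketch (standalone, not part of the tree): the kind of figures
# the network-ip-availability extension above reports per subnet. Neutron
# derives total_ips from allocation pools and counts allocations from the DB;
# this simplified version just takes the host capacity of the CIDR. Requires
# the Python 3 stdlib ipaddress module (or its backport on Python 2).
import ipaddress


def example_subnet_availability(cidr, used_ips):
    net = ipaddress.ip_network(cidr)
    # IPv4 loses the network and broadcast addresses; IPv6 subnets do not.
    total = net.num_addresses - 2 if net.version == 4 else net.num_addresses
    return {'cidr': cidr, 'total_ips': total, 'used_ips': used_ips}

# example_subnet_availability(u'192.0.2.0/24', 3)
# -> {'cidr': u'192.0.2.0/24', 'total_ips': 254, 'used_ips': 3}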
import abc import six from neutron._i18n import _ from neutron.api import extensions from neutron.api.v2 import attributes as attr from neutron.api.v2 import base from neutron.common import exceptions as nexception from neutron import manager ADDRESS_SCOPE = 'address_scope' ADDRESS_SCOPES = '%ss' % ADDRESS_SCOPE ADDRESS_SCOPE_ID = 'address_scope_id' IPV4_ADDRESS_SCOPE = 'ipv4_%s' % ADDRESS_SCOPE IPV6_ADDRESS_SCOPE = 'ipv6_%s' % ADDRESS_SCOPE # Attribute Map RESOURCE_ATTRIBUTE_MAP = { ADDRESS_SCOPES: { 'id': {'allow_post': False, 'allow_put': False, 'validate': {'type:uuid': None}, 'is_visible': True, 'primary_key': True}, 'name': {'allow_post': True, 'allow_put': True, 'default': '', 'validate': {'type:string': attr.NAME_MAX_LEN}, 'is_visible': True}, 'tenant_id': {'allow_post': True, 'allow_put': False, 'validate': {'type:string': attr.TENANT_ID_MAX_LEN}, 'required_by_policy': True, 'is_visible': True}, attr.SHARED: {'allow_post': True, 'allow_put': True, 'default': False, 'convert_to': attr.convert_to_boolean, 'is_visible': True, 'required_by_policy': True, 'enforce_policy': True}, 'ip_version': {'allow_post': True, 'allow_put': False, 'convert_to': attr.convert_to_int, 'validate': {'type:values': [4, 6]}, 'is_visible': True}, }, attr.SUBNETPOOLS: { ADDRESS_SCOPE_ID: {'allow_post': True, 'allow_put': True, 'default': attr.ATTR_NOT_SPECIFIED, 'validate': {'type:uuid_or_none': None}, 'is_visible': True} }, attr.NETWORKS: { IPV4_ADDRESS_SCOPE: {'allow_post': False, 'allow_put': False, 'is_visible': True}, IPV6_ADDRESS_SCOPE: {'allow_post': False, 'allow_put': False, 'is_visible': True}, } } class AddressScopeNotFound(nexception.NotFound): message = _("Address scope %(address_scope_id)s could not be found") class AddressScopeInUse(nexception.InUse): message = _("Unable to complete operation on " "address scope %(address_scope_id)s. There are one or more" " subnet pools in use on the address scope") class AddressScopeUpdateError(nexception.BadRequest): message = _("Unable to update address scope %(address_scope_id)s : " "%(reason)s") class Address_scope(extensions.ExtensionDescriptor): """Extension class supporting Address Scopes.""" @classmethod def get_name(cls): return "Address scope" @classmethod def get_alias(cls): return "address-scope" @classmethod def get_description(cls): return "Address scopes extension." 
@classmethod def get_updated(cls): return "2015-07-26T10:00:00-00:00" @classmethod def get_resources(cls): """Returns Ext Resources.""" my_plurals = [(key, key[:-1]) for key in RESOURCE_ATTRIBUTE_MAP.keys()] attr.PLURALS.update(dict(my_plurals)) plugin = manager.NeutronManager.get_plugin() collection_name = ADDRESS_SCOPES.replace('_', '-') params = RESOURCE_ATTRIBUTE_MAP.get(ADDRESS_SCOPES, dict()) controller = base.create_resource(collection_name, ADDRESS_SCOPE, plugin, params, allow_bulk=True, allow_pagination=True, allow_sorting=True) ex = extensions.ResourceExtension(collection_name, controller, attr_map=params) return [ex] def get_extended_resources(self, version): if version == "2.0": return RESOURCE_ATTRIBUTE_MAP else: return {} @six.add_metaclass(abc.ABCMeta) class AddressScopePluginBase(object): @abc.abstractmethod def create_address_scope(self, context, address_scope): pass @abc.abstractmethod def update_address_scope(self, context, id, address_scope): pass @abc.abstractmethod def get_address_scope(self, context, id, fields=None): pass @abc.abstractmethod def get_address_scopes(self, context, filters=None, fields=None, sorts=None, limit=None, marker=None, page_reverse=False): pass @abc.abstractmethod def delete_address_scope(self, context, id): pass def get_address_scopes_count(self, context, filters=None): raise NotImplementedError() neutron-8.4.0/neutron/extensions/portsecurity.py0000664000567000056710000000467613044372760023420 0ustar jenkinsjenkins00000000000000# Copyright 2013 VMware, Inc. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron._i18n import _ from neutron.api import extensions from neutron.api.v2 import attributes from neutron.common import exceptions as nexception DEFAULT_PORT_SECURITY = True class PortSecurityPortHasSecurityGroup(nexception.InUse): message = _("Port has security group associated. 
Cannot disable port " "security or ip address until security group is removed") class PortSecurityAndIPRequiredForSecurityGroups(nexception.InvalidInput): message = _("Port security must be enabled and port must have an IP" " address in order to use security groups.") PORTSECURITY = 'port_security_enabled' EXTENDED_ATTRIBUTES_2_0 = { 'networks': { PORTSECURITY: {'allow_post': True, 'allow_put': True, 'convert_to': attributes.convert_to_boolean, 'enforce_policy': True, 'default': DEFAULT_PORT_SECURITY, 'is_visible': True}, }, 'ports': { PORTSECURITY: {'allow_post': True, 'allow_put': True, 'convert_to': attributes.convert_to_boolean, 'default': attributes.ATTR_NOT_SPECIFIED, 'enforce_policy': True, 'is_visible': True}, } } class Portsecurity(extensions.ExtensionDescriptor): """Extension class supporting port security.""" @classmethod def get_name(cls): return "Port Security" @classmethod def get_alias(cls): return "port-security" @classmethod def get_description(cls): return "Provides port security" @classmethod def get_updated(cls): return "2012-07-23T10:00:00-00:00" def get_extended_resources(self, version): if version == "2.0": return EXTENDED_ATTRIBUTES_2_0 else: return {} neutron-8.4.0/neutron/extensions/servicetype.py0000664000567000056710000000532313044372760023174 0ustar jenkinsjenkins00000000000000# Copyright 2013 OpenStack Foundation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from neutron._i18n import _ from neutron.api import extensions from neutron.api.v2 import attributes from neutron.api.v2 import base from neutron.db import servicetype_db RESOURCE_NAME = "service_provider" COLLECTION_NAME = "%ss" % RESOURCE_NAME SERVICE_ATTR = 'service_type' PLUGIN_ATTR = 'plugin' DRIVER_ATTR = 'driver' EXT_ALIAS = 'service-type' # Attribute Map for Service Provider Resource # Allow read-only access RESOURCE_ATTRIBUTE_MAP = { COLLECTION_NAME: { 'service_type': {'allow_post': False, 'allow_put': False, 'is_visible': True}, 'name': {'allow_post': False, 'allow_put': False, 'is_visible': True}, 'default': {'allow_post': False, 'allow_put': False, 'is_visible': True}, } } class Servicetype(extensions.ExtensionDescriptor): @classmethod def get_name(cls): return _("Neutron Service Type Management") @classmethod def get_alias(cls): return EXT_ALIAS @classmethod def get_description(cls): return _("API for retrieving service providers for " "Neutron advanced services") @classmethod def get_updated(cls): return "2013-01-20T00:00:00-00:00" @classmethod def get_resources(cls): """Returns Extended Resource for service type management.""" my_plurals = [(key, key[:-1]) for key in RESOURCE_ATTRIBUTE_MAP.keys()] attributes.PLURALS.update(dict(my_plurals)) attr_map = RESOURCE_ATTRIBUTE_MAP[COLLECTION_NAME] collection_name = COLLECTION_NAME.replace('_', '-') controller = base.create_resource( collection_name, RESOURCE_NAME, servicetype_db.ServiceTypeManager.get_instance(), attr_map) return [extensions.ResourceExtension(collection_name, controller, attr_map=attr_map)] def get_extended_resources(self, version): if version == "2.0": return RESOURCE_ATTRIBUTE_MAP else: return {} neutron-8.4.0/neutron/extensions/metering.py0000664000567000056710000001467513044372760022456 0ustar jenkinsjenkins00000000000000# Copyright (C) 2013 eNovance SAS # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
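# Editorial note (hedged): the read-only service_provider resource above
# surfaces entries configured in neutron.conf, conventionally written as
#     service_provider = <service_type>:<name>:<driver>[:default]
# so a GET on service-providers returns dicts holding exactly the
# 'service_type', 'name' and 'default' keys from RESOURCE_ATTRIBUTE_MAP.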
import abc import six from neutron._i18n import _ from neutron.api import extensions from neutron.api.v2 import attributes as attr from neutron.api.v2 import resource_helper from neutron.common import exceptions as nexception from neutron.plugins.common import constants from neutron.services import service_base class MeteringLabelNotFound(nexception.NotFound): message = _("Metering label %(label_id)s does not exist") class DuplicateMeteringRuleInPost(nexception.InUse): message = _("Duplicate Metering Rule in POST.") class MeteringLabelRuleNotFound(nexception.NotFound): message = _("Metering label rule %(rule_id)s does not exist") class MeteringLabelRuleOverlaps(nexception.Conflict): message = _("Metering label rule with remote_ip_prefix " "%(remote_ip_prefix)s overlaps another") RESOURCE_ATTRIBUTE_MAP = { 'metering_labels': { 'id': {'allow_post': False, 'allow_put': False, 'is_visible': True, 'primary_key': True}, 'name': {'allow_post': True, 'allow_put': False, 'is_visible': True, 'default': ''}, 'description': {'allow_post': True, 'allow_put': False, 'is_visible': True, 'default': ''}, 'tenant_id': {'allow_post': True, 'allow_put': False, 'required_by_policy': True, 'validate': {'type:string': attr.TENANT_ID_MAX_LEN}, 'is_visible': True}, 'shared': {'allow_post': True, 'allow_put': False, 'is_visible': True, 'default': False, 'convert_to': attr.convert_to_boolean} }, 'metering_label_rules': { 'id': {'allow_post': False, 'allow_put': False, 'is_visible': True, 'primary_key': True}, 'metering_label_id': {'allow_post': True, 'allow_put': False, 'validate': {'type:uuid': None}, 'is_visible': True, 'required_by_policy': True}, 'direction': {'allow_post': True, 'allow_put': False, 'is_visible': True, 'validate': {'type:values': ['ingress', 'egress']}}, 'excluded': {'allow_post': True, 'allow_put': False, 'is_visible': True, 'default': False, 'convert_to': attr.convert_to_boolean}, 'remote_ip_prefix': {'allow_post': True, 'allow_put': False, 'is_visible': True, 'required_by_policy': True, 'validate': {'type:subnet': None}}, 'tenant_id': {'allow_post': True, 'allow_put': False, 'required_by_policy': True, 'validate': {'type:string': attr.TENANT_ID_MAX_LEN}, 'is_visible': True} } } class Metering(extensions.ExtensionDescriptor): @classmethod def get_name(cls): return "Neutron Metering" @classmethod def get_alias(cls): return "metering" @classmethod def get_description(cls): return "Neutron Metering extension." @classmethod def get_updated(cls): return "2013-06-12T10:00:00-00:00" @classmethod def get_plugin_interface(cls): return MeteringPluginBase @classmethod def get_resources(cls): """Returns Ext Resources.""" plural_mappings = resource_helper.build_plural_mappings( {}, RESOURCE_ATTRIBUTE_MAP) attr.PLURALS.update(plural_mappings) # PCM: Metering sets pagination and sorting to True. Do we have cfg # entries for these so can be read? Else, must pass in. 
return resource_helper.build_resource_info(plural_mappings, RESOURCE_ATTRIBUTE_MAP, constants.METERING, translate_name=True, allow_bulk=True) def update_attributes_map(self, attributes): super(Metering, self).update_attributes_map( attributes, extension_attrs_map=RESOURCE_ATTRIBUTE_MAP) def get_extended_resources(self, version): if version == "2.0": return RESOURCE_ATTRIBUTE_MAP else: return {} @six.add_metaclass(abc.ABCMeta) class MeteringPluginBase(service_base.ServicePluginBase): def get_plugin_description(self): return constants.METERING def get_plugin_type(self): return constants.METERING @abc.abstractmethod def create_metering_label(self, context, metering_label): """Create a metering label.""" pass @abc.abstractmethod def delete_metering_label(self, context, label_id): """Delete a metering label.""" pass @abc.abstractmethod def get_metering_label(self, context, label_id, fields=None): """Get a metering label.""" pass @abc.abstractmethod def get_metering_labels(self, context, filters=None, fields=None, sorts=None, limit=None, marker=None, page_reverse=False): """List all metering labels.""" pass @abc.abstractmethod def create_metering_label_rule(self, context, metering_label_rule): """Create a metering label rule.""" pass @abc.abstractmethod def get_metering_label_rule(self, context, rule_id, fields=None): """Get a metering label rule.""" pass @abc.abstractmethod def delete_metering_label_rule(self, context, rule_id): """Delete a metering label rule.""" pass @abc.abstractmethod def get_metering_label_rules(self, context, filters=None, fields=None, sorts=None, limit=None, marker=None, page_reverse=False): """List all metering label rules.""" pass neutron-8.4.0/neutron/agent/0000775000567000056710000000000013044373210017143 5ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/agent/ovsdb/0000775000567000056710000000000013044373210020260 5ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/agent/ovsdb/impl_idl.py0000664000567000056710000002341613044372760022442 0ustar jenkinsjenkins00000000000000# Copyright (c) 2015 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
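# Editorial sketch (standalone, hedged): a do-nothing implementation of the
# MeteringPluginBase ABC defined above, assuming ServicePluginBase leaves
# no further abstract hooks; a real driver would persist labels and rules
# instead of returning empty results.
from neutron.extensions import metering as _metering


class _NoopMeteringPlugin(_metering.MeteringPluginBase):
    def create_metering_label(self, context, metering_label):
        return dict(metering_label['metering_label'])

    def delete_metering_label(self, context, label_id):
        pass

    def get_metering_label(self, context, label_id, fields=None):
        return {}

    def get_metering_labels(self, context, filters=None, fields=None,
                            sorts=None, limit=None, marker=None,
                            page_reverse=False):
        return []

    def create_metering_label_rule(self, context, metering_label_rule):
        return dict(metering_label_rule['metering_label_rule'])

    def get_metering_label_rule(self, context, rule_id, fields=None):
        return {}

    def delete_metering_label_rule(self, context, rule_id):
        pass

    def get_metering_label_rules(self, context, filters=None, fields=None,
                                 sorts=None, limit=None, marker=None,
                                 page_reverse=False):
        return []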
import time from neutron_lib import exceptions from oslo_config import cfg from oslo_log import log as logging from oslo_utils import excutils from ovs.db import idl from six.moves import queue as Queue from neutron._i18n import _, _LE from neutron.agent.ovsdb import api from neutron.agent.ovsdb.native import commands as cmd from neutron.agent.ovsdb.native import connection from neutron.agent.ovsdb.native import idlutils cfg.CONF.import_opt('ovs_vsctl_timeout', 'neutron.agent.common.ovs_lib') LOG = logging.getLogger(__name__) class VswitchdInterfaceAddException(exceptions.NeutronException): message = _("Failed to add interfaces: %(ifaces)s") class Transaction(api.Transaction): def __init__(self, api, ovsdb_connection, timeout, check_error=False, log_errors=True): self.api = api self.check_error = check_error self.log_errors = log_errors self.commands = [] self.results = Queue.Queue(1) self.ovsdb_connection = ovsdb_connection self.timeout = timeout self.expected_ifaces = set() def __str__(self): return ", ".join(str(cmd) for cmd in self.commands) def add(self, command): """Add a command to the transaction returns The command passed as a convenience """ self.commands.append(command) return command def commit(self): self.ovsdb_connection.queue_txn(self) try: result = self.results.get(self.timeout) except Queue.Empty: raise api.TimeoutException( _("Commands %(commands)s exceeded timeout %(timeout)d " "seconds") % {'commands': self.commands, 'timeout': self.timeout}) if isinstance(result, idlutils.ExceptionResult): if self.log_errors: LOG.error(result.tb) if self.check_error: raise result.ex return result def pre_commit(self, txn): pass def post_commit(self, txn): pass def do_commit(self): self.start_time = time.time() attempts = 0 while True: if attempts > 0 and self.timeout_exceeded(): raise RuntimeError("OVS transaction timed out") attempts += 1 # TODO(twilson) Make sure we don't loop longer than vsctl_timeout txn = idl.Transaction(self.api.idl) self.pre_commit(txn) for i, command in enumerate(self.commands): LOG.debug("Running txn command(idx=%(idx)s): %(cmd)s", {'idx': i, 'cmd': command}) try: command.run_idl(txn) except Exception: with excutils.save_and_reraise_exception() as ctx: txn.abort() if not self.check_error: ctx.reraise = False seqno = self.api.idl.change_seqno status = txn.commit_block() if status == txn.TRY_AGAIN: LOG.debug("OVSDB transaction returned TRY_AGAIN, retrying") idlutils.wait_for_change(self.api.idl, self.time_remaining(), seqno) continue elif status == txn.ERROR: msg = _("OVSDB Error: %s") % txn.get_error() if self.log_errors: LOG.error(msg) if self.check_error: # For now, raise similar error to vsctl/utils.execute() raise RuntimeError(msg) return elif status == txn.ABORTED: LOG.debug("Transaction aborted") return elif status == txn.UNCHANGED: LOG.debug("Transaction caused no change") elif status == txn.SUCCESS: self.post_commit(txn) return [cmd.result for cmd in self.commands] def elapsed_time(self): return time.time() - self.start_time def time_remaining(self): return self.timeout - self.elapsed_time() def timeout_exceeded(self): return self.elapsed_time() > self.timeout class NeutronOVSDBTransaction(Transaction): def pre_commit(self, txn): self.api._ovs.increment('next_cfg') txn.expected_ifaces = set() def post_commit(self, txn): # ovs-vsctl only logs these failures and does not return nonzero try: self.do_post_commit(txn) except Exception: LOG.exception(_LE("Post-commit checks failed")) def do_post_commit(self, txn): next_cfg = txn.get_increment_new_value() 
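# The loop below polls the IDL until ovs-vswitchd acknowledges the
# incremented next_cfg (vswitchd_has_completed checks cur_cfg >= next_cfg)
# and then verifies that every interface recorded in txn.expected_ifaces
# was assigned a usable ofport.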
while not self.timeout_exceeded(): self.api.idl.run() if self.vswitchd_has_completed(next_cfg): failed = self.post_commit_failed_interfaces(txn) if failed: raise VswitchdInterfaceAddException( ifaces=", ".join(failed)) break self.ovsdb_connection.poller.timer_wait( self.time_remaining() * 1000) self.api.idl.wait(self.ovsdb_connection.poller) self.ovsdb_connection.poller.block() else: raise api.TimeoutException( _("Commands %(commands)s exceeded timeout %(timeout)d " "seconds post-commit") % {'commands': self.commands, 'timeout': self.timeout}) def post_commit_failed_interfaces(self, txn): failed = [] for iface_uuid in txn.expected_ifaces: uuid = txn.get_insert_uuid(iface_uuid) if uuid: ifaces = self.api.idl.tables['Interface'] iface = ifaces.rows.get(uuid) if iface and (not iface.ofport or iface.ofport == -1): failed.append(iface.name) return failed def vswitchd_has_completed(self, next_cfg): return self.api._ovs.cur_cfg >= next_cfg class OvsdbIdl(api.API): ovsdb_connection = connection.Connection(cfg.CONF.OVS.ovsdb_connection, cfg.CONF.ovs_vsctl_timeout, 'Open_vSwitch') def __init__(self, context): super(OvsdbIdl, self).__init__(context) OvsdbIdl.ovsdb_connection.start() self.idl = OvsdbIdl.ovsdb_connection.idl @property def _tables(self): return self.idl.tables @property def _ovs(self): return list(self._tables['Open_vSwitch'].rows.values())[0] def transaction(self, check_error=False, log_errors=True, **kwargs): return NeutronOVSDBTransaction(self, OvsdbIdl.ovsdb_connection, self.context.vsctl_timeout, check_error, log_errors) def add_br(self, name, may_exist=True, datapath_type=None): return cmd.AddBridgeCommand(self, name, may_exist, datapath_type) def del_br(self, name, if_exists=True): return cmd.DelBridgeCommand(self, name, if_exists) def br_exists(self, name): return cmd.BridgeExistsCommand(self, name) def port_to_br(self, name): return cmd.PortToBridgeCommand(self, name) def iface_to_br(self, name): return cmd.InterfaceToBridgeCommand(self, name) def list_br(self): return cmd.ListBridgesCommand(self) def br_get_external_id(self, name, field): return cmd.BrGetExternalIdCommand(self, name, field) def br_set_external_id(self, name, field, value): return cmd.BrSetExternalIdCommand(self, name, field, value) def db_create(self, table, **col_values): return cmd.DbCreateCommand(self, table, **col_values) def db_destroy(self, table, record): return cmd.DbDestroyCommand(self, table, record) def db_set(self, table, record, *col_values): return cmd.DbSetCommand(self, table, record, *col_values) def db_clear(self, table, record, column): return cmd.DbClearCommand(self, table, record, column) def db_get(self, table, record, column): return cmd.DbGetCommand(self, table, record, column) def db_list(self, table, records=None, columns=None, if_exists=False): return cmd.DbListCommand(self, table, records, columns, if_exists) def db_find(self, table, *conditions, **kwargs): return cmd.DbFindCommand(self, table, *conditions, **kwargs) def set_controller(self, bridge, controllers): return cmd.SetControllerCommand(self, bridge, controllers) def del_controller(self, bridge): return cmd.DelControllerCommand(self, bridge) def get_controller(self, bridge): return cmd.GetControllerCommand(self, bridge) def set_fail_mode(self, bridge, mode): return cmd.SetFailModeCommand(self, bridge, mode) def add_port(self, bridge, port, may_exist=True): return cmd.AddPortCommand(self, bridge, port, may_exist) def del_port(self, port, bridge=None, if_exists=True): return cmd.DelPortCommand(self, port, bridge, if_exists) def 
list_ports(self, bridge): return cmd.ListPortsCommand(self, bridge) def list_ifaces(self, bridge): return cmd.ListIfacesCommand(self, bridge) neutron-8.4.0/neutron/agent/ovsdb/native/0000775000567000056710000000000013044373210021546 5ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/agent/ovsdb/native/helpers.py0000664000567000056710000000204613044372760023575 0ustar jenkinsjenkins00000000000000# Copyright (c) 2015 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron.agent.common import utils def _connection_to_manager_uri(conn_uri): proto, addr = conn_uri.split(':', 1) if ':' in addr: ip, port = addr.split(':', 1) return 'p%s:%s:%s' % (proto, port, ip) else: return 'p%s:%s' % (proto, addr) def enable_connection_uri(conn_uri): manager_uri = _connection_to_manager_uri(conn_uri) utils.execute(['ovs-vsctl', 'set-manager', manager_uri], run_as_root=True) neutron-8.4.0/neutron/agent/ovsdb/native/__init__.py0000664000567000056710000000000013044372736023661 0ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/agent/ovsdb/native/commands.py0000664000567000056710000004052513044372760023740 0ustar jenkinsjenkins00000000000000# Copyright (c) 2015 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
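# Editorial sketch (standalone, not part of the original module): the
# _connection_to_manager_uri() helper above turns an active connection URI
# into the passive "manager" form, swapping host and port for TCP targets:
from neutron.agent.ovsdb.native.helpers import (
    _connection_to_manager_uri as _to_mgr)

assert _to_mgr('tcp:127.0.0.1:6640') == 'ptcp:6640:127.0.0.1'
assert _to_mgr('unix:/var/run/openvswitch/db.sock') == (
    'punix:/var/run/openvswitch/db.sock')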
import collections from oslo_log import log as logging from oslo_utils import excutils from neutron._i18n import _, _LE from neutron.agent.ovsdb import api from neutron.agent.ovsdb.native import idlutils LOG = logging.getLogger(__name__) class BaseCommand(api.Command): def __init__(self, api): self.api = api self.result = None def execute(self, check_error=False, log_errors=True): try: with self.api.transaction(check_error, log_errors) as txn: txn.add(self) return self.result except Exception: with excutils.save_and_reraise_exception() as ctx: if log_errors: LOG.exception(_LE("Error executing command")) if not check_error: ctx.reraise = False def __str__(self): command_info = self.__dict__ return "%s(%s)" % ( self.__class__.__name__, ", ".join("%s=%s" % (k, v) for k, v in command_info.items() if k not in ['api', 'result'])) __repr__ = __str__ class AddBridgeCommand(BaseCommand): def __init__(self, api, name, may_exist, datapath_type): super(AddBridgeCommand, self).__init__(api) self.name = name self.may_exist = may_exist self.datapath_type = datapath_type def run_idl(self, txn): if self.may_exist: br = idlutils.row_by_value(self.api.idl, 'Bridge', 'name', self.name, None) if br: if self.datapath_type: br.datapath_type = self.datapath_type return row = txn.insert(self.api._tables['Bridge']) row.name = self.name if self.datapath_type: row.datapath_type = self.datapath_type self.api._ovs.verify('bridges') self.api._ovs.bridges = self.api._ovs.bridges + [row] # Add the internal bridge port cmd = AddPortCommand(self.api, self.name, self.name, self.may_exist) cmd.run_idl(txn) cmd = DbSetCommand(self.api, 'Interface', self.name, ('type', 'internal')) cmd.run_idl(txn) class DelBridgeCommand(BaseCommand): def __init__(self, api, name, if_exists): super(DelBridgeCommand, self).__init__(api) self.name = name self.if_exists = if_exists def run_idl(self, txn): try: br = idlutils.row_by_value(self.api.idl, 'Bridge', 'name', self.name) except idlutils.RowNotFound: if self.if_exists: return else: msg = _("Bridge %s does not exist") % self.name LOG.error(msg) raise RuntimeError(msg) self.api._ovs.verify('bridges') for port in br.ports: cmd = DelPortCommand(self.api, port.name, self.name, if_exists=True) cmd.run_idl(txn) bridges = self.api._ovs.bridges bridges.remove(br) self.api._ovs.bridges = bridges self.api._tables['Bridge'].rows[br.uuid].delete() class BridgeExistsCommand(BaseCommand): def __init__(self, api, name): super(BridgeExistsCommand, self).__init__(api) self.name = name def run_idl(self, txn): self.result = bool(idlutils.row_by_value(self.api.idl, 'Bridge', 'name', self.name, None)) class ListBridgesCommand(BaseCommand): def __init__(self, api): super(ListBridgesCommand, self).__init__(api) def run_idl(self, txn): # NOTE (twilson) [x.name for x in rows.values()] if no index self.result = [x.name for x in self.api._tables['Bridge'].rows.values()] class BrGetExternalIdCommand(BaseCommand): def __init__(self, api, name, field): super(BrGetExternalIdCommand, self).__init__(api) self.name = name self.field = field def run_idl(self, txn): br = idlutils.row_by_value(self.api.idl, 'Bridge', 'name', self.name) self.result = br.external_ids[self.field] class BrSetExternalIdCommand(BaseCommand): def __init__(self, api, name, field, value): super(BrSetExternalIdCommand, self).__init__(api) self.name = name self.field = field self.value = value def run_idl(self, txn): br = idlutils.row_by_value(self.api.idl, 'Bridge', 'name', self.name) external_ids = getattr(br, 'external_ids', {}) 
external_ids[self.field] = self.value br.external_ids = external_ids class DbCreateCommand(BaseCommand): def __init__(self, api, table, **columns): super(DbCreateCommand, self).__init__(api) self.table = table self.columns = columns def run_idl(self, txn): row = txn.insert(self.api._tables[self.table]) for col, val in self.columns.items(): setattr(row, col, val) self.result = row class DbDestroyCommand(BaseCommand): def __init__(self, api, table, record): super(DbDestroyCommand, self).__init__(api) self.table = table self.record = record def run_idl(self, txn): record = idlutils.row_by_record(self.api.idl, self.table, self.record) record.delete() class DbSetCommand(BaseCommand): def __init__(self, api, table, record, *col_values): super(DbSetCommand, self).__init__(api) self.table = table self.record = record self.col_values = col_values def run_idl(self, txn): record = idlutils.row_by_record(self.api.idl, self.table, self.record) for col, val in self.col_values: # TODO(twilson) Ugh, the OVS library doesn't like OrderedDict # We're only using it to make a unit test work, so we should fix # this soon. if isinstance(val, collections.OrderedDict): val = dict(val) setattr(record, col, val) class DbClearCommand(BaseCommand): def __init__(self, api, table, record, column): super(DbClearCommand, self).__init__(api) self.table = table self.record = record self.column = column def run_idl(self, txn): record = idlutils.row_by_record(self.api.idl, self.table, self.record) # Create an empty value of the column type value = type(getattr(record, self.column))() setattr(record, self.column, value) class DbGetCommand(BaseCommand): def __init__(self, api, table, record, column): super(DbGetCommand, self).__init__(api) self.table = table self.record = record self.column = column def run_idl(self, txn): record = idlutils.row_by_record(self.api.idl, self.table, self.record) # TODO(twilson) This feels wrong, but ovs-vsctl returns single results # on set types without the list. The IDL is returning them as lists, # even if the set has the maximum number of items set to 1. Might be # able to inspect the Schema and just do this conversion for that case. 
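# e.g. (illustrative) a Port's 'qos' column is a set holding at most one
# QoS row, yet the IDL returns it as a one-element list; the check below
# unwraps such lists to match ovs-vsctl's scalar output.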
result = idlutils.get_column_value(record, self.column) if isinstance(result, list) and len(result) == 1: self.result = result[0] else: self.result = result class SetControllerCommand(BaseCommand): def __init__(self, api, bridge, targets): super(SetControllerCommand, self).__init__(api) self.bridge = bridge self.targets = targets def run_idl(self, txn): br = idlutils.row_by_value(self.api.idl, 'Bridge', 'name', self.bridge) controllers = [] for target in self.targets: controller = txn.insert(self.api._tables['Controller']) controller.target = target controllers.append(controller) br.verify('controller') br.controller = controllers class DelControllerCommand(BaseCommand): def __init__(self, api, bridge): super(DelControllerCommand, self).__init__(api) self.bridge = bridge def run_idl(self, txn): br = idlutils.row_by_value(self.api.idl, 'Bridge', 'name', self.bridge) br.controller = [] class GetControllerCommand(BaseCommand): def __init__(self, api, bridge): super(GetControllerCommand, self).__init__(api) self.bridge = bridge def run_idl(self, txn): br = idlutils.row_by_value(self.api.idl, 'Bridge', 'name', self.bridge) br.verify('controller') self.result = [c.target for c in br.controller] class SetFailModeCommand(BaseCommand): def __init__(self, api, bridge, mode): super(SetFailModeCommand, self).__init__(api) self.bridge = bridge self.mode = mode def run_idl(self, txn): br = idlutils.row_by_value(self.api.idl, 'Bridge', 'name', self.bridge) br.verify('fail_mode') br.fail_mode = self.mode class AddPortCommand(BaseCommand): def __init__(self, api, bridge, port, may_exist): super(AddPortCommand, self).__init__(api) self.bridge = bridge self.port = port self.may_exist = may_exist def run_idl(self, txn): br = idlutils.row_by_value(self.api.idl, 'Bridge', 'name', self.bridge) if self.may_exist: port = idlutils.row_by_value(self.api.idl, 'Port', 'name', self.port, None) if port: return port = txn.insert(self.api._tables['Port']) port.name = self.port br.verify('ports') ports = getattr(br, 'ports', []) ports.append(port) br.ports = ports iface = txn.insert(self.api._tables['Interface']) # NOTE(twilson) The OVS lib's __getattr__ breaks iface.uuid here txn.expected_ifaces.add(iface.__dict__['uuid']) iface.name = self.port port.verify('interfaces') ifaces = getattr(port, 'interfaces', []) ifaces.append(iface) port.interfaces = ifaces class DelPortCommand(BaseCommand): def __init__(self, api, port, bridge, if_exists): super(DelPortCommand, self).__init__(api) self.port = port self.bridge = bridge self.if_exists = if_exists def run_idl(self, txn): try: port = idlutils.row_by_value(self.api.idl, 'Port', 'name', self.port) except idlutils.RowNotFound: if self.if_exists: return msg = _("Port %s does not exist") % self.port raise RuntimeError(msg) if self.bridge: br = idlutils.row_by_value(self.api.idl, 'Bridge', 'name', self.bridge) else: br = next(b for b in self.api._tables['Bridge'].rows.values() if port in b.ports) if port not in br.ports and not self.if_exists: # TODO(twilson) Make real errors across both implementations msg = _("Port %(port)s does not exist on %(bridge)s!") % { 'port': self.port, 'bridge': self.bridge } LOG.error(msg) raise RuntimeError(msg) br.verify('ports') ports = br.ports ports.remove(port) br.ports = ports # Also remove port/interface directly for indexing? 
port.verify('interfaces') for iface in port.interfaces: self.api._tables['Interface'].rows[iface.uuid].delete() self.api._tables['Port'].rows[port.uuid].delete() class ListPortsCommand(BaseCommand): def __init__(self, api, bridge): super(ListPortsCommand, self).__init__(api) self.bridge = bridge def run_idl(self, txn): br = idlutils.row_by_value(self.api.idl, 'Bridge', 'name', self.bridge) self.result = [p.name for p in br.ports if p.name != self.bridge] class ListIfacesCommand(BaseCommand): def __init__(self, api, bridge): super(ListIfacesCommand, self).__init__(api) self.bridge = bridge def run_idl(self, txn): br = idlutils.row_by_value(self.api.idl, 'Bridge', 'name', self.bridge) self.result = [i.name for p in br.ports if p.name != self.bridge for i in p.interfaces] class PortToBridgeCommand(BaseCommand): def __init__(self, api, name): super(PortToBridgeCommand, self).__init__(api) self.name = name def run_idl(self, txn): # TODO(twilson) This is expensive! # This traversal of all ports could be eliminated by caching the bridge # name on the Port's external_id field # In fact, if we did that, the only place that uses to_br functions # could just add the external_id field to the conditions passed to find port = idlutils.row_by_value(self.api.idl, 'Port', 'name', self.name) bridges = self.api._tables['Bridge'].rows.values() self.result = next(br.name for br in bridges if port in br.ports) class InterfaceToBridgeCommand(BaseCommand): def __init__(self, api, name): super(InterfaceToBridgeCommand, self).__init__(api) self.name = name def run_idl(self, txn): interface = idlutils.row_by_value(self.api.idl, 'Interface', 'name', self.name) ports = self.api._tables['Port'].rows.values() pname = next( port for port in ports if interface in port.interfaces) bridges = self.api._tables['Bridge'].rows.values() self.result = next(br.name for br in bridges if pname in br.ports) class DbListCommand(BaseCommand): def __init__(self, api, table, records, columns, if_exists): super(DbListCommand, self).__init__(api) self.table = table self.columns = columns self.if_exists = if_exists self.records = records def run_idl(self, txn): table_schema = self.api._tables[self.table] columns = self.columns or list(table_schema.columns.keys()) + ['_uuid'] if self.records: row_uuids = [] for record in self.records: try: row_uuids.append(idlutils.row_by_record( self.api.idl, self.table, record).uuid) except idlutils.RowNotFound: if self.if_exists: continue # NOTE(kevinbenton): this is converted to a RuntimeError # for compat with the vsctl version. It might make more # sense to change this to a RowNotFoundError in the future. raise RuntimeError(_( "Row doesn't exist in the DB. Request info: " "Table=%(table)s. Columns=%(columns)s. 
" "Records=%(records)s.") % { "table": self.table, "columns": self.columns, "records": self.records, }) else: row_uuids = table_schema.rows.keys() self.result = [ { c: idlutils.get_column_value(table_schema.rows[uuid], c) for c in columns } for uuid in row_uuids ] class DbFindCommand(BaseCommand): def __init__(self, api, table, *conditions, **kwargs): super(DbFindCommand, self).__init__(api) self.table = self.api._tables[table] self.conditions = conditions self.columns = (kwargs.get('columns') or list(self.table.columns.keys()) + ['_uuid']) def run_idl(self, txn): self.result = [ { c: idlutils.get_column_value(r, c) for c in self.columns } for r in self.table.rows.values() if idlutils.row_match(r, self.conditions) ] neutron-8.4.0/neutron/agent/ovsdb/native/idlutils.py0000664000567000056710000001375513044372760023775 0ustar jenkinsjenkins00000000000000# Copyright (c) 2015 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import collections import os import time import uuid from ovs.db import idl from ovs import jsonrpc from ovs import poller from ovs import stream from neutron._i18n import _ from neutron.common import exceptions RowLookup = collections.namedtuple('RowLookup', ['table', 'column', 'uuid_column']) # Tables with no index in OVSDB and special record lookup rules _LOOKUP_TABLE = { 'Controller': RowLookup('Bridge', 'name', 'controller'), 'Flow_Table': RowLookup('Flow_Table', 'name', None), 'IPFIX': RowLookup('Bridge', 'name', 'ipfix'), 'Mirror': RowLookup('Mirror', 'name', None), 'NetFlow': RowLookup('Bridge', 'name', 'netflow'), 'QoS': RowLookup('Port', 'name', 'qos'), 'Queue': RowLookup(None, None, None), 'sFlow': RowLookup('Bridge', 'name', 'sflow'), 'SSL': RowLookup('Open_vSwitch', None, 'ssl'), } _NO_DEFAULT = object() class RowNotFound(exceptions.NeutronException): message = _("Cannot find %(table)s with %(col)s=%(match)s") def row_by_value(idl_, table, column, match, default=_NO_DEFAULT): """Lookup an IDL row in a table by column/value""" tab = idl_.tables[table] for r in tab.rows.values(): if getattr(r, column) == match: return r if default is not _NO_DEFAULT: return default raise RowNotFound(table=table, col=column, match=match) def row_by_record(idl_, table, record): t = idl_.tables[table] try: if isinstance(record, uuid.UUID): return t.rows[record] uuid_ = uuid.UUID(record) return t.rows[uuid_] except ValueError: # Not a UUID string, continue lookup by other means pass except KeyError: raise RowNotFound(table=table, col='uuid', match=record) rl = _LOOKUP_TABLE.get(table, RowLookup(table, get_index_column(t), None)) # no table means uuid only, no column is just SSL which we don't need if rl.table is None: raise ValueError(_("Table %s can only be queried by UUID") % table) if rl.column is None: raise NotImplementedError(_("'.' 
searches are not implemented")) row = row_by_value(idl_, rl.table, rl.column, record) if rl.uuid_column: rows = getattr(row, rl.uuid_column) if len(rows) != 1: raise RowNotFound(table=table, col=_('record'), match=record) row = rows[0] return row class ExceptionResult(object): def __init__(self, ex, tb): self.ex = ex self.tb = tb def get_schema_helper(connection, schema_name): err, strm = stream.Stream.open_block( stream.Stream.open(connection)) if err: raise Exception(_("Could not connect to %s") % connection) rpc = jsonrpc.Connection(strm) req = jsonrpc.Message.create_request('get_schema', [schema_name]) err, resp = rpc.transact_block(req) rpc.close() if err: raise Exception(_("Could not retrieve schema from %(conn)s: " "%(err)s") % {'conn': connection, 'err': os.strerror(err)}) elif resp.error: raise Exception(resp.error) return idl.SchemaHelper(None, resp.result) def wait_for_change(_idl, timeout, seqno=None): if seqno is None: seqno = _idl.change_seqno stop = time.time() + timeout while _idl.change_seqno == seqno and not _idl.run(): ovs_poller = poller.Poller() _idl.wait(ovs_poller) ovs_poller.timer_wait(timeout * 1000) ovs_poller.block() if time.time() > stop: raise Exception(_("Timeout")) def get_column_value(row, col): if col == '_uuid': val = row.uuid else: val = getattr(row, col) # Idl returns lists of Rows where ovs-vsctl returns lists of UUIDs if isinstance(val, list) and len(val): if isinstance(val[0], idl.Row): val = [v.uuid for v in val] # ovs-vsctl treats lists of 1 as single results if len(val) == 1: val = val[0] return val def condition_match(row, condition): """Return whether a condition matches a row :param row: An OVSDB Row :param condition: A 3-tuple containing (column, operation, match) """ col, op, match = condition val = get_column_value(row, col) matched = True # TODO(twilson) Implement other operators and type comparisons # ovs_lib only uses dict '=' and '!=' searches for now if isinstance(match, dict): for key in match: if op == '=': if (key not in val or match[key] != val[key]): matched = False break elif op == '!=': if key not in val or match[key] == val[key]: matched = False break else: raise NotImplementedError() elif isinstance(match, list): raise NotImplementedError() else: if op == '=': if val != match: matched = False elif op == '!=': if val == match: matched = False else: raise NotImplementedError() return matched def row_match(row, conditions): """Return whether the row matches the list of conditions""" return all(condition_match(row, cond) for cond in conditions) def get_index_column(table): if len(table.indexes) == 1: idx = table.indexes[0] if len(idx) == 1: return idx[0].name neutron-8.4.0/neutron/agent/ovsdb/native/connection.py0000664000567000056710000000754413044372760024302 0ustar jenkinsjenkins00000000000000# Copyright (c) 2015 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import os import threading import traceback from ovs.db import idl from ovs import poller import retrying from six.moves import queue as Queue from neutron.agent.ovsdb.native import helpers from neutron.agent.ovsdb.native import idlutils class TransactionQueue(Queue.Queue, object): def __init__(self, *args, **kwargs): super(TransactionQueue, self).__init__(*args, **kwargs) alertpipe = os.pipe() self.alertin = os.fdopen(alertpipe[0], 'r', 0) self.alertout = os.fdopen(alertpipe[1], 'w', 0) def get_nowait(self, *args, **kwargs): try: result = super(TransactionQueue, self).get_nowait(*args, **kwargs) except Queue.Empty: return None self.alertin.read(1) return result def put(self, *args, **kwargs): super(TransactionQueue, self).put(*args, **kwargs) self.alertout.write('X') self.alertout.flush() @property def alert_fileno(self): return self.alertin.fileno() class Connection(object): def __init__(self, connection, timeout, schema_name): self.idl = None self.connection = connection self.timeout = timeout self.txns = TransactionQueue(1) self.lock = threading.Lock() self.schema_name = schema_name def start(self): with self.lock: if self.idl is not None: return try: helper = idlutils.get_schema_helper(self.connection, self.schema_name) except Exception: # We may have failed due to set-manager not being called helpers.enable_connection_uri(self.connection) # There is a small window for a race, so retry up to a second @retrying.retry(wait_exponential_multiplier=10, stop_max_delay=1000) def do_get_schema_helper(): return idlutils.get_schema_helper(self.connection, self.schema_name) helper = do_get_schema_helper() helper.register_all() self.idl = idl.Idl(self.connection, helper) idlutils.wait_for_change(self.idl, self.timeout) self.poller = poller.Poller() self.thread = threading.Thread(target=self.run) self.thread.setDaemon(True) self.thread.start() def run(self): while True: self.idl.wait(self.poller) self.poller.fd_wait(self.txns.alert_fileno, poller.POLLIN) # TODO(jlibosva): Remove next line once losing connection to ovsdb # is solved. self.poller.timer_wait(self.timeout * 1000) self.poller.block() self.idl.run() txn = self.txns.get_nowait() if txn is not None: try: txn.results.put(txn.do_commit()) except Exception as ex: er = idlutils.ExceptionResult(ex=ex, tb=traceback.format_exc()) txn.results.put(er) self.txns.task_done() def queue_txn(self, txn): self.txns.put(txn) neutron-8.4.0/neutron/agent/ovsdb/__init__.py0000664000567000056710000000000013044372736022373 0ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/agent/ovsdb/impl_vsctl.py0000664000567000056710000002450413044372760023024 0ustar jenkinsjenkins00000000000000# Copyright (c) 2014 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
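# Editorial sketch (hedged; mirrors how impl_idl.OvsdbIdl drives the native
# Connection above): start() is idempotent under its lock, and each queued
# transaction receives its reply on its own results queue:
#
#     conn = Connection('tcp:127.0.0.1:6640', 10, 'Open_vSwitch')
#     conn.start()          # spawns the daemon poll thread once
#     conn.queue_txn(txn)   # later: txn.results.get() yields the outcome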
import collections import itertools from oslo_log import log as logging from oslo_serialization import jsonutils from oslo_utils import excutils import six from neutron._i18n import _LE from neutron.agent.common import utils from neutron.agent.ovsdb import api as ovsdb LOG = logging.getLogger(__name__) class Transaction(ovsdb.Transaction): def __init__(self, context, check_error=False, log_errors=True, opts=None): self.context = context self.check_error = check_error self.log_errors = log_errors self.opts = ["--timeout=%d" % self.context.vsctl_timeout, '--oneline', '--format=json'] if opts: self.opts += opts self.commands = [] def add(self, command): self.commands.append(command) return command def commit(self): args = [] for cmd in self.commands: cmd.result = None args += cmd.vsctl_args() res = self.run_vsctl(args) if res is None: return res = res.replace(r'\\', '\\').splitlines() for i, record in enumerate(res): self.commands[i].result = record return [cmd.result for cmd in self.commands] def run_vsctl(self, args): full_args = ["ovs-vsctl"] + self.opts + args try: # We log our own errors, so never have utils.execute do it return utils.execute(full_args, run_as_root=True, log_fail_as_error=False).rstrip() except Exception as e: with excutils.save_and_reraise_exception() as ctxt: if self.log_errors: LOG.error(_LE("Unable to execute %(cmd)s. " "Exception: %(exception)s"), {'cmd': full_args, 'exception': e}) if not self.check_error: ctxt.reraise = False class BaseCommand(ovsdb.Command): def __init__(self, context, cmd, opts=None, args=None): self.context = context self.cmd = cmd self.opts = [] if opts is None else opts self.args = [] if args is None else args def execute(self, check_error=False, log_errors=True): with Transaction(self.context, check_error=check_error, log_errors=log_errors) as txn: txn.add(self) return self.result def vsctl_args(self): return itertools.chain(('--',), self.opts, (self.cmd,), self.args) class MultiLineCommand(BaseCommand): """Command for ovs-vsctl commands that return multiple lines""" @property def result(self): return self._result @result.setter def result(self, raw_result): self._result = raw_result.split(r'\n') if raw_result else [] class DbCommand(BaseCommand): def __init__(self, context, cmd, opts=None, args=None, columns=None): if opts is None: opts = [] if columns: opts += ['--columns=%s' % ",".join(columns)] super(DbCommand, self).__init__(context, cmd, opts, args) @property def result(self): return self._result @result.setter def result(self, raw_result): # If check_error=False, run_vsctl can return None if not raw_result: self._result = None return try: json = jsonutils.loads(raw_result) except (ValueError, TypeError) as e: # This shouldn't happen, but if it does and we check_errors # log and raise. with excutils.save_and_reraise_exception(): LOG.error(_LE("Could not parse: %(raw_result)s. " "Exception: %(exception)s"), {'raw_result': raw_result, 'exception': e}) headings = json['headings'] data = json['data'] results = [] for record in data: obj = {} for pos, heading in enumerate(headings): obj[heading] = ovsdb.val_to_py(record[pos]) results.append(obj) self._result = results class DbGetCommand(DbCommand): @DbCommand.result.setter def result(self, val): # super()'s never worked for setters http://bugs.python.org/issue14965 DbCommand.result.fset(self, val) # DbCommand will return [{'column': value}] and we just want value. 
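# e.g. (illustrative) a raw [{'tag': 5}] from the underlying 'list' call
# collapses to the bare value 5 in the unwrap below.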
if self._result: self._result = list(self._result[0].values())[0] class BrExistsCommand(DbCommand): @DbCommand.result.setter def result(self, val): self._result = val is not None def execute(self): return super(BrExistsCommand, self).execute(check_error=False, log_errors=False) class OvsdbVsctl(ovsdb.API): def transaction(self, check_error=False, log_errors=True, **kwargs): return Transaction(self.context, check_error, log_errors, **kwargs) def add_br(self, name, may_exist=True, datapath_type=None): opts = ['--may-exist'] if may_exist else None params = [name] if datapath_type: params += ['--', 'set', 'Bridge', name, 'datapath_type=%s' % datapath_type] return BaseCommand(self.context, 'add-br', opts, params) def del_br(self, name, if_exists=True): opts = ['--if-exists'] if if_exists else None return BaseCommand(self.context, 'del-br', opts, [name]) def br_exists(self, name): return BrExistsCommand(self.context, 'list', args=['Bridge', name]) def port_to_br(self, name): return BaseCommand(self.context, 'port-to-br', args=[name]) def iface_to_br(self, name): return BaseCommand(self.context, 'iface-to-br', args=[name]) def list_br(self): return MultiLineCommand(self.context, 'list-br') def br_get_external_id(self, name, field): return BaseCommand(self.context, 'br-get-external-id', args=[name, field]) def db_create(self, table, **col_values): args = [table] args += _set_colval_args(*col_values.items()) return BaseCommand(self.context, 'create', args=args) def db_destroy(self, table, record): args = [table, record] return BaseCommand(self.context, 'destroy', args=args) def db_set(self, table, record, *col_values): args = [table, record] args += _set_colval_args(*col_values) return BaseCommand(self.context, 'set', args=args) def db_clear(self, table, record, column): return BaseCommand(self.context, 'clear', args=[table, record, column]) def db_get(self, table, record, column): # Use the 'list' command as it can return json and 'get' cannot so that # we can get real return types instead of treating everything as string # NOTE: openvswitch can return a single atomic value for fields that # are sets, but only have one value. This makes directly iterating over # the result of a db_get() call unsafe. 
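# For example (illustrative): a set-typed column limited to one element,
# such as a port's 'tag', may come back as the bare atom 5 rather than
# [5], so callers should normalize before iterating.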
return DbGetCommand(self.context, 'list', args=[table, record], columns=[column]) def db_list(self, table, records=None, columns=None, if_exists=False): opts = ['--if-exists'] if if_exists else None args = [table] if records: args += records return DbCommand(self.context, 'list', opts=opts, args=args, columns=columns) def db_find(self, table, *conditions, **kwargs): columns = kwargs.pop('columns', None) args = itertools.chain([table], *[_set_colval_args(c) for c in conditions]) return DbCommand(self.context, 'find', args=args, columns=columns) def set_controller(self, bridge, controllers): return BaseCommand(self.context, 'set-controller', args=[bridge] + list(controllers)) def del_controller(self, bridge): return BaseCommand(self.context, 'del-controller', args=[bridge]) def get_controller(self, bridge): return MultiLineCommand(self.context, 'get-controller', args=[bridge]) def set_fail_mode(self, bridge, mode): return BaseCommand(self.context, 'set-fail-mode', args=[bridge, mode]) def add_port(self, bridge, port, may_exist=True): opts = ['--may-exist'] if may_exist else None return BaseCommand(self.context, 'add-port', opts, [bridge, port]) def del_port(self, port, bridge=None, if_exists=True): opts = ['--if-exists'] if if_exists else None args = filter(None, [bridge, port]) return BaseCommand(self.context, 'del-port', opts, args) def list_ports(self, bridge): return MultiLineCommand(self.context, 'list-ports', args=[bridge]) def list_ifaces(self, bridge): return MultiLineCommand(self.context, 'list-ifaces', args=[bridge]) def _set_colval_args(*col_values): args = [] # TODO(twilson) This is ugly, but set/find args are very similar except for # op. Will try to find a better way to default this op to '=' for entry in col_values: if len(entry) == 2: col, op, val = entry[0], '=', entry[1] else: col, op, val = entry if isinstance(val, collections.Mapping): args += ["%s:%s%s%s" % ( col, k, op, ovsdb.py_to_val(v)) for k, v in val.items()] elif (isinstance(val, collections.Sequence) and not isinstance(val, six.string_types)): if len(val) == 0: args.append("%s%s%s" % (col, op, "[]")) else: args.append( "%s%s%s" % (col, op, ",".join(map(ovsdb.py_to_val, val)))) else: args.append("%s%s%s" % (col, op, ovsdb.py_to_val(val))) return args neutron-8.4.0/neutron/agent/ovsdb/api.py0000664000567000056710000003234713044372760021425 0ustar jenkinsjenkins00000000000000# Copyright (c) 2014 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import abc import collections import uuid from oslo_config import cfg from oslo_utils import importutils import six from neutron._i18n import _ interface_map = { 'vsctl': 'neutron.agent.ovsdb.impl_vsctl.OvsdbVsctl', 'native': 'neutron.agent.ovsdb.impl_idl.OvsdbIdl', } OPTS = [ cfg.StrOpt('ovsdb_interface', choices=interface_map.keys(), default='vsctl', help=_('The interface for interacting with the OVSDB')), cfg.StrOpt('ovsdb_connection', default='tcp:127.0.0.1:6640', help=_('The connection string for the native OVSDB backend. 
' 'Requires the native ovsdb_interface to be enabled.')) ] cfg.CONF.register_opts(OPTS, 'OVS') @six.add_metaclass(abc.ABCMeta) class Command(object): """An OVSDB command that can be executed in a transaction :attr result: The result of executing the command in a transaction """ @abc.abstractmethod def execute(self, **transaction_options): """Immediately execute an OVSDB command This implicitly creates a transaction with the passed options and then executes it, returning the value of the executed transaction :param transaction_options: Options to pass to the transaction """ @six.add_metaclass(abc.ABCMeta) class Transaction(object): @abc.abstractmethod def commit(self): """Commit the transaction to OVSDB""" @abc.abstractmethod def add(self, command): """Append an OVSDB operation to the transaction""" def __enter__(self): return self def __exit__(self, exc_type, exc_val, tb): if exc_type is None: self.result = self.commit() @six.add_metaclass(abc.ABCMeta) class API(object): def __init__(self, context): self.context = context @staticmethod def get(context, iface_name=None): """Return the configured OVSDB API implementation""" iface = importutils.import_class( interface_map[iface_name or cfg.CONF.OVS.ovsdb_interface]) return iface(context) @abc.abstractmethod def transaction(self, check_error=False, log_errors=True, **kwargs): """Create a transaction :param check_error: Allow the transaction to raise an exception? :type check_error: bool :param log_errors: Log an error if the transaction fails? :type log_errors: bool :returns: A new transaction :rtype: :class:`Transaction` """ @abc.abstractmethod def add_br(self, name, may_exist=True, datapath_type=None): """Create a command to add an OVS bridge :param name: The name of the bridge :type name: string :param may_exist: Do not fail if bridge already exists :type may_exist: bool :param datapath_type: The datapath_type of the bridge :type datapath_type: string :returns: :class:`Command` with no result """ @abc.abstractmethod def del_br(self, name, if_exists=True): """Create a command to delete an OVS bridge :param name: The name of the bridge :type name: string :param if_exists: Do not fail if the bridge does not exist :type if_exists: bool :returns: :class:`Command` with no result """ @abc.abstractmethod def br_exists(self, name): """Create a command to check if an OVS bridge exists :param name: The name of the bridge :type name: string :returns: :class:`Command` with bool result """ @abc.abstractmethod def port_to_br(self, name): """Create a command to return the name of the bridge with the port :param name: The name of the OVS port :type name: string :returns: :class:`Command` with bridge name result """ @abc.abstractmethod def iface_to_br(self, name): """Create a command to return the name of the bridge with the interface :param name: The name of the OVS interface :type name: string :returns: :class:`Command` with bridge name result """ @abc.abstractmethod def list_br(self): """Create a command to return the current list of OVS bridge names :returns: :class:`Command` with list of bridge names result """ @abc.abstractmethod def br_get_external_id(self, name, field): """Create a command to return a field from the Bridge's external_ids :param name: The name of the OVS Bridge :type name: string :param field: The external_ids field to return :type field: string :returns: :class:`Command` with field value result """ @abc.abstractmethod def db_create(self, table, **col_values): """Create a command to create new record :param table: The OVS table 
containing the record to be created :type table: string :param col_values: The columns and their associated values to be set after create :type col_values: Dictionary of columns id's and values :returns: :class:`Command` with no result """ @abc.abstractmethod def db_destroy(self, table, record): """Create a command to destroy a record :param table: The OVS table containing the record to be destroyed :type table: string :param record: The record id (name/uuid) to be destroyed :type record: uuid/string :returns: :class:`Command` with no result """ @abc.abstractmethod def db_set(self, table, record, *col_values): """Create a command to set fields in a record :param table: The OVS table containing the record to be modified :type table: string :param record: The record id (name/uuid) to be modified :type table: string :param col_values: The columns and their associated values :type col_values: Tuples of (column, value). Values may be atomic values or unnested sequences/mappings :returns: :class:`Command` with no result """ # TODO(twilson) Consider handling kwargs for arguments where order # doesn't matter. Though that would break the assert_called_once_with # unit tests @abc.abstractmethod def db_clear(self, table, record, column): """Create a command to clear a field's value in a record :param table: The OVS table containing the record to be modified :type table: string :param record: The record id (name/uuid) to be modified :type record: string :param column: The column whose value should be cleared :type column: string :returns: :class:`Command` with no result """ @abc.abstractmethod def db_get(self, table, record, column): """Create a command to return a field's value in a record :param table: The OVS table containing the record to be queried :type table: string :param record: The record id (name/uuid) to be queried :type record: string :param column: The column whose value should be returned :type column: string :returns: :class:`Command` with the field's value result """ @abc.abstractmethod def db_list(self, table, records=None, columns=None, if_exists=False): """Create a command to return a list of OVSDB records :param table: The OVS table to query :type table: string :param records: The records to return values from :type records: list of record ids (names/uuids) :param columns: Limit results to only columns, None means all columns :type columns: list of column names or None :param if_exists: Do not fail if the record does not exist :type if_exists: bool :returns: :class:`Command` with [{'column', value}, ...] result """ @abc.abstractmethod def db_find(self, table, *conditions, **kwargs): """Create a command to return find OVSDB records matching conditions :param table: The OVS table to query :type table: string :param conditions:The conditions to satisfy the query :type conditions: 3-tuples containing (column, operation, match) Examples: atomic: ('tag', '=', 7) map: ('external_ids' '=', {'iface-id': 'xxx'}) field exists? ('external_ids', '!=', {'iface-id', ''}) set contains?: ('protocols', '{>=}', 'OpenFlow13') See the ovs-vsctl man page for more operations :param columns: Limit results to only columns, None means all columns :type columns: list of column names or None :returns: :class:`Command` with [{'column', value}, ...] 
result """ @abc.abstractmethod def set_controller(self, bridge, controllers): """Create a command to set an OVS bridge's OpenFlow controllers :param bridge: The name of the bridge :type bridge: string :param controllers: The controller strings :type controllers: list of strings, see ovs-vsctl manpage for format :returns: :class:`Command` with no result """ @abc.abstractmethod def del_controller(self, bridge): """Create a command to clear an OVS bridge's OpenFlow controllers :param bridge: The name of the bridge :type bridge: string :returns: :class:`Command` with no result """ @abc.abstractmethod def get_controller(self, bridge): """Create a command to return an OVS bridge's OpenFlow controllers :param bridge: The name of the bridge :type bridge: string :returns: :class:`Command` with list of controller strings result """ @abc.abstractmethod def set_fail_mode(self, bridge, mode): """Create a command to set an OVS bridge's failure mode :param bridge: The name of the bridge :type bridge: string :param mode: The failure mode :type mode: "secure" or "standalone" :returns: :class:`Command` with no result """ @abc.abstractmethod def add_port(self, bridge, port, may_exist=True): """Create a command to add a port to an OVS bridge :param bridge: The name of the bridge :type bridge: string :param port: The name of the port :type port: string :param may_exist: Do not fail if the port already exists :type may_exist: bool :returns: :class:`Command` with no result """ @abc.abstractmethod def del_port(self, port, bridge=None, if_exists=True): """Create a command to delete a port an OVS port :param port: The name of the port :type port: string :param bridge: Only delete port if it is attached to this bridge :type bridge: string :param if_exists: Do not fail if the port does not exist :type if_exists: bool :returns: :class:`Command` with no result """ @abc.abstractmethod def list_ports(self, bridge): """Create a command to list the names of ports on a bridge :param bridge: The name of the bridge :type bridge: string :returns: :class:`Command` with list of port names result """ @abc.abstractmethod def list_ifaces(self, bridge): """Create a command to list the names of interfaces on a bridge :param bridge: The name of the bridge :type bridge: string :returns: :class:`Command` with list of interfaces names result """ class TimeoutException(Exception): pass def val_to_py(val): """Convert a json ovsdb return value to native python object""" if isinstance(val, collections.Sequence) and len(val) == 2: if val[0] == "uuid": return uuid.UUID(val[1]) elif val[0] == "set": return [val_to_py(x) for x in val[1]] elif val[0] == "map": return {val_to_py(x): val_to_py(y) for x, y in val[1]} return val def py_to_val(pyval): """Convert python value to ovs-vsctl value argument""" if isinstance(pyval, bool): return 'true' if pyval is True else 'false' elif pyval == '': return '""' else: return pyval neutron-8.4.0/neutron/agent/l3/0000775000567000056710000000000013044373210017461 5ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/agent/l3/dvr.py0000664000567000056710000000552713044372760020650 0ustar jenkinsjenkins00000000000000# Copyright (c) 2014 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import weakref from neutron.agent.l3 import dvr_fip_ns from neutron.agent.l3 import dvr_snat_ns # TODO(Carl) Following constants retained to increase SNR during refactoring SNAT_INT_DEV_PREFIX = dvr_snat_ns.SNAT_INT_DEV_PREFIX SNAT_NS_PREFIX = dvr_snat_ns.SNAT_NS_PREFIX class AgentMixin(object): def __init__(self, host): # dvr data self._fip_namespaces = weakref.WeakValueDictionary() super(AgentMixin, self).__init__(host) def get_fip_ns(self, ext_net_id): # TODO(Carl) is this necessary? Code that this replaced was careful to # convert these to string like this so I preserved that. ext_net_id = str(ext_net_id) fip_ns = self._fip_namespaces.get(ext_net_id) if fip_ns and not fip_ns.destroyed: return fip_ns fip_ns = dvr_fip_ns.FipNamespace(ext_net_id, self.conf, self.driver, self.use_ipv6) self._fip_namespaces[ext_net_id] = fip_ns return fip_ns def get_ports_by_subnet(self, subnet_id): return self.plugin_rpc.get_ports_by_subnet(self.context, subnet_id) def _update_arp_entry(self, context, payload, action): router_id = payload['router_id'] ri = self.router_info.get(router_id) if not ri: return arp_table = payload['arp_table'] ip = arp_table['ip_address'] mac = arp_table['mac_address'] subnet_id = arp_table['subnet_id'] ri._update_arp_entry(ip, mac, subnet_id, action) def add_arp_entry(self, context, payload): """Add arp entry into router namespace. Called from RPC.""" self._update_arp_entry(context, payload, 'add') def del_arp_entry(self, context, payload): """Delete arp entry from router namespace. Called from RPC.""" self._update_arp_entry(context, payload, 'delete') def fipnamespace_delete_on_ext_net(self, context, ext_net_id): """Delete fip namespace after external network removed.""" fip_ns = self.get_fip_ns(ext_net_id) if fip_ns.agent_gateway_port and not fip_ns.destroyed: fip_ns.unsubscribe(ext_net_id) fip_ns.delete() neutron-8.4.0/neutron/agent/l3/config.py0000664000567000056710000001330513044372760021313 0ustar jenkinsjenkins00000000000000# Copyright (c) 2015 OpenStack Foundation. # # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import cfg from neutron._i18n import _ from neutron.agent.common import config from neutron.common import constants OPTS = [ cfg.StrOpt('agent_mode', default=constants.L3_AGENT_MODE_LEGACY, choices=(constants.L3_AGENT_MODE_DVR, constants.L3_AGENT_MODE_DVR_SNAT, constants.L3_AGENT_MODE_LEGACY), help=_("The working mode for the agent. Allowed modes are: " "'legacy' - this preserves the existing behavior " "where the L3 agent is deployed on a centralized " "networking node to provide L3 services like DNAT, " "and SNAT. 
Use this mode if you do not want to " "adopt DVR. 'dvr' - this mode enables DVR " "functionality and must be used for an L3 agent " "that runs on a compute host. 'dvr_snat' - this " "enables centralized SNAT support in conjunction " "with DVR. This mode must be used for an L3 agent " "running on a centralized node (or in single-host " "deployments, e.g. devstack)")), cfg.PortOpt('metadata_port', default=9697, help=_("TCP Port used by Neutron metadata namespace proxy.")), cfg.IntOpt('send_arp_for_ha', default=3, help=_("Send this many gratuitous ARPs for HA setup, if " "less than or equal to 0, the feature is disabled")), cfg.StrOpt('router_id', default='', deprecated_for_removal=True, help=_("If non-empty, the l3 agent can only configure a router " "that has the matching router ID.")), cfg.BoolOpt('handle_internal_only_routers', default=True, help=_("Indicates that this L3 agent should also handle " "routers that do not have an external network gateway " "configured. This option should be True only for a " "single agent in a Neutron deployment, and may be " "False for all agents if all routers must have an " "external network gateway.")), cfg.StrOpt('gateway_external_network_id', default='', help=_("When external_network_bridge is set, each L3 agent can " "be associated with no more than one external network. " "This value should be set to the UUID of that external " "network. To allow L3 agent support multiple external " "networks, both the external_network_bridge and " "gateway_external_network_id must be left empty.")), cfg.StrOpt('ipv6_gateway', default='', help=_("With IPv6, the network used for the external gateway " "does not need to have an associated subnet, since the " "automatically assigned link-local address (LLA) can " "be used. However, an IPv6 gateway address is needed " "for use as the next-hop for the default route. " "If no IPv6 gateway address is configured here, " "(and only then) the neutron router will be configured " "to get its default route from router advertisements " "(RAs) from the upstream router; in which case the " "upstream router must also be configured to send " "these RAs. " "The ipv6_gateway, when configured, should be the LLA " "of the interface on the upstream router. If a " "next-hop using a global unique address (GUA) is " "desired, it needs to be done via a subnet allocated " "to the network and not through this parameter. ")), cfg.StrOpt('prefix_delegation_driver', default='dibbler', help=_('Driver used for ipv6 prefix delegation. This needs to ' 'be an entry point defined in the ' 'neutron.agent.linux.pd_drivers namespace. See ' 'setup.cfg for entry points included with the neutron ' 'source.')), cfg.BoolOpt('enable_metadata_proxy', default=True, help=_("Allow running metadata proxy.")), cfg.StrOpt('metadata_access_mark', default='0x1', help=_('Iptables mangle mark used to mark metadata valid ' 'requests. This mark will be masked with 0xffff so ' 'that only the lower 16 bits will be used.')), cfg.StrOpt('external_ingress_mark', default='0x2', help=_('Iptables mangle mark used to mark ingress from ' 'external network. This mark will be masked with ' '0xffff so that only the lower 16 bits will be used.')), ] OPTS += config.EXT_NET_BRIDGE_OPTS neutron-8.4.0/neutron/agent/l3/router_info.py0000664000567000056710000013076613044372760022414 0ustar jenkinsjenkins00000000000000# Copyright (c) 2014 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import collections import netaddr from oslo_log import log as logging from neutron._i18n import _, _LE, _LW from neutron.agent.l3 import namespaces from neutron.agent.linux import ip_lib from neutron.agent.linux import iptables_manager from neutron.agent.linux import ra from neutron.common import constants as l3_constants from neutron.common import exceptions as n_exc from neutron.common import ipv6_utils from neutron.common import utils as common_utils from neutron.ipam import utils as ipam_utils LOG = logging.getLogger(__name__) INTERNAL_DEV_PREFIX = namespaces.INTERNAL_DEV_PREFIX EXTERNAL_DEV_PREFIX = namespaces.EXTERNAL_DEV_PREFIX FLOATINGIP_STATUS_NOCHANGE = object() ADDRESS_SCOPE_MARK_MASK = "0xffff0000" ADDRESS_SCOPE_MARK_ID_MIN = 1024 ADDRESS_SCOPE_MARK_ID_MAX = 2048 DEFAULT_ADDRESS_SCOPE = "noscope" class RouterInfo(object): def __init__(self, router_id, router, agent_conf, interface_driver, use_ipv6=False): self.router_id = router_id self.ex_gw_port = None self._snat_enabled = None self.fip_map = {} self.internal_ports = [] self.floating_ips = set() # Invoke the setter for establishing initial SNAT action self.router = router self.use_ipv6 = use_ipv6 ns = self.create_router_namespace_object( router_id, agent_conf, interface_driver, use_ipv6) self.router_namespace = ns self.ns_name = ns.name self.available_mark_ids = set(range(ADDRESS_SCOPE_MARK_ID_MIN, ADDRESS_SCOPE_MARK_ID_MAX)) self._address_scope_to_mark_id = { DEFAULT_ADDRESS_SCOPE: self.available_mark_ids.pop()} self.iptables_manager = iptables_manager.IptablesManager( use_ipv6=use_ipv6, namespace=self.ns_name) self.routes = [] self.agent_conf = agent_conf self.driver = interface_driver # radvd is a neutron.agent.linux.ra.DaemonMonitor self.radvd = None def initialize(self, process_monitor): """Initialize the router on the system. This differs from __init__ in that this method actually affects the system creating namespaces, starting processes, etc. The other merely initializes the python object. This separates in-memory object initialization from methods that actually go do stuff to the system. :param process_monitor: The agent's process monitor instance. 
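        A minimal usage sketch (the arguments are illustrative; a real
        agent supplies its own configuration, interface driver and
        process monitor)::

            ri = RouterInfo('router-uuid', router_dict, agent_conf, driver)
            ri.initialize(process_monitor)  # namespace created, radvd monitored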
""" self.process_monitor = process_monitor self.radvd = ra.DaemonMonitor(self.router_id, self.ns_name, process_monitor, self.get_internal_device_name, self.agent_conf) self.router_namespace.create() def create_router_namespace_object( self, router_id, agent_conf, iface_driver, use_ipv6): return namespaces.RouterNamespace( router_id, agent_conf, iface_driver, use_ipv6) @property def router(self): return self._router @router.setter def router(self, value): self._router = value if not self._router: return # enable_snat by default if it wasn't specified by plugin self._snat_enabled = self._router.get('enable_snat', True) def get_internal_device_name(self, port_id): return (INTERNAL_DEV_PREFIX + port_id)[:self.driver.DEV_NAME_LEN] def get_external_device_name(self, port_id): return (EXTERNAL_DEV_PREFIX + port_id)[:self.driver.DEV_NAME_LEN] def get_external_device_interface_name(self, ex_gw_port): return self.get_external_device_name(ex_gw_port['id']) def _update_routing_table(self, operation, route, namespace): cmd = ['ip', 'route', operation, 'to', route['destination'], 'via', route['nexthop']] ip_wrapper = ip_lib.IPWrapper(namespace=namespace) ip_wrapper.netns.execute(cmd, check_exit_code=False) def update_routing_table(self, operation, route): self._update_routing_table(operation, route, self.ns_name) def routes_updated(self, old_routes, new_routes): adds, removes = common_utils.diff_list_of_dict(old_routes, new_routes) for route in adds: LOG.debug("Added route entry is '%s'", route) # remove replaced route from deleted route for del_route in removes: if route['destination'] == del_route['destination']: removes.remove(del_route) #replace success even if there is no existing route self.update_routing_table('replace', route) for route in removes: LOG.debug("Removed route entry is '%s'", route) self.update_routing_table('delete', route) def get_ex_gw_port(self): return self.router.get('gw_port') def get_floating_ips(self): """Filter Floating IPs to be hosted on this agent.""" return self.router.get(l3_constants.FLOATINGIP_KEY, []) def floating_forward_rules(self, floating_ip, fixed_ip): return [('PREROUTING', '-d %s/32 -j DNAT --to-destination %s' % (floating_ip, fixed_ip)), ('OUTPUT', '-d %s/32 -j DNAT --to-destination %s' % (floating_ip, fixed_ip)), ('float-snat', '-s %s/32 -j SNAT --to-source %s' % (fixed_ip, floating_ip))] def floating_mangle_rules(self, floating_ip, fixed_ip, internal_mark): mark_traffic_to_floating_ip = ( 'floatingip', '-d %s -j MARK --set-xmark %s' % ( floating_ip, internal_mark)) mark_traffic_from_fixed_ip = ( 'FORWARD', '-s %s -j $float-snat' % fixed_ip) return [mark_traffic_to_floating_ip, mark_traffic_from_fixed_ip] def get_address_scope_mark_mask(self, address_scope=None): if not address_scope: address_scope = DEFAULT_ADDRESS_SCOPE if address_scope not in self._address_scope_to_mark_id: self._address_scope_to_mark_id[address_scope] = ( self.available_mark_ids.pop()) mark_id = self._address_scope_to_mark_id[address_scope] # NOTE: Address scopes use only the upper 16 bits of the 32 fwmark return "%s/%s" % (hex(mark_id << 16), ADDRESS_SCOPE_MARK_MASK) def get_port_address_scope_mark(self, port): """Get the IP version 4 and 6 address scope mark for the port :param port: A port dict from the RPC call :returns: A dict mapping the address family to the address scope mark """ port_scopes = port.get('address_scopes', {}) address_scope_mark_masks = ( (int(k), self.get_address_scope_mark_mask(v)) for k, v in port_scopes.items()) return 
collections.defaultdict(self.get_address_scope_mark_mask, address_scope_mark_masks) def process_floating_ip_nat_rules(self): """Configure NAT rules for the router's floating IPs. Configures iptables rules for the floating ips of the given router """ # Clear out all iptables rules for floating ips self.iptables_manager.ipv4['nat'].clear_rules_by_tag('floating_ip') floating_ips = self.get_floating_ips() # Loop once to ensure that floating ips are configured. for fip in floating_ips: # Rebuild iptables rules for the floating ip. fixed = fip['fixed_ip_address'] fip_ip = fip['floating_ip_address'] for chain, rule in self.floating_forward_rules(fip_ip, fixed): self.iptables_manager.ipv4['nat'].add_rule(chain, rule, tag='floating_ip') self.iptables_manager.apply() def process_floating_ip_address_scope_rules(self): """Configure address scope related iptables rules for the router's floating IPs. """ # Clear out all iptables rules for floating ips self.iptables_manager.ipv4['mangle'].clear_rules_by_tag('floating_ip') all_floating_ips = self.get_floating_ips() ext_scope = self._get_external_address_scope() # Filter out the floating ips that have fixed ip in the same address # scope. Because the packets for them will always be in one address # scope, no need to manipulate MARK/CONNMARK for them. floating_ips = [fip for fip in all_floating_ips if fip.get('fixed_ip_address_scope') != ext_scope] if floating_ips: ext_scope_mark = self.get_address_scope_mark_mask(ext_scope) ports_scopemark = self._get_address_scope_mark() devices_in_ext_scope = { device for device, mark in ports_scopemark[l3_constants.IP_VERSION_4].items() if mark == ext_scope_mark} # Add address scope for floatingip egress for device in devices_in_ext_scope: self.iptables_manager.ipv4['mangle'].add_rule( 'float-snat', '-o %s -j MARK --set-xmark %s' % (device, ext_scope_mark), tag='floating_ip') # Loop once to ensure that floating ips are configured. for fip in floating_ips: # Rebuild iptables rules for the floating ip. fip_ip = fip['floating_ip_address'] # Send the floating ip traffic to the right address scope fixed_ip = fip['fixed_ip_address'] fixed_scope = fip.get('fixed_ip_address_scope') internal_mark = self.get_address_scope_mark_mask(fixed_scope) mangle_rules = self.floating_mangle_rules( fip_ip, fixed_ip, internal_mark) for chain, rule in mangle_rules: self.iptables_manager.ipv4['mangle'].add_rule( chain, rule, tag='floating_ip') def process_snat_dnat_for_fip(self): try: self.process_floating_ip_nat_rules() except Exception: # TODO(salv-orlando): Less broad catching msg = _('L3 agent failure to setup NAT for floating IPs') LOG.exception(msg) raise n_exc.FloatingIpSetupException(msg) def _add_fip_addr_to_device(self, fip, device): """Configures the floating ip address on the device. 
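        Returns True if the address was configured successfully. The
        floating IP is first converted to a host CIDR, e.g. '203.0.113.5'
        becomes '203.0.113.5/32' via common_utils.ip_to_cidr().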
""" try: ip_cidr = common_utils.ip_to_cidr(fip['floating_ip_address']) device.addr.add(ip_cidr) return True except RuntimeError: # any exception occurred here should cause the floating IP # to be set in error state LOG.warning(_LW("Unable to configure IP address for " "floating IP: %s"), fip['id']) def add_floating_ip(self, fip, interface_name, device): raise NotImplementedError() def gateway_redirect_cleanup(self, rtr_interface): pass def remove_floating_ip(self, device, ip_cidr): device.delete_addr_and_conntrack_state(ip_cidr) def move_floating_ip(self, fip): return l3_constants.FLOATINGIP_STATUS_ACTIVE def remove_external_gateway_ip(self, device, ip_cidr): device.delete_addr_and_conntrack_state(ip_cidr) def get_router_cidrs(self, device): return set([addr['cidr'] for addr in device.addr.list()]) def process_floating_ip_addresses(self, interface_name): """Configure IP addresses on router's external gateway interface. Ensures addresses for existing floating IPs and cleans up those that should not longer be configured. """ fip_statuses = {} if interface_name is None: LOG.debug('No Interface for floating IPs router: %s', self.router['id']) return fip_statuses device = ip_lib.IPDevice(interface_name, namespace=self.ns_name) existing_cidrs = self.get_router_cidrs(device) new_cidrs = set() floating_ips = self.get_floating_ips() # Loop once to ensure that floating ips are configured. for fip in floating_ips: fip_ip = fip['floating_ip_address'] ip_cidr = common_utils.ip_to_cidr(fip_ip) new_cidrs.add(ip_cidr) fip_statuses[fip['id']] = l3_constants.FLOATINGIP_STATUS_ACTIVE if ip_cidr not in existing_cidrs: fip_statuses[fip['id']] = self.add_floating_ip( fip, interface_name, device) LOG.debug('Floating ip %(id)s added, status %(status)s', {'id': fip['id'], 'status': fip_statuses.get(fip['id'])}) elif (fip_ip in self.fip_map and self.fip_map[fip_ip] != fip['fixed_ip_address']): LOG.debug("Floating IP was moved from fixed IP " "%(old)s to %(new)s", {'old': self.fip_map[fip_ip], 'new': fip['fixed_ip_address']}) fip_statuses[fip['id']] = self.move_floating_ip(fip) elif fip_statuses[fip['id']] == fip['status']: # mark the status as not changed. 
we can't remove it because # that's how the caller determines that it was removed fip_statuses[fip['id']] = FLOATINGIP_STATUS_NOCHANGE fips_to_remove = ( ip_cidr for ip_cidr in existing_cidrs - new_cidrs if common_utils.is_cidr_host(ip_cidr)) for ip_cidr in fips_to_remove: LOG.debug("Removing floating ip %s from interface %s in " "namespace %s", ip_cidr, interface_name, self.ns_name) self.remove_floating_ip(device, ip_cidr) return fip_statuses def configure_fip_addresses(self, interface_name): try: return self.process_floating_ip_addresses(interface_name) except Exception: # TODO(salv-orlando): Less broad catching msg = _('L3 agent failure to setup floating IPs') LOG.exception(msg) raise n_exc.FloatingIpSetupException(msg) def put_fips_in_error_state(self): fip_statuses = {} for fip in self.router.get(l3_constants.FLOATINGIP_KEY, []): fip_statuses[fip['id']] = l3_constants.FLOATINGIP_STATUS_ERROR return fip_statuses def delete(self, agent): self.router['gw_port'] = None self.router[l3_constants.INTERFACE_KEY] = [] self.router[l3_constants.FLOATINGIP_KEY] = [] self.process_delete(agent) self.disable_radvd() self.router_namespace.delete() def _internal_network_updated(self, port, subnet_id, prefix, old_prefix, updated_cidrs): interface_name = self.get_internal_device_name(port['id']) if prefix != l3_constants.PROVISIONAL_IPV6_PD_PREFIX: fixed_ips = port['fixed_ips'] for fixed_ip in fixed_ips: if fixed_ip['subnet_id'] == subnet_id: v6addr = common_utils.ip_to_cidr(fixed_ip['ip_address'], fixed_ip.get('prefixlen')) if v6addr not in updated_cidrs: self.driver.add_ipv6_addr(interface_name, v6addr, self.ns_name) else: self.driver.delete_ipv6_addr_with_prefix(interface_name, old_prefix, self.ns_name) def _internal_network_added(self, ns_name, network_id, port_id, fixed_ips, mac_address, interface_name, prefix, mtu=None): LOG.debug("adding internal network: prefix(%s), port(%s)", prefix, port_id) self.driver.plug(network_id, port_id, interface_name, mac_address, namespace=ns_name, prefix=prefix, mtu=mtu) ip_cidrs = common_utils.fixed_ip_cidrs(fixed_ips) self.driver.init_router_port( interface_name, ip_cidrs, namespace=ns_name) for fixed_ip in fixed_ips: ip_lib.send_ip_addr_adv_notif(ns_name, interface_name, fixed_ip['ip_address'], self.agent_conf) def internal_network_added(self, port): network_id = port['network_id'] port_id = port['id'] fixed_ips = port['fixed_ips'] mac_address = port['mac_address'] interface_name = self.get_internal_device_name(port_id) self._internal_network_added(self.ns_name, network_id, port_id, fixed_ips, mac_address, interface_name, INTERNAL_DEV_PREFIX, mtu=port.get('mtu')) def internal_network_removed(self, port): interface_name = self.get_internal_device_name(port['id']) LOG.debug("removing internal network: port(%s) interface(%s)", port['id'], interface_name) if ip_lib.device_exists(interface_name, namespace=self.ns_name): self.driver.unplug(interface_name, namespace=self.ns_name, prefix=INTERNAL_DEV_PREFIX) def _get_existing_devices(self): ip_wrapper = ip_lib.IPWrapper(namespace=self.ns_name) ip_devs = ip_wrapper.get_devices(exclude_loopback=True) return [ip_dev.name for ip_dev in ip_devs] @staticmethod def _get_updated_ports(existing_ports, current_ports): updated_ports = dict() current_ports_dict = {p['id']: p for p in current_ports} for existing_port in existing_ports: current_port = current_ports_dict.get(existing_port['id']) if current_port: if (sorted(existing_port['fixed_ips'], key=common_utils.safe_sort_key) != sorted(current_port['fixed_ips'], 
key=common_utils.safe_sort_key)): updated_ports[current_port['id']] = current_port return updated_ports @staticmethod def _port_has_ipv6_subnet(port): if 'subnets' in port: for subnet in port['subnets']: if (netaddr.IPNetwork(subnet['cidr']).version == 6 and subnet['cidr'] != l3_constants.PROVISIONAL_IPV6_PD_PREFIX): return True def enable_radvd(self, internal_ports=None): LOG.debug('Spawning radvd daemon in router device: %s', self.router_id) if not internal_ports: internal_ports = self.internal_ports self.radvd.enable(internal_ports) def disable_radvd(self): LOG.debug('Terminating radvd daemon in router device: %s', self.router_id) self.radvd.disable() def internal_network_updated(self, interface_name, ip_cidrs): self.driver.init_router_port( interface_name, ip_cidrs=ip_cidrs, namespace=self.ns_name) def address_scope_mangle_rule(self, device_name, mark_mask): return '-i %s -j MARK --set-xmark %s' % (device_name, mark_mask) def address_scope_filter_rule(self, device_name, mark_mask): return '-o %s -m mark ! --mark %s -j DROP' % ( device_name, mark_mask) def _process_internal_ports(self, pd): existing_port_ids = set(p['id'] for p in self.internal_ports) internal_ports = self.router.get(l3_constants.INTERFACE_KEY, []) current_port_ids = set(p['id'] for p in internal_ports if p['admin_state_up']) new_port_ids = current_port_ids - existing_port_ids new_ports = [p for p in internal_ports if p['id'] in new_port_ids] old_ports = [p for p in self.internal_ports if p['id'] not in current_port_ids] updated_ports = self._get_updated_ports(self.internal_ports, internal_ports) enable_ra = False for p in new_ports: self.internal_network_added(p) LOG.debug("appending port %s to internal_ports cache", p) self.internal_ports.append(p) enable_ra = enable_ra or self._port_has_ipv6_subnet(p) for subnet in p['subnets']: if ipv6_utils.is_ipv6_pd_enabled(subnet): interface_name = self.get_internal_device_name(p['id']) pd.enable_subnet(self.router_id, subnet['id'], subnet['cidr'], interface_name, p['mac_address']) for p in old_ports: self.internal_network_removed(p) LOG.debug("removing port %s from internal_ports cache", p) self.internal_ports.remove(p) enable_ra = enable_ra or self._port_has_ipv6_subnet(p) for subnet in p['subnets']: if ipv6_utils.is_ipv6_pd_enabled(subnet): pd.disable_subnet(self.router_id, subnet['id']) updated_cidrs = [] if updated_ports: for index, p in enumerate(internal_ports): if not updated_ports.get(p['id']): continue self.internal_ports[index] = updated_ports[p['id']] interface_name = self.get_internal_device_name(p['id']) ip_cidrs = common_utils.fixed_ip_cidrs(p['fixed_ips']) LOG.debug("updating internal network for port %s", p) updated_cidrs += ip_cidrs self.internal_network_updated(interface_name, ip_cidrs) enable_ra = enable_ra or self._port_has_ipv6_subnet(p) # Check if there is any pd prefix update for p in internal_ports: if p['id'] in (set(current_port_ids) & set(existing_port_ids)): for subnet in p.get('subnets', []): if ipv6_utils.is_ipv6_pd_enabled(subnet): old_prefix = pd.update_subnet(self.router_id, subnet['id'], subnet['cidr']) if old_prefix: self._internal_network_updated(p, subnet['id'], subnet['cidr'], old_prefix, updated_cidrs) enable_ra = True # Enable RA if enable_ra: self.enable_radvd(internal_ports) existing_devices = self._get_existing_devices() current_internal_devs = set(n for n in existing_devices if n.startswith(INTERNAL_DEV_PREFIX)) current_port_devs = set(self.get_internal_device_name(port_id) for port_id in current_port_ids) stale_devs = 
current_internal_devs - current_port_devs for stale_dev in stale_devs: LOG.debug('Deleting stale internal router device: %s', stale_dev) pd.remove_stale_ri_ifname(self.router_id, stale_dev) self.driver.unplug(stale_dev, namespace=self.ns_name, prefix=INTERNAL_DEV_PREFIX) def _list_floating_ip_cidrs(self): # Compute a list of addresses this router is supposed to have. # This avoids unnecessarily removing those addresses and # causing a momentarily network outage. floating_ips = self.get_floating_ips() return [common_utils.ip_to_cidr(ip['floating_ip_address']) for ip in floating_ips] def _plug_external_gateway(self, ex_gw_port, interface_name, ns_name): self.driver.plug(ex_gw_port['network_id'], ex_gw_port['id'], interface_name, ex_gw_port['mac_address'], bridge=self.agent_conf.external_network_bridge, namespace=ns_name, prefix=EXTERNAL_DEV_PREFIX, mtu=ex_gw_port.get('mtu')) def _get_external_gw_ips(self, ex_gw_port): gateway_ips = [] if 'subnets' in ex_gw_port: gateway_ips = [subnet['gateway_ip'] for subnet in ex_gw_port['subnets'] if subnet['gateway_ip']] if self.use_ipv6 and not self.is_v6_gateway_set(gateway_ips): # No IPv6 gateway is available, but IPv6 is enabled. if self.agent_conf.ipv6_gateway: # ipv6_gateway configured, use address for default route. gateway_ips.append(self.agent_conf.ipv6_gateway) return gateway_ips def _add_route_to_gw(self, ex_gw_port, device_name, namespace, preserve_ips): # Note: ipv6_gateway is an ipv6 LLA # and so doesn't need a special route for subnet in ex_gw_port.get('subnets', []): is_gateway_not_in_subnet = (subnet['gateway_ip'] and not ipam_utils.check_subnet_ip( subnet['cidr'], subnet['gateway_ip'])) if is_gateway_not_in_subnet: preserve_ips.append(subnet['gateway_ip']) device = ip_lib.IPDevice(device_name, namespace=namespace) device.route.add_route(subnet['gateway_ip'], scope='link') def _external_gateway_added(self, ex_gw_port, interface_name, ns_name, preserve_ips): LOG.debug("External gateway added: port(%s), interface(%s), ns(%s)", ex_gw_port, interface_name, ns_name) self._plug_external_gateway(ex_gw_port, interface_name, ns_name) # Build up the interface and gateway IP addresses that # will be added to the interface. ip_cidrs = common_utils.fixed_ip_cidrs(ex_gw_port['fixed_ips']) gateway_ips = self._get_external_gw_ips(ex_gw_port) enable_ra_on_gw = False if self.use_ipv6 and not self.is_v6_gateway_set(gateway_ips): # There is no IPv6 gw_ip, use RouterAdvt for default route. enable_ra_on_gw = True self._add_route_to_gw(ex_gw_port, device_name=interface_name, namespace=ns_name, preserve_ips=preserve_ips) self.driver.init_router_port( interface_name, ip_cidrs, namespace=ns_name, extra_subnets=ex_gw_port.get('extra_subnets', []), preserve_ips=preserve_ips, clean_connections=True) device = ip_lib.IPDevice(interface_name, namespace=ns_name) current_gateways = set() for ip_version in (l3_constants.IP_VERSION_4, l3_constants.IP_VERSION_6): gateway = device.route.get_gateway(ip_version=ip_version) if gateway and gateway.get('gateway'): current_gateways.add(gateway.get('gateway')) for ip in current_gateways - set(gateway_ips): device.route.delete_gateway(ip) for ip in gateway_ips: device.route.add_gateway(ip) if enable_ra_on_gw: self.driver.configure_ipv6_ra(ns_name, interface_name) for fixed_ip in ex_gw_port['fixed_ips']: ip_lib.send_ip_addr_adv_notif(ns_name, interface_name, fixed_ip['ip_address'], self.agent_conf) def is_v6_gateway_set(self, gateway_ips): """Check to see if list of gateway_ips has an IPv6 gateway. 
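        For example, ['192.0.2.1', 'fe80::1'] yields True, while
        ['192.0.2.1'] alone yields False.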
""" # Note - don't require a try-except here as all # gateway_ips elements are valid addresses, if they exist. return any(netaddr.IPAddress(gw_ip).version == 6 for gw_ip in gateway_ips) def external_gateway_added(self, ex_gw_port, interface_name): preserve_ips = self._list_floating_ip_cidrs() self._external_gateway_added( ex_gw_port, interface_name, self.ns_name, preserve_ips) def external_gateway_updated(self, ex_gw_port, interface_name): preserve_ips = self._list_floating_ip_cidrs() self._external_gateway_added( ex_gw_port, interface_name, self.ns_name, preserve_ips) def external_gateway_removed(self, ex_gw_port, interface_name): LOG.debug("External gateway removed: port(%s), interface(%s)", ex_gw_port, interface_name) device = ip_lib.IPDevice(interface_name, namespace=self.ns_name) for ip_addr in ex_gw_port['fixed_ips']: self.remove_external_gateway_ip(device, common_utils.ip_to_cidr( ip_addr['ip_address'], ip_addr['prefixlen'])) self.driver.unplug(interface_name, bridge=self.agent_conf.external_network_bridge, namespace=self.ns_name, prefix=EXTERNAL_DEV_PREFIX) @staticmethod def _gateway_ports_equal(port1, port2): return port1 == port2 def _process_external_gateway(self, ex_gw_port, pd): # TODO(Carl) Refactor to clarify roles of ex_gw_port vs self.ex_gw_port ex_gw_port_id = (ex_gw_port and ex_gw_port['id'] or self.ex_gw_port and self.ex_gw_port['id']) interface_name = None if ex_gw_port_id: interface_name = self.get_external_device_name(ex_gw_port_id) if ex_gw_port: if not self.ex_gw_port: self.external_gateway_added(ex_gw_port, interface_name) pd.add_gw_interface(self.router['id'], interface_name) elif not self._gateway_ports_equal(ex_gw_port, self.ex_gw_port): self.external_gateway_updated(ex_gw_port, interface_name) elif not ex_gw_port and self.ex_gw_port: self.external_gateway_removed(self.ex_gw_port, interface_name) pd.remove_gw_interface(self.router['id']) elif not ex_gw_port and not self.ex_gw_port: for p in self.internal_ports: interface_name = self.get_internal_device_name(p['id']) self.gateway_redirect_cleanup(interface_name) existing_devices = self._get_existing_devices() stale_devs = [dev for dev in existing_devices if dev.startswith(EXTERNAL_DEV_PREFIX) and dev != interface_name] for stale_dev in stale_devs: LOG.debug('Deleting stale external router device: %s', stale_dev) pd.remove_gw_interface(self.router['id']) self.driver.unplug(stale_dev, bridge=self.agent_conf.external_network_bridge, namespace=self.ns_name, prefix=EXTERNAL_DEV_PREFIX) # Process SNAT rules for external gateway gw_port = self._router.get('gw_port') self._handle_router_snat_rules(gw_port, interface_name) def _prevent_snat_for_internal_traffic_rule(self, interface_name): return ( 'POSTROUTING', '! -i %(interface_name)s ' '! -o %(interface_name)s -m conntrack ! ' '--ctstate DNAT -j ACCEPT' % {'interface_name': interface_name}) def external_gateway_nat_fip_rules(self, ex_gw_ip, interface_name): dont_snat_traffic_to_internal_ports_if_not_to_floating_ip = ( self._prevent_snat_for_internal_traffic_rule(interface_name)) # Makes replies come back through the router to reverse DNAT ext_in_mark = self.agent_conf.external_ingress_mark snat_internal_traffic_to_floating_ip = ( 'snat', '-m mark ! 
--mark %s/%s ' '-m conntrack --ctstate DNAT ' '-j SNAT --to-source %s' % (ext_in_mark, l3_constants.ROUTER_MARK_MASK, ex_gw_ip)) return [dont_snat_traffic_to_internal_ports_if_not_to_floating_ip, snat_internal_traffic_to_floating_ip] def external_gateway_nat_snat_rules(self, ex_gw_ip, interface_name): snat_normal_external_traffic = ( 'snat', '-o %s -j SNAT --to-source %s' % (interface_name, ex_gw_ip)) return [snat_normal_external_traffic] def external_gateway_mangle_rules(self, interface_name): mark = self.agent_conf.external_ingress_mark mark_packets_entering_external_gateway_port = ( 'mark', '-i %s -j MARK --set-xmark %s/%s' % (interface_name, mark, l3_constants.ROUTER_MARK_MASK)) return [mark_packets_entering_external_gateway_port] def _empty_snat_chains(self, iptables_manager): iptables_manager.ipv4['nat'].empty_chain('POSTROUTING') iptables_manager.ipv4['nat'].empty_chain('snat') iptables_manager.ipv4['mangle'].empty_chain('mark') iptables_manager.ipv4['mangle'].empty_chain('POSTROUTING') def _add_snat_rules(self, ex_gw_port, iptables_manager, interface_name): self.process_external_port_address_scope_routing(iptables_manager) if ex_gw_port: # ex_gw_port should not be None in this case # NAT rules are added only if ex_gw_port has an IPv4 address for ip_addr in ex_gw_port['fixed_ips']: ex_gw_ip = ip_addr['ip_address'] if netaddr.IPAddress(ex_gw_ip).version == 4: if self._snat_enabled: rules = self.external_gateway_nat_snat_rules( ex_gw_ip, interface_name) for rule in rules: iptables_manager.ipv4['nat'].add_rule(*rule) rules = self.external_gateway_nat_fip_rules( ex_gw_ip, interface_name) for rule in rules: iptables_manager.ipv4['nat'].add_rule(*rule) rules = self.external_gateway_mangle_rules(interface_name) for rule in rules: iptables_manager.ipv4['mangle'].add_rule(*rule) break def _handle_router_snat_rules(self, ex_gw_port, interface_name): self._empty_snat_chains(self.iptables_manager) self.iptables_manager.ipv4['nat'].add_rule('snat', '-j $float-snat') self._add_snat_rules(ex_gw_port, self.iptables_manager, interface_name) def _process_external_on_delete(self, agent): fip_statuses = {} try: ex_gw_port = self.get_ex_gw_port() self._process_external_gateway(ex_gw_port, agent.pd) if not ex_gw_port: return interface_name = self.get_external_device_interface_name( ex_gw_port) fip_statuses = self.configure_fip_addresses(interface_name) except (n_exc.FloatingIpSetupException): # All floating IPs must be put in error state LOG.exception(_LE("Failed to process floating IPs.")) fip_statuses = self.put_fips_in_error_state() finally: self.update_fip_statuses(agent, fip_statuses) def process_external(self, agent): fip_statuses = {} try: with self.iptables_manager.defer_apply(): ex_gw_port = self.get_ex_gw_port() self._process_external_gateway(ex_gw_port, agent.pd) if not ex_gw_port: return # Process SNAT/DNAT rules and addresses for floating IPs self.process_snat_dnat_for_fip() # Once NAT rules for floating IPs are safely in place # configure their addresses on the external gateway port interface_name = self.get_external_device_interface_name( ex_gw_port) fip_statuses = self.configure_fip_addresses(interface_name) except (n_exc.FloatingIpSetupException, n_exc.IpTablesApplyException): # All floating IPs must be put in error state LOG.exception(_LE("Failed to process floating IPs.")) fip_statuses = self.put_fips_in_error_state() finally: self.update_fip_statuses(agent, fip_statuses) def update_fip_statuses(self, agent, fip_statuses): # Identify floating IPs which were disabled 
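        # The set difference below picks out floating IPs that were
        # configured on a previous pass but are absent from the new
        # fip_statuses map; those are reported to the server as DOWN.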
existing_floating_ips = self.floating_ips self.floating_ips = set(fip_statuses.keys()) for fip_id in existing_floating_ips - self.floating_ips: fip_statuses[fip_id] = l3_constants.FLOATINGIP_STATUS_DOWN # filter out statuses that didn't change fip_statuses = {f: stat for f, stat in fip_statuses.items() if stat != FLOATINGIP_STATUS_NOCHANGE} if not fip_statuses: return LOG.debug('Sending floating ip statuses: %s', fip_statuses) # Update floating IP status on the neutron server agent.plugin_rpc.update_floatingip_statuses( agent.context, self.router_id, fip_statuses) def _get_port_devicename_scopemark(self, ports, name_generator): devicename_scopemark = {l3_constants.IP_VERSION_4: dict(), l3_constants.IP_VERSION_6: dict()} for p in ports: device_name = name_generator(p['id']) ip_cidrs = common_utils.fixed_ip_cidrs(p['fixed_ips']) port_as_marks = self.get_port_address_scope_mark(p) for ip_version in {ip_lib.get_ip_version(cidr) for cidr in ip_cidrs}: devicename_scopemark[ip_version][device_name] = ( port_as_marks[ip_version]) return devicename_scopemark def _get_address_scope_mark(self): # Prepare address scope iptables rule for internal ports internal_ports = self.router.get(l3_constants.INTERFACE_KEY, []) ports_scopemark = self._get_port_devicename_scopemark( internal_ports, self.get_internal_device_name) # Prepare address scope iptables rule for external port external_port = self.get_ex_gw_port() if external_port: external_port_scopemark = self._get_port_devicename_scopemark( [external_port], self.get_external_device_name) for ip_version in (l3_constants.IP_VERSION_4, l3_constants.IP_VERSION_6): ports_scopemark[ip_version].update( external_port_scopemark[ip_version]) return ports_scopemark def _add_address_scope_mark(self, iptables_manager, ports_scopemark): external_device_name = None external_port = self.get_ex_gw_port() if external_port: external_device_name = self.get_external_device_name( external_port['id']) # Process address scope iptables rules for ip_version in (l3_constants.IP_VERSION_4, l3_constants.IP_VERSION_6): scopemarks = ports_scopemark[ip_version] iptables = iptables_manager.get_tables(ip_version) iptables['mangle'].empty_chain('scope') iptables['filter'].empty_chain('scope') dont_block_external = (ip_version == l3_constants.IP_VERSION_4 and self._snat_enabled and external_port) for device_name, mark in scopemarks.items(): # Add address scope iptables rule iptables['mangle'].add_rule( 'scope', self.address_scope_mangle_rule(device_name, mark)) if dont_block_external and device_name == external_device_name: continue iptables['filter'].add_rule( 'scope', self.address_scope_filter_rule(device_name, mark)) def process_ports_address_scope_iptables(self): ports_scopemark = self._get_address_scope_mark() self._add_address_scope_mark(self.iptables_manager, ports_scopemark) def _get_external_address_scope(self): external_port = self.get_ex_gw_port() if not external_port: return scopes = external_port.get('address_scopes', {}) return scopes.get(str(l3_constants.IP_VERSION_4)) def process_external_port_address_scope_routing(self, iptables_manager): if not self._snat_enabled: return external_port = self.get_ex_gw_port() if not external_port: return external_devicename = self.get_external_device_name( external_port['id']) # Saves the originating address scope by saving the packet MARK to # the CONNMARK for new connections so that returning traffic can be # match to it. 
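        # Only the upper 16 bits carry the address scope mark (see
        # get_address_scope_mark_mask), so the 0xffff0000 --nfmask/--ctmask
        # below confine the CONNMARK copy to that half of the 32-bit mark.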
rule = ('-o %s -m connmark --mark 0x0/0xffff0000 ' '-j CONNMARK --save-mark ' '--nfmask 0xffff0000 --ctmask 0xffff0000' % external_devicename) iptables_manager.ipv4['mangle'].add_rule('POSTROUTING', rule) address_scope = self._get_external_address_scope() if not address_scope: return # Prevents snat within the same address scope rule = '-o %s -m connmark --mark %s -j ACCEPT' % ( external_devicename, self.get_address_scope_mark_mask(address_scope)) iptables_manager.ipv4['nat'].add_rule('snat', rule) def process_address_scope(self): with self.iptables_manager.defer_apply(): self.process_ports_address_scope_iptables() self.process_floating_ip_address_scope_rules() @common_utils.exception_logger() def process_delete(self, agent): """Process the delete of this router This method is the point where the agent requests that this router be deleted. This is a separate code path from process in that it avoids any changes to the qrouter namespace that will be removed at the end of the operation. :param agent: Passes the agent in order to send RPC messages. """ LOG.debug("process router delete") if self.router_namespace.exists(): self._process_internal_ports(agent.pd) agent.pd.sync_router(self.router['id']) self._process_external_on_delete(agent) else: LOG.warning(_LW("Can't gracefully delete the router %s: " "no router namespace found."), self.router['id']) @common_utils.exception_logger() def process(self, agent): """Process updates to this router This method is the point where the agent requests that updates be applied to this router. :param agent: Passes the agent in order to send RPC messages. """ LOG.debug("process router updates") self._process_internal_ports(agent.pd) agent.pd.sync_router(self.router['id']) self.process_external(agent) self.process_address_scope() # Process static routes for router self.routes_updated(self.routes, self.router['routes']) self.routes = self.router['routes'] # Update ex_gw_port and enable_snat on the router info cache self.ex_gw_port = self.get_ex_gw_port() self.fip_map = dict([(fip['floating_ip_address'], fip['fixed_ip_address']) for fip in self.get_floating_ips()]) # TODO(Carl) FWaaS uses this. Why is it set after processing is done? self.enable_snat = self.router.get('enable_snat') neutron-8.4.0/neutron/agent/l3/item_allocator.py0000664000567000056710000001025213044372760023042 0ustar jenkinsjenkins00000000000000# Copyright 2015 IBM Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import os class ItemAllocator(object): """Manages allocation of items from a pool Some of the allocations such as link local addresses used for routing inside the fip namespaces need to persist across agent restarts to maintain consistency. Persisting such allocations in the neutron database is unnecessary and would degrade performance. ItemAllocator utilizes local file system to track allocations made for objects of a given class. The persistent datastore is a file. The records are one per line of the format: keyvalue. 
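    (that is, the key and the value joined by the delimiter).

    A minimal usage sketch (the state file path and pool contents are
    illustrative only)::

        pool = set(['169.254.31.28/31', '169.254.31.30/31'])
        allocator = ItemAllocator('/var/lib/neutron/fip-linklocal', str, pool)
        value = allocator.allocate('router-1')  # persisted to the state file
        allocator.release('router-1')           # returned to the free pool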
For example if the delimiter is a ',' (the default value) then the records will be: key,value (one per line) """ def __init__(self, state_file, ItemClass, item_pool, delimiter=','): """Read the file with previous allocations recorded. See the note in the allocate method for more detail. """ self.ItemClass = ItemClass self.state_file = state_file self.allocations = {} self.remembered = {} self.pool = item_pool for line in self._read(): key, saved_value = line.strip().split(delimiter) self.remembered[key] = self.ItemClass(saved_value) self.pool.difference_update(self.remembered.values()) def allocate(self, key): """Try to allocate an item of ItemClass type. I expect this to work in all cases because I expect the pool size to be large enough for any situation. Nonetheless, there is some defensive programming in here. Since the allocations are persisted, there is the chance to leak allocations which should have been released but were not. This leak could eventually exhaust the pool. So, if a new allocation is needed, the code first checks to see if there are any remembered allocations for the key. If not, it checks the free pool. If the free pool is empty then it dumps the remembered allocations to free the pool. This final desperate step will not happen often in practice. """ if key in self.allocations: return self.allocations[key] if key in self.remembered: self.allocations[key] = self.remembered.pop(key) return self.allocations[key] if not self.pool: # Desperate times. Try to get more in the pool. self.pool.update(self.remembered.values()) self.remembered.clear() if not self.pool: # The number of address pairs allocated from the # pool depends upon the prefix length specified # in FIP_LL_SUBNET raise RuntimeError("Cannot allocate item of type:" " %s from pool using file %s" % (self.ItemClass, self.state_file)) self.allocations[key] = self.pool.pop() self._write_allocations() return self.allocations[key] def release(self, key): self.pool.add(self.allocations.pop(key)) self._write_allocations() def _write_allocations(self): current = ["%s,%s\n" % (k, v) for k, v in self.allocations.items()] remembered = ["%s,%s\n" % (k, v) for k, v in self.remembered.items()] current.extend(remembered) self._write(current) def _write(self, lines): with open(self.state_file, "w") as f: f.writelines(lines) def _read(self): if not os.path.exists(self.state_file): return [] with open(self.state_file) as f: return f.readlines() neutron-8.4.0/neutron/agent/l3/agent.py0000664000567000056710000007541013044372760021151 0ustar jenkinsjenkins00000000000000# Copyright 2012 VMware, Inc. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
# import eventlet import netaddr from oslo_config import cfg from oslo_log import log as logging import oslo_messaging from oslo_service import loopingcall from oslo_service import periodic_task from oslo_utils import excutils from oslo_utils import timeutils from neutron._i18n import _, _LE, _LI, _LW from neutron.agent.common import utils as common_utils from neutron.agent.l3 import dvr from neutron.agent.l3 import dvr_edge_ha_router from neutron.agent.l3 import dvr_edge_router as dvr_router from neutron.agent.l3 import dvr_local_router as dvr_local_router from neutron.agent.l3 import ha from neutron.agent.l3 import ha_router from neutron.agent.l3 import legacy_router from neutron.agent.l3 import namespace_manager from neutron.agent.l3 import namespaces from neutron.agent.l3 import router_processing_queue as queue from neutron.agent.linux import external_process from neutron.agent.linux import ip_lib from neutron.agent.linux import pd from neutron.agent.metadata import driver as metadata_driver from neutron.agent import rpc as agent_rpc from neutron.callbacks import events from neutron.callbacks import registry from neutron.callbacks import resources from neutron.common import constants as l3_constants from neutron.common import exceptions as n_exc from neutron.common import ipv6_utils from neutron.common import rpc as n_rpc from neutron.common import topics from neutron import context as n_context from neutron import manager try: from neutron_fwaas.services.firewall.agents.l3reference \ import firewall_l3_agent except Exception: # TODO(dougw) - REMOVE THIS FROM NEUTRON; during l3_agent refactor only from neutron.services.firewall.agents.l3reference import firewall_l3_agent LOG = logging.getLogger(__name__) # TODO(Carl) Following constants retained to increase SNR during refactoring NS_PREFIX = namespaces.NS_PREFIX INTERNAL_DEV_PREFIX = namespaces.INTERNAL_DEV_PREFIX EXTERNAL_DEV_PREFIX = namespaces.EXTERNAL_DEV_PREFIX # Number of routers to fetch from server at a time on resync. # Needed to reduce load on server side and to speed up resync on agent side. SYNC_ROUTERS_MAX_CHUNK_SIZE = 256 SYNC_ROUTERS_MIN_CHUNK_SIZE = 32 class L3PluginApi(object): """Agent side of the l3 agent RPC API. API version history: 1.0 - Initial version. 1.1 - Floating IP operational status updates 1.2 - DVR support: new L3 plugin methods added. - get_ports_by_subnet - get_agent_gateway_port Needed by the agent when operating in DVR/DVR_SNAT mode 1.3 - Get the list of activated services 1.4 - Added L3 HA update_router_state. This method was reworked in to update_ha_routers_states 1.5 - Added update_ha_routers_states 1.6 - Added process_prefix_update 1.7 - DVR support: new L3 plugin methods added. - delete_agent_gateway_port 1.8 - Added address scope information 1.9 - Added get_router_ids """ def __init__(self, topic, host): self.host = host target = oslo_messaging.Target(topic=topic, version='1.0') self.client = n_rpc.get_client(target) def get_routers(self, context, router_ids=None): """Make a remote process call to retrieve the sync data for routers.""" cctxt = self.client.prepare() return cctxt.call(context, 'sync_routers', host=self.host, router_ids=router_ids) def get_router_ids(self, context): """Make a remote process call to retrieve scheduled routers ids.""" cctxt = self.client.prepare(version='1.9') return cctxt.call(context, 'get_router_ids', host=self.host) def get_external_network_id(self, context): """Make a remote process call to retrieve the external network id. 
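        A sketch of typical agent-side use (names as used elsewhere in
        this module)::

            plugin_rpc = L3PluginApi(topics.L3PLUGIN, host)
            ext_net_id = plugin_rpc.get_external_network_id(context)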
@raise oslo_messaging.RemoteError: with TooManyExternalNetworks as exc_type if there are more than one external network """ cctxt = self.client.prepare() return cctxt.call(context, 'get_external_network_id', host=self.host) def update_floatingip_statuses(self, context, router_id, fip_statuses): """Call the plugin update floating IPs's operational status.""" cctxt = self.client.prepare(version='1.1') return cctxt.call(context, 'update_floatingip_statuses', router_id=router_id, fip_statuses=fip_statuses) def get_ports_by_subnet(self, context, subnet_id): """Retrieve ports by subnet id.""" cctxt = self.client.prepare(version='1.2') return cctxt.call(context, 'get_ports_by_subnet', host=self.host, subnet_id=subnet_id) def get_agent_gateway_port(self, context, fip_net): """Get or create an agent_gateway_port.""" cctxt = self.client.prepare(version='1.2') return cctxt.call(context, 'get_agent_gateway_port', network_id=fip_net, host=self.host) def get_service_plugin_list(self, context): """Make a call to get the list of activated services.""" cctxt = self.client.prepare(version='1.3') return cctxt.call(context, 'get_service_plugin_list') def update_ha_routers_states(self, context, states): """Update HA routers states.""" cctxt = self.client.prepare(version='1.5') return cctxt.call(context, 'update_ha_routers_states', host=self.host, states=states) def process_prefix_update(self, context, prefix_update): """Process prefix update whenever prefixes get changed.""" cctxt = self.client.prepare(version='1.6') return cctxt.call(context, 'process_prefix_update', subnets=prefix_update) def delete_agent_gateway_port(self, context, fip_net): """Delete Floatingip_agent_gateway_port.""" cctxt = self.client.prepare(version='1.7') return cctxt.call(context, 'delete_agent_gateway_port', host=self.host, network_id=fip_net) class L3NATAgent(firewall_l3_agent.FWaaSL3AgentRpcCallback, ha.AgentMixin, dvr.AgentMixin, manager.Manager): """Manager for L3NatAgent API version history: 1.0 initial Version 1.1 changed the type of the routers parameter to the routers_updated method. It was previously a list of routers in dict format. It is now a list of router IDs only. Per rpc versioning rules, it is backwards compatible. 1.2 - DVR support: new L3 agent methods added. - add_arp_entry - del_arp_entry 1.3 - fipnamespace_delete_on_ext_net - to delete fipnamespace after the external network is removed Needed by the L3 service when dealing with DVR """ target = oslo_messaging.Target(version='1.3') def __init__(self, host, conf=None): if conf: self.conf = conf else: self.conf = cfg.CONF self.router_info = {} self._check_config_params() self.process_monitor = external_process.ProcessMonitor( config=self.conf, resource_type='router') self.driver = common_utils.load_interface_driver(self.conf) self.context = n_context.get_admin_context_without_session() self.plugin_rpc = L3PluginApi(topics.L3PLUGIN, host) self.fullsync = True self.sync_routers_chunk_size = SYNC_ROUTERS_MAX_CHUNK_SIZE # Get the list of service plugins from Neutron Server # This is the first place where we contact neutron-server on startup # so retry in case its not ready to respond. while True: try: self.neutron_service_plugins = ( self.plugin_rpc.get_service_plugin_list(self.context)) except oslo_messaging.RemoteError as e: with excutils.save_and_reraise_exception() as ctx: ctx.reraise = False LOG.warning(_LW('l3-agent cannot check service plugins ' 'enabled at the neutron server when ' 'startup due to RPC error. 
It happens ' 'when the server does not support this ' 'RPC API. If the error is ' 'UnsupportedVersion you can ignore this ' 'warning. Detail message: %s'), e) self.neutron_service_plugins = None except oslo_messaging.MessagingTimeout as e: with excutils.save_and_reraise_exception() as ctx: ctx.reraise = False LOG.warning(_LW('l3-agent cannot contact neutron server ' 'to retrieve service plugins enabled. ' 'Check connectivity to neutron server. ' 'Retrying... ' 'Detailed message: %(msg)s.') % {'msg': e}) continue break self.metadata_driver = None if self.conf.enable_metadata_proxy: self.metadata_driver = metadata_driver.MetadataDriver(self) self.namespaces_manager = namespace_manager.NamespaceManager( self.conf, self.driver, self.metadata_driver) self._queue = queue.RouterProcessingQueue() super(L3NATAgent, self).__init__(conf=self.conf) self.target_ex_net_id = None self.use_ipv6 = ipv6_utils.is_enabled() self.pd = pd.PrefixDelegation(self.context, self.process_monitor, self.driver, self.plugin_rpc.process_prefix_update, self.create_pd_router_update, self.conf) def _check_config_params(self): """Check items in configuration files. Check for required and invalid configuration items. The actual values are not verified for correctness. """ if not self.conf.interface_driver: msg = _LE('An interface driver must be specified') LOG.error(msg) raise SystemExit(1) if self.conf.external_network_bridge: LOG.warning(_LW("Using an 'external_network_bridge' value other " "than '' is deprecated. Any other values may " "not be supported in the future. Note that the " "default value is 'br-ex' so it must be " "explicitly set to a blank value.")) if self.conf.ipv6_gateway: # ipv6_gateway configured. Check for valid v6 link-local address. try: msg = _LE("%s used in config as ipv6_gateway is not a valid " "IPv6 link-local address."), ip_addr = netaddr.IPAddress(self.conf.ipv6_gateway) if ip_addr.version != 6 or not ip_addr.is_link_local(): LOG.error(msg, self.conf.ipv6_gateway) raise SystemExit(1) except netaddr.AddrFormatError: LOG.error(msg, self.conf.ipv6_gateway) raise SystemExit(1) def _fetch_external_net_id(self, force=False): """Find UUID of single external network for this agent.""" if self.conf.gateway_external_network_id: return self.conf.gateway_external_network_id # L3 agent doesn't use external_network_bridge to handle external # networks, so bridge_mappings with provider networks will be used # and the L3 agent is able to handle any external networks. 
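        # Hence, when external_network_bridge is unset there is no single
        # external network to pin this agent to, and None is returned here.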
if not self.conf.external_network_bridge: return if not force and self.target_ex_net_id: return self.target_ex_net_id try: self.target_ex_net_id = self.plugin_rpc.get_external_network_id( self.context) return self.target_ex_net_id except oslo_messaging.RemoteError as e: with excutils.save_and_reraise_exception() as ctx: if e.exc_type == 'TooManyExternalNetworks': ctx.reraise = False msg = _( "The 'gateway_external_network_id' option must be " "configured for this agent as Neutron has more than " "one external network.") raise Exception(msg) def _create_router(self, router_id, router): args = [] kwargs = { 'router_id': router_id, 'router': router, 'use_ipv6': self.use_ipv6, 'agent_conf': self.conf, 'interface_driver': self.driver, } if router.get('distributed'): kwargs['agent'] = self kwargs['host'] = self.host if router.get('distributed') and router.get('ha'): if self.conf.agent_mode == l3_constants.L3_AGENT_MODE_DVR_SNAT: kwargs['state_change_callback'] = self.enqueue_state_change return dvr_edge_ha_router.DvrEdgeHaRouter(*args, **kwargs) if router.get('distributed'): if self.conf.agent_mode == l3_constants.L3_AGENT_MODE_DVR_SNAT: return dvr_router.DvrEdgeRouter(*args, **kwargs) else: return dvr_local_router.DvrLocalRouter(*args, **kwargs) if router.get('ha'): kwargs['state_change_callback'] = self.enqueue_state_change return ha_router.HaRouter(*args, **kwargs) return legacy_router.LegacyRouter(*args, **kwargs) def _router_added(self, router_id, router): ri = self._create_router(router_id, router) registry.notify(resources.ROUTER, events.BEFORE_CREATE, self, router=ri) self.router_info[router_id] = ri ri.initialize(self.process_monitor) # TODO(Carl) This is a hook in to fwaas. It should be cleaned up. self.process_router_add(ri) def _safe_router_removed(self, router_id): """Try to delete a router and return True if successful.""" try: self._router_removed(router_id) except Exception: LOG.exception(_LE('Error while deleting router %s'), router_id) return False else: return True def _router_removed(self, router_id): ri = self.router_info.get(router_id) if ri is None: LOG.warning(_LW("Info for router %s was not found. 
" "Performing router cleanup"), router_id) self.namespaces_manager.ensure_router_cleanup(router_id) return registry.notify(resources.ROUTER, events.BEFORE_DELETE, self, router=ri) ri.delete(self) del self.router_info[router_id] registry.notify(resources.ROUTER, events.AFTER_DELETE, self, router=ri) def router_deleted(self, context, router_id): """Deal with router deletion RPC message.""" LOG.debug('Got router deleted notification for %s', router_id) update = queue.RouterUpdate(router_id, queue.PRIORITY_RPC, action=queue.DELETE_ROUTER) self._queue.add(update) def routers_updated(self, context, routers): """Deal with routers modification and creation RPC message.""" LOG.debug('Got routers updated notification :%s', routers) if routers: # This is needed for backward compatibility if isinstance(routers[0], dict): routers = [router['id'] for router in routers] for id in routers: update = queue.RouterUpdate(id, queue.PRIORITY_RPC) self._queue.add(update) def router_removed_from_agent(self, context, payload): LOG.debug('Got router removed from agent :%r', payload) router_id = payload['router_id'] update = queue.RouterUpdate(router_id, queue.PRIORITY_RPC, action=queue.DELETE_ROUTER) self._queue.add(update) def router_added_to_agent(self, context, payload): LOG.debug('Got router added to agent :%r', payload) self.routers_updated(context, payload) def _process_router_if_compatible(self, router): if (self.conf.external_network_bridge and not ip_lib.device_exists(self.conf.external_network_bridge)): LOG.error(_LE("The external network bridge '%s' does not exist"), self.conf.external_network_bridge) return if self.conf.router_id and router['id'] != self.conf.router_id: raise n_exc.RouterNotCompatibleWithAgent(router_id=router['id']) # Either ex_net_id or handle_internal_only_routers must be set ex_net_id = (router['external_gateway_info'] or {}).get('network_id') if not ex_net_id and not self.conf.handle_internal_only_routers: raise n_exc.RouterNotCompatibleWithAgent(router_id=router['id']) # If target_ex_net_id and ex_net_id are set they must be equal target_ex_net_id = self._fetch_external_net_id() if (target_ex_net_id and ex_net_id and ex_net_id != target_ex_net_id): # Double check that our single external_net_id has not changed # by forcing a check by RPC. 
if ex_net_id != self._fetch_external_net_id(force=True): raise n_exc.RouterNotCompatibleWithAgent( router_id=router['id']) if router['id'] not in self.router_info: self._process_added_router(router) else: self._process_updated_router(router) def _process_added_router(self, router): self._router_added(router['id'], router) ri = self.router_info[router['id']] ri.router = router ri.process(self) registry.notify(resources.ROUTER, events.AFTER_CREATE, self, router=ri) def _process_updated_router(self, router): ri = self.router_info[router['id']] ri.router = router registry.notify(resources.ROUTER, events.BEFORE_UPDATE, self, router=ri) ri.process(self) registry.notify(resources.ROUTER, events.AFTER_UPDATE, self, router=ri) def _resync_router(self, router_update, priority=queue.PRIORITY_SYNC_ROUTERS_TASK): router_update.timestamp = timeutils.utcnow() router_update.priority = priority router_update.router = None # Force the agent to resync the router self._queue.add(router_update) def _process_router_update(self): for rp, update in self._queue.each_update_to_next_router(): LOG.debug("Starting router update for %s, action %s, priority %s", update.id, update.action, update.priority) if update.action == queue.PD_UPDATE: self.pd.process_prefix_update() LOG.debug("Finished a router update for %s", update.id) continue router = update.router if update.action != queue.DELETE_ROUTER and not router: try: update.timestamp = timeutils.utcnow() routers = self.plugin_rpc.get_routers(self.context, [update.id]) except Exception: msg = _LE("Failed to fetch router information for '%s'") LOG.exception(msg, update.id) self._resync_router(update) continue if routers: router = routers[0] if not router: removed = self._safe_router_removed(update.id) if not removed: self._resync_router(update) else: # need to update timestamp of removed router in case # there are older events for the same router in the # processing queue (like events from fullsync) in order to # prevent deleted router re-creation rp.fetched_and_processed(update.timestamp) LOG.debug("Finished a router update for %s", update.id) continue try: self._process_router_if_compatible(router) except n_exc.RouterNotCompatibleWithAgent as e: LOG.exception(e.msg) # Was the router previously handled by this agent? if router['id'] in self.router_info: LOG.error(_LE("Removing incompatible router '%s'"), router['id']) self._safe_router_removed(router['id']) except Exception: msg = _LE("Failed to process compatible router '%s'") LOG.exception(msg, update.id) self._resync_router(update) continue LOG.debug("Finished a router update for %s", update.id) rp.fetched_and_processed(update.timestamp) def _process_routers_loop(self): LOG.debug("Starting _process_routers_loop") pool = eventlet.GreenPool(size=8) while True: pool.spawn_n(self._process_router_update) # NOTE(kevinbenton): this is set to 1 second because the actual interval # is controlled by a FixedIntervalLoopingCall in neutron/service.py that # is responsible for task execution. @periodic_task.periodic_task(spacing=1, run_immediately=True) def periodic_sync_routers_task(self, context): self.process_services_sync(context) if not self.fullsync: return LOG.debug("Starting fullsync periodic_sync_routers_task") # self.fullsync is True at this point. If an exception -- caught or # uncaught -- prevents setting it to False below then the next call # to periodic_sync_routers_task will re-enter this code and try again. 
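        # fetch_and_sync_all_routers() clears self.fullsync only after all
        # router chunks have been queued successfully, so a partial failure
        # leaves the flag set and the next periodic run retries.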
# Context manager self.namespaces_manager captures a picture of # namespaces *before* fetch_and_sync_all_routers fetches the full list # of routers from the database. This is important to correctly # identify stale ones. try: with self.namespaces_manager as ns_manager: self.fetch_and_sync_all_routers(context, ns_manager) except n_exc.AbortSyncRouters: self.fullsync = True def fetch_and_sync_all_routers(self, context, ns_manager): prev_router_ids = set(self.router_info) curr_router_ids = set() timestamp = timeutils.utcnow() try: router_ids = ([self.conf.router_id] if self.conf.router_id else self.plugin_rpc.get_router_ids(context)) # fetch routers by chunks to reduce the load on server and to # start router processing earlier for i in range(0, len(router_ids), self.sync_routers_chunk_size): routers = self.plugin_rpc.get_routers( context, router_ids[i:i + self.sync_routers_chunk_size]) LOG.debug('Processing :%r', routers) for r in routers: curr_router_ids.add(r['id']) ns_manager.keep_router(r['id']) if r.get('distributed'): # need to keep fip namespaces as well ext_net_id = (r['external_gateway_info'] or {}).get( 'network_id') is_snat_agent = (self.conf.agent_mode == l3_constants.L3_AGENT_MODE_DVR_SNAT) if ext_net_id: ns_manager.keep_ext_net(ext_net_id) elif is_snat_agent: ns_manager.ensure_snat_cleanup(r['id']) update = queue.RouterUpdate( r['id'], queue.PRIORITY_SYNC_ROUTERS_TASK, router=r, timestamp=timestamp) self._queue.add(update) except oslo_messaging.MessagingTimeout: if self.sync_routers_chunk_size > SYNC_ROUTERS_MIN_CHUNK_SIZE: self.sync_routers_chunk_size = max( self.sync_routers_chunk_size / 2, SYNC_ROUTERS_MIN_CHUNK_SIZE) LOG.error(_LE('Server failed to return info for routers in ' 'required time, decreasing chunk size to: %s'), self.sync_routers_chunk_size) else: LOG.error(_LE('Server failed to return info for routers in ' 'required time even with min chunk size: %s. ' 'It might be under very high load or ' 'just inoperable'), self.sync_routers_chunk_size) raise except oslo_messaging.MessagingException: LOG.exception(_LE("Failed synchronizing routers due to RPC error")) raise n_exc.AbortSyncRouters() self.fullsync = False LOG.debug("periodic_sync_routers_task successfully completed") # adjust chunk size after successful sync if self.sync_routers_chunk_size < SYNC_ROUTERS_MAX_CHUNK_SIZE: self.sync_routers_chunk_size = min( self.sync_routers_chunk_size + SYNC_ROUTERS_MIN_CHUNK_SIZE, SYNC_ROUTERS_MAX_CHUNK_SIZE) # Delete routers that have disappeared since the last sync for router_id in prev_router_ids - curr_router_ids: ns_manager.keep_router(router_id) update = queue.RouterUpdate(router_id, queue.PRIORITY_SYNC_ROUTERS_TASK, timestamp=timestamp, action=queue.DELETE_ROUTER) self._queue.add(update) def after_start(self): # Note: the FWaaS' vArmourL3NATAgent is a subclass of L3NATAgent. It # calls this method here. So Removing this after_start() would break # vArmourL3NATAgent. We need to find out whether vArmourL3NATAgent # can have L3NATAgentWithStateReport as its base class instead of # L3NATAgent. 
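        # L3NATAgentWithStateReport.after_start() overrides this method to
        # additionally report agent state and kick off prefix delegation.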
eventlet.spawn_n(self._process_routers_loop) LOG.info(_LI("L3 agent started")) def create_pd_router_update(self): router_id = None update = queue.RouterUpdate(router_id, queue.PRIORITY_PD_UPDATE, timestamp=timeutils.utcnow(), action=queue.PD_UPDATE) self._queue.add(update) class L3NATAgentWithStateReport(L3NATAgent): def __init__(self, host, conf=None): super(L3NATAgentWithStateReport, self).__init__(host=host, conf=conf) self.state_rpc = agent_rpc.PluginReportStateAPI(topics.REPORTS) self.agent_state = { 'binary': 'neutron-l3-agent', 'host': host, 'availability_zone': self.conf.AGENT.availability_zone, 'topic': topics.L3_AGENT, 'configurations': { 'agent_mode': self.conf.agent_mode, 'router_id': self.conf.router_id, 'handle_internal_only_routers': self.conf.handle_internal_only_routers, 'external_network_bridge': self.conf.external_network_bridge, 'gateway_external_network_id': self.conf.gateway_external_network_id, 'interface_driver': self.conf.interface_driver, 'log_agent_heartbeats': self.conf.AGENT.log_agent_heartbeats}, 'start_flag': True, 'agent_type': l3_constants.AGENT_TYPE_L3} report_interval = self.conf.AGENT.report_interval if report_interval: self.heartbeat = loopingcall.FixedIntervalLoopingCall( self._report_state) self.heartbeat.start(interval=report_interval) def _report_state(self): num_ex_gw_ports = 0 num_interfaces = 0 num_floating_ips = 0 router_infos = self.router_info.values() num_routers = len(router_infos) for ri in router_infos: ex_gw_port = ri.get_ex_gw_port() if ex_gw_port: num_ex_gw_ports += 1 num_interfaces += len(ri.router.get(l3_constants.INTERFACE_KEY, [])) num_floating_ips += len(ri.router.get(l3_constants.FLOATINGIP_KEY, [])) configurations = self.agent_state['configurations'] configurations['routers'] = num_routers configurations['ex_gw_ports'] = num_ex_gw_ports configurations['interfaces'] = num_interfaces configurations['floating_ips'] = num_floating_ips try: agent_status = self.state_rpc.report_state(self.context, self.agent_state, True) if agent_status == l3_constants.AGENT_REVIVED: LOG.info(_LI('Agent has just been revived. ' 'Doing a full sync.')) self.fullsync = True self.agent_state.pop('start_flag', None) except AttributeError: # This means the server does not support report_state LOG.warning(_LW("Neutron server does not support state report. " "State report for this agent will be disabled.")) self.heartbeat.stop() return except Exception: LOG.exception(_LE("Failed reporting state!")) def after_start(self): eventlet.spawn_n(self._process_routers_loop) LOG.info(_LI("L3 agent started")) # Do the report state before we do the first full sync. self._report_state() self.pd.after_start() def agent_updated(self, context, payload): """Handle the agent_updated notification event.""" self.fullsync = True LOG.info(_LI("agent_updated by server side %s!"), payload) neutron-8.4.0/neutron/agent/l3/namespaces.py0000664000567000056710000001112013044372760022156 0ustar jenkinsjenkins00000000000000# Copyright 2015 Hewlett-Packard Development Company, L.P. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. # import functools from oslo_log import log as logging from oslo_utils import excutils from neutron._i18n import _LE, _LW from neutron.agent.linux import ip_lib LOG = logging.getLogger(__name__) NS_PREFIX = 'qrouter-' INTERNAL_DEV_PREFIX = 'qr-' EXTERNAL_DEV_PREFIX = 'qg-' # TODO(Carl) It is odd that this file needs this. It is a dvr detail. ROUTER_2_FIP_DEV_PREFIX = 'rfp-' def build_ns_name(prefix, identifier): """Builds a namespace name from the given prefix and identifier :param prefix: The prefix which must end with '-' for legacy reasons :param identifier: The id associated with the namespace """ return prefix + identifier def get_prefix_from_ns_name(ns_name): """Parses prefix from prefix-identifier :param ns_name: The name of a namespace :returns: The prefix ending with a '-' or None if there is no '-' """ dash_index = ns_name.find('-') if 0 <= dash_index: return ns_name[:dash_index + 1] def get_id_from_ns_name(ns_name): """Parses identifier from prefix-identifier :param ns_name: The name of a namespace :returns: Identifier or None if there is no '-' to end the prefix """ dash_index = ns_name.find('-') if 0 <= dash_index: return ns_name[dash_index + 1:] def check_ns_existence(f): @functools.wraps(f) def wrapped(self, *args, **kwargs): if not self.exists(): LOG.warning(_LW('Namespace %(name)s does not exist. Skipping ' '%(func)s'), {'name': self.name, 'func': f.__name__}) return try: return f(self, *args, **kwargs) except RuntimeError: with excutils.save_and_reraise_exception() as ctx: if not self.exists(): LOG.debug('Namespace %(name)s was concurrently deleted', self.name) ctx.reraise = False return wrapped class Namespace(object): def __init__(self, name, agent_conf, driver, use_ipv6): self.name = name self.ip_wrapper_root = ip_lib.IPWrapper() self.agent_conf = agent_conf self.driver = driver self.use_ipv6 = use_ipv6 def create(self): ip_wrapper = self.ip_wrapper_root.ensure_namespace(self.name) cmd = ['sysctl', '-w', 'net.ipv4.ip_forward=1'] ip_wrapper.netns.execute(cmd) if self.use_ipv6: cmd = ['sysctl', '-w', 'net.ipv6.conf.all.forwarding=1'] ip_wrapper.netns.execute(cmd) def delete(self): try: self.ip_wrapper_root.netns.delete(self.name) except RuntimeError: msg = _LE('Failed trying to delete namespace: %s') LOG.exception(msg, self.name) def exists(self): return self.ip_wrapper_root.netns.exists(self.name) class RouterNamespace(Namespace): def __init__(self, router_id, agent_conf, driver, use_ipv6): self.router_id = router_id name = self._get_ns_name(router_id) super(RouterNamespace, self).__init__( name, agent_conf, driver, use_ipv6) @classmethod def _get_ns_name(cls, router_id): return build_ns_name(NS_PREFIX, router_id) @check_ns_existence def delete(self): ns_ip = ip_lib.IPWrapper(namespace=self.name) for d in ns_ip.get_devices(exclude_loopback=True): if d.name.startswith(INTERNAL_DEV_PREFIX): # device is on default bridge self.driver.unplug(d.name, namespace=self.name, prefix=INTERNAL_DEV_PREFIX) elif d.name.startswith(ROUTER_2_FIP_DEV_PREFIX): ns_ip.del_veth(d.name) elif d.name.startswith(EXTERNAL_DEV_PREFIX): self.driver.unplug( d.name, bridge=self.agent_conf.external_network_bridge, namespace=self.name, prefix=EXTERNAL_DEV_PREFIX) super(RouterNamespace, self).delete() neutron-8.4.0/neutron/agent/l3/dvr_edge_ha_router.py0000664000567000056710000001120013044372760023665 0ustar jenkinsjenkins00000000000000# Copyright (c) 2015 Hewlett-Packard Development Company, L.P. 
# All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron.agent.l3.dvr_edge_router import DvrEdgeRouter from neutron.agent.l3 import dvr_snat_ns from neutron.agent.l3.ha_router import HaRouter from neutron.agent.l3.router_info import RouterInfo from neutron.common import constants as l3_constants class DvrEdgeHaRouter(DvrEdgeRouter, HaRouter): """Router class which represents a centralized SNAT DVR router with HA capabilities. """ def __init__(self, agent, host, *args, **kwargs): super(DvrEdgeHaRouter, self).__init__(agent, host, *args, **kwargs) self.enable_snat = None @property def ha_namespace(self): if self.snat_namespace: return self.snat_namespace.name return None def internal_network_added(self, port): # Call RouterInfo's internal_network_added (Plugs the port, adds IP) RouterInfo.internal_network_added(self, port) for subnet in port['subnets']: self._set_subnet_arp_info(subnet['id']) self._snat_redirect_add_from_port(port) if not self.get_ex_gw_port() or not self._is_this_snat_host(): return sn_port = self.get_snat_port_for_internal_port(port) if not sn_port: return self._plug_ha_router_port( sn_port, self._get_snat_int_device_name, dvr_snat_ns.SNAT_INT_DEV_PREFIX) def external_gateway_added(self, ex_gw_port, interface_name): super(DvrEdgeHaRouter, self).external_gateway_added( ex_gw_port, interface_name) for port in self.get_snat_interfaces(): snat_interface_name = self._get_snat_int_device_name(port['id']) self._disable_ipv6_addressing_on_interface(snat_interface_name) self._add_vips( self.get_snat_port_for_internal_port(port), snat_interface_name) self._add_gateway_vip(ex_gw_port, interface_name) self._disable_ipv6_addressing_on_interface(interface_name) def external_gateway_removed(self, ex_gw_port, interface_name): for port in self.snat_ports: snat_interface = self._get_snat_int_device_name(port['id']) self.driver.unplug(snat_interface, namespace=self.ha_namespace, prefix=l3_constants.SNAT_INT_DEV_PREFIX) self._clear_vips(snat_interface) super(DvrEdgeHaRouter, self)._external_gateway_removed( ex_gw_port, interface_name) self._clear_vips(interface_name) def external_gateway_updated(self, ex_gw_port, interface_name): HaRouter.external_gateway_updated(self, ex_gw_port, interface_name) def initialize(self, process_monitor): self._create_snat_namespace() super(DvrEdgeHaRouter, self).initialize(process_monitor) def get_router_cidrs(self, device): return RouterInfo.get_router_cidrs(self, device) def _external_gateway_added(self, ex_gw_port, interface_name, ns_name, preserve_ips): self._plug_external_gateway(ex_gw_port, interface_name, ns_name) def _is_this_snat_host(self): return (self.agent_conf.agent_mode == l3_constants.L3_AGENT_MODE_DVR_SNAT) def _dvr_internal_network_removed(self, port): super(DvrEdgeHaRouter, self)._dvr_internal_network_removed(port) sn_port = self.get_snat_port_for_internal_port(port, self.snat_ports) if not sn_port: return self._clear_vips(self._get_snat_int_device_name(sn_port['id'])) def _plug_snat_port(self, port): """Used by _create_dvr_gateway 
in DvrEdgeRouter.""" interface_name = self._get_snat_int_device_name(port['id']) self.driver.plug(port['network_id'], port['id'], interface_name, port['mac_address'], namespace=self.snat_namespace.name, prefix=dvr_snat_ns.SNAT_INT_DEV_PREFIX, mtu=port.get('mtu')) neutron-8.4.0/neutron/agent/l3/namespace_manager.py0000664000567000056710000001341413044372736023500 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_log import log as logging from neutron._i18n import _LE from neutron.agent.l3 import dvr_fip_ns from neutron.agent.l3 import dvr_snat_ns from neutron.agent.l3 import namespaces from neutron.agent.linux import external_process from neutron.agent.linux import ip_lib LOG = logging.getLogger(__name__) class NamespaceManager(object): """Keeps track of namespaces that need to be cleaned up. This is a context manager that looks to clean up stale namespaces that have not been touched by the end of the "with" statement it is called in. This formalizes the pattern used in the L3 agent which enumerated all of the namespaces known to the system before a full sync. Then, after the full sync completed, it cleaned up any that were not touched during the sync. The agent and this context manager use method keep_router to communicate. In the "with" statement, the agent calls keep_router to record the id's of the routers whose namespaces should be preserved. Any other router and snat namespace present in the system will be deleted by the __exit__ method of this context manager This pattern can be more generally applicable to other resources besides namespaces in the future because it is idempotent and, as such, does not rely on state recorded at runtime in the agent so it handles agent restarts gracefully. """ ns_prefix_to_class_map = { namespaces.NS_PREFIX: namespaces.RouterNamespace, dvr_snat_ns.SNAT_NS_PREFIX: dvr_snat_ns.SnatNamespace, dvr_fip_ns.FIP_NS_PREFIX: dvr_fip_ns.FipNamespace, } def __init__(self, agent_conf, driver, metadata_driver=None): """Initialize the NamespaceManager. :param agent_conf: configuration from l3 agent :param driver: to perform operations on devices :param metadata_driver: used to cleanup stale metadata proxy processes """ self.agent_conf = agent_conf self.driver = driver self._clean_stale = True self.metadata_driver = metadata_driver if metadata_driver: self.process_monitor = external_process.ProcessMonitor( config=agent_conf, resource_type='router') def __enter__(self): self._all_namespaces = set() self._ids_to_keep = set() if self._clean_stale: self._all_namespaces = self.list_all() return self def __exit__(self, exc_type, value, traceback): # TODO(carl) Preserves old behavior of L3 agent where cleaning # namespaces was only done once after restart. Still a good idea? 
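        # Stale cleanup runs at most once per agent lifetime: _clean_stale
        # is flipped to False below after the first pass completes.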
if exc_type: # An exception occurred in the caller's with statement return False if not self._clean_stale: # No need to cleanup return True self._clean_stale = False for ns in self._all_namespaces: _ns_prefix, ns_id = self.get_prefix_and_id(ns) if ns_id in self._ids_to_keep: continue self._cleanup(_ns_prefix, ns_id) return True def keep_router(self, router_id): self._ids_to_keep.add(router_id) def keep_ext_net(self, ext_net_id): self._ids_to_keep.add(ext_net_id) def get_prefix_and_id(self, ns_name): """Get the prefix and id from the namespace name. :param ns_name: The name of the namespace :returns: tuple with prefix and id or None if no prefix matches """ prefix = namespaces.get_prefix_from_ns_name(ns_name) if prefix in self.ns_prefix_to_class_map: identifier = namespaces.get_id_from_ns_name(ns_name) return (prefix, identifier) def is_managed(self, ns_name): """Return True if the namespace name passed belongs to this manager.""" return self.get_prefix_and_id(ns_name) is not None def list_all(self): """Get a set of all namespaces on host managed by this manager.""" try: root_ip = ip_lib.IPWrapper() namespaces = root_ip.get_namespaces() return set(ns for ns in namespaces if self.is_managed(ns)) except RuntimeError: LOG.exception(_LE('RuntimeError in obtaining namespace list for ' 'namespace cleanup.')) return set() def ensure_router_cleanup(self, router_id): """Performs cleanup for a router""" for ns in self.list_all(): if ns.endswith(router_id): ns_prefix, ns_id = self.get_prefix_and_id(ns) self._cleanup(ns_prefix, ns_id) def ensure_snat_cleanup(self, router_id): prefix = dvr_snat_ns.SNAT_NS_PREFIX self._cleanup(prefix, router_id) def _cleanup(self, ns_prefix, ns_id): ns_class = self.ns_prefix_to_class_map[ns_prefix] ns = ns_class(ns_id, self.agent_conf, self.driver, use_ipv6=False) try: if self.metadata_driver: # cleanup stale metadata proxy processes first self.metadata_driver.destroy_monitored_metadata_proxy( self.process_monitor, ns_id, self.agent_conf) ns.delete() except RuntimeError: LOG.exception(_LE('Failed to destroy stale namespace %s'), ns) neutron-8.4.0/neutron/agent/l3/__init__.py0000664000567000056710000000000013044372736021574 0ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/agent/l3/dvr_fip_ns.py0000664000567000056710000003542113044372760022202 0ustar jenkinsjenkins00000000000000# Copyright (c) 2015 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
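"""Floating IP (fip-) namespace handling for DVR routers.

One FipNamespace is kept per external network on a node; it owns the
'fg-' gateway device and one veth pair per hosted router. A minimal
lifecycle sketch (the argument values are placeholders, not values from
this module):

    fip_ns = FipNamespace('ext-net-uuid', agent_conf, driver,
                          use_ipv6=False)
    fip_ns.create_or_update_gateway_port(agent_gateway_port)
    # per-router wiring is then done via create_rtr_2_fip_link(ri)
    if fip_ns.unsubscribe('ext-net-uuid'):
        fip_ns.delete()
"""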
import contextlib import os from oslo_concurrency import lockutils from oslo_log import log as logging from oslo_utils import excutils from neutron._i18n import _, _LE, _LW from neutron.agent.l3 import fip_rule_priority_allocator as frpa from neutron.agent.l3 import link_local_allocator as lla from neutron.agent.l3 import namespaces from neutron.agent.linux import ip_lib from neutron.agent.linux import iptables_manager from neutron.common import exceptions as n_exc from neutron.common import utils as common_utils from neutron.ipam import utils as ipam_utils LOG = logging.getLogger(__name__) FIP_NS_PREFIX = 'fip-' FIP_EXT_DEV_PREFIX = 'fg-' FIP_2_ROUTER_DEV_PREFIX = 'fpr-' ROUTER_2_FIP_DEV_PREFIX = namespaces.ROUTER_2_FIP_DEV_PREFIX # Route Table index for FIPs FIP_RT_TBL = 16 FIP_LL_SUBNET = '169.254.64.0/18' # Rule priority range for FIPs FIP_PR_START = 32768 FIP_PR_END = FIP_PR_START + 40000 class FipNamespace(namespaces.Namespace): def __init__(self, ext_net_id, agent_conf, driver, use_ipv6): name = self._get_ns_name(ext_net_id) super(FipNamespace, self).__init__( name, agent_conf, driver, use_ipv6) self._ext_net_id = ext_net_id self.agent_conf = agent_conf self.driver = driver self.use_ipv6 = use_ipv6 self.agent_gateway_port = None self._subscribers = set() path = os.path.join(agent_conf.state_path, 'fip-priorities') self._rule_priorities = frpa.FipRulePriorityAllocator(path, FIP_PR_START, FIP_PR_END) self._iptables_manager = iptables_manager.IptablesManager( namespace=self.get_name(), use_ipv6=self.use_ipv6) path = os.path.join(agent_conf.state_path, 'fip-linklocal-networks') self.local_subnets = lla.LinkLocalAllocator(path, FIP_LL_SUBNET) self.destroyed = False @classmethod def _get_ns_name(cls, ext_net_id): return namespaces.build_ns_name(FIP_NS_PREFIX, ext_net_id) def get_name(self): return self._get_ns_name(self._ext_net_id) def get_ext_device_name(self, port_id): return (FIP_EXT_DEV_PREFIX + port_id)[:self.driver.DEV_NAME_LEN] def get_int_device_name(self, router_id): return (FIP_2_ROUTER_DEV_PREFIX + router_id)[:self.driver.DEV_NAME_LEN] def get_rtr_ext_device_name(self, router_id): return (ROUTER_2_FIP_DEV_PREFIX + router_id)[:self.driver.DEV_NAME_LEN] def has_subscribers(self): return len(self._subscribers) != 0 def subscribe(self, external_net_id): is_first = not self.has_subscribers() self._subscribers.add(external_net_id) return is_first def unsubscribe(self, external_net_id): self._subscribers.discard(external_net_id) return not self.has_subscribers() def allocate_rule_priority(self, floating_ip): return self._rule_priorities.allocate(floating_ip) def deallocate_rule_priority(self, floating_ip): self._rule_priorities.release(floating_ip) @contextlib.contextmanager def _fip_port_lock(self, interface_name): # Use a namespace and port-specific lock semaphore to allow for # concurrency lock_name = 'port-lock-' + self.name + '-' + interface_name with lockutils.lock(lock_name, common_utils.SYNCHRONIZED_PREFIX): try: yield except Exception: with excutils.save_and_reraise_exception(): LOG.error(_LE('DVR: FIP namespace config failure ' 'for interface %s'), interface_name) def create_or_update_gateway_port(self, agent_gateway_port): interface_name = self.get_ext_device_name(agent_gateway_port['id']) # The lock is used to make sure another thread doesn't call to # update the gateway port before we are done initializing things. 
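        # subscribe() returns True only for the first subscriber of this
        # external network, so namespace and gateway setup happens exactly
        # once; subsequent calls take the lighter update path.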
with self._fip_port_lock(interface_name): is_first = self.subscribe(agent_gateway_port['network_id']) if is_first: self._create_gateway_port_and_ns(agent_gateway_port, interface_name) else: self._update_gateway_port(agent_gateway_port, interface_name) def _create_gateway_port_and_ns(self, agent_gateway_port, interface_name): """Create namespace and Floating IP gateway port.""" self.create() try: self._create_gateway_port(agent_gateway_port, interface_name) except Exception: # If an exception occurs at this point, then it is # good to clean up the namespace that has been created # and reraise the exception in order to resync the router with excutils.save_and_reraise_exception(): self.unsubscribe(agent_gateway_port['network_id']) self.delete() LOG.exception(_LE('DVR: Gateway setup in FIP namespace ' 'failed')) def _create_gateway_port(self, ex_gw_port, interface_name): """Request port creation from Plugin then configure gateway port.""" LOG.debug("DVR: adding gateway interface: %s", interface_name) ns_name = self.get_name() self.driver.plug(ex_gw_port['network_id'], ex_gw_port['id'], interface_name, ex_gw_port['mac_address'], bridge=self.agent_conf.external_network_bridge, namespace=ns_name, prefix=FIP_EXT_DEV_PREFIX, mtu=ex_gw_port.get('mtu')) # Remove stale fg devices ip_wrapper = ip_lib.IPWrapper(namespace=ns_name) devices = ip_wrapper.get_devices() for device in devices: name = device.name if name.startswith(FIP_EXT_DEV_PREFIX) and name != interface_name: LOG.debug('DVR: unplug: %s', name) ext_net_bridge = self.agent_conf.external_network_bridge self.driver.unplug(name, bridge=ext_net_bridge, namespace=ns_name, prefix=FIP_EXT_DEV_PREFIX) ip_cidrs = common_utils.fixed_ip_cidrs(ex_gw_port['fixed_ips']) self.driver.init_l3(interface_name, ip_cidrs, namespace=ns_name, clean_connections=True) self._update_gateway_port(ex_gw_port, interface_name) cmd = ['sysctl', '-w', 'net.ipv4.conf.%s.proxy_arp=1' % interface_name] ip_wrapper.netns.execute(cmd, check_exit_code=False) def create(self): # TODO(Carl) Get this functionality from mlavelle's namespace baseclass LOG.debug("DVR: add fip namespace: %s", self.name) ip_wrapper_root = ip_lib.IPWrapper() ip_wrapper = ip_wrapper_root.ensure_namespace(self.get_name()) # Somewhere in the 3.19 kernel timeframe ip_nonlocal_bind was # changed to be a per-namespace attribute. To be backwards # compatible we need to try both if at first we fail. try: ip_lib.set_ip_nonlocal_bind( value=1, namespace=self.name, log_fail_as_error=False) except RuntimeError: LOG.debug('DVR: fip namespace (%s) does not support setting ' 'net.ipv4.ip_nonlocal_bind, trying in root namespace', self.name) ip_lib.set_ip_nonlocal_bind(value=1) ip_wrapper.netns.execute(['sysctl', '-w', 'net.ipv4.ip_forward=1']) if self.use_ipv6: ip_wrapper.netns.execute(['sysctl', '-w', 'net.ipv6.conf.all.forwarding=1']) # no connection tracking needed in fip namespace self._iptables_manager.ipv4['raw'].add_rule('PREROUTING', '-j CT --notrack') self._iptables_manager.apply() def delete(self): self.destroyed = True self._delete() self.agent_gateway_port = None @namespaces.check_ns_existence def _delete(self): ip_wrapper = ip_lib.IPWrapper(namespace=self.name) for d in ip_wrapper.get_devices(exclude_loopback=True): if d.name.startswith(FIP_2_ROUTER_DEV_PREFIX): # internal link between IRs and FIP NS ip_wrapper.del_veth(d.name) elif d.name.startswith(FIP_EXT_DEV_PREFIX): # single port from FIP NS to br-ext # TODO(carl) Where does the port get deleted? 
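                # A likely answer to the TODO: the server-side port is
                # removed via L3PluginApi.delete_agent_gateway_port();
                # only the local device is unplugged here.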
LOG.debug('DVR: unplug: %s', d.name) ext_net_bridge = self.agent_conf.external_network_bridge self.driver.unplug(d.name, bridge=ext_net_bridge, namespace=self.name, prefix=FIP_EXT_DEV_PREFIX) # TODO(mrsmith): add LOG warn if fip count != 0 LOG.debug('DVR: destroy fip namespace: %s', self.name) super(FipNamespace, self).delete() def _check_for_gateway_ip_change(self, new_agent_gateway_port): def get_gateway_ips(gateway_port): gw_ips = {} if gateway_port: for subnet in gateway_port.get('subnets', []): gateway_ip = subnet.get('gateway_ip', None) if gateway_ip: ip_version = ip_lib.get_ip_version(gateway_ip) gw_ips[ip_version] = gateway_ip return gw_ips new_gw_ips = get_gateway_ips(new_agent_gateway_port) old_gw_ips = get_gateway_ips(self.agent_gateway_port) return new_gw_ips != old_gw_ips def _update_gateway_port(self, agent_gateway_port, interface_name): if (self.agent_gateway_port and not self._check_for_gateway_ip_change(agent_gateway_port)): return ns_name = self.get_name() ipd = ip_lib.IPDevice(interface_name, namespace=ns_name) # If the 'fg-' device doesn't exist in the namespace then trying # to send advertisements or configure the default route will just # throw exceptions. Unsubscribe this external network so that # the next call will trigger the interface to be plugged. if not ipd.exists(): self.unsubscribe(agent_gateway_port['network_id']) LOG.warning(_LW('DVR: FIP gateway port with interface ' 'name: %(device)s does not exist in the given ' 'namespace: %(ns)s'), {'device': interface_name, 'ns': ns_name}) msg = _('DVR: Gateway setup in FIP namespace failed, retry ' 'should be attempted on next call') raise n_exc.FloatingIpSetupException(msg) for fixed_ip in agent_gateway_port['fixed_ips']: ip_lib.send_ip_addr_adv_notif(ns_name, interface_name, fixed_ip['ip_address'], self.agent_conf) for subnet in agent_gateway_port['subnets']: gw_ip = subnet.get('gateway_ip') if gw_ip: is_gateway_not_in_subnet = not ipam_utils.check_subnet_ip( subnet.get('cidr'), gw_ip) if is_gateway_not_in_subnet: ipd.route.add_route(gw_ip, scope='link') ipd.route.add_gateway(gw_ip) else: current_gateway = ipd.route.get_gateway() if current_gateway and current_gateway.get('gateway'): ipd.route.delete_gateway(current_gateway.get('gateway')) # Cache the agent gateway port after successfully configuring # the gateway, so that checking on self.agent_gateway_port # will be a valid check self.agent_gateway_port = agent_gateway_port def _add_cidr_to_device(self, device, ip_cidr): if not device.addr.list(to=ip_cidr): device.addr.add(ip_cidr, add_broadcast=False) def create_rtr_2_fip_link(self, ri): """Create interface between router and Floating IP namespace.""" LOG.debug("Create FIP link interfaces for router %s", ri.router_id) rtr_2_fip_name = self.get_rtr_ext_device_name(ri.router_id) fip_2_rtr_name = self.get_int_device_name(ri.router_id) fip_ns_name = self.get_name() # add link local IP to interface if ri.rtr_fip_subnet is None: ri.rtr_fip_subnet = self.local_subnets.allocate(ri.router_id) rtr_2_fip, fip_2_rtr = ri.rtr_fip_subnet.get_pair() rtr_2_fip_dev = ip_lib.IPDevice(rtr_2_fip_name, namespace=ri.ns_name) fip_2_rtr_dev = ip_lib.IPDevice(fip_2_rtr_name, namespace=fip_ns_name) if not rtr_2_fip_dev.exists(): ip_wrapper = ip_lib.IPWrapper(namespace=ri.ns_name) rtr_2_fip_dev, fip_2_rtr_dev = ip_wrapper.add_veth(rtr_2_fip_name, fip_2_rtr_name, fip_ns_name) mtu = (self.agent_conf.network_device_mtu or ri.get_ex_gw_port().get('mtu')) if mtu: rtr_2_fip_dev.link.set_mtu(mtu) fip_2_rtr_dev.link.set_mtu(mtu) 
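            # Bring both ends up before the link-local addresses from
            # FIP_LL_SUBNET are assigned below; the pair gives the router
            # and FIP namespaces a routed path without consuming tenant or
            # external addresses.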
rtr_2_fip_dev.link.set_up() fip_2_rtr_dev.link.set_up() self._add_cidr_to_device(rtr_2_fip_dev, str(rtr_2_fip)) self._add_cidr_to_device(fip_2_rtr_dev, str(fip_2_rtr)) # add default route for the link local interface rtr_2_fip_dev.route.add_gateway(str(fip_2_rtr.ip), table=FIP_RT_TBL) def scan_fip_ports(self, ri): # don't scan if not dvr or count is not None if ri.dist_fip_count is not None: return # scan system for any existing fip ports ri.dist_fip_count = 0 rtr_2_fip_interface = self.get_rtr_ext_device_name(ri.router_id) device = ip_lib.IPDevice(rtr_2_fip_interface, namespace=ri.ns_name) if device.exists(): ri.dist_fip_count = len(ri.get_router_cidrs(device)) neutron-8.4.0/neutron/agent/l3/ha.py0000664000567000056710000001701113044372760020434 0ustar jenkinsjenkins00000000000000# Copyright (c) 2014 OpenStack Foundation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import os import eventlet from oslo_config import cfg from oslo_log import log as logging import webob from neutron._i18n import _, _LI from neutron.agent.linux import keepalived from neutron.agent.linux import utils as agent_utils from neutron.common import utils as common_utils from neutron.notifiers import batch_notifier LOG = logging.getLogger(__name__) KEEPALIVED_STATE_CHANGE_SERVER_BACKLOG = 4096 OPTS = [ cfg.StrOpt('ha_confs_path', default='$state_path/ha_confs', help=_('Location to store keepalived/conntrackd ' 'config files')), cfg.StrOpt('ha_vrrp_auth_type', default='PASS', choices=keepalived.VALID_AUTH_TYPES, help=_('VRRP authentication type')), cfg.StrOpt('ha_vrrp_auth_password', help=_('VRRP authentication password'), secret=True), cfg.IntOpt('ha_vrrp_advert_int', default=2, help=_('The advertisement interval in seconds')), cfg.IntOpt('ha_keepalived_state_change_server_threads', default=(1 + common_utils.cpu_count()) // 2, min=1, help=_('Number of concurrent threads for ' 'keepalived server connection requests.' 
' More threads create a higher CPU load ' 'on the agent node.')), ] class KeepalivedStateChangeHandler(object): def __init__(self, agent): self.agent = agent @webob.dec.wsgify(RequestClass=webob.Request) def __call__(self, req): router_id = req.headers['X-Neutron-Router-Id'] state = req.headers['X-Neutron-State'] self.enqueue(router_id, state) def enqueue(self, router_id, state): LOG.debug('Handling notification for router ' '%(router_id)s, state %(state)s', {'router_id': router_id, 'state': state}) self.agent.enqueue_state_change(router_id, state) class L3AgentKeepalivedStateChangeServer(object): def __init__(self, agent, conf): self.agent = agent self.conf = conf agent_utils.ensure_directory_exists_without_file( self.get_keepalived_state_change_socket_path(self.conf)) @classmethod def get_keepalived_state_change_socket_path(cls, conf): return os.path.join(conf.state_path, 'keepalived-state-change') def run(self): server = agent_utils.UnixDomainWSGIServer( 'neutron-keepalived-state-change', num_threads=self.conf.ha_keepalived_state_change_server_threads) server.start(KeepalivedStateChangeHandler(self.agent), self.get_keepalived_state_change_socket_path(self.conf), workers=0, backlog=KEEPALIVED_STATE_CHANGE_SERVER_BACKLOG) server.wait() class AgentMixin(object): def __init__(self, host): self._init_ha_conf_path() super(AgentMixin, self).__init__(host) self.state_change_notifier = batch_notifier.BatchNotifier( self._calculate_batch_duration(), self.notify_server) eventlet.spawn(self._start_keepalived_notifications_server) def _start_keepalived_notifications_server(self): state_change_server = ( L3AgentKeepalivedStateChangeServer(self, self.conf)) state_change_server.run() def _calculate_batch_duration(self): # A backup becomes the master after missing 3 advertisements dfrom it detection_time = self.conf.ha_vrrp_advert_int * 3 # Keepalived takes a couple of seconds to configure the VIPs configuration_time = 2 # Give it enough slack to batch all events due to the same failure return (detection_time + configuration_time) * 2 def enqueue_state_change(self, router_id, state): LOG.info(_LI('Router %(router_id)s transitioned to %(state)s'), {'router_id': router_id, 'state': state}) try: ri = self.router_info[router_id] except KeyError: LOG.info(_LI('Router %s is not managed by this agent. It was ' 'possibly deleted concurrently.'), router_id) return self._configure_ipv6_ra_on_ext_gw_port_if_necessary(ri, state) if self.conf.enable_metadata_proxy: self._update_metadata_proxy(ri, router_id, state) self._update_radvd_daemon(ri, state) self.state_change_notifier.queue_event((router_id, state)) def _configure_ipv6_ra_on_ext_gw_port_if_necessary(self, ri, state): # If ipv6 is enabled on the platform, ipv6_gateway config flag is # not set and external_network associated to the router does not # include any IPv6 subnet, enable the gateway interface to accept # Router Advts from upstream router for default route. 
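        # Note that this is applied only on a transition to 'master';
        # backup instances never enable RA acceptance on the gateway port.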
ex_gw_port_id = ri.ex_gw_port and ri.ex_gw_port['id'] if state == 'master' and ex_gw_port_id and ri.use_ipv6: gateway_ips = ri._get_external_gw_ips(ri.ex_gw_port) if not ri.is_v6_gateway_set(gateway_ips): interface_name = ri.get_external_device_name(ex_gw_port_id) if ri.router.get('distributed', False): namespace = ri.ha_namespace else: namespace = ri.ns_name ri.driver.configure_ipv6_ra(namespace, interface_name) def _update_metadata_proxy(self, ri, router_id, state): if state == 'master': LOG.debug('Spawning metadata proxy for router %s', router_id) self.metadata_driver.spawn_monitored_metadata_proxy( self.process_monitor, ri.ns_name, self.conf.metadata_port, self.conf, router_id=ri.router_id) else: LOG.debug('Closing metadata proxy for router %s', router_id) self.metadata_driver.destroy_monitored_metadata_proxy( self.process_monitor, ri.router_id, self.conf) def _update_radvd_daemon(self, ri, state): # Radvd has to be spawned only on the Master HA Router. If there are # any state transitions, we enable/disable radvd accordingly. if state == 'master': ri.enable_radvd() else: ri.disable_radvd() def notify_server(self, batched_events): translation_map = {'master': 'active', 'backup': 'standby', 'fault': 'standby'} translated_states = dict((router_id, translation_map[state]) for router_id, state in batched_events) LOG.debug('Updating server with HA routers states %s', translated_states) self.plugin_rpc.update_ha_routers_states( self.context, translated_states) def _init_ha_conf_path(self): ha_full_path = os.path.dirname("/%s/" % self.conf.ha_confs_path) common_utils.ensure_dir(ha_full_path) neutron-8.4.0/neutron/agent/l3/dvr_snat_ns.py0000664000567000056710000000445313044372760022372 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
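"""The snat- namespace holding the centralized SNAT half of a DVR router.

It exists only on the node hosting the router's gateway (dvr_snat mode).
A minimal lifecycle sketch (the constructor values are placeholders):

    snat_ns = SnatNamespace('router-uuid', agent_conf, driver,
                            use_ipv6=False)
    snat_ns.create()    # also adjusts ip_nonlocal_bind inside the ns
    snat_ns.delete()    # unplugs its devices, then removes the ns
"""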
from oslo_log import log as logging from neutron.agent.l3 import namespaces from neutron.agent.linux import ip_lib from neutron.common import constants LOG = logging.getLogger(__name__) SNAT_NS_PREFIX = 'snat-' SNAT_INT_DEV_PREFIX = constants.SNAT_INT_DEV_PREFIX class SnatNamespace(namespaces.Namespace): def __init__(self, router_id, agent_conf, driver, use_ipv6): self.router_id = router_id name = self.get_snat_ns_name(router_id) super(SnatNamespace, self).__init__( name, agent_conf, driver, use_ipv6) def create(self): super(SnatNamespace, self).create() # This might be an HA router namespaces and it should not have # ip_nonlocal_bind enabled ip_lib.set_ip_nonlocal_bind_for_namespace(self.name) @classmethod def get_snat_ns_name(cls, router_id): return namespaces.build_ns_name(SNAT_NS_PREFIX, router_id) @namespaces.check_ns_existence def delete(self): ns_ip = ip_lib.IPWrapper(namespace=self.name) for d in ns_ip.get_devices(exclude_loopback=True): if d.name.startswith(SNAT_INT_DEV_PREFIX): LOG.debug('Unplugging DVR device %s', d.name) self.driver.unplug(d.name, namespace=self.name, prefix=SNAT_INT_DEV_PREFIX) elif d.name.startswith(namespaces.EXTERNAL_DEV_PREFIX): self.driver.unplug( d.name, bridge=self.agent_conf.external_network_bridge, namespace=self.name, prefix=namespaces.EXTERNAL_DEV_PREFIX) # TODO(mrsmith): delete ext-gw-port LOG.debug('DVR: destroy snat ns: %s', self.name) super(SnatNamespace, self).delete() neutron-8.4.0/neutron/agent/l3/dvr_edge_router.py0000664000567000056710000002473513044372760023236 0ustar jenkinsjenkins00000000000000# Copyright (c) 2015 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_log import log as logging from neutron._i18n import _LE from neutron.agent.l3 import dvr_local_router from neutron.agent.l3 import dvr_snat_ns from neutron.agent.l3 import router_info as router from neutron.agent.linux import ip_lib from neutron.agent.linux import iptables_manager from neutron.common import constants as l3_constants LOG = logging.getLogger(__name__) class DvrEdgeRouter(dvr_local_router.DvrLocalRouter): def __init__(self, agent, host, *args, **kwargs): super(DvrEdgeRouter, self).__init__(agent, host, *args, **kwargs) self.snat_namespace = dvr_snat_ns.SnatNamespace( self.router_id, self.agent_conf, self.driver, self.use_ipv6) self.snat_iptables_manager = None def external_gateway_added(self, ex_gw_port, interface_name): super(DvrEdgeRouter, self).external_gateway_added( ex_gw_port, interface_name) if self._is_this_snat_host(): self._create_dvr_gateway(ex_gw_port, interface_name) # NOTE: When a router is created without a gateway the routes get # added to the router namespace, but if we wanted to populate # the same routes to the snat namespace after the gateway port # is added, we need to call routes_updated here. self.routes_updated([], self.router['routes']) elif self.snat_namespace.exists(): # This is the case where the snat was moved manually or # rescheduled to a different agent when the agent was dead. 
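            # external_gateway_removed() below unplugs the gateway device
            # and then deletes the stale snat- namespace itself.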
LOG.debug("SNAT was moved or rescheduled to a different host " "and does not match with the current host. This is " "a stale namespace %s and will be cleared from the " "current dvr_snat host.", self.snat_namespace.name) self.external_gateway_removed(ex_gw_port, interface_name) def external_gateway_updated(self, ex_gw_port, interface_name): if not self._is_this_snat_host(): # no centralized SNAT gateway for this node/agent LOG.debug("not hosting snat for router: %s", self.router['id']) if self.snat_namespace.exists(): LOG.debug("SNAT was rescheduled to host %s. Clearing snat " "namespace.", self.router.get('gw_port_host')) return self.external_gateway_removed( ex_gw_port, interface_name) return if not self.snat_namespace.exists(): # SNAT might be rescheduled to this agent; need to process like # newly created gateway return self.external_gateway_added(ex_gw_port, interface_name) else: self._external_gateway_added(ex_gw_port, interface_name, self.snat_namespace.name, preserve_ips=[]) def _external_gateway_removed(self, ex_gw_port, interface_name): super(DvrEdgeRouter, self).external_gateway_removed(ex_gw_port, interface_name) if not self._is_this_snat_host() and not self.snat_namespace.exists(): # no centralized SNAT gateway for this node/agent LOG.debug("not hosting snat for router: %s", self.router['id']) return self.driver.unplug(interface_name, bridge=self.agent_conf.external_network_bridge, namespace=self.snat_namespace.name, prefix=router.EXTERNAL_DEV_PREFIX) def external_gateway_removed(self, ex_gw_port, interface_name): self._external_gateway_removed(ex_gw_port, interface_name) if self.snat_namespace.exists(): self.snat_namespace.delete() def internal_network_added(self, port): super(DvrEdgeRouter, self).internal_network_added(port) # TODO(gsagie) some of this checks are already implemented # in the base class, think how to avoid re-doing them if not self._is_this_snat_host(): return sn_port = self.get_snat_port_for_internal_port(port) if not sn_port: return ns_name = dvr_snat_ns.SnatNamespace.get_snat_ns_name(self.router['id']) interface_name = self._get_snat_int_device_name(sn_port['id']) self._internal_network_added( ns_name, sn_port['network_id'], sn_port['id'], sn_port['fixed_ips'], sn_port['mac_address'], interface_name, dvr_snat_ns.SNAT_INT_DEV_PREFIX, mtu=sn_port.get('mtu')) def _dvr_internal_network_removed(self, port): super(DvrEdgeRouter, self)._dvr_internal_network_removed(port) if not self.ex_gw_port: return sn_port = self.get_snat_port_for_internal_port(port, self.snat_ports) if not sn_port: return if not self._is_this_snat_host(): return snat_interface = self._get_snat_int_device_name(sn_port['id']) ns_name = self.snat_namespace.name prefix = dvr_snat_ns.SNAT_INT_DEV_PREFIX if ip_lib.device_exists(snat_interface, namespace=ns_name): self.driver.unplug(snat_interface, namespace=ns_name, prefix=prefix) def _plug_snat_port(self, port): interface_name = self._get_snat_int_device_name(port['id']) self._internal_network_added( self.snat_namespace.name, port['network_id'], port['id'], port['fixed_ips'], port['mac_address'], interface_name, dvr_snat_ns.SNAT_INT_DEV_PREFIX, mtu=port.get('mtu')) def _create_dvr_gateway(self, ex_gw_port, gw_interface_name): """Create SNAT namespace.""" snat_ns = self._create_snat_namespace() # connect snat_ports to br_int from SNAT namespace for port in self.get_snat_interfaces(): # create interface_name self._plug_snat_port(port) self._external_gateway_added(ex_gw_port, gw_interface_name, snat_ns.name, preserve_ips=[]) 
self.snat_iptables_manager = iptables_manager.IptablesManager( namespace=snat_ns.name, use_ipv6=self.use_ipv6) # kicks the FW Agent to add rules for the snat namespace self.agent.process_router_add(self) def _create_snat_namespace(self): # TODO(mlavalle): in the near future, this method should contain the # code in the L3 agent that creates a gateway for a dvr. The first step # is to move the creation of the snat namespace here self.snat_namespace.create() return self.snat_namespace def _get_snat_int_device_name(self, port_id): long_name = dvr_snat_ns.SNAT_INT_DEV_PREFIX + port_id return long_name[:self.driver.DEV_NAME_LEN] def _is_this_snat_host(self): host = self.router.get('gw_port_host') if not host: LOG.debug("gw_port_host missing from router: %s", self.router['id']) return host == self.host def _handle_router_snat_rules(self, ex_gw_port, interface_name): super(DvrEdgeRouter, self)._handle_router_snat_rules( ex_gw_port, interface_name) if not self._is_this_snat_host(): return if not self.get_ex_gw_port(): return if not self.snat_iptables_manager: LOG.debug("DVR router: no snat rules to be handled") return with self.snat_iptables_manager.defer_apply(): self._empty_snat_chains(self.snat_iptables_manager) # NOTE: DVR adds the jump to float snat via super class, # but that is in the router namespace and not snat. self._add_snat_rules(ex_gw_port, self.snat_iptables_manager, interface_name) def update_routing_table(self, operation, route): if self.get_ex_gw_port() and self._is_this_snat_host(): ns_name = self.snat_namespace.name # NOTE: For now let us apply the static routes both in SNAT # namespace and Router Namespace, to reduce the complexity. if self.snat_namespace.exists(): super(DvrEdgeRouter, self)._update_routing_table( operation, route, namespace=ns_name) else: LOG.error(_LE("The SNAT namespace %s does not exist for " "the router."), ns_name) super(DvrEdgeRouter, self).update_routing_table(operation, route) def delete(self, agent): super(DvrEdgeRouter, self).delete(agent) if self.snat_namespace.exists(): self.snat_namespace.delete() def process_address_scope(self): super(DvrEdgeRouter, self).process_address_scope() if not self._is_this_snat_host(): return if not self.snat_iptables_manager: LOG.debug("DVR router: no snat rules to be handled") return # Prepare address scope iptables rule for dvr snat interfaces internal_ports = self.get_snat_interfaces() ports_scopemark = self._get_port_devicename_scopemark( internal_ports, self._get_snat_int_device_name) # Prepare address scope iptables rule for external port external_port = self.get_ex_gw_port() if external_port: external_port_scopemark = self._get_port_devicename_scopemark( [external_port], self.get_external_device_name) for ip_version in (l3_constants.IP_VERSION_4, l3_constants.IP_VERSION_6): ports_scopemark[ip_version].update( external_port_scopemark[ip_version]) with self.snat_iptables_manager.defer_apply(): self._add_address_scope_mark( self.snat_iptables_manager, ports_scopemark) neutron-8.4.0/neutron/agent/l3/fip_rule_priority_allocator.py0000664000567000056710000000373313044372760025660 0ustar jenkinsjenkins00000000000000# Copyright 2015 IBM Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron.agent.l3.item_allocator import ItemAllocator class FipPriority(object): def __init__(self, index): self.index = index def __repr__(self): return str(self.index) def __hash__(self): return hash(self.__repr__()) def __eq__(self, other): if isinstance(other, FipPriority): return (self.index == other.index) else: return False class FipRulePriorityAllocator(ItemAllocator): """Manages allocation of floating ips rule priorities. IP rule priorities assigned to DVR floating IPs need to be preserved over L3 agent restarts. This class provides an allocator which saves the prirorities to a datastore which will survive L3 agent restarts. """ def __init__(self, data_store_path, priority_rule_start, priority_rule_end): """Create the necessary pool and create the item allocator using ',' as the delimiter and FipRulePriorityAllocator as the class type """ pool = set(FipPriority(str(s)) for s in range(priority_rule_start, priority_rule_end)) super(FipRulePriorityAllocator, self).__init__(data_store_path, FipPriority, pool) neutron-8.4.0/neutron/agent/l3/dvr_router_base.py0000664000567000056710000000422513044372760023234 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_log import log as logging from neutron._i18n import _LE from neutron.agent.l3 import router_info as router from neutron.common import constants as l3_constants LOG = logging.getLogger(__name__) class DvrRouterBase(router.RouterInfo): def __init__(self, agent, host, *args, **kwargs): super(DvrRouterBase, self).__init__(*args, **kwargs) self.agent = agent self.host = host self.snat_ports = None def process(self, agent): super(DvrRouterBase, self).process(agent) # NOTE: Keep a copy of the interfaces around for when they are removed self.snat_ports = self.get_snat_interfaces() def get_snat_interfaces(self): return self.router.get(l3_constants.SNAT_ROUTER_INTF_KEY, []) def get_snat_port_for_internal_port(self, int_port, snat_ports=None): """Return the SNAT port for the given internal interface port.""" if snat_ports is None: snat_ports = self.get_snat_interfaces() fixed_ip = int_port['fixed_ips'][0] subnet_id = fixed_ip['subnet_id'] if snat_ports: match_port = [p for p in snat_ports if p['fixed_ips'][0]['subnet_id'] == subnet_id] if match_port: return match_port[0] else: LOG.error(_LE('DVR: SNAT port not found in the list ' '%(snat_list)s for the given router ' ' internal port %(int_p)s'), { 'snat_list': snat_ports, 'int_p': int_port}) neutron-8.4.0/neutron/agent/l3/keepalived_state_change.py0000664000567000056710000001473013044372760024667 0ustar jenkinsjenkins00000000000000# Copyright (c) 2015 Red Hat Inc. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import os import sys import httplib2 import netaddr from oslo_config import cfg from oslo_log import log as logging import requests from neutron._i18n import _, _LE from neutron.agent.l3 import ha from neutron.agent.linux import daemon from neutron.agent.linux import ip_lib from neutron.agent.linux import ip_monitor from neutron.agent.linux import utils as agent_utils from neutron.common import config LOG = logging.getLogger(__name__) class L3HAConfig(object): send_arp_for_ha = 3 class KeepalivedUnixDomainConnection(agent_utils.UnixDomainHTTPConnection): def __init__(self, *args, **kwargs): # Old style super initialization is required! agent_utils.UnixDomainHTTPConnection.__init__( self, *args, **kwargs) self.socket_path = ( ha.L3AgentKeepalivedStateChangeServer. get_keepalived_state_change_socket_path(cfg.CONF)) class MonitorDaemon(daemon.Daemon): def __init__(self, pidfile, router_id, user, group, namespace, conf_dir, interface, cidr): self.router_id = router_id self.namespace = namespace self.conf_dir = conf_dir self.interface = interface self.cidr = cidr self.monitor = None super(MonitorDaemon, self).__init__(pidfile, uuid=router_id, user=user, group=group) def run(self, run_as_root=False): self.monitor = ip_monitor.IPMonitor(namespace=self.namespace, run_as_root=run_as_root) self.monitor.start() # Only drop privileges if the process is currently running as root # (The run_as_root variable name here is unfortunate - it means to # use a root helper when the running process is NOT already running # as root) if not run_as_root: super(MonitorDaemon, self).run() for iterable in self.monitor: self.parse_and_handle_event(iterable) def parse_and_handle_event(self, iterable): try: event = ip_monitor.IPMonitorEvent.from_text(iterable) if event.interface == self.interface and event.cidr == self.cidr: new_state = 'master' if event.added else 'backup' self.write_state_change(new_state) self.notify_agent(new_state) elif event.interface != self.interface and event.added: # Send GARPs for all new router interfaces. # REVISIT(jlibosva): keepalived versions 1.2.19 and below # contain a bug where gratuitous ARPs are not sent on receiving # SIGHUP signal. This is a workaround for this bug. keepalived # has this issue fixed since 1.2.20 but that version is not # packaged in some distributions (RHEL/CentOS/Ubuntu Xenial). # Remove this code once new keepalived versions are available. self.send_garp(event) except Exception: LOG.exception(_LE( 'Failed to process or handle event for line %s'), iterable) def write_state_change(self, state): with open(os.path.join( self.conf_dir, 'state'), 'w') as state_file: state_file.write(state) LOG.debug('Wrote router %s state %s', self.router_id, state) def notify_agent(self, state): resp, content = httplib2.Http().request( # Note that the message is sent via a Unix domain socket so that # the URL doesn't matter.
'http://127.0.0.1/', headers={'X-Neutron-Router-Id': self.router_id, 'X-Neutron-State': state}, connection_type=KeepalivedUnixDomainConnection) if resp.status != requests.codes.ok: raise Exception(_('Unexpected response: %s') % resp) LOG.debug('Notified agent router %s, state %s', self.router_id, state) def send_garp(self, event): """Send gratuitous ARP for given event.""" ip_lib.send_ip_addr_adv_notif( self.namespace, event.interface, str(netaddr.IPNetwork(event.cidr).ip), L3HAConfig, log_exception=False ) def register_opts(conf): conf.register_cli_opt( cfg.StrOpt('router_id', help=_('ID of the router'))) conf.register_cli_opt( cfg.StrOpt('namespace', help=_('Namespace of the router'))) conf.register_cli_opt( cfg.StrOpt('conf_dir', help=_('Path to the router directory'))) conf.register_cli_opt( cfg.StrOpt('monitor_interface', help=_('Interface to monitor'))) conf.register_cli_opt( cfg.StrOpt('monitor_cidr', help=_('CIDR to monitor'))) conf.register_cli_opt( cfg.StrOpt('pid_file', help=_('Path to PID file for this process'))) conf.register_cli_opt( cfg.StrOpt('user', help=_('User (uid or name) running this process ' 'after its initialization'))) conf.register_cli_opt( cfg.StrOpt('group', help=_('Group (gid or name) running this process ' 'after its initialization'))) conf.register_opt( cfg.StrOpt('metadata_proxy_socket', default='$state_path/metadata_proxy', help=_('Location of Metadata Proxy UNIX domain ' 'socket'))) def configure(conf): config.init(sys.argv[1:]) conf.set_override('log_dir', cfg.CONF.conf_dir) conf.set_override('debug', True) conf.set_override('verbose', True) config.setup_logging() def main(): register_opts(cfg.CONF) configure(cfg.CONF) MonitorDaemon(cfg.CONF.pid_file, cfg.CONF.router_id, cfg.CONF.user, cfg.CONF.group, cfg.CONF.namespace, cfg.CONF.conf_dir, cfg.CONF.monitor_interface, cfg.CONF.monitor_cidr).start() neutron-8.4.0/neutron/agent/l3/router_processing_queue.py0000664000567000056710000001371113044372760025027 0ustar jenkinsjenkins00000000000000# Copyright 2014 Hewlett-Packard Development Company, L.P. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # import datetime from oslo_utils import timeutils from six.moves import queue as Queue # Lower value is higher priority PRIORITY_RPC = 0 PRIORITY_SYNC_ROUTERS_TASK = 1 PRIORITY_PD_UPDATE = 2 DELETE_ROUTER = 1 PD_UPDATE = 2 class RouterUpdate(object): """Encapsulates a router update An instance of this object carries the information necessary to prioritize and process a request to update a router. """ def __init__(self, router_id, priority, action=None, router=None, timestamp=None): self.priority = priority self.timestamp = timestamp if not timestamp: self.timestamp = timeutils.utcnow() self.id = router_id self.action = action self.router = router def __lt__(self, other): """Implements priority among updates Lower numerical priority always gets precedence. When comparing two updates of the same priority, the one with the earlier timestamp gets precedence.
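For example (an illustrative snippet; the datetime module is already
imported by this module, and the timestamps are made-up values):

    first = RouterUpdate('router-1', PRIORITY_RPC,
                         timestamp=datetime.datetime(2016, 1, 1, 12, 0, 0))
    later = RouterUpdate('router-1', PRIORITY_RPC,
                         timestamp=datetime.datetime(2016, 1, 1, 12, 0, 1))
    assert first < later  # same priority, so the earlier timestamp wins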
In the unlikely event that the timestamps are also equal it falls back to a simple comparison of ids meaning the precedence is essentially random. """ if self.priority != other.priority: return self.priority < other.priority if self.timestamp != other.timestamp: return self.timestamp < other.timestamp return self.id < other.id class ExclusiveRouterProcessor(object): """Manager for access to a router for processing This class controls access to a router in a non-blocking way. The first instance to be created for a given router_id is granted exclusive access to the router. Other instances may be created for the same router_id while the first instance has exclusive access. If that happens then it doesn't block and wait for access. Instead, it signals to the master instance that an update came in with the timestamp. This way, a thread will not block to wait for access to a router. Instead it effectively signals to the thread that is working on the router that something has changed since it started working on it. That thread will simply finish its current iteration and then repeat. This class keeps track of the last time that a router data was fetched and processed. The timestamp that it keeps must be before when the data used to process the router last was fetched from the database. But, as close as possible. The timestamp should not be recorded, however, until the router has been processed using the fetch data. """ _masters = {} _router_timestamps = {} def __init__(self, router_id): self._router_id = router_id if router_id not in self._masters: self._masters[router_id] = self self._queue = [] self._master = self._masters[router_id] def _i_am_master(self): return self == self._master def __enter__(self): return self def __exit__(self, type, value, traceback): if self._i_am_master(): del self._masters[self._router_id] def _get_router_data_timestamp(self): return self._router_timestamps.get(self._router_id, datetime.datetime.min) def fetched_and_processed(self, timestamp): """Records the data timestamp after it is used to update the router""" new_timestamp = max(timestamp, self._get_router_data_timestamp()) self._router_timestamps[self._router_id] = new_timestamp def queue_update(self, update): """Queues an update from a worker This is the queue used to keep new updates that come in while a router is being processed. These updates have already bubbled to the front of the RouterProcessingQueue. """ self._master._queue.append(update) def updates(self): """Processes the router until updates stop coming Only the master instance will process the router. However, updates may come in from other workers while it is in progress. This method loops until they stop coming. """ if self._i_am_master(): while self._queue: # Remove the update from the queue even if it is old. update = self._queue.pop(0) # Process the update only if it is fresh. if self._get_router_data_timestamp() < update.timestamp: yield update class RouterProcessingQueue(object): """Manager of the queue of routers to process.""" def __init__(self): self._queue = Queue.PriorityQueue() def add(self, update): self._queue.put(update) def each_update_to_next_router(self): """Grabs the next router from the queue and processes This method uses a for loop to process the router repeatedly until updates stop bubbling to the front of the queue. """ next_update = self._queue.get() with ExclusiveRouterProcessor(next_update.id) as rp: # Queue the update whether this worker is the master or not. 
rp.queue_update(next_update) # Here, if the current worker is not the master, the call to # rp.updates() will not yield and so this will essentially be a # noop. for update in rp.updates(): yield (rp, update) neutron-8.4.0/neutron/agent/l3/link_local_allocator.py0000664000567000056710000000427513044372760024223 0ustar jenkinsjenkins00000000000000# Copyright 2014 Hewlett-Packard Development Company, L.P. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import netaddr from neutron.agent.l3.item_allocator import ItemAllocator class LinkLocalAddressPair(netaddr.IPNetwork): def __init__(self, addr): super(LinkLocalAddressPair, self).__init__(addr) def get_pair(self): """Builds an address pair from the first and last addresses. """ # TODO(kevinbenton): the callers of this seem only interested in an IP, # so we should just return two IPAddresses. return (netaddr.IPNetwork("%s/%s" % (self.network, self.prefixlen)), netaddr.IPNetwork("%s/%s" % (self[-1], self.prefixlen))) class LinkLocalAllocator(ItemAllocator): """Manages allocation of link local IP addresses. These link local addresses are used for routing inside the fip namespaces. The associations need to persist across agent restarts to maintain consistency. Without this, there is disruption in network connectivity as the agent rewires the connections with the new IP address associations. Persisting these in the database is unnecessary and would degrade performance. """ def __init__(self, data_store_path, subnet): """Create the necessary pool and item allocator using ',' as the delimiter and LinkLocalAddressPair as the class type """ subnet = netaddr.IPNetwork(subnet) pool = set(LinkLocalAddressPair(s) for s in subnet.subnet(31)) super(LinkLocalAllocator, self).__init__(data_store_path, LinkLocalAddressPair, pool) neutron-8.4.0/neutron/agent/l3/ha_router.py0000664000567000056710000004037113044372760022041 0ustar jenkinsjenkins00000000000000# Copyright (c) 2015 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License.
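#
# NOTE: a minimal sketch of how the LinkLocalAllocator defined above hands
# out /31 address pairs; the data-store path and the subnet below are
# assumptions for the example (the agent passes its own configured values).
#
#     from neutron.agent.l3.link_local_allocator import LinkLocalAllocator
#
#     alloc = LinkLocalAllocator('/var/lib/neutron/fip-linklocal-networks',
#                                '169.254.64.0/18')
#     pair = alloc.allocate('some-router-id')  # a LinkLocalAddressPair (/31)
#     rtr_2_fip, fip_2_rtr = pair.get_pair()   # two ends of the veth link
#     alloc.release('some-router-id')          # return the pair to the pool
#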
import os import shutil import netaddr from oslo_log import log as logging from neutron._i18n import _LE from neutron.agent.l3 import namespaces from neutron.agent.l3 import router_info as router from neutron.agent.linux import external_process from neutron.agent.linux import ip_lib from neutron.agent.linux import keepalived from neutron.common import constants as n_consts from neutron.common import utils as common_utils from neutron.extensions import portbindings LOG = logging.getLogger(__name__) HA_DEV_PREFIX = 'ha-' IP_MONITOR_PROCESS_SERVICE = 'ip_monitor' class HaRouterNamespace(namespaces.RouterNamespace): """Namespace for HA router. This namespace sets the ip_nonlocal_bind to 0 for HA router namespaces. It does so to prevent sending gratuitous ARPs for interfaces that got VIP removed in the middle of processing. """ def create(self): super(HaRouterNamespace, self).create() # HA router namespaces should not have ip_nonlocal_bind enabled ip_lib.set_ip_nonlocal_bind_for_namespace(self.name) class HaRouter(router.RouterInfo): def __init__(self, state_change_callback, *args, **kwargs): super(HaRouter, self).__init__(*args, **kwargs) self.ha_port = None self.keepalived_manager = None self.state_change_callback = state_change_callback def create_router_namespace_object( self, router_id, agent_conf, iface_driver, use_ipv6): return HaRouterNamespace( router_id, agent_conf, iface_driver, use_ipv6) @property def ha_priority(self): return self.router.get('priority', keepalived.HA_DEFAULT_PRIORITY) @property def ha_vr_id(self): return self.router.get('ha_vr_id') @property def ha_state(self): ha_state_path = self.keepalived_manager.get_full_config_file_path( 'state') try: with open(ha_state_path, 'r') as f: return f.read() except (OSError, IOError): LOG.debug('Error while reading HA state for %s', self.router_id) return None @ha_state.setter def ha_state(self, new_state): ha_state_path = self.keepalived_manager.get_full_config_file_path( 'state') try: with open(ha_state_path, 'w') as f: f.write(new_state) except (OSError, IOError): LOG.error(_LE('Error while writing HA state for %s'), self.router_id) @property def ha_namespace(self): return self.ns_name def initialize(self, process_monitor): super(HaRouter, self).initialize(process_monitor) ha_port = self.router.get(n_consts.HA_INTERFACE_KEY) if not ha_port: LOG.error(_LE('Unable to process HA router %s without HA port'), self.router_id) return self.ha_port = ha_port self._init_keepalived_manager(process_monitor) self.ha_network_added() self.update_initial_state(self.state_change_callback) self.spawn_state_change_monitor(process_monitor) def _init_keepalived_manager(self, process_monitor): self.keepalived_manager = keepalived.KeepalivedManager( self.router['id'], keepalived.KeepalivedConf(), process_monitor, conf_path=self.agent_conf.ha_confs_path, namespace=self.ha_namespace) config = self.keepalived_manager.config interface_name = self.get_ha_device_name() subnets = self.ha_port.get('subnets', []) ha_port_cidrs = [subnet['cidr'] for subnet in subnets] instance = keepalived.KeepalivedInstance( 'BACKUP', interface_name, self.ha_vr_id, ha_port_cidrs, nopreempt=True, advert_int=self.agent_conf.ha_vrrp_advert_int, priority=self.ha_priority) instance.track_interfaces.append(interface_name) if self.agent_conf.ha_vrrp_auth_password: # TODO(safchain): use oslo.config types when it will be available # in order to check the validity of ha_vrrp_auth_type instance.set_authentication(self.agent_conf.ha_vrrp_auth_type, self.agent_conf.ha_vrrp_auth_password) 
config.add_instance(instance) def enable_keepalived(self): self.keepalived_manager.spawn() def disable_keepalived(self): self.keepalived_manager.disable() conf_dir = self.keepalived_manager.get_conf_dir() shutil.rmtree(conf_dir) def _get_keepalived_instance(self): return self.keepalived_manager.config.get_instance(self.ha_vr_id) def _get_primary_vip(self): return self._get_keepalived_instance().get_primary_vip() def get_ha_device_name(self): return (HA_DEV_PREFIX + self.ha_port['id'])[:self.driver.DEV_NAME_LEN] def ha_network_added(self): interface_name = self.get_ha_device_name() self.driver.plug(self.ha_port['network_id'], self.ha_port['id'], interface_name, self.ha_port['mac_address'], namespace=self.ha_namespace, prefix=HA_DEV_PREFIX, mtu=self.ha_port.get('mtu')) ip_cidrs = common_utils.fixed_ip_cidrs(self.ha_port['fixed_ips']) self.driver.init_l3(interface_name, ip_cidrs, namespace=self.ha_namespace, preserve_ips=[self._get_primary_vip()]) def ha_network_removed(self): self.driver.unplug(self.get_ha_device_name(), namespace=self.ha_namespace, prefix=HA_DEV_PREFIX) self.ha_port = None def _add_vips(self, port, interface_name): for ip_cidr in common_utils.fixed_ip_cidrs(port['fixed_ips']): self._add_vip(ip_cidr, interface_name) def _add_vip(self, ip_cidr, interface, scope=None): instance = self._get_keepalived_instance() instance.add_vip(ip_cidr, interface, scope) def _remove_vip(self, ip_cidr): instance = self._get_keepalived_instance() instance.remove_vip_by_ip_address(ip_cidr) def _clear_vips(self, interface): instance = self._get_keepalived_instance() instance.remove_vips_vroutes_by_interface(interface) def _get_cidrs_from_keepalived(self, interface_name): instance = self._get_keepalived_instance() return instance.get_existing_vip_ip_addresses(interface_name) def get_router_cidrs(self, device): return set(self._get_cidrs_from_keepalived(device.name)) def routes_updated(self, old_routes, new_routes): instance = self._get_keepalived_instance() instance.virtual_routes.extra_routes = [ keepalived.KeepalivedVirtualRoute( route['destination'], route['nexthop']) for route in new_routes] def _add_default_gw_virtual_route(self, ex_gw_port, interface_name): gateway_ips = self._get_external_gw_ips(ex_gw_port) default_gw_rts = [] instance = self._get_keepalived_instance() for gw_ip in gateway_ips: # TODO(Carl) This is repeated everywhere. A method would # be nice. default_gw = n_consts.IP_ANY[netaddr.IPAddress(gw_ip).version] default_gw_rts.append(keepalived.KeepalivedVirtualRoute( default_gw, gw_ip, interface_name)) instance.virtual_routes.gateway_routes = default_gw_rts def _add_extra_subnet_onlink_routes(self, ex_gw_port, interface_name): extra_subnets = ex_gw_port.get('extra_subnets', []) instance = self._get_keepalived_instance() onlink_route_cidrs = set(s['cidr'] for s in extra_subnets) instance.virtual_routes.extra_subnets = [ keepalived.KeepalivedVirtualRoute( onlink_route_cidr, None, interface_name, scope='link') for onlink_route_cidr in onlink_route_cidrs] def _should_delete_ipv6_lladdr(self, ipv6_lladdr): """Only the master should have any IP addresses configured. Let keepalived manage IPv6 link local addresses, the same way we let it manage IPv4 addresses. If the router is not in the master state, we must delete the address first as it is autoconfigured by the kernel. 
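In short, as implemented below: delete the address when keepalived is
not active yet, or when this node is a backup whose keepalived config
on disk does not already track the address; otherwise leave it alone.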
""" manager = self.keepalived_manager if manager.get_process().active: if self.ha_state != 'master': conf = manager.get_conf_on_disk() managed_by_keepalived = conf and ipv6_lladdr in conf if managed_by_keepalived: return False else: return False return True def _disable_ipv6_addressing_on_interface(self, interface_name): """Disable IPv6 link local addressing on the device and add it as a VIP to keepalived. This means that the IPv6 link local address will only be present on the master. """ device = ip_lib.IPDevice(interface_name, namespace=self.ha_namespace) ipv6_lladdr = ip_lib.get_ipv6_lladdr(device.link.address) if self._should_delete_ipv6_lladdr(ipv6_lladdr): device.addr.flush(n_consts.IP_VERSION_6) self._remove_vip(ipv6_lladdr) self._add_vip(ipv6_lladdr, interface_name, scope='link') def _add_gateway_vip(self, ex_gw_port, interface_name): self._add_vips(ex_gw_port, interface_name) self._add_default_gw_virtual_route(ex_gw_port, interface_name) self._add_extra_subnet_onlink_routes(ex_gw_port, interface_name) def add_floating_ip(self, fip, interface_name, device): fip_ip = fip['floating_ip_address'] ip_cidr = common_utils.ip_to_cidr(fip_ip) self._add_vip(ip_cidr, interface_name) return n_consts.FLOATINGIP_STATUS_ACTIVE def remove_floating_ip(self, device, ip_cidr): self._remove_vip(ip_cidr) if self.ha_state == 'master' and device.addr.list(to=ip_cidr): # Delete the floatingip address from external port only after # the ip address has been configured to the device super(HaRouter, self).remove_floating_ip(device, ip_cidr) def internal_network_updated(self, interface_name, ip_cidrs): self._clear_vips(interface_name) self._disable_ipv6_addressing_on_interface(interface_name) for ip_cidr in ip_cidrs: self._add_vip(ip_cidr, interface_name) def _plug_ha_router_port(self, port, name_getter, prefix): port_id = port['id'] interface_name = name_getter(port_id) self.driver.plug(port['network_id'], port_id, interface_name, port['mac_address'], namespace=self.ha_namespace, prefix=prefix, mtu=port.get('mtu')) self._disable_ipv6_addressing_on_interface(interface_name) self._add_vips(port, interface_name) def internal_network_added(self, port): self._plug_ha_router_port( port, self.get_internal_device_name, router.INTERNAL_DEV_PREFIX) def internal_network_removed(self, port): super(HaRouter, self).internal_network_removed(port) interface_name = self.get_internal_device_name(port['id']) self._clear_vips(interface_name) def _get_state_change_monitor_process_manager(self): return external_process.ProcessManager( self.agent_conf, '%s.monitor' % self.router_id, self.ha_namespace, default_cmd_callback=self._get_state_change_monitor_callback()) def _get_state_change_monitor_callback(self): ha_device = self.get_ha_device_name() ha_cidr = self._get_primary_vip() def callback(pid_file): cmd = [ 'neutron-keepalived-state-change', '--router_id=%s' % self.router_id, '--namespace=%s' % self.ha_namespace, '--conf_dir=%s' % self.keepalived_manager.get_conf_dir(), '--monitor_interface=%s' % ha_device, '--monitor_cidr=%s' % ha_cidr, '--pid_file=%s' % pid_file, '--state_path=%s' % self.agent_conf.state_path, '--user=%s' % os.geteuid(), '--group=%s' % os.getegid()] return cmd return callback def spawn_state_change_monitor(self, process_monitor): pm = self._get_state_change_monitor_process_manager() pm.enable() process_monitor.register( self.router_id, IP_MONITOR_PROCESS_SERVICE, pm) def destroy_state_change_monitor(self, process_monitor): pm = self._get_state_change_monitor_process_manager() process_monitor.unregister( 
self.router_id, IP_MONITOR_PROCESS_SERVICE) pm.disable() def update_initial_state(self, callback): ha_device = ip_lib.IPDevice( self.get_ha_device_name(), self.ha_namespace) addresses = ha_device.addr.list() cidrs = (address['cidr'] for address in addresses) ha_cidr = self._get_primary_vip() state = 'master' if ha_cidr in cidrs else 'backup' self.ha_state = state callback(self.router_id, state) @staticmethod def _gateway_ports_equal(port1, port2): def _get_filtered_dict(d, ignore): return {k: v for k, v in d.items() if k not in ignore} keys_to_ignore = set([portbindings.HOST_ID]) port1_filtered = _get_filtered_dict(port1, keys_to_ignore) port2_filtered = _get_filtered_dict(port2, keys_to_ignore) return port1_filtered == port2_filtered def external_gateway_added(self, ex_gw_port, interface_name): self._plug_external_gateway(ex_gw_port, interface_name, self.ns_name) self._add_gateway_vip(ex_gw_port, interface_name) self._disable_ipv6_addressing_on_interface(interface_name) def external_gateway_updated(self, ex_gw_port, interface_name): self._plug_external_gateway( ex_gw_port, interface_name, self.ha_namespace) ip_cidrs = common_utils.fixed_ip_cidrs(self.ex_gw_port['fixed_ips']) for old_gateway_cidr in ip_cidrs: self._remove_vip(old_gateway_cidr) self._add_gateway_vip(ex_gw_port, interface_name) def external_gateway_removed(self, ex_gw_port, interface_name): self._clear_vips(interface_name) if self.ha_state == 'master': super(HaRouter, self).external_gateway_removed(ex_gw_port, interface_name) else: # We are not the master node, so no need to delete ip addresses. self.driver.unplug(interface_name, bridge=self.agent_conf.external_network_bridge, namespace=self.ns_name, prefix=router.EXTERNAL_DEV_PREFIX) def delete(self, agent): self.destroy_state_change_monitor(self.process_monitor) self.disable_keepalived() self.ha_network_removed() super(HaRouter, self).delete(agent) def process(self, agent): super(HaRouter, self).process(agent) self.ha_port = self.router.get(n_consts.HA_INTERFACE_KEY) if (self.ha_port and self.ha_port['status'] == n_consts.PORT_STATUS_ACTIVE): self.enable_keepalived() @common_utils.synchronized('enable_radvd') def enable_radvd(self, internal_ports=None): if (self.keepalived_manager.get_process().active and self.ha_state == 'master'): super(HaRouter, self).enable_radvd(internal_ports) neutron-8.4.0/neutron/agent/l3/legacy_router.py0000664000567000056710000000253513044372760022715 0ustar jenkinsjenkins00000000000000# Copyright (c) 2015 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron.agent.l3 import router_info as router from neutron.agent.linux import ip_lib from neutron.common import constants as l3_constants class LegacyRouter(router.RouterInfo): def add_floating_ip(self, fip, interface_name, device): if not self._add_fip_addr_to_device(fip, device): return l3_constants.FLOATINGIP_STATUS_ERROR # As GARP is processed in a distinct thread the call below # won't raise an exception to be handled. 
ip_lib.send_ip_addr_adv_notif(self.ns_name, interface_name, fip['floating_ip_address'], self.agent_conf) return l3_constants.FLOATINGIP_STATUS_ACTIVE neutron-8.4.0/neutron/agent/l3/dvr_local_router.py0000664000567000056710000006247613044372760023430 0ustar jenkinsjenkins00000000000000# Copyright (c) 2015 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import binascii import collections import netaddr from oslo_log import log as logging from oslo_utils import excutils import six from neutron._i18n import _LE, _LW from neutron.agent.l3 import dvr_fip_ns from neutron.agent.l3 import dvr_router_base from neutron.agent.linux import ip_lib from neutron.common import constants as l3_constants from neutron.common import exceptions from neutron.common import utils as common_utils LOG = logging.getLogger(__name__) # xor-folding mask used for IPv6 rule index MASK_30 = 0x3fffffff # Tracks the arp entry cache Arp_entry = collections.namedtuple( 'Arp_entry', 'ip mac subnet_id operation') class DvrLocalRouter(dvr_router_base.DvrRouterBase): def __init__(self, agent, host, *args, **kwargs): super(DvrLocalRouter, self).__init__(agent, host, *args, **kwargs) self.floating_ips_dict = {} # Linklocal subnet for router and floating IP namespace link self.rtr_fip_subnet = None self.dist_fip_count = None self.fip_ns = None self._pending_arp_set = set() def get_floating_ips(self): """Filter Floating IPs to be hosted on this agent.""" floating_ips = super(DvrLocalRouter, self).get_floating_ips() return [i for i in floating_ips if ( (i['host'] == self.host) or (i.get('dest_host') == self.host))] def floating_forward_rules(self, floating_ip, fixed_ip): """Override this function defined in router_info for dvr routers.""" if not self.fip_ns: return [] rtr_2_fip_name = self.fip_ns.get_rtr_ext_device_name(self.router_id) dnat_from_floatingip_to_fixedip = ( 'PREROUTING', '-d %s/32 -i %s -j DNAT --to-destination %s' % ( floating_ip, rtr_2_fip_name, fixed_ip)) snat_from_fixedip_to_floatingip = ( 'float-snat', '-s %s/32 -j SNAT --to-source %s' % ( fixed_ip, floating_ip)) return [dnat_from_floatingip_to_fixedip, snat_from_fixedip_to_floatingip] def floating_mangle_rules(self, floating_ip, fixed_ip, internal_mark): if not self.fip_ns: return [] rtr_2_fip_name = self.fip_ns.get_rtr_ext_device_name(self.router_id) mark_traffic_to_floating_ip = ( 'floatingip', '-d %s -i %s -j MARK --set-xmark %s' % ( floating_ip, rtr_2_fip_name, internal_mark)) mark_traffic_from_fixed_ip = ( 'FORWARD', '-s %s -j $float-snat' % fixed_ip) return [mark_traffic_to_floating_ip, mark_traffic_from_fixed_ip] def floating_ip_added_dist(self, fip, fip_cidr): """Add floating IP to FIP namespace.""" floating_ip = fip['floating_ip_address'] fixed_ip = fip['fixed_ip_address'] self._add_floating_ip_rule(floating_ip, fixed_ip) fip_2_rtr_name = self.fip_ns.get_int_device_name(self.router_id) #Add routing rule in fip namespace fip_ns_name = self.fip_ns.get_name() if self.rtr_fip_subnet is None: self.rtr_fip_subnet = self.fip_ns.local_subnets.allocate( 
self.router_id) rtr_2_fip, _ = self.rtr_fip_subnet.get_pair() device = ip_lib.IPDevice(fip_2_rtr_name, namespace=fip_ns_name) device.route.add_route(fip_cidr, str(rtr_2_fip.ip)) interface_name = ( self.fip_ns.get_ext_device_name( self.fip_ns.agent_gateway_port['id'])) ip_lib.send_ip_addr_adv_notif(fip_ns_name, interface_name, floating_ip, self.agent_conf) # update internal structures self.dist_fip_count = self.dist_fip_count + 1 def _add_floating_ip_rule(self, floating_ip, fixed_ip): rule_pr = self.fip_ns.allocate_rule_priority(floating_ip) self.floating_ips_dict[floating_ip] = rule_pr ip_rule = ip_lib.IPRule(namespace=self.ns_name) ip_rule.rule.add(ip=fixed_ip, table=dvr_fip_ns.FIP_RT_TBL, priority=rule_pr) def _remove_floating_ip_rule(self, floating_ip): if floating_ip in self.floating_ips_dict: rule_pr = self.floating_ips_dict[floating_ip] ip_rule = ip_lib.IPRule(namespace=self.ns_name) ip_rule.rule.delete(ip=floating_ip, table=dvr_fip_ns.FIP_RT_TBL, priority=rule_pr) self.fip_ns.deallocate_rule_priority(floating_ip) #TODO(rajeev): Handle else case - exception/log? def floating_ip_removed_dist(self, fip_cidr): """Remove floating IP from FIP namespace.""" floating_ip = fip_cidr.split('/')[0] rtr_2_fip_name = self.fip_ns.get_rtr_ext_device_name(self.router_id) fip_2_rtr_name = self.fip_ns.get_int_device_name(self.router_id) if self.rtr_fip_subnet is None: self.rtr_fip_subnet = self.fip_ns.local_subnets.allocate( self.router_id) rtr_2_fip, fip_2_rtr = self.rtr_fip_subnet.get_pair() fip_ns_name = self.fip_ns.get_name() self._remove_floating_ip_rule(floating_ip) device = ip_lib.IPDevice(fip_2_rtr_name, namespace=fip_ns_name) device.route.delete_route(fip_cidr, str(rtr_2_fip.ip)) # check if this is the last FIP for this router self.dist_fip_count = self.dist_fip_count - 1 if self.dist_fip_count == 0: #remove default route entry device = ip_lib.IPDevice(rtr_2_fip_name, namespace=self.ns_name) ns_ip = ip_lib.IPWrapper(namespace=fip_ns_name) device.route.delete_gateway(str(fip_2_rtr.ip), table=dvr_fip_ns.FIP_RT_TBL) self.fip_ns.local_subnets.release(self.router_id) self.rtr_fip_subnet = None ns_ip.del_veth(fip_2_rtr_name) def floating_ip_moved_dist(self, fip): """Handle floating IP move between fixed IPs.""" floating_ip = fip['floating_ip_address'] self._remove_floating_ip_rule(floating_ip) self._add_floating_ip_rule(floating_ip, fip['fixed_ip_address']) def add_floating_ip(self, fip, interface_name, device): # Special Handling for DVR - update FIP namespace ip_cidr = common_utils.ip_to_cidr(fip['floating_ip_address']) self.floating_ip_added_dist(fip, ip_cidr) return l3_constants.FLOATINGIP_STATUS_ACTIVE def remove_floating_ip(self, device, ip_cidr): self.floating_ip_removed_dist(ip_cidr) def move_floating_ip(self, fip): self.floating_ip_moved_dist(fip) return l3_constants.FLOATINGIP_STATUS_ACTIVE def _get_internal_port(self, subnet_id): """Return internal router port based on subnet_id.""" router_ports = self.router.get(l3_constants.INTERFACE_KEY, []) for port in router_ports: fips = port['fixed_ips'] for f in fips: if f['subnet_id'] == subnet_id: return port def _cache_arp_entry(self, ip, mac, subnet_id, operation): """Cache the arp entries if device not ready.""" arp_entry_tuple = Arp_entry(ip=ip, mac=mac, subnet_id=subnet_id, operation=operation) self._pending_arp_set.add(arp_entry_tuple) def _process_arp_cache_for_internal_port(self, subnet_id): """Function to process the cached arp entries.""" arp_remove = set() for arp_entry in self._pending_arp_set: if subnet_id == arp_entry.subnet_id: 
try: state = self._update_arp_entry( arp_entry.ip, arp_entry.mac, arp_entry.subnet_id, arp_entry.operation) except Exception: state = False if state: # If the arp update was successful, then # go ahead and add it to the remove set arp_remove.add(arp_entry) self._pending_arp_set -= arp_remove def _delete_arp_cache_for_internal_port(self, subnet_id): """Function to delete the cached arp entries.""" arp_delete = set() for arp_entry in self._pending_arp_set: if subnet_id == arp_entry.subnet_id: arp_delete.add(arp_entry) self._pending_arp_set -= arp_delete def _update_arp_entry(self, ip, mac, subnet_id, operation): """Add or delete arp entry into router namespace for the subnet.""" port = self._get_internal_port(subnet_id) # update arp entry only if the subnet is attached to the router if not port: return False try: # TODO(mrsmith): optimize the calls below for bulk calls interface_name = self.get_internal_device_name(port['id']) device = ip_lib.IPDevice(interface_name, namespace=self.ns_name) if device.exists(): if operation == 'add': device.neigh.add(ip, mac) elif operation == 'delete': device.neigh.delete(ip, mac) return True else: if operation == 'add': LOG.warning(_LW("Device %s does not exist so ARP entry " "cannot be updated, will cache " "information to be applied later " "when the device exists"), device) self._cache_arp_entry(ip, mac, subnet_id, operation) return False except Exception: with excutils.save_and_reraise_exception(): LOG.exception(_LE("DVR: Failed updating arp entry")) def _set_subnet_arp_info(self, subnet_id): """Set ARP info retrieved from Plugin for existing ports.""" # TODO(Carl) Can we eliminate the need to make this RPC while # processing a router. subnet_ports = self.agent.get_ports_by_subnet(subnet_id) for p in subnet_ports: if p['device_owner'] not in l3_constants.ROUTER_INTERFACE_OWNERS: for fixed_ip in p['fixed_ips']: self._update_arp_entry(fixed_ip['ip_address'], p['mac_address'], subnet_id, 'add') self._process_arp_cache_for_internal_port(subnet_id) @staticmethod def _get_snat_idx(ip_cidr): """Generate index for DVR snat rules and route tables. The index value has to be 32 bits or less but more than the system generated entries i.e. 32768. For IPv4 use the numeric value of the cidr. For IPv6 generate a crc32 bit hash and xor-fold to 30 bits. Use the freed range to extend smaller values so that they become greater than system generated entries. 
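For example (values shown purely for illustration):

    _get_snat_idx('192.168.1.0/24')  # IPv4: numeric value, 3232235776
    _get_snat_idx('2001:db8::/64')   # IPv6: crc32 xor-folded to 30 bits,
                                     # offset by MASK_30 if below 32768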
""" net = netaddr.IPNetwork(ip_cidr) if net.version == 6: if isinstance(ip_cidr, six.text_type): ip_cidr = ip_cidr.encode() # Needed for Python 3.x # the crc32 & 0xffffffff is for Python 2.6 and 3.0 compatibility snat_idx = binascii.crc32(ip_cidr) & 0xffffffff # xor-fold the hash to reserve upper range to extend smaller values snat_idx = (snat_idx >> 30) ^ (snat_idx & MASK_30) if snat_idx < 32768: snat_idx = snat_idx + MASK_30 else: snat_idx = net.value return snat_idx def _delete_gateway_device_if_exists(self, ns_ip_device, gw_ip_addr, snat_idx): try: ns_ip_device.route.delete_gateway(gw_ip_addr, table=snat_idx) except exceptions.DeviceNotFoundError: pass def _stale_ip_rule_cleanup(self, ns_ipr, ns_ipd, ip_version): ip_rules_list = ns_ipr.rule.list_rules(ip_version) snat_table_list = [] for ip_rule in ip_rules_list: snat_table = ip_rule['table'] priority = ip_rule['priority'] if snat_table in ['local', 'default', 'main']: continue if (ip_version == l3_constants.IP_VERSION_4 and snat_table in range(dvr_fip_ns.FIP_PR_START, dvr_fip_ns.FIP_PR_END)): continue gateway_cidr = ip_rule['from'] ns_ipr.rule.delete(ip=gateway_cidr, table=snat_table, priority=priority) snat_table_list.append(snat_table) for tb in snat_table_list: ns_ipd.route.flush(ip_version, table=tb) def gateway_redirect_cleanup(self, rtr_interface): ns_ipr = ip_lib.IPRule(namespace=self.ns_name) ns_ipd = ip_lib.IPDevice(rtr_interface, namespace=self.ns_name) self._stale_ip_rule_cleanup(ns_ipr, ns_ipd, l3_constants.IP_VERSION_4) self._stale_ip_rule_cleanup(ns_ipr, ns_ipd, l3_constants.IP_VERSION_6) def _snat_redirect_modify(self, gateway, sn_port, sn_int, is_add): """Adds or removes rules and routes for SNAT redirection.""" try: ns_ipr = ip_lib.IPRule(namespace=self.ns_name) ns_ipd = ip_lib.IPDevice(sn_int, namespace=self.ns_name) if is_add: ns_ipwrapr = ip_lib.IPWrapper(namespace=self.ns_name) for port_fixed_ip in sn_port['fixed_ips']: # Iterate and find the gateway IP address matching # the IP version port_ip_addr = port_fixed_ip['ip_address'] port_ip_vers = netaddr.IPAddress(port_ip_addr).version for gw_fixed_ip in gateway['fixed_ips']: gw_ip_addr = gw_fixed_ip['ip_address'] if netaddr.IPAddress(gw_ip_addr).version == port_ip_vers: sn_port_cidr = common_utils.ip_to_cidr( port_ip_addr, port_fixed_ip['prefixlen']) snat_idx = self._get_snat_idx(sn_port_cidr) if is_add: ns_ipd.route.add_gateway(gw_ip_addr, table=snat_idx) ns_ipr.rule.add(ip=sn_port_cidr, table=snat_idx, priority=snat_idx) ns_ipwrapr.netns.execute( ['sysctl', '-w', 'net.ipv4.conf.%s.send_redirects=0' % sn_int]) else: self._delete_gateway_device_if_exists(ns_ipd, gw_ip_addr, snat_idx) ns_ipr.rule.delete(ip=sn_port_cidr, table=snat_idx, priority=snat_idx) except Exception: if is_add: exc = _LE('DVR: error adding redirection logic') else: exc = _LE('DVR: snat remove failed to clear the rule ' 'and device') LOG.exception(exc) def _snat_redirect_add(self, gateway, sn_port, sn_int): """Adds rules and routes for SNAT redirection.""" self._snat_redirect_modify(gateway, sn_port, sn_int, is_add=True) def _snat_redirect_remove(self, gateway, sn_port, sn_int): """Removes rules and routes for SNAT redirection.""" self._snat_redirect_modify(gateway, sn_port, sn_int, is_add=False) def internal_network_added(self, port): super(DvrLocalRouter, self).internal_network_added(port) # NOTE: The following function _set_subnet_arp_info # should be called to dynamically populate the arp # entries for the dvr services ports into the router # namespace. 
This does not have dependency on the # external_gateway port or the agent_mode. for subnet in port['subnets']: self._set_subnet_arp_info(subnet['id']) self._snat_redirect_add_from_port(port) def _snat_redirect_add_from_port(self, port): ex_gw_port = self.get_ex_gw_port() if not ex_gw_port: return sn_port = self.get_snat_port_for_internal_port(port) if not sn_port: return interface_name = self.get_internal_device_name(port['id']) self._snat_redirect_add(sn_port, port, interface_name) def _dvr_internal_network_removed(self, port): if not self.ex_gw_port: return sn_port = self.get_snat_port_for_internal_port(port, self.snat_ports) if not sn_port: return # DVR handling code for SNAT interface_name = self.get_internal_device_name(port['id']) self._snat_redirect_remove(sn_port, port, interface_name) # Clean up the cached arp entries related to the port subnet for subnet in port['subnets']: self._delete_arp_cache_for_internal_port(subnet) def internal_network_removed(self, port): self._dvr_internal_network_removed(port) super(DvrLocalRouter, self).internal_network_removed(port) def get_floating_agent_gw_interface(self, ext_net_id): """Filter Floating Agent GW port for the external network.""" fip_ports = self.router.get(l3_constants.FLOATINGIP_AGENT_INTF_KEY, []) return next( (p for p in fip_ports if p['network_id'] == ext_net_id), None) def get_external_device_interface_name(self, ex_gw_port): fip_int = self.fip_ns.get_int_device_name(self.router_id) if ip_lib.device_exists(fip_int, namespace=self.fip_ns.get_name()): return self.fip_ns.get_rtr_ext_device_name(self.router_id) def external_gateway_added(self, ex_gw_port, interface_name): # TODO(Carl) Refactor external_gateway_added/updated/removed to use # super class implementation where possible. Looks like preserve_ips, # and ns_name are the key differences. ip_wrapr = ip_lib.IPWrapper(namespace=self.ns_name) ip_wrapr.netns.execute(['sysctl', '-w', 'net.ipv4.conf.all.send_redirects=0']) for p in self.internal_ports: gateway = self.get_snat_port_for_internal_port(p) id_name = self.get_internal_device_name(p['id']) if gateway: self._snat_redirect_add(gateway, p, id_name) for port in self.get_snat_interfaces(): for ip in port['fixed_ips']: self._update_arp_entry(ip['ip_address'], port['mac_address'], ip['subnet_id'], 'add') def external_gateway_updated(self, ex_gw_port, interface_name): pass def external_gateway_removed(self, ex_gw_port, interface_name): # TODO(Carl) Should this be calling process_snat_dnat_for_fip? self.process_floating_ip_nat_rules() if self.fip_ns: to_fip_interface_name = ( self.get_external_device_interface_name(ex_gw_port)) self.process_floating_ip_addresses(to_fip_interface_name) # NOTE:_snat_redirect_remove should be only called when the # gateway is cleared and should not be called when the gateway # is moved or rescheduled. if not self.router.get('gw_port'): for p in self.internal_ports: # NOTE: When removing the gateway port, pass in the snat_port # cache along with the current ports. 
gateway = self.get_snat_port_for_internal_port( p, self.snat_ports) if not gateway: continue internal_interface = self.get_internal_device_name(p['id']) self._snat_redirect_remove(gateway, p, internal_interface) def _handle_router_snat_rules(self, ex_gw_port, interface_name): """Configures NAT rules for Floating IPs for DVR.""" self.iptables_manager.ipv4['nat'].empty_chain('POSTROUTING') self.iptables_manager.ipv4['nat'].empty_chain('snat') ex_gw_port = self.get_ex_gw_port() if not ex_gw_port: return ext_device_name = self.get_external_device_interface_name(ex_gw_port) floatingips = self.get_floating_ips() if not ext_device_name or not floatingips: # Without the router-to-fip device, or without any floating ip, # the snat rules should not be added return # Add back the jump to float-snat self.iptables_manager.ipv4['nat'].add_rule('snat', '-j $float-snat') rule = self._prevent_snat_for_internal_traffic_rule(ext_device_name) self.iptables_manager.ipv4['nat'].add_rule(*rule) def _get_address_scope_mark(self): # Prepare address scope iptables rule for internal ports internal_ports = self.router.get(l3_constants.INTERFACE_KEY, []) ports_scopemark = self._get_port_devicename_scopemark( internal_ports, self.get_internal_device_name) # DVR local router will use rfp port as external port ext_port = self.get_ex_gw_port() if not ext_port: return ports_scopemark ext_device_name = self.get_external_device_interface_name(ext_port) if not ext_device_name: return ports_scopemark ext_scope = self._get_external_address_scope() ext_scope_mark = self.get_address_scope_mark_mask(ext_scope) ports_scopemark[l3_constants.IP_VERSION_4][ext_device_name] = ( ext_scope_mark) return ports_scopemark def process_external(self, agent): ex_gw_port = self.get_ex_gw_port() if ex_gw_port: self.create_dvr_fip_interfaces(ex_gw_port) super(DvrLocalRouter, self).process_external(agent) def create_dvr_fip_interfaces(self, ex_gw_port): floating_ips = self.get_floating_ips() fip_agent_port = self.get_floating_agent_gw_interface( ex_gw_port['network_id']) if fip_agent_port: LOG.debug("FloatingIP agent gateway port received from the " "plugin: %s", fip_agent_port) if floating_ips: if not fip_agent_port: LOG.debug("No FloatingIP agent gateway port possibly due to " "late binding of the private port to the host, " "requesting agent gateway port for 'network-id' :" "%s", ex_gw_port['network_id']) fip_agent_port = self.agent.plugin_rpc.get_agent_gateway_port( self.agent.context, ex_gw_port['network_id']) if not fip_agent_port: LOG.error(_LE("No FloatingIP agent gateway port " "returned from server for 'network-id': " "%s"), ex_gw_port['network_id']) if fip_agent_port: if 'subnets' not in fip_agent_port: LOG.error(_LE('Missing subnet/agent_gateway_port')) else: self.fip_ns.create_or_update_gateway_port(fip_agent_port) if (self.fip_ns.agent_gateway_port and (self.dist_fip_count == 0)): self.fip_ns.create_rtr_2_fip_link(self) # kicks the FW Agent to add rules for the IR namespace if # configured self.agent.process_router_add(self) def get_router_cidrs(self, device): """No floating IPs are configured on the rfp device itself, so get the floating IP cidrs from the routes in the fip namespace.
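Illustratively, a host route such as '203.0.113.5 via <rtr_2_fip
address>' that floating_ip_added_dist() installed on the fip-to-router
device is reported back from here as the cidr '203.0.113.5/32' (the
address is shown for illustration only).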
""" if not self.fip_ns: return set() fip_ns_name = self.fip_ns.get_name() fip_2_rtr_name = self.fip_ns.get_int_device_name(self.router_id) device = ip_lib.IPDevice(fip_2_rtr_name, namespace=fip_ns_name) if not device.exists(): return set() if self.rtr_fip_subnet is None: self.rtr_fip_subnet = self.fip_ns.local_subnets.allocate( self.router_id) rtr_2_fip, _fip_2_rtr = self.rtr_fip_subnet.get_pair() exist_routes = device.route.list_routes( l3_constants.IP_VERSION_4, via=str(rtr_2_fip.ip)) return {common_utils.ip_to_cidr(route['cidr']) for route in exist_routes} def process(self, agent): ex_gw_port = self.get_ex_gw_port() if ex_gw_port: self.fip_ns = agent.get_fip_ns(ex_gw_port['network_id']) self.fip_ns.scan_fip_ports(self) super(DvrLocalRouter, self).process(agent) neutron-8.4.0/neutron/agent/__init__.py0000664000567000056710000000000013044372736021256 0ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/agent/securitygroups_rpc.py0000664000567000056710000003265513044372760023514 0ustar jenkinsjenkins00000000000000# Copyright 2012, Nachi Ueno, NTT MCL, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # import functools from oslo_config import cfg from oslo_log import log as logging import oslo_messaging from neutron._i18n import _, _LI, _LW from neutron.agent import firewall from neutron.api.rpc.handlers import securitygroups_rpc LOG = logging.getLogger(__name__) security_group_opts = [ cfg.StrOpt( 'firewall_driver', help=_('Driver for security groups firewall in the L2 agent')), cfg.BoolOpt( 'enable_security_group', default=True, help=_( 'Controls whether the neutron security group API is enabled ' 'in the server. It should be false when using no security ' 'groups or using the nova security group API.')), cfg.BoolOpt( 'enable_ipset', default=True, help=_('Use ipset to speed-up the iptables based security groups. 
' 'Enabling ipset support requires that ipset is installed on L2 ' 'agent node.')) ] cfg.CONF.register_opts(security_group_opts, 'SECURITYGROUP') #This is backward compatibility check for Havana def _is_valid_driver_combination(): return ((cfg.CONF.SECURITYGROUP.enable_security_group and (cfg.CONF.SECURITYGROUP.firewall_driver and cfg.CONF.SECURITYGROUP.firewall_driver != 'neutron.agent.firewall.NoopFirewallDriver')) or (not cfg.CONF.SECURITYGROUP.enable_security_group and (cfg.CONF.SECURITYGROUP.firewall_driver == 'neutron.agent.firewall.NoopFirewallDriver' or cfg.CONF.SECURITYGROUP.firewall_driver is None) )) def is_firewall_enabled(): if not _is_valid_driver_combination(): LOG.warning(_LW("Driver configuration doesn't match with " "enable_security_group")) return cfg.CONF.SECURITYGROUP.enable_security_group def _disable_extension(extension, aliases): if extension in aliases: aliases.remove(extension) def disable_security_group_extension_by_config(aliases): if not is_firewall_enabled(): LOG.info(_LI('Disabled security-group extension.')) _disable_extension('security-group', aliases) LOG.info(_LI('Disabled allowed-address-pairs extension.')) _disable_extension('allowed-address-pairs', aliases) class SecurityGroupAgentRpc(object): """Enables SecurityGroup agent support in agent implementations.""" def __init__(self, context, plugin_rpc, local_vlan_map=None, defer_refresh_firewall=False, integration_bridge=None): self.context = context self.plugin_rpc = plugin_rpc self.init_firewall(defer_refresh_firewall, integration_bridge) self.local_vlan_map = local_vlan_map def init_firewall(self, defer_refresh_firewall=False, integration_bridge=None): firewall_driver = cfg.CONF.SECURITYGROUP.firewall_driver or 'noop' LOG.debug("Init firewall settings (driver=%s)", firewall_driver) if not _is_valid_driver_combination(): LOG.warning(_LW("Driver configuration doesn't match " "with enable_security_group")) firewall_class = firewall.load_firewall_driver_class(firewall_driver) try: self.firewall = firewall_class( integration_bridge=integration_bridge) except TypeError: self.firewall = firewall_class() # The following flag will be set to true if port filter must not be # applied as soon as a rule or membership notification is received self.defer_refresh_firewall = defer_refresh_firewall # Stores devices for which firewall should be refreshed when # deferred refresh is enabled. 
self.devices_to_refilter = set() # Flag raised when a global refresh is needed self.global_refresh_firewall = False self._use_enhanced_rpc = None @property def use_enhanced_rpc(self): if self._use_enhanced_rpc is None: self._use_enhanced_rpc = ( self._check_enhanced_rpc_is_supported_by_server()) return self._use_enhanced_rpc def _check_enhanced_rpc_is_supported_by_server(self): try: self.plugin_rpc.security_group_info_for_devices( self.context, devices=[]) except oslo_messaging.UnsupportedVersion: LOG.warning(_LW('security_group_info_for_devices rpc call not ' 'supported by the server, falling back to old ' 'security_group_rules_for_devices which scales ' 'worse.')) return False return True def skip_if_noopfirewall_or_firewall_disabled(func): @functools.wraps(func) def decorated_function(self, *args, **kwargs): if (isinstance(self.firewall, firewall.NoopFirewallDriver) or not is_firewall_enabled()): LOG.info(_LI("Skipping method %s as firewall is disabled " "or configured as NoopFirewallDriver."), func.__name__) else: return func(self, # pylint: disable=not-callable *args, **kwargs) return decorated_function @skip_if_noopfirewall_or_firewall_disabled def prepare_devices_filter(self, device_ids): if not device_ids: return LOG.info(_LI("Preparing filters for devices %s"), device_ids) if self.use_enhanced_rpc: devices_info = self.plugin_rpc.security_group_info_for_devices( self.context, list(device_ids)) devices = devices_info['devices'] security_groups = devices_info['security_groups'] security_group_member_ips = devices_info['sg_member_ips'] else: devices = self.plugin_rpc.security_group_rules_for_devices( self.context, list(device_ids)) with self.firewall.defer_apply(): if self.use_enhanced_rpc: LOG.debug("Update security group information for ports %s", devices.keys()) self._update_security_group_info( security_groups, security_group_member_ips) for device in devices.values(): self.firewall.prepare_port_filter(device) def _update_security_group_info(self, security_groups, security_group_member_ips): LOG.debug("Update security group information") for sg_id, sg_rules in security_groups.items(): self.firewall.update_security_group_rules(sg_id, sg_rules) for remote_sg_id, member_ips in security_group_member_ips.items(): self.firewall.update_security_group_members( remote_sg_id, member_ips) def security_groups_rule_updated(self, security_groups): LOG.info(_LI("Security group " "rule updated %r"), security_groups) self._security_group_updated( security_groups, 'security_groups', 'sg_rule') def security_groups_member_updated(self, security_groups): LOG.info(_LI("Security group " "member updated %r"), security_groups) self._security_group_updated( security_groups, 'security_group_source_groups', 'sg_member') def _security_group_updated(self, security_groups, attribute, action_type): devices = [] sec_grp_set = set(security_groups) for device in self.firewall.ports.values(): if sec_grp_set & set(device.get(attribute, [])): devices.append(device['device']) if devices: if self.use_enhanced_rpc: self.firewall.security_group_updated(action_type, sec_grp_set) if self.defer_refresh_firewall: LOG.debug("Adding %s devices to the list of devices " "for which firewall needs to be refreshed", devices) self.devices_to_refilter |= set(devices) else: self.refresh_firewall(devices) def security_groups_provider_updated(self, devices_to_update): LOG.info(_LI("Provider rule updated")) if self.defer_refresh_firewall: if devices_to_update is None: self.global_refresh_firewall = True else: self.devices_to_refilter |= 
set(devices_to_update) else: self.refresh_firewall(devices_to_update) def remove_devices_filter(self, device_ids): if not device_ids: return LOG.info(_LI("Remove device filter for %r"), device_ids) with self.firewall.defer_apply(): for device_id in device_ids: device = self.firewall.ports.get(device_id) if not device: continue self.firewall.remove_port_filter(device) @skip_if_noopfirewall_or_firewall_disabled def refresh_firewall(self, device_ids=None): LOG.info(_LI("Refresh firewall rules")) if not device_ids: device_ids = self.firewall.ports.keys() if not device_ids: LOG.info(_LI("No ports here to refresh firewall")) return if self.use_enhanced_rpc: devices_info = self.plugin_rpc.security_group_info_for_devices( self.context, device_ids) devices = devices_info['devices'] security_groups = devices_info['security_groups'] security_group_member_ips = devices_info['sg_member_ips'] else: devices = self.plugin_rpc.security_group_rules_for_devices( self.context, device_ids) with self.firewall.defer_apply(): if self.use_enhanced_rpc: LOG.debug("Update security group information for ports %s", devices.keys()) self._update_security_group_info( security_groups, security_group_member_ips) for device in devices.values(): LOG.debug("Update port filter for %s", device['device']) self.firewall.update_port_filter(device) def firewall_refresh_needed(self): return self.global_refresh_firewall or self.devices_to_refilter def setup_port_filters(self, new_devices, updated_devices): """Configure port filters for devices. This routine applies filters for new devices and refreshes firewall rules when devices have been updated, or when there are changes in security group membership or rules. :param new_devices: set containing identifiers for new devices :param updated_devices: set containing identifiers for updated devices """ # These data structures are cleared here in order to avoid # losing updates occurring during firewall refresh devices_to_refilter = self.devices_to_refilter global_refresh_firewall = self.global_refresh_firewall self.devices_to_refilter = set() self.global_refresh_firewall = False # We must call prepare_devices_filter() after we've grabbed # self.devices_to_refilter since an update for a new port # could arrive while we're processing, and we need to make # sure we don't skip it. It will get handled the next time. if new_devices: LOG.debug("Preparing device filters for %d new devices", len(new_devices)) self.prepare_devices_filter(new_devices) # TODO(salv-orlando): Avoid if possible ever performing the global # refresh providing a precise list of devices for which firewall # should be refreshed if global_refresh_firewall: LOG.debug("Refreshing firewall for all filtered devices") self.refresh_firewall() else: if self.use_enhanced_rpc and updated_devices: self.firewall.security_group_updated('sg_member', [], updated_devices) # If a device is both in new and updated devices # avoid reprocessing it updated_devices = ((updated_devices | devices_to_refilter) - new_devices) if updated_devices: LOG.debug("Refreshing firewall for %d devices", len(updated_devices)) self.refresh_firewall(updated_devices) # TODO(armax): for bw compat with external dependencies; to be dropped in M. 
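#
# NOTE: an illustrative sketch of driving the agent-side machinery above;
# ``MyPluginApi`` and the device ids are hypothetical stand-ins for a real
# agent's RPC proxy and port set.
#
#     sg_agent = SecurityGroupAgentRpc(context, MyPluginApi(),
#                                      defer_refresh_firewall=True)
#     # Prepare filters for new ports and refresh updated ones; with
#     # deferred refresh enabled, notification-driven refreshes queued in
#     # devices_to_refilter are folded into this call as well.
#     sg_agent.setup_port_filters(new_devices={'port-1'},
#                                 updated_devices={'port-2'})
#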
SG_RPC_VERSION = ( securitygroups_rpc.SecurityGroupAgentRpcApiMixin.SG_RPC_VERSION ) SecurityGroupServerRpcApi = ( securitygroups_rpc.SecurityGroupServerRpcApi ) SecurityGroupAgentRpcApiMixin = ( securitygroups_rpc.SecurityGroupAgentRpcApiMixin ) SecurityGroupAgentRpcCallbackMixin = ( securitygroups_rpc.SecurityGroupAgentRpcCallbackMixin ) neutron-8.4.0/neutron/agent/dhcp/0000775000567000056710000000000013044373210020061 5ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/agent/dhcp/config.py0000664000567000056710000001237313044372760021717 0ustar jenkinsjenkins00000000000000# Copyright 2015 OpenStack Foundation # # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import cfg from neutron._i18n import _ DHCP_AGENT_OPTS = [ cfg.IntOpt('resync_interval', default=5, help=_("The DHCP agent will resync its state with Neutron to " "recover from any transient notification or RPC errors. " "The interval is number of seconds between attempts.")), cfg.StrOpt('dhcp_driver', default='neutron.agent.linux.dhcp.Dnsmasq', help=_("The driver used to manage the DHCP server.")), cfg.BoolOpt('enable_isolated_metadata', default=False, help=_("The DHCP server can assist with providing metadata " "support on isolated networks. Setting this value to " "True will cause the DHCP server to append specific " "host routes to the DHCP request. The metadata service " "will only be activated when the subnet does not " "contain any router port. The guest instance must be " "configured to request host routes via DHCP (Option " "121). This option doesn't have any effect when " "force_metadata is set to True.")), cfg.BoolOpt('force_metadata', default=False, help=_("In some cases the Neutron router is not present to " "provide the metadata IP but the DHCP server can be " "used to provide this info. Setting this value will " "force the DHCP server to append specific host routes " "to the DHCP request. If this option is set, then the " "metadata service will be activated for all the " "networks.")), cfg.BoolOpt('enable_metadata_network', default=False, help=_("Allows for serving metadata requests coming from a " "dedicated metadata access network whose CIDR is " "169.254.169.254/16 (or larger prefix), and is " "connected to a Neutron router from which the VMs send " "metadata:1 request. In this case DHCP Option 121 will " "not be injected in VMs, as they will be able to reach " "169.254.169.254 through a router. This option " "requires enable_isolated_metadata = True.")), cfg.IntOpt('num_sync_threads', default=4, help=_('Number of threads to use during sync process. ' 'Should not exceed connection pool size configured on ' 'server.')) ] DHCP_OPTS = [ cfg.StrOpt('dhcp_confs', default='$state_path/dhcp', help=_('Location to store DHCP server config files.')), cfg.StrOpt('dhcp_domain', default='openstacklocal', help=_('Domain to use for building the hostnames. ' 'This option is deprecated. It has been moved to ' 'neutron.conf as dns_domain. 
It will be removed ' 'in a future release.'), deprecated_for_removal=True), ] DNSMASQ_OPTS = [ cfg.StrOpt('dnsmasq_config_file', default='', help=_('Override the default dnsmasq settings ' 'with this file.')), cfg.ListOpt('dnsmasq_dns_servers', help=_('Comma-separated list of the DNS servers which will be ' 'used as forwarders.'), deprecated_name='dnsmasq_dns_server'), cfg.StrOpt('dnsmasq_base_log_dir', help=_("Base log dir for dnsmasq logging. " "The log contains DHCP and DNS log information and " "is useful for debugging issues with either DHCP or " "DNS. If this section is null, disable dnsmasq log.")), cfg.BoolOpt('dnsmasq_local_resolv', default=False, help=_("Enables the dnsmasq service to provide name " "resolution for instances via DNS resolvers on the " "host running the DHCP agent. Effectively removes the " "'--no-resolv' option from the dnsmasq process " "arguments. Adding custom DNS resolvers to the " "'dnsmasq_dns_servers' option disables this feature.")), cfg.IntOpt( 'dnsmasq_lease_max', default=(2 ** 24), help=_('Limit number of leases to prevent a denial-of-service.')), cfg.BoolOpt('dhcp_broadcast_reply', default=False, help=_("Use broadcast in DHCP replies.")), ] neutron-8.4.0/neutron/agent/dhcp/agent.py0000664000567000056710000006274613044372760021561 0ustar jenkinsjenkins00000000000000# Copyright 2012 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import collections import os import eventlet from oslo_config import cfg from oslo_log import log as logging import oslo_messaging from oslo_service import loopingcall from oslo_utils import importutils from neutron._i18n import _, _LE, _LI, _LW from neutron.agent.linux import dhcp from neutron.agent.linux import external_process from neutron.agent.metadata import driver as metadata_driver from neutron.agent import rpc as agent_rpc from neutron.common import constants from neutron.common import exceptions from neutron.common import rpc as n_rpc from neutron.common import topics from neutron.common import utils from neutron import context from neutron import manager LOG = logging.getLogger(__name__) class DhcpAgent(manager.Manager): """DHCP agent service manager. Note that the public methods of this class are exposed as the server side of an rpc interface. The neutron server uses neutron.api.rpc.agentnotifiers.dhcp_rpc_agent_api.DhcpAgentNotifyApi as the client side to execute the methods here. For more information about changing rpc interfaces, see doc/source/devref/rpc_api.rst. 
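As a hedged sketch (the exact cast is issued by the server-side notifier, not by this class, and the call below is hypothetical), a notification such as::

    # hypothetical server-side notifier call
    cctxt.cast(context, 'network_create_end',
               payload={'network': {'id': network_id}})

is dispatched to the network_create_end() method below, which reads payload['network']['id'] and enables DHCP for that network.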
""" target = oslo_messaging.Target(version='1.0') def __init__(self, host=None, conf=None): super(DhcpAgent, self).__init__(host=host) self.needs_resync_reasons = collections.defaultdict(list) self.conf = conf or cfg.CONF self.cache = NetworkCache() self.dhcp_driver_cls = importutils.import_class(self.conf.dhcp_driver) ctx = context.get_admin_context_without_session() self.plugin_rpc = DhcpPluginApi(topics.PLUGIN, ctx, self.conf.host) # create dhcp dir to store dhcp info dhcp_dir = os.path.dirname("/%s/dhcp/" % self.conf.state_path) utils.ensure_dir(dhcp_dir) self.dhcp_version = self.dhcp_driver_cls.check_version() self._populate_networks_cache() # keep track of mappings between networks and routers for # metadata processing self._metadata_routers = {} # {network_id: router_id} self._process_monitor = external_process.ProcessMonitor( config=self.conf, resource_type='dhcp') def init_host(self): self.sync_state() def _populate_networks_cache(self): """Populate the networks cache when the DHCP-agent starts.""" try: existing_networks = self.dhcp_driver_cls.existing_dhcp_networks( self.conf ) for net_id in existing_networks: net = dhcp.NetModel({"id": net_id, "subnets": [], "ports": []}) self.cache.put(net) except NotImplementedError: # just go ahead with an empty networks cache LOG.debug("The '%s' DHCP-driver does not support retrieving of a " "list of existing networks", self.conf.dhcp_driver) def after_start(self): self.run() LOG.info(_LI("DHCP agent started")) def run(self): """Activate the DHCP agent.""" self.sync_state() self.periodic_resync() def call_driver(self, action, network, **action_kwargs): """Invoke an action on a DHCP driver instance.""" LOG.debug('Calling driver for network: %(net)s action: %(action)s', {'net': network.id, 'action': action}) try: # the Driver expects something that is duck typed similar to # the base models. driver = self.dhcp_driver_cls(self.conf, network, self._process_monitor, self.dhcp_version, self.plugin_rpc) getattr(driver, action)(**action_kwargs) return True except exceptions.Conflict: # No need to resync here, the agent will receive the event related # to a status update for the network LOG.warning(_LW('Unable to %(action)s dhcp for %(net_id)s: there ' 'is a conflict with its current state; please ' 'check that the network and/or its subnet(s) ' 'still exist.'), {'net_id': network.id, 'action': action}) except Exception as e: if getattr(e, 'exc_type', '') != 'IpAddressGenerationFailure': # Don't resync if port could not be created because of an IP # allocation failure. When the subnet is updated with a new # allocation pool or a port is deleted to free up an IP, this # will automatically be retried on the notification self.schedule_resync(e, network.id) if (isinstance(e, oslo_messaging.RemoteError) and e.exc_type == 'NetworkNotFound' or isinstance(e, exceptions.NetworkNotFound)): LOG.debug("Network %s has been deleted.", network.id) else: LOG.exception(_LE('Unable to %(action)s dhcp for %(net_id)s.'), {'net_id': network.id, 'action': action}) def schedule_resync(self, reason, network_id=None): """Schedule a resync for a given network and reason. If no network is specified, resync all networks. """ self.needs_resync_reasons[network_id].append(reason) @utils.synchronized('dhcp-agent') def sync_state(self, networks=None): """Sync the local DHCP state with Neutron. If no networks are passed, or 'None' is one of the networks, sync all of the networks. 
""" only_nets = set([] if (not networks or None in networks) else networks) LOG.info(_LI('Synchronizing state')) pool = eventlet.GreenPool(self.conf.num_sync_threads) known_network_ids = set(self.cache.get_network_ids()) try: active_networks = self.plugin_rpc.get_active_networks_info() LOG.info(_LI('All active networks have been fetched through RPC.')) active_network_ids = set(network.id for network in active_networks) for deleted_id in known_network_ids - active_network_ids: try: self.disable_dhcp_helper(deleted_id) except Exception as e: self.schedule_resync(e, deleted_id) LOG.exception(_LE('Unable to sync network state on ' 'deleted network %s'), deleted_id) for network in active_networks: if (not only_nets or # specifically resync all network.id not in known_network_ids or # missing net network.id in only_nets): # specific network to sync pool.spawn(self.safe_configure_dhcp_for_network, network) pool.waitall() LOG.info(_LI('Synchronizing state complete')) except Exception as e: if only_nets: for network_id in only_nets: self.schedule_resync(e, network_id) else: self.schedule_resync(e) LOG.exception(_LE('Unable to sync network state.')) @utils.exception_logger() def _periodic_resync_helper(self): """Resync the dhcp state at the configured interval.""" while True: eventlet.sleep(self.conf.resync_interval) if self.needs_resync_reasons: # be careful to avoid a race with additions to list # from other threads reasons = self.needs_resync_reasons self.needs_resync_reasons = collections.defaultdict(list) for net, r in reasons.items(): if not net: net = "*" LOG.debug("resync (%(network)s): %(reason)s", {"reason": r, "network": net}) self.sync_state(reasons.keys()) def periodic_resync(self): """Spawn a thread to periodically resync the dhcp state.""" eventlet.spawn(self._periodic_resync_helper) def safe_get_network_info(self, network_id): try: network = self.plugin_rpc.get_network_info(network_id) if not network: LOG.debug('Network %s has been deleted.', network_id) return network except Exception as e: self.schedule_resync(e, network_id) LOG.exception(_LE('Network %s info call failed.'), network_id) def enable_dhcp_helper(self, network_id): """Enable DHCP for a network that meets enabling criteria.""" network = self.safe_get_network_info(network_id) if network: self.configure_dhcp_for_network(network) @utils.exception_logger() def safe_configure_dhcp_for_network(self, network): try: network_id = network.get('id') LOG.info(_LI('Starting network %s dhcp configuration'), network_id) self.configure_dhcp_for_network(network) LOG.info(_LI('Finished network %s dhcp configuration'), network_id) except (exceptions.NetworkNotFound, RuntimeError): LOG.warning(_LW('Network %s may have been deleted and ' 'its resources may have already been disposed.'), network.id) def configure_dhcp_for_network(self, network): if not network.admin_state_up: return for subnet in network.subnets: if subnet.enable_dhcp: if self.call_driver('enable', network): self.update_isolated_metadata_proxy(network) self.cache.put(network) break def disable_dhcp_helper(self, network_id): """Disable DHCP for a network known to the agent.""" network = self.cache.get_network_by_id(network_id) if network: if self.conf.enable_isolated_metadata: # NOTE(jschwarz): In the case where a network is deleted, all # the subnets and ports are deleted before this function is # called, so checking if 'should_enable_metadata' is True # for any subnet is false logic here. 
self.disable_isolated_metadata_proxy(network) if self.call_driver('disable', network): self.cache.remove(network) def refresh_dhcp_helper(self, network_id): """Refresh or disable DHCP for a network depending on the current state of the network. """ old_network = self.cache.get_network_by_id(network_id) if not old_network: # DHCP current not running for network. return self.enable_dhcp_helper(network_id) network = self.safe_get_network_info(network_id) if not network: return if not any(s for s in network.subnets if s.enable_dhcp): self.disable_dhcp_helper(network.id) return # NOTE(kevinbenton): we don't exclude dhcp disabled subnets because # they still change the indexes used for tags old_cidrs = [s.cidr for s in network.subnets] new_cidrs = [s.cidr for s in old_network.subnets] if old_cidrs == new_cidrs: self.call_driver('reload_allocations', network) self.cache.put(network) elif self.call_driver('restart', network): self.cache.put(network) # Update the metadata proxy after the dhcp driver has been updated self.update_isolated_metadata_proxy(network) @utils.synchronized('dhcp-agent') def network_create_end(self, context, payload): """Handle the network.create.end notification event.""" network_id = payload['network']['id'] self.enable_dhcp_helper(network_id) @utils.synchronized('dhcp-agent') def network_update_end(self, context, payload): """Handle the network.update.end notification event.""" network_id = payload['network']['id'] if payload['network']['admin_state_up']: self.enable_dhcp_helper(network_id) else: self.disable_dhcp_helper(network_id) @utils.synchronized('dhcp-agent') def network_delete_end(self, context, payload): """Handle the network.delete.end notification event.""" self.disable_dhcp_helper(payload['network_id']) @utils.synchronized('dhcp-agent') def subnet_update_end(self, context, payload): """Handle the subnet.update.end notification event.""" network_id = payload['subnet']['network_id'] self.refresh_dhcp_helper(network_id) # Use the update handler for the subnet create event. subnet_create_end = subnet_update_end @utils.synchronized('dhcp-agent') def subnet_delete_end(self, context, payload): """Handle the subnet.delete.end notification event.""" subnet_id = payload['subnet_id'] network = self.cache.get_network_by_subnet_id(subnet_id) if network: self.refresh_dhcp_helper(network.id) @utils.synchronized('dhcp-agent') def port_update_end(self, context, payload): """Handle the port.update.end notification event.""" updated_port = dhcp.DictModel(payload['port']) if self.cache.is_port_message_stale(payload['port']): LOG.debug("Discarding stale port update: %s", updated_port) return network = self.cache.get_network_by_id(updated_port.network_id) if network: LOG.info(_LI("Trigger reload_allocations for port %s"), updated_port) driver_action = 'reload_allocations' if self._is_port_on_this_agent(updated_port): orig = self.cache.get_port_by_id(updated_port['id']) # assume IP change if not in cache old_ips = {i['ip_address'] for i in orig['fixed_ips'] or []} new_ips = {i['ip_address'] for i in updated_port['fixed_ips']} if old_ips != new_ips: driver_action = 'restart' self.cache.put_port(updated_port) self.call_driver(driver_action, network) def _is_port_on_this_agent(self, port): thishost = utils.get_dhcp_agent_device_id( port['network_id'], self.conf.host) return port['device_id'] == thishost # Use the update handler for the port create event. 
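# A compressed sketch (illustrative only, not agent code) of the decision
# port_update_end makes above: dnsmasq is restarted only when the port's
# fixed IPs actually changed, otherwise allocations are merely reloaded.
#
#     old_ips = {i['ip_address'] for i in orig['fixed_ips'] or []}
#     new_ips = {i['ip_address'] for i in updated_port['fixed_ips']}
#     action = 'restart' if old_ips != new_ips else 'reload_allocations'
#
# As the comment above says, the alias below reuses the update handler for
# the port.create.end event.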
port_create_end = port_update_end @utils.synchronized('dhcp-agent') def port_delete_end(self, context, payload): """Handle the port.delete.end notification event.""" port = self.cache.get_port_by_id(payload['port_id']) self.cache.deleted_ports.add(payload['port_id']) if port: network = self.cache.get_network_by_id(port.network_id) self.cache.remove_port(port) self.call_driver('reload_allocations', network) def update_isolated_metadata_proxy(self, network): """Spawn or kill metadata proxy. Depending on the return value from the driver class, spawn or kill the metadata proxy process. Spawning an already-running metadata proxy or killing a nonexistent one will just silently return. """ should_enable_metadata = self.dhcp_driver_cls.should_enable_metadata( self.conf, network) if should_enable_metadata: self.enable_isolated_metadata_proxy(network) else: self.disable_isolated_metadata_proxy(network) def enable_isolated_metadata_proxy(self, network): # The proxy might work for either a single network # or all the networks connected via a router # to the one passed as a parameter kwargs = {'network_id': network.id} # When the metadata network is enabled, the proxy might # be started for the router attached to the network if self.conf.enable_metadata_network: router_ports = [port for port in network.ports if (port.device_owner in constants.ROUTER_INTERFACE_OWNERS)] if router_ports: # Multiple router ports should not be allowed if len(router_ports) > 1: LOG.warning(_LW("%(port_num)d router ports found on the " "metadata access network. Only the port " "%(port_id)s, for router %(router_id)s " "will be considered"), {'port_num': len(router_ports), 'port_id': router_ports[0].id, 'router_id': router_ports[0].device_id}) kwargs = {'router_id': router_ports[0].device_id} self._metadata_routers[network.id] = router_ports[0].device_id metadata_driver.MetadataDriver.spawn_monitored_metadata_proxy( self._process_monitor, network.namespace, dhcp.METADATA_PORT, self.conf, **kwargs) def disable_isolated_metadata_proxy(self, network): if (self.conf.enable_metadata_network and network.id in self._metadata_routers): uuid = self._metadata_routers[network.id] is_router_id = True else: uuid = network.id is_router_id = False metadata_driver.MetadataDriver.destroy_monitored_metadata_proxy( self._process_monitor, uuid, self.conf) if is_router_id: del self._metadata_routers[network.id] class DhcpPluginApi(object): """Agent side of the dhcp rpc API. This class implements the client side of an rpc interface. The server side of this interface can be found in neutron.api.rpc.handlers.dhcp_rpc.DhcpRpcCallback. For more information about changing rpc interfaces, see doc/source/devref/rpc_api.rst. API version history: 1.0 - Initial version. 1.1 - Added get_active_networks_info, create_dhcp_port, and update_dhcp_port methods. 
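The version pinning above is applied per call; as a sketch of the pattern used by the 1.1 methods below::

    cctxt = self.client.prepare(version='1.1')
    networks = cctxt.call(self.context, 'get_active_networks_info',
                          host=self.host)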
""" def __init__(self, topic, context, host): self.context = context self.host = host target = oslo_messaging.Target( topic=topic, namespace=constants.RPC_NAMESPACE_DHCP_PLUGIN, version='1.0') self.client = n_rpc.get_client(target) def get_active_networks_info(self): """Make a remote process call to retrieve all network info.""" cctxt = self.client.prepare(version='1.1') networks = cctxt.call(self.context, 'get_active_networks_info', host=self.host) return [dhcp.NetModel(n) for n in networks] def get_network_info(self, network_id): """Make a remote process call to retrieve network info.""" cctxt = self.client.prepare() network = cctxt.call(self.context, 'get_network_info', network_id=network_id, host=self.host) if network: return dhcp.NetModel(network) def create_dhcp_port(self, port): """Make a remote process call to create the dhcp port.""" cctxt = self.client.prepare(version='1.1') port = cctxt.call(self.context, 'create_dhcp_port', port=port, host=self.host) if port: return dhcp.DictModel(port) def update_dhcp_port(self, port_id, port): """Make a remote process call to update the dhcp port.""" cctxt = self.client.prepare(version='1.1') port = cctxt.call(self.context, 'update_dhcp_port', port_id=port_id, port=port, host=self.host) if port: return dhcp.DictModel(port) def release_dhcp_port(self, network_id, device_id): """Make a remote process call to release the dhcp port.""" cctxt = self.client.prepare() return cctxt.call(self.context, 'release_dhcp_port', network_id=network_id, device_id=device_id, host=self.host) class NetworkCache(object): """Agent cache of the current network state.""" def __init__(self): self.cache = {} self.subnet_lookup = {} self.port_lookup = {} self.deleted_ports = set() def is_port_message_stale(self, payload): orig = self.get_port_by_id(payload['id']) or {} if orig.get('revision_number', 0) > payload.get('revision_number', 0): return True if payload['id'] in self.deleted_ports: return True return False def get_network_ids(self): return self.cache.keys() def get_network_by_id(self, network_id): return self.cache.get(network_id) def get_network_by_subnet_id(self, subnet_id): return self.cache.get(self.subnet_lookup.get(subnet_id)) def get_network_by_port_id(self, port_id): return self.cache.get(self.port_lookup.get(port_id)) def put(self, network): if network.id in self.cache: self.remove(self.cache[network.id]) self.cache[network.id] = network for subnet in network.subnets: self.subnet_lookup[subnet.id] = network.id for port in network.ports: self.port_lookup[port.id] = network.id def remove(self, network): del self.cache[network.id] for subnet in network.subnets: del self.subnet_lookup[subnet.id] for port in network.ports: del self.port_lookup[port.id] def put_port(self, port): network = self.get_network_by_id(port.network_id) for index in range(len(network.ports)): if network.ports[index].id == port.id: network.ports[index] = port break else: network.ports.append(port) self.port_lookup[port.id] = network.id def remove_port(self, port): network = self.get_network_by_port_id(port.id) for index in range(len(network.ports)): if network.ports[index] == port: del network.ports[index] del self.port_lookup[port.id] break def get_port_by_id(self, port_id): network = self.get_network_by_port_id(port_id) if network: for port in network.ports: if port.id == port_id: return port def get_state(self): net_ids = self.get_network_ids() num_nets = len(net_ids) num_subnets = 0 num_ports = 0 for net_id in net_ids: network = self.get_network_by_id(net_id) num_subnets += 
len(network.subnets) num_ports += len(network.ports) return {'networks': num_nets, 'subnets': num_subnets, 'ports': num_ports} class DhcpAgentWithStateReport(DhcpAgent): def __init__(self, host=None, conf=None): super(DhcpAgentWithStateReport, self).__init__(host=host, conf=conf) self.state_rpc = agent_rpc.PluginReportStateAPI(topics.REPORTS) self.agent_state = { 'binary': 'neutron-dhcp-agent', 'host': host, 'availability_zone': self.conf.AGENT.availability_zone, 'topic': topics.DHCP_AGENT, 'configurations': { 'dhcp_driver': self.conf.dhcp_driver, 'dhcp_lease_duration': self.conf.dhcp_lease_duration, 'log_agent_heartbeats': self.conf.AGENT.log_agent_heartbeats}, 'start_flag': True, 'agent_type': constants.AGENT_TYPE_DHCP} report_interval = self.conf.AGENT.report_interval if report_interval: self.heartbeat = loopingcall.FixedIntervalLoopingCall( self._report_state) self.heartbeat.start(interval=report_interval) def _report_state(self): try: self.agent_state.get('configurations').update( self.cache.get_state()) ctx = context.get_admin_context_without_session() agent_status = self.state_rpc.report_state( ctx, self.agent_state, True) if agent_status == constants.AGENT_REVIVED: LOG.info(_LI("Agent has just been revived. " "Scheduling full sync")) self.schedule_resync("Agent has just been revived") except AttributeError: # This means the server does not support report_state LOG.warning(_LW("Neutron server does not support state report. " "State report for this agent will be disabled.")) self.heartbeat.stop() self.run() return except Exception: LOG.exception(_LE("Failed reporting state!")) return if self.agent_state.pop('start_flag', None): self.run() def agent_updated(self, context, payload): """Handle the agent_updated notification event.""" self.schedule_resync(_("Agent updated: %(payload)s") % {"payload": payload}) LOG.info(_LI("agent_updated by server side %s!"), payload) def after_start(self): LOG.info(_LI("DHCP agent started")) neutron-8.4.0/neutron/agent/dhcp/__init__.py0000664000567000056710000000000013044372736022174 0ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/agent/common/0000775000567000056710000000000013044373210020433 5ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/agent/common/ip_lib.py0000664000567000056710000000150713044372736022262 0ustar jenkinsjenkins00000000000000# Copyright 2016 Cloudbase Solutions. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import os if os.name == 'nt': from neutron.agent.windows import ip_lib else: from neutron.agent.linux import ip_lib OPTS = ip_lib.OPTS IPWrapper = ip_lib.IPWrapper IPDevice = ip_lib.IPDevice neutron-8.4.0/neutron/agent/common/config.py0000664000567000056710000001371213044372760022267 0ustar jenkinsjenkins00000000000000# Copyright 2012 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import os from oslo_config import cfg from neutron._i18n import _ from neutron.common import config ROOT_HELPER_OPTS = [ cfg.StrOpt('root_helper', default='sudo', help=_("Root helper application. " "Use 'sudo neutron-rootwrap /etc/neutron/rootwrap.conf' " "to use the real root filter facility. Change to 'sudo' " "to skip the filtering and just run the command " "directly.")), cfg.BoolOpt('use_helper_for_ns_read', default=True, help=_("Use the root helper when listing the namespaces on a " "system. This may not be required depending on the " "security configuration. If the root helper is " "not required, set this to False for a performance " "improvement.")), # We can't just use root_helper=sudo neutron-rootwrap-daemon $cfg because # it isn't appropriate for long-lived processes spawned with create_process # Having a bool use_rootwrap_daemon option precludes specifying the # rootwrap daemon command, which may be necessary for Xen? cfg.StrOpt('root_helper_daemon', help=_('Root helper daemon application to use when possible.')), ] AGENT_STATE_OPTS = [ cfg.FloatOpt('report_interval', default=30, help=_('Seconds between nodes reporting state to server; ' 'should be less than agent_down_time, best if it ' 'is half or less than agent_down_time.')), cfg.BoolOpt('log_agent_heartbeats', default=False, help=_('Log agent heartbeats')), ] INTERFACE_DRIVER_OPTS = [ cfg.StrOpt('interface_driver', help=_("The driver used to manage the virtual interface.")), ] IPTABLES_OPTS = [ cfg.BoolOpt('comment_iptables_rules', default=True, help=_("Add comments to iptables rules. " "Set to false to disallow the addition of comments to " "generated iptables rules that describe each rule's " "purpose. System must support the iptables comments " "module for addition of comments.")), ] PROCESS_MONITOR_OPTS = [ cfg.StrOpt('check_child_processes_action', default='respawn', choices=['respawn', 'exit'], help=_('Action to be executed when a child process dies')), cfg.IntOpt('check_child_processes_interval', default=60, help=_('Interval between checks of child process liveness ' '(seconds), use 0 to disable')), ] AVAILABILITY_ZONE_OPTS = [ # The default AZ name "nova" is selected to match the default # AZ name in Nova and Cinder. cfg.StrOpt('availability_zone', max_length=255, default='nova', help=_("Availability zone of this node")), ] EXT_NET_BRIDGE_OPTS = [ cfg.StrOpt('external_network_bridge', default='br-ex', help=_("Name of bridge used for external network " "traffic. This should be set to an empty value for the " "Linux Bridge. 
When this parameter is set, each L3 " "agent can be associated with no more than one external " "network.")), ] def get_log_args(conf, log_file_name, **kwargs): cmd_args = [] if conf.debug: cmd_args.append('--debug') if conf.verbose: cmd_args.append('--verbose') if (conf.log_dir or conf.log_file): cmd_args.append('--log-file=%s' % log_file_name) log_dir = None if conf.log_dir and conf.log_file: log_dir = os.path.dirname( os.path.join(conf.log_dir, conf.log_file)) elif conf.log_dir: log_dir = conf.log_dir elif conf.log_file: log_dir = os.path.dirname(conf.log_file) if log_dir: cmd_args.append('--log-dir=%s' % log_dir) if kwargs.get('metadata_proxy_watch_log') is False: cmd_args.append('--nometadata_proxy_watch_log') else: if conf.use_syslog: cmd_args.append('--use-syslog') if conf.syslog_log_facility: cmd_args.append( '--syslog-log-facility=%s' % conf.syslog_log_facility) return cmd_args def register_root_helper(conf): conf.register_opts(ROOT_HELPER_OPTS, 'AGENT') def register_agent_state_opts_helper(conf): conf.register_opts(AGENT_STATE_OPTS, 'AGENT') def register_interface_driver_opts_helper(conf): conf.register_opts(INTERFACE_DRIVER_OPTS) def register_iptables_opts(conf): conf.register_opts(IPTABLES_OPTS, 'AGENT') def register_process_monitor_opts(conf): conf.register_opts(PROCESS_MONITOR_OPTS, 'AGENT') def register_availability_zone_opts_helper(conf): conf.register_opts(AVAILABILITY_ZONE_OPTS, 'AGENT') def get_root_helper(conf): return conf.AGENT.root_helper def setup_conf(): bind_opts = [ cfg.StrOpt('state_path', default='/var/lib/neutron', help=_("Where to store Neutron state files. " "This directory must be writable by the agent.")), ] conf = cfg.ConfigOpts() conf.register_opts(bind_opts) return conf # add a logging setup method here for convenience setup_logging = config.setup_logging neutron-8.4.0/neutron/agent/common/base_polling.py0000664000567000056710000000366213044372736023466 0ustar jenkinsjenkins00000000000000# Copyright 2015 Cloudbase Solutions. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. class BasePollingManager(object): def __init__(self): self._force_polling = False self._polling_completed = True def force_polling(self): self._force_polling = True def polling_completed(self): self._polling_completed = True def _is_polling_required(self): raise NotImplementedError() @property def is_polling_required(self): # Always consume the updates to minimize polling. polling_required = self._is_polling_required() # Polling is required regardless of whether updates have been # detected. if self._force_polling: self._force_polling = False polling_required = True # Polling is required if not yet done for previously detected # updates. if not self._polling_completed: polling_required = True if polling_required: # Track whether polling has been completed to ensure that # polling can be required until the caller indicates via a # call to polling_completed() that polling has been # successfully performed. 
self._polling_completed = False return polling_required class AlwaysPoll(BasePollingManager): @property def is_polling_required(self): return True neutron-8.4.0/neutron/agent/common/__init__.py0000664000567000056710000000000013044372736022546 0ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/agent/common/utils.py0000664000567000056710000000314213044372760022156 0ustar jenkinsjenkins00000000000000# Copyright 2015 Cloudbase Solutions. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import os from oslo_config import cfg from oslo_log import log as logging from neutron._i18n import _LE from neutron.agent.common import config from neutron.common import utils as neutron_utils if os.name == 'nt': from neutron.agent.windows import utils else: from neutron.agent.linux import utils LOG = logging.getLogger(__name__) config.register_root_helper(cfg.CONF) INTERFACE_NAMESPACE = 'neutron.interface_drivers' execute = utils.execute def load_interface_driver(conf): """Load interface driver for agents like DHCP or L3 agent. :param conf: driver configuration object :raises SystemExit of 1 if driver cannot be loaded """ try: loaded_class = neutron_utils.load_class_by_alias_or_classname( INTERFACE_NAMESPACE, conf.interface_driver) return loaded_class(conf) except ImportError: LOG.error(_LE("Error loading interface driver '%s'"), conf.interface_driver) raise SystemExit(1) neutron-8.4.0/neutron/agent/common/polling.py0000664000567000056710000000155513044372736022473 0ustar jenkinsjenkins00000000000000# Copyright 2015 Cloudbase Solutions. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import os if os.name == 'nt': from neutron.agent.windows import polling else: from neutron.agent.linux import polling get_polling_manager = polling.get_polling_manager InterfacePollingMinimizer = polling.InterfacePollingMinimizer neutron-8.4.0/neutron/agent/common/ovs_lib.py0000664000567000056710000006447213044372760022470 0ustar jenkinsjenkins00000000000000# Copyright 2011 VMware, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. import collections import itertools import operator import time import uuid from oslo_config import cfg from oslo_log import log as logging from oslo_utils import excutils import retrying import six from neutron._i18n import _, _LE, _LI, _LW from neutron.agent.common import utils from neutron.agent.linux import ip_lib from neutron.agent.ovsdb import api as ovsdb from neutron.common import exceptions from neutron.plugins.common import constants as p_const from neutron.plugins.ml2.drivers.openvswitch.agent.common \ import constants UINT64_BITMASK = (1 << 64) - 1 # Default timeout for ovs-vsctl command DEFAULT_OVS_VSCTL_TIMEOUT = 10 # Special return value for an invalid OVS ofport INVALID_OFPORT = -1 UNASSIGNED_OFPORT = [] # OVS bridge fail modes FAILMODE_SECURE = 'secure' FAILMODE_STANDALONE = 'standalone' OPTS = [ cfg.IntOpt('ovs_vsctl_timeout', default=DEFAULT_OVS_VSCTL_TIMEOUT, help=_('Timeout in seconds for ovs-vsctl commands. ' 'If the timeout expires, ovs commands will fail with ' 'ALARMCLOCK error.')), ] cfg.CONF.register_opts(OPTS) LOG = logging.getLogger(__name__) OVS_DEFAULT_CAPS = { 'datapath_types': [], 'iface_types': [], } def _ofport_result_pending(result): """Return True if ovs-vsctl indicates the result is still pending.""" # ovs-vsctl can return '[]' for an ofport that has not yet been assigned try: int(result) return False except (ValueError, TypeError): return True def _ofport_retry(fn): """Decorator for retrying when OVS has yet to assign an ofport. The instance's vsctl_timeout is used as the max waiting time. This relies on the fact that instance methods receive self as the first argument. """ @six.wraps(fn) def wrapped(*args, **kwargs): self = args[0] new_fn = retrying.retry( retry_on_result=_ofport_result_pending, stop_max_delay=self.vsctl_timeout * 1000, wait_exponential_multiplier=10, wait_exponential_max=1000, retry_on_exception=lambda _: False)(fn) return new_fn(*args, **kwargs) return wrapped class VifPort(object): def __init__(self, port_name, ofport, vif_id, vif_mac, switch): self.port_name = port_name self.ofport = ofport self.vif_id = vif_id self.vif_mac = vif_mac self.switch = switch def __str__(self): return ("iface-id=%s, vif_mac=%s, port_name=%s, ofport=%s, " "bridge_name=%s") % ( self.vif_id, self.vif_mac, self.port_name, self.ofport, self.switch.br_name) class BaseOVS(object): def __init__(self): self.vsctl_timeout = cfg.CONF.ovs_vsctl_timeout self.ovsdb = ovsdb.API.get(self) def add_bridge(self, bridge_name, datapath_type=constants.OVS_DATAPATH_SYSTEM): self.ovsdb.add_br(bridge_name, datapath_type).execute() return OVSBridge(bridge_name) def delete_bridge(self, bridge_name): self.ovsdb.del_br(bridge_name).execute() def bridge_exists(self, bridge_name): return self.ovsdb.br_exists(bridge_name).execute() def port_exists(self, port_name): cmd = self.ovsdb.db_get('Port', port_name, 'name') return bool(cmd.execute(check_error=False, log_errors=False)) def get_bridge_for_iface(self, iface): return self.ovsdb.iface_to_br(iface).execute() def get_bridges(self): return self.ovsdb.list_br().execute(check_error=True) def get_bridge_external_bridge_id(self, bridge): return self.ovsdb.br_get_external_id(bridge, 'bridge-id').execute() def set_db_attribute(self, table_name, record, column, value, check_error=False, log_errors=True): self.ovsdb.db_set(table_name, record, (column, value)).execute( check_error=check_error, log_errors=log_errors) def 
clear_db_attribute(self, table_name, record, column): self.ovsdb.db_clear(table_name, record, column).execute() def db_get_val(self, table, record, column, check_error=False, log_errors=True): return self.ovsdb.db_get(table, record, column).execute( check_error=check_error, log_errors=log_errors) @property def config(self): """A dict containing the only row from the root Open_vSwitch table This row contains several columns describing the Open vSwitch install and the system on which it is installed. Useful keys include: datapath_types: a list of supported datapath types iface_types: a list of supported interface types ovs_version: the OVS version """ return self.ovsdb.db_list("Open_vSwitch").execute()[0] @property def capabilities(self): _cfg = self.config return {k: _cfg.get(k, OVS_DEFAULT_CAPS[k]) for k in OVS_DEFAULT_CAPS} class OVSBridge(BaseOVS): def __init__(self, br_name, datapath_type=constants.OVS_DATAPATH_SYSTEM): super(OVSBridge, self).__init__() self.br_name = br_name self.datapath_type = datapath_type self._default_cookie = generate_random_cookie() @property def default_cookie(self): return self._default_cookie def set_agent_uuid_stamp(self, val): self._default_cookie = val def set_controller(self, controllers): self.ovsdb.set_controller(self.br_name, controllers).execute(check_error=True) def del_controller(self): self.ovsdb.del_controller(self.br_name).execute(check_error=True) def get_controller(self): return self.ovsdb.get_controller(self.br_name).execute( check_error=True) def _set_bridge_fail_mode(self, mode): self.ovsdb.set_fail_mode(self.br_name, mode).execute(check_error=True) def set_secure_mode(self): self._set_bridge_fail_mode(FAILMODE_SECURE) def set_standalone_mode(self): self._set_bridge_fail_mode(FAILMODE_STANDALONE) def set_protocols(self, protocols): self.set_db_attribute('Bridge', self.br_name, 'protocols', protocols, check_error=True) def create(self, secure_mode=False): with self.ovsdb.transaction() as txn: txn.add( self.ovsdb.add_br(self.br_name, datapath_type=self.datapath_type)) if secure_mode: txn.add(self.ovsdb.set_fail_mode(self.br_name, FAILMODE_SECURE)) def destroy(self): self.delete_bridge(self.br_name) def add_port(self, port_name, *interface_attr_tuples): with self.ovsdb.transaction() as txn: txn.add(self.ovsdb.add_port(self.br_name, port_name)) if interface_attr_tuples: txn.add(self.ovsdb.db_set('Interface', port_name, *interface_attr_tuples)) return self.get_port_ofport(port_name) def replace_port(self, port_name, *interface_attr_tuples): """Replace existing port or create it, and configure port interface.""" # NOTE(xiaohhui): If del_port is inside the transaction, there will # only be one command for replace_port. This will cause the new port # not be found by system, which will lead to Bug #1519926. self.ovsdb.del_port(port_name).execute() with self.ovsdb.transaction() as txn: txn.add(self.ovsdb.add_port(self.br_name, port_name, may_exist=False)) if interface_attr_tuples: txn.add(self.ovsdb.db_set('Interface', port_name, *interface_attr_tuples)) def delete_port(self, port_name): self.ovsdb.del_port(port_name, self.br_name).execute() def run_ofctl(self, cmd, args, process_input=None): full_args = ["ovs-ofctl", cmd, self.br_name] + args # TODO(kevinbenton): This error handling is really brittle and only # detects one specific type of failure. The callers of this need to # be refactored to expect errors so we can re-raise and they can # take appropriate action based on the type of error. 
for i in range(1, 11): try: return utils.execute(full_args, run_as_root=True, process_input=process_input) except Exception as e: if "failed to connect to socket" in str(e): LOG.debug("Failed to connect to OVS. Retrying " "in 1 second. Attempt: %s/10", i) time.sleep(1) continue LOG.error(_LE("Unable to execute %(cmd)s. Exception: " "%(exception)s"), {'cmd': full_args, 'exception': e}) break def count_flows(self): flow_list = self.run_ofctl("dump-flows", []).split("\n")[1:] return len(flow_list) - 1 def remove_all_flows(self): self.run_ofctl("del-flows", []) @_ofport_retry def _get_port_ofport(self, port_name): return self.db_get_val("Interface", port_name, "ofport") def get_port_ofport(self, port_name): """Get the port's assigned ofport, retrying if not yet assigned.""" ofport = INVALID_OFPORT try: ofport = self._get_port_ofport(port_name) except retrying.RetryError: LOG.exception(_LE("Timed out retrieving ofport on port %s."), port_name) return ofport def get_datapath_id(self): return self.db_get_val('Bridge', self.br_name, 'datapath_id') def do_action_flows(self, action, kwargs_list): if action != 'del': for kw in kwargs_list: if 'cookie' not in kw: kw['cookie'] = self._default_cookie flow_strs = [_build_flow_expr_str(kw, action) for kw in kwargs_list] self.run_ofctl('%s-flows' % action, ['-'], '\n'.join(flow_strs)) def add_flow(self, **kwargs): self.do_action_flows('add', [kwargs]) def mod_flow(self, **kwargs): self.do_action_flows('mod', [kwargs]) def delete_flows(self, **kwargs): self.do_action_flows('del', [kwargs]) def dump_flows_for_table(self, table): return self.dump_flows_for(table=table) def dump_flows_for(self, **kwargs): retval = None if "cookie" in kwargs: kwargs["cookie"] = check_cookie_mask(str(kwargs["cookie"])) flow_str = ",".join("=".join([key, str(val)]) for key, val in kwargs.items()) flows = self.run_ofctl("dump-flows", [flow_str]) if flows: retval = '\n'.join(item for item in flows.splitlines() if 'NXST' not in item) return retval def dump_all_flows(self): return [f for f in self.run_ofctl("dump-flows", []).splitlines() if 'NXST' not in f] def deferred(self, **kwargs): return DeferredOVSBridge(self, **kwargs) def add_tunnel_port(self, port_name, remote_ip, local_ip, tunnel_type=p_const.TYPE_GRE, vxlan_udp_port=p_const.VXLAN_UDP_PORT, dont_fragment=True, tunnel_csum=False): attrs = [('type', tunnel_type)] # TODO(twilson) This is an OrderedDict solely to make a test happy options = collections.OrderedDict() vxlan_uses_custom_udp_port = ( tunnel_type == p_const.TYPE_VXLAN and vxlan_udp_port != p_const.VXLAN_UDP_PORT ) if vxlan_uses_custom_udp_port: options['dst_port'] = vxlan_udp_port options['df_default'] = str(dont_fragment).lower() options['remote_ip'] = remote_ip options['local_ip'] = local_ip options['in_key'] = 'flow' options['out_key'] = 'flow' if tunnel_csum: options['csum'] = str(tunnel_csum).lower() attrs.append(('options', options)) return self.add_port(port_name, *attrs) def add_patch_port(self, local_name, remote_name): attrs = [('type', 'patch'), ('options', {'peer': remote_name})] return self.add_port(local_name, *attrs) def get_iface_name_list(self): # get the interface name list for this bridge return self.ovsdb.list_ifaces(self.br_name).execute(check_error=True) def get_port_name_list(self): # get the port name list for this bridge return self.ovsdb.list_ports(self.br_name).execute(check_error=True) def get_port_stats(self, port_name): return self.db_get_val("Interface", port_name, "statistics") def get_xapi_iface_id(self, xs_vif_uuid): args = ["xe", 
"vif-param-get", "param-name=other-config", "param-key=nicira-iface-id", "uuid=%s" % xs_vif_uuid] try: return utils.execute(args, run_as_root=True).strip() except Exception as e: with excutils.save_and_reraise_exception(): LOG.error(_LE("Unable to execute %(cmd)s. " "Exception: %(exception)s"), {'cmd': args, 'exception': e}) def get_ports_attributes(self, table, columns=None, ports=None, check_error=True, log_errors=True, if_exists=False): port_names = ports or self.get_port_name_list() if not port_names: return [] return (self.ovsdb.db_list(table, port_names, columns=columns, if_exists=if_exists). execute(check_error=check_error, log_errors=log_errors)) # returns a VIF object for each VIF port def get_vif_ports(self, ofport_filter=None): edge_ports = [] port_info = self.get_ports_attributes( 'Interface', columns=['name', 'external_ids', 'ofport'], if_exists=True) for port in port_info: name = port['name'] external_ids = port['external_ids'] ofport = port['ofport'] if ofport_filter and ofport in ofport_filter: continue if "iface-id" in external_ids and "attached-mac" in external_ids: p = VifPort(name, ofport, external_ids["iface-id"], external_ids["attached-mac"], self) edge_ports.append(p) elif ("xs-vif-uuid" in external_ids and "attached-mac" in external_ids): # if this is a xenserver and iface-id is not automatically # synced to OVS from XAPI, we grab it from XAPI directly iface_id = self.get_xapi_iface_id(external_ids["xs-vif-uuid"]) p = VifPort(name, ofport, iface_id, external_ids["attached-mac"], self) edge_ports.append(p) return edge_ports def get_vif_port_to_ofport_map(self): results = self.get_ports_attributes( 'Interface', columns=['name', 'external_ids', 'ofport'], if_exists=True) port_map = {} for r in results: # fall back to basic interface name key = self.portid_from_external_ids(r['external_ids']) or r['name'] try: port_map[key] = int(r['ofport']) except TypeError: # port doesn't yet have an ofport entry so we ignore it pass return port_map def get_vif_port_set(self): edge_ports = set() results = self.get_ports_attributes( 'Interface', columns=['name', 'external_ids', 'ofport'], if_exists=True) for result in results: if result['ofport'] == UNASSIGNED_OFPORT: LOG.warning(_LW("Found not yet ready openvswitch port: %s"), result['name']) elif result['ofport'] == INVALID_OFPORT: LOG.warning(_LW("Found failed openvswitch port: %s"), result['name']) elif 'attached-mac' in result['external_ids']: port_id = self.portid_from_external_ids(result['external_ids']) if port_id: edge_ports.add(port_id) return edge_ports def portid_from_external_ids(self, external_ids): if 'iface-id' in external_ids: return external_ids['iface-id'] if 'xs-vif-uuid' in external_ids: iface_id = self.get_xapi_iface_id( external_ids['xs-vif-uuid']) return iface_id def get_port_tag_dict(self): """Get a dict of port names and associated vlan tags. e.g. the returned dict is of the following form:: {u'int-br-eth2': [], u'patch-tun': [], u'qr-76d9e6b6-21': 1, u'tapce5318ff-78': 1, u'tape1400310-e6': 1} The TAG ID is only available in the "Port" table and is not available in the "Interface" table queried by the get_vif_port_set() method. 
""" results = self.get_ports_attributes( 'Port', columns=['name', 'tag'], if_exists=True) return {p['name']: p['tag'] for p in results} def get_vifs_by_ids(self, port_ids): interface_info = self.get_ports_attributes( "Interface", columns=["name", "external_ids", "ofport"], if_exists=True) by_id = {x['external_ids'].get('iface-id'): x for x in interface_info} result = {} for port_id in port_ids: result[port_id] = None if port_id not in by_id: LOG.info(_LI("Port %(port_id)s not present in bridge " "%(br_name)s"), {'port_id': port_id, 'br_name': self.br_name}) continue pinfo = by_id[port_id] if not self._check_ofport(port_id, pinfo): continue mac = pinfo['external_ids'].get('attached-mac') result[port_id] = VifPort(pinfo['name'], pinfo['ofport'], port_id, mac, self) return result @staticmethod def _check_ofport(port_id, port_info): if port_info['ofport'] in [UNASSIGNED_OFPORT, INVALID_OFPORT]: LOG.warning(_LW("ofport: %(ofport)s for VIF: %(vif)s " "is not a positive integer"), {'ofport': port_info['ofport'], 'vif': port_id}) return False return True def get_vif_port_by_id(self, port_id): ports = self.ovsdb.db_find( 'Interface', ('external_ids', '=', {'iface-id': port_id}), ('external_ids', '!=', {'attached-mac': ''}), columns=['external_ids', 'name', 'ofport']).execute() for port in ports: if self.br_name != self.get_bridge_for_iface(port['name']): continue if not self._check_ofport(port_id, port): continue mac = port['external_ids'].get('attached-mac') return VifPort(port['name'], port['ofport'], port_id, mac, self) LOG.info(_LI("Port %(port_id)s not present in bridge %(br_name)s"), {'port_id': port_id, 'br_name': self.br_name}) def delete_ports(self, all_ports=False): if all_ports: port_names = self.get_port_name_list() else: port_names = (port.port_name for port in self.get_vif_ports()) for port_name in port_names: self.delete_port(port_name) def get_local_port_mac(self): """Retrieve the mac of the bridge's local port.""" address = ip_lib.IPDevice(self.br_name).link.address if address: return address else: msg = _('Unable to determine mac address for %s') % self.br_name raise Exception(msg) def set_controllers_connection_mode(self, connection_mode): """Set bridge controllers connection mode. 
:param connection_mode: "out-of-band" or "in-band" """ attr = [('connection_mode', connection_mode)] controllers = self.db_get_val('Bridge', self.br_name, 'controller') controllers = [controllers] if isinstance( controllers, uuid.UUID) else controllers with self.ovsdb.transaction(check_error=True) as txn: for controller_uuid in controllers: txn.add(self.ovsdb.db_set('Controller', controller_uuid, *attr)) def _set_egress_bw_limit_for_port(self, port_name, max_kbps, max_burst_kbps): with self.ovsdb.transaction(check_error=True) as txn: txn.add(self.ovsdb.db_set('Interface', port_name, ('ingress_policing_rate', max_kbps))) txn.add(self.ovsdb.db_set('Interface', port_name, ('ingress_policing_burst', max_burst_kbps))) def create_egress_bw_limit_for_port(self, port_name, max_kbps, max_burst_kbps): self._set_egress_bw_limit_for_port( port_name, max_kbps, max_burst_kbps) def get_egress_bw_limit_for_port(self, port_name): max_kbps = self.db_get_val('Interface', port_name, 'ingress_policing_rate') max_burst_kbps = self.db_get_val('Interface', port_name, 'ingress_policing_burst') max_kbps = max_kbps or None max_burst_kbps = max_burst_kbps or None return max_kbps, max_burst_kbps def delete_egress_bw_limit_for_port(self, port_name): self._set_egress_bw_limit_for_port( port_name, 0, 0) def __enter__(self): self.create() return self def __exit__(self, exc_type, exc_value, exc_tb): self.destroy() class DeferredOVSBridge(object): '''Deferred OVSBridge. This class wraps add_flow, mod_flow and delete_flows calls to an OVSBridge and defers their application until the apply_flows call, in order to perform bulk calls. It also wraps ALLOWED_PASSTHROUGHS calls to avoid mixing OVSBridge and DeferredOVSBridge uses. This class can be used as a context manager, in which case apply_flows is called on __exit__, unless an exception is raised. This class is not thread-safe, so a new instance must be created for every use. ''' ALLOWED_PASSTHROUGHS = 'add_port', 'add_tunnel_port', 'delete_port' def __init__(self, br, full_ordered=False, order=('add', 'mod', 'del')): '''Constructor. 
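A typical use as a context manager, sketched with arbitrary example flows (the flow argument values are illustrative, not prescribed)::

    br = OVSBridge('br-int')
    with DeferredOVSBridge(br) as deferred_br:
        deferred_br.add_flow(priority=10, actions='normal')
        deferred_br.delete_flows(in_port=1)
    # apply_flows() runs on __exit__ and groups the accumulated
    # add/mod/del calls into bulk do_action_flows() invocations.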
:param br: wrapped bridge :param full_ordered: Optional, disable flow reordering (slower) :param order: Optional, define in which order flow are applied ''' self.br = br self.full_ordered = full_ordered self.order = order if not self.full_ordered: self.weights = dict((y, x) for x, y in enumerate(self.order)) self.action_flow_tuples = [] def __getattr__(self, name): if name in self.ALLOWED_PASSTHROUGHS: return getattr(self.br, name) raise AttributeError(name) def add_flow(self, **kwargs): self.action_flow_tuples.append(('add', kwargs)) def mod_flow(self, **kwargs): self.action_flow_tuples.append(('mod', kwargs)) def delete_flows(self, **kwargs): self.action_flow_tuples.append(('del', kwargs)) def apply_flows(self): action_flow_tuples = self.action_flow_tuples self.action_flow_tuples = [] if not action_flow_tuples: return if not self.full_ordered: action_flow_tuples.sort(key=lambda af: self.weights[af[0]]) grouped = itertools.groupby(action_flow_tuples, key=operator.itemgetter(0)) itemgetter_1 = operator.itemgetter(1) for action, action_flow_list in grouped: flows = list(map(itemgetter_1, action_flow_list)) self.br.do_action_flows(action, flows) def __enter__(self): return self def __exit__(self, exc_type, exc_value, traceback): if exc_type is None: self.apply_flows() else: LOG.exception(_LE("OVS flows could not be applied on bridge %s"), self.br.br_name) def _build_flow_expr_str(flow_dict, cmd): flow_expr_arr = [] actions = None if cmd == 'add': flow_expr_arr.append("hard_timeout=%s" % flow_dict.pop('hard_timeout', '0')) flow_expr_arr.append("idle_timeout=%s" % flow_dict.pop('idle_timeout', '0')) flow_expr_arr.append("priority=%s" % flow_dict.pop('priority', '1')) elif 'priority' in flow_dict: msg = _("Cannot match priority on flow deletion or modification") raise exceptions.InvalidInput(error_message=msg) if cmd != 'del': if "actions" not in flow_dict: msg = _("Must specify one or more actions on flow addition" " or modification") raise exceptions.InvalidInput(error_message=msg) actions = "actions=%s" % flow_dict.pop('actions') for key, value in six.iteritems(flow_dict): if key == 'proto': flow_expr_arr.append(value) else: flow_expr_arr.append("%s=%s" % (key, str(value))) if actions: flow_expr_arr.append(actions) return ','.join(flow_expr_arr) def generate_random_cookie(): return uuid.uuid4().int & UINT64_BITMASK def check_cookie_mask(cookie): if '/' not in cookie: return cookie + '/-1' else: return cookie neutron-8.4.0/neutron/agent/rpc.py0000664000567000056710000002135713044372760020322 0ustar jenkinsjenkins00000000000000# Copyright (c) 2012 OpenStack Foundation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
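"""Agent-side RPC helpers.

This module provides create_consumers() for wiring agent RPC endpoints to their topics, PluginReportStateAPI, the client agents use to report state back to the plugin, and PluginApi, the agent-side client of the plugin rpc interface.
"""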
from datetime import datetime import itertools from oslo_log import log as logging import oslo_messaging from oslo_utils import uuidutils from neutron._i18n import _LW from neutron.common import constants from neutron.common import rpc as n_rpc from neutron.common import topics LOG = logging.getLogger(__name__) def create_consumers(endpoints, prefix, topic_details, start_listening=True): """Create agent RPC consumers. :param endpoints: The list of endpoints to process the incoming messages. :param prefix: Common prefix for the plugin/agent message queues. :param topic_details: A list of topics. Each topic has a name, an operation, and an optional host param keying the subscription to topic.host for plugin calls. :param start_listening: if True, it starts the processing loop :returns: A common Connection. """ connection = n_rpc.create_connection() for details in topic_details: topic, operation, node_name = itertools.islice( itertools.chain(details, [None]), 3) topic_name = topics.get_topic_name(prefix, topic, operation) connection.create_consumer(topic_name, endpoints, fanout=True) if node_name: node_topic_name = '%s.%s' % (topic_name, node_name) connection.create_consumer(node_topic_name, endpoints, fanout=False) if start_listening: connection.consume_in_threads() return connection class PluginReportStateAPI(object): """RPC client used to report state back to plugin. This class implements the client side of an rpc interface. The server side can be found in neutron.db.agents_db.AgentExtRpcCallback. For more information on changing rpc interfaces, see doc/source/devref/rpc_api.rst. """ def __init__(self, topic): target = oslo_messaging.Target(topic=topic, version='1.0', namespace=constants.RPC_NAMESPACE_STATE) self.client = n_rpc.get_client(target) def report_state(self, context, agent_state, use_call=False): cctxt = self.client.prepare( timeout=n_rpc.TRANSPORT.conf.rpc_response_timeout) # add unique identifier to a report # that can be logged on server side. # This create visible correspondence between events on # the agent and on the server agent_state['uuid'] = uuidutils.generate_uuid() kwargs = { 'agent_state': {'agent_state': agent_state}, 'time': datetime.utcnow().strftime(constants.ISO8601_TIME_FORMAT), } method = cctxt.call if use_call else cctxt.cast return method(context, 'report_state', **kwargs) class PluginApi(object): '''Agent side of the rpc API. API version history: 1.0 - Initial version. 
1.3 - get_device_details rpc signature upgrade to obtain 'host' and return value to include fixed_ips and device_owner for the device port 1.4 - tunnel_sync rpc signature upgrade to obtain 'host' 1.5 - Support update_device_list and get_devices_details_list_and_failed_devices ''' def __init__(self, topic): target = oslo_messaging.Target(topic=topic, version='1.0') self.client = n_rpc.get_client(target) def get_device_details(self, context, device, agent_id, host=None): cctxt = self.client.prepare() return cctxt.call(context, 'get_device_details', device=device, agent_id=agent_id, host=host) def get_devices_details_list(self, context, devices, agent_id, host=None): try: cctxt = self.client.prepare(version='1.3') res = cctxt.call(context, 'get_devices_details_list', devices=devices, agent_id=agent_id, host=host) except oslo_messaging.UnsupportedVersion: # If the server has not been upgraded yet, a DVR-enabled agent # may not work correctly, however it can function in 'degraded' # mode, in that DVR routers may not be in the system yet, and # it might be not necessary to retrieve info about the host. LOG.warning(_LW('DVR functionality requires a server upgrade.')) res = [ self.get_device_details(context, device, agent_id, host) for device in devices ] return res def get_devices_details_list_and_failed_devices(self, context, devices, agent_id, host=None): """Get devices details and the list of devices that failed. This method returns the devices details. If an error is thrown when retrieving the devices details, the device is put in a list of failed devices. """ try: cctxt = self.client.prepare(version='1.5') res = cctxt.call( context, 'get_devices_details_list_and_failed_devices', devices=devices, agent_id=agent_id, host=host) except oslo_messaging.UnsupportedVersion: #TODO(rossella_s): Remove this failback logic in M res = self._device_list_rpc_call_with_failed_dev( self.get_device_details, context, agent_id, host, devices) return res def update_device_down(self, context, device, agent_id, host=None): cctxt = self.client.prepare() return cctxt.call(context, 'update_device_down', device=device, agent_id=agent_id, host=host) def update_device_up(self, context, device, agent_id, host=None): cctxt = self.client.prepare() return cctxt.call(context, 'update_device_up', device=device, agent_id=agent_id, host=host) def _device_list_rpc_call_with_failed_dev(self, rpc_call, context, agent_id, host, devices): succeeded_devices = [] failed_devices = [] for device in devices: try: rpc_device = rpc_call(context, device, agent_id, host) except Exception: failed_devices.append(device) else: # update_device_up doesn't return the device succeeded_dev = rpc_device or device succeeded_devices.append(succeeded_dev) return {'devices': succeeded_devices, 'failed_devices': failed_devices} def update_device_list(self, context, devices_up, devices_down, agent_id, host): try: cctxt = self.client.prepare(version='1.5') res = cctxt.call(context, 'update_device_list', devices_up=devices_up, devices_down=devices_down, agent_id=agent_id, host=host) except oslo_messaging.UnsupportedVersion: #TODO(rossella_s): Remove this failback logic in M dev_up = self._device_list_rpc_call_with_failed_dev( self.update_device_up, context, agent_id, host, devices_up) dev_down = self._device_list_rpc_call_with_failed_dev( self.update_device_down, context, agent_id, host, devices_down) res = {'devices_up': dev_up.get('devices'), 'failed_devices_up': dev_up.get('failed_devices'), 'devices_down': dev_down.get('devices'), 'failed_devices_down': 
dev_down.get('failed_devices')} return res def tunnel_sync(self, context, tunnel_ip, tunnel_type=None, host=None): try: cctxt = self.client.prepare(version='1.4') res = cctxt.call(context, 'tunnel_sync', tunnel_ip=tunnel_ip, tunnel_type=tunnel_type, host=host) except oslo_messaging.UnsupportedVersion: LOG.warning(_LW('Tunnel synchronization requires a ' 'server upgrade.')) cctxt = self.client.prepare() res = cctxt.call(context, 'tunnel_sync', tunnel_ip=tunnel_ip, tunnel_type=tunnel_type) return res neutron-8.4.0/neutron/agent/metadata_agent.py0000664000567000056710000000300513044372760022462 0ustar jenkinsjenkins00000000000000# Copyright 2015 OpenStack Foundation. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import sys from oslo_config import cfg from oslo_log import log as logging from neutron.agent.common import config as agent_conf from neutron.agent.metadata import agent from neutron.agent.metadata import config as metadata_conf from neutron.common import config from neutron.common import utils from neutron.openstack.common.cache import cache LOG = logging.getLogger(__name__) def main(): cfg.CONF.register_opts(metadata_conf.SHARED_OPTS) cfg.CONF.register_opts(metadata_conf.UNIX_DOMAIN_METADATA_PROXY_OPTS) cfg.CONF.register_opts(metadata_conf.METADATA_PROXY_HANDLER_OPTS) cache.register_oslo_configs(cfg.CONF) cfg.CONF.set_default(name='cache_url', default='memory://?default_ttl=5') agent_conf.register_agent_state_opts_helper(cfg.CONF) config.init(sys.argv[1:]) config.setup_logging() utils.log_opt_values(LOG) proxy = agent.UnixDomainMetadataProxy(cfg.CONF) proxy.run() neutron-8.4.0/neutron/agent/firewall.py0000664000567000056710000001302013044372760021327 0ustar jenkinsjenkins00000000000000# Copyright 2012, Nachi Ueno, NTT MCL, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import abc import contextlib import six from neutron.common import utils from neutron.extensions import portsecurity as psec INGRESS_DIRECTION = 'ingress' EGRESS_DIRECTION = 'egress' DIRECTION_IP_PREFIX = {INGRESS_DIRECTION: 'source_ip_prefix', EGRESS_DIRECTION: 'dest_ip_prefix'} def port_sec_enabled(port): return port.get(psec.PORTSECURITY, True) def load_firewall_driver_class(driver): return utils.load_class_by_alias_or_classname( 'neutron.agent.firewall_drivers', driver) @six.add_metaclass(abc.ABCMeta) class FirewallDriver(object): """Firewall Driver base class. Defines methods that any driver providing security groups and provider firewall functionality should implement. 
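
    For illustration, the port dict described below might look like
    this (all values are made up):

        {'device': 'tap0f1d2e3c-4b',
         'fixed_ips': ['10.0.0.3'],
         'mac_address': 'fa:16:3e:aa:bb:cc',
         'security_groups': ['example-sg-id'],
         'security_group_rules': [{'ethertype': 'IPv4',
                                   'direction': 'ingress'}]}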
    Note: the port attribute should carry the security group ids and
    the security group rules.

    The port dict should have:
      device: interface name
      fixed_ips: IPs of the device
      mac_address: MAC address of the device
      security_groups: [sgid, sgid]
      security_group_rules: [rule, rule]
      where each rule must contain ethertype and direction, and may
      contain security_group_id, protocol, port_min, port_max,
      source_ip_prefix, source_port_min, source_port_max,
      dest_ip_prefix, and remote_group_id.
      Note: source_group_ip in the REST API is converted by this rule:
        if direction is ingress:
          remote_group_ip will be a source_ip_prefix
        if direction is egress:
          remote_group_ip will be a dest_ip_prefix
      Note: remote_group_id in the REST API is converted by this rule:
        if direction is ingress:
          remote_group_id will be a list of source_ip_prefix
        if direction is egress:
          remote_group_id will be a list of dest_ip_prefix
        remote_group_id also implies ongoing membership-update
        management.
    """

    # OVS agent installs arp spoofing openflow rules. If firewall is capable
    # of handling that, ovs agent doesn't need to install the protection.
    provides_arp_spoofing_protection = False

    @abc.abstractmethod
    def prepare_port_filter(self, port):
        """Prepare filters for the port.

        This method should be called before the port is created.
        """

    def apply_port_filter(self, port):
        """Apply port filter.

        Once this method returns, the port should be firewalled
        appropriately. This method should as far as possible be a no-op.
        It's vastly preferred to get everything set up in
        prepare_port_filter.
        """
        raise NotImplementedError()

    @abc.abstractmethod
    def update_port_filter(self, port):
        """Refresh security group rules from data store

        Gets called when a port gets added to or removed from the security
        group the port is a member of or if the group gains or loses a
        rule.
        """

    def remove_port_filter(self, port):
        """Stop filtering port."""
        raise NotImplementedError()

    def filter_defer_apply_on(self):
        """Defer application of filtering rule."""
        pass

    def filter_defer_apply_off(self):
        """Turn off deferral of rules and apply the rules now."""
        pass

    @property
    def ports(self):
        """Returns filtered ports."""
        pass

    @contextlib.contextmanager
    def defer_apply(self):
        """Defer apply context."""
        self.filter_defer_apply_on()
        try:
            yield
        finally:
            self.filter_defer_apply_off()

    def update_security_group_members(self, sg_id, ips):
        """Update group members in a security group."""
        raise NotImplementedError()

    def update_security_group_rules(self, sg_id, rules):
        """Update rules in a security group."""
        raise NotImplementedError()

    def security_group_updated(self, action_type, sec_group_ids,
                               device_id=None):
        """Called when a security group is updated.

        Note: This method needs to be implemented by the firewall drivers
        which use enhanced RPC for security_groups.
        """
        raise NotImplementedError()


class NoopFirewallDriver(FirewallDriver):
    """Noop Firewall Driver.

    Firewall driver which does nothing.
    This driver is for disabling the firewall functionality.
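
    A minimal sketch of how a driver class is typically obtained, assuming
    the 'noop' alias is registered under the neutron.agent.firewall_drivers
    entry point (as in the stock setup.cfg):

        driver_cls = load_firewall_driver_class('noop')
        firewall = driver_cls()
        firewall.prepare_port_filter(port)  # a no-op for this driver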
""" def prepare_port_filter(self, port): pass def apply_port_filter(self, port): pass def update_port_filter(self, port): pass def remove_port_filter(self, port): pass def filter_defer_apply_on(self): pass def filter_defer_apply_off(self): pass @property def ports(self): return {} def update_security_group_members(self, sg_id, ips): pass def update_security_group_rules(self, sg_id, rules): pass def security_group_updated(self, action_type, sec_group_ids, device_id=None): pass neutron-8.4.0/neutron/agent/l3_agent.py0000664000567000056710000000407013044372760021223 0ustar jenkinsjenkins00000000000000# Copyright (c) 2015 OpenStack Foundation. # # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import sys from oslo_config import cfg from oslo_service import service from neutron.agent.common import config from neutron.agent.l3 import config as l3_config from neutron.agent.l3 import ha from neutron.agent.linux import external_process from neutron.agent.linux import interface from neutron.agent.linux import pd from neutron.agent.linux import ra from neutron.agent.metadata import config as metadata_config from neutron.common import config as common_config from neutron.common import topics from neutron import service as neutron_service def register_opts(conf): conf.register_opts(l3_config.OPTS) conf.register_opts(metadata_config.DRIVER_OPTS) conf.register_opts(metadata_config.SHARED_OPTS) conf.register_opts(ha.OPTS) config.register_interface_driver_opts_helper(conf) config.register_agent_state_opts_helper(conf) conf.register_opts(interface.OPTS) conf.register_opts(external_process.OPTS) conf.register_opts(pd.OPTS) conf.register_opts(ra.OPTS) config.register_availability_zone_opts_helper(conf) def main(manager='neutron.agent.l3.agent.L3NATAgentWithStateReport'): register_opts(cfg.CONF) common_config.init(sys.argv[1:]) config.setup_logging() server = neutron_service.Service.create( binary='neutron-l3-agent', topic=topics.L3_AGENT, report_interval=cfg.CONF.AGENT.report_interval, manager=manager) service.launch(cfg.CONF, server).wait() neutron-8.4.0/neutron/agent/linux/0000775000567000056710000000000013044373210020302 5ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/agent/linux/tc_lib.py0000664000567000056710000002147613044372760022133 0ustar jenkinsjenkins00000000000000# Copyright 2016 OVH SAS # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import re from neutron._i18n import _ from neutron.agent.linux import ip_lib from neutron.common import exceptions from neutron.services.qos import qos_consts INGRESS_QDISC_ID = "ffff:" MAX_MTU_VALUE = 65535 SI_BASE = 1000 IEC_BASE = 1024 LATENCY_UNIT = "ms" BW_LIMIT_UNIT = "kbit" # kilobits per second in tc's notation BURST_UNIT = "kbit" # kilobits in tc's notation # Those are RATES (bits per second) and SIZE (bytes) unit names from tc manual UNITS = { "k": 1, "m": 2, "g": 3, "t": 4 } filters_pattern = re.compile(r"police \w+ rate (\w+) burst (\w+)") tbf_pattern = re.compile( r"qdisc (\w+) \w+: \w+ refcnt \d rate (\w+) burst (\w+) \w*") class InvalidKernelHzValue(exceptions.NeutronException): message = _("Kernel HZ value %(value)s is not valid. This value must be " "greater than 0.") class InvalidUnit(exceptions.NeutronException): message = _("Unit name '%(unit)s' is not valid.") def convert_to_kilobits(value, base): value = value.lower() if "bit" in value: input_in_bits = True value = value.replace("bit", "") else: input_in_bits = False value = value.replace("b", "") # if it is now bare number then it is in bits, so we return it simply if value.isdigit(): value = int(value) if input_in_bits: return bits_to_kilobits(value, base) else: bits_value = bytes_to_bits(value) return bits_to_kilobits(bits_value, base) unit = value[-1:] if unit not in UNITS.keys(): raise InvalidUnit(unit=unit) val = int(value[:-1]) if input_in_bits: bits_value = val * (base ** UNITS[unit]) else: bits_value = bytes_to_bits(val * (base ** UNITS[unit])) return bits_to_kilobits(bits_value, base) def bytes_to_bits(value): return value * 8 def bits_to_kilobits(value, base): #NOTE(slaweq): round up that even 1 bit will give 1 kbit as a result return int((value + (base - 1)) / base) class TcCommand(ip_lib.IPDevice): def __init__(self, name, kernel_hz, namespace=None): if kernel_hz <= 0: raise InvalidKernelHzValue(value=kernel_hz) super(TcCommand, self).__init__(name, namespace=namespace) self.kernel_hz = kernel_hz def _execute_tc_cmd(self, cmd, **kwargs): cmd = ['tc'] + cmd ip_wrapper = ip_lib.IPWrapper(self.namespace) return ip_wrapper.netns.execute(cmd, run_as_root=True, **kwargs) @staticmethod def get_ingress_qdisc_burst_value(bw_limit, burst_limit): """Return burst value used in ingress qdisc. 
If a burst value is not given, it will be set to a default value
        proportional to the rate, to ensure that the limit works well for
        TCP traffic.
        """
        if not burst_limit:
            return float(bw_limit) * qos_consts.DEFAULT_BURST_RATE
        return burst_limit

    def get_filters_bw_limits(self, qdisc_id=INGRESS_QDISC_ID):
        cmd = ['filter', 'show', 'dev', self.name, 'parent', qdisc_id]
        cmd_result = self._execute_tc_cmd(cmd)
        if not cmd_result:
            return None, None
        for line in cmd_result.split("\n"):
            m = filters_pattern.match(line.strip())
            if m:
                #NOTE(slaweq): because tc is giving bw limit in SI units
                # we need to calculate it as 1000bit = 1kbit:
                bw_limit = convert_to_kilobits(m.group(1), SI_BASE)
                #NOTE(slaweq): because tc is giving burst limit in IEC units
                # we need to calculate it as 1024bit = 1kbit:
                burst_limit = convert_to_kilobits(m.group(2), IEC_BASE)
                return bw_limit, burst_limit
        return None, None

    def get_tbf_bw_limits(self):
        cmd = ['qdisc', 'show', 'dev', self.name]
        cmd_result = self._execute_tc_cmd(cmd)
        if not cmd_result:
            return None, None
        m = tbf_pattern.match(cmd_result)
        if not m:
            return None, None
        qdisc_name = m.group(1)
        if qdisc_name != "tbf":
            return None, None
        #NOTE(slaweq): because tc is giving bw limit in SI units
        # we need to calculate it as 1000bit = 1kbit:
        bw_limit = convert_to_kilobits(m.group(2), SI_BASE)
        #NOTE(slaweq): because tc is giving burst limit in IEC units
        # we need to calculate it as 1024bit = 1kbit:
        burst_limit = convert_to_kilobits(m.group(3), IEC_BASE)
        return bw_limit, burst_limit

    def set_filters_bw_limit(self, bw_limit, burst_limit):
        """Set an ingress qdisc and a filter to police ingress traffic.

        This polices traffic coming in to the interface which, from the
        instance's point of view, limits its egress traffic.
        """
        # Because 'tc filter replace' does not work properly and adds a new
        # filter each time instead of replacing the existing one, the old
        # ingress qdisc has to be deleted first and a new one added, so
        # update is called to do that:
        return self.update_filters_bw_limit(bw_limit, burst_limit)

    def set_tbf_bw_limit(self, bw_limit, burst_limit, latency_value):
        """Set a token bucket filter (tbf) qdisc on the device.

        This limits the speed of packets going out of the interface
        which, from the instance's point of view, limits its ingress
        traffic.
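
        A rough usage sketch (values are arbitrary):

            tc = TcCommand('tap-example', kernel_hz=250)
            # 1000 kbit/s rate, 800 kbit burst, 50 ms latency
            tc.set_tbf_bw_limit(1000, 800, 50)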
""" return self._replace_tbf_qdisc(bw_limit, burst_limit, latency_value) def update_filters_bw_limit(self, bw_limit, burst_limit, qdisc_id=INGRESS_QDISC_ID): self.delete_filters_bw_limit() return self._set_filters_bw_limit(bw_limit, burst_limit, qdisc_id) def update_tbf_bw_limit(self, bw_limit, burst_limit, latency_value): return self._replace_tbf_qdisc(bw_limit, burst_limit, latency_value) def delete_filters_bw_limit(self): #NOTE(slaweq): For limit traffic egress from instance we need to use # qdisc "ingress" because it is ingress traffic from interface POV: self._delete_qdisc("ingress") def delete_tbf_bw_limit(self): self._delete_qdisc("root") def _set_filters_bw_limit(self, bw_limit, burst_limit, qdisc_id=INGRESS_QDISC_ID): cmd = ['qdisc', 'add', 'dev', self.name, 'ingress', 'handle', qdisc_id] self._execute_tc_cmd(cmd) return self._add_policy_filter(bw_limit, burst_limit) def _delete_qdisc(self, qdisc_name): cmd = ['qdisc', 'del', 'dev', self.name, qdisc_name] # Return_code=2 is fine because it means # "RTNETLINK answers: No such file or directory" what is fine when we # are trying to delete qdisc return self._execute_tc_cmd(cmd, extra_ok_codes=[2]) def _get_tbf_burst_value(self, bw_limit, burst_limit): min_burst_value = float(bw_limit) / float(self.kernel_hz) return max(min_burst_value, burst_limit) def _replace_tbf_qdisc(self, bw_limit, burst_limit, latency_value): burst = "%s%s" % ( self._get_tbf_burst_value(bw_limit, burst_limit), BURST_UNIT) latency = "%s%s" % (latency_value, LATENCY_UNIT) rate_limit = "%s%s" % (bw_limit, BW_LIMIT_UNIT) cmd = [ 'qdisc', 'replace', 'dev', self.name, 'root', 'tbf', 'rate', rate_limit, 'latency', latency, 'burst', burst ] return self._execute_tc_cmd(cmd) def _add_policy_filter(self, bw_limit, burst_limit, qdisc_id=INGRESS_QDISC_ID): rate_limit = "%s%s" % (bw_limit, BW_LIMIT_UNIT) burst = "%s%s" % ( self.get_ingress_qdisc_burst_value(bw_limit, burst_limit), BURST_UNIT ) #NOTE(slaweq): it is made in exactly same way how openvswitch is doing # it when configuing ingress traffic limit on port. It can be found in # lib/netdev-linux.c#L4698 in openvswitch sources: cmd = [ 'filter', 'add', 'dev', self.name, 'parent', qdisc_id, 'protocol', 'all', 'prio', '49', 'basic', 'police', 'rate', rate_limit, 'burst', burst, 'mtu', MAX_MTU_VALUE, 'drop'] return self._execute_tc_cmd(cmd) neutron-8.4.0/neutron/agent/linux/ip_lib.py0000664000567000056710000012336513044372760022135 0ustar jenkinsjenkins00000000000000# Copyright 2012 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import os import re import debtcollector import eventlet import netaddr from oslo_config import cfg from oslo_log import log as logging from oslo_utils import excutils import six from neutron._i18n import _, _LE, _LW from neutron.agent.common import utils from neutron.common import constants from neutron.common import exceptions LOG = logging.getLogger(__name__) OPTS = [ cfg.BoolOpt('ip_lib_force_root', default=False, help=_('Force ip_lib calls to use the root helper')), ] IP_NONLOCAL_BIND = 'net.ipv4.ip_nonlocal_bind' LOOPBACK_DEVNAME = 'lo' GRE_TUNNEL_DEVICE_NAMES = ['gre0', 'gretap0'] SYS_NET_PATH = '/sys/class/net' DEFAULT_GW_PATTERN = re.compile(r"via (\S+)") METRIC_PATTERN = re.compile(r"metric (\S+)") DEVICE_NAME_PATTERN = re.compile(r"(\d+?): (\S+?):.*") def remove_interface_suffix(interface): """Remove a possible "@" suffix from an interface' name. This suffix can appear in some kernel versions, and intends on specifying, for example, a veth's pair. However, this interface name is useless to us as further 'ip' commands require that the suffix be removed. """ # If '@' is not present, this will do nothing. return interface.partition("@")[0] class AddressNotReady(exceptions.NeutronException): message = _("Failure waiting for address %(address)s to " "become ready: %(reason)s") class SubProcessBase(object): def __init__(self, namespace=None, log_fail_as_error=True): self.namespace = namespace self.log_fail_as_error = log_fail_as_error try: self.force_root = cfg.CONF.ip_lib_force_root except cfg.NoSuchOptError: # Only callers that need to force use of the root helper # need to register the option. self.force_root = False def _run(self, options, command, args): if self.namespace: return self._as_root(options, command, args) elif self.force_root: # Force use of the root helper to ensure that commands # will execute in dom0 when running under XenServer/XCP. return self._execute(options, command, args, run_as_root=True, log_fail_as_error=self.log_fail_as_error) else: return self._execute(options, command, args, log_fail_as_error=self.log_fail_as_error) def _as_root(self, options, command, args, use_root_namespace=False): namespace = self.namespace if not use_root_namespace else None return self._execute(options, command, args, run_as_root=True, namespace=namespace, log_fail_as_error=self.log_fail_as_error) @classmethod def _execute(cls, options, command, args, run_as_root=False, namespace=None, log_fail_as_error=True): opt_list = ['-%s' % o for o in options] ip_cmd = add_namespace_to_cmd(['ip'], namespace) cmd = ip_cmd + opt_list + [command] + list(args) return utils.execute(cmd, run_as_root=run_as_root, log_fail_as_error=log_fail_as_error) def set_log_fail_as_error(self, fail_with_error): self.log_fail_as_error = fail_with_error def get_log_fail_as_error(self): return self.log_fail_as_error class IPWrapper(SubProcessBase): def __init__(self, namespace=None): super(IPWrapper, self).__init__(namespace=namespace) self.netns = IpNetnsCommand(self) def device(self, name): return IPDevice(name, namespace=self.namespace) def get_devices(self, exclude_loopback=False, exclude_gre_devices=False): retval = [] if self.namespace: # we call out manually because in order to avoid screen scraping # iproute2 we use find to see what is in the sysfs directory, as # suggested by Stephen Hemminger (iproute2 dev). 
try: cmd = ['ip', 'netns', 'exec', self.namespace, 'find', SYS_NET_PATH, '-maxdepth', '1', '-type', 'l', '-printf', '%f '] output = utils.execute( cmd, run_as_root=True, log_fail_as_error=self.log_fail_as_error).split() except RuntimeError: # We could be racing with a cron job deleting namespaces. # Just return a empty list if the namespace is deleted. with excutils.save_and_reraise_exception() as ctx: if not self.netns.exists(self.namespace): ctx.reraise = False return [] else: output = ( i for i in os.listdir(SYS_NET_PATH) if os.path.islink(os.path.join(SYS_NET_PATH, i)) ) for name in output: if (exclude_loopback and name == LOOPBACK_DEVNAME or exclude_gre_devices and name in GRE_TUNNEL_DEVICE_NAMES): continue retval.append(IPDevice(name, namespace=self.namespace)) return retval def get_device_by_ip(self, ip): """Get the IPDevice from system which has ip configured. @param ip: look for the device holding this ip. If this is None, None is returned. @type ip: str. """ if not ip: return None addr = IpAddrCommand(self) devices = addr.get_devices_with_ip(to=ip) if devices: return IPDevice(devices[0]['name'], namespace=self.namespace) def add_tuntap(self, name, mode='tap'): self._as_root([], 'tuntap', ('add', name, 'mode', mode)) return IPDevice(name, namespace=self.namespace) def add_veth(self, name1, name2, namespace2=None): args = ['add', name1, 'type', 'veth', 'peer', 'name', name2] if namespace2 is None: namespace2 = self.namespace else: self.ensure_namespace(namespace2) args += ['netns', namespace2] self._as_root([], 'link', tuple(args)) return (IPDevice(name1, namespace=self.namespace), IPDevice(name2, namespace=namespace2)) def add_macvtap(self, name, src_dev, mode='bridge'): args = ['add', 'link', src_dev, 'name', name, 'type', 'macvtap', 'mode', mode] self._as_root([], 'link', tuple(args)) return IPDevice(name, namespace=self.namespace) def del_veth(self, name): """Delete a virtual interface between two namespaces.""" self._as_root([], 'link', ('del', name)) def add_dummy(self, name): """Create a Linux dummy interface with the given name.""" self._as_root([], 'link', ('add', name, 'type', 'dummy')) return IPDevice(name, namespace=self.namespace) def ensure_namespace(self, name): if not self.netns.exists(name): ip = self.netns.add(name) lo = ip.device(LOOPBACK_DEVNAME) lo.link.set_up() else: ip = IPWrapper(namespace=name) return ip def namespace_is_empty(self): return not self.get_devices(exclude_loopback=True, exclude_gre_devices=True) def garbage_collect_namespace(self): """Conditionally destroy the namespace if it is empty.""" if self.namespace and self.netns.exists(self.namespace): if self.namespace_is_empty(): self.netns.delete(self.namespace) return True return False def add_device_to_namespace(self, device): if self.namespace: device.link.set_netns(self.namespace) def add_vlan(self, name, physical_interface, vlan_id): cmd = ['add', 'link', physical_interface, 'name', name, 'type', 'vlan', 'id', vlan_id] self._as_root([], 'link', cmd) return IPDevice(name, namespace=self.namespace) def add_vxlan(self, name, vni, group=None, dev=None, ttl=None, tos=None, local=None, port=None, proxy=False): cmd = ['add', name, 'type', 'vxlan', 'id', vni] if group: cmd.extend(['group', group]) if dev: cmd.extend(['dev', dev]) if ttl: cmd.extend(['ttl', ttl]) if tos: cmd.extend(['tos', tos]) if local: cmd.extend(['local', local]) if proxy: cmd.append('proxy') # tuple: min,max if port and len(port) == 2: cmd.extend(['port', port[0], port[1]]) elif port: raise 
exceptions.NetworkVxlanPortRangeError(vxlan_range=port) self._as_root([], 'link', cmd) return (IPDevice(name, namespace=self.namespace)) @classmethod def get_namespaces(cls): output = cls._execute([], 'netns', ('list',)) return [l.split()[0] for l in output.splitlines()] class IPDevice(SubProcessBase): def __init__(self, name, namespace=None): super(IPDevice, self).__init__(namespace=namespace) self._name = name self.link = IpLinkCommand(self) self.addr = IpAddrCommand(self) self.route = IpRouteCommand(self) self.neigh = IpNeighCommand(self) def __eq__(self, other): return (other is not None and self.name == other.name and self.namespace == other.namespace) def __str__(self): return self.name def exists(self): """Return True if the device exists in the namespace.""" # we must save and restore this before returning orig_log_fail_as_error = self.get_log_fail_as_error() self.set_log_fail_as_error(False) try: return bool(self.link.address) except RuntimeError: return False finally: self.set_log_fail_as_error(orig_log_fail_as_error) def delete_addr_and_conntrack_state(self, cidr): """Delete an address along with its conntrack state This terminates any active connections through an IP. :param cidr: the IP address for which state should be removed. This can be passed as a string with or without /NN. A netaddr.IPAddress or netaddr.Network representing the IP address can also be passed. """ self.addr.delete(cidr) ip_str = str(netaddr.IPNetwork(cidr).ip) ip_wrapper = IPWrapper(namespace=self.namespace) # Delete conntrack state for ingress traffic # If 0 flow entries have been deleted # conntrack -D will return 1 try: ip_wrapper.netns.execute(["conntrack", "-D", "-d", ip_str], check_exit_code=True, extra_ok_codes=[1]) except RuntimeError: LOG.exception(_LE("Failed deleting ingress connection state of" " floatingip %s"), ip_str) # Delete conntrack state for egress traffic try: ip_wrapper.netns.execute(["conntrack", "-D", "-q", ip_str], check_exit_code=True, extra_ok_codes=[1]) except RuntimeError: LOG.exception(_LE("Failed deleting egress connection state of" " floatingip %s"), ip_str) def _sysctl(self, cmd): """execute() doesn't return the exit status of the command it runs, it returns stdout and stderr. Setting check_exit_code=True will cause it to raise a RuntimeError if the exit status of the command is non-zero, which in sysctl's case is an error. So we're normalizing that into zero (success) and one (failure) here to mimic what "echo $?" in a shell would be. This is all because sysctl is too verbose and prints the value you just set on success, unlike most other utilities that print nothing. execute() will have dumped a message to the logs with the actual output on failure, so it's not lost, and we don't need to print it here. 
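
        For example, a hypothetical call such as
        self._sysctl(['net.ipv4.ip_forward=1']) returns 0 if the value
        was set and 1 if sysctl failed.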
""" cmd = ['sysctl', '-w'] + cmd ip_wrapper = IPWrapper(self.namespace) try: ip_wrapper.netns.execute(cmd, run_as_root=True, check_exit_code=True) except RuntimeError: LOG.exception(_LE("Failed running %s"), cmd) return 1 return 0 def disable_ipv6(self): sysctl_name = re.sub(r'\.', '/', self.name) cmd = 'net.ipv6.conf.%s.disable_ipv6=1' % sysctl_name return self._sysctl([cmd]) @property def name(self): if self._name: return self._name[:constants.DEVICE_NAME_MAX_LEN] return self._name @name.setter def name(self, name): self._name = name class IpCommandBase(object): COMMAND = '' def __init__(self, parent): self._parent = parent def _run(self, options, args): return self._parent._run(options, self.COMMAND, args) def _as_root(self, options, args, use_root_namespace=False): return self._parent._as_root(options, self.COMMAND, args, use_root_namespace=use_root_namespace) class IPRule(SubProcessBase): def __init__(self, namespace=None): super(IPRule, self).__init__(namespace=namespace) self.rule = IpRuleCommand(self) class IpRuleCommand(IpCommandBase): COMMAND = 'rule' @staticmethod def _make_canonical(ip_version, settings): """Converts settings to a canonical representation to compare easily""" def canonicalize_fwmark_string(fwmark_mask): """Reformats fwmark/mask in to a canonical form Examples, these are all equivalent: "0x1" 0x1 "0x1/0xfffffffff" (0x1, 0xfffffffff) :param fwmark_mask: The firewall and mask (default 0xffffffff) :type fwmark_mask: A string with / as delimiter, an iterable, or a single value. """ # Turn the value we were passed in to an iterable: fwmark[, mask] if isinstance(fwmark_mask, six.string_types): # A / separates the optional mask in a string iterable = fwmark_mask.split('/') else: try: iterable = iter(fwmark_mask) except TypeError: # At this point, it must be a single integer iterable = [fwmark_mask] def to_i(s): if isinstance(s, six.string_types): # Passing 0 as "base" arg to "int" causes it to determine # the base automatically. return int(s, 0) # s isn't a string, can't specify base argument return int(s) integers = [to_i(x) for x in iterable] # The default mask is all ones, the mask is 32 bits. if len(integers) == 1: integers.append(0xffffffff) # We now have two integers in a list. Convert to canonical string. return '{0:#x}/{1:#x}'.format(*integers) def canonicalize(item): k, v = item # ip rule shows these as 'any' if k == 'from' and v == 'all': return k, constants.IP_ANY[ip_version] # lookup and table are interchangeable. Use table every time. if k == 'lookup': return 'table', v if k == 'fwmark': return k, canonicalize_fwmark_string(v) return k, v if 'type' not in settings: settings['type'] = 'unicast' return {k: str(v) for k, v in map(canonicalize, settings.items())} def _parse_line(self, ip_version, line): # Typical rules from 'ip rule show': # 4030201: from 1.2.3.4/24 lookup 10203040 # 1024: from all iif qg-c43b1928-48 lookup noscope parts = line.split() if not parts: return {} # Format of line is: "priority: ... []" settings = {k: v for k, v in zip(parts[1::2], parts[2::2])} settings['priority'] = parts[0][:-1] if len(parts) % 2 == 0: # When line has an even number of columns, last one is the type. 
settings['type'] = parts[-1] return self._make_canonical(ip_version, settings) def list_rules(self, ip_version): lines = self._as_root([ip_version], ['show']).splitlines() return [self._parse_line(ip_version, line) for line in lines] def _exists(self, ip_version, **kwargs): return kwargs in self.list_rules(ip_version) def _make__flat_args_tuple(self, *args, **kwargs): for kwargs_item in sorted(kwargs.items(), key=lambda i: i[0]): args += kwargs_item return tuple(args) def add(self, ip, **kwargs): ip_version = get_ip_version(ip) kwargs.update({'from': ip}) canonical_kwargs = self._make_canonical(ip_version, kwargs) if not self._exists(ip_version, **canonical_kwargs): args_tuple = self._make__flat_args_tuple('add', **canonical_kwargs) self._as_root([ip_version], args_tuple) def delete(self, ip, **kwargs): ip_version = get_ip_version(ip) # TODO(Carl) ip ignored in delete, okay in general? canonical_kwargs = self._make_canonical(ip_version, kwargs) args_tuple = self._make__flat_args_tuple('del', **canonical_kwargs) self._as_root([ip_version], args_tuple) class IpDeviceCommandBase(IpCommandBase): @property def name(self): return self._parent.name class IpLinkCommand(IpDeviceCommandBase): COMMAND = 'link' def set_address(self, mac_address): self._as_root([], ('set', self.name, 'address', mac_address)) def set_allmulticast_on(self): self._as_root([], ('set', self.name, 'allmulticast', 'on')) def set_mtu(self, mtu_size): self._as_root([], ('set', self.name, 'mtu', mtu_size)) def set_up(self): return self._as_root([], ('set', self.name, 'up')) def set_down(self): return self._as_root([], ('set', self.name, 'down')) def set_netns(self, namespace): self._as_root([], ('set', self.name, 'netns', namespace)) self._parent.namespace = namespace def set_name(self, name): self._as_root([], ('set', self.name, 'name', name)) self._parent.name = name def set_alias(self, alias_name): self._as_root([], ('set', self.name, 'alias', alias_name)) def delete(self): self._as_root([], ('delete', self.name)) @property def address(self): return self.attributes.get('link/ether') @property def state(self): return self.attributes.get('state') @property def mtu(self): return self.attributes.get('mtu') @property def qdisc(self): return self.attributes.get('qdisc') @property def qlen(self): return self.attributes.get('qlen') @property def alias(self): return self.attributes.get('alias') @property def attributes(self): return self._parse_line(self._run(['o'], ('show', self.name))) def _parse_line(self, value): if not value: return {} device_name, settings = value.replace("\\", '').split('>', 1) tokens = settings.split() keys = tokens[::2] values = [int(v) if v.isdigit() else v for v in tokens[1::2]] retval = dict(zip(keys, values)) return retval class IpAddrCommand(IpDeviceCommandBase): COMMAND = 'addr' def add(self, cidr, scope='global', add_broadcast=True): net = netaddr.IPNetwork(cidr) args = ['add', cidr, 'scope', scope, 'dev', self.name] if add_broadcast and net.version == 4: args += ['brd', str(net[-1])] self._as_root([net.version], tuple(args)) def delete(self, cidr): ip_version = get_ip_version(cidr) self._as_root([ip_version], ('del', cidr, 'dev', self.name)) def flush(self, ip_version): self._as_root([ip_version], ('flush', self.name)) def get_devices_with_ip(self, name=None, scope=None, to=None, filters=None, ip_version=None): """Get a list of all the devices with an IP attached in the namespace. @param name: if it's not None, only a device with that matching name will be returned. 
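
        Each entry in the returned list is a dict; an illustrative
        example:

            {'name': 'eth0', 'cidr': '192.0.2.2/24', 'scope': 'global',
             'dynamic': False, 'tentative': False, 'dadfailed': False}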
""" options = [ip_version] if ip_version else [] args = ['show'] if name: args += [name] if filters: args += filters if scope: args += ['scope', scope] if to: args += ['to', to] retval = [] for line in self._run(options, tuple(args)).split('\n'): line = line.strip() match = DEVICE_NAME_PATTERN.search(line) if match: # Found a match for a device name, but its' addresses will # only appear in following lines, so we may as well continue. device_name = remove_interface_suffix(match.group(2)) continue elif not line.startswith('inet'): continue parts = line.split(" ") if parts[0] == 'inet6': scope = parts[3] else: if parts[2] == 'brd': scope = parts[5] else: scope = parts[3] retval.append(dict(name=device_name, cidr=parts[1], scope=scope, dynamic=('dynamic' == parts[-1]), tentative=('tentative' in line), dadfailed=('dadfailed' == parts[-1]))) return retval def list(self, scope=None, to=None, filters=None, ip_version=None): """Get device details of a device named .""" return self.get_devices_with_ip( self.name, scope, to, filters, ip_version) def wait_until_address_ready(self, address, wait_time=30): """Wait until an address is no longer marked 'tentative' raises AddressNotReady if times out or address not present on interface """ def is_address_ready(): try: addr_info = self.list(to=address)[0] except IndexError: raise AddressNotReady( address=address, reason=_('Address not present on interface')) if not addr_info['tentative']: return True if addr_info['dadfailed']: raise AddressNotReady( address=address, reason=_('Duplicate address detected')) errmsg = _("Exceeded %s second limit waiting for " "address to leave the tentative state.") % wait_time utils.utils.wait_until_true( is_address_ready, timeout=wait_time, sleep=0.20, exception=AddressNotReady(address=address, reason=errmsg)) class IpRouteCommand(IpDeviceCommandBase): COMMAND = 'route' def __init__(self, parent, table=None): super(IpRouteCommand, self).__init__(parent) self._table = table def table(self, table): """Return an instance of IpRouteCommand which works on given table""" return IpRouteCommand(self._parent, table) def _table_args(self, override=None): if override: return ['table', override] return ['table', self._table] if self._table else [] def _dev_args(self): return ['dev', self.name] if self.name else [] def add_gateway(self, gateway, metric=None, table=None): ip_version = get_ip_version(gateway) args = ['replace', 'default', 'via', gateway] if metric: args += ['metric', metric] args += self._dev_args() args += self._table_args(table) self._as_root([ip_version], tuple(args)) def _run_as_root_detect_device_not_found(self, *args, **kwargs): try: return self._as_root(*args, **kwargs) except RuntimeError as rte: with excutils.save_and_reraise_exception() as ctx: if "Cannot find device" in str(rte): ctx.reraise = False raise exceptions.DeviceNotFoundError(device_name=self.name) def delete_gateway(self, gateway, table=None): ip_version = get_ip_version(gateway) args = ['del', 'default', 'via', gateway] args += self._dev_args() args += self._table_args(table) self._run_as_root_detect_device_not_found([ip_version], tuple(args)) def _parse_routes(self, ip_version, output, **kwargs): for line in output.splitlines(): parts = line.split() # Format of line is: "|default [ ] ..." 
route = {k: v for k, v in zip(parts[1::2], parts[2::2])} route['cidr'] = parts[0] # Avoids having to explicitly pass around the IP version if route['cidr'] == 'default': route['cidr'] = constants.IP_ANY[ip_version] # ip route drops things like scope and dev from the output if it # was specified as a filter. This allows us to add them back. if self.name: route['dev'] = self.name if self._table: route['table'] = self._table # Callers add any filters they use as kwargs route.update(kwargs) yield route def list_routes(self, ip_version, **kwargs): args = ['list'] args += self._dev_args() args += self._table_args() for k, v in kwargs.items(): args += [k, v] output = self._run([ip_version], tuple(args)) return [r for r in self._parse_routes(ip_version, output, **kwargs)] def list_onlink_routes(self, ip_version): routes = self.list_routes(ip_version, scope='link') return [r for r in routes if 'src' not in r] def add_onlink_route(self, cidr): self.add_route(cidr, scope='link') def delete_onlink_route(self, cidr): self.delete_route(cidr, scope='link') def get_gateway(self, scope=None, filters=None, ip_version=None): options = [ip_version] if ip_version else [] args = ['list'] args += self._dev_args() args += self._table_args() if filters: args += filters retval = None if scope: args += ['scope', scope] route_list_lines = self._run(options, tuple(args)).split('\n') default_route_line = next((x.strip() for x in route_list_lines if x.strip().startswith('default')), None) if default_route_line: retval = dict() gateway = DEFAULT_GW_PATTERN.search(default_route_line) if gateway: retval.update(gateway=gateway.group(1)) metric = METRIC_PATTERN.search(default_route_line) if metric: retval.update(metric=int(metric.group(1))) return retval @debtcollector.removals.remove(message="Will be removed in the N cycle.") def pullup_route(self, interface_name, ip_version): """Ensures that the route entry for the interface is before all others on the same subnet. 
""" options = [ip_version] device_list = [] device_route_list_lines = self._run(options, ('list', 'proto', 'kernel', 'dev', interface_name) ).split('\n') for device_route_line in device_route_list_lines: try: subnet = device_route_line.split()[0] except Exception: continue subnet_route_list_lines = self._run(options, ('list', 'proto', 'kernel', 'match', subnet) ).split('\n') for subnet_route_line in subnet_route_list_lines: i = iter(subnet_route_line.split()) while(next(i) != 'dev'): pass device = next(i) try: while(next(i) != 'src'): pass src = next(i) except Exception: src = '' if device != interface_name: device_list.append((device, src)) else: break for (device, src) in device_list: self._as_root(options, ('del', subnet, 'dev', device)) if (src != ''): self._as_root(options, ('append', subnet, 'proto', 'kernel', 'src', src, 'dev', device)) else: self._as_root(options, ('append', subnet, 'proto', 'kernel', 'dev', device)) def flush(self, ip_version, table=None, **kwargs): args = ['flush'] args += self._table_args(table) for k, v in kwargs.items(): args += [k, v] self._as_root([ip_version], tuple(args)) def add_route(self, cidr, via=None, table=None, **kwargs): ip_version = get_ip_version(cidr) args = ['replace', cidr] if via: args += ['via', via] args += self._dev_args() args += self._table_args(table) for k, v in kwargs.items(): args += [k, v] self._run_as_root_detect_device_not_found([ip_version], tuple(args)) def delete_route(self, cidr, via=None, table=None, **kwargs): ip_version = get_ip_version(cidr) args = ['del', cidr] if via: args += ['via', via] args += self._dev_args() args += self._table_args(table) for k, v in kwargs.items(): args += [k, v] self._run_as_root_detect_device_not_found([ip_version], tuple(args)) class IPRoute(SubProcessBase): def __init__(self, namespace=None, table=None): super(IPRoute, self).__init__(namespace=namespace) self.name = None self.route = IpRouteCommand(self, table=table) class IpNeighCommand(IpDeviceCommandBase): COMMAND = 'neigh' def add(self, ip_address, mac_address): ip_version = get_ip_version(ip_address) self._as_root([ip_version], ('replace', ip_address, 'lladdr', mac_address, 'nud', 'permanent', 'dev', self.name)) def delete(self, ip_address, mac_address): ip_version = get_ip_version(ip_address) self._as_root([ip_version], ('del', ip_address, 'lladdr', mac_address, 'dev', self.name)) def show(self, ip_version): options = [ip_version] return self._as_root(options, ('show', 'dev', self.name)) def flush(self, ip_version, ip_address): """Flush neighbour entries Given address entry is removed from neighbour cache (ARP or NDP). To flush all entries pass string 'all' as an address. 
:param ip_version: Either 4 or 6 for IPv4 or IPv6 respectively :param ip_address: The prefix selecting the neighbours to flush """ self._as_root([ip_version], ('flush', 'to', ip_address)) class IpNetnsCommand(IpCommandBase): COMMAND = 'netns' def add(self, name): self._as_root([], ('add', name), use_root_namespace=True) wrapper = IPWrapper(namespace=name) wrapper.netns.execute(['sysctl', '-w', 'net.ipv4.conf.all.promote_secondaries=1']) return wrapper def delete(self, name): self._as_root([], ('delete', name), use_root_namespace=True) def execute(self, cmds, addl_env=None, check_exit_code=True, log_fail_as_error=True, extra_ok_codes=None, run_as_root=False): ns_params = [] kwargs = {'run_as_root': run_as_root} if self._parent.namespace: kwargs['run_as_root'] = True ns_params = ['ip', 'netns', 'exec', self._parent.namespace] env_params = [] if addl_env: env_params = (['env'] + ['%s=%s' % pair for pair in addl_env.items()]) cmd = ns_params + env_params + list(cmds) return utils.execute(cmd, check_exit_code=check_exit_code, extra_ok_codes=extra_ok_codes, log_fail_as_error=log_fail_as_error, **kwargs) def exists(self, name): output = self._parent._execute( ['o'], 'netns', ['list'], run_as_root=cfg.CONF.AGENT.use_helper_for_ns_read) for line in [l.split()[0] for l in output.splitlines()]: if name == line: return True return False def vlan_in_use(segmentation_id, namespace=None): """Return True if VLAN ID is in use by an interface, else False.""" ip_wrapper = IPWrapper(namespace=namespace) interfaces = ip_wrapper.netns.execute(["ip", "-d", "link", "list"], check_exit_code=True) return '802.1Q id %s ' % segmentation_id in interfaces def vxlan_in_use(segmentation_id, namespace=None): """Return True if VXLAN VNID is in use by an interface, else False.""" ip_wrapper = IPWrapper(namespace=namespace) interfaces = ip_wrapper.netns.execute(["ip", "-d", "link", "list"], check_exit_code=True) return 'vxlan id %s ' % segmentation_id in interfaces def device_exists(device_name, namespace=None): """Return True if the device exists in the namespace.""" return IPDevice(device_name, namespace=namespace).exists() def device_exists_with_ips_and_mac(device_name, ip_cidrs, mac, namespace=None): """Return True if the device with the given IP addresses and MAC address exists in the namespace. """ try: device = IPDevice(device_name, namespace=namespace) if mac != device.link.address: return False device_ip_cidrs = [ip['cidr'] for ip in device.addr.list()] for ip_cidr in ip_cidrs: if ip_cidr not in device_ip_cidrs: return False except RuntimeError: return False else: return True _IP_ROUTE_PARSE_KEYS = { 'via': 'nexthop', 'dev': 'device', 'scope': 'scope' } def _parse_ip_route_line(line): """Parse a line output from ip route. Example for output from 'ip route': default via 192.168.3.120 dev wlp3s0 proto static metric 1024 10.0.0.0/8 dev tun0 proto static scope link metric 1024 10.0.1.0/8 dev tun1 proto static scope link metric 1024 linkdown The first column is the destination, followed by key/value pairs and flags. @param line A line output from ip route @return: a dictionary representing a route. """ line = line.split() result = { 'destination': line[0], 'nexthop': None, 'device': None, 'scope': None } idx = 1 while idx < len(line): field = _IP_ROUTE_PARSE_KEYS.get(line[idx]) if not field: idx = idx + 1 else: result[field] = line[idx + 1] idx = idx + 2 return result def get_routing_table(ip_version, namespace=None): """Return a list of dictionaries, each representing a route. 
@param ip_version: the routes of version to return, for example 4 @param namespace @return: a list of dictionaries, each representing a route. The dictionary format is: {'destination': cidr, 'nexthop': ip, 'device': device_name, 'scope': scope} """ ip_wrapper = IPWrapper(namespace=namespace) table = ip_wrapper.netns.execute( ['ip', '-%s' % ip_version, 'route'], check_exit_code=True) return [_parse_ip_route_line(line) for line in table.split('\n') if line.strip()] def ensure_device_is_ready(device_name, namespace=None): dev = IPDevice(device_name, namespace=namespace) dev.set_log_fail_as_error(False) try: # Ensure the device is up, even if it is already up. If the device # doesn't exist, a RuntimeError will be raised. dev.link.set_up() except RuntimeError: return False return True def iproute_arg_supported(command, arg): command += ['help'] stdout, stderr = utils.execute(command, check_exit_code=False, return_stderr=True, log_fail_as_error=False) return any(arg in line for line in stderr.split('\n')) def _arping(ns_name, iface_name, address, count, log_exception): # Pass -w to set timeout to ensure exit if interface removed while running arping_cmd = ['arping', '-A', '-I', iface_name, '-c', count, '-w', 1.5 * count, address] try: ip_wrapper = IPWrapper(namespace=ns_name) # Since arping is used to send gratuitous ARP, a response is not # expected. In some cases (no response) and with some platforms # (>=Ubuntu 14.04), arping exit code can be 1. ip_wrapper.netns.execute(arping_cmd, extra_ok_codes=[1]) except Exception as exc: msg = _("Failed sending gratuitous ARP " "to %(addr)s on %(iface)s in namespace %(ns)s: %(err)s") logger_method = LOG.exception if not log_exception: logger_method = LOG.warning logger_method(msg, {'addr': address, 'iface': iface_name, 'ns': ns_name, 'err': exc}) def send_ip_addr_adv_notif( ns_name, iface_name, address, config, log_exception=True): """Send advance notification of an IP address assignment. If the address is in the IPv4 family, send gratuitous ARP. If the address is in the IPv6 family, no advance notification is necessary, since the Neighbor Discovery Protocol (NDP), Duplicate Address Discovery (DAD), and (for stateless addresses) router advertisements (RAs) are sufficient for address resolution and duplicate address detection. :param ns_name: Namespace name which GARPs are gonna be sent from. :param iface_name: Name of interface which GARPs are gonna be sent from. :param address: Advertised IP address. :param config: An object with send_arp_for_ha member, about how many GARPs are gonna be sent. :param log_exception: (Optional) True if possible failures should be logged on exception level. Otherwise they are logged on WARNING level. Default is True. 
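
    An illustrative call, assuming config.send_arp_for_ha is 3:

        send_ip_addr_adv_notif('qrouter-example', 'qg-example',
                               '203.0.113.5', agent_config)

    This spawns a green thread that sends three gratuitous ARPs and
    returns immediately.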
""" count = config.send_arp_for_ha def arping(): _arping(ns_name, iface_name, address, count, log_exception) if count > 0 and netaddr.IPAddress(address).version == 4: eventlet.spawn_n(arping) def add_namespace_to_cmd(cmd, namespace=None): """Add an optional namespace to the command.""" return ['ip', 'netns', 'exec', namespace] + cmd if namespace else cmd def get_ip_version(ip_or_cidr): return netaddr.IPNetwork(ip_or_cidr).version def get_ipv6_lladdr(mac_addr): return '%s/64' % netaddr.EUI(mac_addr).ipv6_link_local() def get_ip_nonlocal_bind(namespace=None): """Get kernel option value of ip_nonlocal_bind in given namespace.""" cmd = ['sysctl', '-bn', IP_NONLOCAL_BIND] ip_wrapper = IPWrapper(namespace) return int(ip_wrapper.netns.execute(cmd, run_as_root=True)) def set_ip_nonlocal_bind(value, namespace=None, log_fail_as_error=True): """Set sysctl knob of ip_nonlocal_bind to given value.""" cmd = ['sysctl', '-w', '%s=%d' % (IP_NONLOCAL_BIND, value)] ip_wrapper = IPWrapper(namespace) ip_wrapper.netns.execute( cmd, run_as_root=True, log_fail_as_error=log_fail_as_error) def set_ip_nonlocal_bind_for_namespace(namespace): """Set ip_nonlocal_bind but don't raise exception on failure.""" try: set_ip_nonlocal_bind( value=0, namespace=namespace, log_fail_as_error=False) except RuntimeError as rte: LOG.warning( _LW("Setting %(knob)s=0 in namespace %(ns)s failed: %(err)s. It " "will not be set to 0 in the root namespace in order to not " "break DVR, which requires this value be set to 1. This " "may introduce a race between moving a floating IP to a " "different network node, and the peer side getting a " "populated ARP cache for a given floating IP address."), {'knob': IP_NONLOCAL_BIND, 'ns': namespace, 'err': rte}) neutron-8.4.0/neutron/agent/linux/pd_driver.py0000664000567000056710000000412213044372736022645 0ustar jenkinsjenkins00000000000000# Copyright 2015 Cisco Systems # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import abc from oslo_config import cfg import six from neutron._i18n import _ OPTS = [ cfg.StrOpt('pd_confs', default='$state_path/pd', help=_('Location to store IPv6 PD files.')), cfg.StrOpt('vendor_pen', default='8888', help=_("A decimal value as Vendor's Registered Private " "Enterprise Number as required by RFC3315 DUID-EN.")), ] cfg.CONF.register_opts(OPTS) @six.add_metaclass(abc.ABCMeta) class PDDriverBase(object): def __init__(self, router_id, subnet_id, ri_ifname): self.router_id = router_id self.subnet_id = subnet_id self.ri_ifname = ri_ifname @abc.abstractmethod def enable(self, pmon, router_ns, ex_gw_ifname, lla): """Enable IPv6 Prefix Delegation for this PDDriver on the given external interface, with the given link local address """ @abc.abstractmethod def disable(self, pmon, router_ns): """Disable IPv6 Prefix Delegation for this PDDriver """ @abc.abstractmethod def get_prefix(self): """Get the current assigned prefix for this PDDriver from the PD agent. 
If no prefix is currently assigned, return constants.PROVISIONAL_IPV6_PD_PREFIX """ @staticmethod @abc.abstractmethod def get_sync_data(): """Get the latest router_id, subnet_id, and ri_ifname from the PD agent so that the PDDriver can be kept up to date """ neutron-8.4.0/neutron/agent/linux/external_process.py0000664000567000056710000002362613044372760024256 0ustar jenkinsjenkins00000000000000# Copyright 2012 New Dream Network, LLC (DreamHost) # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import abc import collections import os.path import eventlet from oslo_concurrency import lockutils from oslo_config import cfg from oslo_log import log as logging from oslo_utils import fileutils import six from neutron._i18n import _, _LW, _LE from neutron.agent.common import config as agent_cfg from neutron.agent.linux import ip_lib from neutron.agent.linux import utils from neutron.common import utils as common_utils LOG = logging.getLogger(__name__) OPTS = [ cfg.StrOpt('external_pids', default='$state_path/external/pids', help=_('Location to store child pid files')), ] cfg.CONF.register_opts(OPTS) agent_cfg.register_process_monitor_opts(cfg.CONF) @six.add_metaclass(abc.ABCMeta) class MonitoredProcess(object): @abc.abstractproperty def active(self): """Boolean representing the running state of the process.""" @abc.abstractmethod def enable(self): """Enable the service, or respawn the process.""" class ProcessManager(MonitoredProcess): """An external process manager for Neutron spawned processes. Note: The manager expects uuid to be in cmdline. """ def __init__(self, conf, uuid, namespace=None, service=None, pids_path=None, default_cmd_callback=None, cmd_addl_env=None, pid_file=None, run_as_root=False): self.conf = conf self.uuid = uuid self.namespace = namespace self.default_cmd_callback = default_cmd_callback self.cmd_addl_env = cmd_addl_env self.pids_path = pids_path or self.conf.external_pids self.pid_file = pid_file self.run_as_root = run_as_root if service: self.service_pid_fname = 'pid.' 
+ service
            self.service = service
        else:
            self.service_pid_fname = 'pid'
            self.service = 'default-service'

        common_utils.ensure_dir(os.path.dirname(self.get_pid_file_name()))

    def enable(self, cmd_callback=None, reload_cfg=False):
        if not self.active:
            if not cmd_callback:
                cmd_callback = self.default_cmd_callback

            cmd = cmd_callback(self.get_pid_file_name())
            ip_wrapper = ip_lib.IPWrapper(namespace=self.namespace)
            ip_wrapper.netns.execute(cmd, addl_env=self.cmd_addl_env,
                                     run_as_root=self.run_as_root)
        elif reload_cfg:
            self.reload_cfg()

    def reload_cfg(self):
        self.disable('HUP')

    def disable(self, sig='9', get_stop_command=None):
        pid = self.pid

        if self.active:
            if get_stop_command:
                cmd = get_stop_command(self.get_pid_file_name())
                ip_wrapper = ip_lib.IPWrapper(namespace=self.namespace)
                ip_wrapper.netns.execute(cmd, addl_env=self.cmd_addl_env)
            else:
                cmd = ['kill', '-%s' % (sig), pid]
                utils.execute(cmd, run_as_root=True)
                # In the case of shutting down, remove the pid file
                if sig == '9':
                    fileutils.delete_if_exists(self.get_pid_file_name())
        elif pid:
            LOG.debug('Process for %(uuid)s pid %(pid)d is stale, ignoring '
                      'signal %(signal)s', {'uuid': self.uuid,
                                            'pid': pid,
                                            'signal': sig})
        else:
            LOG.debug('No process started for %s', self.uuid)

    def get_pid_file_name(self):
        """Returns the file name for a given kind of config file."""
        if self.pid_file:
            return self.pid_file
        else:
            return utils.get_conf_file_name(self.pids_path,
                                            self.uuid,
                                            self.service_pid_fname)

    @property
    def pid(self):
        """Last known pid for this external process spawned for this uuid."""
        return utils.get_value_from_file(self.get_pid_file_name(), int)

    @property
    def active(self):
        pid = self.pid
        if pid is None:
            return False

        cmdline = '/proc/%s/cmdline' % pid
        try:
            with open(cmdline, "r") as f:
                return self.uuid in f.readline()
        except IOError:
            return False


ServiceId = collections.namedtuple('ServiceId', ['uuid', 'service'])


class ProcessMonitor(object):

    def __init__(self, config, resource_type):
        """Handle multiple process managers and watch over all of them.

        :param config: oslo config object with the agent configuration.
        :type config: oslo_config.ConfigOpts
        :param resource_type: can be dhcp, router, load_balancer, etc.
        :type resource_type: str
        """
        self._config = config
        self._resource_type = resource_type

        self._monitored_processes = {}

        if self._config.AGENT.check_child_processes_interval:
            self._spawn_checking_thread()

    def register(self, uuid, service_name, monitored_process):
        """Start monitoring a process.

        The given monitored_process will be tied to its uuid+service_name,
        replacing the old one if it already existed.

        The monitored_process should be enabled before registration;
        otherwise ProcessMonitor could try to enable the process itself,
        which could lead to a double enable and, if unlucky enough, two
        running processes, as well as errors in the logs.

        :param uuid: An ID of the resource for which the process is running.
        :param service_name: A logical service name for this process monitor,
                             so the same uuid provided via process manager
                             can reference several different services.
        :param monitored_process: MonitoredProcess we want to monitor.
        """
        service_id = ServiceId(uuid, service_name)
        self._monitored_processes[service_id] = monitored_process

    def unregister(self, uuid, service_name):
        """Stop monitoring a process.

        The uuid+service_name will be removed from the monitored processes.
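
        Illustrative call order (``pm`` is a ProcessManager that the caller
        has already enabled; all names here are placeholders)::

            monitor.register(uuid, 'dnsmasq', pm)
            # ... later, when tearing the resource down ...
            monitor.unregister(uuid, 'dnsmasq')
            pm.disable()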
        The service must be disabled **after** unregistering; otherwise, if
        the process monitor checks after you disable the process and before
        you unregister it, the process will be respawned and left orphaned
        in the system.

        :param uuid: An ID of the resource for which the process is running.
        :param service_name: A logical service name for this process monitor,
                             so the same uuid provided via process manager
                             can reference several different services.
        """
        service_id = ServiceId(uuid, service_name)
        self._monitored_processes.pop(service_id, None)

    def stop(self):
        """Stop the process monitoring.

        This method will stop the monitoring thread, but no monitored
        process will be stopped.
        """
        self._monitor_processes = False

    def _spawn_checking_thread(self):
        self._monitor_processes = True
        eventlet.spawn(self._periodic_checking_thread)

    @lockutils.synchronized("_check_child_processes")
    def _check_child_processes(self):
        # we build the list of keys before iterating in the loop to cover
        # the case where other threads add or remove items from the
        # dictionary, which would otherwise cause a RuntimeError
        for service_id in list(self._monitored_processes):
            pm = self._monitored_processes.get(service_id)

            if pm and not pm.active:
                LOG.error(_LE("%(service)s for %(resource_type)s "
                              "with uuid %(uuid)s not found. "
                              "The process should not have died"),
                          {'service': service_id.service,
                           'resource_type': self._resource_type,
                           'uuid': service_id.uuid})
                self._execute_action(service_id)
            eventlet.sleep(0)

    def _periodic_checking_thread(self):
        while self._monitor_processes:
            eventlet.sleep(self._config.AGENT.check_child_processes_interval)
            eventlet.spawn(self._check_child_processes)

    def _execute_action(self, service_id):
        action = self._config.AGENT.check_child_processes_action
        action_function = getattr(self, "_%s_action" % action)
        action_function(service_id)

    def _respawn_action(self, service_id):
        LOG.warning(_LW("Respawning %(service)s for uuid %(uuid)s"),
                    {'service': service_id.service,
                     'uuid': service_id.uuid})
        self._monitored_processes[service_id].enable()

    def _exit_action(self, service_id):
        LOG.error(_LE("Exiting agent as programmed in check_child_processes_"
                      "actions"))
        self._exit_handler(service_id.uuid, service_id.service)

    def _exit_handler(self, uuid, service):
        """This is an exit handler for the ProcessMonitor.

        It will be called if the administrator configured the exit action in
        check_child_processes_actions, and one of our external processes dies
        unexpectedly.
        """
        LOG.error(_LE("Exiting agent because of a malfunction with the "
                      "%(service)s process identified by uuid %(uuid)s"),
                  {'service': service, 'uuid': uuid})
        raise SystemExit(1)
neutron-8.4.0/neutron/agent/linux/interface.py0000664000567000056710000004564413044372760022634 0ustar jenkinsjenkins00000000000000# Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
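#
# A minimal, hypothetical sketch of how the drivers in this module are
# typically driven by an agent (`conf`, `port`, `network_id`, `port_id` and
# `mac_address` are placeholders supplied by the caller, not names defined
# here):
#
#     driver = OVSInterfaceDriver(conf)
#     name = driver.get_device_name(port)  # 'tap' + first 11 chars of id
#     driver.plug(network_id, port_id, name, mac_address,
#                 namespace='qdhcp-<network-id>', mtu=1450)
#     driver.init_l3(name, ['192.0.2.2/24'], namespace='qdhcp-<network-id>')
#     # ... and on teardown:
#     driver.unplug(name, namespace='qdhcp-<network-id>')
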
import abc import netaddr from oslo_config import cfg from oslo_log import log as logging import six from neutron._i18n import _, _LE, _LI, _LW from neutron.agent.common import ovs_lib from neutron.agent.linux import ip_lib from neutron.agent.linux import utils from neutron.common import constants as n_const from neutron.common import exceptions from neutron.common import ipv6_utils LOG = logging.getLogger(__name__) OPTS = [ cfg.StrOpt('ovs_integration_bridge', default='br-int', help=_('Name of Open vSwitch bridge to use')), cfg.BoolOpt('ovs_use_veth', default=False, help=_('Uses veth for an OVS interface or not. ' 'Support kernels with limited namespace support ' '(e.g. RHEL 6.5) so long as ovs_use_veth is set to ' 'True.')), cfg.IntOpt('network_device_mtu', deprecated_for_removal=True, help=_('MTU setting for device. This option will be removed in ' 'Newton. Please use the system-wide global_physnet_mtu ' 'setting which the agents will take into account when ' 'wiring VIFs.')), ] @six.add_metaclass(abc.ABCMeta) class LinuxInterfaceDriver(object): # from linux IF_NAMESIZE DEV_NAME_LEN = 14 DEV_NAME_PREFIX = n_const.TAP_DEVICE_PREFIX def __init__(self, conf): self.conf = conf if self.conf.network_device_mtu: self._validate_network_device_mtu() def _validate_network_device_mtu(self): if (ipv6_utils.is_enabled() and self.conf.network_device_mtu < n_const.IPV6_MIN_MTU): LOG.error(_LE("IPv6 protocol requires a minimum MTU of " "%(min_mtu)s, while the configured value is " "%(current_mtu)s"), {'min_mtu': n_const.IPV6_MIN_MTU, 'current_mtu': self.conf.network_device_mtu}) raise SystemExit(1) @property def use_gateway_ips(self): """Whether to use gateway IPs instead of unique IP allocations. In each place where the DHCP agent runs, and for each subnet for which DHCP is handling out IP addresses, the DHCP port needs - at the Linux level - to have an IP address within that subnet. Generally this needs to be a unique Neutron-allocated IP address, because the subnet's underlying L2 domain is bridged across multiple compute hosts and network nodes, and for HA there may be multiple DHCP agents running on that same bridged L2 domain. However, if the DHCP ports - on multiple compute/network nodes but for the same network - are _not_ bridged to each other, they do not need each to have a unique IP address. Instead they can all share the same address from the relevant subnet. This works, without creating any ambiguity, because those ports are not all present on the same L2 domain, and because no data within the network is ever sent to that address. (DHCP requests are broadcast, and it is the network's job to ensure that such a broadcast will reach at least one of the available DHCP servers. DHCP responses will be sent _from_ the DHCP port address.) Specifically, for networking backends where it makes sense, the DHCP agent allows all DHCP ports to use the subnet's gateway IP address, and thereby to completely avoid any unique IP address allocation. This behaviour is selected by running the DHCP agent with a configured interface driver whose 'use_gateway_ips' property is True. When an operator deploys Neutron with an interface driver that makes use_gateway_ips True, they should also ensure that a gateway IP address is defined for each DHCP-enabled subnet, and that the gateway IP address doesn't change during the subnet's lifetime. 
""" return False def init_l3(self, device_name, ip_cidrs, namespace=None, preserve_ips=None, clean_connections=False): """Set the L3 settings for the interface using data from the port. ip_cidrs: list of 'X.X.X.X/YY' strings preserve_ips: list of ip cidrs that should not be removed from device clean_connections: Boolean to indicate if we should cleanup connections associated to removed ips """ preserve_ips = preserve_ips or [] device = ip_lib.IPDevice(device_name, namespace=namespace) # The LLA generated by the operating system is not known to # Neutron, so it would be deleted if we added it to the 'previous' # list here default_ipv6_lla = ip_lib.get_ipv6_lladdr(device.link.address) previous = {addr['cidr'] for addr in device.addr.list( filters=['permanent'])} - {default_ipv6_lla} # add new addresses for ip_cidr in ip_cidrs: net = netaddr.IPNetwork(ip_cidr) # Convert to compact IPv6 address because the return values of # "ip addr list" are compact. if net.version == 6: ip_cidr = str(net) if ip_cidr in previous: previous.remove(ip_cidr) continue device.addr.add(ip_cidr) # clean up any old addresses for ip_cidr in previous: if ip_cidr not in preserve_ips: if clean_connections: device.delete_addr_and_conntrack_state(ip_cidr) else: device.addr.delete(ip_cidr) def init_router_port(self, device_name, ip_cidrs, namespace, preserve_ips=None, extra_subnets=None, clean_connections=False): """Set the L3 settings for a router interface using data from the port. ip_cidrs: list of 'X.X.X.X/YY' strings preserve_ips: list of ip cidrs that should not be removed from device clean_connections: Boolean to indicate if we should cleanup connections associated to removed ips extra_subnets: An iterable of cidrs to add as routes without address """ LOG.debug("init_router_port: device_name(%s), namespace(%s)", device_name, namespace) self.init_l3(device_name=device_name, ip_cidrs=ip_cidrs, namespace=namespace, preserve_ips=preserve_ips or [], clean_connections=clean_connections) device = ip_lib.IPDevice(device_name, namespace=namespace) # Manage on-link routes (routes without an associated address) new_onlink_cidrs = set(s['cidr'] for s in extra_subnets or []) v4_onlink = device.route.list_onlink_routes(n_const.IP_VERSION_4) v6_onlink = device.route.list_onlink_routes(n_const.IP_VERSION_6) existing_onlink_cidrs = set(r['cidr'] for r in v4_onlink + v6_onlink) for route in new_onlink_cidrs - existing_onlink_cidrs: LOG.debug("adding onlink route(%s)", route) device.route.add_onlink_route(route) for route in (existing_onlink_cidrs - new_onlink_cidrs - set(preserve_ips or [])): LOG.debug("deleting onlink route(%s)", route) device.route.delete_onlink_route(route) def add_ipv6_addr(self, device_name, v6addr, namespace, scope='global'): device = ip_lib.IPDevice(device_name, namespace=namespace) net = netaddr.IPNetwork(v6addr) device.addr.add(str(net), scope) def delete_ipv6_addr(self, device_name, v6addr, namespace): device = ip_lib.IPDevice(device_name, namespace=namespace) device.delete_addr_and_conntrack_state(v6addr) def delete_ipv6_addr_with_prefix(self, device_name, prefix, namespace): """Delete the first listed IPv6 address that falls within a given prefix. 
""" device = ip_lib.IPDevice(device_name, namespace=namespace) net = netaddr.IPNetwork(prefix) for address in device.addr.list(scope='global', filters=['permanent']): ip_address = netaddr.IPNetwork(address['cidr']) if ip_address in net: device.delete_addr_and_conntrack_state(address['cidr']) break def get_ipv6_llas(self, device_name, namespace): device = ip_lib.IPDevice(device_name, namespace=namespace) return device.addr.list(scope='link', ip_version=6) def check_bridge_exists(self, bridge): if not ip_lib.device_exists(bridge): raise exceptions.BridgeDoesNotExist(bridge=bridge) def get_device_name(self, port): return (self.DEV_NAME_PREFIX + port.id)[:self.DEV_NAME_LEN] @staticmethod def configure_ipv6_ra(namespace, dev_name): """Configure acceptance of IPv6 route advertisements on an intf.""" # Learn the default router's IP address via RAs ip_lib.IPWrapper(namespace=namespace).netns.execute( ['sysctl', '-w', 'net.ipv6.conf.%s.accept_ra=2' % dev_name]) @abc.abstractmethod def plug_new(self, network_id, port_id, device_name, mac_address, bridge=None, namespace=None, prefix=None, mtu=None): """Plug in the interface only for new devices that don't exist yet.""" def plug(self, network_id, port_id, device_name, mac_address, bridge=None, namespace=None, prefix=None, mtu=None): if not ip_lib.device_exists(device_name, namespace=namespace): try: self.plug_new(network_id, port_id, device_name, mac_address, bridge, namespace, prefix, mtu) except TypeError: self.plug_new(network_id, port_id, device_name, mac_address, bridge, namespace, prefix) else: LOG.info(_LI("Device %s already exists"), device_name) @abc.abstractmethod def unplug(self, device_name, bridge=None, namespace=None, prefix=None): """Unplug the interface.""" @property def bridged(self): """Whether the DHCP port is bridged to the VM TAP interfaces. When the DHCP port is bridged to the TAP interfaces for the VMs for which it is providing DHCP service - as is the case for most Neutron network implementations - the DHCP server only needs to listen on the DHCP port, and will still receive DHCP requests from all the relevant VMs. If the DHCP port is not bridged to the relevant VM TAP interfaces, the DHCP server needs to listen explicitly on those TAP interfaces, and to treat those as aliases of the DHCP port where the IP subnet is defined. 
""" return True class NullDriver(LinuxInterfaceDriver): def plug_new(self, network_id, port_id, device_name, mac_address, bridge=None, namespace=None, prefix=None, mtu=None): pass def unplug(self, device_name, bridge=None, namespace=None, prefix=None): pass class OVSInterfaceDriver(LinuxInterfaceDriver): """Driver for creating an internal interface on an OVS bridge.""" DEV_NAME_PREFIX = n_const.TAP_DEVICE_PREFIX def __init__(self, conf): super(OVSInterfaceDriver, self).__init__(conf) if self.conf.ovs_use_veth: self.DEV_NAME_PREFIX = 'ns-' def _get_tap_name(self, dev_name, prefix=None): if self.conf.ovs_use_veth: dev_name = dev_name.replace(prefix or self.DEV_NAME_PREFIX, n_const.TAP_DEVICE_PREFIX) return dev_name def _ovs_add_port(self, bridge, device_name, port_id, mac_address, internal=True): attrs = [('external_ids', {'iface-id': port_id, 'iface-status': 'active', 'attached-mac': mac_address})] if internal: attrs.insert(0, ('type', 'internal')) ovs = ovs_lib.OVSBridge(bridge) ovs.replace_port(device_name, *attrs) def plug_new(self, network_id, port_id, device_name, mac_address, bridge=None, namespace=None, prefix=None, mtu=None): """Plug in the interface.""" if not bridge: bridge = self.conf.ovs_integration_bridge self.check_bridge_exists(bridge) ip = ip_lib.IPWrapper() tap_name = self._get_tap_name(device_name, prefix) if self.conf.ovs_use_veth: # Create ns_dev in a namespace if one is configured. root_dev, ns_dev = ip.add_veth(tap_name, device_name, namespace2=namespace) root_dev.disable_ipv6() else: ns_dev = ip.device(device_name) internal = not self.conf.ovs_use_veth self._ovs_add_port(bridge, tap_name, port_id, mac_address, internal=internal) ns_dev.link.set_address(mac_address) # Add an interface created by ovs to the namespace. if not self.conf.ovs_use_veth and namespace: namespace_obj = ip.ensure_namespace(namespace) namespace_obj.add_device_to_namespace(ns_dev) # NOTE(ihrachys): the order here is significant: we must set MTU after # the device is moved into a namespace, otherwise OVS bridge does not # allow to set MTU that is higher than the least of all device MTUs on # the bridge mtu = self.conf.network_device_mtu or mtu if mtu: ns_dev.link.set_mtu(mtu) if self.conf.ovs_use_veth: root_dev.link.set_mtu(mtu) else: LOG.warning(_LW("No MTU configured for port %s"), port_id) ns_dev.link.set_up() if self.conf.ovs_use_veth: root_dev.link.set_up() def unplug(self, device_name, bridge=None, namespace=None, prefix=None): """Unplug the interface.""" if not bridge: bridge = self.conf.ovs_integration_bridge tap_name = self._get_tap_name(device_name, prefix) self.check_bridge_exists(bridge) ovs = ovs_lib.OVSBridge(bridge) try: ovs.delete_port(tap_name) if self.conf.ovs_use_veth: device = ip_lib.IPDevice(device_name, namespace=namespace) device.link.delete() LOG.debug("Unplugged interface '%s'", device_name) except RuntimeError: LOG.error(_LE("Failed unplugging interface '%s'"), device_name) class IVSInterfaceDriver(LinuxInterfaceDriver): """Driver for creating an internal interface on an IVS bridge.""" DEV_NAME_PREFIX = n_const.TAP_DEVICE_PREFIX def __init__(self, conf): super(IVSInterfaceDriver, self).__init__(conf) self.DEV_NAME_PREFIX = 'ns-' def _get_tap_name(self, dev_name, prefix=None): dev_name = dev_name.replace(prefix or self.DEV_NAME_PREFIX, n_const.TAP_DEVICE_PREFIX) return dev_name def _ivs_add_port(self, device_name, port_id, mac_address): cmd = ['ivs-ctl', 'add-port', device_name] utils.execute(cmd, run_as_root=True) def plug_new(self, network_id, port_id, device_name, 
                 mac_address, bridge=None, namespace=None, prefix=None,
                 mtu=None):
        """Plug in the interface."""
        ip = ip_lib.IPWrapper()
        tap_name = self._get_tap_name(device_name, prefix)
        root_dev, ns_dev = ip.add_veth(tap_name, device_name)
        root_dev.disable_ipv6()
        self._ivs_add_port(tap_name, port_id, mac_address)

        ns_dev = ip.device(device_name)
        ns_dev.link.set_address(mac_address)

        mtu = self.conf.network_device_mtu or mtu
        if mtu:
            ns_dev.link.set_mtu(mtu)
            root_dev.link.set_mtu(mtu)
        else:
            LOG.warning(_LW("No MTU configured for port %s"), port_id)

        if namespace:
            namespace_obj = ip.ensure_namespace(namespace)
            namespace_obj.add_device_to_namespace(ns_dev)

        ns_dev.link.set_up()
        root_dev.link.set_up()

    def unplug(self, device_name, bridge=None, namespace=None, prefix=None):
        """Unplug the interface."""
        tap_name = self._get_tap_name(device_name, prefix)
        try:
            cmd = ['ivs-ctl', 'del-port', tap_name]
            utils.execute(cmd, run_as_root=True)
            device = ip_lib.IPDevice(device_name, namespace=namespace)
            device.link.delete()
            LOG.debug("Unplugged interface '%s'", device_name)
        except RuntimeError:
            LOG.error(_LE("Failed unplugging interface '%s'"),
                      device_name)


class BridgeInterfaceDriver(LinuxInterfaceDriver):
    """Driver for creating bridge interfaces."""

    DEV_NAME_PREFIX = 'ns-'

    def plug_new(self, network_id, port_id, device_name, mac_address,
                 bridge=None, namespace=None, prefix=None, mtu=None):
        """Plug in the interface."""
        ip = ip_lib.IPWrapper()

        # Enable agent to define the prefix
        tap_name = device_name.replace(prefix or self.DEV_NAME_PREFIX,
                                       n_const.TAP_DEVICE_PREFIX)
        # Create ns_veth in a namespace if one is configured.
        root_veth, ns_veth = ip.add_veth(tap_name, device_name,
                                         namespace2=namespace)
        root_veth.disable_ipv6()
        ns_veth.link.set_address(mac_address)

        mtu = self.conf.network_device_mtu or mtu
        if mtu:
            root_veth.link.set_mtu(mtu)
            ns_veth.link.set_mtu(mtu)
        else:
            LOG.warning(_LW("No MTU configured for port %s"), port_id)

        root_veth.link.set_up()
        ns_veth.link.set_up()

    def unplug(self, device_name, bridge=None, namespace=None, prefix=None):
        """Unplug the interface."""
        device = ip_lib.IPDevice(device_name, namespace=namespace)
        try:
            device.link.delete()
            LOG.debug("Unplugged interface '%s'", device_name)
        except RuntimeError:
            LOG.error(_LE("Failed unplugging interface '%s'"), device_name)
neutron-8.4.0/neutron/agent/linux/dhcp.py0000664000567000056710000015130713044372760021610 0ustar jenkinsjenkins00000000000000# Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
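#
# An illustrative (not normative) sketch of the DictModel/NetModel wrappers
# defined below, which let RPC dict payloads be read via attribute access;
# the literal values here are made up:
#
#     net = NetModel({'id': 'd1a28c4b', 'subnets': [{'enable_dhcp': True}]})
#     net.id                       # 'd1a28c4b'
#     net.subnets[0].enable_dhcp   # True (nested dicts become DictModels)
#     net.namespace                # 'qdhcp-d1a28c4b'
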
import abc import collections import os import re import shutil import time import netaddr from oslo_config import cfg from oslo_log import log as logging import oslo_messaging from oslo_utils import excutils from oslo_utils import uuidutils import six from neutron._i18n import _, _LI, _LW, _LE from neutron.agent.common import utils as agent_common_utils from neutron.agent.linux import external_process from neutron.agent.linux import ip_lib from neutron.agent.linux import iptables_manager from neutron.common import constants from neutron.common import exceptions from neutron.common import ipv6_utils from neutron.common import utils as common_utils from neutron.extensions import extra_dhcp_opt as edo_ext from neutron.ipam import utils as ipam_utils LOG = logging.getLogger(__name__) UDP = 'udp' TCP = 'tcp' DNS_PORT = 53 DHCPV4_PORT = 67 DHCPV6_PORT = 547 METADATA_DEFAULT_PREFIX = 16 METADATA_DEFAULT_IP = '169.254.169.254' METADATA_DEFAULT_CIDR = '%s/%d' % (METADATA_DEFAULT_IP, METADATA_DEFAULT_PREFIX) METADATA_PORT = 80 WIN2k3_STATIC_DNS = 249 NS_PREFIX = 'qdhcp-' DNSMASQ_SERVICE_NAME = 'dnsmasq' class DictModel(dict): """Convert dict into an object that provides attribute access to values.""" def __init__(self, *args, **kwargs): """Convert dict values to DictModel values.""" super(DictModel, self).__init__(*args, **kwargs) def needs_upgrade(item): """Check if `item` is a dict and needs to be changed to DictModel. """ return isinstance(item, dict) and not isinstance(item, DictModel) def upgrade(item): """Upgrade item if it needs to be upgraded.""" if needs_upgrade(item): return DictModel(item) else: return item for key, value in six.iteritems(self): if isinstance(value, (list, tuple)): # Keep the same type but convert dicts to DictModels self[key] = type(value)( (upgrade(item) for item in value) ) elif needs_upgrade(value): # Change dict instance values to DictModel instance values self[key] = DictModel(value) def __getattr__(self, name): try: return self[name] except KeyError as e: raise AttributeError(e) def __setattr__(self, name, value): self[name] = value def __delattr__(self, name): del self[name] def __str__(self): pairs = ['%s=%s' % (k, v) for k, v in self.items()] return ', '.join(sorted(pairs)) class NetModel(DictModel): def __init__(self, d): super(NetModel, self).__init__(d) self._ns_name = "%s%s" % (NS_PREFIX, self.id) @property def namespace(self): return self._ns_name @six.add_metaclass(abc.ABCMeta) class DhcpBase(object): def __init__(self, conf, network, process_monitor, version=None, plugin=None): self.conf = conf self.network = network self.process_monitor = process_monitor self.device_manager = DeviceManager(self.conf, plugin) self.version = version @abc.abstractmethod def enable(self): """Enables DHCP for this network.""" @abc.abstractmethod def disable(self, retain_port=False): """Disable dhcp for this network.""" def restart(self): """Restart the dhcp service for the network.""" self.disable(retain_port=True) self.enable() @abc.abstractproperty def active(self): """Boolean representing the running state of the DHCP server.""" @abc.abstractmethod def reload_allocations(self): """Force the DHCP server to reload the assignment database.""" @classmethod def existing_dhcp_networks(cls, conf): """Return a list of existing networks ids that we have configs for.""" raise NotImplementedError() @classmethod def check_version(cls): """Execute version checks on DHCP server.""" raise NotImplementedError() @classmethod def get_isolated_subnets(cls, network): """Returns a dict 
indicating whether or not a subnet is isolated""" raise NotImplementedError() @classmethod def should_enable_metadata(cls, conf, network): """True if the metadata-proxy should be enabled for the network.""" raise NotImplementedError() class DhcpLocalProcess(DhcpBase): PORTS = [] def __init__(self, conf, network, process_monitor, version=None, plugin=None): super(DhcpLocalProcess, self).__init__(conf, network, process_monitor, version, plugin) self.confs_dir = self.get_confs_dir(conf) self.network_conf_dir = os.path.join(self.confs_dir, network.id) common_utils.ensure_dir(self.network_conf_dir) @staticmethod def get_confs_dir(conf): return os.path.abspath(os.path.normpath(conf.dhcp_confs)) def get_conf_file_name(self, kind): """Returns the file name for a given kind of config file.""" return os.path.join(self.network_conf_dir, kind) def _remove_config_files(self): shutil.rmtree(self.network_conf_dir, ignore_errors=True) def _enable_dhcp(self): """check if there is a subnet within the network with dhcp enabled.""" for subnet in self.network.subnets: if subnet.enable_dhcp: return True return False def enable(self): """Enables DHCP for this network by spawning a local process.""" if self.active: self.restart() elif self._enable_dhcp(): common_utils.ensure_dir(self.network_conf_dir) interface_name = self.device_manager.setup(self.network) self.interface_name = interface_name self.spawn_process() def _get_process_manager(self, cmd_callback=None): return external_process.ProcessManager( conf=self.conf, uuid=self.network.id, namespace=self.network.namespace, default_cmd_callback=cmd_callback, pid_file=self.get_conf_file_name('pid'), run_as_root=True) def disable(self, retain_port=False): """Disable DHCP for this network by killing the local process.""" self.process_monitor.unregister(self.network.id, DNSMASQ_SERVICE_NAME) self._get_process_manager().disable() if not retain_port: self._destroy_namespace_and_port() self._remove_config_files() def _destroy_namespace_and_port(self): try: self.device_manager.destroy(self.network, self.interface_name) except RuntimeError: LOG.warning(_LW('Failed trying to delete interface: %s'), self.interface_name) ns_ip = ip_lib.IPWrapper(namespace=self.network.namespace) try: ns_ip.netns.delete(self.network.namespace) except RuntimeError: LOG.warning(_LW('Failed trying to delete namespace: %s'), self.network.namespace) def _get_value_from_conf_file(self, kind, converter=None): """A helper function to read a value from one of the state files.""" file_name = self.get_conf_file_name(kind) msg = _('Error while reading %s') try: with open(file_name, 'r') as f: try: return converter(f.read()) if converter else f.read() except ValueError: msg = _('Unable to convert value in %s') except IOError: msg = _('Unable to access %s') LOG.debug(msg, file_name) return None @property def interface_name(self): return self._get_value_from_conf_file('interface') @interface_name.setter def interface_name(self, value): interface_file_path = self.get_conf_file_name('interface') common_utils.replace_file(interface_file_path, value) @property def active(self): return self._get_process_manager().active @abc.abstractmethod def spawn_process(self): pass class Dnsmasq(DhcpLocalProcess): # The ports that need to be opened when security policies are active # on the Neutron port used for DHCP. These are provided as a convenience # for users of this class. 
PORTS = {constants.IP_VERSION_4: [(UDP, DNS_PORT), (TCP, DNS_PORT), (UDP, DHCPV4_PORT)], constants.IP_VERSION_6: [(UDP, DNS_PORT), (TCP, DNS_PORT), (UDP, DHCPV6_PORT)], } _TAG_PREFIX = 'tag%d' _ID = 'id:' @classmethod def check_version(cls): pass @classmethod def existing_dhcp_networks(cls, conf): """Return a list of existing networks ids that we have configs for.""" confs_dir = cls.get_confs_dir(conf) try: return [ c for c in os.listdir(confs_dir) if uuidutils.is_uuid_like(c) ] except OSError: return [] def _build_cmdline_callback(self, pid_file): # We ignore local resolv.conf if dns servers are specified # or if local resolution is explicitly disabled. _no_resolv = ( '--no-resolv' if self.conf.dnsmasq_dns_servers or not self.conf.dnsmasq_local_resolv else '') cmd = [ 'dnsmasq', '--no-hosts', _no_resolv, '--strict-order', '--except-interface=lo', '--pid-file=%s' % pid_file, '--dhcp-hostsfile=%s' % self.get_conf_file_name('host'), '--addn-hosts=%s' % self.get_conf_file_name('addn_hosts'), '--dhcp-optsfile=%s' % self.get_conf_file_name('opts'), '--dhcp-leasefile=%s' % self.get_conf_file_name('leases'), '--dhcp-match=set:ipxe,175', ] if self.device_manager.driver.bridged: cmd += [ '--bind-interfaces', '--interface=%s' % self.interface_name, ] else: cmd += [ '--bind-dynamic', '--interface=%s' % self.interface_name, '--interface=tap*', '--bridge-interface=%s,tap*' % self.interface_name, ] possible_leases = 0 for i, subnet in enumerate(self.network.subnets): mode = None # if a subnet is specified to have dhcp disabled if not subnet.enable_dhcp: continue if subnet.ip_version == 4: mode = 'static' else: # Note(scollins) If the IPv6 attributes are not set, set it as # static to preserve previous behavior addr_mode = getattr(subnet, 'ipv6_address_mode', None) ra_mode = getattr(subnet, 'ipv6_ra_mode', None) if (addr_mode in [constants.DHCPV6_STATEFUL, constants.DHCPV6_STATELESS] or not addr_mode and not ra_mode): mode = 'static' cidr = netaddr.IPNetwork(subnet.cidr) if self.conf.dhcp_lease_duration == -1: lease = 'infinite' else: lease = '%ss' % self.conf.dhcp_lease_duration # mode is optional and is not set - skip it if mode: if subnet.ip_version == 4: cmd.append('--dhcp-range=%s%s,%s,%s,%s' % ('set:', self._TAG_PREFIX % i, cidr.network, mode, lease)) else: if cidr.prefixlen < 64: LOG.debug('Ignoring subnet %(subnet)s, CIDR has ' 'prefix length < 64: %(cidr)s', {'subnet': subnet.id, 'cidr': cidr}) continue cmd.append('--dhcp-range=%s%s,%s,%s,%d,%s' % ('set:', self._TAG_PREFIX % i, cidr.network, mode, cidr.prefixlen, lease)) possible_leases += cidr.size if cfg.CONF.advertise_mtu: mtu = getattr(self.network, 'mtu', 0) # Do not advertise unknown mtu if mtu > 0: cmd.append('--dhcp-option-force=option:mtu,%d' % mtu) # Cap the limit because creating lots of subnets can inflate # this possible lease cap. 
        cmd.append('--dhcp-lease-max=%d' % min(
            possible_leases, self.conf.dnsmasq_lease_max))

        cmd.append('--conf-file=%s' % self.conf.dnsmasq_config_file)
        if self.conf.dnsmasq_dns_servers:
            cmd.extend(
                '--server=%s' % server
                for server in self.conf.dnsmasq_dns_servers)

        if self.conf.dhcp_domain:
            cmd.append('--domain=%s' % self.conf.dhcp_domain)

        if self.conf.dhcp_broadcast_reply:
            cmd.append('--dhcp-broadcast')

        if self.conf.dnsmasq_base_log_dir:
            log_dir = os.path.join(
                self.conf.dnsmasq_base_log_dir,
                self.network.id)
            try:
                if not os.path.exists(log_dir):
                    os.makedirs(log_dir)
            except OSError:
                LOG.error(_LE('Error while creating dnsmasq log dir: %s'),
                          log_dir)
            else:
                log_filename = os.path.join(log_dir, 'dhcp_dns_log')
                cmd.append('--log-queries')
                cmd.append('--log-dhcp')
                cmd.append('--log-facility=%s' % log_filename)

        return cmd

    def spawn_process(self):
        """Spawn the process, if it's not spawned already."""
        # we only need to generate the lease file the first time dnsmasq
        # starts, rather than on every reload, since dnsmasq will keep the
        # file current
        self._output_init_lease_file()
        self._spawn_or_reload_process(reload_with_HUP=False)

    def _spawn_or_reload_process(self, reload_with_HUP):
        """Spawns or reloads a Dnsmasq process for the network.

        When reload_with_HUP is True, dnsmasq receives a HUP signal, or it's
        respawned if the process is not running.
        """
        self._output_config_files()

        pm = self._get_process_manager(
            cmd_callback=self._build_cmdline_callback)

        pm.enable(reload_cfg=reload_with_HUP)

        self.process_monitor.register(uuid=self.network.id,
                                      service_name=DNSMASQ_SERVICE_NAME,
                                      monitored_process=pm)

    def _release_lease(self, mac_address, ip, client_id):
        """Release a DHCP lease."""
        if netaddr.IPAddress(ip).version == constants.IP_VERSION_6:
            # Note(SridharG) dhcp_release is only supported for IPv4
            # addresses. For more details, please refer to man page.
            return

        cmd = ['dhcp_release', self.interface_name, ip, mac_address]
        if client_id:
            cmd.append(client_id)
        ip_wrapper = ip_lib.IPWrapper(namespace=self.network.namespace)
        ip_wrapper.netns.execute(cmd, run_as_root=True)

    def _output_config_files(self):
        self._output_hosts_file()
        self._output_addn_hosts_file()
        self._output_opts_file()

    def reload_allocations(self):
        """Rebuild the dnsmasq config and signal the dnsmasq to reload."""

        # If all subnets turn off dhcp, kill the process.
        if not self._enable_dhcp():
            self.disable()
            LOG.debug('Killing dnsmasq for network since all subnets have '
                      'turned off DHCP: %s', self.network.id)
            return

        self._release_unused_leases()
        self._spawn_or_reload_process(reload_with_HUP=True)
        LOG.debug('Reloading allocations for network: %s', self.network.id)
        self.device_manager.update(self.network, self.interface_name)

    def _sort_fixed_ips_for_dnsmasq(self, fixed_ips, v6_nets):
        """Sort fixed_ips so that stateless IPv6 subnets appear first.

        For example, if a port with v6 extra_dhcp_opts is on a network with
        IPv4 and IPv6 stateless subnets, then the dhcp host file will have
        the below 2 entries for the same MAC:

        fa:16:3e:8f:9d:65,30.0.0.5,set:aabc7d33-4874-429e-9637-436e4232d2cd
        (entry for IPv4 dhcp)
        fa:16:3e:8f:9d:65,set:aabc7d33-4874-429e-9637-436e4232d2cd
        (entry for stateless IPv6 for v6 options)

        dnsmasq internal details for processing host file entries
        1) dnsmasq reads the host file from EOF.
        2) So it first picks up the stateless IPv6 entry,
           fa:16:3e:8f:9d:65,set:aabc7d33-4874-429e-9637-436e4232d2cd
        3) But dnsmasq doesn't have sufficient checks to skip this entry and
           pick the next entry, to process the dhcp IPv4 request.
        4) So dnsmasq uses this entry to process the dhcp IPv4 request.
        5) As there is no ip in this entry, dnsmasq logs "no address
           available" and fails to send a DHCPOFFER message.

        As we rely on internal details of dnsmasq to understand and fix the
        issue, Ihar sent a mail to the dnsmasq-discuss mailing list
        http://lists.thekelleys.org.uk/pipermail/dnsmasq-discuss/2015q2/
        009650.html

        So if we reverse the order of the entries in the host file, so that
        the entry for stateless IPv6 comes first, then dnsmasq can correctly
        fetch the IPv4 address.
        """
        return sorted(
            fixed_ips,
            key=lambda fip: ((fip.subnet_id in v6_nets) and (
                v6_nets[fip.subnet_id].ipv6_address_mode == (
                    constants.DHCPV6_STATELESS))),
            reverse=True)

    def _iter_hosts(self):
        """Iterate over hosts.

        For each host on the network we yield a tuple containing:

        (
            port,  # a DictModel instance representing the port.
            alloc,  # a DictModel instance of the allocated ip and subnet.
                    # if alloc is None, it means there is no need to allocate
                    # an IPv6 address because of stateless DHCPv6 network.
            hostname,  # Host name.
            fqdn,  # Canonical hostname in the format 'hostname[.domain]'.
            no_dhcp,  # A flag indicating that the address doesn't need a
                      # DHCP IP address.
            no_opts,  # A flag indicating that options shouldn't be written.
        )
        """
        v6_nets = dict((subnet.id, subnet) for subnet in
                       self.network.subnets if subnet.ip_version == 6)

        for port in self.network.ports:
            fixed_ips = self._sort_fixed_ips_for_dnsmasq(port.fixed_ips,
                                                         v6_nets)
            # Confirm whether the Neutron server supports the dns_name
            # attribute in the ports API
            dns_assignment = getattr(port, 'dns_assignment', None)
            if dns_assignment:
                dns_ip_map = {d.ip_address: d for d in dns_assignment}
            for alloc in fixed_ips:
                no_dhcp = False
                no_opts = False
                if alloc.subnet_id in v6_nets:
                    addr_mode = v6_nets[alloc.subnet_id].ipv6_address_mode
                    no_dhcp = addr_mode in (constants.IPV6_SLAAC,
                                            constants.DHCPV6_STATELESS)
                    # we don't setup anything for SLAAC. It doesn't make sense
                    # to provide options for a client that won't use DHCP
                    no_opts = addr_mode == constants.IPV6_SLAAC

                # If the dns_name attribute is supported by the ports API,
                # return the dns_assignment generated by the Neutron server.
                # Otherwise, generate hostname and fqdn locally (previous
                # behaviour)
                if dns_assignment:
                    hostname = dns_ip_map[alloc.ip_address].hostname
                    fqdn = dns_ip_map[alloc.ip_address].fqdn
                else:
                    hostname = 'host-%s' % alloc.ip_address.replace(
                        '.', '-').replace(':', '-')
                    fqdn = hostname
                    if self.conf.dhcp_domain:
                        fqdn = '%s.%s' % (fqdn, self.conf.dhcp_domain)
                yield (port, alloc, hostname, fqdn, no_dhcp, no_opts)

    def _get_port_extra_dhcp_opts(self, port):
        return getattr(port, edo_ext.EXTRADHCPOPTS, False)

    def _output_init_lease_file(self):
        """Write a fake lease file to bootstrap dnsmasq.

        The generated file is passed to the --dhcp-leasefile option of
        dnsmasq. This is used as a bootstrapping mechanism to avoid NAKing
        active leases when a dhcp server is scheduled to another agent.
        Using a leasefile will also prevent dnsmasq from NAKing or ignoring
        renewals after a restart.

        Format is as follows:
        epoch-timestamp mac_addr ip_addr hostname client-ID
        """
        filename = self.get_conf_file_name('leases')
        buf = six.StringIO()

        LOG.debug('Building initial lease file: %s', filename)
        # we make up a lease time for the database entry
        if self.conf.dhcp_lease_duration == -1:
            # Even with an infinite lease, a client may choose to renew a
            # previous lease on reboot or interface bounce so we should have
            # an entry for it.
            # Dnsmasq timestamp format for an infinite lease is 0.
timestamp = 0 else: timestamp = int(time.time()) + self.conf.dhcp_lease_duration dhcp_enabled_subnet_ids = [s.id for s in self.network.subnets if s.enable_dhcp] for host_tuple in self._iter_hosts(): port, alloc, hostname, name, no_dhcp, no_opts = host_tuple # don't write ip address which belongs to a dhcp disabled subnet # or an IPv6 SLAAC/stateless subnet if no_dhcp or alloc.subnet_id not in dhcp_enabled_subnet_ids: continue ip_address = self._format_address_for_dnsmasq(alloc.ip_address) # all that matters is the mac address and IP. the hostname and # client ID will be overwritten on the next renewal. buf.write('%s %s %s * *\n' % (timestamp, port.mac_address, ip_address)) contents = buf.getvalue() common_utils.replace_file(filename, contents) LOG.debug('Done building initial lease file %s with contents:\n%s', filename, contents) return filename @staticmethod def _format_address_for_dnsmasq(address): # (dzyu) Check if it is legal ipv6 address, if so, need wrap # it with '[]' to let dnsmasq to distinguish MAC address from # IPv6 address. if netaddr.valid_ipv6(address): return '[%s]' % address return address def _output_hosts_file(self): """Writes a dnsmasq compatible dhcp hosts file. The generated file is sent to the --dhcp-hostsfile option of dnsmasq, and lists the hosts on the network which should receive a dhcp lease. Each line in this file is in the form:: 'mac_address,FQDN,ip_address' IMPORTANT NOTE: a dnsmasq instance does not resolve hosts defined in this file if it did not give a lease to a host listed in it (e.g.: multiple dnsmasq instances on the same network if this network is on multiple network nodes). This file is only defining hosts which should receive a dhcp lease, the hosts resolution in itself is defined by the `_output_addn_hosts_file` method. """ buf = six.StringIO() filename = self.get_conf_file_name('host') LOG.debug('Building host file: %s', filename) dhcp_enabled_subnet_ids = [s.id for s in self.network.subnets if s.enable_dhcp] # NOTE(ihrachyshka): the loop should not log anything inside it, to # avoid potential performance drop when lots of hosts are dumped for host_tuple in self._iter_hosts(): port, alloc, hostname, name, no_dhcp, no_opts = host_tuple if no_dhcp: if not no_opts and self._get_port_extra_dhcp_opts(port): buf.write('%s,%s%s\n' % (port.mac_address, 'set:', port.id)) continue # don't write ip address which belongs to a dhcp disabled subnet. 
if alloc.subnet_id not in dhcp_enabled_subnet_ids: continue ip_address = self._format_address_for_dnsmasq(alloc.ip_address) if self._get_port_extra_dhcp_opts(port): client_id = self._get_client_id(port) if client_id and len(port.extra_dhcp_opts) > 1: buf.write('%s,%s%s,%s,%s,%s%s\n' % (port.mac_address, self._ID, client_id, name, ip_address, 'set:', port.id)) elif client_id and len(port.extra_dhcp_opts) == 1: buf.write('%s,%s%s,%s,%s\n' % (port.mac_address, self._ID, client_id, name, ip_address)) else: buf.write('%s,%s,%s,%s%s\n' % (port.mac_address, name, ip_address, 'set:', port.id)) else: buf.write('%s,%s,%s\n' % (port.mac_address, name, ip_address)) common_utils.replace_file(filename, buf.getvalue()) LOG.debug('Done building host file %s', filename) return filename def _get_client_id(self, port): if self._get_port_extra_dhcp_opts(port): for opt in port.extra_dhcp_opts: if opt.opt_name == edo_ext.CLIENT_ID: return opt.opt_value def _read_hosts_file_leases(self, filename): leases = set() try: with open(filename) as f: for l in f.readlines(): host = l.strip().split(',') mac = host[0] client_id = None if host[1].startswith('set:'): continue if host[1].startswith(self._ID): ip = host[3].strip('[]') client_id = host[1][len(self._ID):] else: ip = host[2].strip('[]') leases.add((ip, mac, client_id)) except (OSError, IOError): LOG.debug('Error while reading hosts file %s', filename) return leases def _release_unused_leases(self): filename = self.get_conf_file_name('host') old_leases = self._read_hosts_file_leases(filename) new_leases = set() dhcp_port_exists = False dhcp_port_on_this_host = self.device_manager.get_device_id( self.network) for port in self.network.ports: client_id = self._get_client_id(port) for alloc in port.fixed_ips: new_leases.add((alloc.ip_address, port.mac_address, client_id)) if port.device_id == dhcp_port_on_this_host: dhcp_port_exists = True for ip, mac, client_id in old_leases - new_leases: self._release_lease(mac, ip, client_id) if not dhcp_port_exists: self.device_manager.driver.unplug( self.interface_name, namespace=self.network.namespace) def _output_addn_hosts_file(self): """Writes a dnsmasq compatible additional hosts file. The generated file is sent to the --addn-hosts option of dnsmasq, and lists the hosts on the network which should be resolved even if the dnsmasq instance did not give a lease to the host (see the `_output_hosts_file` method). Each line in this file is in the same form as a standard /etc/hosts file. """ buf = six.StringIO() for host_tuple in self._iter_hosts(): port, alloc, hostname, fqdn, no_dhcp, no_opts = host_tuple # It is compulsory to write the `fqdn` before the `hostname` in # order to obtain it in PTR responses. 
if alloc: buf.write('%s\t%s %s\n' % (alloc.ip_address, fqdn, hostname)) addn_hosts = self.get_conf_file_name('addn_hosts') common_utils.replace_file(addn_hosts, buf.getvalue()) return addn_hosts def _output_opts_file(self): """Write a dnsmasq compatible options file.""" options, subnet_index_map = self._generate_opts_per_subnet() options += self._generate_opts_per_port(subnet_index_map) name = self.get_conf_file_name('opts') common_utils.replace_file(name, '\n'.join(options)) return name def _generate_opts_per_subnet(self): options = [] subnet_index_map = {} if self.conf.enable_isolated_metadata or self.conf.force_metadata: subnet_to_interface_ip = self._make_subnet_interface_ip_map() isolated_subnets = self.get_isolated_subnets(self.network) for i, subnet in enumerate(self.network.subnets): addr_mode = getattr(subnet, 'ipv6_address_mode', None) if (not subnet.enable_dhcp or (subnet.ip_version == 6 and addr_mode == constants.IPV6_SLAAC)): continue if subnet.dns_nameservers: options.append( self._format_option( subnet.ip_version, i, 'dns-server', ','.join( Dnsmasq._convert_to_literal_addrs( subnet.ip_version, subnet.dns_nameservers)))) else: # use the dnsmasq ip as nameservers only if there is no # dns-server submitted by the server subnet_index_map[subnet.id] = i if self.conf.dhcp_domain and subnet.ip_version == 6: options.append('tag:tag%s,option6:domain-search,%s' % (i, ''.join(self.conf.dhcp_domain))) gateway = subnet.gateway_ip host_routes = [] for hr in subnet.host_routes: if hr.destination == constants.IPv4_ANY: if not gateway: gateway = hr.nexthop else: host_routes.append("%s,%s" % (hr.destination, hr.nexthop)) # Add host routes for isolated network segments if (self.conf.force_metadata or (isolated_subnets[subnet.id] and self.conf.enable_isolated_metadata and subnet.ip_version == 4)): subnet_dhcp_ip = subnet_to_interface_ip[subnet.id] host_routes.append( '%s/32,%s' % (METADATA_DEFAULT_IP, subnet_dhcp_ip) ) elif not isolated_subnets[subnet.id] and gateway: host_routes.append( '%s/32,%s' % (METADATA_DEFAULT_IP, gateway) ) if subnet.ip_version == 4: host_routes.extend(["%s,0.0.0.0" % (s.cidr) for s in self.network.subnets if (s.ip_version == 4 and s.cidr != subnet.cidr)]) if host_routes: if gateway: host_routes.append("%s,%s" % (constants.IPv4_ANY, gateway)) options.append( self._format_option(subnet.ip_version, i, 'classless-static-route', ','.join(host_routes))) options.append( self._format_option(subnet.ip_version, i, WIN2k3_STATIC_DNS, ','.join(host_routes))) if gateway: options.append(self._format_option(subnet.ip_version, i, 'router', gateway)) else: options.append(self._format_option(subnet.ip_version, i, 'router')) return options, subnet_index_map def _generate_opts_per_port(self, subnet_index_map): options = [] dhcp_ips = collections.defaultdict(list) for port in self.network.ports: if self._get_port_extra_dhcp_opts(port): port_ip_versions = set( [netaddr.IPAddress(ip.ip_address).version for ip in port.fixed_ips]) for opt in port.extra_dhcp_opts: if opt.opt_name == edo_ext.CLIENT_ID: continue opt_ip_version = opt.ip_version if opt_ip_version in port_ip_versions: options.append( self._format_option(opt_ip_version, port.id, opt.opt_name, opt.opt_value)) else: LOG.info(_LI("Cannot apply dhcp option %(opt)s " "because it's ip_version %(version)d " "is not in port's address IP versions"), {'opt': opt.opt_name, 'version': opt_ip_version}) # provides all dnsmasq ip as dns-server if there is more than # one dnsmasq for a subnet and there is no dns-server submitted # by the server if 
port.device_owner == constants.DEVICE_OWNER_DHCP: for ip in port.fixed_ips: i = subnet_index_map.get(ip.subnet_id) if i is None: continue dhcp_ips[i].append(ip.ip_address) for i, ips in dhcp_ips.items(): for ip_version in (4, 6): vx_ips = [ip for ip in ips if netaddr.IPAddress(ip).version == ip_version] if len(vx_ips) > 1: options.append( self._format_option( ip_version, i, 'dns-server', ','.join( Dnsmasq._convert_to_literal_addrs(ip_version, vx_ips)))) return options def _make_subnet_interface_ip_map(self): ip_dev = ip_lib.IPDevice(self.interface_name, namespace=self.network.namespace) subnet_lookup = dict( (netaddr.IPNetwork(subnet.cidr), subnet.id) for subnet in self.network.subnets ) retval = {} for addr in ip_dev.addr.list(): ip_net = netaddr.IPNetwork(addr['cidr']) if ip_net in subnet_lookup: retval[subnet_lookup[ip_net]] = addr['cidr'].split('/')[0] return retval def _format_option(self, ip_version, tag, option, *args): """Format DHCP option by option name or code.""" option = str(option) pattern = "(tag:(.*),)?(.*)$" matches = re.match(pattern, option) extra_tag = matches.groups()[0] option = matches.groups()[2] if isinstance(tag, int): tag = self._TAG_PREFIX % tag if not option.isdigit(): if ip_version == 4: option = 'option:%s' % option else: option = 'option6:%s' % option if extra_tag: tags = ('tag:' + tag, extra_tag[:-1], '%s' % option) else: tags = ('tag:' + tag, '%s' % option) return ','.join(tags + args) @staticmethod def _convert_to_literal_addrs(ip_version, ips): if ip_version == 4: return ips return ['[' + ip + ']' for ip in ips] @classmethod def get_isolated_subnets(cls, network): """Returns a dict indicating whether or not a subnet is isolated A subnet is considered non-isolated if there is a port connected to the subnet, and the port's ip address matches that of the subnet's gateway. The port must be owned by a neutron router. """ isolated_subnets = collections.defaultdict(lambda: True) subnets = dict((subnet.id, subnet) for subnet in network.subnets) for port in network.ports: if port.device_owner not in constants.ROUTER_INTERFACE_OWNERS: continue for alloc in port.fixed_ips: if subnets[alloc.subnet_id].gateway_ip == alloc.ip_address: isolated_subnets[alloc.subnet_id] = False return isolated_subnets @classmethod def should_enable_metadata(cls, conf, network): """Determine whether the metadata proxy is needed for a network This method returns True for truly isolated networks (ie: not attached to a router) when enable_isolated_metadata is True, or for all the networks when the force_metadata flags is True. This method also returns True when enable_metadata_network is True, and the network passed as a parameter has a subnet in the link-local CIDR, thus characterizing it as a "metadata" network. The metadata network is used by solutions which do not leverage the l3 agent for providing access to the metadata service via logical routers built with 3rd party backends. """ if conf.force_metadata: # Only ipv4 subnet, with dhcp enabled, will use metadata proxy. return any(s for s in network.subnets if s.ip_version == 4 and s.enable_dhcp) if conf.enable_metadata_network and conf.enable_isolated_metadata: # check if the network has a metadata subnet meta_cidr = netaddr.IPNetwork(METADATA_DEFAULT_CIDR) if any(netaddr.IPNetwork(s.cidr) in meta_cidr for s in network.subnets): return True if not conf.enable_isolated_metadata: return False isolated_subnets = cls.get_isolated_subnets(network) # Only ipv4 isolated subnet, which has dhcp enabled, will use # metadata proxy. 
return any(isolated_subnets[s.id] for s in network.subnets if s.ip_version == 4 and s.enable_dhcp) class DeviceManager(object): def __init__(self, conf, plugin): self.conf = conf self.plugin = plugin self.driver = agent_common_utils.load_interface_driver(conf) def get_interface_name(self, network, port): """Return interface(device) name for use by the DHCP process.""" return self.driver.get_device_name(port) def get_device_id(self, network): """Return a unique DHCP device ID for this host on the network.""" # There could be more than one dhcp server per network, so create # a device id that combines host and network ids return common_utils.get_dhcp_agent_device_id(network.id, self.conf.host) def _set_default_route(self, network, device_name): """Sets the default gateway for this dhcp namespace. This method is idempotent and will only adjust the route if adjusting it would change it from what it already is. This makes it safe to call and avoids unnecessary perturbation of the system. """ device = ip_lib.IPDevice(device_name, namespace=network.namespace) gateway = device.route.get_gateway() if gateway: gateway = gateway.get('gateway') for subnet in network.subnets: skip_subnet = ( subnet.ip_version != 4 or not subnet.enable_dhcp or subnet.gateway_ip is None) if skip_subnet: continue if gateway != subnet.gateway_ip: LOG.debug('Setting gateway for dhcp netns on net %(n)s to ' '%(ip)s', {'n': network.id, 'ip': subnet.gateway_ip}) # Check for and remove the on-link route for the old # gateway being replaced, if it is outside the subnet is_old_gateway_not_in_subnet = (gateway and not ipam_utils.check_subnet_ip( subnet.cidr, gateway)) if is_old_gateway_not_in_subnet: v4_onlink = device.route.list_onlink_routes( constants.IP_VERSION_4) v6_onlink = device.route.list_onlink_routes( constants.IP_VERSION_6) existing_onlink_routes = set( r['cidr'] for r in v4_onlink + v6_onlink) if gateway in existing_onlink_routes: device.route.delete_route(gateway, scope='link') is_new_gateway_not_in_subnet = (subnet.gateway_ip and not ipam_utils.check_subnet_ip( subnet.cidr, subnet.gateway_ip)) if is_new_gateway_not_in_subnet: device.route.add_route(subnet.gateway_ip, scope='link') device.route.add_gateway(subnet.gateway_ip) return # No subnets on the network have a valid gateway. Clean it up to avoid # confusion from seeing an invalid gateway here. if gateway is not None: LOG.debug('Removing gateway for dhcp netns on net %s', network.id) device.route.delete_gateway(gateway) def _setup_existing_dhcp_port(self, network, device_id, dhcp_subnets): """Set up the existing DHCP port, if there is one.""" # To avoid pylint thinking that port might be undefined after # the following loop... port = None # Look for an existing DHCP port for this network. for port in network.ports: port_device_id = getattr(port, 'device_id', None) if port_device_id == device_id: # If using gateway IPs on this port, we can skip the # following code, whose purpose is just to review and # update the Neutron-allocated IP addresses for the # port. if self.driver.use_gateway_ips: return port # Otherwise break out, as we now have the DHCP port # whose subnets and addresses we need to review. break else: return None # Compare what the subnets should be against what is already # on the port. dhcp_enabled_subnet_ids = set(dhcp_subnets) port_subnet_ids = set(ip.subnet_id for ip in port.fixed_ips) # If those differ, we need to call update. 
if dhcp_enabled_subnet_ids != port_subnet_ids: # Collect the subnets and fixed IPs that the port already # has, for subnets that are still in the DHCP-enabled set. wanted_fixed_ips = [] for fixed_ip in port.fixed_ips: if fixed_ip.subnet_id in dhcp_enabled_subnet_ids: wanted_fixed_ips.append( {'subnet_id': fixed_ip.subnet_id, 'ip_address': fixed_ip.ip_address}) # Add subnet IDs for new DHCP-enabled subnets. wanted_fixed_ips.extend( dict(subnet_id=s) for s in dhcp_enabled_subnet_ids - port_subnet_ids) # Update the port to have the calculated subnets and fixed # IPs. The Neutron server will allocate a fresh IP for # each subnet that doesn't already have one. port = self.plugin.update_dhcp_port( port.id, {'port': {'network_id': network.id, 'fixed_ips': wanted_fixed_ips}}) if not port: raise exceptions.Conflict() return port def _setup_reserved_dhcp_port(self, network, device_id, dhcp_subnets): """Setup the reserved DHCP port, if there is one.""" LOG.debug('DHCP port %(device_id)s on network %(network_id)s' ' does not yet exist. Checking for a reserved port.', {'device_id': device_id, 'network_id': network.id}) for port in network.ports: port_device_id = getattr(port, 'device_id', None) if port_device_id == constants.DEVICE_ID_RESERVED_DHCP_PORT: try: port = self.plugin.update_dhcp_port( port.id, {'port': {'network_id': network.id, 'device_id': device_id}}) except oslo_messaging.RemoteError as e: if e.exc_type == exceptions.DhcpPortInUse: LOG.info(_LI("Skipping DHCP port %s as it is " "already in use"), port.id) continue raise if port: return port def _setup_new_dhcp_port(self, network, device_id, dhcp_subnets): """Create and set up new DHCP port for the specified network.""" LOG.debug('DHCP port %(device_id)s on network %(network_id)s' ' does not yet exist. Creating new one.', {'device_id': device_id, 'network_id': network.id}) # Make a list of the subnets that need a unique IP address for # this DHCP port. if self.driver.use_gateway_ips: unique_ip_subnets = [] else: unique_ip_subnets = [dict(subnet_id=s) for s in dhcp_subnets] port_dict = dict( name='', admin_state_up=True, device_id=device_id, network_id=network.id, tenant_id=network.tenant_id, fixed_ips=unique_ip_subnets) return self.plugin.create_dhcp_port({'port': port_dict}) def setup_dhcp_port(self, network): """Create/update DHCP port for the host if needed and return port.""" # The ID that the DHCP port will have (or already has). device_id = self.get_device_id(network) # Get the set of DHCP-enabled subnets on this network. dhcp_subnets = {subnet.id: subnet for subnet in network.subnets if subnet.enable_dhcp} # There are 3 cases: either the DHCP port already exists (but # might need to be updated for a changed set of subnets); or # some other code has already prepared a 'reserved' DHCP port, # and we just need to adopt that; or we need to create a new # DHCP port. Try each of those in turn until we have a DHCP # port. 
for setup_method in (self._setup_existing_dhcp_port, self._setup_reserved_dhcp_port, self._setup_new_dhcp_port): dhcp_port = setup_method(network, device_id, dhcp_subnets) if dhcp_port: break else: raise exceptions.Conflict() # Convert subnet_id to subnet dict fixed_ips = [dict(subnet_id=fixed_ip.subnet_id, ip_address=fixed_ip.ip_address, subnet=dhcp_subnets[fixed_ip.subnet_id]) for fixed_ip in dhcp_port.fixed_ips] ips = [DictModel(item) if isinstance(item, dict) else item for item in fixed_ips] dhcp_port.fixed_ips = ips return dhcp_port def _update_dhcp_port(self, network, port): for index in range(len(network.ports)): if network.ports[index].id == port.id: network.ports[index] = port break else: network.ports.append(port) def _cleanup_stale_devices(self, network, dhcp_port): LOG.debug("Cleaning stale devices for network %s", network.id) dev_name = self.driver.get_device_name(dhcp_port) ns_ip = ip_lib.IPWrapper(namespace=network.namespace) for d in ns_ip.get_devices(exclude_loopback=True): # delete all devices except current active DHCP port device if d.name != dev_name: LOG.debug("Found stale device %s, deleting", d.name) self.driver.unplug(d.name, namespace=network.namespace) def setup(self, network): """Create and initialize a device for network's DHCP on this host.""" port = self.setup_dhcp_port(network) self._update_dhcp_port(network, port) interface_name = self.get_interface_name(network, port) if ip_lib.ensure_device_is_ready(interface_name, namespace=network.namespace): LOG.debug('Reusing existing device: %s.', interface_name) else: try: self.driver.plug(network.id, port.id, interface_name, port.mac_address, namespace=network.namespace, mtu=network.get('mtu')) except Exception: with excutils.save_and_reraise_exception(): LOG.exception(_LE('Unable to plug DHCP port for ' 'network %s. Releasing port.'), network.id) self.plugin.release_dhcp_port(network.id, port.device_id) self.fill_dhcp_udp_checksums(namespace=network.namespace) ip_cidrs = [] for fixed_ip in port.fixed_ips: subnet = fixed_ip.subnet if not ipv6_utils.is_auto_address_subnet(subnet): net = netaddr.IPNetwork(subnet.cidr) ip_cidr = '%s/%s' % (fixed_ip.ip_address, net.prefixlen) ip_cidrs.append(ip_cidr) if self.driver.use_gateway_ips: # For each DHCP-enabled subnet, add that subnet's gateway # IP address to the Linux device for the DHCP port. 
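# e.g. (illustrative values) a subnet with cidr '10.0.0.0/24' and
# gateway_ip '10.0.0.1' contributes '10.0.0.1/24' to ip_cidrs below.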
for subnet in network.subnets: if not subnet.enable_dhcp: continue gateway = subnet.gateway_ip if gateway: net = netaddr.IPNetwork(subnet.cidr) ip_cidrs.append('%s/%s' % (gateway, net.prefixlen)) if self.conf.force_metadata or self.conf.enable_isolated_metadata: ip_cidrs.append(METADATA_DEFAULT_CIDR) self.driver.init_l3(interface_name, ip_cidrs, namespace=network.namespace) self._set_default_route(network, interface_name) try: self._cleanup_stale_devices(network, port) except Exception: # catch everything as we don't want to fail because of # cleanup step LOG.error(_LE("Exception during stale dhcp device cleanup")) return interface_name def update(self, network, device_name): """Update device settings for the network's DHCP on this host.""" self._set_default_route(network, device_name) def destroy(self, network, device_name): """Destroy the device used for the network's DHCP on this host.""" if device_name: self.driver.unplug(device_name, namespace=network.namespace) else: LOG.debug('No interface exists for network %s', network.id) self.plugin.release_dhcp_port(network.id, self.get_device_id(network)) def fill_dhcp_udp_checksums(self, namespace): """Ensure DHCP reply packets always have correct UDP checksums.""" iptables_mgr = iptables_manager.IptablesManager(use_ipv6=False, namespace=namespace) ipv4_rule = ('-p udp -m udp --dport %d -j CHECKSUM --checksum-fill' % constants.DHCP_RESPONSE_PORT) iptables_mgr.ipv4['mangle'].add_rule('POSTROUTING', ipv4_rule) iptables_mgr.apply() neutron-8.4.0/neutron/agent/linux/keepalived.py0000664000567000056710000003761613044372760023013 0ustar jenkinsjenkins00000000000000# Copyright (C) 2014 eNovance SAS # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import errno import itertools import os import netaddr from oslo_config import cfg from oslo_log import log as logging from neutron._i18n import _, _LE from neutron.agent.linux import external_process from neutron.common import exceptions from neutron.common import utils as common_utils VALID_STATES = ['MASTER', 'BACKUP'] VALID_AUTH_TYPES = ['AH', 'PASS'] HA_DEFAULT_PRIORITY = 50 PRIMARY_VIP_RANGE_SIZE = 24 # TODO(amuller): Use L3 agent constant when new constants module is introduced. FIP_LL_SUBNET = '169.254.64.0/18' KEEPALIVED_SERVICE_NAME = 'keepalived' GARP_MASTER_DELAY = 60 LOG = logging.getLogger(__name__) def get_free_range(parent_range, excluded_ranges, size=PRIMARY_VIP_RANGE_SIZE): """Get a free IP range, from parent_range, of the specified size. :param parent_range: String representing an IP range. E.g: '169.254.0.0/16' :param excluded_ranges: A list of strings to be excluded from parent_range :param size: What should be the size of the range returned? 
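Example (an illustrative sketch of the expected result):

    >>> get_free_range('169.254.0.0/16', ['169.254.0.0/24'], size=24)
    '169.254.1.0/24'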
:return: A string representing an IP range """ free_cidrs = netaddr.IPSet([parent_range]) - netaddr.IPSet(excluded_ranges) for cidr in free_cidrs.iter_cidrs(): if cidr.prefixlen <= size: return '%s/%s' % (cidr.network, size) raise ValueError(_('Network of size %(size)s, from IP range ' '%(parent_range)s excluding IP ranges ' '%(excluded_ranges)s was not found.') % {'size': size, 'parent_range': parent_range, 'excluded_ranges': excluded_ranges}) class InvalidInstanceStateException(exceptions.NeutronException): message = _('Invalid instance state: %(state)s, valid states are: ' '%(valid_states)s') def __init__(self, **kwargs): if 'valid_states' not in kwargs: kwargs['valid_states'] = ', '.join(VALID_STATES) super(InvalidInstanceStateException, self).__init__(**kwargs) class InvalidAuthenticationTypeException(exceptions.NeutronException): message = _('Invalid authentication type: %(auth_type)s, ' 'valid types are: %(valid_auth_types)s') def __init__(self, **kwargs): if 'valid_auth_types' not in kwargs: kwargs['valid_auth_types'] = ', '.join(VALID_AUTH_TYPES) super(InvalidAuthenticationTypeException, self).__init__(**kwargs) class KeepalivedVipAddress(object): """A virtual address entry of a keepalived configuration.""" def __init__(self, ip_address, interface_name, scope=None): self.ip_address = ip_address self.interface_name = interface_name self.scope = scope def __eq__(self, other): return (isinstance(other, KeepalivedVipAddress) and self.ip_address == other.ip_address) def __str__(self): return '[%s, %s, %s]' % (self.ip_address, self.interface_name, self.scope) def build_config(self): result = '%s dev %s' % (self.ip_address, self.interface_name) if self.scope: result += ' scope %s' % self.scope return result class KeepalivedVirtualRoute(object): """A virtual route entry of a keepalived configuration.""" def __init__(self, destination, nexthop, interface_name=None, scope=None): self.destination = destination self.nexthop = nexthop self.interface_name = interface_name self.scope = scope def build_config(self): output = self.destination if self.nexthop: output += ' via %s' % self.nexthop if self.interface_name: output += ' dev %s' % self.interface_name if self.scope: output += ' scope %s' % self.scope return output class KeepalivedInstanceRoutes(object): def __init__(self): self.gateway_routes = [] self.extra_routes = [] self.extra_subnets = [] def remove_routes_on_interface(self, interface_name): self.gateway_routes = [gw_rt for gw_rt in self.gateway_routes if gw_rt.interface_name != interface_name] # NOTE(amuller): extra_routes are initialized from the router's # 'routes' attribute. These routes do not have an interface # parameter and so cannot be removed via an interface_name lookup. 
self.extra_subnets = [route for route in self.extra_subnets if route.interface_name != interface_name] @property def routes(self): return self.gateway_routes + self.extra_routes + self.extra_subnets def __len__(self): return len(self.routes) def build_config(self): return itertools.chain([' virtual_routes {'], (' %s' % route.build_config() for route in self.routes), [' }']) class KeepalivedInstance(object): """Instance section of a keepalived configuration.""" def __init__(self, state, interface, vrouter_id, ha_cidrs, priority=HA_DEFAULT_PRIORITY, advert_int=None, mcast_src_ip=None, nopreempt=False, garp_master_delay=GARP_MASTER_DELAY): self.name = 'VR_%s' % vrouter_id if state not in VALID_STATES: raise InvalidInstanceStateException(state=state) self.state = state self.interface = interface self.vrouter_id = vrouter_id self.priority = priority self.nopreempt = nopreempt self.advert_int = advert_int self.mcast_src_ip = mcast_src_ip self.garp_master_delay = garp_master_delay self.track_interfaces = [] self.vips = [] self.virtual_routes = KeepalivedInstanceRoutes() self.authentication = None metadata_cidr = '169.254.169.254/32' self.primary_vip_range = get_free_range( parent_range='169.254.0.0/16', excluded_ranges=[metadata_cidr, FIP_LL_SUBNET] + ha_cidrs, size=PRIMARY_VIP_RANGE_SIZE) def set_authentication(self, auth_type, password): if auth_type not in VALID_AUTH_TYPES: raise InvalidAuthenticationTypeException(auth_type=auth_type) self.authentication = (auth_type, password) def add_vip(self, ip_cidr, interface_name, scope): vip = KeepalivedVipAddress(ip_cidr, interface_name, scope) if vip not in self.vips: self.vips.append(vip) else: LOG.debug('VIP %s already present in %s', vip, self.vips) def remove_vips_vroutes_by_interface(self, interface_name): self.vips = [vip for vip in self.vips if vip.interface_name != interface_name] self.virtual_routes.remove_routes_on_interface(interface_name) def remove_vip_by_ip_address(self, ip_address): self.vips = [vip for vip in self.vips if vip.ip_address != ip_address] def get_existing_vip_ip_addresses(self, interface_name): return [vip.ip_address for vip in self.vips if vip.interface_name == interface_name] def _build_track_interface_config(self): return itertools.chain( [' track_interface {'], (' %s' % i for i in self.track_interfaces), [' }']) def get_primary_vip(self): """Return an address in the primary_vip_range CIDR, with the router's VRID in the host section. For example, if primary_vip_range is 169.254.0.0/24, and this router's VRID is 5, the result is 169.254.0.5. Using the VRID assures that the primary VIP is consistent amongst HA router instances on different nodes. """ ip = (netaddr.IPNetwork(self.primary_vip_range).network + self.vrouter_id) return str(netaddr.IPNetwork('%s/%s' % (ip, PRIMARY_VIP_RANGE_SIZE))) def _build_vips_config(self): # NOTE(amuller): The primary VIP must be consistent in order to avoid # keepalived bugs. Changing the VIP in the 'virtual_ipaddress' and # SIGHUP'ing keepalived can remove virtual routers, including the # router's default gateway. # We solve this by never changing the VIP in the virtual_ipaddress # section, herein known as the primary VIP. # The only interface known to exist for HA routers is the HA interface # (self.interface). We generate an IP on that device and use it as the # primary VIP. The other VIPs (Internal interfaces IPs, the external # interface IP and floating IPs) are placed in the # virtual_ipaddress_excluded section. 
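# A sketch of the section this builds (interface names are made up;
# assuming primary_vip_range 169.254.0.0/24 and VRID 5, as in the
# get_primary_vip() docstring):
#     virtual_ipaddress {
#         169.254.0.5/24 dev ha-aaa
#     }
#     virtual_ipaddress_excluded {
#         10.0.0.1/24 dev qr-bbb
#     }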
primary = KeepalivedVipAddress(self.get_primary_vip(), self.interface) vips_result = [' virtual_ipaddress {', ' %s' % primary.build_config(), ' }'] if self.vips: vips_result.extend( itertools.chain([' virtual_ipaddress_excluded {'], (' %s' % vip.build_config() for vip in sorted(self.vips, key=lambda vip: vip.ip_address)), [' }'])) return vips_result def _build_virtual_routes_config(self): return itertools.chain([' virtual_routes {'], (' %s' % route.build_config() for route in self.virtual_routes), [' }']) def build_config(self): config = ['vrrp_instance %s {' % self.name, ' state %s' % self.state, ' interface %s' % self.interface, ' virtual_router_id %s' % self.vrouter_id, ' priority %s' % self.priority, ' garp_master_delay %s' % self.garp_master_delay] if self.nopreempt: config.append(' nopreempt') if self.advert_int: config.append(' advert_int %s' % self.advert_int) if self.authentication: auth_type, password = self.authentication authentication = [' authentication {', ' auth_type %s' % auth_type, ' auth_pass %s' % password, ' }'] config.extend(authentication) if self.mcast_src_ip: config.append(' mcast_src_ip %s' % self.mcast_src_ip) if self.track_interfaces: config.extend(self._build_track_interface_config()) config.extend(self._build_vips_config()) if len(self.virtual_routes): config.extend(self.virtual_routes.build_config()) config.append('}') return config class KeepalivedConf(object): """A keepalived configuration.""" def __init__(self): self.reset() def reset(self): self.instances = {} def add_instance(self, instance): self.instances[instance.vrouter_id] = instance def get_instance(self, vrouter_id): return self.instances.get(vrouter_id) def build_config(self): config = [] for instance in self.instances.values(): config.extend(instance.build_config()) return config def get_config_str(self): """Generates and returns the keepalived configuration. :return: Keepalived configuration string. """ return '\n'.join(self.build_config()) class KeepalivedManager(object): """Wrapper for keepalived. This wrapper makes it possible to write keepalived config files and to start/restart the keepalived process.
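A minimal usage sketch (illustrative; keepalived_conf is a
KeepalivedConf instance and process_monitor an external process
monitor, both assumed to already exist):

    manager = KeepalivedManager('router-uuid', keepalived_conf,
                                process_monitor,
                                conf_path='/var/lib/neutron/ha_confs')
    manager.spawn()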
""" def __init__(self, resource_id, config, process_monitor, conf_path='/tmp', namespace=None): self.resource_id = resource_id self.config = config self.namespace = namespace self.process_monitor = process_monitor self.conf_path = conf_path def get_conf_dir(self): confs_dir = os.path.abspath(os.path.normpath(self.conf_path)) conf_dir = os.path.join(confs_dir, self.resource_id) return conf_dir def get_full_config_file_path(self, filename, ensure_conf_dir=True): conf_dir = self.get_conf_dir() if ensure_conf_dir: common_utils.ensure_dir(conf_dir) return os.path.join(conf_dir, filename) def _output_config_file(self): config_str = self.config.get_config_str() config_path = self.get_full_config_file_path('keepalived.conf') common_utils.replace_file(config_path, config_str) return config_path @staticmethod def _safe_remove_pid_file(pid_file): try: os.remove(pid_file) except OSError as e: if e.errno != errno.ENOENT: LOG.error(_LE("Could not delete file %s, keepalived can " "refuse to start."), pid_file) def get_vrrp_pid_file_name(self, base_pid_file): return '%s-vrrp' % base_pid_file def get_conf_on_disk(self): config_path = self.get_full_config_file_path('keepalived.conf') try: with open(config_path) as conf: return conf.read() except (OSError, IOError) as e: if e.errno != errno.ENOENT: raise def spawn(self): config_path = self._output_config_file() keepalived_pm = self.get_process() vrrp_pm = self._get_vrrp_process( self.get_vrrp_pid_file_name(keepalived_pm.get_pid_file_name())) keepalived_pm.default_cmd_callback = ( self._get_keepalived_process_callback(vrrp_pm, config_path)) keepalived_pm.enable(reload_cfg=True) self.process_monitor.register(uuid=self.resource_id, service_name=KEEPALIVED_SERVICE_NAME, monitored_process=keepalived_pm) LOG.debug('Keepalived spawned with config %s', config_path) def disable(self): self.process_monitor.unregister(uuid=self.resource_id, service_name=KEEPALIVED_SERVICE_NAME) pm = self.get_process() pm.disable(sig='15') def get_process(self): return external_process.ProcessManager( cfg.CONF, self.resource_id, self.namespace, pids_path=self.conf_path) def _get_vrrp_process(self, pid_file): return external_process.ProcessManager( cfg.CONF, self.resource_id, self.namespace, pid_file=pid_file) def _get_keepalived_process_callback(self, vrrp_pm, config_path): def callback(pid_file): # If keepalived process crashed unexpectedly, the vrrp process # will be orphan and prevent keepalived process to be spawned. # A check here will let the l3-agent to kill the orphan process # and spawn keepalived successfully. if vrrp_pm.active: vrrp_pm.disable() self._safe_remove_pid_file(pid_file) self._safe_remove_pid_file(self.get_vrrp_pid_file_name(pid_file)) cmd = ['keepalived', '-P', '-f', config_path, '-p', pid_file, '-r', self.get_vrrp_pid_file_name(pid_file)] return cmd return callback neutron-8.4.0/neutron/agent/linux/__init__.py0000664000567000056710000000000013044372736022415 0ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/agent/linux/async_process.py0000664000567000056710000002324513044372760023546 0ustar jenkinsjenkins00000000000000# Copyright 2013 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import signal import eventlet import eventlet.event import eventlet.queue from oslo_log import log as logging from neutron._i18n import _, _LE from neutron.agent.linux import ip_lib from neutron.agent.linux import utils from neutron.common import utils as common_utils LOG = logging.getLogger(__name__) class AsyncProcessException(Exception): pass class AsyncProcess(object): """Manages an asynchronous process. This class spawns a new process via subprocess and uses greenthreads to read stderr and stdout asynchronously into queues that can be read via repeatedly calling iter_stdout() and iter_stderr(). If respawn_interval is non-zero, any error in communicating with the managed process will result in the process and greenthreads being cleaned up and the process restarted after the specified interval. Example usage: >>> import time >>> proc = AsyncProcess(['ping']) >>> proc.start() >>> time.sleep(5) >>> proc.stop() >>> for line in proc.iter_stdout(): ... print(line) """ def __init__(self, cmd, run_as_root=False, respawn_interval=None, namespace=None, log_output=False, die_on_error=False): """Constructor. :param cmd: The list of command arguments to invoke. :param run_as_root: The process should run with elevated privileges. :param respawn_interval: Optional, the interval in seconds to wait to respawn after unexpected process death. Respawn will only be attempted if a value of 0 or greater is provided. :param namespace: Optional, start the command in the specified namespace. :param log_output: Optional, also log received output. :param die_on_error: Optional, kills the process on stderr output. """ self.cmd_without_namespace = cmd self._cmd = ip_lib.add_namespace_to_cmd(cmd, namespace) self.run_as_root = run_as_root if respawn_interval is not None and respawn_interval < 0: raise ValueError(_('respawn_interval must be >= 0 if provided.')) self.respawn_interval = respawn_interval self._process = None self._is_running = False self._kill_event = None self._reset_queues() self._watchers = [] self.log_output = log_output self.die_on_error = die_on_error @property def cmd(self): return ' '.join(self._cmd) def _reset_queues(self): self._stdout_lines = eventlet.queue.LightQueue() self._stderr_lines = eventlet.queue.LightQueue() def is_active(self): # If using sudo rootwrap as a root_helper, we have to wait until sudo # spawns rootwrap and rootwrap spawns the process. return utils.pid_invoked_with_cmdline( self.pid, self.cmd_without_namespace) def start(self, block=False): """Launch a process and monitor it asynchronously. :param block: Block until the process has started. :raises eventlet.timeout.Timeout if blocking is True and the process did not start in time. """ LOG.debug('Launching async process [%s].', self.cmd) if self._is_running: raise AsyncProcessException(_('Process is already started')) else: self._spawn() if block: utils.wait_until_true(self.is_active) def stop(self, block=False, kill_signal=signal.SIGKILL): """Halt the process and watcher threads. :param block: Block until the process has stopped. 
:param kill_signal: Number of signal that will be sent to the process when terminating the process :raises eventlet.timeout.Timeout if blocking is True and the process did not stop in time. """ if self._is_running: LOG.debug('Halting async process [%s].', self.cmd) self._kill(kill_signal) else: raise AsyncProcessException(_('Process is not running.')) if block: utils.wait_until_true(lambda: not self.is_active()) def _spawn(self): """Spawn a process and its watchers.""" self._is_running = True self._kill_event = eventlet.event.Event() self._process, cmd = utils.create_process(self._cmd, run_as_root=self.run_as_root) self._watchers = [] for reader in (self._read_stdout, self._read_stderr): # Pass the stop event directly to the greenthread to # ensure that assignment of a new event to the instance # attribute does not prevent the greenthread from using # the original event. watcher = eventlet.spawn(self._watch_process, reader, self._kill_event) self._watchers.append(watcher) @property def pid(self): if self._process: return utils.get_root_helper_child_pid( self._process.pid, self.cmd_without_namespace, run_as_root=self.run_as_root) def _kill(self, kill_signal): """Kill the process and the associated watcher greenthreads.""" pid = self.pid if pid: self._is_running = False self._kill_process(pid, kill_signal) # Halt the greenthreads if they weren't already. if self._kill_event: self._kill_event.send() self._kill_event = None def _kill_process(self, pid, kill_signal): try: # A process started by a root helper will be running as # root and need to be killed via the same helper. utils.execute(['kill', '-%d' % kill_signal, pid], run_as_root=self.run_as_root) except Exception as ex: stale_pid = (isinstance(ex, RuntimeError) and 'No such process' in str(ex)) if not stale_pid: LOG.exception(_LE('An error occurred while killing [%s].'), self.cmd) return False if self._process: self._process.wait() return True def _handle_process_error(self): """Kill the async process and respawn if necessary.""" LOG.debug('Halting async process [%s] in response to an error.', self.cmd) self._kill(signal.SIGKILL) if self.respawn_interval is not None and self.respawn_interval >= 0: eventlet.sleep(self.respawn_interval) LOG.debug('Respawning async process [%s].', self.cmd) try: self.start() except AsyncProcessException: # Process was already respawned by someone else... pass def _watch_process(self, callback, kill_event): while not kill_event.ready(): try: output = callback() if not output and output != "": break except Exception: LOG.exception(_LE('An error occurred while communicating ' 'with async process [%s].'), self.cmd) break # Ensure that watching a process with lots of output does # not block execution of other greenthreads. eventlet.sleep() # self._is_running being True indicates that the loop was # broken out of due to an error in the watched process rather # than the loop condition being satisfied. 
if self._is_running: self._is_running = False self._handle_process_error() def _read(self, stream, queue): data = stream.readline() if data: data = common_utils.safe_decode_utf8(data.strip()) queue.put(data) return data def _read_stdout(self): data = self._read(self._process.stdout, self._stdout_lines) if self.log_output: LOG.debug('Output received from [%(cmd)s]: %(data)s', {'cmd': self.cmd, 'data': data}) return data def _read_stderr(self): data = self._read(self._process.stderr, self._stderr_lines) if self.log_output: LOG.error(_LE('Error received from [%(cmd)s]: %(err)s'), {'cmd': self.cmd, 'err': data}) if self.die_on_error: LOG.error(_LE("Process [%(cmd)s] dies due to the error: %(err)s"), {'cmd': self.cmd, 'err': data}) # the callback caller will use None to indicate the need to bail # out of the thread return None return data def _iter_queue(self, queue, block): while True: try: yield queue.get(block=block) except eventlet.queue.Empty: break def iter_stdout(self, block=False): return self._iter_queue(self._stdout_lines, block) def iter_stderr(self, block=False): return self._iter_queue(self._stderr_lines, block) neutron-8.4.0/neutron/agent/linux/pd.py0000664000567000056710000003345013044372760021275 0ustar jenkinsjenkins00000000000000# Copyright 2015 Cisco Systems # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import functools import signal import eventlet from oslo_config import cfg from oslo_log import log as logging import six from stevedore import driver from neutron._i18n import _ from neutron.agent.linux import utils as linux_utils from neutron.callbacks import events from neutron.callbacks import registry from neutron.callbacks import resources from neutron.common import constants as l3_constants from neutron.common import ipv6_utils from neutron.common import utils LOG = logging.getLogger(__name__) OPTS = [ cfg.StrOpt('pd_dhcp_driver', default='dibbler', help=_('Service to handle DHCPv6 Prefix delegation.')), ] class PrefixDelegation(object): def __init__(self, context, pmon, intf_driver, notifier, pd_update_cb, agent_conf): self.context = context self.pmon = pmon self.intf_driver = intf_driver self.notifier = notifier self.routers = {} self.pd_update_cb = pd_update_cb self.agent_conf = agent_conf self.pd_dhcp_driver = driver.DriverManager( namespace='neutron.agent.linux.pd_drivers', name=agent_conf.prefix_delegation_driver, ).driver registry.subscribe(add_router, resources.ROUTER, events.BEFORE_CREATE) registry.subscribe(remove_router, resources.ROUTER, events.AFTER_DELETE) self._get_sync_data() @utils.synchronized("l3-agent-pd") def enable_subnet(self, router_id, subnet_id, prefix, ri_ifname, mac): router = self.routers.get(router_id) if router is None: return pd_info = router['subnets'].get(subnet_id) if not pd_info: pd_info = PDInfo(ri_ifname=ri_ifname, mac=mac) router['subnets'][subnet_id] = pd_info pd_info.bind_lla = self._get_lla(mac) if pd_info.sync: pd_info.mac = mac pd_info.old_prefix = prefix else: self._add_lla(router, pd_info.get_bind_lla_with_mask()) def _delete_pd(self, router, pd_info): self._delete_lla(router, pd_info.get_bind_lla_with_mask()) if pd_info.client_started: pd_info.driver.disable(self.pmon, router['ns_name']) @utils.synchronized("l3-agent-pd") def disable_subnet(self, router_id, subnet_id): prefix_update = {} router = self.routers.get(router_id) if not router: return pd_info = router['subnets'].get(subnet_id) if not pd_info: return self._delete_pd(router, pd_info) prefix_update[subnet_id] = l3_constants.PROVISIONAL_IPV6_PD_PREFIX del router['subnets'][subnet_id] LOG.debug("Update server with prefixes: %s", prefix_update) self.notifier(self.context, prefix_update) @utils.synchronized("l3-agent-pd") def update_subnet(self, router_id, subnet_id, prefix): router = self.routers.get(router_id) if router is not None: pd_info = router['subnets'].get(subnet_id) if pd_info and pd_info.old_prefix != prefix: old_prefix = pd_info.old_prefix pd_info.old_prefix = prefix return old_prefix @utils.synchronized("l3-agent-pd") def add_gw_interface(self, router_id, gw_ifname): router = self.routers.get(router_id) prefix_update = {} if not router: return router['gw_interface'] = gw_ifname for subnet_id, pd_info in six.iteritems(router['subnets']): # gateway is added after internal router ports. 
# If a PD is being synced and the prefix is available, send an # update if the prefix is out of sync; if it is not available, # start the PD client bind_lla_with_mask = pd_info.get_bind_lla_with_mask() if pd_info.sync: pd_info.sync = False if pd_info.client_started: if pd_info.prefix != pd_info.old_prefix: prefix_update[subnet_id] = pd_info.prefix else: self._delete_lla(router, bind_lla_with_mask) self._add_lla(router, bind_lla_with_mask) else: self._add_lla(router, bind_lla_with_mask) if prefix_update: LOG.debug("Update server with prefixes: %s", prefix_update) self.notifier(self.context, prefix_update) def delete_router_pd(self, router): prefix_update = {} for subnet_id, pd_info in six.iteritems(router['subnets']): self._delete_lla(router, pd_info.get_bind_lla_with_mask()) if pd_info.client_started: pd_info.driver.disable(self.pmon, router['ns_name']) pd_info.prefix = None pd_info.client_started = False prefix = l3_constants.PROVISIONAL_IPV6_PD_PREFIX prefix_update[subnet_id] = prefix if prefix_update: LOG.debug("Update server with prefixes: %s", prefix_update) self.notifier(self.context, prefix_update) @utils.synchronized("l3-agent-pd") def remove_gw_interface(self, router_id): router = self.routers.get(router_id) if router is not None: router['gw_interface'] = None self.delete_router_pd(router) @utils.synchronized("l3-agent-pd") def sync_router(self, router_id): router = self.routers.get(router_id) if router is not None and router['gw_interface'] is None: self.delete_router_pd(router) @utils.synchronized("l3-agent-pd") def remove_stale_ri_ifname(self, router_id, stale_ifname): router = self.routers.get(router_id) if router is not None: for subnet_id, pd_info in router['subnets'].items(): if pd_info.ri_ifname == stale_ifname: self._delete_pd(router, pd_info) del router['subnets'][subnet_id] @staticmethod def _get_lla(mac): lla = ipv6_utils.get_ipv6_addr_by_EUI64(l3_constants.IPV6_LLA_PREFIX, mac) return lla def _get_llas(self, gw_ifname, ns_name): try: return self.intf_driver.get_ipv6_llas(gw_ifname, ns_name) except RuntimeError: # The error message was printed as part of the driver call # This could happen if the gw_ifname was removed # simply return and exit the thread return def _add_lla(self, router, lla_with_mask): if router['gw_interface']: self.intf_driver.add_ipv6_addr(router['gw_interface'], lla_with_mask, router['ns_name'], 'link') # There is a delay before the LLA becomes active. # This is because the kernel runs DAD to ensure LLA uniqueness. # Spawn a thread to wait for the interface to be ready self._spawn_lla_thread(router['gw_interface'], router['ns_name'], lla_with_mask) def _spawn_lla_thread(self, gw_ifname, ns_name, lla_with_mask): eventlet.spawn_n(self._ensure_lla_task, gw_ifname, ns_name, lla_with_mask) def _delete_lla(self, router, lla_with_mask): if lla_with_mask and router['gw_interface']: try: self.intf_driver.delete_ipv6_addr(router['gw_interface'], lla_with_mask, router['ns_name']) except RuntimeError: # Ignore error if the lla doesn't exist pass def _ensure_lla_task(self, gw_ifname, ns_name, lla_with_mask): # This should not take long unless the DAD check failed; in that # case, the subnet would never be assigned a prefix.
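# functools.partial pins the arguments here, so the predicate polled
# by wait_until_true() below behaves like this sketch:
#   lambda: self._lla_available(gw_ifname, ns_name, lla_with_mask)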
linux_utils.wait_until_true(functools.partial(self._lla_available, gw_ifname, ns_name, lla_with_mask), timeout=l3_constants.LLA_TASK_TIMEOUT, sleep=2) def _lla_available(self, gw_ifname, ns_name, lla_with_mask): llas = self._get_llas(gw_ifname, ns_name) if self._is_lla_active(lla_with_mask, llas): LOG.debug("LLA %s is active now" % lla_with_mask) self.pd_update_cb() return True @staticmethod def _is_lla_active(lla_with_mask, llas): for lla in llas: if lla_with_mask == lla['cidr']: return not lla['tentative'] return False @utils.synchronized("l3-agent-pd") def process_prefix_update(self): LOG.debug("Processing IPv6 PD Prefix Update") prefix_update = {} for router_id, router in six.iteritems(self.routers): if not router['gw_interface']: continue llas = None for subnet_id, pd_info in six.iteritems(router['subnets']): if pd_info.client_started: prefix = pd_info.driver.get_prefix() if prefix != pd_info.prefix: pd_info.prefix = prefix prefix_update[subnet_id] = prefix else: if not llas: llas = self._get_llas(router['gw_interface'], router['ns_name']) if self._is_lla_active(pd_info.get_bind_lla_with_mask(), llas): if not pd_info.driver: pd_info.driver = self.pd_dhcp_driver( router_id, subnet_id, pd_info.ri_ifname) pd_info.driver.enable(self.pmon, router['ns_name'], router['gw_interface'], pd_info.bind_lla) pd_info.client_started = True if prefix_update: LOG.debug("Update server with prefixes: %s", prefix_update) self.notifier(self.context, prefix_update) def after_start(self): LOG.debug('SIGUSR1 signal handler set') signal.signal(signal.SIGUSR1, self._handle_sigusr1) def _handle_sigusr1(self, signum, frame): """Update PD on receiving SIGUSR1. The external DHCPv6 client uses SIGUSR1 to notify agent of prefix changes. """ self.pd_update_cb() def _get_sync_data(self): sync_data = self.pd_dhcp_driver.get_sync_data() for pd_info in sync_data: router_id = pd_info.router_id if not self.routers.get(router_id): self.routers[router_id] = {'gw_interface': None, 'ns_name': None, 'subnets': {}} new_pd_info = PDInfo(pd_info=pd_info) subnets = self.routers[router_id]['subnets'] subnets[pd_info.subnet_id] = new_pd_info @utils.synchronized("l3-agent-pd") def remove_router(resource, event, l3_agent, **kwargs): router_id = kwargs['router'].router_id router = l3_agent.pd.routers.get(router_id) l3_agent.pd.delete_router_pd(router) del l3_agent.pd.routers[router_id]['subnets'] del l3_agent.pd.routers[router_id] def get_router_entry(ns_name): return {'gw_interface': None, 'ns_name': ns_name, 'subnets': {}} @utils.synchronized("l3-agent-pd") def add_router(resource, event, l3_agent, **kwargs): added_router = kwargs['router'] router = l3_agent.pd.routers.get(added_router.router_id) if not router: l3_agent.pd.routers[added_router.router_id] = ( get_router_entry(added_router.ns_name)) else: # This will happen during l3 agent restart router['ns_name'] = added_router.ns_name class PDInfo(object): """A class to simplify storing and passing of information relevant to Prefix Delegation operations for a given subnet. 
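Two construction modes, as an illustrative sketch (values made up):

    PDInfo(ri_ifname='qr-xxxx', mac='fa:16:3e:00:00:01')  # new subnet
    PDInfo(pd_info=existing_info)  # resync existing state on restart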
""" def __init__(self, pd_info=None, ri_ifname=None, mac=None): if pd_info is None: self.prefix = l3_constants.PROVISIONAL_IPV6_PD_PREFIX self.old_prefix = l3_constants.PROVISIONAL_IPV6_PD_PREFIX self.ri_ifname = ri_ifname self.mac = mac self.bind_lla = None self.sync = False self.driver = None self.client_started = False else: self.prefix = pd_info.prefix self.old_prefix = None self.ri_ifname = pd_info.ri_ifname self.mac = None self.bind_lla = None self.sync = True self.driver = pd_info.driver self.client_started = pd_info.client_started def get_bind_lla_with_mask(self): bind_lla_with_mask = '%s/64' % self.bind_lla return bind_lla_with_mask neutron-8.4.0/neutron/agent/linux/iptables_comments.py0000664000567000056710000000346013044372736024403 0ustar jenkinsjenkins00000000000000# Copyright 2014 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """iptables comments""" # Do not translate these comments. These comments cannot contain a quote or # an escape character because they will end up in a call to iptables and # could interfere with other parameters. SNAT_OUT = 'Perform source NAT on outgoing traffic.' UNMATCH_DROP = 'Default drop rule for unmatched traffic.' VM_INT_SG = 'Direct traffic from the VM interface to the security group chain.' SG_TO_VM_SG = 'Jump to the VM specific chain.' INPUT_TO_SG = 'Direct incoming traffic from VM to the security group chain.' PAIR_ALLOW = 'Allow traffic from defined IP/MAC pairs.' PAIR_DROP = 'Drop traffic without an IP/MAC allow rule.' DHCP_CLIENT = 'Allow DHCP client traffic.' DHCP_SPOOF = 'Prevent DHCP Spoofing by VM.' UNMATCHED = 'Send unmatched traffic to the fallback chain.' INVALID_DROP = ("Drop packets that appear related to an existing connection " "(e.g. TCP ACK/FIN) but do not have an entry in conntrack.") ALLOW_ASSOC = ('Direct packets associated with a known session to the RETURN ' 'chain.') PORT_SEC_ACCEPT = 'Accept all packets when port security is disabled.' IPV6_RA_DROP = 'Drop IPv6 Router Advts from VM Instance.' IPV6_ICMP_ALLOW = 'Allow IPv6 ICMP traffic.' neutron-8.4.0/neutron/agent/linux/dibbler.py0000664000567000056710000001535513044372760022301 0ustar jenkinsjenkins00000000000000# Copyright 2015 Cisco Systems # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import os import shutil import jinja2 from oslo_config import cfg from oslo_log import log as logging import six from neutron.agent.linux import external_process from neutron.agent.linux import pd from neutron.agent.linux import pd_driver from neutron.agent.linux import utils from neutron.common import constants from neutron.common import utils as common_utils LOG = logging.getLogger(__name__) PD_SERVICE_NAME = 'dibbler' CONFIG_TEMPLATE = jinja2.Template(""" # Config for dibbler-client. # Use enterprise number based duid duid-type duid-en {{ enterprise_number }} {{ va_id }} # 8 (Debug) is most verbose. 7 (Info) is usually the best option log-level 8 # No automatic downlink address assignment downlink-prefix-ifaces "none" # Use script to notify l3_agent of assigned prefix script {{ script_path }} # Ask for prefix over the external gateway interface iface {{ interface_name }} { # Bind to generated LLA bind-to-address {{ bind_address }} # ask for address pd 1 } """) # The first line must be #!/usr/bin/env bash SCRIPT_TEMPLATE = jinja2.Template("""#!/usr/bin/env bash exec neutron-pd-notify $1 {{ prefix_path }} {{ l3_agent_pid }} """) class PDDibbler(pd_driver.PDDriverBase): def __init__(self, router_id, subnet_id, ri_ifname): super(PDDibbler, self).__init__(router_id, subnet_id, ri_ifname) self.requestor_id = "%s:%s:%s" % (self.router_id, self.subnet_id, self.ri_ifname) self.dibbler_client_working_area = "%s/%s" % (cfg.CONF.pd_confs, self.requestor_id) self.prefix_path = "%s/prefix" % self.dibbler_client_working_area self.pid_path = "%s/client.pid" % self.dibbler_client_working_area self.converted_subnet_id = self.subnet_id.replace('-', '') def _is_dibbler_client_running(self): return utils.get_value_from_file(self.pid_path) def _generate_dibbler_conf(self, ex_gw_ifname, lla): dcwa = self.dibbler_client_working_area script_path = utils.get_conf_file_name(dcwa, 'notify', 'sh', True) buf = six.StringIO() buf.write('%s' % SCRIPT_TEMPLATE.render( prefix_path=self.prefix_path, l3_agent_pid=os.getpid())) common_utils.replace_file(script_path, buf.getvalue()) os.chmod(script_path, 0o744) dibbler_conf = utils.get_conf_file_name(dcwa, 'client', 'conf', False) buf = six.StringIO() buf.write('%s' % CONFIG_TEMPLATE.render( enterprise_number=cfg.CONF.vendor_pen, va_id='0x%s' % self.converted_subnet_id, script_path='"%s/notify.sh"' % dcwa, interface_name='"%s"' % ex_gw_ifname, bind_address='%s' % lla)) common_utils.replace_file(dibbler_conf, buf.getvalue()) return dcwa def _spawn_dibbler(self, pmon, router_ns, dibbler_conf): def callback(pid_file): dibbler_cmd = ['dibbler-client', 'start', '-w', '%s' % dibbler_conf] return dibbler_cmd pm = external_process.ProcessManager( uuid=self.requestor_id, default_cmd_callback=callback, namespace=router_ns, service=PD_SERVICE_NAME, conf=cfg.CONF, pid_file=self.pid_path) pm.enable(reload_cfg=False) pmon.register(uuid=self.requestor_id, service_name=PD_SERVICE_NAME, monitored_process=pm) def enable(self, pmon, router_ns, ex_gw_ifname, lla): LOG.debug("Enable IPv6 PD for router %s subnet %s ri_ifname %s", self.router_id, self.subnet_id, self.ri_ifname) if not self._is_dibbler_client_running(): dibbler_conf = self._generate_dibbler_conf(ex_gw_ifname, lla) self._spawn_dibbler(pmon, router_ns, dibbler_conf) LOG.debug("dibbler client enabled for router %s subnet %s" " ri_ifname %s", self.router_id, self.subnet_id, self.ri_ifname) def disable(self, pmon, router_ns): LOG.debug("Disable IPv6 PD for router %s subnet %s ri_ifname %s", self.router_id, self.subnet_id, 
self.ri_ifname) dcwa = self.dibbler_client_working_area def callback(pid_file): dibbler_cmd = ['dibbler-client', 'stop', '-w', '%s' % dcwa] return dibbler_cmd pmon.unregister(uuid=self.requestor_id, service_name=PD_SERVICE_NAME) pm = external_process.ProcessManager( uuid=self.requestor_id, namespace=router_ns, service=PD_SERVICE_NAME, conf=cfg.CONF, pid_file=self.pid_path) pm.disable(get_stop_command=callback) shutil.rmtree(dcwa, ignore_errors=True) LOG.debug("dibbler client disabled for router %s subnet %s " "ri_ifname %s", self.router_id, self.subnet_id, self.ri_ifname) def get_prefix(self): prefix = utils.get_value_from_file(self.prefix_path) if not prefix: prefix = constants.PROVISIONAL_IPV6_PD_PREFIX return prefix @staticmethod def get_sync_data(): try: requestor_ids = os.listdir(cfg.CONF.pd_confs) except OSError: return [] sync_data = [] requestors = (r.split(':') for r in requestor_ids if r.count(':') == 2) for router_id, subnet_id, ri_ifname in requestors: pd_info = pd.PDInfo() pd_info.router_id = router_id pd_info.subnet_id = subnet_id pd_info.ri_ifname = ri_ifname pd_info.driver = PDDibbler(router_id, subnet_id, ri_ifname) pd_info.client_started = ( pd_info.driver._is_dibbler_client_running()) pd_info.prefix = pd_info.driver.get_prefix() sync_data.append(pd_info) return sync_data neutron-8.4.0/neutron/agent/linux/iptables_firewall.py0000664000567000056710000012514013044372760024360 0ustar jenkinsjenkins00000000000000# Copyright 2012, Nachi Ueno, NTT MCL, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import collections import re import netaddr from oslo_config import cfg from oslo_log import log as logging import six from neutron._i18n import _LI from neutron.agent import firewall from neutron.agent.linux import ip_conntrack from neutron.agent.linux import ipset_manager from neutron.agent.linux import iptables_comments as ic from neutron.agent.linux import iptables_manager from neutron.agent.linux import utils from neutron.common import constants from neutron.common import exceptions as n_exc from neutron.common import ipv6_utils from neutron.common import utils as c_utils LOG = logging.getLogger(__name__) SG_CHAIN = 'sg-chain' SPOOF_FILTER = 'spoof-filter' CHAIN_NAME_PREFIX = {firewall.INGRESS_DIRECTION: 'i', firewall.EGRESS_DIRECTION: 'o', SPOOF_FILTER: 's'} ICMPV6_ALLOWED_UNSPEC_ADDR_TYPES = [131, 135, 143] IPSET_DIRECTION = {firewall.INGRESS_DIRECTION: 'src', firewall.EGRESS_DIRECTION: 'dst'} # length of all device prefixes (e.g. 
qvo, tap, qvb) LINUX_DEV_PREFIX_LEN = 3 LINUX_DEV_LEN = 14 MAX_CONNTRACK_ZONES = 65535 comment_rule = iptables_manager.comment_rule class mac_iptables(netaddr.mac_eui48): """mac format class for netaddr to match iptables representation.""" word_sep = ':' class IptablesFirewallDriver(firewall.FirewallDriver): """Driver which enforces security groups through iptables rules.""" IPTABLES_DIRECTION = {firewall.INGRESS_DIRECTION: 'physdev-out', firewall.EGRESS_DIRECTION: 'physdev-in'} def __init__(self, namespace=None): self.iptables = iptables_manager.IptablesManager( use_ipv6=ipv6_utils.is_enabled(), namespace=namespace) # TODO(majopela, shihanzhang): refactor out ipset to a separate # driver composed over this one self.ipset = ipset_manager.IpsetManager(namespace=namespace) self.ipconntrack = ip_conntrack.IpConntrackManager( self.get_device_zone, namespace=namespace) self._populate_initial_zone_map() # list of port which has security group self.filtered_ports = {} self.unfiltered_ports = {} self._add_fallback_chain_v4v6() self._defer_apply = False self._pre_defer_filtered_ports = None self._pre_defer_unfiltered_ports = None # List of security group rules for ports residing on this host self.sg_rules = {} self.pre_sg_rules = None # List of security group member ips for ports residing on this host self.sg_members = collections.defaultdict( lambda: collections.defaultdict(list)) self.pre_sg_members = None self.enable_ipset = cfg.CONF.SECURITYGROUP.enable_ipset self._enabled_netfilter_for_bridges = False self.updated_rule_sg_ids = set() self.updated_sg_members = set() self.devices_with_updated_sg_members = collections.defaultdict(list) def _enable_netfilter_for_bridges(self): # we only need to set these values once, but it has to be when # we create a bridge; before that the bridge module might not # be loaded and the proc values aren't there. if self._enabled_netfilter_for_bridges: return else: self._enabled_netfilter_for_bridges = True # These proc values ensure that netfilter is enabled on # bridges; essential for enforcing security groups rules with # OVS Hybrid. Distributions can differ on whether this is # enabled by default or not (Ubuntu - yes, Redhat - no, for # example). 
LOG.debug("Enabling netfilter for bridges") utils.execute(['sysctl', '-w', 'net.bridge.bridge-nf-call-arptables=1'], run_as_root=True) utils.execute(['sysctl', '-w', 'net.bridge.bridge-nf-call-ip6tables=1'], run_as_root=True) utils.execute(['sysctl', '-w', 'net.bridge.bridge-nf-call-iptables=1'], run_as_root=True) @property def ports(self): return dict(self.filtered_ports, **self.unfiltered_ports) def _update_remote_security_group_members(self, sec_group_ids): for sg_id in sec_group_ids: for device in self.filtered_ports.values(): if sg_id in device.get('security_group_source_groups', []): self.devices_with_updated_sg_members[sg_id].append(device) def security_group_updated(self, action_type, sec_group_ids, device_ids=None): device_ids = device_ids or [] if action_type == 'sg_rule': self.updated_rule_sg_ids.update(sec_group_ids) elif action_type == 'sg_member': if device_ids: self.updated_sg_members.update(device_ids) else: self._update_remote_security_group_members(sec_group_ids) def update_security_group_rules(self, sg_id, sg_rules): LOG.debug("Update rules of security group (%s)", sg_id) self.sg_rules[sg_id] = sg_rules def update_security_group_members(self, sg_id, sg_members): LOG.debug("Update members of security group (%s)", sg_id) self.sg_members[sg_id] = collections.defaultdict(list, sg_members) if self.enable_ipset: self._update_ipset_members(sg_id, sg_members) def _update_ipset_members(self, sg_id, sg_members): devices = self.devices_with_updated_sg_members.pop(sg_id, None) for ip_version, current_ips in sg_members.items(): add_ips, del_ips = self.ipset.set_members( sg_id, ip_version, current_ips) if devices and del_ips: # remove prefix from del_ips ips = [str(netaddr.IPNetwork(del_ip).ip) for del_ip in del_ips] self.ipconntrack.delete_conntrack_state_by_remote_ips( devices, ip_version, ips) def _set_ports(self, port): if not firewall.port_sec_enabled(port): self.unfiltered_ports[port['device']] = port self.filtered_ports.pop(port['device'], None) else: self.filtered_ports[port['device']] = port self.unfiltered_ports.pop(port['device'], None) def _unset_ports(self, port): self.unfiltered_ports.pop(port['device'], None) self.filtered_ports.pop(port['device'], None) def prepare_port_filter(self, port): LOG.debug("Preparing device (%s) filter", port['device']) self._remove_chains() self._set_ports(port) self._enable_netfilter_for_bridges() # each security group has it own chains self._setup_chains() return self.iptables.apply() def update_port_filter(self, port): LOG.debug("Updating device (%s) filter", port['device']) if port['device'] not in self.ports: LOG.info(_LI('Attempted to update port filter which is not ' 'filtered %s'), port['device']) return self._remove_chains() self._set_ports(port) self._setup_chains() return self.iptables.apply() def remove_port_filter(self, port): LOG.debug("Removing device (%s) filter", port['device']) if port['device'] not in self.ports: LOG.info(_LI('Attempted to remove port filter which is not ' 'filtered %r'), port) return self._remove_chains() self._unset_ports(port) self._setup_chains() return self.iptables.apply() def _add_accept_rule_port_sec(self, port, direction): self._update_port_sec_rules(port, direction, add=True) def _remove_rule_port_sec(self, port, direction): self._update_port_sec_rules(port, direction, add=False) def _remove_rule_from_chain_v4v6(self, chain_name, ipv4_rules, ipv6_rules): for rule in ipv4_rules: self.iptables.ipv4['filter'].remove_rule(chain_name, rule) for rule in ipv6_rules: 
self.iptables.ipv6['filter'].remove_rule(chain_name, rule) def _setup_chains(self): """Setup ingress and egress chain for a port.""" if not self._defer_apply: self._setup_chains_apply(self.filtered_ports, self.unfiltered_ports) def _setup_chains_apply(self, ports, unfiltered_ports): self._add_chain_by_name_v4v6(SG_CHAIN) # sort by port so we always do this deterministically between # agent restarts and don't cause unnecessary rule differences for pname in sorted(ports): port = ports[pname] self._setup_chain(port, firewall.INGRESS_DIRECTION) self._setup_chain(port, firewall.EGRESS_DIRECTION) self.iptables.ipv4['filter'].add_rule(SG_CHAIN, '-j ACCEPT') self.iptables.ipv6['filter'].add_rule(SG_CHAIN, '-j ACCEPT') for port in unfiltered_ports.values(): self._add_accept_rule_port_sec(port, firewall.INGRESS_DIRECTION) self._add_accept_rule_port_sec(port, firewall.EGRESS_DIRECTION) def _remove_chains(self): """Remove ingress and egress chain for a port.""" if not self._defer_apply: self._remove_chains_apply(self.filtered_ports, self.unfiltered_ports) def _remove_chains_apply(self, ports, unfiltered_ports): for port in ports.values(): self._remove_chain(port, firewall.INGRESS_DIRECTION) self._remove_chain(port, firewall.EGRESS_DIRECTION) self._remove_chain(port, SPOOF_FILTER) for port in unfiltered_ports.values(): self._remove_rule_port_sec(port, firewall.INGRESS_DIRECTION) self._remove_rule_port_sec(port, firewall.EGRESS_DIRECTION) self._remove_chain_by_name_v4v6(SG_CHAIN) def _setup_chain(self, port, DIRECTION): self._add_chain(port, DIRECTION) self._add_rules_by_security_group(port, DIRECTION) def _remove_chain(self, port, DIRECTION): chain_name = self._port_chain_name(port, DIRECTION) self._remove_chain_by_name_v4v6(chain_name) def _add_fallback_chain_v4v6(self): self.iptables.ipv4['filter'].add_chain('sg-fallback') self.iptables.ipv4['filter'].add_rule('sg-fallback', '-j DROP', comment=ic.UNMATCH_DROP) self.iptables.ipv6['filter'].add_chain('sg-fallback') self.iptables.ipv6['filter'].add_rule('sg-fallback', '-j DROP', comment=ic.UNMATCH_DROP) def _add_raw_chain(self, chain_name): self.iptables.ipv4['raw'].add_chain(chain_name) self.iptables.ipv6['raw'].add_chain(chain_name) def _add_chain_by_name_v4v6(self, chain_name): self.iptables.ipv4['filter'].add_chain(chain_name) self.iptables.ipv6['filter'].add_chain(chain_name) def _remove_raw_chain(self, chain_name): self.iptables.ipv4['raw'].remove_chain(chain_name) self.iptables.ipv6['raw'].remove_chain(chain_name) def _remove_chain_by_name_v4v6(self, chain_name): self.iptables.ipv4['filter'].remove_chain(chain_name) self.iptables.ipv6['filter'].remove_chain(chain_name) def _add_rules_to_chain_v4v6(self, chain_name, ipv4_rules, ipv6_rules, comment=None): for rule in ipv4_rules: self.iptables.ipv4['filter'].add_rule(chain_name, rule, comment=comment) for rule in ipv6_rules: self.iptables.ipv6['filter'].add_rule(chain_name, rule, comment=comment) def _get_device_name(self, port): return port['device'] def _update_port_sec_rules(self, port, direction, add=False): # add/remove rules in FORWARD and INPUT chain device = self._get_device_name(port) jump_rule = ['-m physdev --%s %s --physdev-is-bridged ' '-j ACCEPT' % (self.IPTABLES_DIRECTION[direction], device)] if add: self._add_rules_to_chain_v4v6( 'FORWARD', jump_rule, jump_rule, comment=ic.PORT_SEC_ACCEPT) else: self._remove_rule_from_chain_v4v6('FORWARD', jump_rule, jump_rule) if direction == firewall.EGRESS_DIRECTION: jump_rule = ['-m physdev --%s %s --physdev-is-bridged ' '-j ACCEPT' % 
(self.IPTABLES_DIRECTION[direction], device)] if add: self._add_rules_to_chain_v4v6('INPUT', jump_rule, jump_rule, comment=ic.PORT_SEC_ACCEPT) else: self._remove_rule_from_chain_v4v6( 'INPUT', jump_rule, jump_rule) def _add_chain(self, port, direction): chain_name = self._port_chain_name(port, direction) self._add_chain_by_name_v4v6(chain_name) # Note(nati) jump to the security group chain (SG_CHAIN) # This is needed because a packet may match rules for two ports # if the two ports are on the same host. # We accept the packet at the end of SG_CHAIN. # jump to the security group chain device = self._get_device_name(port) jump_rule = ['-m physdev --%s %s --physdev-is-bridged ' '-j $%s' % (self.IPTABLES_DIRECTION[direction], device, SG_CHAIN)] self._add_rules_to_chain_v4v6('FORWARD', jump_rule, jump_rule, comment=ic.VM_INT_SG) # jump to the chain based on the device jump_rule = ['-m physdev --%s %s --physdev-is-bridged ' '-j $%s' % (self.IPTABLES_DIRECTION[direction], device, chain_name)] self._add_rules_to_chain_v4v6(SG_CHAIN, jump_rule, jump_rule, comment=ic.SG_TO_VM_SG) if direction == firewall.EGRESS_DIRECTION: self._add_rules_to_chain_v4v6('INPUT', jump_rule, jump_rule, comment=ic.INPUT_TO_SG) def _split_sgr_by_ethertype(self, security_group_rules): ipv4_sg_rules = [] ipv6_sg_rules = [] for rule in security_group_rules: if rule.get('ethertype') == constants.IPv4: ipv4_sg_rules.append(rule) elif rule.get('ethertype') == constants.IPv6: if rule.get('protocol') == 'icmp': rule['protocol'] = 'ipv6-icmp' ipv6_sg_rules.append(rule) return ipv4_sg_rules, ipv6_sg_rules def _select_sgr_by_direction(self, port, direction): return [rule for rule in port.get('security_group_rules', []) if rule['direction'] == direction] def _setup_spoof_filter_chain(self, port, table, mac_ip_pairs, rules): if mac_ip_pairs: chain_name = self._port_chain_name(port, SPOOF_FILTER) table.add_chain(chain_name) for mac, ip in mac_ip_pairs: if ip is None: # If fixed_ips is [] this rule will be added to the end # of the list after the allowed_address_pair rules.
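# e.g. (illustrative) a mac of 'fa:16:3e:aa:bb:cc' renders the rule:
#   -m mac --mac-source FA:16:3E:AA:BB:CC -j RETURN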
table.add_rule(chain_name, '-m mac --mac-source %s -j RETURN' % mac.upper(), comment=ic.PAIR_ALLOW) else: # we need to convert it into a prefix to match iptables ip = c_utils.ip_to_cidr(ip) table.add_rule(chain_name, '-s %s -m mac --mac-source %s -j RETURN' % (ip, mac.upper()), comment=ic.PAIR_ALLOW) table.add_rule(chain_name, '-j DROP', comment=ic.PAIR_DROP) rules.append('-j $%s' % chain_name) def _build_ipv4v6_mac_ip_list(self, mac, ip_address, mac_ipv4_pairs, mac_ipv6_pairs): mac = str(netaddr.EUI(mac, dialect=mac_iptables)) if netaddr.IPNetwork(ip_address).version == 4: mac_ipv4_pairs.append((mac, ip_address)) else: mac_ipv6_pairs.append((mac, ip_address)) lla = str(ipv6_utils.get_ipv6_addr_by_EUI64( constants.IPV6_LLA_PREFIX, mac)) mac_ipv6_pairs.append((mac, lla)) def _spoofing_rule(self, port, ipv4_rules, ipv6_rules): # Fixed rules for traffic sourced from unspecified addresses: 0.0.0.0 # and :: # Allow dhcp client discovery and request ipv4_rules += [comment_rule('-s 0.0.0.0/32 -d 255.255.255.255/32 ' '-p udp -m udp --sport 68 --dport 67 ' '-j RETURN', comment=ic.DHCP_CLIENT)] # Allow neighbor solicitation and multicast listener discovery # from the unspecified address for duplicate address detection for icmp6_type in ICMPV6_ALLOWED_UNSPEC_ADDR_TYPES: ipv6_rules += [comment_rule('-s ::/128 -d ff02::/16 ' '-p ipv6-icmp -m icmp6 ' '--icmpv6-type %s -j RETURN' % icmp6_type, comment=ic.IPV6_ICMP_ALLOW)] mac_ipv4_pairs = [] mac_ipv6_pairs = [] if isinstance(port.get('allowed_address_pairs'), list): for address_pair in port['allowed_address_pairs']: self._build_ipv4v6_mac_ip_list(address_pair['mac_address'], address_pair['ip_address'], mac_ipv4_pairs, mac_ipv6_pairs) for ip in port['fixed_ips']: self._build_ipv4v6_mac_ip_list(port['mac_address'], ip, mac_ipv4_pairs, mac_ipv6_pairs) if not port['fixed_ips']: mac_ipv4_pairs.append((port['mac_address'], None)) mac_ipv6_pairs.append((port['mac_address'], None)) self._setup_spoof_filter_chain(port, self.iptables.ipv4['filter'], mac_ipv4_pairs, ipv4_rules) self._setup_spoof_filter_chain(port, self.iptables.ipv6['filter'], mac_ipv6_pairs, ipv6_rules) # Fixed rules for traffic after source address is verified # Allow dhcp client renewal and rebinding ipv4_rules += [comment_rule('-p udp -m udp --sport 68 --dport 67 ' '-j RETURN', comment=ic.DHCP_CLIENT)] # Drop Router Advts from the port. 
        ipv6_rules += [comment_rule('-p ipv6-icmp -m icmp6 --icmpv6-type %s '
                                    '-j DROP' % constants.ICMPV6_TYPE_RA,
                                    comment=ic.IPV6_RA_DROP)]
        ipv6_rules += [comment_rule('-p ipv6-icmp -j RETURN',
                                    comment=ic.IPV6_ICMP_ALLOW)]
        ipv6_rules += [comment_rule('-p udp -m udp --sport 546 '
                                    '-m udp --dport 547 '
                                    '-j RETURN', comment=ic.DHCP_CLIENT)]

    def _drop_dhcp_rule(self, ipv4_rules, ipv6_rules):
        # NOTE(nati): Drop DHCP packets sent from the VM
        ipv4_rules += [comment_rule('-p udp -m udp --sport 67 '
                                    '-m udp --dport 68 '
                                    '-j DROP', comment=ic.DHCP_SPOOF)]
        ipv6_rules += [comment_rule('-p udp -m udp --sport 547 '
                                    '-m udp --dport 546 '
                                    '-j DROP', comment=ic.DHCP_SPOOF)]

    def _accept_inbound_icmpv6(self):
        # Allow multicast listener, neighbor solicitation and
        # neighbor advertisement into the instance
        icmpv6_rules = []
        for icmp6_type in constants.ICMPV6_ALLOWED_TYPES:
            icmpv6_rules += ['-p ipv6-icmp -m icmp6 --icmpv6-type %s '
                             '-j RETURN' % icmp6_type]
        return icmpv6_rules

    def _select_sg_rules_for_port(self, port, direction):
        """Select rules from the security groups the port is member of."""
        port_sg_ids = port.get('security_groups', [])
        port_rules = []
        for sg_id in port_sg_ids:
            for rule in self.sg_rules.get(sg_id, []):
                if rule['direction'] == direction:
                    if self.enable_ipset:
                        port_rules.append(rule)
                    else:
                        port_rules.extend(
                            self._expand_sg_rule_with_remote_ips(
                                rule, port, direction))
        return port_rules

    def _expand_sg_rule_with_remote_ips(self, rule, port, direction):
        """Expand a remote group rule to rule per remote group IP."""
        remote_group_id = rule.get('remote_group_id')
        if remote_group_id:
            ethertype = rule['ethertype']
            port_ips = port.get('fixed_ips', [])

            for ip in self.sg_members[remote_group_id][ethertype]:
                if ip not in port_ips:
                    ip_rule = rule.copy()
                    direction_ip_prefix = firewall.DIRECTION_IP_PREFIX[
                        direction]
                    ip_prefix = str(netaddr.IPNetwork(ip).cidr)
                    ip_rule[direction_ip_prefix] = ip_prefix
                    yield ip_rule
        else:
            yield rule

    def _get_remote_sg_ids(self, port, direction=None):
        sg_ids = port.get('security_groups', [])
        remote_sg_ids = {constants.IPv4: set(), constants.IPv6: set()}
        for sg_id in sg_ids:
            for rule in self.sg_rules.get(sg_id, []):
                if not direction or rule['direction'] == direction:
                    remote_sg_id = rule.get('remote_group_id')
                    ether_type = rule.get('ethertype')
                    if remote_sg_id and ether_type:
                        remote_sg_ids[ether_type].add(remote_sg_id)
        return remote_sg_ids

    def _add_rules_by_security_group(self, port, direction):
        # select rules for current port and direction
        security_group_rules = self._select_sgr_by_direction(port, direction)
        security_group_rules += self._select_sg_rules_for_port(
            port, direction)
        # split groups by ip version
        # for ipv4, the iptables command is used
        # for ipv6, the ip6tables command is used
        ipv4_sg_rules, ipv6_sg_rules = self._split_sgr_by_ethertype(
            security_group_rules)
        ipv4_iptables_rules = []
        ipv6_iptables_rules = []
        # include fixed egress/ingress rules
        if direction == firewall.EGRESS_DIRECTION:
            self._add_fixed_egress_rules(port,
                                         ipv4_iptables_rules,
                                         ipv6_iptables_rules)
        elif direction == firewall.INGRESS_DIRECTION:
            ipv6_iptables_rules += self._accept_inbound_icmpv6()
        # include IPv4 and IPv6 iptables rules from security group
        ipv4_iptables_rules += self._convert_sgr_to_iptables_rules(
            ipv4_sg_rules)
        ipv6_iptables_rules += self._convert_sgr_to_iptables_rules(
            ipv6_sg_rules)

        # finally add the rules to the port chain for a given direction
        self._add_rules_to_chain_v4v6(self._port_chain_name(port, direction),
                                      ipv4_iptables_rules,
                                      ipv6_iptables_rules)

    def _add_fixed_egress_rules(self, port,
ipv4_iptables_rules, ipv6_iptables_rules): self._spoofing_rule(port, ipv4_iptables_rules, ipv6_iptables_rules) self._drop_dhcp_rule(ipv4_iptables_rules, ipv6_iptables_rules) def _generate_ipset_rule_args(self, sg_rule, remote_gid): ethertype = sg_rule.get('ethertype') ipset_name = self.ipset.get_name(remote_gid, ethertype) if not self.ipset.set_name_exists(ipset_name): #NOTE(mangelajo): ipsets for empty groups are not created # thus we can't reference them. return None ipset_direction = IPSET_DIRECTION[sg_rule.get('direction')] args = self._generate_protocol_and_port_args(sg_rule) args += ['-m set', '--match-set', ipset_name, ipset_direction] args += ['-j RETURN'] return args def _generate_protocol_and_port_args(self, sg_rule): args = self._protocol_arg(sg_rule.get('protocol')) args += self._port_arg('sport', sg_rule.get('protocol'), sg_rule.get('source_port_range_min'), sg_rule.get('source_port_range_max')) args += self._port_arg('dport', sg_rule.get('protocol'), sg_rule.get('port_range_min'), sg_rule.get('port_range_max')) return args def _generate_plain_rule_args(self, sg_rule): # These arguments MUST be in the format iptables-save will # display them: source/dest, protocol, sport, dport, target # Otherwise the iptables_manager code won't be able to find # them to preserve their [packet:byte] counts. args = self._ip_prefix_arg('s', sg_rule.get('source_ip_prefix')) args += self._ip_prefix_arg('d', sg_rule.get('dest_ip_prefix')) args += self._generate_protocol_and_port_args(sg_rule) args += ['-j RETURN'] return args def _convert_sg_rule_to_iptables_args(self, sg_rule): remote_gid = sg_rule.get('remote_group_id') if self.enable_ipset and remote_gid: return self._generate_ipset_rule_args(sg_rule, remote_gid) else: return self._generate_plain_rule_args(sg_rule) def _convert_sgr_to_iptables_rules(self, security_group_rules): iptables_rules = [] self._allow_established(iptables_rules) seen_sg_rules = set() for rule in security_group_rules: args = self._convert_sg_rule_to_iptables_args(rule) if args: rule_command = ' '.join(args) if rule_command in seen_sg_rules: # since these rules are from multiple security groups, # there may be duplicates so we prune them out here continue seen_sg_rules.add(rule_command) iptables_rules.append(rule_command) self._drop_invalid_packets(iptables_rules) iptables_rules += [comment_rule('-j $sg-fallback', comment=ic.UNMATCHED)] return iptables_rules def _drop_invalid_packets(self, iptables_rules): # Always drop invalid packets iptables_rules += [comment_rule('-m state --state ' 'INVALID -j DROP', comment=ic.INVALID_DROP)] return iptables_rules def _allow_established(self, iptables_rules): # Allow established connections iptables_rules += [comment_rule( '-m state --state RELATED,ESTABLISHED -j RETURN', comment=ic.ALLOW_ASSOC)] return iptables_rules def _protocol_arg(self, protocol): if not protocol: return [] if protocol == 'icmpv6': protocol = 'ipv6-icmp' iptables_rule = ['-p', protocol] return iptables_rule def _port_arg(self, direction, protocol, port_range_min, port_range_max): if (protocol not in ['udp', 'tcp', 'icmp', 'ipv6-icmp'] or port_range_min is None): return [] protocol_modules = {'udp': 'udp', 'tcp': 'tcp', 'icmp': 'icmp', 'ipv6-icmp': 'icmp6'} # iptables adds '-m protocol' when the port number is specified args = ['-m', protocol_modules[protocol]] if protocol in ['icmp', 'ipv6-icmp']: protocol_type = 'icmpv6' if protocol == 'ipv6-icmp' else 'icmp' # Note(xuhanp): port_range_min/port_range_max represent # icmp type/code when protocol is icmp or icmpv6 
            args += ['--%s-type' % protocol_type, '%s' % port_range_min]
            # icmp code can be 0 so we cannot use "if port_range_max" here
            if port_range_max is not None:
                args[-1] += '/%s' % port_range_max
        elif port_range_min == port_range_max:
            args += ['--%s' % direction, '%s' % (port_range_min,)]
        else:
            args += ['-m', 'multiport', '--%ss' % direction,
                     '%s:%s' % (port_range_min, port_range_max)]
        return args

    def _ip_prefix_arg(self, direction, ip_prefix):
        # NOTE(nati): source_group_id is converted to a list of
        # source_ip_prefix values on the server side
        if ip_prefix:
            if '/' not in ip_prefix:
                # we need to convert it into a prefix to match iptables
                ip_prefix = c_utils.ip_to_cidr(ip_prefix)
            elif ip_prefix.endswith('/0'):
                # an allow for every address is not a constraint so
                # iptables drops it
                return []
            return ['-%s' % direction, ip_prefix]
        return []

    def _port_chain_name(self, port, direction):
        return iptables_manager.get_chain_name(
            '%s%s' % (CHAIN_NAME_PREFIX[direction], port['device'][3:]))

    def filter_defer_apply_on(self):
        if not self._defer_apply:
            self.iptables.defer_apply_on()
            self._pre_defer_filtered_ports = dict(self.filtered_ports)
            self._pre_defer_unfiltered_ports = dict(self.unfiltered_ports)
            self.pre_sg_members = dict(self.sg_members)
            self.pre_sg_rules = dict(self.sg_rules)
            self._defer_apply = True

    def _remove_unused_security_group_info(self):
        """Remove any unnecessary local security group info or unused ipsets.

        This function has to be called after applying the last iptables
        rules, so that we are at a point where no iptables rule depends
        on an ipset we are going to delete.
        """
        filtered_ports = self.filtered_ports.values()

        remote_sgs_to_remove = self._determine_remote_sgs_to_remove(
            filtered_ports)

        for ip_version, remote_sg_ids in six.iteritems(remote_sgs_to_remove):
            if self.enable_ipset:
                self._remove_ipsets_for_remote_sgs(ip_version, remote_sg_ids)

        self._remove_sg_members(remote_sgs_to_remove)

        # Remove unused security group rules
        for remove_group_id in self._determine_sg_rules_to_remove(
                filtered_ports):
            self.sg_rules.pop(remove_group_id, None)

    def _determine_remote_sgs_to_remove(self, filtered_ports):
        """Calculate which remote security groups we don't need anymore.

        We do the calculation for each ip_version.
        """
        sgs_to_remove_per_ipversion = {constants.IPv4: set(),
                                       constants.IPv6: set()}
        remote_group_id_sets = self._get_remote_sg_ids_sets_by_ipversion(
            filtered_ports)
        for ip_version, remote_group_id_set in (
                six.iteritems(remote_group_id_sets)):
            sgs_to_remove_per_ipversion[ip_version].update(
                set(self.pre_sg_members) - remote_group_id_set)
        return sgs_to_remove_per_ipversion

    def _get_remote_sg_ids_sets_by_ipversion(self, filtered_ports):
        """Calculate the remote security group references by ip_version
        for the given ports.
        """
        remote_group_id_sets = {constants.IPv4: set(),
                                constants.IPv6: set()}
        for port in filtered_ports:
            remote_sg_ids = self._get_remote_sg_ids(port)
            for ip_version in (constants.IPv4, constants.IPv6):
                remote_group_id_sets[ip_version] |= remote_sg_ids[ip_version]
        return remote_group_id_sets

    def _determine_sg_rules_to_remove(self, filtered_ports):
        """Calculate which security group rule sets can be removed.

        We find out by subtracting the security groups still associated
        with the given set of ports from the previously known security
        group ids.
""" port_group_ids = self._get_sg_ids_set_for_ports(filtered_ports) return set(self.pre_sg_rules) - port_group_ids def _get_sg_ids_set_for_ports(self, filtered_ports): """Get the port security group ids as a set.""" port_group_ids = set() for port in filtered_ports: port_group_ids.update(port.get('security_groups', [])) return port_group_ids def _remove_ipsets_for_remote_sgs(self, ip_version, remote_sg_ids): """Remove system ipsets matching the provided parameters.""" for remote_sg_id in remote_sg_ids: self.ipset.destroy(remote_sg_id, ip_version) def _remove_sg_members(self, remote_sgs_to_remove): """Remove sg_member entries.""" ipv4_sec_group_set = remote_sgs_to_remove.get(constants.IPv4) ipv6_sec_group_set = remote_sgs_to_remove.get(constants.IPv6) for sg_id in (ipv4_sec_group_set & ipv6_sec_group_set): if sg_id in self.sg_members: del self.sg_members[sg_id] def _find_deleted_sg_rules(self, sg_id): del_rules = list() for pre_rule in self.pre_sg_rules.get(sg_id, []): if pre_rule not in self.sg_rules.get(sg_id, []): del_rules.append(pre_rule) return del_rules def _find_devices_on_security_group(self, sg_id): device_list = list() for device in self.filtered_ports.values(): if sg_id in device.get('security_groups', []): device_list.append(device) return device_list def _clean_deleted_sg_rule_conntrack_entries(self): deleted_sg_ids = set() for sg_id in self.updated_rule_sg_ids: del_rules = self._find_deleted_sg_rules(sg_id) if not del_rules: continue device_list = self._find_devices_on_security_group(sg_id) for rule in del_rules: self.ipconntrack.delete_conntrack_state_by_rule( device_list, rule) deleted_sg_ids.add(sg_id) for id in deleted_sg_ids: self.updated_rule_sg_ids.remove(id) def _clean_updated_sg_member_conntrack_entries(self): updated_device_ids = set() for device in self.updated_sg_members: sec_group_change = False device_info = self.filtered_ports.get(device) pre_device_info = self._pre_defer_filtered_ports.get(device) if not device_info or not pre_device_info: continue for sg_id in pre_device_info.get('security_groups', []): if sg_id not in device_info.get('security_groups', []): sec_group_change = True break if not sec_group_change: continue for ethertype in [constants.IPv4, constants.IPv6]: self.ipconntrack.delete_conntrack_state_by_remote_ips( [device_info], ethertype, set()) updated_device_ids.add(device) for id in updated_device_ids: self.updated_sg_members.remove(id) def _clean_deleted_remote_sg_members_conntrack_entries(self): deleted_sg_ids = set() for sg_id, devices in self.devices_with_updated_sg_members.items(): for ethertype in [constants.IPv4, constants.IPv6]: pre_ips = self._get_sg_members( self.pre_sg_members, sg_id, ethertype) cur_ips = self._get_sg_members( self.sg_members, sg_id, ethertype) ips = (pre_ips - cur_ips) if devices and ips: self.ipconntrack.delete_conntrack_state_by_remote_ips( devices, ethertype, ips) deleted_sg_ids.add(sg_id) for id in deleted_sg_ids: self.devices_with_updated_sg_members.pop(id, None) def _remove_conntrack_entries_from_sg_updates(self): self._clean_deleted_sg_rule_conntrack_entries() self._clean_updated_sg_member_conntrack_entries() if not self.enable_ipset: self._clean_deleted_remote_sg_members_conntrack_entries() def _get_sg_members(self, sg_info, sg_id, ethertype): return set(sg_info.get(sg_id, {}).get(ethertype, [])) def filter_defer_apply_off(self): if self._defer_apply: self._defer_apply = False self._remove_chains_apply(self._pre_defer_filtered_ports, self._pre_defer_unfiltered_ports) 
            self._setup_chains_apply(self.filtered_ports,
                                     self.unfiltered_ports)
            self.iptables.defer_apply_off()
            self._remove_conntrack_entries_from_sg_updates()
            self._remove_unused_security_group_info()
            self._pre_defer_filtered_ports = None
            self._pre_defer_unfiltered_ports = None

    def _populate_initial_zone_map(self):
        """Setup the map between devices and zones based on current rules."""
        self._device_zone_map = {}
        rules = self.iptables.get_rules_for_table('raw')
        for rule in rules:
            match = re.match(r'.* --physdev-in (?P<dev>[a-zA-Z0-9\-]+)'
                             r'.* -j CT --zone (?P<zone>\d+).*', rule)
            if match:
                # strip off any prefix that the interface is using
                short_port_id = match.group('dev')[LINUX_DEV_PREFIX_LEN:]
                self._device_zone_map[short_port_id] = int(
                    match.group('zone'))
        LOG.debug("Populated conntrack zone map: %s", self._device_zone_map)

    def get_device_zone(self, port_id):
        # we have to key the device_zone_map based on the fragment of the
        # port UUID that shows up in the interface name. This is because the
        # initial map is populated strictly based on interface names that we
        # don't know the full UUID of.
        short_port_id = port_id[:(LINUX_DEV_LEN - LINUX_DEV_PREFIX_LEN)]
        try:
            return self._device_zone_map[short_port_id]
        except KeyError:
            return self._generate_device_zone(short_port_id)

    def _free_zones_from_removed_ports(self):
        """Clears any entries from the zone map of removed ports."""
        existing_ports = [
            port['device'][:(LINUX_DEV_LEN - LINUX_DEV_PREFIX_LEN)]
            for port in (list(self.filtered_ports.values()) +
                         list(self.unfiltered_ports.values()))
        ]
        removed = set(self._device_zone_map) - set(existing_ports)
        for dev in removed:
            self._device_zone_map.pop(dev, None)

    def _generate_device_zone(self, short_port_id):
        """Generates a unique conntrack zone for the passed in ID."""
        try:
            zone = self._find_open_zone()
        except n_exc.CTZoneExhaustedError:
            # Free some zones and try again; a repeat failure will not be
            # caught
            self._free_zones_from_removed_ports()
            zone = self._find_open_zone()

        self._device_zone_map[short_port_id] = zone
        LOG.debug("Assigned CT zone %(z)s to port %(dev)s.",
                  {'z': zone, 'dev': short_port_id})
        return self._device_zone_map[short_port_id]

    def _find_open_zone(self):
        # call set to dedup because old ports may be mapped to the same zone.
        zones_in_use = sorted(set(self._device_zone_map.values()))
        if not zones_in_use:
            return 1
        # attempt to increment onto the highest used zone first. if we hit
        # the end, go back and look for any gaps left by removed devices.
        last = zones_in_use[-1]
        if last < MAX_CONNTRACK_ZONES:
            return last + 1
        for index, used in enumerate(zones_in_use):
            if used - index != 1:
                # gap found, let's use it!
                return index + 1
        # conntrack zones exhausted :( :(
        raise n_exc.CTZoneExhaustedError()
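    # A minimal standalone sketch of the allocation strategy above (the real
    # method reads self._device_zone_map; MAX_CONNTRACK_ZONES is assumed to
    # be the 16-bit zone limit defined earlier in this module):
    #
    #     def find_open_zone_example(zones_in_use, max_zone=65535):
    #         zones = sorted(set(zones_in_use))
    #         if not zones:
    #             return 1
    #         if zones[-1] < max_zone:       # grow upward first, so for
    #             return zones[-1] + 1       # [1, 2, 4] this returns 5
    #         for index, used in enumerate(zones):
    #             if used - index != 1:      # only once the top is reached
    #                 return index + 1       # are gaps reused
    #         raise RuntimeError('zones exhausted')
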
class OVSHybridIptablesFirewallDriver(IptablesFirewallDriver):
    OVS_HYBRID_TAP_PREFIX = constants.TAP_DEVICE_PREFIX
    OVS_HYBRID_PLUG_REQUIRED = True

    def _port_chain_name(self, port, direction):
        return iptables_manager.get_chain_name(
            '%s%s' % (CHAIN_NAME_PREFIX[direction], port['device']))

    def _get_device_name(self, port):
        return (self.OVS_HYBRID_TAP_PREFIX + port['device'])[:LINUX_DEV_LEN]

    def _get_br_device_name(self, port):
        return ('qvb' + port['device'])[:LINUX_DEV_LEN]

    def _get_jump_rule(self, port, direction):
        if direction == firewall.INGRESS_DIRECTION:
            device = self._get_br_device_name(port)
        else:
            device = self._get_device_name(port)
        jump_rule = '-m physdev --physdev-in %s -j CT --zone %s' % (
            device, self.get_device_zone(port['device']))
        return jump_rule

    def _add_raw_chain_rules(self, port, direction):
        jump_rule = self._get_jump_rule(port, direction)
        self.iptables.ipv4['raw'].add_rule('PREROUTING', jump_rule)
        self.iptables.ipv6['raw'].add_rule('PREROUTING', jump_rule)

    def _remove_raw_chain_rules(self, port, direction):
        jump_rule = self._get_jump_rule(port, direction)
        self.iptables.ipv4['raw'].remove_rule('PREROUTING', jump_rule)
        self.iptables.ipv6['raw'].remove_rule('PREROUTING', jump_rule)

    def _add_chain(self, port, direction):
        super(OVSHybridIptablesFirewallDriver, self)._add_chain(port,
                                                                direction)
        if direction in [firewall.INGRESS_DIRECTION,
                         firewall.EGRESS_DIRECTION]:
            self._add_raw_chain_rules(port, direction)

    def _remove_chain(self, port, direction):
        super(OVSHybridIptablesFirewallDriver, self)._remove_chain(port,
                                                                   direction)
        if direction in [firewall.INGRESS_DIRECTION,
                         firewall.EGRESS_DIRECTION]:
            self._remove_raw_chain_rules(port, direction)
neutron-8.4.0/neutron/agent/linux/ipset_manager.py0000664000567000056710000001661513044372736023513 0ustar jenkinsjenkins00000000000000#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import copy

import netaddr

from neutron.agent.linux import utils as linux_utils
from neutron.common import utils

IPSET_ADD_BULK_THRESHOLD = 5
NET_PREFIX = 'N'
SWAP_SUFFIX = '-n'
IPSET_NAME_MAX_LENGTH = 31 - len(SWAP_SUFFIX)


class IpsetManager(object):
    """Smart wrapper for ipset.

    Keeps track of ip addresses per set, using bulk
    or single ip add/remove for smaller changes.
    """

    def __init__(self, execute=None, namespace=None):
        self.execute = execute or linux_utils.execute
        self.namespace = namespace
        self.ipset_sets = {}

    def _sanitize_addresses(self, addresses):
        """This method converts any address to ipset format.

        If an address has a mask of /0 we need to convert it to a mask of
        /1, as ipset does not support /0-length addresses. Instead, we use
        two /1 entries to represent the /0.
        """
        sanitized_addresses = []
        for ip in addresses:
            ip = netaddr.IPNetwork(ip)
            if ip.prefixlen == 0:
                if ip.version == 4:
                    sanitized_addresses.append('0.0.0.0/1')
                    sanitized_addresses.append('128.0.0.0/1')
                elif ip.version == 6:
                    sanitized_addresses.append('::/1')
                    sanitized_addresses.append('8000::/1')
            else:
                sanitized_addresses.append(str(ip))
        return sanitized_addresses
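    # Illustration (indicative values): a /0 member is split into the two /1
    # halves, while a plain address gains its host-prefix length:
    #
    #     >>> IpsetManager()._sanitize_addresses(['0.0.0.0/0', '10.0.0.1'])
    #     ['0.0.0.0/1', '128.0.0.0/1', '10.0.0.1/32']
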
""" sanitized_addresses = [] for ip in addresses: ip = netaddr.IPNetwork(ip) if ip.prefixlen == 0: if ip.version == 4: sanitized_addresses.append('0.0.0.0/1') sanitized_addresses.append('128.0.0.0/1') elif ip.version == 6: sanitized_addresses.append('::/1') sanitized_addresses.append('8000::/1') else: sanitized_addresses.append(str(ip)) return sanitized_addresses @staticmethod def get_name(id, ethertype): """Returns the given ipset name for an id+ethertype pair. This reference can be used from iptables. """ name = NET_PREFIX + ethertype + id return name[:IPSET_NAME_MAX_LENGTH] def set_name_exists(self, set_name): """Returns true if the set name is known to the manager.""" return set_name in self.ipset_sets def set_members(self, id, ethertype, member_ips): """Create or update a specific set by name and ethertype. It will make sure that a set is created, updated to add / remove new members, or swapped atomically if that's faster, and return added / removed members. """ member_ips = self._sanitize_addresses(member_ips) set_name = self.get_name(id, ethertype) add_ips = self._get_new_set_ips(set_name, member_ips) del_ips = self._get_deleted_set_ips(set_name, member_ips) if add_ips or del_ips or not self.set_name_exists(set_name): self.set_members_mutate(set_name, ethertype, member_ips) return add_ips, del_ips @utils.synchronized('ipset', external=True) def set_members_mutate(self, set_name, ethertype, member_ips): if not self.set_name_exists(set_name): # The initial creation is handled with create/refresh to # avoid any downtime for existing sets (i.e. avoiding # a flush/restore), as the restore operation of ipset is # additive to the existing set. self._create_set(set_name, ethertype) self._refresh_set(set_name, member_ips, ethertype) # TODO(majopela,shihanzhang,haleyb): Optimize this by # gathering the system ipsets at start. So we can determine # if a normal restore is enough for initial creation. # That should speed up agent boot up time. 
else: add_ips = self._get_new_set_ips(set_name, member_ips) del_ips = self._get_deleted_set_ips(set_name, member_ips) if (len(add_ips) + len(del_ips) < IPSET_ADD_BULK_THRESHOLD): self._add_members_to_set(set_name, add_ips) self._del_members_from_set(set_name, del_ips) else: self._refresh_set(set_name, member_ips, ethertype) @utils.synchronized('ipset', external=True) def destroy(self, id, ethertype, forced=False): set_name = self.get_name(id, ethertype) self._destroy(set_name, forced) def _add_member_to_set(self, set_name, member_ip): cmd = ['ipset', 'add', '-exist', set_name, member_ip] self._apply(cmd) self.ipset_sets[set_name].append(member_ip) def _refresh_set(self, set_name, member_ips, ethertype): new_set_name = set_name + SWAP_SUFFIX set_type = self._get_ipset_set_type(ethertype) process_input = ["create %s hash:net family %s" % (new_set_name, set_type)] for ip in member_ips: process_input.append("add %s %s" % (new_set_name, ip)) self._restore_sets(process_input) self._swap_sets(new_set_name, set_name) self._destroy(new_set_name, True) self.ipset_sets[set_name] = copy.copy(member_ips) def _del_member_from_set(self, set_name, member_ip): cmd = ['ipset', 'del', set_name, member_ip] self._apply(cmd, fail_on_errors=False) self.ipset_sets[set_name].remove(member_ip) def _create_set(self, set_name, ethertype): cmd = ['ipset', 'create', '-exist', set_name, 'hash:net', 'family', self._get_ipset_set_type(ethertype)] self._apply(cmd) self.ipset_sets[set_name] = [] def _apply(self, cmd, input=None, fail_on_errors=True): input = '\n'.join(input) if input else None cmd_ns = [] if self.namespace: cmd_ns.extend(['ip', 'netns', 'exec', self.namespace]) cmd_ns.extend(cmd) self.execute(cmd_ns, run_as_root=True, process_input=input, check_exit_code=fail_on_errors) def _get_new_set_ips(self, set_name, expected_ips): new_member_ips = (set(expected_ips) - set(self.ipset_sets.get(set_name, []))) return list(new_member_ips) def _get_deleted_set_ips(self, set_name, expected_ips): deleted_member_ips = (set(self.ipset_sets.get(set_name, [])) - set(expected_ips)) return list(deleted_member_ips) def _add_members_to_set(self, set_name, add_ips): for ip in add_ips: if ip not in self.ipset_sets[set_name]: self._add_member_to_set(set_name, ip) def _del_members_from_set(self, set_name, del_ips): for ip in del_ips: if ip in self.ipset_sets[set_name]: self._del_member_from_set(set_name, ip) def _get_ipset_set_type(self, ethertype): return 'inet6' if ethertype == 'IPv6' else 'inet' def _restore_sets(self, process_input): cmd = ['ipset', 'restore', '-exist'] self._apply(cmd, process_input) def _swap_sets(self, src_set, dest_set): cmd = ['ipset', 'swap', src_set, dest_set] self._apply(cmd) def _destroy(self, set_name, forced=False): if set_name in self.ipset_sets or forced: cmd = ['ipset', 'destroy', set_name] self._apply(cmd, fail_on_errors=False) self.ipset_sets.pop(set_name, None) neutron-8.4.0/neutron/agent/linux/ip_monitor.py0000664000567000056710000000530113044372736023046 0ustar jenkinsjenkins00000000000000# Copyright 2015 Red Hat, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License.

from oslo_log import log as logging
from oslo_utils import excutils

from neutron._i18n import _LE
from neutron.agent.linux import async_process
from neutron.agent.linux import ip_lib

LOG = logging.getLogger(__name__)


class IPMonitorEvent(object):
    def __init__(self, line, added, interface, cidr):
        self.line = line
        self.added = added
        self.interface = interface
        self.cidr = cidr

    def __str__(self):
        return self.line

    @classmethod
    def from_text(cls, line):
        route = line.split()

        try:
            first_word = route[0]
        except IndexError:
            with excutils.save_and_reraise_exception():
                LOG.error(_LE('Unable to parse route "%s"'), line)

        added = (first_word != 'Deleted')
        if not added:
            route = route[1:]

        try:
            interface = ip_lib.remove_interface_suffix(route[1])
            cidr = route[3]
        except IndexError:
            with excutils.save_and_reraise_exception():
                LOG.error(_LE('Unable to parse route "%s"'), line)

        return cls(line, added, interface, cidr)


class IPMonitor(async_process.AsyncProcess):
    """Wrapper over `ip monitor address`.

    To monitor and react indefinitely:
        m = IPMonitor(namespace='tmp', run_as_root=True)
        m.start()
        for iterable in m:
            event = IPMonitorEvent.from_text(iterable)
            print(event, event.added, event.interface, event.cidr)
    """

    def __init__(self,
                 namespace=None,
                 run_as_root=True,
                 respawn_interval=None):
        super(IPMonitor, self).__init__(['ip', '-o', 'monitor', 'address'],
                                        run_as_root=run_as_root,
                                        respawn_interval=respawn_interval,
                                        namespace=namespace)

    def __iter__(self):
        return self.iter_stdout(block=True)

    def start(self):
        super(IPMonitor, self).start(block=True)

    def stop(self):
        super(IPMonitor, self).stop(block=True)
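# Example (the sample line below assumes the output format of
# `ip -o monitor address`; deletion events are prefixed with 'Deleted'):
#
#     >>> line = '3: eth0    inet 192.168.1.5/24 scope global eth0'
#     >>> event = IPMonitorEvent.from_text(line)
#     >>> event.added, event.interface, event.cidr
#     (True, 'eth0', '192.168.1.5/24')
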
neutron-8.4.0/neutron/agent/linux/utils.py0000664000567000056710000003533113044372760022030 0ustar jenkinsjenkins00000000000000# Copyright 2012 Locaweb.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import fcntl
import glob
import grp
import os
import pwd
import shlex
import socket
import struct
import tempfile
import threading

import debtcollector
import eventlet
from eventlet.green import subprocess
from eventlet import greenthread
from oslo_config import cfg
from oslo_log import log as logging
from oslo_rootwrap import client
from oslo_utils import excutils
import six
from six.moves import http_client as httplib

from neutron._i18n import _, _LE
from neutron.agent.common import config
from neutron.common import constants
from neutron.common import utils
from neutron import wsgi

LOG = logging.getLogger(__name__)


class ProcessExecutionError(RuntimeError):
    def __init__(self, message, returncode):
        super(ProcessExecutionError, self).__init__(message)
        self.returncode = returncode


class RootwrapDaemonHelper(object):
    __client = None
    __lock = threading.Lock()

    def __new__(cls):
        """There is no reason to instantiate this class"""
        raise NotImplementedError()

    @classmethod
    def get_client(cls):
        with cls.__lock:
            if cls.__client is None:
                cls.__client = client.Client(
                    shlex.split(cfg.CONF.AGENT.root_helper_daemon))
            return cls.__client


def addl_env_args(addl_env):
    """Build arguments for adding additional environment vars with env"""

    # NOTE (twilson) If using rootwrap, an EnvFilter should be set up for the
    # command instead of a CommandFilter.
    if addl_env is None:
        return []
    return ['env'] + ['%s=%s' % pair for pair in addl_env.items()]


def create_process(cmd, run_as_root=False, addl_env=None):
    """Create a process object for the given command.

    The return value will be a tuple of the process object and the
    list of command arguments used to create it.
    """
    cmd = list(map(str, addl_env_args(addl_env) + cmd))
    if run_as_root:
        cmd = shlex.split(config.get_root_helper(cfg.CONF)) + cmd
    LOG.debug("Running command: %s", cmd)
    obj = utils.subprocess_popen(cmd, shell=False,
                                 stdin=subprocess.PIPE,
                                 stdout=subprocess.PIPE,
                                 stderr=subprocess.PIPE)

    return obj, cmd
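# Illustration: extra environment variables are passed by prefixing the
# command with env(1) instead of mutating os.environ, e.g.:
#
#     >>> addl_env_args({'OS_DEBUG': '1'})
#     ['env', 'OS_DEBUG=1']
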
LOG.debug("Running command (rootwrap daemon): %s", cmd) client = RootwrapDaemonHelper.get_client() return client.execute(cmd, process_input) def execute(cmd, process_input=None, addl_env=None, check_exit_code=True, return_stderr=False, log_fail_as_error=True, extra_ok_codes=None, run_as_root=False): try: if (process_input is None or isinstance(process_input, six.binary_type)): _process_input = process_input else: _process_input = process_input.encode('utf-8') if run_as_root and cfg.CONF.AGENT.root_helper_daemon: returncode, _stdout, _stderr = ( execute_rootwrap_daemon(cmd, process_input, addl_env)) else: obj, cmd = create_process(cmd, run_as_root=run_as_root, addl_env=addl_env) _stdout, _stderr = obj.communicate(_process_input) returncode = obj.returncode obj.stdin.close() _stdout = utils.safe_decode_utf8(_stdout) _stderr = utils.safe_decode_utf8(_stderr) extra_ok_codes = extra_ok_codes or [] if returncode and returncode not in extra_ok_codes: msg = _("Exit code: %(returncode)d; " "Stdin: %(stdin)s; " "Stdout: %(stdout)s; " "Stderr: %(stderr)s") % { 'returncode': returncode, 'stdin': process_input or '', 'stdout': _stdout, 'stderr': _stderr} if log_fail_as_error: LOG.error(msg) if check_exit_code: raise ProcessExecutionError(msg, returncode=returncode) else: LOG.debug("Exit code: %d", returncode) finally: # NOTE(termie): this appears to be necessary to let the subprocess # call clean something up in between calls, without # it two execute calls in a row hangs the second one greenthread.sleep(0) return (_stdout, _stderr) if return_stderr else _stdout def get_interface_mac(interface): MAC_START = 18 MAC_END = 24 s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) dev = interface[:constants.DEVICE_NAME_MAX_LEN] if isinstance(dev, six.text_type): dev = dev.encode('utf-8') info = fcntl.ioctl(s.fileno(), 0x8927, struct.pack('256s', dev)) return ''.join(['%02x:' % ord(char) for char in info[MAC_START:MAC_END]])[:-1] @debtcollector.removals.remove(message="Redundant in Mitaka release.") def replace_file(file_name, data, file_mode=0o644): """Replaces the contents of file_name with data in a safe manner. First write to a temp file and then rename. Since POSIX renames are atomic, the file is unlikely to be corrupted by competing writes. We create the tempfile on the same device to ensure that it can be renamed. 
""" base_dir = os.path.dirname(os.path.abspath(file_name)) tmp_file = tempfile.NamedTemporaryFile('w+', dir=base_dir, delete=False) tmp_file.write(data) tmp_file.close() os.chmod(tmp_file.name, file_mode) os.rename(tmp_file.name, file_name) def find_child_pids(pid): """Retrieve a list of the pids of child processes of the given pid.""" try: raw_pids = execute(['ps', '--ppid', pid, '-o', 'pid='], log_fail_as_error=False) except ProcessExecutionError as e: # Unexpected errors are the responsibility of the caller with excutils.save_and_reraise_exception() as ctxt: # Exception has already been logged by execute no_children_found = e.returncode == 1 if no_children_found: ctxt.reraise = False return [] return [x.strip() for x in raw_pids.split('\n') if x.strip()] def _get_conf_base(cfg_root, uuid, ensure_conf_dir): #TODO(mangelajo): separate responsibilities here, ensure_conf_dir # should be a separate function conf_dir = os.path.abspath(os.path.normpath(cfg_root)) conf_base = os.path.join(conf_dir, uuid) if ensure_conf_dir: utils.ensure_dir(conf_dir) return conf_base def get_conf_file_name(cfg_root, uuid, cfg_file, ensure_conf_dir=False): """Returns the file name for a given kind of config file.""" conf_base = _get_conf_base(cfg_root, uuid, ensure_conf_dir) return "%s.%s" % (conf_base, cfg_file) def get_value_from_file(filename, converter=None): try: with open(filename, 'r') as f: try: return converter(f.read()) if converter else f.read() except ValueError: LOG.error(_LE('Unable to convert value in %s'), filename) except IOError: LOG.debug('Unable to access %s', filename) def get_value_from_conf_file(cfg_root, uuid, cfg_file, converter=None): """A helper function to read a value from one of a config file.""" file_name = get_conf_file_name(cfg_root, uuid, cfg_file) return get_value_from_file(file_name, converter) def remove_conf_files(cfg_root, uuid): conf_base = _get_conf_base(cfg_root, uuid, False) for file_path in glob.iglob("%s.*" % conf_base): os.unlink(file_path) def get_root_helper_child_pid(pid, expected_cmd, run_as_root=False): """ Get the first non root_helper child pid in the process hierarchy. If root helper was used, two or more processes would be created: - a root helper process (e.g. sudo myscript) - possibly a rootwrap script (e.g. neutron-rootwrap) - a child process (e.g. myscript) - possibly its child processes Killing the root helper process will leave the child process running, re-parented to init, so the only way to ensure that both die is to target the child process directly. """ pid = str(pid) if run_as_root: while True: try: # We shouldn't have more than one child per process # so keep getting the children of the first one pid = find_child_pids(pid)[0] except IndexError: return # We never found the child pid with expected_cmd # If we've found a pid with no root helper, return it. # If we continue, we can find transient children. if pid_invoked_with_cmdline(pid, expected_cmd): break return pid def remove_abs_path(cmd): """Remove absolute path of executable in cmd Note: New instance of list is returned :param cmd: parsed shlex command (e.g. 
def remove_abs_path(cmd):
    """Remove absolute path of executable in cmd

    Note: New instance of list is returned

    :param cmd: parsed shlex command (e.g. ['/bin/foo', 'param1',
                'param two'])
    """
    if cmd and os.path.isabs(cmd[0]):
        cmd = list(cmd)
        cmd[0] = os.path.basename(cmd[0])
    return cmd


def get_cmdline_from_pid(pid):
    if pid is None or not os.path.exists('/proc/%s' % pid):
        return []
    with open('/proc/%s/cmdline' % pid, 'r') as f:
        return f.readline().split('\0')[:-1]


def cmd_matches_expected(cmd, expected_cmd):
    abs_cmd = remove_abs_path(cmd)
    abs_expected_cmd = remove_abs_path(expected_cmd)
    if abs_cmd != abs_expected_cmd:
        # Commands executed with #! are prefixed with the script
        # executable. Check for the expected cmd being a subset of the
        # actual cmd to cover this possibility.
        abs_cmd = remove_abs_path(abs_cmd[1:])
    return abs_cmd == abs_expected_cmd


def pid_invoked_with_cmdline(pid, expected_cmd):
    """Validate process with given pid is running with provided parameters

    """
    cmd = get_cmdline_from_pid(pid)
    return cmd_matches_expected(cmd, expected_cmd)


def wait_until_true(predicate, timeout=60, sleep=1, exception=None):
    """
    Wait until callable predicate is evaluated as True

    :param predicate: Callable deciding whether waiting should continue.
    Best practice is to instantiate predicate with functools.partial()
    :param timeout: Timeout in seconds how long should function wait.
    :param sleep: Polling interval for results in seconds.
    :param exception: Exception class for eventlet.Timeout.
    (see doc for eventlet.Timeout for more information)
    """
    with eventlet.timeout.Timeout(timeout, exception):
        while not predicate():
            eventlet.sleep(sleep)


def ensure_directory_exists_without_file(path):
    dirname = os.path.dirname(path)
    if os.path.isdir(dirname):
        try:
            os.unlink(path)
        except OSError:
            with excutils.save_and_reraise_exception() as ctxt:
                if not os.path.exists(path):
                    ctxt.reraise = False
    else:
        utils.ensure_dir(dirname)


def is_effective_user(user_id_or_name):
    """Returns True if user_id_or_name is effective user (id/name)."""
    euid = os.geteuid()
    if str(user_id_or_name) == str(euid):
        return True
    effective_user_name = pwd.getpwuid(euid).pw_name
    return user_id_or_name == effective_user_name


def is_effective_group(group_id_or_name):
    """Returns True if group_id_or_name is effective group (id/name)."""
    egid = os.getegid()
    if str(group_id_or_name) == str(egid):
        return True
    effective_group_name = grp.getgrgid(egid).gr_name
    return group_id_or_name == effective_group_name


class UnixDomainHTTPConnection(httplib.HTTPConnection):
    """Connection class for HTTP over UNIX domain socket."""

    def __init__(self, host, port=None, strict=None, timeout=None,
                 proxy_info=None):
        httplib.HTTPConnection.__init__(self, host, port, strict)
        self.timeout = timeout
        self.socket_path = cfg.CONF.metadata_proxy_socket

    def connect(self):
        self.sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
        if self.timeout:
            self.sock.settimeout(self.timeout)
        self.sock.connect(self.socket_path)


class UnixDomainHttpProtocol(eventlet.wsgi.HttpProtocol):
    # TODO(jlibosva): This is just a workaround not to set TCP_NODELAY on
    # the socket, due to eventlet change
    # 40714b1ffadd47b315ca07f9b85009448f0fe63d.
    # This should be removed once
    # https://github.com/eventlet/eventlet/issues/301 is fixed
    disable_nagle_algorithm = False

    def __init__(self, request, client_address, server):
        if client_address == '':
            client_address = ('', 0)
        # base class is old-style, so super does not work properly
        eventlet.wsgi.HttpProtocol.__init__(self, request, client_address,
                                            server)


class UnixDomainWSGIServer(wsgi.Server):
    def __init__(self, name, num_threads=None):
        self._socket = None
        self._launcher = None
        self._server = None
super(UnixDomainWSGIServer, self).__init__(name, disable_ssl=True, num_threads=num_threads) def start(self, application, file_socket, workers, backlog, mode=None): self._socket = eventlet.listen(file_socket, family=socket.AF_UNIX, backlog=backlog) if mode is not None: os.chmod(file_socket, mode) self._launch(application, workers=workers) def _run(self, application, socket): """Start a WSGI service in a new green thread.""" logger = logging.getLogger('eventlet.wsgi.server') eventlet.wsgi.server(socket, application, max_size=self.num_threads, protocol=UnixDomainHttpProtocol, log=logger) neutron-8.4.0/neutron/agent/linux/ovsdb_monitor.py0000664000567000056710000001053413044372760023554 0ustar jenkinsjenkins00000000000000# Copyright 2013 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import eventlet from oslo_log import log as logging from oslo_serialization import jsonutils from neutron._i18n import _LE from neutron.agent.linux import async_process from neutron.agent.ovsdb import api as ovsdb LOG = logging.getLogger(__name__) OVSDB_ACTION_INITIAL = 'initial' OVSDB_ACTION_INSERT = 'insert' OVSDB_ACTION_DELETE = 'delete' OVSDB_ACTION_NEW = 'new' class OvsdbMonitor(async_process.AsyncProcess): """Manages an invocation of 'ovsdb-client monitor'.""" def __init__(self, table_name, columns=None, format=None, respawn_interval=None): cmd = ['ovsdb-client', 'monitor', table_name] if columns: cmd.append(','.join(columns)) if format: cmd.append('--format=%s' % format) super(OvsdbMonitor, self).__init__(cmd, run_as_root=True, respawn_interval=respawn_interval, log_output=True, die_on_error=True) class SimpleInterfaceMonitor(OvsdbMonitor): """Monitors the Interface table of the local host's ovsdb for changes. The has_updates() method indicates whether changes to the ovsdb Interface table have been detected since the monitor started or since the previous access. """ def __init__(self, respawn_interval=None): super(SimpleInterfaceMonitor, self).__init__( 'Interface', columns=['name', 'ofport', 'external_ids'], format='json', respawn_interval=respawn_interval, ) self.new_events = {'added': [], 'removed': []} @property def has_updates(self): """Indicate whether the ovsdb Interface table has been updated. If the monitor process is not active an error will be logged since it won't be able to communicate any update. This situation should be temporary if respawn_interval is set. 
""" if not self.is_active(): LOG.error(_LE("Interface monitor is not active")) else: self.process_events() return bool(self.new_events['added'] or self.new_events['removed']) def get_events(self): self.process_events() events = self.new_events self.new_events = {'added': [], 'removed': []} return events def process_events(self): devices_added = [] devices_removed = [] dev_to_ofport = {} for row in self.iter_stdout(): json = jsonutils.loads(row).get('data') for ovs_id, action, name, ofport, external_ids in json: if external_ids: external_ids = ovsdb.val_to_py(external_ids) if ofport: ofport = ovsdb.val_to_py(ofport) device = {'name': name, 'ofport': ofport, 'external_ids': external_ids} if action in (OVSDB_ACTION_INITIAL, OVSDB_ACTION_INSERT): devices_added.append(device) elif action == OVSDB_ACTION_DELETE: devices_removed.append(device) elif action == OVSDB_ACTION_NEW: dev_to_ofport[name] = ofport self.new_events['added'].extend(devices_added) self.new_events['removed'].extend(devices_removed) # update any events with ofports received from 'new' action for event in self.new_events['added']: event['ofport'] = dev_to_ofport.get(event['name'], event['ofport']) def start(self, block=False, timeout=5): super(SimpleInterfaceMonitor, self).start() if block: with eventlet.timeout.Timeout(timeout): while not self.is_active(): eventlet.sleep() neutron-8.4.0/neutron/agent/linux/openvswitch_firewall/0000775000567000056710000000000013044373210024540 5ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/agent/linux/openvswitch_firewall/rules.py0000664000567000056710000001222713044372760026261 0ustar jenkinsjenkins00000000000000# Copyright 2015 Red Hat, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import netaddr
from neutron_lib import constants as n_consts
from oslo_log import log as logging

from neutron.agent import firewall
from neutron.agent.linux import ip_lib
from neutron.agent.linux.openvswitch_firewall import constants as ovsfw_consts
from neutron.common import constants
from neutron.common import utils
from neutron.plugins.ml2.drivers.openvswitch.agent.common import constants \
    as ovs_consts

LOG = logging.getLogger(__name__)

FORBIDDEN_PREFIXES = (n_consts.IPv4_ANY, n_consts.IPv6_ANY)


def is_valid_prefix(ip_prefix):
    # IPv6 has multiple ways to describe the ::/0 network; converting to
    # IPNetwork and back to string unifies them
    return (ip_prefix and
            str(netaddr.IPNetwork(ip_prefix)) not in FORBIDDEN_PREFIXES)


def create_flows_from_rule_and_port(rule, port):
    ethertype = rule['ethertype']
    direction = rule['direction']
    dst_ip_prefix = rule.get('dest_ip_prefix')
    src_ip_prefix = rule.get('source_ip_prefix')

    flow_template = {
        'priority': 70,
        'dl_type': ovsfw_consts.ethertype_to_dl_type_map[ethertype],
        'reg_port': port.ofport,
    }

    if is_valid_prefix(dst_ip_prefix):
        if ip_lib.get_ip_version(dst_ip_prefix) == n_consts.IP_VERSION_4:
            flow_template["nw_dst"] = dst_ip_prefix
        elif ip_lib.get_ip_version(dst_ip_prefix) == n_consts.IP_VERSION_6:
            flow_template["ipv6_dst"] = dst_ip_prefix

    if is_valid_prefix(src_ip_prefix):
        if ip_lib.get_ip_version(src_ip_prefix) == n_consts.IP_VERSION_4:
            flow_template["nw_src"] = src_ip_prefix
        elif ip_lib.get_ip_version(src_ip_prefix) == n_consts.IP_VERSION_6:
            flow_template["ipv6_src"] = src_ip_prefix

    flows = create_protocol_flows(direction, flow_template, port, rule)

    return flows


def create_protocol_flows(direction, flow_template, port, rule):
    flow_template = flow_template.copy()
    if direction == firewall.INGRESS_DIRECTION:
        flow_template['table'] = ovs_consts.RULES_INGRESS_TABLE
        flow_template['dl_dst'] = port.mac
        flow_template['actions'] = "strip_vlan,output:{:d}".format(
            port.ofport)
    elif direction == firewall.EGRESS_DIRECTION:
        flow_template['table'] = ovs_consts.RULES_EGRESS_TABLE
        flow_template['dl_src'] = port.mac
        # Traffic can be egress from this port and at the same time ingress
        # to another port on the same host, so after the egress rules it is
        # resubmitted to check whether any ingress rules should be applied
        flow_template['actions'] = 'resubmit(,{:d})'.format(
            ovs_consts.ACCEPT_OR_INGRESS_TABLE)
    protocol = rule.get('protocol')
    try:
        flow_template['nw_proto'] = ovsfw_consts.protocol_to_nw_proto[
            protocol]
        if rule['ethertype'] == n_consts.IPv6 and protocol == 'icmp':
            flow_template['nw_proto'] = constants.PROTO_NUM_IPV6_ICMP
    except KeyError:
        pass
    flows = create_port_range_flows(flow_template, rule)
    return flows or [flow_template]
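# Illustration (indicative values only; table numbers come from ovs_consts):
# an ingress TCP rule for a port with mac fa:16:3e:12:34:56 on ofport 7
# yields a template along the lines of
#
#     {'table': <RULES_INGRESS_TABLE>, 'priority': 70, 'reg_port': 7,
#      'dl_dst': 'fa:16:3e:12:34:56', 'nw_proto': 6,
#      'actions': 'strip_vlan,output:7'}
#
# which create_port_range_flows() below then multiplies once per masked
# port-range value.
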
def create_port_range_flows(flow_template, rule):
    protocol = rule.get('protocol')
    if protocol not in ovsfw_consts.PROTOCOLS_WITH_PORTS:
        return []
    flows = []
    src_port_match = '{:s}_src'.format(protocol)
    src_port_min = rule.get('source_port_range_min')
    src_port_max = rule.get('source_port_range_max')
    dst_port_match = '{:s}_dst'.format(protocol)
    dst_port_min = rule.get('port_range_min')
    dst_port_max = rule.get('port_range_max')

    dst_port_range = []
    if dst_port_min and dst_port_max:
        dst_port_range = utils.port_rule_masking(dst_port_min, dst_port_max)

    src_port_range = []
    if src_port_min and src_port_max:
        src_port_range = utils.port_rule_masking(src_port_min, src_port_max)
        for port in src_port_range:
            flow = flow_template.copy()
            flow[src_port_match] = port
            if dst_port_range:
                for port in dst_port_range:
                    dst_flow = flow.copy()
                    dst_flow[dst_port_match] = port
                    flows.append(dst_flow)
            else:
                flows.append(flow)
    else:
        for port in dst_port_range:
            flow = flow_template.copy()
            flow[dst_port_match] = port
            flows.append(flow)

    return flows


def create_rule_for_ip_address(ip_address, rule):
    new_rule = rule.copy()
    del new_rule['remote_group_id']
    direction = rule['direction']
    ip_prefix = str(netaddr.IPNetwork(ip_address).cidr)
    new_rule[firewall.DIRECTION_IP_PREFIX[direction]] = ip_prefix
    LOG.debug('RULGEN: From rule %s with IP %s created new rule %s',
              rule, ip_address, new_rule)
    return new_rule
neutron-8.4.0/neutron/agent/linux/openvswitch_firewall/__init__.py0000664000567000056710000000132513044372736026666 0ustar jenkinsjenkins00000000000000# Copyright 2015
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from neutron.agent.linux.openvswitch_firewall import firewall

OVSFirewallDriver = firewall.OVSFirewallDriver
neutron-8.4.0/neutron/agent/linux/openvswitch_firewall/firewall.py0000664000567000056710000006221013044372760026731 0ustar jenkinsjenkins00000000000000# Copyright 2015
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import netaddr
from oslo_log import log as logging

from neutron._i18n import _, _LE, _LW
from neutron.agent import firewall
from neutron.agent.linux.openvswitch_firewall import constants as ovsfw_consts
from neutron.agent.linux.openvswitch_firewall import rules
from neutron.common import constants
from neutron.common import exceptions
from neutron.common import ipv6_utils
from neutron.plugins.ml2.drivers.openvswitch.agent.common import constants \
    as ovs_consts

LOG = logging.getLogger(__name__)


def _replace_register(flow_params, register_number, register_value):
    """Replace value from flows to given register number

    'register_value' key in dictionary will be replaced by register number
    given by 'register_number'

    :param flow_params: Dictionary containing defined flows
    :param register_number: The number of register where value will be stored
    :param register_value: Key to be replaced by register number
    """
    try:
        reg_port = flow_params[register_value]
        del flow_params[register_value]
        flow_params['reg{:d}'.format(register_number)] = reg_port
    except KeyError:
        pass


def create_reg_numbers(flow_params):
    """Replace reg_(port|net) values with defined register numbers"""
    _replace_register(flow_params, ovsfw_consts.REG_PORT, 'reg_port')
    _replace_register(flow_params, ovsfw_consts.REG_NET, 'reg_net')
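# Illustration (assumes REG_PORT and REG_NET resolve to OpenFlow registers
# 5 and 6 in ovsfw_consts): a symbolic flow such as
#
#     {'priority': 70, 'reg_port': 1, 'reg_net': 42}
#
# is rewritten in place by create_reg_numbers() to
#
#     {'priority': 70, 'reg5': 1, 'reg6': 42}
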
") class SecurityGroup(object): def __init__(self, id_): self.id = id_ self.raw_rules = [] self.remote_rules = [] self.members = {} self.ports = set() def update_rules(self, rules): """Separate raw and remote rules.""" self.raw_rules = [rule for rule in rules if 'remote_group_id' not in rule] self.remote_rules = [rule for rule in rules if 'remote_group_id' in rule] def get_ethertype_filtered_addresses(self, ethertype, exclude_addresses=None): exclude_addresses = set(exclude_addresses) or set() group_addresses = set(self.members.get(ethertype, [])) return list(group_addresses - exclude_addresses) class OFPort(object): def __init__(self, port_dict, ovs_port, vlan_tag): self.id = port_dict['device'] self.vlan_tag = vlan_tag self.mac = ovs_port.vif_mac self.lla_address = str(ipv6_utils.get_ipv6_addr_by_EUI64( constants.IPV6_LLA_PREFIX, self.mac)) self.ofport = ovs_port.ofport self.sec_groups = list() self.fixed_ips = port_dict.get('fixed_ips', []) self.neutron_port_dict = port_dict.copy() self.allowed_pairs_v4 = self._get_allowed_pairs(port_dict, version=4) self.allowed_pairs_v6 = self._get_allowed_pairs(port_dict, version=6) @staticmethod def _get_allowed_pairs(port_dict, version): aap_dict = port_dict.get('allowed_address_pairs', set()) return {(aap['mac_address'], aap['ip_address']) for aap in aap_dict if netaddr.IPAddress(aap['ip_address']).version == version} @property def ipv4_addresses(self): return [ip_addr for ip_addr in self.fixed_ips if netaddr.IPAddress(ip_addr).version == 4] @property def ipv6_addresses(self): return [ip_addr for ip_addr in self.fixed_ips if netaddr.IPAddress(ip_addr).version == 6] def update(self, port_dict): self.allowed_pairs_v4 = self._get_allowed_pairs(port_dict, version=4) self.allowed_pairs_v6 = self._get_allowed_pairs(port_dict, version=6) # Neighbour discovery uses LLA self.allowed_pairs_v6.add((self.mac, self.lla_address)) self.fixed_ips = port_dict.get('fixed_ips', []) self.neutron_port_dict = port_dict.copy() class SGPortMap(object): def __init__(self): self.ports = {} self.sec_groups = {} def get_or_create_sg(self, sg_id): try: sec_group = self.sec_groups[sg_id] except KeyError: sec_group = SecurityGroup(sg_id) self.sec_groups[sg_id] = sec_group return sec_group def create_port(self, port, port_dict): self.ports[port.id] = port self.update_port(port, port_dict) def update_port(self, port, port_dict): for sec_group in self.sec_groups.values(): sec_group.ports.discard(port) port.sec_groups = [self.get_or_create_sg(sg_id) for sg_id in port_dict['security_groups']] for sec_group in port.sec_groups: sec_group.ports.add(port) port.update(port_dict) def remove_port(self, port): for sec_group in port.sec_groups: sec_group.ports.discard(port) del self.ports[port.id] def update_rules(self, sg_id, rules): sec_group = self.get_or_create_sg(sg_id) sec_group.update_rules(rules) def update_members(self, sg_id, members): sec_group = self.get_or_create_sg(sg_id) sec_group.members = members class OVSFirewallDriver(firewall.FirewallDriver): REQUIRED_PROTOCOLS = [ ovs_consts.OPENFLOW10, ovs_consts.OPENFLOW11, ovs_consts.OPENFLOW12, ovs_consts.OPENFLOW13, ovs_consts.OPENFLOW14, ] provides_arp_spoofing_protection = True def __init__(self, integration_bridge): """Initialize object :param integration_bridge: Bridge on which openflow rules will be applied """ self.int_br = self.initialize_bridge(integration_bridge) self.sg_port_map = SGPortMap() self._deferred = False self._drop_all_unmatched_flows() def apply_port_filter(self, port): """We never call this method It exists 
        here to override abstract method of parent abstract class.
        """

    def security_group_updated(self, action_type, sec_group_ids,
                               device_ids=None):
        """This method is obsolete

        The current driver only supports enhanced rpc calls into security
        group agent. This method is never called from that place.
        """

    def _accept_flow(self, **flow):
        flow['ct_state'] = ovsfw_consts.OF_STATE_ESTABLISHED_NOT_REPLY
        self._add_flow(**flow)
        flow['ct_state'] = ovsfw_consts.OF_STATE_NEW_NOT_ESTABLISHED
        if flow['table'] == ovs_consts.RULES_INGRESS_TABLE:
            flow['actions'] = (
                'ct(commit,zone=NXM_NX_REG{:d}[0..15]),{:s}'.format(
                    ovsfw_consts.REG_NET, flow['actions']))
        self._add_flow(**flow)

    def _add_flow(self, **kwargs):
        dl_type = kwargs.get('dl_type')
        create_reg_numbers(kwargs)
        if isinstance(dl_type, int):
            kwargs['dl_type'] = "0x{:04x}".format(dl_type)
        if self._deferred:
            self.int_br.add_flow(**kwargs)
        else:
            self.int_br.br.add_flow(**kwargs)

    def _delete_flows(self, **kwargs):
        create_reg_numbers(kwargs)
        if self._deferred:
            self.int_br.delete_flows(**kwargs)
        else:
            self.int_br.br.delete_flows(**kwargs)

    @staticmethod
    def initialize_bridge(int_br):
        int_br.set_protocols(OVSFirewallDriver.REQUIRED_PROTOCOLS)
        return int_br.deferred(full_ordered=True)

    def _drop_all_unmatched_flows(self):
        for table in ovs_consts.OVS_FIREWALL_TABLES:
            self.int_br.br.add_flow(table=table, priority=0, actions='drop')

    def get_or_create_ofport(self, port):
        port_id = port['device']
        try:
            of_port = self.sg_port_map.ports[port_id]
        except KeyError:
            ovs_port = self.int_br.br.get_vif_port_by_id(port_id)
            if not ovs_port:
                raise OVSFWPortNotFound(port_id=port_id)
            try:
                other_config = self.int_br.br.db_get_val(
                    'Port', ovs_port.port_name, 'other_config')
                port_vlan_id = int(other_config['tag'])
            except (KeyError, TypeError):
                # The replacement values must be passed as a single dict;
                # keyword arguments are not accepted by the logging call.
                LOG.warning(_LW("Can't get tag for port %(port_id)s from its "
                                "other_config: %(other_config)s"),
                            {'port_id': port_id,
                             'other_config': other_config})
                port_vlan_id = ovs_consts.DEAD_VLAN_TAG
            of_port = OFPort(port, ovs_port, port_vlan_id)
            self.sg_port_map.create_port(of_port, port)
        else:
            self.sg_port_map.update_port(of_port, port)

        return of_port

    def is_port_managed(self, port):
        return port['device'] in self.sg_port_map.ports

    def prepare_port_filter(self, port):
        if not firewall.port_sec_enabled(port):
            return
        port_exists = self.is_port_managed(port)
        of_port = self.get_or_create_ofport(port)
        if port_exists:
            LOG.error(_LE("Initializing port %s that was already "
                          "initialized."),
                      port['device'])
            self.delete_all_port_flows(of_port)
        self.initialize_port_flows(of_port)
        self.add_flows_from_rules(of_port)

    def update_port_filter(self, port):
        """Update rules for given port

        Current existing filtering rules are removed and new ones are
        generated based on current loaded security group rules and members.
        """
        if not firewall.port_sec_enabled(port):
            self.remove_port_filter(port)
            return
        elif not self.is_port_managed(port):
            self.prepare_port_filter(port)
            return
        of_port = self.get_or_create_ofport(port)
        # TODO(jlibosva): Handle firewall blink
        self.delete_all_port_flows(of_port)
        self.initialize_port_flows(of_port)
        self.add_flows_from_rules(of_port)

    def remove_port_filter(self, port):
        """Remove port from firewall

        All flows related to this port are removed from ovs. Port is also
        removed from ports managed by this firewall.
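
        A typical sequence of calls over a port's life, as the agent drives
        this driver, looks roughly like the following (illustrative only)::

            driver.prepare_port_filter(port)   # port appears on the bridge
            driver.update_port_filter(port)    # security groups changed
            driver.remove_port_filter(port)    # port is unplugged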
""" if self.is_port_managed(port): of_port = self.get_or_create_ofport(port) self.delete_all_port_flows(of_port) self.sg_port_map.remove_port(of_port) def update_security_group_rules(self, sg_id, rules): self.sg_port_map.update_rules(sg_id, rules) def update_security_group_members(self, sg_id, member_ips): self.sg_port_map.update_members(sg_id, member_ips) def filter_defer_apply_on(self): self._deferred = True def filter_defer_apply_off(self): if self._deferred: self.int_br.apply_flows() self._deferred = False @property def ports(self): return {id_: port.neutron_port_dict for id_, port in self.sg_port_map.ports.items()} def initialize_port_flows(self, port): """Set base flows for port :param port: OFPort instance """ # Identify egress flow self._add_flow( table=ovs_consts.LOCAL_SWITCHING, priority=100, in_port=port.ofport, actions='set_field:{:d}->reg{:d},' 'set_field:{:d}->reg{:d},' 'resubmit(,{:d})'.format( port.ofport, ovsfw_consts.REG_PORT, port.vlan_tag, ovsfw_consts.REG_NET, ovs_consts.BASE_EGRESS_TABLE) ) # Identify ingress flows after egress filtering self._add_flow( table=ovs_consts.LOCAL_SWITCHING, priority=90, dl_dst=port.mac, actions='set_field:{:d}->reg{:d},' 'set_field:{:d}->reg{:d},' 'resubmit(,{:d})'.format( port.ofport, ovsfw_consts.REG_PORT, port.vlan_tag, ovsfw_consts.REG_NET, ovs_consts.BASE_INGRESS_TABLE), ) self._initialize_egress(port) self._initialize_ingress(port) def _initialize_egress_ipv6_icmp(self, port): for icmp_type in constants.ICMPV6_ALLOWED_TYPES: self._add_flow( table=ovs_consts.BASE_EGRESS_TABLE, priority=95, in_port=port.ofport, reg_port=port.ofport, dl_type=constants.ETHERTYPE_IPV6, nw_proto=constants.PROTO_NUM_IPV6_ICMP, icmp_type=icmp_type, actions='normal' ) def _initialize_egress(self, port): """Identify egress traffic and send it to egress base""" self._initialize_egress_ipv6_icmp(port) # Apply mac/ip pairs for IPv4 allowed_pairs = port.allowed_pairs_v4.union( {(port.mac, ip_addr) for ip_addr in port.ipv4_addresses}) for mac_addr, ip_addr in allowed_pairs: self._add_flow( table=ovs_consts.BASE_EGRESS_TABLE, priority=95, in_port=port.ofport, reg_port=port.ofport, dl_src=mac_addr, dl_type=constants.ETHERTYPE_ARP, arp_spa=ip_addr, actions='normal' ) self._add_flow( table=ovs_consts.BASE_EGRESS_TABLE, priority=65, reg_port=port.ofport, ct_state=ovsfw_consts.OF_STATE_NOT_TRACKED, dl_type=constants.ETHERTYPE_IP, in_port=port.ofport, dl_src=mac_addr, nw_src=ip_addr, actions='ct(table={:d},zone=NXM_NX_REG{:d}[0..15])'.format( ovs_consts.RULES_EGRESS_TABLE, ovsfw_consts.REG_NET) ) # Apply mac/ip pairs for IPv6 allowed_pairs = port.allowed_pairs_v6.union( {(port.mac, ip_addr) for ip_addr in port.ipv6_addresses}) for mac_addr, ip_addr in allowed_pairs: self._add_flow( table=ovs_consts.BASE_EGRESS_TABLE, priority=65, reg_port=port.ofport, in_port=port.ofport, ct_state=ovsfw_consts.OF_STATE_NOT_TRACKED, dl_type=constants.ETHERTYPE_IPV6, dl_src=mac_addr, ipv6_src=ip_addr, actions='ct(table={:d},zone=NXM_NX_REG{:d}[0..15])'.format( ovs_consts.RULES_EGRESS_TABLE, ovsfw_consts.REG_NET) ) # DHCP discovery for dl_type, src_port, dst_port in ( (constants.ETHERTYPE_IP, 68, 67), (constants.ETHERTYPE_IPV6, 546, 547)): self._add_flow( table=ovs_consts.BASE_EGRESS_TABLE, priority=80, reg_port=port.ofport, in_port=port.ofport, dl_type=dl_type, nw_proto=constants.PROTO_NUM_UDP, tp_src=src_port, tp_dst=dst_port, actions='resubmit(,{:d})'.format( ovs_consts.ACCEPT_OR_INGRESS_TABLE) ) # Ban dhcp service running on an instance for dl_type, src_port, dst_port in ( 
(constants.ETHERTYPE_IP, 67, 68), (constants.ETHERTYPE_IPV6, 547, 546)): self._add_flow( table=ovs_consts.BASE_EGRESS_TABLE, priority=70, in_port=port.ofport, reg_port=port.ofport, dl_type=dl_type, nw_proto=constants.PROTO_NUM_UDP, tp_src=src_port, tp_dst=dst_port, actions='drop' ) # Drop all remaining not tracked egress connections self._add_flow( table=ovs_consts.BASE_EGRESS_TABLE, priority=10, ct_state=ovsfw_consts.OF_STATE_NOT_TRACKED, in_port=port.ofport, reg_port=port.ofport, actions='drop' ) # Fill in accept_or_ingress table by checking that traffic is ingress # and if not, accept it self._add_flow( table=ovs_consts.ACCEPT_OR_INGRESS_TABLE, priority=100, dl_dst=port.mac, actions='set_field:{:d}->reg{:d},resubmit(,{:d})'.format( port.ofport, ovsfw_consts.REG_PORT, ovs_consts.BASE_INGRESS_TABLE), ) for ethertype in [constants.ETHERTYPE_IP, constants.ETHERTYPE_IPV6]: self._add_flow( table=ovs_consts.ACCEPT_OR_INGRESS_TABLE, priority=90, dl_type=ethertype, reg_port=port.ofport, ct_state=ovsfw_consts.OF_STATE_NEW_NOT_ESTABLISHED, actions='ct(commit,zone=NXM_NX_REG{:d}[0..15]),normal'.format( ovsfw_consts.REG_NET) ) self._add_flow( table=ovs_consts.ACCEPT_OR_INGRESS_TABLE, priority=80, reg_port=port.ofport, actions='normal' ) def _initialize_tracked_egress(self, port): # Drop invalid packets self._add_flow( table=ovs_consts.RULES_EGRESS_TABLE, priority=50, ct_state=ovsfw_consts.OF_STATE_INVALID, actions='drop' ) # Drop traffic for removed sg rules self._add_flow( table=ovs_consts.RULES_EGRESS_TABLE, priority=50, reg_port=port.ofport, ct_mark=ovsfw_consts.CT_MARK_INVALID, actions='drop' ) for state in ( ovsfw_consts.OF_STATE_ESTABLISHED_REPLY, ovsfw_consts.OF_STATE_RELATED, ): self._add_flow( table=ovs_consts.RULES_EGRESS_TABLE, priority=50, ct_state=state, ct_mark=ovsfw_consts.CT_MARK_NORMAL, reg_port=port.ofport, ct_zone=port.vlan_tag, actions='normal' ) self._add_flow( table=ovs_consts.RULES_EGRESS_TABLE, priority=40, reg_port=port.ofport, ct_state=ovsfw_consts.OF_STATE_NOT_ESTABLISHED, actions='drop' ) for ethertype in [constants.ETHERTYPE_IP, constants.ETHERTYPE_IPV6]: self._add_flow( table=ovs_consts.RULES_EGRESS_TABLE, priority=40, dl_type=ethertype, reg_port=port.ofport, ct_state=ovsfw_consts.OF_STATE_ESTABLISHED, actions="ct(commit,zone=NXM_NX_REG{:d}[0..15]," "exec(set_field:{:s}->ct_mark))".format( ovsfw_consts.REG_NET, ovsfw_consts.CT_MARK_INVALID) ) def _initialize_ingress_ipv6_icmp(self, port): for icmp_type in constants.ICMPV6_ALLOWED_TYPES: self._add_flow( table=ovs_consts.BASE_INGRESS_TABLE, priority=100, reg_port=port.ofport, dl_dst=port.mac, dl_type=constants.ETHERTYPE_IPV6, nw_proto=constants.PROTO_NUM_IPV6_ICMP, icmp_type=icmp_type, actions='strip_vlan,output:{:d}'.format(port.ofport), ) def _initialize_ingress(self, port): # Allow incoming ARPs self._add_flow( table=ovs_consts.BASE_INGRESS_TABLE, priority=100, dl_type=constants.ETHERTYPE_ARP, reg_port=port.ofport, dl_dst=port.mac, actions='strip_vlan,output:{:d}'.format(port.ofport), ) self._initialize_ingress_ipv6_icmp(port) # DHCP offers for dl_type, src_port, dst_port in ( (constants.ETHERTYPE_IP, 67, 68), (constants.ETHERTYPE_IPV6, 547, 546)): self._add_flow( table=ovs_consts.BASE_INGRESS_TABLE, priority=95, reg_port=port.ofport, dl_type=dl_type, nw_proto=constants.PROTO_NUM_UDP, tp_src=src_port, tp_dst=dst_port, actions='strip_vlan,output:{:d}'.format(port.ofport), ) # Track untracked for dl_type in (constants.ETHERTYPE_IP, constants.ETHERTYPE_IPV6): self._add_flow( table=ovs_consts.BASE_INGRESS_TABLE, 
priority=90, reg_port=port.ofport, dl_type=dl_type, ct_state=ovsfw_consts.OF_STATE_NOT_TRACKED, actions='ct(table={:d},zone=NXM_NX_REG{:d}[0..15])'.format( ovs_consts.RULES_INGRESS_TABLE, ovsfw_consts.REG_NET) ) self._add_flow( table=ovs_consts.BASE_INGRESS_TABLE, ct_state=ovsfw_consts.OF_STATE_TRACKED, priority=80, reg_port=port.ofport, dl_dst=port.mac, actions='resubmit(,{:d})'.format(ovs_consts.RULES_INGRESS_TABLE) ) def _initialize_tracked_ingress(self, port): # Drop invalid packets self._add_flow( table=ovs_consts.RULES_INGRESS_TABLE, priority=50, ct_state=ovsfw_consts.OF_STATE_INVALID, actions='drop' ) # Drop traffic for removed sg rules self._add_flow( table=ovs_consts.RULES_INGRESS_TABLE, priority=50, reg_port=port.ofport, ct_mark=ovsfw_consts.CT_MARK_INVALID, actions='drop' ) # Allow established and related connections for state in (ovsfw_consts.OF_STATE_ESTABLISHED_REPLY, ovsfw_consts.OF_STATE_RELATED): self._add_flow( table=ovs_consts.RULES_INGRESS_TABLE, priority=50, dl_dst=port.mac, reg_port=port.ofport, ct_state=state, ct_mark=ovsfw_consts.CT_MARK_NORMAL, ct_zone=port.vlan_tag, actions='strip_vlan,output:{:d}'.format(port.ofport) ) self._add_flow( table=ovs_consts.RULES_INGRESS_TABLE, priority=40, reg_port=port.ofport, ct_state=ovsfw_consts.OF_STATE_NOT_ESTABLISHED, actions='drop' ) for ethertype in [constants.ETHERTYPE_IP, constants.ETHERTYPE_IPV6]: self._add_flow( table=ovs_consts.RULES_INGRESS_TABLE, priority=40, dl_type=ethertype, reg_port=port.ofport, ct_state=ovsfw_consts.OF_STATE_ESTABLISHED, actions="ct(commit,zone=NXM_NX_REG{:d}[0..15]," "exec(set_field:{:s}->ct_mark))".format( ovsfw_consts.REG_NET, ovsfw_consts.CT_MARK_INVALID) ) def add_flows_from_rules(self, port): self._initialize_tracked_ingress(port) self._initialize_tracked_egress(port) LOG.debug('Creating flow rules for port %s that is port %d in OVS', port.id, port.ofport) rules_generator = self.create_rules_generator_for_port(port) for rule in rules_generator: flows = rules.create_flows_from_rule_and_port(rule, port) LOG.debug("RULGEN: Rules generated for flow %s are %s", rule, flows) for flow in flows: self._accept_flow(**flow) def create_rules_generator_for_port(self, port): for sec_group in port.sec_groups: for rule in sec_group.raw_rules: yield rule for rule in sec_group.remote_rules: remote_group = self.sg_port_map.sec_groups[ rule['remote_group_id']] for ip_addr in remote_group.get_ethertype_filtered_addresses( rule['ethertype'], port.fixed_ips): yield rules.create_rule_for_ip_address(ip_addr, rule) def delete_all_port_flows(self, port): """Delete all flows for given port""" self._delete_flows(table=ovs_consts.LOCAL_SWITCHING, dl_dst=port.mac) self._delete_flows(table=ovs_consts.LOCAL_SWITCHING, in_port=port.ofport) self._delete_flows(reg_port=port.ofport) self._delete_flows(table=ovs_consts.ACCEPT_OR_INGRESS_TABLE, dl_dst=port.mac) neutron-8.4.0/neutron/agent/linux/openvswitch_firewall/constants.py0000664000567000056710000000275513044372760027150 0ustar jenkinsjenkins00000000000000# Copyright 2015 # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the
# License for the specific language governing permissions and limitations
# under the License.

from neutron.common import constants

OF_STATE_NOT_TRACKED = "-trk"
OF_STATE_TRACKED = "+trk"
OF_STATE_NEW_NOT_ESTABLISHED = "+new-est"
OF_STATE_NOT_ESTABLISHED = "-est"
OF_STATE_ESTABLISHED = "+est"
OF_STATE_ESTABLISHED_NOT_REPLY = "+est-rel-rpl"
OF_STATE_ESTABLISHED_REPLY = "+est-rel+rpl"
OF_STATE_RELATED = "-new-est+rel-inv"
OF_STATE_INVALID = "+trk+inv"
OF_STATE_NEW = "+new"
OF_STATE_NOT_REPLY_NOT_NEW = "-new-rpl"

CT_MARK_NORMAL = '0x0'
CT_MARK_INVALID = '0x1'

REG_PORT = 5
REG_NET = 6

protocol_to_nw_proto = {
    constants.PROTO_NAME_ICMP: constants.PROTO_NUM_ICMP,
    constants.PROTO_NAME_TCP: constants.PROTO_NUM_TCP,
    constants.PROTO_NAME_UDP: constants.PROTO_NUM_UDP,
}

PROTOCOLS_WITH_PORTS = (constants.PROTO_NAME_TCP,
                        constants.PROTO_NAME_UDP)

ethertype_to_dl_type_map = {
    constants.IPv4: constants.ETHERTYPE_IP,
    constants.IPv6: constants.ETHERTYPE_IPV6,
}
neutron-8.4.0/neutron/agent/linux/ip_conntrack.py0000664000567000056710000000702413044372760023342 0ustar jenkinsjenkins00000000000000#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import netaddr
from oslo_log import log as logging

from neutron._i18n import _LE
from neutron.agent.linux import utils as linux_utils

LOG = logging.getLogger(__name__)


class IpConntrackManager(object):
    """Smart wrapper for ip conntrack."""

    def __init__(self, zone_lookup_func, execute=None, namespace=None):
        self.get_device_zone = zone_lookup_func
        self.execute = execute or linux_utils.execute
        self.namespace = namespace

    @staticmethod
    def _generate_conntrack_cmd_by_rule(rule, namespace):
        ethertype = rule.get('ethertype')
        protocol = rule.get('protocol')
        direction = rule.get('direction')
        cmd = ['conntrack', '-D']
        if protocol:
            cmd.extend(['-p', str(protocol)])
        cmd.extend(['-f', str(ethertype).lower()])
        cmd.append('-d' if direction == 'ingress' else '-s')
        cmd_ns = []
        if namespace:
            cmd_ns.extend(['ip', 'netns', 'exec', namespace])
        cmd_ns.extend(cmd)
        return cmd_ns

    def _get_conntrack_cmds(self, device_info_list, rule, remote_ip=None):
        conntrack_cmds = set()
        cmd = self._generate_conntrack_cmd_by_rule(rule, self.namespace)
        ethertype = rule.get('ethertype')
        for device_info in device_info_list:
            zone_id = self.get_device_zone(device_info['device'])
            ips = device_info.get('fixed_ips', [])
            for ip in ips:
                net = netaddr.IPNetwork(ip)
                if str(net.version) not in ethertype:
                    continue
                ip_cmd = [str(net.ip), '-w', zone_id]
                if remote_ip and str(
                        netaddr.IPNetwork(remote_ip).version) in ethertype:
                    ip_cmd.extend(['-s', str(remote_ip)])
                conntrack_cmds.add(tuple(cmd + ip_cmd))
        return conntrack_cmds

    def _delete_conntrack_state(self, device_info_list, rule, remote_ip=None):
        conntrack_cmds = self._get_conntrack_cmds(device_info_list,
                                                  rule, remote_ip)
        for cmd in conntrack_cmds:
            try:
                self.execute(list(cmd), run_as_root=True,
                             check_exit_code=True,
                             extra_ok_codes=[1])
            except RuntimeError:
                LOG.exception(
                    _LE("Failed to execute conntrack command %s"), str(cmd))

    def delete_conntrack_state_by_rule(self, device_info_list,
rule): self._delete_conntrack_state(device_info_list, rule) def delete_conntrack_state_by_remote_ips(self, device_info_list, ethertype, remote_ips): rule = {'ethertype': str(ethertype).lower(), 'direction': 'ingress'} if remote_ips: for remote_ip in remote_ips: self._delete_conntrack_state( device_info_list, rule, remote_ip) else: self._delete_conntrack_state(device_info_list, rule) neutron-8.4.0/neutron/agent/linux/bridge_lib.py0000664000567000056710000000577313044372760022763 0ustar jenkinsjenkins00000000000000# Copyright 2015 Intel Corporation. # Copyright 2015 Isaku Yamahata # # All Rights Reserved. # # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import os from oslo_log import log as logging from neutron.agent.linux import ip_lib LOG = logging.getLogger(__name__) # NOTE(toabctl): Don't use /sys/devices/virtual/net here because not all tap # devices are listed here (i.e. when using Xen) BRIDGE_FS = "/sys/class/net/" BRIDGE_INTERFACE_FS = BRIDGE_FS + "%(bridge)s/brif/%(interface)s" BRIDGE_INTERFACES_FS = BRIDGE_FS + "%s/brif/" BRIDGE_PORT_FS_FOR_DEVICE = BRIDGE_FS + "%s/brport" BRIDGE_PATH_FOR_DEVICE = BRIDGE_PORT_FS_FOR_DEVICE + '/bridge' def is_bridged_interface(interface): if not interface: return False else: return os.path.exists(BRIDGE_PORT_FS_FOR_DEVICE % interface) def get_interface_bridged_time(interface): try: return os.stat(BRIDGE_PORT_FS_FOR_DEVICE % interface).st_mtime except OSError: pass def get_bridge_names(): return os.listdir(BRIDGE_FS) class BridgeDevice(ip_lib.IPDevice): def _brctl(self, cmd): cmd = ['brctl'] + cmd ip_wrapper = ip_lib.IPWrapper(self.namespace) return ip_wrapper.netns.execute(cmd, run_as_root=True) @classmethod def addbr(cls, name, namespace=None): bridge = cls(name, namespace) bridge._brctl(['addbr', bridge.name]) return bridge @classmethod def get_interface_bridge(cls, interface): try: path = os.readlink(BRIDGE_PATH_FOR_DEVICE % interface) except OSError: return None else: name = path.rpartition('/')[-1] return cls(name) def delbr(self): return self._brctl(['delbr', self.name]) def addif(self, interface): return self._brctl(['addif', self.name, interface]) def delif(self, interface): return self._brctl(['delif', self.name, interface]) def setfd(self, fd): return self._brctl(['setfd', self.name, str(fd)]) def disable_stp(self): return self._brctl(['stp', self.name, 'off']) def owns_interface(self, interface): return os.path.exists( BRIDGE_INTERFACE_FS % {'bridge': self.name, 'interface': interface}) def get_interfaces(self): try: return os.listdir(BRIDGE_INTERFACES_FS % self.name) except OSError: return [] neutron-8.4.0/neutron/agent/linux/iptables_manager.py0000664000567000056710000007601313044372760024171 0ustar jenkinsjenkins00000000000000# Copyright 2012 Locaweb. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# based on
# https://github.com/openstack/nova/blob/master/nova/network/linux_net.py

"""Implements iptables rules using linux utilities."""

import collections
import contextlib
import difflib
import os
import re
import sys

from oslo_concurrency import lockutils
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import excutils
import six

from neutron._i18n import _, _LE, _LW
from neutron.agent.common import config
from neutron.agent.linux import iptables_comments as ic
from neutron.agent.linux import utils as linux_utils
from neutron.common import exceptions as n_exc
from neutron.common import utils

LOG = logging.getLogger(__name__)

config.register_iptables_opts(cfg.CONF)


# NOTE(vish): Iptables supports chain names of up to 28 characters, and we
# add up to 12 characters to binary_name which is used as a prefix,
# so we limit it to 16 characters.
# (max_chain_name_length - len('-POSTROUTING') == 16)
def get_binary_name():
    """Grab the name of the binary we're running in."""
    return os.path.basename(sys.argv[0])[:16].replace(' ', '_')

binary_name = get_binary_name()

# A length of a chain name must be less than or equal to 11 characters.
# <max length of iptables chain name> - (<binary_name> + '-') = 28-(16+1) = 11
MAX_CHAIN_LEN_WRAP = 11
MAX_CHAIN_LEN_NOWRAP = 28

# Number of iptables rules to print before and after a rule that causes a
# failure during iptables-restore
IPTABLES_ERROR_LINES_OF_CONTEXT = 5


def comment_rule(rule, comment):
    if not cfg.CONF.AGENT.comment_iptables_rules or not comment:
        return rule
    # iptables-save outputs the comment before the jump so we need to match
    # that order so _find_last_entry works
    comment = '-m comment --comment "%s"' % comment
    if rule.startswith('-j'):
        # this is a jump only rule so we just put the comment first
        return '%s %s' % (comment, rule)
    try:
        jpos = rule.index(' -j ')
        return ' '.join((rule[:jpos], comment, rule[jpos + 1:]))
    except ValueError:
        return '%s %s' % (rule, comment)


def get_chain_name(chain_name, wrap=True):
    if wrap:
        return chain_name[:MAX_CHAIN_LEN_WRAP]
    else:
        return chain_name[:MAX_CHAIN_LEN_NOWRAP]


class IptablesRule(object):
    """An iptables rule.

    You shouldn't need to use this class directly, it's only used by
    IptablesManager.
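
    A wrapped rule renders with the binary-name prefix, while an unwrapped
    one is emitted verbatim. To illustrate (hypothetical rule)::

        str(IptablesRule('INPUT', '-s 10.0.0.0/8 -j DROP', wrap=False))
        # -> '-A INPUT -s 10.0.0.0/8 -j DROP'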
""" def __init__(self, chain, rule, wrap=True, top=False, binary_name=binary_name, tag=None, comment=None): self.chain = get_chain_name(chain, wrap) self.rule = rule self.wrap = wrap self.top = top self.wrap_name = binary_name[:16] self.tag = tag self.comment = comment def __eq__(self, other): return ((self.chain == other.chain) and (self.rule == other.rule) and (self.top == other.top) and (self.wrap == other.wrap)) def __ne__(self, other): return not self == other def __str__(self): if self.wrap: chain = '%s-%s' % (self.wrap_name, self.chain) else: chain = self.chain return comment_rule('-A %s %s' % (chain, self.rule), self.comment) class IptablesTable(object): """An iptables table.""" def __init__(self, binary_name=binary_name): self.rules = [] self.remove_rules = [] self.chains = set() self.unwrapped_chains = set() self.remove_chains = set() self.wrap_name = binary_name[:16] def add_chain(self, name, wrap=True): """Adds a named chain to the table. The chain name is wrapped to be unique for the component creating it, so different components of Nova can safely create identically named chains without interfering with one another. At the moment, its wrapped name is -, so if neutron-openvswitch-agent creates a chain named 'OUTPUT', it'll actually end up being named 'neutron-openvswi-OUTPUT'. """ name = get_chain_name(name, wrap) if wrap: self.chains.add(name) else: self.unwrapped_chains.add(name) def _select_chain_set(self, wrap): if wrap: return self.chains else: return self.unwrapped_chains def remove_chain(self, name, wrap=True): """Remove named chain. This removal "cascades". All rule in the chain are removed, as are all rules in other chains that jump to it. If the chain is not found, this is merely logged. """ name = get_chain_name(name, wrap) chain_set = self._select_chain_set(wrap) if name not in chain_set: LOG.debug('Attempted to remove chain %s which does not exist', name) return chain_set.remove(name) if not wrap: # non-wrapped chains and rules need to be dealt with specially, # so we keep a list of them to be iterated over in apply() self.remove_chains.add(name) # first, add rules to remove that have a matching chain name self.remove_rules += [str(r) for r in self.rules if r.chain == name] # next, remove rules from list that have a matching chain name self.rules = [r for r in self.rules if r.chain != name] if not wrap: jump_snippet = '-j %s' % name # next, add rules to remove that have a matching jump chain self.remove_rules += [str(r) for r in self.rules if jump_snippet in r.rule] else: jump_snippet = '-j %s-%s' % (self.wrap_name, name) # finally, remove rules from list that have a matching jump chain self.rules = [r for r in self.rules if jump_snippet not in r.rule] def add_rule(self, chain, rule, wrap=True, top=False, tag=None, comment=None): """Add a rule to the table. This is just like what you'd feed to iptables, just without the '-A ' bit at the start. However, if you need to jump to one of your wrapped chains, prepend its name with a '$' which will ensure the wrapping is applied correctly. 
""" chain = get_chain_name(chain, wrap) if wrap and chain not in self.chains: raise LookupError(_('Unknown chain: %r') % chain) if '$' in rule: rule = ' '.join( self._wrap_target_chain(e, wrap) for e in rule.split(' ')) self.rules.append(IptablesRule(chain, rule, wrap, top, self.wrap_name, tag, comment)) def _wrap_target_chain(self, s, wrap): if s.startswith('$'): s = ('%s-%s' % (self.wrap_name, get_chain_name(s[1:], wrap))) return s def remove_rule(self, chain, rule, wrap=True, top=False, comment=None): """Remove a rule from a chain. Note: The rule must be exactly identical to the one that was added. You cannot switch arguments around like you can with the iptables CLI tool. """ chain = get_chain_name(chain, wrap) try: if '$' in rule: rule = ' '.join( self._wrap_target_chain(e, wrap) for e in rule.split(' ')) self.rules.remove(IptablesRule(chain, rule, wrap, top, self.wrap_name, comment=comment)) if not wrap: self.remove_rules.append(str(IptablesRule(chain, rule, wrap, top, self.wrap_name, comment=comment))) except ValueError: LOG.warning(_LW('Tried to remove rule that was not there:' ' %(chain)r %(rule)r %(wrap)r %(top)r'), {'chain': chain, 'rule': rule, 'top': top, 'wrap': wrap}) def _get_chain_rules(self, chain, wrap): chain = get_chain_name(chain, wrap) return [rule for rule in self.rules if rule.chain == chain and rule.wrap == wrap] def empty_chain(self, chain, wrap=True): """Remove all rules from a chain.""" chained_rules = self._get_chain_rules(chain, wrap) for rule in chained_rules: self.rules.remove(rule) def clear_rules_by_tag(self, tag): if not tag: return rules = [rule for rule in self.rules if rule.tag == tag] for rule in rules: self.rules.remove(rule) class IptablesManager(object): """Wrapper for iptables. See IptablesTable for some usage docs A number of chains are set up to begin with. First, neutron-filter-top. It's added at the top of FORWARD and OUTPUT. Its name is not wrapped, so it's shared between the various neutron workers. It's intended for rules that need to live at the top of the FORWARD and OUTPUT chains. It's in both the ipv4 and ipv6 set of tables. For ipv4 and ipv6, the built-in INPUT, OUTPUT, and FORWARD filter chains are wrapped, meaning that the "real" INPUT chain has a rule that jumps to the wrapped INPUT chain, etc. Additionally, there's a wrapped chain named "local" which is jumped to from neutron-filter-top. For ipv4, the built-in PREROUTING, OUTPUT, and POSTROUTING nat chains are wrapped in the same was as the built-in filter chains. Additionally, there's a snat chain that is applied after the POSTROUTING chain. """ def __init__(self, _execute=None, state_less=False, use_ipv6=False, namespace=None, binary_name=binary_name): if _execute: self.execute = _execute else: self.execute = linux_utils.execute self.use_ipv6 = use_ipv6 self.namespace = namespace self.iptables_apply_deferred = False self.wrap_name = binary_name[:16] self.ipv4 = {'filter': IptablesTable(binary_name=self.wrap_name)} self.ipv6 = {'filter': IptablesTable(binary_name=self.wrap_name)} # Add a neutron-filter-top chain. It's intended to be shared # among the various neutron components. It sits at the very top # of FORWARD and OUTPUT. 
for tables in [self.ipv4, self.ipv6]: tables['filter'].add_chain('neutron-filter-top', wrap=False) tables['filter'].add_rule('FORWARD', '-j neutron-filter-top', wrap=False, top=True) tables['filter'].add_rule('OUTPUT', '-j neutron-filter-top', wrap=False, top=True) tables['filter'].add_chain('local') tables['filter'].add_rule('neutron-filter-top', '-j $local', wrap=False) # Wrap the built-in chains builtin_chains = {4: {'filter': ['INPUT', 'OUTPUT', 'FORWARD']}, 6: {'filter': ['INPUT', 'OUTPUT', 'FORWARD']}} if not state_less: self.ipv4.update( {'mangle': IptablesTable(binary_name=self.wrap_name)}) builtin_chains[4].update( {'mangle': ['PREROUTING', 'INPUT', 'FORWARD', 'OUTPUT', 'POSTROUTING']}) self.ipv6.update( {'mangle': IptablesTable(binary_name=self.wrap_name)}) builtin_chains[6].update( {'mangle': ['PREROUTING', 'INPUT', 'FORWARD', 'OUTPUT', 'POSTROUTING']}) self.ipv4.update( {'nat': IptablesTable(binary_name=self.wrap_name)}) builtin_chains[4].update({'nat': ['PREROUTING', 'OUTPUT', 'POSTROUTING']}) self.ipv4.update({'raw': IptablesTable(binary_name=self.wrap_name)}) builtin_chains[4].update({'raw': ['PREROUTING', 'OUTPUT']}) self.ipv6.update({'raw': IptablesTable(binary_name=self.wrap_name)}) builtin_chains[6].update({'raw': ['PREROUTING', 'OUTPUT']}) for ip_version in builtin_chains: if ip_version == 4: tables = self.ipv4 elif ip_version == 6: tables = self.ipv6 for table, chains in six.iteritems(builtin_chains[ip_version]): for chain in chains: tables[table].add_chain(chain) tables[table].add_rule(chain, '-j $%s' % (chain), wrap=False) if not state_less: # Add a neutron-postrouting-bottom chain. It's intended to be # shared among the various neutron components. We set it as the # last chain of POSTROUTING chain. self.ipv4['nat'].add_chain('neutron-postrouting-bottom', wrap=False) self.ipv4['nat'].add_rule('POSTROUTING', '-j neutron-postrouting-bottom', wrap=False) # We add a snat chain to the shared neutron-postrouting-bottom # chain so that it's applied last. self.ipv4['nat'].add_chain('snat') self.ipv4['nat'].add_rule('neutron-postrouting-bottom', '-j $snat', wrap=False, comment=ic.SNAT_OUT) # And then we add a float-snat chain and jump to first thing in # the snat chain. self.ipv4['nat'].add_chain('float-snat') self.ipv4['nat'].add_rule('snat', '-j $float-snat') # Add a mark chain to mangle PREROUTING chain. It is used to # identify ingress packets from a certain interface. self.ipv4['mangle'].add_chain('mark') self.ipv4['mangle'].add_rule('PREROUTING', '-j $mark') # Add address scope related chains self.ipv4['mangle'].add_chain('scope') self.ipv6['mangle'].add_chain('scope') self.ipv4['mangle'].add_chain('floatingip') self.ipv4['mangle'].add_chain('float-snat') self.ipv4['filter'].add_chain('scope') self.ipv6['filter'].add_chain('scope') self.ipv4['filter'].add_rule('FORWARD', '-j $scope') self.ipv6['filter'].add_rule('FORWARD', '-j $scope') # Add rules for marking traffic for address scopes mark_new_ingress_address_scope_by_interface = ( '-j $scope') copy_address_scope_for_existing = ( '-m connmark ! 
--mark 0x0/0xffff0000 '
                '-j CONNMARK --restore-mark '
                '--nfmask 0xffff0000 --ctmask 0xffff0000')
            mark_new_ingress_address_scope_by_floatingip = (
                '-j $floatingip')
            save_mark_to_connmark = (
                '-m connmark --mark 0x0/0xffff0000 '
                '-j CONNMARK --save-mark '
                '--nfmask 0xffff0000 --ctmask 0xffff0000')

            self.ipv4['mangle'].add_rule(
                'PREROUTING', mark_new_ingress_address_scope_by_interface)
            self.ipv4['mangle'].add_rule(
                'PREROUTING', copy_address_scope_for_existing)
            # The floating ip scope rules must come after the CONNTRACK rules
            # because the (CONN)MARK targets are non-terminating (this is true
            # despite them not being documented as such) and the floating ip
            # rules need to override the mark from CONNMARK to cross scopes.
            self.ipv4['mangle'].add_rule(
                'PREROUTING', mark_new_ingress_address_scope_by_floatingip)
            self.ipv4['mangle'].add_rule(
                'float-snat', save_mark_to_connmark)
            self.ipv6['mangle'].add_rule(
                'PREROUTING', mark_new_ingress_address_scope_by_interface)
            self.ipv6['mangle'].add_rule(
                'PREROUTING', copy_address_scope_for_existing)

    def get_tables(self, ip_version):
        return {4: self.ipv4, 6: self.ipv6}[ip_version]

    def get_chain(self, table, chain, ip_version=4, wrap=True):
        try:
            requested_table = self.get_tables(ip_version)[table]
        except KeyError:
            return []
        return requested_table._get_chain_rules(chain, wrap)

    def is_chain_empty(self, table, chain, ip_version=4, wrap=True):
        return not self.get_chain(table, chain, ip_version, wrap)

    @contextlib.contextmanager
    def defer_apply(self):
        """Defer apply context."""
        self.defer_apply_on()
        try:
            yield
        finally:
            try:
                self.defer_apply_off()
            except Exception:
                msg = _('Failure applying iptables rules')
                LOG.exception(msg)
                raise n_exc.IpTablesApplyException(msg)

    def defer_apply_on(self):
        self.iptables_apply_deferred = True

    def defer_apply_off(self):
        self.iptables_apply_deferred = False
        self._apply()

    def apply(self):
        if self.iptables_apply_deferred:
            return

        return self._apply()

    def _apply(self):
        lock_name = 'iptables'
        if self.namespace:
            lock_name += '-' + self.namespace

        with lockutils.lock(lock_name, utils.SYNCHRONIZED_PREFIX, True):
            return self._apply_synchronized()

    def get_rules_for_table(self, table):
        """Runs iptables-save on a table and returns the results."""
        args = ['iptables-save', '-t', table]
        if self.namespace:
            args = ['ip', 'netns', 'exec', self.namespace] + args
        return self.execute(args, run_as_root=True).split('\n')

    def _apply_synchronized(self):
        """Apply the current in-memory set of iptables rules.

        This will create a diff between the rules from the previous runs
        and replace them with the current set of rules.
        This happens atomically, thanks to iptables-restore.

        Returns a list of the changes that were sent to iptables-restore.
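
        A returned batch looks roughly like this (illustrative chain and
        rule)::

            # Generated by iptables_manager
            *filter
            -D neutron-l3-agent-INPUT 1
            -I neutron-l3-agent-INPUT 1 -j ACCEPT
            COMMIT
            # Completed by iptables_manager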
""" s = [('iptables', self.ipv4)] if self.use_ipv6: s += [('ip6tables', self.ipv6)] all_commands = [] # variable to keep track all commands for return val for cmd, tables in s: args = ['%s-save' % (cmd,)] if self.namespace: args = ['ip', 'netns', 'exec', self.namespace] + args save_output = self.execute(args, run_as_root=True) all_lines = save_output.split('\n') commands = [] # Traverse tables in sorted order for predictable dump output for table_name in sorted(tables): table = tables[table_name] # isolate the lines of the table we are modifying start, end = self._find_table(all_lines, table_name) old_rules = all_lines[start:end] # generate the new table state we want new_rules = self._modify_rules(old_rules, table, table_name) # generate the iptables commands to get between the old state # and the new state changes = _generate_path_between_rules(old_rules, new_rules) if changes: # if there are changes to the table, we put on the header # and footer that iptables-save needs commands += (['# Generated by iptables_manager'] + ['*%s' % table_name] + changes + ['COMMIT', '# Completed by iptables_manager']) if not commands: continue all_commands += commands args = ['%s-restore' % (cmd,), '-n'] if self.namespace: args = ['ip', 'netns', 'exec', self.namespace] + args try: # always end with a new line commands.append('') self.execute(args, process_input='\n'.join(commands), run_as_root=True) except RuntimeError as r_error: with excutils.save_and_reraise_exception(): try: line_no = int(re.search( 'iptables-restore: line ([0-9]+?) failed', str(r_error)).group(1)) context = IPTABLES_ERROR_LINES_OF_CONTEXT log_start = max(0, line_no - context) log_end = line_no + context except AttributeError: # line error wasn't found, print all lines instead log_start = 0 log_end = len(commands) log_lines = ('%7d. %s' % (idx, l) for idx, l in enumerate( commands[log_start:log_end], log_start + 1) ) LOG.error(_LE("IPTablesManager.apply failed to apply the " "following set of iptables rules:\n%s"), '\n'.join(log_lines)) LOG.debug("IPTablesManager.apply completed with success. %d iptables " "commands were issued", len(all_commands)) return all_commands def _find_table(self, lines, table_name): if len(lines) < 3: # length only <2 when fake iptables return (0, 0) try: start = lines.index('*%s' % table_name) except ValueError: # Couldn't find table_name LOG.debug('Unable to find table %s', table_name) return (0, 0) end = lines[start:].index('COMMIT') + start + 1 return (start, end) def _find_rules_index(self, lines): seen_chains = False rules_index = 0 for rules_index, rule in enumerate(lines): if not seen_chains: if rule.startswith(':'): seen_chains = True else: if not rule.startswith(':'): break if not seen_chains: rules_index = 2 return rules_index def _modify_rules(self, current_lines, table, table_name): # Chains are stored as sets to avoid duplicates. # Sort the output chains here to make their order predictable. unwrapped_chains = sorted(table.unwrapped_chains) chains = sorted(table.chains) # we don't want to change any rules that don't belong to us so we start # the new_filter with these rules new_filter = [line.strip() for line in current_lines if self.wrap_name not in line] # generate our list of chain names our_chains = [':%s-%s' % (self.wrap_name, name) for name in chains] # the unwrapped chains (e.g. 
        # neutron-filter-top) may already exist in the new_filter since they
        # aren't marked by the wrap_name so we only want to add them if they
        # aren't already there
        our_chains += [':%s' % name for name in unwrapped_chains
                       if not any(':%s' % name in s for s in new_filter)]

        our_top_rules = []
        our_bottom_rules = []
        for rule in table.rules:
            rule_str = str(rule)
            # similar to the unwrapped chains, there are some rules that
            # belong to us but they don't have the wrap name. we want to
            # remove them from the new_filter and then add them in the right
            # location in case our new rules changed the order.
            # (e.g. '-A FORWARD -j neutron-filter-top')
            new_filter = [s for s in new_filter if rule_str not in s]

            if rule.top:
                # rule.top == True means we want this rule to be at the top.
                our_top_rules += [rule_str]
            else:
                our_bottom_rules += [rule_str]

        our_chains_and_rules = our_chains + our_top_rules + our_bottom_rules

        # locate the position immediately after the existing chains to insert
        # our chains and rules
        rules_index = self._find_rules_index(new_filter)
        new_filter[rules_index:rules_index] = our_chains_and_rules

        def _weed_out_removes(line):
            # remove any rules or chains from the filter that were slated
            # for removal
            if line.startswith(':'):
                chain = line[1:]
                if chain in table.remove_chains:
                    table.remove_chains.remove(chain)
                    return False
            else:
                if line in table.remove_rules:
                    table.remove_rules.remove(line)
                    return False
            # Leave it alone
            return True

        seen_lines = set()

        # TODO(kevinbenton): remove this function and the next one. They are
        # just oversized brooms to sweep bugs under the rug!!! We generate the
        # rules and we shouldn't be generating duplicates.
        def _weed_out_duplicates(line):
            if line in seen_lines:
                thing = 'chain' if line.startswith(':') else 'rule'
                LOG.warning(_LW("Duplicate iptables %(thing)s detected. This "
                                "may indicate a bug in the iptables "
                                "%(thing)s generation code. Line: %(line)s"),
                            {'thing': thing, 'line': line})
                return False
            seen_lines.add(line)
            # Leave it alone
            return True

        new_filter.reverse()
        new_filter = [line for line in new_filter
                      if _weed_out_duplicates(line) and
                      _weed_out_removes(line)]
        new_filter.reverse()

        # flush lists, just in case a rule or chain marked for removal
        # was already gone.
(chains is a set, rules is a list) table.remove_chains.clear() table.remove_rules = [] return new_filter def _get_traffic_counters_cmd_tables(self, chain, wrap=True): name = get_chain_name(chain, wrap) cmd_tables = [('iptables', key) for key, table in self.ipv4.items() if name in table._select_chain_set(wrap)] if self.use_ipv6: cmd_tables += [('ip6tables', key) for key, table in self.ipv6.items() if name in table._select_chain_set(wrap)] return cmd_tables def get_traffic_counters(self, chain, wrap=True, zero=False): """Return the sum of the traffic counters of all rules of a chain.""" cmd_tables = self._get_traffic_counters_cmd_tables(chain, wrap) if not cmd_tables: LOG.warning(_LW('Attempted to get traffic counters of chain %s ' 'which does not exist'), chain) return name = get_chain_name(chain, wrap) acc = {'pkts': 0, 'bytes': 0} for cmd, table in cmd_tables: args = [cmd, '-t', table, '-L', name, '-n', '-v', '-x'] if zero: args.append('-Z') if self.namespace: args = ['ip', 'netns', 'exec', self.namespace] + args current_table = self.execute(args, run_as_root=True) current_lines = current_table.split('\n') for line in current_lines[2:]: if not line: break data = line.split() if (len(data) < 2 or not data[0].isdigit() or not data[1].isdigit()): break acc['pkts'] += int(data[0]) acc['bytes'] += int(data[1]) return acc def _generate_path_between_rules(old_rules, new_rules): """Generates iptables commands to get from old_rules to new_rules. This function diffs the two rule sets and then calculates the iptables commands necessary to get from the old rules to the new rules using insert and delete commands. """ old_by_chain = _get_rules_by_chain(old_rules) new_by_chain = _get_rules_by_chain(new_rules) old_chains, new_chains = set(old_by_chain.keys()), set(new_by_chain.keys()) # all referenced chains should be declared at the top before rules. # NOTE(kevinbenton): sorting and grouping chains is for determinism in # tests. iptables doesn't care about the order here statements = [':%s - [0:0]' % c for c in sorted(new_chains - old_chains)] sg_chains = [] other_chains = [] for chain in sorted(old_chains | new_chains): if '-sg-' in chain: sg_chains.append(chain) else: other_chains.append(chain) for chain in other_chains + sg_chains: statements += _generate_chain_diff_iptables_commands( chain, old_by_chain[chain], new_by_chain[chain]) # unreferenced chains get the axe for chain in sorted(old_chains - new_chains): statements += ['-X %s' % chain] return statements def _get_rules_by_chain(rules): by_chain = collections.defaultdict(list) for line in rules: if line.startswith(':'): chain = line[1:].split(' ', 1)[0] # even though this is a default dict, we need to manually add # chains to ensure that ones without rules are included because # they might be a jump reference if chain not in by_chain: by_chain[chain] = [] elif line.startswith('-A'): chain = line[3:].split(' ', 1)[0] by_chain[chain].append(line) return by_chain def _generate_chain_diff_iptables_commands(chain, old_chain_rules, new_chain_rules): # keep track of the old index because we have to insert rules # in the right position old_index = 1 statements = [] for line in difflib.ndiff(old_chain_rules, new_chain_rules): if line.startswith('?'): # skip ? 
because that's a guide string for intraline differences continue elif line.startswith('-'): # line deleted statements.append('-D %s %d' % (chain, old_index)) # since we are removing a line from the old rules, we # backup the index by 1 old_index -= 1 elif line.startswith('+'): # line added # strip the chain name since we have to add it before the index rule = line[5:].split(' ', 1)[-1] # rule inserted at this position statements.append('-I %s %d %s' % (chain, old_index, rule)) old_index += 1 return statements neutron-8.4.0/neutron/agent/linux/polling.py0000664000567000056710000000450613044372760022336 0ustar jenkinsjenkins00000000000000# Copyright 2013 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import contextlib import eventlet from oslo_log import log as logging from neutron.agent.common import base_polling from neutron.agent.linux import async_process from neutron.agent.linux import ovsdb_monitor from neutron.plugins.ml2.drivers.openvswitch.agent.common import constants LOG = logging.getLogger(__name__) @contextlib.contextmanager def get_polling_manager(minimize_polling=False, ovsdb_monitor_respawn_interval=( constants.DEFAULT_OVSDBMON_RESPAWN)): if minimize_polling: pm = InterfacePollingMinimizer( ovsdb_monitor_respawn_interval=ovsdb_monitor_respawn_interval) pm.start() else: pm = base_polling.AlwaysPoll() try: yield pm finally: if minimize_polling: pm.stop() class InterfacePollingMinimizer(base_polling.BasePollingManager): """Monitors ovsdb to determine when polling is required.""" def __init__( self, ovsdb_monitor_respawn_interval=constants.DEFAULT_OVSDBMON_RESPAWN): super(InterfacePollingMinimizer, self).__init__() self._monitor = ovsdb_monitor.SimpleInterfaceMonitor( respawn_interval=ovsdb_monitor_respawn_interval) def start(self): self._monitor.start() def stop(self): try: self._monitor.stop() except async_process.AsyncProcessException: LOG.debug("InterfacePollingMinimizer was not running when stopped") def _is_polling_required(self): # Maximize the chances of update detection having a chance to # collect output. eventlet.sleep() return self._monitor.has_updates def get_events(self): return self._monitor.get_events() neutron-8.4.0/neutron/agent/linux/ra.py0000664000567000056710000001610713044372760021274 0ustar jenkinsjenkins00000000000000# Copyright 2014 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
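# The CONFIG_TEMPLATE below renders one stanza per RA-enabled router port;
# a rendered radvd.conf looks roughly like this (illustrative interface
# name and prefix, with the default intervals from OPTS):
#
#     interface qr-4dbc8df6-12 {
#        AdvSendAdvert on;
#        MinRtrAdvInterval 30;
#        MaxRtrAdvInterval 100;
#        prefix 2001:db8::/64 {
#            AdvOnLink on;
#            AdvAutonomous on;
#        };
#     };
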
from itertools import chain as iter_chain import jinja2 import netaddr from oslo_config import cfg from oslo_log import log as logging import six from neutron._i18n import _ from neutron.agent.linux import external_process from neutron.agent.linux import utils from neutron.common import constants from neutron.common import utils as common_utils RADVD_SERVICE_NAME = 'radvd' RADVD_SERVICE_CMD = 'radvd' # We can configure max of 3 DNS servers in radvd RDNSS section. MAX_RDNSS_ENTRIES = 3 LOG = logging.getLogger(__name__) OPTS = [ cfg.StrOpt('ra_confs', default='$state_path/ra', help=_('Location to store IPv6 RA config files')), cfg.IntOpt('min_rtr_adv_interval', default=30, help=_('MinRtrAdvInterval setting for radvd.conf')), cfg.IntOpt('max_rtr_adv_interval', default=100, help=_('MaxRtrAdvInterval setting for radvd.conf')), ] CONFIG_TEMPLATE = jinja2.Template("""interface {{ interface_name }} { AdvSendAdvert on; MinRtrAdvInterval {{ min_rtr_adv_interval }}; MaxRtrAdvInterval {{ max_rtr_adv_interval }}; {% if network_mtu >= constants.IPV6_MIN_MTU %} AdvLinkMTU {{network_mtu}}; {% endif %} {% if constants.DHCPV6_STATELESS in ra_modes %} AdvOtherConfigFlag on; {% endif %} {% if constants.DHCPV6_STATEFUL in ra_modes %} AdvManagedFlag on; {% endif %} {% if dns_servers %} RDNSS {% for dns in dns_servers %} {{ dns }} {% endfor %} {}; {% endif %} {% for prefix in auto_config_prefixes %} prefix {{ prefix }} { AdvOnLink on; AdvAutonomous on; }; {% endfor %} {% for prefix in stateful_config_prefixes %} prefix {{ prefix }} { AdvOnLink on; AdvAutonomous off; }; {% endfor %} }; """) class DaemonMonitor(object): """Manage the data and state of an radvd process.""" def __init__(self, router_id, router_ns, process_monitor, dev_name_helper, agent_conf): self._router_id = router_id self._router_ns = router_ns self._process_monitor = process_monitor self._dev_name_helper = dev_name_helper self._agent_conf = agent_conf def _generate_radvd_conf(self, router_ports): radvd_conf = utils.get_conf_file_name(self._agent_conf.ra_confs, self._router_id, 'radvd.conf', True) buf = six.StringIO() network_mtu = 0 for p in router_ports: subnets = p.get('subnets', []) v6_subnets = [subnet for subnet in subnets if netaddr.IPNetwork(subnet['cidr']).version == 6] if not v6_subnets: continue ra_modes = {subnet['ipv6_ra_mode'] for subnet in v6_subnets} auto_config_prefixes = [subnet['cidr'] for subnet in v6_subnets if subnet['ipv6_ra_mode'] == constants.IPV6_SLAAC or subnet['ipv6_ra_mode'] == constants.DHCPV6_STATELESS] stateful_config_prefixes = [subnet['cidr'] for subnet in v6_subnets if subnet['ipv6_ra_mode'] == constants.DHCPV6_STATEFUL] interface_name = self._dev_name_helper(p['id']) slaac_subnets = [subnet for subnet in v6_subnets if subnet['ipv6_ra_mode'] == constants.IPV6_SLAAC] dns_servers = list(iter_chain(*[subnet['dns_nameservers'] for subnet in slaac_subnets if subnet.get('dns_nameservers')])) if self._agent_conf.advertise_mtu: network_mtu = p.get('mtu', 0) buf.write('%s' % CONFIG_TEMPLATE.render( ra_modes=list(ra_modes), interface_name=interface_name, auto_config_prefixes=auto_config_prefixes, stateful_config_prefixes=stateful_config_prefixes, dns_servers=dns_servers[0:MAX_RDNSS_ENTRIES], constants=constants, min_rtr_adv_interval=self._agent_conf.min_rtr_adv_interval, max_rtr_adv_interval=self._agent_conf.max_rtr_adv_interval, network_mtu=int(network_mtu))) common_utils.replace_file(radvd_conf, buf.getvalue()) return radvd_conf def _get_radvd_process_manager(self, callback=None): return 
external_process.ProcessManager( uuid=self._router_id, default_cmd_callback=callback, namespace=self._router_ns, service=RADVD_SERVICE_NAME, conf=self._agent_conf, run_as_root=True) def _spawn_radvd(self, radvd_conf): def callback(pid_file): # we need to use -m syslog and f.e. not -m stderr (the default) # or -m stderr_syslog so that radvd 2.0+ will close stderr and # exit after daemonization; otherwise, the current thread will # be locked waiting for result from radvd that won't ever come # until the process dies radvd_cmd = [RADVD_SERVICE_CMD, '-C', '%s' % radvd_conf, '-p', '%s' % pid_file, '-m', 'syslog'] return radvd_cmd pm = self._get_radvd_process_manager(callback) pm.enable(reload_cfg=True) self._process_monitor.register(uuid=self._router_id, service_name=RADVD_SERVICE_NAME, monitored_process=pm) LOG.debug("radvd enabled for router %s", self._router_id) def enable(self, router_ports): for p in router_ports: for subnet in p['subnets']: if netaddr.IPNetwork(subnet['cidr']).version == 6: LOG.debug("Enable IPv6 RA for router %s", self._router_id) radvd_conf = self._generate_radvd_conf(router_ports) self._spawn_radvd(radvd_conf) return # Kill the daemon if it's running self.disable() def disable(self): self._process_monitor.unregister(uuid=self._router_id, service_name=RADVD_SERVICE_NAME) pm = self._get_radvd_process_manager() pm.disable() utils.remove_conf_files(self._agent_conf.ra_confs, self._router_id) LOG.debug("radvd disabled for router %s", self._router_id) @property def enabled(self): return self._get_radvd_process_manager().active neutron-8.4.0/neutron/agent/linux/daemon.py0000664000567000056710000001765213044372736022146 0ustar jenkinsjenkins00000000000000# Copyright 2012 New Dream Network, LLC (DreamHost) # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import atexit import fcntl import grp import logging as std_logging from logging import handlers import os import pwd import signal import sys from oslo_log import log as logging from neutron._i18n import _, _LE, _LI from neutron.common import exceptions LOG = logging.getLogger(__name__) DEVNULL = object() # Note: We can't use sys.std*.fileno() here. sys.std* objects may be # random file-like objects that may not match the true system std* fds # - and indeed may not even have a file descriptor at all (eg: test # fixtures that monkey patch fixtures.StringStream onto sys.stdout). # Below we always want the _real_ well-known 0,1,2 Unix fds during # os.dup2 manipulation. 
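# As an illustration, after daemonize() below completes, descriptors 0-2
# all point at os.devnull (or at the explicitly supplied streams), via e.g.
#   os.dup2(devnull.fileno(), STDIN_FILENO)
# regardless of what the sys.std* objects were monkey patched to.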
STDIN_FILENO = 0 STDOUT_FILENO = 1 STDERR_FILENO = 2 def setuid(user_id_or_name): try: new_uid = int(user_id_or_name) except (TypeError, ValueError): new_uid = pwd.getpwnam(user_id_or_name).pw_uid if new_uid != 0: try: os.setuid(new_uid) except OSError: msg = _('Failed to set uid %s') % new_uid LOG.critical(msg) raise exceptions.FailToDropPrivilegesExit(msg) def setgid(group_id_or_name): try: new_gid = int(group_id_or_name) except (TypeError, ValueError): new_gid = grp.getgrnam(group_id_or_name).gr_gid if new_gid != 0: try: os.setgid(new_gid) except OSError: msg = _('Failed to set gid %s') % new_gid LOG.critical(msg) raise exceptions.FailToDropPrivilegesExit(msg) def unwatch_log(): """Replace WatchedFileHandler handlers by FileHandler ones. Neutron logging uses WatchedFileHandler handlers but they do not support privileges drop, this method replaces them by FileHandler handlers supporting privileges drop. """ log_root = logging.getLogger(None).logger to_replace = [h for h in log_root.handlers if isinstance(h, handlers.WatchedFileHandler)] for handler in to_replace: # NOTE(cbrandily): we use default delay(=False) to ensure the log file # is opened before privileges drop. new_handler = std_logging.FileHandler(handler.baseFilename, mode=handler.mode, encoding=handler.encoding) log_root.removeHandler(handler) log_root.addHandler(new_handler) def drop_privileges(user=None, group=None): """Drop privileges to user/group privileges.""" if user is None and group is None: return if os.geteuid() != 0: msg = _('Root permissions are required to drop privileges.') LOG.critical(msg) raise exceptions.FailToDropPrivilegesExit(msg) if group is not None: try: os.setgroups([]) except OSError: msg = _('Failed to remove supplemental groups') LOG.critical(msg) raise exceptions.FailToDropPrivilegesExit(msg) setgid(group) if user is not None: setuid(user) LOG.info(_LI("Process runs with uid/gid: %(uid)s/%(gid)s"), {'uid': os.getuid(), 'gid': os.getgid()}) class Pidfile(object): def __init__(self, pidfile, procname, uuid=None): self.pidfile = pidfile self.procname = procname self.uuid = uuid try: self.fd = os.open(pidfile, os.O_CREAT | os.O_RDWR) fcntl.flock(self.fd, fcntl.LOCK_EX | fcntl.LOCK_NB) except IOError: LOG.exception(_LE("Error while handling pidfile: %s"), pidfile) sys.exit(1) def __str__(self): return self.pidfile def unlock(self): fcntl.flock(self.fd, fcntl.LOCK_UN) def write(self, pid): os.ftruncate(self.fd, 0) os.write(self.fd, "%d" % pid) os.fsync(self.fd) def read(self): try: pid = int(os.read(self.fd, 128)) os.lseek(self.fd, 0, os.SEEK_SET) return pid except ValueError: return def is_running(self): pid = self.read() if not pid: return False cmdline = '/proc/%s/cmdline' % pid try: with open(cmdline, "r") as f: exec_out = f.readline() return self.procname in exec_out and (not self.uuid or self.uuid in exec_out) except IOError: return False class Daemon(object): """A generic daemon class. 
Usage: subclass the Daemon class and override the run() method
    """

    def __init__(self, pidfile, stdin=DEVNULL, stdout=DEVNULL, stderr=DEVNULL,
                 procname='python', uuid=None, user=None, group=None,
                 watch_log=True):
        """Note: pidfile may be None."""
        self.stdin = stdin
        self.stdout = stdout
        self.stderr = stderr
        self.procname = procname
        self.pidfile = (Pidfile(pidfile, procname, uuid)
                        if pidfile is not None else None)
        self.user = user
        self.group = group
        self.watch_log = watch_log

    def _fork(self):
        try:
            pid = os.fork()
            if pid > 0:
                os._exit(0)
        except OSError:
            LOG.exception(_LE('Fork failed'))
            sys.exit(1)

    def daemonize(self):
        """Daemonize process by doing Stevens' double fork."""

        # flush any buffered data before fork/dup2.
        if self.stdout is not DEVNULL:
            self.stdout.flush()
        if self.stderr is not DEVNULL:
            self.stderr.flush()
        # sys.std* may not match STD{OUT,ERR}_FILENO.  Tough.
        for f in (sys.stdout, sys.stderr):
            f.flush()

        # fork first time
        self._fork()

        # decouple from parent environment
        os.chdir("/")
        os.setsid()
        os.umask(0)

        # fork second time
        self._fork()

        # redirect standard file descriptors
        with open(os.devnull, 'w+') as devnull:
            stdin = devnull if self.stdin is DEVNULL else self.stdin
            stdout = devnull if self.stdout is DEVNULL else self.stdout
            stderr = devnull if self.stderr is DEVNULL else self.stderr
            os.dup2(stdin.fileno(), STDIN_FILENO)
            os.dup2(stdout.fileno(), STDOUT_FILENO)
            os.dup2(stderr.fileno(), STDERR_FILENO)

        if self.pidfile is not None:
            # write pidfile
            atexit.register(self.delete_pid)
            signal.signal(signal.SIGTERM, self.handle_sigterm)
            self.pidfile.write(os.getpid())

    def delete_pid(self):
        if self.pidfile is not None:
            os.remove(str(self.pidfile))

    def handle_sigterm(self, signum, frame):
        sys.exit(0)

    def start(self):
        """Start the daemon."""

        if self.pidfile is not None and self.pidfile.is_running():
            self.pidfile.unlock()
            LOG.error(_LE('Pidfile %s already exists. Daemon already '
                          'running?'), self.pidfile)
            sys.exit(1)

        # Start the daemon
        self.daemonize()
        self.run()

    def run(self):
        """Override this method and call super().run when subclassing Daemon.

        start() will call this method after the process has daemonized.
        """
        if not self.watch_log:
            unwatch_log()
        drop_privileges(self.user, self.group)
neutron-8.4.0/neutron/agent/linux/ip_link_support.py0000664000567000056710000000713113044372760024110 0ustar jenkinsjenkins00000000000000# Copyright 2014 Mellanox Technologies, Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
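# Usage sketch for this module (illustrative, not executed here): callers
# such as the SR-IOV agent can probe 'ip link' for VF option support, e.g.
#
#     section = IpLinkSupport.get_vf_mgmt_section()
#     if not IpLinkSupport.vf_mgmt_capability_supported(
#             section, IpLinkConstants.IP_LINK_CAPABILITY_VLAN,
#             IpLinkConstants.IP_LINK_SUB_CAPABILITY_QOS):
#         raise InvalidIpLinkCapability(capability="vlan qos")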
import re

from oslo_log import log as logging

from neutron._i18n import _, _LE
from neutron.agent.linux import utils
from neutron.common import exceptions as n_exc


LOG = logging.getLogger(__name__)


class IpLinkSupportError(n_exc.NeutronException):
    pass


class UnsupportedIpLinkCommand(IpLinkSupportError):
    message = _("ip link command is not supported: %(reason)s")


class InvalidIpLinkCapability(IpLinkSupportError):
    message = _("ip link capability %(capability)s is not supported")


class IpLinkConstants(object):
    IP_LINK_CAPABILITY_STATE = "state"
    IP_LINK_CAPABILITY_VLAN = "vlan"
    IP_LINK_CAPABILITY_RATE = "rate"
    IP_LINK_CAPABILITY_SPOOFCHK = "spoofchk"
    IP_LINK_SUB_CAPABILITY_QOS = "qos"


class IpLinkSupport(object):
    VF_BLOCK_REGEX = r"\[ vf NUM(?P<vf_block>.*) \] \]"

    CAPABILITY_REGEX = r"\[ %s (.*)"
    SUB_CAPABILITY_REGEX = r"\[ %(cap)s (.*) \[ %(subcap)s (.*)"

    @classmethod
    def get_vf_mgmt_section(cls):
        """Parses ip link help output, and gets vf block"""
        output = cls._get_ip_link_output()
        vf_block_pattern = re.search(cls.VF_BLOCK_REGEX,
                                     output,
                                     re.DOTALL | re.MULTILINE)
        if vf_block_pattern:
            return vf_block_pattern.group("vf_block")

    @classmethod
    def vf_mgmt_capability_supported(cls, vf_section, capability,
                                     subcapability=None):
        """Validate vf capability support

        Checks whether the given vf capability (and sub capability,
        if given) is supported

        :param vf_section: vf Num block content
        :param capability: for example: vlan, rate, spoofchk, state
        :param subcapability: for example: qos
        """
        if not vf_section:
            return False
        if subcapability:
            regex = cls.SUB_CAPABILITY_REGEX % {"cap": capability,
                                                "subcap": subcapability}
        else:
            regex = cls.CAPABILITY_REGEX % capability
        pattern_match = re.search(regex, vf_section,
                                  re.DOTALL | re.MULTILINE)
        return pattern_match is not None

    @classmethod
    def _get_ip_link_output(cls):
        """Gets the output of the ip link help command

        Runs the ip link help command and stores its output
        Note: 'ip link help' returns an error and writes its output to
        stderr, so we read the output from there. However, if this issue
        is ever fixed and the command writes to stdout, we will read the
        output from there too.
        """
        try:
            ip_cmd = ['ip', 'link', 'help']
            _stdout, _stderr = utils.execute(
                ip_cmd,
                check_exit_code=False,
                return_stderr=True,
                log_fail_as_error=False)
        except Exception as e:
            LOG.exception(_LE("Failed executing ip command"))
            raise UnsupportedIpLinkCommand(reason=e)
        return _stdout or _stderr
neutron-8.4.0/neutron/agent/l2/0000775000567000056710000000000013044373210017460 5ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/agent/l2/__init__.py0000664000567000056710000000000013044372736021573 0ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/agent/l2/agent_extension.py0000664000567000056710000000436413044372760023234 0ustar jenkinsjenkins00000000000000# Copyright (c) 2015 Mellanox Technologies, Ltd
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import abc

import six


@six.add_metaclass(abc.ABCMeta)
class AgentCoreResourceExtension(object):
    """Define stable abstract interface for agent extensions.
An agent extension extends the agent core functionality. """ def initialize(self, connection, driver_type): """Perform agent core resource extension initialization. :param connection: RPC connection that can be reused by the extension to define its RPC endpoints :param driver_type: a string that defines the agent type to the extension. Can be used to choose the right backend implementation. Called after all extensions have been loaded. No port handling will be called before this method. """ @abc.abstractmethod def handle_port(self, context, data): """Handle agent extension for port. This can be called on either create or update, depending on the code flow. Thus, it's this function's responsibility to check what actually changed. :param context: rpc context :param data: port data """ @abc.abstractmethod def delete_port(self, context, data): """Delete port from agent extension. :param context: rpc context :param data: port data """ def consume_api(self, agent_api): """Consume the AgentAPI instance from the AgentExtensionsManager This allows extensions to gain access to resources limited to the NeutronAgent. :param agent_api: An instance of an agent specific API """ neutron-8.4.0/neutron/agent/l2/extensions/0000775000567000056710000000000013044373210021657 5ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/agent/l2/extensions/manager.py0000664000567000056710000000662613044372760023666 0ustar jenkinsjenkins00000000000000# Copyright (c) 2015 Mellanox Technologies, Ltd # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import cfg from oslo_log import log import stevedore from neutron._i18n import _, _LE, _LI LOG = log.getLogger(__name__) L2_AGENT_EXT_MANAGER_NAMESPACE = 'neutron.agent.l2.extensions' L2_AGENT_EXT_MANAGER_OPTS = [ cfg.ListOpt('extensions', default=[], help=_('Extensions list to use')), ] def register_opts(conf): conf.register_opts(L2_AGENT_EXT_MANAGER_OPTS, 'agent') class AgentExtensionsManager(stevedore.named.NamedExtensionManager): """Manage agent extensions.""" def __init__(self, conf): super(AgentExtensionsManager, self).__init__( L2_AGENT_EXT_MANAGER_NAMESPACE, conf.agent.extensions, invoke_on_load=True, name_order=True) LOG.info(_LI("Loaded agent extensions: %s"), self.names()) def initialize(self, connection, driver_type, agent_api=None): """Initialize enabled L2 agent extensions. :param connection: RPC connection that can be reused by extensions to define their RPC endpoints :param driver_type: a string that defines the agent type to the extension. Can be used by the extension to choose the right backend implementation. :param agent_api: an AgentAPI instance that provides an API to interact with the agent that the manager is running in. """ # Initialize each agent extension in the list. 
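        # For example (a hypothetical configuration): with "extensions = qos"
        # under [agent], the loop below hands the shared RPC connection and
        # the driver type (e.g. 'ovs') to each loaded extension's
        # initialize().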
        for extension in self:
            LOG.info(_LI("Initializing agent extension '%s'"), extension.name)
            extension.obj.consume_api(agent_api)
            extension.obj.initialize(connection, driver_type)

    def handle_port(self, context, data):
        """Notify all agent extensions to handle port."""
        for extension in self:
            try:
                extension.obj.handle_port(context, data)
            # TODO(QoS) add agent extension exceptions and catch them here
            except AttributeError:
                LOG.exception(
                    _LE("Agent Extension '%(name)s' failed "
                        "while handling port update"),
                    {'name': extension.name}
                )

    def delete_port(self, context, data):
        """Notify all agent extensions to delete port."""
        for extension in self:
            try:
                extension.obj.delete_port(context, data)
            # TODO(QoS) add agent extension exceptions and catch them here
            # instead of AttributeError
            except AttributeError:
                LOG.exception(
                    _LE("Agent Extension '%(name)s' failed "
                        "while handling port deletion"),
                    {'name': extension.name}
                )
neutron-8.4.0/neutron/agent/l2/extensions/__init__.py0000664000567000056710000000000013044372736023772 0ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/agent/l2/extensions/qos.py0000664000567000056710000002476613044372760023053 0ustar jenkinsjenkins00000000000000# Copyright (c) 2015 Mellanox Technologies, Ltd
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import abc
import collections

from oslo_concurrency import lockutils
from oslo_log import log as logging
import six

from neutron._i18n import _LW, _LI
from neutron.agent.l2 import agent_extension
from neutron.agent.linux import tc_lib
from neutron.api.rpc.callbacks.consumer import registry
from neutron.api.rpc.callbacks import events
from neutron.api.rpc.callbacks import resources
from neutron.api.rpc.handlers import resources_rpc
from neutron.common import exceptions
from neutron import manager

LOG = logging.getLogger(__name__)


@six.add_metaclass(abc.ABCMeta)
class QosAgentDriver(object):
    """Defines stable abstract interface for QoS Agent Driver.

    QoS Agent driver defines the interface to be implemented by Agent
    for applying QoS Rules on a port.
    """

    # Each QoS driver should define the set of rule types that it supports,
    # and corresponding handlers that have the following names:
    #
    #     create_<rule_type>
    #     update_<rule_type>
    #     delete_<rule_type>
    #
    # where <rule_type> is one of VALID_RULE_TYPES
    SUPPORTED_RULES = set()

    @abc.abstractmethod
    def initialize(self):
        """Perform QoS agent driver initialization.
        """

    def create(self, port, qos_policy):
        """Apply QoS rules on port for the first time.

        :param port: port object.
        :param qos_policy: the QoS policy to be applied on port.
        """
        self._handle_update_create_rules('create', port, qos_policy)

    def update(self, port, qos_policy):
        """Apply QoS rules on port.

        :param port: port object.
        :param qos_policy: the QoS policy to be applied on port.
        """
        self._handle_update_create_rules('update', port, qos_policy)

    def delete(self, port, qos_policy=None):
        """Remove QoS rules from port.

        :param port: port object.
        :param qos_policy: the QoS policy to be removed from port.
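
        Usage sketch (illustrative)::

            driver.delete(port)              # drop all supported rule types
            driver.delete(port, qos_policy)  # drop only that policy's rules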
""" if qos_policy is None: rule_types = self.SUPPORTED_RULES else: rule_types = set( [rule.rule_type for rule in self._iterate_rules(qos_policy.rules)]) for rule_type in rule_types: self._handle_rule_delete(port, rule_type) def _iterate_rules(self, rules): for rule in rules: rule_type = rule.rule_type if rule_type in self.SUPPORTED_RULES: yield rule else: LOG.warning(_LW('Unsupported QoS rule type for %(rule_id)s: ' '%(rule_type)s; skipping'), {'rule_id': rule.id, 'rule_type': rule_type}) def _handle_rule_delete(self, port, rule_type): handler_name = "".join(("delete_", rule_type)) handler = getattr(self, handler_name) handler(port) def _handle_update_create_rules(self, action, port, qos_policy): for rule in self._iterate_rules(qos_policy.rules): if rule.should_apply_to_port(port): handler_name = "".join((action, "_", rule.rule_type)) handler = getattr(self, handler_name) handler(port, rule) else: LOG.debug("Port %(port)s excluded from QoS rule %(rule)s", {'port': port, 'rule': rule.id}) def _get_egress_burst_value(self, rule): """Return burst value used for egress bandwidth limitation. Because Egress bw_limit is done on ingress qdisc by LB and ovs drivers so it will return burst_value used by tc on as ingress_qdisc. """ return tc_lib.TcCommand.get_ingress_qdisc_burst_value( rule.max_kbps, rule.max_burst_kbps) class PortPolicyMap(object): def __init__(self): # we cannot use a dict of sets here because port dicts are not hashable self.qos_policy_ports = collections.defaultdict(dict) self.known_policies = {} self.port_policies = {} def get_ports(self, policy): return self.qos_policy_ports[policy.id].values() def get_policy(self, policy_id): return self.known_policies.get(policy_id) def update_policy(self, policy): self.known_policies[policy.id] = policy def has_policy_changed(self, port, policy_id): return self.port_policies.get(port['port_id']) != policy_id def get_port_policy(self, port): policy_id = self.port_policies.get(port['port_id']) if policy_id: return self.get_policy(policy_id) def set_port_policy(self, port, policy): """Attach a port to policy and return any previous policy on port.""" port_id = port['port_id'] old_policy = self.get_port_policy(port) self.known_policies[policy.id] = policy self.port_policies[port_id] = policy.id self.qos_policy_ports[policy.id][port_id] = port if old_policy and old_policy.id != policy.id: del self.qos_policy_ports[old_policy.id][port_id] return old_policy def clean_by_port(self, port): """Detach port from policy and cleanup data we don't need anymore.""" port_id = port['port_id'] if port_id in self.port_policies: del self.port_policies[port_id] for qos_policy_id, port_dict in self.qos_policy_ports.items(): if port_id in port_dict: del port_dict[port_id] if not port_dict: self._clean_policy_info(qos_policy_id) return raise exceptions.PortNotFound(port_id=port['port_id']) def _clean_policy_info(self, qos_policy_id): del self.qos_policy_ports[qos_policy_id] del self.known_policies[qos_policy_id] class QosAgentExtension(agent_extension.AgentCoreResourceExtension): SUPPORTED_RESOURCES = [resources.QOS_POLICY] def initialize(self, connection, driver_type): """Perform Agent Extension initialization. 
""" self.resource_rpc = resources_rpc.ResourcesPullRpcApi() self.qos_driver = manager.NeutronManager.load_class_for_provider( 'neutron.qos.agent_drivers', driver_type)() self.qos_driver.initialize() self.policy_map = PortPolicyMap() registry.subscribe(self._handle_notification, resources.QOS_POLICY) self._register_rpc_consumers(connection) def _register_rpc_consumers(self, connection): endpoints = [resources_rpc.ResourcesPushRpcCallback()] for resource_type in self.SUPPORTED_RESOURCES: # we assume that neutron-server always broadcasts the latest # version known to the agent topic = resources_rpc.resource_type_versioned_topic(resource_type) connection.create_consumer(topic, endpoints, fanout=True) @lockutils.synchronized('qos-port') def _handle_notification(self, qos_policy, event_type): # server does not allow to remove a policy that is attached to any # port, so we ignore DELETED events. Also, if we receive a CREATED # event for a policy, it means that there are no ports so far that are # attached to it. That's why we are interested in UPDATED events only if event_type == events.UPDATED: self._process_update_policy(qos_policy) @lockutils.synchronized('qos-port') def handle_port(self, context, port): """Handle agent QoS extension for port. This method applies a new policy to a port using the QoS driver. Update events are handled in _handle_notification. """ port_id = port['port_id'] port_qos_policy_id = port.get('qos_policy_id') network_qos_policy_id = port.get('network_qos_policy_id') qos_policy_id = port_qos_policy_id or network_qos_policy_id if qos_policy_id is None: self._process_reset_port(port) return if not self.policy_map.has_policy_changed(port, qos_policy_id): return qos_policy = self.resource_rpc.pull( context, resources.QOS_POLICY, qos_policy_id) if qos_policy is None: LOG.info(_LI("QoS policy %(qos_policy_id)s applied to port " "%(port_id)s is not available on server, " "it has been deleted. Skipping."), {'qos_policy_id': qos_policy_id, 'port_id': port_id}) self._process_reset_port(port) else: old_qos_policy = self.policy_map.set_port_policy(port, qos_policy) if old_qos_policy: self.qos_driver.delete(port, old_qos_policy) self.qos_driver.update(port, qos_policy) else: self.qos_driver.create(port, qos_policy) def delete_port(self, context, port): self._process_reset_port(port) def _policy_rules_modified(self, old_policy, policy): return not (len(old_policy.rules) == len(policy.rules) and all(i in old_policy.rules for i in policy.rules)) def _process_update_policy(self, qos_policy): old_qos_policy = self.policy_map.get_policy(qos_policy.id) if old_qos_policy: if self._policy_rules_modified(old_qos_policy, qos_policy): for port in self.policy_map.get_ports(qos_policy): #NOTE(QoS): for now, just reflush the rules on the port. # Later, we may want to apply the difference # between the old and new rule lists. self.qos_driver.delete(port, old_qos_policy) self.qos_driver.update(port, qos_policy) self.policy_map.update_policy(qos_policy) def _process_reset_port(self, port): try: self.policy_map.clean_by_port(port) self.qos_driver.delete(port) except exceptions.PortNotFound: LOG.info(_LI("QoS extension did have no information about the " "port %s that we were trying to reset"), port['port_id']) neutron-8.4.0/neutron/agent/metadata/0000775000567000056710000000000013044373210020723 5ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/agent/metadata/config.py0000664000567000056710000001201713044372760022554 0ustar jenkinsjenkins00000000000000# Copyright 2015 OpenStack Foundation. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import cfg from neutron._i18n import _ from neutron.common import utils SHARED_OPTS = [ cfg.StrOpt('metadata_proxy_socket', default='$state_path/metadata_proxy', help=_('Location for Metadata Proxy UNIX domain socket.')), cfg.StrOpt('metadata_proxy_user', default='', help=_("User (uid or name) running metadata proxy after " "its initialization (if empty: agent effective " "user).")), cfg.StrOpt('metadata_proxy_group', default='', help=_("Group (gid or name) running metadata proxy after " "its initialization (if empty: agent effective " "group).")) ] DRIVER_OPTS = [ cfg.BoolOpt('metadata_proxy_watch_log', help=_("Enable/Disable log watch by metadata proxy. It " "should be disabled when metadata_proxy_user/group " "is not allowed to read/write its log file and " "copytruncate logrotate option must be used if " "logrotate is enabled on metadata proxy log " "files. Option default value is deduced from " "metadata_proxy_user: watch log is enabled if " "metadata_proxy_user is agent effective user " "id/name.")), ] METADATA_PROXY_HANDLER_OPTS = [ cfg.StrOpt('auth_ca_cert', help=_("Certificate Authority public key (CA cert) " "file for ssl")), cfg.StrOpt('nova_metadata_ip', default='127.0.0.1', help=_("IP address used by Nova metadata server.")), cfg.PortOpt('nova_metadata_port', default=8775, help=_("TCP Port used by Nova metadata server.")), cfg.StrOpt('metadata_proxy_shared_secret', default='', help=_('When proxying metadata requests, Neutron signs the ' 'Instance-ID header with a shared secret to prevent ' 'spoofing. You may select any string for a secret, ' 'but it must match here and in the configuration used ' 'by the Nova Metadata Server. 
NOTE: Nova uses the same ' 'config key, but in [neutron] section.'), secret=True), cfg.StrOpt('nova_metadata_protocol', default='http', choices=['http', 'https'], help=_("Protocol to access nova metadata, http or https")), cfg.BoolOpt('nova_metadata_insecure', default=False, help=_("Allow to perform insecure SSL (https) requests to " "nova metadata")), cfg.StrOpt('nova_client_cert', default='', help=_("Client certificate for nova metadata api server.")), cfg.StrOpt('nova_client_priv_key', default='', help=_("Private key of client certificate.")) ] DEDUCE_MODE = 'deduce' USER_MODE = 'user' GROUP_MODE = 'group' ALL_MODE = 'all' SOCKET_MODES = (DEDUCE_MODE, USER_MODE, GROUP_MODE, ALL_MODE) UNIX_DOMAIN_METADATA_PROXY_OPTS = [ cfg.StrOpt('metadata_proxy_socket_mode', default=DEDUCE_MODE, choices=SOCKET_MODES, help=_("Metadata Proxy UNIX domain socket mode, 4 values " "allowed: " "'deduce': deduce mode from metadata_proxy_user/group " "values, " "'user': set metadata proxy socket mode to 0o644, to " "use when metadata_proxy_user is agent effective user " "or root, " "'group': set metadata proxy socket mode to 0o664, to " "use when metadata_proxy_group is agent effective " "group or root, " "'all': set metadata proxy socket mode to 0o666, to use " "otherwise.")), cfg.IntOpt('metadata_workers', default=utils.cpu_count() // 2, help=_('Number of separate worker processes for metadata ' 'server (defaults to half of the number of CPUs)')), cfg.IntOpt('metadata_backlog', default=4096, help=_('Number of backlog requests to configure the ' 'metadata server socket with')) ] neutron-8.4.0/neutron/agent/metadata/agent.py0000664000567000056710000002655513044372760022421 0ustar jenkinsjenkins00000000000000# Copyright 2012 New Dream Network, LLC (DreamHost) # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import hashlib import hmac import httplib2 from oslo_config import cfg from oslo_log import log as logging import oslo_messaging from oslo_service import loopingcall import six import six.moves.urllib.parse as urlparse import webob from neutron._i18n import _, _LE, _LW from neutron.agent.linux import utils as agent_utils from neutron.agent.metadata import config from neutron.agent import rpc as agent_rpc from neutron.common import constants as n_const from neutron.common import rpc as n_rpc from neutron.common import topics from neutron.common import utils from neutron import context from neutron.openstack.common.cache import cache LOG = logging.getLogger(__name__) MODE_MAP = { config.USER_MODE: 0o644, config.GROUP_MODE: 0o664, config.ALL_MODE: 0o666, } class MetadataPluginAPI(object): """Agent-side RPC for metadata agent-to-plugin interaction. This class implements the client side of an rpc interface used by the metadata service to make calls back into the Neutron plugin. The server side is defined in neutron.api.rpc.handlers.metadata_rpc.MetadataRpcCallback. For more information about changing rpc interfaces, see doc/source/devref/rpc_api.rst. API version history: 1.0 - Initial version. 
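
    Usage sketch (illustrative; names as defined in this module)::

        api = MetadataPluginAPI(topics.PLUGIN)
        ports = api.get_ports(context, {'device_id': [router_id]})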
""" def __init__(self, topic): target = oslo_messaging.Target( topic=topic, namespace=n_const.RPC_NAMESPACE_METADATA, version='1.0') self.client = n_rpc.get_client(target) def get_ports(self, context, filters): cctxt = self.client.prepare() return cctxt.call(context, 'get_ports', filters=filters) class MetadataProxyHandler(object): def __init__(self, conf): self.conf = conf if self.conf.cache_url: self._cache = cache.get_cache(self.conf.cache_url) else: self._cache = False self.plugin_rpc = MetadataPluginAPI(topics.PLUGIN) self.context = context.get_admin_context_without_session() @webob.dec.wsgify(RequestClass=webob.Request) def __call__(self, req): try: LOG.debug("Request: %s", req) instance_id, tenant_id = self._get_instance_and_tenant_id(req) if instance_id: return self._proxy_request(instance_id, tenant_id, req) else: return webob.exc.HTTPNotFound() except Exception: LOG.exception(_LE("Unexpected error.")) msg = _('An unknown error has occurred. ' 'Please try your request again.') explanation = six.text_type(msg) return webob.exc.HTTPInternalServerError(explanation=explanation) def _get_ports_from_server(self, router_id=None, ip_address=None, networks=None): """Get ports from server.""" filters = self._get_port_filters(router_id, ip_address, networks) return self.plugin_rpc.get_ports(self.context, filters) def _get_port_filters(self, router_id=None, ip_address=None, networks=None): filters = {} if router_id: filters['device_id'] = [router_id] filters['device_owner'] = n_const.ROUTER_INTERFACE_OWNERS if ip_address: filters['fixed_ips'] = {'ip_address': [ip_address]} if networks: filters['network_id'] = networks return filters @utils.cache_method_results def _get_router_networks(self, router_id): """Find all networks connected to given router.""" internal_ports = self._get_ports_from_server(router_id=router_id) return tuple(p['network_id'] for p in internal_ports) @utils.cache_method_results def _get_ports_for_remote_address(self, remote_address, networks): """Get list of ports that has given ip address and are part of given networks. :param networks: list of networks in which the ip address will be searched for """ return self._get_ports_from_server(networks=networks, ip_address=remote_address) def _get_ports(self, remote_address, network_id=None, router_id=None): """Search for all ports that contain passed ip address and belongs to given network. If no network is passed ports are searched on all networks connected to given router. Either one of network_id or router_id must be passed. 
""" if network_id: networks = (network_id,) elif router_id: networks = self._get_router_networks(router_id) else: raise TypeError(_("Either one of parameter network_id or router_id" " must be passed to _get_ports method.")) return self._get_ports_for_remote_address(remote_address, networks) def _get_instance_and_tenant_id(self, req): remote_address = req.headers.get('X-Forwarded-For') network_id = req.headers.get('X-Neutron-Network-ID') router_id = req.headers.get('X-Neutron-Router-ID') ports = self._get_ports(remote_address, network_id, router_id) if len(ports) == 1: return ports[0]['device_id'], ports[0]['tenant_id'] return None, None def _proxy_request(self, instance_id, tenant_id, req): headers = { 'X-Forwarded-For': req.headers.get('X-Forwarded-For'), 'X-Instance-ID': instance_id, 'X-Tenant-ID': tenant_id, 'X-Instance-ID-Signature': self._sign_instance_id(instance_id) } nova_ip_port = '%s:%s' % (self.conf.nova_metadata_ip, self.conf.nova_metadata_port) url = urlparse.urlunsplit(( self.conf.nova_metadata_protocol, nova_ip_port, req.path_info, req.query_string, '')) h = httplib2.Http( ca_certs=self.conf.auth_ca_cert, disable_ssl_certificate_validation=self.conf.nova_metadata_insecure ) if self.conf.nova_client_cert and self.conf.nova_client_priv_key: h.add_certificate(self.conf.nova_client_priv_key, self.conf.nova_client_cert, nova_ip_port) resp, content = h.request(url, method=req.method, headers=headers, body=req.body) if resp.status == 200: LOG.debug(str(resp)) req.response.content_type = resp['content-type'] req.response.body = content return req.response elif resp.status == 403: LOG.warning(_LW( 'The remote metadata server responded with Forbidden. This ' 'response usually occurs when shared secrets do not match.' )) return webob.exc.HTTPForbidden() elif resp.status == 400: return webob.exc.HTTPBadRequest() elif resp.status == 404: return webob.exc.HTTPNotFound() elif resp.status == 409: return webob.exc.HTTPConflict() elif resp.status == 500: msg = _( 'Remote metadata server experienced an internal server error.' 
) LOG.warning(msg) explanation = six.text_type(msg) return webob.exc.HTTPInternalServerError(explanation=explanation) else: raise Exception(_('Unexpected response code: %s') % resp.status) def _sign_instance_id(self, instance_id): secret = self.conf.metadata_proxy_shared_secret if isinstance(secret, six.text_type): secret = secret.encode('utf-8') if isinstance(instance_id, six.text_type): instance_id = instance_id.encode('utf-8') return hmac.new(secret, instance_id, hashlib.sha256).hexdigest() class UnixDomainMetadataProxy(object): def __init__(self, conf): self.conf = conf agent_utils.ensure_directory_exists_without_file( cfg.CONF.metadata_proxy_socket) def _init_state_reporting(self): self.context = context.get_admin_context_without_session() self.state_rpc = agent_rpc.PluginReportStateAPI(topics.REPORTS) self.agent_state = { 'binary': 'neutron-metadata-agent', 'host': cfg.CONF.host, 'topic': 'N/A', 'configurations': { 'metadata_proxy_socket': cfg.CONF.metadata_proxy_socket, 'nova_metadata_ip': cfg.CONF.nova_metadata_ip, 'nova_metadata_port': cfg.CONF.nova_metadata_port, 'log_agent_heartbeats': cfg.CONF.AGENT.log_agent_heartbeats, }, 'start_flag': True, 'agent_type': n_const.AGENT_TYPE_METADATA} report_interval = cfg.CONF.AGENT.report_interval if report_interval: self.heartbeat = loopingcall.FixedIntervalLoopingCall( self._report_state) self.heartbeat.start(interval=report_interval) def _report_state(self): try: self.state_rpc.report_state( self.context, self.agent_state, use_call=self.agent_state.get('start_flag')) except AttributeError: # This means the server does not support report_state LOG.warning(_LW('Neutron server does not support state report.' ' State report for this agent will be disabled.')) self.heartbeat.stop() return except Exception: LOG.exception(_LE("Failed reporting state!")) return self.agent_state.pop('start_flag', None) def _get_socket_mode(self): mode = self.conf.metadata_proxy_socket_mode if mode == config.DEDUCE_MODE: user = self.conf.metadata_proxy_user if (not user or user == '0' or user == 'root' or agent_utils.is_effective_user(user)): # user is agent effective user or root => USER_MODE mode = config.USER_MODE else: group = self.conf.metadata_proxy_group if not group or agent_utils.is_effective_group(group): # group is agent effective group => GROUP_MODE mode = config.GROUP_MODE else: # otherwise => ALL_MODE mode = config.ALL_MODE return MODE_MAP[mode] def run(self): server = agent_utils.UnixDomainWSGIServer('neutron-metadata-agent') server.start(MetadataProxyHandler(self.conf), self.conf.metadata_proxy_socket, workers=self.conf.metadata_workers, backlog=self.conf.metadata_backlog, mode=self._get_socket_mode()) self._init_state_reporting() server.wait() neutron-8.4.0/neutron/agent/metadata/__init__.py0000664000567000056710000000000013044372736023036 0ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/agent/metadata/namespace_proxy.py0000664000567000056710000001573713044372760024520 0ustar jenkinsjenkins00000000000000# Copyright 2012 New Dream Network, LLC (DreamHost) # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the
# License for the specific language governing permissions and limitations
# under the License.

import httplib2
from oslo_config import cfg
from oslo_log import log as logging
from oslo_service import wsgi as base_wsgi
from oslo_utils import encodeutils
import six
import six.moves.urllib.parse as urlparse
import webob

from neutron._i18n import _, _LE
from neutron.agent.linux import daemon
from neutron.agent.linux import utils as agent_utils
from neutron.common import config
from neutron.common import exceptions
from neutron.common import utils
from neutron import wsgi

LOG = logging.getLogger(__name__)


class NetworkMetadataProxyHandler(object):
    """Proxy AF_INET metadata request through Unix Domain socket.

    The Unix domain socket allows the proxy to access resources that
    are not accessible within the isolated tenant context.
    """

    def __init__(self, network_id=None, router_id=None):
        self.network_id = network_id
        self.router_id = router_id

        if network_id is None and router_id is None:
            raise exceptions.NetworkIdOrRouterIdRequiredError()

    @webob.dec.wsgify(RequestClass=base_wsgi.Request)
    def __call__(self, req):
        LOG.debug("Request: %s", req)

        try:
            return self._proxy_request(req.remote_addr,
                                       req.method,
                                       req.path_info,
                                       req.query_string,
                                       req.body)
        except Exception:
            LOG.exception(_LE("Unexpected error."))
            msg = _('An unknown error has occurred. '
                    'Please try your request again.')
            explanation = six.text_type(msg)
            return webob.exc.HTTPInternalServerError(explanation=explanation)

    def _proxy_request(self, remote_address, method, path_info,
                       query_string, body):
        headers = {
            'X-Forwarded-For': remote_address,
        }

        if self.router_id:
            headers['X-Neutron-Router-ID'] = self.router_id
        else:
            headers['X-Neutron-Network-ID'] = self.network_id

        url = urlparse.urlunsplit((
            'http',
            '169.254.169.254',  # a dummy value to make the request proper
            path_info,
            query_string,
            ''))

        h = httplib2.Http()
        resp, content = h.request(
            url,
            method=method,
            headers=headers,
            body=body,
            connection_type=agent_utils.UnixDomainHTTPConnection)

        if resp.status == 200:
            LOG.debug(resp)
            LOG.debug(encodeutils.safe_decode(content, errors='replace'))
            response = webob.Response()
            response.status = resp.status
            response.headers['Content-Type'] = resp['content-type']
            response.body = wsgi.encode_body(content)
            return response
        elif resp.status == 400:
            return webob.exc.HTTPBadRequest()
        elif resp.status == 404:
            return webob.exc.HTTPNotFound()
        elif resp.status == 409:
            return webob.exc.HTTPConflict()
        elif resp.status == 500:
            msg = _(
                'Remote metadata server experienced an internal server error.'
) LOG.debug(msg) explanation = six.text_type(msg) return webob.exc.HTTPInternalServerError(explanation=explanation) else: raise Exception(_('Unexpected response code: %s') % resp.status) class ProxyDaemon(daemon.Daemon): def __init__(self, pidfile, port, network_id=None, router_id=None, user=None, group=None, watch_log=True): uuid = network_id or router_id super(ProxyDaemon, self).__init__(pidfile, uuid=uuid, user=user, group=group, watch_log=watch_log) self.network_id = network_id self.router_id = router_id self.port = port def run(self): handler = NetworkMetadataProxyHandler( self.network_id, self.router_id) proxy = wsgi.Server('neutron-network-metadata-proxy') proxy.start(handler, self.port) # Drop privileges after port bind super(ProxyDaemon, self).run() proxy.wait() def main(): opts = [ cfg.StrOpt('network_id', help=_('Network that will have instance metadata ' 'proxied.')), cfg.StrOpt('router_id', help=_('Router that will have connected instances\' ' 'metadata proxied.')), cfg.StrOpt('pid_file', help=_('Location of pid file of this process.')), cfg.BoolOpt('daemonize', default=True, help=_('Run as daemon.')), cfg.PortOpt('metadata_port', default=9697, help=_("TCP Port to listen for metadata server " "requests.")), cfg.StrOpt('metadata_proxy_socket', default='$state_path/metadata_proxy', help=_('Location of Metadata Proxy UNIX domain ' 'socket')), cfg.StrOpt('metadata_proxy_user', help=_("User (uid or name) running metadata proxy after " "its initialization")), cfg.StrOpt('metadata_proxy_group', help=_("Group (gid or name) running metadata proxy after " "its initialization")), cfg.BoolOpt('metadata_proxy_watch_log', default=True, help=_("Watch file log. Log watch should be disabled when " "metadata_proxy_user/group has no read/write " "permissions on metadata proxy log file.")), ] cfg.CONF.register_cli_opts(opts) # Don't get the default configuration file cfg.CONF(project='neutron', default_config_files=[]) config.setup_logging() utils.log_opt_values(LOG) proxy = ProxyDaemon(cfg.CONF.pid_file, cfg.CONF.metadata_port, network_id=cfg.CONF.network_id, router_id=cfg.CONF.router_id, user=cfg.CONF.metadata_proxy_user, group=cfg.CONF.metadata_proxy_group, watch_log=cfg.CONF.metadata_proxy_watch_log) if cfg.CONF.daemonize: proxy.start() else: proxy.run() neutron-8.4.0/neutron/agent/metadata/driver.py0000664000567000056710000001534513044372760022611 0ustar jenkinsjenkins00000000000000# Copyright 2014 OpenStack Foundation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
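# Illustrative sketch (assuming the L3 agent's default metadata_port of
# 9697 and namespaces.INTERNAL_DEV_PREFIX == 'qr-'): the NAT rule built
# below looks like
#
#     MetadataDriver.metadata_nat_rules(9697)
#     # -> [('PREROUTING', '-d 169.254.169.254/32 -i qr-+ -p tcp -m tcp '
#     #      '--dport 80 -j REDIRECT --to-ports 9697')]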
import os from neutron.agent.common import config from neutron.agent.l3 import ha_router from neutron.agent.l3 import namespaces from neutron.agent.linux import external_process from neutron.agent.linux import utils from neutron.callbacks import events from neutron.callbacks import registry from neutron.callbacks import resources from neutron.common import constants from neutron.common import exceptions # Access with redirection to metadata proxy iptables mark mask METADATA_SERVICE_NAME = 'metadata-proxy' class MetadataDriver(object): def __init__(self, l3_agent): self.metadata_port = l3_agent.conf.metadata_port self.metadata_access_mark = l3_agent.conf.metadata_access_mark registry.subscribe( after_router_added, resources.ROUTER, events.AFTER_CREATE) registry.subscribe( before_router_removed, resources.ROUTER, events.BEFORE_DELETE) @classmethod def metadata_filter_rules(cls, port, mark): return [('INPUT', '-m mark --mark %s/%s -j ACCEPT' % (mark, constants.ROUTER_MARK_MASK)), ('INPUT', '-p tcp -m tcp --dport %s ' '-j DROP' % port)] @classmethod def metadata_mangle_rules(cls, mark): return [('PREROUTING', '-d 169.254.169.254/32 ' '-i %(interface_name)s ' '-p tcp -m tcp --dport 80 ' '-j MARK --set-xmark %(value)s/%(mask)s' % {'interface_name': namespaces.INTERNAL_DEV_PREFIX + '+', 'value': mark, 'mask': constants.ROUTER_MARK_MASK})] @classmethod def metadata_nat_rules(cls, port): return [('PREROUTING', '-d 169.254.169.254/32 ' '-i %(interface_name)s ' '-p tcp -m tcp --dport 80 -j REDIRECT ' '--to-ports %(port)s' % {'interface_name': namespaces.INTERNAL_DEV_PREFIX + '+', 'port': port})] @classmethod def _get_metadata_proxy_user_group_watchlog(cls, conf): user = conf.metadata_proxy_user or str(os.geteuid()) group = conf.metadata_proxy_group or str(os.getegid()) watch_log = conf.metadata_proxy_watch_log if watch_log is None: # NOTE(cbrandily): Commonly, log watching can be enabled only # when metadata proxy user is agent effective user (id/name). 
watch_log = utils.is_effective_user(user) return user, group, watch_log @classmethod def _get_metadata_proxy_callback(cls, port, conf, network_id=None, router_id=None): uuid = network_id or router_id if uuid is None: raise exceptions.NetworkIdOrRouterIdRequiredError() if network_id: lookup_param = '--network_id=%s' % network_id else: lookup_param = '--router_id=%s' % router_id def callback(pid_file): metadata_proxy_socket = conf.metadata_proxy_socket user, group, watch_log = ( cls._get_metadata_proxy_user_group_watchlog(conf)) proxy_cmd = ['neutron-ns-metadata-proxy', '--pid_file=%s' % pid_file, '--metadata_proxy_socket=%s' % metadata_proxy_socket, lookup_param, '--state_path=%s' % conf.state_path, '--metadata_port=%s' % port, '--metadata_proxy_user=%s' % user, '--metadata_proxy_group=%s' % group] proxy_cmd.extend(config.get_log_args( conf, 'neutron-ns-metadata-proxy-%s.log' % uuid, metadata_proxy_watch_log=watch_log)) return proxy_cmd return callback @classmethod def spawn_monitored_metadata_proxy(cls, monitor, ns_name, port, conf, network_id=None, router_id=None): uuid = network_id or router_id callback = cls._get_metadata_proxy_callback( port, conf, network_id=network_id, router_id=router_id) pm = cls._get_metadata_proxy_process_manager(uuid, conf, ns_name=ns_name, callback=callback) pm.enable() monitor.register(uuid, METADATA_SERVICE_NAME, pm) @classmethod def destroy_monitored_metadata_proxy(cls, monitor, uuid, conf): monitor.unregister(uuid, METADATA_SERVICE_NAME) # No need to pass ns name as it's not needed for disable() pm = cls._get_metadata_proxy_process_manager(uuid, conf) pm.disable() @classmethod def _get_metadata_proxy_process_manager(cls, router_id, conf, ns_name=None, callback=None): return external_process.ProcessManager( conf=conf, uuid=router_id, namespace=ns_name, default_cmd_callback=callback) def after_router_added(resource, event, l3_agent, **kwargs): router = kwargs['router'] proxy = l3_agent.metadata_driver for c, r in proxy.metadata_filter_rules(proxy.metadata_port, proxy.metadata_access_mark): router.iptables_manager.ipv4['filter'].add_rule(c, r) for c, r in proxy.metadata_mangle_rules(proxy.metadata_access_mark): router.iptables_manager.ipv4['mangle'].add_rule(c, r) for c, r in proxy.metadata_nat_rules(proxy.metadata_port): router.iptables_manager.ipv4['nat'].add_rule(c, r) router.iptables_manager.apply() if not isinstance(router, ha_router.HaRouter): proxy.spawn_monitored_metadata_proxy( l3_agent.process_monitor, router.ns_name, proxy.metadata_port, l3_agent.conf, router_id=router.router_id) def before_router_removed(resource, event, l3_agent, **kwargs): router = kwargs['router'] proxy = l3_agent.metadata_driver proxy.destroy_monitored_metadata_proxy(l3_agent.process_monitor, router.router['id'], l3_agent.conf) neutron-8.4.0/neutron/agent/dhcp_agent.py0000664000567000056710000000357213044372760021631 0ustar jenkinsjenkins00000000000000# Copyright 2015 OpenStack Foundation # # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
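# Usage sketch (illustrative): register_options() below can also be applied
# to a private ConfigOpts instance, e.g. in tests:
#
#     from oslo_config import cfg
#     conf = cfg.ConfigOpts()
#     register_options(conf)
#     conf([])  # parse an empty command line so option defaults are usable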
import sys from oslo_config import cfg from oslo_service import service from neutron.agent.common import config from neutron.agent.dhcp import config as dhcp_config from neutron.agent.linux import interface from neutron.agent.metadata import config as metadata_config from neutron.common import config as common_config from neutron.common import topics from neutron import service as neutron_service def register_options(conf): config.register_interface_driver_opts_helper(conf) config.register_agent_state_opts_helper(conf) config.register_availability_zone_opts_helper(conf) conf.register_opts(dhcp_config.DHCP_AGENT_OPTS) conf.register_opts(dhcp_config.DHCP_OPTS) conf.register_opts(dhcp_config.DNSMASQ_OPTS) conf.register_opts(metadata_config.DRIVER_OPTS) conf.register_opts(metadata_config.SHARED_OPTS) conf.register_opts(interface.OPTS) def main(): register_options(cfg.CONF) common_config.init(sys.argv[1:]) config.setup_logging() server = neutron_service.Service.create( binary='neutron-dhcp-agent', topic=topics.DHCP_AGENT, report_interval=cfg.CONF.AGENT.report_interval, manager='neutron.agent.dhcp.agent.DhcpAgentWithStateReport') service.launch(cfg.CONF, server).wait() neutron-8.4.0/neutron/agent/windows/0000775000567000056710000000000013044373210020635 5ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/agent/windows/ip_lib.py0000664000567000056710000000350213044372760022456 0ustar jenkinsjenkins00000000000000# Copyright 2016 Cloudbase Solutions. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import netifaces from oslo_log import log as logging from neutron._i18n import _LE LOG = logging.getLogger(__name__) OPTS = [] class IPWrapper(object): def get_device_by_ip(self, ip): if not ip: return for device in self.get_devices(): if device.device_has_ip(ip): return device def get_devices(self): try: return [IPDevice(iface) for iface in netifaces.interfaces()] except (OSError, MemoryError): LOG.error(_LE("Failed to get network interfaces.")) return [] class IPDevice(object): def __init__(self, name): self.device_name = name def device_has_ip(self, ip): try: device_addresses = netifaces.ifaddresses(self.device_name) except ValueError: # The device does not exist on the system return False try: addresses = [ip_addr['addr'] for ip_addr in device_addresses.get(netifaces.AF_INET, [])] return ip in addresses except OSError: LOG.error(_LE("Failed to get ip addresses for interface: %s."), self.device_name) return False neutron-8.4.0/neutron/agent/windows/__init__.py0000664000567000056710000000000013044372736022750 0ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/agent/windows/utils.py0000664000567000056710000000607713044372760022372 0ustar jenkinsjenkins00000000000000# Copyright 2015 Cloudbase Solutions. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import os from eventlet.green import subprocess from eventlet import greenthread from oslo_log import log as logging import six from neutron._i18n import _ from neutron.common import utils LOG = logging.getLogger(__name__) def create_process(cmd, addl_env=None): cmd = list(map(str, cmd)) LOG.debug("Running command: %s", cmd) env = os.environ.copy() if addl_env: env.update(addl_env) obj = utils.subprocess_popen(cmd, shell=False, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=env, preexec_fn=None, close_fds=False) return obj, cmd def execute(cmd, process_input=None, addl_env=None, check_exit_code=True, return_stderr=False, log_fail_as_error=True, extra_ok_codes=None, run_as_root=False, do_decode=True): try: if (process_input is None or isinstance(process_input, six.binary_type)): _process_input = process_input else: _process_input = process_input.encode('utf-8') obj, cmd = create_process(cmd, addl_env=addl_env) _stdout, _stderr = obj.communicate(_process_input) obj.stdin.close() _stdout = utils.safe_decode_utf8(_stdout) _stderr = utils.safe_decode_utf8(_stderr) m = _("\nCommand: %(cmd)s\nExit code: %(code)s\nStdin: %(stdin)s\n" "Stdout: %(stdout)s\nStderr: %(stderr)s") % \ {'cmd': cmd, 'code': obj.returncode, 'stdin': process_input or '', 'stdout': _stdout, 'stderr': _stderr} extra_ok_codes = extra_ok_codes or [] if obj.returncode and obj.returncode in extra_ok_codes: obj.returncode = None log_msg = m.strip().replace('\n', '; ') if obj.returncode and log_fail_as_error: LOG.error(log_msg) else: LOG.debug(log_msg) if obj.returncode and check_exit_code: raise RuntimeError(m) finally: # NOTE(termie): this appears to be necessary to let the subprocess # call clean something up in between calls, without # it two execute calls in a row hangs the second one greenthread.sleep(0) return (_stdout, _stderr) if return_stderr else _stdout neutron-8.4.0/neutron/agent/windows/polling.py0000664000567000056710000000175113044372736022673 0ustar jenkinsjenkins00000000000000# Copyright 2015 Cloudbase Solutions. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
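# Usage sketch (illustrative): the context manager below mirrors the Linux
# polling helpers' contract, e.g.
#
#     with get_polling_manager(minimize_polling=False,
#                              ovsdb_monitor_respawn_interval=30) as pm:
#         if pm.is_polling_required:
#             pass  # rescan ports / resync state
#
# where 'is_polling_required' is assumed from base_polling.AlwaysPoll.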
import contextlib from neutron.agent.common import base_polling @contextlib.contextmanager def get_polling_manager(minimize_polling, ovsdb_monitor_respawn_interval): pm = base_polling.AlwaysPoll() yield pm # TODO(atuvenie): make this manager inherit from # that fully fledged polling manager interface class InterfacePollingMinimizer(object): pass neutron-8.4.0/neutron/objects/0000775000567000056710000000000013044373210017476 5ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/objects/rbac_db.py0000664000567000056710000003075413044372760021446 0ustar jenkinsjenkins00000000000000# Copyright 2016 Red Hat, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import abc import itertools from six import add_metaclass from sqlalchemy import and_ from neutron._i18n import _ from neutron.callbacks import events from neutron.callbacks import registry from neutron.common import exceptions as n_exc from neutron.db import api as db_api from neutron.db import common_db_mixin from neutron.db import rbac_db_mixin from neutron.db import rbac_db_models as models from neutron.extensions import rbac as ext_rbac from neutron.objects import base from neutron.objects.db import api as obj_db_api @add_metaclass(abc.ABCMeta) class RbacNeutronDbObjectMixin(rbac_db_mixin.RbacPluginMixin, base.NeutronDbObject): rbac_db_model = None @classmethod @abc.abstractmethod def get_bound_tenant_ids(cls, context, obj_id): """Returns ids of all tenants depending on this db object. Has to be implemented by classes using RbacNeutronMetaclass. The tenants are the ones that need the sharing or 'visibility' of the object to them. E.g: for QosPolicy that would be the tenants using the Networks and Ports with the shared QosPolicy applied to them. :returns: set -- a set of tenants' ids dependent on this object. 
""" @classmethod def is_shared_with_tenant(cls, context, obj_id, tenant_id): ctx = context.elevated() rbac_db_model = cls.rbac_db_model with ctx.session.begin(subtransactions=True): return (common_db_mixin.model_query(ctx, rbac_db_model).filter( and_(rbac_db_model.object_id == obj_id, rbac_db_model.action == models.ACCESS_SHARED, rbac_db_model.target_tenant.in_( ['*', tenant_id]))).count() != 0) @classmethod def is_accessible(cls, context, db_obj): return (super( RbacNeutronDbObjectMixin, cls).is_accessible(context, db_obj) or cls.is_shared_with_tenant(context, db_obj.id, context.tenant_id)) @classmethod def _get_db_obj_rbac_entries(cls, context, rbac_obj_id, rbac_action): rbac_db_model = cls.rbac_db_model return common_db_mixin.model_query(context, rbac_db_model).filter( and_(rbac_db_model.object_id == rbac_obj_id, rbac_db_model.action == rbac_action)) @classmethod def _get_tenants_with_shared_access_to_db_obj(cls, context, obj_id): return set(itertools.chain.from_iterable(context.session.query( cls.rbac_db_model.target_tenant).filter( and_(cls.rbac_db_model.object_id == obj_id, cls.rbac_db_model.action == models.ACCESS_SHARED, cls.rbac_db_model.target_tenant != '*')))) @classmethod def _validate_rbac_policy_delete(cls, context, obj_id, target_tenant): ctx_admin = context.elevated() rb_model = cls.rbac_db_model bound_tenant_ids = cls.get_bound_tenant_ids(ctx_admin, obj_id) db_obj_sharing_entries = cls._get_db_obj_rbac_entries( ctx_admin, obj_id, models.ACCESS_SHARED) def raise_policy_in_use(): raise ext_rbac.RbacPolicyInUse( object_id=obj_id, details='tenant_id={}'.format(target_tenant)) if target_tenant != '*': # if there is a wildcard rule, we can return early because it # shares the object globally wildcard_sharing_entries = db_obj_sharing_entries.filter( rb_model.target_tenant == '*') if wildcard_sharing_entries.count(): return if target_tenant in bound_tenant_ids: raise_policy_in_use() return # for the wildcard we need to query all of the rbac entries to # see if any allow the object sharing other_target_tenants = cls._get_tenants_with_shared_access_to_db_obj( ctx_admin, obj_id) if not bound_tenant_ids.issubset(other_target_tenants): raise_policy_in_use() @classmethod def validate_rbac_policy_delete(cls, resource, event, trigger, context, object_type, policy, **kwargs): """Callback to handle RBAC_POLICY, BEFORE_DELETE callback. :raises: RbacPolicyInUse -- in case the policy is in use. """ if policy['action'] != models.ACCESS_SHARED: return target_tenant = policy['target_tenant'] db_obj = cls.get_object(context, id=policy['object_id']) if db_obj.tenant_id == target_tenant: return cls._validate_rbac_policy_delete(context=context, obj_id=policy['object_id'], target_tenant=target_tenant) @classmethod def validate_rbac_policy_update(cls, resource, event, trigger, context, object_type, policy, **kwargs): """Callback to handle RBAC_POLICY, BEFORE_UPDATE callback. :raises: RbacPolicyInUse -- in case the update is forbidden. """ prev_tenant = policy['target_tenant'] new_tenant = kwargs['policy_update']['target_tenant'] if prev_tenant == new_tenant: return if new_tenant != '*': return cls.validate_rbac_policy_delete( resource, event, trigger, context, object_type, policy) @classmethod def validate_rbac_policy_change(cls, resource, event, trigger, context, object_type, policy, **kwargs): """Callback to validate RBAC_POLICY changes. This is the dispatching function for create, update and delete callbacks. 
On creation and update, verify that the creator is an admin or owns the resource being shared. """ # TODO(hdaniel): As this code was shamelessly stolen from # NeutronDbPluginV2.validate_network_rbac_policy_change(), those pieces # should be synced and contain the same bugs, until Network RBAC logic # (hopefully) melded with this one. if object_type != cls.rbac_db_model.object_type: return db_obj = cls.get_object(context.elevated(), id=policy['object_id']) if event in (events.BEFORE_CREATE, events.BEFORE_UPDATE): if (not context.is_admin and db_obj['tenant_id'] != context.tenant_id): msg = _("Only admins can manipulate policies on objects " "they do not own") raise n_exc.InvalidInput(error_message=msg) callback_map = {events.BEFORE_UPDATE: cls.validate_rbac_policy_update, events.BEFORE_DELETE: cls.validate_rbac_policy_delete} if event in callback_map: return callback_map[event](resource, event, trigger, context, object_type, policy, **kwargs) def attach_rbac(self, obj_id, tenant_id, target_tenant='*'): obj_type = self.rbac_db_model.object_type rbac_policy = {'rbac_policy': {'object_id': obj_id, 'target_tenant': target_tenant, 'tenant_id': tenant_id, 'object_type': obj_type, 'action': models.ACCESS_SHARED}} return self.create_rbac_policy(self._context, rbac_policy) def update_shared(self, is_shared_new, obj_id): admin_context = self._context.elevated() shared_prev = obj_db_api.get_object(admin_context, self.rbac_db_model, object_id=obj_id, target_tenant='*', action=models.ACCESS_SHARED) is_shared_prev = bool(shared_prev) if is_shared_prev == is_shared_new: return # 'shared' goes False -> True if not is_shared_prev and is_shared_new: self.attach_rbac(obj_id, self._context.tenant_id) return # 'shared' goes True -> False is actually an attempt to delete # rbac rule for sharing obj_id with target_tenant = '*' self._validate_rbac_policy_delete(self._context, obj_id, '*') return self._context.session.delete(shared_prev) def _update_post(self, obj_changes): if "shared" in obj_changes: self.update_shared(self.shared, self.id) def _update_hook(self, update_orig): with db_api.autonested_transaction(self._context.session): # NOTE(slaweq): copy of object changes is required to pass it later to # _update_post method because update() will reset all those changes obj_changes = self.obj_get_changes() update_orig(self) _update_post(self, obj_changes) def _create_post(self): if self.shared: self.attach_rbac(self.id, self._context.tenant_id) def _create_hook(self, orig_create): with db_api.autonested_transaction(self._context.session): orig_create(self) _create_post(self) def _to_dict_hook(self, to_dict_orig): dct = to_dict_orig(self) dct['shared'] = self.is_shared_with_tenant(self._context, self.id, self._context.tenant_id) return dct class RbacNeutronMetaclass(type): """Adds support for RBAC in NeutronDbObjects. Injects code for CRUD operations and modifies existing ops to do so. 
""" @classmethod def _get_attribute(mcs, attribute_name, bases): for b in bases: attribute = getattr(b, attribute_name, None) if attribute: return attribute @classmethod def get_attribute(mcs, attribute_name, bases, dct): return (dct.get(attribute_name, None) or mcs._get_attribute(attribute_name, bases)) @classmethod def update_synthetic_fields(mcs, bases, dct): if not dct.get('synthetic_fields', None): synthetic_attr = mcs.get_attribute('synthetic_fields', bases, dct) dct['synthetic_fields'] = synthetic_attr or [] if 'shared' in dct['synthetic_fields']: raise n_exc.ObjectActionError( action=_('shared attribute switching to synthetic'), reason=_('already a synthetic attribute')) dct['synthetic_fields'].append('shared') @staticmethod def subscribe_to_rbac_events(class_instance): for e in (events.BEFORE_CREATE, events.BEFORE_UPDATE, events.BEFORE_DELETE): registry.subscribe(class_instance.validate_rbac_policy_change, rbac_db_mixin.RBAC_POLICY, e) @staticmethod def validate_existing_attrs(cls_name, dct): if 'shared' not in dct['fields']: raise KeyError(_('No shared key in %s fields') % cls_name) if 'rbac_db_model' not in dct: raise AttributeError(_('rbac_db_model not found in %s') % cls_name) @staticmethod def get_replaced_method(orig_method, new_method): def func(self): return new_method(self, orig_method) return func @classmethod def replace_class_methods_with_hooks(mcs, bases, dct): methods_replacement_map = {'create': _create_hook, 'update': _update_hook, 'to_dict': _to_dict_hook} for orig_method_name, new_method in methods_replacement_map.items(): orig_method = mcs.get_attribute(orig_method_name, bases, dct) hook_method = mcs.get_replaced_method(orig_method, new_method) dct[orig_method_name] = hook_method def __new__(mcs, name, bases, dct): mcs.validate_existing_attrs(name, dct) mcs.update_synthetic_fields(bases, dct) mcs.replace_class_methods_with_hooks(bases, dct) cls = type(name, (RbacNeutronDbObjectMixin,) + bases, dct) mcs.subscribe_to_rbac_events(cls) return cls neutron-8.4.0/neutron/objects/__init__.py0000664000567000056710000000000013044372736021611 0ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/objects/base.py0000664000567000056710000002143013044372760020773 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import abc import copy import itertools from neutron_lib import exceptions from oslo_db import exception as obj_exc from oslo_utils import reflection from oslo_versionedobjects import base as obj_base import six from neutron._i18n import _ from neutron.objects.db import api as obj_db_api class NeutronObjectUpdateForbidden(exceptions.NeutronException): message = _("Unable to update the following object fields: %(fields)s") class NeutronDbObjectDuplicateEntry(exceptions.Conflict): message = _("Failed to create a duplicate %(object_type)s: " "for attribute(s) %(attributes)s with value(s) %(values)s") def __init__(self, object_class, db_exception): super(NeutronDbObjectDuplicateEntry, self).__init__( object_type=reflection.get_class_name(object_class, fully_qualified=False), attributes=db_exception.columns, values=db_exception.value) class NeutronPrimaryKeyMissing(exceptions.BadRequest): message = _("For class %(object_type)s missing primary keys: " "%(missing_keys)s") def __init__(self, object_class, missing_keys): super(NeutronPrimaryKeyMissing, self).__init__( object_type=reflection.get_class_name(object_class, fully_qualified=False), missing_keys=missing_keys ) def get_updatable_fields(cls, fields): fields = fields.copy() for field in cls.fields_no_update: if field in fields: del fields[field] return fields @six.add_metaclass(abc.ABCMeta) class NeutronObject(obj_base.VersionedObject, obj_base.VersionedObjectDictCompat, obj_base.ComparableVersionedObject): synthetic_fields = [] def __init__(self, context=None, **kwargs): super(NeutronObject, self).__init__(context, **kwargs) self.obj_set_defaults() def to_dict(self): return dict(self.items()) @classmethod def clean_obj_from_primitive(cls, primitive, context=None): obj = cls.obj_from_primitive(primitive, context) obj.obj_reset_changes() return obj @classmethod def get_object(cls, context, **kwargs): raise NotImplementedError() @classmethod def validate_filters(cls, **kwargs): bad_filters = [key for key in kwargs if key not in cls.fields or key in cls.synthetic_fields] if bad_filters: bad_filters = ', '.join(bad_filters) msg = _("'%s' is not supported for filtering") % bad_filters raise exceptions.InvalidInput(error_message=msg) @classmethod @abc.abstractmethod def get_objects(cls, context, **kwargs): raise NotImplementedError() def create(self): raise NotImplementedError() def update(self): raise NotImplementedError() def delete(self): raise NotImplementedError() class DeclarativeObject(abc.ABCMeta): def __init__(cls, name, bases, dct): super(DeclarativeObject, cls).__init__(name, bases, dct) for base in itertools.chain([cls], bases): if hasattr(base, 'primary_keys'): cls.fields_no_update += base.primary_keys # avoid duplicate entries cls.fields_no_update = list(set(cls.fields_no_update)) @six.add_metaclass(DeclarativeObject) class NeutronDbObject(NeutronObject): # should be overridden for all persistent objects db_model = None primary_keys = ['id'] fields_no_update = [] # dict with name mapping: {'field_name_in_object': 'field_name_in_db'} fields_need_translation = {} def from_db_object(self, *objs): db_objs = [self.modify_fields_from_db(db_obj) for db_obj in objs] for field in self.fields: for db_obj in db_objs: if field in db_obj: setattr(self, field, db_obj[field]) break self.obj_reset_changes() @classmethod def modify_fields_to_db(cls, fields): """ This method enables to modify the fields and its content before data is inserted into DB. 
It uses the fields_need_translation dict with structure: { 'field_name_in_object': 'field_name_in_db' } :param fields: dict of fields from NeutronDbObject :return: modified dict of fields """ result = copy.deepcopy(dict(fields)) for field, field_db in cls.fields_need_translation.items(): if field in result: result[field_db] = result.pop(field) return result @classmethod def modify_fields_from_db(cls, db_obj): """ This method enables to modify the fields and its content after data was fetched from DB. It uses the fields_need_translation dict with structure: { 'field_name_in_object': 'field_name_in_db' } :param db_obj: dict of object fetched from database :return: modified dict of DB values """ result = dict(db_obj) for field, field_db in cls.fields_need_translation.items(): if field_db in result: result[field] = result.pop(field_db) return result @classmethod def get_object(cls, context, **kwargs): """ This method fetches object from DB and convert it to versioned object. :param context: :param kwargs: multiple primary keys defined key=value pairs :return: single object of NeutronDbObject class """ missing_keys = set(cls.primary_keys).difference(kwargs.keys()) if missing_keys: raise NeutronPrimaryKeyMissing(object_class=cls.__class__, missing_keys=missing_keys) db_obj = obj_db_api.get_object(context, cls.db_model, **kwargs) if db_obj: obj = cls(context, **cls.modify_fields_from_db(db_obj)) obj.obj_reset_changes() return obj @classmethod def get_objects(cls, context, **kwargs): cls.validate_filters(**kwargs) db_objs = obj_db_api.get_objects(context, cls.db_model, **kwargs) result = [] for db_obj in db_objs: obj = cls(context, **cls.modify_fields_from_db(db_obj)) obj.obj_reset_changes() result.append(obj) return result @classmethod def is_accessible(cls, context, db_obj): return (context.is_admin or context.tenant_id == db_obj.tenant_id) def _get_changed_persistent_fields(self): fields = self.obj_get_changes() for field in self.synthetic_fields: if field in fields: del fields[field] return fields def _validate_changed_fields(self, fields): fields = fields.copy() forbidden_updates = set(self.fields_no_update) & set(fields.keys()) if forbidden_updates: raise NeutronObjectUpdateForbidden(fields=forbidden_updates) return fields def create(self): fields = self._get_changed_persistent_fields() try: db_obj = obj_db_api.create_object(self._context, self.db_model, self.modify_fields_to_db(fields)) except obj_exc.DBDuplicateEntry as db_exc: raise NeutronDbObjectDuplicateEntry(object_class=self.__class__, db_exception=db_exc) self.from_db_object(db_obj) def _get_composite_keys(self): keys = {} for key in self.primary_keys: keys[key] = getattr(self, key) return self.modify_fields_to_db(keys) def update(self): updates = self._get_changed_persistent_fields() updates = self._validate_changed_fields(updates) if updates: db_obj = obj_db_api.update_object(self._context, self.db_model, self.modify_fields_to_db(updates), **self._get_composite_keys()) self.from_db_object(self, db_obj) def delete(self): obj_db_api.delete_object(self._context, self.db_model, **self._get_composite_keys()) neutron-8.4.0/neutron/objects/qos/0000775000567000056710000000000013044373210020300 5ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/objects/qos/policy.py0000664000567000056710000002013113044372760022157 0ustar jenkinsjenkins00000000000000# Copyright 2015 Red Hat, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import itertools from oslo_versionedobjects import base as obj_base from oslo_versionedobjects import fields as obj_fields from six import add_metaclass from neutron._i18n import _ from neutron.common import exceptions from neutron.db import api as db_api from neutron.db import models_v2 from neutron.db.qos import api as qos_db_api from neutron.db.qos import models as qos_db_model from neutron.db.rbac_db_models import QosPolicyRBAC from neutron.objects import base from neutron.objects.db import api as obj_db_api from neutron.objects.qos import rule as rule_obj_impl from neutron.objects import rbac_db @obj_base.VersionedObjectRegistry.register @add_metaclass(rbac_db.RbacNeutronMetaclass) class QosPolicy(base.NeutronDbObject): # Version 1.0: Initial version VERSION = '1.0' # required by RbacNeutronMetaclass rbac_db_model = QosPolicyRBAC db_model = qos_db_model.QosPolicy port_binding_model = qos_db_model.QosPortPolicyBinding network_binding_model = qos_db_model.QosNetworkPolicyBinding fields = { 'id': obj_fields.UUIDField(), 'tenant_id': obj_fields.UUIDField(), 'name': obj_fields.StringField(), 'description': obj_fields.StringField(), 'shared': obj_fields.BooleanField(default=False), 'rules': obj_fields.ListOfObjectsField('QosRule', subclasses=True), } fields_no_update = ['id', 'tenant_id'] synthetic_fields = ['rules'] binding_models = {'network': network_binding_model, 'port': port_binding_model} def to_dict(self): dict_ = super(QosPolicy, self).to_dict() if 'rules' in dict_: dict_['rules'] = [rule.to_dict() for rule in dict_['rules']] return dict_ def obj_load_attr(self, attrname): if attrname != 'rules': raise exceptions.ObjectActionError( action='obj_load_attr', reason=_('unable to load %s') % attrname) if not hasattr(self, attrname): self.reload_rules() def reload_rules(self): rules = rule_obj_impl.get_rules(self._context, self.id) setattr(self, 'rules', rules) self.obj_reset_changes(['rules']) def get_rule_by_id(self, rule_id): """Return rule specified by rule_id. @raise QosRuleNotFound: if there is no such rule in the policy. """ for rule in self.rules: if rule_id == rule.id: return rule raise exceptions.QosRuleNotFound(policy_id=self.id, rule_id=rule_id) @classmethod def get_object(cls, context, **kwargs): # We want to get the policy regardless of its tenant id. We'll make # sure the tenant has permission to access the policy later on. admin_context = context.elevated() with db_api.autonested_transaction(admin_context.session): policy_obj = super(QosPolicy, cls).get_object(admin_context, **kwargs) if (not policy_obj or not cls.is_accessible(context, policy_obj)): return policy_obj.reload_rules() return policy_obj @classmethod def get_objects(cls, context, **kwargs): # We want to get the policy regardless of its tenant id. We'll make # sure the tenant has permission to access the policy later on. 
admin_context = context.elevated() with db_api.autonested_transaction(admin_context.session): objs = super(QosPolicy, cls).get_objects(admin_context, **kwargs) result = [] for obj in objs: if not cls.is_accessible(context, obj): continue obj.reload_rules() result.append(obj) return result @classmethod def _get_object_policy(cls, context, model, **kwargs): with db_api.autonested_transaction(context.session): binding_db_obj = obj_db_api.get_object(context, model, **kwargs) if binding_db_obj: return cls.get_object(context, id=binding_db_obj['policy_id']) @classmethod def get_network_policy(cls, context, network_id): return cls._get_object_policy(context, cls.network_binding_model, network_id=network_id) @classmethod def get_port_policy(cls, context, port_id): return cls._get_object_policy(context, cls.port_binding_model, port_id=port_id) # TODO(QoS): Consider extending base to trigger registered methods for us def create(self): with db_api.autonested_transaction(self._context.session): super(QosPolicy, self).create() self.reload_rules() def delete(self): with db_api.autonested_transaction(self._context.session): for object_type, model in self.binding_models.items(): binding_db_obj = obj_db_api.get_object(self._context, model, policy_id=self.id) if binding_db_obj: raise exceptions.QosPolicyInUse( policy_id=self.id, object_type=object_type, object_id=binding_db_obj['%s_id' % object_type]) super(QosPolicy, self).delete() def attach_network(self, network_id): qos_db_api.create_policy_network_binding(self._context, policy_id=self.id, network_id=network_id) def attach_port(self, port_id): qos_db_api.create_policy_port_binding(self._context, policy_id=self.id, port_id=port_id) def detach_network(self, network_id): qos_db_api.delete_policy_network_binding(self._context, policy_id=self.id, network_id=network_id) def detach_port(self, port_id): qos_db_api.delete_policy_port_binding(self._context, policy_id=self.id, port_id=port_id) @classmethod def _get_bound_tenant_ids(cls, session, binding_db, bound_db, binding_db_id_column, policy_id): return list(itertools.chain.from_iterable( session.query(bound_db.tenant_id).join( binding_db, bound_db.id == binding_db_id_column).filter( binding_db.policy_id == policy_id).all())) @classmethod def get_bound_tenant_ids(cls, context, policy_id): """Implements RbacNeutronObject.get_bound_tenant_ids. :returns: set -- a set of tenants' ids dependant on QosPolicy. """ net = models_v2.Network qosnet = qos_db_model.QosNetworkPolicyBinding port = models_v2.Port qosport = qos_db_model.QosPortPolicyBinding bound_tenants = [] with db_api.autonested_transaction(context.session): bound_tenants.extend(cls._get_bound_tenant_ids( context.session, qosnet, net, qosnet.network_id, policy_id)) bound_tenants.extend( cls._get_bound_tenant_ids(context.session, qosport, port, qosport.port_id, policy_id)) return set(bound_tenants) neutron-8.4.0/neutron/objects/qos/__init__.py0000664000567000056710000000000013044372736022413 0ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/objects/qos/rule_type.py0000664000567000056710000000277513044372760022706 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_versionedobjects import base as obj_base from oslo_versionedobjects import fields as obj_fields from neutron import manager from neutron.objects import base from neutron.services.qos import qos_consts class RuleTypeField(obj_fields.BaseEnumField): def __init__(self, **kwargs): self.AUTO_TYPE = obj_fields.Enum( valid_values=qos_consts.VALID_RULE_TYPES) super(RuleTypeField, self).__init__(**kwargs) @obj_base.VersionedObjectRegistry.register class QosRuleType(base.NeutronObject): # Version 1.0: Initial version VERSION = '1.0' fields = { 'type': RuleTypeField(), } # we don't receive context because we don't need db access at all @classmethod def get_objects(cls, **kwargs): cls.validate_filters(**kwargs) core_plugin = manager.NeutronManager.get_plugin() return [cls(type=type_) for type_ in core_plugin.supported_qos_rule_types] neutron-8.4.0/neutron/objects/qos/rule.py0000664000567000056710000000574613044372760021646 0ustar jenkinsjenkins00000000000000# Copyright 2015 Huawei Technologies India Pvt Ltd, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import abc import sys from oslo_versionedobjects import base as obj_base from oslo_versionedobjects import fields as obj_fields import six from neutron.common import constants from neutron.common import utils from neutron.db import api as db_api from neutron.db.qos import models as qos_db_model from neutron.objects import base from neutron.services.qos import qos_consts def get_rules(context, qos_policy_id): all_rules = [] with db_api.autonested_transaction(context.session): for rule_type in qos_consts.VALID_RULE_TYPES: rule_cls_name = 'Qos%sRule' % utils.camelize(rule_type) rule_cls = getattr(sys.modules[__name__], rule_cls_name) rules = rule_cls.get_objects(context, qos_policy_id=qos_policy_id) all_rules.extend(rules) return all_rules @six.add_metaclass(abc.ABCMeta) class QosRule(base.NeutronDbObject): fields = { 'id': obj_fields.UUIDField(), 'qos_policy_id': obj_fields.UUIDField() } fields_no_update = ['id', 'qos_policy_id'] # should be redefined in subclasses rule_type = None def to_dict(self): dict_ = super(QosRule, self).to_dict() dict_['type'] = self.rule_type return dict_ def should_apply_to_port(self, port): """Check whether a rule can be applied to a specific port. This function has the logic to decide whether a rule should be applied to a port or not, depending on the source of the policy (network, or port). Eventually rules could override this method, or we could make it abstract to allow different rule behaviour. 
""" is_network_rule = self.qos_policy_id != port[qos_consts.QOS_POLICY_ID] is_network_device_port = any(port['device_owner'].startswith(prefix) for prefix in constants.DEVICE_OWNER_PREFIXES) return not (is_network_rule and is_network_device_port) @obj_base.VersionedObjectRegistry.register class QosBandwidthLimitRule(QosRule): # Version 1.0: Initial version VERSION = '1.0' db_model = qos_db_model.QosBandwidthLimitRule fields = { 'max_kbps': obj_fields.IntegerField(nullable=True), 'max_burst_kbps': obj_fields.IntegerField(nullable=True) } rule_type = qos_consts.RULE_TYPE_BANDWIDTH_LIMIT neutron-8.4.0/neutron/objects/common_types.py0000664000567000056710000000214513044372760022577 0ustar jenkinsjenkins00000000000000# Copyright 2016 OpenStack Foundation # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_versionedobjects import fields as obj_fields from neutron.common import constants class IPV6ModeEnum(obj_fields.Enum): """IPV6 Mode custom Enum""" def __init__(self, **kwargs): super(IPV6ModeEnum, self).__init__(valid_values=constants.IPV6_MODES, **kwargs) class IPV6ModeEnumField(obj_fields.BaseEnumField): def __init__(self, **kwargs): self.AUTO_TYPE = IPV6ModeEnum() super(IPV6ModeEnumField, self).__init__(**kwargs) neutron-8.4.0/neutron/objects/db/0000775000567000056710000000000013044373210020063 5ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/objects/db/__init__.py0000664000567000056710000000000013044372736022176 0ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/objects/db/api.py0000664000567000056710000000430413044372760021220 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from neutron_lib import exceptions as n_exc from oslo_utils import uuidutils from neutron.db import common_db_mixin # Common database operation implementations def get_object(context, model, **kwargs): with context.session.begin(subtransactions=True): return (common_db_mixin.model_query(context, model) .filter_by(**kwargs) .first()) def get_objects(context, model, **kwargs): with context.session.begin(subtransactions=True): return (common_db_mixin.model_query(context, model) .filter_by(**kwargs) .all()) def create_object(context, model, values): with context.session.begin(subtransactions=True): if 'id' not in values and hasattr(model, 'id'): values['id'] = uuidutils.generate_uuid() db_obj = model(**values) context.session.add(db_obj) return db_obj.__dict__ def _safe_get_object(context, model, **kwargs): db_obj = get_object(context, model, **kwargs) if db_obj is None: key = "".join(['%s:: %s ' % (key, value) for (key, value) in kwargs.items()]) raise n_exc.ObjectNotFound(id=key) return db_obj def update_object(context, model, values, **kwargs): with context.session.begin(subtransactions=True): db_obj = _safe_get_object(context, model, **kwargs) db_obj.update(values) db_obj.save(session=context.session) return db_obj.__dict__ def delete_object(context, model, **kwargs): with context.session.begin(subtransactions=True): db_obj = _safe_get_object(context, model, **kwargs) context.session.delete(db_obj) neutron-8.4.0/neutron/_i18n.py0000664000567000056710000000251313044372736017352 0ustar jenkinsjenkins00000000000000# All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import oslo_i18n DOMAIN = "neutron" _translators = oslo_i18n.TranslatorFactory(domain=DOMAIN) # The primary translation function using the well-known name "_" _ = _translators.primary # The contextual translation function using the name "_C" _C = _translators.contextual_form # The plural translation function using the name "_P" _P = _translators.plural_form # Translators for log levels. # # The abbreviated names are meant to reflect the usual use of a short # name like '_'. The "L" is for "log" and the other letter comes from # the level. _LI = _translators.log_info _LW = _translators.log_warning _LE = _translators.log_error _LC = _translators.log_critical def get_available_languages(): return oslo_i18n.get_available_languages(DOMAIN) neutron-8.4.0/neutron/service.py0000664000567000056710000003133713044372760020077 0ustar jenkinsjenkins00000000000000# Copyright 2011 VMware, Inc # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. import inspect import os import random from oslo_concurrency import processutils from oslo_config import cfg from oslo_log import log as logging from oslo_messaging import server as rpc_server from oslo_service import loopingcall from oslo_service import service as common_service from oslo_utils import excutils from oslo_utils import importutils from neutron._i18n import _, _LE, _LI from neutron.common import config from neutron.common import rpc as n_rpc from neutron import context from neutron.db import api as session from neutron import manager from neutron import worker from neutron import wsgi service_opts = [ cfg.IntOpt('periodic_interval', default=40, help=_('Seconds between running periodic tasks')), cfg.IntOpt('api_workers', help=_('Number of separate API worker processes for service. ' 'If not specified, the default is equal to the number ' 'of CPUs available for best performance.')), cfg.IntOpt('rpc_workers', default=1, help=_('Number of RPC worker processes for service')), cfg.IntOpt('rpc_state_report_workers', default=1, help=_('Number of RPC worker processes dedicated to state ' 'reports queue')), cfg.IntOpt('periodic_fuzzy_delay', default=5, help=_('Range of seconds to randomly delay when starting the ' 'periodic task scheduler to reduce stampeding. ' '(Disable by setting to 0)')), ] CONF = cfg.CONF CONF.register_opts(service_opts) LOG = logging.getLogger(__name__) class WsgiService(object): """Base class for WSGI based services. For each api you define, you must also define these flags: :_listen: The address on which to listen :_listen_port: The port on which to listen """ def __init__(self, app_name): self.app_name = app_name self.wsgi_app = None def start(self): self.wsgi_app = _run_wsgi(self.app_name) def wait(self): self.wsgi_app.wait() class NeutronApiService(WsgiService): """Class for neutron-api service.""" @classmethod def create(cls, app_name='neutron'): # Setup logging early, supplying both the CLI options and the # configuration mapping from the config file # We only update the conf dict for the verbose and debug # flags. Everything else must be set up in the conf file... # Log the options used when starting if we're in debug mode... 
config.setup_logging() service = cls(app_name) return service def serve_wsgi(cls): try: service = cls.create() service.start() except Exception: with excutils.save_and_reraise_exception(): LOG.exception(_LE('Unrecoverable error: please check log ' 'for details.')) return service def start_plugin_workers(): launchers = [] # NOTE(twilson) get_service_plugins also returns the core plugin for plugin in manager.NeutronManager.get_unique_service_plugins(): # TODO(twilson) Instead of defaulting here, come up with a good way to # share a common get_workers default between NeutronPluginBaseV2 and # ServicePluginBase for plugin_worker in getattr(plugin, 'get_workers', tuple)(): launcher = common_service.ProcessLauncher(cfg.CONF) launcher.launch_service(plugin_worker) launchers.append(launcher) return launchers class RpcWorker(worker.NeutronWorker): """Wraps a worker to be handled by ProcessLauncher""" start_listeners_method = 'start_rpc_listeners' def __init__(self, plugins): self._plugins = plugins self._servers = [] def start(self): super(RpcWorker, self).start() for plugin in self._plugins: if hasattr(plugin, self.start_listeners_method): try: servers = getattr(plugin, self.start_listeners_method)() except NotImplementedError: continue self._servers.extend(servers) def wait(self): try: self._wait() except Exception: LOG.exception(_LE('done with wait')) raise def _wait(self): LOG.debug('calling RpcWorker wait()') for server in self._servers: if isinstance(server, rpc_server.MessageHandlingServer): LOG.debug('calling wait on %s', server) server.wait() else: LOG.debug('NOT calling wait on %s', server) LOG.debug('returning from RpcWorker wait()') def stop(self): LOG.debug('calling RpcWorker stop()') for server in self._servers: if isinstance(server, rpc_server.MessageHandlingServer): LOG.debug('calling stop on %s', server) server.stop() @staticmethod def reset(): config.reset_service() class RpcReportsWorker(RpcWorker): start_listeners_method = 'start_rpc_state_reports_listener' def serve_rpc(): plugin = manager.NeutronManager.get_plugin() service_plugins = ( manager.NeutronManager.get_service_plugins().values()) if cfg.CONF.rpc_workers < 1: cfg.CONF.set_override('rpc_workers', 1) # If 0 < rpc_workers then start_rpc_listeners would be called in a # subprocess and we cannot simply catch the NotImplementedError. It is # simpler to check this up front by testing whether the plugin supports # multiple RPC workers. if not plugin.rpc_workers_supported(): LOG.debug("Active plugin doesn't implement start_rpc_listeners") if 0 < cfg.CONF.rpc_workers: LOG.error(_LE("'rpc_workers = %d' ignored because " "start_rpc_listeners is not implemented."), cfg.CONF.rpc_workers) raise NotImplementedError() try: # passing service plugins only, because core plugin is among them rpc = RpcWorker(service_plugins) # dispose the whole pool before os.fork, otherwise there will # be shared DB connections in child processes which may cause # DB errors. 
LOG.debug('using launcher for rpc, workers=%s', cfg.CONF.rpc_workers) session.dispose() launcher = common_service.ProcessLauncher(cfg.CONF, wait_interval=1.0) launcher.launch_service(rpc, workers=cfg.CONF.rpc_workers) if (cfg.CONF.rpc_state_report_workers > 0 and plugin.rpc_state_report_workers_supported()): rpc_state_rep = RpcReportsWorker([plugin]) LOG.debug('using launcher for state reports rpc, workers=%s', cfg.CONF.rpc_state_report_workers) launcher.launch_service( rpc_state_rep, workers=cfg.CONF.rpc_state_report_workers) return launcher except Exception: with excutils.save_and_reraise_exception(): LOG.exception(_LE('Unrecoverable error: please check log for ' 'details.')) def _get_api_workers(): workers = cfg.CONF.api_workers if not workers: workers = processutils.get_worker_count() return workers def _run_wsgi(app_name): app = config.load_paste_app(app_name) if not app: LOG.error(_LE('No known API applications configured.')) return return run_wsgi_app(app) def run_wsgi_app(app): server = wsgi.Server("Neutron") server.start(app, cfg.CONF.bind_port, cfg.CONF.bind_host, workers=_get_api_workers()) LOG.info(_LI("Neutron service started, listening on %(host)s:%(port)s"), {'host': cfg.CONF.bind_host, 'port': cfg.CONF.bind_port}) return server class Service(n_rpc.Service): """Service object for binaries running on hosts. A service takes a manager and enables rpc by listening to queues based on topic. It also periodically runs tasks on the manager. """ def __init__(self, host, binary, topic, manager, report_interval=None, periodic_interval=None, periodic_fuzzy_delay=None, *args, **kwargs): self.binary = binary self.manager_class_name = manager manager_class = importutils.import_class(self.manager_class_name) self.manager = manager_class(host=host, *args, **kwargs) self.report_interval = report_interval self.periodic_interval = periodic_interval self.periodic_fuzzy_delay = periodic_fuzzy_delay self.saved_args, self.saved_kwargs = args, kwargs self.timers = [] super(Service, self).__init__(host, topic, manager=self.manager) def start(self): self.manager.init_host() super(Service, self).start() if self.report_interval: pulse = loopingcall.FixedIntervalLoopingCall(self.report_state) pulse.start(interval=self.report_interval, initial_delay=self.report_interval) self.timers.append(pulse) if self.periodic_interval: if self.periodic_fuzzy_delay: initial_delay = random.randint(0, self.periodic_fuzzy_delay) else: initial_delay = None periodic = loopingcall.FixedIntervalLoopingCall( self.periodic_tasks) periodic.start(interval=self.periodic_interval, initial_delay=initial_delay) self.timers.append(periodic) self.manager.after_start() def __getattr__(self, key): manager = self.__dict__.get('manager', None) return getattr(manager, key) @classmethod def create(cls, host=None, binary=None, topic=None, manager=None, report_interval=None, periodic_interval=None, periodic_fuzzy_delay=None): """Instantiates class and passes back application object. 
:param host: defaults to CONF.host :param binary: defaults to basename of executable :param topic: defaults to bin_name - 'neutron-' part :param manager: defaults to CONF._manager :param report_interval: defaults to CONF.report_interval :param periodic_interval: defaults to CONF.periodic_interval :param periodic_fuzzy_delay: defaults to CONF.periodic_fuzzy_delay """ if not host: host = CONF.host if not binary: binary = os.path.basename(inspect.stack()[-1][1]) if not topic: topic = binary.rpartition('neutron-')[2] topic = topic.replace("-", "_") if not manager: manager = CONF.get('%s_manager' % topic, None) if report_interval is None: report_interval = CONF.report_interval if periodic_interval is None: periodic_interval = CONF.periodic_interval if periodic_fuzzy_delay is None: periodic_fuzzy_delay = CONF.periodic_fuzzy_delay service_obj = cls(host, binary, topic, manager, report_interval=report_interval, periodic_interval=periodic_interval, periodic_fuzzy_delay=periodic_fuzzy_delay) return service_obj def kill(self): """Destroy the service object.""" self.stop() def stop(self): super(Service, self).stop() for x in self.timers: try: x.stop() except Exception: LOG.exception(_LE("Exception occurs when timer stops")) self.timers = [] def wait(self): super(Service, self).wait() for x in self.timers: try: x.wait() except Exception: LOG.exception(_LE("Exception occurs when waiting for timer")) def reset(self): config.reset_service() def periodic_tasks(self, raise_on_error=False): """Tasks to be run at a periodic interval.""" ctxt = context.get_admin_context() self.manager.periodic_tasks(ctxt, raise_on_error=raise_on_error) def report_state(self): """Update the state of this service.""" # Todo(gongysh) report state to neutron server pass neutron-8.4.0/neutron/ipam/0000775000567000056710000000000013044373210016773 5ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/ipam/__init__.py0000664000567000056710000000000013044372736021106 0ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/ipam/subnet_alloc.py0000664000567000056710000004216513044372760022040 0ustar jenkinsjenkins00000000000000# Copyright (c) 2015 Hewlett-Packard Co. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import math import operator import netaddr from oslo_db import exception as db_exc from oslo_utils import uuidutils from neutron._i18n import _ from neutron.api.v2 import attributes from neutron.common import constants from neutron.common import exceptions as n_exc from neutron.db import models_v2 from neutron.ipam import driver from neutron.ipam import requests as ipam_req from neutron.ipam import utils as ipam_utils class SubnetAllocator(driver.Pool): """Class for handling allocation of subnet prefixes from a subnet pool. This class leverages the pluggable IPAM interface where possible to make merging into IPAM framework easier in future cycles. 
""" def __init__(self, subnetpool, context): super(SubnetAllocator, self).__init__(subnetpool, context) self._sp_helper = SubnetPoolHelper() def _lock_subnetpool(self): """Lock subnetpool associated row. This method disallows to allocate concurrently 2 subnets in the same subnetpool, it's required to ensure non-overlapping cidrs in the same subnetpool. """ current_hash = (self._context.session.query(models_v2.SubnetPool.hash) .filter_by(id=self._subnetpool['id']).scalar()) if current_hash is None: # NOTE(cbrandily): subnetpool has been deleted raise n_exc.SubnetPoolNotFound( subnetpool_id=self._subnetpool['id']) new_hash = uuidutils.generate_uuid() # NOTE(cbrandily): the update disallows 2 concurrent subnet allocation # to succeed: at most 1 transaction will succeed, others will be # rolled back and be caught in neutron.db.v2.base query = self._context.session.query(models_v2.SubnetPool).filter_by( id=self._subnetpool['id'], hash=current_hash) count = query.update({'hash': new_hash}) if not count: raise db_exc.RetryRequest(n_exc.SubnetPoolInUse( subnet_pool_id=self._subnetpool['id'])) def _get_allocated_cidrs(self): query = self._context.session.query(models_v2.Subnet) subnets = query.filter_by(subnetpool_id=self._subnetpool['id']) return (x.cidr for x in subnets) def _get_available_prefix_list(self): prefixes = (x.cidr for x in self._subnetpool.prefixes) allocations = self._get_allocated_cidrs() prefix_set = netaddr.IPSet(iterable=prefixes) allocation_set = netaddr.IPSet(iterable=allocations) available_set = prefix_set.difference(allocation_set) available_set.compact() return sorted(available_set.iter_cidrs(), key=operator.attrgetter('prefixlen'), reverse=True) def _num_quota_units_in_prefixlen(self, prefixlen, quota_unit): return math.pow(2, quota_unit - prefixlen) def _allocations_used_by_tenant(self, quota_unit): subnetpool_id = self._subnetpool['id'] tenant_id = self._subnetpool['tenant_id'] with self._context.session.begin(subtransactions=True): qry = self._context.session.query(models_v2.Subnet) allocations = qry.filter_by(subnetpool_id=subnetpool_id, tenant_id=tenant_id) value = 0 for allocation in allocations: prefixlen = netaddr.IPNetwork(allocation.cidr).prefixlen value += self._num_quota_units_in_prefixlen(prefixlen, quota_unit) return value def _check_subnetpool_tenant_quota(self, tenant_id, prefixlen): quota_unit = self._sp_helper.ip_version_subnetpool_quota_unit( self._subnetpool['ip_version']) quota = self._subnetpool.get('default_quota') if quota: used = self._allocations_used_by_tenant(quota_unit) requested_units = self._num_quota_units_in_prefixlen(prefixlen, quota_unit) if used + requested_units > quota: raise n_exc.SubnetPoolQuotaExceeded() def _allocate_any_subnet(self, request): with self._context.session.begin(subtransactions=True): self._lock_subnetpool() self._check_subnetpool_tenant_quota(request.tenant_id, request.prefixlen) prefix_pool = self._get_available_prefix_list() for prefix in prefix_pool: if request.prefixlen >= prefix.prefixlen: subnet = next(prefix.subnet(request.prefixlen)) gateway_ip = request.gateway_ip if not gateway_ip: gateway_ip = subnet.network + 1 pools = ipam_utils.generate_pools(subnet.cidr, gateway_ip) return IpamSubnet(request.tenant_id, request.subnet_id, subnet.cidr, gateway_ip=gateway_ip, allocation_pools=pools) msg = _("Insufficient prefix space to allocate subnet size /%s") raise n_exc.SubnetAllocationError(reason=msg % str(request.prefixlen)) def _allocate_specific_subnet(self, request): with 
self._context.session.begin(subtransactions=True): self._lock_subnetpool() self._check_subnetpool_tenant_quota(request.tenant_id, request.prefixlen) cidr = request.subnet_cidr available = self._get_available_prefix_list() matched = netaddr.all_matching_cidrs(cidr, available) if len(matched) is 1 and matched[0].prefixlen <= cidr.prefixlen: return IpamSubnet(request.tenant_id, request.subnet_id, cidr, gateway_ip=request.gateway_ip, allocation_pools=request.allocation_pools) msg = _("Cannot allocate requested subnet from the available " "set of prefixes") raise n_exc.SubnetAllocationError(reason=msg) def allocate_subnet(self, request): max_prefixlen = int(self._subnetpool['max_prefixlen']) min_prefixlen = int(self._subnetpool['min_prefixlen']) if request.prefixlen > max_prefixlen: raise n_exc.MaxPrefixSubnetAllocationError( prefixlen=request.prefixlen, max_prefixlen=max_prefixlen) if request.prefixlen < min_prefixlen: raise n_exc.MinPrefixSubnetAllocationError( prefixlen=request.prefixlen, min_prefixlen=min_prefixlen) if isinstance(request, ipam_req.AnySubnetRequest): return self._allocate_any_subnet(request) elif isinstance(request, ipam_req.SpecificSubnetRequest): return self._allocate_specific_subnet(request) else: msg = _("Unsupported request type") raise n_exc.SubnetAllocationError(reason=msg) def get_subnet(self, subnet_id): raise NotImplementedError() def update_subnet(self, request): raise NotImplementedError() def remove_subnet(self, subnet_id): raise NotImplementedError() class IpamSubnet(driver.Subnet): def __init__(self, tenant_id, subnet_id, cidr, gateway_ip=None, allocation_pools=None): self._req = ipam_req.SpecificSubnetRequest( tenant_id, subnet_id, cidr, gateway_ip=gateway_ip, allocation_pools=allocation_pools) def allocate(self, address_request): raise NotImplementedError() def deallocate(self, address): raise NotImplementedError() def get_details(self): return self._req class SubnetPoolReader(object): '''Class to assist with reading a subnetpool, loading defaults, and inferring IP version from prefix list. Provides a common way of reading a stored model or a create request with default table attributes. 
''' MIN_PREFIX_TYPE = 'min' MAX_PREFIX_TYPE = 'max' DEFAULT_PREFIX_TYPE = 'default' _sp_helper = None def __init__(self, subnetpool): self._read_prefix_info(subnetpool) self._sp_helper = SubnetPoolHelper() self._read_id(subnetpool) self._read_prefix_bounds(subnetpool) self._read_attrs(subnetpool, ['tenant_id', 'name', 'is_default', 'shared']) self.description = subnetpool.get('description') self._read_address_scope(subnetpool) self.subnetpool = {'id': self.id, 'name': self.name, 'tenant_id': self.tenant_id, 'prefixes': self.prefixes, 'min_prefix': self.min_prefix, 'min_prefixlen': self.min_prefixlen, 'max_prefix': self.max_prefix, 'max_prefixlen': self.max_prefixlen, 'default_prefix': self.default_prefix, 'default_prefixlen': self.default_prefixlen, 'default_quota': self.default_quota, 'address_scope_id': self.address_scope_id, 'is_default': self.is_default, 'shared': self.shared, 'description': self.description} def _read_attrs(self, subnetpool, keys): for key in keys: setattr(self, key, subnetpool[key]) def _ip_version_from_cidr(self, cidr): return netaddr.IPNetwork(cidr).version def _prefixlen_from_cidr(self, cidr): return netaddr.IPNetwork(cidr).prefixlen def _read_id(self, subnetpool): id = subnetpool.get('id', attributes.ATTR_NOT_SPECIFIED) if id is attributes.ATTR_NOT_SPECIFIED: id = uuidutils.generate_uuid() self.id = id def _read_prefix_bounds(self, subnetpool): ip_version = self.ip_version default_min = self._sp_helper.default_min_prefixlen(ip_version) default_max = self._sp_helper.default_max_prefixlen(ip_version) self._read_prefix_bound(self.MIN_PREFIX_TYPE, subnetpool, default_min) self._read_prefix_bound(self.MAX_PREFIX_TYPE, subnetpool, default_max) self._read_prefix_bound(self.DEFAULT_PREFIX_TYPE, subnetpool, self.min_prefixlen) self._sp_helper.validate_min_prefixlen(self.min_prefixlen, self.max_prefixlen) self._sp_helper.validate_max_prefixlen(self.max_prefixlen, ip_version) self._sp_helper.validate_default_prefixlen(self.min_prefixlen, self.max_prefixlen, self.default_prefixlen) def _read_prefix_bound(self, type, subnetpool, default_bound=None): prefixlen_attr = type + '_prefixlen' prefix_attr = type + '_prefix' prefixlen = subnetpool.get(prefixlen_attr, attributes.ATTR_NOT_SPECIFIED) wildcard = self._sp_helper.wildcard(self.ip_version) if prefixlen is attributes.ATTR_NOT_SPECIFIED and default_bound: prefixlen = default_bound if prefixlen is not attributes.ATTR_NOT_SPECIFIED: prefix_cidr = '/'.join((wildcard, str(prefixlen))) setattr(self, prefix_attr, prefix_cidr) setattr(self, prefixlen_attr, prefixlen) def _read_prefix_info(self, subnetpool): prefix_list = subnetpool['prefixes'] if not prefix_list: raise n_exc.EmptySubnetPoolPrefixList() ip_version = None for prefix in prefix_list: if not ip_version: ip_version = netaddr.IPNetwork(prefix).version elif netaddr.IPNetwork(prefix).version != ip_version: raise n_exc.PrefixVersionMismatch() self.default_quota = subnetpool.get('default_quota') if self.default_quota is attributes.ATTR_NOT_SPECIFIED: self.default_quota = None self.ip_version = ip_version self.prefixes = self._compact_subnetpool_prefix_list(prefix_list) def _read_address_scope(self, subnetpool): self.address_scope_id = subnetpool.get('address_scope_id', attributes.ATTR_NOT_SPECIFIED) def _compact_subnetpool_prefix_list(self, prefix_list): """Compact any overlapping prefixes in prefix_list and return the result """ ip_set = netaddr.IPSet() for prefix in prefix_list: ip_set.add(netaddr.IPNetwork(prefix)) ip_set.compact() return [str(x.cidr) for x in 
ip_set.iter_cidrs()] class SubnetPoolHelper(object): _PREFIX_VERSION_INFO = {4: {'max_prefixlen': constants.IPv4_BITS, 'wildcard': '0.0.0.0', 'default_min_prefixlen': 8, # IPv4 quota measured in units of /32 'quota_units': 32}, 6: {'max_prefixlen': constants.IPv6_BITS, 'wildcard': '::', 'default_min_prefixlen': 64, # IPv6 quota measured in units of /64 'quota_units': 64}} def validate_min_prefixlen(self, min_prefixlen, max_prefixlen): if min_prefixlen < 0: raise n_exc.UnsupportedMinSubnetPoolPrefix(prefix=min_prefixlen, version=4) if min_prefixlen > max_prefixlen: raise n_exc.IllegalSubnetPoolPrefixBounds( prefix_type='min_prefixlen', prefixlen=min_prefixlen, base_prefix_type='max_prefixlen', base_prefixlen=max_prefixlen) def validate_max_prefixlen(self, prefixlen, ip_version): max = self._PREFIX_VERSION_INFO[ip_version]['max_prefixlen'] if prefixlen > max: raise n_exc.IllegalSubnetPoolPrefixBounds( prefix_type='max_prefixlen', prefixlen=prefixlen, base_prefix_type='ip_version_max', base_prefixlen=max) def validate_default_prefixlen(self, min_prefixlen, max_prefixlen, default_prefixlen): if default_prefixlen < min_prefixlen: raise n_exc.IllegalSubnetPoolPrefixBounds( prefix_type='default_prefixlen', prefixlen=default_prefixlen, base_prefix_type='min_prefixlen', base_prefixlen=min_prefixlen) if default_prefixlen > max_prefixlen: raise n_exc.IllegalSubnetPoolPrefixBounds( prefix_type='default_prefixlen', prefixlen=default_prefixlen, base_prefix_type='max_prefixlen', base_prefixlen=max_prefixlen) def wildcard(self, ip_version): return self._PREFIX_VERSION_INFO[ip_version]['wildcard'] def default_max_prefixlen(self, ip_version): return self._PREFIX_VERSION_INFO[ip_version]['max_prefixlen'] def default_min_prefixlen(self, ip_version): return self._PREFIX_VERSION_INFO[ip_version]['default_min_prefixlen'] def ip_version_subnetpool_quota_unit(self, ip_version): return self._PREFIX_VERSION_INFO[ip_version]['quota_units'] neutron-8.4.0/neutron/ipam/requests.py0000664000567000056710000002676013044372760021244 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import abc import netaddr from oslo_config import cfg from oslo_utils import uuidutils import six from neutron._i18n import _ from neutron.api.v2 import attributes from neutron.common import constants from neutron.common import ipv6_utils from neutron.common import utils as common_utils from neutron.ipam import exceptions as ipam_exc @six.add_metaclass(abc.ABCMeta) class SubnetPool(object): """Represents a pool of IPs available inside an address scope.""" @six.add_metaclass(abc.ABCMeta) class SubnetRequest(object): """Carries the data needed to make a subnet request The data validated and carried by an instance of this class is the data that is common to any type of request. This class shouldn't be instantiated on its own. Rather, a subclass of this class should be used. 
""" def __init__(self, tenant_id, subnet_id, gateway_ip=None, allocation_pools=None): """Initialize and validate :param tenant_id: The tenant id who will own the subnet :type tenant_id: str uuid :param subnet_id: Neutron's subnet ID :type subnet_id: str uuid :param gateway_ip: An IP to reserve for the subnet gateway. :type gateway_ip: None or convertible to netaddr.IPAddress :param allocation_pools: The pool from which IPAM should allocate addresses. The allocator *may* allow allocating addresses outside of this range if specifically requested. :type allocation_pools: A list of netaddr.IPRange. None if not specified. """ self._tenant_id = tenant_id self._subnet_id = subnet_id self._gateway_ip = None self._allocation_pools = None if gateway_ip is not None: self._gateway_ip = netaddr.IPAddress(gateway_ip) if allocation_pools is not None: allocation_pools = sorted(allocation_pools) previous = None for pool in allocation_pools: if not isinstance(pool, netaddr.ip.IPRange): raise TypeError(_("Ranges must be netaddr.IPRange")) if previous and pool.first <= previous.last: raise ValueError(_("Ranges must not overlap")) previous = pool if 1 < len(allocation_pools): # Checks that all the ranges are in the same IP version. # IPRange sorts first by ip version so we can get by with just # checking the first and the last range having sorted them # above. first_version = allocation_pools[0].version last_version = allocation_pools[-1].version if first_version != last_version: raise ValueError(_("Ranges must be in the same IP " "version")) self._allocation_pools = allocation_pools if self.gateway_ip and self.allocation_pools: if self.gateway_ip.version != self.allocation_pools[0].version: raise ValueError(_("Gateway IP version inconsistent with " "allocation pool version")) @property def tenant_id(self): return self._tenant_id @property def subnet_id(self): return self._subnet_id @property def gateway_ip(self): return self._gateway_ip @property def allocation_pools(self): return self._allocation_pools def _validate_with_subnet(self, subnet_cidr): if self.gateway_ip and cfg.CONF.force_gateway_on_subnet: gw_ip = netaddr.IPAddress(self.gateway_ip) if (gw_ip.version == 4 or (gw_ip.version == 6 and not gw_ip.is_link_local())): if self.gateway_ip not in subnet_cidr: raise ipam_exc.IpamValueInvalid(_( "gateway_ip %s is not in the subnet") % self.gateway_ip) if self.allocation_pools: if subnet_cidr.version != self.allocation_pools[0].version: raise ipam_exc.IpamValueInvalid(_( "allocation_pools use the wrong ip version")) for pool in self.allocation_pools: if pool not in subnet_cidr: raise ipam_exc.IpamValueInvalid(_( "allocation_pools are not in the subnet")) class AnySubnetRequest(SubnetRequest): """A template for allocating an unspecified subnet from IPAM Support for this type of request in a driver is optional. For example, the initial reference implementation will not support this. The API has no way of creating a subnet without a specific address until subnet-allocation is implemented. """ WILDCARDS = {constants.IPv4: '0.0.0.0', constants.IPv6: '::'} def __init__(self, tenant_id, subnet_id, version, prefixlen, gateway_ip=None, allocation_pools=None): """ :param version: Either constants.IPv4 or constants.IPv6 :param prefixlen: The prefix len requested. Must be within the min and max allowed. 
:type prefixlen: int """ super(AnySubnetRequest, self).__init__( tenant_id=tenant_id, subnet_id=subnet_id, gateway_ip=gateway_ip, allocation_pools=allocation_pools) net = netaddr.IPNetwork(self.WILDCARDS[version] + '/' + str(prefixlen)) self._validate_with_subnet(net) self._prefixlen = prefixlen @property def prefixlen(self): return self._prefixlen class SpecificSubnetRequest(SubnetRequest): """A template for allocating a specified subnet from IPAM The initial reference implementation will probably just allow any allocation, even overlapping ones. This can be expanded on by future blueprints. """ def __init__(self, tenant_id, subnet_id, subnet_cidr, gateway_ip=None, allocation_pools=None): """ :param subnet: The subnet requested. Can be IPv4 or IPv6. However, when IPAM tries to fulfill this request, the IP version must match the version of the address scope being used. :type subnet: netaddr.IPNetwork or convertible to one """ super(SpecificSubnetRequest, self).__init__( tenant_id=tenant_id, subnet_id=subnet_id, gateway_ip=gateway_ip, allocation_pools=allocation_pools) self._subnet_cidr = netaddr.IPNetwork(subnet_cidr) self._validate_with_subnet(self._subnet_cidr) @property def subnet_cidr(self): return self._subnet_cidr @property def prefixlen(self): return self._subnet_cidr.prefixlen @six.add_metaclass(abc.ABCMeta) class AddressRequest(object): """Abstract base class for address requests""" class SpecificAddressRequest(AddressRequest): """For requesting a specified address from IPAM""" def __init__(self, address): """ :param address: The address being requested :type address: A netaddr.IPAddress or convertible to one. """ super(SpecificAddressRequest, self).__init__() self._address = netaddr.IPAddress(address) @property def address(self): return self._address class AnyAddressRequest(AddressRequest): """Used to request any available address from the pool.""" class AutomaticAddressRequest(SpecificAddressRequest): """Used to create auto generated addresses, such as EUI64""" EUI64 = 'eui64' def _generate_eui64_address(self, **kwargs): if set(kwargs) != set(['prefix', 'mac']): raise ipam_exc.AddressCalculationFailure( address_type='eui-64', reason=_('must provide exactly 2 arguments - cidr and MAC')) prefix = kwargs['prefix'] mac_address = kwargs['mac'] return ipv6_utils.get_ipv6_addr_by_EUI64(prefix, mac_address) _address_generators = {EUI64: _generate_eui64_address} def __init__(self, address_type=EUI64, **kwargs): """ This constructor builds an automatic IP address. Parameter needed for generating it can be passed as optional keyword arguments. :param address_type: the type of address to generate. It could be an eui-64 address, a random IPv6 address, or an ipv4 link-local address. For the Kilo release only eui-64 addresses will be supported. 
""" address_generator = self._address_generators.get(address_type) if not address_generator: raise ipam_exc.InvalidAddressType(address_type=address_type) address = address_generator(self, **kwargs) super(AutomaticAddressRequest, self).__init__(address) class RouterGatewayAddressRequest(AddressRequest): """Used to request allocating the special router gateway address.""" class AddressRequestFactory(object): """Builds request using ip info Additional parameters(port and context) are not used in default implementation, but planned to be used in sub-classes provided by specific ipam driver, """ @classmethod def get_request(cls, context, port, ip_dict): """ :param context: context (not used here, but can be used in sub-classes) :param port: port dict (not used here, but can be used in sub-classes) :param ip_dict: dict that can contain 'ip_address', 'mac' and 'subnet_cidr' keys. Request to generate is selected depending on this ip_dict keys. :return: returns prepared AddressRequest (specific or any) """ if ip_dict.get('ip_address'): return SpecificAddressRequest(ip_dict['ip_address']) elif ip_dict.get('eui64_address'): return AutomaticAddressRequest(prefix=ip_dict['subnet_cidr'], mac=ip_dict['mac']) else: return AnyAddressRequest() class SubnetRequestFactory(object): """Builds request using subnet info""" @classmethod def get_request(cls, context, subnet, subnetpool): cidr = subnet.get('cidr') subnet_id = subnet.get('id', uuidutils.generate_uuid()) is_any_subnetpool_request = not attributes.is_attr_set(cidr) if is_any_subnetpool_request: prefixlen = subnet['prefixlen'] if not attributes.is_attr_set(prefixlen): prefixlen = int(subnetpool['default_prefixlen']) return AnySubnetRequest( subnet['tenant_id'], subnet_id, common_utils.ip_version_from_int(subnetpool['ip_version']), prefixlen) else: return SpecificSubnetRequest(subnet['tenant_id'], subnet_id, cidr, subnet.get('gateway_ip'), subnet.get('allocation_pools')) neutron-8.4.0/neutron/ipam/utils.py0000664000567000056710000000551113044372760020520 0ustar jenkinsjenkins00000000000000# Copyright 2015 OpenStack LLC. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import netaddr from neutron.common import constants def check_subnet_ip(cidr, ip_address): """Validate that the IP address is on the subnet.""" ip = netaddr.IPAddress(ip_address) net = netaddr.IPNetwork(cidr) # Check that the IP is valid on subnet. This cannot be the # network or the broadcast address (which exists only in IPv4) return (ip != net.network and (net.version == 6 or ip != net[-1]) and net.netmask & ip == net.network) def check_gateway_invalid_in_subnet(cidr, gateway): """Check whether the gw IP address is invalid on the subnet.""" ip = netaddr.IPAddress(gateway) net = netaddr.IPNetwork(cidr) # Check whether the gw IP is in-valid on subnet. # If gateway is in the subnet, it cannot be the # 'network' or the 'broadcast address (only in IPv4)'. # If gateway is out of subnet, there is no way to # check since we don't have gateway's subnet cidr. 
return (ip in net and (ip == net.network or (net.version == constants.IP_VERSION_4 and ip == net[-1]))) def check_gateway_in_subnet(cidr, gateway): """Validate that the gateway is on the subnet.""" ip = netaddr.IPAddress(gateway) if ip.version == 4 or (ip.version == 6 and not ip.is_link_local()): return check_subnet_ip(cidr, gateway) return True def generate_pools(cidr, gateway_ip): """Create IP allocation pools for a specified subnet The Neutron API defines a subnet's allocation pools as a list of IPRange objects for defining the pool range. """ # Auto allocate the pool around gateway_ip net = netaddr.IPNetwork(cidr) ip_version = net.version first = netaddr.IPAddress(net.first, ip_version) last = netaddr.IPAddress(net.last, ip_version) if first == last: # handle single address subnet case return [netaddr.IPRange(first, last)] first_ip = first + 1 # last address is broadcast in v4 last_ip = last - (ip_version == 4) if first_ip >= last_ip: # /31 lands here return [] ipset = netaddr.IPSet(netaddr.IPRange(first_ip, last_ip)) if gateway_ip: ipset.remove(netaddr.IPAddress(gateway_ip, ip_version)) return list(ipset.iter_ipranges()) neutron-8.4.0/neutron/ipam/driver.py0000664000567000056710000001165113044372760020655 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import abc from oslo_config import cfg from oslo_log import log import six from neutron.ipam import requests as ipam_req from neutron import manager LOG = log.getLogger(__name__) @six.add_metaclass(abc.ABCMeta) class Pool(object): """Interface definition for an IPAM driver. There should be an instance of the driver for every subnet pool. """ def __init__(self, subnetpool, context): """Initialize pool :param subnetpool: SubnetPool of the address space to use. :type subnetpool: dict """ self._subnetpool = subnetpool self._context = context @classmethod def get_instance(cls, subnet_pool, context): """Returns an instance of the configured IPAM driver :param subnet_pool: Subnet pool of the address space to use. :type subnet_pool: dict :returns: An instance of Driver for the given subnet pool """ ipam_driver_name = cfg.CONF.ipam_driver mgr = manager.NeutronManager LOG.debug("Loading ipam driver: %s", ipam_driver_name) driver_class = mgr.load_class_for_provider('neutron.ipam_drivers', ipam_driver_name) return driver_class(subnet_pool, context) @abc.abstractmethod def allocate_subnet(self, request): """Allocates a subnet based on the subnet request :param request: Describes the allocation requested. 
:type request: An instance of a sub-class of SubnetRequest :returns: An instance of Subnet :raises: RequestNotSupported, IPAMAlreadyAllocated """ @abc.abstractmethod def get_subnet(self, subnet_id): """Gets the matching subnet if it has been allocated :param subnet_id: the subnet identifier :type subnet_id: str uuid :returns: An instance of IPAM Subnet :raises: IPAMAllocationNotFound """ @abc.abstractmethod def update_subnet(self, request): """Updates an already allocated subnet This is used to notify the external IPAM system of updates to a subnet. :param request: Update the subnet to match this request :type request: An instance of a sub-class of SpecificSubnetRequest :returns: An instance of IPAM Subnet :raises: RequestNotSupported, IPAMAllocationNotFound """ @abc.abstractmethod def remove_subnet(self, subnet_id): """Removes an allocation The initial reference implementation will probably do nothing. :param subnet_id: the subnet identifier :type subnet_id: str uuid :raises: IPAMAllocationNotFound """ def get_subnet_request_factory(self): """Returns default SubnetRequestFactory Can be overridden on driver level to return custom factory """ return ipam_req.SubnetRequestFactory def get_address_request_factory(self): """Returns default AddressRequestFactory Can be overridden on driver level to return custom factory """ return ipam_req.AddressRequestFactory @six.add_metaclass(abc.ABCMeta) class Subnet(object): """Interface definition for an IPAM subnet A subnet would typically be associated with a network but may not be. It could represent a dynamically routed IP address space in which case the normal network and broadcast addresses would be useable. It should always be a routable block of addresses and representable in CIDR notation. """ @abc.abstractmethod def allocate(self, address_request): """Allocates an IP address based on the request passed in :param address_request: Specifies what to allocate. :type address_request: An instance of a subclass of AddressRequest :returns: A netaddr.IPAddress :raises: AddressNotAvailable, AddressOutsideAllocationPool, AddressOutsideSubnet """ @abc.abstractmethod def deallocate(self, address): """Returns a previously allocated address to the pool :param address: The address to give back. :type address: A netaddr.IPAddress or convertible to one. :returns: None :raises: IPAMAllocationNotFound """ @abc.abstractmethod def get_details(self): """Returns the details of the subnet :returns: An instance of SpecificSubnetRequest with the subnet detail. """ neutron-8.4.0/neutron/ipam/drivers/0000775000567000056710000000000013044373210020451 5ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/ipam/drivers/__init__.py0000664000567000056710000000000013044372736022564 0ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/ipam/drivers/neutrondb_ipam/0000775000567000056710000000000013044373210023457 5ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/ipam/drivers/neutrondb_ipam/__init__.py0000664000567000056710000000000013044372736025572 0ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/ipam/drivers/neutrondb_ipam/db_models.py0000664000567000056710000001144713044372760026001 0ustar jenkinsjenkins00000000000000# Copyright 2015 OpenStack LLC. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import sqlalchemy as sa from sqlalchemy import orm as sa_orm from neutron.db import model_base # Database models used by the neutron DB IPAM driver # NOTE(salv-orlando): This is meant to replace the class # neutron.db.models_v2.IPAvailabilityRange. class IpamAvailabilityRange(model_base.BASEV2): """Internal representation of available IPs for Neutron subnets. Allocation - first entry from the range will be allocated. If the first entry is equal to the last entry then this row will be deleted. Recycling IPs involves reading the IPAllocationPool and IPAllocation tables and inserting ranges representing available IPs. This happens after the final allocation is pulled from this table and a new IP allocation is requested. Any contiguous ranges of available IPs will be inserted as a single range. """ allocation_pool_id = sa.Column(sa.String(36), sa.ForeignKey('ipamallocationpools.id', ondelete="CASCADE"), nullable=False, primary_key=True) first_ip = sa.Column(sa.String(64), nullable=False, primary_key=True) last_ip = sa.Column(sa.String(64), nullable=False, primary_key=True) __table_args__ = ( sa.Index('ix_ipamavailabilityranges_first_ip_allocation_pool_id', 'first_ip', 'allocation_pool_id'), sa.Index('ix_ipamavailabilityranges_last_ip_allocation_pool_id', 'last_ip', 'allocation_pool_id'), model_base.BASEV2.__table_args__ ) def __repr__(self): return "%s - %s" % (self.first_ip, self.last_ip) # NOTE(salv-orlando): The following data model creates redundancy with # models_v2.IPAllocationPool. This level of data redundancy could be tolerated # considering that the following model is specific to the IPAM driver logic. # It is therefore an internal representation of a subnet allocation # pool and can change in the future, whereas # models_v2.IPAllocationPool is the representation of IP allocation pools in # the management layer, and its evolution is therefore subject to the API # backward compatibility policy class IpamAllocationPool(model_base.BASEV2, model_base.HasId): """Representation of an allocation pool in a Neutron subnet.""" ipam_subnet_id = sa.Column(sa.String(36), sa.ForeignKey('ipamsubnets.id', ondelete="CASCADE"), nullable=False) first_ip = sa.Column(sa.String(64), nullable=False) last_ip = sa.Column(sa.String(64), nullable=False) available_ranges = sa_orm.relationship(IpamAvailabilityRange, backref='allocation_pool', lazy="joined", cascade='all, delete-orphan') def __repr__(self): return "%s - %s" % (self.first_ip, self.last_ip) class IpamSubnet(model_base.BASEV2, model_base.HasId): """Association between IPAM entities and neutron subnets. For subnet data persistence - such as cidr and gateway IP - the IPAM driver relies on Neutron's subnet model as the source of truth to limit data redundancy. """ neutron_subnet_id = sa.Column(sa.String(36), nullable=True) allocation_pools = sa_orm.relationship(IpamAllocationPool, backref='subnet', lazy="joined", cascade='delete') class IpamAllocation(model_base.BASEV2): """Model class for IP Allocation requests.
""" ip_address = sa.Column(sa.String(64), nullable=False, primary_key=True) status = sa.Column(sa.String(36)) # The subnet identifier is redundant but come handy for looking up # IP addresses to remove. ipam_subnet_id = sa.Column(sa.String(36), sa.ForeignKey('ipamsubnets.id', ondelete="CASCADE"), primary_key=True, nullable=False) neutron-8.4.0/neutron/ipam/drivers/neutrondb_ipam/driver.py0000664000567000056710000005151613044372760025345 0ustar jenkinsjenkins00000000000000# Copyright 2015 OpenStack LLC. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import netaddr from oslo_db import exception as db_exc from oslo_log import log from oslo_utils import uuidutils from neutron._i18n import _, _LE from neutron.common import exceptions as n_exc from neutron.common import ipv6_utils from neutron.db import api as db_api from neutron.ipam import driver as ipam_base from neutron.ipam.drivers.neutrondb_ipam import db_api as ipam_db_api from neutron.ipam import exceptions as ipam_exc from neutron.ipam import requests as ipam_req from neutron.ipam import subnet_alloc from neutron.ipam import utils as ipam_utils from neutron import manager LOG = log.getLogger(__name__) class NeutronDbSubnet(ipam_base.Subnet): """Manage IP addresses for Neutron DB IPAM driver. This class implements the strategy for IP address allocation and deallocation for the Neutron DB IPAM driver. Allocation for IP addresses is based on the concept of availability ranges, which were already used in Neutron's DB base class for handling IPAM operations. """ @classmethod def create_allocation_pools(cls, subnet_manager, session, pools, cidr): for pool in pools: # IPv6 addresses that start '::1', '::2', etc cause IP version # ambiguity when converted to integers by pool.first and pool.last. # Infer the IP version from the subnet cidr. ip_version = cidr.version subnet_manager.create_pool( session, netaddr.IPAddress(pool.first, ip_version).format(), netaddr.IPAddress(pool.last, ip_version).format()) @classmethod def create_from_subnet_request(cls, subnet_request, ctx): ipam_subnet_id = uuidutils.generate_uuid() subnet_manager = ipam_db_api.IpamSubnetManager( ipam_subnet_id, subnet_request.subnet_id) # Create subnet resource session = ctx.session subnet_manager.create(session) # If allocation pools are not specified, define them around # the subnet's gateway IP if not subnet_request.allocation_pools: pools = ipam_utils.generate_pools(subnet_request.subnet_cidr, subnet_request.gateway_ip) else: pools = subnet_request.allocation_pools # Create IPAM allocation pools and availability ranges cls.create_allocation_pools(subnet_manager, session, pools, subnet_request.subnet_cidr) return cls(ipam_subnet_id, ctx, cidr=subnet_request.subnet_cidr, allocation_pools=pools, gateway_ip=subnet_request.gateway_ip, tenant_id=subnet_request.tenant_id, subnet_id=subnet_request.subnet_id) @classmethod def load(cls, neutron_subnet_id, ctx): """Load an IPAM subnet from the database given its neutron ID. :param neutron_subnet_id: neutron subnet identifier. 
""" ipam_subnet = ipam_db_api.IpamSubnetManager.load_by_neutron_subnet_id( ctx.session, neutron_subnet_id) if not ipam_subnet: LOG.error(_LE("IPAM subnet referenced to " "Neutron subnet %s does not exist"), neutron_subnet_id) raise n_exc.SubnetNotFound(subnet_id=neutron_subnet_id) pools = [] for pool in ipam_subnet.allocation_pools: pools.append(netaddr.IPRange(pool['first_ip'], pool['last_ip'])) neutron_subnet = cls._fetch_subnet(ctx, neutron_subnet_id) return cls(ipam_subnet['id'], ctx, cidr=neutron_subnet['cidr'], allocation_pools=pools, gateway_ip=neutron_subnet['gateway_ip'], tenant_id=neutron_subnet['tenant_id'], subnet_id=neutron_subnet_id) @classmethod def _fetch_subnet(cls, context, id): plugin = manager.NeutronManager.get_plugin() return plugin._get_subnet(context, id) def __init__(self, internal_id, ctx, cidr=None, allocation_pools=None, gateway_ip=None, tenant_id=None, subnet_id=None): # NOTE: In theory it could have been possible to grant the IPAM # driver direct access to the database. While this is possible, # it would have led to duplicate code and/or non-trivial # refactorings in neutron.db.db_base_plugin_v2. # This is because in the Neutron V2 plugin logic DB management is # encapsulated within the plugin. self._cidr = cidr self._pools = allocation_pools self._gateway_ip = gateway_ip self._tenant_id = tenant_id self._subnet_id = subnet_id self.subnet_manager = ipam_db_api.IpamSubnetManager(internal_id, self._subnet_id) self._context = ctx def _verify_ip(self, session, ip_address): """Verify whether IP address can be allocated on subnet. :param session: database session :param ip_address: String representing the IP address to verify :raises: InvalidInput, IpAddressAlreadyAllocated """ # Ensure that the IP's are unique if not self.subnet_manager.check_unique_allocation(session, ip_address): raise ipam_exc.IpAddressAlreadyAllocated( subnet_id=self.subnet_manager.neutron_id, ip=ip_address) # Ensure that the IP is valid on the subnet if not ipam_utils.check_subnet_ip(self._cidr, ip_address): raise ipam_exc.InvalidIpForSubnet( subnet_id=self.subnet_manager.neutron_id, ip=ip_address) def _allocate_specific_ip(self, session, ip_address, allocation_pool_id=None, auto_generated=False): """Remove an IP address from subnet's availability ranges. This method is supposed to be called from within a database transaction, otherwise atomicity and integrity might not be enforced and the operation might result in incosistent availability ranges for the subnet. :param session: database session :param ip_address: ip address to mark as allocated :param allocation_pool_id: identifier of the allocation pool from which the ip address has been extracted. If not specified this routine will scan all allocation pools. :param auto_generated: indicates whether ip was auto generated :returns: list of IP ranges as instances of IPAvailabilityRange """ # Return immediately for EUI-64 addresses. For this # class of subnets availability ranges do not apply if ipv6_utils.is_eui64_address(ip_address): return LOG.debug("Removing %(ip_address)s from availability ranges for " "subnet id:%(subnet_id)s", {'ip_address': ip_address, 'subnet_id': self.subnet_manager.neutron_id}) # Netaddr's IPRange and IPSet objects work very well even with very # large subnets, including IPv6 ones. 
final_ranges = [] ip_in_pools = False if allocation_pool_id: av_ranges = self.subnet_manager.list_ranges_by_allocation_pool( session, allocation_pool_id) else: av_ranges = self.subnet_manager.list_ranges_by_subnet_id(session) for db_range in av_ranges: initial_ip_set = netaddr.IPSet(netaddr.IPRange( db_range['first_ip'], db_range['last_ip'])) final_ip_set = initial_ip_set - netaddr.IPSet([ip_address]) if not final_ip_set: ip_in_pools = True # Range exhausted - bye bye if not self.subnet_manager.delete_range(session, db_range): raise db_exc.RetryRequest(ipam_exc.IPAllocationFailed()) continue if initial_ip_set == final_ip_set: # IP address does not fall within the current range, move # to the next one final_ranges.append(db_range) continue ip_in_pools = True for new_range in final_ip_set.iter_ipranges(): # store new range in database # use netaddr.IPAddress format() method which is equivalent # to str(...) but also enables us to use different # representation formats (if needed) for IPv6. first_ip = netaddr.IPAddress(new_range.first) last_ip = netaddr.IPAddress(new_range.last) if (db_range['first_ip'] == first_ip.format() or db_range['last_ip'] == last_ip.format()): rows = self.subnet_manager.update_range( session, db_range, first_ip=first_ip, last_ip=last_ip) if not rows: raise db_exc.RetryRequest( ipam_exc.IPAllocationFailed()) LOG.debug("Adjusted availability range for pool %s", db_range['allocation_pool_id']) final_ranges.append(db_range) else: new_ip_range = self.subnet_manager.create_range( session, db_range['allocation_pool_id'], first_ip.format(), last_ip.format()) LOG.debug("Created availability range for pool %s", new_ip_range['allocation_pool_id']) final_ranges.append(new_ip_range) # If ip is autogenerated it should be present in allocation pools, # so retry if it is not there if auto_generated and not ip_in_pools: raise db_exc.RetryRequest(ipam_exc.IPAllocationFailed()) # Most callers might ignore this return value, which is however # useful for testing purposes LOG.debug("Availability ranges for subnet id %(subnet_id)s " "modified: %(new_ranges)s", {'subnet_id': self.subnet_manager.neutron_id, 'new_ranges': ", ".join(["[%s; %s]" % (r['first_ip'], r['last_ip']) for r in final_ranges])}) return final_ranges def _rebuild_availability_ranges(self, session): """Rebuild availability ranges. This method should be called only when the availability ranges are exhausted or when the subnet's allocation pools are updated, which may trigger a deletion of the availability ranges. For this operation to complete successfully, this method uses a locking query to ensure that no IP is allocated while the regeneration of availability ranges is in progress. :param session: database session """ # List all currently allocated addresses, and prevent further # allocations with a write-intent lock. # NOTE: because of this driver's logic the write intent lock is # probably unnecessary as this routine is called when the availability # ranges for a subnet are exhausted and no further address can be # allocated. # TODO(salv-orlando): devise, if possible, a more efficient solution # for building the IPSet to ensure decent performances even with very # large subnets. allocations = netaddr.IPSet( [netaddr.IPAddress(allocation['ip_address']) for allocation in self.subnet_manager.list_allocations( session)]) # There should be no need to set a write intent lock on the allocation # pool table.
Indeed it is not important for the correctness of this # operation if the allocation pools are updated by another operation, # which will result in the generation of new availability ranges. # NOTE: it might be argued that an allocation pool update should in # theory preempt rebuilding the availability range. This is an option # to consider for future developments. LOG.debug("Rebuilding availability ranges for subnet %s", self.subnet_manager.neutron_id) for pool in self.subnet_manager.list_pools(session): # Create a set of all addresses in the pool poolset = netaddr.IPSet(netaddr.IPRange(pool['first_ip'], pool['last_ip'])) # Use set difference to find free addresses in the pool available = poolset - allocations # Write the ranges to the db for ip_range in available.iter_ipranges(): av_range = self.subnet_manager.create_range( session, pool['id'], netaddr.IPAddress(ip_range.first).format(), netaddr.IPAddress(ip_range.last).format()) session.add(av_range) def _generate_ip(self, session): try: return self._try_generate_ip(session) except ipam_exc.IpAddressGenerationFailure: self._rebuild_availability_ranges(session) return self._try_generate_ip(session) def _try_generate_ip(self, session): """Generate an IP address from availability ranges.""" ip_range = self.subnet_manager.get_first_range(session) if not ip_range: LOG.debug("All IPs from subnet %(subnet_id)s allocated", {'subnet_id': self.subnet_manager.neutron_id}) raise ipam_exc.IpAddressGenerationFailure( subnet_id=self.subnet_manager.neutron_id) # A suitable range was found. Return IP address. ip_address = ip_range['first_ip'] LOG.debug("Allocated IP - %(ip_address)s from range " "[%(first_ip)s; %(last_ip)s]", {'ip_address': ip_address, 'first_ip': ip_address, 'last_ip': ip_range['last_ip']}) return ip_address, ip_range['allocation_pool_id'] def allocate(self, address_request): # NOTE(salv-orlando): Creating a new db session might be a rather # dangerous thing to do, if executed from within another database # transaction. Therefore the IPAM driver should never be # called from within a database transaction, which is also good # practice since in the general case these drivers may interact # with remote backends session = self._context.session all_pool_id = None auto_generated = False with db_api.autonested_transaction(session): # NOTE(salv-orlando): It would probably better to have a simpler # model for address requests and just check whether there is a # specific IP address specified in address_request if isinstance(address_request, ipam_req.SpecificAddressRequest): # This handles both specific and automatic address requests # Check availability of requested IP ip_address = str(address_request.address) self._verify_ip(session, ip_address) else: ip_address, all_pool_id = self._generate_ip(session) auto_generated = True self._allocate_specific_ip(session, ip_address, all_pool_id, auto_generated) # Create IP allocation request object # The only defined status at this stage is 'ALLOCATED'. # More states will be available in the future - e.g.: RECYCLABLE self.subnet_manager.create_allocation(session, ip_address) return ip_address def deallocate(self, address): # This is almost a no-op because the Neutron DB IPAM driver does not # delete IPAllocation objects, neither rebuilds availability ranges # at every deallocation. The only operation it performs is to delete # an IPRequest entry. session = self._context.session count = self.subnet_manager.delete_allocation( session, address) # count can hardly be greater than 1, but it can be 0... 
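# Illustrative note (added commentary): count == 0 means no matching # allocation row existed, e.g. when deallocating an address that was never # allocated on this subnet; this is reported as an error below.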
if not count: raise ipam_exc.IpAddressAllocationNotFound( subnet_id=self.subnet_manager.neutron_id, ip_address=address) def _no_pool_changes(self, session, pools): """Check if pool updates in db are required.""" db_pools = self.subnet_manager.list_pools(session) iprange_pools = [netaddr.IPRange(pool.first_ip, pool.last_ip) for pool in db_pools] return pools == iprange_pools def update_allocation_pools(self, pools, cidr): # Pools have already been validated in the subnet request object which # was sent to the subnet pool driver. Further validation should not be # required. session = self._context.session if self._no_pool_changes(session, pools): return self.subnet_manager.delete_allocation_pools(session) self.create_allocation_pools(self.subnet_manager, session, pools, cidr) self._pools = pools def get_details(self): """Return subnet data as a SpecificSubnetRequest""" return ipam_req.SpecificSubnetRequest( self._tenant_id, self.subnet_manager.neutron_id, self._cidr, self._gateway_ip, self._pools) class NeutronDbPool(subnet_alloc.SubnetAllocator): """Subnet pools backed by Neutron Database. As this driver does not yet implement the subnet pool concept, most operations are either trivial or no-ops. """ def get_subnet(self, subnet_id): """Retrieve an IPAM subnet. :param subnet_id: Neutron subnet identifier :returns: a NeutronDbSubnet instance """ return NeutronDbSubnet.load(subnet_id, self._context) def allocate_subnet(self, subnet_request): """Create an IPAMSubnet object for the provided request. This method does not actually do any operation in the driver, given its simplified nature. :param subnet_request: a SubnetRequest describing the subnet to allocate :returns: a NeutronDbSubnet instance """ if self._subnetpool: subnet = super(NeutronDbPool, self).allocate_subnet(subnet_request) subnet_request = subnet.get_details() # subnet_request must be an instance of SpecificSubnetRequest if not isinstance(subnet_request, ipam_req.SpecificSubnetRequest): raise ipam_exc.InvalidSubnetRequestType( subnet_type=type(subnet_request)) return NeutronDbSubnet.create_from_subnet_request(subnet_request, self._context) def update_subnet(self, subnet_request): """Update subnet info in the IPAM driver. The only subnet update information the driver needs to be aware of is the allocation pools. """ if not subnet_request.subnet_id: raise ipam_exc.InvalidSubnetRequest( reason=_("An identifier must be specified when updating " "a subnet")) if not subnet_request.allocation_pools: LOG.debug("Update subnet request for subnet %s did not specify " "new allocation pools, there is nothing to do", subnet_request.subnet_id) return subnet = NeutronDbSubnet.load(subnet_request.subnet_id, self._context) cidr = netaddr.IPNetwork(subnet._cidr) subnet.update_allocation_pools(subnet_request.allocation_pools, cidr) return subnet def remove_subnet(self, subnet_id): """Remove data structures for a given subnet. IPAM-related data has no foreign key relationships to the neutron subnet, so the IPAM subnet must be removed manually. """ count = ipam_db_api.IpamSubnetManager.delete(self._context.session, subnet_id) if count < 1: LOG.error(_LE("IPAM subnet referenced to " "Neutron subnet %s does not exist"), subnet_id) raise n_exc.SubnetNotFound(subnet_id=subnet_id) neutron-8.4.0/neutron/ipam/drivers/neutrondb_ipam/db_api.py0000664000567000056710000002265113044372760025266 0ustar jenkinsjenkins00000000000000# Copyright 2015 OpenStack LLC. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License.
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_db import exception as db_exc from oslo_utils import uuidutils from sqlalchemy.orm import exc as orm_exc from neutron.ipam.drivers.neutrondb_ipam import db_models from neutron.ipam import exceptions as ipam_exc # Database operations for Neutron's DB-backed IPAM driver class IpamSubnetManager(object): @classmethod def load_by_neutron_subnet_id(cls, session, neutron_subnet_id): return session.query(db_models.IpamSubnet).filter_by( neutron_subnet_id=neutron_subnet_id).first() def __init__(self, ipam_subnet_id, neutron_subnet_id): self._ipam_subnet_id = ipam_subnet_id self._neutron_subnet_id = neutron_subnet_id @property def neutron_id(self): return self._neutron_subnet_id def create(self, session): """Create database models for an IPAM subnet. This method creates a subnet resource for the IPAM driver and associates it with its neutron identifier, if specified. :param session: database session. :returns: the identifier of the created IPAM subnet """ if not self._ipam_subnet_id: self._ipam_subnet_id = uuidutils.generate_uuid() ipam_subnet = db_models.IpamSubnet( id=self._ipam_subnet_id, neutron_subnet_id=self._neutron_subnet_id) session.add(ipam_subnet) return self._ipam_subnet_id @classmethod def delete(cls, session, neutron_subnet_id): """Delete IPAM subnet. The IPAM subnet no longer has a foreign key to the neutron subnet, so the delete needs to be performed manually :param session: database session :param neutron_subnet_id: neutron subnet id associated with ipam subnet """ return session.query(db_models.IpamSubnet).filter_by( neutron_subnet_id=neutron_subnet_id).delete() def create_pool(self, session, pool_start, pool_end): """Create an allocation pool and availability ranges for the subnet. This method does not perform any validation on parameters; it simply persists data in the database. :param pool_start: string expressing the start of the pool :param pool_end: string expressing the end of the pool :return: the newly created pool object. """ ip_pool = db_models.IpamAllocationPool( ipam_subnet_id=self._ipam_subnet_id, first_ip=pool_start, last_ip=pool_end) session.add(ip_pool) ip_range = db_models.IpamAvailabilityRange( allocation_pool=ip_pool, first_ip=pool_start, last_ip=pool_end) session.add(ip_range) return ip_pool def delete_allocation_pools(self, session): """Remove all allocation pools for the current subnet.
:param session: database session """ session.query(db_models.IpamAllocationPool).filter_by( ipam_subnet_id=self._ipam_subnet_id).delete() def list_pools(self, session): """Return pools for the current subnet.""" return session.query( db_models.IpamAllocationPool).filter_by( ipam_subnet_id=self._ipam_subnet_id) def _range_query(self, session): return session.query( db_models.IpamAvailabilityRange).join( db_models.IpamAllocationPool).filter_by( ipam_subnet_id=self._ipam_subnet_id) def get_first_range(self, session): """Return the first availability range for the subnet. :param session: database session :return: first available range as instance of neutron.ipam.drivers.neutrondb_ipam.db_models.IpamAvailabilityRange """ return self._range_query(session).first() def list_ranges_by_subnet_id(self, session): """Return availability ranges for a given IPAM subnet. :param session: database session :return: list of availability ranges as instances of neutron.ipam.drivers.neutrondb_ipam.db_models.IpamAvailabilityRange """ return self._range_query(session) def list_ranges_by_allocation_pool(self, session, allocation_pool_id): """Return availability ranges for a given pool. :param session: database session :param allocation_pool_id: allocation pool identifier :return: list of availability ranges as instances of neutron.ipam.drivers.neutrondb_ipam.db_models.IpamAvailabilityRange """ return session.query( db_models.IpamAvailabilityRange).join( db_models.IpamAllocationPool).filter_by( id=allocation_pool_id) def update_range(self, session, db_range, first_ip=None, last_ip=None): """Updates db_range to have new first_ip and last_ip. :param session: database session :param db_range: IpamAvailabilityRange db object :param first_ip: first ip address in range :param last_ip: last ip address in range :return: count of updated rows """ opts = {} if first_ip: opts['first_ip'] = str(first_ip) if last_ip: opts['last_ip'] = str(last_ip) if not opts: raise ipam_exc.IpamAvailabilityRangeNoChanges() try: return session.query( db_models.IpamAvailabilityRange).filter_by( allocation_pool_id=db_range.allocation_pool_id).filter_by( first_ip=db_range.first_ip).filter_by( last_ip=db_range.last_ip).update(opts) except orm_exc.ObjectDeletedError: raise db_exc.RetryRequest(ipam_exc.IPAllocationFailed()) def delete_range(self, session, db_range): """Return the count of deleted ranges. :param session: database session :param db_range: IpamAvailabilityRange db object """ try: return session.query( db_models.IpamAvailabilityRange).filter_by( allocation_pool_id=db_range.allocation_pool_id).filter_by( first_ip=db_range.first_ip).filter_by( last_ip=db_range.last_ip).delete() except orm_exc.ObjectDeletedError: raise db_exc.RetryRequest(ipam_exc.IPAllocationFailed()) def create_range(self, session, allocation_pool_id, range_start, range_end): """Create an availability range for a given pool. This method does not perform any validation on parameters; it simply persists data in the database.
:param session: database session :param allocation_pool_id: allocation pool identifier :param range_start: first ip address in the range :param range_end: last ip address in the range :return: the newly created availability range as an instance of neutron.ipam.drivers.neutrondb_ipam.db_models.IpamAvailabilityRange """ new_ip_range = db_models.IpamAvailabilityRange( allocation_pool_id=allocation_pool_id, first_ip=range_start, last_ip=range_end) session.add(new_ip_range) return new_ip_range def check_unique_allocation(self, session, ip_address): """Validate that the IP address on the subnet is not in use.""" iprequest = session.query(db_models.IpamAllocation).filter_by( ipam_subnet_id=self._ipam_subnet_id, status='ALLOCATED', ip_address=ip_address).first() if iprequest: return False return True def list_allocations(self, session, status='ALLOCATED'): """Return current allocations for the subnet. :param session: database session :param status: IP allocation status :returns: a list of IP allocation as instance of neutron.ipam.drivers.neutrondb_ipam.db_models.IpamAllocation """ return session.query( db_models.IpamAllocation).filter_by( ipam_subnet_id=self._ipam_subnet_id, status=status) def create_allocation(self, session, ip_address, status='ALLOCATED'): """Create an IP allocation entry. :param session: database session :param ip_address: the IP address to allocate :param status: IP allocation status """ ip_request = db_models.IpamAllocation( ip_address=ip_address, status=status, ipam_subnet_id=self._ipam_subnet_id) session.add(ip_request) def delete_allocation(self, session, ip_address): """Remove an IP allocation for this subnet. :param session: database session :param ip_address: IP address for which the allocation entry should be removed. """ return session.query(db_models.IpamAllocation).filter_by( ip_address=ip_address, ipam_subnet_id=self._ipam_subnet_id).delete( synchronize_session=False) neutron-8.4.0/neutron/ipam/exceptions.py0000664000567000056710000000515213044372760021542 0ustar jenkinsjenkins00000000000000# Copyright 2015 OpenStack LLC. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
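# NOTE: illustrative usage sketch (added commentary, not upstream code). # The exception classes below interpolate their 'message' attribute with the # keyword arguments passed to the constructor, so, for example, # raise IpAddressAlreadyAllocated(ip='10.0.0.5', subnet_id=subnet_id) # would render as "IP address 10.0.0.5 already allocated in subnet <id>"; # the kwargs must match the placeholders in each message string.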
from neutron._i18n import _ from neutron.common import exceptions class InvalidSubnetRequestType(exceptions.BadRequest): message = _("Cannot handle subnet of type %(subnet_type)s") class AddressCalculationFailure(exceptions.NeutronException): message = _("Unable to calculate %(address_type)s address because of:" "%(reason)s") class InvalidAddressType(exceptions.NeutronException): message = _("Unknown address type %(address_type)s") class IpAddressAllocationNotFound(exceptions.NeutronException): message = _("Unable to find IP address %(ip_address)s on subnet " "%(subnet_id)s") class IpAddressAlreadyAllocated(exceptions.Conflict): message = _("IP address %(ip)s already allocated in subnet %(subnet_id)s") class InvalidIpForSubnet(exceptions.BadRequest): message = _("IP address %(ip)s does not belong to subnet %(subnet_id)s") class InvalidAddressRequest(exceptions.BadRequest): message = _("The address allocation request could not be satisfied " "because: %(reason)s") class InvalidSubnetRequest(exceptions.BadRequest): message = _("The subnet request could not be satisfied because: " "%(reason)s") class AllocationOnAutoAddressSubnet(exceptions.NeutronException): message = _("IPv6 address %(ip)s cannot be directly " "assigned to a port on subnet %(subnet_id)s as the " "subnet is configured for automatic addresses") class IpAddressGenerationFailure(exceptions.Conflict): message = _("No more IP addresses available for subnet %(subnet_id)s.") class IPAllocationFailed(exceptions.NeutronException): message = _("IP allocation failed. Try again later.") class IpamAvailabilityRangeNoChanges(exceptions.NeutronException): message = _("New value for first_ip or last_ip has to be specified.") class IpamValueInvalid(exceptions.Conflict): def __init__(self, message=None): self.message = message super(IpamValueInvalid, self).__init__() neutron-8.4.0/neutron/api/0000775000567000056710000000000013044373210016616 5ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/api/api_common.py0000664000567000056710000002614313044372760021330 0ustar jenkinsjenkins00000000000000# Copyright 2011 Citrix System. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import functools from oslo_config import cfg from oslo_log import log as logging from six.moves.urllib import parse from webob import exc from neutron._i18n import _, _LW from neutron.common import constants from neutron.common import exceptions LOG = logging.getLogger(__name__) def get_filters(request, attr_info, skips=None): return get_filters_from_dict(request.GET.dict_of_lists(), attr_info, skips) def get_filters_from_dict(data, attr_info, skips=None): """Extracts the filters from a dict of query parameters. 
Returns a dict of lists for the filters: check=a&check=b&name=Bob& becomes: {'check': [u'a', u'b'], 'name': [u'Bob']} """ skips = skips or [] res = {} for key, values in data.items(): if key in skips: continue values = [v for v in values if v] key_attr_info = attr_info.get(key, {}) if 'convert_list_to' in key_attr_info: values = key_attr_info['convert_list_to'](values) elif 'convert_to' in key_attr_info: convert_to = key_attr_info['convert_to'] values = [convert_to(v) for v in values] if values: res[key] = values return res def get_previous_link(request, items, id_key): params = request.GET.copy() params.pop('marker', None) if items: marker = items[0][id_key] params['marker'] = marker params['page_reverse'] = True return "%s?%s" % (request.path_url, parse.urlencode(params)) def get_next_link(request, items, id_key): params = request.GET.copy() params.pop('marker', None) if items: marker = items[-1][id_key] params['marker'] = marker params.pop('page_reverse', None) return "%s?%s" % (request.path_url, parse.urlencode(params)) def get_limit_and_marker(request): """Return marker, limit tuple from request. :param request: `wsgi.Request` possibly containing 'marker' and 'limit' GET variables. 'marker' is the id of the last element the client has seen, and 'limit' is the maximum number of items to return. If limit == 0, it means pagination is not needed, so return None. """ max_limit = _get_pagination_max_limit() limit = _get_limit_param(request) if max_limit > 0: limit = min(max_limit, limit) or max_limit if not limit: return None, None marker = request.GET.get('marker', None) return limit, marker def _get_pagination_max_limit(): max_limit = -1 if (cfg.CONF.pagination_max_limit.lower() != constants.PAGINATION_INFINITE): try: max_limit = int(cfg.CONF.pagination_max_limit) if max_limit == 0: raise ValueError() except ValueError: LOG.warning(_LW("Invalid value for pagination_max_limit: %s. It " "should be an integer greater than 0"), cfg.CONF.pagination_max_limit) return max_limit def _get_limit_param(request): """Extract integer limit from request or fail.""" limit = request.GET.get('limit', 0) try: limit = int(limit) if limit >= 0: return limit except ValueError: pass msg = _("Limit must be an integer 0 or greater and not '%s'") % limit raise exceptions.BadRequest(resource='limit', msg=msg) def list_args(request, arg): """Extracts the list of arg from request.""" return [v for v in request.GET.getall(arg) if v] def get_sorts(request, attr_info): """Extract sort_key and sort_dir from request.
Return as: [(key1, value1), (key2, value2)] """ sort_keys = list_args(request, "sort_key") sort_dirs = list_args(request, "sort_dir") if len(sort_keys) != len(sort_dirs): msg = _("The number of sort_keys and sort_dirs must be the same") raise exc.HTTPBadRequest(explanation=msg) valid_dirs = [constants.SORT_DIRECTION_ASC, constants.SORT_DIRECTION_DESC] absent_keys = [x for x in sort_keys if x not in attr_info] if absent_keys: msg = _("%s is an invalid attribute for sort_keys") % absent_keys raise exc.HTTPBadRequest(explanation=msg) invalid_dirs = [x for x in sort_dirs if x not in valid_dirs] if invalid_dirs: msg = (_("%(invalid_dirs)s is an invalid value for sort_dirs, " "valid values are '%(asc)s' and '%(desc)s'") % {'invalid_dirs': invalid_dirs, 'asc': constants.SORT_DIRECTION_ASC, 'desc': constants.SORT_DIRECTION_DESC}) raise exc.HTTPBadRequest(explanation=msg) return list(zip(sort_keys, [x == constants.SORT_DIRECTION_ASC for x in sort_dirs])) def get_page_reverse(request): data = request.GET.get('page_reverse', 'False') return data.lower() == "true" def get_pagination_links(request, items, limit, marker, page_reverse, key="id"): key = key if key else 'id' links = [] if not limit: return links if not (len(items) < limit and not page_reverse): links.append({"rel": "next", "href": get_next_link(request, items, key)}) if not (len(items) < limit and page_reverse): links.append({"rel": "previous", "href": get_previous_link(request, items, key)}) return links class PaginationHelper(object): def __init__(self, request, primary_key='id'): self.request = request self.primary_key = primary_key def update_fields(self, original_fields, fields_to_add): pass def update_args(self, args): pass def paginate(self, items): return items def get_links(self, items): return {} class PaginationEmulatedHelper(PaginationHelper): def __init__(self, request, primary_key='id'): super(PaginationEmulatedHelper, self).__init__(request, primary_key) self.limit, self.marker = get_limit_and_marker(request) self.page_reverse = get_page_reverse(request) def update_fields(self, original_fields, fields_to_add): if not original_fields: return if self.primary_key not in original_fields: original_fields.append(self.primary_key) fields_to_add.append(self.primary_key) def paginate(self, items): if not self.limit: return items i = -1 if self.marker: for item in items: i = i + 1 if item[self.primary_key] == self.marker: break if self.page_reverse: return items[i - self.limit:i] return items[i + 1:i + self.limit + 1] def get_links(self, items): return get_pagination_links( self.request, items, self.limit, self.marker, self.page_reverse, self.primary_key) class PaginationNativeHelper(PaginationEmulatedHelper): def update_args(self, args): if self.primary_key not in dict(args.get('sorts', [])).keys(): args.setdefault('sorts', []).append((self.primary_key, True)) args.update({'limit': self.limit, 'marker': self.marker, 'page_reverse': self.page_reverse}) def paginate(self, items): return items class NoPaginationHelper(PaginationHelper): pass class SortingHelper(object): def __init__(self, request, attr_info): pass def update_args(self, args): pass def update_fields(self, original_fields, fields_to_add): pass def sort(self, items): return items class SortingEmulatedHelper(SortingHelper): def __init__(self, request, attr_info): super(SortingEmulatedHelper, self).__init__(request, attr_info) self.sort_dict = get_sorts(request, attr_info) def update_fields(self, original_fields, fields_to_add): if not original_fields: return for key in
dict(self.sort_dict).keys(): if key not in original_fields: original_fields.append(key) fields_to_add.append(key) def sort(self, items): def cmp_func(obj1, obj2): for key, direction in self.sort_dict: o1 = obj1[key] o2 = obj2[key] if o1 is None and o2 is None: ret = 0 elif o1 is None and o2 is not None: ret = -1 elif o1 is not None and o2 is None: ret = 1 else: ret = (o1 > o2) - (o1 < o2) if ret: return ret * (1 if direction else -1) return 0 return sorted(items, key=functools.cmp_to_key(cmp_func)) class SortingNativeHelper(SortingHelper): def __init__(self, request, attr_info): self.sort_dict = get_sorts(request, attr_info) def update_args(self, args): args['sorts'] = self.sort_dict class NoSortingHelper(SortingHelper): pass class NeutronController(object): """Base controller class for Neutron API.""" # _resource_name will be redefined in sub concrete controller _resource_name = None def __init__(self, plugin): self._plugin = plugin super(NeutronController, self).__init__() def _prepare_request_body(self, body, params): """Verifies required parameters are in request body. Sets default value for missing optional parameters. Body argument must be the deserialized body. """ try: if body is None: # Initialize empty resource for setting default value body = {self._resource_name: {}} data = body[self._resource_name] except KeyError: # raise if _resource_name is not in req body. raise exc.HTTPBadRequest(_("Unable to find '%s' in request body") % self._resource_name) for param in params: param_name = param['param-name'] param_value = data.get(param_name) # If the parameter wasn't found and it was required, return 400 if param_value is None and param['required']: msg = (_("Failed to parse request. " "Parameter '%s' not specified") % param_name) LOG.error(msg) raise exc.HTTPBadRequest(msg) data[param_name] = param_value or param.get('default-value') return body neutron-8.4.0/neutron/api/__init__.py0000664000567000056710000000000013044372736020731 0ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/api/v2/0000775000567000056710000000000013044373210017145 5ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/api/v2/router.py0000664000567000056710000001173213044372760021054 0ustar jenkinsjenkins00000000000000# Copyright (c) 2012 OpenStack Foundation. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
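# NOTE: illustrative request flow (added commentary, not upstream code). # The APIRouter below wires each entry in RESOURCES to a controller built by # base.create_resource(); a GET on the 'networks' collection, for instance, # dispatches to that controller's index action, while a GET on '/' is served # by the Index application.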
from oslo_config import cfg from oslo_service import wsgi as base_wsgi import routes as routes_mapper import six import six.moves.urllib.parse as urlparse import webob import webob.dec import webob.exc from neutron.api import extensions from neutron.api.v2 import attributes from neutron.api.v2 import base from neutron import manager from neutron import policy from neutron.quota import resource_registry from neutron import wsgi RESOURCES = {'network': 'networks', 'subnet': 'subnets', 'subnetpool': 'subnetpools', 'port': 'ports'} SUB_RESOURCES = {} COLLECTION_ACTIONS = ['index', 'create'] MEMBER_ACTIONS = ['show', 'update', 'delete'] REQUIREMENTS = {'id': attributes.UUID_PATTERN, 'format': 'json'} class Index(wsgi.Application): def __init__(self, resources): self.resources = resources @webob.dec.wsgify(RequestClass=wsgi.Request) def __call__(self, req): metadata = {} layout = [] for name, collection in six.iteritems(self.resources): href = urlparse.urljoin(req.path_url, collection) resource = {'name': name, 'collection': collection, 'links': [{'rel': 'self', 'href': href}]} layout.append(resource) response = dict(resources=layout) content_type = req.best_match_content_type() body = wsgi.Serializer(metadata=metadata).serialize(response, content_type) return webob.Response(body=body, content_type=content_type) class APIRouter(base_wsgi.Router): @classmethod def factory(cls, global_config, **local_config): return cls(**local_config) def __init__(self, **local_config): mapper = routes_mapper.Mapper() plugin = manager.NeutronManager.get_plugin() ext_mgr = extensions.PluginAwareExtensionManager.get_instance() ext_mgr.extend_resources("2.0", attributes.RESOURCE_ATTRIBUTE_MAP) col_kwargs = dict(collection_actions=COLLECTION_ACTIONS, member_actions=MEMBER_ACTIONS) def _map_resource(collection, resource, params, parent=None): allow_bulk = cfg.CONF.allow_bulk allow_pagination = cfg.CONF.allow_pagination allow_sorting = cfg.CONF.allow_sorting controller = base.create_resource( collection, resource, plugin, params, allow_bulk=allow_bulk, parent=parent, allow_pagination=allow_pagination, allow_sorting=allow_sorting) path_prefix = None if parent: path_prefix = "/%s/{%s_id}/%s" % (parent['collection_name'], parent['member_name'], collection) mapper_kwargs = dict(controller=controller, requirements=REQUIREMENTS, path_prefix=path_prefix, **col_kwargs) return mapper.collection(collection, resource, **mapper_kwargs) mapper.connect('index', '/', controller=Index(RESOURCES)) for resource in RESOURCES: _map_resource(RESOURCES[resource], resource, attributes.RESOURCE_ATTRIBUTE_MAP.get( RESOURCES[resource], dict())) resource_registry.register_resource_by_name(resource) for resource in SUB_RESOURCES: _map_resource(SUB_RESOURCES[resource]['collection_name'], resource, attributes.RESOURCE_ATTRIBUTE_MAP.get( SUB_RESOURCES[resource]['collection_name'], dict()), SUB_RESOURCES[resource]['parent']) # Certain policy checks require that the extensions are loaded # and the RESOURCE_ATTRIBUTE_MAP populated before they can be # properly initialized. This can only be claimed with certainty # once this point in the code has been reached. In the event # that the policies have been initialized before this point, # calling reset will cause the next policy check to # re-initialize with all of the required data in place. 
policy.reset() super(APIRouter, self).__init__(mapper) neutron-8.4.0/neutron/api/v2/__init__.py0000664000567000056710000000000013044372736021260 0ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/api/v2/attributes.py0000664000567000056710000011040613044372760021720 0ustar jenkinsjenkins00000000000000# Copyright (c) 2012 OpenStack Foundation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import functools import re import netaddr from oslo_log import log as logging from oslo_utils import uuidutils import six import webob.exc from neutron._i18n import _ from neutron.common import constants from neutron.common import exceptions as n_exc LOG = logging.getLogger(__name__) ATTR_NOT_SPECIFIED = object() # Defining a constant to avoid repeating string literal in several modules SHARED = 'shared' # Used by range check to indicate no limit for a bound. UNLIMITED = None NAME_MAX_LEN = 255 TENANT_ID_MAX_LEN = 255 DESCRIPTION_MAX_LEN = 255 LONG_DESCRIPTION_MAX_LEN = 1024 DEVICE_ID_MAX_LEN = 255 DEVICE_OWNER_MAX_LEN = 255 def _verify_dict_keys(expected_keys, target_dict, strict=True): """Allows to verify keys in a dictionary. :param expected_keys: A list of keys expected to be present. :param target_dict: The dictionary which should be verified. :param strict: Specifies whether additional keys are allowed to be present. :return: True, if keys in the dictionary correspond to the specification. """ if not isinstance(target_dict, dict): msg = (_("Invalid input. '%(target_dict)s' must be a dictionary " "with keys: %(expected_keys)s") % {'target_dict': target_dict, 'expected_keys': expected_keys}) LOG.debug(msg) return msg expected_keys = set(expected_keys) provided_keys = set(target_dict.keys()) predicate = expected_keys.__eq__ if strict else expected_keys.issubset if not predicate(provided_keys): msg = (_("Validation of dictionary's keys failed. 
" "Expected keys: %(expected_keys)s " "Provided keys: %(provided_keys)s") % {'expected_keys': expected_keys, 'provided_keys': provided_keys}) LOG.debug(msg) return msg def is_attr_set(attribute): return not (attribute is None or attribute is ATTR_NOT_SPECIFIED) def _validate_list_of_items(item_validator, data, *args, **kwargs): if not isinstance(data, list): msg = _("'%s' is not a list") % data return msg if len(set(data)) != len(data): msg = _("Duplicate items in the list: '%s'") % ', '.join(data) return msg for item in data: msg = item_validator(item, *args, **kwargs) if msg: return msg def _validate_values(data, valid_values=None): if data not in valid_values: msg = (_("'%(data)s' is not in %(valid_values)s") % {'data': data, 'valid_values': valid_values}) LOG.debug(msg) return msg def _validate_not_empty_string_or_none(data, max_len=None): if data is not None: return _validate_not_empty_string(data, max_len=max_len) def _validate_not_empty_string(data, max_len=None): msg = _validate_string(data, max_len=max_len) if msg: return msg if not data.strip(): msg = _("'%s' Blank strings are not permitted") % data LOG.debug(msg) return msg def _validate_string_or_none(data, max_len=None): if data is not None: return _validate_string(data, max_len=max_len) def _validate_string(data, max_len=None): if not isinstance(data, six.string_types): msg = _("'%s' is not a valid string") % data LOG.debug(msg) return msg if max_len is not None and len(data) > max_len: msg = (_("'%(data)s' exceeds maximum length of %(max_len)s") % {'data': data, 'max_len': max_len}) LOG.debug(msg) return msg validate_list_of_unique_strings = functools.partial(_validate_list_of_items, _validate_string) def _validate_boolean(data, valid_values=None): try: convert_to_boolean(data) except n_exc.InvalidInput: msg = _("'%s' is not a valid boolean value") % data LOG.debug(msg) return msg def _validate_range(data, valid_values=None): """Check that integer value is within a range provided. Test is inclusive. Allows either limit to be ignored, to allow checking ranges where only the lower or upper limit matter. It is expected that the limits provided are valid integers or the value None. """ min_value = valid_values[0] max_value = valid_values[1] try: data = int(data) except (ValueError, TypeError): msg = _("'%s' is not an integer") % data LOG.debug(msg) return msg if min_value is not UNLIMITED and data < min_value: msg = _("'%(data)s' is too small - must be at least " "'%(limit)d'") % {'data': data, 'limit': min_value} LOG.debug(msg) return msg if max_value is not UNLIMITED and data > max_value: msg = _("'%(data)s' is too large - must be no larger than " "'%(limit)d'") % {'data': data, 'limit': max_value} LOG.debug(msg) return msg def _validate_no_whitespace(data): """Validates that input has no whitespace.""" if re.search(r'\s', data): msg = _("'%s' contains whitespace") % data LOG.debug(msg) raise n_exc.InvalidInput(error_message=msg) return data def _validate_mac_address(data, valid_values=None): try: valid_mac = netaddr.valid_mac(_validate_no_whitespace(data)) except Exception: valid_mac = False if valid_mac: valid_mac = not netaddr.EUI(data) in map(netaddr.EUI, constants.INVALID_MAC_ADDRESSES) # TODO(arosen): The code in this file should be refactored # so it catches the correct exceptions. _validate_no_whitespace # raises AttributeError if data is None. 
if not valid_mac: msg = _("'%s' is not a valid MAC address") % data LOG.debug(msg) return msg def _validate_mac_address_or_none(data, valid_values=None): if data is not None: return _validate_mac_address(data, valid_values) def _validate_ip_address(data, valid_values=None): msg = None try: # netaddr.core.ZEROFILL is only applicable to IPv4. # It will remove leading zeros from IPv4 address octets. ip = netaddr.IPAddress(_validate_no_whitespace(data), flags=netaddr.core.ZEROFILL) # The following are quick checks for IPv6 (has ':') and # IPv4. (has 3 periods like 'xx.xx.xx.xx') # NOTE(yamamoto): netaddr uses libraries provided by the underlying # platform to convert addresses. For example, inet_aton(3). # Some platforms, including NetBSD and OS X, have inet_aton # implementation which accepts more varying forms of addresses than # we want to accept here. The following check is to reject such # addresses. For Example: # >>> netaddr.IPAddress('1' * 59) # IPAddress('199.28.113.199') # >>> netaddr.IPAddress(str(int('1' * 59) & 0xffffffff)) # IPAddress('199.28.113.199') # >>> if ':' not in data and data.count('.') != 3: msg = _("'%s' is not a valid IP address") % data # A leading '0' in IPv4 address may be interpreted as an octal number, # e.g. 011 octal is 9 decimal. Since there is no standard saying # whether an IP address with leading '0's should be interpreted as # octal or decimal, we reject leading '0's to avoid ambiguity. elif ip.version == 4 and str(ip) != data: msg = _("'%(data)s' is not an accepted IP address, " "'%(ip)s' is recommended") % {"data": data, "ip": ip} except Exception: msg = _("'%s' is not a valid IP address") % data if msg: LOG.debug(msg) return msg def _validate_ip_pools(data, valid_values=None): """Validate that start and end IP addresses are present. In addition to this, the IP addresses will also be validated. """ if not isinstance(data, list): msg = _("Invalid data format for IP pool: '%s'") % data LOG.debug(msg) return msg expected_keys = ['start', 'end'] for ip_pool in data: msg = _verify_dict_keys(expected_keys, ip_pool) if msg: return msg for k in expected_keys: msg = _validate_ip_address(ip_pool[k]) if msg: return msg def _validate_fixed_ips(data, valid_values=None): if not isinstance(data, list): msg = _("Invalid data format for fixed IP: '%s'") % data LOG.debug(msg) return msg ips = [] for fixed_ip in data: if not isinstance(fixed_ip, dict): msg = _("Invalid data format for fixed IP: '%s'") % fixed_ip LOG.debug(msg) return msg if 'ip_address' in fixed_ip: # Ensure that duplicate entries are not set - just checking IP # suffices. Duplicate subnet_id's are legitimate. fixed_ip_address = fixed_ip['ip_address'] if fixed_ip_address in ips: msg = _("Duplicate IP address '%s'") % fixed_ip_address LOG.debug(msg) else: msg = _validate_ip_address(fixed_ip_address) if msg: return msg ips.append(fixed_ip_address) if 'subnet_id' in fixed_ip: msg = _validate_uuid(fixed_ip['subnet_id']) if msg: return msg def _validate_nameservers(data, valid_values=None): if not hasattr(data, '__iter__'): msg = _("Invalid data format for nameserver: '%s'") % data LOG.debug(msg) return msg hosts = [] for host in data: # This must be an IP address only msg = _validate_ip_address(host) if msg: msg = _("'%(host)s' is not a valid nameserver. 
%(msg)s") % { 'host': host, 'msg': msg} LOG.debug(msg) return msg if host in hosts: msg = _("Duplicate nameserver '%s'") % host LOG.debug(msg) return msg hosts.append(host) def _validate_hostroutes(data, valid_values=None): if not isinstance(data, list): msg = _("Invalid data format for hostroute: '%s'") % data LOG.debug(msg) return msg expected_keys = ['destination', 'nexthop'] hostroutes = [] for hostroute in data: msg = _verify_dict_keys(expected_keys, hostroute) if msg: return msg msg = _validate_subnet(hostroute['destination']) if msg: return msg msg = _validate_ip_address(hostroute['nexthop']) if msg: return msg if hostroute in hostroutes: msg = _("Duplicate hostroute '%s'") % hostroute LOG.debug(msg) return msg hostroutes.append(hostroute) def _validate_ip_address_or_none(data, valid_values=None): if data is not None: return _validate_ip_address(data, valid_values) def _validate_subnet(data, valid_values=None): msg = None try: net = netaddr.IPNetwork(_validate_no_whitespace(data)) if '/' not in data or (net.version == 4 and str(net) != data): msg = _("'%(data)s' isn't a recognized IP subnet cidr," " '%(cidr)s' is recommended") % {"data": data, "cidr": net.cidr} else: return except Exception: msg = _("'%s' is not a valid IP subnet") % data if msg: LOG.debug(msg) return msg def _validate_subnet_or_none(data, valid_values=None): if data is not None: return _validate_subnet(data, valid_values) _validate_subnet_list = functools.partial(_validate_list_of_items, _validate_subnet) def _validate_regex(data, valid_values=None): try: if re.match(valid_values, data): return except TypeError: pass msg = _("'%s' is not a valid input") % data LOG.debug(msg) return msg def _validate_regex_or_none(data, valid_values=None): if data is not None: return _validate_regex(data, valid_values) def _validate_subnetpool_id(data, valid_values=None): if data != constants.IPV6_PD_POOL_ID: return _validate_uuid_or_none(data, valid_values) def _validate_subnetpool_id_or_none(data, valid_values=None): if data is not None: return _validate_subnetpool_id(data, valid_values) def _validate_uuid(data, valid_values=None): if not uuidutils.is_uuid_like(data): msg = _("'%s' is not a valid UUID") % data LOG.debug(msg) return msg def _validate_uuid_or_none(data, valid_values=None): if data is not None: return _validate_uuid(data) _validate_uuid_list = functools.partial(_validate_list_of_items, _validate_uuid) def _validate_dict_item(key, key_validator, data): # Find conversion function, if any, and apply it conv_func = key_validator.get('convert_to') if conv_func: data[key] = conv_func(data.get(key)) # Find validator function # TODO(salv-orlando): Structure of dict attributes should be improved # to avoid iterating over items val_func = val_params = None for (k, v) in six.iteritems(key_validator): if k.startswith('type:'): # ask forgiveness, not permission try: val_func = validators[k] except KeyError: msg = _("Validator '%s' does not exist.") % k LOG.debug(msg) return msg val_params = v break # Process validation if val_func: return val_func(data.get(key), val_params) def _validate_dict(data, key_specs=None): if not isinstance(data, dict): msg = _("'%s' is not a dictionary") % data LOG.debug(msg) return msg # Do not perform any further validation, if no constraints are supplied if not key_specs: return # Check whether all required keys are present required_keys = [key for key, spec in six.iteritems(key_specs) if spec.get('required')] if required_keys: msg = _verify_dict_keys(required_keys, data, False) if msg: return msg # 
Perform validation and conversion of all values # according to the specifications. for key, key_validator in [(k, v) for k, v in six.iteritems(key_specs) if k in data]: msg = _validate_dict_item(key, key_validator, data) if msg: return msg def _validate_dict_or_none(data, key_specs=None): if data is not None: return _validate_dict(data, key_specs) def _validate_dict_or_empty(data, key_specs=None): if data != {}: return _validate_dict(data, key_specs) def _validate_dict_or_nodata(data, key_specs=None): if data: return _validate_dict(data, key_specs) def _validate_non_negative(data, valid_values=None): try: data = int(data) except (ValueError, TypeError): msg = _("'%s' is not an integer") % data LOG.debug(msg) return msg if data < 0: msg = _("'%s' should be non-negative") % data LOG.debug(msg) return msg def convert_to_boolean(data): if isinstance(data, six.string_types): val = data.lower() if val == "true" or val == "1": return True if val == "false" or val == "0": return False elif isinstance(data, bool): return data elif isinstance(data, int): if data == 0: return False elif data == 1: return True msg = _("'%s' cannot be converted to boolean") % data raise n_exc.InvalidInput(error_message=msg) def convert_to_boolean_if_not_none(data): if data is not None: return convert_to_boolean(data) def convert_to_int(data): try: return int(data) except (ValueError, TypeError): msg = _("'%s' is not an integer") % data raise n_exc.InvalidInput(error_message=msg) def convert_to_int_if_not_none(data): if data is not None: return convert_to_int(data) return data def convert_to_positive_float_or_none(val): # NOTE(salv-orlando): This conversion function is currently used only by # a vendor-specific extension. It is used for # the port's RXTX factor in neutron.plugins.vmware.extensions.qos. # It is, however, deemed generic enough to live in this module as it # might be used for other API attributes in the future. if val is None: return try: val = float(val) if val < 0: raise ValueError() except (ValueError, TypeError): msg = _("'%s' must be a non-negative decimal.") % val raise n_exc.InvalidInput(error_message=msg) return val def convert_kvp_str_to_list(data): """Convert a value of the form 'key=value' to ['key', 'value']. :raises: n_exc.InvalidInput if any of the strings are malformed (e.g. do not contain a key). """ kvp = [x.strip() for x in data.split('=', 1)] if len(kvp) == 2 and kvp[0]: return kvp msg = _("'%s' is not of the form <key>=[value]") % data raise n_exc.InvalidInput(error_message=msg) def convert_kvp_list_to_dict(kvp_list): """Convert a list of 'key=value' strings to a dict. :raises: n_exc.InvalidInput if any of the strings are malformed (e.g. do not contain a key) or if any of the keys appear more than once. """ if kvp_list == ['True']: # No values were provided (i.e.
'--flag-name') return {} kvp_map = {} for kvp_str in kvp_list: key, value = convert_kvp_str_to_list(kvp_str) kvp_map.setdefault(key, set()) kvp_map[key].add(value) return dict((x, list(y)) for x, y in six.iteritems(kvp_map)) def convert_none_to_empty_list(value): return [] if value is None else value def convert_none_to_empty_dict(value): return {} if value is None else value def convert_to_list(data): if data is None: return [] elif hasattr(data, '__iter__') and not isinstance(data, six.string_types): return list(data) else: return [data] HEX_ELEM = '[0-9A-Fa-f]' UUID_PATTERN = '-'.join([HEX_ELEM + '{8}', HEX_ELEM + '{4}', HEX_ELEM + '{4}', HEX_ELEM + '{4}', HEX_ELEM + '{12}']) # Note: In order to ensure that the MAC address is unicast, the first byte # must be even. MAC_PATTERN = "^%s[aceACE02468](:%s{2}){5}$" % (HEX_ELEM, HEX_ELEM) # Dictionary that maintains a list of validation functions validators = {'type:dict': _validate_dict, 'type:dict_or_none': _validate_dict_or_none, 'type:dict_or_empty': _validate_dict_or_empty, 'type:dict_or_nodata': _validate_dict_or_nodata, 'type:fixed_ips': _validate_fixed_ips, 'type:hostroutes': _validate_hostroutes, 'type:ip_address': _validate_ip_address, 'type:ip_address_or_none': _validate_ip_address_or_none, 'type:ip_pools': _validate_ip_pools, 'type:mac_address': _validate_mac_address, 'type:mac_address_or_none': _validate_mac_address_or_none, 'type:nameservers': _validate_nameservers, 'type:non_negative': _validate_non_negative, 'type:range': _validate_range, 'type:regex': _validate_regex, 'type:regex_or_none': _validate_regex_or_none, 'type:string': _validate_string, 'type:string_or_none': _validate_string_or_none, 'type:not_empty_string': _validate_not_empty_string, 'type:not_empty_string_or_none': _validate_not_empty_string_or_none, 'type:subnet': _validate_subnet, 'type:subnet_list': _validate_subnet_list, 'type:subnet_or_none': _validate_subnet_or_none, 'type:subnetpool_id': _validate_subnetpool_id, 'type:subnetpool_id_or_none': _validate_subnetpool_id_or_none, 'type:uuid': _validate_uuid, 'type:uuid_or_none': _validate_uuid_or_none, 'type:uuid_list': _validate_uuid_list, 'type:values': _validate_values, 'type:boolean': _validate_boolean, 'type:list_of_unique_strings': validate_list_of_unique_strings} # Define constants for base resource name NETWORK = 'network' NETWORKS = '%ss' % NETWORK PORT = 'port' PORTS = '%ss' % PORT SUBNET = 'subnet' SUBNETS = '%ss' % SUBNET SUBNETPOOL = 'subnetpool' SUBNETPOOLS = '%ss' % SUBNETPOOL # Note: a default of ATTR_NOT_SPECIFIED indicates that an # attribute is not required, but will be generated by the plugin # if it is not specified. In particular, a value of ATTR_NOT_SPECIFIED # is different from an attribute that has been specified with a value of # None. For example, if 'gateway_ip' is omitted in a request to # create a subnet, the plugin will receive ATTR_NOT_SPECIFIED # and the default gateway_ip will be generated. # However, if gateway_ip is specified as None, this means that # the subnet does not have a gateway IP. # The following is a short reference for understanding attribute info: # default: default value of the attribute (if missing, the attribute # becomes mandatory). # allow_post: the attribute can be used on POST requests. # allow_put: the attribute can be used on PUT requests. # validate: specifies rules for validating data in the attribute. # convert_to: transformation to apply to the value before it is returned # is_visible: the attribute is returned in GET responses.
# required_by_policy: the attribute is required by the policy engine and # should therefore be filled by the API layer even if not present in # request body. # enforce_policy: the attribute is actively part of the policy enforcing # mechanism, ie: there might be rules which refer to this attribute. RESOURCE_ATTRIBUTE_MAP = { NETWORKS: { 'id': {'allow_post': False, 'allow_put': False, 'validate': {'type:uuid': None}, 'is_visible': True, 'primary_key': True}, 'name': {'allow_post': True, 'allow_put': True, 'validate': {'type:string': NAME_MAX_LEN}, 'default': '', 'is_visible': True}, 'subnets': {'allow_post': False, 'allow_put': False, 'default': [], 'is_visible': True}, 'admin_state_up': {'allow_post': True, 'allow_put': True, 'default': True, 'convert_to': convert_to_boolean, 'is_visible': True}, 'status': {'allow_post': False, 'allow_put': False, 'is_visible': True}, 'tenant_id': {'allow_post': True, 'allow_put': False, 'validate': {'type:string': TENANT_ID_MAX_LEN}, 'required_by_policy': True, 'is_visible': True}, SHARED: {'allow_post': True, 'allow_put': True, 'default': False, 'convert_to': convert_to_boolean, 'is_visible': True, 'required_by_policy': True, 'enforce_policy': True}, }, PORTS: { 'id': {'allow_post': False, 'allow_put': False, 'validate': {'type:uuid': None}, 'is_visible': True, 'primary_key': True}, 'name': {'allow_post': True, 'allow_put': True, 'default': '', 'validate': {'type:string': NAME_MAX_LEN}, 'is_visible': True}, 'network_id': {'allow_post': True, 'allow_put': False, 'required_by_policy': True, 'validate': {'type:uuid': None}, 'is_visible': True}, 'admin_state_up': {'allow_post': True, 'allow_put': True, 'default': True, 'convert_to': convert_to_boolean, 'is_visible': True}, 'mac_address': {'allow_post': True, 'allow_put': True, 'default': ATTR_NOT_SPECIFIED, 'validate': {'type:mac_address': None}, 'enforce_policy': True, 'is_visible': True}, 'fixed_ips': {'allow_post': True, 'allow_put': True, 'default': ATTR_NOT_SPECIFIED, 'convert_list_to': convert_kvp_list_to_dict, 'validate': {'type:fixed_ips': None}, 'enforce_policy': True, 'is_visible': True}, 'device_id': {'allow_post': True, 'allow_put': True, 'validate': {'type:string': DEVICE_ID_MAX_LEN}, 'default': '', 'is_visible': True}, 'device_owner': {'allow_post': True, 'allow_put': True, 'validate': {'type:string': DEVICE_OWNER_MAX_LEN}, 'default': '', 'enforce_policy': True, 'is_visible': True}, 'tenant_id': {'allow_post': True, 'allow_put': False, 'validate': {'type:string': TENANT_ID_MAX_LEN}, 'required_by_policy': True, 'is_visible': True}, 'status': {'allow_post': False, 'allow_put': False, 'is_visible': True}, }, SUBNETS: { 'id': {'allow_post': False, 'allow_put': False, 'validate': {'type:uuid': None}, 'is_visible': True, 'primary_key': True}, 'name': {'allow_post': True, 'allow_put': True, 'default': '', 'validate': {'type:string': NAME_MAX_LEN}, 'is_visible': True}, 'ip_version': {'allow_post': True, 'allow_put': False, 'convert_to': convert_to_int, 'validate': {'type:values': [4, 6]}, 'is_visible': True}, 'network_id': {'allow_post': True, 'allow_put': False, 'required_by_policy': True, 'validate': {'type:uuid': None}, 'is_visible': True}, 'subnetpool_id': {'allow_post': True, 'allow_put': False, 'default': ATTR_NOT_SPECIFIED, 'required_by_policy': False, 'validate': {'type:subnetpool_id_or_none': None}, 'is_visible': True}, 'prefixlen': {'allow_post': True, 'allow_put': False, 'validate': {'type:non_negative': None}, 'convert_to': convert_to_int, 'default': ATTR_NOT_SPECIFIED, 
'required_by_policy': False, 'is_visible': False}, 'cidr': {'allow_post': True, 'allow_put': False, 'default': ATTR_NOT_SPECIFIED, 'validate': {'type:subnet_or_none': None}, 'required_by_policy': False, 'is_visible': True}, 'gateway_ip': {'allow_post': True, 'allow_put': True, 'default': ATTR_NOT_SPECIFIED, 'validate': {'type:ip_address_or_none': None}, 'is_visible': True}, 'allocation_pools': {'allow_post': True, 'allow_put': True, 'default': ATTR_NOT_SPECIFIED, 'validate': {'type:ip_pools': None}, 'is_visible': True}, 'dns_nameservers': {'allow_post': True, 'allow_put': True, 'convert_to': convert_none_to_empty_list, 'default': ATTR_NOT_SPECIFIED, 'validate': {'type:nameservers': None}, 'is_visible': True}, 'host_routes': {'allow_post': True, 'allow_put': True, 'convert_to': convert_none_to_empty_list, 'default': ATTR_NOT_SPECIFIED, 'validate': {'type:hostroutes': None}, 'is_visible': True}, 'tenant_id': {'allow_post': True, 'allow_put': False, 'validate': {'type:string': TENANT_ID_MAX_LEN}, 'required_by_policy': True, 'is_visible': True}, 'enable_dhcp': {'allow_post': True, 'allow_put': True, 'default': True, 'convert_to': convert_to_boolean, 'is_visible': True}, 'ipv6_ra_mode': {'allow_post': True, 'allow_put': False, 'default': ATTR_NOT_SPECIFIED, 'validate': {'type:values': constants.IPV6_MODES}, 'is_visible': True}, 'ipv6_address_mode': {'allow_post': True, 'allow_put': False, 'default': ATTR_NOT_SPECIFIED, 'validate': {'type:values': constants.IPV6_MODES}, 'is_visible': True}, SHARED: {'allow_post': False, 'allow_put': False, 'default': False, 'convert_to': convert_to_boolean, 'is_visible': False, 'required_by_policy': True, 'enforce_policy': True}, }, SUBNETPOOLS: { 'id': {'allow_post': False, 'allow_put': False, 'validate': {'type:uuid': None}, 'is_visible': True, 'primary_key': True}, 'name': {'allow_post': True, 'allow_put': True, 'validate': {'type:not_empty_string': None}, 'is_visible': True}, 'tenant_id': {'allow_post': True, 'allow_put': False, 'validate': {'type:string': TENANT_ID_MAX_LEN}, 'required_by_policy': True, 'is_visible': True}, 'prefixes': {'allow_post': True, 'allow_put': True, 'validate': {'type:subnet_list': None}, 'is_visible': True}, 'default_quota': {'allow_post': True, 'allow_put': True, 'validate': {'type:non_negative': None}, 'convert_to': convert_to_int, 'default': ATTR_NOT_SPECIFIED, 'is_visible': True}, 'ip_version': {'allow_post': False, 'allow_put': False, 'is_visible': True}, 'default_prefixlen': {'allow_post': True, 'allow_put': True, 'validate': {'type:non_negative': None}, 'convert_to': convert_to_int, 'default': ATTR_NOT_SPECIFIED, 'is_visible': True}, 'min_prefixlen': {'allow_post': True, 'allow_put': True, 'default': ATTR_NOT_SPECIFIED, 'validate': {'type:non_negative': None}, 'convert_to': convert_to_int, 'is_visible': True}, 'max_prefixlen': {'allow_post': True, 'allow_put': True, 'default': ATTR_NOT_SPECIFIED, 'validate': {'type:non_negative': None}, 'convert_to': convert_to_int, 'is_visible': True}, 'is_default': {'allow_post': True, 'allow_put': True, 'default': False, 'convert_to': convert_to_boolean, 'is_visible': True, 'required_by_policy': True, 'enforce_policy': True}, SHARED: {'allow_post': True, 'allow_put': False, 'default': False, 'convert_to': convert_to_boolean, 'is_visible': True, 'required_by_policy': True, 'enforce_policy': True}, } } # Identify the attribute used by a resource to reference another resource RESOURCE_FOREIGN_KEYS = { NETWORKS: 'network_id' } # Store plural/singular mappings PLURALS = {NETWORKS: NETWORK, 
PORTS: PORT, SUBNETS: SUBNET, SUBNETPOOLS: SUBNETPOOL, 'dns_nameservers': 'dns_nameserver', 'host_routes': 'host_route', 'allocation_pools': 'allocation_pool', 'fixed_ips': 'fixed_ip', 'extensions': 'extension'} # Store singular/plural mappings. This dictionary is populated by # get_resource_info REVERSED_PLURALS = {} def get_collection_info(collection): """Helper function to retrieve attribute info. :param collection: Collection or plural name of the resource """ return RESOURCE_ATTRIBUTE_MAP.get(collection) def get_resource_info(resource): """Helper function to retrieve attribute info. :param resource: resource name """ plural_name = REVERSED_PLURALS.get(resource) if not plural_name: for (plural, singular) in PLURALS.items(): if singular == resource: plural_name = plural REVERSED_PLURALS[resource] = plural_name return RESOURCE_ATTRIBUTE_MAP.get(plural_name) def fill_default_value(attr_info, res_dict, exc_cls=ValueError, check_allow_post=True): for attr, attr_vals in six.iteritems(attr_info): if attr_vals['allow_post']: if ('default' not in attr_vals and attr not in res_dict): msg = _("Failed to parse request. Required " "attribute '%s' not specified") % attr raise exc_cls(msg) res_dict[attr] = res_dict.get(attr, attr_vals.get('default')) elif check_allow_post: if attr in res_dict: msg = _("Attribute '%s' not allowed in POST") % attr raise exc_cls(msg) def convert_value(attr_info, res_dict, exc_cls=ValueError): for attr, attr_vals in six.iteritems(attr_info): if (attr not in res_dict or res_dict[attr] is ATTR_NOT_SPECIFIED): continue # Convert values if necessary if 'convert_to' in attr_vals: res_dict[attr] = attr_vals['convert_to'](res_dict[attr]) # Check that configured values are correct if 'validate' not in attr_vals: continue for rule in attr_vals['validate']: res = validators[rule](res_dict[attr], attr_vals['validate'][rule]) if res: msg_dict = dict(attr=attr, reason=res) msg = _("Invalid input for %(attr)s. " "Reason: %(reason)s.") % msg_dict raise exc_cls(msg) def populate_tenant_id(context, res_dict, attr_info, is_create): if (('tenant_id' in res_dict and res_dict['tenant_id'] != context.tenant_id and not context.is_admin)): msg = _("Specifying 'tenant_id' other than authenticated " "tenant in request requires admin privileges") raise webob.exc.HTTPBadRequest(msg) if is_create and 'tenant_id' not in res_dict: if context.tenant_id: res_dict['tenant_id'] = context.tenant_id elif 'tenant_id' in attr_info: msg = _("Running without keystone AuthN requires " "that tenant_id is specified") raise webob.exc.HTTPBadRequest(msg) def verify_attributes(res_dict, attr_info): extra_keys = set(res_dict.keys()) - set(attr_info.keys()) if extra_keys: msg = _("Unrecognized attribute(s) '%s'") % ', '.join(extra_keys) raise webob.exc.HTTPBadRequest(msg) neutron-8.4.0/neutron/api/v2/resource_helper.py0000664000567000056710000001055713044372760022716 0ustar jenkinsjenkins00000000000000# (c) Copyright 2014 Cisco Systems Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License.
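# Illustrative (doctest-style) sketch of build_plural_mappings() below;
# the resource maps are empty placeholders here, not real attribute maps:
# >>> build_plural_mappings({}, {'routers': {}, 'policies': {}})
# {'routers': 'router', 'policies': 'policy'}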
from oslo_config import cfg from oslo_log import log as logging from neutron.api import extensions from neutron.api.v2 import base from neutron import manager from neutron.plugins.common import constants from neutron.quota import resource_registry LOG = logging.getLogger(__name__) def build_plural_mappings(special_mappings, resource_map): """Create plural to singular mapping for all resources. Allows special mappings to be provided for particular cases. Otherwise, will strip off the last character for normal mappings, like routers -> router, unless the plural name ends with 'ies', in which case the singular form will end with a 'y' (e.g.: policy/policies) """ plural_mappings = {} for plural in resource_map: singular = special_mappings.get(plural) if not singular: if plural.endswith('ies'): singular = "%sy" % plural[:-3] else: singular = plural[:-1] plural_mappings[plural] = singular return plural_mappings def build_resource_info(plural_mappings, resource_map, which_service, action_map=None, register_quota=False, translate_name=False, allow_bulk=False): """Build resources for advanced services. Takes the resource information, and singular/plural mappings, and creates API resource objects for advanced services extensions. Will optionally translate underscores to dashes in resource names, register the resource, and accept action information for resources. :param plural_mappings: mappings between singular and plural forms :param resource_map: attribute map for the WSGI resources to create :param which_service: The name of the service for which the WSGI resources are being created. This name will be used to pass the appropriate plugin to the WSGI resource. It can be set to None or "CORE" to create WSGI resources for the core plugin :param action_map: custom resource actions :param register_quota: it can be set to True to register quotas for the resource(s) being created :param translate_name: replaces underscores with dashes :param allow_bulk: True if bulk creates are allowed """ resources = [] if not which_service: which_service = constants.CORE if action_map is None: action_map = {} if which_service != constants.CORE: plugin = manager.NeutronManager.get_service_plugins()[which_service] else: plugin = manager.NeutronManager.get_plugin() path_prefix = getattr(plugin, "path_prefix", "") LOG.debug('Service %(service)s assigned prefix: %(prefix)s', {'service': which_service, 'prefix': path_prefix}) for collection_name in resource_map: resource_name = plural_mappings[collection_name] params = resource_map.get(collection_name, {}) if translate_name: collection_name = collection_name.replace('_', '-') if register_quota: resource_registry.register_resource_by_name(resource_name) member_actions = action_map.get(resource_name, {}) controller = base.create_resource( collection_name, resource_name, plugin, params, member_actions=member_actions, allow_bulk=allow_bulk, allow_pagination=cfg.CONF.allow_pagination, allow_sorting=cfg.CONF.allow_sorting) resource = extensions.ResourceExtension( collection_name, controller, path_prefix=path_prefix, member_actions=member_actions, attr_map=params) resources.append(resource) return resources neutron-8.4.0/neutron/api/v2/base.py0000664000567000056710000010274113044372760020447 0ustar jenkinsjenkins00000000000000# Copyright (c) 2012 OpenStack Foundation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License.
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import collections import copy import netaddr from oslo_config import cfg from oslo_log import log as logging from oslo_policy import policy as oslo_policy from oslo_utils import excutils import six import webob.exc from neutron._i18n import _, _LE, _LI from neutron.api import api_common from neutron.api.rpc.agentnotifiers import dhcp_rpc_agent_api from neutron.api.v2 import attributes from neutron.api.v2 import resource as wsgi_resource from neutron.common import constants as const from neutron.common import exceptions from neutron.common import rpc as n_rpc from neutron.db import api as db_api from neutron import policy from neutron import quota from neutron.quota import resource_registry LOG = logging.getLogger(__name__) FAULT_MAP = {exceptions.NotFound: webob.exc.HTTPNotFound, exceptions.Conflict: webob.exc.HTTPConflict, exceptions.InUse: webob.exc.HTTPConflict, exceptions.BadRequest: webob.exc.HTTPBadRequest, exceptions.ServiceUnavailable: webob.exc.HTTPServiceUnavailable, exceptions.NotAuthorized: webob.exc.HTTPForbidden, netaddr.AddrFormatError: webob.exc.HTTPBadRequest, oslo_policy.PolicyNotAuthorized: webob.exc.HTTPForbidden } class Controller(object): LIST = 'list' SHOW = 'show' CREATE = 'create' UPDATE = 'update' DELETE = 'delete' def __init__(self, plugin, collection, resource, attr_info, allow_bulk=False, member_actions=None, parent=None, allow_pagination=False, allow_sorting=False): if member_actions is None: member_actions = [] self._plugin = plugin self._collection = collection.replace('-', '_') self._resource = resource.replace('-', '_') self._attr_info = attr_info self._allow_bulk = allow_bulk self._allow_pagination = allow_pagination self._allow_sorting = allow_sorting self._native_bulk = self._is_native_bulk_supported() self._native_pagination = self._is_native_pagination_supported() self._native_sorting = self._is_native_sorting_supported() self._policy_attrs = [name for (name, info) in self._attr_info.items() if info.get('required_by_policy')] self._notifier = n_rpc.get_notifier('network') # Use the plugin's DHCP notifier if one has already been instantiated agent_notifiers = getattr(plugin, 'agent_notifiers', {}) self._dhcp_agent_notifier = ( agent_notifiers.get(const.AGENT_TYPE_DHCP) or dhcp_rpc_agent_api.DhcpAgentNotifyAPI() ) if cfg.CONF.notify_nova_on_port_data_changes: from neutron.notifiers import nova self._nova_notifier = nova.Notifier() self._member_actions = member_actions self._primary_key = self._get_primary_key() if self._allow_pagination and self._native_pagination: # Native pagination needs native sorting support if not self._native_sorting: raise exceptions.Invalid( _("Native pagination depends on native sorting") ) if not self._allow_sorting: LOG.info(_LI("Allow sorting is enabled because native " "pagination requires native sorting")) self._allow_sorting = True if parent: self._parent_id_name = '%s_id' % parent['member_name'] parent_part = '_%s' % parent['member_name'] else: self._parent_id_name = None parent_part = '' self._plugin_handlers = { self.LIST: 'get%s_%s' % (parent_part, self._collection), self.SHOW: 'get%s_%s' % (parent_part, self._resource) } for
action in [self.CREATE, self.UPDATE, self.DELETE]: self._plugin_handlers[action] = '%s%s_%s' % (action, parent_part, self._resource) def _get_primary_key(self, default_primary_key='id'): for key, value in six.iteritems(self._attr_info): if value.get('primary_key', False): return key return default_primary_key def _is_native_bulk_supported(self): native_bulk_attr_name = ("_%s__native_bulk_support" % self._plugin.__class__.__name__) return getattr(self._plugin, native_bulk_attr_name, False) def _is_native_pagination_supported(self): native_pagination_attr_name = ("_%s__native_pagination_support" % self._plugin.__class__.__name__) return getattr(self._plugin, native_pagination_attr_name, False) def _is_native_sorting_supported(self): native_sorting_attr_name = ("_%s__native_sorting_support" % self._plugin.__class__.__name__) return getattr(self._plugin, native_sorting_attr_name, False) def _exclude_attributes_by_policy(self, context, data): """Identifies attributes to exclude according to authZ policies. Return a list of attribute names which should be stripped from the response returned to the user because the user is not authorized to see them. """ attributes_to_exclude = [] for attr_name in data.keys(): attr_data = self._attr_info.get(attr_name) if attr_data and attr_data['is_visible']: if policy.check( context, '%s:%s' % (self._plugin_handlers[self.SHOW], attr_name), data, might_not_exist=True, pluralized=self._collection): # this attribute is visible, check next one continue # if the code reaches this point then either the policy check # failed or the attribute was not visible in the first place attributes_to_exclude.append(attr_name) return attributes_to_exclude def _view(self, context, data, fields_to_strip=None): """Build a view of an API resource. :param context: the neutron context :param data: the object for which a view is being created :param fields_to_strip: attributes to remove from the view :returns: a view of the object which includes only attributes visible according to API resource declaration and authZ policies. 
""" fields_to_strip = ((fields_to_strip or []) + self._exclude_attributes_by_policy(context, data)) return self._filter_attributes(context, data, fields_to_strip) def _filter_attributes(self, context, data, fields_to_strip=None): if not fields_to_strip: return data return dict(item for item in six.iteritems(data) if (item[0] not in fields_to_strip)) def _do_field_list(self, original_fields): fields_to_add = None # don't do anything if fields were not specified in the request if original_fields: fields_to_add = [attr for attr in self._policy_attrs if attr not in original_fields] original_fields.extend(self._policy_attrs) return original_fields, fields_to_add def __getattr__(self, name): if name in self._member_actions: @db_api.retry_db_errors def _handle_action(request, id, **kwargs): arg_list = [request.context, id] # Ensure policy engine is initialized policy.init() # Fetch the resource and verify if the user can access it try: parent_id = kwargs.get(self._parent_id_name) resource = self._item(request, id, do_authz=True, field_list=None, parent_id=parent_id) except oslo_policy.PolicyNotAuthorized: msg = _('The resource could not be found.') raise webob.exc.HTTPNotFound(msg) body = copy.deepcopy(kwargs.pop('body', None)) # Explicit comparison with None to distinguish from {} if body is not None: arg_list.append(body) # It is ok to raise a 403 because accessibility to the # object was checked earlier in this method policy.enforce(request.context, name, resource, pluralized=self._collection) ret_value = getattr(self._plugin, name)(*arg_list, **kwargs) # It is simply impossible to predict whether one of this # actions alters resource usage. For instance a tenant port # is created when a router interface is added. Therefore it is # important to mark as dirty resources whose counters have # been altered by this operation resource_registry.set_resources_dirty(request.context) return ret_value return _handle_action else: raise AttributeError() def _get_pagination_helper(self, request): if self._allow_pagination and self._native_pagination: return api_common.PaginationNativeHelper(request, self._primary_key) elif self._allow_pagination: return api_common.PaginationEmulatedHelper(request, self._primary_key) return api_common.NoPaginationHelper(request, self._primary_key) def _get_sorting_helper(self, request): if self._allow_sorting and self._native_sorting: return api_common.SortingNativeHelper(request, self._attr_info) elif self._allow_sorting: return api_common.SortingEmulatedHelper(request, self._attr_info) return api_common.NoSortingHelper(request, self._attr_info) def _items(self, request, do_authz=False, parent_id=None): """Retrieves and formats a list of elements of the requested entity.""" # NOTE(salvatore-orlando): The following ensures that fields which # are needed for authZ policy validation are not stripped away by the # plugin before returning. 
original_fields, fields_to_add = self._do_field_list( api_common.list_args(request, 'fields')) filters = api_common.get_filters(request, self._attr_info, ['fields', 'sort_key', 'sort_dir', 'limit', 'marker', 'page_reverse']) kwargs = {'filters': filters, 'fields': original_fields} sorting_helper = self._get_sorting_helper(request) pagination_helper = self._get_pagination_helper(request) sorting_helper.update_args(kwargs) sorting_helper.update_fields(original_fields, fields_to_add) pagination_helper.update_args(kwargs) pagination_helper.update_fields(original_fields, fields_to_add) if parent_id: kwargs[self._parent_id_name] = parent_id obj_getter = getattr(self._plugin, self._plugin_handlers[self.LIST]) obj_list = obj_getter(request.context, **kwargs) obj_list = sorting_helper.sort(obj_list) obj_list = pagination_helper.paginate(obj_list) # Check authz if do_authz: # FIXME(salvatore-orlando): obj_getter might return references to # other resources. Must check authZ on them too. # Omit items from list that should not be visible obj_list = [obj for obj in obj_list if policy.check(request.context, self._plugin_handlers[self.SHOW], obj, plugin=self._plugin, pluralized=self._collection)] # Use the first element in the list for discriminating which attributes # should be filtered out because of authZ policies # fields_to_add contains a list of attributes added for request policy # checks but that were not required by the user. They should be # therefore stripped fields_to_strip = fields_to_add or [] if obj_list: fields_to_strip += self._exclude_attributes_by_policy( request.context, obj_list[0]) collection = {self._collection: [self._filter_attributes( request.context, obj, fields_to_strip=fields_to_strip) for obj in obj_list]} pagination_links = pagination_helper.get_links(obj_list) if pagination_links: collection[self._collection + "_links"] = pagination_links # Synchronize usage trackers, if needed resource_registry.resync_resource( request.context, self._resource, request.context.tenant_id) return collection def _item(self, request, id, do_authz=False, field_list=None, parent_id=None): """Retrieves and formats a single element of the requested entity.""" kwargs = {'fields': field_list} action = self._plugin_handlers[self.SHOW] if parent_id: kwargs[self._parent_id_name] = parent_id obj_getter = getattr(self._plugin, action) obj = obj_getter(request.context, id, **kwargs) # Check authz # FIXME(salvatore-orlando): obj_getter might return references to # other resources. Must check authZ on them too. 
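# For illustration: show() and the member actions dispatched through
# __getattr__ both fetch objects via this helper with do_authz=True and
# convert a PolicyNotAuthorized raised below into a 404, so the
# existence of a resource is never leaked to unauthorized tenants.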
if do_authz: policy.enforce(request.context, action, obj, pluralized=self._collection) return obj def _send_dhcp_notification(self, context, data, methodname): if cfg.CONF.dhcp_agent_notification: if self._collection in data: for body in data[self._collection]: item = {self._resource: body} self._dhcp_agent_notifier.notify(context, item, methodname) else: self._dhcp_agent_notifier.notify(context, data, methodname) def _send_nova_notification(self, action, orig, returned): if hasattr(self, '_nova_notifier'): self._nova_notifier.send_network_change(action, orig, returned) @db_api.retry_db_errors def index(self, request, **kwargs): """Returns a list of the requested entity.""" parent_id = kwargs.get(self._parent_id_name) # Ensure policy engine is initialized policy.init() return self._items(request, True, parent_id) @db_api.retry_db_errors def show(self, request, id, **kwargs): """Returns detailed information about the requested entity.""" try: # NOTE(salvatore-orlando): The following ensures that fields # which are needed for authZ policy validation are not stripped # away by the plugin before returning. field_list, added_fields = self._do_field_list( api_common.list_args(request, "fields")) parent_id = kwargs.get(self._parent_id_name) # Ensure policy engine is initialized policy.init() return {self._resource: self._view(request.context, self._item(request, id, do_authz=True, field_list=field_list, parent_id=parent_id), fields_to_strip=added_fields)} except oslo_policy.PolicyNotAuthorized: # To avoid giving away information, pretend that it # doesn't exist msg = _('The resource could not be found.') raise webob.exc.HTTPNotFound(msg) def _emulate_bulk_create(self, obj_creator, request, body, parent_id=None): objs = [] try: for item in body[self._collection]: kwargs = {self._resource: item} if parent_id: kwargs[self._parent_id_name] = parent_id fields_to_strip = self._exclude_attributes_by_policy( request.context, item) objs.append(self._filter_attributes( request.context, obj_creator(request.context, **kwargs), fields_to_strip=fields_to_strip)) return objs # Note(salvatore-orlando): broad catch as in theory a plugin # could raise any kind of exception except Exception: with excutils.save_and_reraise_exception(): for obj in objs: obj_deleter = getattr(self._plugin, self._plugin_handlers[self.DELETE]) try: kwargs = ({self._parent_id_name: parent_id} if parent_id else {}) obj_deleter(request.context, obj['id'], **kwargs) except Exception: # broad catch as our only purpose is to log the # exception LOG.exception(_LE("Unable to undo add for " "%(resource)s %(id)s"), {'resource': self._resource, 'id': obj['id']}) # TODO(salvatore-orlando): The object being processed when the # plugin raised might have been created or not in the db. 
# We need a way for ensuring that if it has been created, # it is then deleted def create(self, request, body=None, **kwargs): self._notifier.info(request.context, self._resource + '.create.start', body) return self._create(request, body, **kwargs) @db_api.retry_db_errors def _create(self, request, body, **kwargs): """Creates a new instance of the requested entity.""" parent_id = kwargs.get(self._parent_id_name) body = Controller.prepare_request_body(request.context, copy.deepcopy(body), True, self._resource, self._attr_info, allow_bulk=self._allow_bulk) action = self._plugin_handlers[self.CREATE] # Check authz if self._collection in body: # Have to account for bulk create items = body[self._collection] else: items = [body] # Ensure policy engine is initialized policy.init() # Store requested resource amounts grouping them by tenant # This won't work with multiple resources. However because of the # current structure of this controller there will hardly be more than # one resource for which reservations are being made request_deltas = collections.defaultdict(int) for item in items: self._validate_network_tenant_ownership(request, item[self._resource]) policy.enforce(request.context, action, item[self._resource], pluralized=self._collection) if 'tenant_id' not in item[self._resource]: # no tenant_id - no quota check continue tenant_id = item[self._resource]['tenant_id'] request_deltas[tenant_id] += 1 # Quota enforcement reservations = [] try: for (tenant, delta) in request_deltas.items(): reservation = quota.QUOTAS.make_reservation( request.context, tenant, {self._resource: delta}, self._plugin) reservations.append(reservation) except exceptions.QuotaResourceUnknown as e: # We don't want to quota this resource LOG.debug(e) def notify(create_result): # Ensure usage trackers for all resources affected by this API # operation are marked as dirty with request.context.session.begin(): # Commit the reservation(s) for reservation in reservations: quota.QUOTAS.commit_reservation( request.context, reservation.reservation_id) resource_registry.set_resources_dirty(request.context) notifier_method = self._resource + '.create.end' self._notifier.info(request.context, notifier_method, create_result) self._send_dhcp_notification(request.context, create_result, notifier_method) return create_result def do_create(body, bulk=False, emulated=False): kwargs = {self._parent_id_name: parent_id} if parent_id else {} if bulk and not emulated: obj_creator = getattr(self._plugin, "%s_bulk" % action) else: obj_creator = getattr(self._plugin, action) try: if emulated: return self._emulate_bulk_create(obj_creator, request, body, parent_id) else: if self._collection in body: # This is weird but fixing it requires changes to the # plugin interface kwargs.update({self._collection: body}) else: kwargs.update({self._resource: body}) return obj_creator(request.context, **kwargs) except Exception: # In case of failure the plugin will always raise an # exception. 
Cancel the reservation with excutils.save_and_reraise_exception(): for reservation in reservations: quota.QUOTAS.cancel_reservation( request.context, reservation.reservation_id) if self._collection in body and self._native_bulk: # plugin does atomic bulk create operations objs = do_create(body, bulk=True) # Use first element of list to discriminate attributes which # should be removed because of authZ policies fields_to_strip = self._exclude_attributes_by_policy( request.context, objs[0]) return notify({self._collection: [self._filter_attributes( request.context, obj, fields_to_strip=fields_to_strip) for obj in objs]}) else: if self._collection in body: # Emulate atomic bulk behavior objs = do_create(body, bulk=True, emulated=True) return notify({self._collection: objs}) else: obj = do_create(body) self._send_nova_notification(action, {}, {self._resource: obj}) return notify({self._resource: self._view(request.context, obj)}) def delete(self, request, id, **kwargs): """Deletes the specified entity.""" if request.body: msg = _('Request body is not supported in DELETE.') raise webob.exc.HTTPBadRequest(msg) self._notifier.info(request.context, self._resource + '.delete.start', {self._resource + '_id': id}) return self._delete(request, id, **kwargs) @db_api.retry_db_errors def _delete(self, request, id, **kwargs): action = self._plugin_handlers[self.DELETE] # Check authz policy.init() parent_id = kwargs.get(self._parent_id_name) obj = self._item(request, id, parent_id=parent_id) try: policy.enforce(request.context, action, obj, pluralized=self._collection) except oslo_policy.PolicyNotAuthorized: # To avoid giving away information, pretend that it # doesn't exist msg = _('The resource could not be found.') raise webob.exc.HTTPNotFound(msg) obj_deleter = getattr(self._plugin, action) obj_deleter(request.context, id, **kwargs) # A delete operation usually alters resource usage, so mark affected # usage trackers as dirty resource_registry.set_resources_dirty(request.context) notifier_method = self._resource + '.delete.end' self._notifier.info(request.context, notifier_method, {self._resource + '_id': id}) result = {self._resource: self._view(request.context, obj)} self._send_nova_notification(action, {}, result) self._send_dhcp_notification(request.context, result, notifier_method) def update(self, request, id, body=None, **kwargs): """Updates the specified entity's attributes.""" try: payload = body.copy() except AttributeError: msg = _("Invalid format: %s") % request.body raise exceptions.BadRequest(resource='body', msg=msg) payload['id'] = id self._notifier.info(request.context, self._resource + '.update.start', payload) return self._update(request, id, body, **kwargs) @db_api.retry_db_errors def _update(self, request, id, body, **kwargs): body = Controller.prepare_request_body(request.context, copy.deepcopy(body), False, self._resource, self._attr_info, allow_bulk=self._allow_bulk) action = self._plugin_handlers[self.UPDATE] # Load object to check authz # but pass only attributes in the original body and required # by the policy engine to the policy 'brain' field_list = [name for (name, value) in six.iteritems(self._attr_info) if (value.get('required_by_policy') or value.get('primary_key') or 'default' not in value)] # Ensure policy engine is initialized policy.init() parent_id = kwargs.get(self._parent_id_name) orig_obj = self._item(request, id, field_list=field_list, parent_id=parent_id) orig_object_copy = copy.copy(orig_obj) orig_obj.update(body[self._resource]) # Make a list of attributes 
to be updated to inform the policy engine # which attributes are set explicitly so that it can distinguish them # from the ones that are set to their default values. orig_obj[const.ATTRIBUTES_TO_UPDATE] = body[self._resource].keys() try: policy.enforce(request.context, action, orig_obj, pluralized=self._collection) except oslo_policy.PolicyNotAuthorized: with excutils.save_and_reraise_exception() as ctxt: # If a tenant is modifying its own object, it's safe to return # a 403. Otherwise, pretend that it doesn't exist to avoid # giving away information. orig_obj_tenant_id = orig_obj.get("tenant_id") if (request.context.tenant_id != orig_obj_tenant_id or orig_obj_tenant_id is None): ctxt.reraise = False msg = _('The resource could not be found.') raise webob.exc.HTTPNotFound(msg) obj_updater = getattr(self._plugin, action) kwargs = {self._resource: body} if parent_id: kwargs[self._parent_id_name] = parent_id obj = obj_updater(request.context, id, **kwargs) # Usually an update operation does not alter resource usage, but as # there might be side effects it might be worth checking for changes # in resource usage here as well (e.g.: a tenant port is created when a # router interface is added) resource_registry.set_resources_dirty(request.context) result = {self._resource: self._view(request.context, obj)} notifier_method = self._resource + '.update.end' self._notifier.info(request.context, notifier_method, result) self._send_dhcp_notification(request.context, result, notifier_method) self._send_nova_notification(action, orig_object_copy, result) return result @staticmethod def prepare_request_body(context, body, is_create, resource, attr_info, allow_bulk=False): """Verifies required attributes are in request body. It also checks that an attribute is only specified if it is allowed for the given operation (create/update). Attributes with default values are considered to be optional. The body argument must be the deserialized body.
""" collection = resource + "s" if not body: raise webob.exc.HTTPBadRequest(_("Resource body required")) LOG.debug("Request body: %(body)s", {'body': body}) try: if collection in body: if not allow_bulk: raise webob.exc.HTTPBadRequest(_("Bulk operation " "not supported")) if not body[collection]: raise webob.exc.HTTPBadRequest(_("Resources required")) bulk_body = [ Controller.prepare_request_body( context, item if resource in item else {resource: item}, is_create, resource, attr_info, allow_bulk) for item in body[collection] ] return {collection: bulk_body} res_dict = body.get(resource) except (AttributeError, TypeError): msg = _("Body contains invalid data") raise webob.exc.HTTPBadRequest(msg) if res_dict is None: msg = _("Unable to find '%s' in request body") % resource raise webob.exc.HTTPBadRequest(msg) attributes.populate_tenant_id(context, res_dict, attr_info, is_create) attributes.verify_attributes(res_dict, attr_info) if is_create: # POST attributes.fill_default_value(attr_info, res_dict, webob.exc.HTTPBadRequest) else: # PUT for attr, attr_vals in six.iteritems(attr_info): if attr in res_dict and not attr_vals['allow_put']: msg = _("Cannot update read-only attribute %s") % attr raise webob.exc.HTTPBadRequest(msg) attributes.convert_value(attr_info, res_dict, webob.exc.HTTPBadRequest) return body def _validate_network_tenant_ownership(self, request, resource_item): # TODO(salvatore-orlando): consider whether this check can be folded # in the policy engine if (request.context.is_admin or request.context.is_advsvc or self._resource not in ('port', 'subnet')): return network = self._plugin.get_network( request.context, resource_item['network_id']) # do not perform the check on shared networks if network.get('shared'): return network_owner = network['tenant_id'] if network_owner != resource_item['tenant_id']: # NOTE(kevinbenton): we raise a 404 to hide the existence of the # network from the tenant since they don't have access to it. msg = _('The resource could not be found.') raise webob.exc.HTTPNotFound(msg) def create_resource(collection, resource, plugin, params, allow_bulk=False, member_actions=None, parent=None, allow_pagination=False, allow_sorting=False): controller = Controller(plugin, collection, resource, params, allow_bulk, member_actions=member_actions, parent=parent, allow_pagination=allow_pagination, allow_sorting=allow_sorting) return wsgi_resource.Resource(controller, FAULT_MAP) neutron-8.4.0/neutron/api/v2/resource.py0000664000567000056710000001676713044372760021400 0ustar jenkinsjenkins00000000000000# Copyright 2012 OpenStack Foundation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
""" Utility methods for working with WSGI servers redux """ import sys import netaddr import oslo_i18n from oslo_log import log as logging from oslo_policy import policy as oslo_policy import six import webob.dec import webob.exc from neutron._i18n import _, _LE, _LI from neutron.common import exceptions from neutron import wsgi LOG = logging.getLogger(__name__) class Request(wsgi.Request): pass def Resource(controller, faults=None, deserializers=None, serializers=None, action_status=None): """Represents an API entity resource and the associated serialization and deserialization logic """ default_deserializers = {'application/json': wsgi.JSONDeserializer()} default_serializers = {'application/json': wsgi.JSONDictSerializer()} format_types = {'json': 'application/json'} action_status = action_status or dict(create=201, delete=204) default_deserializers.update(deserializers or {}) default_serializers.update(serializers or {}) deserializers = default_deserializers serializers = default_serializers faults = faults or {} @webob.dec.wsgify(RequestClass=Request) def resource(request): route_args = request.environ.get('wsgiorg.routing_args') if route_args: args = route_args[1].copy() else: args = {} # NOTE(jkoelker) by now the controller is already found, remove # it from the args if it is in the matchdict args.pop('controller', None) fmt = args.pop('format', None) action = args.pop('action', None) content_type = format_types.get(fmt, request.best_match_content_type()) language = request.best_match_language() deserializer = deserializers.get(content_type) serializer = serializers.get(content_type) try: if request.body: args['body'] = deserializer.deserialize(request.body)['body'] method = getattr(controller, action) result = method(request=request, **args) except (exceptions.NeutronException, netaddr.AddrFormatError, oslo_policy.PolicyNotAuthorized) as e: for fault in faults: if isinstance(e, fault): mapped_exc = faults[fault] break else: mapped_exc = webob.exc.HTTPInternalServerError if 400 <= mapped_exc.code < 500: LOG.info(_LI('%(action)s failed (client error): %(exc)s'), {'action': action, 'exc': e}) else: LOG.exception(_LE('%s failed'), action) e = translate(e, language) body = serializer.serialize( {'NeutronError': get_exception_data(e)}) kwargs = {'body': body, 'content_type': content_type} raise mapped_exc(**kwargs) except webob.exc.HTTPException as e: type_, value, tb = sys.exc_info() if hasattr(e, 'code') and 400 <= e.code < 500: LOG.info(_LI('%(action)s failed (client error): %(exc)s'), {'action': action, 'exc': e}) else: LOG.exception(_LE('%s failed'), action) translate(e, language) value.body = serializer.serialize( {'NeutronError': get_exception_data(e)}) value.content_type = content_type six.reraise(type_, value, tb) except NotImplementedError as e: e = translate(e, language) # NOTE(armando-migliaccio): from a client standpoint # it makes sense to receive these errors, because # extensions may or may not be implemented by # the underlying plugin. So if something goes south, # because a plugin does not implement a feature, # returning 500 is definitely confusing. body = serializer.serialize( {'NotImplementedError': get_exception_data(e)}) kwargs = {'body': body, 'content_type': content_type} raise webob.exc.HTTPNotImplemented(**kwargs) except Exception: # NOTE(jkoelker) Everything else is 500 LOG.exception(_LE('%s failed'), action) # Do not expose details of 500 error to clients. 
msg = _('Request Failed: internal server error while ' 'processing your request.') msg = translate(msg, language) body = serializer.serialize( {'NeutronError': get_exception_data( webob.exc.HTTPInternalServerError(msg))}) kwargs = {'body': body, 'content_type': content_type} raise webob.exc.HTTPInternalServerError(**kwargs) status = action_status.get(action, 200) body = serializer.serialize(result) # NOTE(jkoelker) Comply with RFC2616 section 9.7 if status == 204: content_type = '' body = None return webob.Response(request=request, status=status, content_type=content_type, body=body) # NOTE(blogan): this is something that is needed for the transition to # pecan. This will allow the pecan code to have a handle on the controller # for an extension so it can reuse the code instead of forcing every # extension to rewrite the code for use with pecan. setattr(resource, 'controller', controller) return resource def get_exception_data(e): """Extract the information about an exception. Neutron client for the v2 API expects exceptions to have 'type', 'message' and 'detail' attributes.This information is extracted and converted into a dictionary. :param e: the exception to be reraised :returns: a structured dict with the exception data """ err_data = {'type': e.__class__.__name__, 'message': e, 'detail': ''} return err_data def translate(translatable, locale): """Translates the object to the given locale. If the object is an exception its translatable elements are translated in place, if the object is a translatable string it is translated and returned. Otherwise, the object is returned as-is. :param translatable: the object to be translated :param locale: the locale to translate to :returns: the translated object, or the object as-is if it was not translated """ localize = oslo_i18n.translate if isinstance(translatable, exceptions.NeutronException): translatable.msg = localize(translatable.msg, locale) elif isinstance(translatable, webob.exc.HTTPError): translatable.detail = localize(translatable.detail, locale) elif isinstance(translatable, Exception): translatable.message = localize(translatable, locale) else: return localize(translatable, locale) return translatable neutron-8.4.0/neutron/api/extensions.py0000664000567000056710000006614113044372760021410 0ustar jenkinsjenkins00000000000000# Copyright 2011 OpenStack Foundation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
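# --- Hedged illustration of the fault-mapping flow above ------------------
# A controller exception listed in `faults` is turned by Resource()
# into the mapped webob error, with a serialized {'NeutronError': ...}
# body built via get_exception_data(). `my_controller` is hypothetical.
faults = {exceptions.NotFound: webob.exc.HTTPNotFound}
wsgi_app = Resource(my_controller, faults=faults)
# If my_controller.show() raises exceptions.NotFound, the client
# receives a 404 whose JSON body carries the exception's 'type',
# 'message' and 'detail' fields.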
import abc import collections import imp import os from oslo_config import cfg from oslo_log import log as logging from oslo_middleware import base import routes import six import webob.dec import webob.exc from neutron._i18n import _, _LE, _LI, _LW from neutron.common import exceptions import neutron.extensions from neutron import manager from neutron.plugins.common import constants as const from neutron.services import provider_configuration from neutron import wsgi LOG = logging.getLogger(__name__) @six.add_metaclass(abc.ABCMeta) class PluginInterface(object): @classmethod def __subclasshook__(cls, klass): """Checking plugin class. The __subclasshook__ method is a class method that will be called every time a class is tested using issubclass(klass, PluginInterface). In that case, it will check that every method marked with the abstractmethod decorator is provided by the plugin class. """ if not cls.__abstractmethods__: return NotImplemented for method in cls.__abstractmethods__: if any(method in base.__dict__ for base in klass.__mro__): continue return NotImplemented return True @six.add_metaclass(abc.ABCMeta) class ExtensionDescriptor(object): """Base class that defines the contract for extensions.""" @abc.abstractmethod def get_name(self): """The name of the extension. e.g. 'Fox In Socks' """ @abc.abstractmethod def get_alias(self): """The alias for the extension. e.g. 'FOXNSOX' """ @abc.abstractmethod def get_description(self): """Friendly description for the extension. e.g. 'The Fox In Socks Extension' """ @abc.abstractmethod def get_updated(self): """The timestamp when the extension was last updated. e.g. '2011-01-22T13:25:27-06:00' """ # NOTE(justinsb): Not sure of the purpose of this is, vs the XML NS def get_resources(self): """List of extensions.ResourceExtension extension objects. Resources define new nouns, and are accessible through URLs. """ resources = [] return resources def get_actions(self): """List of extensions.ActionExtension extension objects. Actions are verbs callable from the API. """ actions = [] return actions def get_request_extensions(self): """List of extensions.RequestException extension objects. Request extensions are used to handle custom request data. """ request_exts = [] return request_exts def get_extended_resources(self, version): """Retrieve extended resources or attributes for core resources. Extended attributes are implemented by a core plugin similarly to the attributes defined in the core, and can appear in request and response messages. Their names are scoped with the extension's prefix. The core API version is passed to this function, which must return a map[][][] specifying the extended resource attribute properties required by that API version. Extension can add resources and their attr definitions too. The returned map can be integrated into RESOURCE_ATTRIBUTE_MAP. """ return {} def get_plugin_interface(self): """Returns an abstract class which defines contract for the plugin. The abstract class should inherit from extensions.PluginInterface, Methods in this abstract class should be decorated as abstractmethod """ return None def get_required_extensions(self): """Returns a list of extensions to be processed before this one.""" return [] def get_optional_extensions(self): """Returns a list of extensions to be processed before this one. Unlike get_required_extensions. This will not fail the loading of the extension if one of these extensions is not present. 
This is useful for an extension that extends multiple resources across other extensions that should still work for the remaining extensions when one is missing. """ return [] def update_attributes_map(self, extended_attributes, extension_attrs_map=None): """Update attributes map for this extension. This is default method for extending an extension's attributes map. An extension can use this method and supplying its own resource attribute map in extension_attrs_map argument to extend all its attributes that needs to be extended. If an extension does not implement update_attributes_map, the method does nothing and just return. """ if not extension_attrs_map: return for resource, attrs in six.iteritems(extension_attrs_map): extended_attrs = extended_attributes.get(resource) if extended_attrs: attrs.update(extended_attrs) class ActionExtensionController(wsgi.Controller): def __init__(self, application): self.application = application self.action_handlers = {} def add_action(self, action_name, handler): self.action_handlers[action_name] = handler def action(self, request, id): input_dict = self._deserialize(request.body, request.get_content_type()) for action_name, handler in six.iteritems(self.action_handlers): if action_name in input_dict: return handler(input_dict, request, id) # no action handler found (bump to downstream application) response = self.application return response class RequestExtensionController(wsgi.Controller): def __init__(self, application): self.application = application self.handlers = [] def add_handler(self, handler): self.handlers.append(handler) def process(self, request, *args, **kwargs): res = request.get_response(self.application) # currently request handlers are un-ordered for handler in self.handlers: response = handler(request, res) return response class ExtensionController(wsgi.Controller): def __init__(self, extension_manager): self.extension_manager = extension_manager @staticmethod def _translate(ext): ext_data = {} ext_data['name'] = ext.get_name() ext_data['alias'] = ext.get_alias() ext_data['description'] = ext.get_description() ext_data['updated'] = ext.get_updated() ext_data['links'] = [] # TODO(dprince): implement extension links return ext_data def index(self, request): extensions = [] for _alias, ext in six.iteritems(self.extension_manager.extensions): extensions.append(self._translate(ext)) return dict(extensions=extensions) def show(self, request, id): # NOTE(dprince): the extensions alias is used as the 'id' for show ext = self.extension_manager.extensions.get(id, None) if not ext: raise webob.exc.HTTPNotFound( _("Extension with alias %s does not exist") % id) return dict(extension=self._translate(ext)) def delete(self, request, id): msg = _('Resource not found.') raise webob.exc.HTTPNotFound(msg) def create(self, request): msg = _('Resource not found.') raise webob.exc.HTTPNotFound(msg) class ExtensionMiddleware(base.ConfigurableMiddleware): """Extensions middleware for WSGI.""" def __init__(self, application, ext_mgr=None): self.ext_mgr = (ext_mgr or ExtensionManager(get_extensions_path())) mapper = routes.Mapper() # extended resources for resource in self.ext_mgr.get_resources(): path_prefix = resource.path_prefix if resource.parent: path_prefix = (resource.path_prefix + "/%s/{%s_id}" % (resource.parent["collection_name"], resource.parent["member_name"])) LOG.debug('Extended resource: %s', resource.collection) for action, method in six.iteritems(resource.collection_actions): conditions = dict(method=[method]) path = "/%s/%s" % 
(resource.collection, action) with mapper.submapper(controller=resource.controller, action=action, path_prefix=path_prefix, conditions=conditions) as submap: submap.connect(path_prefix + path, path) submap.connect(path_prefix + path + "_format", "%s.:(format)" % path) for action, method in resource.collection_methods.items(): conditions = dict(method=[method]) path = "/%s" % resource.collection with mapper.submapper(controller=resource.controller, action=action, path_prefix=path_prefix, conditions=conditions) as submap: submap.connect(path_prefix + path, path) submap.connect(path_prefix + path + "_format", "%s.:(format)" % path) mapper.resource(resource.collection, resource.collection, controller=resource.controller, member=resource.member_actions, parent_resource=resource.parent, path_prefix=path_prefix) # extended actions action_controllers = self._action_ext_controllers(application, self.ext_mgr, mapper) for action in self.ext_mgr.get_actions(): LOG.debug('Extended action: %s', action.action_name) controller = action_controllers[action.collection] controller.add_action(action.action_name, action.handler) # extended requests req_controllers = self._request_ext_controllers(application, self.ext_mgr, mapper) for request_ext in self.ext_mgr.get_request_extensions(): LOG.debug('Extended request: %s', request_ext.key) controller = req_controllers[request_ext.key] controller.add_handler(request_ext.handler) self._router = routes.middleware.RoutesMiddleware(self._dispatch, mapper) super(ExtensionMiddleware, self).__init__(application) @classmethod def factory(cls, global_config, **local_config): """Paste factory.""" def _factory(app): return cls(app, global_config, **local_config) return _factory def _action_ext_controllers(self, application, ext_mgr, mapper): """Return a dict of ActionExtensionController-s by collection.""" action_controllers = {} for action in ext_mgr.get_actions(): if action.collection not in action_controllers.keys(): controller = ActionExtensionController(application) mapper.connect("/%s/:(id)/action.:(format)" % action.collection, action='action', controller=controller, conditions=dict(method=['POST'])) mapper.connect("/%s/:(id)/action" % action.collection, action='action', controller=controller, conditions=dict(method=['POST'])) action_controllers[action.collection] = controller return action_controllers def _request_ext_controllers(self, application, ext_mgr, mapper): """Returns a dict of RequestExtensionController-s by collection.""" request_ext_controllers = {} for req_ext in ext_mgr.get_request_extensions(): if req_ext.key not in request_ext_controllers.keys(): controller = RequestExtensionController(application) mapper.connect(req_ext.url_route + '.:(format)', action='process', controller=controller, conditions=req_ext.conditions) mapper.connect(req_ext.url_route, action='process', controller=controller, conditions=req_ext.conditions) request_ext_controllers[req_ext.key] = controller return request_ext_controllers @webob.dec.wsgify(RequestClass=wsgi.Request) def __call__(self, req): """Route the incoming request with router.""" req.environ['extended.app'] = self.application return self._router @staticmethod @webob.dec.wsgify(RequestClass=wsgi.Request) def _dispatch(req): """Dispatch the request. Returns the routed WSGI app's response or defers to the extended application. 
""" match = req.environ['wsgiorg.routing_args'][1] if not match: return req.environ['extended.app'] app = match['controller'] return app def plugin_aware_extension_middleware_factory(global_config, **local_config): """Paste factory.""" def _factory(app): ext_mgr = PluginAwareExtensionManager.get_instance() return ExtensionMiddleware(app, ext_mgr=ext_mgr) return _factory class ExtensionManager(object): """Load extensions from the configured extension path. See tests/unit/extensions/foxinsocks.py for an example extension implementation. """ def __init__(self, path): LOG.info(_LI('Initializing extension manager.')) self.path = path self.extensions = {} self._load_all_extensions() def get_resources(self): """Returns a list of ResourceExtension objects.""" resources = [] resources.append(ResourceExtension('extensions', ExtensionController(self))) for ext in self.extensions.values(): resources.extend(ext.get_resources()) return resources def get_actions(self): """Returns a list of ActionExtension objects.""" actions = [] for ext in self.extensions.values(): actions.extend(ext.get_actions()) return actions def get_request_extensions(self): """Returns a list of RequestExtension objects.""" request_exts = [] for ext in self.extensions.values(): request_exts.extend(ext.get_request_extensions()) return request_exts def extend_resources(self, version, attr_map): """Extend resources with additional resources or attributes. :param attr_map: the existing mapping from resource name to attrs definition. After this function, we will extend the attr_map if an extension wants to extend this map. """ processed_exts = {} exts_to_process = self.extensions.copy() check_optionals = True # Iterate until there are unprocessed extensions or if no progress # is made in a whole iteration while exts_to_process: processed_ext_count = len(processed_exts) for ext_name, ext in list(exts_to_process.items()): # Process extension only if all required extensions # have been processed already required_exts_set = set(ext.get_required_extensions()) if required_exts_set - set(processed_exts): continue optional_exts_set = set(ext.get_optional_extensions()) if check_optionals and optional_exts_set - set(processed_exts): continue extended_attrs = ext.get_extended_resources(version) for res, resource_attrs in six.iteritems(extended_attrs): attr_map.setdefault(res, {}).update(resource_attrs) processed_exts[ext_name] = ext del exts_to_process[ext_name] if len(processed_exts) == processed_ext_count: # if we hit here, it means there are unsatisfied # dependencies. try again without optionals since optionals # are only necessary to set order if they are present. if check_optionals: check_optionals = False continue # Exit loop as no progress was made break if exts_to_process: unloadable_extensions = set(exts_to_process.keys()) LOG.error(_LE("Unable to process extensions (%s) because " "the configured plugins do not satisfy " "their requirements. Some features will not " "work as expected."), ', '.join(unloadable_extensions)) # Fail gracefully for default extensions, just in case some out # of tree plugins are not entirely up to speed default_extensions = set(const.DEFAULT_SERVICE_PLUGINS.values()) if not unloadable_extensions <= default_extensions: raise exceptions.ExtensionsNotFound( extensions=list(unloadable_extensions)) # Extending extensions' attributes map. 
for ext in processed_exts.values(): ext.update_attributes_map(attr_map) def _check_extension(self, extension): """Checks for required methods in extension objects.""" try: LOG.debug('Ext name: %s', extension.get_name()) LOG.debug('Ext alias: %s', extension.get_alias()) LOG.debug('Ext description: %s', extension.get_description()) LOG.debug('Ext updated: %s', extension.get_updated()) except AttributeError: LOG.exception(_LE("Exception loading extension")) return False return isinstance(extension, ExtensionDescriptor) def _load_all_extensions(self): """Load extensions from the configured path. The extension name is constructed from the module_name. If your extension module is named widgets.py, the extension class within that module should be 'Widgets'. See tests/unit/extensions/foxinsocks.py for an example extension implementation. """ for path in self.path.split(':'): if os.path.exists(path): self._load_all_extensions_from_path(path) else: LOG.error(_LE("Extension path '%s' doesn't exist!"), path) def _load_all_extensions_from_path(self, path): # Sorting the extension list makes the order in which they # are loaded predictable across a cluster of load-balanced # Neutron Servers for f in sorted(os.listdir(path)): try: LOG.debug('Loading extension file: %s', f) mod_name, file_ext = os.path.splitext(os.path.split(f)[-1]) ext_path = os.path.join(path, f) if file_ext.lower() == '.py' and not mod_name.startswith('_'): mod = imp.load_source(mod_name, ext_path) ext_name = mod_name[0].upper() + mod_name[1:] new_ext_class = getattr(mod, ext_name, None) if not new_ext_class: LOG.warning(_LW('Did not find expected name ' '"%(ext_name)s" in %(file)s'), {'ext_name': ext_name, 'file': ext_path}) continue new_ext = new_ext_class() self.add_extension(new_ext) except Exception as exception: LOG.warning(_LW("Extension file %(f)s wasn't loaded due to " "%(exception)s"), {'f': f, 'exception': exception}) def add_extension(self, ext): # Do nothing if the extension doesn't check out if not self._check_extension(ext): return alias = ext.get_alias() LOG.info(_LI('Loaded extension: %s'), alias) if alias in self.extensions: raise exceptions.DuplicatedExtension(alias=alias) self.extensions[alias] = ext class PluginAwareExtensionManager(ExtensionManager): _instance = None def __init__(self, path, plugins): self.plugins = plugins super(PluginAwareExtensionManager, self).__init__(path) self.check_if_plugin_extensions_loaded() def _check_extension(self, extension): """Check if an extension is supported by any plugin.""" extension_is_valid = super(PluginAwareExtensionManager, self)._check_extension(extension) return (extension_is_valid and self._plugins_support(extension) and self._plugins_implement_interface(extension)) def _plugins_support(self, extension): alias = extension.get_alias() supports_extension = alias in self.get_supported_extension_aliases() if not supports_extension: LOG.warning(_LW("Extension %s not supported by any of loaded " "plugins"), alias) return supports_extension def _plugins_implement_interface(self, extension): if extension.get_plugin_interface() is None: return True for plugin in self.plugins.values(): if isinstance(plugin, extension.get_plugin_interface()): return True LOG.warning(_LW("Loaded plugins do not implement extension " "%s interface"), extension.get_alias()) return False @classmethod def get_instance(cls): if cls._instance is None: service_plugins = manager.NeutronManager.get_service_plugins() cls._instance = cls(get_extensions_path(service_plugins), service_plugins) return 
cls._instance def get_plugin_supported_extension_aliases(self, plugin): """Return extension aliases supported by a given plugin""" aliases = set() # we also check all classes that the plugins inherit to see if they # directly provide support for an extension for item in [plugin] + plugin.__class__.mro(): try: aliases |= set( getattr(item, "supported_extension_aliases", [])) except TypeError: # we land here if a class has a @property decorator for # supported extension aliases. They only work on objects. pass return aliases def get_supported_extension_aliases(self): """Gets extension aliases supported by all plugins.""" aliases = set() for plugin in self.plugins.values(): aliases |= self.get_plugin_supported_extension_aliases(plugin) return aliases @classmethod def clear_instance(cls): cls._instance = None def check_if_plugin_extensions_loaded(self): """Check if an extension supported by a plugin has been loaded.""" plugin_extensions = self.get_supported_extension_aliases() missing_aliases = plugin_extensions - set(self.extensions) if missing_aliases: raise exceptions.ExtensionsNotFound( extensions=list(missing_aliases)) class RequestExtension(object): """Extend requests and responses of core Neutron OpenStack API controllers. Provide a way to add data to responses and handle custom request data that is sent to core Neutron OpenStack API controllers. """ def __init__(self, method, url_route, handler): self.url_route = url_route self.handler = handler self.conditions = dict(method=[method]) self.key = "%s-%s" % (method, url_route) class ActionExtension(object): """Add custom actions to core Neutron OpenStack API controllers.""" def __init__(self, collection, action_name, handler): self.collection = collection self.action_name = action_name self.handler = handler class ResourceExtension(object): """Add top level resources to the OpenStack API in Neutron.""" def __init__(self, collection, controller, parent=None, path_prefix="", collection_actions=None, member_actions=None, attr_map=None, collection_methods=None): collection_actions = collection_actions or {} collection_methods = collection_methods or {} member_actions = member_actions or {} attr_map = attr_map or {} self.collection = collection self.controller = controller self.parent = parent self.collection_actions = collection_actions self.collection_methods = collection_methods self.member_actions = member_actions self.path_prefix = path_prefix self.attr_map = attr_map # Returns the extension paths from a config entry and the __path__ # of neutron.extensions def get_extensions_path(service_plugins=None): paths = collections.OrderedDict() # Add Neutron core extensions paths[neutron.extensions.__path__[0]] = 1 if service_plugins: # Add Neutron *-aas extensions for plugin in service_plugins.values(): neutron_mod = provider_configuration.NeutronModule( plugin.__module__.split('.')[0]) try: paths[neutron_mod.module().extensions.__path__[0]] = 1 except AttributeError: # Occurs normally if module has no extensions sub-module pass # Add external/other plugins extensions if cfg.CONF.api_extensions_path: for path in cfg.CONF.api_extensions_path.split(":"): paths[path] = 1 LOG.debug("get_extension_paths = %s", paths) # Re-build the extension string path = ':'.join(paths) return path def append_api_extensions_path(paths): paths = list(set([cfg.CONF.api_extensions_path] + paths)) cfg.CONF.set_override('api_extensions_path', ':'.join([p for p in paths if p])) neutron-8.4.0/neutron/api/versions.py0000664000567000056710000000400213044372736021050 0ustar 
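# --- Hedged sketch of a loadable extension module --------------------------
# Per _load_all_extensions_from_path() above, a file "widgets.py" on the
# extension path must define a class "Widgets"; the names and strings
# below are illustrative only.
class Widgets(ExtensionDescriptor):
    def get_name(self):
        return 'Widgets'

    def get_alias(self):
        return 'widgets'

    def get_description(self):
        return 'Adds a hypothetical widgets resource'

    def get_updated(self):
        return '2016-01-01T00:00:00-00:00'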
jenkinsjenkins00000000000000# Copyright 2011 Citrix Systems. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import oslo_i18n import webob.dec from neutron._i18n import _ from neutron.api.views import versions as versions_view from neutron import wsgi class Versions(object): @classmethod def factory(cls, global_config, **local_config): return cls(app=None) @webob.dec.wsgify(RequestClass=wsgi.Request) def __call__(self, req): """Respond to a request for all Neutron API versions.""" version_objs = [ { "id": "v2.0", "status": "CURRENT", }, ] if req.path != '/': if self.app: return req.get_response(self.app) language = req.best_match_language() msg = _('Unknown API version specified') msg = oslo_i18n.translate(msg, language) return webob.exc.HTTPNotFound(explanation=msg) builder = versions_view.get_view_builder(req) versions = [builder.build(version) for version in version_objs] response = dict(versions=versions) metadata = {} content_type = req.best_match_content_type() body = (wsgi.Serializer(metadata=metadata). serialize(response, content_type)) response = webob.Response() response.content_type = content_type response.body = wsgi.encode_body(body) return response def __init__(self, app): self.app = app neutron-8.4.0/neutron/api/rpc/0000775000567000056710000000000013044373210017402 5ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/api/rpc/handlers/0000775000567000056710000000000013044373210021202 5ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/api/rpc/handlers/resources_rpc.py0000775000567000056710000002101013044372760024440 0ustar jenkinsjenkins00000000000000# Copyright (c) 2015 Mellanox Technologies, Ltd # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
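# --- Hedged illustration of the version index above -------------------------
# Versions.__call__ answers "GET /" with a body shaped roughly like the
# following; the links are filled in by the versions view builder from
# the request URL, so exact fields may differ:
# {"versions": [{"id": "v2.0",
#                "status": "CURRENT",
#                "links": [{"rel": "self",
#                           "href": "http://server:9696/v2.0"}]}]}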
from oslo_log import helpers as log_helpers from oslo_log import log as logging import oslo_messaging from neutron._i18n import _ from neutron.api.rpc.callbacks.consumer import registry as cons_registry from neutron.api.rpc.callbacks.producer import registry as prod_registry from neutron.api.rpc.callbacks import resources from neutron.api.rpc.callbacks import version_manager from neutron.common import constants from neutron.common import exceptions from neutron.common import rpc as n_rpc from neutron.common import topics from neutron.objects import base as obj_base LOG = logging.getLogger(__name__) class ResourcesRpcError(exceptions.NeutronException): pass class InvalidResourceTypeClass(ResourcesRpcError): message = _("Invalid resource type %(resource_type)s") class ResourceNotFound(ResourcesRpcError): message = _("Resource %(resource_id)s of type %(resource_type)s " "not found") def _validate_resource_type(resource_type): if not resources.is_valid_resource_type(resource_type): raise InvalidResourceTypeClass(resource_type=resource_type) def resource_type_versioned_topic(resource_type, version=None): """Return the topic for a resource type. If no version is provided, the latest version of the object will be used. """ _validate_resource_type(resource_type) cls = resources.get_resource_cls(resource_type) return topics.RESOURCE_TOPIC_PATTERN % {'resource_type': resource_type, 'version': version or cls.VERSION} class ResourcesPullRpcApi(object): """Agent-side RPC (stub) for agent-to-plugin interaction. This class implements the client side of an rpc interface. The server side can be found below: ResourcesPullRpcCallback. For more information on this RPC interface, see doc/source/devref/rpc_callbacks.rst. """ def __new__(cls): # make it a singleton if not hasattr(cls, '_instance'): cls._instance = super(ResourcesPullRpcApi, cls).__new__(cls) target = oslo_messaging.Target( topic=topics.PLUGIN, version='1.0', namespace=constants.RPC_NAMESPACE_RESOURCES) cls._instance.client = n_rpc.get_client(target) return cls._instance @log_helpers.log_method_call def pull(self, context, resource_type, resource_id): _validate_resource_type(resource_type) # we've already validated the resource type, so we are pretty sure the # class is there => no need to validate it specifically resource_type_cls = resources.get_resource_cls(resource_type) cctxt = self.client.prepare() primitive = cctxt.call(context, 'pull', resource_type=resource_type, version=resource_type_cls.VERSION, resource_id=resource_id) if primitive is None: raise ResourceNotFound(resource_type=resource_type, resource_id=resource_id) return resource_type_cls.clean_obj_from_primitive(primitive) class ResourcesPullRpcCallback(object): """Plugin-side RPC (implementation) for agent-to-plugin interaction. This class implements the server side of an rpc interface. The client side can be found above: ResourcesPullRpcApi. For more information on this RPC interface, see doc/source/devref/rpc_callbacks.rst. """ # History # 1.0 Initial version target = oslo_messaging.Target( version='1.0', namespace=constants.RPC_NAMESPACE_RESOURCES) def pull(self, context, resource_type, version, resource_id): obj = prod_registry.pull(resource_type, resource_id, context=context) if obj: return obj.obj_to_primitive(target_version=version) class ResourcesPushToServersRpcApi(object): """Publisher-side RPC (stub) for plugin-to-plugin fanout interaction. This class implements the client side of an rpc interface. The receiver side can be found below: ResourcesPushToServerRpcCallback. 
For more information on this RPC interface, see doc/source/devref/rpc_callbacks.rst. """ def __init__(self): target = oslo_messaging.Target( topic=topics.SERVER_RESOURCE_VERSIONS, version='1.0', namespace=constants.RPC_NAMESPACE_RESOURCES) self.client = n_rpc.get_client(target) @log_helpers.log_method_call def report_agent_resource_versions(self, context, agent_type, agent_host, version_map): """Fan out all the agent resource versions to other servers.""" cctxt = self.client.prepare(fanout=True) cctxt.cast(context, 'report_agent_resource_versions', agent_type=agent_type, agent_host=agent_host, version_map=version_map) class ResourcesPushToServerRpcCallback(object): """Receiver-side RPC (implementation) for plugin-to-plugin interaction. This class implements the receiver side of an rpc interface. The client side can be found above: ResourcePushToServerRpcApi. For more information on this RPC interface, see doc/source/devref/rpc_callbacks.rst. """ # History # 1.0 Initial version target = oslo_messaging.Target( version='1.0', namespace=constants.RPC_NAMESPACE_RESOURCES) @log_helpers.log_method_call def report_agent_resource_versions(self, context, agent_type, agent_host, version_map): consumer_id = version_manager.AgentConsumer(agent_type=agent_type, host=agent_host) version_manager.update_versions(consumer_id, version_map) class ResourcesPushRpcApi(object): """Plugin-side RPC for plugin-to-agents interaction. This interface is designed to push versioned object updates to interested agents using fanout topics. This class implements the caller side of an rpc interface. The receiver side can be found below: ResourcesPushRpcCallback. """ def __init__(self): target = oslo_messaging.Target( version='1.0', namespace=constants.RPC_NAMESPACE_RESOURCES) self.client = n_rpc.get_client(target) def _prepare_object_fanout_context(self, obj, version): """Prepare fanout context, one topic per object type.""" obj_topic = resource_type_versioned_topic(obj.obj_name(), version) return self.client.prepare(fanout=True, topic=obj_topic) @log_helpers.log_method_call def push(self, context, resource, event_type): resource_type = resources.get_resource_type(resource) _validate_resource_type(resource_type) versions = version_manager.get_resource_versions(resource_type) for version in versions: cctxt = self._prepare_object_fanout_context(resource, version) dehydrated_resource = resource.obj_to_primitive( target_version=version) cctxt.cast(context, 'push', resource=dehydrated_resource, event_type=event_type) class ResourcesPushRpcCallback(object): """Agent-side RPC for plugin-to-agents interaction. This class implements the receiver for notification about versioned objects resource updates used by neutron.api.rpc.callbacks. You can find the caller side in ResourcesPushRpcApi. """ # History # 1.0 Initial version target = oslo_messaging.Target(version='1.0', namespace=constants.RPC_NAMESPACE_RESOURCES) def push(self, context, resource, event_type): resource_obj = obj_base.NeutronObject.clean_obj_from_primitive( resource) LOG.debug("Resources notification (%(event_type)s): %(resource)s", {'event_type': event_type, 'resource': repr(resource_obj)}) resource_type = resources.get_resource_type(resource_obj) cons_registry.push(resource_type, resource_obj, event_type) neutron-8.4.0/neutron/api/rpc/handlers/metadata_rpc.py0000664000567000056710000000303113044372760024206 0ustar jenkinsjenkins00000000000000# Copyright (c) 2014 OpenStack Foundation. 
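# --- Hedged usage sketch for the resources RPC defined above ---------------
# Agent side: pull a versioned object. 'QosPolicy' is used purely as an
# illustrative resource-type name and the id is made up.
pull_api = ResourcesPullRpcApi()                 # singleton via __new__
policy = pull_api.pull(context, 'QosPolicy', 'POLICY-UUID')

# Server side: fan the updated object out on the per-type, per-version
# topics computed by resource_type_versioned_topic().
push_api = ResourcesPushRpcApi()
push_api.push(context, policy, 'updated')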
# # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import oslo_messaging from neutron.common import constants from neutron import manager class MetadataRpcCallback(object): """Metadata agent RPC callback in plugin implementations. This class implements the server side of an rpc interface used by the metadata service to make calls back into the Neutron plugin. The client side is defined in neutron.agent.metadata.agent.MetadataPluginAPI. For more information about changing rpc interfaces, see doc/source/devref/rpc_api.rst. """ # 1.0 MetadataPluginAPI BASE_RPC_API_VERSION target = oslo_messaging.Target(version='1.0', namespace=constants.RPC_NAMESPACE_METADATA) @property def plugin(self): if not hasattr(self, '_plugin'): self._plugin = manager.NeutronManager.get_plugin() return self._plugin def get_ports(self, context, filters): return self.plugin.get_ports(context, filters=filters) neutron-8.4.0/neutron/api/rpc/handlers/l3_rpc.py0000664000567000056710000003322513044372760022754 0ustar jenkinsjenkins00000000000000# Copyright (c) 2012 OpenStack Foundation. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. from oslo_config import cfg from oslo_log import log as logging import oslo_messaging from oslo_serialization import jsonutils import six from neutron.common import constants from neutron.common import exceptions from neutron.common import utils from neutron import context as neutron_context from neutron.db import api as db_api from neutron.extensions import l3 from neutron.extensions import portbindings from neutron import manager from neutron.plugins.common import constants as plugin_constants LOG = logging.getLogger(__name__) class L3RpcCallback(object): """L3 agent RPC callback in plugin implementations.""" # 1.0 L3PluginApi BASE_RPC_API_VERSION # 1.1 Support update_floatingip_statuses # 1.2 Added methods for DVR support # 1.3 Added a method that returns the list of activated services # 1.4 Added L3 HA update_router_state. This method was later removed, # since it was unused. 
The RPC version was not changed # 1.5 Added update_ha_routers_states # 1.6 Added process_prefix_update to support IPv6 Prefix Delegation # 1.7 Added method delete_agent_gateway_port for DVR Routers # 1.8 Added address scope information # 1.9 Added get_router_ids target = oslo_messaging.Target(version='1.9') @property def plugin(self): if not hasattr(self, '_plugin'): self._plugin = manager.NeutronManager.get_plugin() return self._plugin @property def l3plugin(self): if not hasattr(self, '_l3plugin'): self._l3plugin = manager.NeutronManager.get_service_plugins()[ plugin_constants.L3_ROUTER_NAT] return self._l3plugin def get_router_ids(self, context, host): """Returns IDs of routers scheduled to l3 agent on This will autoschedule unhosted routers to l3 agent on and then return all ids of routers scheduled to it. """ if utils.is_extension_supported( self.l3plugin, constants.L3_AGENT_SCHEDULER_EXT_ALIAS): if cfg.CONF.router_auto_schedule: self.l3plugin.auto_schedule_routers(context, host, router_ids=None) return self.l3plugin.list_router_ids_on_host(context, host) @db_api.retry_db_errors def sync_routers(self, context, **kwargs): """Sync routers according to filters to a specific agent. @param context: contain user information @param kwargs: host, router_ids @return: a list of routers with their interfaces and floating_ips """ router_ids = kwargs.get('router_ids') host = kwargs.get('host') context = neutron_context.get_admin_context() if utils.is_extension_supported( self.l3plugin, constants.L3_AGENT_SCHEDULER_EXT_ALIAS): routers = ( self.l3plugin.list_active_sync_routers_on_active_l3_agent( context, host, router_ids)) else: routers = self.l3plugin.get_sync_data(context, router_ids) if utils.is_extension_supported( self.plugin, constants.PORT_BINDING_EXT_ALIAS): self._ensure_host_set_on_ports(context, host, routers) LOG.debug("Routers returned to l3 agent:\n %s", utils.DelayedStringRenderer(jsonutils.dumps, routers, indent=5)) return routers def _ensure_host_set_on_ports(self, context, host, routers): for router in routers: LOG.debug("Checking router: %(id)s for host: %(host)s", {'id': router['id'], 'host': host}) if router.get('gw_port') and router.get('distributed'): # '' is used to effectively clear binding of a gw port if not # bound (snat is not hosted on any l3 agent) gw_port_host = router.get('gw_port_host') or '' self._ensure_host_set_on_port(context, gw_port_host, router.get('gw_port'), router['id']) for p in router.get(constants.SNAT_ROUTER_INTF_KEY, []): self._ensure_host_set_on_port( context, gw_port_host, p, router['id'], ha_router_port=router.get('ha')) else: self._ensure_host_set_on_port( context, host, router.get('gw_port'), router['id'], ha_router_port=router.get('ha')) for interface in router.get(constants.INTERFACE_KEY, []): self._ensure_host_set_on_port( context, host, interface, router['id'], ha_router_port=router.get('ha')) interface = router.get(constants.HA_INTERFACE_KEY) if interface: self._ensure_host_set_on_port(context, host, interface, router['id']) def _ensure_host_set_on_port(self, context, host, port, router_id=None, ha_router_port=False): not_bound = port and port.get(portbindings.VIF_TYPE) in ( portbindings.VIF_TYPE_BINDING_FAILED, portbindings.VIF_TYPE_UNBOUND) if (port and host is not None and (port.get('device_owner') != constants.DEVICE_OWNER_DVR_INTERFACE and port.get(portbindings.HOST_ID) != host or not_bound)): # Ports owned by non-HA routers are bound again if they're # already bound but the router moved to another host. 
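# (Hedged illustration, not in the original source.) For example, a
# non-HA router rescheduled from net-node-1 to net-node-2 gets its
# gateway port's binding host rewritten to net-node-2 below, while an
# HA router's ports are bound at most once, to the reported active
# host, and later corrected when an agent reports a state change.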
if not ha_router_port: # All ports, including ports created for SNAT'ing for # DVR are handled here try: self.plugin.update_port( context, port['id'], {'port': {portbindings.HOST_ID: host}}) # updating port's host to pass actual info to l3 agent port[portbindings.HOST_ID] = host except exceptions.PortNotFound: LOG.debug("Port %(port)s not found while updating " "agent binding for router %(router)s.", {"port": port['id'], "router": router_id}) # Ports owned by HA routers should only be bound once, if # they are unbound. These ports are moved when an agent reports # that one of its routers moved to the active state. else: if not port.get(portbindings.HOST_ID): active_host = ( self.l3plugin.get_active_host_for_ha_router( context, router_id)) if active_host: host = active_host # If there is currently no active router instance (For # example it's a new router), the host that requested # the routers (Essentially a random host) will do. The # port binding will be corrected when an active is # elected. try: self.plugin.update_port( context, port['id'], {'port': {portbindings.HOST_ID: host}}) except exceptions.PortNotFound: LOG.debug("Port %(port)s not found while updating " "agent binding for router %(router)s.", {"port": port['id'], "router": router_id}) elif (port and port.get('device_owner') == constants.DEVICE_OWNER_DVR_INTERFACE): # Ports that are DVR interfaces have multiple bindings (based on # of hosts on which DVR router interfaces are spawned). Such # bindings are created/updated here by invoking # update_dvr_port_binding self.plugin.update_dvr_port_binding(context, port['id'], {'port': {portbindings.HOST_ID: host, 'device_id': router_id} }) def get_external_network_id(self, context, **kwargs): """Get one external network id for l3 agent. l3 agent expects only one external network when it performs this query. """ context = neutron_context.get_admin_context() net_id = self.plugin.get_external_network_id(context) LOG.debug("External network ID returned to l3 agent: %s", net_id) return net_id def get_service_plugin_list(self, context, **kwargs): plugins = manager.NeutronManager.get_service_plugins() return plugins.keys() @db_api.retry_db_errors def update_floatingip_statuses(self, context, router_id, fip_statuses): """Update operational status for a floating IP.""" with context.session.begin(subtransactions=True): for (floatingip_id, status) in six.iteritems(fip_statuses): LOG.debug("New status for floating IP %(floatingip_id)s: " "%(status)s", {'floatingip_id': floatingip_id, 'status': status}) try: self.l3plugin.update_floatingip_status(context, floatingip_id, status) except l3.FloatingIPNotFound: LOG.debug("Floating IP: %s no longer present.", floatingip_id) # Find all floating IPs known to have been the given router # for which an update was not received. Set them DOWN mercilessly # This situation might occur for some asynchronous backends if # notifications were missed known_router_fips = self.l3plugin.get_floatingips( context, {'last_known_router_id': [router_id]}) # Consider only floating ips which were disassociated in the API # FIXME(salv-orlando): Filtering in code should be avoided. 
# the plugin should offer a way to specify a null filter fips_to_disable = (fip['id'] for fip in known_router_fips if not fip['router_id']) for fip_id in fips_to_disable: self.l3plugin.update_floatingip_status( context, fip_id, constants.FLOATINGIP_STATUS_DOWN) def get_ports_by_subnet(self, context, **kwargs): """DVR: RPC called by dvr-agent to get all ports for subnet.""" subnet_id = kwargs.get('subnet_id') LOG.debug("DVR: subnet_id: %s", subnet_id) filters = {'fixed_ips': {'subnet_id': [subnet_id]}} return self.plugin.get_ports(context, filters=filters) @db_api.retry_db_errors def get_agent_gateway_port(self, context, **kwargs): """Get Agent Gateway port for FIP. l3 agent expects an Agent Gateway Port to be returned for this query. """ network_id = kwargs.get('network_id') host = kwargs.get('host') admin_ctx = neutron_context.get_admin_context() agent_port = self.l3plugin.create_fip_agent_gw_port_if_not_exists( admin_ctx, network_id, host) self._ensure_host_set_on_port(admin_ctx, host, agent_port) LOG.debug('Agent Gateway port returned : %(agent_port)s with ' 'host %(host)s', {'agent_port': agent_port, 'host': host}) return agent_port @db_api.retry_db_errors def update_ha_routers_states(self, context, **kwargs): """Update states for HA routers. Get a map of router_id to its HA state on a host and update the DB. State must be in: ('active', 'standby'). """ states = kwargs.get('states') host = kwargs.get('host') LOG.debug('Updating HA routers states on host %s: %s', host, states) self.l3plugin.update_routers_states(context, states, host) def process_prefix_update(self, context, **kwargs): subnets = kwargs.get('subnets') updated_subnets = [] for subnet_id, prefix in subnets.items(): updated_subnets.append(self.plugin.update_subnet( context, subnet_id, {'subnet': {'cidr': prefix}})) return updated_subnets @db_api.retry_db_errors def delete_agent_gateway_port(self, context, **kwargs): """Delete Floatingip agent gateway port.""" network_id = kwargs.get('network_id') host = kwargs.get('host') admin_ctx = neutron_context.get_admin_context() self.l3plugin.delete_floatingip_agent_gateway_port( admin_ctx, host, network_id) neutron-8.4.0/neutron/api/rpc/handlers/dhcp_rpc.py0000664000567000056710000002475713044372760023366 0ustar jenkinsjenkins00000000000000# Copyright (c) 2012 OpenStack Foundation. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
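# --- Hedged examples of payloads handled by the L3 RPC above ---------------
# update_ha_routers_states(): map of router id to HA state, e.g.
#   states = {'<router-uuid>': 'active', '<other-router-uuid>': 'standby'}
#   host = 'net-node-1'
# process_prefix_update(): map of subnet id to delegated IPv6 prefix, e.g.
#   subnets = {'<subnet-uuid>': '2001:db8:1234::/64'}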
import copy import itertools import operator from oslo_config import cfg from oslo_db import exception as db_exc from oslo_log import log as logging import oslo_messaging from oslo_utils import excutils from neutron._i18n import _, _LW from neutron.api.v2 import attributes from neutron.common import constants from neutron.common import exceptions as n_exc from neutron.common import utils from neutron.db import api as db_api from neutron.extensions import portbindings from neutron import manager from neutron.plugins.common import utils as p_utils from neutron.quota import resource_registry LOG = logging.getLogger(__name__) class DhcpRpcCallback(object): """DHCP agent RPC callback in plugin implementations. This class implements the server side of an rpc interface. The client side of this interface can be found in neutron.agent.dhcp.agent.DhcpPluginApi. For more information about changing rpc interfaces, see doc/source/devref/rpc_api.rst. """ # API version history: # 1.0 - Initial version. # 1.1 - Added get_active_networks_info, create_dhcp_port, # and update_dhcp_port methods. # 1.2 - Removed get_dhcp_port. When removing a method (Making a # backwards incompatible change) you would normally bump the # major version. However, since the method was unused in the # RPC client for many releases, it should be OK to bump the # minor release instead and claim RPC compatibility with the # last few client versions. # 1.3 - Removed release_port_fixed_ip. It's not used by reference DHCP # agent since Juno, so similar rationale for not bumping the # major version as above applies here too. target = oslo_messaging.Target( namespace=constants.RPC_NAMESPACE_DHCP_PLUGIN, version='1.3') def _get_active_networks(self, context, **kwargs): """Retrieve and return a list of the active networks.""" host = kwargs.get('host') plugin = manager.NeutronManager.get_plugin() if utils.is_extension_supported( plugin, constants.DHCP_AGENT_SCHEDULER_EXT_ALIAS): if cfg.CONF.network_auto_schedule: plugin.auto_schedule_networks(context, host) nets = plugin.list_active_networks_on_active_dhcp_agent( context, host) else: filters = dict(admin_state_up=[True]) nets = plugin.get_networks(context, filters=filters) return nets def _port_action(self, plugin, context, port, action): """Perform port operations taking care of concurrency issues.""" try: if action == 'create_port': return p_utils.create_port(plugin, context, port) elif action == 'update_port': return plugin.update_port(context, port['id'], port) else: msg = _('Unrecognized action') raise n_exc.Invalid(message=msg) except (db_exc.DBError, n_exc.NetworkNotFound, n_exc.SubnetNotFound, n_exc.IpAddressGenerationFailure) as e: with excutils.save_and_reraise_exception(reraise=False) as ctxt: if isinstance(e, n_exc.IpAddressGenerationFailure): # Check if the subnet still exists and if it does not, # this is the reason why the ip address generation failed. 
# In any other unlikely event re-raise try: subnet_id = port['port']['fixed_ips'][0]['subnet_id'] plugin.get_subnet(context, subnet_id) except n_exc.SubnetNotFound: pass else: ctxt.reraise = True net_id = port['port']['network_id'] LOG.warning(_LW("Action %(action)s for network %(net_id)s " "could not complete successfully: %(reason)s"), {"action": action, "net_id": net_id, 'reason': e}) def get_active_networks(self, context, **kwargs): """Retrieve and return a list of the active network ids.""" # NOTE(arosen): This method is no longer used by the DHCP agent but is # left so that neutron-dhcp-agents will still continue to work if # neutron-server is upgraded and not the agent. host = kwargs.get('host') LOG.debug('get_active_networks requested from %s', host) nets = self._get_active_networks(context, **kwargs) return [net['id'] for net in nets] def _group_by_network_id(self, res): grouped = {} keyfunc = operator.itemgetter('network_id') for net_id, values in itertools.groupby(sorted(res, key=keyfunc), keyfunc): grouped[net_id] = list(values) return grouped def get_active_networks_info(self, context, **kwargs): """Returns all the networks/subnets/ports in system.""" host = kwargs.get('host') LOG.debug('get_active_networks_info from %s', host) networks = self._get_active_networks(context, **kwargs) plugin = manager.NeutronManager.get_plugin() filters = {'network_id': [network['id'] for network in networks]} ports = plugin.get_ports(context, filters=filters) filters['enable_dhcp'] = [True] # NOTE(kevinbenton): we sort these because the agent builds tags # based on position in the list and has to restart the process if # the order changes. subnets = sorted(plugin.get_subnets(context, filters=filters), key=operator.itemgetter('id')) grouped_subnets = self._group_by_network_id(subnets) grouped_ports = self._group_by_network_id(ports) for network in networks: network['subnets'] = grouped_subnets.get(network['id'], []) network['ports'] = grouped_ports.get(network['id'], []) return networks def get_network_info(self, context, **kwargs): """Retrieve and return extended information about a network.""" network_id = kwargs.get('network_id') host = kwargs.get('host') LOG.debug('Network %(network_id)s requested from ' '%(host)s', {'network_id': network_id, 'host': host}) plugin = manager.NeutronManager.get_plugin() try: network = plugin.get_network(context, network_id) except n_exc.NetworkNotFound: LOG.debug("Network %s could not be found, it might have " "been deleted concurrently.", network_id) return filters = dict(network_id=[network_id]) # NOTE(kevinbenton): we sort these because the agent builds tags # based on position in the list and has to restart the process if # the order changes. 
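# (Hedged illustration.) The network dict assembled below is the
# agent's complete view of one network, roughly:
#   {'id': ..., 'admin_state_up': ..., 'subnets': [<subnets sorted
#    by id>], 'ports': [<all ports on the network>]}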
network['subnets'] = sorted( plugin.get_subnets(context, filters=filters), key=operator.itemgetter('id')) network['ports'] = plugin.get_ports(context, filters=filters) return network @db_api.retry_db_errors def release_dhcp_port(self, context, **kwargs): """Release the port currently being used by a DHCP agent.""" host = kwargs.get('host') network_id = kwargs.get('network_id') device_id = kwargs.get('device_id') LOG.debug('DHCP port deletion for %(network_id)s request from ' '%(host)s', {'network_id': network_id, 'host': host}) plugin = manager.NeutronManager.get_plugin() plugin.delete_ports_by_device_id(context, device_id, network_id) def update_lease_expiration(self, context, **kwargs): """Release the fixed_ip associated the subnet on a port.""" # NOTE(arosen): This method is no longer used by the DHCP agent but is # left so that neutron-dhcp-agents will still continue to work if # neutron-server is upgraded and not the agent. host = kwargs.get('host') LOG.warning(_LW('Updating lease expiration is now deprecated. Issued ' 'from host %s.'), host) @db_api.retry_db_errors @resource_registry.mark_resources_dirty def create_dhcp_port(self, context, **kwargs): """Create and return dhcp port information. If an expected failure occurs, a None port is returned. """ host = kwargs.get('host') # Note(pbondar): Create deep copy of port to prevent operating # on changed dict if RetryRequest is raised port = copy.deepcopy(kwargs.get('port')) LOG.debug('Create dhcp port %(port)s ' 'from %(host)s.', {'port': port, 'host': host}) port['port']['device_owner'] = constants.DEVICE_OWNER_DHCP port['port'][portbindings.HOST_ID] = host if 'mac_address' not in port['port']: port['port']['mac_address'] = attributes.ATTR_NOT_SPECIFIED plugin = manager.NeutronManager.get_plugin() return self._port_action(plugin, context, port, 'create_port') @db_api.retry_db_errors def update_dhcp_port(self, context, **kwargs): """Update the dhcp port.""" host = kwargs.get('host') port = kwargs.get('port') port['id'] = kwargs.get('port_id') port['port'][portbindings.HOST_ID] = host plugin = manager.NeutronManager.get_plugin() old_port = plugin.get_port(context, port['id']) if (old_port['device_id'] != constants.DEVICE_ID_RESERVED_DHCP_PORT and old_port['device_id'] != utils.get_dhcp_agent_device_id(port['port']['network_id'], host)): raise n_exc.DhcpPortInUse(port_id=port['id']) LOG.debug('Update dhcp port %(port)s ' 'from %(host)s.', {'port': port, 'host': host}) return self._port_action(plugin, context, port, 'update_port') neutron-8.4.0/neutron/api/rpc/handlers/__init__.py0000664000567000056710000000000013044372736023315 0ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/api/rpc/handlers/securitygroups_rpc.py0000664000567000056710000002242313044372760025543 0ustar jenkinsjenkins00000000000000# All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
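# --- Hedged sketch of a create_dhcp_port() request from the agent ----------
# Field values are illustrative; the callback above fills in
# device_owner, the binding host, and a mac_address default if absent.
port = {'port': {'network_id': '<net-uuid>',
                 'device_id': '<dhcp-agent device id>',
                 'fixed_ips': [{'subnet_id': '<subnet-uuid>'}],
                 'tenant_id': '<tenant-uuid>'}}
# DhcpRpcCallback().create_dhcp_port(context, host='net-node-1', port=port)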
from oslo_log import log as logging import oslo_messaging from neutron._i18n import _LW from neutron.common import constants from neutron.common import rpc as n_rpc from neutron.common import topics from neutron.common import utils from neutron import manager LOG = logging.getLogger(__name__) class SecurityGroupServerRpcApi(object): """RPC client for security group methods in the plugin. This class implements the client side of an rpc interface. This interface is used by agents to call security group related methods implemented on the plugin side. The other side of this interface is defined in SecurityGroupServerRpcCallback. For more information about changing rpc interfaces, see doc/source/devref/rpc_api.rst. """ def __init__(self, topic): target = oslo_messaging.Target( topic=topic, version='1.0', namespace=constants.RPC_NAMESPACE_SECGROUP) self.client = n_rpc.get_client(target) def security_group_rules_for_devices(self, context, devices): LOG.debug("Get security group rules " "for devices via rpc %r", devices) cctxt = self.client.prepare(version='1.1') return cctxt.call(context, 'security_group_rules_for_devices', devices=devices) def security_group_info_for_devices(self, context, devices): LOG.debug("Get security group information for devices via rpc %r", devices) cctxt = self.client.prepare(version='1.2') return cctxt.call(context, 'security_group_info_for_devices', devices=devices) class SecurityGroupServerRpcCallback(object): """Callback for SecurityGroup agent RPC in plugin implementations. This class implements the server side of an rpc interface. The client side can be found in SecurityGroupServerRpcApi. For more information on changing rpc interfaces, see doc/source/devref/rpc_api.rst. """ # API version history: # 1.1 - Initial version # 1.2 - security_group_info_for_devices introduced as an optimization # NOTE: target must not be overridden in subclasses # to keep RPC API version consistent across plugins. target = oslo_messaging.Target(version='1.2', namespace=constants.RPC_NAMESPACE_SECGROUP) @property def plugin(self): return manager.NeutronManager.get_plugin() def _get_devices_info(self, context, devices): return dict( (port['id'], port) for port in self.plugin.get_ports_from_devices(context, devices) if port and not utils.is_port_trusted(port) ) def security_group_rules_for_devices(self, context, **kwargs): """Callback method to return security group rules for each port. also convert remote_group_id rule to source_ip_prefix and dest_ip_prefix rule :params devices: list of devices :returns: port correspond to the devices with security group rules """ devices_info = kwargs.get('devices') ports = self._get_devices_info(context, devices_info) return self.plugin.security_group_rules_for_ports(context, ports) def security_group_info_for_devices(self, context, **kwargs): """Return security group information for requested devices. :params devices: list of devices :returns: sg_info{ 'security_groups': {sg_id: [rule1, rule2]} 'sg_member_ips': {sg_id: {'IPv4': set(), 'IPv6': set()}} 'devices': {device_id: {device_info}} } Note that sets are serialized into lists by rpc code. """ devices_info = kwargs.get('devices') ports = self._get_devices_info(context, devices_info) return self.plugin.security_group_info_for_ports(context, ports) class SecurityGroupAgentRpcApiMixin(object): """RPC client for security group methods to the agent. This class implements the client side of an rpc interface. This interface is used by plugins to call security group methods implemented on the agent side. 
The other side of this interface can be found in SecurityGroupAgentRpcCallbackMixin. For more information about changing rpc interfaces, see doc/source/devref/rpc_api.rst. """ # history # 1.1 Support Security Group RPC SG_RPC_VERSION = "1.1" def _get_security_group_topic(self): return topics.get_topic_name(self.topic, topics.SECURITY_GROUP, topics.UPDATE) def security_groups_rule_updated(self, context, security_groups): """Notify rule updated security groups.""" if not security_groups: return cctxt = self.client.prepare(version=self.SG_RPC_VERSION, topic=self._get_security_group_topic(), fanout=True) cctxt.cast(context, 'security_groups_rule_updated', security_groups=security_groups) def security_groups_member_updated(self, context, security_groups): """Notify member updated security groups.""" if not security_groups: return cctxt = self.client.prepare(version=self.SG_RPC_VERSION, topic=self._get_security_group_topic(), fanout=True) cctxt.cast(context, 'security_groups_member_updated', security_groups=security_groups) def security_groups_provider_updated(self, context, devices_to_update=None): """Notify provider updated security groups.""" # NOTE(ihrachys) the version here should really be 1.3, but since we # don't support proper version pinning yet, we leave it intact to allow # it to work with older agents. The reason why we should not require the # version here is that in rolling upgrade scenarios we always upgrade # server first, and since the notification is directed from the newer # server to older agents, and those agents don't have their RPC entry # point bumped to 1.3 yet, we cannot safely enforce the minimal # version. Newer payload works for older agents because agent handlers # are written so that we silently ignore unknown parameters. cctxt = self.client.prepare(version=self.SG_RPC_VERSION, topic=self._get_security_group_topic(), fanout=True) cctxt.cast(context, 'security_groups_provider_updated', devices_to_update=devices_to_update) class SecurityGroupAgentRpcCallbackMixin(object): """A mix-in that enables SecurityGroup support in agent implementations. This class implements the server side of an rpc interface. The client side can be found in SecurityGroupServerRpcApi. For more information on changing rpc interfaces, see doc/source/devref/rpc_api.rst. The sg_agent reference implementation is available in neutron/agent. """ # The mix-in object should have sg_agent set. sg_agent = None def _security_groups_agent_not_set(self): LOG.warning(_LW("Security group agent binding currently not set. " "This should be set by the end of the init " "process.")) def security_groups_rule_updated(self, context, **kwargs): """Callback for security group rule update. :param security_groups: list of updated security_groups """ security_groups = kwargs.get('security_groups', []) LOG.debug("Security group rule updated on remote: %s", security_groups) if not self.sg_agent: return self._security_groups_agent_not_set() self.sg_agent.security_groups_rule_updated(security_groups) def security_groups_member_updated(self, context, **kwargs): """Callback for security group member update.
:param security_groups: list of updated security_groups """ security_groups = kwargs.get('security_groups', []) LOG.debug("Security group member updated on remote: %s", security_groups) if not self.sg_agent: return self._security_groups_agent_not_set() self.sg_agent.security_groups_member_updated(security_groups) def security_groups_provider_updated(self, context, **kwargs): """Callback for security group provider update.""" LOG.debug("Provider rule updated") devices_to_update = kwargs.get('devices_to_update') if not self.sg_agent: return self._security_groups_agent_not_set() self.sg_agent.security_groups_provider_updated(devices_to_update) neutron-8.4.0/neutron/api/rpc/handlers/bgp_speaker_rpc.py0000664000567000056710000000463013044372760024716 0ustar jenkinsjenkins00000000000000# Copyright 2016 Huawei Technologies India Pvt. Ltd. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import oslo_messaging from neutron.extensions import bgp as bgp_ext from neutron import manager class BgpSpeakerRpcCallback(object): """BgpDrAgent RPC callback in plugin implementations. This class implements the server side of an RPC interface. The client side of this interface can be found in neutron.services.bgp_speaker.agent.bgp_dragent.BgpDrPluginApi. For more information about changing RPC interfaces, see doc/source/devref/rpc_api.rst. """ # API version history: # 1.0 BGPDRPluginApi BASE_RPC_API_VERSION target = oslo_messaging.Target(version='1.0') @property def plugin(self): if not hasattr(self, '_plugin'): self._plugin = manager.NeutronManager.get_service_plugins().get( bgp_ext.BGP_EXT_ALIAS) return self._plugin def get_bgp_speaker_info(self, context, bgp_speaker_id): """Return BGP Speaker details such as peer list and local_as. Invoked by the BgpDrAgent to lookup the details of a BGP Speaker. """ return self.plugin.get_bgp_speaker_with_advertised_routes( context, bgp_speaker_id) def get_bgp_peer_info(self, context, bgp_peer_id): """Return BgpPeer details such as IP, remote_as, and credentials. Invoked by the BgpDrAgent to lookup the details of a BGP peer. """ return self.plugin.get_bgp_peer(context, bgp_peer_id, ['peer_ip', 'remote_as', 'auth_type', 'password']) def get_bgp_speakers(self, context, host=None, **kwargs): """Returns the list of all BgpSpeakers. Typically invoked by the BgpDrAgent as part of its bootstrap process. """ return self.plugin.get_bgp_speakers_for_agent_host(context, host) neutron-8.4.0/neutron/api/rpc/handlers/dvr_rpc.py0000664000567000056710000001257013044372760023231 0ustar jenkinsjenkins00000000000000# Copyright 2014, Hewlett-Packard Development Company, L.P. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_log import helpers as log_helpers from oslo_log import log as logging import oslo_messaging from neutron.common import constants from neutron.common import rpc as n_rpc from neutron.common import topics from neutron import manager LOG = logging.getLogger(__name__) class DVRServerRpcApi(object): """Agent-side RPC (stub) for agent-to-plugin interaction. This class implements the client side of an rpc interface. The server side can be found below: DVRServerRpcCallback. For more information on changing rpc interfaces, see doc/source/devref/rpc_api.rst. """ # 1.0 Initial Version # 1.1 Support for passing 'fixed_ips' in get_subnet_for_dvr function. # Passing 'subnet' will be deprecated in the next release. def __init__(self, topic): target = oslo_messaging.Target(topic=topic, version='1.0', namespace=constants.RPC_NAMESPACE_DVR) self.client = n_rpc.get_client(target) @log_helpers.log_method_call def get_dvr_mac_address_by_host(self, context, host): cctxt = self.client.prepare() return cctxt.call(context, 'get_dvr_mac_address_by_host', host=host) @log_helpers.log_method_call def get_dvr_mac_address_list(self, context): cctxt = self.client.prepare() return cctxt.call(context, 'get_dvr_mac_address_list') @log_helpers.log_method_call def get_ports_on_host_by_subnet(self, context, host, subnet): """Get DVR serviced ports on given host and subnet.""" cctxt = self.client.prepare() return cctxt.call(context, 'get_ports_on_host_by_subnet', host=host, subnet=subnet) @log_helpers.log_method_call def get_subnet_for_dvr(self, context, subnet, fixed_ips): cctxt = self.client.prepare() return cctxt.call( context, 'get_subnet_for_dvr', subnet=subnet, fixed_ips=fixed_ips) class DVRServerRpcCallback(object): """Plugin-side RPC (implementation) for agent-to-plugin interaction. This class implements the server side of an rpc interface. The client side can be found above: DVRServerRpcApi. For more information on changing rpc interfaces, see doc/source/devref/rpc_api.rst. """ # History # 1.0 Initial version # 1.1 Support for passing the 'fixed_ips' in get_subnet_for_dvr. # Passing 'subnet' will be deprecated in the next release.
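    # A hypothetical agent-side usage sketch for the DVRServerRpcApi stub
    # above (the topic constant is real; the host and subnet values are
    # placeholders):
    #
    #     dvr_api = DVRServerRpcApi(topics.PLUGIN)
    #     mac = dvr_api.get_dvr_mac_address_by_host(context, 'compute-1')
    #     ports = dvr_api.get_ports_on_host_by_subnet(context, 'compute-1',
    #                                                 subnet_id)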
target = oslo_messaging.Target(version='1.1', namespace=constants.RPC_NAMESPACE_DVR) @property def plugin(self): if not getattr(self, '_plugin', None): self._plugin = manager.NeutronManager.get_plugin() return self._plugin def get_dvr_mac_address_list(self, context): return self.plugin.get_dvr_mac_address_list(context) def get_dvr_mac_address_by_host(self, context, **kwargs): host = kwargs.get('host') LOG.debug("DVR Agent requests mac_address for host %s", host) return self.plugin.get_dvr_mac_address_by_host(context, host) def get_ports_on_host_by_subnet(self, context, **kwargs): """Get DVR serviced ports for given host and subnet.""" host = kwargs.get('host') subnet = kwargs.get('subnet') LOG.debug("DVR Agent requests list of VM ports on host %s", host) return self.plugin.get_ports_on_host_by_subnet(context, host, subnet) def get_subnet_for_dvr(self, context, **kwargs): fixed_ips = kwargs.get('fixed_ips') subnet = kwargs.get('subnet') return self.plugin.get_subnet_for_dvr( context, subnet, fixed_ips=fixed_ips) class DVRAgentRpcApiMixin(object): """Plugin-side RPC (stub) for plugin-to-agent interaction.""" DVR_RPC_VERSION = "1.0" def _get_dvr_update_topic(self): return topics.get_topic_name(self.topic, topics.DVR, topics.UPDATE) def dvr_mac_address_update(self, context, dvr_macs): """Notify dvr mac address updates.""" if not dvr_macs: return cctxt = self.client.prepare(topic=self._get_dvr_update_topic(), version=self.DVR_RPC_VERSION, fanout=True) cctxt.cast(context, 'dvr_mac_address_update', dvr_macs=dvr_macs) class DVRAgentRpcCallbackMixin(object): """Agent-side RPC (implementation) for plugin-to-agent interaction.""" def dvr_mac_address_update(self, context, **kwargs): """Callback for dvr_mac_addresses update. :param dvr_macs: list of updated dvr_macs """ dvr_macs = kwargs.get('dvr_macs', []) LOG.debug("dvr_macs updated on remote: %s", dvr_macs) self.dvr_agent.dvr_mac_address_update(dvr_macs) neutron-8.4.0/neutron/api/rpc/callbacks/0000775000567000056710000000000013044373210021321 5ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/api/rpc/callbacks/resource_manager.py0000664000567000056710000001114613044372736025233 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import abc import collections from oslo_log import log as logging import six from neutron.api.rpc.callbacks import exceptions as rpc_exc from neutron.api.rpc.callbacks import resources from neutron.callbacks import exceptions LOG = logging.getLogger(__name__) # TODO(QoS): split the registry/resources_rpc modules into two separate things: # one for pull and one for push APIs def _validate_resource_type(resource_type): if not resources.is_valid_resource_type(resource_type): raise exceptions.Invalid(element='resource', value=resource_type) @six.add_metaclass(abc.ABCMeta) class ResourceCallbacksManager(object): """A callback system that allows information providers to register in a loosely coupled manner.
""" # This hook is to allow tests to get new objects for the class _singleton = True def __new__(cls, *args, **kwargs): if not cls._singleton: return super(ResourceCallbacksManager, cls).__new__(cls) if not hasattr(cls, '_instance'): cls._instance = super(ResourceCallbacksManager, cls).__new__(cls) return cls._instance @abc.abstractmethod def _add_callback(self, callback, resource_type): pass @abc.abstractmethod def _delete_callback(self, callback, resource_type): pass def register(self, callback, resource_type): """Register a callback for a resource type. :param callback: the callback. It must raise or return NeutronObject. :param resource_type: must be a valid resource type. """ LOG.debug("Registering callback for %s", resource_type) _validate_resource_type(resource_type) self._add_callback(callback, resource_type) def unregister(self, callback, resource_type): """Unregister callback from the registry. :param callback: the callback. :param resource_type: must be a valid resource type. """ LOG.debug("Unregistering callback for %s", resource_type) _validate_resource_type(resource_type) self._delete_callback(callback, resource_type) @abc.abstractmethod def clear(self): """Brings the manager to a clean state.""" def get_subscribed_types(self): return list(self._callbacks.keys()) class ProducerResourceCallbacksManager(ResourceCallbacksManager): _callbacks = dict() def _add_callback(self, callback, resource_type): if resource_type in self._callbacks: raise rpc_exc.CallbacksMaxLimitReached(resource_type=resource_type) self._callbacks[resource_type] = callback def _delete_callback(self, callback, resource_type): try: del self._callbacks[resource_type] except KeyError: raise rpc_exc.CallbackNotFound(resource_type=resource_type) def clear(self): self._callbacks = dict() def get_callback(self, resource_type): _validate_resource_type(resource_type) try: return self._callbacks[resource_type] except KeyError: raise rpc_exc.CallbackNotFound(resource_type=resource_type) class ConsumerResourceCallbacksManager(ResourceCallbacksManager): _callbacks = collections.defaultdict(set) def _add_callback(self, callback, resource_type): self._callbacks[resource_type].add(callback) def _delete_callback(self, callback, resource_type): try: self._callbacks[resource_type].remove(callback) if not self._callbacks[resource_type]: del self._callbacks[resource_type] except KeyError: raise rpc_exc.CallbackNotFound(resource_type=resource_type) def clear(self): self._callbacks = collections.defaultdict(set) def get_callbacks(self, resource_type): """Return the callback if found, None otherwise. :param resource_type: must be a valid resource type. """ _validate_resource_type(resource_type) callbacks = self._callbacks[resource_type] if not callbacks: raise rpc_exc.CallbackNotFound(resource_type=resource_type) return callbacks neutron-8.4.0/neutron/api/rpc/callbacks/__init__.py0000664000567000056710000000000013044372736023434 0ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/api/rpc/callbacks/producer/0000775000567000056710000000000013044373210023144 5ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/api/rpc/callbacks/producer/__init__.py0000664000567000056710000000000013044372736025257 0ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/api/rpc/callbacks/producer/registry.py0000664000567000056710000000367413044372736025414 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron.api.rpc.callbacks import exceptions from neutron.api.rpc.callbacks import resource_manager from neutron.objects import base # TODO(ajo): consider adding locking: it's safe for eventlet but not # for other types of threading. def _get_manager(): return resource_manager.ProducerResourceCallbacksManager() def provide(callback, resource_type): """Register a callback as a producer for the resource type. This callback will be used to produce resources of corresponding type for interested parties. """ _get_manager().register(callback, resource_type) def unprovide(callback, resource_type): """Unregister a callback for corresponding resource type.""" _get_manager().unregister(callback, resource_type) def clear(): """Clear all callbacks.""" _get_manager().clear() def pull(resource_type, resource_id, **kwargs): """Get resource object that corresponds to resource id. The function will return an object that is provided by resource producer. :returns: NeutronObject """ callback = _get_manager().get_callback(resource_type) obj = callback(resource_type, resource_id, **kwargs) if obj: if (not isinstance(obj, base.NeutronObject) or resource_type != obj.obj_name()): raise exceptions.CallbackWrongResourceType( resource_type=resource_type) return obj neutron-8.4.0/neutron/api/rpc/callbacks/resources.py0000664000567000056710000000247113044372760023722 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
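# A short sketch of the producer registry defined above. The
# _fetch_qos_policy helper is illustrative, and QosPolicy.get_by_id is
# assumed as the per-id lookup available in this release; the contract that
# pull() enforces is that the callback returns a NeutronObject whose
# obj_name() matches the requested resource type, otherwise
# CallbackWrongResourceType is raised.
from neutron.api.rpc.callbacks.producer import registry as producer_registry
from neutron.api.rpc.callbacks import resources as rpc_resources
from neutron.objects.qos import policy as policy_object


def _fetch_qos_policy(resource_type, policy_id, **kwargs):
    # The caller threads the request context through pull()'s **kwargs.
    return policy_object.QosPolicy.get_by_id(kwargs['context'], policy_id)


# producer_registry.provide(_fetch_qos_policy, rpc_resources.QOS_POLICY)
# qos_policy = producer_registry.pull(rpc_resources.QOS_POLICY, policy_id,
#                                     context=context)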
from neutron.objects.qos import policy _QOS_POLICY_CLS = policy.QosPolicy _VALID_CLS = ( _QOS_POLICY_CLS, ) _VALID_TYPES = [cls.obj_name() for cls in _VALID_CLS] # Supported types QOS_POLICY = _QOS_POLICY_CLS.obj_name() _TYPE_TO_CLS_MAP = { QOS_POLICY: _QOS_POLICY_CLS, } LOCAL_RESOURCE_VERSIONS = { resource_type: cls.VERSION for resource_type, cls in _TYPE_TO_CLS_MAP.items() } def get_resource_type(resource_cls): if not resource_cls: return None if not hasattr(resource_cls, 'obj_name'): return None return resource_cls.obj_name() def is_valid_resource_type(resource_type): return resource_type in _VALID_TYPES def get_resource_cls(resource_type): return _TYPE_TO_CLS_MAP.get(resource_type) neutron-8.4.0/neutron/api/rpc/callbacks/consumer/0000775000567000056710000000000013044373210023154 5ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/api/rpc/callbacks/consumer/__init__.py0000664000567000056710000000000013044372736025267 0ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/api/rpc/callbacks/consumer/registry.py0000664000567000056710000000245313044372760025413 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron.api.rpc.callbacks import resource_manager #TODO(ajo): consider adding locking to _get_manager, it's # safe for eventlet, but not for normal threading. def _get_manager(): return resource_manager.ConsumerResourceCallbacksManager() def subscribe(callback, resource_type): _get_manager().register(callback, resource_type) def unsubscribe(callback, resource_type): _get_manager().unregister(callback, resource_type) def push(resource_type, resource, event_type): """Push resource events into all registered callbacks for the type.""" callbacks = _get_manager().get_callbacks(resource_type) for callback in callbacks: callback(resource, event_type) def clear(): _get_manager().clear() neutron-8.4.0/neutron/api/rpc/callbacks/events.py0000664000567000056710000000125413044372736023215 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. CREATED = 'created' UPDATED = 'updated' DELETED = 'deleted' VALID = ( CREATED, UPDATED, DELETED ) neutron-8.4.0/neutron/api/rpc/callbacks/version_manager.py0000664000567000056710000002603213044372760025066 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import collections import copy import pprint import time from neutron_lib import constants from oslo_log import log as logging from oslo_utils import importutils from neutron.api.rpc.callbacks import exceptions from neutron import manager LOG = logging.getLogger(__name__) VERSIONS_TTL = 60 # This is the list of agents that started using this rpc push/pull mechanism # for versioned objects, but at that time stable/liberty, they were not # reporting versions, so we need to assume they need QosPolicy 1.0 #TODO(mangelajo): Remove this logic in Newton, since those agents will be # already reporting From N to O NON_REPORTING_AGENT_TYPES = [constants.AGENT_TYPE_OVS, constants.AGENT_TYPE_NIC_SWITCH] # NOTE(mangelajo): if we import this globally we end up with a (very # long) circular dependency, this can be fixed if we # stop importing all exposed classes in # neutron.api.rpc.callbacks.resources and provide # a decorator to expose classes def _import_resources(): return importutils.import_module('neutron.api.rpc.callbacks.resources') def _import_agents_db(): return importutils.import_module('neutron.db.agents_db') AgentConsumer = collections.namedtuple('AgentConsumer', ['agent_type', 'host']) AgentConsumer.__repr__ = lambda self: '%s@%s' % self class ResourceConsumerTracker(object): """Class passed down to collect consumer's resource versions. This class is responsible for fetching the local versions of resources, and letting the called function register every consumer's resource version. This class is passed down to the plugin get_agents_resource_versions currently, as the only expected consumers are agents so far. Later on, this class can also be used to recalculate, for each resource type, the collection of versions that are local or known by one or more consumers. """ def __init__(self): # Initialize with the local (server) versions, as we always want # to send those. Agents, as they upgrade, will need the latest version, # and there is a corner case we'd not be covering otherwise: # 1) one or several neutron-servers get disconnected from rpc (while # running) # 2) a new agent comes up, with the latest version and it reports # 2 ways: # a) via status report (which will be stored in the database) # b) via fanout call to all neutron servers, this way, all of them # get their version set updated right away without the need to # re-fetch anything from the database. # 3) the neutron-servers get back online to the rpc bus, but they # lost the fanout message. # # TODO(mangelajo) To cover this case we may need a callback from oslo # messaging to get notified about disconnections/reconnections to the # rpc bus, invalidating the consumer version cache when we receive such # callback. 
self._versions = self._get_local_resource_versions() self._versions_by_consumer = collections.defaultdict(dict) self._needs_recalculation = False def _get_local_resource_versions(self): resources = _import_resources() local_resource_versions = collections.defaultdict(set) for resource_type, version in ( resources.LOCAL_RESOURCE_VERSIONS.items()): local_resource_versions[resource_type].add(version) return local_resource_versions # TODO(mangelajo): add locking with _recalculate_versions if we ever # move out of green threads. def _set_version(self, consumer, resource_type, version): """Set or update a consumer resource type version.""" self._versions[resource_type].add(version) consumer_versions = self._versions_by_consumer[consumer] prev_version = consumer_versions.get(resource_type, None) if version: consumer_versions[resource_type] = version else: consumer_versions.pop(resource_type, None) if prev_version != version: # If a version got updated/changed in a consumer, we need to # recalculate the main dictionary of versions based on the # new _versions_by_consumer. # We defer the recalculation until every consumer version has # been set for all of its resource types. self._needs_recalculation = True LOG.debug("Version for resource type %(resource_type)s changed " "%(prev_version)s to %(version)s on " "consumer %(consumer)s", {'resource_type': resource_type, 'version': version, 'prev_version': prev_version, 'consumer': consumer}) def set_versions(self, consumer, versions): """Set or update a specific consumer's resource type versions. :param consumer: should be an AgentConsumer object, with agent_type and host set. This acts as the unique ID for the agent. :param versions: should be a dictionary in the following format: {'QosPolicy': '1.1', 'SecurityGroup': '1.0', 'Port': '1.0'} """ for resource_type, resource_version in versions.items(): self._set_version(consumer, resource_type, resource_version) if versions: self._cleanup_removed_versions(consumer, versions) else: self._handle_no_set_versions(consumer) def _cleanup_removed_versions(self, consumer, versions): """Check if any version report has been removed, and cleanup.""" prev_resource_types = set( self._versions_by_consumer[consumer].keys()) cur_resource_types = set(versions.keys()) removed_resource_types = prev_resource_types - cur_resource_types for resource_type in removed_resource_types: self._set_version(consumer, resource_type, None) def _handle_no_set_versions(self, consumer): """Handle consumers reporting no versions.""" if isinstance(consumer, AgentConsumer): if consumer.agent_type in NON_REPORTING_AGENT_TYPES: resources = _import_resources() self._versions_by_consumer[consumer] = { resources.QOS_POLICY: '1.0'} self._versions[resources.QOS_POLICY].add('1.0') return if self._versions_by_consumer[consumer]: self._needs_recalculation = True self._versions_by_consumer[consumer] = {} def get_resource_versions(self, resource_type): """Fetch the versions necessary to notify all consumers.""" if self._needs_recalculation: self._recalculate_versions() self._needs_recalculation = False return copy.copy(self._versions[resource_type]) def report(self): """Output debug information about the consumer versions.""" #TODO(mangelajo): report only when pushed_versions differ from # previous reports.
format = lambda versions: pprint.pformat(dict(versions), indent=4) debug_dict = {'pushed_versions': format(self._versions), 'consumer_versions': format(self._versions_by_consumer)} LOG.debug('Tracked resource versions report:\n' 'pushed versions:\n%(pushed_versions)s\n\n' 'consumer versions:\n%(consumer_versions)s\n', debug_dict) # TODO(mangelajo): Add locking if we ever move out of greenthreads. def _recalculate_versions(self): """Recalculate the _versions set. Re-fetch the local (server) versions and expand with consumers' versions. """ versions = self._get_local_resource_versions() for versions_dict in self._versions_by_consumer.values(): for res_type, res_version in versions_dict.items(): versions[res_type].add(res_version) self._versions = versions class CachedResourceConsumerTracker(object): """This class takes care of the caching logic of versions.""" def __init__(self): # This is TTL expiration time, 0 means it will be expired at start self._expires_at = 0 self._versions = ResourceConsumerTracker() def _update_consumer_versions(self): new_tracker = ResourceConsumerTracker() neutron_plugin = manager.NeutronManager.get_plugin() agents_db = _import_agents_db() # If you use RPC callbacks, your plugin needs to implement # AgentDbMixin so that we know which resource versions your # agents consume via RPC; please note that rpc_callbacks are # only designed to work with agents currently. if isinstance(neutron_plugin, agents_db.AgentDbMixin): neutron_plugin.get_agents_resource_versions(new_tracker) else: raise exceptions.NoAgentDbMixinImplemented() self._versions = new_tracker self._versions.report() def _check_expiration(self): if time.time() > self._expires_at: self._update_consumer_versions() self._expires_at = time.time() + VERSIONS_TTL def get_resource_versions(self, resource_type): self._check_expiration() return self._versions.get_resource_versions(resource_type) def update_versions(self, consumer, resource_versions): self._versions.set_versions(consumer, resource_versions) def report(self): self._check_expiration() self._versions.report() _cached_version_tracker = None #NOTE(ajo): add locking if we ever stop using greenthreads def _get_cached_tracker(): global _cached_version_tracker if not _cached_version_tracker: _cached_version_tracker = CachedResourceConsumerTracker() return _cached_version_tracker def get_resource_versions(resource_type): """Return the set of versions expected by the consumers of a resource.""" return _get_cached_tracker().get_resource_versions(resource_type) def update_versions(consumer, resource_versions): """Update the resources' versions for a consumer id.""" _get_cached_tracker().update_versions(consumer, resource_versions) def report(): """Report resource versions in debug logs.""" _get_cached_tracker().report() neutron-8.4.0/neutron/api/rpc/callbacks/exceptions.py0000664000567000056710000000240113044372760024062 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License.
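# A brief sketch of driving the version manager defined above: agents report
# the object versions they understand, and notifier code then asks which
# versions must be pushed for a resource type. The agent type constant is
# real; the host name and version values below are illustrative.
from neutron_lib import constants as lib_constants

from neutron.api.rpc.callbacks import version_manager


def example_report_agent_versions(host):
    consumer = version_manager.AgentConsumer(
        agent_type=lib_constants.AGENT_TYPE_OVS, host=host)
    version_manager.update_versions(consumer, {'QosPolicy': '1.1'})


# Notifier code would then call the following (it needs a running plugin,
# since the cached tracker refreshes consumer versions from the agents DB):
# versions = version_manager.get_resource_versions('QosPolicy')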
from neutron._i18n import _ from neutron.common import exceptions class CallbackWrongResourceType(exceptions.NeutronException): message = _('Callback for %(resource_type)s returned wrong resource type') class CallbackNotFound(exceptions.NeutronException): message = _('Callback for %(resource_type)s not found') class CallbacksMaxLimitReached(exceptions.NeutronException): message = _("Cannot add multiple callbacks for %(resource_type)s") class NoAgentDbMixinImplemented(exceptions.NeutronException): message = _("RPC callbacks mechanism needs the implementation of " "AgentDbMixin in the plugin, as so far it's only designed " "to work with agents") neutron-8.4.0/neutron/api/rpc/__init__.py0000664000567000056710000000000013044372736021515 0ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/api/rpc/agentnotifiers/0000775000567000056710000000000013044373210022423 5ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/api/rpc/agentnotifiers/dhcp_rpc_agent_api.py0000664000567000056710000002263213044372760026604 0ustar jenkinsjenkins00000000000000# Copyright (c) 2013 OpenStack Foundation. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. from oslo_config import cfg from oslo_log import log as logging import oslo_messaging from neutron._i18n import _LE, _LW from neutron.callbacks import events from neutron.callbacks import registry from neutron.callbacks import resources from neutron.common import constants from neutron.common import rpc as n_rpc from neutron.common import topics from neutron.common import utils from neutron import manager LOG = logging.getLogger(__name__) class DhcpAgentNotifyAPI(object): """API for plugin to notify DHCP agent. This class implements the client side of an rpc interface. The server side is neutron.agent.dhcp_agent.DhcpAgent. For more information about changing rpc interfaces, please see doc/source/devref/rpc_api.rst. 
""" # It seems dhcp agent does not support bulk operation VALID_RESOURCES = ['network', 'subnet', 'port'] VALID_METHOD_NAMES = ['network.create.end', 'network.update.end', 'network.delete.end', 'subnet.create.end', 'subnet.update.end', 'subnet.delete.end', 'port.create.end', 'port.update.end', 'port.delete.end'] def __init__(self, topic=topics.DHCP_AGENT, plugin=None): self._plugin = plugin target = oslo_messaging.Target(topic=topic, version='1.0') self.client = n_rpc.get_client(target) # register callbacks for router interface changes registry.subscribe(self._after_router_interface_created, resources.ROUTER_INTERFACE, events.AFTER_CREATE) registry.subscribe(self._after_router_interface_deleted, resources.ROUTER_INTERFACE, events.AFTER_DELETE) @property def plugin(self): if self._plugin is None: self._plugin = manager.NeutronManager.get_plugin() return self._plugin def _schedule_network(self, context, network, existing_agents): """Schedule the network to new agents :return: all agents associated with the network """ new_agents = self.plugin.schedule_network(context, network) or [] if new_agents: for agent in new_agents: self._cast_message( context, 'network_create_end', {'network': {'id': network['id']}}, agent['host']) elif not existing_agents: LOG.warning(_LW('Unable to schedule network %s: no agents ' 'available; will retry on subsequent port ' 'and subnet creation events.'), network['id']) return new_agents + existing_agents def _get_enabled_agents(self, context, network, agents, method, payload): """Get the list of agents who can provide services.""" if not agents: return [] network_id = network['id'] enabled_agents = agents if not cfg.CONF.enable_services_on_agents_with_admin_state_down: enabled_agents = [x for x in agents if x.admin_state_up] active_agents = [x for x in agents if x.is_active] len_enabled_agents = len(enabled_agents) len_active_agents = len(active_agents) if len_active_agents < len_enabled_agents: LOG.warning(_LW("Only %(active)d of %(total)d DHCP agents " "associated with network '%(net_id)s' " "are marked as active, so notifications " "may be sent to inactive agents."), {'active': len_active_agents, 'total': len_enabled_agents, 'net_id': network_id}) if not enabled_agents: num_ports = self.plugin.get_ports_count( context, {'network_id': [network_id]}) notification_required = ( num_ports > 0 and len(network['subnets']) >= 1) if notification_required: LOG.error(_LE("Will not send event %(method)s for network " "%(net_id)s: no agent available. 
Payload: " "%(payload)s"), {'method': method, 'net_id': network_id, 'payload': payload}) return enabled_agents def _is_reserved_dhcp_port(self, port): return port.get('device_id') == constants.DEVICE_ID_RESERVED_DHCP_PORT def _notify_agents(self, context, method, payload, network_id): """Notify all the agents that are hosting the network.""" # fanout is required as we do not know who is "listening" no_agents = not utils.is_extension_supported( self.plugin, constants.DHCP_AGENT_SCHEDULER_EXT_ALIAS) fanout_required = method == 'network_delete_end' or no_agents # we do nothing on network creation because we want to give the # admin the chance to associate an agent to the network manually cast_required = method != 'network_create_end' if fanout_required: self._fanout_message(context, method, payload) elif cast_required: admin_ctx = (context if context.is_admin else context.elevated()) network = self.plugin.get_network(admin_ctx, network_id) agents = self.plugin.get_dhcp_agents_hosting_networks( context, [network_id]) # schedule the network first, if needed schedule_required = ( method == 'subnet_create_end' or method == 'port_create_end' and not self._is_reserved_dhcp_port(payload['port'])) if schedule_required: agents = self._schedule_network(admin_ctx, network, agents) if not agents: LOG.debug("Network %s is not hosted by any dhcp agent", network_id) return enabled_agents = self._get_enabled_agents( context, network, agents, method, payload) for agent in enabled_agents: self._cast_message( context, method, payload, agent.host, agent.topic) def _cast_message(self, context, method, payload, host, topic=topics.DHCP_AGENT): """Cast the payload to the dhcp agent running on the host.""" cctxt = self.client.prepare(topic=topic, server=host) cctxt.cast(context, method, payload=payload) def _fanout_message(self, context, method, payload): """Fanout the payload to all dhcp agents.""" cctxt = self.client.prepare(fanout=True) cctxt.cast(context, method, payload=payload) def network_removed_from_agent(self, context, network_id, host): self._cast_message(context, 'network_delete_end', {'network_id': network_id}, host) def network_added_to_agent(self, context, network_id, host): self._cast_message(context, 'network_create_end', {'network': {'id': network_id}}, host) def agent_updated(self, context, admin_state_up, host): self._cast_message(context, 'agent_updated', {'admin_state_up': admin_state_up}, host) def _after_router_interface_created(self, resource, event, trigger, **kwargs): self._notify_agents(kwargs['context'], 'port_create_end', {'port': kwargs['port']}, kwargs['port']['network_id']) def _after_router_interface_deleted(self, resource, event, trigger, **kwargs): self._notify_agents(kwargs['context'], 'port_delete_end', {'port_id': kwargs['port']['id']}, kwargs['port']['network_id']) def notify(self, context, data, method_name): # data is {'key' : 'value'} with only one key if method_name not in self.VALID_METHOD_NAMES: return obj_type = list(data.keys())[0] if obj_type not in self.VALID_RESOURCES: return obj_value = data[obj_type] network_id = None if obj_type == 'network' and 'id' in obj_value: network_id = obj_value['id'] elif obj_type in ['port', 'subnet'] and 'network_id' in obj_value: network_id = obj_value['network_id'] if not network_id: return method_name = method_name.replace(".", "_") if method_name.endswith("_delete_end"): if 'id' in obj_value: self._notify_agents(context, method_name, {obj_type + '_id': obj_value['id']}, network_id) else: self._notify_agents(context, method_name, 
data, network_id) neutron-8.4.0/neutron/api/rpc/agentnotifiers/__init__.py0000664000567000056710000000000013044372736024536 0ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/api/rpc/agentnotifiers/metering_rpc_agent_api.py0000664000567000056710000001043413044372760027475 0ustar jenkinsjenkins00000000000000# Copyright (C) 2013 eNovance SAS # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_log import log as logging import oslo_messaging import six from neutron.common import constants from neutron.common import rpc as n_rpc from neutron.common import topics from neutron.common import utils from neutron.db import agentschedulers_db from neutron import manager from neutron.plugins.common import constants as service_constants LOG = logging.getLogger(__name__) class MeteringAgentNotifyAPI(object): """API for plugin to notify L3 metering agent.""" def __init__(self, topic=topics.METERING_AGENT): self.topic = topic target = oslo_messaging.Target(topic=topic, version='1.0') self.client = n_rpc.get_client(target) def _agent_notification(self, context, method, routers): """Notify l3 metering agents hosted by l3 agent hosts.""" adminContext = context if context.is_admin else context.elevated() plugin = manager.NeutronManager.get_service_plugins().get( service_constants.L3_ROUTER_NAT) l3_routers = {} state = agentschedulers_db.get_admin_state_up_filter() for router in routers: l3_agents = plugin.get_l3_agents_hosting_routers( adminContext, [router['id']], admin_state_up=state, active=True) for l3_agent in l3_agents: LOG.debug('Notify metering agent at %(topic)s.%(host)s ' 'the message %(method)s', {'topic': self.topic, 'host': l3_agent.host, 'method': method}) l3_router = l3_routers.get(l3_agent.host, []) l3_router.append(router) l3_routers[l3_agent.host] = l3_router for host, routers in six.iteritems(l3_routers): cctxt = self.client.prepare(server=host) cctxt.cast(context, method, routers=routers) def _notification_fanout(self, context, method, router_id): LOG.debug('Fanout notify metering agent at %(topic)s the message ' '%(method)s on router %(router_id)s', {'topic': self.topic, 'method': method, 'router_id': router_id}) cctxt = self.client.prepare(fanout=True) cctxt.cast(context, method, router_id=router_id) def _notification(self, context, method, routers): """Notify all the agents that are hosting the routers.""" plugin = manager.NeutronManager.get_service_plugins().get( service_constants.L3_ROUTER_NAT) if utils.is_extension_supported( plugin, constants.L3_AGENT_SCHEDULER_EXT_ALIAS): self._agent_notification(context, method, routers) else: cctxt = self.client.prepare(fanout=True) cctxt.cast(context, method, routers=routers) def router_deleted(self, context, router_id): self._notification_fanout(context, 'router_deleted', router_id) def routers_updated(self, context, routers): if routers: self._notification(context, 'routers_updated', routers) def update_metering_label_rules(self, context, routers): self._notification(context, 'update_metering_label_rules', routers) def add_metering_label_rule(self, 
context, routers): self._notification(context, 'add_metering_label_rule', routers) def remove_metering_label_rule(self, context, routers): self._notification(context, 'remove_metering_label_rule', routers) def add_metering_label(self, context, routers): self._notification(context, 'add_metering_label', routers) def remove_metering_label(self, context, routers): self._notification(context, 'remove_metering_label', routers) neutron-8.4.0/neutron/api/rpc/agentnotifiers/l3_rpc_agent_api.py0000664000567000056710000001635513044372760026211 0ustar jenkinsjenkins00000000000000# Copyright (c) 2013 OpenStack Foundation. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import random from oslo_log import log as logging import oslo_messaging from neutron._i18n import _LE from neutron.common import constants from neutron.common import rpc as n_rpc from neutron.common import topics from neutron.common import utils from neutron import manager from neutron.plugins.common import constants as service_constants LOG = logging.getLogger(__name__) class L3AgentNotifyAPI(object): """API for plugin to notify L3 agent.""" def __init__(self, topic=topics.L3_AGENT): target = oslo_messaging.Target(topic=topic, version='1.0') self.client = n_rpc.get_client(target) def _notification_host(self, context, method, host, use_call=False, **kwargs): """Notify the agent that is hosting the router.""" LOG.debug('Notify agent at %(host)s the message ' '%(method)s', {'host': host, 'method': method}) cctxt = self.client.prepare(server=host) rpc_method = cctxt.call if use_call else cctxt.cast rpc_method(context, method, **kwargs) def _agent_notification(self, context, method, router_ids, operation, shuffle_agents): """Notify changed routers to hosting l3 agents.""" adminContext = context if context.is_admin else context.elevated() plugin = manager.NeutronManager.get_service_plugins().get( service_constants.L3_ROUTER_NAT) for router_id in router_ids: hosts = plugin.get_hosts_to_notify(adminContext, router_id) if shuffle_agents: random.shuffle(hosts) for host in hosts: LOG.debug('Notify agent at %(topic)s.%(host)s the message ' '%(method)s', {'topic': topics.L3_AGENT, 'host': host, 'method': method}) cctxt = self.client.prepare(topic=topics.L3_AGENT, server=host, version='1.1') cctxt.cast(context, method, routers=[router_id]) def _agent_notification_arp(self, context, method, router_id, operation, data): """Notify arp details to l3 agents hosting router.""" if not router_id: return dvr_arptable = {'router_id': router_id, 'arp_table': data} LOG.debug('Fanout dvr_arptable update: %s', dvr_arptable) cctxt = self.client.prepare(fanout=True, version='1.2') cctxt.cast(context, method, payload=dvr_arptable) def _notification(self, context, method, router_ids, operation, shuffle_agents, schedule_routers=True): """Notify all the agents that are hosting the routers.""" plugin = manager.NeutronManager.get_service_plugins().get( service_constants.L3_ROUTER_NAT) if not plugin: LOG.error(_LE('No plugin for L3 routing registered. 
Cannot notify ' 'agents with the message %s'), method) return if utils.is_extension_supported( plugin, constants.L3_AGENT_SCHEDULER_EXT_ALIAS): adminContext = (context.is_admin and context or context.elevated()) if schedule_routers: plugin.schedule_routers(adminContext, router_ids) self._agent_notification( context, method, router_ids, operation, shuffle_agents) else: cctxt = self.client.prepare(fanout=True) cctxt.cast(context, method, routers=router_ids) def _notification_fanout(self, context, method, router_id=None, **kwargs): """Fanout the information to all L3 agents. This function will fanout the router_id or ext_net_id to the L3 Agents. """ ext_net_id = kwargs.get('ext_net_id') if router_id: kwargs['router_id'] = router_id LOG.debug('Fanout notify agent at %(topic)s the message ' '%(method)s on router %(router_id)s', {'topic': topics.L3_AGENT, 'method': method, 'router_id': router_id}) if ext_net_id: LOG.debug('Fanout notify agent at %(topic)s the message ' '%(method)s for external_network %(ext_net_id)s', {'topic': topics.L3_AGENT, 'method': method, 'ext_net_id': ext_net_id}) cctxt = self.client.prepare(fanout=True) cctxt.cast(context, method, **kwargs) def agent_updated(self, context, admin_state_up, host): self._notification_host(context, 'agent_updated', host, payload={'admin_state_up': admin_state_up}) def router_deleted(self, context, router_id): self._notification_fanout(context, 'router_deleted', router_id) def routers_updated(self, context, router_ids, operation=None, data=None, shuffle_agents=False, schedule_routers=True): if router_ids: self._notification(context, 'routers_updated', router_ids, operation, shuffle_agents, schedule_routers) def add_arp_entry(self, context, router_id, arp_table, operation=None): self._agent_notification_arp(context, 'add_arp_entry', router_id, operation, arp_table) def del_arp_entry(self, context, router_id, arp_table, operation=None): self._agent_notification_arp(context, 'del_arp_entry', router_id, operation, arp_table) def delete_fipnamespace_for_ext_net(self, context, ext_net_id): self._notification_fanout( context, 'fipnamespace_delete_on_ext_net', ext_net_id=ext_net_id) def router_removed_from_agent(self, context, router_id, host): self._notification_host(context, 'router_removed_from_agent', host, payload={'router_id': router_id}) def router_added_to_agent(self, context, router_ids, host): # need to use call here as we want to be sure agent received # notification and router will not be "lost". However using call() # itself is not a guarantee, calling code should handle exceptions and # retry self._notification_host(context, 'router_added_to_agent', host, use_call=True, payload=router_ids) def routers_updated_on_host(self, context, router_ids, host): self._notification_host(context, 'routers_updated', host, routers=router_ids) neutron-8.4.0/neutron/api/rpc/agentnotifiers/bgp_dr_rpc_agent_api.py0000664000567000056710000001113213044372760027114 0ustar jenkinsjenkins00000000000000# Copyright 2016 Huawei Technologies India Pvt. Ltd. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. 
# See the License for the specific language governing permissions and # limitations under the License. import oslo_messaging from neutron.common import rpc as n_rpc from neutron.services.bgp.common import constants as bgp_consts class BgpDrAgentNotifyApi(object): """API for plugin to notify BGP DrAgent. This class implements the client side of an rpc interface. The server side is neutron.services.bgp_speaker.agent.bgp_dragent.BgpDrAgent. For more information about rpc interfaces, please see doc/source/devref/rpc_api.rst. """ def __init__(self, topic=bgp_consts.BGP_DRAGENT): target = oslo_messaging.Target(topic=topic, version='1.0') self.client = n_rpc.get_client(target) self.topic = topic def bgp_routes_advertisement(self, context, bgp_speaker_id, routes, host): """Tell BgpDrAgent to begin advertising the given routes. Invoked on FIP association, adding a router port to a tenant network, new DVR port-host bindings, and subnet creation(?). """ self._notification_host_cast(context, 'bgp_routes_advertisement_end', {'advertise_routes': {'speaker_id': bgp_speaker_id, 'routes': routes}}, host) def bgp_routes_withdrawal(self, context, bgp_speaker_id, routes, host): """Tell BgpDrAgent to stop advertising the given routes. Invoked on FIP disassociation, removal of a router port on a network, removal of a DVR port-host binding, and subnet delete(?). """ self._notification_host_cast(context, 'bgp_routes_withdrawal_end', {'withdraw_routes': {'speaker_id': bgp_speaker_id, 'routes': routes}}, host) def bgp_peer_disassociated(self, context, bgp_speaker_id, bgp_peer_ip, host): """Tell BgpDrAgent about a BGP Peer disassociation. This effectively tells the BgpDrAgent to stop a peering session. """ self._notification_host_cast(context, 'bgp_peer_disassociation_end', {'bgp_peer': {'speaker_id': bgp_speaker_id, 'peer_ip': bgp_peer_ip}}, host) def bgp_peer_associated(self, context, bgp_speaker_id, bgp_peer_id, host): """Tell BgpDrAgent about a new BGP Peer association. This effectively tells the bgp_dragent to open a peering session. """ self._notification_host_cast(context, 'bgp_peer_association_end', {'bgp_peer': {'speaker_id': bgp_speaker_id, 'peer_id': bgp_peer_id}}, host) def bgp_speaker_created(self, context, bgp_speaker_id, host): """Tell BgpDrAgent about the creation of a BGP Speaker. Because a BGP Speaker can be created with a BgpPeer binding in place, we need to inform the BgpDrAgent of a new BGP Speaker in case a peering session needs to be opened immediately. """ self._notification_host_cast(context, 'bgp_speaker_create_end', {'bgp_speaker': {'id': bgp_speaker_id}}, host) def bgp_speaker_removed(self, context, bgp_speaker_id, host): """Tell BgpDrAgent about the removal of a BGP Speaker. Because a BGP Speaker can be removed with a BGP Peer binding in place, we need to inform the BgpDrAgent of the removal of a BGP Speaker in case peering sessions need to be stopped.
""" self._notification_host_cast(context, 'bgp_speaker_remove_end', {'bgp_speaker': {'id': bgp_speaker_id}}, host) def _notification_host_cast(self, context, method, payload, host): """Send payload to BgpDrAgent in the cast mode""" cctxt = self.client.prepare(topic=self.topic, server=host) cctxt.cast(context, method, payload=payload) def _notification_host_call(self, context, method, payload, host): """Send payload to BgpDrAgent in the call mode""" cctxt = self.client.prepare(topic=self.topic, server=host) cctxt.call(context, method, payload=payload) neutron-8.4.0/neutron/api/views/0000775000567000056710000000000013044373210017753 5ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/api/views/__init__.py0000664000567000056710000000000013044372736022066 0ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/api/views/versions.py0000664000567000056710000000331013044372760022203 0ustar jenkinsjenkins00000000000000# Copyright 2010-2011 OpenStack Foundation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import os def get_view_builder(req): base_url = req.application_url return ViewBuilder(base_url) class ViewBuilder(object): def __init__(self, base_url): """Object initialization. :param base_url: url of the root wsgi application """ self.base_url = base_url def build(self, version_data): """Generic method used to generate a version entity.""" version = { "id": version_data["id"], "status": version_data["status"], "links": self._build_links(version_data), } return version def _build_links(self, version_data): """Generate a container of links that refer to the provided version.""" href = self.generate_href(version_data["id"]) links = [ { "rel": "self", "href": href, }, ] return links def generate_href(self, version_number): """Create an url that refers to a specific version_number.""" return os.path.join(self.base_url, version_number) neutron-8.4.0/neutron/db/0000775000567000056710000000000013044373210016432 5ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/db/flavors_db.py0000664000567000056710000003132513044372760021142 0ustar jenkinsjenkins00000000000000# All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from oslo_log import log as logging from oslo_utils import uuidutils import sqlalchemy as sa from sqlalchemy import orm from sqlalchemy.orm import exc as sa_exc from neutron.api.v2 import attributes as attr from neutron.db import common_db_mixin from neutron.db import model_base from neutron.db import servicetype_db as sdb from neutron.extensions import flavors as ext_flavors LOG = logging.getLogger(__name__) class Flavor(model_base.BASEV2, model_base.HasId): name = sa.Column(sa.String(attr.NAME_MAX_LEN)) description = sa.Column(sa.String(attr.LONG_DESCRIPTION_MAX_LEN)) enabled = sa.Column(sa.Boolean, nullable=False, default=True, server_default=sa.sql.true()) # Make it True for multi-type flavors service_type = sa.Column(sa.String(36), nullable=True) service_profiles = orm.relationship("FlavorServiceProfileBinding", cascade="all, delete-orphan") class ServiceProfile(model_base.BASEV2, model_base.HasId): description = sa.Column(sa.String(attr.LONG_DESCRIPTION_MAX_LEN)) driver = sa.Column(sa.String(1024), nullable=False) enabled = sa.Column(sa.Boolean, nullable=False, default=True, server_default=sa.sql.true()) metainfo = sa.Column(sa.String(4096)) flavors = orm.relationship("FlavorServiceProfileBinding") class FlavorServiceProfileBinding(model_base.BASEV2): flavor_id = sa.Column(sa.String(36), sa.ForeignKey("flavors.id", ondelete="CASCADE"), nullable=False, primary_key=True) flavor = orm.relationship(Flavor) service_profile_id = sa.Column(sa.String(36), sa.ForeignKey("serviceprofiles.id", ondelete="CASCADE"), nullable=False, primary_key=True) service_profile = orm.relationship(ServiceProfile) class FlavorsDbMixin(common_db_mixin.CommonDbMixin): """Class to support flavors and service profiles.""" def _get_flavor(self, context, flavor_id): try: return self._get_by_id(context, Flavor, flavor_id) except sa_exc.NoResultFound: raise ext_flavors.FlavorNotFound(flavor_id=flavor_id) def _get_service_profile(self, context, sp_id): try: return self._get_by_id(context, ServiceProfile, sp_id) except sa_exc.NoResultFound: raise ext_flavors.ServiceProfileNotFound(sp_id=sp_id) def _make_flavor_dict(self, flavor_db, fields=None): res = {'id': flavor_db['id'], 'name': flavor_db['name'], 'description': flavor_db['description'], 'service_type': flavor_db['service_type'], 'enabled': flavor_db['enabled'], 'service_profiles': []} if flavor_db.service_profiles: res['service_profiles'] = [sp['service_profile_id'] for sp in flavor_db.service_profiles] return self._fields(res, fields) def _make_service_profile_dict(self, sp_db, fields=None): res = {'id': sp_db['id'], 'description': sp_db['description'], 'driver': sp_db['driver'], 'enabled': sp_db['enabled'], 'metainfo': sp_db['metainfo']} if sp_db.flavors: res['flavors'] = [fl['flavor_id'] for fl in sp_db.flavors] return self._fields(res, fields) def _ensure_flavor_not_in_use(self, context, flavor_id): """Checks that flavor is not associated with service instance.""" # Future TODO(enikanorov): check that there is no binding to # instances. Shall address in future upon getting the right # flavor supported driver pass def _ensure_service_profile_not_in_use(self, context, sp_id): """Ensures no current bindings to flavors exist.""" fl = (context.session.query(FlavorServiceProfileBinding). 
filter_by(service_profile_id=sp_id).first()) if fl: raise ext_flavors.ServiceProfileInUse(sp_id=sp_id) def _validate_driver(self, context, driver): """Confirms a non-empty driver is a valid provider.""" service_type_manager = sdb.ServiceTypeManager.get_instance() providers = service_type_manager.get_service_providers( context, filters={'driver': driver}) if not providers: raise ext_flavors.ServiceProfileDriverNotFound(driver=driver) def create_flavor(self, context, flavor): fl = flavor['flavor'] with context.session.begin(subtransactions=True): fl_db = Flavor(id=uuidutils.generate_uuid(), name=fl['name'], description=fl['description'], service_type=fl['service_type'], enabled=fl['enabled']) context.session.add(fl_db) return self._make_flavor_dict(fl_db) def update_flavor(self, context, flavor_id, flavor): fl = flavor['flavor'] with context.session.begin(subtransactions=True): self._ensure_flavor_not_in_use(context, flavor_id) fl_db = self._get_flavor(context, flavor_id) fl_db.update(fl) return self._make_flavor_dict(fl_db) def get_flavor(self, context, flavor_id, fields=None): fl = self._get_flavor(context, flavor_id) return self._make_flavor_dict(fl, fields) def delete_flavor(self, context, flavor_id): with context.session.begin(subtransactions=True): self._ensure_flavor_not_in_use(context, flavor_id) fl_db = self._get_flavor(context, flavor_id) context.session.delete(fl_db) def get_flavors(self, context, filters=None, fields=None, sorts=None, limit=None, marker=None, page_reverse=False): return self._get_collection(context, Flavor, self._make_flavor_dict, filters=filters, fields=fields, sorts=sorts, limit=limit, marker_obj=marker, page_reverse=page_reverse) def create_flavor_service_profile(self, context, service_profile, flavor_id): sp = service_profile['service_profile'] with context.session.begin(subtransactions=True): bind_qry = context.session.query(FlavorServiceProfileBinding) binding = bind_qry.filter_by(service_profile_id=sp['id'], flavor_id=flavor_id).first() if binding: raise ext_flavors.FlavorServiceProfileBindingExists( sp_id=sp['id'], fl_id=flavor_id) binding = FlavorServiceProfileBinding( service_profile_id=sp['id'], flavor_id=flavor_id) context.session.add(binding) fl_db = self._get_flavor(context, flavor_id) return self._make_flavor_dict(fl_db) def delete_flavor_service_profile(self, context, service_profile_id, flavor_id): with context.session.begin(subtransactions=True): binding = (context.session.query(FlavorServiceProfileBinding). filter_by(service_profile_id=service_profile_id, flavor_id=flavor_id).first()) if not binding: raise ext_flavors.FlavorServiceProfileBindingNotFound( sp_id=service_profile_id, fl_id=flavor_id) context.session.delete(binding) def get_flavor_service_profile(self, context, service_profile_id, flavor_id, fields=None): with context.session.begin(subtransactions=True): binding = (context.session.query(FlavorServiceProfileBinding). 
filter_by(service_profile_id=service_profile_id, flavor_id=flavor_id).first()) if not binding: raise ext_flavors.FlavorServiceProfileBindingNotFound( sp_id=service_profile_id, fl_id=flavor_id) res = {'service_profile_id': service_profile_id, 'flavor_id': flavor_id} return self._fields(res, fields) def create_service_profile(self, context, service_profile): sp = service_profile['service_profile'] if sp['driver']: self._validate_driver(context, sp['driver']) else: if not sp['metainfo']: raise ext_flavors.ServiceProfileEmpty() with context.session.begin(subtransactions=True): sp_db = ServiceProfile(id=uuidutils.generate_uuid(), description=sp['description'], driver=sp['driver'], enabled=sp['enabled'], metainfo=sp['metainfo']) context.session.add(sp_db) return self._make_service_profile_dict(sp_db) def update_service_profile(self, context, service_profile_id, service_profile): sp = service_profile['service_profile'] if sp.get('driver'): self._validate_driver(context, sp['driver']) with context.session.begin(subtransactions=True): self._ensure_service_profile_not_in_use(context, service_profile_id) sp_db = self._get_service_profile(context, service_profile_id) sp_db.update(sp) return self._make_service_profile_dict(sp_db) def get_service_profile(self, context, sp_id, fields=None): sp_db = self._get_service_profile(context, sp_id) return self._make_service_profile_dict(sp_db, fields) def delete_service_profile(self, context, sp_id): with context.session.begin(subtransactions=True): self._ensure_service_profile_not_in_use(context, sp_id) sp_db = self._get_service_profile(context, sp_id) context.session.delete(sp_db) def get_service_profiles(self, context, filters=None, fields=None, sorts=None, limit=None, marker=None, page_reverse=False): return self._get_collection(context, ServiceProfile, self._make_service_profile_dict, filters=filters, fields=fields, sorts=sorts, limit=limit, marker_obj=marker, page_reverse=page_reverse) def get_flavor_next_provider(self, context, flavor_id, filters=None, fields=None, sorts=None, limit=None, marker=None, page_reverse=False): """From flavor, choose service profile and find provider for driver.""" with context.session.begin(subtransactions=True): bind_qry = context.session.query(FlavorServiceProfileBinding) binding = bind_qry.filter_by(flavor_id=flavor_id).first() if not binding: raise ext_flavors.FlavorServiceProfileBindingNotFound( sp_id='', fl_id=flavor_id) # Get the service profile from the first binding # TODO(jwarendt) Should become a scheduling framework instead sp_db = self._get_service_profile(context, binding['service_profile_id']) if not sp_db.enabled: raise ext_flavors.ServiceProfileDisabled() LOG.debug("Found driver %s.", sp_db.driver) service_type_manager = sdb.ServiceTypeManager.get_instance() providers = service_type_manager.get_service_providers( context, filters={'driver': sp_db.driver}) if not providers: raise ext_flavors.ServiceProfileDriverNotFound(driver=sp_db.driver) LOG.debug("Found providers %s.", providers) res = {'driver': sp_db.driver, 'provider': providers[0].get('name')} return [self._fields(res, fields)] neutron-8.4.0/neutron/db/extradhcpopt_db.py0000664000567000056710000001530613044372760022174 0ustar jenkinsjenkins00000000000000# Copyright (c) 2013 OpenStack Foundation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import sqlalchemy as sa
from sqlalchemy import orm

from neutron.api.v2 import attributes
from neutron.db import db_base_plugin_v2
from neutron.db import model_base
from neutron.db import models_v2
from neutron.extensions import extra_dhcp_opt as edo_ext


class ExtraDhcpOpt(model_base.BASEV2, model_base.HasId):
    """Represent a generic concept of extra options associated with a port.

    Each port may have zero or more DHCP options associated with it, which
    can supply different or additional options to DHCP clients. These will
    be written to the /opts files, and each option's tag will be referenced
    in the /host file.
    """
    port_id = sa.Column(sa.String(36),
                        sa.ForeignKey('ports.id', ondelete="CASCADE"),
                        nullable=False)
    opt_name = sa.Column(sa.String(64), nullable=False)
    opt_value = sa.Column(sa.String(255), nullable=False)
    ip_version = sa.Column(sa.Integer, server_default='4', nullable=False)
    __table_args__ = (sa.UniqueConstraint(
        'port_id',
        'opt_name',
        'ip_version',
        name='uniq_extradhcpopts0portid0optname0ipversion'),
        model_base.BASEV2.__table_args__,)

    # Add a relationship to the Port model in order to instruct SQLAlchemy to
    # eagerly load extra_dhcp_opts bindings
    ports = orm.relationship(
        models_v2.Port,
        backref=orm.backref("dhcp_opts", lazy='joined', cascade='delete'))


class ExtraDhcpOptMixin(object):
    """Mixin class to add extra options to the DHCP opts file
    and associate them with a port.
    """

    def _is_valid_opt_value(self, opt_name, opt_value):
        # If the dhcp opt is blank-able, it shouldn't be saved to the DB
        # when the value is None
        if opt_name in edo_ext.VALID_BLANK_EXTRA_DHCP_OPTS:
            return opt_value is not None

        # Otherwise, it shouldn't be saved to the DB when the value is
        # None or empty
        return bool(opt_value)

    def _process_port_create_extra_dhcp_opts(self, context, port,
                                             extra_dhcp_opts):
        if not extra_dhcp_opts:
            return port
        with context.session.begin(subtransactions=True):
            for dopt in extra_dhcp_opts:
                if self._is_valid_opt_value(dopt['opt_name'],
                                            dopt['opt_value']):
                    ip_version = dopt.get('ip_version', 4)
                    db = ExtraDhcpOpt(
                        port_id=port['id'],
                        opt_name=dopt['opt_name'],
                        opt_value=dopt['opt_value'],
                        ip_version=ip_version)
                    context.session.add(db)
        return self._extend_port_extra_dhcp_opts_dict(context, port)

    def _extend_port_extra_dhcp_opts_dict(self, context, port):
        port[edo_ext.EXTRADHCPOPTS] = self._get_port_extra_dhcp_opts_binding(
            context, port['id'])

    def _get_port_extra_dhcp_opts_binding(self, context, port_id):
        query = self._model_query(context, ExtraDhcpOpt)
        binding = query.filter(ExtraDhcpOpt.port_id == port_id)
        return [{'opt_name': r.opt_name, 'opt_value': r.opt_value,
                 'ip_version': r.ip_version}
                for r in binding]

    def _update_extra_dhcp_opts_on_port(self, context, id, port,
                                        updated_port=None):
        # It is not necessary to update in a transaction, because
        # it is called from within one from ovs_neutron_plugin.
        dopts = port['port'].get(edo_ext.EXTRADHCPOPTS)

        if dopts:
            opt_db = self._model_query(
                context, ExtraDhcpOpt).filter_by(port_id=id).all()
            # If there are currently no dhcp_options associated with
            # this port, then just insert the new ones and be done.
            with context.session.begin(subtransactions=True):
                for upd_rec in dopts:
                    for opt in opt_db:
                        if (opt['opt_name'] == upd_rec['opt_name']
                                and opt['ip_version'] == upd_rec.get(
                                    'ip_version', 4)):
                            # Handle deletion of an opt from the port.
                            if upd_rec['opt_value'] is None:
                                context.session.delete(opt)
                            else:
                                if (self._is_valid_opt_value(
                                        opt['opt_name'],
                                        upd_rec['opt_value']) and
                                        opt['opt_value'] !=
                                        upd_rec['opt_value']):
                                    opt.update(
                                        {'opt_value': upd_rec['opt_value']})
                            break
                    else:
                        if self._is_valid_opt_value(
                                upd_rec['opt_name'],
                                upd_rec['opt_value']):
                            ip_version = upd_rec.get('ip_version', 4)
                            db = ExtraDhcpOpt(
                                port_id=id,
                                opt_name=upd_rec['opt_name'],
                                opt_value=upd_rec['opt_value'],
                                ip_version=ip_version)
                            context.session.add(db)

            if updated_port:
                edolist = self._get_port_extra_dhcp_opts_binding(context, id)
                updated_port[edo_ext.EXTRADHCPOPTS] = edolist

        return bool(dopts)

    def _extend_port_dict_extra_dhcp_opt(self, res, port):
        res[edo_ext.EXTRADHCPOPTS] = [{'opt_name': dho.opt_name,
                                       'opt_value': dho.opt_value,
                                       'ip_version': dho.ip_version}
                                      for dho in port.dhcp_opts]
        return res

    db_base_plugin_v2.NeutronDbPluginV2.register_dict_extend_funcs(
        attributes.PORTS, ['_extend_port_dict_extra_dhcp_opt'])
neutron-8.4.0/neutron/db/model_base.py0000664000567000056710000001233713044372760021115 0ustar jenkinsjenkins00000000000000# Copyright (c) 2012 OpenStack Foundation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
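# The ExtraDhcpOptMixin above reads extra_dhcp_opts out of the 'port' body of
# create/update requests. A minimal sketch of the dict shape it expects; the
# option names and addresses below are illustrative, not taken from this tree.

def _extra_dhcp_opts_request_sketch():
    """Illustrative only: payload consumed by ExtraDhcpOptMixin."""
    return {
        'port': {
            'extra_dhcp_opts': [
                # create or update an IPv4 option
                {'opt_name': 'server-ip-address', 'opt_value': '192.0.2.1',
                 'ip_version': 4},
                # opt_value None asks _update_extra_dhcp_opts_on_port()
                # to delete a previously stored option
                {'opt_name': 'bootfile-name', 'opt_value': None},
            ],
        },
    }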
from oslo_db.sqlalchemy import models from oslo_utils import uuidutils import sqlalchemy as sa from sqlalchemy.ext.associationproxy import association_proxy from sqlalchemy.ext import declarative from sqlalchemy import orm from neutron.api.v2 import attributes as attr class HasTenant(object): """Tenant mixin, add to subclasses that have a tenant.""" # NOTE(jkoelker) tenant_id is just a free form string ;( tenant_id = sa.Column(sa.String(attr.TENANT_ID_MAX_LEN), index=True) class HasId(object): """id mixin, add to subclasses that have an id.""" id = sa.Column(sa.String(36), primary_key=True, default=uuidutils.generate_uuid) class HasStatusDescription(object): """Status with description mixin.""" status = sa.Column(sa.String(16), nullable=False) status_description = sa.Column(sa.String(attr.DESCRIPTION_MAX_LEN)) class NeutronBase(models.ModelBase): """Base class for Neutron Models.""" __table_args__ = {'mysql_engine': 'InnoDB'} def __iter__(self): self._i = iter(orm.object_mapper(self).columns) return self def next(self): n = next(self._i).name return n, getattr(self, n) __next__ = next def __repr__(self): """sqlalchemy based automatic __repr__ method.""" items = ['%s=%r' % (col.name, getattr(self, col.name)) for col in self.__table__.columns] return "<%s.%s[object at %x] {%s}>" % (self.__class__.__module__, self.__class__.__name__, id(self), ', '.join(items)) class NeutronBaseV2(NeutronBase): @declarative.declared_attr def __tablename__(cls): # NOTE(jkoelker) use the pluralized name of the class as the table return cls.__name__.lower() + 's' BASEV2 = declarative.declarative_base(cls=NeutronBaseV2) class StandardAttribute(BASEV2, models.TimestampMixin): """Common table to associate all Neutron API resources. By having Neutron objects related to this table, we can associate new tables that apply to many Neutron objects (e.g. timestamps, rbac entries) to this table to avoid schema duplication while maintaining referential integrity. NOTE(kevinbenton): This table should not have more columns added to it unless we are absolutely certain the new column will have a value for every single type of Neutron resource. Otherwise this table will be filled with NULL entries for combinations that don't make sense. Additionally, by keeping this table small we can ensure that performance isn't adversely impacted for queries on objects. """ # sqlite doesn't support auto increment on big integers so we use big int # for everything but sqlite id = sa.Column(sa.BigInteger().with_variant(sa.Integer(), 'sqlite'), primary_key=True, autoincrement=True) # NOTE(kevinbenton): this column is redundant information, but it allows # operators/devs to look at the contents of this table and know which table # the corresponding object is in. # 255 was selected as a max just because it's the varchar ceiling in mysql # before a 2-byte prefix is required. We shouldn't get anywhere near this # limit with our table names... resource_type = sa.Column(sa.String(255), nullable=False) description = sa.Column(sa.String(attr.DESCRIPTION_MAX_LEN)) class HasStandardAttributes(object): @declarative.declared_attr def standard_attr_id(cls): return sa.Column( sa.BigInteger().with_variant(sa.Integer(), 'sqlite'), sa.ForeignKey(StandardAttribute.id, ondelete="CASCADE"), unique=True, nullable=False ) # NOTE(kevinbenton): we have to disable the following pylint check because # it thinks we are overriding this method in the __init__ method. 
    #pylint: disable=method-hidden
    @declarative.declared_attr
    def standard_attr(cls):
        return orm.relationship(StandardAttribute,
                                lazy='joined', cascade='all, delete-orphan',
                                single_parent=True,
                                uselist=False)

    def __init__(self, description='', *args, **kwargs):
        super(HasStandardAttributes, self).__init__(*args, **kwargs)
        # here we automatically create the related standard attribute object
        self.standard_attr = StandardAttribute(
            resource_type=self.__tablename__, description=description)

    @declarative.declared_attr
    def description(cls):
        return association_proxy('standard_attr', 'description')
neutron-8.4.0/neutron/db/l3_dvr_ha_scheduler_db.py0000664000567000056710000000376713044372760023366 0ustar jenkinsjenkins00000000000000# Copyright (c) 2016 Hewlett Packard Enterprise Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import neutron.db.l3_dvrscheduler_db as l3agent_dvr_sch_db
import neutron.db.l3_hascheduler_db as l3_ha_sch_db

from oslo_log import log as logging

LOG = logging.getLogger(__name__)


class L3_DVR_HA_scheduler_db_mixin(l3agent_dvr_sch_db.L3_DVRsch_db_mixin,
                                   l3_ha_sch_db.L3_HA_scheduler_db_mixin):

    def get_dvr_routers_to_remove(self, context, port_id):
        """Return info about which routers should be removed.

        When a DVR-serviceable port is deleted, we need to check whether
        any DVR routers should be removed from the L3 agent on the port's
        host.
        """
        remove_router_info = super(L3_DVR_HA_scheduler_db_mixin,
                                   self).get_dvr_routers_to_remove(context,
                                                                   port_id)
        # Process the returned router information to make sure we don't
        # delete routers that have DVR HA SNAT bindings.
        processed_remove_router_info = []
        for router_info in remove_router_info:
            router_id = router_info['router_id']
            agent_id = router_info['agent_id']
            if not self._check_router_agent_ha_binding(
                    context, router_id, agent_id):
                processed_remove_router_info.append(router_info)

        return processed_remove_router_info
neutron-8.4.0/neutron/db/external_net_db.py0000664000567000056710000003107113044372760022154 0ustar jenkinsjenkins00000000000000# Copyright (c) 2013 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
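# model_base.HasStandardAttributes (defined above) automatically gives a model
# a row in the standardattributes table plus a proxied 'description' column.
# A minimal sketch of how the mixins combine; the 'Widget' model is purely
# illustrative and not part of this tree.

def _standard_attr_model_sketch():
    """Illustrative only: combining the model_base mixins."""
    import sqlalchemy as sa
    from neutron.db import model_base

    class Widget(model_base.HasStandardAttributes, model_base.BASEV2,
                 model_base.HasId, model_base.HasTenant):
        name = sa.Column(sa.String(64))

    w = Widget(description='demo')
    # __init__ created the related StandardAttribute object, tagged with
    # the pluralized table name derived from the class name
    return w.standard_attr.resource_type  # 'widgets'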
import sqlalchemy as sa from sqlalchemy import orm from sqlalchemy.orm import exc from sqlalchemy import sql from sqlalchemy.sql import expression as expr from neutron.api.v2 import attributes from neutron.callbacks import events from neutron.callbacks import exceptions as c_exc from neutron.callbacks import registry from neutron.callbacks import resources from neutron.common import constants as l3_constants from neutron.common import exceptions as n_exc from neutron.db import db_base_plugin_v2 from neutron.db import l3_db from neutron.db import model_base from neutron.db import models_v2 from neutron.db import rbac_db_models as rbac_db from neutron.extensions import external_net from neutron.extensions import rbac as rbac_ext from neutron import manager from neutron.plugins.common import constants as service_constants DEVICE_OWNER_ROUTER_GW = l3_constants.DEVICE_OWNER_ROUTER_GW class ExternalNetwork(model_base.BASEV2): network_id = sa.Column(sa.String(36), sa.ForeignKey('networks.id', ondelete="CASCADE"), primary_key=True) # introduced by auto-allocated-topology extension is_default = sa.Column(sa.Boolean(), nullable=False, server_default=sql.false()) # Add a relationship to the Network model in order to instruct # SQLAlchemy to eagerly load this association network = orm.relationship( models_v2.Network, backref=orm.backref("external", lazy='joined', uselist=False, cascade='delete')) class External_net_db_mixin(object): """Mixin class to add external network methods to db_base_plugin_v2.""" def _network_model_hook(self, context, original_model, query): query = query.outerjoin(ExternalNetwork, (original_model.id == ExternalNetwork.network_id)) return query def _network_filter_hook(self, context, original_model, conditions): if conditions is not None and not hasattr(conditions, '__iter__'): conditions = (conditions, ) # Apply the external network filter only in non-admin and non-advsvc # context if self.model_query_scope(context, original_model): # the table will already be joined to the rbac entries for the # shared check so we don't need to worry about ensuring that rbac_model = original_model.rbac_entries.property.mapper.class_ tenant_allowed = ( (rbac_model.action == 'access_as_external') & (rbac_model.target_tenant == context.tenant_id) | (rbac_model.target_tenant == '*')) conditions = expr.or_(tenant_allowed, *conditions) return conditions def _network_result_filter_hook(self, query, filters): vals = filters and filters.get(external_net.EXTERNAL, []) if not vals: return query if vals[0]: return query.filter((ExternalNetwork.network_id != expr.null())) return query.filter((ExternalNetwork.network_id == expr.null())) # TODO(salvatore-orlando): Perform this operation without explicitly # referring to db_base_plugin_v2, as plugins that do not extend from it # might exist in the future db_base_plugin_v2.NeutronDbPluginV2.register_model_query_hook( models_v2.Network, "external_net", '_network_model_hook', '_network_filter_hook', '_network_result_filter_hook') def _network_is_external(self, context, net_id): try: context.session.query(ExternalNetwork).filter_by( network_id=net_id).one() return True except exc.NoResultFound: return False def _extend_network_dict_l3(self, network_res, network_db): # Comparing with None for converting uuid into bool network_res[external_net.EXTERNAL] = network_db.external is not None return network_res # Register dict extend functions for networks db_base_plugin_v2.NeutronDbPluginV2.register_dict_extend_funcs( attributes.NETWORKS, ['_extend_network_dict_l3']) 
    def _process_l3_create(self, context, net_data, req_data):
        external = req_data.get(external_net.EXTERNAL)
        external_set = attributes.is_attr_set(external)

        if not external_set:
            return

        # TODO(armax): these notifications should switch to *_COMMIT
        # when the event becomes available, as this block is expected
        # to be called within a plugin's session
        if external:
            try:
                registry.notify(
                    resources.EXTERNAL_NETWORK, events.BEFORE_CREATE,
                    self, context=context,
                    request=req_data, network=net_data)
            except c_exc.CallbackFailure as e:
                # raise the underlying exception
                raise e.errors[0].error

            context.session.add(ExternalNetwork(network_id=net_data['id']))
            context.session.add(rbac_db.NetworkRBAC(
                object_id=net_data['id'], action='access_as_external',
                target_tenant='*', tenant_id=net_data['tenant_id']))
            registry.notify(
                resources.EXTERNAL_NETWORK, events.AFTER_CREATE,
                self, context=context,
                request=req_data, network=net_data)
        net_data[external_net.EXTERNAL] = external

    def _process_l3_update(self, context, net_data, req_data, allow_all=True):
        try:
            registry.notify(
                resources.EXTERNAL_NETWORK, events.BEFORE_UPDATE,
                self, context=context,
                request=req_data, network=net_data)
        except c_exc.CallbackFailure as e:
            # raise the underlying exception
            raise e.errors[0].error

        new_value = req_data.get(external_net.EXTERNAL)
        net_id = net_data['id']

        if not attributes.is_attr_set(new_value):
            return

        if net_data.get(external_net.EXTERNAL) == new_value:
            return

        if new_value:
            context.session.add(ExternalNetwork(network_id=net_id))
            net_data[external_net.EXTERNAL] = True
            if allow_all:
                context.session.add(rbac_db.NetworkRBAC(
                    object_id=net_id, action='access_as_external',
                    target_tenant='*', tenant_id=net_data['tenant_id']))
        else:
            # We must make sure there are no external gateway ports (and
            # thus, possible floating IPs) on this network before allowing
            # it to be updated to external=False.
            port = context.session.query(models_v2.Port).filter_by(
                device_owner=DEVICE_OWNER_ROUTER_GW,
                network_id=net_data['id']).first()
            if port:
                raise external_net.ExternalNetworkInUse(net_id=net_id)

            context.session.query(ExternalNetwork).filter_by(
                network_id=net_id).delete()
            context.session.query(rbac_db.NetworkRBAC).filter_by(
                object_id=net_id, action='access_as_external').delete()
            net_data[external_net.EXTERNAL] = False

    def _process_l3_delete(self, context, network_id):
        l3plugin = manager.NeutronManager.get_service_plugins().get(
            service_constants.L3_ROUTER_NAT)
        if l3plugin:
            l3plugin.delete_disassociated_floatingips(context, network_id)

    def get_external_network_id(self, context):
        nets = self.get_networks(context, {external_net.EXTERNAL: [True]})
        if len(nets) > 1:
            raise n_exc.TooManyExternalNetworks()
        else:
            return nets[0]['id'] if nets else None

    def _process_ext_policy_create(self, resource, event, trigger, context,
                                   object_type, policy, **kwargs):
        if (object_type != 'network' or
                policy['action'] != 'access_as_external'):
            return
        net = self.get_network(context, policy['object_id'])
        if not context.is_admin and net['tenant_id'] != context.tenant_id:
            msg = _("Only admins can manipulate policies on networks they "
                    "do not own.")
            raise n_exc.InvalidInput(error_message=msg)
        if not self._network_is_external(context, policy['object_id']):
            # we automatically convert the network into an external network
            self._process_l3_update(context, net,
                                    {external_net.EXTERNAL: True},
                                    allow_all=False)

    def _validate_ext_not_in_use_by_tenant(self, resource, event, trigger,
                                           context, object_type, policy,
                                           **kwargs):
        if (object_type != 'network' or
                policy['action'] != 'access_as_external'):
            return
        new_tenant = None
        if event == events.BEFORE_UPDATE:
            new_tenant = kwargs['policy_update']['target_tenant']
            if new_tenant == policy['target_tenant']:
                # nothing to validate if the tenant didn't change
                return
        ports = context.session.query(models_v2.Port.id).filter_by(
            device_owner=DEVICE_OWNER_ROUTER_GW,
            network_id=policy['object_id'])
        router = context.session.query(l3_db.Router).filter(
            l3_db.Router.gw_port_id.in_(ports))
        rbac = rbac_db.NetworkRBAC
        if policy['target_tenant'] != '*':
            router = router.filter(
                l3_db.Router.tenant_id == policy['target_tenant'])
            # if there is a wildcard entry we can safely proceed without the
            # router lookup because they will have access either way
            if context.session.query(rbac_db.NetworkRBAC).filter(
                    rbac.object_id == policy['object_id'],
                    rbac.action == 'access_as_external',
                    rbac.target_tenant == '*').count():
                return
        else:
            # deleting the wildcard is okay as long as the tenants with
            # attached routers have their own entries and the network is
            # not the default external network.
            is_default = context.session.query(ExternalNetwork).filter_by(
                network_id=policy['object_id'], is_default=True).count()
            if is_default:
                msg = _("Default external networks must be shared to "
                        "everyone.")
                raise rbac_ext.RbacPolicyInUse(object_id=policy['object_id'],
                                               details=msg)
            tenants_with_entries = (
                context.session.query(rbac.target_tenant).
                filter(rbac.object_id == policy['object_id'],
                       rbac.action == 'access_as_external',
                       rbac.target_tenant != '*'))
            router = router.filter(
                ~l3_db.Router.tenant_id.in_(tenants_with_entries))
            if new_tenant:
                # if this is an update we also need to ignore any router
                # interfaces that belong to the new target.
                router = router.filter(l3_db.Router.tenant_id != new_tenant)
        if router.count():
            msg = _("There are routers attached to this network that "
                    "depend on this policy for access.")
            raise rbac_ext.RbacPolicyInUse(object_id=policy['object_id'],
                                           details=msg)

    def _register_external_net_rbac_hooks(self):
        registry.subscribe(self._process_ext_policy_create,
                           'rbac-policy', events.BEFORE_CREATE)
        for e in (events.BEFORE_UPDATE, events.BEFORE_DELETE):
            registry.subscribe(self._validate_ext_not_in_use_by_tenant,
                               'rbac-policy', e)

    def __new__(cls, *args, **kwargs):
        new = super(External_net_db_mixin, cls).__new__(cls, *args, **kwargs)
        new._register_external_net_rbac_hooks()
        return new
neutron-8.4.0/neutron/db/sqlalchemyutils.py0000664000567000056710000001050513044372760022241 0ustar jenkinsjenkins00000000000000# Copyright 2011 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from six import moves
import sqlalchemy
from sqlalchemy.orm import properties

from neutron._i18n import _
from neutron.common import exceptions as n_exc


def paginate_query(query, model, limit, sorts, marker_obj=None):
    """Returns a query with sorting / pagination criteria added.

    Pagination works by requiring a unique sort key, specified by sorts.
    (If the sort keys are not unique, then we risk looping through values.)
    We use the last row in the previous page as the 'marker' for pagination.
    So we must return values that follow the passed marker in the sort order.
    With a single-valued sort key, this would be easy: sort_key > X.
    With a compound sort key (k1, k2, k3), we must do the following to
    repeat the lexicographical ordering:
    (k1 > X1) or (k1 == X1 && k2 > X2) or (k1 == X1 && k2 == X2 && k3 > X3)

    We do not use the OFFSET clause because it does not scale; see the
    discussion at https://lists.launchpad.net/openstack/msg02547.html

    We also have to cope with different sort directions.

    Typically, the id of the last row is used as the client-facing pagination
    marker, and then the actual marker object must be fetched from the db
    and passed in as marker_obj.

    :param query: the query object to which we should add paging/sorting
    :param model: the ORM model class
    :param limit: maximum number of items to return
    :param sorts: array of attributes and direction by which results should
                  be sorted
    :param marker_obj: the last item of the previous page; we return the
                       next results after this value
    :rtype: sqlalchemy.orm.query.Query
    :return: The query with sorting/pagination added.
    """
    if not sorts:
        return query

    # A primary key must be specified in sort keys
    assert not (limit and
                len(set(dict(sorts).keys()) &
                    set(model.__table__.primary_key.columns.keys())) == 0)

    # Add sorting
    for sort_key, sort_direction in sorts:
        sort_dir_func = sqlalchemy.asc if sort_direction else sqlalchemy.desc
        try:
            sort_key_attr = getattr(model, sort_key)
        except AttributeError:
            # Extension attributes don't support sorting. Since the
            # attribute existed in attr_info, the error is caught here.
            msg = _("%s is an invalid attribute for sort_key") % sort_key
            raise n_exc.BadRequest(resource=model.__tablename__, msg=msg)
        if isinstance(sort_key_attr.property,
                      properties.RelationshipProperty):
            msg = _("The attribute '%(attr)s' is a reference to another "
                    "resource and cannot be used to sort "
                    "'%(resource)s'") % {'attr': sort_key,
                                         'resource': model.__tablename__}
            raise n_exc.BadRequest(resource=model.__tablename__, msg=msg)
        query = query.order_by(sort_dir_func(sort_key_attr))

    # Add pagination
    if marker_obj:
        marker_values = [getattr(marker_obj, sort[0]) for sort in sorts]

        # Build up an array of sort criteria as in the docstring
        criteria_list = []
        for i, sort in enumerate(sorts):
            crit_attrs = [(getattr(model, sorts[j][0]) == marker_values[j])
                          for j in moves.range(i)]
            model_attr = getattr(model, sort[0])
            if sort[1]:
                crit_attrs.append((model_attr > marker_values[i]))
            else:
                crit_attrs.append((model_attr < marker_values[i]))

            criteria = sqlalchemy.sql.and_(*crit_attrs)
            criteria_list.append(criteria)

        f = sqlalchemy.sql.or_(*criteria_list)
        query = query.filter(f)

    if limit:
        query = query.limit(limit)

    return query
neutron-8.4.0/neutron/db/models_v2.py0000664000567000056710000002770213044372760020713 0ustar jenkinsjenkins00000000000000# Copyright (c) 2012 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
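# A minimal sketch of driving paginate_query() as defined above; the limit,
# sort key, and use of the Port model are illustrative choices, and 'context'
# is assumed to be a neutron context with an open session.

def _paginate_ports_sketch(context):
    """Illustrative only: keyset pagination with paginate_query()."""
    from neutron.db import models_v2
    from neutron.db import sqlalchemyutils

    query = context.session.query(models_v2.Port)
    # each sort is (attribute name, ascending?); include a unique key such
    # as the primary key so pages cannot loop
    sorts = [('id', True)]
    first_page = sqlalchemyutils.paginate_query(
        query, models_v2.Port, limit=10, sorts=sorts).all()
    if not first_page:
        return []
    # the last row of one page becomes the marker object for the next
    return sqlalchemyutils.paginate_query(
        query, models_v2.Port, limit=10, sorts=sorts,
        marker_obj=first_page[-1]).all()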
import sqlalchemy as sa from sqlalchemy import orm from sqlalchemy import sql from neutron.api.v2 import attributes as attr from neutron.common import constants from neutron.db import agentschedulers_db as agt from neutron.db import model_base from neutron.db import rbac_db_models # NOTE(kevinbenton): these are here for external projects that expect them # to be found in this module. HasTenant = model_base.HasTenant HasId = model_base.HasId HasStatusDescription = model_base.HasStatusDescription class IPAvailabilityRange(model_base.BASEV2): """Internal representation of available IPs for Neutron subnets. Allocation - first entry from the range will be allocated. If the first entry is equal to the last entry then this row will be deleted. Recycling ips involves reading the IPAllocationPool and IPAllocation tables and inserting ranges representing available ips. This happens after the final allocation is pulled from this table and a new ip allocation is requested. Any contiguous ranges of available ips will be inserted as a single range. """ allocation_pool_id = sa.Column(sa.String(36), sa.ForeignKey('ipallocationpools.id', ondelete="CASCADE"), nullable=False, primary_key=True) first_ip = sa.Column(sa.String(64), nullable=False, primary_key=True) last_ip = sa.Column(sa.String(64), nullable=False, primary_key=True) __table_args__ = ( sa.UniqueConstraint( first_ip, allocation_pool_id, name='uniq_ipavailabilityranges0first_ip0allocation_pool_id'), sa.UniqueConstraint( last_ip, allocation_pool_id, name='uniq_ipavailabilityranges0last_ip0allocation_pool_id'), model_base.BASEV2.__table_args__ ) def __repr__(self): return "%s - %s" % (self.first_ip, self.last_ip) class IPAllocationPool(model_base.BASEV2, HasId): """Representation of an allocation pool in a Neutron subnet.""" subnet_id = sa.Column(sa.String(36), sa.ForeignKey('subnets.id', ondelete="CASCADE"), nullable=True) first_ip = sa.Column(sa.String(64), nullable=False) last_ip = sa.Column(sa.String(64), nullable=False) available_ranges = orm.relationship(IPAvailabilityRange, backref='ipallocationpool', lazy="select", cascade='all, delete-orphan') def __repr__(self): return "%s - %s" % (self.first_ip, self.last_ip) class IPAllocation(model_base.BASEV2): """Internal representation of allocated IP addresses in a Neutron subnet. 
""" port_id = sa.Column(sa.String(36), sa.ForeignKey('ports.id', ondelete="CASCADE"), nullable=True) ip_address = sa.Column(sa.String(64), nullable=False, primary_key=True) subnet_id = sa.Column(sa.String(36), sa.ForeignKey('subnets.id', ondelete="CASCADE"), nullable=False, primary_key=True) network_id = sa.Column(sa.String(36), sa.ForeignKey("networks.id", ondelete="CASCADE"), nullable=False, primary_key=True) class Route(object): """mixin of a route.""" destination = sa.Column(sa.String(64), nullable=False, primary_key=True) nexthop = sa.Column(sa.String(64), nullable=False, primary_key=True) class SubnetRoute(model_base.BASEV2, Route): subnet_id = sa.Column(sa.String(36), sa.ForeignKey('subnets.id', ondelete="CASCADE"), primary_key=True) class Port(model_base.HasStandardAttributes, model_base.BASEV2, HasId, HasTenant): """Represents a port on a Neutron v2 network.""" name = sa.Column(sa.String(attr.NAME_MAX_LEN)) network_id = sa.Column(sa.String(36), sa.ForeignKey("networks.id"), nullable=False) fixed_ips = orm.relationship(IPAllocation, backref='port', lazy='joined', cascade='all, delete-orphan') mac_address = sa.Column(sa.String(32), nullable=False) admin_state_up = sa.Column(sa.Boolean(), nullable=False) status = sa.Column(sa.String(16), nullable=False) device_id = sa.Column(sa.String(attr.DEVICE_ID_MAX_LEN), nullable=False) device_owner = sa.Column(sa.String(attr.DEVICE_OWNER_MAX_LEN), nullable=False) dns_name = sa.Column(sa.String(255), nullable=True) __table_args__ = ( sa.Index( 'ix_ports_network_id_mac_address', 'network_id', 'mac_address'), sa.Index( 'ix_ports_network_id_device_owner', 'network_id', 'device_owner'), sa.UniqueConstraint( network_id, mac_address, name='uniq_ports0network_id0mac_address'), model_base.BASEV2.__table_args__ ) def __init__(self, id=None, tenant_id=None, name=None, network_id=None, mac_address=None, admin_state_up=None, status=None, device_id=None, device_owner=None, fixed_ips=None, dns_name=None, **kwargs): super(Port, self).__init__(**kwargs) self.id = id self.tenant_id = tenant_id self.name = name self.network_id = network_id self.mac_address = mac_address self.admin_state_up = admin_state_up self.device_owner = device_owner self.device_id = device_id self.dns_name = dns_name # Since this is a relationship only set it if one is passed in. if fixed_ips: self.fixed_ips = fixed_ips # NOTE(arosen): status must be set last as an event is triggered on! self.status = status class DNSNameServer(model_base.BASEV2): """Internal representation of a DNS nameserver.""" address = sa.Column(sa.String(128), nullable=False, primary_key=True) subnet_id = sa.Column(sa.String(36), sa.ForeignKey('subnets.id', ondelete="CASCADE"), primary_key=True) order = sa.Column(sa.Integer, nullable=False, server_default='0') class Subnet(model_base.HasStandardAttributes, model_base.BASEV2, HasId, HasTenant): """Represents a neutron subnet. When a subnet is created the first and last entries will be created. These are used for the IP allocation. 
""" name = sa.Column(sa.String(attr.NAME_MAX_LEN)) network_id = sa.Column(sa.String(36), sa.ForeignKey('networks.id')) subnetpool_id = sa.Column(sa.String(36), index=True) # NOTE: Explicitly specify join conditions for the relationship because # subnetpool_id in subnet might be 'prefix_delegation' when the IPv6 Prefix # Delegation is enabled subnetpool = orm.relationship( 'SubnetPool', lazy='joined', foreign_keys='Subnet.subnetpool_id', primaryjoin='Subnet.subnetpool_id==SubnetPool.id') ip_version = sa.Column(sa.Integer, nullable=False) cidr = sa.Column(sa.String(64), nullable=False) gateway_ip = sa.Column(sa.String(64)) allocation_pools = orm.relationship(IPAllocationPool, backref='subnet', lazy="joined", cascade='delete') enable_dhcp = sa.Column(sa.Boolean()) dns_nameservers = orm.relationship(DNSNameServer, backref='subnet', cascade='all, delete, delete-orphan', order_by=DNSNameServer.order, lazy='joined') routes = orm.relationship(SubnetRoute, backref='subnet', cascade='all, delete, delete-orphan', lazy='joined') ipv6_ra_mode = sa.Column(sa.Enum(constants.IPV6_SLAAC, constants.DHCPV6_STATEFUL, constants.DHCPV6_STATELESS, name='ipv6_ra_modes'), nullable=True) ipv6_address_mode = sa.Column(sa.Enum(constants.IPV6_SLAAC, constants.DHCPV6_STATEFUL, constants.DHCPV6_STATELESS, name='ipv6_address_modes'), nullable=True) # subnets don't have their own rbac_entries, they just inherit from # the network rbac entries rbac_entries = orm.relationship( rbac_db_models.NetworkRBAC, lazy='subquery', uselist=True, foreign_keys='Subnet.network_id', primaryjoin='Subnet.network_id==NetworkRBAC.object_id') class SubnetPoolPrefix(model_base.BASEV2): """Represents a neutron subnet pool prefix """ __tablename__ = 'subnetpoolprefixes' cidr = sa.Column(sa.String(64), nullable=False, primary_key=True) subnetpool_id = sa.Column(sa.String(36), sa.ForeignKey('subnetpools.id', ondelete='CASCADE'), nullable=False, primary_key=True) class SubnetPool(model_base.HasStandardAttributes, model_base.BASEV2, HasId, HasTenant): """Represents a neutron subnet pool. 
""" name = sa.Column(sa.String(attr.NAME_MAX_LEN)) ip_version = sa.Column(sa.Integer, nullable=False) default_prefixlen = sa.Column(sa.Integer, nullable=False) min_prefixlen = sa.Column(sa.Integer, nullable=False) max_prefixlen = sa.Column(sa.Integer, nullable=False) shared = sa.Column(sa.Boolean, nullable=False) is_default = sa.Column(sa.Boolean, nullable=False, server_default=sql.false()) default_quota = sa.Column(sa.Integer, nullable=True) hash = sa.Column(sa.String(36), nullable=False, server_default='') address_scope_id = sa.Column(sa.String(36), nullable=True) prefixes = orm.relationship(SubnetPoolPrefix, backref='subnetpools', cascade='all, delete, delete-orphan', lazy='joined') class Network(model_base.HasStandardAttributes, model_base.BASEV2, HasId, HasTenant): """Represents a v2 neutron network.""" name = sa.Column(sa.String(attr.NAME_MAX_LEN)) ports = orm.relationship(Port, backref='networks') subnets = orm.relationship( Subnet, backref=orm.backref('networks', lazy='subquery'), lazy="subquery") status = sa.Column(sa.String(16)) admin_state_up = sa.Column(sa.Boolean) mtu = sa.Column(sa.Integer, nullable=True) vlan_transparent = sa.Column(sa.Boolean, nullable=True) rbac_entries = orm.relationship(rbac_db_models.NetworkRBAC, backref='network', lazy='subquery', cascade='all, delete, delete-orphan') availability_zone_hints = sa.Column(sa.String(255)) dhcp_agents = orm.relationship( 'Agent', lazy='joined', viewonly=True, secondary=agt.NetworkDhcpAgentBinding.__table__) neutron-8.4.0/neutron/db/securitygroups_rpc_base.py0000664000567000056710000005232213044372760023766 0ustar jenkinsjenkins00000000000000# Copyright 2012, Nachi Ueno, NTT MCL, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import netaddr from oslo_log import log as logging from sqlalchemy.orm import exc from neutron._i18n import _, _LW from neutron.common import constants as n_const from neutron.common import ipv6_utils as ipv6 from neutron.common import utils from neutron.db import allowedaddresspairs_db as addr_pair from neutron.db import models_v2 from neutron.db import securitygroups_db as sg_db from neutron.extensions import securitygroup as ext_sg LOG = logging.getLogger(__name__) DIRECTION_IP_PREFIX = {'ingress': 'source_ip_prefix', 'egress': 'dest_ip_prefix'} DHCP_RULE_PORT = {4: (67, 68, n_const.IPv4), 6: (547, 546, n_const.IPv6)} class SecurityGroupServerRpcMixin(sg_db.SecurityGroupDbMixin): """Mixin class to add agent-based security group implementation.""" def get_port_from_device(self, context, device): """Get port dict from device name on an agent. Subclass must provide this method or get_ports_from_devices. :param device: device name which identifies a port on the agent side. What is specified in "device" depends on a plugin agent implementation. For example, it is a port ID in OVS agent and netdev name in Linux Bridge agent. :return: port dict returned by DB plugin get_port(). In addition, it must contain the following fields in the port dict returned. 
          - device
          - security_groups
          - security_group_rules
          - security_group_source_groups
          - fixed_ips
        """
        raise NotImplementedError(_("%s must implement get_port_from_device "
                                    "or get_ports_from_devices.")
                                  % self.__class__.__name__)

    def get_ports_from_devices(self, context, devices):
        """Bulk method of get_port_from_device.

        Subclasses may override this to provide better performance for DB
        queries, backend calls, etc.
        """
        return [self.get_port_from_device(context, device)
                for device in devices]

    def create_security_group_rule(self, context, security_group_rule):
        rule = super(SecurityGroupServerRpcMixin,
                     self).create_security_group_rule(context,
                                                      security_group_rule)
        sgids = [rule['security_group_id']]
        self.notifier.security_groups_rule_updated(context, sgids)
        return rule

    def create_security_group_rule_bulk(self, context, security_group_rules):
        rules = super(SecurityGroupServerRpcMixin,
                      self).create_security_group_rule_bulk_native(
                          context, security_group_rules)
        sgids = set([r['security_group_id'] for r in rules])
        self.notifier.security_groups_rule_updated(context, list(sgids))
        return rules

    def delete_security_group_rule(self, context, sgrid):
        rule = self.get_security_group_rule(context, sgrid)
        super(SecurityGroupServerRpcMixin,
              self).delete_security_group_rule(context, sgrid)
        self.notifier.security_groups_rule_updated(
            context, [rule['security_group_id']])

    def check_and_notify_security_group_member_changed(
            self, context, original_port, updated_port):
        sg_change = not utils.compare_elements(
            original_port.get(ext_sg.SECURITYGROUPS),
            updated_port.get(ext_sg.SECURITYGROUPS))
        if sg_change:
            self.notify_security_groups_member_updated_bulk(
                context, [original_port, updated_port])
        elif original_port['fixed_ips'] != updated_port['fixed_ips']:
            self.notify_security_groups_member_updated(context, updated_port)

    def is_security_group_member_updated(self, context,
                                         original_port, updated_port):
        """Check whether security group membership was updated or not.

        This method returns a flag indicating whether a notification is
        required; it does not perform the notification itself, because
        other changes to the port may also require notification.
        """
        need_notify = False
        if (original_port['fixed_ips'] != updated_port['fixed_ips'] or
            original_port['mac_address'] != updated_port['mac_address'] or
            not utils.compare_elements(
                original_port.get(ext_sg.SECURITYGROUPS),
                updated_port.get(ext_sg.SECURITYGROUPS))):
            need_notify = True
        return need_notify

    def notify_security_groups_member_updated_bulk(self, context, ports):
        """Notify an update event of security group members for ports.

        The agent sets up iptables rules to allow ingress packets from the
        DHCP server (as part of the provider rules), so we need to notify
        the plugin agent of an update to the DHCP server IP address.
        security_groups_provider_updated() just notifies that an event
        occurred, and the plugin agent fetches the updated provider rules
        via another RPC call (security_group_rules_for_devices).
        """
        sg_provider_updated_networks = set()
        sec_groups = set()
        for port in ports:
            if port['device_owner'] == n_const.DEVICE_OWNER_DHCP:
                sg_provider_updated_networks.add(
                    port['network_id'])
            # For IPv6, provider rules need to be updated when a router
            # interface is created or updated after the VM port is created.
            # NOTE (Swami): ROUTER_INTERFACE_OWNERS check is required
            # since it includes the legacy router interface device owners
            # and DVR router interface device owners.
elif port['device_owner'] in n_const.ROUTER_INTERFACE_OWNERS: if any(netaddr.IPAddress(fixed_ip['ip_address']).version == 6 for fixed_ip in port['fixed_ips']): sg_provider_updated_networks.add( port['network_id']) else: sec_groups |= set(port.get(ext_sg.SECURITYGROUPS)) if sg_provider_updated_networks: ports_query = context.session.query(models_v2.Port.id).filter( models_v2.Port.network_id.in_( sg_provider_updated_networks)).all() ports_to_update = [p.id for p in ports_query] self.notifier.security_groups_provider_updated( context, ports_to_update) if sec_groups: self.notifier.security_groups_member_updated( context, list(sec_groups)) def notify_security_groups_member_updated(self, context, port): self.notify_security_groups_member_updated_bulk(context, [port]) def security_group_info_for_ports(self, context, ports): sg_info = {'devices': ports, 'security_groups': {}, 'sg_member_ips': {}} rules_in_db = self._select_rules_for_ports(context, ports) remote_security_group_info = {} for (port_id, rule_in_db) in rules_in_db: remote_gid = rule_in_db.get('remote_group_id') security_group_id = rule_in_db.get('security_group_id') ethertype = rule_in_db['ethertype'] if ('security_group_source_groups' not in sg_info['devices'][port_id]): sg_info['devices'][port_id][ 'security_group_source_groups'] = [] if remote_gid: if (remote_gid not in sg_info['devices'][port_id][ 'security_group_source_groups']): sg_info['devices'][port_id][ 'security_group_source_groups'].append(remote_gid) if remote_gid not in remote_security_group_info: remote_security_group_info[remote_gid] = {} if ethertype not in remote_security_group_info[remote_gid]: # this set will be serialized into a list by rpc code remote_security_group_info[remote_gid][ethertype] = set() direction = rule_in_db['direction'] rule_dict = { 'direction': direction, 'ethertype': ethertype} for key in ('protocol', 'port_range_min', 'port_range_max', 'remote_ip_prefix', 'remote_group_id'): if rule_in_db.get(key) is not None: if key == 'remote_ip_prefix': direction_ip_prefix = DIRECTION_IP_PREFIX[direction] rule_dict[direction_ip_prefix] = rule_in_db[key] continue rule_dict[key] = rule_in_db[key] if security_group_id not in sg_info['security_groups']: sg_info['security_groups'][security_group_id] = [] if rule_dict not in sg_info['security_groups'][security_group_id]: sg_info['security_groups'][security_group_id].append( rule_dict) # Update the security groups info if they don't have any rules sg_ids = self._select_sg_ids_for_ports(context, ports) for (sg_id, ) in sg_ids: if sg_id not in sg_info['security_groups']: sg_info['security_groups'][sg_id] = [] sg_info['sg_member_ips'] = remote_security_group_info # the provider rules do not belong to any security group, so these # rules still reside in sg_info['devices'] [port_id] self._apply_provider_rule(context, sg_info['devices']) return self._get_security_group_member_ips(context, sg_info) def _get_security_group_member_ips(self, context, sg_info): ips = self._select_ips_for_remote_group( context, sg_info['sg_member_ips'].keys()) for sg_id, member_ips in ips.items(): for ip in member_ips: ethertype = 'IPv%d' % netaddr.IPNetwork(ip).version if ethertype in sg_info['sg_member_ips'][sg_id]: sg_info['sg_member_ips'][sg_id][ethertype].add(ip) return sg_info def _select_sg_ids_for_ports(self, context, ports): if not ports: return [] sg_binding_port = sg_db.SecurityGroupPortBinding.port_id sg_binding_sgid = sg_db.SecurityGroupPortBinding.security_group_id query = context.session.query(sg_binding_sgid) query = 
query.filter(sg_binding_port.in_(ports.keys())) return query.all() def _select_rules_for_ports(self, context, ports): if not ports: return [] sg_binding_port = sg_db.SecurityGroupPortBinding.port_id sg_binding_sgid = sg_db.SecurityGroupPortBinding.security_group_id sgr_sgid = sg_db.SecurityGroupRule.security_group_id query = context.session.query(sg_binding_port, sg_db.SecurityGroupRule) query = query.join(sg_db.SecurityGroupRule, sgr_sgid == sg_binding_sgid) query = query.filter(sg_binding_port.in_(ports.keys())) return query.all() def _select_ips_for_remote_group(self, context, remote_group_ids): ips_by_group = {} if not remote_group_ids: return ips_by_group for remote_group_id in remote_group_ids: ips_by_group[remote_group_id] = set() ip_port = models_v2.IPAllocation.port_id sg_binding_port = sg_db.SecurityGroupPortBinding.port_id sg_binding_sgid = sg_db.SecurityGroupPortBinding.security_group_id # Join the security group binding table directly to the IP allocation # table instead of via the Port table skip an unnecessary intermediary query = context.session.query(sg_binding_sgid, models_v2.IPAllocation.ip_address, addr_pair.AllowedAddressPair.ip_address) query = query.join(models_v2.IPAllocation, ip_port == sg_binding_port) # Outerjoin because address pairs may be null and we still want the # IP for the port. query = query.outerjoin( addr_pair.AllowedAddressPair, sg_binding_port == addr_pair.AllowedAddressPair.port_id) query = query.filter(sg_binding_sgid.in_(remote_group_ids)) # Each allowed address pair IP record for a port beyond the 1st # will have a duplicate regular IP in the query response since # the relationship is 1-to-many. Dedup with a set for security_group_id, ip_address, allowed_addr_ip in query: ips_by_group[security_group_id].add(ip_address) if allowed_addr_ip: ips_by_group[security_group_id].add(allowed_addr_ip) return ips_by_group def _select_remote_group_ids(self, ports): remote_group_ids = [] for port in ports.values(): for rule in port.get('security_group_rules'): remote_group_id = rule.get('remote_group_id') if remote_group_id: remote_group_ids.append(remote_group_id) return remote_group_ids def _select_network_ids(self, ports): return set((port['network_id'] for port in ports.values())) def _select_dhcp_ips_for_network_ids(self, context, network_ids): if not network_ids: return {} query = context.session.query(models_v2.Port.mac_address, models_v2.Port.network_id, models_v2.IPAllocation.ip_address) query = query.join(models_v2.IPAllocation) query = query.filter(models_v2.Port.network_id.in_(network_ids)) owner = n_const.DEVICE_OWNER_DHCP query = query.filter(models_v2.Port.device_owner == owner) ips = {} for network_id in network_ids: ips[network_id] = [] for mac_address, network_id, ip in query: if (netaddr.IPAddress(ip).version == 6 and not netaddr.IPAddress(ip).is_link_local()): ip = str(ipv6.get_ipv6_addr_by_EUI64(n_const.IPV6_LLA_PREFIX, mac_address)) if ip not in ips[network_id]: ips[network_id].append(ip) return ips def _select_ra_ips_for_network_ids(self, context, network_ids): """Select IP addresses to allow sending router advertisement from. If the OpenStack managed radvd process sends an RA, get link local address of gateway and allow RA from this Link Local address. The gateway port link local address will only be obtained when router is created before VM instance is booted and subnet is attached to router. If OpenStack doesn't send RA, allow RA from gateway IP. Currently, the gateway IP needs to be link local to be able to send RA to VM. 
""" if not network_ids: return {} ips = {} for network_id in network_ids: ips[network_id] = set([]) query = context.session.query(models_v2.Subnet) subnets = query.filter(models_v2.Subnet.network_id.in_(network_ids)) for subnet in subnets: gateway_ip = subnet['gateway_ip'] if subnet['ip_version'] != 6 or not gateway_ip: continue if not netaddr.IPAddress(gateway_ip).is_link_local(): if subnet['ipv6_ra_mode']: gateway_ip = self._get_lla_gateway_ip_for_subnet(context, subnet) else: # TODO(xuhanp):Figure out how to allow gateway IP from # existing device to be global address and figure out the # link local address by other method. continue if gateway_ip: ips[subnet['network_id']].add(gateway_ip) return ips def _get_lla_gateway_ip_for_subnet(self, context, subnet): query = context.session.query(models_v2.Port.mac_address) query = query.join(models_v2.IPAllocation) query = query.filter( models_v2.IPAllocation.subnet_id == subnet['id']) query = query.filter( models_v2.IPAllocation.ip_address == subnet['gateway_ip']) query = query.filter( models_v2.Port.device_owner.in_(n_const.ROUTER_INTERFACE_OWNERS)) try: mac_address = query.one()[0] except (exc.NoResultFound, exc.MultipleResultsFound): LOG.warning(_LW('No valid gateway port on subnet %s is ' 'found for IPv6 RA'), subnet['id']) return lla_ip = str(ipv6.get_ipv6_addr_by_EUI64( n_const.IPV6_LLA_PREFIX, mac_address)) return lla_ip def _convert_remote_group_id_to_ip_prefix(self, context, ports): remote_group_ids = self._select_remote_group_ids(ports) ips = self._select_ips_for_remote_group(context, remote_group_ids) for port in ports.values(): updated_rule = [] for rule in port.get('security_group_rules'): remote_group_id = rule.get('remote_group_id') direction = rule.get('direction') direction_ip_prefix = DIRECTION_IP_PREFIX[direction] if not remote_group_id: updated_rule.append(rule) continue port['security_group_source_groups'].append(remote_group_id) base_rule = rule for ip in ips[remote_group_id]: if ip in port.get('fixed_ips', []): continue ip_rule = base_rule.copy() version = netaddr.IPNetwork(ip).version ethertype = 'IPv%s' % version if base_rule['ethertype'] != ethertype: continue ip_rule[direction_ip_prefix] = str( netaddr.IPNetwork(ip).cidr) updated_rule.append(ip_rule) port['security_group_rules'] = updated_rule return ports def _add_ingress_dhcp_rule(self, port, ips): dhcp_ips = ips.get(port['network_id']) for dhcp_ip in dhcp_ips: source_port, dest_port, ethertype = DHCP_RULE_PORT[ netaddr.IPAddress(dhcp_ip).version] dhcp_rule = {'direction': 'ingress', 'ethertype': ethertype, 'protocol': 'udp', 'port_range_min': dest_port, 'port_range_max': dest_port, 'source_port_range_min': source_port, 'source_port_range_max': source_port, 'source_ip_prefix': dhcp_ip} port['security_group_rules'].append(dhcp_rule) def _add_ingress_ra_rule(self, port, ips): ra_ips = ips.get(port['network_id']) for ra_ip in ra_ips: ra_rule = {'direction': 'ingress', 'ethertype': n_const.IPv6, 'protocol': n_const.PROTO_NAME_IPV6_ICMP, 'source_ip_prefix': ra_ip, 'source_port_range_min': n_const.ICMPV6_TYPE_RA} port['security_group_rules'].append(ra_rule) def _apply_provider_rule(self, context, ports): network_ids = self._select_network_ids(ports) ips_dhcp = self._select_dhcp_ips_for_network_ids(context, network_ids) ips_ra = self._select_ra_ips_for_network_ids(context, network_ids) for port in ports.values(): self._add_ingress_ra_rule(port, ips_ra) self._add_ingress_dhcp_rule(port, ips_dhcp) def security_group_rules_for_ports(self, context, ports): rules_in_db = 
self._select_rules_for_ports(context, ports) for (port_id, rule_in_db) in rules_in_db: port = ports[port_id] direction = rule_in_db['direction'] rule_dict = { 'security_group_id': rule_in_db['security_group_id'], 'direction': direction, 'ethertype': rule_in_db['ethertype'], } for key in ('protocol', 'port_range_min', 'port_range_max', 'remote_ip_prefix', 'remote_group_id'): if rule_in_db.get(key) is not None: if key == 'remote_ip_prefix': direction_ip_prefix = DIRECTION_IP_PREFIX[direction] rule_dict[direction_ip_prefix] = rule_in_db[key] continue rule_dict[key] = rule_in_db[key] port['security_group_rules'].append(rule_dict) self._apply_provider_rule(context, ports) return self._convert_remote_group_id_to_ip_prefix(context, ports) neutron-8.4.0/neutron/db/ipam_non_pluggable_backend.py0000664000567000056710000005454313044372760024321 0ustar jenkinsjenkins00000000000000# Copyright (c) 2015 OpenStack Foundation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import netaddr from oslo_db import exception as db_exc from oslo_log import log as logging from sqlalchemy import and_ from sqlalchemy import orm from sqlalchemy.orm import exc from neutron._i18n import _ from neutron.api.v2 import attributes from neutron.common import constants from neutron.common import exceptions as n_exc from neutron.common import ipv6_utils from neutron.db import ipam_backend_mixin from neutron.db import models_v2 from neutron.ipam import requests as ipam_req from neutron.ipam import subnet_alloc LOG = logging.getLogger(__name__) class IpamNonPluggableBackend(ipam_backend_mixin.IpamBackendMixin): @staticmethod def _generate_ip(context, subnets): try: return IpamNonPluggableBackend._try_generate_ip(context, subnets) except n_exc.IpAddressGenerationFailure: IpamNonPluggableBackend._rebuild_availability_ranges(context, subnets) return IpamNonPluggableBackend._try_generate_ip(context, subnets) @staticmethod def _try_generate_ip(context, subnets): """Generate an IP address. The IP address will be generated from one of the subnets defined on the network. """ range_qry = context.session.query( models_v2.IPAvailabilityRange).join( models_v2.IPAllocationPool).with_lockmode('update') for subnet in subnets: ip_range = range_qry.filter_by(subnet_id=subnet['id']).first() if not ip_range: LOG.debug("All IPs from subnet %(subnet_id)s (%(cidr)s) " "allocated", {'subnet_id': subnet['id'], 'cidr': subnet['cidr']}) continue ip_address = ip_range['first_ip'] if ip_range['first_ip'] == ip_range['last_ip']: # No more free indices on subnet => delete LOG.debug("No more free IP's in slice. 
Deleting " "allocation pool.") context.session.delete(ip_range) else: # increment the first free new_first_ip = str(netaddr.IPAddress(ip_address) + 1) ip_range['first_ip'] = new_first_ip LOG.debug("Allocated IP - %(ip_address)s from %(first_ip)s " "to %(last_ip)s", {'ip_address': ip_address, 'first_ip': ip_range['first_ip'], 'last_ip': ip_range['last_ip']}) return {'ip_address': ip_address, 'subnet_id': subnet['id']} raise n_exc.IpAddressGenerationFailure(net_id=subnets[0]['network_id']) @staticmethod def _rebuild_availability_ranges(context, subnets): """Rebuild availability ranges. This method is called only when there's no more IP available or by _update_subnet_allocation_pools. Calling _update_subnet_allocation_pools before calling this function deletes the IPAllocationPools associated with the subnet that is updating, which will result in deleting the IPAvailabilityRange too. """ ip_qry = context.session.query( models_v2.IPAllocation).with_lockmode('update') # PostgreSQL does not support select...for update with an outer join. # No join is needed here. pool_qry = context.session.query( models_v2.IPAllocationPool).options( orm.noload('available_ranges')).with_lockmode('update') for subnet in sorted(subnets): LOG.debug("Rebuilding availability ranges for subnet %s", subnet) # Create a set of all currently allocated addresses ip_qry_results = ip_qry.filter_by(subnet_id=subnet['id']) allocations = netaddr.IPSet([netaddr.IPAddress(i['ip_address']) for i in ip_qry_results]) for pool in pool_qry.filter_by(subnet_id=subnet['id']): # Create a set of all addresses in the pool poolset = netaddr.IPSet(netaddr.IPRange(pool['first_ip'], pool['last_ip'])) # Use set difference to find free addresses in the pool available = poolset - allocations # Generator compacts an ip set into contiguous ranges def ipset_to_ranges(ipset): first, last = None, None for cidr in ipset.iter_cidrs(): if last and last + 1 != cidr.first: yield netaddr.IPRange(first, last) first = None first, last = first if first else cidr.first, cidr.last if first: yield netaddr.IPRange(first, last) # Write the ranges to the db for ip_range in ipset_to_ranges(available): available_range = models_v2.IPAvailabilityRange( allocation_pool_id=pool['id'], first_ip=str(netaddr.IPAddress(ip_range.first)), last_ip=str(netaddr.IPAddress(ip_range.last))) context.session.add(available_range) @staticmethod def _allocate_specific_ip(context, subnet_id, ip_address): """Allocate a specific IP address on the subnet.""" ip = int(netaddr.IPAddress(ip_address)) range_qry = context.session.query( models_v2.IPAvailabilityRange).join( models_v2.IPAllocationPool).with_lockmode('update') results = range_qry.filter_by(subnet_id=subnet_id) for ip_range in results: first = int(netaddr.IPAddress(ip_range['first_ip'])) last = int(netaddr.IPAddress(ip_range['last_ip'])) if first <= ip <= last: if first == last: context.session.delete(ip_range) return elif first == ip: new_first_ip = str(netaddr.IPAddress(ip_address) + 1) ip_range['first_ip'] = new_first_ip return elif last == ip: new_last_ip = str(netaddr.IPAddress(ip_address) - 1) ip_range['last_ip'] = new_last_ip return else: # Adjust the original range to end before ip_address old_last_ip = ip_range['last_ip'] new_last_ip = str(netaddr.IPAddress(ip_address) - 1) ip_range['last_ip'] = new_last_ip # Create a new second range for after ip_address new_first_ip = str(netaddr.IPAddress(ip_address) + 1) new_ip_range = models_v2.IPAvailabilityRange( allocation_pool_id=ip_range['allocation_pool_id'], first_ip=new_first_ip, 
last_ip=old_last_ip) context.session.add(new_ip_range) return @staticmethod def _check_unique_ip(context, network_id, subnet_id, ip_address): """Validate that the IP address on the subnet is not in use.""" ip_qry = context.session.query(models_v2.IPAllocation) try: ip_qry.filter_by(network_id=network_id, subnet_id=subnet_id, ip_address=ip_address).one() except exc.NoResultFound: return True return False def save_allocation_pools(self, context, subnet, allocation_pools): for pool in allocation_pools: first_ip = str(netaddr.IPAddress(pool.first, pool.version)) last_ip = str(netaddr.IPAddress(pool.last, pool.version)) ip_pool = models_v2.IPAllocationPool(subnet=subnet, first_ip=first_ip, last_ip=last_ip) context.session.add(ip_pool) ip_range = models_v2.IPAvailabilityRange( ipallocationpool=ip_pool, first_ip=first_ip, last_ip=last_ip) context.session.add(ip_range) def allocate_ips_for_port_and_store(self, context, port, port_id): network_id = port['port']['network_id'] ips = self._allocate_ips_for_port(context, port) if ips: for ip in ips: ip_address = ip['ip_address'] subnet_id = ip['subnet_id'] self._store_ip_allocation(context, ip_address, network_id, subnet_id, port_id) return ips def update_port_with_ips(self, context, db_port, new_port, new_mac): changes = self.Changes(add=[], original=[], remove=[]) # Check if the IPs need to be updated network_id = db_port['network_id'] if 'fixed_ips' in new_port: original = self._make_port_dict(db_port, process_extensions=False) changes = self._update_ips_for_port( context, network_id, original["fixed_ips"], new_port['fixed_ips'], original['mac_address'], db_port['device_owner']) # Update ips if necessary for ip in changes.add: IpamNonPluggableBackend._store_ip_allocation( context, ip['ip_address'], network_id, ip['subnet_id'], db_port.id) self._update_db_port(context, db_port, new_port, network_id, new_mac) return changes def _test_fixed_ips_for_port(self, context, network_id, fixed_ips, device_owner): """Test fixed IPs for port. Check that configured subnets are valid prior to allocating any IPs. Include the subnet_id in the result if only an IP address is configured. :raises: InvalidInput, IpAddressInUse, InvalidIpForNetwork, InvalidIpForSubnet """ fixed_ip_set = [] for fixed in fixed_ips: subnet = self._get_subnet_for_fixed_ip(context, fixed, network_id) is_auto_addr_subnet = ipv6_utils.is_auto_address_subnet(subnet) if ('ip_address' in fixed and subnet['cidr'] != constants.PROVISIONAL_IPV6_PD_PREFIX): # Ensure that the IPs are unique if not IpamNonPluggableBackend._check_unique_ip( context, network_id, subnet['id'], fixed['ip_address']): raise n_exc.IpAddressInUse(net_id=network_id, ip_address=fixed['ip_address']) if (is_auto_addr_subnet and device_owner not in constants.ROUTER_INTERFACE_OWNERS): msg = (_("IPv6 address %(address)s can not be directly " "assigned to a port on subnet %(id)s since the " "subnet is configured for automatic addresses") % {'address': fixed['ip_address'], 'id': subnet['id']}) raise n_exc.InvalidInput(error_message=msg) fixed_ip_set.append({'subnet_id': subnet['id'], 'ip_address': fixed['ip_address']}) else: # A scan for auto-address subnets on the network is done # separately so that all such subnets (not just those # listed explicitly here by subnet ID) are associated # with the port. 
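                # Sketch of the effect (hypothetical IDs): for a SLAAC
                # subnet 'sub-slaac' and a plain VM port, a request
                # containing only
                #     {'subnet_id': 'sub-slaac'}
                # appends nothing here; the port still receives an EUI-64
                # address later, when _allocate_ips_for_port processes the
                # network's auto-address subnets.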
if (device_owner in constants.ROUTER_INTERFACE_OWNERS_SNAT or not is_auto_addr_subnet): fixed_ip_set.append({'subnet_id': subnet['id']}) self._validate_max_ips_per_port(fixed_ip_set, device_owner) return fixed_ip_set def _allocate_fixed_ips(self, context, fixed_ips, mac_address): """Allocate IP addresses according to the configured fixed_ips.""" ips = [] # we need to start with entries that asked for a specific IP in case # those IPs happen to be next in line for allocation for ones that # didn't ask for a specific IP fixed_ips.sort(key=lambda x: 'ip_address' not in x) for fixed in fixed_ips: subnet = self._get_subnet(context, fixed['subnet_id']) is_auto_addr = ipv6_utils.is_auto_address_subnet(subnet) if 'ip_address' in fixed: if not is_auto_addr: # Remove the IP address from the allocation pool IpamNonPluggableBackend._allocate_specific_ip( context, fixed['subnet_id'], fixed['ip_address']) ips.append({'ip_address': fixed['ip_address'], 'subnet_id': fixed['subnet_id']}) # Only subnet ID is specified => need to generate IP # from subnet else: if is_auto_addr: ip_address = self._calculate_ipv6_eui64_addr(context, subnet, mac_address) ips.append({'ip_address': ip_address.format(), 'subnet_id': subnet['id']}) else: subnets = [subnet] # IP address allocation result = self._generate_ip(context, subnets) ips.append({'ip_address': result['ip_address'], 'subnet_id': result['subnet_id']}) return ips def _update_ips_for_port(self, context, network_id, original_ips, new_ips, mac_address, device_owner): """Add or remove IPs from the port.""" added = [] changes = self._get_changed_ips_for_port(context, original_ips, new_ips, device_owner) # Check if the IPs to add are OK to_add = self._test_fixed_ips_for_port(context, network_id, changes.add, device_owner) for ip in changes.remove: LOG.debug("Port update. Hold %s", ip) IpamNonPluggableBackend._delete_ip_allocation(context, network_id, ip['subnet_id'], ip['ip_address']) if to_add: LOG.debug("Port update. Adding %s", to_add) added = self._allocate_fixed_ips(context, to_add, mac_address) return self.Changes(add=added, original=changes.original, remove=changes.remove) def _allocate_ips_for_port(self, context, port): """Allocate IP addresses for the port. If port['fixed_ips'] is set to 'ATTR_NOT_SPECIFIED', allocate IP addresses for the port. If port['fixed_ips'] contains an IP address or a subnet_id then allocate an IP address accordingly. """ p = port['port'] ips = [] v6_stateless = [] net_id_filter = {'network_id': [p['network_id']]} subnets = self._get_subnets(context, filters=net_id_filter) is_router_port = ( p['device_owner'] in constants.ROUTER_INTERFACE_OWNERS_SNAT) fixed_configured = p['fixed_ips'] is not attributes.ATTR_NOT_SPECIFIED if fixed_configured: configured_ips = self._test_fixed_ips_for_port(context, p["network_id"], p['fixed_ips'], p['device_owner']) ips = self._allocate_fixed_ips(context, configured_ips, p['mac_address']) # For ports that are not router ports, implicitly include all # auto-address subnets for address association. 
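        # A rough sketch of the combined result (hypothetical IDs): on a
        # network with an IPv4 subnet 'v4' and a SLAAC subnet 'v6', a VM
        # port created with fixed_ips=[{'subnet_id': 'v4'}] gets one IPv4
        # address allocated from 'v4' plus one EUI-64 address on 'v6' from
        # the auto-address handling below.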
if not is_router_port: v6_stateless += [subnet for subnet in subnets if ipv6_utils.is_auto_address_subnet(subnet)] else: # Split into v4, v6 stateless and v6 stateful subnets v4 = [] v6_stateful = [] for subnet in subnets: if subnet['ip_version'] == 4: v4.append(subnet) elif ipv6_utils.is_auto_address_subnet(subnet): if not is_router_port: v6_stateless.append(subnet) else: v6_stateful.append(subnet) version_subnets = [v4, v6_stateful] for subnets in version_subnets: if subnets: result = IpamNonPluggableBackend._generate_ip(context, subnets) ips.append({'ip_address': result['ip_address'], 'subnet_id': result['subnet_id']}) for subnet in v6_stateless: # IP addresses for IPv6 SLAAC and DHCPv6-stateless subnets # are implicitly included. ip_address = self._calculate_ipv6_eui64_addr(context, subnet, p['mac_address']) ips.append({'ip_address': ip_address.format(), 'subnet_id': subnet['id']}) return ips def add_auto_addrs_on_network_ports(self, context, subnet, ipam_subnet): """For an auto-address subnet, add addrs for ports on the net.""" with context.session.begin(subtransactions=True): network_id = subnet['network_id'] port_qry = context.session.query(models_v2.Port) ports = port_qry.filter( and_(models_v2.Port.network_id == network_id, ~models_v2.Port.device_owner.in_( constants.ROUTER_INTERFACE_OWNERS_SNAT))) updated_ports = [] for port in ports: ip_address = self._calculate_ipv6_eui64_addr( context, subnet, port['mac_address']) allocated = models_v2.IPAllocation(network_id=network_id, port_id=port['id'], ip_address=ip_address, subnet_id=subnet['id']) try: # Do the insertion of each IP allocation entry within # the context of a nested transaction, so that the entry # is rolled back independently of other entries whenever # the corresponding port has been deleted. with context.session.begin_nested(): context.session.add(allocated) updated_ports.append(port['id']) except db_exc.DBReferenceError: LOG.debug("Port %s was deleted while updating it with an " "IPv6 auto-address. 
Ignoring.", port['id']) return updated_ports def _calculate_ipv6_eui64_addr(self, context, subnet, mac_addr): prefix = subnet['cidr'] network_id = subnet['network_id'] ip_address = ipv6_utils.get_ipv6_addr_by_EUI64( prefix, mac_addr).format() if not self._check_unique_ip(context, network_id, subnet['id'], ip_address): raise n_exc.IpAddressInUse(net_id=network_id, ip_address=ip_address) return ip_address def allocate_subnet(self, context, network, subnet, subnetpool_id): subnetpool = None if subnetpool_id and not subnetpool_id == constants.IPV6_PD_POOL_ID: subnetpool = self._get_subnetpool(context, subnetpool_id) self._validate_ip_version_with_subnetpool(subnet, subnetpool) # gateway_ip and allocation pools should be validated or generated # only for specific request if subnet['cidr'] is not attributes.ATTR_NOT_SPECIFIED: subnet['gateway_ip'] = self._gateway_ip_str(subnet, subnet['cidr']) # allocation_pools are converted to list of IPRanges subnet['allocation_pools'] = self._prepare_allocation_pools( subnet['allocation_pools'], subnet['cidr'], subnet['gateway_ip']) subnet_request = ipam_req.SubnetRequestFactory.get_request(context, subnet, subnetpool) if subnetpool_id and not subnetpool_id == constants.IPV6_PD_POOL_ID: driver = subnet_alloc.SubnetAllocator(subnetpool, context) ipam_subnet = driver.allocate_subnet(subnet_request) subnet_request = ipam_subnet.get_details() subnet = self._save_subnet(context, network, self._make_subnet_args( subnet_request, subnet, subnetpool_id), subnet['dns_nameservers'], subnet['host_routes'], subnet_request) # ipam_subnet is not expected to be allocated for non pluggable ipam, # so just return None for it (second element in returned tuple) return subnet, None neutron-8.4.0/neutron/db/l3_hamode_db.py0000664000567000056710000010435213044372760021322 0ustar jenkinsjenkins00000000000000# Copyright (C) 2014 eNovance SAS # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
# import functools import netaddr from oslo_config import cfg from oslo_db import exception as db_exc from oslo_log import helpers as log_helpers from oslo_log import log as logging from oslo_utils import excutils import sqlalchemy as sa from sqlalchemy import exc as sql_exc from sqlalchemy import orm from neutron._i18n import _, _LI from neutron.api.v2 import attributes from neutron.common import constants from neutron.common import exceptions as n_exc from neutron.common import utils as n_utils from neutron.db import agents_db from neutron.db import api as db_api from neutron.db.availability_zone import router as router_az_db from neutron.db import common_db_mixin from neutron.db import l3_attrs_db from neutron.db import l3_db from neutron.db import l3_dvr_db from neutron.db import model_base from neutron.db import models_v2 from neutron.extensions import l3 from neutron.extensions import l3_ext_ha_mode as l3_ha from neutron.extensions import portbindings from neutron.extensions import providernet from neutron.plugins.common import utils as p_utils VR_ID_RANGE = set(range(1, 255)) MAX_ALLOCATION_TRIES = 10 UNLIMITED_AGENTS_PER_ROUTER = 0 LOG = logging.getLogger(__name__) L3_HA_OPTS = [ cfg.BoolOpt('l3_ha', default=False, help=_('Enable HA mode for virtual routers.')), cfg.IntOpt('max_l3_agents_per_router', default=3, help=_("Maximum number of L3 agents which a HA router will be " "scheduled on. If it is set to 0 then the router will " "be scheduled on every agent.")), cfg.IntOpt('min_l3_agents_per_router', default=constants.DEFAULT_MINIMUM_AGENTS_FOR_HA, help=_("Minimum number of L3 agents that have to be available " "in order to allow a new HA router to be scheduled.")), cfg.StrOpt('l3_ha_net_cidr', default='169.254.192.0/18', help=_('Subnet used for the l3 HA admin network.')), cfg.StrOpt('l3_ha_network_type', default='', help=_("The network type to use when creating the HA network " "for an HA router. By default or if empty, the first " "'tenant_network_types' is used. This is helpful when " "the VRRP traffic should use a specific network which " "is not the default one.")), cfg.StrOpt('l3_ha_network_physical_name', default='', help=_("The physical network name with which the HA network " "can be created.")) ] cfg.CONF.register_opts(L3_HA_OPTS) class L3HARouterAgentPortBinding(model_base.BASEV2): """Represent agent binding state of a HA router port. A HA Router has one HA port per agent on which it is spawned. This binding table stores which port is used for a HA router by a L3 agent. """ __tablename__ = 'ha_router_agent_port_bindings' __table_args__ = ( sa.UniqueConstraint( 'router_id', 'l3_agent_id', name='uniq_ha_router_agent_port_bindings0port_id0l3_agent_id'), model_base.BASEV2.__table_args__ ) port_id = sa.Column(sa.String(36), sa.ForeignKey('ports.id', ondelete='CASCADE'), nullable=False, primary_key=True) port = orm.relationship(models_v2.Port) router_id = sa.Column(sa.String(36), sa.ForeignKey('routers.id', ondelete='CASCADE'), nullable=False) l3_agent_id = sa.Column(sa.String(36), sa.ForeignKey("agents.id", ondelete='CASCADE')) agent = orm.relationship(agents_db.Agent) state = sa.Column(sa.Enum(constants.HA_ROUTER_STATE_ACTIVE, constants.HA_ROUTER_STATE_STANDBY, name='l3_ha_states'), default=constants.HA_ROUTER_STATE_STANDBY, server_default=constants.HA_ROUTER_STATE_STANDBY) class L3HARouterNetwork(model_base.BASEV2): """Host HA network for a tenant. One HA Network is used per tenant, all HA router ports are created on this network. 
""" __tablename__ = 'ha_router_networks' tenant_id = sa.Column(sa.String(attributes.TENANT_ID_MAX_LEN), primary_key=True, nullable=False) network_id = sa.Column(sa.String(36), sa.ForeignKey('networks.id', ondelete="CASCADE"), nullable=False, primary_key=True) network = orm.relationship(models_v2.Network) class L3HARouterVRIdAllocation(model_base.BASEV2): """VRID allocation per HA network. Keep a track of the VRID allocations per HA network. """ __tablename__ = 'ha_router_vrid_allocations' network_id = sa.Column(sa.String(36), sa.ForeignKey('networks.id', ondelete="CASCADE"), nullable=False, primary_key=True) vr_id = sa.Column(sa.Integer(), nullable=False, primary_key=True) class L3_HA_NAT_db_mixin(l3_dvr_db.L3_NAT_with_dvr_db_mixin, router_az_db.RouterAvailabilityZoneMixin): """Mixin class to add high availability capability to routers.""" extra_attributes = ( l3_dvr_db.L3_NAT_with_dvr_db_mixin.extra_attributes + router_az_db.RouterAvailabilityZoneMixin.extra_attributes + [ {'name': 'ha', 'default': cfg.CONF.l3_ha}, {'name': 'ha_vr_id', 'default': 0}]) def _verify_configuration(self): self.ha_cidr = cfg.CONF.l3_ha_net_cidr try: net = netaddr.IPNetwork(self.ha_cidr) except netaddr.AddrFormatError: raise l3_ha.HANetworkCIDRNotValid(cidr=self.ha_cidr) if ('/' not in self.ha_cidr or net.network != net.ip): raise l3_ha.HANetworkCIDRNotValid(cidr=self.ha_cidr) self._check_num_agents_per_router() def _check_num_agents_per_router(self): max_agents = cfg.CONF.max_l3_agents_per_router min_agents = cfg.CONF.min_l3_agents_per_router if (max_agents != UNLIMITED_AGENTS_PER_ROUTER and max_agents < min_agents): raise l3_ha.HAMaximumAgentsNumberNotValid( max_agents=max_agents, min_agents=min_agents) if min_agents < constants.MINIMUM_MINIMUM_AGENTS_FOR_HA: raise l3_ha.HAMinimumAgentsNumberNotValid() def __init__(self): self._verify_configuration() super(L3_HA_NAT_db_mixin, self).__init__() def get_ha_network(self, context, tenant_id): return (context.session.query(L3HARouterNetwork). filter(L3HARouterNetwork.tenant_id == tenant_id). first()) def _get_allocated_vr_id(self, context, network_id): with context.session.begin(subtransactions=True): query = (context.session.query(L3HARouterVRIdAllocation). 
filter(L3HARouterVRIdAllocation.network_id == network_id)) allocated_vr_ids = set(a.vr_id for a in query) - set([0]) return allocated_vr_ids def _allocate_vr_id(self, context, network_id, router_id): for count in range(MAX_ALLOCATION_TRIES): try: # NOTE(kevinbenton): we disallow subtransactions because the # retry logic will bust any parent transactions with context.session.begin(): allocated_vr_ids = self._get_allocated_vr_id(context, network_id) available_vr_ids = VR_ID_RANGE - allocated_vr_ids if not available_vr_ids: raise l3_ha.NoVRIDAvailable(router_id=router_id) allocation = L3HARouterVRIdAllocation() allocation.network_id = network_id allocation.vr_id = available_vr_ids.pop() context.session.add(allocation) return allocation.vr_id except db_exc.DBDuplicateEntry: LOG.info(_LI("Attempt %(count)s to allocate a VRID in the " "network %(network)s for the router %(router)s"), {'count': count, 'network': network_id, 'router': router_id}) raise l3_ha.MaxVRIDAllocationTriesReached( network_id=network_id, router_id=router_id, max_tries=MAX_ALLOCATION_TRIES) def _delete_vr_id_allocation(self, context, ha_network, vr_id): with context.session.begin(subtransactions=True): context.session.query(L3HARouterVRIdAllocation).filter_by( network_id=ha_network.network_id, vr_id=vr_id).delete() def _set_vr_id(self, context, router, ha_network): router.extra_attributes.ha_vr_id = self._allocate_vr_id( context, ha_network.network_id, router.id) def _create_ha_subnet(self, context, network_id, tenant_id): args = {'network_id': network_id, 'tenant_id': '', 'name': constants.HA_SUBNET_NAME % tenant_id, 'ip_version': 4, 'cidr': cfg.CONF.l3_ha_net_cidr, 'enable_dhcp': False, 'gateway_ip': None} return p_utils.create_subnet(self._core_plugin, context, {'subnet': args}) def _create_ha_network_tenant_binding(self, context, tenant_id, network_id): with context.session.begin(): ha_network = L3HARouterNetwork(tenant_id=tenant_id, network_id=network_id) context.session.add(ha_network) # we need to check if someone else just inserted at exactly the # same time as us because there is no constraint in L3HARouterNetwork # that prevents multiple networks per tenant with context.session.begin(subtransactions=True): items = (context.session.query(L3HARouterNetwork). filter_by(tenant_id=tenant_id).all()) if len(items) > 1: # we need to throw an error so our network is deleted # and the process is started over where the existing # network will be selected. 
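                # Concurrency sketch: two simultaneous create_router calls
                # for one tenant can each insert a binding above; the raise
                # below then makes safe_creation() delete this caller's
                # network and retry, and the retry's dep_getter picks up
                # whichever HA network survived.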
raise db_exc.DBDuplicateEntry(columns=['tenant_id']) return ha_network def _add_ha_network_settings(self, network): if cfg.CONF.l3_ha_network_type: network[providernet.NETWORK_TYPE] = cfg.CONF.l3_ha_network_type if cfg.CONF.l3_ha_network_physical_name: network[providernet.PHYSICAL_NETWORK] = ( cfg.CONF.l3_ha_network_physical_name) def _create_ha_network(self, context, tenant_id): admin_ctx = context.elevated() args = {'network': {'name': constants.HA_NETWORK_NAME % tenant_id, 'tenant_id': '', 'shared': False, 'admin_state_up': True}} self._add_ha_network_settings(args['network']) creation = functools.partial(p_utils.create_network, self._core_plugin, admin_ctx, args) content = functools.partial(self._create_ha_network_tenant_binding, admin_ctx, tenant_id) deletion = functools.partial(self._core_plugin.delete_network, admin_ctx) network, ha_network = common_db_mixin.safe_creation( context, creation, deletion, content, transaction=False) try: self._create_ha_subnet(admin_ctx, network['id'], tenant_id) except Exception: with excutils.save_and_reraise_exception(): self._core_plugin.delete_network(admin_ctx, network['id']) return ha_network def get_number_of_agents_for_scheduling(self, context): """Return the number of agents on which the router will be scheduled. Raises an exception if there are not enough agents available to honor the min_agents config parameter. If the max_agents parameter is set to 0 all the agents will be used. """ min_agents = cfg.CONF.min_l3_agents_per_router num_agents = len(self.get_l3_agents(context, active=True, filters={'agent_modes': [constants.L3_AGENT_MODE_LEGACY, constants.L3_AGENT_MODE_DVR_SNAT]})) max_agents = cfg.CONF.max_l3_agents_per_router if max_agents: if max_agents > num_agents: LOG.info(_LI("Number of active agents lower than " "max_l3_agents_per_router. L3 agents " "available: %s"), num_agents) else: num_agents = max_agents if num_agents < min_agents: raise l3_ha.HANotEnoughAvailableAgents(min_agents=min_agents, num_agents=num_agents) return num_agents def _create_ha_port_binding(self, context, router_id, port_id): try: with context.session.begin(): portbinding = L3HARouterAgentPortBinding(port_id=port_id, router_id=router_id) context.session.add(portbinding) return portbinding except db_exc.DBReferenceError as e: with excutils.save_and_reraise_exception() as ctxt: if isinstance(e.inner_exception, sql_exc.IntegrityError): ctxt.reraise = False LOG.debug( 'Failed to create HA router agent PortBinding, ' 'Router %s has already been removed ' 'by concurrent operation', router_id) raise l3.RouterNotFound(router_id=router_id) def add_ha_port(self, context, router_id, network_id, tenant_id): # NOTE(kevinbenton): we have to block any ongoing transactions because # our exception handling will try to delete the port using the normal # core plugin API. If this function is called inside of a transaction # the exception will mangle the state, cause the delete call to fail, # and end up relying on the DB rollback to remove the port instead of # proper delete_port call. 
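        # Sketch of the misuse this guards against (hypothetical caller):
        #
        #     with context.session.begin():   # outer transaction still open
        #         plugin.add_ha_port(context, router_id, net_id, tenant_id)
        #
        # A failure inside add_ha_port would then call delete_port within
        # the caller's transaction, with the consequences described above.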
if context.session.is_active: raise RuntimeError(_('add_ha_port cannot be called inside of a ' 'transaction.')) args = {'tenant_id': '', 'network_id': network_id, 'admin_state_up': True, 'device_id': router_id, 'device_owner': constants.DEVICE_OWNER_ROUTER_HA_INTF, 'name': constants.HA_PORT_NAME % tenant_id} creation = functools.partial(p_utils.create_port, self._core_plugin, context, {'port': args}) content = functools.partial(self._create_ha_port_binding, context, router_id) deletion = functools.partial(self._core_plugin.delete_port, context, l3_port_check=False) port, bindings = common_db_mixin.safe_creation(context, creation, deletion, content, transaction=False) return bindings def _create_ha_interfaces(self, context, router, ha_network): admin_ctx = context.elevated() num_agents = self.get_number_of_agents_for_scheduling(context) port_ids = [] try: for index in range(num_agents): binding = self.add_ha_port(admin_ctx, router.id, ha_network.network['id'], router.tenant_id) port_ids.append(binding.port_id) except Exception: with excutils.save_and_reraise_exception(): for port_id in port_ids: self._core_plugin.delete_port(admin_ctx, port_id, l3_port_check=False) def _delete_ha_interfaces(self, context, router_id): admin_ctx = context.elevated() device_filter = {'device_id': [router_id], 'device_owner': [constants.DEVICE_OWNER_ROUTER_HA_INTF]} ports = self._core_plugin.get_ports(admin_ctx, filters=device_filter) for port in ports: self._core_plugin.delete_port(admin_ctx, port['id'], l3_port_check=False) def delete_ha_interfaces_on_host(self, context, router_id, host): admin_ctx = context.elevated() port_ids = (binding.port_id for binding in self.get_ha_router_port_bindings(admin_ctx, [router_id], host)) for port_id in port_ids: self._core_plugin.delete_port(admin_ctx, port_id, l3_port_check=False) def _notify_ha_interfaces_updated(self, context, router_id, schedule_routers=True): self.l3_rpc_notifier.routers_updated( context, [router_id], shuffle_agents=True, schedule_routers=schedule_routers) @classmethod def _is_ha(cls, router): ha = router.get('ha') if not attributes.is_attr_set(ha): ha = cfg.CONF.l3_ha return ha @n_utils.transaction_guard def _create_ha_interfaces_and_ensure_network(self, context, router_db): """Attach interfaces to a network while tolerating network deletes.""" creator = functools.partial(self._create_ha_interfaces, context, router_db) dep_getter = functools.partial(self.get_ha_network, context, router_db.tenant_id) dep_creator = functools.partial(self._create_ha_network, context, router_db.tenant_id) dep_id_attr = 'network_id' return n_utils.create_object_with_dependency( creator, dep_getter, dep_creator, dep_id_attr) def create_router(self, context, router): is_ha = self._is_ha(router['router']) router['router']['ha'] = is_ha if is_ha: # we set the allocating status to hide it from the L3 agents # until we have created all of the requisite interfaces/networks router['router']['status'] = constants.ROUTER_STATUS_ALLOCATING router_dict = super(L3_HA_NAT_db_mixin, self).create_router(context, router) if is_ha: try: router_db = self._get_router(context, router_dict['id']) # the following returns the interfaces and the network; we # only care about the network ha_network = self._create_ha_interfaces_and_ensure_network( context, router_db)[1] self._set_vr_id(context, router_db, ha_network) router_dict['ha_vr_id'] = router_db.extra_attributes.ha_vr_id self.schedule_router(context, router_dict['id']) router_dict['status'] = self._update_router_db( context, router_dict['id'], 
{'status': constants.ROUTER_STATUS_ACTIVE})['status'] self._notify_ha_interfaces_updated(context, router_db.id, schedule_routers=False) except Exception: with excutils.save_and_reraise_exception(): self.delete_router(context, router_dict['id']) return router_dict def _update_router_db(self, context, router_id, data): router_db = self._get_router(context, router_id) original_distributed_state = router_db.extra_attributes.distributed original_ha_state = router_db.extra_attributes.ha requested_ha_state = data.pop('ha', None) requested_distributed_state = data.get('distributed', None) # cvr to dvrha if not original_distributed_state and not original_ha_state: if (requested_ha_state is True and requested_distributed_state is True): raise l3_ha.UpdateToDvrHamodeNotSupported() # cvrha to any dvr... elif not original_distributed_state and original_ha_state: if requested_distributed_state is True: raise l3_ha.DVRmodeUpdateOfHaNotSupported() # dvr to any ha... elif original_distributed_state and not original_ha_state: if requested_ha_state is True: raise l3_ha.HAmodeUpdateOfDvrNotSupported() #dvrha to any cvr... elif original_distributed_state and original_ha_state: if requested_distributed_state is False: raise l3_ha.DVRmodeUpdateOfDvrHaNotSupported() #elif dvrha to dvr if requested_ha_state is False: raise l3_ha.HAmodeUpdateOfDvrHaNotSupported() ha_changed = (requested_ha_state is not None and requested_ha_state != original_ha_state) if ha_changed: if router_db.admin_state_up: msg = _('Cannot change HA attribute of active routers. Please ' 'set router admin_state_up to False prior to upgrade.') raise n_exc.BadRequest(resource='router', msg=msg) if requested_ha_state: # This will throw HANotEnoughAvailableAgents if there aren't # enough l3 agents to handle this router. self.get_number_of_agents_for_scheduling(context) # set status to ALLOCATING so this router is no longer # provided to agents while its interfaces are being re-configured. # Keep in mind that if we want conversion to be hitless, this # status cannot be used because agents treat hidden routers as # deleted routers. data['status'] = constants.ROUTER_STATUS_ALLOCATING with context.session.begin(subtransactions=True): router_db = super(L3_HA_NAT_db_mixin, self)._update_router_db( context, router_id, data) if not ha_changed: return router_db ha_network = self.get_ha_network(context, router_db.tenant_id) router_db.extra_attributes.ha = requested_ha_state if not requested_ha_state: self._delete_vr_id_allocation( context, ha_network, router_db.extra_attributes.ha_vr_id) router_db.extra_attributes.ha_vr_id = None # The HA attribute has changed. First unbind the router from agents # to force a proper re-scheduling to agents. # TODO(jschwarz): This will have to be more selective to get HA + DVR # working (Only unbind from dvr_snat nodes). 
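        # Summary of the remaining transition work below: unbind the router
        # from all agents; for ha False->True allocate a VRID and create the
        # HA interfaces, for True->False delete the HA interfaces (the VRID
        # was already released above); then reschedule and set the status
        # back to ACTIVE.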
self._unbind_ha_router(context, router_id) if requested_ha_state: ha_network = self._create_ha_interfaces_and_ensure_network( context, router_db)[1] self._set_vr_id(context, router_db, ha_network) else: self._delete_ha_interfaces(context, router_db.id) self.schedule_router(context, router_id) router_db = super(L3_HA_NAT_db_mixin, self)._update_router_db( context, router_id, {'status': constants.ROUTER_STATUS_ACTIVE}) self._notify_ha_interfaces_updated(context, router_db.id, schedule_routers=False) return router_db def _delete_ha_network(self, context, net): admin_ctx = context.elevated() self._core_plugin.delete_network(admin_ctx, net.network_id) def _ha_routers_present(self, context, tenant_id): ha = True routers = context.session.query(l3_db.Router).filter( l3_db.Router.tenant_id == tenant_id).subquery() ha_routers = context.session.query( l3_attrs_db.RouterExtraAttributes).join( routers, l3_attrs_db.RouterExtraAttributes.router_id == routers.c.id ).filter(l3_attrs_db.RouterExtraAttributes.ha == ha).first() return ha_routers is not None def delete_router(self, context, id): router_db = self._get_router(context, id) super(L3_HA_NAT_db_mixin, self).delete_router(context, id) if router_db.extra_attributes.ha: ha_network = self.get_ha_network(context, router_db.tenant_id) if ha_network: self._delete_vr_id_allocation( context, ha_network, router_db.extra_attributes.ha_vr_id) self._delete_ha_interfaces(context, router_db.id) # HA router creation may have failed before the HA network was # created, so there may be no HA network to remove; hence this # deletion procedure is guarded by the 'if ha_network' block. if not self._ha_routers_present(context, router_db.tenant_id): try: self._delete_ha_network(context, ha_network) except (n_exc.NetworkNotFound, orm.exc.ObjectDeletedError): LOG.debug( "HA network for tenant %s was already deleted.", router_db.tenant_id) except sa.exc.InvalidRequestError: LOG.info(_LI("HA network %s can not be deleted."), ha_network.network_id) except n_exc.NetworkInUse: LOG.debug("HA network %s is still in use.", ha_network.network_id) else: LOG.info(_LI("HA network %(network)s was deleted as " "no HA routers are present in tenant " "%(tenant)s."), {'network': ha_network.network_id, 'tenant': router_db.tenant_id}) def _unbind_ha_router(self, context, router_id): for agent in self.get_l3_agents_hosting_routers(context, [router_id]): self.remove_router_from_l3_agent(context, agent['id'], router_id) def get_ha_router_port_bindings(self, context, router_ids, host=None): if not router_ids: return [] query = context.session.query(L3HARouterAgentPortBinding) if host: query = query.join(agents_db.Agent).filter( agents_db.Agent.host == host) query = query.filter( L3HARouterAgentPortBinding.router_id.in_(router_ids)) return query.all() @staticmethod def _check_router_agent_ha_binding(context, router_id, agent_id): query = context.session.query(L3HARouterAgentPortBinding) query = query.filter( L3HARouterAgentPortBinding.router_id == router_id, L3HARouterAgentPortBinding.l3_agent_id == agent_id) return query.first() is not None def _get_bindings_and_update_router_state_for_dead_agents(self, context, router_id): """Return bindings. If dead agents are detected, reset the router state on those agents to standby. 
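        For example, a binding that reports 'active' while its agent has
        stopped heartbeating is flipped to 'standby', and the bindings are
        then re-read so callers never see an active state from a dead agent.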
""" with context.session.begin(subtransactions=True): bindings = self.get_ha_router_port_bindings(context, [router_id]) dead_agents = [ binding.agent for binding in bindings if binding.state == constants.HA_ROUTER_STATE_ACTIVE and not binding.agent.is_active] for dead_agent in dead_agents: self.update_routers_states( context, {router_id: constants.HA_ROUTER_STATE_STANDBY}, dead_agent.host) if dead_agents: return self.get_ha_router_port_bindings(context, [router_id]) return bindings def get_l3_bindings_hosting_router_with_ha_states( self, context, router_id): """Return a list of [(agent, ha_state), ...].""" bindings = self._get_bindings_and_update_router_state_for_dead_agents( context, router_id) return [(binding.agent, binding.state) for binding in bindings if binding.agent is not None] def get_active_host_for_ha_router(self, context, router_id): bindings = self.get_l3_bindings_hosting_router_with_ha_states( context, router_id) # TODO(amuller): In case we have two or more actives, this method # needs to return the last agent to become active. This requires # timestamps for state changes. Otherwise, if a host goes down # and another takes over, we'll have two actives. In this case, # if an interface is added to a router, its binding might be wrong # and l2pop would not work correctly. return next( (agent.host for agent, state in bindings if state == constants.HA_ROUTER_STATE_ACTIVE), None) @log_helpers.log_method_call def _process_sync_ha_data(self, context, routers, host): routers_dict = dict((router['id'], router) for router in routers) bindings = self.get_ha_router_port_bindings(context, routers_dict.keys(), host) for binding in bindings: port = binding.port if not port: # Filter the HA router has no ha port here LOG.info(_LI("HA router %s is missing HA router port " "bindings. Skipping it."), binding.router_id) routers_dict.pop(binding.router_id) continue port_dict = self._core_plugin._make_port_dict(port) router = routers_dict.get(binding.router_id) router[constants.HA_INTERFACE_KEY] = port_dict router[constants.HA_ROUTER_STATE_KEY] = binding.state for router in routers_dict.values(): interface = router.get(constants.HA_INTERFACE_KEY) if interface: self._populate_mtu_and_subnets_for_ports(context, [interface]) # Could not filter the HA_INTERFACE_KEY here, because a DVR router # with SNAT HA in DVR compute host also does not have that attribute. 
return list(routers_dict.values()) @log_helpers.log_method_call def get_ha_sync_data_for_host(self, context, host, agent, router_ids=None, active=None): agent_mode = self._get_agent_mode(agent) dvr_agent_mode = (agent_mode in [constants.L3_AGENT_MODE_DVR_SNAT, constants.L3_AGENT_MODE_DVR]) if (dvr_agent_mode and n_utils.is_extension_supported( self, constants.L3_DISTRIBUTED_EXT_ALIAS)): # DVR has to be handled differently sync_data = self._get_dvr_sync_data(context, host, agent, router_ids, active) else: sync_data = super(L3_HA_NAT_db_mixin, self).get_sync_data(context, router_ids, active) return self._process_sync_ha_data(context, sync_data, host) @classmethod def _set_router_states(cls, context, bindings, states): for binding in bindings: try: with context.session.begin(subtransactions=True): binding.state = states[binding.router_id] except (orm.exc.StaleDataError, orm.exc.ObjectDeletedError): # Take concurrently deleted routers in to account pass def update_routers_states(self, context, states, host): """Receive dict of router ID to state and update them all.""" bindings = self.get_ha_router_port_bindings( context, router_ids=states.keys(), host=host) self._set_router_states(context, bindings, states) self._update_router_port_bindings(context, states, host) def _update_router_port_bindings(self, context, states, host): admin_ctx = context.elevated() device_filter = {'device_id': states.keys(), 'device_owner': [constants.DEVICE_OWNER_ROUTER_INTF, constants.DEVICE_OWNER_ROUTER_SNAT]} ports = self._core_plugin.get_ports(admin_ctx, filters=device_filter) active_ports = (port for port in ports if states[port['device_id']] == constants.HA_ROUTER_STATE_ACTIVE) for port in active_ports: port[portbindings.HOST_ID] = host try: self._core_plugin.update_port(admin_ctx, port['id'], {attributes.PORT: port}) except (orm.exc.StaleDataError, orm.exc.ObjectDeletedError, n_exc.PortNotFound): # Take concurrently deleted interfaces in to account pass def is_ha_router_port(device_owner, router_id): session = db_api.get_session() if device_owner in [constants.DEVICE_OWNER_ROUTER_INTF, constants.DEVICE_OWNER_ROUTER_SNAT]: query = session.query(l3_attrs_db.RouterExtraAttributes) query = query.filter_by(ha=True) query = query.filter(l3_attrs_db.RouterExtraAttributes.router_id == router_id) return bool(query.limit(1).count()) else: return False neutron-8.4.0/neutron/db/bgp_db.py0000664000567000056710000015037213044372760020242 0ustar jenkinsjenkins00000000000000# Copyright 2016 Hewlett Packard Enterprise Development Company LP # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
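# Route advertisement overview: a BGP speaker advertises three kinds of
# routes, all derived from its gateway-network bindings (see
# get_routes_by_bgp_speaker_id): tenant network routes, centralized
# floating IP host routes, and DVR floating IP host routes.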
import itertools from oslo_db import exception as oslo_db_exc from oslo_log import log as logging from oslo_utils import uuidutils import sqlalchemy as sa from sqlalchemy import and_ from sqlalchemy import orm from sqlalchemy.orm import aliased from sqlalchemy.orm import exc as sa_exc from neutron_lib import constants as lib_consts from neutron._i18n import _ from neutron.api.v2 import attributes as attr from neutron.common import exceptions as n_exc from neutron.db import address_scope_db from neutron.db import common_db_mixin as common_db from neutron.db import l3_attrs_db from neutron.db import l3_db from neutron.db import model_base from neutron.db import models_v2 from neutron.extensions import bgp as bgp_ext from neutron.plugins.ml2 import models as ml2_models LOG = logging.getLogger(__name__) DEVICE_OWNER_ROUTER_GW = lib_consts.DEVICE_OWNER_ROUTER_GW DEVICE_OWNER_ROUTER_INTF = lib_consts.DEVICE_OWNER_ROUTER_INTF class BgpSpeakerPeerBinding(model_base.BASEV2): """Represents a mapping between BGP speaker and BGP peer.""" __tablename__ = 'bgp_speaker_peer_bindings' bgp_speaker_id = sa.Column(sa.String(length=36), sa.ForeignKey('bgp_speakers.id', ondelete='CASCADE'), nullable=False, primary_key=True) bgp_peer_id = sa.Column(sa.String(length=36), sa.ForeignKey('bgp_peers.id', ondelete='CASCADE'), nullable=False, primary_key=True) class BgpSpeakerNetworkBinding(model_base.BASEV2): """Represents a mapping between a network and BGP speaker.""" __tablename__ = 'bgp_speaker_network_bindings' bgp_speaker_id = sa.Column(sa.String(length=36), sa.ForeignKey('bgp_speakers.id', ondelete='CASCADE'), nullable=False, primary_key=True) network_id = sa.Column(sa.String(length=36), sa.ForeignKey('networks.id', ondelete='CASCADE'), nullable=False, primary_key=True) ip_version = sa.Column(sa.Integer, nullable=False, autoincrement=False, primary_key=True) class BgpSpeaker(model_base.BASEV2, model_base.HasId, model_base.HasTenant): """Represents a BGP speaker.""" __tablename__ = 'bgp_speakers' name = sa.Column(sa.String(attr.NAME_MAX_LEN), nullable=False) local_as = sa.Column(sa.Integer, nullable=False, autoincrement=False) advertise_floating_ip_host_routes = sa.Column(sa.Boolean, nullable=False) advertise_tenant_networks = sa.Column(sa.Boolean, nullable=False) peers = orm.relationship(BgpSpeakerPeerBinding, backref='bgp_speaker_peer_bindings', cascade='all, delete, delete-orphan', lazy='joined') networks = orm.relationship(BgpSpeakerNetworkBinding, backref='bgp_speaker_network_bindings', cascade='all, delete, delete-orphan', lazy='joined') ip_version = sa.Column(sa.Integer, nullable=False, autoincrement=False) class BgpPeer(model_base.BASEV2, model_base.HasId, model_base.HasTenant): """Represents a BGP routing peer.""" __tablename__ = 'bgp_peers' name = sa.Column(sa.String(attr.NAME_MAX_LEN), nullable=False) peer_ip = sa.Column(sa.String(64), nullable=False) remote_as = sa.Column(sa.Integer, nullable=False, autoincrement=False) auth_type = sa.Column(sa.String(16), nullable=False) password = sa.Column(sa.String(255), nullable=True) class BgpDbMixin(common_db.CommonDbMixin): def create_bgp_speaker(self, context, bgp_speaker): uuid = uuidutils.generate_uuid() self._save_bgp_speaker(context, bgp_speaker, uuid) return self.get_bgp_speaker(context, uuid) def get_bgp_speakers(self, context, filters=None, fields=None, sorts=None, limit=None, marker=None, page_reverse=False): with context.session.begin(subtransactions=True): return self._get_collection(context, BgpSpeaker, self._make_bgp_speaker_dict, filters=filters, fields=fields, sorts=sorts, 
limit=limit, page_reverse=page_reverse) def get_bgp_speaker(self, context, bgp_speaker_id, fields=None): with context.session.begin(subtransactions=True): bgp_speaker = self._get_bgp_speaker(context, bgp_speaker_id) return self._make_bgp_speaker_dict(bgp_speaker, fields) def get_bgp_speaker_with_advertised_routes(self, context, bgp_speaker_id): bgp_speaker_attrs = ['id', 'local_as', 'tenant_id'] bgp_peer_attrs = ['peer_ip', 'remote_as', 'password'] with context.session.begin(subtransactions=True): bgp_speaker = self.get_bgp_speaker(context, bgp_speaker_id, fields=bgp_speaker_attrs) res = dict((k, bgp_speaker[k]) for k in bgp_speaker_attrs) res['peers'] = self.get_bgp_peers_by_bgp_speaker(context, bgp_speaker['id'], fields=bgp_peer_attrs) res['advertised_routes'] = self.get_routes_by_bgp_speaker_id( context, bgp_speaker_id) return res def update_bgp_speaker(self, context, bgp_speaker_id, bgp_speaker): bp = bgp_speaker[bgp_ext.BGP_SPEAKER_BODY_KEY_NAME] with context.session.begin(subtransactions=True): bgp_speaker_db = self._get_bgp_speaker(context, bgp_speaker_id) bgp_speaker_db.update(bp) bgp_speaker_dict = self._make_bgp_speaker_dict(bgp_speaker_db) return bgp_speaker_dict def _save_bgp_speaker(self, context, bgp_speaker, uuid): ri = bgp_speaker[bgp_ext.BGP_SPEAKER_BODY_KEY_NAME] ri['tenant_id'] = context.tenant_id with context.session.begin(subtransactions=True): res_keys = ['local_as', 'tenant_id', 'name', 'ip_version', 'advertise_floating_ip_host_routes', 'advertise_tenant_networks'] res = dict((k, ri[k]) for k in res_keys) res['id'] = uuid bgp_speaker_db = BgpSpeaker(**res) context.session.add(bgp_speaker_db) def add_bgp_peer(self, context, bgp_speaker_id, bgp_peer_info): bgp_peer_id = self._get_id_for(bgp_peer_info, 'bgp_peer_id') self._save_bgp_speaker_peer_binding(context, bgp_speaker_id, bgp_peer_id) return {'bgp_peer_id': bgp_peer_id} def remove_bgp_peer(self, context, bgp_speaker_id, bgp_peer_info): bgp_peer_id = self._get_id_for(bgp_peer_info, 'bgp_peer_id') self._remove_bgp_speaker_peer_binding(context, bgp_speaker_id, bgp_peer_id) return {'bgp_peer_id': bgp_peer_id} def add_gateway_network(self, context, bgp_speaker_id, network_info): network_id = self._get_id_for(network_info, 'network_id') with context.session.begin(subtransactions=True): try: self._save_bgp_speaker_network_binding(context, bgp_speaker_id, network_id) except oslo_db_exc.DBDuplicateEntry: raise bgp_ext.BgpSpeakerNetworkBindingError( network_id=network_id, bgp_speaker_id=bgp_speaker_id) return {'network_id': network_id} def remove_gateway_network(self, context, bgp_speaker_id, network_info): with context.session.begin(subtransactions=True): network_id = self._get_id_for(network_info, 'network_id') self._remove_bgp_speaker_network_binding(context, bgp_speaker_id, network_id) return {'network_id': network_id} def delete_bgp_speaker(self, context, bgp_speaker_id): with context.session.begin(subtransactions=True): bgp_speaker_db = self._get_bgp_speaker(context, bgp_speaker_id) context.session.delete(bgp_speaker_db) def create_bgp_peer(self, context, bgp_peer): ri = bgp_peer[bgp_ext.BGP_PEER_BODY_KEY_NAME] auth_type = ri.get('auth_type') password = ri.get('password') if auth_type == 'md5' and not password: raise bgp_ext.InvalidBgpPeerMd5Authentication() with context.session.begin(subtransactions=True): res_keys = ['tenant_id', 'name', 'remote_as', 'peer_ip', 'auth_type', 'password'] res = dict((k, ri[k]) for k in res_keys) res['id'] = uuidutils.generate_uuid() bgp_peer_db = BgpPeer(**res) 
context.session.add(bgp_peer_db) peer = self._make_bgp_peer_dict(bgp_peer_db) peer.pop('password') return peer def get_bgp_peers(self, context, fields=None, filters=None, sorts=None, limit=None, marker=None, page_reverse=False): return self._get_collection(context, BgpPeer, self._make_bgp_peer_dict, filters=filters, fields=fields, sorts=sorts, limit=limit, page_reverse=page_reverse) def get_bgp_peers_by_bgp_speaker(self, context, bgp_speaker_id, fields=None): filters = [BgpSpeakerPeerBinding.bgp_speaker_id == bgp_speaker_id, BgpSpeakerPeerBinding.bgp_peer_id == BgpPeer.id] with context.session.begin(subtransactions=True): query = context.session.query(BgpPeer) query = query.filter(*filters) return [self._make_bgp_peer_dict(x) for x in query.all()] def get_bgp_peer(self, context, bgp_peer_id, fields=None): bgp_peer_db = self._get_bgp_peer(context, bgp_peer_id) return self._make_bgp_peer_dict(bgp_peer_db, fields=fields) def delete_bgp_peer(self, context, bgp_peer_id): with context.session.begin(subtransactions=True): bgp_peer_db = self._get_bgp_peer(context, bgp_peer_id) context.session.delete(bgp_peer_db) def update_bgp_peer(self, context, bgp_peer_id, bgp_peer): bp = bgp_peer[bgp_ext.BGP_PEER_BODY_KEY_NAME] with context.session.begin(subtransactions=True): bgp_peer_db = self._get_bgp_peer(context, bgp_peer_id) if ((bp.get('password') is not None) and (bgp_peer_db['auth_type'] == 'none')): raise bgp_ext.BgpPeerNotAuthenticated(bgp_peer_id=bgp_peer_id) bgp_peer_db.update(bp) bgp_peer_dict = self._make_bgp_peer_dict(bgp_peer_db) return bgp_peer_dict def _get_bgp_speaker(self, context, bgp_speaker_id): try: return self._get_by_id(context, BgpSpeaker, bgp_speaker_id) except sa_exc.NoResultFound: raise bgp_ext.BgpSpeakerNotFound(id=bgp_speaker_id) def _get_bgp_speaker_ids_by_router(self, context, router_id): with context.session.begin(subtransactions=True): network_binding = aliased(BgpSpeakerNetworkBinding) r_port = aliased(l3_db.RouterPort) query = context.session.query(network_binding.bgp_speaker_id) query = query.filter( r_port.router_id == router_id, r_port.port_type == lib_consts.DEVICE_OWNER_ROUTER_GW, r_port.port_id == models_v2.Port.id, models_v2.Port.network_id == network_binding.network_id) return [binding.bgp_speaker_id for binding in query.all()] def _get_bgp_speaker_ids_by_binding_network(self, context, network_id): with context.session.begin(subtransactions=True): query = context.session.query( BgpSpeakerNetworkBinding.bgp_speaker_id) query = query.filter( BgpSpeakerNetworkBinding.network_id == network_id) return query.all() def get_advertised_routes(self, context, bgp_speaker_id): routes = self.get_routes_by_bgp_speaker_id(context, bgp_speaker_id) return self._make_advertised_routes_dict(routes) def _get_id_for(self, resource, id_name): try: return resource.get(id_name) except AttributeError: msg = _("%s must be specified") % id_name raise n_exc.BadRequest(resource=bgp_ext.BGP_SPEAKER_RESOURCE_NAME, msg=msg) def _get_bgp_peers_by_bgp_speaker_binding(self, context, bgp_speaker_id): with context.session.begin(subtransactions=True): query = context.session.query(BgpPeer) query = query.filter( BgpSpeakerPeerBinding.bgp_speaker_id == bgp_speaker_id, BgpSpeakerPeerBinding.bgp_peer_id == BgpPeer.id) return query.all() def _save_bgp_speaker_peer_binding(self, context, bgp_speaker_id, bgp_peer_id): with context.session.begin(subtransactions=True): try: bgp_speaker = self._get_by_id(context, BgpSpeaker, bgp_speaker_id) except sa_exc.NoResultFound: raise 
bgp_ext.BgpSpeakerNotFound(id=bgp_speaker_id) try: bgp_peer = self._get_by_id(context, BgpPeer, bgp_peer_id) except sa_exc.NoResultFound: raise bgp_ext.BgpPeerNotFound(id=bgp_peer_id) peers = self._get_bgp_peers_by_bgp_speaker_binding(context, bgp_speaker_id) self._validate_peer_ips(bgp_speaker_id, peers, bgp_peer) binding = BgpSpeakerPeerBinding(bgp_speaker_id=bgp_speaker.id, bgp_peer_id=bgp_peer.id) context.session.add(binding) def _validate_peer_ips(self, bgp_speaker_id, current_peers, new_peer): for peer in current_peers: if peer.peer_ip == new_peer.peer_ip: raise bgp_ext.DuplicateBgpPeerIpException( bgp_peer_id=new_peer.id, peer_ip=new_peer.peer_ip, bgp_speaker_id=bgp_speaker_id) def _remove_bgp_speaker_peer_binding(self, context, bgp_speaker_id, bgp_peer_id): with context.session.begin(subtransactions=True): try: binding = self._get_bgp_speaker_peer_binding(context, bgp_speaker_id, bgp_peer_id) except sa_exc.NoResultFound: raise bgp_ext.BgpSpeakerPeerNotAssociated( bgp_peer_id=bgp_peer_id, bgp_speaker_id=bgp_speaker_id) context.session.delete(binding) def _save_bgp_speaker_network_binding(self, context, bgp_speaker_id, network_id): with context.session.begin(subtransactions=True): try: bgp_speaker = self._get_by_id(context, BgpSpeaker, bgp_speaker_id) except sa_exc.NoResultFound: raise bgp_ext.BgpSpeakerNotFound(id=bgp_speaker_id) try: network = self._get_by_id(context, models_v2.Network, network_id) except sa_exc.NoResultFound: raise n_exc.NetworkNotFound(net_id=network_id) binding = BgpSpeakerNetworkBinding( bgp_speaker_id=bgp_speaker.id, network_id=network.id, ip_version=bgp_speaker.ip_version) context.session.add(binding) def _remove_bgp_speaker_network_binding(self, context, bgp_speaker_id, network_id): with context.session.begin(subtransactions=True): try: binding = self._get_bgp_speaker_network_binding( context, bgp_speaker_id, network_id) except sa_exc.NoResultFound: raise bgp_ext.BgpSpeakerNetworkNotAssociated( network_id=network_id, bgp_speaker_id=bgp_speaker_id) context.session.delete(binding) def _make_bgp_speaker_dict(self, bgp_speaker, fields=None): attrs = {'id', 'local_as', 'tenant_id', 'name', 'ip_version', 'advertise_floating_ip_host_routes', 'advertise_tenant_networks'} peer_bindings = bgp_speaker['peers'] network_bindings = bgp_speaker['networks'] res = dict((k, bgp_speaker[k]) for k in attrs) res['peers'] = [x.bgp_peer_id for x in peer_bindings] res['networks'] = [x.network_id for x in network_bindings] return self._fields(res, fields) def _make_advertised_routes_dict(self, routes): return {'advertised_routes': list(routes)} def _get_bgp_peer(self, context, bgp_peer_id): try: return self._get_by_id(context, BgpPeer, bgp_peer_id) except sa_exc.NoResultFound: raise bgp_ext.BgpPeerNotFound(id=bgp_peer_id) def _get_bgp_speaker_peer_binding(self, context, bgp_speaker_id, bgp_peer_id): query = self._model_query(context, BgpSpeakerPeerBinding) return query.filter( BgpSpeakerPeerBinding.bgp_speaker_id == bgp_speaker_id, BgpSpeakerPeerBinding.bgp_peer_id == bgp_peer_id).one() def _get_bgp_speaker_network_binding(self, context, bgp_speaker_id, network_id): query = self._model_query(context, BgpSpeakerNetworkBinding) return query.filter( BgpSpeakerNetworkBinding.bgp_speaker_id == bgp_speaker_id, BgpSpeakerNetworkBinding.network_id == network_id).one() def _make_bgp_peer_dict(self, bgp_peer, fields=None): attrs = ['tenant_id', 'id', 'name', 'peer_ip', 'remote_as', 'auth_type', 'password'] res = dict((k, bgp_peer[k]) for k in attrs) return self._fields(res, fields) def 
_get_address_scope_ids_for_bgp_speaker(self, context, bgp_speaker_id): with context.session.begin(subtransactions=True): binding = aliased(BgpSpeakerNetworkBinding) address_scope = aliased(address_scope_db.AddressScope) query = context.session.query(address_scope) query = query.filter( binding.bgp_speaker_id == bgp_speaker_id, models_v2.Subnet.ip_version == binding.ip_version, models_v2.Subnet.network_id == binding.network_id, models_v2.Subnet.subnetpool_id == models_v2.SubnetPool.id, models_v2.SubnetPool.address_scope_id == address_scope.id) return [scope.id for scope in query.all()] def get_routes_by_bgp_speaker_id(self, context, bgp_speaker_id): """Get all routes that should be advertised by a BgpSpeaker.""" with context.session.begin(subtransactions=True): net_routes = self._get_tenant_network_routes_by_bgp_speaker( context, bgp_speaker_id) fip_routes = self._get_central_fip_host_routes_by_bgp_speaker( context, bgp_speaker_id) dvr_fip_routes = self._get_dvr_fip_host_routes_by_bgp_speaker( context, bgp_speaker_id) return itertools.chain(fip_routes, net_routes, dvr_fip_routes) def get_routes_by_bgp_speaker_binding(self, context, bgp_speaker_id, network_id): """Get all routes for the given bgp_speaker binding.""" with context.session.begin(subtransactions=True): fip_routes = self._get_central_fip_host_routes_by_binding( context, network_id, bgp_speaker_id) net_routes = self._get_tenant_network_routes_by_binding( context, network_id, bgp_speaker_id) dvr_fip_routes = self._get_dvr_fip_host_routes_by_binding( context, network_id, bgp_speaker_id) return itertools.chain(fip_routes, net_routes, dvr_fip_routes) def _get_routes_by_router(self, context, router_id): bgp_speaker_ids = self._get_bgp_speaker_ids_by_router(context, router_id) route_dict = {} for bgp_speaker_id in bgp_speaker_ids: fip_routes = self._get_central_fip_host_routes_by_router( context, router_id, bgp_speaker_id) net_routes = self._get_tenant_network_routes_by_router( context, router_id, bgp_speaker_id) dvr_fip_routes = self._get_dvr_fip_host_routes_by_router( context, router_id, bgp_speaker_id) routes = itertools.chain(fip_routes, net_routes, dvr_fip_routes) route_dict[bgp_speaker_id] = list(routes) return route_dict def _get_central_fip_host_routes_by_router(self, context, router_id, bgp_speaker_id): """Get floating IP host routes with the given router as nexthop.""" with context.session.begin(subtransactions=True): dest_alias = aliased(l3_db.FloatingIP, name='destination') next_hop_alias = aliased(models_v2.IPAllocation, name='next_hop') binding_alias = aliased(BgpSpeakerNetworkBinding, name='binding') router_attrs = aliased(l3_attrs_db.RouterExtraAttributes, name='router_attrs') query = context.session.query(dest_alias.floating_ip_address, next_hop_alias.ip_address) query = query.join( next_hop_alias, next_hop_alias.network_id == dest_alias.floating_network_id) query = query.join(l3_db.Router, dest_alias.router_id == l3_db.Router.id) query = query.filter( l3_db.Router.id == router_id, dest_alias.router_id == l3_db.Router.id, l3_db.Router.id == router_attrs.router_id, router_attrs.distributed == sa.sql.false(), l3_db.Router.gw_port_id == next_hop_alias.port_id, next_hop_alias.subnet_id == models_v2.Subnet.id, models_v2.Subnet.ip_version == 4, binding_alias.network_id == models_v2.Subnet.network_id, binding_alias.bgp_speaker_id == bgp_speaker_id, binding_alias.ip_version == 4, BgpSpeaker.advertise_floating_ip_host_routes == sa.sql.true()) query = query.outerjoin(router_attrs, l3_db.Router.id == router_attrs.router_id) query 
= query.filter(router_attrs.distributed != sa.sql.true())
            return self._host_route_list_from_tuples(query.all())

    def _get_dvr_fip_host_routes_by_router(self, context, router_id,
                                           bgp_speaker_id):
        with context.session.begin(subtransactions=True):
            gw_query = self._get_gateway_query(context, bgp_speaker_id)

            fip_query = self._get_fip_query(context, bgp_speaker_id)
            # filter() returns a new query, so the result must be
            # reassigned or the router_id restriction is silently lost
            fip_query = fip_query.filter(
                l3_db.FloatingIP.router_id == router_id)

            # Create the join query
            join_query = self._join_fip_by_host_binding_to_agent_gateway(
                context,
                fip_query.subquery(),
                gw_query.subquery())
            return self._host_route_list_from_tuples(join_query.all())

    def _get_central_fip_host_routes_by_binding(self, context, network_id,
                                                bgp_speaker_id):
        """Get all floating IP host routes for the given network binding."""
        with context.session.begin(subtransactions=True):
            # Query the DB for floating IPs and the IP address of the
            # gateway port
            dest_alias = aliased(l3_db.FloatingIP,
                                 name='destination')
            next_hop_alias = aliased(models_v2.IPAllocation,
                                     name='next_hop')
            binding_alias = aliased(BgpSpeakerNetworkBinding,
                                    name='binding')
            router_attrs = aliased(l3_attrs_db.RouterExtraAttributes,
                                   name='router_attrs')
            query = context.session.query(dest_alias.floating_ip_address,
                                          next_hop_alias.ip_address)
            query = query.join(
                next_hop_alias,
                next_hop_alias.network_id == dest_alias.floating_network_id)
            query = query.join(
                binding_alias,
                binding_alias.network_id == dest_alias.floating_network_id)
            query = query.join(l3_db.Router,
                               dest_alias.router_id == l3_db.Router.id)
            query = query.filter(
                dest_alias.floating_network_id == network_id,
                dest_alias.router_id == l3_db.Router.id,
                l3_db.Router.gw_port_id == next_hop_alias.port_id,
                next_hop_alias.subnet_id == models_v2.Subnet.id,
                models_v2.Subnet.ip_version == 4,
                binding_alias.network_id == models_v2.Subnet.network_id,
                binding_alias.bgp_speaker_id == BgpSpeaker.id,
                BgpSpeaker.id == bgp_speaker_id,
                BgpSpeaker.advertise_floating_ip_host_routes == sa.sql.true())
            query = query.outerjoin(router_attrs,
                                    l3_db.Router.id == router_attrs.router_id)
            query = query.filter(router_attrs.distributed != sa.sql.true())
            return self._host_route_list_from_tuples(query.all())

    def _get_dvr_fip_host_routes_by_binding(self, context, network_id,
                                            bgp_speaker_id):
        with context.session.begin(subtransactions=True):
            BgpBinding = BgpSpeakerNetworkBinding

            # reassign the filtered queries; filter() does not mutate
            # the query in place
            gw_query = self._get_gateway_query(context, bgp_speaker_id)
            gw_query = gw_query.filter(BgpBinding.network_id == network_id)
            fip_query = self._get_fip_query(context, bgp_speaker_id)
            fip_query = fip_query.filter(BgpBinding.network_id == network_id)

            # Create the join query
            join_query = self._join_fip_by_host_binding_to_agent_gateway(
                context,
                fip_query.subquery(),
                gw_query.subquery())
            return self._host_route_list_from_tuples(join_query.all())

    def _get_central_fip_host_routes_by_bgp_speaker(self, context,
                                                    bgp_speaker_id):
        """Get all the floating IP host routes advertised by a BgpSpeaker."""
        with context.session.begin(subtransactions=True):
            dest_alias = aliased(l3_db.FloatingIP,
                                 name='destination')
            next_hop_alias = aliased(models_v2.IPAllocation,
                                     name='next_hop')
            speaker_binding = aliased(BgpSpeakerNetworkBinding,
                                      name="speaker_network_mapping")
            router_attrs = aliased(l3_attrs_db.RouterExtraAttributes,
                                   name='router_attrs')
            query = context.session.query(dest_alias.floating_ip_address,
                                          next_hop_alias.ip_address)
            query = query.select_from(dest_alias, BgpSpeaker, l3_db.Router,
                                      models_v2.Subnet)
            query = query.join(
                next_hop_alias,
                next_hop_alias.network_id == dest_alias.floating_network_id)
            query = query.join(
                speaker_binding,
speaker_binding.network_id == dest_alias.floating_network_id) query = query.join(l3_db.Router, dest_alias.router_id == l3_db.Router.id) query = query.filter( BgpSpeaker.id == bgp_speaker_id, BgpSpeaker.advertise_floating_ip_host_routes, speaker_binding.bgp_speaker_id == BgpSpeaker.id, dest_alias.floating_network_id == speaker_binding.network_id, next_hop_alias.network_id == speaker_binding.network_id, dest_alias.router_id == l3_db.Router.id, l3_db.Router.gw_port_id == next_hop_alias.port_id, next_hop_alias.subnet_id == models_v2.Subnet.id, models_v2.Subnet.ip_version == 4) query = query.outerjoin(router_attrs, l3_db.Router.id == router_attrs.router_id) query = query.filter(router_attrs.distributed != sa.sql.true()) return self._host_route_list_from_tuples(query.all()) def _get_gateway_query(self, context, bgp_speaker_id): BgpBinding = BgpSpeakerNetworkBinding ML2PortBinding = ml2_models.PortBinding IpAllocation = models_v2.IPAllocation Port = models_v2.Port gw_query = context.session.query(Port.network_id, ML2PortBinding.host, IpAllocation.ip_address) #Subquery for FIP agent gateway ports gw_query = gw_query.filter( ML2PortBinding.port_id == Port.id, IpAllocation.port_id == Port.id, IpAllocation.subnet_id == models_v2.Subnet.id, models_v2.Subnet.ip_version == 4, Port.device_owner == lib_consts.DEVICE_OWNER_AGENT_GW, Port.network_id == BgpBinding.network_id, BgpBinding.bgp_speaker_id == bgp_speaker_id, BgpBinding.ip_version == 4) return gw_query def _get_fip_query(self, context, bgp_speaker_id): BgpBinding = BgpSpeakerNetworkBinding ML2PortBinding = ml2_models.PortBinding #Subquery for floating IP's fip_query = context.session.query( l3_db.FloatingIP.floating_network_id, ML2PortBinding.host, l3_db.FloatingIP.floating_ip_address) fip_query = fip_query.filter( l3_db.FloatingIP.fixed_port_id == ML2PortBinding.port_id, l3_db.FloatingIP.floating_network_id == BgpBinding.network_id, BgpBinding.bgp_speaker_id == bgp_speaker_id) return fip_query def _get_dvr_fip_host_routes_by_bgp_speaker(self, context, bgp_speaker_id): router_attrs = l3_attrs_db.RouterExtraAttributes with context.session.begin(subtransactions=True): gw_query = self._get_gateway_query(context, bgp_speaker_id) fip_query = self._get_fip_query(context, bgp_speaker_id) fip_query = fip_query.filter( l3_db.FloatingIP.router_id == router_attrs.router_id, router_attrs.distributed == sa.sql.true()) #Create the join query join_query = self._join_fip_by_host_binding_to_agent_gateway( context, fip_query.subquery(), gw_query.subquery()) return self._host_route_list_from_tuples(join_query.all()) def _join_fip_by_host_binding_to_agent_gateway(self, context, fip_subq, gw_subq): join_query = context.session.query(fip_subq.c.floating_ip_address, gw_subq.c.ip_address) and_cond = and_( gw_subq.c.host == fip_subq.c.host, gw_subq.c.network_id == fip_subq.c.floating_network_id) return join_query.join(gw_subq, and_cond) def _get_tenant_network_routes_by_binding(self, context, network_id, bgp_speaker_id): """Get all tenant network routes for the given network.""" with context.session.begin(subtransactions=True): tenant_networks_query = self._tenant_networks_by_network_query( context, network_id, bgp_speaker_id) nexthops_query = self._nexthop_ip_addresses_by_binding_query( context, network_id, bgp_speaker_id) join_q = self._join_tenant_networks_to_next_hops( context, tenant_networks_query.subquery(), nexthops_query.subquery()) return self._make_advertised_routes_list(join_q.all()) def _get_tenant_network_routes_by_router(self, context, router_id, 
bgp_speaker_id): """Get all tenant network routes with the given router as nexthop.""" with context.session.begin(subtransactions=True): scopes = self._get_address_scope_ids_for_bgp_speaker( context, bgp_speaker_id) address_scope = aliased(address_scope_db.AddressScope) inside_query = context.session.query( models_v2.Subnet.cidr, models_v2.IPAllocation.ip_address, address_scope.id) outside_query = context.session.query( address_scope.id, models_v2.IPAllocation.ip_address) speaker_binding = aliased(BgpSpeakerNetworkBinding, name="speaker_network_mapping") port_alias = aliased(l3_db.RouterPort, name='routerport') inside_query = inside_query.filter( port_alias.router_id == router_id, models_v2.IPAllocation.port_id == port_alias.port_id, models_v2.IPAllocation.subnet_id == models_v2.Subnet.id, models_v2.Subnet.subnetpool_id == models_v2.SubnetPool.id, models_v2.SubnetPool.address_scope_id == address_scope.id, address_scope.id.in_(scopes), port_alias.port_type != lib_consts.DEVICE_OWNER_ROUTER_GW, speaker_binding.bgp_speaker_id == bgp_speaker_id) outside_query = outside_query.filter( port_alias.router_id == router_id, port_alias.port_type == lib_consts.DEVICE_OWNER_ROUTER_GW, models_v2.IPAllocation.port_id == port_alias.port_id, models_v2.IPAllocation.subnet_id == models_v2.Subnet.id, models_v2.Subnet.subnetpool_id == models_v2.SubnetPool.id, models_v2.SubnetPool.address_scope_id == address_scope.id, address_scope.id.in_(scopes), speaker_binding.bgp_speaker_id == bgp_speaker_id, speaker_binding.network_id == models_v2.Port.network_id, port_alias.port_id == models_v2.Port.id) inside_query = inside_query.subquery() outside_query = outside_query.subquery() join_query = context.session.query(inside_query.c.cidr, outside_query.c.ip_address) and_cond = and_(inside_query.c.id == outside_query.c.id) join_query = join_query.join(outside_query, and_cond) return self._make_advertised_routes_list(join_query.all()) def _get_tenant_network_routes_by_bgp_speaker(self, context, bgp_speaker_id): """Get all tenant network routes to be advertised by a BgpSpeaker.""" with context.session.begin(subtransactions=True): tenant_nets_q = self._tenant_networks_by_bgp_speaker_query( context, bgp_speaker_id) nexthops_q = self._nexthop_ip_addresses_by_bgp_speaker_query( context, bgp_speaker_id) join_q = self._join_tenant_networks_to_next_hops( context, tenant_nets_q.subquery(), nexthops_q.subquery()) return self._make_advertised_routes_list(join_q.all()) def _join_tenant_networks_to_next_hops(self, context, tenant_networks_subquery, nexthops_subquery): """Join subquery for tenant networks to subquery for nexthop IP's""" left_subq = tenant_networks_subquery right_subq = nexthops_subquery join_query = context.session.query(left_subq.c.cidr, right_subq.c.ip_address) and_cond = and_(left_subq.c.router_id == right_subq.c.router_id, left_subq.c.ip_version == right_subq.c.ip_version) join_query = join_query.join(right_subq, and_cond) return join_query def _tenant_networks_by_network_query(self, context, network_id, bgp_speaker_id): """Return subquery for tenant networks by binding network ID""" address_scope = aliased(address_scope_db.AddressScope, name='address_scope') router_attrs = aliased(l3_attrs_db.RouterExtraAttributes, name='router_attrs') tenant_networks_query = context.session.query( l3_db.RouterPort.router_id, models_v2.Subnet.cidr, models_v2.Subnet.ip_version, address_scope.id) tenant_networks_query = tenant_networks_query.filter( l3_db.RouterPort.port_type != lib_consts.DEVICE_OWNER_ROUTER_GW, 
l3_db.RouterPort.port_type != lib_consts.DEVICE_OWNER_ROUTER_SNAT, l3_db.RouterPort.router_id == router_attrs.router_id, models_v2.IPAllocation.port_id == l3_db.RouterPort.port_id, models_v2.IPAllocation.subnet_id == models_v2.Subnet.id, models_v2.Subnet.network_id != network_id, models_v2.Subnet.subnetpool_id == models_v2.SubnetPool.id, models_v2.SubnetPool.address_scope_id == address_scope.id, BgpSpeaker.id == bgp_speaker_id, BgpSpeaker.ip_version == address_scope.ip_version, models_v2.Subnet.ip_version == address_scope.ip_version) return tenant_networks_query def _tenant_networks_by_bgp_speaker_query(self, context, bgp_speaker_id): """Return subquery for tenant networks by binding bgp_speaker_id""" router_id = l3_db.RouterPort.router_id.distinct().label('router_id') tenant_nets_subq = context.session.query(router_id, models_v2.Subnet.cidr, models_v2.Subnet.ip_version) scopes = self._get_address_scope_ids_for_bgp_speaker(context, bgp_speaker_id) filters = self._tenant_networks_by_bgp_speaker_filters(scopes) tenant_nets_subq = tenant_nets_subq.filter(*filters) return tenant_nets_subq def _tenant_networks_by_bgp_speaker_filters(self, address_scope_ids): """Return the filters for querying tenant networks by BGP speaker""" router_attrs = aliased(l3_attrs_db.RouterExtraAttributes, name='router_attrs') return [models_v2.IPAllocation.port_id == l3_db.RouterPort.port_id, l3_db.RouterPort.router_id == router_attrs.router_id, l3_db.RouterPort.port_type != lib_consts.DEVICE_OWNER_ROUTER_GW, l3_db.RouterPort.port_type != lib_consts.DEVICE_OWNER_ROUTER_SNAT, models_v2.IPAllocation.subnet_id == models_v2.Subnet.id, models_v2.Subnet.network_id != BgpSpeakerNetworkBinding.network_id, models_v2.Subnet.subnetpool_id == models_v2.SubnetPool.id, models_v2.SubnetPool.address_scope_id.in_(address_scope_ids), models_v2.Subnet.ip_version == BgpSpeakerNetworkBinding.ip_version, BgpSpeakerNetworkBinding.bgp_speaker_id == BgpSpeaker.id, BgpSpeaker.advertise_tenant_networks == sa.sql.true()] def _nexthop_ip_addresses_by_binding_query(self, context, network_id, bgp_speaker_id): """Return the subquery for locating nexthops by binding network""" nexthops_query = context.session.query( l3_db.RouterPort.router_id, models_v2.IPAllocation.ip_address, models_v2.Subnet.ip_version) filters = self._next_hop_ip_addresses_by_binding_filters( network_id, bgp_speaker_id) nexthops_query = nexthops_query.filter(*filters) return nexthops_query def _next_hop_ip_addresses_by_binding_filters(self, network_id, bgp_speaker_id): """Return the filters for querying nexthops by binding network""" address_scope = aliased(address_scope_db.AddressScope, name='address_scope') return [models_v2.IPAllocation.port_id == l3_db.RouterPort.port_id, models_v2.IPAllocation.subnet_id == models_v2.Subnet.id, BgpSpeaker.id == bgp_speaker_id, BgpSpeakerNetworkBinding.bgp_speaker_id == BgpSpeaker.id, BgpSpeakerNetworkBinding.network_id == network_id, models_v2.Subnet.network_id == BgpSpeakerNetworkBinding.network_id, models_v2.Subnet.subnetpool_id == models_v2.SubnetPool.id, models_v2.SubnetPool.address_scope_id == address_scope.id, models_v2.Subnet.ip_version == address_scope.ip_version, l3_db.RouterPort.port_type == DEVICE_OWNER_ROUTER_GW] def _nexthop_ip_addresses_by_bgp_speaker_query(self, context, bgp_speaker_id): """Return the subquery for locating nexthops by BGP speaker""" nexthops_query = context.session.query( l3_db.RouterPort.router_id, models_v2.IPAllocation.ip_address, models_v2.Subnet.ip_version) filters = 
self._next_hop_ip_addresses_by_bgp_speaker_filters(
                                                              bgp_speaker_id)
            nexthops_query = nexthops_query.filter(*filters)
            return nexthops_query

    def _next_hop_ip_addresses_by_bgp_speaker_filters(self, bgp_speaker_id):
        """Return the filters for querying nexthops by BGP speaker"""
        router_attrs = aliased(l3_attrs_db.RouterExtraAttributes,
                               name='router_attrs')

        return [l3_db.RouterPort.port_type == DEVICE_OWNER_ROUTER_GW,
                l3_db.RouterPort.router_id == router_attrs.router_id,
                BgpSpeakerNetworkBinding.network_id ==
                models_v2.Subnet.network_id,
                BgpSpeakerNetworkBinding.ip_version ==
                models_v2.Subnet.ip_version,
                BgpSpeakerNetworkBinding.bgp_speaker_id == bgp_speaker_id,
                models_v2.IPAllocation.port_id == l3_db.RouterPort.port_id,
                models_v2.IPAllocation.subnet_id == models_v2.Subnet.id]

    def _tenant_prefixes_by_router(self, context, router_id, bgp_speaker_id):
        with context.session.begin(subtransactions=True):
            query = context.session.query(models_v2.Subnet.cidr.distinct())
            filters = self._tenant_prefixes_by_router_filters(router_id,
                                                              bgp_speaker_id)
            query = query.filter(*filters)
            return [x[0] for x in query.all()]

    def _tenant_prefixes_by_router_filters(self, router_id, bgp_speaker_id):
        binding = aliased(BgpSpeakerNetworkBinding, name='network_binding')
        subnetpool = aliased(models_v2.SubnetPool, name='subnetpool')
        router_attrs = aliased(l3_attrs_db.RouterExtraAttributes,
                               name='router_attrs')
        return [models_v2.Subnet.id == models_v2.IPAllocation.subnet_id,
                models_v2.Subnet.subnetpool_id == subnetpool.id,
                l3_db.RouterPort.router_id == router_id,
                l3_db.Router.id == l3_db.RouterPort.router_id,
                l3_db.Router.id == router_attrs.router_id,
                l3_db.Router.gw_port_id == models_v2.Port.id,
                models_v2.Port.network_id == binding.network_id,
                binding.bgp_speaker_id == BgpSpeaker.id,
                l3_db.RouterPort.port_type == DEVICE_OWNER_ROUTER_INTF,
                models_v2.IPAllocation.port_id == l3_db.RouterPort.port_id]

    def _tenant_prefixes_by_router_interface(self,
                                             context,
                                             router_port_id,
                                             bgp_speaker_id):
        with context.session.begin(subtransactions=True):
            query = context.session.query(models_v2.Subnet.cidr.distinct())
            # use the port-scoped filters here: router_port_id is a port ID,
            # not a router ID
            filters = self._tenant_prefixes_by_router_port_filters(
                router_port_id, bgp_speaker_id)
            query = query.filter(*filters)
            return [x[0] for x in query.all()]

    def _tenant_prefixes_by_router_port_filters(self,
                                                router_port_id,
                                                bgp_speaker_id):
        binding = aliased(BgpSpeakerNetworkBinding, name='network_binding')
        return [models_v2.Subnet.id == models_v2.IPAllocation.subnet_id,
                l3_db.RouterPort.port_id == router_port_id,
                l3_db.Router.id == l3_db.RouterPort.router_id,
                l3_db.Router.gw_port_id == models_v2.Port.id,
                models_v2.Port.network_id == binding.network_id,
                binding.bgp_speaker_id == BgpSpeaker.id,
                models_v2.Subnet.ip_version == binding.ip_version,
                l3_db.RouterPort.port_type == DEVICE_OWNER_ROUTER_INTF,
                models_v2.IPAllocation.port_id == l3_db.RouterPort.port_id]

    def _bgp_speakers_for_gateway_network(self, context, network_id):
        """Return all BgpSpeakers for the given gateway network"""
        with context.session.begin(subtransactions=True):
            query = context.session.query(BgpSpeaker)
            query = query.filter(
                BgpSpeakerNetworkBinding.network_id == network_id,
                BgpSpeakerNetworkBinding.bgp_speaker_id == BgpSpeaker.id)
            return query.all()

    def _bgp_speakers_for_gw_network_by_family(self, context, network_id,
                                               ip_version):
        """Return the BgpSpeakers for the given gateway network and
        IP version
        """
        with context.session.begin(subtransactions=True):
            query = context.session.query(BgpSpeaker)
            query = query.filter(
                BgpSpeakerNetworkBinding.network_id == network_id,
BgpSpeakerNetworkBinding.bgp_speaker_id == BgpSpeaker.id, BgpSpeakerNetworkBinding.ip_version == ip_version) return query.all() def _make_advertised_routes_list(self, routes): route_list = ({'destination': x, 'next_hop': y} for x, y in routes) return route_list def _route_list_from_prefixes_and_next_hop(self, routes, next_hop): route_list = [{'destination': x, 'next_hop': next_hop} for x in routes] return route_list def _host_route_list_from_tuples(self, ip_next_hop_tuples): """Return the list of host routes given a list of (IP, nexthop)""" return ({'destination': x + '/32', 'next_hop': y} for x, y in ip_next_hop_tuples) neutron-8.4.0/neutron/db/agentschedulers_db.py0000664000567000056710000005355413044372760022656 0ustar jenkinsjenkins00000000000000# Copyright (c) 2013 OpenStack Foundation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import datetime import random import time from oslo_config import cfg from oslo_log import log as logging from oslo_service import loopingcall from oslo_utils import timeutils import sqlalchemy as sa from sqlalchemy import orm from sqlalchemy.orm import exc from neutron._i18n import _, _LE, _LI, _LW from neutron.common import constants from neutron.common import utils from neutron import context as ncontext from neutron.db import agents_db from neutron.db.availability_zone import network as network_az from neutron.db import model_base from neutron.extensions import agent as ext_agent from neutron.extensions import dhcpagentscheduler LOG = logging.getLogger(__name__) AGENTS_SCHEDULER_OPTS = [ cfg.StrOpt('network_scheduler_driver', default='neutron.scheduler.' 'dhcp_agent_scheduler.WeightScheduler', help=_('Driver to use for scheduling network to DHCP agent')), cfg.BoolOpt('network_auto_schedule', default=True, help=_('Allow auto scheduling networks to DHCP agent.')), cfg.BoolOpt('allow_automatic_dhcp_failover', default=True, help=_('Automatically remove networks from offline DHCP ' 'agents.')), cfg.IntOpt('dhcp_agents_per_network', default=1, help=_('Number of DHCP agents scheduled to host a tenant ' 'network. If this number is greater than 1, the ' 'scheduler automatically assigns multiple DHCP agents ' 'for a given tenant network, providing high ' 'availability for DHCP service.')), cfg.BoolOpt('enable_services_on_agents_with_admin_state_down', default=False, help=_('Enable services on an agent with admin_state_up ' 'False. If this option is False, when admin_state_up ' 'of an agent is turned False, services on it will be ' 'disabled. Agents with admin_state_up False are not ' 'selected for automatic scheduling regardless of this ' 'option. 
But manual scheduling to such agents is ' 'available if this option is True.')), ] cfg.CONF.register_opts(AGENTS_SCHEDULER_OPTS) class NetworkDhcpAgentBinding(model_base.BASEV2): """Represents binding between neutron networks and DHCP agents.""" network_id = sa.Column(sa.String(36), sa.ForeignKey("networks.id", ondelete='CASCADE'), primary_key=True) dhcp_agent = orm.relation(agents_db.Agent) dhcp_agent_id = sa.Column(sa.String(36), sa.ForeignKey("agents.id", ondelete='CASCADE'), primary_key=True) class AgentSchedulerDbMixin(agents_db.AgentDbMixin): """Common class for agent scheduler mixins.""" # agent notifiers to handle agent update operations; # should be updated by plugins; agent_notifiers = { constants.AGENT_TYPE_DHCP: None, constants.AGENT_TYPE_L3: None, constants.AGENT_TYPE_LOADBALANCER: None, } @staticmethod def is_eligible_agent(active, agent): if active is None: # filtering by activeness is disabled, all agents are eligible return True else: # note(rpodolyaka): original behaviour is saved here: if active # filter is set, only agents which are 'up' # (i.e. have a recent heartbeat timestamp) # are eligible, even if active is False return not agents_db.AgentDbMixin.is_agent_down( agent['heartbeat_timestamp']) def update_agent(self, context, id, agent): original_agent = self.get_agent(context, id) result = super(AgentSchedulerDbMixin, self).update_agent( context, id, agent) agent_data = agent['agent'] agent_notifier = self.agent_notifiers.get(original_agent['agent_type']) if (agent_notifier and 'admin_state_up' in agent_data and original_agent['admin_state_up'] != agent_data['admin_state_up']): agent_notifier.agent_updated(context, agent_data['admin_state_up'], original_agent['host']) return result def add_agent_status_check(self, function): loop = loopingcall.FixedIntervalLoopingCall(function) # TODO(enikanorov): make interval configurable rather than computed interval = max(cfg.CONF.agent_down_time // 2, 1) # add random initial delay to allow agents to check in after the # neutron server first starts. random to offset multiple servers initial_delay = random.randint(interval, interval * 2) loop.start(interval=interval, initial_delay=initial_delay) if hasattr(self, 'periodic_agent_loops'): self.periodic_agent_loops.append(loop) else: self.periodic_agent_loops = [loop] def agent_dead_limit_seconds(self): return cfg.CONF.agent_down_time * 2 def wait_down_agents(self, agent_type, agent_dead_limit): """Gives chance for agents to send a heartbeat.""" # check for an abrupt clock change since last check. if a change is # detected, sleep for a while to let the agents check in. tdelta = timeutils.utcnow() - getattr(self, '_clock_jump_canary', timeutils.utcnow()) if tdelta.total_seconds() > cfg.CONF.agent_down_time: LOG.warning(_LW("Time since last %s agent reschedule check has " "exceeded the interval between checks. Waiting " "before check to allow agents to send a heartbeat " "in case there was a clock adjustment."), agent_type) time.sleep(agent_dead_limit) self._clock_jump_canary = timeutils.utcnow() def get_cutoff_time(self, agent_dead_limit): cutoff = timeutils.utcnow() - datetime.timedelta( seconds=agent_dead_limit) return cutoff class DhcpAgentSchedulerDbMixin(dhcpagentscheduler .DhcpAgentSchedulerPluginBase, AgentSchedulerDbMixin): """Mixin class to add DHCP agent scheduler extension to db_base_plugin_v2. 
""" network_scheduler = None def start_periodic_dhcp_agent_status_check(self): if not cfg.CONF.allow_automatic_dhcp_failover: LOG.info(_LI("Skipping periodic DHCP agent status check because " "automatic network rescheduling is disabled.")) return self.add_agent_status_check(self.remove_networks_from_down_agents) def is_eligible_agent(self, context, active, agent): # eligible agent is active or starting up return (AgentSchedulerDbMixin.is_eligible_agent(active, agent) or self.agent_starting_up(context, agent)) def agent_starting_up(self, context, agent): """Check if agent was just started. Method returns True if agent is in its 'starting up' period. Return value depends on amount of networks assigned to the agent. It doesn't look at latest heartbeat timestamp as it is assumed that this method is called for agents that are considered dead. """ agent_dead_limit = datetime.timedelta( seconds=self.agent_dead_limit_seconds()) network_count = (context.session.query(NetworkDhcpAgentBinding). filter_by(dhcp_agent_id=agent['id']).count()) # amount of networks assigned to agent affect amount of time we give # it so startup. Tests show that it's more or less sage to assume # that DHCP agent processes each network in less than 2 seconds. # So, give it this additional time for each of the networks. additional_time = datetime.timedelta(seconds=2 * network_count) LOG.debug("Checking if agent starts up and giving it additional %s", additional_time) agent_expected_up = (agent['started_at'] + agent_dead_limit + additional_time) return agent_expected_up > timeutils.utcnow() def _schedule_network(self, context, network_id, dhcp_notifier): LOG.info(_LI("Scheduling unhosted network %s"), network_id) try: # TODO(enikanorov): have to issue redundant db query # to satisfy scheduling interface network = self.get_network(context, network_id) agents = self.schedule_network(context, network) if not agents: LOG.info(_LI("Failed to schedule network %s, " "no eligible agents or it might be " "already scheduled by another server"), network_id) return if not dhcp_notifier: return for agent in agents: LOG.info(_LI("Adding network %(net)s to agent " "%(agent)s on host %(host)s"), {'net': network_id, 'agent': agent.id, 'host': agent.host}) dhcp_notifier.network_added_to_agent( context, network_id, agent.host) except Exception: # catching any exception during scheduling # so if _schedule_network is invoked in the loop it could # continue in any case LOG.exception(_LE("Failed to schedule network %s"), network_id) def _filter_bindings(self, context, bindings): """Skip bindings for which the agent is dead, but starting up.""" # to save few db calls: store already checked agents in dict # id -> is_agent_starting_up checked_agents = {} for binding in bindings: try: agent_id = binding.dhcp_agent['id'] if agent_id not in checked_agents: if self.agent_starting_up(context, binding.dhcp_agent): # When agent starts and it has many networks to process # it may fail to send state reports in defined interval # The server will consider it dead and try to remove # networks from it. 
                        checked_agents[agent_id] = True
                        LOG.debug("Agent %s is starting up, skipping",
                                  agent_id)
                    else:
                        checked_agents[agent_id] = False
                if not checked_agents[agent_id]:
                    yield binding
            except exc.ObjectDeletedError:
                # we're not within a transaction, so the object can be lost
                # because the underlying row is removed; just ignore this
                # issue
                LOG.debug("binding was removed concurrently, skipping it")

    def remove_networks_from_down_agents(self):
        """Remove networks from down DHCP agents if admin state is up.

        Reschedule them if configured so.
        """

        agent_dead_limit = self.agent_dead_limit_seconds()
        self.wait_down_agents('DHCP', agent_dead_limit)
        cutoff = self.get_cutoff_time(agent_dead_limit)

        context = ncontext.get_admin_context()
        try:
            down_bindings = (
                context.session.query(NetworkDhcpAgentBinding).
                join(agents_db.Agent).
                filter(agents_db.Agent.heartbeat_timestamp < cutoff,
                       agents_db.Agent.admin_state_up))
            dhcp_notifier = self.agent_notifiers.get(
                constants.AGENT_TYPE_DHCP)
            dead_bindings = [b for b in
                             self._filter_bindings(context, down_bindings)]
            agents = self.get_agents_db(
                context, {'agent_type': [constants.AGENT_TYPE_DHCP]})
            if not agents:
                # No agents configured so nothing to do.
                return
            active_agents = [agent for agent in agents if
                             self.is_eligible_agent(context, True, agent)]
            if not active_agents:
                LOG.warning(_LW("No DHCP agents available, "
                                "skipping rescheduling"))
                return
            for binding in dead_bindings:
                LOG.warning(_LW("Removing network %(network)s from agent "
                                "%(agent)s because the agent did not report "
                                "to the server in the last %(dead_time)s "
                                "seconds."),
                            {'network': binding.network_id,
                             'agent': binding.dhcp_agent_id,
                             'dead_time': agent_dead_limit})
                # save binding object to avoid ObjectDeletedError
                # in case binding is concurrently deleted from the DB
                saved_binding = {'net': binding.network_id,
                                 'agent': binding.dhcp_agent_id}
                try:
                    # do not notify the agent if it is considered dead, so
                    # that when it is restarted it won't see network delete
                    # notifications on its queue
                    self.remove_network_from_dhcp_agent(
                        context,
                        binding.dhcp_agent_id,
                        binding.network_id,
                        notify=False)
                except dhcpagentscheduler.NetworkNotHostedByDhcpAgent:
                    # measures against concurrent operation
                    LOG.debug("Network %(net)s already removed from DHCP "
                              "agent %(agent)s",
                              saved_binding)
                    # still continue and allow concurrent scheduling attempt
                except Exception:
                    LOG.exception(_LE("Unexpected exception occurred while "
                                      "removing network %(net)s from agent "
                                      "%(agent)s"),
                                  saved_binding)

                if cfg.CONF.network_auto_schedule:
                    self._schedule_network(
                        context, saved_binding['net'], dhcp_notifier)
        except Exception:
            # we want to be thorough and catch whatever is raised
            # to avoid loop abortion
            LOG.exception(_LE("Exception encountered during network "
                              "rescheduling"))

    def get_dhcp_agents_hosting_networks(
            self, context, network_ids, active=None, admin_state_up=None):
        if not network_ids:
            return []
        query = context.session.query(NetworkDhcpAgentBinding)
        query = query.options(orm.contains_eager(
                              NetworkDhcpAgentBinding.dhcp_agent))
        query = query.join(NetworkDhcpAgentBinding.dhcp_agent)
        if len(network_ids) == 1:
            query = query.filter(
                NetworkDhcpAgentBinding.network_id == network_ids[0])
        elif network_ids:
            # use in_() to build a SQL IN clause; the Python 'in' operator
            # does not produce a SQL expression here
            query = query.filter(
                NetworkDhcpAgentBinding.network_id.in_(network_ids))
        if admin_state_up is not None:
            query = query.filter(agents_db.Agent.admin_state_up ==
                                 admin_state_up)

        return [binding.dhcp_agent
                for binding in query
                if self.is_eligible_agent(context, active,
                                          binding.dhcp_agent)]

    def add_network_to_dhcp_agent(self, context, id, network_id):
        self._get_network(context,
network_id) with context.session.begin(subtransactions=True): agent_db = self._get_agent(context, id) if (agent_db['agent_type'] != constants.AGENT_TYPE_DHCP or not services_available(agent_db['admin_state_up'])): raise dhcpagentscheduler.InvalidDHCPAgent(id=id) dhcp_agents = self.get_dhcp_agents_hosting_networks( context, [network_id]) for dhcp_agent in dhcp_agents: if id == dhcp_agent.id: raise dhcpagentscheduler.NetworkHostedByDHCPAgent( network_id=network_id, agent_id=id) binding = NetworkDhcpAgentBinding() binding.dhcp_agent_id = id binding.network_id = network_id context.session.add(binding) dhcp_notifier = self.agent_notifiers.get(constants.AGENT_TYPE_DHCP) if dhcp_notifier: dhcp_notifier.network_added_to_agent( context, network_id, agent_db.host) def remove_network_from_dhcp_agent(self, context, id, network_id, notify=True): agent = self._get_agent(context, id) with context.session.begin(subtransactions=True): try: query = context.session.query(NetworkDhcpAgentBinding) query = query.filter( NetworkDhcpAgentBinding.network_id == network_id, NetworkDhcpAgentBinding.dhcp_agent_id == id) # just ensure the binding exists query.one() except exc.NoResultFound: raise dhcpagentscheduler.NetworkNotHostedByDhcpAgent( network_id=network_id, agent_id=id) # reserve the port, so the ip is reused on a subsequent add device_id = utils.get_dhcp_agent_device_id(network_id, agent['host']) filters = dict(device_id=[device_id]) ports = self.get_ports(context, filters=filters) for port in ports: port['device_id'] = constants.DEVICE_ID_RESERVED_DHCP_PORT self.update_port(context, port['id'], dict(port=port)) # avoid issues with query.one() object that was # loaded into the session query.delete(synchronize_session=False) if not notify: return dhcp_notifier = self.agent_notifiers.get(constants.AGENT_TYPE_DHCP) if dhcp_notifier: dhcp_notifier.network_removed_from_agent( context, network_id, agent.host) def list_networks_on_dhcp_agent(self, context, id): query = context.session.query(NetworkDhcpAgentBinding.network_id) query = query.filter(NetworkDhcpAgentBinding.dhcp_agent_id == id) net_ids = [item[0] for item in query] if net_ids: return {'networks': self.get_networks(context, filters={'id': net_ids})} else: # Exception will be thrown if the requested agent does not exist. 
self._get_agent(context, id) return {'networks': []} def list_active_networks_on_active_dhcp_agent(self, context, host): try: agent = self._get_agent_by_type_and_host( context, constants.AGENT_TYPE_DHCP, host) except ext_agent.AgentNotFoundByTypeHost: LOG.debug("DHCP Agent not found on host %s", host) return [] if not services_available(agent.admin_state_up): return [] query = context.session.query(NetworkDhcpAgentBinding.network_id) query = query.filter(NetworkDhcpAgentBinding.dhcp_agent_id == agent.id) net_ids = [item[0] for item in query] if net_ids: return self.get_networks( context, filters={'id': net_ids, 'admin_state_up': [True]} ) else: return [] def list_dhcp_agents_hosting_network(self, context, network_id): dhcp_agents = self.get_dhcp_agents_hosting_networks( context, [network_id]) agent_ids = [dhcp_agent.id for dhcp_agent in dhcp_agents] if agent_ids: return { 'agents': self.get_agents(context, filters={'id': agent_ids})} else: return {'agents': []} def schedule_network(self, context, created_network): if self.network_scheduler: return self.network_scheduler.schedule( self, context, created_network) def auto_schedule_networks(self, context, host): if self.network_scheduler: self.network_scheduler.auto_schedule_networks(self, context, host) class AZDhcpAgentSchedulerDbMixin(DhcpAgentSchedulerDbMixin, network_az.NetworkAvailabilityZoneMixin): """Mixin class to add availability_zone supported DHCP agent scheduler.""" def get_network_availability_zones(self, network): zones = {agent.availability_zone for agent in network.dhcp_agents} return list(zones) # helper functions for readability. def services_available(admin_state_up): if cfg.CONF.enable_services_on_agents_with_admin_state_down: # Services are available regardless admin_state_up return True return admin_state_up def get_admin_state_up_filter(): if cfg.CONF.enable_services_on_agents_with_admin_state_down: # Avoid filtering on admin_state_up at all return None # Filters on admin_state_up is True return True neutron-8.4.0/neutron/db/l3_attrs_db.py0000664000567000056710000000722413044372760021222 0ustar jenkinsjenkins00000000000000# Copyright (c) 2014 OpenStack Foundation. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import sqlalchemy as sa from sqlalchemy import orm from neutron.db import db_base_plugin_v2 from neutron.db import model_base from neutron.extensions import l3 class RouterExtraAttributes(model_base.BASEV2): """Additional attributes for a Virtual Router.""" # NOTE(armando-migliaccio): this model can be a good place to # add extension attributes to a Router model. Each case needs # to be individually examined, however 'distributed' and other # simple ones fit the pattern well. 
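    # A minimal sketch of the pattern the note above describes (the column
    # name 'fancy_feature' is hypothetical, not part of this model): a new
    # simple attribute would be one more column here, e.g.
    #
    #     fancy_feature = sa.Column(sa.Boolean, default=False,
    #                               server_default=sa.sql.false(),
    #                               nullable=False)
    #
    # plus an entry such as {'name': 'fancy_feature', 'default': False}
    # appended to a plugin's ExtraAttributesMixin.extra_attributes list,
    # which is how L3_NAT_with_dvr_db_mixin exposes 'distributed'.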
__tablename__ = "router_extra_attributes" router_id = sa.Column(sa.String(36), sa.ForeignKey('routers.id', ondelete="CASCADE"), primary_key=True) # Whether the router is a legacy (centralized) or a distributed one distributed = sa.Column(sa.Boolean, default=False, server_default=sa.sql.false(), nullable=False) # Whether the router is to be considered a 'service' router service_router = sa.Column(sa.Boolean, default=False, server_default=sa.sql.false(), nullable=False) ha = sa.Column(sa.Boolean, default=False, server_default=sa.sql.false(), nullable=False) ha_vr_id = sa.Column(sa.Integer()) # Availability Zone support availability_zone_hints = sa.Column(sa.String(255)) router = orm.relationship( 'Router', backref=orm.backref("extra_attributes", lazy='joined', uselist=False, cascade='delete')) class ExtraAttributesMixin(object): """Mixin class to enable router's extra attributes.""" extra_attributes = [] def _extend_extra_router_dict(self, router_res, router_db): extra_attrs = router_db['extra_attributes'] or {} for attr in self.extra_attributes: name = attr['name'] default = attr['default'] router_res[name] = ( extra_attrs[name] if name in extra_attrs else default) def _get_extra_attributes(self, router, extra_attributes): return (dict((attr['name'], router.get(attr['name'], attr['default'])) for attr in extra_attributes)) def _process_extra_attr_router_create( self, context, router_db, router_req): kwargs = self._get_extra_attributes(router_req, self.extra_attributes) # extra_attributes reference is populated via backref if not router_db['extra_attributes']: attributes_db = RouterExtraAttributes( router_id=router_db['id'], **kwargs) context.session.add(attributes_db) router_db['extra_attributes'] = attributes_db else: # The record will exist if RouterExtraAttributes model's # attributes are added with db migrations over time router_db['extra_attributes'].update(kwargs) db_base_plugin_v2.NeutronDbPluginV2.register_dict_extend_funcs( l3.ROUTERS, ['_extend_extra_router_dict']) neutron-8.4.0/neutron/db/l3_agentschedulers_db.py0000664000567000056710000006135313044372760023250 0ustar jenkinsjenkins00000000000000# Copyright (c) 2013 OpenStack Foundation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from oslo_config import cfg
from oslo_db import exception as db_exc
from oslo_log import log as logging
import oslo_messaging
import six
import sqlalchemy as sa
from sqlalchemy import func
from sqlalchemy import or_
from sqlalchemy import orm
from sqlalchemy.orm import joinedload
from sqlalchemy import sql

from neutron._i18n import _, _LE, _LI, _LW
from neutron.common import constants
from neutron.common import utils as n_utils
from neutron import context as n_ctx
from neutron.db import agents_db
from neutron.db import agentschedulers_db
from neutron.db import l3_attrs_db
from neutron.db import model_base
from neutron.extensions import l3agentscheduler
from neutron.extensions import router_availability_zone as router_az
from neutron import manager
from neutron.plugins.common import constants as service_constants


LOG = logging.getLogger(__name__)

L3_AGENTS_SCHEDULER_OPTS = [
    cfg.StrOpt('router_scheduler_driver',
               default='neutron.scheduler.l3_agent_scheduler.'
                       'LeastRoutersScheduler',
               help=_('Driver to use for scheduling '
                      'router to a default L3 agent')),
    cfg.BoolOpt('router_auto_schedule', default=True,
                help=_('Allow auto scheduling of routers to L3 agent.')),
    cfg.BoolOpt('allow_automatic_l3agent_failover', default=False,
                help=_('Automatically reschedule routers from offline L3 '
                       'agents to online L3 agents.')),
]

cfg.CONF.register_opts(L3_AGENTS_SCHEDULER_OPTS)

# the default messaging timeout is 60 sec, so 2 here is chosen to not block
# the API call for more than 2 minutes
AGENT_NOTIFY_MAX_ATTEMPTS = 2


class RouterL3AgentBinding(model_base.BASEV2):
    """Represents binding between neutron routers and L3 agents."""

    router_id = sa.Column(sa.String(36),
                          sa.ForeignKey("routers.id", ondelete='CASCADE'),
                          primary_key=True)
    l3_agent = orm.relation(agents_db.Agent)
    l3_agent_id = sa.Column(sa.String(36),
                            sa.ForeignKey("agents.id", ondelete='CASCADE'),
                            primary_key=True)


class L3AgentSchedulerDbMixin(l3agentscheduler.L3AgentSchedulerPluginBase,
                              agentschedulers_db.AgentSchedulerDbMixin):
    """Mixin class to add l3 agent scheduler extension to plugins
    using the l3 agent for routing.
    """

    router_scheduler = None

    def start_periodic_l3_agent_status_check(self):
        if not cfg.CONF.allow_automatic_l3agent_failover:
            LOG.info(_LI("Skipping periodic L3 agent status check because "
                         "automatic router rescheduling is disabled."))
            return

        self.add_agent_status_check(
            self.reschedule_routers_from_down_agents)

    def reschedule_routers_from_down_agents(self):
        """Reschedule routers from down l3 agents if admin state is up."""
        agent_dead_limit = self.agent_dead_limit_seconds()
        self.wait_down_agents('L3', agent_dead_limit)
        cutoff = self.get_cutoff_time(agent_dead_limit)

        context = n_ctx.get_admin_context()
        try:
            down_bindings = (
                context.session.query(RouterL3AgentBinding).
                join(agents_db.Agent).
                filter(agents_db.Agent.heartbeat_timestamp < cutoff,
                       agents_db.Agent.admin_state_up).
                outerjoin(l3_attrs_db.RouterExtraAttributes,
                          l3_attrs_db.RouterExtraAttributes.router_id ==
                          RouterL3AgentBinding.router_id).
filter(sa.or_(l3_attrs_db.RouterExtraAttributes.ha == sql.false(), l3_attrs_db.RouterExtraAttributes.ha == sql.null()))) agents_back_online = set() for binding in down_bindings: if binding.l3_agent_id in agents_back_online: continue else: # we need new context to make sure we use different DB # transaction - otherwise we may fetch same agent record # each time due to REPEATABLE_READ isolation level context = n_ctx.get_admin_context() agent = self._get_agent(context, binding.l3_agent_id) if agent.is_active: agents_back_online.add(binding.l3_agent_id) continue LOG.warning(_LW( "Rescheduling router %(router)s from agent %(agent)s " "because the agent did not report to the server in " "the last %(dead_time)s seconds."), {'router': binding.router_id, 'agent': binding.l3_agent_id, 'dead_time': agent_dead_limit}) try: self.reschedule_router(context, binding.router_id) except (l3agentscheduler.RouterReschedulingFailed, oslo_messaging.RemoteError): # Catch individual router rescheduling errors here # so one broken one doesn't stop the iteration. LOG.exception(_LE("Failed to reschedule router %s"), binding.router_id) except Exception: # we want to be thorough and catch whatever is raised # to avoid loop abortion LOG.exception(_LE("Exception encountered during router " "rescheduling.")) def _get_agent_mode(self, agent_db): agent_conf = self.get_configuration_dict(agent_db) return agent_conf.get(constants.L3_AGENT_MODE, constants.L3_AGENT_MODE_LEGACY) def validate_agent_router_combination(self, context, agent, router): """Validate if the router can be correctly assigned to the agent. :raises: RouterL3AgentMismatch if attempting to assign DVR router to legacy agent. :raises: InvalidL3Agent if attempting to assign router to an unsuitable agent (disabled, type != L3, incompatible configuration) :raises: DVRL3CannotAssignToDvrAgent if attempting to assign a router to an agent in 'dvr' mode. """ if agent['agent_type'] != constants.AGENT_TYPE_L3: raise l3agentscheduler.InvalidL3Agent(id=agent['id']) agent_mode = self._get_agent_mode(agent) if agent_mode == constants.L3_AGENT_MODE_DVR: raise l3agentscheduler.DVRL3CannotAssignToDvrAgent() if (agent_mode == constants.L3_AGENT_MODE_LEGACY and router.get('distributed')): raise l3agentscheduler.RouterL3AgentMismatch( router_id=router['id'], agent_id=agent['id']) is_suitable_agent = ( agentschedulers_db.services_available(agent['admin_state_up']) and self.get_l3_agent_candidates(context, router, [agent], ignore_admin_state=True)) if not is_suitable_agent: raise l3agentscheduler.InvalidL3Agent(id=agent['id']) def check_agent_router_scheduling_needed(self, context, agent, router): """Check if the router scheduling is needed. :raises: RouterHostedByL3Agent if router is already assigned to a different agent. 
:returns: True if scheduling is needed, otherwise False """ router_id = router['id'] agent_id = agent['id'] query = context.session.query(RouterL3AgentBinding) bindings = query.filter_by(router_id=router_id).all() if not bindings: return True for binding in bindings: if binding.l3_agent_id == agent_id: # router already bound to the agent we need return False if router.get('ha'): return True # legacy router case: router is already bound to some agent raise l3agentscheduler.RouterHostedByL3Agent( router_id=router_id, agent_id=bindings[0].l3_agent_id) def create_router_to_agent_binding(self, context, agent, router): """Create router to agent binding.""" router_id = router['id'] agent_id = agent['id'] if self.router_scheduler: try: if router.get('ha'): plugin = manager.NeutronManager.get_service_plugins().get( service_constants.L3_ROUTER_NAT) self.router_scheduler.create_ha_port_and_bind( plugin, context, router['id'], router['tenant_id'], agent) else: self.router_scheduler.bind_router( context, router_id, agent) except db_exc.DBError: raise l3agentscheduler.RouterSchedulingFailed( router_id=router_id, agent_id=agent_id) def add_router_to_l3_agent(self, context, agent_id, router_id): """Add a l3 agent to host a router.""" with context.session.begin(subtransactions=True): router = self.get_router(context, router_id) agent = self._get_agent(context, agent_id) self.validate_agent_router_combination(context, agent, router) if not self.check_agent_router_scheduling_needed( context, agent, router): return self.create_router_to_agent_binding(context, agent, router) l3_notifier = self.agent_notifiers.get(constants.AGENT_TYPE_L3) if l3_notifier: l3_notifier.router_added_to_agent( context, [router_id], agent.host) def remove_router_from_l3_agent(self, context, agent_id, router_id): """Remove the router from l3 agent. After removal, the router will be non-hosted until there is update which leads to re-schedule or be added to another agent manually. """ agent = self._get_agent(context, agent_id) agent_mode = self._get_agent_mode(agent) if agent_mode == constants.L3_AGENT_MODE_DVR: raise l3agentscheduler.DVRL3CannotRemoveFromDvrAgent() self._unbind_router(context, router_id, agent_id) router = self.get_router(context, router_id) plugin = manager.NeutronManager.get_service_plugins().get( service_constants.L3_ROUTER_NAT) if router.get('ha'): plugin.delete_ha_interfaces_on_host(context, router_id, agent.host) # NOTE(Swami): Need to verify if there are DVR serviceable # ports owned by this agent. If owned by this agent, then # the routers should be retained. This flag will be used # to check if there are valid routers in this agent. 
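        # If this agent still hosts DVR serviceable ports on the router's
        # subnets, it is notified below with routers_updated_on_host() so
        # it keeps serving those ports; otherwise it is notified with
        # router_removed_from_agent().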
retain_router = False if router.get('distributed'): subnet_ids = plugin.get_subnet_ids_on_router(context, router_id) if subnet_ids and agent.host: retain_router = plugin._check_dvr_serviceable_ports_on_host( context, agent.host, subnet_ids) l3_notifier = self.agent_notifiers.get(constants.AGENT_TYPE_L3) if retain_router and l3_notifier: l3_notifier.routers_updated_on_host( context, [router_id], agent.host) elif l3_notifier: l3_notifier.router_removed_from_agent( context, router_id, agent.host) def _unbind_router(self, context, router_id, agent_id): with context.session.begin(subtransactions=True): query = context.session.query(RouterL3AgentBinding) query = query.filter( RouterL3AgentBinding.router_id == router_id, RouterL3AgentBinding.l3_agent_id == agent_id) query.delete() def _unschedule_router(self, context, router_id, agents_ids): with context.session.begin(subtransactions=True): for agent_id in agents_ids: self._unbind_router(context, router_id, agent_id) def reschedule_router(self, context, router_id, candidates=None): """Reschedule router to (a) new l3 agent(s) Remove the router from the agent(s) currently hosting it and schedule it again """ cur_agents = self.list_l3_agents_hosting_router( context, router_id)['agents'] with context.session.begin(subtransactions=True): cur_agents_ids = [agent['id'] for agent in cur_agents] self._unschedule_router(context, router_id, cur_agents_ids) self.schedule_router(context, router_id, candidates=candidates) new_agents = self.list_l3_agents_hosting_router( context, router_id)['agents'] if not new_agents: raise l3agentscheduler.RouterReschedulingFailed( router_id=router_id) self._notify_agents_router_rescheduled(context, router_id, cur_agents, new_agents) def _notify_agents_router_rescheduled(self, context, router_id, old_agents, new_agents): l3_notifier = self.agent_notifiers.get(constants.AGENT_TYPE_L3) if not l3_notifier: return old_hosts = [agent['host'] for agent in old_agents] new_hosts = [agent['host'] for agent in new_agents] for host in set(old_hosts) - set(new_hosts): l3_notifier.router_removed_from_agent( context, router_id, host) for agent in new_agents: # Need to make sure agents are notified or unschedule otherwise for attempt in range(AGENT_NOTIFY_MAX_ATTEMPTS): try: l3_notifier.router_added_to_agent( context, [router_id], agent['host']) break except oslo_messaging.MessagingException: LOG.warning(_LW('Failed to notify L3 agent on host ' '%(host)s about added router. Attempt ' '%(attempt)d out of %(max_attempts)d'), {'host': agent['host'], 'attempt': attempt + 1, 'max_attempts': AGENT_NOTIFY_MAX_ATTEMPTS}) else: self._unbind_router(context, router_id, agent['id']) raise l3agentscheduler.RouterReschedulingFailed( router_id=router_id) def list_routers_on_l3_agent(self, context, agent_id): query = context.session.query(RouterL3AgentBinding.router_id) query = query.filter(RouterL3AgentBinding.l3_agent_id == agent_id) router_ids = [item[0] for item in query] if router_ids: return {'routers': self.get_routers(context, filters={'id': router_ids})} else: # Exception will be thrown if the requested agent does not exist. 
self._get_agent(context, agent_id) return {'routers': []} def _get_active_l3_agent_routers_sync_data(self, context, host, agent, router_ids): if n_utils.is_extension_supported(self, constants.L3_HA_MODE_EXT_ALIAS): routers = self.get_ha_sync_data_for_host(context, host, agent, router_ids=router_ids, active=True) else: routers = self.get_sync_data(context, router_ids=router_ids, active=True) return self.filter_allocating_and_missing_routers(context, routers) def list_router_ids_on_host(self, context, host, router_ids=None): agent = self._get_agent_by_type_and_host( context, constants.AGENT_TYPE_L3, host) if not agentschedulers_db.services_available(agent.admin_state_up): return [] return self._get_router_ids_for_agent(context, agent, router_ids) def _get_router_ids_for_agent(self, context, agent, router_ids): """Get IDs of routers that the agent should host Overridden for DVR to handle agents in 'dvr' mode which have no explicit bindings with routers """ query = context.session.query(RouterL3AgentBinding.router_id) query = query.filter( RouterL3AgentBinding.l3_agent_id == agent.id) if router_ids: query = query.filter( RouterL3AgentBinding.router_id.in_(router_ids)) return [item[0] for item in query] def list_active_sync_routers_on_active_l3_agent( self, context, host, router_ids): agent = self._get_agent_by_type_and_host( context, constants.AGENT_TYPE_L3, host) if not agentschedulers_db.services_available(agent.admin_state_up): LOG.debug("Agent has its services disabled. Returning " "no active routers. Agent: %s", agent) return [] scheduled_router_ids = self._get_router_ids_for_agent( context, agent, router_ids) diff = set(router_ids or []) - set(scheduled_router_ids or []) if diff: LOG.debug("Agent requested router IDs not scheduled to it. " "Scheduled: %(sched)s. Unscheduled: %(diff)s. 
" "Agent: %(agent)s.", {'sched': scheduled_router_ids, 'diff': diff, 'agent': agent}) if scheduled_router_ids: return self._get_active_l3_agent_routers_sync_data( context, host, agent, scheduled_router_ids) return [] def get_l3_agents_hosting_routers(self, context, router_ids, admin_state_up=None, active=None): if not router_ids: return [] query = context.session.query(RouterL3AgentBinding) query = query.options(orm.contains_eager( RouterL3AgentBinding.l3_agent)) query = query.join(RouterL3AgentBinding.l3_agent) query = query.filter(RouterL3AgentBinding.router_id.in_(router_ids)) if admin_state_up is not None: query = (query.filter(agents_db.Agent.admin_state_up == admin_state_up)) l3_agents = [binding.l3_agent for binding in query] if active is not None: l3_agents = [l3_agent for l3_agent in l3_agents if not agents_db.AgentDbMixin.is_agent_down( l3_agent['heartbeat_timestamp'])] return l3_agents def _get_l3_bindings_hosting_routers(self, context, router_ids): if not router_ids: return [] query = context.session.query(RouterL3AgentBinding) query = query.options(joinedload('l3_agent')).filter( RouterL3AgentBinding.router_id.in_(router_ids)) return query.all() def list_l3_agents_hosting_router(self, context, router_id): with context.session.begin(subtransactions=True): bindings = self._get_l3_bindings_hosting_routers( context, [router_id]) return {'agents': [self._make_agent_dict(binding.l3_agent) for binding in bindings]} def get_l3_agents(self, context, active=None, filters=None): query = context.session.query(agents_db.Agent) query = query.filter( agents_db.Agent.agent_type == constants.AGENT_TYPE_L3) if active is not None: query = (query.filter(agents_db.Agent.admin_state_up == active)) if filters: for key, value in six.iteritems(filters): column = getattr(agents_db.Agent, key, None) if column: if not value: return [] query = query.filter(column.in_(value)) agent_modes = filters.get('agent_modes', []) if agent_modes: agent_mode_key = '\"agent_mode\": \"' configuration_filter = ( [agents_db.Agent.configurations.contains('%s%s\"' % (agent_mode_key, agent_mode)) for agent_mode in agent_modes]) query = query.filter(or_(*configuration_filter)) return [l3_agent for l3_agent in query if agentschedulers_db.AgentSchedulerDbMixin.is_eligible_agent( active, l3_agent)] def get_l3_agent_candidates(self, context, sync_router, l3_agents, ignore_admin_state=False): """Get the valid l3 agents for the router from a list of l3_agents. It will not return agents in 'dvr' mode for a dvr router as dvr routers are not explicitly scheduled to l3 agents on compute nodes """ candidates = [] is_router_distributed = sync_router.get('distributed', False) for l3_agent in l3_agents: if not ignore_admin_state and not l3_agent.admin_state_up: # ignore_admin_state True comes from manual scheduling # where admin_state_up judgement is already done. 
continue agent_conf = self.get_configuration_dict(l3_agent) agent_mode = agent_conf.get(constants.L3_AGENT_MODE, constants.L3_AGENT_MODE_LEGACY) if (agent_mode == constants.L3_AGENT_MODE_DVR or (agent_mode == constants.L3_AGENT_MODE_LEGACY and is_router_distributed)): continue router_id = agent_conf.get('router_id', None) if router_id and router_id != sync_router['id']: continue handle_internal_only_routers = agent_conf.get( 'handle_internal_only_routers', True) gateway_external_network_id = agent_conf.get( 'gateway_external_network_id', None) ex_net_id = (sync_router['external_gateway_info'] or {}).get( 'network_id') if ((not ex_net_id and not handle_internal_only_routers) or (ex_net_id and gateway_external_network_id and ex_net_id != gateway_external_network_id)): continue candidates.append(l3_agent) return candidates def auto_schedule_routers(self, context, host, router_ids): if self.router_scheduler: return self.router_scheduler.auto_schedule_routers( self, context, host, router_ids) def schedule_router(self, context, router, candidates=None): if self.router_scheduler: return self.router_scheduler.schedule( self, context, router, candidates=candidates) def schedule_routers(self, context, routers): """Schedule the routers to l3 agents.""" for router in routers: self.schedule_router(context, router, candidates=None) def get_l3_agent_with_min_routers(self, context, agent_ids): """Return l3 agent with the least number of routers.""" if not agent_ids: return None query = context.session.query( agents_db.Agent, func.count( RouterL3AgentBinding.router_id ).label('count')).outerjoin(RouterL3AgentBinding).group_by( agents_db.Agent.id, RouterL3AgentBinding.l3_agent_id).order_by('count') res = query.filter(agents_db.Agent.id.in_(agent_ids)).first() return res[0] def get_hosts_to_notify(self, context, router_id): """Returns all hosts to send notification about router update""" state = agentschedulers_db.get_admin_state_up_filter() agents = self.get_l3_agents_hosting_routers( context, [router_id], admin_state_up=state, active=True) return [a.host for a in agents] class AZL3AgentSchedulerDbMixin(L3AgentSchedulerDbMixin, router_az.RouterAvailabilityZonePluginBase): """Mixin class to add availability_zone supported l3 agent scheduler.""" def get_router_availability_zones(self, router): return list({agent.availability_zone for agent in router.l3_agents}) neutron-8.4.0/neutron/db/__init__.py0000664000567000056710000000000013044372736020545 0ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/db/l3_dvr_db.py0000664000567000056710000014046213044372760020662 0ustar jenkinsjenkins00000000000000# Copyright (c) 2014 OpenStack Foundation. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
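# This module extends the base L3 NAT database mixin with Distributed
# Virtual Router (DVR) support: per-host floating IP agent gateway ports,
# centralized SNAT ("csnat") interface ports, and ARP table notifications
# for DVR-serviced ports.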
import collections from oslo_config import cfg from oslo_log import helpers as log_helper from oslo_log import log as logging from oslo_utils import excutils import six from neutron._i18n import _, _LI, _LW from neutron.api.v2 import attributes from neutron.callbacks import events from neutron.callbacks import exceptions from neutron.callbacks import registry from neutron.callbacks import resources from neutron.common import constants as l3_const from neutron.common import exceptions as n_exc from neutron.common import utils as n_utils from neutron.db import allowedaddresspairs_db as addr_pair_db from neutron.db import l3_agentschedulers_db as l3_sched_db from neutron.db import l3_attrs_db from neutron.db import l3_db from neutron.db import models_v2 from neutron.extensions import l3 from neutron.extensions import portbindings from neutron.ipam import utils as ipam_utils from neutron import manager from neutron.plugins.common import constants from neutron.plugins.common import utils as p_utils LOG = logging.getLogger(__name__) router_distributed_opts = [ cfg.BoolOpt('router_distributed', default=False, help=_("System-wide flag to determine the type of router " "that tenants can create. Only admin can override.")), ] cfg.CONF.register_opts(router_distributed_opts) class L3_NAT_with_dvr_db_mixin(l3_db.L3_NAT_db_mixin, l3_attrs_db.ExtraAttributesMixin): """Mixin class to enable DVR support.""" router_device_owners = ( l3_db.L3_NAT_db_mixin.router_device_owners + (l3_const.DEVICE_OWNER_DVR_INTERFACE, l3_const.DEVICE_OWNER_ROUTER_SNAT, l3_const.DEVICE_OWNER_AGENT_GW)) extra_attributes = ( l3_attrs_db.ExtraAttributesMixin.extra_attributes + [{ 'name': "distributed", 'default': cfg.CONF.router_distributed }]) def _create_router_db(self, context, router, tenant_id): """Create a router db object with dvr additions.""" router['distributed'] = is_distributed_router(router) with context.session.begin(subtransactions=True): router_db = super( L3_NAT_with_dvr_db_mixin, self)._create_router_db( context, router, tenant_id) self._process_extra_attr_router_create(context, router_db, router) return router_db def _validate_router_migration(self, context, router_db, router_res): """Allow centralized -> distributed state transition only.""" if (router_db.extra_attributes.distributed and router_res.get('distributed') is False): LOG.info(_LI("Centralizing distributed router %s " "is not supported"), router_db['id']) raise n_exc.BadRequest( resource='router', msg=_("Migration from distributed router to centralized is " "not supported")) elif (not router_db.extra_attributes.distributed and router_res.get('distributed')): # router should be disabled in order for upgrade if router_db.admin_state_up: msg = _('Cannot upgrade active router to distributed. Please ' 'set router admin_state_up to False prior to upgrade.') raise n_exc.BadRequest(resource='router', msg=msg) # Notify advanced services of the imminent state transition # for the router. 
            try:
                kwargs = {'context': context, 'router': router_db}
                registry.notify(
                    resources.ROUTER, events.BEFORE_UPDATE, self, **kwargs)
            except exceptions.CallbackFailure as e:
                with excutils.save_and_reraise_exception():
                    # NOTE(armax): preserve old check's behavior
                    if len(e.errors) == 1:
                        raise e.errors[0].error
                    raise l3.RouterInUse(router_id=router_db['id'],
                                         reason=e)

    def _update_distributed_attr(
        self, context, router_id, router_db, data):
        """Update the model to support the dvr case of a router."""
        if data.get('distributed'):
            old_owner = l3_const.DEVICE_OWNER_ROUTER_INTF
            new_owner = l3_const.DEVICE_OWNER_DVR_INTERFACE
            for rp in router_db.attached_ports.filter_by(
                    port_type=old_owner):
                rp.port_type = new_owner
                rp.port.device_owner = new_owner

    def _update_router_db(self, context, router_id, data):
        with context.session.begin(subtransactions=True):
            router_db = super(
                L3_NAT_with_dvr_db_mixin, self)._update_router_db(
                    context, router_id, data)
            migrating_to_distributed = (
                not router_db.extra_attributes.distributed and
                data.get('distributed') is True)
            self._validate_router_migration(context, router_db, data)
            router_db.extra_attributes.update(data)
            self._update_distributed_attr(
                context, router_id, router_db, data)
            if migrating_to_distributed:
                if router_db['gw_port_id']:
                    # If the Legacy router is getting migrated to a DVR
                    # router, make sure to create corresponding
                    # snat interface ports that are to be consumed by
                    # the Service Node.
                    if not self._create_snat_intf_ports_if_not_exists(
                            context.elevated(), router_db):
                        LOG.debug("SNAT interface ports not created: %s",
                                  router_db['id'])
                cur_agents = self.list_l3_agents_hosting_router(
                    context, router_db['id'])['agents']
                for agent in cur_agents:
                    self._unbind_router(context, router_db['id'],
                                        agent['id'])
            return router_db

    def _delete_current_gw_port(self, context, router_id, router,
                                new_network):
        """
        Overridden here to handle deletion of dvr internal ports.

        If there is a valid router update with gateway port to be deleted,
        then go ahead and delete the csnat ports and the floatingip
        agent gateway port associated with the dvr router.
        """

        gw_ext_net_id = (
            router.gw_port['network_id'] if router.gw_port else None)

        super(L3_NAT_with_dvr_db_mixin,
              self)._delete_current_gw_port(context, router_id,
                                            router, new_network)
        if (is_distributed_router(router) and
                gw_ext_net_id != new_network and
                gw_ext_net_id is not None):
            self.delete_csnat_router_interface_ports(
                context.elevated(), router)
            # NOTE(Swami): Delete the Floatingip agent gateway port
            # on all hosts when it is the last gateway port in the
            # given external network.
            filters = {'network_id': [gw_ext_net_id],
                       'device_owner': [l3_const.DEVICE_OWNER_ROUTER_GW]}
            ext_net_gw_ports = self._core_plugin.get_ports(
                context.elevated(), filters)
            if not ext_net_gw_ports:
                self.delete_floatingip_agent_gateway_port(
                    context.elevated(), None, gw_ext_net_id)
                # Send the information to all the L3 Agent hosts
                # to clean up the fip namespace as it is no longer required.
                self.l3_rpc_notifier.delete_fipnamespace_for_ext_net(
                    context, gw_ext_net_id)

    def _create_gw_port(self, context, router_id, router, new_network,
                        ext_ips):
        super(L3_NAT_with_dvr_db_mixin,
              self)._create_gw_port(context, router_id,
                                    router, new_network, ext_ips)
        # Make sure that the gateway port exists before creating the
        # snat interface ports for distributed router.
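        # Without a gateway port there is no SNAT namespace to serve, so
        # creating csnat interface ports here would leave them orphaned.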
if router.extra_attributes.distributed and router.gw_port: snat_p_list = self._create_snat_intf_ports_if_not_exists( context.elevated(), router) if not snat_p_list: LOG.debug("SNAT interface ports not created: %s", snat_p_list) def _get_device_owner(self, context, router=None): """Get device_owner for the specified router.""" router_is_uuid = isinstance(router, six.string_types) if router_is_uuid: router = self._get_router(context, router) if is_distributed_router(router): return l3_const.DEVICE_OWNER_DVR_INTERFACE return super(L3_NAT_with_dvr_db_mixin, self)._get_device_owner(context, router) def _get_ports_for_allowed_address_pair_ip( self, context, network_id, fixed_ip): """Return all active ports associated with the allowed_addr_pair ip.""" query = context.session.query( models_v2.Port).filter( models_v2.Port.id == addr_pair_db.AllowedAddressPair.port_id, addr_pair_db.AllowedAddressPair.ip_address == fixed_ip, models_v2.Port.network_id == network_id, models_v2.Port.admin_state_up.is_(True)) return query.all() def _update_fip_assoc(self, context, fip, floatingip_db, external_port): """Override to create floating agent gw port for DVR. Floating IP Agent gateway port will be created when a floatingIP association happens. """ fip_port = fip.get('port_id') super(L3_NAT_with_dvr_db_mixin, self)._update_fip_assoc( context, fip, floatingip_db, external_port) associate_fip = fip_port and floatingip_db['id'] if associate_fip and floatingip_db.get('router_id'): admin_ctx = context.elevated() router_dict = self.get_router( admin_ctx, floatingip_db['router_id']) # Check if distributed router and then create the # FloatingIP agent gateway port if router_dict.get('distributed'): hostid = self._get_dvr_service_port_hostid( context, fip_port) if hostid: # FIXME (Swami): This FIP Agent Gateway port should be # created only once and there should not be a duplicate # for the same host. Until we find a good solution for # augmenting multiple server requests we should use the # existing flow. fip_agent_port = ( self.create_fip_agent_gw_port_if_not_exists( admin_ctx, external_port['network_id'], hostid)) LOG.debug("FIP Agent gateway port: %s", fip_agent_port) else: # If not hostid check if the fixed ip provided has to # deal with allowed_address_pairs for a given service # port. Get the port_dict, inherit the service port host # and device owner(if it does not exist). port = self._core_plugin.get_port( admin_ctx, fip_port) allowed_device_owners = ( n_utils.get_dvr_allowed_address_pair_device_owners()) # NOTE: We just need to deal with ports that do not # have a device_owner and ports that are owned by the # dvr service ports except for the compute port and # dhcp port. 
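                    # Compute and DHCP ports manage their own bindings;
                    # inheriting the service port host for them would be
                    # incorrect.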
if (port['device_owner'] == "" or port['device_owner'] in allowed_device_owners): addr_pair_active_service_port_list = ( self._get_ports_for_allowed_address_pair_ip( admin_ctx, port['network_id'], floatingip_db['fixed_ip_address'])) if not addr_pair_active_service_port_list: return if len(addr_pair_active_service_port_list) > 1: LOG.warning(_LW("Multiple active ports associated " "with the allowed_address_pairs.")) return self._inherit_service_port_and_arp_update( context, addr_pair_active_service_port_list[0], port) def _inherit_service_port_and_arp_update( self, context, service_port, allowed_address_port): """Function inherits port host bindings for allowed_address_pair.""" service_port_dict = self._core_plugin._make_port_dict(service_port, None) address_pair_list = service_port_dict.get('allowed_address_pairs') for address_pair in address_pair_list: updated_port = ( self.update_unbound_allowed_address_pair_port_binding( context, service_port_dict, address_pair, address_pair_port=allowed_address_port)) if not updated_port: LOG.warning(_LW("Allowed_address_pair port update failed: %s"), updated_port) self.update_arp_entry_for_dvr_service_port(context, service_port_dict) def _get_floatingip_on_port(self, context, port_id=None): """Helper function to retrieve the fip associated with port.""" fip_qry = context.session.query(l3_db.FloatingIP) floating_ip = fip_qry.filter_by(fixed_port_id=port_id) return floating_ip.first() def add_router_interface(self, context, router_id, interface_info): add_by_port, add_by_sub = self._validate_interface_info(interface_info) router = self._get_router(context, router_id) device_owner = self._get_device_owner(context, router) # This should be True unless adding an IPv6 prefix to an existing port new_port = True if add_by_port: port, subnets = self._add_interface_by_port( context, router, interface_info['port_id'], device_owner) elif add_by_sub: port, subnets, new_port = self._add_interface_by_subnet( context, router, interface_info['subnet_id'], device_owner) subnet = subnets[0] if new_port: if router.extra_attributes.distributed and router.gw_port: try: admin_context = context.elevated() self._add_csnat_router_interface_port( admin_context, router, port['network_id'], port['fixed_ips'][-1]['subnet_id']) except Exception: with excutils.save_and_reraise_exception(): # we need to preserve the original state prior # the request by rolling back the port creation # that led to new_port=True self._core_plugin.delete_port( admin_context, port['id']) with context.session.begin(subtransactions=True): router_port = l3_db.RouterPort( port_id=port['id'], router_id=router.id, port_type=device_owner ) context.session.add(router_port) # NOTE: For IPv6 additional subnets added to the same # network we need to update the CSNAT port with respective # IPv6 subnet elif subnet and port: fixed_ip = {'subnet_id': subnet['id']} if subnet['ip_version'] == 6: # Add new prefix to an existing ipv6 csnat port with the # same network id if one exists cs_port = self._find_router_port_by_network_and_device_owner( router, subnet['network_id'], l3_const.DEVICE_OWNER_ROUTER_SNAT) if cs_port: fixed_ips = list(cs_port['port']['fixed_ips']) fixed_ips.append(fixed_ip) updated_port = self._core_plugin.update_port( context.elevated(), cs_port['port_id'], {'port': {'fixed_ips': fixed_ips}}) LOG.debug("CSNAT port updated for IPv6 subnet: " "%s", updated_port) router_interface_info = self._make_router_interface_info( router_id, port['tenant_id'], port['id'], port['network_id'], subnet['id'], 
[subnet['id']]) self.notify_router_interface_action( context, router_interface_info, 'add') if router.gw_port: gw_network_id = router.gw_port.network_id gw_ips = [x['ip_address'] for x in router.gw_port.fixed_ips] registry.notify(resources.ROUTER_INTERFACE, events.AFTER_CREATE, self, context=context, network_id=gw_network_id, gateway_ips=gw_ips, cidrs=[x['cidr'] for x in subnets], port_id=port['id'], router_id=router_id, port=port, interface_info=interface_info) return router_interface_info def _port_has_ipv6_address(self, port, csnat_port_check=True): """Overridden to return False if DVR SNAT port.""" if csnat_port_check: if port['device_owner'] == l3_const.DEVICE_OWNER_ROUTER_SNAT: return False return super(L3_NAT_with_dvr_db_mixin, self)._port_has_ipv6_address(port) def _find_router_port_by_network_and_device_owner( self, router, net_id, device_owner): for port in router.attached_ports: p = port['port'] if (p['network_id'] == net_id and p['device_owner'] == device_owner and self._port_has_ipv6_address(p, csnat_port_check=False)): return port def _check_for_multiprefix_csnat_port_and_update( self, context, router, network_id, subnet_id): """Checks if the csnat port contains multiple ipv6 prefixes. If the csnat port contains multiple ipv6 prefixes for the given network when a router interface is deleted, make sure we don't delete the port when a single subnet is deleted and just update it with the right fixed_ip. This function returns true if it is a multiprefix port. """ if router.gw_port: # If router has a gateway port, check if it has IPV6 subnet cs_port = ( self._find_router_port_by_network_and_device_owner( router, network_id, l3_const.DEVICE_OWNER_ROUTER_SNAT)) if cs_port: fixed_ips = ( [fixedip for fixedip in cs_port['port']['fixed_ips'] if fixedip['subnet_id'] != subnet_id]) if len(fixed_ips) == len(cs_port['port']['fixed_ips']): # The subnet being detached from router is not part of # ipv6 router port. No need to update the multiprefix. 
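                    # (An unchanged fixed_ips length means the filter above
                    # removed nothing, i.e. the subnet is not on this port.)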
return False if fixed_ips: # multiple prefix port - delete prefix from port self._core_plugin.update_port( context.elevated(), cs_port['port_id'], {'port': {'fixed_ips': fixed_ips}}) return True return False def remove_router_interface(self, context, router_id, interface_info): router = self._get_router(context, router_id) if not router.extra_attributes.distributed: return super( L3_NAT_with_dvr_db_mixin, self).remove_router_interface( context, router_id, interface_info) plugin = manager.NeutronManager.get_service_plugins().get( constants.L3_ROUTER_NAT) router_hosts_before = plugin._get_dvr_hosts_for_router( context, router_id) interface_info = super( L3_NAT_with_dvr_db_mixin, self).remove_router_interface( context, router_id, interface_info) router_hosts_after = plugin._get_dvr_hosts_for_router( context, router_id) removed_hosts = set(router_hosts_before) - set(router_hosts_after) if removed_hosts: agents = plugin.get_l3_agents(context, filters={'host': removed_hosts}) binding_table = l3_sched_db.RouterL3AgentBinding snat_binding = context.session.query(binding_table).filter_by( router_id=router_id).first() for agent in agents: is_this_snat_agent = ( snat_binding and snat_binding.l3_agent_id == agent['id']) if not is_this_snat_agent: self.l3_rpc_notifier.router_removed_from_agent( context, router_id, agent['host']) is_multiple_prefix_csport = ( self._check_for_multiprefix_csnat_port_and_update( context, router, interface_info['network_id'], interface_info['subnet_id'])) if not is_multiple_prefix_csport: # Single prefix port - go ahead and delete the port self.delete_csnat_router_interface_ports( context.elevated(), router, subnet_id=interface_info['subnet_id']) return interface_info def _get_snat_sync_interfaces(self, context, router_ids): """Query router interfaces that relate to list of router_ids.""" if not router_ids: return [] qry = context.session.query(l3_db.RouterPort) qry = qry.filter( l3_db.RouterPort.router_id.in_(router_ids), l3_db.RouterPort.port_type == l3_const.DEVICE_OWNER_ROUTER_SNAT ) interfaces = collections.defaultdict(list) for rp in qry: interfaces[rp.router_id].append( self._core_plugin._make_port_dict(rp.port, None)) LOG.debug("Return the SNAT ports: %s", interfaces) return interfaces def _build_routers_list(self, context, routers, gw_ports): # Perform a single query up front for all routers if not routers: return [] router_ids = [r['id'] for r in routers] snat_binding = l3_sched_db.RouterL3AgentBinding query = (context.session.query(snat_binding). 
                 filter(snat_binding.router_id.in_(router_ids))).all()
        bindings = dict((b.router_id, b) for b in query)

        for rtr in routers:
            gw_port_id = rtr['gw_port_id']
            # Collect gw ports only if available
            if gw_port_id and gw_ports.get(gw_port_id):
                rtr['gw_port'] = gw_ports[gw_port_id]
                if 'enable_snat' in rtr[l3.EXTERNAL_GW_INFO]:
                    rtr['enable_snat'] = (
                        rtr[l3.EXTERNAL_GW_INFO]['enable_snat'])

                binding = bindings.get(rtr['id'])
                if not binding:
                    rtr['gw_port_host'] = None
                    LOG.debug('No snat is bound to router %s', rtr['id'])
                    continue

                rtr['gw_port_host'] = binding.l3_agent.host

        return routers

    def _process_routers(self, context, routers):
        routers_dict = {}
        snat_intfs_by_router_id = self._get_snat_sync_interfaces(
            context, [r['id'] for r in routers])
        for router in routers:
            routers_dict[router['id']] = router
            if router['gw_port_id']:
                snat_router_intfs = snat_intfs_by_router_id[router['id']]
                LOG.debug("SNAT ports returned: %s ", snat_router_intfs)
                router[l3_const.SNAT_ROUTER_INTF_KEY] = snat_router_intfs
        return routers_dict

    def _process_floating_ips_dvr(self, context, routers_dict,
                                  floating_ips, host, agent):
        fip_sync_interfaces = None
        LOG.debug("FIP Agent : %s ", agent.id)
        for floating_ip in floating_ips:
            router = routers_dict.get(floating_ip['router_id'])
            if router:
                router_floatingips = router.get(l3_const.FLOATINGIP_KEY, [])
                if router['distributed']:
                    if (floating_ip.get('host', None) != host and
                            floating_ip.get('dest_host') is None):
                        continue
                    LOG.debug("Floating IP host: %s", floating_ip['host'])
                router_floatingips.append(floating_ip)
                router[l3_const.FLOATINGIP_KEY] = router_floatingips
                if not fip_sync_interfaces:
                    fip_sync_interfaces = self._get_fip_sync_interfaces(
                        context, agent.id)
                    LOG.debug("FIP Agent ports: %s", fip_sync_interfaces)
                router[l3_const.FLOATINGIP_AGENT_INTF_KEY] = (
                    fip_sync_interfaces)

    def _get_fip_sync_interfaces(self, context, fip_agent_id):
        """Query the FIP agent gateway ports owned by the given L3 agent."""
        if not fip_agent_id:
            return []
        filters = {'device_id': [fip_agent_id],
                   'device_owner': [l3_const.DEVICE_OWNER_AGENT_GW]}
        interfaces = self._core_plugin.get_ports(context.elevated(), filters)
        LOG.debug("Return the FIP ports: %s ", interfaces)
        return interfaces

    @log_helper.log_method_call
    def _get_dvr_sync_data(self, context, host, agent, router_ids=None,
                           active=None):
        routers, interfaces, floating_ips = self._get_router_info_list(
            context, router_ids=router_ids, active=active,
            device_owners=l3_const.ROUTER_INTERFACE_OWNERS)
        dvr_router_ids = set(router['id'] for router in routers
                             if is_distributed_router(router))
        floating_ip_port_ids = [fip['port_id'] for fip in floating_ips
                                if fip['router_id'] in dvr_router_ids]
        if floating_ip_port_ids:
            port_filter = {'id': floating_ip_port_ids}
            ports = self._core_plugin.get_ports(context, port_filter)
            port_dict = {}
            for port in ports:
                # Make sure that we check for cases where the port
                # might be in a pre-live migration state or also
                # check for the portbinding profile 'migrating_to'
                # key for the host.
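                # During live migration the binding profile gains a
                # 'migrating_to' entry before the binding host itself is
                # updated, so the profile must be checked as well.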
                port_profile = port.get(portbindings.PROFILE)
                port_in_migration = (
                    port_profile and
                    port_profile.get('migrating_to') == host)
                if (port[portbindings.HOST_ID] == host or port_in_migration):
                    port_dict.update({port['id']: port})
            # Add the port binding host to the floatingip dictionary
            for fip in floating_ips:
                vm_port = port_dict.get(fip['port_id'], None)
                if vm_port:
                    fip['host'] = self._get_dvr_service_port_hostid(
                        context, fip['port_id'], port=vm_port)
                    fip['dest_host'] = (
                        self._get_dvr_migrating_service_port_hostid(
                            context, fip['port_id'], port=vm_port))
        routers_dict = self._process_routers(context, routers)
        self._process_floating_ips_dvr(context, routers_dict,
                                       floating_ips, host, agent)
        ports_to_populate = []
        for router in routers_dict.values():
            if router.get('gw_port'):
                ports_to_populate.append(router['gw_port'])
            if router.get(l3_const.FLOATINGIP_AGENT_INTF_KEY):
                ports_to_populate += router[l3_const.FLOATINGIP_AGENT_INTF_KEY]
            if router.get(l3_const.SNAT_ROUTER_INTF_KEY):
                ports_to_populate += router[l3_const.SNAT_ROUTER_INTF_KEY]
        ports_to_populate += interfaces
        self._populate_mtu_and_subnets_for_ports(context, ports_to_populate)
        self._process_interfaces(routers_dict, interfaces)
        return list(routers_dict.values())

    def _get_dvr_service_port_hostid(self, context, port_id, port=None):
        """Returns the portbinding host_id for dvr service port."""
        port_db = port or self._core_plugin.get_port(context, port_id)
        device_owner = port_db['device_owner'] if port_db else ""
        if n_utils.is_dvr_serviced(device_owner):
            return port_db[portbindings.HOST_ID]

    def _get_dvr_migrating_service_port_hostid(
            self, context, port_id, port=None):
        """Returns the migrating host_id from the migrating profile."""
        port_db = port or self._core_plugin.get_port(context, port_id)
        port_profile = port_db.get(portbindings.PROFILE)
        port_dest_host = None
        if port_profile:
            port_dest_host = port_profile.get('migrating_to')
        device_owner = port_db['device_owner'] if port_db else ""
        if n_utils.is_dvr_serviced(device_owner):
            return port_dest_host

    def _get_agent_gw_ports_exist_for_network(
            self, context, network_id, host, agent_id):
        """Return the agent gw port if it exists, or None otherwise."""
        if not network_id:
            LOG.debug("Network not specified")
            return

        filters = {
            'network_id': [network_id],
            'device_id': [agent_id],
            'device_owner': [l3_const.DEVICE_OWNER_AGENT_GW]
        }
        ports = self._core_plugin.get_ports(context, filters)
        if ports:
            return ports[0]

    def delete_floatingip_agent_gateway_port(
            self, context, host_id, ext_net_id):
        """Function to delete FIP gateway port with given ext_net_id."""
        # delete any fip agent gw port
        device_filter = {'device_owner': [l3_const.DEVICE_OWNER_AGENT_GW],
                         'network_id': [ext_net_id]}
        ports = self._core_plugin.get_ports(context,
                                            filters=device_filter)
        for p in ports:
            if not host_id or p[portbindings.HOST_ID] == host_id:
                self._core_plugin.ipam.delete_port(context, p['id'])
                if host_id:
                    return

    def check_for_fip_and_create_agent_gw_port_on_host_if_not_exists(
            self, context, port, host):
        """Create the fip agent_gw_port on the host if it does not exist."""
        fip = self._get_floatingip_on_port(context, port_id=port['id'])
        if not fip:
            return
        network_id = fip.get('floating_network_id')
        agent_gw_port = self.create_fip_agent_gw_port_if_not_exists(
            context.elevated(), network_id, host)
        LOG.debug("Port-in-Migration: Floatingip Agent Gateway port "
                  "%(gw)s created for the future host: %(dest_host)s",
                  {'gw': agent_gw_port,
                   'dest_host': host})

    def create_fip_agent_gw_port_if_not_exists(
            self, context, network_id, host):
        """Function to return the FIP Agent
GW port. This function will create a FIP Agent GW port if required. If the port already exists, it will return the existing port and will not create a new one. """ l3_agent_db = self._get_agent_by_type_and_host( context, l3_const.AGENT_TYPE_L3, host) if l3_agent_db: LOG.debug("Agent ID exists: %s", l3_agent_db['id']) f_port = self._get_agent_gw_ports_exist_for_network( context, network_id, host, l3_agent_db['id']) if not f_port: LOG.info(_LI('Agent Gateway port does not exist,' ' so create one: %s'), f_port) port_data = {'tenant_id': '', 'network_id': network_id, 'device_id': l3_agent_db['id'], 'device_owner': l3_const.DEVICE_OWNER_AGENT_GW, portbindings.HOST_ID: host, 'admin_state_up': True, 'name': ''} agent_port = p_utils.create_port(self._core_plugin, context, {'port': port_data}) if agent_port: self._populate_mtu_and_subnets_for_ports(context, [agent_port]) return agent_port msg = _("Unable to create the Agent Gateway Port") raise n_exc.BadRequest(resource='router', msg=msg) else: self._populate_mtu_and_subnets_for_ports(context, [f_port]) return f_port def _get_snat_interface_ports_for_router(self, context, router_id): """Return all existing snat_router_interface ports.""" qry = context.session.query(l3_db.RouterPort) qry = qry.filter_by( router_id=router_id, port_type=l3_const.DEVICE_OWNER_ROUTER_SNAT ) ports = [self._core_plugin._make_port_dict(rp.port, None) for rp in qry] return ports def _add_csnat_router_interface_port( self, context, router, network_id, subnet_id, do_pop=True): """Add SNAT interface to the specified router and subnet.""" port_data = {'tenant_id': '', 'network_id': network_id, 'fixed_ips': [{'subnet_id': subnet_id}], 'device_id': router.id, 'device_owner': l3_const.DEVICE_OWNER_ROUTER_SNAT, 'admin_state_up': True, 'name': ''} snat_port = p_utils.create_port(self._core_plugin, context, {'port': port_data}) if not snat_port: msg = _("Unable to create the SNAT Interface Port") raise n_exc.BadRequest(resource='router', msg=msg) with context.session.begin(subtransactions=True): router_port = l3_db.RouterPort( port_id=snat_port['id'], router_id=router.id, port_type=l3_const.DEVICE_OWNER_ROUTER_SNAT ) context.session.add(router_port) if do_pop: return self._populate_mtu_and_subnets_for_ports(context, [snat_port]) return snat_port def _create_snat_intf_ports_if_not_exists(self, context, router): """Function to return the snat interface port list. This function will return the snat interface port list if it exists. If the port does not exist it will create new ports and then return the list. 
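        One csnat port is created per DVR interface network, on the
        subnet of that interface's first fixed IP.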
""" port_list = self._get_snat_interface_ports_for_router( context, router.id) if port_list: self._populate_mtu_and_subnets_for_ports(context, port_list) return port_list port_list = [] int_ports = ( rp.port for rp in router.attached_ports.filter_by( port_type=l3_const.DEVICE_OWNER_DVR_INTERFACE ) ) LOG.info(_LI('SNAT interface port list does not exist,' ' so create one: %s'), port_list) for intf in int_ports: if intf.fixed_ips: # Passing the subnet for the port to make sure the IP's # are assigned on the right subnet if multiple subnet # exists snat_port = self._add_csnat_router_interface_port( context, router, intf['network_id'], intf['fixed_ips'][0]['subnet_id'], do_pop=False) port_list.append(snat_port) if port_list: self._populate_mtu_and_subnets_for_ports(context, port_list) return port_list def _generate_arp_table_and_notify_agent( self, context, fixed_ip, mac_address, notifier): """Generates the arp table entry and notifies the l3 agent.""" ip_address = fixed_ip['ip_address'] subnet = fixed_ip['subnet_id'] filters = {'fixed_ips': {'subnet_id': [subnet]}, 'device_owner': [l3_const.DEVICE_OWNER_DVR_INTERFACE]} ports = self._core_plugin.get_ports(context, filters=filters) router_id = next((port['device_id'] for port in ports), None) if not router_id: return arp_table = {'ip_address': ip_address, 'mac_address': mac_address, 'subnet_id': subnet} notifier(context, router_id, arp_table) def _should_update_arp_entry_for_dvr_service_port(self, port_dict): # Check this is a valid VM or service port return (n_utils.is_dvr_serviced(port_dict['device_owner']) and port_dict['fixed_ips']) def _get_subnet_id_for_given_fixed_ip( self, context, fixed_ip, port_dict): """Returns the subnet_id that matches the fixedip on a network.""" filters = {'network_id': [port_dict['network_id']]} subnets = self._core_plugin.get_subnets(context, filters) for subnet in subnets: if ipam_utils.check_subnet_ip(subnet['cidr'], fixed_ip): return subnet['id'] def _get_allowed_address_pair_fixed_ips(self, context, port_dict): """Returns all fixed_ips associated with the allowed_address_pair.""" aa_pair_fixed_ips = [] if port_dict.get('allowed_address_pairs'): for address_pair in port_dict['allowed_address_pairs']: aap_ip_cidr = address_pair['ip_address'].split("/") if len(aap_ip_cidr) == 1 or int(aap_ip_cidr[1]) == 32: subnet_id = self._get_subnet_id_for_given_fixed_ip( context, aap_ip_cidr[0], port_dict) if subnet_id is not None: fixed_ip = {'subnet_id': subnet_id, 'ip_address': aap_ip_cidr[0]} aa_pair_fixed_ips.append(fixed_ip) else: LOG.debug("Subnet does not match for the given " "fixed_ip %s for arp update", aap_ip_cidr[0]) return aa_pair_fixed_ips def update_arp_entry_for_dvr_service_port(self, context, port_dict): """Notify L3 agents of ARP table entry for dvr service port. When a dvr service port goes up, look for the DVR router on the port's subnet, and send the ARP details to all L3 agents hosting the router to add it. If there are any allowed_address_pairs associated with the port those fixed_ips should also be updated in the ARP table. 
""" if not self._should_update_arp_entry_for_dvr_service_port(port_dict): return fixed_ips = port_dict['fixed_ips'] allowed_address_pair_fixed_ips = ( self._get_allowed_address_pair_fixed_ips(context, port_dict)) changed_fixed_ips = fixed_ips + allowed_address_pair_fixed_ips for fixed_ip in changed_fixed_ips: self._generate_arp_table_and_notify_agent( context, fixed_ip, port_dict['mac_address'], self.l3_rpc_notifier.add_arp_entry) def delete_arp_entry_for_dvr_service_port( self, context, port_dict, fixed_ips_to_delete=None): """Notify L3 agents of ARP table entry for dvr service port. When a dvr service port goes down, look for the DVR router on the port's subnet, and send the ARP details to all L3 agents hosting the router to delete it. If there are any allowed_address_pairs associated with the port, those fixed_ips should be removed from the ARP table. """ if not self._should_update_arp_entry_for_dvr_service_port(port_dict): return if not fixed_ips_to_delete: fixed_ips = port_dict['fixed_ips'] allowed_address_pair_fixed_ips = ( self._get_allowed_address_pair_fixed_ips(context, port_dict)) fixed_ips_to_delete = fixed_ips + allowed_address_pair_fixed_ips for fixed_ip in fixed_ips_to_delete: self._generate_arp_table_and_notify_agent( context, fixed_ip, port_dict['mac_address'], self.l3_rpc_notifier.del_arp_entry) def delete_csnat_router_interface_ports(self, context, router, subnet_id=None): # Each csnat router interface port is associated # with a subnet, so we need to pass the subnet id to # delete the right ports. # TODO(markmcclain): This is suboptimal but was left to reduce # changeset size since it is late in cycle ports = [ rp.port.id for rp in router.attached_ports.filter_by( port_type=l3_const.DEVICE_OWNER_ROUTER_SNAT) if rp.port ] c_snat_ports = self._core_plugin.get_ports( context, filters={'id': ports} ) for p in c_snat_ports: if subnet_id is None: self._core_plugin.delete_port(context, p['id'], l3_port_check=False) else: if p['fixed_ips'][0]['subnet_id'] == subnet_id: LOG.debug("Subnet matches: %s", subnet_id) self._core_plugin.delete_port(context, p['id'], l3_port_check=False) def create_floatingip(self, context, floatingip, initial_status=l3_const.FLOATINGIP_STATUS_ACTIVE): floating_ip = self._create_floatingip( context, floatingip, initial_status) self._notify_floating_ip_change(context, floating_ip) return floating_ip def _notify_floating_ip_change(self, context, floating_ip): router_id = floating_ip['router_id'] fixed_port_id = floating_ip['port_id'] # we need to notify agents only in case Floating IP is associated if not router_id or not fixed_port_id: return try: # using admin context as router may belong to admin tenant router = self._get_router(context.elevated(), router_id) except l3.RouterNotFound: LOG.warning(_LW("Router %s was not found. 
" "Skipping agent notification."), router_id) return if is_distributed_router(router): host = self._get_dvr_service_port_hostid(context, fixed_port_id) dest_host = self._get_dvr_migrating_service_port_hostid( context, fixed_port_id) self.l3_rpc_notifier.routers_updated_on_host( context, [router_id], host) if dest_host and dest_host != host: self.l3_rpc_notifier.routers_updated_on_host( context, [router_id], dest_host) else: self.notify_router_updated(context, router_id) def update_floatingip(self, context, id, floatingip): old_floatingip, floatingip = self._update_floatingip( context, id, floatingip) self._notify_floating_ip_change(context, old_floatingip) if (floatingip['router_id'] != old_floatingip['router_id'] or floatingip['port_id'] != old_floatingip['port_id']): self._notify_floating_ip_change(context, floatingip) return floatingip def delete_floatingip(self, context, id): floating_ip = self._delete_floatingip(context, id) self._notify_floating_ip_change(context, floating_ip) def _get_address_pair_active_port_with_fip( self, context, port_dict, port_addr_pair_ip): port_valid_state = (port_dict['admin_state_up'] or (port_dict['status'] == l3_const.PORT_STATUS_ACTIVE)) if not port_valid_state: return query = context.session.query(l3_db.FloatingIP).filter( l3_db.FloatingIP.fixed_ip_address == port_addr_pair_ip) fip = query.first() return self._core_plugin.get_port( context, fip.fixed_port_id) if fip else None def update_unbound_allowed_address_pair_port_binding( self, context, service_port_dict, port_address_pairs, address_pair_port=None): """Update allowed address pair port with host and device_owner This function sets the host and device_owner to the port associated with the port_addr_pair_ip with the port_dict's host and device_owner. """ port_addr_pair_ip = port_address_pairs['ip_address'] if not address_pair_port: address_pair_port = self._get_address_pair_active_port_with_fip( context, service_port_dict, port_addr_pair_ip) if address_pair_port: host = service_port_dict[portbindings.HOST_ID] dev_owner = service_port_dict['device_owner'] address_pair_dev_owner = address_pair_port.get('device_owner') # If the allowed_address_pair port already has an associated # device owner, and if the device_owner is a dvr serviceable # port, then don't update the device_owner. port_profile = address_pair_port.get(portbindings.PROFILE, {}) if n_utils.is_dvr_serviced(address_pair_dev_owner): port_profile['original_owner'] = address_pair_dev_owner port_data = {portbindings.HOST_ID: host, portbindings.PROFILE: port_profile} else: port_data = {portbindings.HOST_ID: host, 'device_owner': dev_owner} update_port = self._core_plugin.update_port( context, address_pair_port['id'], {'port': port_data}) return update_port def remove_unbound_allowed_address_pair_port_binding( self, context, service_port_dict, port_address_pairs, address_pair_port=None): """Remove allowed address pair port binding and device_owner This function clears the host and device_owner associated with the port_addr_pair_ip. 
""" port_addr_pair_ip = port_address_pairs['ip_address'] if not address_pair_port: address_pair_port = self._get_address_pair_active_port_with_fip( context, service_port_dict, port_addr_pair_ip) if address_pair_port: # Before reverting the changes, fetch the original # device owner saved in profile and update the port port_profile = address_pair_port.get(portbindings.PROFILE) orig_device_owner = "" if port_profile: orig_device_owner = port_profile.get('original_owner') del port_profile['original_owner'] port_data = {portbindings.HOST_ID: "", 'device_owner': orig_device_owner, portbindings.PROFILE: port_profile} update_port = self._core_plugin.update_port( context, address_pair_port['id'], {'port': port_data}) return update_port def is_distributed_router(router): """Return True if router to be handled is distributed.""" try: # See if router is a DB object first requested_router_type = router.extra_attributes.distributed except AttributeError: # if not, try to see if it is a request body requested_router_type = router.get('distributed') if attributes.is_attr_set(requested_router_type): return requested_router_type return cfg.CONF.router_distributed neutron-8.4.0/neutron/db/quota/0000775000567000056710000000000013044373210017563 5ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/db/quota/__init__.py0000664000567000056710000000000013044372736021676 0ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/db/quota/driver.py0000664000567000056710000002750613044372760021453 0ustar jenkinsjenkins00000000000000# Copyright 2011 OpenStack Foundation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_db import api as oslo_db_api from oslo_log import log from neutron.common import exceptions from neutron.db import api as db_api from neutron.db import common_db_mixin as common_db from neutron.db.quota import api as quota_api from neutron.db.quota import models as quota_models LOG = log.getLogger(__name__) class DbQuotaDriver(object): """Driver to perform necessary checks to enforce quotas and obtain quota information. The default driver utilizes the local database. """ @staticmethod def get_tenant_quotas(context, resources, tenant_id): """Given a list of resources, retrieve the quotas for the given tenant. If no limits are found for the specified tenant, the operation returns the default limits. :param context: The request context, for access checks. :param resources: A dictionary of the registered resource keys. :param tenant_id: The ID of the tenant to return quotas for. :return dict: from resource name to dict of name and limit """ # init with defaults tenant_quota = dict((key, resource.default) for key, resource in resources.items()) # update with tenant specific limits q_qry = common_db.model_query(context, quota_models.Quota).filter_by( tenant_id=tenant_id) for item in q_qry: tenant_quota[item['resource']] = item['limit'] return tenant_quota @staticmethod def delete_tenant_quota(context, tenant_id): """Delete the quota entries for a given tenant_id. 
        After deletion, this tenant will use default quota values in conf.
        """
        with context.session.begin():
            tenant_quotas = context.session.query(quota_models.Quota)
            tenant_quotas = tenant_quotas.filter_by(tenant_id=tenant_id)
            tenant_quotas.delete()

    @staticmethod
    def get_all_quotas(context, resources):
        """Given a list of resources, retrieve the quotas for all tenants.

        :param context: The request context, for access checks.
        :param resources: A dictionary of the registered resource keys.
        :return quotas: list of dicts, one per tenant, mapping 'tenant_id'
                        and each resource key to its limit
        """
        tenant_default = dict((key, resource.default)
                              for key, resource in resources.items())

        all_tenant_quotas = {}

        for quota in context.session.query(quota_models.Quota):
            tenant_id = quota['tenant_id']

            # avoid setdefault() because we only want to copy when
            # actually required
            tenant_quota = all_tenant_quotas.get(tenant_id)
            if tenant_quota is None:
                tenant_quota = tenant_default.copy()
                tenant_quota['tenant_id'] = tenant_id
                all_tenant_quotas[tenant_id] = tenant_quota

            tenant_quota[quota['resource']] = quota['limit']

        # Convert values to a list, as callers expect an indexable
        # iterable; python3's dict_values does not support indexing
        return list(all_tenant_quotas.values())

    @staticmethod
    def update_quota_limit(context, tenant_id, resource, limit):
        with context.session.begin():
            tenant_quota = context.session.query(
                quota_models.Quota).filter_by(
                    tenant_id=tenant_id, resource=resource).first()

            if tenant_quota:
                tenant_quota.update({'limit': limit})
            else:
                tenant_quota = quota_models.Quota(tenant_id=tenant_id,
                                                  resource=resource,
                                                  limit=limit)
                context.session.add(tenant_quota)

    def _get_quotas(self, context, tenant_id, resources):
        """Retrieves the quotas for specific resources.

        A helper method which retrieves the quotas for the specific
        resources identified by keys, and which apply to the current
        context.

        :param context: The request context, for access checks.
        :param tenant_id: the tenant_id to check quota.
        :param resources: A dictionary of the registered resources.
        """
        # Grab and return the quotas (without usages)
        quotas = DbQuotaDriver.get_tenant_quotas(
            context, resources, tenant_id)

        return dict((k, v) for k, v in quotas.items())

    def _handle_expired_reservations(self, context, tenant_id):
        LOG.debug("Deleting expired reservations for tenant: %s", tenant_id)
        # Delete expired reservations (we don't want them to accrue
        # in the database)
        quota_api.remove_expired_reservations(
            context, tenant_id=tenant_id)

    @oslo_db_api.wrap_db_retry(max_retries=db_api.MAX_RETRIES,
                               retry_interval=0.1,
                               inc_retry_interval=True,
                               retry_on_request=True,
                               exception_checker=db_api.is_retriable)
    def make_reservation(self, context, tenant_id, resources, deltas,
                         plugin):
        # Lock current reservation table
        # NOTE(salv-orlando): This routine uses DB write locks.
        # These locks are acquired by the count() method invoked on
        # resources. Please put your shotguns aside.
        # A non-locking algorithm for handling reservations is feasible,
        # however it will require two database writes even in cases when
        # there are no concurrent reservations.
        # For this reason it might be advisable to handle contention using
        # this kind of locks and paying the cost of a write set certification
        # failure when a MySQL Galera cluster is employed. Also, this class of
        # locks should be ok to use when support for sending "hotspot" writes
        # to a single node becomes available.
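        # Illustrative use (resource names hypothetical): reserving two
        # networks and one subnet for a tenant would be requested as
        #   make_reservation(ctx, tenant_id, resources,
        #                    {'network': 2, 'subnet': 1}, plugin)
        # and, if within quota, returns a ReservationInfo whose deltas
        # echo the request.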
        requested_resources = deltas.keys()
        with db_api.autonested_transaction(context.session):
            # get_tenant_quotas needs as input a dictionary mapping resource
            # name to BaseResource instances so that the default quota can be
            # retrieved
            current_limits = self.get_tenant_quotas(
                context, resources, tenant_id)
            unlimited_resources = set([resource for (resource, limit) in
                                       current_limits.items() if limit < 0])
            # Do not even bother counting resources and calculating headroom
            # for resources with unlimited quota
            LOG.debug(("Resources %s have unlimited quota limit. It is not "
                       "required to calculate headroom "),
                      ",".join(unlimited_resources))
            requested_resources = (set(requested_resources) -
                                   unlimited_resources)
            # Gather current usage information
            # TODO(salv-orlando): calling count() for every resource triggers
            # multiple queries on quota usage. This should be improved,
            # however this is not an urgent matter as the REST API currently
            # only allows allocation of a resource at a time
            # NOTE: pass plugin too for compatibility with CountableResource
            # instances
            current_usages = dict(
                (resource, resources[resource].count(
                    context, plugin, tenant_id, resync_usage=False))
                for resource in requested_resources)
            # Adjust for expired reservations. Apparently it is cheaper than
            # querying every time for active reservations and counting the
            # overall quantity of resources reserved
            expired_deltas = quota_api.get_reservations_for_resources(
                context, tenant_id, requested_resources, expired=True)
            # Verify that the request can be accepted with current limits
            resources_over_limit = []
            for resource in requested_resources:
                expired_reservations = expired_deltas.get(resource, 0)
                total_usage = current_usages[resource] - expired_reservations
                res_headroom = current_limits[resource] - total_usage
                LOG.debug(("Attempting to reserve %(delta)d items for "
                           "resource %(resource)s. Total usage: %(total)d; "
                           "quota limit: %(limit)d; headroom: %(headroom)d"),
                          {'resource': resource,
                           'delta': deltas[resource],
                           'total': total_usage,
                           'limit': current_limits[resource],
                           'headroom': res_headroom})
                if res_headroom < deltas[resource]:
                    resources_over_limit.append(resource)
                if expired_reservations:
                    self._handle_expired_reservations(context, tenant_id)

            if resources_over_limit:
                raise exceptions.OverQuota(overs=sorted(resources_over_limit))
            # Success, store the reservation
            # TODO(salv-orlando): Make expiration time configurable
            return quota_api.create_reservation(
                context, tenant_id, deltas)

    def commit_reservation(self, context, reservation_id):
        # Do not mark resource usage as dirty. If a reservation is committed,
        # then the relevant resources have been created. Usage data for these
        # resources has therefore already been marked dirty.
        quota_api.remove_reservation(context, reservation_id,
                                     set_dirty=False)

    def cancel_reservation(self, context, reservation_id):
        # Mark resource usage as dirty so the next time both actual resources
        # used and reserved will be recalculated
        quota_api.remove_reservation(context, reservation_id,
                                     set_dirty=True)

    def limit_check(self, context, tenant_id, resources, values):
        """Check simple quota limits.

        For limits--those quotas for which there is no usage
        synchronization function--this method checks that a set of
        proposed values are permitted by the limit restriction.

        If any of the proposed values is over the defined quota, an
        OverQuota exception will be raised with the sorted list of the
        resources which are too high.  Otherwise, the method returns
        nothing.

        :param context: The request context, for access checks.
:param tenant_id: The tenant_id to check the quota. :param resources: A dictionary of the registered resources. :param values: A dictionary of the values to check against the quota. """ # Ensure no value is less than zero unders = [key for key, val in values.items() if val < 0] if unders: raise exceptions.InvalidQuotaValue(unders=sorted(unders)) # Get the applicable quotas quotas = self._get_quotas(context, tenant_id, resources) # Check the quotas and construct a list of the resources that # would be put over limit by the desired values overs = [key for key, val in values.items() if quotas[key] >= 0 and quotas[key] < val] if overs: raise exceptions.OverQuota(overs=sorted(overs)) neutron-8.4.0/neutron/db/quota/models.py0000664000567000056710000000502413044372760021432 0ustar jenkinsjenkins00000000000000# Copyright (c) 2015 OpenStack Foundation. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import sqlalchemy as sa from sqlalchemy import orm from sqlalchemy import sql from neutron.api.v2 import attributes as attr from neutron.db import model_base class ResourceDelta(model_base.BASEV2): resource = sa.Column(sa.String(255), primary_key=True) reservation_id = sa.Column(sa.String(36), sa.ForeignKey('reservations.id', ondelete='CASCADE'), primary_key=True, nullable=False) # Requested amount of resource amount = sa.Column(sa.Integer) class Reservation(model_base.BASEV2, model_base.HasId): tenant_id = sa.Column(sa.String(attr.TENANT_ID_MAX_LEN)) expiration = sa.Column(sa.DateTime()) resource_deltas = orm.relationship(ResourceDelta, backref='reservation', lazy="joined", cascade='all, delete-orphan') class Quota(model_base.BASEV2, model_base.HasId, model_base.HasTenant): """Represent a single quota override for a tenant. If there is no row for a given tenant id and resource, then the default for the deployment is used. """ resource = sa.Column(sa.String(255)) limit = sa.Column(sa.Integer) class QuotaUsage(model_base.BASEV2): """Represents the current usage for a given resource.""" resource = sa.Column(sa.String(255), nullable=False, primary_key=True, index=True) tenant_id = sa.Column(sa.String(attr.TENANT_ID_MAX_LEN), nullable=False, primary_key=True, index=True) dirty = sa.Column(sa.Boolean, nullable=False, server_default=sql.false()) in_use = sa.Column(sa.Integer, nullable=False, server_default="0") reserved = sa.Column(sa.Integer, nullable=False, server_default="0") neutron-8.4.0/neutron/db/quota/api.py0000664000567000056710000002447413044372760020732 0ustar jenkinsjenkins00000000000000# Copyright (c) 2015 OpenStack Foundation. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. import collections import datetime import sqlalchemy as sa from sqlalchemy.orm import exc as orm_exc from sqlalchemy import sql from neutron.db import api as db_api from neutron.db import common_db_mixin as common_db_api from neutron.db.quota import models as quota_models # Wrapper for utcnow - needed for mocking it in unit tests def utcnow(): return datetime.datetime.utcnow() class QuotaUsageInfo(collections.namedtuple( 'QuotaUsageInfo', ['resource', 'tenant_id', 'used', 'dirty'])): """Information about resource quota usage.""" class ReservationInfo(collections.namedtuple( 'ReservationInfo', ['reservation_id', 'tenant_id', 'expiration', 'deltas'])): """Information about a resource reservation.""" def get_quota_usage_by_resource_and_tenant(context, resource, tenant_id, lock_for_update=False): """Return usage info for a given resource and tenant. :param context: Request context :param resource: Name of the resource :param tenant_id: Tenant identifier :param lock_for_update: if True sets a write-intent lock on the query :returns: a QuotaUsageInfo instance """ query = common_db_api.model_query(context, quota_models.QuotaUsage) query = query.filter_by(resource=resource, tenant_id=tenant_id) if lock_for_update: query = query.with_lockmode('update') result = query.first() if not result: return return QuotaUsageInfo(result.resource, result.tenant_id, result.in_use, result.dirty) def get_quota_usage_by_resource(context, resource): query = common_db_api.model_query(context, quota_models.QuotaUsage) query = query.filter_by(resource=resource) return [QuotaUsageInfo(item.resource, item.tenant_id, item.in_use, item.dirty) for item in query] def get_quota_usage_by_tenant_id(context, tenant_id): query = common_db_api.model_query(context, quota_models.QuotaUsage) query = query.filter_by(tenant_id=tenant_id) return [QuotaUsageInfo(item.resource, item.tenant_id, item.in_use, item.dirty) for item in query] def set_quota_usage(context, resource, tenant_id, in_use=None, delta=False): """Set resource quota usage. :param context: instance of neutron context with db session :param resource: name of the resource for which usage is being set :param tenant_id: identifier of the tenant for which quota usage is being set :param in_use: integer specifying the new quantity of used resources, or a delta to apply to current used resource :param delta: Specifies whether in_use is an absolute number or a delta (default to False) """ with db_api.autonested_transaction(context.session): query = common_db_api.model_query(context, quota_models.QuotaUsage) query = query.filter_by(resource=resource).filter_by( tenant_id=tenant_id) usage_data = query.first() if not usage_data: # Must create entry usage_data = quota_models.QuotaUsage( resource=resource, tenant_id=tenant_id) context.session.add(usage_data) # Perform explicit comparison with None as 0 is a valid value if in_use is not None: if delta: in_use = usage_data.in_use + in_use usage_data.in_use = in_use # After an explicit update the dirty bit should always be reset usage_data.dirty = False return QuotaUsageInfo(usage_data.resource, usage_data.tenant_id, usage_data.in_use, usage_data.dirty) def set_quota_usage_dirty(context, resource, tenant_id, dirty=True): """Set quota usage dirty bit for a given resource and tenant. 
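
    The dirty bit signals that the usage count for the resource must be
    recalculated before it can be trusted again.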
    :param resource: a resource for which quota usage is tracked
    :param tenant_id: tenant identifier
    :param dirty: the desired value for the dirty bit (defaults to True)
    :returns: 1 if the quota usage data were updated, 0 otherwise.
    """
    query = common_db_api.model_query(context, quota_models.QuotaUsage)
    query = query.filter_by(resource=resource).filter_by(tenant_id=tenant_id)
    return query.update({'dirty': dirty})


def set_resources_quota_usage_dirty(context, resources, tenant_id,
                                    dirty=True):
    """Set quota usage dirty bit for a given tenant and multiple resources.

    :param resources: list of resources for which the dirty bit is going
                      to be set
    :param tenant_id: tenant identifier
    :param dirty: the desired value for the dirty bit (defaults to True)
    :returns: the number of records for which the bit was actually set.
    """
    query = common_db_api.model_query(context, quota_models.QuotaUsage)
    query = query.filter_by(tenant_id=tenant_id)
    if resources:
        query = query.filter(quota_models.QuotaUsage.resource.in_(resources))
    # synchronize_session=False needed because of the IN condition
    return query.update({'dirty': dirty}, synchronize_session=False)


def set_all_quota_usage_dirty(context, resource, dirty=True):
    """Set the dirty bit on quota usage for all tenants.

    :param resource: the resource for which the dirty bit should be set
    :returns: the number of tenants for which the dirty bit was actually
              updated
    """
    query = common_db_api.model_query(context, quota_models.QuotaUsage)
    query = query.filter_by(resource=resource)
    return query.update({'dirty': dirty})


def create_reservation(context, tenant_id, deltas, expiration=None):
    # This method is usually called from within another transaction.
    # Consider using begin_nested
    with context.session.begin(subtransactions=True):
        expiration = expiration or (utcnow() + datetime.timedelta(0, 120))
        resv = quota_models.Reservation(tenant_id=tenant_id,
                                        expiration=expiration)
        context.session.add(resv)
        for (resource, delta) in deltas.items():
            context.session.add(
                quota_models.ResourceDelta(resource=resource,
                                           amount=delta,
                                           reservation=resv))
    return ReservationInfo(resv['id'],
                           resv['tenant_id'],
                           resv['expiration'],
                           dict((delta.resource, delta.amount)
                                for delta in resv.resource_deltas))


def get_reservation(context, reservation_id):
    query = context.session.query(quota_models.Reservation).filter_by(
        id=reservation_id)
    resv = query.first()
    if not resv:
        return
    return ReservationInfo(resv['id'],
                           resv['tenant_id'],
                           resv['expiration'],
                           dict((delta.resource, delta.amount)
                                for delta in resv.resource_deltas))


def remove_reservation(context, reservation_id, set_dirty=False):
    delete_query = context.session.query(quota_models.Reservation).filter_by(
        id=reservation_id)
    # Not handling MultipleResultsFound as the query is filtering by primary
    # key
    try:
        reservation = delete_query.one()
    except orm_exc.NoResultFound:
        # TODO(salv-orlando): Raise here and then handle the exception?
        return
    tenant_id = reservation.tenant_id
    resources = [delta.resource for delta in reservation.resource_deltas]
    num_deleted = delete_query.delete()
    if set_dirty:
        # quota_usage for all resources involved in this reservation must
        # be marked as dirty
        set_resources_quota_usage_dirty(context, resources, tenant_id)
    return num_deleted


def get_reservations_for_resources(context, tenant_id, resources,
                                   expired=False):
    """Retrieve total amount of reservations for specified resources.
:param context: Neutron context with db session :param tenant_id: Tenant identifier :param resources: Resources for which reserved amounts should be fetched :param expired: False to fetch active reservations, True to fetch expired reservations (defaults to False) :returns: a dictionary mapping resources with corresponding deltas """ if not resources: # Do not waste time return now = utcnow() resv_query = context.session.query( quota_models.ResourceDelta.resource, quota_models.Reservation.expiration, sql.func.sum(quota_models.ResourceDelta.amount)).join( quota_models.Reservation) if expired: exp_expr = (quota_models.Reservation.expiration < now) else: exp_expr = (quota_models.Reservation.expiration >= now) resv_query = resv_query.filter(sa.and_( quota_models.Reservation.tenant_id == tenant_id, quota_models.ResourceDelta.resource.in_(resources), exp_expr)).group_by( quota_models.ResourceDelta.resource, quota_models.Reservation.expiration) return dict((resource, total_reserved) for (resource, exp, total_reserved) in resv_query) def remove_expired_reservations(context, tenant_id=None): now = utcnow() resv_query = context.session.query(quota_models.Reservation) if tenant_id: tenant_expr = (quota_models.Reservation.tenant_id == tenant_id) else: tenant_expr = sql.true() resv_query = resv_query.filter(sa.and_( tenant_expr, quota_models.Reservation.expiration < now)) return resv_query.delete() neutron-8.4.0/neutron/db/portbindings_base.py0000664000567000056710000000265013044372736022517 0ustar jenkinsjenkins00000000000000# Copyright 2013 UnitedStack Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron.api.v2 import attributes from neutron.db import db_base_plugin_v2 class PortBindingBaseMixin(object): base_binding_dict = None def _process_portbindings_create_and_update(self, context, port_data, port): self.extend_port_dict_binding(port, None) def extend_port_dict_binding(self, port_res, port_db): if self.base_binding_dict: port_res.update(self.base_binding_dict) def _extend_port_dict_binding(plugin, port_res, port_db): if not isinstance(plugin, PortBindingBaseMixin): return plugin.extend_port_dict_binding(port_res, port_db) def register_port_dict_function(): db_base_plugin_v2.NeutronDbPluginV2.register_dict_extend_funcs( attributes.PORTS, [_extend_port_dict_binding]) neutron-8.4.0/neutron/db/extraroute_db.py0000664000567000056710000001603013044372760021664 0ustar jenkinsjenkins00000000000000# Copyright 2013, Nachi Ueno, NTT MCL, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. import netaddr from oslo_config import cfg from oslo_log import log as logging import sqlalchemy as sa from sqlalchemy import orm from neutron._i18n import _ from neutron.common import utils from neutron.db import db_base_plugin_v2 from neutron.db import l3_db from neutron.db import model_base from neutron.db import models_v2 from neutron.extensions import extraroute from neutron.extensions import l3 LOG = logging.getLogger(__name__) extra_route_opts = [ #TODO(nati): use quota framework when it supports quota for attributes cfg.IntOpt('max_routes', default=30, help=_("Maximum number of routes per router")), ] cfg.CONF.register_opts(extra_route_opts) class RouterRoute(model_base.BASEV2, models_v2.Route): router_id = sa.Column(sa.String(36), sa.ForeignKey('routers.id', ondelete="CASCADE"), primary_key=True) router = orm.relationship(l3_db.Router, backref=orm.backref("route_list", lazy='joined', cascade='delete')) class ExtraRoute_dbonly_mixin(l3_db.L3_NAT_dbonly_mixin): """Mixin class to support extra route configuration on router.""" def _extend_router_dict_extraroute(self, router_res, router_db): router_res['routes'] = (ExtraRoute_dbonly_mixin. _make_extra_route_list( router_db['route_list'] )) db_base_plugin_v2.NeutronDbPluginV2.register_dict_extend_funcs( l3.ROUTERS, ['_extend_router_dict_extraroute']) def update_router(self, context, id, router): r = router['router'] with context.session.begin(subtransactions=True): # check that the router exists and that we have permission to # access it router_db = self._get_router(context, id) if 'routes' in r: self._update_extra_routes(context, router_db, r['routes']) routes = self._get_extra_routes_by_router_id(context, id) router_updated = super(ExtraRoute_dbonly_mixin, self).update_router( context, id, router) router_updated['routes'] = routes return router_updated def _get_subnets_by_cidr(self, context, cidr): query_subnets = context.session.query(models_v2.Subnet) return query_subnets.filter_by(cidr=cidr).all() def _validate_routes_nexthop(self, cidrs, ips, routes, nexthop): #Note(nati): the nexthop should be connected, # so we need to check # that the nexthop belongs to one of the cidrs of the router ports if not netaddr.all_matching_cidrs(nexthop, cidrs): raise extraroute.InvalidRoutes( routes=routes, reason=_('the nexthop is not connected with router')) #Note(nati): nexthop should not be the same as fixed_ips if nexthop in ips: raise extraroute.InvalidRoutes( routes=routes, reason=_('the nexthop is used by router')) def _validate_routes(self, context, router_id, routes): if len(routes) > cfg.CONF.max_routes: raise extraroute.RoutesExhausted( router_id=router_id, quota=cfg.CONF.max_routes) context = context.elevated() filters = {'device_id': [router_id]} ports = self._core_plugin.get_ports(context, filters) cidrs = [] ips = [] for port in ports: for ip in port['fixed_ips']: cidrs.append(self._core_plugin.get_subnet( context, ip['subnet_id'])['cidr']) ips.append(ip['ip_address']) for route in routes: self._validate_routes_nexthop( cidrs, ips, routes, route['nexthop']) def _update_extra_routes(self, context, router, routes): self._validate_routes(context, router['id'], routes) old_routes, routes_dict = self._get_extra_routes_dict_by_router_id( context, router['id']) added, removed = utils.diff_list_of_dict(old_routes, routes) LOG.debug('Added routes are %s', added) for route in added: router_routes = RouterRoute( router_id=router['id'], destination=route['destination'],
nexthop=route['nexthop']) context.session.add(router_routes) LOG.debug('Removed routes are %s', removed) for route in removed: context.session.delete( routes_dict[(route['destination'], route['nexthop'])]) @staticmethod def _make_extra_route_list(extra_routes): return [{'destination': route['destination'], 'nexthop': route['nexthop']} for route in extra_routes] def _get_extra_routes_by_router_id(self, context, id): query = context.session.query(RouterRoute) query = query.filter_by(router_id=id) return self._make_extra_route_list(query) def _get_extra_routes_dict_by_router_id(self, context, id): query = context.session.query(RouterRoute) query = query.filter_by(router_id=id) routes = [] routes_dict = {} for route in query: routes.append({'destination': route['destination'], 'nexthop': route['nexthop']}) routes_dict[(route['destination'], route['nexthop'])] = route return routes, routes_dict def _confirm_router_interface_not_in_use(self, context, router_id, subnet_id): super(ExtraRoute_dbonly_mixin, self)._confirm_router_interface_not_in_use( context, router_id, subnet_id) subnet = self._core_plugin.get_subnet(context, subnet_id) subnet_cidr = netaddr.IPNetwork(subnet['cidr']) extra_routes = self._get_extra_routes_by_router_id(context, router_id) for route in extra_routes: if netaddr.all_matching_cidrs(route['nexthop'], [subnet_cidr]): raise extraroute.RouterInterfaceInUseByRoute( router_id=router_id, subnet_id=subnet_id) class ExtraRoute_db_mixin(ExtraRoute_dbonly_mixin, l3_db.L3_NAT_db_mixin): """Mixin class to support extra route configuration on router with rpc.""" pass neutron-8.4.0/neutron/db/securitygroups_db.py0000664000567000056710000011113613044372760022574 0ustar jenkinsjenkins00000000000000# Copyright 2012 VMware, Inc. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import netaddr from oslo_db import exception as db_exc from oslo_log import log as logging from oslo_utils import uuidutils import sqlalchemy as sa from sqlalchemy import orm from sqlalchemy.orm import exc from sqlalchemy.orm import scoped_session from neutron._i18n import _ from neutron.api.v2 import attributes from neutron.callbacks import events from neutron.callbacks import exceptions from neutron.callbacks import registry from neutron.callbacks import resources from neutron.common import constants from neutron.common import utils from neutron.db import api as db_api from neutron.db import db_base_plugin_v2 from neutron.db import model_base from neutron.db import models_v2 from neutron.extensions import securitygroup as ext_sg LOG = logging.getLogger(__name__) class SecurityGroup(model_base.HasStandardAttributes, model_base.BASEV2, model_base.HasId, model_base.HasTenant): """Represents a v2 neutron security group.""" name = sa.Column(sa.String(attributes.NAME_MAX_LEN)) class DefaultSecurityGroup(model_base.BASEV2): __tablename__ = 'default_security_group' tenant_id = sa.Column(sa.String(attributes.TENANT_ID_MAX_LEN), primary_key=True, nullable=False) security_group_id = sa.Column(sa.String(36), sa.ForeignKey("securitygroups.id", ondelete="CASCADE"), nullable=False) security_group = orm.relationship( SecurityGroup, lazy='joined', backref=orm.backref('default_security_group', cascade='all,delete'), primaryjoin="SecurityGroup.id==DefaultSecurityGroup.security_group_id", ) class SecurityGroupPortBinding(model_base.BASEV2): """Represents binding between neutron ports and security profiles.""" port_id = sa.Column(sa.String(36), sa.ForeignKey("ports.id", ondelete='CASCADE'), primary_key=True) security_group_id = sa.Column(sa.String(36), sa.ForeignKey("securitygroups.id"), primary_key=True) # Add a relationship to the Port model in order to instruct SQLAlchemy to # eagerly load security group bindings ports = orm.relationship( models_v2.Port, backref=orm.backref("security_groups", lazy='joined', cascade='delete')) class SecurityGroupRule(model_base.HasStandardAttributes, model_base.BASEV2, model_base.HasId, model_base.HasTenant): """Represents a v2 neutron security group rule.""" security_group_id = sa.Column(sa.String(36), sa.ForeignKey("securitygroups.id", ondelete="CASCADE"), nullable=False) remote_group_id = sa.Column(sa.String(36), sa.ForeignKey("securitygroups.id", ondelete="CASCADE"), nullable=True) direction = sa.Column(sa.Enum('ingress', 'egress', name='securitygrouprules_direction')) ethertype = sa.Column(sa.String(40)) protocol = sa.Column(sa.String(40)) port_range_min = sa.Column(sa.Integer) port_range_max = sa.Column(sa.Integer) remote_ip_prefix = sa.Column(sa.String(255)) security_group = orm.relationship( SecurityGroup, backref=orm.backref('rules', cascade='all,delete', lazy='joined'), primaryjoin="SecurityGroup.id==SecurityGroupRule.security_group_id") source_group = orm.relationship( SecurityGroup, backref=orm.backref('source_rules', cascade='all,delete'), primaryjoin="SecurityGroup.id==SecurityGroupRule.remote_group_id") class SecurityGroupDbMixin(ext_sg.SecurityGroupPluginBase): """Mixin class to add security group to db_base_plugin_v2.""" __native_bulk_support = True def create_security_group_bulk(self, context, security_group_rule): return self._create_bulk('security_group', context, security_group_rule) def _registry_notify(self, res, event, id=None, exc_cls=None, **kwargs): # NOTE(armax): a callback exception here will prevent the request # from being processed. 
This is a hook point for backend's validation; # we raise to propagate the reason for the failure. try: registry.notify(res, event, self, **kwargs) except exceptions.CallbackFailure as e: if exc_cls: reason = (_('cannot perform %(event)s due to %(reason)s') % {'event': event, 'reason': e}) raise exc_cls(reason=reason, id=id) def create_security_group(self, context, security_group, default_sg=False): """Create security group. If default_sg is true, this means we are creating the default security group for a given tenant if it does not already exist. """ s = security_group['security_group'] kwargs = { 'context': context, 'security_group': s, 'is_default': default_sg, } self._registry_notify(resources.SECURITY_GROUP, events.BEFORE_CREATE, exc_cls=ext_sg.SecurityGroupConflict, **kwargs) tenant_id = s['tenant_id'] if not default_sg: self._ensure_default_security_group(context, tenant_id) with db_api.autonested_transaction(context.session): security_group_db = SecurityGroup(id=s.get('id') or ( uuidutils.generate_uuid()), description=s['description'], tenant_id=tenant_id, name=s['name']) context.session.add(security_group_db) if default_sg: context.session.add(DefaultSecurityGroup( security_group=security_group_db, tenant_id=security_group_db['tenant_id'])) for ethertype in ext_sg.sg_supported_ethertypes: if default_sg: # Allow intercommunication ingress_rule = SecurityGroupRule( id=uuidutils.generate_uuid(), tenant_id=tenant_id, security_group=security_group_db, direction='ingress', ethertype=ethertype, source_group=security_group_db) context.session.add(ingress_rule) egress_rule = SecurityGroupRule( id=uuidutils.generate_uuid(), tenant_id=tenant_id, security_group=security_group_db, direction='egress', ethertype=ethertype) context.session.add(egress_rule) self._registry_notify(resources.SECURITY_GROUP, events.PRECOMMIT_CREATE, exc_cls=ext_sg.SecurityGroupConflict, **kwargs) secgroup_dict = self._make_security_group_dict(security_group_db) kwargs['security_group'] = secgroup_dict registry.notify(resources.SECURITY_GROUP, events.AFTER_CREATE, self, **kwargs) return secgroup_dict def get_security_groups(self, context, filters=None, fields=None, sorts=None, limit=None, marker=None, page_reverse=False, default_sg=False): # If default_sg is True do not call _ensure_default_security_group() # so this can be done recursively. Context.tenant_id is checked # because all the unit tests do not explicitly set the context on # GETS. TODO(arosen) context handling can probably be improved here. if not default_sg and context.tenant_id: tenant_id = filters.get('tenant_id') if tenant_id: tenant_id = tenant_id[0] else: tenant_id = context.tenant_id self._ensure_default_security_group(context, tenant_id) marker_obj = self._get_marker_obj(context, 'security_group', limit, marker) return self._get_collection(context, SecurityGroup, self._make_security_group_dict, filters=filters, fields=fields, sorts=sorts, limit=limit, marker_obj=marker_obj, page_reverse=page_reverse) def get_security_groups_count(self, context, filters=None): return self._get_collection_count(context, SecurityGroup, filters=filters) def get_security_group(self, context, id, fields=None, tenant_id=None): """Tenant id is given to handle the case when creating a security group rule on behalf of another user.
""" if tenant_id: tmp_context_tenant_id = context.tenant_id context.tenant_id = tenant_id try: with context.session.begin(subtransactions=True): ret = self._make_security_group_dict(self._get_security_group( context, id), fields) ret['security_group_rules'] = self.get_security_group_rules( context, {'security_group_id': [id]}) finally: if tenant_id: context.tenant_id = tmp_context_tenant_id return ret def _get_security_group(self, context, id): try: query = self._model_query(context, SecurityGroup) sg = query.filter(SecurityGroup.id == id).one() except exc.NoResultFound: raise ext_sg.SecurityGroupNotFound(id=id) return sg def delete_security_group(self, context, id): filters = {'security_group_id': [id]} ports = self._get_port_security_group_bindings(context, filters) if ports: raise ext_sg.SecurityGroupInUse(id=id) # confirm security group exists sg = self._get_security_group(context, id) if sg['name'] == 'default' and not context.is_admin: raise ext_sg.SecurityGroupCannotRemoveDefault() kwargs = { 'context': context, 'security_group_id': id, 'security_group': sg, } self._registry_notify(resources.SECURITY_GROUP, events.BEFORE_DELETE, exc_cls=ext_sg.SecurityGroupInUse, id=id, **kwargs) with context.session.begin(subtransactions=True): self._registry_notify(resources.SECURITY_GROUP, events.PRECOMMIT_DELETE, exc_cls=ext_sg.SecurityGroupInUse, id=id, **kwargs) context.session.delete(sg) kwargs.pop('security_group') registry.notify(resources.SECURITY_GROUP, events.AFTER_DELETE, self, **kwargs) def update_security_group(self, context, id, security_group): s = security_group['security_group'] kwargs = { 'context': context, 'security_group_id': id, 'security_group': s, } self._registry_notify(resources.SECURITY_GROUP, events.BEFORE_UPDATE, exc_cls=ext_sg.SecurityGroupConflict, **kwargs) with context.session.begin(subtransactions=True): sg = self._get_security_group(context, id) if sg['name'] == 'default' and 'name' in s: raise ext_sg.SecurityGroupCannotUpdateDefault() self._registry_notify( resources.SECURITY_GROUP, events.PRECOMMIT_UPDATE, exc_cls=ext_sg.SecurityGroupConflict, **kwargs) sg.update(s) sg_dict = self._make_security_group_dict(sg) kwargs['security_group'] = sg_dict registry.notify(resources.SECURITY_GROUP, events.AFTER_UPDATE, self, **kwargs) return sg_dict def _make_security_group_dict(self, security_group, fields=None): res = {'id': security_group['id'], 'name': security_group['name'], 'tenant_id': security_group['tenant_id'], 'description': security_group['description']} res['security_group_rules'] = [self._make_security_group_rule_dict(r) for r in security_group.rules] self._apply_dict_extend_functions(ext_sg.SECURITYGROUPS, res, security_group) return self._fields(res, fields) def _make_security_group_binding_dict(self, security_group, fields=None): res = {'port_id': security_group['port_id'], 'security_group_id': security_group['security_group_id']} return self._fields(res, fields) def _create_port_security_group_binding(self, context, port_id, security_group_id): with context.session.begin(subtransactions=True): db = SecurityGroupPortBinding(port_id=port_id, security_group_id=security_group_id) context.session.add(db) def _get_port_security_group_bindings(self, context, filters=None, fields=None): return self._get_collection(context, SecurityGroupPortBinding, self._make_security_group_binding_dict, filters=filters, fields=fields) def _delete_port_security_group_bindings(self, context, port_id): query = self._model_query(context, SecurityGroupPortBinding) bindings = 
query.filter( SecurityGroupPortBinding.port_id == port_id) with context.session.begin(subtransactions=True): for binding in bindings: context.session.delete(binding) def create_security_group_rule_bulk(self, context, security_group_rules): return self._create_bulk('security_group_rule', context, security_group_rules) def create_security_group_rule_bulk_native(self, context, security_group_rules): rules = security_group_rules['security_group_rules'] scoped_session(context.session) security_group_id = self._validate_security_group_rules( context, security_group_rules) with context.session.begin(subtransactions=True): if not self.get_security_group(context, security_group_id): raise ext_sg.SecurityGroupNotFound(id=security_group_id) self._check_for_duplicate_rules(context, rules) ret = [] for rule_dict in rules: res_rule_dict = self._create_security_group_rule( context, rule_dict, validate=False) ret.append(res_rule_dict) return ret def create_security_group_rule(self, context, security_group_rule): return self._create_security_group_rule(context, security_group_rule) def _create_security_group_rule(self, context, security_group_rule, validate=True): if validate: self._validate_security_group_rule(context, security_group_rule) self._check_for_duplicate_rules_in_db(context, security_group_rule) rule_dict = security_group_rule['security_group_rule'] kwargs = { 'context': context, 'security_group_rule': rule_dict } self._registry_notify(resources.SECURITY_GROUP_RULE, events.BEFORE_CREATE, exc_cls=ext_sg.SecurityGroupConflict, **kwargs) with context.session.begin(subtransactions=True): db = SecurityGroupRule( id=(rule_dict.get('id') or uuidutils.generate_uuid()), tenant_id=rule_dict['tenant_id'], security_group_id=rule_dict['security_group_id'], direction=rule_dict['direction'], remote_group_id=rule_dict.get('remote_group_id'), ethertype=rule_dict['ethertype'], protocol=rule_dict['protocol'], port_range_min=rule_dict['port_range_min'], port_range_max=rule_dict['port_range_max'], remote_ip_prefix=rule_dict.get('remote_ip_prefix'), description=rule_dict.get('description') ) context.session.add(db) self._registry_notify(resources.SECURITY_GROUP_RULE, events.PRECOMMIT_CREATE, exc_cls=ext_sg.SecurityGroupConflict, **kwargs) res_rule_dict = self._make_security_group_rule_dict(db) kwargs['security_group_rule'] = res_rule_dict registry.notify( resources.SECURITY_GROUP_RULE, events.AFTER_CREATE, self, **kwargs) return res_rule_dict def _get_ip_proto_number(self, protocol): if protocol is None: return # According to bug 1381379, protocol is always set to string to avoid # problems with comparing int and string in PostgreSQL. Here this # string is converted to int to give an opportunity to use it as # before. 
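# Illustrative sketch, not part of the original source: assuming the
# usual IANA mappings in neutron.common.constants, the conversion below
# behaves roughly as follows:
#   _get_ip_proto_number('tcp')    -> 6
#   _get_ip_proto_number('6')      -> 6 (numeric strings pass through int())
#   _get_ip_proto_number('icmpv6') -> first aliased to 'ipv6-icmp', then
#                                     resolved to its protocol number (58)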
if protocol in constants.IP_PROTOCOL_NAME_ALIASES: protocol = constants.IP_PROTOCOL_NAME_ALIASES[protocol] return int(constants.IP_PROTOCOL_MAP.get(protocol, protocol)) def _get_ip_proto_name_and_num(self, protocol): if protocol is None: return protocol = str(protocol) if protocol in constants.IP_PROTOCOL_MAP: return [protocol, str(constants.IP_PROTOCOL_MAP.get(protocol))] elif protocol in constants.IP_PROTOCOL_NUM_TO_NAME_MAP: return [constants.IP_PROTOCOL_NUM_TO_NAME_MAP.get(protocol), protocol] return [protocol, protocol] def _validate_port_range(self, rule): """Check that port_range is valid.""" if (rule['port_range_min'] is None and rule['port_range_max'] is None): return if not rule['protocol']: raise ext_sg.SecurityGroupProtocolRequiredWithPorts() ip_proto = self._get_ip_proto_number(rule['protocol']) if ip_proto in [constants.PROTO_NUM_TCP, constants.PROTO_NUM_UDP]: if rule['port_range_min'] == 0 or rule['port_range_max'] == 0: raise ext_sg.SecurityGroupInvalidPortValue(port=0) elif (rule['port_range_min'] is not None and rule['port_range_max'] is not None and rule['port_range_min'] <= rule['port_range_max']): pass else: raise ext_sg.SecurityGroupInvalidPortRange() elif ip_proto in [constants.PROTO_NUM_ICMP, constants.PROTO_NUM_IPV6_ICMP]: for attr, field in [('port_range_min', 'type'), ('port_range_max', 'code')]: if rule[attr] is not None and not (0 <= rule[attr] <= 255): raise ext_sg.SecurityGroupInvalidIcmpValue( field=field, attr=attr, value=rule[attr]) if (rule['port_range_min'] is None and rule['port_range_max'] is not None): raise ext_sg.SecurityGroupMissingIcmpType( value=rule['port_range_max']) def _validate_ethertype_and_protocol(self, rule): """Check if given ethertype and protocol are valid or not""" if rule['protocol'] in [constants.PROTO_NAME_IPV6_ENCAP, constants.PROTO_NAME_IPV6_FRAG, constants.PROTO_NAME_IPV6_ICMP, constants.PROTO_NAME_IPV6_ICMP_LEGACY, constants.PROTO_NAME_IPV6_NONXT, constants.PROTO_NAME_IPV6_OPTS, constants.PROTO_NAME_IPV6_ROUTE]: if rule['ethertype'] == constants.IPv4: raise ext_sg.SecurityGroupEthertypeConflictWithProtocol( ethertype=rule['ethertype'], protocol=rule['protocol']) def _validate_single_tenant_and_group(self, security_group_rules): """Check that all rules belong to the same security group and tenant """ sg_groups = set() tenants = set() for rule_dict in security_group_rules['security_group_rules']: rule = rule_dict['security_group_rule'] sg_groups.add(rule['security_group_id']) if len(sg_groups) > 1: raise ext_sg.SecurityGroupNotSingleGroupRules() tenants.add(rule['tenant_id']) if len(tenants) > 1: raise ext_sg.SecurityGroupRulesNotSingleTenant() return sg_groups.pop() def _validate_security_group_rule(self, context, security_group_rule): rule = security_group_rule['security_group_rule'] self._validate_port_range(rule) self._validate_ip_prefix(rule) self._validate_ethertype_and_protocol(rule) if rule['remote_ip_prefix'] and rule['remote_group_id']: raise ext_sg.SecurityGroupRemoteGroupAndRemoteIpPrefix() remote_group_id = rule['remote_group_id'] # Check that remote_group_id exists for tenant if remote_group_id: self.get_security_group(context, remote_group_id, tenant_id=rule['tenant_id']) security_group_id = rule['security_group_id'] # Confirm that the tenant has permission # to add rules to this security group. 
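# Clarifying note, not part of the original source: the lookup below
# doubles as the permission check -- get_security_group() raises
# SecurityGroupNotFound when the group is not visible to the rule's
# tenant, which aborts rule creation.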
self.get_security_group(context, security_group_id, tenant_id=rule['tenant_id']) return security_group_id def _validate_security_group_rules(self, context, security_group_rules): sg_id = self._validate_single_tenant_and_group(security_group_rules) for rule in security_group_rules['security_group_rules']: self._validate_security_group_rule(context, rule) return sg_id def _make_security_group_rule_dict(self, security_group_rule, fields=None): res = {'id': security_group_rule['id'], 'tenant_id': security_group_rule['tenant_id'], 'security_group_id': security_group_rule['security_group_id'], 'ethertype': security_group_rule['ethertype'], 'direction': security_group_rule['direction'], 'protocol': security_group_rule['protocol'], 'port_range_min': security_group_rule['port_range_min'], 'port_range_max': security_group_rule['port_range_max'], 'remote_ip_prefix': security_group_rule['remote_ip_prefix'], 'remote_group_id': security_group_rule['remote_group_id']} self._apply_dict_extend_functions(ext_sg.SECURITYGROUPRULES, res, security_group_rule) return self._fields(res, fields) def _make_security_group_rule_filter_dict(self, security_group_rule): sgr = security_group_rule['security_group_rule'] res = {'tenant_id': [sgr['tenant_id']], 'security_group_id': [sgr['security_group_id']], 'direction': [sgr['direction']]} include_if_present = ['protocol', 'port_range_max', 'port_range_min', 'ethertype', 'remote_ip_prefix', 'remote_group_id', 'description'] for key in include_if_present: value = sgr.get(key) if value: res[key] = [value] # protocol field will get corresponding name and number value = sgr.get('protocol') if value: res['protocol'] = self._get_ip_proto_name_and_num(value) return res def _check_for_duplicate_rules(self, context, security_group_rules): for i in security_group_rules: found_self = False for j in security_group_rules: if i['security_group_rule'] == j['security_group_rule']: if found_self: raise ext_sg.DuplicateSecurityGroupRuleInPost(rule=i) found_self = True self._check_for_duplicate_rules_in_db(context, i) def _check_for_duplicate_rules_in_db(self, context, security_group_rule): # Check in database if rule exists filters = self._make_security_group_rule_filter_dict( security_group_rule) keys = security_group_rule['security_group_rule'].keys() fields = list(keys) + ['id'] db_rules = self.get_security_group_rules(context, filters, fields=fields) # Note(arosen): the call to get_security_group_rules wildcards # values in the filter that have a value of [None]. For # example, filters = {'remote_group_id': [None]} will return # all security group rules regardless of their value of # remote_group_id. Therefore it is not possible to do this # query unless the behavior of _get_collection() # is changed, which cannot be done because other methods already # rely on this behavior. Therefore, we do the filtering # below to check for these corner cases.
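# Concrete corner case, illustrative and not part of the original
# source: for a candidate rule with remote_group_id=None the DB query
# effectively wildcards that key, so rules with *any* remote_group_id
# may come back; the field-by-field comparison below discards them.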
rule_dict = security_group_rule['security_group_rule'].copy() sg_protocol = rule_dict.pop('protocol', None) for db_rule in db_rules: rule_id = db_rule.pop('id', None) # remove protocol and match separately for number and type db_protocol = db_rule.pop('protocol', None) is_protocol_matching = ( self._get_ip_proto_name_and_num(db_protocol) == self._get_ip_proto_name_and_num(sg_protocol)) if (is_protocol_matching and rule_dict == db_rule): raise ext_sg.SecurityGroupRuleExists(rule_id=rule_id) def _validate_ip_prefix(self, rule): """Check that a valid cidr was specified as remote_ip_prefix No need to check that it is in fact an IP address as this is already validated by attribute validators. Check that rule ethertype is consistent with remote_ip_prefix ip type. Add mask to ip_prefix if absent (192.168.1.10 -> 192.168.1.10/32). """ input_prefix = rule['remote_ip_prefix'] if input_prefix: addr = netaddr.IPNetwork(input_prefix) # set input_prefix to always include the netmask: rule['remote_ip_prefix'] = str(addr) # check consistency of ethertype with addr version if rule['ethertype'] != "IPv%d" % (addr.version): raise ext_sg.SecurityGroupRuleParameterConflict( ethertype=rule['ethertype'], cidr=input_prefix) def get_security_group_rules(self, context, filters=None, fields=None, sorts=None, limit=None, marker=None, page_reverse=False): marker_obj = self._get_marker_obj(context, 'security_group_rule', limit, marker) return self._get_collection(context, SecurityGroupRule, self._make_security_group_rule_dict, filters=filters, fields=fields, sorts=sorts, limit=limit, marker_obj=marker_obj, page_reverse=page_reverse) def get_security_group_rules_count(self, context, filters=None): return self._get_collection_count(context, SecurityGroupRule, filters=filters) def get_security_group_rule(self, context, id, fields=None): security_group_rule = self._get_security_group_rule(context, id) return self._make_security_group_rule_dict(security_group_rule, fields) def _get_security_group_rule(self, context, id): try: query = self._model_query(context, SecurityGroupRule) sgr = query.filter(SecurityGroupRule.id == id).one() except exc.NoResultFound: raise ext_sg.SecurityGroupRuleNotFound(id=id) return sgr def delete_security_group_rule(self, context, id): kwargs = { 'context': context, 'security_group_rule_id': id } self._registry_notify(resources.SECURITY_GROUP_RULE, events.BEFORE_DELETE, id=id, exc_cls=ext_sg.SecurityGroupRuleInUse, **kwargs) with context.session.begin(subtransactions=True): query = self._model_query(context, SecurityGroupRule).filter( SecurityGroupRule.id == id) self._registry_notify(resources.SECURITY_GROUP_RULE, events.PRECOMMIT_DELETE, exc_cls=ext_sg.SecurityGroupRuleInUse, id=id, **kwargs) try: # As there is a filter on a primary key it is not possible for # MultipleResultsFound to be raised context.session.delete(query.one()) except exc.NoResultFound: raise ext_sg.SecurityGroupRuleNotFound(id=id) registry.notify( resources.SECURITY_GROUP_RULE, events.AFTER_DELETE, self, **kwargs) def _extend_port_dict_security_group(self, port_res, port_db): # Security group bindings will be retrieved from the SQLAlchemy # model. As they're loaded eagerly with ports because of the # joined load they will not cause an extra query. 
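# Clarifying note, not part of the original source: port_db.security_groups
# is already populated by the joined eager load declared on
# SecurityGroupPortBinding.ports, so iterating it here issues no extra
# SELECT.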
security_group_ids = [sec_group_mapping['security_group_id'] for sec_group_mapping in port_db.security_groups] port_res[ext_sg.SECURITYGROUPS] = security_group_ids return port_res # Register dict extend functions for ports db_base_plugin_v2.NeutronDbPluginV2.register_dict_extend_funcs( attributes.PORTS, ['_extend_port_dict_security_group']) def _process_port_create_security_group(self, context, port, security_group_ids): if attributes.is_attr_set(security_group_ids): for security_group_id in security_group_ids: self._create_port_security_group_binding(context, port['id'], security_group_id) # Convert to list as a set might be passed here and # this has to be serialized port[ext_sg.SECURITYGROUPS] = (security_group_ids and list(security_group_ids) or []) def _ensure_default_security_group(self, context, tenant_id): """Create a default security group if one doesn't exist. :returns: the default security group id for given tenant. """ try: query = self._model_query(context, DefaultSecurityGroup) default_group = query.filter_by(tenant_id=tenant_id).one() return default_group['security_group_id'] except exc.NoResultFound: return self._create_default_security_group(context, tenant_id) def _create_default_security_group(self, context, tenant_id): security_group = { 'security_group': {'name': 'default', 'tenant_id': tenant_id, 'description': _('Default security group')} } try: security_group = self.create_security_group( context, security_group, default_sg=True) return security_group['id'] except db_exc.DBDuplicateEntry as ex: # default security group was created concurrently LOG.debug("Duplicate default security group %s was " "not created", ex.value) # raise a retry request to restart the whole process since # we could be in a REPEATABLE READ isolation level and won't # be able to see the SG group in this transaction. raise db_exc.RetryRequest(ex) def _get_security_groups_on_port(self, context, port): """Check that all security groups on port belong to tenant. :returns: all security groups IDs on port belonging to tenant. 
""" port = port['port'] if not attributes.is_attr_set(port.get(ext_sg.SECURITYGROUPS)): return if port.get('device_owner') and utils.is_port_trusted(port): return port_sg = port.get(ext_sg.SECURITYGROUPS, []) filters = {'id': port_sg} tenant_id = port.get('tenant_id') if tenant_id: filters['tenant_id'] = [tenant_id] valid_groups = set(g['id'] for g in self.get_security_groups(context, fields=['id'], filters=filters)) requested_groups = set(port_sg) port_sg_missing = requested_groups - valid_groups if port_sg_missing: raise ext_sg.SecurityGroupNotFound(id=', '.join(port_sg_missing)) return requested_groups def _ensure_default_security_group_on_port(self, context, port): # we don't apply security groups for dhcp, router port = port['port'] if port.get('device_owner') and utils.is_port_trusted(port): return default_sg = self._ensure_default_security_group(context, port['tenant_id']) if not attributes.is_attr_set(port.get(ext_sg.SECURITYGROUPS)): port[ext_sg.SECURITYGROUPS] = [default_sg] def _check_update_deletes_security_groups(self, port): """Return True if port has as a security group and it's value is either [] or not is_attr_set, otherwise return False """ if (ext_sg.SECURITYGROUPS in port['port'] and not (attributes.is_attr_set(port['port'][ext_sg.SECURITYGROUPS]) and port['port'][ext_sg.SECURITYGROUPS] != [])): return True return False def _check_update_has_security_groups(self, port): """Return True if port has security_groups attribute set and its not empty, or False otherwise. This method is called both for port create and port update. """ if (ext_sg.SECURITYGROUPS in port['port'] and (attributes.is_attr_set(port['port'][ext_sg.SECURITYGROUPS]) and port['port'][ext_sg.SECURITYGROUPS] != [])): return True return False def update_security_group_on_port(self, context, id, port, original_port, updated_port): """Update security groups on port. This method returns a flag which indicates request notification is required and does not perform notification itself. It is because another changes for the port may require notification. """ need_notify = False port_updates = port['port'] if (ext_sg.SECURITYGROUPS in port_updates and not utils.compare_elements( original_port.get(ext_sg.SECURITYGROUPS), port_updates[ext_sg.SECURITYGROUPS])): # delete the port binding and read it with the new rules port_updates[ext_sg.SECURITYGROUPS] = ( self._get_security_groups_on_port(context, port)) self._delete_port_security_group_bindings(context, id) self._process_port_create_security_group( context, updated_port, port_updates[ext_sg.SECURITYGROUPS]) need_notify = True else: updated_port[ext_sg.SECURITYGROUPS] = ( original_port[ext_sg.SECURITYGROUPS]) return need_notify neutron-8.4.0/neutron/db/portsecurity_db.py0000664000567000056710000000534113044372760022241 0ustar jenkinsjenkins00000000000000# Copyright 2013 VMware, Inc. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from neutron.api.v2 import attributes as attrs from neutron.common import utils from neutron.db import db_base_plugin_v2 from neutron.db import portsecurity_db_common from neutron.extensions import portsecurity as psec class PortSecurityDbMixin(portsecurity_db_common.PortSecurityDbCommon): # Register dict extend functions for ports and networks db_base_plugin_v2.NeutronDbPluginV2.register_dict_extend_funcs( attrs.NETWORKS, ['_extend_port_security_dict']) db_base_plugin_v2.NeutronDbPluginV2.register_dict_extend_funcs( attrs.PORTS, ['_extend_port_security_dict']) def _extend_port_security_dict(self, response_data, db_data): if ('port-security' in getattr(self, 'supported_extension_aliases', [])): super(PortSecurityDbMixin, self)._extend_port_security_dict( response_data, db_data) def _determine_port_security_and_has_ip(self, context, port): """Returns a tuple of booleans (port_security_enabled, has_ip). Port_security is the value associated with the port if one is present; otherwise the value associated with the network is returned. has_ip indicates whether the port is associated with an ip. """ has_ip = self._ip_on_port(port) # we don't apply security groups for dhcp, router if port.get('device_owner') and utils.is_port_trusted(port): return (False, has_ip) if attrs.is_attr_set(port.get(psec.PORTSECURITY)): port_security_enabled = port[psec.PORTSECURITY] # If the port has an ip and security_groups are passed in, # conveniently set port_security_enabled to true; this way the # user doesn't also have to pass in port_security_enabled=True # when creating ports. elif (has_ip and attrs.is_attr_set(port.get('security_groups'))): port_security_enabled = True else: port_security_enabled = self._get_network_security_binding( context, port['network_id']) return (port_security_enabled, has_ip) def _ip_on_port(self, port): return bool(port.get('fixed_ips')) neutron-8.4.0/neutron/db/standardattrdescription_db.py0000664000567000056710000000266313044372760024430 0ustar jenkinsjenkins00000000000000# All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron.api.v2 import attributes from neutron.db import common_db_mixin from neutron.extensions import l3 from neutron.extensions import securitygroup class StandardAttrDescriptionMixin(object): supported_extension_aliases = ['standard-attr-description'] def _extend_standard_attr_description(self, res, db_object): if not hasattr(db_object, 'description'): return res['description'] = db_object.description for resource in [attributes.NETWORKS, attributes.PORTS, attributes.SUBNETS, attributes.SUBNETPOOLS, securitygroup.SECURITYGROUPS, securitygroup.SECURITYGROUPRULES, l3.ROUTERS, l3.FLOATINGIPS]: common_db_mixin.CommonDbMixin.register_dict_extend_funcs( resource, ['_extend_standard_attr_description']) neutron-8.4.0/neutron/db/tag_db.py0000664000567000056710000000707213044372760020243 0ustar jenkinsjenkins00000000000000# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License.
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # import sqlalchemy as sa from sqlalchemy import orm from sqlalchemy.orm import aliased from neutron.db import model_base class Tag(model_base.BASEV2): standard_attr_id = sa.Column( sa.BigInteger().with_variant(sa.Integer(), 'sqlite'), sa.ForeignKey(model_base.StandardAttribute.id, ondelete="CASCADE"), nullable=False, primary_key=True) tag = sa.Column(sa.String(60), nullable=False, primary_key=True) standard_attr = orm.relationship( 'StandardAttribute', backref=orm.backref('tags', lazy='joined', viewonly=True)) def _get_tag_list(tag_strings): tags = set() for tag_str in tag_strings: tags |= set(tag_str.split(',')) return list(tags) def apply_tag_filters(model, query, filters): """Apply tag filters There are four types of filter: `tags` -- One or more strings that will be used to filter results in an AND expression: T1 AND T2 `tags-any` -- One or more strings that will be used to filter results in an OR expression: T1 OR T2 `not-tags` -- One or more strings that will be used to filter results in a NOT AND expression: NOT (T1 AND T2) `not-tags-any` -- One or more strings that will be used to filter results in a NOT OR expression: NOT (T1 OR T2) Note: tag values can be specified as a comma-separated string. For example, 'GET /v2.0/networks?tags-any=red,blue' is equivalent to 'GET /v2.0/networks?tags-any=red&tags-any=blue', which means 'red' or 'blue'. """ if 'tags' in filters: tags = _get_tag_list(filters.pop('tags')) first_tag = tags.pop(0) query = query.join(Tag, model.standard_attr_id == Tag.standard_attr_id) query = query.filter(Tag.tag == first_tag) for tag in tags: tag_alias = aliased(Tag) query = query.join(tag_alias, model.standard_attr_id == tag_alias.standard_attr_id) query = query.filter(tag_alias.tag == tag) if 'tags-any' in filters: tags = _get_tag_list(filters.pop('tags-any')) query = query.join(Tag, model.standard_attr_id == Tag.standard_attr_id) query = query.filter(Tag.tag.in_(tags)) if 'not-tags' in filters: tags = _get_tag_list(filters.pop('not-tags')) first_tag = tags.pop(0) subq = query.session.query(Tag.standard_attr_id) subq = subq.filter(Tag.tag == first_tag) for tag in tags: tag_alias = aliased(Tag) subq = subq.join(tag_alias, Tag.standard_attr_id == tag_alias.standard_attr_id) subq = subq.filter(tag_alias.tag == tag) query = query.filter(~model.standard_attr_id.in_(subq)) if 'not-tags-any' in filters: tags = _get_tag_list(filters.pop('not-tags-any')) subq = query.session.query(Tag.standard_attr_id) subq = subq.filter(Tag.tag.in_(tags)) query = query.filter(~model.standard_attr_id.in_(subq)) return query neutron-8.4.0/neutron/db/bgp_dragentscheduler_db.py0000664000567000056710000002147213044372760023643 0ustar jenkinsjenkins00000000000000# Copyright 2016 Huawei Technologies India Pvt. Ltd. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License.
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import cfg from oslo_db import exception as db_exc from oslo_log import log as logging import sqlalchemy as sa from sqlalchemy import orm from sqlalchemy.orm import exc from neutron._i18n import _ from neutron._i18n import _LW from neutron.db import agents_db from neutron.db import agentschedulers_db as as_db from neutron.db import model_base from neutron.extensions import bgp_dragentscheduler as bgp_dras_ext from neutron.services.bgp.common import constants as bgp_consts LOG = logging.getLogger(__name__) BGP_DRAGENT_SCHEDULER_OPTS = [ cfg.StrOpt( 'bgp_drscheduler_driver', default='neutron.services.bgp.scheduler' '.bgp_dragent_scheduler.ChanceScheduler', help=_('Driver used for scheduling BGP speakers to BGP DrAgent')) ] cfg.CONF.register_opts(BGP_DRAGENT_SCHEDULER_OPTS) class BgpSpeakerDrAgentBinding(model_base.BASEV2): """Represents a mapping between BGP speaker and BGP DRAgent""" __tablename__ = 'bgp_speaker_dragent_bindings' bgp_speaker_id = sa.Column(sa.String(length=36), sa.ForeignKey("bgp_speakers.id", ondelete='CASCADE'), nullable=False) dragent = orm.relation(agents_db.Agent) agent_id = sa.Column(sa.String(length=36), sa.ForeignKey("agents.id", ondelete='CASCADE'), primary_key=True) class BgpDrAgentSchedulerDbMixin(bgp_dras_ext.BgpDrSchedulerPluginBase, as_db.AgentSchedulerDbMixin): bgp_drscheduler = None def schedule_unscheduled_bgp_speakers(self, context, host): if self.bgp_drscheduler: return self.bgp_drscheduler.schedule_unscheduled_bgp_speakers( context, host) else: LOG.warning(_LW("Cannot schedule BgpSpeaker to DrAgent. " "Reason: No scheduler registered.")) def schedule_bgp_speaker(self, context, created_bgp_speaker): if self.bgp_drscheduler: agents = self.bgp_drscheduler.schedule(context, created_bgp_speaker) for agent in agents: self._bgp_rpc.bgp_speaker_created(context, created_bgp_speaker['id'], agent.host) else: LOG.warning(_LW("Cannot schedule BgpSpeaker to DrAgent. 
" "Reason: No scheduler registered.")) def add_bgp_speaker_to_dragent(self, context, agent_id, speaker_id): """Associate a BgpDrAgent with a BgpSpeaker.""" try: self._save_bgp_speaker_dragent_binding(context, agent_id, speaker_id) except db_exc.DBDuplicateEntry: raise bgp_dras_ext.DrAgentAssociationError( agent_id=agent_id) LOG.debug('BgpSpeaker %(bgp_speaker_id)s added to ' 'BgpDrAgent %(agent_id)s', {'bgp_speaker_id': speaker_id, 'agent_id': agent_id}) def _save_bgp_speaker_dragent_binding(self, context, agent_id, speaker_id): with context.session.begin(subtransactions=True): agent_db = self._get_agent(context, agent_id) agent_up = agent_db['admin_state_up'] is_agent_bgp = (agent_db['agent_type'] == bgp_consts.AGENT_TYPE_BGP_ROUTING) if not is_agent_bgp or not agent_up: raise bgp_dras_ext.DrAgentInvalid(id=agent_id) binding = BgpSpeakerDrAgentBinding() binding.bgp_speaker_id = speaker_id binding.agent_id = agent_id context.session.add(binding) self._bgp_rpc.bgp_speaker_created(context, speaker_id, agent_db.host) def remove_bgp_speaker_from_dragent(self, context, agent_id, speaker_id): with context.session.begin(subtransactions=True): agent_db = self._get_agent(context, agent_id) is_agent_bgp = (agent_db['agent_type'] == bgp_consts.AGENT_TYPE_BGP_ROUTING) if not is_agent_bgp: raise bgp_dras_ext.DrAgentInvalid(id=agent_id) query = context.session.query(BgpSpeakerDrAgentBinding) query = query.filter_by(bgp_speaker_id=speaker_id, agent_id=agent_id) num_deleted = query.delete() if not num_deleted: raise bgp_dras_ext.DrAgentNotHostingBgpSpeaker( bgp_speaker_id=speaker_id, agent_id=agent_id) LOG.debug('BgpSpeaker %(bgp_speaker_id)s removed from ' 'BgpDrAgent %(agent_id)s', {'bgp_speaker_id': speaker_id, 'agent_id': agent_id}) self._bgp_rpc.bgp_speaker_removed(context, speaker_id, agent_db.host) def get_dragents_hosting_bgp_speakers(self, context, bgp_speaker_ids, active=None, admin_state_up=None): query = context.session.query(BgpSpeakerDrAgentBinding) query = query.options(orm.contains_eager( BgpSpeakerDrAgentBinding.dragent)) query = query.join(BgpSpeakerDrAgentBinding.dragent) if len(bgp_speaker_ids) == 1: query = query.filter( BgpSpeakerDrAgentBinding.bgp_speaker_id == ( bgp_speaker_ids[0])) elif bgp_speaker_ids: query = query.filter( BgpSpeakerDrAgentBinding.bgp_speaker_id in bgp_speaker_ids) if admin_state_up is not None: query = query.filter(agents_db.Agent.admin_state_up == admin_state_up) return [binding.dragent for binding in query if as_db.AgentSchedulerDbMixin.is_eligible_agent( active, binding.dragent)] def get_dragent_bgp_speaker_bindings(self, context): return context.session.query(BgpSpeakerDrAgentBinding).all() def list_dragent_hosting_bgp_speaker(self, context, speaker_id): dragents = self.get_dragents_hosting_bgp_speakers(context, [speaker_id]) agent_ids = [dragent.id for dragent in dragents] if not agent_ids: return {'agents': []} return {'agents': self.get_agents(context, filters={'id': agent_ids})} def list_bgp_speaker_on_dragent(self, context, agent_id): query = context.session.query(BgpSpeakerDrAgentBinding.bgp_speaker_id) query = query.filter_by(agent_id=agent_id) bgp_speaker_ids = [item[0] for item in query] if not bgp_speaker_ids: # Exception will be thrown if the requested agent does not exist. 
self._get_agent(context, agent_id) return {'bgp_speakers': []} return {'bgp_speakers': self.get_bgp_speakers(context, filters={'id': bgp_speaker_ids})} def get_bgp_speakers_for_agent_host(self, context, host): agent = self._get_agent_by_type_and_host( context, bgp_consts.AGENT_TYPE_BGP_ROUTING, host) if not agent.admin_state_up: return {} query = context.session.query(BgpSpeakerDrAgentBinding) query = query.filter(BgpSpeakerDrAgentBinding.agent_id == agent.id) try: binding = query.one() except exc.NoResultFound: return [] bgp_speaker = self.get_bgp_speaker_with_advertised_routes( context, binding['bgp_speaker_id']) return [bgp_speaker] def get_bgp_speaker_by_speaker_id(self, context, bgp_speaker_id): try: return self.get_bgp_speaker(context, bgp_speaker_id) except exc.NoResultFound: return {} def get_bgp_peer_by_peer_id(self, context, bgp_peer_id): try: return self.get_bgp_peer(context, bgp_peer_id) except exc.NoResultFound: return {} neutron-8.4.0/neutron/db/l3_db.py0000664000567000056710000024527213044372760020014 0ustar jenkinsjenkins00000000000000# Copyright 2012 VMware, Inc. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import itertools import netaddr from oslo_log import log as logging from oslo_utils import excutils from oslo_utils import uuidutils import six import sqlalchemy as sa from sqlalchemy import orm from sqlalchemy.orm import exc from neutron._i18n import _, _LI from neutron.api.rpc.agentnotifiers import l3_rpc_agent_api from neutron.api.v2 import attributes from neutron.callbacks import events from neutron.callbacks import exceptions from neutron.callbacks import registry from neutron.callbacks import resources from neutron.common import constants as l3_constants from neutron.common import exceptions as n_exc from neutron.common import ipv6_utils from neutron.common import rpc as n_rpc from neutron.common import utils from neutron.db import api as db_api from neutron.db import l3_agentschedulers_db as l3_agt from neutron.db import model_base from neutron.db import models_v2 from neutron.db import standardattrdescription_db as st_attr from neutron.extensions import external_net from neutron.extensions import l3 from neutron import manager from neutron.plugins.common import constants from neutron.plugins.common import utils as p_utils LOG = logging.getLogger(__name__) DEVICE_OWNER_ROUTER_INTF = l3_constants.DEVICE_OWNER_ROUTER_INTF DEVICE_OWNER_ROUTER_GW = l3_constants.DEVICE_OWNER_ROUTER_GW DEVICE_OWNER_FLOATINGIP = l3_constants.DEVICE_OWNER_FLOATINGIP EXTERNAL_GW_INFO = l3.EXTERNAL_GW_INFO # Maps API field to DB column # API parameter name and Database column names may differ. # Useful to keep the filtering between API and Database. 
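# For example (illustrative, not part of the original source): a filter
# on 'port_id' in a floating IP API request must be rewritten to match
# the FloatingIP.fixed_port_id column defined below; the map that
# follows records exactly that translation.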
API_TO_DB_COLUMN_MAP = {'port_id': 'fixed_port_id'} CORE_ROUTER_ATTRS = ('id', 'name', 'tenant_id', 'admin_state_up', 'status') class RouterPort(model_base.BASEV2): router_id = sa.Column( sa.String(36), sa.ForeignKey('routers.id', ondelete="CASCADE"), primary_key=True) port_id = sa.Column( sa.String(36), sa.ForeignKey('ports.id', ondelete="CASCADE"), primary_key=True) # The port_type attribute is redundant as the port table already specifies # it in DEVICE_OWNER. However, this redundancy enables more efficient # queries on router ports, and also prevents potential error-prone # conditions which might originate from users altering the DEVICE_OWNER # property of router ports. port_type = sa.Column(sa.String(attributes.DEVICE_OWNER_MAX_LEN)) port = orm.relationship( models_v2.Port, backref=orm.backref('routerport', uselist=False, cascade="all,delete"), lazy='joined') class Router(model_base.HasStandardAttributes, model_base.BASEV2, model_base.HasId, model_base.HasTenant): """Represents a v2 neutron router.""" name = sa.Column(sa.String(attributes.NAME_MAX_LEN)) status = sa.Column(sa.String(16)) admin_state_up = sa.Column(sa.Boolean) gw_port_id = sa.Column(sa.String(36), sa.ForeignKey('ports.id')) gw_port = orm.relationship(models_v2.Port, lazy='joined') attached_ports = orm.relationship( RouterPort, backref='router', lazy='dynamic') l3_agents = orm.relationship( 'Agent', lazy='joined', viewonly=True, secondary=l3_agt.RouterL3AgentBinding.__table__) class FloatingIP(model_base.HasStandardAttributes, model_base.BASEV2, model_base.HasId, model_base.HasTenant): """Represents a floating IP address. This IP address may or may not be allocated to a tenant, and may or may not be associated with an internal port/ip address/router. """ floating_ip_address = sa.Column(sa.String(64), nullable=False) floating_network_id = sa.Column(sa.String(36), nullable=False) floating_port_id = sa.Column(sa.String(36), sa.ForeignKey('ports.id', ondelete="CASCADE"), nullable=False) # The ORM-level "delete" cascade relationship between port and floating_ip # is required for causing the in-Python event "after_delete" that is needed # for proper quota management in the case when cascade removal of the # floating_ip happens after removal of the floating_port port = orm.relationship(models_v2.Port, backref=orm.backref('floating_ips', cascade='all,delete-orphan'), foreign_keys='FloatingIP.floating_port_id') fixed_port_id = sa.Column(sa.String(36), sa.ForeignKey('ports.id')) fixed_ip_address = sa.Column(sa.String(64)) router_id = sa.Column(sa.String(36), sa.ForeignKey('routers.id')) # Additional attribute for keeping track of the router where the floating # ip was associated in order to be able to ensure consistency even if an # asynchronous backend is unavailable when the floating IP is disassociated last_known_router_id = sa.Column(sa.String(36)) status = sa.Column(sa.String(16)) router = orm.relationship(Router, backref='floating_ips') class L3_NAT_dbonly_mixin(l3.RouterPluginBase, st_attr.StandardAttrDescriptionMixin): """Mixin class to add L3/NAT router methods to db_base_plugin_v2.""" router_device_owners = ( DEVICE_OWNER_ROUTER_INTF, DEVICE_OWNER_ROUTER_GW, DEVICE_OWNER_FLOATINGIP ) @property def _core_plugin(self): return manager.NeutronManager.get_plugin() def _get_router(self, context, router_id): try: router = self._get_by_id(context, Router, router_id) except exc.NoResultFound: raise l3.RouterNotFound(router_id=router_id) return router def _make_router_dict(self, router, fields=None, process_extensions=True): res =
dict((key, router[key]) for key in CORE_ROUTER_ATTRS) if router['gw_port_id']: ext_gw_info = { 'network_id': router.gw_port['network_id'], 'external_fixed_ips': [{'subnet_id': ip["subnet_id"], 'ip_address': ip["ip_address"]} for ip in router.gw_port['fixed_ips']]} else: ext_gw_info = None res.update({ EXTERNAL_GW_INFO: ext_gw_info, 'gw_port_id': router['gw_port_id'], }) # NOTE(salv-orlando): The following assumes this mixin is used in a # class inheriting from CommonDbMixin, which is true for all existing # plugins. if process_extensions: self._apply_dict_extend_functions(l3.ROUTERS, res, router) return self._fields(res, fields) def filter_allocating_and_missing_routers(self, context, routers): """Filter out routers that shouldn't go to the agent. Any routers in the ALLOCATING state will be excluded by this query because this indicates that the server is still building necessary dependent sub-resources for the router and it is not ready for consumption by the agent. It will also filter out any routers that no longer exist to prevent conditions where only part of a router's information was populated in sync_routers due to it being deleted during the sync. """ router_ids = set(r['id'] for r in routers) query = (context.session.query(Router.id). filter( Router.id.in_(router_ids), Router.status != l3_constants.ROUTER_STATUS_ALLOCATING)) valid_routers = set(r.id for r in query) if router_ids - valid_routers: LOG.debug("Removing routers that were either concurrently " "deleted or are in the ALLOCATING state: %s", (router_ids - valid_routers)) return [r for r in routers if r['id'] in valid_routers] def _create_router_db(self, context, router, tenant_id): """Create the DB object.""" with context.session.begin(subtransactions=True): # pre-generate id so it will be available when # configuring external gw port status = router.get('status', l3_constants.ROUTER_STATUS_ACTIVE) router_db = Router(id=(router.get('id') or uuidutils.generate_uuid()), tenant_id=tenant_id, name=router['name'], admin_state_up=router['admin_state_up'], status=status, description=router.get('description')) context.session.add(router_db) return router_db def create_router(self, context, router): r = router['router'] gw_info = r.pop(EXTERNAL_GW_INFO, None) router_db = self._create_router_db(context, r, r['tenant_id']) try: if gw_info: self._update_router_gw_info(context, router_db['id'], gw_info, router=router_db) except Exception: with excutils.save_and_reraise_exception(): LOG.debug("Could not update gateway info, deleting router.") self.delete_router(context, router_db.id) return self._make_router_dict(router_db) def _update_router_db(self, context, router_id, data): """Update the DB object.""" with context.session.begin(subtransactions=True): router_db = self._get_router(context, router_id) if data: router_db.update(data) return router_db def update_router(self, context, id, router): r = router['router'] gw_info = r.pop(EXTERNAL_GW_INFO, attributes.ATTR_NOT_SPECIFIED) # check whether router needs and can be rescheduled to the proper # l3 agent (associated with given external network); # do check before update in DB as an exception will be raised # in case no proper l3 agent found if gw_info != attributes.ATTR_NOT_SPECIFIED: candidates = self._check_router_needs_rescheduling( context, id, gw_info) # Update the gateway outside of the DB update since it involves L2 # calls that don't make sense to rollback and may cause deadlocks # in a transaction. 
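# Clarifying note, not part of the original source: updating the gateway
# creates or deletes ports through the core plugin, which may call out to
# external systems; doing so inside the router DB transaction could hold
# database locks across those calls.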
self._update_router_gw_info(context, id, gw_info) else: candidates = None router_db = self._update_router_db(context, id, r) if candidates: l3_plugin = manager.NeutronManager.get_service_plugins().get( constants.L3_ROUTER_NAT) l3_plugin.reschedule_router(context, id, candidates) return self._make_router_dict(router_db) def _check_router_needs_rescheduling(self, context, router_id, gw_info): """Checks whether router's l3 agent can handle the given network When external_network_bridge is set, each L3 agent can be associated with at most one external network. If router's new external gateway is on other network then the router needs to be rescheduled to the proper l3 agent. If external_network_bridge is not set then the agent can support multiple external networks and rescheduling is not needed :return: list of candidate agents if rescheduling needed, None otherwise; raises exception if there is no eligible l3 agent associated with target external network """ # TODO(obondarev): rethink placement of this func as l3 db manager is # not really a proper place for agent scheduling stuff network_id = gw_info.get('network_id') if gw_info else None if not network_id: return nets = self._core_plugin.get_networks( context, {external_net.EXTERNAL: [True]}) # nothing to do if there is only one external network if len(nets) <= 1: return # first get plugin supporting l3 agent scheduling # (either l3 service plugin or core_plugin) l3_plugin = manager.NeutronManager.get_service_plugins().get( constants.L3_ROUTER_NAT) if (not utils.is_extension_supported( l3_plugin, l3_constants.L3_AGENT_SCHEDULER_EXT_ALIAS) or l3_plugin.router_scheduler is None): # that might mean that we are dealing with non-agent-based # implementation of l3 services return cur_agents = l3_plugin.list_l3_agents_hosting_router( context, router_id)['agents'] for agent in cur_agents: ext_net_id = agent['configurations'].get( 'gateway_external_network_id') ext_bridge = agent['configurations'].get( 'external_network_bridge', 'br-ex') if (ext_net_id == network_id or (not ext_net_id and not ext_bridge)): return # otherwise find l3 agent with matching gateway_external_network_id active_agents = l3_plugin.get_l3_agents(context, active=True) router = { 'id': router_id, 'external_gateway_info': {'network_id': network_id} } candidates = l3_plugin.get_l3_agent_candidates(context, router, active_agents) if not candidates: msg = (_('No eligible l3 agent associated with external network ' '%s found') % network_id) raise n_exc.BadRequest(resource='router', msg=msg) return candidates def _create_router_gw_port(self, context, router, network_id, ext_ips): # Port has no 'tenant-id', as it is hidden from user port_data = {'tenant_id': '', # intentionally not set 'network_id': network_id, 'fixed_ips': ext_ips or attributes.ATTR_NOT_SPECIFIED, 'device_id': router['id'], 'device_owner': DEVICE_OWNER_ROUTER_GW, 'admin_state_up': True, 'name': ''} gw_port = p_utils.create_port(self._core_plugin, context.elevated(), {'port': port_data}) if not gw_port['fixed_ips']: LOG.debug('No IPs available for external network %s', network_id) with context.session.begin(subtransactions=True): router.gw_port = self._core_plugin._get_port(context.elevated(), gw_port['id']) router_port = RouterPort( router_id=router.id, port_id=gw_port['id'], port_type=DEVICE_OWNER_ROUTER_GW ) context.session.add(router) context.session.add(router_port) def _validate_gw_info(self, context, gw_port, info, ext_ips): network_id = info['network_id'] if info else None if network_id: network_db = 
self._core_plugin._get_network(context, network_id) if not network_db.external: msg = _("Network %s is not an external network") % network_id raise n_exc.BadRequest(resource='router', msg=msg) if ext_ips: subnets = self._core_plugin.get_subnets_by_network(context, network_id) for s in subnets: if not s['gateway_ip']: continue for ext_ip in ext_ips: if ext_ip.get('ip_address') == s['gateway_ip']: msg = _("External IP %s is the same as the " "gateway IP") % ext_ip.get('ip_address') raise n_exc.BadRequest(resource='router', msg=msg) return network_id def _delete_current_gw_port(self, context, router_id, router, new_network_id): """Delete gw port if attached to an old network.""" port_requires_deletion = ( router.gw_port and router.gw_port['network_id'] != new_network_id) if not port_requires_deletion: return admin_ctx = context.elevated() old_network_id = router.gw_port['network_id'] if self.get_floatingips_count( admin_ctx, {'router_id': [router_id]}): raise l3.RouterExternalGatewayInUseByFloatingIp( router_id=router_id, net_id=router.gw_port['network_id']) gw_ips = [x['ip_address'] for x in router.gw_port.fixed_ips] with context.session.begin(subtransactions=True): gw_port = router.gw_port router.gw_port = None context.session.add(router) context.session.expire(gw_port) self._check_router_gw_port_in_use(context, router_id) self._core_plugin.delete_port( admin_ctx, gw_port['id'], l3_port_check=False) registry.notify(resources.ROUTER_GATEWAY, events.AFTER_DELETE, self, router_id=router_id, network_id=old_network_id, gateway_ips=gw_ips) def _check_router_gw_port_in_use(self, context, router_id): try: kwargs = {'context': context, 'router_id': router_id} registry.notify( resources.ROUTER_GATEWAY, events.BEFORE_DELETE, self, **kwargs) except exceptions.CallbackFailure as e: with excutils.save_and_reraise_exception(): # NOTE(armax): preserve old check's behavior if len(e.errors) == 1: raise e.errors[0].error raise l3.RouterInUse(router_id=router_id, reason=e) def _create_gw_port(self, context, router_id, router, new_network_id, ext_ips): new_valid_gw_port_attachment = ( new_network_id and (not router.gw_port or router.gw_port['network_id'] != new_network_id)) if new_valid_gw_port_attachment: subnets = self._core_plugin.get_subnets_by_network(context, new_network_id) try: kwargs = {'context': context, 'router_id': router_id, 'network_id': new_network_id, 'subnets': subnets} registry.notify( resources.ROUTER_GATEWAY, events.BEFORE_CREATE, self, **kwargs) except exceptions.CallbackFailure as e: # raise the underlying exception raise e.errors[0].error self._check_for_dup_router_subnets(context, router, new_network_id, subnets) self._create_router_gw_port(context, router, new_network_id, ext_ips) registry.notify(resources.ROUTER_GATEWAY, events.AFTER_CREATE, self._create_gw_port, gw_ips=ext_ips, network_id=new_network_id, router_id=router_id) def _update_current_gw_port(self, context, router_id, router, ext_ips): self._core_plugin.update_port(context, router.gw_port['id'], {'port': {'fixed_ips': ext_ips}}) context.session.expire(router.gw_port) def _update_router_gw_info(self, context, router_id, info, router=None): # TODO(salvatore-orlando): guarantee atomic behavior also across # operations that span beyond the model classes handled by this # class (e.g.: delete_port) router = router or self._get_router(context, router_id) gw_port = router.gw_port ext_ips = info.get('external_fixed_ips') if info else [] ext_ip_change = self._check_for_external_ip_change( context, gw_port, ext_ips) network_id = 
self._validate_gw_info(context, gw_port, info, ext_ips) if gw_port and ext_ip_change and gw_port['network_id'] == network_id: self._update_current_gw_port(context, router_id, router, ext_ips) else: self._delete_current_gw_port(context, router_id, router, network_id) self._create_gw_port(context, router_id, router, network_id, ext_ips) def _check_for_external_ip_change(self, context, gw_port, ext_ips): # determine if new external IPs differ from the existing fixed_ips if not ext_ips: # no external_fixed_ips were included return False if not gw_port: return True subnet_ids = set(ip['subnet_id'] for ip in gw_port['fixed_ips']) new_subnet_ids = set(f['subnet_id'] for f in ext_ips if f.get('subnet_id')) subnet_change = not new_subnet_ids == subnet_ids if subnet_change: return True ip_addresses = set(ip['ip_address'] for ip in gw_port['fixed_ips']) new_ip_addresses = set(f['ip_address'] for f in ext_ips if f.get('ip_address')) ip_address_change = not ip_addresses == new_ip_addresses return ip_address_change def _ensure_router_not_in_use(self, context, router_id): """Ensure that no internal network interface is attached to the router. """ router = self._get_router(context, router_id) device_owner = self._get_device_owner(context, router) if any(rp.port_type == device_owner for rp in router.attached_ports.all()): raise l3.RouterInUse(router_id=router_id) return router def delete_router(self, context, id): #TODO(nati) Refactor here when we have router insertion model router = self._ensure_router_not_in_use(context, id) self._delete_current_gw_port(context, id, router, None) router_ports = router.attached_ports.all() for rp in router_ports: self._core_plugin.delete_port(context.elevated(), rp.port.id, l3_port_check=False) with context.session.begin(subtransactions=True): context.session.delete(router) def get_router(self, context, id, fields=None): router = self._get_router(context, id) return self._make_router_dict(router, fields) def get_routers(self, context, filters=None, fields=None, sorts=None, limit=None, marker=None, page_reverse=False): marker_obj = self._get_marker_obj(context, 'router', limit, marker) return self._get_collection(context, Router, self._make_router_dict, filters=filters, fields=fields, sorts=sorts, limit=limit, marker_obj=marker_obj, page_reverse=page_reverse) def get_routers_count(self, context, filters=None): return self._get_collection_count(context, Router, filters=filters) def _check_for_dup_router_subnets(self, context, router, network_id, new_subnets): # It's possible these ports are on the same network, but # different subnets. 
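# NOTE: the overlap test at the end of this method applies
# netaddr.all_matching_cidrs in both directions because containment
# is asymmetric; a minimal sketch of that behavior (the CIDRs are
# hypothetical examples):
#
#     import netaddr
#     netaddr.all_matching_cidrs(netaddr.IPNetwork('10.0.0.0/25'),
#                                ['10.0.0.0/24'])
#     # -> [IPNetwork('10.0.0.0/24')]: the /25 lies inside the /24
#     netaddr.all_matching_cidrs(netaddr.IPNetwork('10.0.0.0/24'),
#                                ['10.0.0.0/25'])
#     # -> []: the /24 is not contained in the /25, hence the check
#     # in both directions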
new_subnet_ids = {s['id'] for s in new_subnets} router_subnets = [] for p in (rp.port for rp in router.attached_ports): for ip in p['fixed_ips']: if ip['subnet_id'] in new_subnet_ids: msg = (_("Router already has a port on subnet %s") % ip['subnet_id']) raise n_exc.BadRequest(resource='router', msg=msg) router_subnets.append(ip['subnet_id']) # Ignore temporary Prefix Delegation CIDRs new_subnets = [s for s in new_subnets if s['cidr'] != l3_constants.PROVISIONAL_IPV6_PD_PREFIX] id_filter = {'id': router_subnets} subnets = self._core_plugin.get_subnets(context.elevated(), filters=id_filter) for sub in subnets: cidr = sub['cidr'] ipnet = netaddr.IPNetwork(cidr) for s in new_subnets: new_cidr = s['cidr'] new_ipnet = netaddr.IPNetwork(new_cidr) match1 = netaddr.all_matching_cidrs(new_ipnet, [cidr]) match2 = netaddr.all_matching_cidrs(ipnet, [new_cidr]) if match1 or match2: data = {'subnet_cidr': new_cidr, 'subnet_id': s['id'], 'cidr': cidr, 'sub_id': sub['id']} msg = (_("Cidr %(subnet_cidr)s of subnet " "%(subnet_id)s overlaps with cidr %(cidr)s " "of subnet %(sub_id)s") % data) raise n_exc.BadRequest(resource='router', msg=msg) def _get_device_owner(self, context, router=None): """Get device_owner for the specified router.""" # NOTE(armando-migliaccio): in the base case this is invariant return DEVICE_OWNER_ROUTER_INTF def _validate_interface_info(self, interface_info, for_removal=False): port_id_specified = interface_info and 'port_id' in interface_info subnet_id_specified = interface_info and 'subnet_id' in interface_info if not (port_id_specified or subnet_id_specified): msg = _("Either subnet_id or port_id must be specified") raise n_exc.BadRequest(resource='router', msg=msg) for key in ('port_id', 'subnet_id'): if key not in interface_info: continue err = attributes._validate_uuid(interface_info[key]) if err: raise n_exc.BadRequest(resource='router', msg=err) if not for_removal: if port_id_specified and subnet_id_specified: msg = _("Cannot specify both subnet-id and port-id") raise n_exc.BadRequest(resource='router', msg=msg) return port_id_specified, subnet_id_specified def _check_router_port(self, context, port_id, device_id): port = self._core_plugin.get_port(context, port_id) if port['device_id'] != device_id: raise n_exc.PortInUse(net_id=port['network_id'], port_id=port['id'], device_id=port['device_id']) if not port['fixed_ips']: msg = _('Router port must have at least one fixed IP') raise n_exc.BadRequest(resource='router', msg=msg) return port def _validate_router_port_info(self, context, router, port_id): with db_api.autonested_transaction(context.session): # check again within transaction to mitigate race port = self._check_router_port(context, port_id, router.id) # Only allow one router port with IPv6 subnets per network id if self._port_has_ipv6_address(port): for existing_port in (rp.port for rp in router.attached_ports): if (existing_port['network_id'] == port['network_id'] and self._port_has_ipv6_address(existing_port)): msg = _("Cannot have multiple router ports with the " "same network id if both contain IPv6 " "subnets. 
Existing port %(p)s has IPv6 " "subnet(s) and network id %(nid)s") raise n_exc.BadRequest(resource='router', msg=msg % { 'p': existing_port['id'], 'nid': existing_port['network_id']}) fixed_ips = [ip for ip in port['fixed_ips']] subnets = [] for fixed_ip in fixed_ips: subnet = self._core_plugin.get_subnet(context, fixed_ip['subnet_id']) subnets.append(subnet) if subnets: self._check_for_dup_router_subnets(context, router, port['network_id'], subnets) # Keep the restriction against multiple IPv4 subnets if len([s for s in subnets if s['ip_version'] == 4]) > 1: msg = _("Cannot have multiple " "IPv4 subnets on router port") raise n_exc.BadRequest(resource='router', msg=msg) return port, subnets def _add_interface_by_port(self, context, router, port_id, owner): # Update owner before actual process in order to avoid the # case where a port might get attached to a router without the # owner successfully updating due to an unavailable backend. port = self._check_router_port(context, port_id, '') prev_owner = port['device_owner'] self._core_plugin.update_port( context, port_id, {'port': {'device_id': router.id, 'device_owner': owner}}) try: return self._validate_router_port_info(context, router, port_id) except Exception: with excutils.save_and_reraise_exception(): self._core_plugin.update_port( context, port_id, {'port': {'device_id': '', 'device_owner': prev_owner}}) def _port_has_ipv6_address(self, port): for fixed_ip in port['fixed_ips']: if netaddr.IPNetwork(fixed_ip['ip_address']).version == 6: return True def _find_ipv6_router_port_by_network(self, router, net_id): for port in router.attached_ports: p = port['port'] if p['network_id'] == net_id and self._port_has_ipv6_address(p): return port def _add_interface_by_subnet(self, context, router, subnet_id, owner): subnet = self._core_plugin.get_subnet(context, subnet_id) if not subnet['gateway_ip']: msg = _('Subnet for router interface must have a gateway IP') raise n_exc.BadRequest(resource='router', msg=msg) if (subnet['ip_version'] == 6 and subnet['ipv6_ra_mode'] is None and subnet['ipv6_address_mode'] is not None): msg = (_('IPv6 subnet %s configured to receive RAs from an ' 'external router cannot be added to Neutron Router.') % subnet['id']) raise n_exc.BadRequest(resource='router', msg=msg) self._check_for_dup_router_subnets(context, router, subnet['network_id'], [subnet]) fixed_ip = {'ip_address': subnet['gateway_ip'], 'subnet_id': subnet['id']} if (subnet['ip_version'] == 6 and not ipv6_utils.is_ipv6_pd_enabled(subnet)): # Add new prefix to an existing ipv6 port with the same network id # if one exists port = self._find_ipv6_router_port_by_network(router, subnet['network_id']) if port: fixed_ips = list(port['port']['fixed_ips']) fixed_ips.append(fixed_ip) return self._core_plugin.update_port(context, port['port_id'], {'port': {'fixed_ips': fixed_ips}}), [subnet], False port_data = {'tenant_id': subnet['tenant_id'], 'network_id': subnet['network_id'], 'fixed_ips': [fixed_ip], 'admin_state_up': True, 'device_id': router.id, 'device_owner': owner, 'name': ''} return p_utils.create_port(self._core_plugin, context, {'port': port_data}), [subnet], True @staticmethod def _make_router_interface_info( router_id, tenant_id, port_id, network_id, subnet_id, subnet_ids): return { 'id': router_id, 'tenant_id': tenant_id, 'port_id': port_id, 'network_id': network_id, 'subnet_id': subnet_id, # deprecated by IPv6 multi-prefix 'subnet_ids': subnet_ids } def add_router_interface(self, context, router_id, interface_info): router = 
self._get_router(context, router_id) add_by_port, add_by_sub = self._validate_interface_info(interface_info) device_owner = self._get_device_owner(context, router_id) # This should be True unless adding an IPv6 prefix to an existing port new_port = True if add_by_port: port, subnets = self._add_interface_by_port( context, router, interface_info['port_id'], device_owner) # add_by_subnet is not used here, because the validation logic of # _validate_interface_info ensures that either of add_by_* is True. else: port, subnets, new_port = self._add_interface_by_subnet( context, router, interface_info['subnet_id'], device_owner) if new_port: with context.session.begin(subtransactions=True): router_port = RouterPort( port_id=port['id'], router_id=router.id, port_type=device_owner ) context.session.add(router_port) gw_ips = [] gw_network_id = None if router.gw_port: gw_network_id = router.gw_port.network_id gw_ips = router.gw_port.fixed_ips registry.notify(resources.ROUTER_INTERFACE, events.AFTER_CREATE, self, context=context, network_id=gw_network_id, gateway_ips=gw_ips, cidrs=[x['cidr'] for x in subnets], port_id=port['id'], router_id=router_id, port=port, interface_info=interface_info) return self._make_router_interface_info( router.id, port['tenant_id'], port['id'], port['network_id'], subnets[-1]['id'], [subnet['id'] for subnet in subnets]) def _confirm_router_interface_not_in_use(self, context, router_id, subnet_id): subnet = self._core_plugin.get_subnet(context, subnet_id) subnet_cidr = netaddr.IPNetwork(subnet['cidr']) fip_qry = context.session.query(FloatingIP) try: kwargs = {'context': context, 'subnet_id': subnet_id} registry.notify( resources.ROUTER_INTERFACE, events.BEFORE_DELETE, self, **kwargs) except exceptions.CallbackFailure as e: with excutils.save_and_reraise_exception(): # NOTE(armax): preserve old check's behavior if len(e.errors) == 1: raise e.errors[0].error raise l3.RouterInUse(router_id=router_id, reason=e) for fip_db in fip_qry.filter_by(router_id=router_id): if netaddr.IPAddress(fip_db['fixed_ip_address']) in subnet_cidr: raise l3.RouterInterfaceInUseByFloatingIP( router_id=router_id, subnet_id=subnet_id) def _remove_interface_by_port(self, context, router_id, port_id, subnet_id, owner): qry = context.session.query(RouterPort) qry = qry.filter_by( port_id=port_id, router_id=router_id, port_type=owner ) try: port_db = qry.one().port except exc.NoResultFound: raise l3.RouterInterfaceNotFound(router_id=router_id, port_id=port_id) port_subnet_ids = [fixed_ip['subnet_id'] for fixed_ip in port_db['fixed_ips']] if subnet_id and subnet_id not in port_subnet_ids: raise n_exc.SubnetMismatchForPort( port_id=port_id, subnet_id=subnet_id) subnets = [self._core_plugin.get_subnet(context, port_subnet_id) for port_subnet_id in port_subnet_ids] for port_subnet_id in port_subnet_ids: self._confirm_router_interface_not_in_use( context, router_id, port_subnet_id) self._core_plugin.delete_port(context, port_db['id'], l3_port_check=False) return (port_db, subnets) def _remove_interface_by_subnet(self, context, router_id, subnet_id, owner): self._confirm_router_interface_not_in_use( context, router_id, subnet_id) subnet = self._core_plugin.get_subnet(context, subnet_id) try: rport_qry = context.session.query(models_v2.Port).join(RouterPort) ports = rport_qry.filter( RouterPort.router_id == router_id, RouterPort.port_type == owner, models_v2.Port.network_id == subnet['network_id'] ) for p in ports: port_subnets = [fip['subnet_id'] for fip in p['fixed_ips']] if subnet_id in port_subnets and 
len(port_subnets) > 1: # multiple prefix port - delete prefix from port fixed_ips = [fip for fip in p['fixed_ips'] if fip['subnet_id'] != subnet_id] self._core_plugin.update_port(context, p['id'], {'port': {'fixed_ips': fixed_ips}}) return (p, [subnet]) elif subnet_id in port_subnets: # only one subnet on port - delete the port self._core_plugin.delete_port(context, p['id'], l3_port_check=False) return (p, [subnet]) except exc.NoResultFound: pass raise l3.RouterInterfaceNotFoundForSubnet(router_id=router_id, subnet_id=subnet_id) def remove_router_interface(self, context, router_id, interface_info): remove_by_port, remove_by_subnet = ( self._validate_interface_info(interface_info, for_removal=True) ) port_id = interface_info.get('port_id') subnet_id = interface_info.get('subnet_id') device_owner = self._get_device_owner(context, router_id) if remove_by_port: port, subnets = self._remove_interface_by_port(context, router_id, port_id, subnet_id, device_owner) # remove_by_subnet is not used here, because the validation logic of # _validate_interface_info ensures that at least one of remove_by_* # is True. else: port, subnets = self._remove_interface_by_subnet( context, router_id, subnet_id, device_owner) gw_network_id = None gw_ips = [] router = self._get_router(context, router_id) if router.gw_port: gw_network_id = router.gw_port.network_id gw_ips = [x['ip_address'] for x in router.gw_port.fixed_ips] registry.notify(resources.ROUTER_INTERFACE, events.AFTER_DELETE, self, context=context, cidrs=[x['cidr'] for x in subnets], network_id=gw_network_id, gateway_ips=gw_ips, port=port) return self._make_router_interface_info(router_id, port['tenant_id'], port['id'], port['network_id'], subnets[0]['id'], [subnet['id'] for subnet in subnets]) def _get_floatingip(self, context, id): try: floatingip = self._get_by_id(context, FloatingIP, id) except exc.NoResultFound: raise l3.FloatingIPNotFound(floatingip_id=id) return floatingip def _make_floatingip_dict(self, floatingip, fields=None, process_extensions=True): res = {'id': floatingip['id'], 'tenant_id': floatingip['tenant_id'], 'floating_ip_address': floatingip['floating_ip_address'], 'floating_network_id': floatingip['floating_network_id'], 'router_id': floatingip['router_id'], 'port_id': floatingip['fixed_port_id'], 'fixed_ip_address': floatingip['fixed_ip_address'], 'status': floatingip['status']} # NOTE(mlavalle): The following assumes this mixin is used in a # class inheriting from CommonDbMixin, which is true for all existing # plugins. if process_extensions: self._apply_dict_extend_functions(l3.FLOATINGIPS, res, floatingip) return self._fields(res, fields) def _get_router_for_floatingip(self, context, internal_port, internal_subnet_id, external_network_id): subnet = self._core_plugin.get_subnet(context, internal_subnet_id) if not subnet['gateway_ip']: msg = (_('Cannot add floating IP to port on subnet %s ' 'which has no gateway_ip') % internal_subnet_id) raise n_exc.BadRequest(resource='floatingip', msg=msg) return self.get_router_for_floatingip(context, internal_port, subnet, external_network_id) # NOTE(yamamoto): This method is an override point for plugins # inheriting this class. Do not optimize this out. def get_router_for_floatingip(self, context, internal_port, internal_subnet, external_network_id): """Find a router to handle the floating-ip association. :param internal_port: The port for the fixed-ip. :param internal_subnet: The subnet for the fixed-ip. :param external_network_id: The external network for floating-ip.
:raises: ExternalGatewayForFloatingIPNotFound if no suitable router is found. """ # Find routers (with router_id and interface address) that # connect given internal subnet and the external network. # Among them, if the router's interface address matches # with subnet's gateway-ip, return that router. # Otherwise return the first router. gw_port = orm.aliased(models_v2.Port, name="gw_port") routerport_qry = context.session.query( RouterPort.router_id, models_v2.IPAllocation.ip_address).join( models_v2.Port, models_v2.IPAllocation).filter( models_v2.Port.network_id == internal_port['network_id'], RouterPort.port_type.in_(l3_constants.ROUTER_INTERFACE_OWNERS), models_v2.IPAllocation.subnet_id == internal_subnet['id'] ).join(gw_port, gw_port.device_id == RouterPort.router_id).filter( gw_port.network_id == external_network_id, gw_port.device_owner == l3_constants.DEVICE_OWNER_ROUTER_GW ).distinct() first_router_id = None for router_id, interface_ip in routerport_qry: if interface_ip == internal_subnet['gateway_ip']: return router_id if not first_router_id: first_router_id = router_id if first_router_id: return first_router_id raise l3.ExternalGatewayForFloatingIPNotFound( subnet_id=internal_subnet['id'], external_network_id=external_network_id, port_id=internal_port['id']) def _port_ipv4_fixed_ips(self, port): return [ip for ip in port['fixed_ips'] if netaddr.IPAddress(ip['ip_address']).version == 4] def _internal_fip_assoc_data(self, context, fip): """Retrieve internal port data for floating IP. Retrieve information concerning the internal port to which the floating IP should be associated. """ internal_port = self._core_plugin.get_port(context, fip['port_id']) if not internal_port['tenant_id'] == fip['tenant_id']: port_id = fip['port_id'] if 'id' in fip: floatingip_id = fip['id'] data = {'port_id': port_id, 'floatingip_id': floatingip_id} msg = (_('Port %(port_id)s is associated with a different ' 'tenant than Floating IP %(floatingip_id)s and ' 'therefore cannot be bound.') % data) else: msg = (_('Cannot create floating IP and bind it to ' 'Port %s, since that port is owned by a ' 'different tenant.') % port_id) raise n_exc.BadRequest(resource='floatingip', msg=msg) internal_subnet_id = None if fip.get('fixed_ip_address'): internal_ip_address = fip['fixed_ip_address'] if netaddr.IPAddress(internal_ip_address).version != 4: if 'id' in fip: data = {'floatingip_id': fip['id'], 'internal_ip': internal_ip_address} msg = (_('Floating IP %(floatingip_id)s is associated ' 'with non-IPv4 address %(internal_ip)s and ' 'therefore cannot be bound.') % data) else: msg = (_('Cannot create floating IP and bind it to %s, ' 'since that is not an IPv4 address.') % internal_ip_address) raise n_exc.BadRequest(resource='floatingip', msg=msg) for ip in internal_port['fixed_ips']: if ip['ip_address'] == internal_ip_address: internal_subnet_id = ip['subnet_id'] if not internal_subnet_id: msg = (_('Port %(id)s does not have fixed ip %(address)s') % {'id': internal_port['id'], 'address': internal_ip_address}) raise n_exc.BadRequest(resource='floatingip', msg=msg) else: ipv4_fixed_ips = self._port_ipv4_fixed_ips(internal_port) if not ipv4_fixed_ips: msg = (_('Cannot add floating IP to port %s that has ' 'no fixed IPv4 addresses') % internal_port['id']) raise n_exc.BadRequest(resource='floatingip', msg=msg) if len(ipv4_fixed_ips) > 1: msg = (_('Port %s has multiple fixed IPv4 addresses. 
Must ' 'provide a specific IPv4 address when assigning a ' 'floating IP') % internal_port['id']) raise n_exc.BadRequest(resource='floatingip', msg=msg) internal_ip_address = ipv4_fixed_ips[0]['ip_address'] internal_subnet_id = ipv4_fixed_ips[0]['subnet_id'] return internal_port, internal_subnet_id, internal_ip_address def _get_assoc_data(self, context, fip, floating_network_id): """Determine/extract data associated with the internal port. When a floating IP is associated with an internal port, we need to extract/determine some data associated with the internal port, including the internal_ip_address and router_id. Whether the internal port is owned by the tenant that owns the floating IP is confirmed by _get_router_for_floatingip. """ (internal_port, internal_subnet_id, internal_ip_address) = self._internal_fip_assoc_data(context, fip) router_id = self._get_router_for_floatingip(context, internal_port, internal_subnet_id, floating_network_id) return (fip['port_id'], internal_ip_address, router_id) def _check_and_get_fip_assoc(self, context, fip, floatingip_db): port_id = internal_ip_address = router_id = None if fip.get('fixed_ip_address') and not fip.get('port_id'): msg = _("fixed_ip_address cannot be specified without a port_id") raise n_exc.BadRequest(resource='floatingip', msg=msg) if fip.get('port_id'): port_id, internal_ip_address, router_id = self._get_assoc_data( context, fip, floatingip_db['floating_network_id']) fip_qry = context.session.query(FloatingIP) try: fip_qry.filter_by( fixed_port_id=fip['port_id'], floating_network_id=floatingip_db['floating_network_id'], fixed_ip_address=internal_ip_address).one() raise l3.FloatingIPPortAlreadyAssociated( port_id=fip['port_id'], fip_id=floatingip_db['id'], floating_ip_address=floatingip_db['floating_ip_address'], fixed_ip=internal_ip_address, net_id=floatingip_db['floating_network_id']) except exc.NoResultFound: pass return port_id, internal_ip_address, router_id def _update_fip_assoc(self, context, fip, floatingip_db, external_port): previous_router_id = floatingip_db.router_id port_id, internal_ip_address, router_id = ( self._check_and_get_fip_assoc(context, fip, floatingip_db)) update = {'fixed_ip_address': internal_ip_address, 'fixed_port_id': port_id, 'router_id': router_id, 'last_known_router_id': previous_router_id} if 'description' in fip: update['description'] = fip['description'] floatingip_db.update(update) next_hop = None if router_id: # NOTE(tidwellr) use admin context here # tenant may not own the router and that's OK on a FIP association router = self._get_router(context.elevated(), router_id) gw_port = router.gw_port for fixed_ip in gw_port.fixed_ips: addr = netaddr.IPAddress(fixed_ip.ip_address) if addr.version == l3_constants.IP_VERSION_4: next_hop = fixed_ip.ip_address break args = {'fixed_ip_address': internal_ip_address, 'fixed_port_id': port_id, 'router_id': router_id, 'last_known_router_id': previous_router_id, 'floating_ip_address': floatingip_db.floating_ip_address, 'floating_network_id': floatingip_db.floating_network_id, 'next_hop': next_hop, 'context': context} registry.notify(resources.FLOATING_IP, events.AFTER_UPDATE, self._update_fip_assoc, **args) def _is_ipv4_network(self, context, net_id): net = self._core_plugin._get_network(context, net_id) return any(s.ip_version == 4 for s in net.subnets) def _create_floatingip(self, context, floatingip, initial_status=l3_constants.FLOATINGIP_STATUS_ACTIVE): fip = floatingip['floatingip'] fip_id = uuidutils.generate_uuid() f_net_id =
fip['floating_network_id'] if not self._core_plugin._network_is_external(context, f_net_id): msg = _("Network %s is not a valid external network") % f_net_id raise n_exc.BadRequest(resource='floatingip', msg=msg) if not self._is_ipv4_network(context, f_net_id): msg = _("Network %s does not contain any IPv4 subnet") % f_net_id raise n_exc.BadRequest(resource='floatingip', msg=msg) dns_integration = utils.is_extension_supported(self._core_plugin, 'dns-integration') with context.session.begin(subtransactions=True): # This external port is never exposed to the tenant. # it is used purely for internal system and admin use when # managing floating IPs. port = {'tenant_id': '', # tenant intentionally not set 'network_id': f_net_id, 'admin_state_up': True, 'device_id': fip_id, 'device_owner': DEVICE_OWNER_FLOATINGIP, 'status': l3_constants.PORT_STATUS_NOTAPPLICABLE, 'name': ''} if fip.get('floating_ip_address'): port['fixed_ips'] = [ {'ip_address': fip['floating_ip_address']}] if fip.get('subnet_id'): port['fixed_ips'] = [ {'subnet_id': fip['subnet_id']}] # 'status' in port dict could not be updated by default, use # check_allow_post to stop the verification of system external_port = p_utils.create_port(self._core_plugin, context.elevated(), {'port': port}, check_allow_post=False) # Ensure IPv4 addresses are allocated on external port external_ipv4_ips = self._port_ipv4_fixed_ips(external_port) if not external_ipv4_ips: raise n_exc.ExternalIpAddressExhausted(net_id=f_net_id) floating_fixed_ip = external_ipv4_ips[0] floating_ip_address = floating_fixed_ip['ip_address'] floatingip_db = FloatingIP( id=fip_id, tenant_id=fip['tenant_id'], status=initial_status, floating_network_id=fip['floating_network_id'], floating_ip_address=floating_ip_address, floating_port_id=external_port['id'], description=fip.get('description')) # Update association with internal port # and define external IP address self._update_fip_assoc(context, fip, floatingip_db, external_port) context.session.add(floatingip_db) floatingip_dict = self._make_floatingip_dict( floatingip_db, process_extensions=False) if dns_integration: dns_data = self._process_dns_floatingip_create_precommit( context, floatingip_dict, fip) if dns_integration: self._process_dns_floatingip_create_postcommit(context, floatingip_dict, dns_data) self._apply_dict_extend_functions(l3.FLOATINGIPS, floatingip_dict, floatingip_db) return floatingip_dict def create_floatingip(self, context, floatingip, initial_status=l3_constants.FLOATINGIP_STATUS_ACTIVE): return self._create_floatingip(context, floatingip, initial_status) def _update_floatingip(self, context, id, floatingip): fip = floatingip['floatingip'] dns_integration = utils.is_extension_supported(self._core_plugin, 'dns-integration') with context.session.begin(subtransactions=True): floatingip_db = self._get_floatingip(context, id) old_floatingip = self._make_floatingip_dict(floatingip_db) fip['tenant_id'] = floatingip_db['tenant_id'] fip['id'] = id fip_port_id = floatingip_db['floating_port_id'] self._update_fip_assoc(context, fip, floatingip_db, self._core_plugin.get_port( context.elevated(), fip_port_id)) floatingip_dict = self._make_floatingip_dict(floatingip_db) if dns_integration: dns_data = self._process_dns_floatingip_update_precommit( context, floatingip_dict) if dns_integration: self._process_dns_floatingip_update_postcommit(context, floatingip_dict, dns_data) return old_floatingip, floatingip_dict def _floatingips_to_router_ids(self, floatingips): return list(set([floatingip['router_id'] for 
floatingip in floatingips if floatingip['router_id']])) def update_floatingip(self, context, id, floatingip): _old_floatingip, floatingip = self._update_floatingip( context, id, floatingip) return floatingip def update_floatingip_status(self, context, floatingip_id, status): """Update operational status for floating IP in neutron DB.""" fip_query = self._model_query(context, FloatingIP).filter( FloatingIP.id == floatingip_id) fip_query.update({'status': status}, synchronize_session=False) def _delete_floatingip(self, context, id): floatingip = self._get_floatingip(context, id) floatingip_dict = self._make_floatingip_dict(floatingip) if utils.is_extension_supported(self._core_plugin, 'dns-integration'): self._process_dns_floatingip_delete(context, floatingip_dict) # Foreign key cascade will take care of the removal of the # floating IP record once the port is deleted. We can't start # a transaction first to remove it ourselves because the delete_port # method will yield in its post-commit activities. self._core_plugin.delete_port(context.elevated(), floatingip['floating_port_id'], l3_port_check=False) return floatingip_dict def delete_floatingip(self, context, id): self._delete_floatingip(context, id) def get_floatingip(self, context, id, fields=None): floatingip = self._get_floatingip(context, id) return self._make_floatingip_dict(floatingip, fields) def get_floatingips(self, context, filters=None, fields=None, sorts=None, limit=None, marker=None, page_reverse=False): marker_obj = self._get_marker_obj(context, 'floatingip', limit, marker) if filters is not None: for key, val in six.iteritems(API_TO_DB_COLUMN_MAP): if key in filters: filters[val] = filters.pop(key) return self._get_collection(context, FloatingIP, self._make_floatingip_dict, filters=filters, fields=fields, sorts=sorts, limit=limit, marker_obj=marker_obj, page_reverse=page_reverse) def delete_disassociated_floatingips(self, context, network_id): query = self._model_query(context, FloatingIP) query = query.filter_by(floating_network_id=network_id, fixed_port_id=None, router_id=None) for fip in query: self.delete_floatingip(context, fip.id) def get_floatingips_count(self, context, filters=None): return self._get_collection_count(context, FloatingIP, filters=filters) def _router_exists(self, context, router_id): try: self.get_router(context.elevated(), router_id) return True except l3.RouterNotFound: return False def _floating_ip_exists(self, context, floating_ip_id): try: self.get_floatingip(context, floating_ip_id) return True except l3.FloatingIPNotFound: return False def prevent_l3_port_deletion(self, context, port_id): """Checks to make sure a port is allowed to be deleted. Raises an exception if this is not the case. This should be called by any plugin when the API requests the deletion of a port, since some ports for L3 are not intended to be deleted directly via a DELETE to /ports, but rather via other API calls that perform the proper deletion checks. 
""" try: port = self._core_plugin.get_port(context, port_id) except n_exc.PortNotFound: # non-existent ports don't need to be protected from deletion return if port['device_owner'] not in self.router_device_owners: return # Raise port in use only if the port has IP addresses # Otherwise it's a stale port that can be removed fixed_ips = port['fixed_ips'] if not fixed_ips: LOG.debug("Port %(port_id)s has owner %(port_owner)s, but " "no IP address, so it can be deleted", {'port_id': port['id'], 'port_owner': port['device_owner']}) return # NOTE(kevinbenton): we also check to make sure that the # router still exists. It's possible for HA router interfaces # to remain after the router is deleted if they encounter an # error during deletion. # Elevated context in case router is owned by another tenant if port['device_owner'] == DEVICE_OWNER_FLOATINGIP: if not self._floating_ip_exists(context, port['device_id']): LOG.debug("Floating IP %(f_id)s corresponding to port " "%(port_id)s no longer exists, allowing deletion.", {'f_id': port['device_id'], 'port_id': port['id']}) return elif not self._router_exists(context, port['device_id']): LOG.debug("Router %(router_id)s corresponding to port " "%(port_id)s no longer exists, allowing deletion.", {'router_id': port['device_id'], 'port_id': port['id']}) return reason = _('has device owner %s') % port['device_owner'] raise n_exc.ServicePortInUse(port_id=port['id'], reason=reason) def disassociate_floatingips(self, context, port_id): """Disassociate all floating IPs linked to specific port. @param port_id: ID of the port to disassociate floating IPs. @param do_notify: whether we should notify routers right away. @return: set of router-ids that require notification updates if do_notify is False, otherwise None. """ router_ids = set() with context.session.begin(subtransactions=True): fip_qry = context.session.query(FloatingIP) floating_ips = fip_qry.filter_by(fixed_port_id=port_id) for floating_ip in floating_ips: router_ids.add(floating_ip['router_id']) floating_ip.update({'fixed_port_id': None, 'fixed_ip_address': None, 'router_id': None}) return router_ids def _build_routers_list(self, context, routers, gw_ports): """Subclasses can override this to add extra gateway info""" return routers def _make_router_dict_with_gw_port(self, router, fields): result = self._make_router_dict(router, fields) if router.get('gw_port'): result['gw_port'] = self._core_plugin._make_port_dict( router['gw_port'], None) return result def _get_sync_routers(self, context, router_ids=None, active=None): """Query routers and their gw ports for l3 agent. Query routers with the router_ids. The gateway ports, if any, will be queried too. l3 agent has an option to deal with only one router id. In addition, when we need to notify the agent the data about only one router (when modification of router, its interfaces, gw_port and floatingips), we will have router_ids. @param router_ids: the list of router ids which we want to query. if it is None, all of routers will be queried. 
@return: a list of router dicts, each with its gw_port dict populated if present """ filters = {'id': router_ids} if router_ids else {} if active is not None: filters['admin_state_up'] = [active] router_dicts = self._get_collection( context, Router, self._make_router_dict_with_gw_port, filters=filters) if not router_dicts: return [] gw_ports = dict((r['gw_port']['id'], r['gw_port']) for r in router_dicts if r.get('gw_port')) return self._build_routers_list(context, router_dicts, gw_ports) @staticmethod def _unique_floatingip_iterator(query): """Iterates over only one row per floating ip. Ignores others.""" # Group rows by fip id. They must be sorted by same. q = query.order_by(FloatingIP.id) keyfunc = lambda row: row[0]['id'] group_iterator = itertools.groupby(q, keyfunc) # Just hit the first row of each group for key, value in group_iterator: yield six.next(value) def _make_floatingip_dict_with_scope(self, floatingip_db, scope_id): d = self._make_floatingip_dict(floatingip_db) d['fixed_ip_address_scope'] = scope_id return d def _get_sync_floating_ips(self, context, router_ids): """Query floating_ips that relate to list of router_ids with scope. This is different from the regular get_floatingips in that it finds the address scope of the fixed IP. The router needs to know this to distinguish it from other scopes. There are a few redirections to go through to discover the address scope from the floating ip. """ if not router_ids: return [] query = context.session.query(FloatingIP, models_v2.SubnetPool.address_scope_id) query = query.join(models_v2.Port, FloatingIP.fixed_port_id == models_v2.Port.id) # Outer join of Subnet can cause each ip to have more than one row. query = query.outerjoin(models_v2.Subnet, models_v2.Subnet.network_id == models_v2.Port.network_id) query = query.filter(models_v2.Subnet.ip_version == 4) query = query.outerjoin(models_v2.SubnetPool, models_v2.Subnet.subnetpool_id == models_v2.SubnetPool.id) # Filter out on router_ids query = query.filter(FloatingIP.router_id.in_(router_ids)) return [self._make_floatingip_dict_with_scope(*row) for row in self._unique_floatingip_iterator(query)] def _get_sync_interfaces(self, context, router_ids, device_owners=None): """Query router interfaces that relate to list of router_ids.""" device_owners = device_owners or [DEVICE_OWNER_ROUTER_INTF] if not router_ids: return [] qry = context.session.query(RouterPort) qry = qry.filter( RouterPort.router_id.in_(router_ids), RouterPort.port_type.in_(device_owners) ) interfaces = [self._core_plugin._make_port_dict(rp.port, None) for rp in qry] return interfaces @staticmethod def _each_port_having_fixed_ips(ports): for port in ports or []: fixed_ips = port.get('fixed_ips', []) if not fixed_ips: # Skip ports without IPs, which can occur if a subnet # attached to a router is deleted LOG.info(_LI("Skipping port %s as no IP is configured on " "it"), port['id']) continue yield port def _get_subnets_by_network_list(self, context, network_ids): if not network_ids: return {} query = context.session.query(models_v2.Subnet, models_v2.SubnetPool.address_scope_id) query = query.outerjoin( models_v2.SubnetPool, models_v2.Subnet.subnetpool_id == models_v2.SubnetPool.id) query = query.filter(models_v2.Subnet.network_id.in_(network_ids)) fields = ['id', 'cidr', 'gateway_ip', 'dns_nameservers', 'network_id', 'ipv6_ra_mode', 'subnetpool_id'] def make_subnet_dict_with_scope(row): subnet_db, address_scope_id = row subnet = self._core_plugin._make_subnet_dict( subnet_db, fields, context=context) subnet['address_scope_id'] =
address_scope_id return subnet subnets_by_network = dict((id, []) for id in network_ids) for subnet in (make_subnet_dict_with_scope(row) for row in query): subnets_by_network[subnet['network_id']].append(subnet) return subnets_by_network def _get_mtus_by_network_list(self, context, network_ids): if not network_ids: return {} filters = {'network_id': network_ids} fields = ['id', 'mtu'] networks = self._core_plugin.get_networks(context, filters=filters, fields=fields) mtus_by_network = dict((network['id'], network.get('mtu', 0)) for network in networks) return mtus_by_network def _populate_mtu_and_subnets_for_ports(self, context, ports): """Populate ports with subnets. These ports already have fixed_ips populated. """ network_ids = [p['network_id'] for p in self._each_port_having_fixed_ips(ports)] mtus_by_network = self._get_mtus_by_network_list(context, network_ids) subnets_by_network = self._get_subnets_by_network_list( context, network_ids) for port in self._each_port_having_fixed_ips(ports): port['subnets'] = [] port['extra_subnets'] = [] port['address_scopes'] = {l3_constants.IP_VERSION_4: None, l3_constants.IP_VERSION_6: None} scopes = {} for subnet in subnets_by_network[port['network_id']]: scope = subnet['address_scope_id'] cidr = netaddr.IPNetwork(subnet['cidr']) scopes[cidr.version] = scope # If this subnet is used by the port (has a matching entry # in the port's fixed_ips), then add this subnet to the # port's subnets list, and populate the fixed_ips entry # with the subnet's prefix length. subnet_info = {'id': subnet['id'], 'cidr': subnet['cidr'], 'gateway_ip': subnet['gateway_ip'], 'dns_nameservers': subnet['dns_nameservers'], 'ipv6_ra_mode': subnet['ipv6_ra_mode'], 'subnetpool_id': subnet['subnetpool_id']} for fixed_ip in port['fixed_ips']: if fixed_ip['subnet_id'] == subnet['id']: port['subnets'].append(subnet_info) prefixlen = cidr.prefixlen fixed_ip['prefixlen'] = prefixlen break else: # This subnet is not used by the port. 
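# Such a subnet is still reported under 'extra_subnets' below.
# NOTE: a minimal sketch of the port dict this method produces; all
# field values are hypothetical:
#
#     {'network_id': 'net-uuid',
#      'fixed_ips': [{'subnet_id': 'subnet-1',
#                     'ip_address': '10.0.0.1', 'prefixlen': 24}],
#      'subnets': [{'id': 'subnet-1', 'cidr': '10.0.0.0/24', ...}],
#      'extra_subnets': [{'id': 'subnet-2', ...}],
#      'address_scopes': {4: 'scope-uuid', 6: None},
#      'mtu': 1500}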
port['extra_subnets'].append(subnet_info) port['address_scopes'].update(scopes) port['mtu'] = mtus_by_network.get(port['network_id'], 0) def _process_floating_ips(self, context, routers_dict, floating_ips): for floating_ip in floating_ips: router = routers_dict.get(floating_ip['router_id']) if router: router_floatingips = router.get(l3_constants.FLOATINGIP_KEY, []) router_floatingips.append(floating_ip) router[l3_constants.FLOATINGIP_KEY] = router_floatingips def _process_interfaces(self, routers_dict, interfaces): for interface in interfaces: router = routers_dict.get(interface['device_id']) if router: router_interfaces = router.get(l3_constants.INTERFACE_KEY, []) router_interfaces.append(interface) router[l3_constants.INTERFACE_KEY] = router_interfaces def _get_router_info_list(self, context, router_ids=None, active=None, device_owners=None): """Query routers and their related floating_ips, interfaces.""" with context.session.begin(subtransactions=True): routers = self._get_sync_routers(context, router_ids=router_ids, active=active) router_ids = [router['id'] for router in routers] interfaces = self._get_sync_interfaces( context, router_ids, device_owners) floating_ips = self._get_sync_floating_ips(context, router_ids) return (routers, interfaces, floating_ips) def get_sync_data(self, context, router_ids=None, active=None): routers, interfaces, floating_ips = self._get_router_info_list( context, router_ids=router_ids, active=active) ports_to_populate = [router['gw_port'] for router in routers if router.get('gw_port')] + interfaces self._populate_mtu_and_subnets_for_ports(context, ports_to_populate) routers_dict = dict((router['id'], router) for router in routers) self._process_floating_ips(context, routers_dict, floating_ips) self._process_interfaces(routers_dict, interfaces) return list(routers_dict.values()) class L3RpcNotifierMixin(object): """Mixin class to add rpc notifier attribute to db_base_plugin_v2.""" @property def l3_rpc_notifier(self): if not hasattr(self, '_l3_rpc_notifier'): self._l3_rpc_notifier = l3_rpc_agent_api.L3AgentNotifyAPI() return self._l3_rpc_notifier @l3_rpc_notifier.setter def l3_rpc_notifier(self, value): self._l3_rpc_notifier = value def notify_router_updated(self, context, router_id, operation=None): if router_id: self.l3_rpc_notifier.routers_updated( context, [router_id], operation) def notify_routers_updated(self, context, router_ids, operation=None, data=None): if router_ids: self.l3_rpc_notifier.routers_updated( context, router_ids, operation, data) def notify_router_deleted(self, context, router_id): self.l3_rpc_notifier.router_deleted(context, router_id) class L3_NAT_db_mixin(L3_NAT_dbonly_mixin, L3RpcNotifierMixin): """Mixin class to add rpc notifier methods to db_base_plugin_v2.""" def create_router(self, context, router): router_dict = super(L3_NAT_db_mixin, self).create_router(context, router) if router_dict.get('external_gateway_info'): self.notify_router_updated(context, router_dict['id'], None) return router_dict def update_router(self, context, id, router): router_dict = super(L3_NAT_db_mixin, self).update_router(context, id, router) self.notify_router_updated(context, router_dict['id'], None) return router_dict def delete_router(self, context, id): super(L3_NAT_db_mixin, self).delete_router(context, id) self.notify_router_deleted(context, id) def notify_router_interface_action( self, context, router_interface_info, action): l3_method = '%s_router_interface' % action super(L3_NAT_db_mixin, self).notify_routers_updated( context, 
[router_interface_info['id']], l3_method, {'subnet_id': router_interface_info['subnet_id']}) mapping = {'add': 'create', 'remove': 'delete'} notifier = n_rpc.get_notifier('network') router_event = 'router.interface.%s' % mapping[action] notifier.info(context, router_event, {'router_interface': router_interface_info}) def add_router_interface(self, context, router_id, interface_info): router_interface_info = super( L3_NAT_db_mixin, self).add_router_interface( context, router_id, interface_info) self.notify_router_interface_action( context, router_interface_info, 'add') return router_interface_info def remove_router_interface(self, context, router_id, interface_info): router_interface_info = super( L3_NAT_db_mixin, self).remove_router_interface( context, router_id, interface_info) self.notify_router_interface_action( context, router_interface_info, 'remove') return router_interface_info def create_floatingip(self, context, floatingip, initial_status=l3_constants.FLOATINGIP_STATUS_ACTIVE): floatingip_dict = super(L3_NAT_db_mixin, self).create_floatingip( context, floatingip, initial_status) router_id = floatingip_dict['router_id'] self.notify_router_updated(context, router_id, 'create_floatingip') return floatingip_dict def update_floatingip(self, context, id, floatingip): old_floatingip, floatingip = self._update_floatingip( context, id, floatingip) router_ids = self._floatingips_to_router_ids( [old_floatingip, floatingip]) super(L3_NAT_db_mixin, self).notify_routers_updated( context, router_ids, 'update_floatingip', {}) return floatingip def delete_floatingip(self, context, id): floating_ip = self._delete_floatingip(context, id) self.notify_router_updated(context, floating_ip['router_id'], 'delete_floatingip') def disassociate_floatingips(self, context, port_id, do_notify=True): """Disassociate all floating IPs linked to specific port. @param port_id: ID of the port to disassociate floating IPs. @param do_notify: whether we should notify routers right away. @return: set of router-ids that require notification updates if do_notify is False, otherwise None. 
""" router_ids = super(L3_NAT_db_mixin, self).disassociate_floatingips( context, port_id) if do_notify: self.notify_routers_updated(context, router_ids) # since caller assumes that we handled notifications on its # behalf, return nothing return return router_ids def notify_routers_updated(self, context, router_ids): super(L3_NAT_db_mixin, self).notify_routers_updated( context, list(router_ids), 'disassociate_floatingips', {}) def _prevent_l3_port_delete_callback(resource, event, trigger, **kwargs): context = kwargs['context'] port_id = kwargs['port_id'] port_check = kwargs['port_check'] l3plugin = manager.NeutronManager.get_service_plugins().get( constants.L3_ROUTER_NAT) if l3plugin and port_check: l3plugin.prevent_l3_port_deletion(context, port_id) def _notify_routers_callback(resource, event, trigger, **kwargs): context = kwargs['context'] router_ids = kwargs['router_ids'] l3plugin = manager.NeutronManager.get_service_plugins().get( constants.L3_ROUTER_NAT) l3plugin.notify_routers_updated(context, router_ids) def _notify_subnet_gateway_ip_update(resource, event, trigger, **kwargs): l3plugin = manager.NeutronManager.get_service_plugins().get( constants.L3_ROUTER_NAT) if not l3plugin: return context = kwargs['context'] network_id = kwargs['network_id'] subnet_id = kwargs['subnet_id'] query = context.session.query(models_v2.Port).filter_by( network_id=network_id, device_owner=l3_constants.DEVICE_OWNER_ROUTER_GW) query = query.join(models_v2.Port.fixed_ips).filter( models_v2.IPAllocation.subnet_id == subnet_id) router_ids = set(port['device_id'] for port in query) for router_id in router_ids: l3plugin.notify_router_updated(context, router_id) def _notify_subnetpool_address_scope_update(resource, event, trigger, **kwargs): context = kwargs['context'] subnetpool_id = kwargs['subnetpool_id'] query = context.session.query(RouterPort.router_id) query = query.join(models_v2.Port) query = query.join( models_v2.Subnet, models_v2.Subnet.network_id == models_v2.Port.network_id) query = query.filter( models_v2.Subnet.subnetpool_id == subnetpool_id, RouterPort.port_type.in_(l3_constants.ROUTER_PORT_OWNERS)) query = query.distinct() router_ids = [r[0] for r in query] l3plugin = manager.NeutronManager.get_service_plugins().get( constants.L3_ROUTER_NAT) l3plugin.notify_routers_updated(context, router_ids) def subscribe(): registry.subscribe( _prevent_l3_port_delete_callback, resources.PORT, events.BEFORE_DELETE) registry.subscribe( _notify_routers_callback, resources.PORT, events.AFTER_DELETE) registry.subscribe( _notify_subnet_gateway_ip_update, resources.SUBNET_GATEWAY, events.AFTER_UPDATE) registry.subscribe( _notify_subnetpool_address_scope_update, resources.SUBNETPOOL_ADDRESS_SCOPE, events.AFTER_UPDATE) # NOTE(armax): multiple l3 service plugins (potentially out of tree) inherit # from l3_db and may need the callbacks to be processed. Having an implicit # subscription (through the module import) preserves the existing behavior, # and at the same time it avoids fixing it manually in each and every l3 plugin # out there. That said, The subscription is also made explicit in the # reference l3 plugin. The subscription operation is idempotent so there is no # harm in registering the same callback multiple times. subscribe() neutron-8.4.0/neutron/db/agents_db.py0000664000567000056710000005754513044372760020763 0ustar jenkinsjenkins00000000000000# Copyright (c) 2013 OpenStack Foundation. # All Rights Reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import datetime from eventlet import greenthread from oslo_config import cfg from oslo_db import exception as db_exc from oslo_log import log as logging import oslo_messaging from oslo_serialization import jsonutils from oslo_utils import importutils from oslo_utils import timeutils import six import sqlalchemy as sa from sqlalchemy.orm import exc from sqlalchemy import sql from neutron._i18n import _, _LE, _LI, _LW from neutron.api.rpc.callbacks import version_manager from neutron.api.v2 import attributes from neutron.callbacks import events from neutron.callbacks import registry from neutron.callbacks import resources from neutron.common import constants from neutron import context from neutron.db import api as db_api from neutron.db import model_base from neutron.extensions import agent as ext_agent from neutron.extensions import availability_zone as az_ext from neutron import manager LOG = logging.getLogger(__name__) AGENT_OPTS = [ cfg.IntOpt('agent_down_time', default=75, help=_("Seconds to regard the agent is down; should be at " "least twice report_interval, to be sure the " "agent is down for good.")), cfg.StrOpt('dhcp_load_type', default='networks', choices=['networks', 'subnets', 'ports'], help=_('Representing the resource type whose load is being ' 'reported by the agent. This can be "networks", ' '"subnets" or "ports". ' 'When specified (Default is networks), the server will ' 'extract particular load sent as part of its agent ' 'configuration object from the agent report state, ' 'which is the number of resources being consumed, at ' 'every report_interval.' 'dhcp_load_type can be used in combination with ' 'network_scheduler_driver = ' 'neutron.scheduler.dhcp_agent_scheduler.WeightScheduler ' 'When the network_scheduler_driver is WeightScheduler, ' 'dhcp_load_type can be configured to represent the ' 'choice for the resource being balanced. ' 'Example: dhcp_load_type=networks')), cfg.BoolOpt('enable_new_agents', default=True, help=_("Agent starts with admin_state_up=False when " "enable_new_agents=False. 
In the case, user's " "resources will not be scheduled automatically to the " "agent until admin changes admin_state_up to True.")), ] cfg.CONF.register_opts(AGENT_OPTS) # this is the ratio from agent_down_time to the time we use to consider # the agents down for considering their resource versions in the # version_manager callback DOWNTIME_VERSIONS_RATIO = 2 class Agent(model_base.BASEV2, model_base.HasId): """Represents agents running in neutron deployments.""" __table_args__ = ( sa.UniqueConstraint('agent_type', 'host', name='uniq_agents0agent_type0host'), model_base.BASEV2.__table_args__ ) # L3 agent, DHCP agent, OVS agent, LinuxBridge agent_type = sa.Column(sa.String(255), nullable=False) binary = sa.Column(sa.String(255), nullable=False) # TOPIC is a fanout exchange topic topic = sa.Column(sa.String(255), nullable=False) # TOPIC.host is a target topic host = sa.Column(sa.String(255), nullable=False) availability_zone = sa.Column(sa.String(255)) admin_state_up = sa.Column(sa.Boolean, default=True, server_default=sql.true(), nullable=False) # the time when first report came from agents created_at = sa.Column(sa.DateTime, nullable=False) # the time when first report came after agents start started_at = sa.Column(sa.DateTime, nullable=False) # updated when agents report heartbeat_timestamp = sa.Column(sa.DateTime, nullable=False) # description is note for admin user description = sa.Column(sa.String(attributes.DESCRIPTION_MAX_LEN)) # configurations: a json dict string, I think 4095 is enough configurations = sa.Column(sa.String(4095), nullable=False) # resource_versions: json dict, 8191 allows for ~256 resource versions # assuming ~32byte length "'name': 'ver'," # the whole row limit is 65535 bytes in mysql resource_versions = sa.Column(sa.String(8191)) # load - number of resources hosted by the agent load = sa.Column(sa.Integer, server_default='0', nullable=False) @property def is_active(self): return not AgentDbMixin.is_agent_down(self.heartbeat_timestamp) class AgentAvailabilityZoneMixin(az_ext.AvailabilityZonePluginBase): """Mixin class to add availability_zone extension to AgentDbMixin.""" def _list_availability_zones(self, context, filters=None): result = {} query = self._get_collection_query(context, Agent, filters=filters) for agent in query.group_by(Agent.admin_state_up, Agent.availability_zone, Agent.agent_type): if not agent.availability_zone: continue if agent.agent_type == constants.AGENT_TYPE_DHCP: resource = 'network' elif agent.agent_type == constants.AGENT_TYPE_L3: resource = 'router' else: continue key = (agent.availability_zone, resource) result[key] = agent.admin_state_up or result.get(key, False) return result def get_availability_zones(self, context, filters=None, fields=None, sorts=None, limit=None, marker=None, page_reverse=False): """Return a list of availability zones.""" # NOTE(hichihara): 'tenant_id' is dummy for policy check. # it is not visible via API. 
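# How the (zone, resource) rollup in _list_availability_zones above
# behaves: a zone counts as available for a resource as soon as one
# matching agent has admin_state_up=True. Self-contained sketch with
# plain dicts (the agents list below is made up for illustration):

agents = [
    {'availability_zone': 'az1', 'agent_type': 'DHCP agent',
     'admin_state_up': False},
    {'availability_zone': 'az1', 'agent_type': 'DHCP agent',
     'admin_state_up': True},
    {'availability_zone': 'az2', 'agent_type': 'L3 agent',
     'admin_state_up': False},
]
resource_by_type = {'DHCP agent': 'network', 'L3 agent': 'router'}

result = {}
for agent in agents:
    resource = resource_by_type.get(agent['agent_type'])
    if not agent['availability_zone'] or not resource:
        continue
    key = (agent['availability_zone'], resource)
    # True wins over any previously recorded False for the same key.
    result[key] = agent['admin_state_up'] or result.get(key, False)

assert result[('az1', 'network')] is True
assert result[('az2', 'router')] is False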
return [{'state': 'available' if v else 'unavailable', 'name': k[0], 'resource': k[1], 'tenant_id': context.tenant_id} for k, v in six.iteritems(self._list_availability_zones( context, filters))] def validate_availability_zones(self, context, resource_type, availability_zones): """Verify that the availability zones exist.""" if not availability_zones: return if resource_type == 'network': agent_type = constants.AGENT_TYPE_DHCP elif resource_type == 'router': agent_type = constants.AGENT_TYPE_L3 else: return query = context.session.query(Agent.availability_zone).filter_by( agent_type=agent_type).group_by(Agent.availability_zone) query = query.filter(Agent.availability_zone.in_(availability_zones)) azs = [item[0] for item in query] diff = set(availability_zones) - set(azs) if diff: raise az_ext.AvailabilityZoneNotFound(availability_zone=diff.pop()) class AgentDbMixin(ext_agent.AgentPluginBase, AgentAvailabilityZoneMixin): """Mixin class to add agent extension to db_base_plugin_v2.""" def _get_agent(self, context, id): try: agent = self._get_by_id(context, Agent, id) except exc.NoResultFound: raise ext_agent.AgentNotFound(id=id) return agent def get_enabled_agent_on_host(self, context, agent_type, host): """Return agent of agent_type for the specified host.""" query = context.session.query(Agent) query = query.filter(Agent.agent_type == agent_type, Agent.host == host, Agent.admin_state_up == sql.true()) try: agent = query.one() except exc.NoResultFound: LOG.debug('No enabled %(agent_type)s agent on host ' '%(host)s', {'agent_type': agent_type, 'host': host}) return if self.is_agent_down(agent.heartbeat_timestamp): LOG.warning(_LW('%(agent_type)s agent %(agent_id)s is not active'), {'agent_type': agent_type, 'agent_id': agent.id}) return agent @staticmethod def is_agent_down(heart_beat_time): return timeutils.is_older_than(heart_beat_time, cfg.CONF.agent_down_time) @staticmethod def is_agent_considered_for_versions(agent_dict): return not timeutils.is_older_than(agent_dict['heartbeat_timestamp'], cfg.CONF.agent_down_time * DOWNTIME_VERSIONS_RATIO) def get_configuration_dict(self, agent_db): return self._get_dict(agent_db, 'configurations') def _get_dict(self, agent_db, dict_name, ignore_missing=False): json_value = None try: json_value = getattr(agent_db, dict_name) conf = jsonutils.loads(json_value) except Exception: if json_value or not ignore_missing: msg = _LW('Dictionary %(dict_name)s for agent %(agent_type)s ' 'on host %(host)s is invalid.') LOG.warning(msg, {'dict_name': dict_name, 'agent_type': agent_db.agent_type, 'host': agent_db.host}) conf = {} return conf def _get_agent_load(self, agent): configs = agent.get('configurations', {}) load_type = None load = 0 if(agent['agent_type'] == constants.AGENT_TYPE_DHCP): load_type = cfg.CONF.dhcp_load_type if load_type: load = int(configs.get(load_type, 0)) return load def _make_agent_dict(self, agent, fields=None): attr = ext_agent.RESOURCE_ATTRIBUTE_MAP.get( ext_agent.RESOURCE_NAME + 's') res = dict((k, agent[k]) for k in attr if k not in ['alive', 'configurations']) res['alive'] = not self.is_agent_down(res['heartbeat_timestamp']) res['configurations'] = self._get_dict(agent, 'configurations') res['resource_versions'] = self._get_dict(agent, 'resource_versions', ignore_missing=True) res['availability_zone'] = agent['availability_zone'] return self._fields(res, fields) def delete_agent(self, context, id): agent = self._get_agent(context, id) registry.notify(resources.AGENT, events.BEFORE_DELETE, self, context=context, agent=agent) with 
context.session.begin(subtransactions=True): context.session.delete(agent) def update_agent(self, context, id, agent): agent_data = agent['agent'] with context.session.begin(subtransactions=True): agent = self._get_agent(context, id) agent.update(agent_data) return self._make_agent_dict(agent) def get_agents_db(self, context, filters=None): query = self._get_collection_query(context, Agent, filters=filters) return query.all() def get_agents(self, context, filters=None, fields=None): agents = self._get_collection(context, Agent, self._make_agent_dict, filters=filters, fields=fields) alive = filters and filters.get('alive', None) if alive: alive = attributes.convert_to_boolean(alive[0]) agents = [agent for agent in agents if agent['alive'] == alive] return agents def agent_health_check(self): """Scan agents and log if some are considered dead.""" agents = self.get_agents(context.get_admin_context(), filters={'admin_state_up': [True]}) dead_agents = [agent for agent in agents if not agent['alive']] if dead_agents: data = '%20s %20s %s\n' % ('Type', 'Last heartbeat', "host") data += '\n'.join(['%20s %20s %s' % (agent['agent_type'], agent['heartbeat_timestamp'], agent['host']) for agent in dead_agents]) LOG.warning(_LW("Agent healthcheck: found %(count)s dead agents " "out of %(total)s:\n%(data)s"), {'count': len(dead_agents), 'total': len(agents), 'data': data}) else: LOG.debug("Agent healthcheck: found %s active agents", len(agents)) def _get_agent_by_type_and_host(self, context, agent_type, host): query = self._model_query(context, Agent) try: agent_db = query.filter(Agent.agent_type == agent_type, Agent.host == host).one() return agent_db except exc.NoResultFound: raise ext_agent.AgentNotFoundByTypeHost(agent_type=agent_type, host=host) except exc.MultipleResultsFound: raise ext_agent.MultipleAgentFoundByTypeHost(agent_type=agent_type, host=host) def get_agent(self, context, id, fields=None): agent = self._get_agent(context, id) return self._make_agent_dict(agent, fields) def _log_heartbeat(self, state, agent_db, agent_conf): if agent_conf.get('log_agent_heartbeats'): delta = timeutils.utcnow() - agent_db.heartbeat_timestamp LOG.info(_LI("Heartbeat received from %(type)s agent on " "host %(host)s, uuid %(uuid)s after %(delta)s"), {'type': agent_db.agent_type, 'host': agent_db.host, 'uuid': state.get('uuid'), 'delta': delta}) def _create_or_update_agent(self, context, agent_state): """Registers new agent in the database or updates existing. Returns agent status from server point of view: alive, new or revived. It could be used by agent to do some sync with the server if needed. 
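# The alive/down decision used throughout this module reduces to a
# timestamp comparison. A stdlib-only equivalent of is_agent_down()
# under the default agent_down_time of 75 seconds (the real code uses
# oslo_utils.timeutils.is_older_than; this sketch is illustrative):

import datetime

AGENT_DOWN_TIME = datetime.timedelta(seconds=75)


def agent_is_down(heartbeat_timestamp, now=None):
    now = now or datetime.datetime.utcnow()
    return now - heartbeat_timestamp > AGENT_DOWN_TIME


_now = datetime.datetime.utcnow()
assert not agent_is_down(_now - datetime.timedelta(seconds=10), now=_now)
assert agent_is_down(_now - datetime.timedelta(seconds=200), now=_now)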
""" status = constants.AGENT_ALIVE with context.session.begin(subtransactions=True): res_keys = ['agent_type', 'binary', 'host', 'topic'] res = dict((k, agent_state[k]) for k in res_keys) if 'availability_zone' in agent_state: res['availability_zone'] = agent_state['availability_zone'] configurations_dict = agent_state.get('configurations', {}) res['configurations'] = jsonutils.dumps(configurations_dict) resource_versions_dict = agent_state.get('resource_versions') if resource_versions_dict: res['resource_versions'] = jsonutils.dumps( resource_versions_dict) res['load'] = self._get_agent_load(agent_state) current_time = timeutils.utcnow() try: agent_db = self._get_agent_by_type_and_host( context, agent_state['agent_type'], agent_state['host']) if not agent_db.is_active: status = constants.AGENT_REVIVED if 'resource_versions' not in agent_state: # updating agent_state with resource_versions taken # from db so that # _update_local_agent_resource_versions() will call # version_manager and bring it up to date agent_state['resource_versions'] = self._get_dict( agent_db, 'resource_versions', ignore_missing=True) res['heartbeat_timestamp'] = current_time if agent_state.get('start_flag'): res['started_at'] = current_time greenthread.sleep(0) self._log_heartbeat(agent_state, agent_db, configurations_dict) agent_db.update(res) except ext_agent.AgentNotFoundByTypeHost: greenthread.sleep(0) res['created_at'] = current_time res['started_at'] = current_time res['heartbeat_timestamp'] = current_time res['admin_state_up'] = cfg.CONF.enable_new_agents agent_db = Agent(**res) greenthread.sleep(0) context.session.add(agent_db) self._log_heartbeat(agent_state, agent_db, configurations_dict) status = constants.AGENT_NEW greenthread.sleep(0) return status def create_or_update_agent(self, context, agent): """Create or update agent according to report.""" try: return self._create_or_update_agent(context, agent) except db_exc.DBDuplicateEntry: # It might happen that two or more concurrent transactions # are trying to insert new rows having the same value of # (agent_type, host) pair at the same time (if there has # been no such entry in the table and multiple agent status # updates are being processed at the moment). In this case # having a unique constraint on (agent_type, host) columns # guarantees that only one transaction will succeed and # insert a new agent entry, others will fail and be rolled # back. That means we must retry them one more time: no # INSERTs will be issued, because # _get_agent_by_type_and_host() will return the existing # agent entry, which will be updated multiple times return self._create_or_update_agent(context, agent) def _get_agents_considered_for_versions(self): up_agents = self.get_agents(context.get_admin_context(), filters={'admin_state_up': [True]}) return filter(self.is_agent_considered_for_versions, up_agents) def get_agents_resource_versions(self, tracker): """Get the known agent resource versions and update the tracker. This function looks up into the database and updates every agent resource versions. This method is called from version_manager when the cached information has passed TTL. 
:param tracker: receives a version_manager.ResourceConsumerTracker """ for agent in self._get_agents_considered_for_versions(): resource_versions = agent.get('resource_versions', {}) consumer = version_manager.AgentConsumer( agent_type=agent['agent_type'], host=agent['host']) LOG.debug("Update consumer %(consumer)s versions to: " "%(versions)s", {'consumer': consumer, 'versions': resource_versions}) tracker.set_versions(consumer, resource_versions) class AgentExtRpcCallback(object): """Processes the rpc report in plugin implementations. This class implements the server side of an rpc interface. The client side can be found in neutron.agent.rpc.PluginReportStateAPI. For more information on changing rpc interfaces, see doc/source/devref/rpc_api.rst. API version history: 1.0 - Initial version. 1.1 - report_state now returns agent state. """ target = oslo_messaging.Target(version='1.1', namespace=constants.RPC_NAMESPACE_STATE) START_TIME = timeutils.utcnow() def __init__(self, plugin=None): super(AgentExtRpcCallback, self).__init__() self.plugin = plugin #TODO(ajo): fix the resources circular dependency issue by dynamically # registering object types in the RPC callbacks api resources_rpc = importutils.import_module( 'neutron.api.rpc.handlers.resources_rpc') # Initialize RPC api directed to other neutron-servers self.server_versions_rpc = resources_rpc.ResourcesPushToServersRpcApi() @db_api.retry_db_errors def report_state(self, context, **kwargs): """Report state from agent to server. Returns - agent's status: AGENT_NEW, AGENT_REVIVED, AGENT_ALIVE """ time = kwargs['time'] time = timeutils.parse_strtime(time) agent_state = kwargs['agent_state']['agent_state'] self._check_clock_sync_on_agent_start(agent_state, time) if self.START_TIME > time: time_agent = datetime.datetime.isoformat(time) time_server = datetime.datetime.isoformat(self.START_TIME) log_dict = {'agent_time': time_agent, 'server_time': time_server} LOG.debug("Stale message received with timestamp: %(agent_time)s. " "Skipping processing because it's older than the " "server start timestamp: %(server_time)s", log_dict) return if not self.plugin: self.plugin = manager.NeutronManager.get_plugin() agent_status = self.plugin.create_or_update_agent(context, agent_state) self._update_local_agent_resource_versions(context, agent_state) return agent_status def _update_local_agent_resource_versions(self, context, agent_state): resource_versions_dict = agent_state.get('resource_versions') if not resource_versions_dict: return version_manager.update_versions( version_manager.AgentConsumer(agent_type=agent_state['agent_type'], host=agent_state['host']), resource_versions_dict) # report other neutron-servers about this quickly self.server_versions_rpc.report_agent_resource_versions( context, agent_state['agent_type'], agent_state['host'], resource_versions_dict) def _check_clock_sync_on_agent_start(self, agent_state, agent_time): """Checks if the server and the agent times are in sync. Method checks if the agent time is in sync with the server time on start up. Ignores it, on subsequent re-connects. 
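# _check_clock_sync_on_agent_start (below) flags registrations whose
# timestamp differs from server time by more than agent_down_time in
# either direction. Minimal stand-alone version of that test, assuming
# the default agent_down_time of 75 seconds:

import datetime

AGENT_DOWN_TIME = 75  # seconds


def clock_out_of_sync(agent_time, server_time):
    diff = abs((server_time - agent_time).total_seconds())
    return diff > AGENT_DOWN_TIME


_now = datetime.datetime.utcnow()
assert not clock_out_of_sync(_now - datetime.timedelta(seconds=5), _now)
assert clock_out_of_sync(_now - datetime.timedelta(seconds=300), _now)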
""" if agent_state.get('start_flag'): time_server_now = timeutils.utcnow() diff = abs(timeutils.delta_seconds(time_server_now, agent_time)) if diff > cfg.CONF.agent_down_time: agent_name = agent_state['agent_type'] time_agent = datetime.datetime.isoformat(agent_time) host = agent_state['host'] log_dict = {'host': host, 'agent_name': agent_name, 'agent_time': time_agent, 'threshold': cfg.CONF.agent_down_time, 'serv_time': (datetime.datetime.isoformat (time_server_now)), 'diff': diff} LOG.error(_LE("Message received from the host: %(host)s " "during the registration of %(agent_name)s has " "a timestamp: %(agent_time)s. This differs from " "the current server timestamp: %(serv_time)s by " "%(diff)s seconds, which is more than the " "threshold agent down" "time: %(threshold)s."), log_dict) neutron-8.4.0/neutron/db/network_ip_availability_db.py0000664000567000056710000001605413044372736024406 0ustar jenkinsjenkins00000000000000# Copyright 2016 GoDaddy. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import netaddr import six from sqlalchemy import func import neutron.db.models_v2 as mod NETWORK_ID = 'network_id' NETWORK_NAME = 'network_name' SUBNET_ID = 'subnet_id' SUBNET_NAME = 'subnet_name' SUPPORTED_FILTERS = { NETWORK_ID: mod.Network.id, NETWORK_NAME: mod.Network.name, 'tenant_id': mod.Network.tenant_id, 'ip_version': mod.Subnet.ip_version, } SUPPORTED_FILTER_KEYS = six.viewkeys(SUPPORTED_FILTERS) class IpAvailabilityMixin(object): """Mixin class to query for IP availability.""" # Columns common to all queries common_columns = [ mod.Network.id.label(NETWORK_ID), mod.Subnet.id.label(SUBNET_ID), mod.Subnet.cidr, mod.Subnet.ip_version ] # Columns for the network/subnet and used_ip counts network_used_ips_columns = list(common_columns) network_used_ips_columns.append(mod.Network.name.label(NETWORK_NAME)) network_used_ips_columns.append(mod.Network.tenant_id) network_used_ips_columns.append(mod.Subnet.name.label(SUBNET_NAME)) # Aggregate query computed column network_used_ips_computed_columns = [ func.count(mod.IPAllocation.subnet_id).label('used_ips')] # Columns for total_ips query total_ips_columns = list(common_columns) total_ips_columns.append(mod.IPAllocationPool.first_ip) total_ips_columns.append(mod.IPAllocationPool.last_ip) @classmethod def get_network_ip_availabilities(cls, context, filters=None): """Get IP availability stats on a per subnet basis. Returns a list of network summaries which internally contains a list of subnet summaries. The used_ip and total_ip counts are returned at both levels. 
""" # Fetch total_ips by subnet subnet_total_ips_dict = cls._generate_subnet_total_ips_dict(context, filters) # Query network/subnet data along with used IP counts record_and_count_query = cls._build_network_used_ip_query(context, filters) # Assemble results result_dict = {} for row in record_and_count_query: cls._add_result(row, result_dict, subnet_total_ips_dict.get(row.subnet_id, 0)) # Convert result back into the list it expects net_ip_availabilities = list(six.viewvalues(result_dict)) return net_ip_availabilities @classmethod def _build_network_used_ip_query(cls, context, filters): # Generate a query to gather network/subnet/used_ips. # Ensure query is tolerant of missing child table data (outerjoins) # Process these outerjoin columns assuming their values may be None query = context.session.query() query = query.add_columns(*cls.network_used_ips_columns) query = query.add_columns(*cls.network_used_ips_computed_columns) query = query.outerjoin(mod.Subnet, mod.Network.id == mod.Subnet.network_id) query = query.outerjoin(mod.IPAllocation, mod.Subnet.id == mod.IPAllocation.subnet_id) query = query.group_by(*cls.network_used_ips_columns) return cls._adjust_query_for_filters(query, filters) @classmethod def _build_total_ips_query(cls, context, filters): query = context.session.query() query = query.add_columns(*cls.total_ips_columns) query = query.outerjoin(mod.Subnet, mod.Network.id == mod.Subnet.network_id) query = query.outerjoin( mod.IPAllocationPool, mod.Subnet.id == mod.IPAllocationPool.subnet_id) return cls._adjust_query_for_filters(query, filters) @classmethod def _generate_subnet_total_ips_dict(cls, context, filters): """Generates a dict whose key=subnet_id, value=total_ips in subnet""" # Query to get total_ips counts total_ips_query = cls._build_total_ips_query(context, filters) subnet_totals_dict = {} for row in total_ips_query: # Skip networks without subnets if not row.subnet_id: continue # Add IPAllocationPool data if row.last_ip: pool_total = netaddr.IPRange( netaddr.IPAddress(row.first_ip), netaddr.IPAddress(row.last_ip)).size cur_total = subnet_totals_dict.get(row.subnet_id, 0) subnet_totals_dict[row.subnet_id] = cur_total + pool_total else: subnet_totals_dict[row.subnet_id] = netaddr.IPNetwork( row.cidr, version=row.ip_version).size return subnet_totals_dict @classmethod def _adjust_query_for_filters(cls, query, filters): # The intersect of sets gets us applicable filter keys (others ignored) common_keys = six.viewkeys(filters) & SUPPORTED_FILTER_KEYS for key in common_keys: filter_vals = filters[key] if filter_vals: query = query.filter(SUPPORTED_FILTERS[key].in_(filter_vals)) return query @classmethod def _add_result(cls, db_row, result_dict, subnet_total_ips): # Find network in results. 
Create and add if missing if db_row.network_id in result_dict: network = result_dict[db_row.network_id] else: network = {NETWORK_ID: db_row.network_id, NETWORK_NAME: db_row.network_name, 'tenant_id': db_row.tenant_id, 'subnet_ip_availability': [], 'used_ips': 0, 'total_ips': 0} result_dict[db_row.network_id] = network # Only add subnet data if outerjoin rows have it if db_row.subnet_id: cls._add_subnet_data_to_net(db_row, network, subnet_total_ips) @classmethod def _add_subnet_data_to_net(cls, db_row, network_dict, subnet_total_ips): subnet = { SUBNET_ID: db_row.subnet_id, 'ip_version': db_row.ip_version, 'cidr': db_row.cidr, SUBNET_NAME: db_row.subnet_name, 'used_ips': db_row.used_ips if db_row.used_ips else 0, 'total_ips': subnet_total_ips } # Attach subnet result and rollup subnet sums into the parent network_dict['subnet_ip_availability'].append(subnet) network_dict['total_ips'] += subnet['total_ips'] network_dict['used_ips'] += subnet['used_ips'] neutron-8.4.0/neutron/db/dns_db.py0000664000567000056710000003464513044372760020262 0ustar jenkinsjenkins00000000000000# Copyright (c) 2016 IBM # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import cfg from oslo_log import log as logging import sqlalchemy as sa from sqlalchemy import orm from neutron._i18n import _, _LE from neutron.api.v2 import attributes from neutron.common import exceptions as n_exc from neutron.common import utils from neutron.db import db_base_plugin_v2 from neutron.db import l3_db from neutron.db import model_base from neutron.db import models_v2 from neutron.extensions import dns from neutron.extensions import l3 from neutron.services.externaldns import driver LOG = logging.getLogger(__name__) class NetworkDNSDomain(model_base.BASEV2): network_id = sa.Column(sa.String(36), sa.ForeignKey('networks.id', ondelete="CASCADE"), primary_key=True, index=True) dns_domain = sa.Column(sa.String(255), nullable=False) # Add a relationship to the Network model in order to instruct # SQLAlchemy to eagerly load this association network = orm.relationship(models_v2.Network, backref=orm.backref("dns_domain", lazy='joined', uselist=False, cascade='delete')) class FloatingIPDNS(model_base.BASEV2): __tablename__ = 'floatingipdnses' floatingip_id = sa.Column(sa.String(36), sa.ForeignKey('floatingips.id', ondelete="CASCADE"), primary_key=True, index=True) dns_name = sa.Column(sa.String(255), nullable=False) dns_domain = sa.Column(sa.String(255), nullable=False) published_dns_name = sa.Column(sa.String(255), nullable=False) published_dns_domain = sa.Column(sa.String(255), nullable=False) # Add a relationship to the FloatingIP model in order to instruct # SQLAlchemy to eagerly load this association floatingip = orm.relationship(l3_db.FloatingIP, backref=orm.backref("dns", lazy='joined', uselist=False, cascade='delete')) class PortDNS(model_base.BASEV2): __tablename__ = 'portdnses' port_id = sa.Column(sa.String(36), sa.ForeignKey('ports.id', ondelete="CASCADE"), primary_key=True, index=True) current_dns_name = 
sa.Column(sa.String(255), nullable=False) current_dns_domain = sa.Column(sa.String(255), nullable=False) previous_dns_name = sa.Column(sa.String(255), nullable=False) previous_dns_domain = sa.Column(sa.String(255), nullable=False) # Add a relationship to the Port model in order to instruct # SQLAlchemy to eagerly load this association port = orm.relationship(models_v2.Port, backref=orm.backref("dns", lazy='joined', uselist=False, cascade='delete')) class DNSActionsData(object): def __init__(self, current_dns_name=None, current_dns_domain=None, previous_dns_name=None, previous_dns_domain=None): self.current_dns_name = current_dns_name self.current_dns_domain = current_dns_domain self.previous_dns_name = previous_dns_name self.previous_dns_domain = previous_dns_domain class DNSDbMixin(object): """Mixin class to add DNS methods to db_base_plugin_v2.""" _dns_driver = None @property def dns_driver(self): if self._dns_driver: return self._dns_driver if not cfg.CONF.external_dns_driver: return try: self._dns_driver = driver.ExternalDNSService.get_instance() LOG.debug("External DNS driver loaded: %s", cfg.CONF.external_dns_driver) return self._dns_driver except ImportError: LOG.exception(_LE("ImportError exception occurred while loading " "the external DNS service driver")) raise dns.ExternalDNSDriverNotFound( driver=cfg.CONF.external_dns_driver) def _extend_floatingip_dict_dns(self, floatingip_res, floatingip_db): floatingip_res['dns_domain'] = '' floatingip_res['dns_name'] = '' if floatingip_db.dns: floatingip_res['dns_domain'] = floatingip_db.dns['dns_domain'] floatingip_res['dns_name'] = floatingip_db.dns['dns_name'] return floatingip_res # Register dict extend functions for floating ips db_base_plugin_v2.NeutronDbPluginV2.register_dict_extend_funcs( l3.FLOATINGIPS, ['_extend_floatingip_dict_dns']) def _process_dns_floatingip_create_precommit(self, context, floatingip_data, req_data): # expects to be called within a plugin's session dns_domain = req_data.get(dns.DNSDOMAIN) if not attributes.is_attr_set(dns_domain): return if not self.dns_driver: return dns_name = req_data[dns.DNSNAME] self._validate_floatingip_dns(dns_name, dns_domain) current_dns_name, current_dns_domain = ( self._get_requested_state_for_external_dns_service_create( context, floatingip_data, req_data)) dns_actions_data = None if current_dns_name and current_dns_domain: context.session.add(FloatingIPDNS( floatingip_id=floatingip_data['id'], dns_name=req_data[dns.DNSNAME], dns_domain=req_data[dns.DNSDOMAIN], published_dns_name=current_dns_name, published_dns_domain=current_dns_domain)) dns_actions_data = DNSActionsData( current_dns_name=current_dns_name, current_dns_domain=current_dns_domain) floatingip_data['dns_name'] = dns_name floatingip_data['dns_domain'] = dns_domain return dns_actions_data def _process_dns_floatingip_create_postcommit(self, context, floatingip_data, dns_actions_data): if not dns_actions_data: return self._add_ips_to_external_dns_service( context, dns_actions_data.current_dns_domain, dns_actions_data.current_dns_name, [floatingip_data['floating_ip_address']]) def _process_dns_floatingip_update_precommit(self, context, floatingip_data): # expects to be called within a plugin's session if not utils.is_extension_supported(self._core_plugin, dns.Dns.get_alias()): return if not self.dns_driver: return dns_data_db = context.session.query(FloatingIPDNS).filter_by( floatingip_id=floatingip_data['id']).one_or_none() if dns_data_db and dns_data_db['dns_name']: # dns_name and dns_domain assigned for floating ip. 
It doesn't # matter whether they are defined for internal port return current_dns_name, current_dns_domain = ( self._get_requested_state_for_external_dns_service_update( context, floatingip_data)) if dns_data_db: if (dns_data_db['published_dns_name'] != current_dns_name or dns_data_db['published_dns_domain'] != current_dns_domain): dns_actions_data = DNSActionsData( previous_dns_name=dns_data_db['published_dns_name'], previous_dns_domain=dns_data_db['published_dns_domain']) if current_dns_name and current_dns_domain: dns_data_db['published_dns_name'] = current_dns_name dns_data_db['published_dns_domain'] = current_dns_domain dns_actions_data.current_dns_name = current_dns_name dns_actions_data.current_dns_domain = current_dns_domain else: context.session.delete(dns_data_db) return dns_actions_data else: return if current_dns_name and current_dns_domain: context.session.add(FloatingIPDNS( floatingip_id=floatingip_data['id'], dns_name='', dns_domain='', published_dns_name=current_dns_name, published_dns_domain=current_dns_domain)) return DNSActionsData(current_dns_name=current_dns_name, current_dns_domain=current_dns_domain) def _process_dns_floatingip_update_postcommit(self, context, floatingip_data, dns_actions_data): if not dns_actions_data: return if dns_actions_data.previous_dns_name: self._delete_floatingip_from_external_dns_service( context, dns_actions_data.previous_dns_domain, dns_actions_data.previous_dns_name, [floatingip_data['floating_ip_address']]) if dns_actions_data.current_dns_name: self._add_ips_to_external_dns_service( context, dns_actions_data.current_dns_domain, dns_actions_data.current_dns_name, [floatingip_data['floating_ip_address']]) def _process_dns_floatingip_delete(self, context, floatingip_data): if not utils.is_extension_supported(self._core_plugin, dns.Dns.get_alias()): return dns_data_db = context.session.query(FloatingIPDNS).filter_by( floatingip_id=floatingip_data['id']).one_or_none() if dns_data_db: self._delete_floatingip_from_external_dns_service( context, dns_data_db['published_dns_domain'], dns_data_db['published_dns_name'], [floatingip_data['floating_ip_address']]) def _validate_floatingip_dns(self, dns_name, dns_domain): if dns_domain and not dns_name: msg = _("dns_domain cannot be specified without a dns_name") raise n_exc.BadRequest(resource='floatingip', msg=msg) if dns_name and not dns_domain: msg = _("dns_name cannot be specified without a dns_domain") raise n_exc.BadRequest(resource='floatingip', msg=msg) def _get_internal_port_dns_data(self, context, floatingip_data): internal_port = context.session.query(models_v2.Port).filter_by( id=floatingip_data['port_id']).one() dns_domain = None if internal_port['dns_name']: net_dns = context.session.query(NetworkDNSDomain).filter_by( network_id=internal_port['network_id']).one_or_none() if net_dns: dns_domain = net_dns['dns_domain'] return internal_port['dns_name'], dns_domain def _delete_floatingip_from_external_dns_service(self, context, dns_domain, dns_name, records): try: self.dns_driver.delete_record_set(context, dns_domain, dns_name, records) except (dns.DNSDomainNotFound, dns.DuplicateRecordSet) as e: LOG.exception(_LE("Error deleting Floating IP data from external " "DNS service. Name: '%(name)s'. Domain: " "'%(domain)s'. IP addresses '%(ips)s'. 
DNS " "service driver message '%(message)s'") % {"name": dns_name, "domain": dns_domain, "message": e.msg, "ips": ', '.join(records)}) def _get_requested_state_for_external_dns_service_create(self, context, floatingip_data, req_data): fip_dns_name = req_data[dns.DNSNAME] if fip_dns_name: return fip_dns_name, req_data[dns.DNSDOMAIN] if floatingip_data['port_id']: return self._get_internal_port_dns_data(context, floatingip_data) return None, None def _get_requested_state_for_external_dns_service_update(self, context, floatingip_data): if floatingip_data['port_id']: return self._get_internal_port_dns_data(context, floatingip_data) return None, None def _add_ips_to_external_dns_service(self, context, dns_domain, dns_name, records): try: self.dns_driver.create_record_set(context, dns_domain, dns_name, records) except (dns.DNSDomainNotFound, dns.DuplicateRecordSet) as e: LOG.exception(_LE("Error publishing floating IP data in external " "DNS service. Name: '%(name)s'. Domain: " "'%(domain)s'. DNS service driver message " "'%(message)s'") % {"name": dns_name, "domain": dns_domain, "message": e.msg}) neutron-8.4.0/neutron/db/common_db_mixin.py0000664000567000056710000003515413044372760022166 0ustar jenkinsjenkins00000000000000# Copyright (c) 2014 OpenStack Foundation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import contextlib import weakref from debtcollector import removals from oslo_log import log as logging from oslo_utils import excutils import six from sqlalchemy import and_ from sqlalchemy.ext import associationproxy from sqlalchemy import or_ from sqlalchemy import sql from neutron._i18n import _, _LE from neutron.common import exceptions as n_exc from neutron.db import sqlalchemyutils LOG = logging.getLogger(__name__) @contextlib.contextmanager def _noop_context_manager(): yield def safe_creation(context, create_fn, delete_fn, create_bindings, transaction=True): '''This function wraps logic of object creation in safe atomic way. In case of exception, object is deleted. More information when this method could be used can be found in developer guide - Effective Neutron: Database interaction section. http://docs.openstack.org/developer/neutron/devref/effective_neutron.html :param context: context :param create_fn: function without arguments that is called to create object and returns this object. :param delete_fn: function that is called to delete an object. It is called with object's id field as an argument. :param create_bindings: function that is called to create bindings for an object. It is called with object's id field as an argument. :param transaction: if true the whole operation will be wrapped in a transaction. if false, no transaction will be used. 
''' cm = (context.session.begin(subtransactions=True) if transaction else _noop_context_manager()) with cm: obj = create_fn() try: value = create_bindings(obj['id']) except Exception: with excutils.save_and_reraise_exception(): try: delete_fn(obj['id']) except Exception as e: LOG.error(_LE("Cannot clean up created object %(obj)s. " "Exception: %(exc)s"), {'obj': obj['id'], 'exc': e}) return obj, value def model_query_scope(context, model): # Unless a context has 'admin' or 'advanced-service' rights the # query will be scoped to a single tenant_id return ((not context.is_admin and hasattr(model, 'tenant_id')) and (not context.is_advsvc and hasattr(model, 'tenant_id'))) def model_query(context, model): query = context.session.query(model) # define basic filter condition for model query query_filter = None if model_query_scope(context, model): query_filter = (model.tenant_id == context.tenant_id) if query_filter is not None: query = query.filter(query_filter) return query class CommonDbMixin(object): """Common methods used in core and service plugins.""" # Plugins, mixin classes implementing extension will register # hooks into the dict below for "augmenting" the "core way" of # building a query for retrieving objects from a model class. # To this aim, the register_model_query_hook and unregister_query_hook # from this class should be invoked _model_query_hooks = {} # This dictionary will store methods for extending attributes of # api resources. Mixins can use this dict for adding their own methods # TODO(salvatore-orlando): Avoid using class-level variables _dict_extend_functions = {} @classmethod def register_model_query_hook(cls, model, name, query_hook, filter_hook, result_filters=None): """Register a hook to be invoked when a query is executed. Add the hooks to the _model_query_hooks dict. Models are the keys of this dict, whereas the value is another dict mapping hook names to callables performing the hook. Each hook has a "query" component, used to build the query expression and a "filter" component, which is used to build the filter expression. Query hooks take as input the query being built and return a transformed query expression. Filter hooks take as input the filter expression being built and return a transformed filter expression """ cls._model_query_hooks.setdefault(model, {})[name] = { 'query': query_hook, 'filter': filter_hook, 'result_filters': result_filters} @classmethod def register_dict_extend_funcs(cls, resource, funcs): cls._dict_extend_functions.setdefault(resource, []).extend(funcs) @property def safe_reference(self): """Return a weakref to the instance. Minimize the potential for the instance persisting unnecessarily in memory by returning a weakref proxy that won't prevent deallocation. 
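# weakref.proxy, as used by safe_reference here: callers may hold the
# proxy without keeping the mixin instance alive. Quick stand-alone
# demonstration (CPython reference-counting semantics):

import gc
import weakref


class _Plugin(object):
    name = 'demo'


_plugin = _Plugin()
_ref = weakref.proxy(_plugin)
assert _ref.name == 'demo'  # behaves like the referent

del _plugin
gc.collect()
try:
    _ref.name  # the referent is gone
except ReferenceError:
    pass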
""" return weakref.proxy(self) def model_query_scope(self, context, model): return model_query_scope(context, model) def _model_query(self, context, model): query = context.session.query(model) # define basic filter condition for model query query_filter = None if self.model_query_scope(context, model): if hasattr(model, 'rbac_entries'): query = query.outerjoin(model.rbac_entries) rbac_model = model.rbac_entries.property.mapper.class_ query_filter = ( (model.tenant_id == context.tenant_id) | ((rbac_model.action == 'access_as_shared') & ((rbac_model.target_tenant == context.tenant_id) | (rbac_model.target_tenant == '*')))) elif hasattr(model, 'shared'): query_filter = ((model.tenant_id == context.tenant_id) | (model.shared == sql.true())) else: query_filter = (model.tenant_id == context.tenant_id) # Execute query hooks registered from mixins and plugins for _name, hooks in six.iteritems(self._model_query_hooks.get(model, {})): query_hook = hooks.get('query') if isinstance(query_hook, six.string_types): query_hook = getattr(self, query_hook, None) if query_hook: query = query_hook(context, model, query) filter_hook = hooks.get('filter') if isinstance(filter_hook, six.string_types): filter_hook = getattr(self, filter_hook, None) if filter_hook: query_filter = filter_hook(context, model, query_filter) # NOTE(salvatore-orlando): 'if query_filter' will try to evaluate the # condition, raising an exception if query_filter is not None: query = query.filter(query_filter) return query def _fields(self, resource, fields): if fields: return dict(((key, item) for key, item in resource.items() if key in fields)) return resource @removals.remove(message='This method will be removed in N') def _get_tenant_id_for_create(self, context, resource): if context.is_admin and 'tenant_id' in resource: tenant_id = resource['tenant_id'] elif ('tenant_id' in resource and resource['tenant_id'] != context.tenant_id): reason = _('Cannot create resource for another tenant') raise n_exc.AdminRequired(reason=reason) else: tenant_id = context.tenant_id return tenant_id def _get_by_id(self, context, model, id): query = self._model_query(context, model) return query.filter(model.id == id).one() def _apply_filters_to_query(self, query, model, filters, context=None): if filters: for key, value in six.iteritems(filters): column = getattr(model, key, None) # NOTE(kevinbenton): if column is a hybrid property that # references another expression, attempting to convert to # a boolean will fail so we must compare to None. 
# See "An Important Expression Language Gotcha" in: # docs.sqlalchemy.org/en/rel_0_9/changelog/migration_06.html if column is not None: if not value: query = query.filter(sql.false()) return query if isinstance(column, associationproxy.AssociationProxy): # association proxies don't support in_ so we have to # do multiple equals matches query = query.filter( or_(*[column == v for v in value])) else: query = query.filter(column.in_(value)) elif key == 'shared' and hasattr(model, 'rbac_entries'): # translate a filter on shared into a query against the # object's rbac entries rbac = model.rbac_entries.property.mapper.class_ matches = [rbac.target_tenant == '*'] if context: matches.append(rbac.target_tenant == context.tenant_id) # any 'access_as_shared' records that match the # wildcard or requesting tenant is_shared = and_(rbac.action == 'access_as_shared', or_(*matches)) if not value[0]: # NOTE(kevinbenton): we need to find objects that don't # have an entry that matches the criteria above so # we use a subquery to exclude them. # We can't just filter the inverse of the query above # because that will still give us a network shared to # our tenant (or wildcard) if it's shared to another # tenant. # This is the column joining the table to rbac via # the object_id. We can't just use model.id because # subnets join on network.id so we have to inspect the # relationship. join_cols = model.rbac_entries.property.local_columns oid_col = list(join_cols)[0] is_shared = ~oid_col.in_( query.session.query(rbac.object_id). filter(is_shared) ) elif (not context or not self.model_query_scope(context, model)): # we only want to join if we aren't using the subquery # and if we aren't already joined because this is a # scoped query query = query.outerjoin(model.rbac_entries) query = query.filter(is_shared) for _nam, hooks in six.iteritems(self._model_query_hooks.get(model, {})): result_filter = hooks.get('result_filters', None) if isinstance(result_filter, six.string_types): result_filter = getattr(self, result_filter, None) if result_filter: query = result_filter(query, filters) return query def _apply_dict_extend_functions(self, resource_type, response, db_object): for func in self._dict_extend_functions.get( resource_type, []): args = (response, db_object) if isinstance(func, six.string_types): func = getattr(self, func, None) else: # must call unbound method - use self as 1st argument args = (self,) + args if func: func(*args) def _get_collection_query(self, context, model, filters=None, sorts=None, limit=None, marker_obj=None, page_reverse=False): collection = self._model_query(context, model) collection = self._apply_filters_to_query(collection, model, filters, context) if limit and page_reverse and sorts: sorts = [(s[0], not s[1]) for s in sorts] collection = sqlalchemyutils.paginate_query(collection, model, limit, sorts, marker_obj=marker_obj) return collection def _get_collection(self, context, model, dict_func, filters=None, fields=None, sorts=None, limit=None, marker_obj=None, page_reverse=False): query = self._get_collection_query(context, model, filters=filters, sorts=sorts, limit=limit, marker_obj=marker_obj, page_reverse=page_reverse) items = [dict_func(c, fields) for c in query] if limit and page_reverse: items.reverse() return items def _get_collection_count(self, context, model, filters=None): return self._get_collection_query(context, model, filters).count() def _get_marker_obj(self, context, resource, limit, marker): if limit and marker: return getattr(self, '_get_%s' % resource)(context, 
marker) return None def _filter_non_model_columns(self, data, model): """Remove all the attributes from data which are not columns or association proxies of the model passed as second parameter """ columns = [c.name for c in model.__table__.columns] return dict((k, v) for (k, v) in six.iteritems(data) if k in columns or isinstance(getattr(model, k, None), associationproxy.AssociationProxy)) neutron-8.4.0/neutron/db/portbindings_db.py0000664000567000056710000001104513044372760022165 0ustar jenkinsjenkins00000000000000# Copyright 2013 IBM Corp. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import sqlalchemy as sa from sqlalchemy import orm from neutron.api.v2 import attributes from neutron.db import db_base_plugin_v2 from neutron.db import model_base from neutron.db import models_v2 from neutron.db import portbindings_base from neutron.extensions import portbindings class PortBindingPort(model_base.BASEV2): port_id = sa.Column(sa.String(36), sa.ForeignKey('ports.id', ondelete="CASCADE"), primary_key=True) host = sa.Column(sa.String(255), nullable=False) port = orm.relationship( models_v2.Port, backref=orm.backref("portbinding", lazy='joined', uselist=False, cascade='delete')) class PortBindingMixin(portbindings_base.PortBindingBaseMixin): extra_binding_dict = None def _port_model_hook(self, context, original_model, query): query = query.outerjoin(PortBindingPort, (original_model.id == PortBindingPort.port_id)) return query def _port_result_filter_hook(self, query, filters): values = filters and filters.get(portbindings.HOST_ID, []) if not values: return query query = query.filter(PortBindingPort.host.in_(values)) return query db_base_plugin_v2.NeutronDbPluginV2.register_model_query_hook( models_v2.Port, "portbindings_port", '_port_model_hook', None, '_port_result_filter_hook') def _process_portbindings_create_and_update(self, context, port_data, port): binding_profile = port.get(portbindings.PROFILE) binding_profile_set = attributes.is_attr_set(binding_profile) if not binding_profile_set and binding_profile is not None: del port[portbindings.PROFILE] binding_vnic = port.get(portbindings.VNIC_TYPE) binding_vnic_set = attributes.is_attr_set(binding_vnic) if not binding_vnic_set and binding_vnic is not None: del port[portbindings.VNIC_TYPE] # REVISIT(irenab) Add support for vnic_type for plugins that # can handle more than one type. # Currently implemented for ML2 plugin that does not use # PortBindingMixin. 
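# The is_attr_set() tests above distinguish "attribute not sent at all"
# from "attribute explicitly sent as None". A sentinel-based sketch of
# that convention (neutron's real sentinel is
# attributes.ATTR_NOT_SPECIFIED; everything below is illustrative):

ATTR_NOT_SPECIFIED = object()


def attr_is_set(attribute):
    return attribute not in (None, ATTR_NOT_SPECIFIED)


port_req = {'binding:host_id': ATTR_NOT_SPECIFIED}
assert not attr_is_set(port_req['binding:host_id'])  # leave binding as-is
port_req['binding:host_id'] = 'compute-7'
assert attr_is_set(port_req['binding:host_id'])      # (re)bind to the host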
host = port_data.get(portbindings.HOST_ID) host_set = attributes.is_attr_set(host) with context.session.begin(subtransactions=True): bind_port = context.session.query( PortBindingPort).filter_by(port_id=port['id']).first() if host_set: if not bind_port: context.session.add(PortBindingPort(port_id=port['id'], host=host)) else: bind_port.host = host else: host = bind_port.host if bind_port else None self._extend_port_dict_binding_host(port, host) def get_port_host(self, context, port_id): with context.session.begin(subtransactions=True): bind_port = context.session.query( PortBindingPort).filter_by(port_id=port_id).first() return bind_port.host if bind_port else None def _extend_port_dict_binding_host(self, port_res, host): super(PortBindingMixin, self).extend_port_dict_binding( port_res, None) port_res[portbindings.HOST_ID] = host def extend_port_dict_binding(self, port_res, port_db): host = port_db.portbinding.host if port_db.portbinding else None self._extend_port_dict_binding_host(port_res, host) def _extend_port_dict_binding(plugin, port_res, port_db): if not isinstance(plugin, PortBindingMixin): return plugin.extend_port_dict_binding(port_res, port_db) # Register dict extend functions for ports db_base_plugin_v2.NeutronDbPluginV2.register_dict_extend_funcs( attributes.PORTS, [_extend_port_dict_binding]) neutron-8.4.0/neutron/db/l3_dvrscheduler_db.py0000664000567000056710000005532613044372760022565 0ustar jenkinsjenkins00000000000000# (c) Copyright 2014 Hewlett-Packard Development Company, L.P. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_log import log as logging from sqlalchemy import or_ from neutron.callbacks import events from neutron.callbacks import registry from neutron.callbacks import resources from neutron.common import constants as n_const from neutron.common import utils as n_utils from neutron.db import agentschedulers_db from neutron.db import l3_agentschedulers_db as l3agent_sch_db from neutron.db import models_v2 from neutron.extensions import portbindings from neutron import manager from neutron.plugins.common import constants as service_constants from neutron.plugins.ml2 import db as ml2_db from neutron.plugins.ml2 import models as ml2_models LOG = logging.getLogger(__name__) class L3_DVRsch_db_mixin(l3agent_sch_db.L3AgentSchedulerDbMixin): """Mixin class for L3 DVR scheduler. DVR currently supports the following use cases: - East/West (E/W) traffic between VMs: this is handled in a distributed manner across Compute Nodes without a centralized element. This includes E/W traffic between VMs on the same Compute Node. - North/South traffic for Floating IPs (FIP N/S): this is supported on the distributed routers on Compute Nodes without any centralized element. - North/South traffic for SNAT (SNAT N/S): this is supported via a centralized element that handles the SNAT traffic. 
To support these use cases, DVR routers rely on an L3 agent that runs on a central node (also known as Network Node or Service Node), as well as L3 agents that run individually on each Compute Node of an OpenStack cloud. Each L3 agent creates namespaces to route traffic according to the use cases outlined above. The mechanism adopted for creating and managing these namespaces is via (Router, Agent) binding and Scheduling in general. The main difference between distributed routers and centralized ones is that in the distributed case, multiple bindings will exist, one for each of the agents participating in the routed topology for the specific router. These bindings are created in the following circumstances: - A subnet is added to a router via router-interface-add, and that subnet has running VMs deployed in it. A binding will be created between the router and any L3 agent whose Compute Node is hosting the VM(s). - An external gateway is set to a router via router-gateway-set. A binding will be created between the router and the L3 agent running centrally on the Network Node. Therefore, any time a router operation occurs (create, update or delete), scheduling will determine whether the router needs to be associated with an L3 agent, just like a regular centralized router, with the difference that, in the distributed case, the bindings required are established based on the state of the router and the Compute Nodes. """ def dvr_handle_new_service_port(self, context, port, dest_host=None): """Handle new dvr service port creation. When a new dvr service port is created, this function will schedule a dvr router to the new compute node if needed and notify the l3 agent on that node. The 'dest_host' will provide the destination host of the port in case of service port migration. """ port_host = dest_host or port[portbindings.HOST_ID] l3_agent_on_host = (self.get_l3_agents( context, filters={'host': [port_host]}) or [None])[0] if not l3_agent_on_host: return if dest_host: # Make sure we create the floatingip agent gateway port # for the destination node if fip is associated with this # fixed port l3plugin = manager.NeutronManager.get_service_plugins().get( service_constants.L3_ROUTER_NAT) ( l3plugin. 
check_for_fip_and_create_agent_gw_port_on_host_if_not_exists( context, port, dest_host)) subnet_ids = [ip['subnet_id'] for ip in port['fixed_ips']] router_ids = self.get_dvr_routers_by_subnet_ids(context, subnet_ids) if router_ids: LOG.debug('DVR: Handle new service port, host %(host)s, ' 'router ids %(router_ids)s', {'host': port_host, 'router_ids': router_ids}) self.l3_rpc_notifier.routers_updated_on_host( context, router_ids, port_host) def get_dvr_routers_by_subnet_ids(self, context, subnet_ids): """Gets the dvr routers on vmport subnets.""" if not subnet_ids: return set() router_ids = set() filter_sub = {'fixed_ips': {'subnet_id': subnet_ids}, 'device_owner': [n_const.DEVICE_OWNER_DVR_INTERFACE]} subnet_ports = self._core_plugin.get_ports( context, filters=filter_sub) for subnet_port in subnet_ports: router_ids.add(subnet_port['device_id']) return router_ids def get_subnet_ids_on_router(self, context, router_id): """Return subnet IDs for interfaces attached to the given router.""" subnet_ids = set() filter_rtr = {'device_id': [router_id]} int_ports = self._core_plugin.get_ports(context, filters=filter_rtr) for int_port in int_ports: int_ips = int_port['fixed_ips'] if int_ips: int_subnet = int_ips[0]['subnet_id'] subnet_ids.add(int_subnet) else: LOG.debug('DVR: Could not find a subnet id ' 'for router %s', router_id) return subnet_ids def get_dvr_routers_to_remove(self, context, deleted_port): """Returns info about which routers should be removed In case dvr serviceable port was deleted we need to check if any dvr routers should be removed from l3 agent on port's host """ if not n_utils.is_dvr_serviced(deleted_port['device_owner']): return [] admin_context = context.elevated() port_host = deleted_port[portbindings.HOST_ID] subnet_ids = [ip['subnet_id'] for ip in deleted_port['fixed_ips']] router_ids = self.get_dvr_routers_by_subnet_ids(admin_context, subnet_ids) if not router_ids: LOG.debug('No DVR routers for this DVR port %(port)s ' 'on host %(host)s', {'port': deleted_port['id'], 'host': port_host}) return [] agent = self._get_agent_by_type_and_host( context, n_const.AGENT_TYPE_L3, port_host) removed_router_info = [] for router_id in router_ids: snat_binding = context.session.query( l3agent_sch_db.RouterL3AgentBinding).filter_by( router_id=router_id).filter_by( l3_agent_id=agent.id).first() if snat_binding: # not removing from the agent hosting SNAT for the router continue subnet_ids = self.get_subnet_ids_on_router(admin_context, router_id) if self._check_dvr_serviceable_ports_on_host( admin_context, port_host, subnet_ids): continue filter_rtr = {'device_id': [router_id], 'device_owner': [n_const.DEVICE_OWNER_DVR_INTERFACE]} int_ports = self._core_plugin.get_ports( admin_context, filters=filter_rtr) for port in int_ports: dvr_binding = (ml2_db. 
get_dvr_port_binding_by_host(context.session, port['id'], port_host)) if dvr_binding: # unbind this port from router dvr_binding['router_id'] = None dvr_binding.update(dvr_binding) info = {'router_id': router_id, 'host': port_host, 'agent_id': str(agent.id)} removed_router_info.append(info) LOG.debug('Router %(router_id)s on host %(host)s to be deleted', info) return removed_router_info def _get_active_l3_agent_routers_sync_data(self, context, host, agent, router_ids): if n_utils.is_extension_supported(self, n_const.L3_HA_MODE_EXT_ALIAS): return self.get_ha_sync_data_for_host(context, host, agent, router_ids=router_ids, active=True) return self._get_dvr_sync_data(context, host, agent, router_ids=router_ids, active=True) def get_hosts_to_notify(self, context, router_id): """Returns all hosts to send notification about router update""" hosts = super(L3_DVRsch_db_mixin, self).get_hosts_to_notify( context, router_id) router = self.get_router(context, router_id) if router.get('distributed', False): dvr_hosts = self._get_dvr_hosts_for_router(context, router_id) dvr_hosts = set(dvr_hosts) - set(hosts) state = agentschedulers_db.get_admin_state_up_filter() agents = self.get_l3_agents(context, active=state, filters={'host': dvr_hosts}) hosts += [a.host for a in agents] return hosts def _get_dvr_hosts_for_router(self, context, router_id): """Get a list of hosts where specified DVR router should be hosted It will first get IDs of all subnets connected to the router and then get a set of hosts where all dvr serviceable ports on those subnets are bound """ subnet_ids = self.get_subnet_ids_on_router(context, router_id) Binding = ml2_models.PortBinding Port = models_v2.Port IPAllocation = models_v2.IPAllocation query = context.session.query(Binding.host).distinct() query = query.join(Binding.port) query = query.join(Port.fixed_ips) query = query.filter(IPAllocation.subnet_id.in_(subnet_ids)) owner_filter = or_( Port.device_owner.startswith(n_const.DEVICE_OWNER_COMPUTE_PREFIX), Port.device_owner.in_( n_utils.get_other_dvr_serviced_device_owners())) query = query.filter(owner_filter) hosts = [item[0] for item in query] LOG.debug('Hosts for router %s: %s', router_id, hosts) return hosts def _get_dvr_subnet_ids_on_host_query(self, context, host): query = context.session.query( models_v2.IPAllocation.subnet_id).distinct() query = query.join(models_v2.IPAllocation.port) query = query.join(models_v2.Port.port_binding) query = query.filter(ml2_models.PortBinding.host == host) owner_filter = or_( models_v2.Port.device_owner.startswith( n_const.DEVICE_OWNER_COMPUTE_PREFIX), models_v2.Port.device_owner.in_( n_utils.get_other_dvr_serviced_device_owners())) query = query.filter(owner_filter) return query def _get_dvr_router_ids_for_host(self, context, host): subnet_ids_on_host_query = self._get_dvr_subnet_ids_on_host_query( context, host) query = context.session.query(models_v2.Port.device_id).distinct() query = query.filter( models_v2.Port.device_owner == n_const.DEVICE_OWNER_DVR_INTERFACE) query = query.join(models_v2.Port.fixed_ips) query = query.filter( models_v2.IPAllocation.subnet_id.in_(subnet_ids_on_host_query)) router_ids = [item[0] for item in query] LOG.debug('DVR routers on host %s: %s', host, router_ids) return router_ids def _get_router_ids_for_agent(self, context, agent_db, router_ids): result_set = set(super(L3_DVRsch_db_mixin, self)._get_router_ids_for_agent( context, agent_db, router_ids)) router_ids = set(router_ids or []) if router_ids and result_set == router_ids: # no need for extra dvr checks 
if requested routers are # explicitly scheduled to the agent return list(result_set) # dvr routers are not explicitly scheduled to agents on hosts with # dvr serviceable ports, so need special handling if self._get_agent_mode(agent_db) in [n_const.L3_AGENT_MODE_DVR, n_const.L3_AGENT_MODE_DVR_SNAT]: if not router_ids: result_set |= set(self._get_dvr_router_ids_for_host( context, agent_db['host'])) else: for router_id in (router_ids - result_set): subnet_ids = self.get_subnet_ids_on_router( context, router_id) if (subnet_ids and self._check_dvr_serviceable_ports_on_host( context, agent_db['host'], list(subnet_ids))): result_set.add(router_id) return list(result_set) def _check_dvr_serviceable_ports_on_host(self, context, host, subnet_ids): """Check for the existence of DVR serviceable ports on a host :param context: request context :param host: host to look for ports on :param subnet_ids: IDs of subnets to look for ports on :return: True if a DVR serviceable port exists on the host, otherwise False """ # db query will return ports for all subnets if subnet_ids is empty, # so need to check first if not subnet_ids: return False Binding = ml2_models.PortBinding IPAllocation = models_v2.IPAllocation Port = models_v2.Port query = context.session.query(Binding) query = query.join(Binding.port) query = query.join(Port.fixed_ips) query = query.filter( IPAllocation.subnet_id.in_(subnet_ids)) device_filter = or_( models_v2.Port.device_owner.startswith( n_const.DEVICE_OWNER_COMPUTE_PREFIX), models_v2.Port.device_owner.in_( n_utils.get_other_dvr_serviced_device_owners())) query = query.filter(device_filter) host_filter = or_( ml2_models.PortBinding.host == host, ml2_models.PortBinding.profile.contains(host)) query = query.filter(host_filter) return query.first() is not None def _dvr_handle_unbound_allowed_addr_pair_add( plugin, context, port, allowed_address_pair): updated_port = plugin.update_unbound_allowed_address_pair_port_binding( context, port, allowed_address_pair) if updated_port: LOG.debug("Allowed address pair port binding updated " "based on service port binding: %s", updated_port) plugin.dvr_handle_new_service_port(context, updated_port) plugin.update_arp_entry_for_dvr_service_port(context, port) def _dvr_handle_unbound_allowed_addr_pair_del( plugin, context, port, allowed_address_pair): updated_port = plugin.remove_unbound_allowed_address_pair_port_binding( context, port, allowed_address_pair) if updated_port: LOG.debug("Allowed address pair port binding removed " "from service port binding: %s", updated_port) aa_fixed_ips = plugin._get_allowed_address_pair_fixed_ips(context, port) if aa_fixed_ips: plugin.delete_arp_entry_for_dvr_service_port( context, port, fixed_ips_to_delete=aa_fixed_ips) def _notify_l3_agent_new_port(resource, event, trigger, **kwargs): LOG.debug('Received %(resource)s %(event)s', { 'resource': resource, 'event': event}) port = kwargs.get('port') if not port: return if n_utils.is_dvr_serviced(port['device_owner']): l3plugin = manager.NeutronManager.get_service_plugins().get( service_constants.L3_ROUTER_NAT) context = kwargs['context'] l3plugin.dvr_handle_new_service_port(context, port) l3plugin.update_arp_entry_for_dvr_service_port(context, port) def _notify_port_delete(event, resource, trigger, **kwargs): context = kwargs['context'] port = kwargs['port'] l3plugin = manager.NeutronManager.get_service_plugins().get( service_constants.L3_ROUTER_NAT) if port: port_host = port.get(portbindings.HOST_ID) allowed_address_pairs_list = port.get('allowed_address_pairs') if
allowed_address_pairs_list and port_host: for address_pair in allowed_address_pairs_list: _dvr_handle_unbound_allowed_addr_pair_del( l3plugin, context, port, address_pair) l3plugin.delete_arp_entry_for_dvr_service_port(context, port) removed_routers = l3plugin.get_dvr_routers_to_remove(context, port) for info in removed_routers: l3plugin.l3_rpc_notifier.router_removed_from_agent( context, info['router_id'], info['host']) def _notify_l3_agent_port_update(resource, event, trigger, **kwargs): new_port = kwargs.get('port') original_port = kwargs.get('original_port') if new_port and original_port: original_device_owner = original_port.get('device_owner', '') new_device_owner = new_port.get('device_owner', '') is_new_device_dvr_serviced = n_utils.is_dvr_serviced(new_device_owner) l3plugin = manager.NeutronManager.get_service_plugins().get( service_constants.L3_ROUTER_NAT) context = kwargs['context'] is_port_no_longer_serviced = ( n_utils.is_dvr_serviced(original_device_owner) and not n_utils.is_dvr_serviced(new_device_owner)) is_port_moved = ( original_port[portbindings.HOST_ID] and original_port[portbindings.HOST_ID] != new_port[portbindings.HOST_ID]) if is_port_no_longer_serviced or is_port_moved: removed_routers = l3plugin.get_dvr_routers_to_remove( context, original_port) if removed_routers: removed_router_args = { 'context': context, 'port': original_port, 'removed_routers': removed_routers, } _notify_port_delete( event, resource, trigger, **removed_router_args) fip = l3plugin._get_floatingip_on_port(context, port_id=original_port['id']) if fip and not (removed_routers and fip['router_id'] in removed_routers): l3plugin.l3_rpc_notifier.routers_updated_on_host( context, [fip['router_id']], original_port[portbindings.HOST_ID]) if not is_new_device_dvr_serviced: return is_new_port_binding_changed = ( new_port[portbindings.HOST_ID] and (original_port[portbindings.HOST_ID] != new_port[portbindings.HOST_ID])) dest_host = None new_port_profile = new_port.get(portbindings.PROFILE) if new_port_profile: dest_host = new_port_profile.get('migrating_to') # This check is required to prevent an ARP update # of the allowed_address_pair port. if new_port_profile.get('original_owner'): return # If dest_host is set, then the port profile has changed # and this port is in migration. The call below will # pre-create the router on the new host if ((is_new_port_binding_changed or dest_host) and is_new_device_dvr_serviced): l3plugin.dvr_handle_new_service_port(context, new_port, dest_host=dest_host) l3plugin.update_arp_entry_for_dvr_service_port( context, new_port) return # Check for allowed_address_pairs and port state new_port_host = new_port.get(portbindings.HOST_ID) allowed_address_pairs_list = new_port.get('allowed_address_pairs') if allowed_address_pairs_list and new_port_host: new_port_state = new_port.get('admin_state_up') original_port_state = original_port.get('admin_state_up') if new_port_state and not original_port_state: # Case where we activate the port from an inactive state. for address_pair in allowed_address_pairs_list: _dvr_handle_unbound_allowed_addr_pair_add( l3plugin, context, new_port, address_pair) return elif original_port_state and not new_port_state: # Case where we deactivate the port from an active state. for address_pair in allowed_address_pairs_list: _dvr_handle_unbound_allowed_addr_pair_del( l3plugin, context, original_port, address_pair) return elif new_port_state and original_port_state: # Case where the same port has additional address_pairs # added.
for address_pair in allowed_address_pairs_list: _dvr_handle_unbound_allowed_addr_pair_add( l3plugin, context, new_port, address_pair) return is_fixed_ips_changed = ( 'fixed_ips' in new_port and 'fixed_ips' in original_port and new_port['fixed_ips'] != original_port['fixed_ips']) if kwargs.get('mac_address_updated') or is_fixed_ips_changed: l3plugin.update_arp_entry_for_dvr_service_port( context, new_port) def subscribe(): registry.subscribe( _notify_l3_agent_port_update, resources.PORT, events.AFTER_UPDATE) registry.subscribe( _notify_l3_agent_new_port, resources.PORT, events.AFTER_CREATE) registry.subscribe( _notify_port_delete, resources.PORT, events.AFTER_DELETE) neutron-8.4.0/neutron/db/metering/0000775000567000056710000000000013044373210020244 5ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/db/metering/__init__.py0000664000567000056710000000000013044372736022357 0ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/db/metering/metering_rpc.py0000664000567000056710000000403413044372760023306 0ustar jenkinsjenkins00000000000000# Copyright (C) 2014 eNovance SAS # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_log import log as logging import oslo_messaging from neutron._i18n import _LE from neutron.common import constants as consts from neutron.common import utils from neutron import manager from neutron.plugins.common import constants as service_constants LOG = logging.getLogger(__name__) class MeteringRpcCallbacks(object): target = oslo_messaging.Target(version='1.0') def __init__(self, meter_plugin): self.meter_plugin = meter_plugin def get_sync_data_metering(self, context, **kwargs): l3_plugin = manager.NeutronManager.get_service_plugins().get( service_constants.L3_ROUTER_NAT) if not l3_plugin: return host = kwargs.get('host') if not utils.is_extension_supported( l3_plugin, consts.L3_AGENT_SCHEDULER_EXT_ALIAS) or not host: return self.meter_plugin.get_sync_data_metering(context) else: agents = l3_plugin.get_l3_agents(context, filters={'host': [host]}) if not agents: LOG.error(_LE('Unable to find agent %s.'), host) return routers = l3_plugin.list_routers_on_l3_agent(context, agents[0].id) router_ids = [router['id'] for router in routers['routers']] if not router_ids: return return self.meter_plugin.get_sync_data_metering(context, router_ids=router_ids) neutron-8.4.0/neutron/db/metering/metering_db.py0000664000567000056710000002613513044372760023115 0ustar jenkinsjenkins00000000000000# Copyright (C) 2013 eNovance SAS # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
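# ---------------------------------------------------------------------------
# Editor's sketch (standalone illustration, not part of this module; the
# plugin objects are stand-ins) of the host-scoped sync flow implemented by
# MeteringRpcCallbacks.get_sync_data_metering above: resolve the calling host
# to its L3 agent, collect the routers scheduled to that agent, and return
# metering sync data for those router IDs only.
def sync_metering_for_host(l3_plugin, meter_plugin, context, host):
    agents = l3_plugin.get_l3_agents(context, filters={'host': [host]})
    if not agents:
        return None  # unknown host; mirrors the LOG.error branch above
    routers = l3_plugin.list_routers_on_l3_agent(context, agents[0].id)
    router_ids = [router['id'] for router in routers['routers']]
    if not router_ids:
        return None
    return meter_plugin.get_sync_data_metering(context,
                                               router_ids=router_ids)
# ---------------------------------------------------------------------------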
import netaddr from oslo_utils import uuidutils import sqlalchemy as sa from sqlalchemy import orm from sqlalchemy import sql from neutron.api.rpc.agentnotifiers import metering_rpc_agent_api from neutron.api.v2 import attributes as attr from neutron.common import constants from neutron.db import common_db_mixin as base_db from neutron.db import l3_db from neutron.db import model_base from neutron.extensions import metering class MeteringLabelRule(model_base.BASEV2, model_base.HasId): direction = sa.Column(sa.Enum('ingress', 'egress', name='meteringlabels_direction')) remote_ip_prefix = sa.Column(sa.String(64)) metering_label_id = sa.Column(sa.String(36), sa.ForeignKey("meteringlabels.id", ondelete="CASCADE"), nullable=False) excluded = sa.Column(sa.Boolean, default=False, server_default=sql.false()) class MeteringLabel(model_base.BASEV2, model_base.HasId, model_base.HasTenant): name = sa.Column(sa.String(attr.NAME_MAX_LEN)) description = sa.Column(sa.String(attr.LONG_DESCRIPTION_MAX_LEN)) rules = orm.relationship(MeteringLabelRule, backref="label", cascade="delete", lazy="joined") routers = orm.relationship( l3_db.Router, primaryjoin="MeteringLabel.tenant_id==Router.tenant_id", foreign_keys='MeteringLabel.tenant_id', uselist=True) shared = sa.Column(sa.Boolean, default=False, server_default=sql.false()) class MeteringDbMixin(metering.MeteringPluginBase, base_db.CommonDbMixin): def __init__(self): self.meter_rpc = metering_rpc_agent_api.MeteringAgentNotifyAPI() def _make_metering_label_dict(self, metering_label, fields=None): res = {'id': metering_label['id'], 'name': metering_label['name'], 'description': metering_label['description'], 'shared': metering_label['shared'], 'tenant_id': metering_label['tenant_id']} return self._fields(res, fields) def create_metering_label(self, context, metering_label): m = metering_label['metering_label'] with context.session.begin(subtransactions=True): metering_db = MeteringLabel(id=uuidutils.generate_uuid(), description=m['description'], tenant_id=m['tenant_id'], name=m['name'], shared=m['shared']) context.session.add(metering_db) return self._make_metering_label_dict(metering_db) def delete_metering_label(self, context, label_id): with context.session.begin(subtransactions=True): try: label = self._get_by_id(context, MeteringLabel, label_id) except orm.exc.NoResultFound: raise metering.MeteringLabelNotFound(label_id=label_id) context.session.delete(label) def get_metering_label(self, context, label_id, fields=None): try: metering_label = self._get_by_id(context, MeteringLabel, label_id) except orm.exc.NoResultFound: raise metering.MeteringLabelNotFound(label_id=label_id) return self._make_metering_label_dict(metering_label, fields) def get_metering_labels(self, context, filters=None, fields=None, sorts=None, limit=None, marker=None, page_reverse=False): marker_obj = self._get_marker_obj(context, 'metering_labels', limit, marker) return self._get_collection(context, MeteringLabel, self._make_metering_label_dict, filters=filters, fields=fields, sorts=sorts, limit=limit, marker_obj=marker_obj, page_reverse=page_reverse) def _make_metering_label_rule_dict(self, metering_label_rule, fields=None): res = {'id': metering_label_rule['id'], 'metering_label_id': metering_label_rule['metering_label_id'], 'direction': metering_label_rule['direction'], 'remote_ip_prefix': metering_label_rule['remote_ip_prefix'], 'excluded': metering_label_rule['excluded']} return self._fields(res, fields) def get_metering_label_rules(self, context, filters=None, fields=None, 
sorts=None, limit=None, marker=None, page_reverse=False): marker_obj = self._get_marker_obj(context, 'metering_label_rules', limit, marker) return self._get_collection(context, MeteringLabelRule, self._make_metering_label_rule_dict, filters=filters, fields=fields, sorts=sorts, limit=limit, marker_obj=marker_obj, page_reverse=page_reverse) def get_metering_label_rule(self, context, rule_id, fields=None): try: metering_label_rule = self._get_by_id(context, MeteringLabelRule, rule_id) except orm.exc.NoResultFound: raise metering.MeteringLabelRuleNotFound(rule_id=rule_id) return self._make_metering_label_rule_dict(metering_label_rule, fields) def _validate_cidr(self, context, label_id, remote_ip_prefix, direction, excluded): r_ips = self.get_metering_label_rules(context, filters={'metering_label_id': [label_id], 'direction': [direction], 'excluded': [excluded]}, fields=['remote_ip_prefix']) cidrs = [r['remote_ip_prefix'] for r in r_ips] new_cidr_ipset = netaddr.IPSet([remote_ip_prefix]) if (netaddr.IPSet(cidrs) & new_cidr_ipset): raise metering.MeteringLabelRuleOverlaps( remote_ip_prefix=remote_ip_prefix) def create_metering_label_rule(self, context, metering_label_rule): m = metering_label_rule['metering_label_rule'] with context.session.begin(subtransactions=True): label_id = m['metering_label_id'] ip_prefix = m['remote_ip_prefix'] direction = m['direction'] excluded = m['excluded'] self._validate_cidr(context, label_id, ip_prefix, direction, excluded) metering_db = MeteringLabelRule(id=uuidutils.generate_uuid(), metering_label_id=label_id, direction=direction, excluded=m['excluded'], remote_ip_prefix=ip_prefix) context.session.add(metering_db) return self._make_metering_label_rule_dict(metering_db) def delete_metering_label_rule(self, context, rule_id): with context.session.begin(subtransactions=True): try: rule = self._get_by_id(context, MeteringLabelRule, rule_id) except orm.exc.NoResultFound: raise metering.MeteringLabelRuleNotFound(rule_id=rule_id) context.session.delete(rule) return self._make_metering_label_rule_dict(rule) def _get_metering_rules_dict(self, metering_label): rules = [] for rule in metering_label.rules: rule_dict = self._make_metering_label_rule_dict(rule) rules.append(rule_dict) return rules def _make_router_dict(self, router): res = {'id': router['id'], 'name': router['name'], 'tenant_id': router['tenant_id'], 'admin_state_up': router['admin_state_up'], 'status': router['status'], 'gw_port_id': router['gw_port_id'], constants.METERING_LABEL_KEY: []} return res def _process_sync_metering_data(self, context, labels): all_routers = None routers_dict = {} for label in labels: if label.shared: if not all_routers: all_routers = self._get_collection_query(context, l3_db.Router) routers = all_routers else: routers = label.routers for router in routers: router_dict = routers_dict.get( router['id'], self._make_router_dict(router)) rules = self._get_metering_rules_dict(label) data = {'id': label['id'], 'rules': rules} router_dict[constants.METERING_LABEL_KEY].append(data) routers_dict[router['id']] = router_dict return list(routers_dict.values()) def get_sync_data_for_rule(self, context, rule): label = context.session.query(MeteringLabel).get( rule['metering_label_id']) if label.shared: routers = self._get_collection_query(context, l3_db.Router) else: routers = label.routers routers_dict = {} for router in routers: router_dict = routers_dict.get(router['id'], self._make_router_dict(router)) data = {'id': label['id'], 'rule': rule} 
router_dict[constants.METERING_LABEL_KEY].append(data) routers_dict[router['id']] = router_dict return list(routers_dict.values()) def get_sync_data_metering(self, context, label_id=None, router_ids=None): labels = context.session.query(MeteringLabel) if label_id: labels = labels.filter(MeteringLabel.id == label_id) elif router_ids: labels = (labels.join(MeteringLabel.routers). filter(l3_db.Router.id.in_(router_ids))) return self._process_sync_metering_data(context, labels) neutron-8.4.0/neutron/db/netmtu_db.py0000664000567000056710000000215413044372760021000 0ustar jenkinsjenkins00000000000000# Copyright (c) 2015 OpenStack Foundation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron.api.v2 import attributes from neutron.db import db_base_plugin_v2 from neutron.extensions import netmtu class Netmtu_db_mixin(object): """Mixin class to add network MTU methods to db_base_plugin_v2.""" def _extend_network_dict_mtu(self, network_res, network_db): network_res[netmtu.MTU] = network_db.mtu return network_res db_base_plugin_v2.NeutronDbPluginV2.register_dict_extend_funcs( attributes.NETWORKS, ['_extend_network_dict_mtu']) neutron-8.4.0/neutron/db/portsecurity_db_common.py0000664000567000056710000001470013044372760023610 0ustar jenkinsjenkins00000000000000# Copyright 2013 VMware, Inc. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
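# ---------------------------------------------------------------------------
# Editor's sketch (standalone, simplified stand-in rather than the Neutron
# implementation) of the register_dict_extend_funcs() pattern used by
# Netmtu_db_mixin above: the core plugin keeps a per-resource list of hook
# functions, and each hook copies one DB attribute into the API response
# dict when the resource is serialized.
_dict_extend_funcs = {'networks': []}

def register_dict_extend_func(resource, func):
    _dict_extend_funcs[resource].append(func)

def make_resource_dict(resource, response, db_obj):
    for func in _dict_extend_funcs[resource]:
        func(response, db_obj)
    return response

def _extend_network_dict_mtu(network_res, network_db):
    network_res['mtu'] = network_db.get('mtu')
    return network_res

register_dict_extend_func('networks', _extend_network_dict_mtu)
# prints {'id': 'net-1', 'mtu': 1450}
print(make_resource_dict('networks', {'id': 'net-1'}, {'mtu': 1450}))
# ---------------------------------------------------------------------------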
import sqlalchemy as sa from sqlalchemy import orm from sqlalchemy.orm import exc from neutron.db import model_base from neutron.db import models_v2 from neutron.extensions import portsecurity as psec class PortSecurityBinding(model_base.BASEV2): port_id = sa.Column(sa.String(36), sa.ForeignKey('ports.id', ondelete="CASCADE"), primary_key=True) port_security_enabled = sa.Column(sa.Boolean(), nullable=False) # Add a relationship to the Port model in order to be able to # instruct SQLAlchemy to eagerly load the port security binding port = orm.relationship( models_v2.Port, backref=orm.backref("port_security", uselist=False, cascade='delete', lazy='joined')) class NetworkSecurityBinding(model_base.BASEV2): network_id = sa.Column(sa.String(36), sa.ForeignKey('networks.id', ondelete="CASCADE"), primary_key=True) port_security_enabled = sa.Column(sa.Boolean(), nullable=False) # Add a relationship to the Network model in order to be able to instruct # SQLAlchemy to eagerly load the default port security setting for ports # on this network network = orm.relationship( models_v2.Network, backref=orm.backref("port_security", uselist=False, cascade='delete', lazy='joined')) class PortSecurityDbCommon(object): """Mixin class to add port security.""" def _extend_port_security_dict(self, response_data, db_data): if db_data.get('port_security') is None: response_data[psec.PORTSECURITY] = psec.DEFAULT_PORT_SECURITY else: response_data[psec.PORTSECURITY] = ( db_data['port_security'][psec.PORTSECURITY]) def _process_network_port_security_create( self, context, network_req, network_res): with context.session.begin(subtransactions=True): db = NetworkSecurityBinding( network_id=network_res['id'], port_security_enabled=network_req[psec.PORTSECURITY]) context.session.add(db) network_res[psec.PORTSECURITY] = network_req[psec.PORTSECURITY] return self._make_network_port_security_dict(db) def _process_port_port_security_create( self, context, port_req, port_res): with context.session.begin(subtransactions=True): db = PortSecurityBinding( port_id=port_res['id'], port_security_enabled=port_req[psec.PORTSECURITY]) context.session.add(db) port_res[psec.PORTSECURITY] = port_req[psec.PORTSECURITY] return self._make_port_security_dict(db) def _get_network_security_binding(self, context, network_id): try: query = self._model_query(context, NetworkSecurityBinding) binding = query.filter( NetworkSecurityBinding.network_id == network_id).one() return binding.port_security_enabled except exc.NoResultFound: # NOTE(ihrachys) the resource may have been created before port # security extension was enabled; return default value return psec.DEFAULT_PORT_SECURITY def _get_port_security_binding(self, context, port_id): try: query = self._model_query(context, PortSecurityBinding) binding = query.filter( PortSecurityBinding.port_id == port_id).one() return binding.port_security_enabled except exc.NoResultFound: # NOTE(ihrachys) the resource may have been created before port # security extension was enabled; return default value return psec.DEFAULT_PORT_SECURITY def _process_port_port_security_update( self, context, port_req, port_res): if psec.PORTSECURITY not in port_req: return port_security_enabled = port_req[psec.PORTSECURITY] try: query = self._model_query(context, PortSecurityBinding) port_id = port_res['id'] binding = query.filter( PortSecurityBinding.port_id == port_id).one() binding.port_security_enabled = port_security_enabled port_res[psec.PORTSECURITY] = port_security_enabled except exc.NoResultFound: # NOTE(ihrachys) the resource
may have been created before port # security extension was enabled; create the binding model self._process_port_port_security_create( context, port_req, port_res) def _process_network_port_security_update( self, context, network_req, network_res): if psec.PORTSECURITY not in network_req: return port_security_enabled = network_req[psec.PORTSECURITY] try: query = self._model_query(context, NetworkSecurityBinding) network_id = network_res['id'] binding = query.filter( NetworkSecurityBinding.network_id == network_id).one() binding.port_security_enabled = port_security_enabled network_res[psec.PORTSECURITY] = port_security_enabled except exc.NoResultFound: # NOTE(ihrachys) the resource may have been created before port # security extension was enabled; create the binding model self._process_network_port_security_create( context, network_req, network_res) def _make_network_port_security_dict(self, port_security, fields=None): res = {'network_id': port_security['network_id'], psec.PORTSECURITY: port_security.port_security_enabled} return self._fields(res, fields) def _make_port_security_dict(self, port, fields=None): res = {'port_id': port['port_id'], psec.PORTSECURITY: port.port_security_enabled} return self._fields(res, fields) neutron-8.4.0/neutron/db/l3_gwmode_db.py0000664000567000056710000000673013044372760021350 0ustar jenkinsjenkins00000000000000# Copyright 2013 VMware, Inc. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
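# ---------------------------------------------------------------------------
# Editor's sketch (standalone and runnable with SQLAlchemy 1.4+; the model
# and names are hypothetical) of the lookup-with-default pattern used by
# _get_port_security_binding above: a resource created before the extension
# was enabled has no binding row, so NoResultFound maps to the extension's
# default instead of an error.
import sqlalchemy as sa
from sqlalchemy import orm
from sqlalchemy.orm import exc

Base = orm.declarative_base()
DEFAULT_PORT_SECURITY = True  # stands in for psec.DEFAULT_PORT_SECURITY

class Binding(Base):
    __tablename__ = 'bindings'
    port_id = sa.Column(sa.String(36), primary_key=True)
    port_security_enabled = sa.Column(sa.Boolean, nullable=False)

def get_binding(session, port_id):
    try:
        binding = session.query(Binding).filter(
            Binding.port_id == port_id).one()
        return binding.port_security_enabled
    except exc.NoResultFound:
        # pre-extension resource: fall back to the default value
        return DEFAULT_PORT_SECURITY

engine = sa.create_engine('sqlite://')
Base.metadata.create_all(engine)
with orm.Session(engine) as session:
    session.add(Binding(port_id='port-1', port_security_enabled=False))
    session.commit()
    print(get_binding(session, 'port-1'))  # False, from the binding row
    print(get_binding(session, 'port-2'))  # True, the default
# ---------------------------------------------------------------------------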
# from oslo_config import cfg import sqlalchemy as sa from sqlalchemy import sql from neutron._i18n import _ from neutron.db import db_base_plugin_v2 from neutron.db import l3_db from neutron.extensions import l3 OPTS = [ cfg.BoolOpt('enable_snat_by_default', default=True, help=_('Define the default value of enable_snat if not ' 'provided in external_gateway_info.')) ] cfg.CONF.register_opts(OPTS) EXTERNAL_GW_INFO = l3.EXTERNAL_GW_INFO # Modify the Router Data Model adding the enable_snat attribute setattr(l3_db.Router, 'enable_snat', sa.Column(sa.Boolean, default=True, server_default=sql.true(), nullable=False)) class L3_NAT_dbonly_mixin(l3_db.L3_NAT_dbonly_mixin): """Mixin class to add configurable gateway modes.""" # Register dict extend functions for ports and networks db_base_plugin_v2.NeutronDbPluginV2.register_dict_extend_funcs( l3.ROUTERS, ['_extend_router_dict_gw_mode']) def _extend_router_dict_gw_mode(self, router_res, router_db): if router_db.gw_port_id: nw_id = router_db.gw_port['network_id'] router_res[EXTERNAL_GW_INFO] = { 'network_id': nw_id, 'enable_snat': router_db.enable_snat, 'external_fixed_ips': [ {'subnet_id': ip["subnet_id"], 'ip_address': ip["ip_address"]} for ip in router_db.gw_port['fixed_ips'] ] } def _update_router_gw_info(self, context, router_id, info, router=None): # Load the router only if necessary if not router: router = self._get_router(context, router_id) with context.session.begin(subtransactions=True): router.enable_snat = self._get_enable_snat(info) # Calls superclass, pass router db object for avoiding re-loading super(L3_NAT_dbonly_mixin, self)._update_router_gw_info( context, router_id, info, router=router) # Returning the router might come back useful if this # method is overridden in child classes return router @staticmethod def _get_enable_snat(info): if info and 'enable_snat' in info: return info['enable_snat'] # if enable_snat is not specified then use the default value return cfg.CONF.enable_snat_by_default def _build_routers_list(self, context, routers, gw_ports): for rtr in routers: gw_port_id = rtr['gw_port_id'] # Collect gw ports only if available if gw_port_id and gw_ports.get(gw_port_id): rtr['gw_port'] = gw_ports[gw_port_id] # Add enable_snat key rtr['enable_snat'] = rtr[EXTERNAL_GW_INFO]['enable_snat'] return routers class L3_NAT_db_mixin(L3_NAT_dbonly_mixin, l3_db.L3_NAT_db_mixin): pass neutron-8.4.0/neutron/db/ipam_backend_mixin.py0000664000567000056710000005030213044372760022616 0ustar jenkinsjenkins00000000000000# Copyright (c) 2015 OpenStack Foundation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
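# ---------------------------------------------------------------------------
# Editor's sketch (standalone; a private ConfigOpts instance is used here
# instead of the global cfg.CONF) of the defaulting rule in _get_enable_snat
# above: an explicit enable_snat in external_gateway_info wins, otherwise
# the enable_snat_by_default option decides.
from oslo_config import cfg

conf = cfg.ConfigOpts()
conf.register_opts([cfg.BoolOpt('enable_snat_by_default', default=True)])
conf([])  # parse an empty command line so option values are available

def get_enable_snat(info):
    if info and 'enable_snat' in info:
        return info['enable_snat']
    # enable_snat not specified: use the configured default
    return conf.enable_snat_by_default

print(get_enable_snat(None))                    # True
print(get_enable_snat({'enable_snat': False}))  # False
# ---------------------------------------------------------------------------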
import collections import itertools import netaddr from oslo_config import cfg from oslo_db import exception as db_exc from oslo_log import log as logging from sqlalchemy.orm import exc as orm_exc from neutron._i18n import _, _LI from neutron.api.v2 import attributes from neutron.common import constants from neutron.common import exceptions as n_exc from neutron.common import ipv6_utils from neutron.common import utils as common_utils from neutron.db import db_base_plugin_common from neutron.db import models_v2 from neutron.ipam import utils as ipam_utils LOG = logging.getLogger(__name__) class IpamBackendMixin(db_base_plugin_common.DbBasePluginCommon): """Contains IPAM specific code which is common for both backends. """ # Tracks changes in ip allocation for port using namedtuple Changes = collections.namedtuple('Changes', 'add original remove') @staticmethod def _rebuild_availability_ranges(context, subnets): """Should be redefined for non-ipam backend only """ pass @staticmethod def _gateway_ip_str(subnet, cidr_net): if subnet.get('gateway_ip') is attributes.ATTR_NOT_SPECIFIED: return str(netaddr.IPNetwork(cidr_net).network + 1) return subnet.get('gateway_ip') @staticmethod def pools_to_ip_range(ip_pools): ip_range_pools = [] for ip_pool in ip_pools: try: ip_range_pools.append(netaddr.IPRange(ip_pool['start'], ip_pool['end'])) except netaddr.AddrFormatError: LOG.info(_LI("Found invalid IP address in pool: " "%(start)s - %(end)s:"), {'start': ip_pool['start'], 'end': ip_pool['end']}) raise n_exc.InvalidAllocationPool(pool=ip_pool) return ip_range_pools def delete_subnet(self, context, subnet_id): pass def validate_pools_with_subnetpool(self, subnet): """Verifies that allocation pools are set correctly Allocation pools can be set for specific subnet request only """ has_allocpool = attributes.is_attr_set(subnet['allocation_pools']) is_any_subnetpool_request = not attributes.is_attr_set(subnet['cidr']) if is_any_subnetpool_request and has_allocpool: reason = _("allocation_pools allowed only " "for specific subnet requests.") raise n_exc.BadRequest(resource='subnets', msg=reason) def _validate_ip_version_with_subnetpool(self, subnet, subnetpool): """Validates ip version for subnet_pool and requested subnet""" ip_version = subnet.get('ip_version') has_ip_version = attributes.is_attr_set(ip_version) if has_ip_version and ip_version != subnetpool.ip_version: args = {'req_ver': str(subnet['ip_version']), 'pool_ver': str(subnetpool.ip_version)} reason = _("Cannot allocate IPv%(req_ver)s subnet from " "IPv%(pool_ver)s subnet pool") % args raise n_exc.BadRequest(resource='subnets', msg=reason) def _update_db_port(self, context, db_port, new_port, network_id, new_mac): # Remove all attributes in new_port which are not in the port DB model # and then update the port try: db_port.update(self._filter_non_model_columns(new_port, models_v2.Port)) context.session.flush() except db_exc.DBDuplicateEntry: raise n_exc.MacAddressInUse(net_id=network_id, mac=new_mac) def _update_subnet_host_routes(self, context, id, s): def _combine(ht): return ht['destination'] + "_" + ht['nexthop'] old_route_list = self._get_route_by_subnet(context, id) new_route_set = set([_combine(route) for route in s['host_routes']]) old_route_set = set([_combine(route) for route in old_route_list]) for route_str in old_route_set - new_route_set: for route in old_route_list: if _combine(route) == route_str: context.session.delete(route) for route_str in new_route_set - old_route_set: route = models_v2.SubnetRoute( 
destination=route_str.partition("_")[0], nexthop=route_str.partition("_")[2], subnet_id=id) context.session.add(route) # Gather host routes for result new_routes = [] for route_str in new_route_set: new_routes.append( {'destination': route_str.partition("_")[0], 'nexthop': route_str.partition("_")[2]}) del s["host_routes"] return new_routes def _update_subnet_dns_nameservers(self, context, id, s): old_dns_list = self._get_dns_by_subnet(context, id) new_dns_addr_list = s["dns_nameservers"] # NOTE(changzhi) delete all dns nameservers from db # when update subnet's DNS nameservers. And store new # nameservers with order one by one. for dns in old_dns_list: context.session.delete(dns) for order, server in enumerate(new_dns_addr_list): dns = models_v2.DNSNameServer( address=server, order=order, subnet_id=id) context.session.add(dns) del s["dns_nameservers"] return new_dns_addr_list def _update_subnet_allocation_pools(self, context, subnet_id, s): context.session.query(models_v2.IPAllocationPool).filter_by( subnet_id=subnet_id).delete() pools = [(netaddr.IPAddress(p.first, p.version).format(), netaddr.IPAddress(p.last, p.version).format()) for p in s['allocation_pools']] new_pools = [models_v2.IPAllocationPool(first_ip=p[0], last_ip=p[1], subnet_id=subnet_id) for p in pools] context.session.add_all(new_pools) # Call static method with self to redefine in child # (non-pluggable backend) if not ipv6_utils.is_ipv6_pd_enabled(s): self._rebuild_availability_ranges(context, [s]) # Gather new pools for result result_pools = [{'start': p[0], 'end': p[1]} for p in pools] del s['allocation_pools'] return result_pools def update_db_subnet(self, context, subnet_id, s, oldpools): changes = {} if "dns_nameservers" in s: changes['dns_nameservers'] = ( self._update_subnet_dns_nameservers(context, subnet_id, s)) if "host_routes" in s: changes['host_routes'] = self._update_subnet_host_routes( context, subnet_id, s) if "allocation_pools" in s: changes['allocation_pools'] = ( self._update_subnet_allocation_pools(context, subnet_id, s)) subnet = self._get_subnet(context, subnet_id) subnet.update(s) return subnet, changes def _validate_subnet_cidr(self, context, network, new_subnet_cidr): """Validate the CIDR for a subnet. Verifies the specified CIDR does not overlap with the ones defined for the other subnets specified for this network, or with any other CIDR if overlapping IPs are disabled. Does not apply to subnets with temporary IPv6 Prefix Delegation CIDRs (::/64). """ new_subnet_ipset = netaddr.IPSet([new_subnet_cidr]) # Disallow subnets with prefix length 0 as they will lead to # dnsmasq failures (see bug 1362651). # This is not a discrimination against /0 subnets. # A /0 subnet is conceptually possible but hardly a practical # scenario for neutron's use cases. 
for cidr in new_subnet_ipset.iter_cidrs(): if cidr.prefixlen == 0: err_msg = _("0 is not allowed as CIDR prefix length") raise n_exc.InvalidInput(error_message=err_msg) if cfg.CONF.allow_overlapping_ips: subnet_list = network.subnets else: subnet_list = self._get_all_subnets(context) for subnet in subnet_list: if ((netaddr.IPSet([subnet.cidr]) & new_subnet_ipset) and subnet.cidr != constants.PROVISIONAL_IPV6_PD_PREFIX): # don't give out details of the overlapping subnet err_msg = (_("Requested subnet with cidr: %(cidr)s for " "network: %(network_id)s overlaps with another " "subnet") % {'cidr': new_subnet_cidr, 'network_id': network.id}) LOG.info(_LI("Validation for CIDR: %(new_cidr)s failed - " "overlaps with subnet %(subnet_id)s " "(CIDR: %(cidr)s)"), {'new_cidr': new_subnet_cidr, 'subnet_id': subnet.id, 'cidr': subnet.cidr}) raise n_exc.InvalidInput(error_message=err_msg) def _validate_network_subnetpools(self, network, new_subnetpool_id, ip_version): """Validate that all subnets on the given network have been allocated from the same subnet pool as new_subnetpool_id """ for subnet in network.subnets: if (subnet.ip_version == ip_version and new_subnetpool_id != subnet.subnetpool_id): raise n_exc.NetworkSubnetPoolAffinityError() def validate_allocation_pools(self, ip_pools, subnet_cidr): """Validate IP allocation pools. Verify that the start and end addresses of each allocation pool are valid, i.e., constituted by valid and appropriately ordered IP addresses. Also, verify that pools do not overlap among themselves. Finally, verify that each range falls within the subnet's CIDR. """ subnet = netaddr.IPNetwork(subnet_cidr) subnet_first_ip = netaddr.IPAddress(subnet.first + 1) # last address is broadcast in v4 subnet_last_ip = netaddr.IPAddress(subnet.last - (subnet.version == 4)) LOG.debug("Performing IP validity checks on allocation pools") ip_sets = [] for ip_pool in ip_pools: start_ip = netaddr.IPAddress(ip_pool.first, ip_pool.version) end_ip = netaddr.IPAddress(ip_pool.last, ip_pool.version) if (start_ip.version != subnet.version or end_ip.version != subnet.version): LOG.info(_LI("Specified IP addresses do not match " "the subnet IP version")) raise n_exc.InvalidAllocationPool(pool=ip_pool) if start_ip < subnet_first_ip or end_ip > subnet_last_ip: LOG.info(_LI("Found pool larger than subnet " "CIDR:%(start)s - %(end)s"), {'start': start_ip, 'end': end_ip}) raise n_exc.OutOfBoundsAllocationPool( pool=ip_pool, subnet_cidr=subnet_cidr) # Valid allocation pool # Create an IPSet for it for easily verifying overlaps ip_sets.append(netaddr.IPSet(ip_pool.cidrs())) LOG.debug("Checking for overlaps among allocation pools " "and gateway ip") ip_ranges = ip_pools[:] # Use integer cursors as an efficient way of implementing # the comparison while avoiding comparing the same pair twice for l_cursor in range(len(ip_sets)): for r_cursor in range(l_cursor + 1, len(ip_sets)): if ip_sets[l_cursor] & ip_sets[r_cursor]: l_range = ip_ranges[l_cursor] r_range = ip_ranges[r_cursor] LOG.info(_LI("Found overlapping ranges: %(l_range)s and " "%(r_range)s"), {'l_range': l_range, 'r_range': r_range}) raise n_exc.OverlappingAllocationPools( pool_1=l_range, pool_2=r_range, subnet_cidr=subnet_cidr) def _validate_max_ips_per_port(self, fixed_ip_list, device_owner): if common_utils.is_port_trusted({'device_owner': device_owner}): return if len(fixed_ip_list) > cfg.CONF.max_fixed_ips_per_port: msg = _('Exceeded maximum amount of fixed ips per port.') raise n_exc.InvalidInput(error_message=msg) def _get_subnet_for_fixed_ip(self, context, fixed,
network_id): if 'subnet_id' in fixed: subnet = self._get_subnet(context, fixed['subnet_id']) if subnet['network_id'] != network_id: msg = (_("Failed to create port on network %(network_id)s" ", because fixed_ips included invalid subnet " "%(subnet_id)s") % {'network_id': network_id, 'subnet_id': fixed['subnet_id']}) raise n_exc.InvalidInput(error_message=msg) # Ensure that the IP is valid on the subnet if ('ip_address' in fixed and not ipam_utils.check_subnet_ip(subnet['cidr'], fixed['ip_address'])): raise n_exc.InvalidIpForSubnet(ip_address=fixed['ip_address']) return subnet if 'ip_address' not in fixed: msg = _('IP allocation requires subnet_id or ip_address') raise n_exc.InvalidInput(error_message=msg) filter = {'network_id': [network_id]} subnets = self._get_subnets(context, filters=filter) for subnet in subnets: if ipam_utils.check_subnet_ip(subnet['cidr'], fixed['ip_address']): return subnet raise n_exc.InvalidIpForNetwork(ip_address=fixed['ip_address']) def generate_pools(self, cidr, gateway_ip): return ipam_utils.generate_pools(cidr, gateway_ip) def _prepare_allocation_pools(self, allocation_pools, cidr, gateway_ip): """Returns allocation pools represented as list of IPRanges""" if not attributes.is_attr_set(allocation_pools): return self.generate_pools(cidr, gateway_ip) ip_range_pools = self.pools_to_ip_range(allocation_pools) self.validate_allocation_pools(ip_range_pools, cidr) if gateway_ip: self.validate_gw_out_of_pools(gateway_ip, ip_range_pools) return ip_range_pools def validate_gw_out_of_pools(self, gateway_ip, pools): for pool_range in pools: if netaddr.IPAddress(gateway_ip) in pool_range: raise n_exc.GatewayConflictWithAllocationPools( pool=pool_range, ip_address=gateway_ip) def _is_ip_required_by_subnet(self, context, subnet_id, device_owner): # For ports that are not router ports, retain any automatic # (non-optional, e.g. IPv6 SLAAC) addresses. # NOTE: Need to check the SNAT ports for DVR routers here since # they consume an IP. if device_owner in constants.ROUTER_INTERFACE_OWNERS_SNAT: return True subnet = self._get_subnet(context, subnet_id) return not (ipv6_utils.is_auto_address_subnet(subnet) and not ipv6_utils.is_ipv6_pd_enabled(subnet)) def _get_changed_ips_for_port(self, context, original_ips, new_ips, device_owner): """Calculate changes in IPs for the port.""" # Collect auto addressed subnet ids that has to be removed on update delete_subnet_ids = set(ip['subnet_id'] for ip in new_ips if ip.get('delete_subnet')) ips = [ip for ip in new_ips if ip.get('subnet_id') not in delete_subnet_ids] # the new_ips contain all of the fixed_ips that are to be updated self._validate_max_ips_per_port(ips, device_owner) add_ips = [] remove_ips = [] ips_map = {ip['ip_address']: ip for ip in itertools.chain(new_ips, original_ips) if 'ip_address' in ip} new = set() for ip in new_ips: if ip.get('subnet_id') not in delete_subnet_ids: if 'ip_address' in ip: new.add(ip['ip_address']) else: add_ips.append(ip) # Convert original ip addresses to sets orig = set(ip['ip_address'] for ip in original_ips) add = new - orig unchanged = new & orig remove = orig - new # Convert results back to list of dicts add_ips += [ips_map[ip] for ip in add] prev_ips = [ips_map[ip] for ip in unchanged] # Mark ip for removing if it is not found in new_ips # and subnet requires ip to be set manually. # For auto addressed subnet leave ip unchanged # unless it is explicitly marked for delete. 
for ip in remove: subnet_id = ips_map[ip]['subnet_id'] ip_required = self._is_ip_required_by_subnet(context, subnet_id, device_owner) if ip_required or subnet_id in delete_subnet_ids: remove_ips.append(ips_map[ip]) else: prev_ips.append(ips_map[ip]) return self.Changes(add=add_ips, original=prev_ips, remove=remove_ips) def delete_port(self, context, port_id): query = (context.session.query(models_v2.Port). enable_eagerloads(False).filter_by(id=port_id)) # Use of the ORM mapper is needed for ensuring appropriate resource # tracking; otherwise SQL Alchemy events won't be triggered. # For more info check 'caveats' in doc/source/devref/quota.rst try: context.session.delete(query.first()) except orm_exc.UnmappedInstanceError: LOG.debug("Port %s was not found and therefore no delete " "operation was performed", port_id) def _save_subnet(self, context, network, subnet_args, dns_nameservers, host_routes, subnet_request): self._validate_subnet_cidr(context, network, subnet_args['cidr']) self._validate_network_subnetpools(network, subnet_args['subnetpool_id'], subnet_args['ip_version']) subnet = models_v2.Subnet(**subnet_args) context.session.add(subnet) # NOTE(changzhi) Store DNS nameservers with order into DB one # by one when create subnet with DNS nameservers if attributes.is_attr_set(dns_nameservers): for order, server in enumerate(dns_nameservers): dns = models_v2.DNSNameServer( address=server, order=order, subnet_id=subnet.id) context.session.add(dns) if attributes.is_attr_set(host_routes): for rt in host_routes: route = models_v2.SubnetRoute( subnet_id=subnet.id, destination=rt['destination'], nexthop=rt['nexthop']) context.session.add(route) self.save_allocation_pools(context, subnet, subnet_request.allocation_pools) return subnet neutron-8.4.0/neutron/db/rbac_db_mixin.py0000664000567000056710000001374113044372760021603 0ustar jenkinsjenkins00000000000000# Copyright (c) 2015 Mirantis, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
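# ---------------------------------------------------------------------------
# Editor's sketch (pure Python, toy data) of the set arithmetic at the heart
# of _get_changed_ips_for_port above: the requested and original fixed IPs
# are reduced to address sets, and the add/unchanged/remove partition falls
# out of plain set operations.
original_ips = {'10.0.0.3', '10.0.0.4'}
new_ips = {'10.0.0.4', '10.0.0.5'}

add = new_ips - original_ips        # {'10.0.0.5'}  -> allocate
unchanged = new_ips & original_ips  # {'10.0.0.4'}  -> keep as-is
remove = original_ips - new_ips     # {'10.0.0.3'}  -> deallocate, if the
                                    # subnet requires the IP to be set
                                    # manually
print(sorted(add), sorted(unchanged), sorted(remove))
# ---------------------------------------------------------------------------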
from oslo_db import exception as db_exc from sqlalchemy.orm import exc from neutron.callbacks import events from neutron.callbacks import exceptions as c_exc from neutron.callbacks import registry from neutron.common import exceptions as n_exc from neutron.db import common_db_mixin from neutron.db import rbac_db_models as models from neutron.extensions import rbac as ext_rbac # resource name used in callbacks RBAC_POLICY = 'rbac-policy' class RbacPluginMixin(common_db_mixin.CommonDbMixin): """Plugin mixin that implements the RBAC DB operations.""" object_type_cache = {} supported_extension_aliases = ['rbac-policies'] def create_rbac_policy(self, context, rbac_policy): e = rbac_policy['rbac_policy'] try: registry.notify(RBAC_POLICY, events.BEFORE_CREATE, self, context=context, object_type=e['object_type'], policy=e) except c_exc.CallbackFailure as e: raise n_exc.InvalidInput(error_message=e) dbmodel = models.get_type_model_map()[e['object_type']] try: with context.session.begin(subtransactions=True): db_entry = dbmodel(object_id=e['object_id'], target_tenant=e['target_tenant'], action=e['action'], tenant_id=e['tenant_id']) context.session.add(db_entry) except db_exc.DBDuplicateEntry: raise ext_rbac.DuplicateRbacPolicy() return self._make_rbac_policy_dict(db_entry) def _make_rbac_policy_dict(self, db_entry, fields=None): res = {f: db_entry[f] for f in ('id', 'tenant_id', 'target_tenant', 'action', 'object_id')} res['object_type'] = db_entry.object_type return self._fields(res, fields) def update_rbac_policy(self, context, id, rbac_policy): pol = rbac_policy['rbac_policy'] entry = self._get_rbac_policy(context, id) object_type = entry['object_type'] try: registry.notify(RBAC_POLICY, events.BEFORE_UPDATE, self, context=context, policy=entry, object_type=object_type, policy_update=pol) except c_exc.CallbackFailure as ex: raise ext_rbac.RbacPolicyInUse(object_id=entry['object_id'], details=ex) with context.session.begin(subtransactions=True): entry.update(pol) return self._make_rbac_policy_dict(entry) def delete_rbac_policy(self, context, id): entry = self._get_rbac_policy(context, id) object_type = entry['object_type'] try: registry.notify(RBAC_POLICY, events.BEFORE_DELETE, self, context=context, object_type=object_type, policy=entry) except c_exc.CallbackFailure as ex: raise ext_rbac.RbacPolicyInUse(object_id=entry['object_id'], details=ex) with context.session.begin(subtransactions=True): context.session.delete(entry) self.object_type_cache.pop(id, None) def _get_rbac_policy(self, context, id): object_type = self._get_object_type(context, id) dbmodel = models.get_type_model_map()[object_type] try: return self._model_query(context, dbmodel).filter(dbmodel.id == id).one() except exc.NoResultFound: raise ext_rbac.RbacPolicyNotFound(id=id, object_type=object_type) def get_rbac_policy(self, context, id, fields=None): return self._make_rbac_policy_dict( self._get_rbac_policy(context, id), fields=fields) def get_rbac_policies(self, context, filters=None, fields=None, sorts=None, limit=None, page_reverse=False): filters = filters or {} object_type_filters = filters.pop('object_type', None) models_to_query = [ m for t, m in models.get_type_model_map().items() if object_type_filters is None or t in object_type_filters ] collections = [self._get_collection( context, model, self._make_rbac_policy_dict, filters=filters, fields=fields, sorts=sorts, limit=limit, page_reverse=page_reverse) for model in models_to_query] # NOTE(kevinbenton): we don't have to worry about pagination, # limits, or page_reverse
currently because allow_pagination is # set to False in 'neutron.extensions.rbac' return [item for c in collections for item in c] def _get_object_type(self, context, entry_id): """Scans all RBAC tables for an ID to figure out the type. This will be an expensive operation as the number of RBAC tables grows. The result is cached since object types cannot be updated for a policy. """ if entry_id in self.object_type_cache: return self.object_type_cache[entry_id] for otype, model in models.get_type_model_map().items(): if (context.session.query(model). filter(model.id == entry_id).first()): self.object_type_cache[entry_id] = otype return otype raise ext_rbac.RbacPolicyNotFound(id=entry_id, object_type='unknown') neutron-8.4.0/neutron/db/address_scope_db.py0000664000567000056710000001510413044372760022301 0ustar jenkinsjenkins00000000000000# Copyright (c) 2015 Huawei Technologies Co.,LTD. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_utils import uuidutils import sqlalchemy as sa from sqlalchemy.orm import exc from neutron._i18n import _ from neutron.api.v2 import attributes as attr from neutron.common import constants from neutron.db import db_base_plugin_v2 from neutron.db import model_base from neutron.extensions import address_scope as ext_address_scope class AddressScope(model_base.BASEV2, model_base.HasId, model_base.HasTenant): """Represents a neutron address scope.""" __tablename__ = "address_scopes" name = sa.Column(sa.String(attr.NAME_MAX_LEN), nullable=False) shared = sa.Column(sa.Boolean, nullable=False) ip_version = sa.Column(sa.Integer(), nullable=False) class AddressScopeDbMixin(ext_address_scope.AddressScopePluginBase): """Mixin class to add address scope to db_base_plugin_v2.""" __native_bulk_support = True def _make_address_scope_dict(self, address_scope, fields=None): res = {'id': address_scope['id'], 'name': address_scope['name'], 'tenant_id': address_scope['tenant_id'], 'shared': address_scope['shared'], 'ip_version': address_scope['ip_version']} return self._fields(res, fields) def _get_address_scope(self, context, id): try: return self._get_by_id(context, AddressScope, id) except exc.NoResultFound: raise ext_address_scope.AddressScopeNotFound(address_scope_id=id) def is_address_scope_owned_by_tenant(self, context, id): """Check if address scope id is owned by the tenant or not. AddressScopeNotFound is raised if the - address scope id doesn't exist or - if the (unshared) address scope id is not owned by this tenant. @return Returns true if the user is admin or tenant is owner Returns false if the address scope id is shared and not owned by the tenant. 
""" address_scope = self._get_address_scope(context, id) return context.is_admin or ( address_scope.tenant_id == context.tenant_id) def get_ip_version_for_address_scope(self, context, id): address_scope = self._get_address_scope(context, id) return address_scope.ip_version def create_address_scope(self, context, address_scope): """Create an address scope.""" a_s = address_scope['address_scope'] address_scope_id = a_s.get('id') or uuidutils.generate_uuid() with context.session.begin(subtransactions=True): pool_args = {'tenant_id': a_s['tenant_id'], 'id': address_scope_id, 'name': a_s['name'], 'shared': a_s['shared'], 'ip_version': a_s['ip_version']} address_scope = AddressScope(**pool_args) context.session.add(address_scope) return self._make_address_scope_dict(address_scope) def update_address_scope(self, context, id, address_scope): a_s = address_scope['address_scope'] with context.session.begin(subtransactions=True): address_scope = self._get_address_scope(context, id) if address_scope.shared and not a_s.get('shared', True): reason = _("Shared address scope can't be unshared") raise ext_address_scope.AddressScopeUpdateError( address_scope_id=id, reason=reason) address_scope.update(a_s) return self._make_address_scope_dict(address_scope) def get_address_scope(self, context, id, fields=None): address_scope = self._get_address_scope(context, id) return self._make_address_scope_dict(address_scope, fields) def get_address_scopes(self, context, filters=None, fields=None, sorts=None, limit=None, marker=None, page_reverse=False): marker_obj = self._get_marker_obj(context, 'addrscope', limit, marker) collection = self._get_collection(context, AddressScope, self._make_address_scope_dict, filters=filters, fields=fields, sorts=sorts, limit=limit, marker_obj=marker_obj, page_reverse=page_reverse) return collection def get_address_scopes_count(self, context, filters=None): return self._get_collection_count(context, AddressScope, filters=filters) def delete_address_scope(self, context, id): with context.session.begin(subtransactions=True): if self._get_subnetpools_by_address_scope_id(context, id): raise ext_address_scope.AddressScopeInUse(address_scope_id=id) address_scope = self._get_address_scope(context, id) context.session.delete(address_scope) def _extend_network_dict_address_scope(self, network_res, network_db): network_res[ext_address_scope.IPV4_ADDRESS_SCOPE] = None network_res[ext_address_scope.IPV6_ADDRESS_SCOPE] = None subnetpools = {subnet.subnetpool for subnet in network_db.subnets if subnet.subnetpool} for subnetpool in subnetpools: # A network will be constrained to only one subnetpool per address # family. Retrieve the address scope of subnetpools as the address # scopes of network. as_id = subnetpool[ext_address_scope.ADDRESS_SCOPE_ID] if subnetpool['ip_version'] == constants.IP_VERSION_4: network_res[ext_address_scope.IPV4_ADDRESS_SCOPE] = as_id if subnetpool['ip_version'] == constants.IP_VERSION_6: network_res[ext_address_scope.IPV6_ADDRESS_SCOPE] = as_id return network_res db_base_plugin_v2.NeutronDbPluginV2.register_dict_extend_funcs( attr.NETWORKS, ['_extend_network_dict_address_scope']) neutron-8.4.0/neutron/db/ipam_pluggable_backend.py0000664000567000056710000005317613044372760023450 0ustar jenkinsjenkins00000000000000# Copyright (c) 2015 Infoblox Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import copy import netaddr from oslo_db import exception as db_exc from oslo_log import log as logging from oslo_utils import excutils from sqlalchemy import and_ from neutron._i18n import _, _LE from neutron.api.v2 import attributes from neutron.common import constants from neutron.common import exceptions as n_exc from neutron.common import ipv6_utils from neutron.db import ipam_backend_mixin from neutron.db import models_v2 from neutron.ipam import driver from neutron.ipam import exceptions as ipam_exc LOG = logging.getLogger(__name__) class IpamPluggableBackend(ipam_backend_mixin.IpamBackendMixin): def _get_failed_ips(self, all_ips, success_ips): ips_list = (ip_dict['ip_address'] for ip_dict in success_ips) return (ip_dict['ip_address'] for ip_dict in all_ips if ip_dict['ip_address'] not in ips_list) def _ipam_deallocate_ips(self, context, ipam_driver, port, ips, revert_on_fail=True): """Deallocate set of ips over IPAM. If any single ip deallocation fails, tries to allocate deallocated ip addresses with fixed ip request """ deallocated = [] try: for ip in ips: try: ipam_subnet = ipam_driver.get_subnet(ip['subnet_id']) ipam_subnet.deallocate(ip['ip_address']) deallocated.append(ip) except n_exc.SubnetNotFound: LOG.debug("Subnet was not found on ip deallocation: %s", ip) except Exception: with excutils.save_and_reraise_exception(): LOG.debug("An exception occurred during IP deallocation.") if revert_on_fail and deallocated: LOG.debug("Reverting deallocation") self._ipam_allocate_ips(context, ipam_driver, port, deallocated, revert_on_fail=False) elif not revert_on_fail and ips: addresses = ', '.join(self._get_failed_ips(ips, deallocated)) LOG.error(_LE("IP deallocation failed on " "external system for %s"), addresses) return deallocated def _ipam_try_allocate_ip(self, context, ipam_driver, port, ip_dict): factory = ipam_driver.get_address_request_factory() ip_request = factory.get_request(context, port, ip_dict) ipam_subnet = ipam_driver.get_subnet(ip_dict['subnet_id']) return ipam_subnet.allocate(ip_request) def _ipam_allocate_single_ip(self, context, ipam_driver, port, subnets): """Allocates single ip from set of subnets Raises n_exc.IpAddressGenerationFailure if allocation failed for all subnets. """ for subnet in subnets: try: return [self._ipam_try_allocate_ip(context, ipam_driver, port, subnet), subnet] except ipam_exc.IpAddressGenerationFailure: continue raise n_exc.IpAddressGenerationFailure( net_id=port['network_id']) def _ipam_allocate_ips(self, context, ipam_driver, port, ips, revert_on_fail=True): """Allocate set of ips over IPAM. If any single ip allocation fails, tries to deallocate all allocated ip addresses. """ allocated = [] # we need to start with entries that asked for a specific IP in case # those IPs happen to be next in the line for allocation for ones that # didn't ask for a specific IP ips.sort(key=lambda x: 'ip_address' not in x) try: for ip in ips: # By default IP info is dict, used to allocate single ip # from single subnet. # IP info can be list, used to allocate single ip from # multiple subnets (i.e. 
first successful ip allocation # is returned) ip_list = [ip] if isinstance(ip, dict) else ip ip_address, ip_subnet = self._ipam_allocate_single_ip( context, ipam_driver, port, ip_list) allocated.append({'ip_address': ip_address, 'subnet_id': ip_subnet['subnet_id']}) except Exception: with excutils.save_and_reraise_exception(): LOG.debug("An exception occurred during IP allocation.") if revert_on_fail and allocated: LOG.debug("Reverting allocation") self._ipam_deallocate_ips(context, ipam_driver, port, allocated, revert_on_fail=False) elif not revert_on_fail and ips: addresses = ', '.join(self._get_failed_ips(ips, allocated)) LOG.error(_LE("IP allocation failed on " "external system for %s"), addresses) return allocated def _ipam_update_allocation_pools(self, context, ipam_driver, subnet): factory = ipam_driver.get_subnet_request_factory() subnet_request = factory.get_request(context, subnet, None) ipam_driver.update_subnet(subnet_request) def delete_subnet(self, context, subnet_id): ipam_driver = driver.Pool.get_instance(None, context) ipam_driver.remove_subnet(subnet_id) def allocate_ips_for_port_and_store(self, context, port, port_id): # Make a copy of port dict to prevent changing # incoming dict by adding 'id' to it. # Deepcopy doesn't work correctly in this case, because copy of # ATTR_NOT_SPECIFIED object happens. Address of copied object doesn't # match original object, so 'is' check fails port_copy = {'port': port['port'].copy()} port_copy['port']['id'] = port_id network_id = port_copy['port']['network_id'] ips = [] try: ips = self._allocate_ips_for_port(context, port_copy) for ip in ips: ip_address = ip['ip_address'] subnet_id = ip['subnet_id'] IpamPluggableBackend._store_ip_allocation( context, ip_address, network_id, subnet_id, port_id) return ips except Exception: with excutils.save_and_reraise_exception(): if ips: LOG.debug("An exception occurred during port creation. " "Reverting IP allocation") ipam_driver = driver.Pool.get_instance(None, context) self._ipam_deallocate_ips(context, ipam_driver, port_copy['port'], ips, revert_on_fail=False) def _allocate_ips_for_port(self, context, port): """Allocate IP addresses for the port. IPAM version. If port['fixed_ips'] is set to 'ATTR_NOT_SPECIFIED', allocate IP addresses for the port. If port['fixed_ips'] contains an IP address or a subnet_id then allocate an IP address accordingly. """ p = port['port'] ips = [] v6_stateless = [] net_id_filter = {'network_id': [p['network_id']]} subnets = self._get_subnets(context, filters=net_id_filter) is_router_port = ( p['device_owner'] in constants.ROUTER_INTERFACE_OWNERS_SNAT) fixed_configured = p['fixed_ips'] is not attributes.ATTR_NOT_SPECIFIED if fixed_configured: ips = self._test_fixed_ips_for_port(context, p["network_id"], p['fixed_ips'], p['device_owner']) # For ports that are not router ports, implicitly include all # auto-address subnets for address association. 
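# For example: a port created with an explicit IPv4 fixed IP on a # network that also carries an IPv6 SLAAC subnet still receives an # EUI-64 address, because the SLAAC subnet is appended to v6_stateless # below regardless of the requested fixed_ips.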
if not is_router_port: v6_stateless += [subnet for subnet in subnets if ipv6_utils.is_auto_address_subnet(subnet)] else: # Split into v4, v6 stateless and v6 stateful subnets v4 = [] v6_stateful = [] for subnet in subnets: if subnet['ip_version'] == 4: v4.append(subnet) else: if ipv6_utils.is_auto_address_subnet(subnet): if not is_router_port: v6_stateless.append(subnet) else: v6_stateful.append(subnet) version_subnets = [v4, v6_stateful] for subnets in version_subnets: if subnets: ips.append([{'subnet_id': s['id']} for s in subnets]) for subnet in v6_stateless: # IP addresses for IPv6 SLAAC and DHCPv6-stateless subnets # are implicitly included. ips.append({'subnet_id': subnet['id'], 'subnet_cidr': subnet['cidr'], 'eui64_address': True, 'mac': p['mac_address']}) ipam_driver = driver.Pool.get_instance(None, context) return self._ipam_allocate_ips(context, ipam_driver, p, ips) def _test_fixed_ips_for_port(self, context, network_id, fixed_ips, device_owner): """Test fixed IPs for port. Check that configured subnets are valid prior to allocating any IPs. Include the subnet_id in the result if only an IP address is configured. :raises: InvalidInput, IpAddressInUse, InvalidIpForNetwork, InvalidIpForSubnet """ fixed_ip_list = [] for fixed in fixed_ips: subnet = self._get_subnet_for_fixed_ip(context, fixed, network_id) is_auto_addr_subnet = ipv6_utils.is_auto_address_subnet(subnet) if 'ip_address' in fixed: if (is_auto_addr_subnet and device_owner not in constants.ROUTER_INTERFACE_OWNERS): msg = (_("IPv6 address %(address)s can not be directly " "assigned to a port on subnet %(id)s since the " "subnet is configured for automatic addresses") % {'address': fixed['ip_address'], 'id': subnet['id']}) raise n_exc.InvalidInput(error_message=msg) fixed_ip_list.append({'subnet_id': subnet['id'], 'ip_address': fixed['ip_address']}) else: # A scan for auto-address subnets on the network is done # separately so that all such subnets (not just those # listed explicitly here by subnet ID) are associated # with the port. if (device_owner in constants.ROUTER_INTERFACE_OWNERS_SNAT or not is_auto_addr_subnet): fixed_ip_list.append({'subnet_id': subnet['id']}) self._validate_max_ips_per_port(fixed_ip_list, device_owner) return fixed_ip_list def _update_ips_for_port(self, context, port, original_ips, new_ips, mac): """Add or remove IPs from the port. 
IPAM version""" added = [] removed = [] changes = self._get_changed_ips_for_port( context, original_ips, new_ips, port['device_owner']) # Check if the IP's to add are OK to_add = self._test_fixed_ips_for_port( context, port['network_id'], changes.add, port['device_owner']) ipam_driver = driver.Pool.get_instance(None, context) if changes.remove: removed = self._ipam_deallocate_ips(context, ipam_driver, port, changes.remove) if to_add: added = self._ipam_allocate_ips(context, ipam_driver, port, to_add) return self.Changes(add=added, original=changes.original, remove=removed) def save_allocation_pools(self, context, subnet, allocation_pools): for pool in allocation_pools: first_ip = str(netaddr.IPAddress(pool.first, pool.version)) last_ip = str(netaddr.IPAddress(pool.last, pool.version)) ip_pool = models_v2.IPAllocationPool(subnet=subnet, first_ip=first_ip, last_ip=last_ip) context.session.add(ip_pool) def update_port_with_ips(self, context, db_port, new_port, new_mac): changes = self.Changes(add=[], original=[], remove=[]) if 'fixed_ips' in new_port: original = self._make_port_dict(db_port, process_extensions=False) changes = self._update_ips_for_port(context, db_port, original["fixed_ips"], new_port['fixed_ips'], new_mac) try: # Check if the IPs need to be updated network_id = db_port['network_id'] for ip in changes.add: self._store_ip_allocation( context, ip['ip_address'], network_id, ip['subnet_id'], db_port.id) for ip in changes.remove: self._delete_ip_allocation(context, network_id, ip['subnet_id'], ip['ip_address']) self._update_db_port(context, db_port, new_port, network_id, new_mac) except Exception: with excutils.save_and_reraise_exception(): if 'fixed_ips' in new_port: LOG.debug("An exception occurred during port update.") ipam_driver = driver.Pool.get_instance(None, context) if changes.add: LOG.debug("Reverting IP allocation.") self._ipam_deallocate_ips(context, ipam_driver, db_port, changes.add, revert_on_fail=False) if changes.remove: LOG.debug("Reverting IP deallocation.") self._ipam_allocate_ips(context, ipam_driver, db_port, changes.remove, revert_on_fail=False) return changes def delete_port(self, context, id): # Get fixed_ips list before port deletion port = self._get_port(context, id) ipam_driver = driver.Pool.get_instance(None, context) super(IpamPluggableBackend, self).delete_port(context, id) # Deallocating ips via IPAM after port is deleted locally. # So no need to do rollback actions on remote server # in case of fail to delete port locally self._ipam_deallocate_ips(context, ipam_driver, port, port['fixed_ips']) def update_db_subnet(self, context, id, s, old_pools): # 'allocation_pools' is removed from 's' in # _update_subnet_allocation_pools (ipam_backend_mixin), # so create unchanged copy for ipam driver subnet_copy = copy.deepcopy(s) subnet, changes = super(IpamPluggableBackend, self).update_db_subnet( context, id, s, old_pools) ipam_driver = driver.Pool.get_instance(None, context) # Set old allocation pools if no new pools are provided by user. # Passing old pools allows to call ipam driver on each subnet update # even if allocation pools are not changed. So custom ipam drivers # are able to track other fields changes on subnet update. 
if 'allocation_pools' not in subnet_copy: subnet_copy['allocation_pools'] = old_pools self._ipam_update_allocation_pools(context, ipam_driver, subnet_copy) return subnet, changes def add_auto_addrs_on_network_ports(self, context, subnet, ipam_subnet): """For an auto-address subnet, add addrs for ports on the net.""" with context.session.begin(subtransactions=True): network_id = subnet['network_id'] port_qry = context.session.query(models_v2.Port) ports = port_qry.filter( and_(models_v2.Port.network_id == network_id, ~models_v2.Port.device_owner.in_( constants.ROUTER_INTERFACE_OWNERS_SNAT))) updated_ports = [] ipam_driver = driver.Pool.get_instance(None, context) factory = ipam_driver.get_address_request_factory() for port in ports: ip = {'subnet_id': subnet['id'], 'subnet_cidr': subnet['cidr'], 'eui64_address': True, 'mac': port['mac_address']} ip_request = factory.get_request(context, port, ip) ip_address = ipam_subnet.allocate(ip_request) allocated = models_v2.IPAllocation(network_id=network_id, port_id=port['id'], ip_address=ip_address, subnet_id=subnet['id']) try: # Do the insertion of each IP allocation entry within # the context of a nested transaction, so that the entry # is rolled back independently of other entries whenever # the corresponding port has been deleted. with context.session.begin_nested(): context.session.add(allocated) updated_ports.append(port['id']) except db_exc.DBReferenceError: LOG.debug("Port %s was deleted while updating it with an " "IPv6 auto-address. Ignoring.", port['id']) LOG.debug("Reverting IP allocation for %s", ip_address) # Do not fail if reverting allocation was unsuccessful try: ipam_subnet.deallocate(ip_address) except Exception: LOG.debug("Reverting IP allocation failed for %s", ip_address) return updated_ports def allocate_subnet(self, context, network, subnet, subnetpool_id): subnetpool = None if subnetpool_id and not subnetpool_id == constants.IPV6_PD_POOL_ID: subnetpool = self._get_subnetpool(context, subnetpool_id) self._validate_ip_version_with_subnetpool(subnet, subnetpool) # gateway_ip and allocation pools should be validated or generated # only for specific request if subnet['cidr'] is not attributes.ATTR_NOT_SPECIFIED: subnet['gateway_ip'] = self._gateway_ip_str(subnet, subnet['cidr']) subnet['allocation_pools'] = self._prepare_allocation_pools( subnet['allocation_pools'], subnet['cidr'], subnet['gateway_ip']) ipam_driver = driver.Pool.get_instance(subnetpool, context) subnet_factory = ipam_driver.get_subnet_request_factory() subnet_request = subnet_factory.get_request(context, subnet, subnetpool) ipam_subnet = ipam_driver.allocate_subnet(subnet_request) # get updated details with actually allocated subnet subnet_request = ipam_subnet.get_details() try: subnet = self._save_subnet(context, network, self._make_subnet_args( subnet_request, subnet, subnetpool_id), subnet['dns_nameservers'], subnet['host_routes'], subnet_request) except Exception: # Note(pbondar): Third-party ipam servers can't rely # on transaction rollback, so explicit rollback call needed. # IPAM part rolled back in exception handling # and subnet part is rolled back by transaction rollback. with excutils.save_and_reraise_exception(): LOG.debug("An exception occurred during subnet creation. 
" "Reverting subnet allocation.") self.delete_subnet(context, subnet_request.subnet_id) return subnet, ipam_subnet neutron-8.4.0/neutron/db/db_base_plugin_v2.py0000664000567000056710000020513613044372760022370 0ustar jenkinsjenkins00000000000000# Copyright (c) 2012 OpenStack Foundation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import functools import netaddr from oslo_config import cfg from oslo_db import exception as db_exc from oslo_log import log as logging from oslo_utils import excutils from oslo_utils import uuidutils from sqlalchemy import and_ from sqlalchemy import event from sqlalchemy import not_ from neutron._i18n import _, _LE, _LI from neutron.api.rpc.agentnotifiers import l3_rpc_agent_api from neutron.api.v2 import attributes from neutron.callbacks import events from neutron.callbacks import exceptions from neutron.callbacks import registry from neutron.callbacks import resources from neutron.common import constants from neutron.common import exceptions as n_exc from neutron.common import ipv6_utils from neutron.common import utils from neutron import context as ctx from neutron.db import api as db_api from neutron.db import db_base_plugin_common from neutron.db import ipam_non_pluggable_backend from neutron.db import ipam_pluggable_backend from neutron.db import models_v2 from neutron.db import rbac_db_mixin as rbac_mixin from neutron.db import rbac_db_models as rbac_db from neutron.db import sqlalchemyutils from neutron.db import standardattrdescription_db as stattr_db from neutron.extensions import l3 from neutron import ipam from neutron.ipam import subnet_alloc from neutron import manager from neutron import neutron_plugin_base_v2 from neutron.notifiers import nova as nova_notifier from neutron.plugins.common import constants as service_constants LOG = logging.getLogger(__name__) # Ports with the following 'device_owner' values will not prevent # network deletion. If delete_network() finds that all ports on a # network have these owners, it will explicitly delete each port # and allow network deletion to continue. Similarly, if delete_subnet() # finds out that all existing IP Allocations are associated with ports # with these owners, it will allow subnet deletion to proceed with the # IP allocations being cleaned up by cascade. AUTO_DELETE_PORT_OWNERS = [constants.DEVICE_OWNER_DHCP] DNS_DOMAIN_DEFAULT = 'openstacklocal.' FQDN_MAX_LEN = 255 def _check_subnet_not_used(context, subnet_id): try: kwargs = {'context': context, 'subnet_id': subnet_id} registry.notify( resources.SUBNET, events.BEFORE_DELETE, None, **kwargs) except exceptions.CallbackFailure as e: raise n_exc.SubnetInUse(subnet_id=subnet_id, reason=e) class NeutronDbPluginV2(db_base_plugin_common.DbBasePluginCommon, neutron_plugin_base_v2.NeutronPluginBaseV2, rbac_mixin.RbacPluginMixin, stattr_db.StandardAttrDescriptionMixin): """V2 Neutron plugin interface implementation using SQLAlchemy models. 
Whenever a non-read call happens the plugin will call an event handler class method (e.g., network_created()). The result is that this class can be sub-classed by other classes that add custom behaviors on certain events. """ # This attribute specifies whether the plugin supports or not # bulk/pagination/sorting operations. Name mangling is used in # order to ensure it is qualified by class __native_bulk_support = True __native_pagination_support = True __native_sorting_support = True def __init__(self): self.set_ipam_backend() if cfg.CONF.notify_nova_on_port_status_changes: # NOTE(arosen) These event listeners are here to hook into when # port status changes and notify nova about their change. self.nova_notifier = nova_notifier.Notifier() event.listen(models_v2.Port, 'after_insert', self.nova_notifier.send_port_status) event.listen(models_v2.Port, 'after_update', self.nova_notifier.send_port_status) event.listen(models_v2.Port.status, 'set', self.nova_notifier.record_port_status_changed) for e in (events.BEFORE_CREATE, events.BEFORE_UPDATE, events.BEFORE_DELETE): registry.subscribe(self.validate_network_rbac_policy_change, rbac_mixin.RBAC_POLICY, e) def validate_network_rbac_policy_change(self, resource, event, trigger, context, object_type, policy, **kwargs): """Validates network RBAC policy changes. On creation, verify that the creator is an admin or that it owns the network it is sharing. On update and delete, make sure the tenant losing access does not have resources that depend on that access. """ if object_type != 'network' or policy['action'] != 'access_as_shared': # we only care about shared network policies return # The object a policy targets cannot be changed so we can look # at the original network for the update event as well. net = self._get_network(context, policy['object_id']) if event in (events.BEFORE_CREATE, events.BEFORE_UPDATE): # we still have to verify that the caller owns the network because # _get_network will succeed on a shared network if not context.is_admin and net['tenant_id'] != context.tenant_id: msg = _("Only admins can manipulate policies on networks " "they do not own.") raise n_exc.InvalidInput(error_message=msg) tenant_to_check = None if event == events.BEFORE_UPDATE: new_tenant = kwargs['policy_update']['target_tenant'] if policy['target_tenant'] != new_tenant: tenant_to_check = policy['target_tenant'] if event == events.BEFORE_DELETE: tenant_to_check = policy['target_tenant'] if tenant_to_check: self.ensure_no_tenant_ports_on_network(net['id'], net['tenant_id'], tenant_to_check) def ensure_no_tenant_ports_on_network(self, network_id, net_tenant_id, tenant_id): ctx_admin = ctx.get_admin_context() rb_model = rbac_db.NetworkRBAC other_rbac_entries = self._model_query(ctx_admin, rb_model).filter( and_(rb_model.object_id == network_id, rb_model.action == 'access_as_shared')) ports = self._model_query(ctx_admin, models_v2.Port).filter( models_v2.Port.network_id == network_id) if tenant_id == '*': # for the wildcard we need to get all of the rbac entries to # see if any allow the remaining ports on the network. 
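# e.g. when the wildcard entry itself is being deleted: every port must # then belong to the network owner or to a tenant covered by its own # access_as_shared entry, otherwise InvalidSharedSetting is raised.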
other_rbac_entries = other_rbac_entries.filter( rb_model.target_tenant != tenant_id) # any port with another RBAC entry covering it or one belonging to # the same tenant as the network owner is ok allowed_tenants = [entry['target_tenant'] for entry in other_rbac_entries] allowed_tenants.append(net_tenant_id) ports = ports.filter( ~models_v2.Port.tenant_id.in_(allowed_tenants)) else: # if there is a wildcard rule, we can return early because it # allows any ports query = other_rbac_entries.filter(rb_model.target_tenant == '*') if query.count(): return ports = ports.filter(models_v2.Port.tenant_id == tenant_id) if ports.count(): raise n_exc.InvalidSharedSetting(network=network_id) def set_ipam_backend(self): if cfg.CONF.ipam_driver: self.ipam = ipam_pluggable_backend.IpamPluggableBackend() else: self.ipam = ipam_non_pluggable_backend.IpamNonPluggableBackend() def _validate_host_route(self, route, ip_version): try: netaddr.IPNetwork(route['destination']) netaddr.IPAddress(route['nexthop']) except netaddr.core.AddrFormatError: err_msg = _("Invalid route: %s") % route raise n_exc.InvalidInput(error_message=err_msg) except ValueError: # netaddr.IPAddress would raise this err_msg = _("Invalid route: %s") % route raise n_exc.InvalidInput(error_message=err_msg) self._validate_ip_version(ip_version, route['nexthop'], 'nexthop') self._validate_ip_version(ip_version, route['destination'], 'destination') def _validate_shared_update(self, context, id, original, updated): # The only case that needs to be validated is when 'shared' # goes from True to False if updated['shared'] == original.shared or updated['shared']: return ports = self._model_query( context, models_v2.Port).filter(models_v2.Port.network_id == id) ports = ports.filter(not_(models_v2.Port.device_owner.startswith( constants.DEVICE_OWNER_NETWORK_PREFIX))) subnets = self._model_query( context, models_v2.Subnet).filter( models_v2.Subnet.network_id == id) tenant_ids = set([port['tenant_id'] for port in ports] + [subnet['tenant_id'] for subnet in subnets]) # raise if multiple tenants found or if the only tenant found # is not the owner of the network if (len(tenant_ids) > 1 or len(tenant_ids) == 1 and tenant_ids.pop() != original.tenant_id): raise n_exc.InvalidSharedSetting(network=original.name) def _validate_ipv6_attributes(self, subnet, cur_subnet): if cur_subnet: self._validate_ipv6_update_dhcp(subnet, cur_subnet) return ra_mode_set = attributes.is_attr_set(subnet.get('ipv6_ra_mode')) address_mode_set = attributes.is_attr_set( subnet.get('ipv6_address_mode')) self._validate_ipv6_dhcp(ra_mode_set, address_mode_set, subnet['enable_dhcp']) if ra_mode_set and address_mode_set: self._validate_ipv6_combination(subnet['ipv6_ra_mode'], subnet['ipv6_address_mode']) if address_mode_set or ra_mode_set: self._validate_eui64_applicable(subnet) def _validate_eui64_applicable(self, subnet): # Per RFC 4862, section 5.5.3, prefix length and interface # id together should be equal to 128. Currently neutron supports # EUI64 interface id only, thus limiting the prefix # length to be 64 only. if ipv6_utils.is_auto_address_subnet(subnet): if netaddr.IPNetwork(subnet['cidr']).prefixlen != 64: msg = _('Invalid CIDR %s for IPv6 address mode. 
' 'OpenStack uses the EUI-64 address format, ' 'which requires the prefix to be /64.') raise n_exc.InvalidInput( error_message=(msg % subnet['cidr'])) def _validate_ipv6_combination(self, ra_mode, address_mode): if ra_mode != address_mode: msg = _("ipv6_ra_mode set to '%(ra_mode)s' with ipv6_address_mode " "set to '%(addr_mode)s' is not valid. " "If both attributes are set, they must be the same value" ) % {'ra_mode': ra_mode, 'addr_mode': address_mode} raise n_exc.InvalidInput(error_message=msg) def _validate_ipv6_dhcp(self, ra_mode_set, address_mode_set, enable_dhcp): if (ra_mode_set or address_mode_set) and not enable_dhcp: msg = _("ipv6_ra_mode or ipv6_address_mode cannot be set when " "enable_dhcp is set to False.") raise n_exc.InvalidInput(error_message=msg) def _validate_ipv6_update_dhcp(self, subnet, cur_subnet): if ('enable_dhcp' in subnet and not subnet['enable_dhcp']): msg = _("Cannot disable enable_dhcp with " "ipv6 attributes set") ra_mode_set = attributes.is_attr_set(subnet.get('ipv6_ra_mode')) address_mode_set = attributes.is_attr_set( subnet.get('ipv6_address_mode')) if ra_mode_set or address_mode_set: raise n_exc.InvalidInput(error_message=msg) old_ra_mode_set = attributes.is_attr_set( cur_subnet.get('ipv6_ra_mode')) old_address_mode_set = attributes.is_attr_set( cur_subnet.get('ipv6_address_mode')) if old_ra_mode_set or old_address_mode_set: raise n_exc.InvalidInput(error_message=msg) def _create_bulk(self, resource, context, request_items): objects = [] collection = "%ss" % resource items = request_items[collection] context.session.begin(subtransactions=True) try: for item in items: obj_creator = getattr(self, 'create_%s' % resource) objects.append(obj_creator(context, item)) context.session.commit() except Exception: context.session.rollback() with excutils.save_and_reraise_exception(): LOG.error(_LE("An exception occurred while creating " "the %(resource)s:%(item)s"), {'resource': resource, 'item': item}) return objects def create_network_bulk(self, context, networks): return self._create_bulk('network', context, networks) def create_network(self, context, network): """Handle creation of a single network.""" net_db = self.create_network_db(context, network) return self._make_network_dict(net_db, process_extensions=False, context=context) def create_network_db(self, context, network): # single request processing n = network['network'] # NOTE(jkoelker) Get the tenant_id outside of the session to avoid # unneeded db action if the operation raises tenant_id = n['tenant_id'] with context.session.begin(subtransactions=True): args = {'tenant_id': tenant_id, 'id': n.get('id') or uuidutils.generate_uuid(), 'name': n['name'], 'admin_state_up': n['admin_state_up'], 'mtu': n.get('mtu', constants.DEFAULT_NETWORK_MTU), 'status': n.get('status', constants.NET_STATUS_ACTIVE), 'description': n.get('description')} network = models_v2.Network(**args) if n['shared']: entry = rbac_db.NetworkRBAC( network=network, action='access_as_shared', target_tenant='*', tenant_id=network['tenant_id']) context.session.add(entry) context.session.add(network) return network def update_network(self, context, id, network): n = network['network'] with context.session.begin(subtransactions=True): network = self._get_network(context, id) # validate 'shared' parameter if 'shared' in n: entry = None for item in network.rbac_entries: if (item.action == 'access_as_shared' and item.target_tenant == '*'): entry = item break setattr(network, 'shared', True if entry else False) self._validate_shared_update(context, 
id, network, n) update_shared = n.pop('shared') if update_shared and not entry: entry = rbac_db.NetworkRBAC( network=network, action='access_as_shared', target_tenant='*', tenant_id=network['tenant_id']) context.session.add(entry) elif not update_shared and entry: context.session.delete(entry) context.session.expire(network, ['rbac_entries']) # The filter call removes attributes from the body received from # the API that are logically tied to network resources but are # stored in other database tables handled by extensions network.update(self._filter_non_model_columns(n, models_v2.Network)) return self._make_network_dict(network, context=context) def delete_network(self, context, id): with context.session.begin(subtransactions=True): network = self._get_network(context, id) context.session.query(models_v2.Port).filter_by( network_id=id).filter( models_v2.Port.device_owner. in_(AUTO_DELETE_PORT_OWNERS)).delete(synchronize_session=False) port_in_use = context.session.query(models_v2.Port).filter_by( network_id=id).first() if port_in_use: raise n_exc.NetworkInUse(net_id=id) # clean up subnets subnets = self._get_subnets_by_network(context, id) for subnet in subnets: self.delete_subnet(context, subnet['id']) context.session.delete(network) def get_network(self, context, id, fields=None): network = self._get_network(context, id) return self._make_network_dict(network, fields, context=context) def get_networks(self, context, filters=None, fields=None, sorts=None, limit=None, marker=None, page_reverse=False): marker_obj = self._get_marker_obj(context, 'network', limit, marker) make_network_dict = functools.partial(self._make_network_dict, context=context) return self._get_collection(context, models_v2.Network, make_network_dict, filters=filters, fields=fields, sorts=sorts, limit=limit, marker_obj=marker_obj, page_reverse=page_reverse) def get_networks_count(self, context, filters=None): return self._get_collection_count(context, models_v2.Network, filters=filters) def create_subnet_bulk(self, context, subnets): return self._create_bulk('subnet', context, subnets) def _validate_ip_version(self, ip_version, addr, name): """Check IP field of a subnet match specified ip version.""" ip = netaddr.IPNetwork(addr) if ip.version != ip_version: data = {'name': name, 'addr': addr, 'ip_version': ip_version} msg = _("%(name)s '%(addr)s' does not match " "the ip_version '%(ip_version)s'") % data raise n_exc.InvalidInput(error_message=msg) def _validate_subnet(self, context, s, cur_subnet=None): """Validate a subnet spec.""" # This method will validate attributes which may change during # create_subnet() and update_subnet(). # The method requires the subnet spec 's' has 'ip_version' field. # If 's' dict does not have 'ip_version' field in an API call # (e.g., update_subnet()), you need to set 'ip_version' field # before calling this method. ip_ver = s['ip_version'] if attributes.is_attr_set(s.get('cidr')): self._validate_ip_version(ip_ver, s['cidr'], 'cidr') # TODO(watanabe.isao): After we found a way to avoid the re-sync # from the agent side, this restriction could be removed. 
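# The DHCP prefix-length check below only fires when DHCP is being # enabled (at create time or on a False -> True update); updates that # leave enable_dhcp untouched skip it.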
if cur_subnet: dhcp_was_enabled = cur_subnet.enable_dhcp else: dhcp_was_enabled = False if s.get('enable_dhcp') and not dhcp_was_enabled: subnet_prefixlen = netaddr.IPNetwork(s['cidr']).prefixlen error_message = _("Subnet has a prefix length that is " "incompatible with DHCP service enabled.") if ((ip_ver == 4 and subnet_prefixlen > 30) or (ip_ver == 6 and subnet_prefixlen > 126)): raise n_exc.InvalidInput(error_message=error_message) net = netaddr.IPNetwork(s['cidr']) if net.is_multicast(): error_message = _("Multicast IP subnet is not supported " "if enable_dhcp is True.") raise n_exc.InvalidInput(error_message=error_message) elif net.is_loopback(): error_message = _("Loopback IP subnet is not supported " "if enable_dhcp is True.") raise n_exc.InvalidInput(error_message=error_message) if attributes.is_attr_set(s.get('gateway_ip')): self._validate_ip_version(ip_ver, s['gateway_ip'], 'gateway_ip') if cfg.CONF.force_gateway_on_subnet: # TODO(sreesiv) check_gateway_in_subnet() will be # obsolete and should be removed when the option # 'force_gateway_on_subnet' is removed. is_gateway_not_valid = not ipam.utils.check_gateway_in_subnet( s['cidr'], s['gateway_ip']) else: is_gateway_not_valid = ( ipam.utils.check_gateway_invalid_in_subnet( s['cidr'], s['gateway_ip'])) if is_gateway_not_valid: error_message = _("Gateway is not valid on subnet") raise n_exc.InvalidInput(error_message=error_message) # Ensure the gateway IP is not assigned to any port # skip this check in case of create (s parameter won't have id) # NOTE(salv-orlando): There is slight chance of a race, when # a subnet-update and a router-interface-add operation are # executed concurrently if cur_subnet and not ipv6_utils.is_ipv6_pd_enabled(s): ipal = models_v2.IPAllocation alloc_qry = context.session.query(ipal) alloc_qry = alloc_qry.join("port", "routerport") allocated = alloc_qry.filter( ipal.ip_address == cur_subnet['gateway_ip'], ipal.subnet_id == cur_subnet['id']).first() if allocated and allocated['port_id']: raise n_exc.GatewayIpInUse( ip_address=cur_subnet['gateway_ip'], port_id=allocated['port_id']) if attributes.is_attr_set(s.get('dns_nameservers')): if len(s['dns_nameservers']) > cfg.CONF.max_dns_nameservers: raise n_exc.DNSNameServersExhausted( subnet_id=s.get('id', _('new subnet')), quota=cfg.CONF.max_dns_nameservers) for dns in s['dns_nameservers']: try: netaddr.IPAddress(dns) except Exception: raise n_exc.InvalidInput( error_message=(_("Error parsing dns address %s") % dns)) self._validate_ip_version(ip_ver, dns, 'dns_nameserver') if attributes.is_attr_set(s.get('host_routes')): if len(s['host_routes']) > cfg.CONF.max_subnet_host_routes: raise n_exc.HostRoutesExhausted( subnet_id=s.get('id', _('new subnet')), quota=cfg.CONF.max_subnet_host_routes) # check if the routes are all valid for rt in s['host_routes']: self._validate_host_route(rt, ip_ver) if ip_ver == 4: if attributes.is_attr_set(s.get('ipv6_ra_mode')): raise n_exc.InvalidInput( error_message=(_("ipv6_ra_mode is not valid when " "ip_version is 4"))) if attributes.is_attr_set(s.get('ipv6_address_mode')): raise n_exc.InvalidInput( error_message=(_("ipv6_address_mode is not valid when " "ip_version is 4"))) if ip_ver == 6: self._validate_ipv6_attributes(s, cur_subnet) def _validate_subnet_for_pd(self, subnet): """Validates that subnet parameters are correct for IPv6 PD""" if (subnet.get('ip_version') != constants.IP_VERSION_6): reason = _("Prefix Delegation can only be used with IPv6 " "subnets.") raise n_exc.BadRequest(resource='subnets', msg=reason) mode_list = 
[constants.IPV6_SLAAC, constants.DHCPV6_STATELESS] ra_mode = subnet.get('ipv6_ra_mode') if ra_mode not in mode_list: reason = _("IPv6 RA Mode must be SLAAC or Stateless for " "Prefix Delegation.") raise n_exc.BadRequest(resource='subnets', msg=reason) address_mode = subnet.get('ipv6_address_mode') if address_mode not in mode_list: reason = _("IPv6 Address Mode must be SLAAC or Stateless for " "Prefix Delegation.") raise n_exc.BadRequest(resource='subnets', msg=reason) def _update_router_gw_ports(self, context, network, subnet): l3plugin = manager.NeutronManager.get_service_plugins().get( service_constants.L3_ROUTER_NAT) if l3plugin: gw_ports = self._get_router_gw_ports_by_network(context, network['id']) router_ids = [p['device_id'] for p in gw_ports] ctx_admin = context.elevated() ext_subnets_dict = {s['id']: s for s in network['subnets']} for id in router_ids: router = l3plugin.get_router(ctx_admin, id) external_gateway_info = router['external_gateway_info'] # Get all stateful (i.e. non-SLAAC/DHCPv6-stateless) fixed ips fips = [f for f in external_gateway_info['external_fixed_ips'] if not ipv6_utils.is_auto_address_subnet( ext_subnets_dict[f['subnet_id']])] num_fips = len(fips) # Don't add the fixed IP to the port if it already # has a stateful fixed IP of the same IP version if num_fips > 1: continue if num_fips == 1 and netaddr.IPAddress( fips[0]['ip_address']).version == subnet['ip_version']: continue external_gateway_info['external_fixed_ips'].append( {'subnet_id': subnet['id']}) info = {'router': {'external_gateway_info': external_gateway_info}} l3plugin.update_router(context, id, info) def _create_subnet(self, context, subnet, subnetpool_id): s = subnet['subnet'] with context.session.begin(subtransactions=True): network = self._get_network(context, s["network_id"]) subnet, ipam_subnet = self.ipam.allocate_subnet(context, network, s, subnetpool_id) if hasattr(network, 'external') and network.external: self._update_router_gw_ports(context, network, subnet) # If this subnet supports auto-addressing, then update any # internal ports on the network with addresses for this subnet. 
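# e.g. adding a SLAAC subnet to a network that already has instance # ports: each non-router port gets an EUI-64 address allocated and is # then passed through update_port() so the usual port update # notifications fire.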
if ipv6_utils.is_auto_address_subnet(subnet): updated_ports = self.ipam.add_auto_addrs_on_network_ports(context, subnet, ipam_subnet) for port_id in updated_ports: port_info = {'port': {'id': port_id}} self.update_port(context, port_id, port_info) return self._make_subnet_dict(subnet, context=context) def _get_subnetpool_id(self, context, subnet): """Return the subnetpool id for this request :param subnet: The subnet dict from the request """ use_default_subnetpool = subnet.get('use_default_subnetpool') if use_default_subnetpool == attributes.ATTR_NOT_SPECIFIED: use_default_subnetpool = False subnetpool_id = subnet.get('subnetpool_id') if subnetpool_id == attributes.ATTR_NOT_SPECIFIED: subnetpool_id = None if use_default_subnetpool and subnetpool_id: msg = _('subnetpool_id and use_default_subnetpool cannot both be ' 'specified') raise n_exc.BadRequest(resource='subnets', msg=msg) if subnetpool_id: return subnetpool_id if not use_default_subnetpool: return cidr = subnet.get('cidr') if attributes.is_attr_set(cidr): ip_version = netaddr.IPNetwork(cidr).version else: ip_version = subnet.get('ip_version') if not attributes.is_attr_set(ip_version): msg = _('ip_version must be specified in the absence of ' 'cidr and subnetpool_id') raise n_exc.BadRequest(resource='subnets', msg=msg) if ip_version == 6 and cfg.CONF.ipv6_pd_enabled: return constants.IPV6_PD_POOL_ID subnetpool = self.get_default_subnetpool(context, ip_version) if subnetpool: return subnetpool['id'] # Until the default_subnet_pool config options are removed in the N # release, check for them after get_default_subnetpool returns None. # TODO(john-davidge): Remove after Mitaka release. if ip_version == 4 and cfg.CONF.default_ipv4_subnet_pool: return cfg.CONF.default_ipv4_subnet_pool if ip_version == 6 and cfg.CONF.default_ipv6_subnet_pool: return cfg.CONF.default_ipv6_subnet_pool msg = _('No default subnetpool found for IPv%s') % ip_version raise n_exc.BadRequest(resource='subnets', msg=msg) def create_subnet(self, context, subnet): s = subnet['subnet'] cidr = s.get('cidr', attributes.ATTR_NOT_SPECIFIED) prefixlen = s.get('prefixlen', attributes.ATTR_NOT_SPECIFIED) has_cidr = attributes.is_attr_set(cidr) has_prefixlen = attributes.is_attr_set(prefixlen) if has_cidr and has_prefixlen: msg = _('cidr and prefixlen must not be supplied together') raise n_exc.BadRequest(resource='subnets', msg=msg) if has_cidr: # turn the CIDR into a proper subnet net = netaddr.IPNetwork(s['cidr']) subnet['subnet']['cidr'] = '%s/%s' % (net.network, net.prefixlen) subnetpool_id = self._get_subnetpool_id(context, s) if not subnetpool_id and not has_cidr: msg = _('a subnetpool must be specified in the absence of a cidr') raise n_exc.BadRequest(resource='subnets', msg=msg) if subnetpool_id: self.ipam.validate_pools_with_subnetpool(s) if subnetpool_id == constants.IPV6_PD_POOL_ID: if has_cidr: # We do not currently support requesting a specific # cidr with IPv6 prefix delegation. Set the subnetpool_id # to None and allow the request to continue as normal. 
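# i.e. when both an explicit cidr and prefix delegation are requested, # the explicit cidr wins and the subnet is created without a subnet # pool.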
subnetpool_id = None self._validate_subnet(context, s) else: prefix = constants.PROVISIONAL_IPV6_PD_PREFIX subnet['subnet']['cidr'] = prefix self._validate_subnet_for_pd(s) else: if not has_cidr: msg = _('A cidr must be specified in the absence of a ' 'subnet pool') raise n_exc.BadRequest(resource='subnets', msg=msg) self._validate_subnet(context, s) return self._create_subnet(context, subnet, subnetpool_id) def _update_allocation_pools(self, subnet): """Gets new allocation pools and formats them correctly""" allocation_pools = self.ipam.generate_pools(subnet['cidr'], subnet['gateway_ip']) return [{'start': str(netaddr.IPAddress(p.first, subnet['ip_version'])), 'end': str(netaddr.IPAddress(p.last, subnet['ip_version']))} for p in allocation_pools] def update_subnet(self, context, id, subnet): """Update the subnet with new info. The change however will not be realized until the client renew the dns lease or we support gratuitous DHCP offers """ s = subnet['subnet'] new_cidr = s.get('cidr') db_subnet = self._get_subnet(context, id) # Fill 'ip_version' and 'allocation_pools' fields with the current # value since _validate_subnet() expects subnet spec has 'ip_version' # and 'allocation_pools' fields. s['ip_version'] = db_subnet.ip_version s['cidr'] = db_subnet.cidr s['id'] = db_subnet.id s['tenant_id'] = db_subnet.tenant_id s['subnetpool_id'] = db_subnet.subnetpool_id self._validate_subnet(context, s, cur_subnet=db_subnet) db_pools = [netaddr.IPRange(p['first_ip'], p['last_ip']) for p in db_subnet.allocation_pools] update_ports_needed = False if new_cidr and ipv6_utils.is_ipv6_pd_enabled(s): # This is an ipv6 prefix delegation-enabled subnet being given an # updated cidr by the process_prefix_update RPC s['cidr'] = new_cidr update_ports_needed = True net = netaddr.IPNetwork(s['cidr'], s['ip_version']) # Update gateway_ip and allocation pools based on new cidr s['gateway_ip'] = utils.get_first_host_ip(net, s['ip_version']) s['allocation_pools'] = self._update_allocation_pools(s) range_pools = None if s.get('allocation_pools') is not None: # Convert allocation pools to IPRange to simplify future checks range_pools = self.ipam.pools_to_ip_range(s['allocation_pools']) self.ipam.validate_allocation_pools(range_pools, s['cidr']) s['allocation_pools'] = range_pools # If either gateway_ip or allocation_pools were specified gateway_ip = s.get('gateway_ip', db_subnet.gateway_ip) gateway_ip_changed = gateway_ip != db_subnet.gateway_ip if gateway_ip_changed or s.get('allocation_pools') is not None: pools = range_pools if range_pools is not None else db_pools if gateway_ip: self.ipam.validate_gw_out_of_pools(gateway_ip, pools) if gateway_ip_changed: # Provide pre-update notification not to break plugins that don't # support gateway ip change kwargs = {'context': context, 'subnet_id': id, 'network_id': db_subnet.network_id} registry.notify(resources.SUBNET_GATEWAY, events.BEFORE_UPDATE, self, **kwargs) with context.session.begin(subtransactions=True): subnet, changes = self.ipam.update_db_subnet(context, id, s, db_pools) result = self._make_subnet_dict(subnet, context=context) # Keep up with fields that changed result.update(changes) if update_ports_needed: # Find ports that have not yet been updated # with an IP address by Prefix Delegation, and update them ports = self.get_ports(context) routers = [] for port in ports: fixed_ips = [] new_port = {'port': port} for ip in port['fixed_ips']: if ip['subnet_id'] == s['id']: fixed_ip = {'subnet_id': s['id']} if "router_interface" in port['device_owner']: 
routers.append(port['device_id']) fixed_ip['ip_address'] = s['gateway_ip'] fixed_ips.append(fixed_ip) if fixed_ips: new_port['port']['fixed_ips'] = fixed_ips self.update_port(context, port['id'], new_port) # Send router_update to l3_agent if routers: l3_rpc_notifier = l3_rpc_agent_api.L3AgentNotifyAPI() l3_rpc_notifier.routers_updated(context, routers) if gateway_ip_changed: kwargs = {'context': context, 'subnet_id': id, 'network_id': db_subnet.network_id} registry.notify(resources.SUBNET_GATEWAY, events.AFTER_UPDATE, self, **kwargs) return result def _subnet_check_ip_allocations(self, context, subnet_id): return (context.session.query(models_v2.IPAllocation). filter_by(subnet_id=subnet_id).join(models_v2.Port).first()) def _subnet_get_user_allocation(self, context, subnet_id): """Check if there are any user ports on subnet and return first.""" # need to join with the ports table as IPAllocation's port is not # joined eagerly, and the query would otherwise yield incorrect # results return (context.session.query(models_v2.IPAllocation). filter_by(subnet_id=subnet_id).join(models_v2.Port). filter(~models_v2.Port.device_owner. in_(AUTO_DELETE_PORT_OWNERS)).first()) def _subnet_check_ip_allocations_internal_router_ports(self, context, subnet_id): # Do not delete the subnet if IP allocations for internal # router ports still exist allocs = context.session.query(models_v2.IPAllocation).filter_by( subnet_id=subnet_id).join(models_v2.Port).filter( models_v2.Port.device_owner.in_( constants.ROUTER_INTERFACE_OWNERS) ).first() if allocs: LOG.debug("Subnet %s still has internal router ports, " "cannot delete", subnet_id) raise n_exc.SubnetInUse(subnet_id=subnet_id) def delete_subnet(self, context, id): with context.session.begin(subtransactions=True): subnet = self._get_subnet(context, id) # Make sure the subnet isn't used by other resources _check_subnet_not_used(context, id) # Delete all network owned ports qry_network_ports = ( context.session.query(models_v2.IPAllocation). filter_by(subnet_id=subnet['id']). join(models_v2.Port)) # Remove network owned ports, and delete IP allocations # for IPv6 addresses which were automatically generated # via SLAAC is_auto_addr_subnet = ipv6_utils.is_auto_address_subnet(subnet) if is_auto_addr_subnet: self._subnet_check_ip_allocations_internal_router_ports( context, id) else: qry_network_ports = ( qry_network_ports.filter(models_v2.Port.device_owner. in_(AUTO_DELETE_PORT_OWNERS))) network_ports = qry_network_ports.all() if network_ports: for port in network_ports: context.session.delete(port) # Check if there are more IP allocations, unless # is_auto_address_subnet is True. In that case the check is # unnecessary.
This additional check not only would be wasteful # for this class of subnet, but is also error-prone since when # the isolation level is set to READ COMMITTED allocations made # concurrently will be returned by this query if not is_auto_addr_subnet: alloc = self._subnet_check_ip_allocations(context, id) if alloc: LOG.info(_LI("Found port (%(port_id)s, %(ip)s) having IP " "allocation on subnet " "%(subnet)s, cannot delete"), {'ip': alloc.ip_address, 'port_id': alloc.port_id, 'subnet': id}) raise n_exc.SubnetInUse(subnet_id=id) context.session.delete(subnet) # Delete related ipam subnet manually, # since there is no FK relationship self.ipam.delete_subnet(context, id) def get_subnet(self, context, id, fields=None): subnet = self._get_subnet(context, id) return self._make_subnet_dict(subnet, fields, context=context) def get_subnets(self, context, filters=None, fields=None, sorts=None, limit=None, marker=None, page_reverse=False): return self._get_subnets(context, filters, fields, sorts, limit, marker, page_reverse) def get_subnets_count(self, context, filters=None): return self._get_collection_count(context, models_v2.Subnet, filters=filters) def get_subnets_by_network(self, context, network_id): return [self._make_subnet_dict(subnet_db) for subnet_db in self._get_subnets_by_network(context, network_id)] def _create_subnetpool_prefix(self, context, cidr, subnetpool_id): prefix_args = {'cidr': cidr, 'subnetpool_id': subnetpool_id} subnetpool_prefix = models_v2.SubnetPoolPrefix(**prefix_args) context.session.add(subnetpool_prefix) def _validate_address_scope_id(self, context, address_scope_id, subnetpool_id, sp_prefixes, ip_version): """Validate the address scope before associating. Subnetpool can associate with an address scope if - the tenant user is the owner of both the subnetpool and address scope - the admin is associating the subnetpool with the shared address scope - there is no prefix conflict with the existing subnetpools associated with the address scope. - the address family of the subnetpool and address scope are the same """ if not attributes.is_attr_set(address_scope_id): return if not self.is_address_scope_owned_by_tenant(context, address_scope_id): raise n_exc.IllegalSubnetPoolAssociationToAddressScope( subnetpool_id=subnetpool_id, address_scope_id=address_scope_id) as_ip_version = self.get_ip_version_for_address_scope(context, address_scope_id) if ip_version != as_ip_version: raise n_exc.IllegalSubnetPoolIpVersionAssociationToAddressScope( subnetpool_id=subnetpool_id, address_scope_id=address_scope_id, ip_version=as_ip_version) subnetpools = self._get_subnetpools_by_address_scope_id( context, address_scope_id) new_set = netaddr.IPSet(sp_prefixes) for sp in subnetpools: if sp.id == subnetpool_id: continue sp_set = netaddr.IPSet([prefix['cidr'] for prefix in sp.prefixes]) if sp_set.intersection(new_set): raise n_exc.AddressScopePrefixConflict() def _check_subnetpool_update_allowed(self, context, subnetpool_id, address_scope_id): """Check if the subnetpool can be updated or not. If the subnetpool is associated to a shared address scope not owned by the tenant, then the subnetpool cannot be updated. 
""" if not self.is_address_scope_owned_by_tenant(context, address_scope_id): msg = _("subnetpool %(subnetpool_id)s cannot be updated when" " associated with shared address scope " "%(address_scope_id)s") % { 'subnetpool_id': subnetpool_id, 'address_scope_id': address_scope_id} raise n_exc.IllegalSubnetPoolUpdate(reason=msg) def _check_default_subnetpool_exists(self, context, ip_version): """Check if a default already exists for the given IP version. There can only be one default subnetpool for each IP family. Raise an InvalidInput error if a default has already been set. """ if self.get_default_subnetpool(context, ip_version): msg = _("A default subnetpool for this IP family has already " "been set. Only one default may exist per IP family") raise n_exc.InvalidInput(error_message=msg) def create_subnetpool(self, context, subnetpool): """Create a subnetpool""" sp = subnetpool['subnetpool'] sp_reader = subnet_alloc.SubnetPoolReader(sp) if sp_reader.address_scope_id is attributes.ATTR_NOT_SPECIFIED: sp_reader.address_scope_id = None if sp_reader.is_default: self._check_default_subnetpool_exists(context, sp_reader.ip_version) self._validate_address_scope_id(context, sp_reader.address_scope_id, id, sp_reader.prefixes, sp_reader.ip_version) with context.session.begin(subtransactions=True): pool_args = {'tenant_id': sp['tenant_id'], 'id': sp_reader.id, 'name': sp_reader.name, 'ip_version': sp_reader.ip_version, 'default_prefixlen': sp_reader.default_prefixlen, 'min_prefixlen': sp_reader.min_prefixlen, 'max_prefixlen': sp_reader.max_prefixlen, 'is_default': sp_reader.is_default, 'shared': sp_reader.shared, 'default_quota': sp_reader.default_quota, 'address_scope_id': sp_reader.address_scope_id, 'description': sp_reader.description} subnetpool = models_v2.SubnetPool(**pool_args) context.session.add(subnetpool) for prefix in sp_reader.prefixes: self._create_subnetpool_prefix(context, prefix, subnetpool.id) return self._make_subnetpool_dict(subnetpool) def _update_subnetpool_prefixes(self, context, prefix_list, id): with context.session.begin(subtransactions=True): context.session.query(models_v2.SubnetPoolPrefix).filter_by( subnetpool_id=id).delete() for prefix in prefix_list: model_prefix = models_v2.SubnetPoolPrefix(cidr=prefix, subnetpool_id=id) context.session.add(model_prefix) def _updated_subnetpool_dict(self, model, new_pool): updated = {} new_prefixes = new_pool.get('prefixes', attributes.ATTR_NOT_SPECIFIED) orig_prefixes = [str(x.cidr) for x in model['prefixes']] if new_prefixes is not attributes.ATTR_NOT_SPECIFIED: orig_set = netaddr.IPSet(orig_prefixes) new_set = netaddr.IPSet(new_prefixes) if not orig_set.issubset(new_set): msg = _("Existing prefixes must be " "a subset of the new prefixes") raise n_exc.IllegalSubnetPoolPrefixUpdate(msg=msg) new_set.compact() updated['prefixes'] = [str(x.cidr) for x in new_set.iter_cidrs()] else: updated['prefixes'] = orig_prefixes for key in ['id', 'name', 'ip_version', 'min_prefixlen', 'max_prefixlen', 'default_prefixlen', 'is_default', 'shared', 'default_quota', 'address_scope_id', 'standard_attr', 'description']: self._write_key(key, updated, model, new_pool) return updated def _write_key(self, key, update, orig, new_dict): new_val = new_dict.get(key, attributes.ATTR_NOT_SPECIFIED) if new_val is not attributes.ATTR_NOT_SPECIFIED: update[key] = new_dict[key] else: update[key] = orig[key] def update_subnetpool(self, context, id, subnetpool): """Update a subnetpool""" new_sp = subnetpool['subnetpool'] with context.session.begin(subtransactions=True): 
orig_sp = self._get_subnetpool(context, id) updated = self._updated_subnetpool_dict(orig_sp, new_sp) updated['tenant_id'] = orig_sp.tenant_id reader = subnet_alloc.SubnetPoolReader(updated) if reader.is_default and not orig_sp.is_default: self._check_default_subnetpool_exists(context, reader.ip_version) if orig_sp.address_scope_id: self._check_subnetpool_update_allowed(context, id, orig_sp.address_scope_id) self._validate_address_scope_id(context, reader.address_scope_id, id, reader.prefixes, reader.ip_version) address_scope_changed = (orig_sp.address_scope_id != reader.address_scope_id) orig_sp.update(self._filter_non_model_columns( reader.subnetpool, models_v2.SubnetPool)) self._update_subnetpool_prefixes(context, reader.prefixes, id) if address_scope_changed: # Notify about the update of subnetpool's address scope kwargs = {'context': context, 'subnetpool_id': id} registry.notify(resources.SUBNETPOOL_ADDRESS_SCOPE, events.AFTER_UPDATE, self.update_subnetpool, **kwargs) for key in ['min_prefixlen', 'max_prefixlen', 'default_prefixlen']: updated[key] = str(updated[key]) self._apply_dict_extend_functions(attributes.SUBNETPOOLS, updated, orig_sp) return updated def get_subnetpool(self, context, id, fields=None): """Retrieve a subnetpool.""" subnetpool = self._get_subnetpool(context, id) return self._make_subnetpool_dict(subnetpool, fields) def get_subnetpools(self, context, filters=None, fields=None, sorts=None, limit=None, marker=None, page_reverse=False): """Retrieve list of subnetpools.""" marker_obj = self._get_marker_obj(context, 'subnetpool', limit, marker) collection = self._get_collection(context, models_v2.SubnetPool, self._make_subnetpool_dict, filters=filters, fields=fields, sorts=sorts, limit=limit, marker_obj=marker_obj, page_reverse=page_reverse) return collection def get_default_subnetpool(self, context, ip_version): """Retrieve the default subnetpool for the given IP version.""" filters = {'is_default': [True], 'ip_version': [ip_version]} subnetpool = self.get_subnetpools(context, filters=filters) if subnetpool: return subnetpool[0] def delete_subnetpool(self, context, id): """Delete a subnetpool.""" with context.session.begin(subtransactions=True): subnetpool = self._get_subnetpool(context, id) subnets = self._get_subnets_by_subnetpool(context, id) if subnets: reason = _("Subnet pool has existing allocations") raise n_exc.SubnetPoolDeleteError(reason=reason) context.session.delete(subnetpool) def _check_mac_addr_update(self, context, port, new_mac, device_owner): if (device_owner and device_owner.startswith(constants.DEVICE_OWNER_NETWORK_PREFIX)): raise n_exc.UnsupportedPortDeviceOwner( op=_("mac address update"), port_id=port['id'], device_owner=device_owner) def create_port_bulk(self, context, ports): return self._create_bulk('port', context, ports) def _get_dns_domain(self): if not cfg.CONF.dns_domain: return '' if cfg.CONF.dns_domain.endswith('.'): return cfg.CONF.dns_domain return '%s.'
% cfg.CONF.dns_domain def _get_request_dns_name(self, port): dns_domain = self._get_dns_domain() if ((dns_domain and dns_domain != DNS_DOMAIN_DEFAULT)): return port.get('dns_name', '') return '' def _get_dns_names_for_port(self, context, ips, request_dns_name): dns_assignment = [] dns_domain = self._get_dns_domain() if request_dns_name: request_fqdn = request_dns_name if not request_dns_name.endswith('.'): request_fqdn = '%s.%s' % (request_dns_name, dns_domain) for ip in ips: if request_dns_name: hostname = request_dns_name fqdn = request_fqdn else: hostname = 'host-%s' % ip['ip_address'].replace( '.', '-').replace(':', '-') fqdn = hostname if dns_domain: fqdn = '%s.%s' % (hostname, dns_domain) dns_assignment.append({'ip_address': ip['ip_address'], 'hostname': hostname, 'fqdn': fqdn}) return dns_assignment def _create_port_with_mac(self, context, network_id, port_data, mac_address): try: # since this method could either be used within or outside the # transaction, use convenience method to avoid passing a flag with db_api.autonested_transaction(context.session): db_port = models_v2.Port(mac_address=mac_address, **port_data) context.session.add(db_port) return db_port except db_exc.DBDuplicateEntry: raise n_exc.MacAddressInUse(net_id=network_id, mac=mac_address) def _create_port(self, context, network_id, port_data): max_retries = cfg.CONF.mac_generation_retries for i in range(max_retries): mac = self._generate_mac() try: return self._create_port_with_mac( context, network_id, port_data, mac) except n_exc.MacAddressInUse: LOG.debug('Generated mac %(mac_address)s exists on ' 'network %(network_id)s', {'mac_address': mac, 'network_id': network_id}) LOG.error(_LE("Unable to generate mac address after %s attempts"), max_retries) raise n_exc.MacAddressGenerationFailure(net_id=network_id) def create_port(self, context, port): db_port = self.create_port_db(context, port) return self._make_port_dict(db_port, process_extensions=False) def create_port_db(self, context, port): p = port['port'] port_id = p.get('id') or uuidutils.generate_uuid() network_id = p['network_id'] # NOTE(jkoelker) Get the tenant_id outside of the session to avoid # unneeded db action if the operation raises tenant_id = p['tenant_id'] if p.get('device_owner'): self._enforce_device_owner_not_router_intf_or_device_id( context, p.get('device_owner'), p.get('device_id'), tenant_id) port_data = dict(tenant_id=tenant_id, name=p['name'], id=port_id, network_id=network_id, admin_state_up=p['admin_state_up'], status=p.get('status', constants.PORT_STATUS_ACTIVE), device_id=p['device_id'], device_owner=p['device_owner'], description=p.get('description')) if ('dns-integration' in self.supported_extension_aliases and 'dns_name' in p): request_dns_name = self._get_request_dns_name(p) port_data['dns_name'] = request_dns_name with context.session.begin(subtransactions=True): # Ensure that the network exists. 
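# (_get_network raises NetworkNotFound here, before any MAC generation # or IP allocation work has been done for the port.)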
self._get_network(context, network_id) # Create the port if p['mac_address'] is attributes.ATTR_NOT_SPECIFIED: db_port = self._create_port(context, network_id, port_data) p['mac_address'] = db_port['mac_address'] else: db_port = self._create_port_with_mac( context, network_id, port_data, p['mac_address']) ips = self.ipam.allocate_ips_for_port_and_store(context, port, port_id) if ('dns-integration' in self.supported_extension_aliases and 'dns_name' in p): dns_assignment = [] if ips: dns_assignment = self._get_dns_names_for_port( context, ips, request_dns_name) db_port['dns_assignment'] = dns_assignment return db_port def _validate_port_for_update(self, context, db_port, new_port, new_mac): changed_owner = 'device_owner' in new_port current_owner = (new_port.get('device_owner') or db_port['device_owner']) changed_device_id = new_port.get('device_id') != db_port['device_id'] current_device_id = new_port.get('device_id') or db_port['device_id'] if current_owner and changed_device_id or changed_owner: self._enforce_device_owner_not_router_intf_or_device_id( context, current_owner, current_device_id, db_port['tenant_id']) if new_mac and new_mac != db_port['mac_address']: self._check_mac_addr_update(context, db_port, new_mac, current_owner) def _get_dns_names_for_updated_port(self, context, original_ips, original_dns_name, request_dns_name, changes): if changes.original or changes.add or changes.remove: return self._get_dns_names_for_port( context, changes.original + changes.add, request_dns_name or original_dns_name) if original_ips: return self._get_dns_names_for_port( context, original_ips, request_dns_name or original_dns_name) return [] def update_port(self, context, id, port): new_port = port['port'] with context.session.begin(subtransactions=True): port = self._get_port(context, id) if 'dns-integration' in self.supported_extension_aliases: original_ips = self._make_fixed_ip_dict(port['fixed_ips']) original_dns_name = port.get('dns_name', '') request_dns_name = self._get_request_dns_name(new_port) if 'dns_name' in new_port and not request_dns_name: new_port['dns_name'] = '' new_mac = new_port.get('mac_address') self._validate_port_for_update(context, port, new_port, new_mac) changes = self.ipam.update_port_with_ips(context, port, new_port, new_mac) if 'dns-integration' in self.supported_extension_aliases: dns_assignment = self._get_dns_names_for_updated_port( context, original_ips, original_dns_name, request_dns_name, changes) result = self._make_port_dict(port) # Keep up with fields that changed if changes.original or changes.add or changes.remove: result['fixed_ips'] = self._make_fixed_ip_dict( changes.original + changes.add) if 'dns-integration' in self.supported_extension_aliases: result['dns_assignment'] = dns_assignment return result def delete_port(self, context, id): with context.session.begin(subtransactions=True): self.ipam.delete_port(context, id) def delete_ports_by_device_id(self, context, device_id, network_id=None): query = (context.session.query(models_v2.Port.id) .enable_eagerloads(False) .filter(models_v2.Port.device_id == device_id)) if network_id: query = query.filter(models_v2.Port.network_id == network_id) port_ids = [p[0] for p in query] for port_id in port_ids: try: self.delete_port(context, port_id) except n_exc.PortNotFound: # Don't raise if something else concurrently deleted the port LOG.debug("Ignoring PortNotFound when deleting port '%s'. 
" "The port has already been deleted.", port_id) def _get_dns_name_for_port_get(self, context, port): if port['fixed_ips']: return self._get_dns_names_for_port( context, port['fixed_ips'], port['dns_name']) return [] def get_port(self, context, id, fields=None): port = self._get_port(context, id) if (('dns-integration' in self.supported_extension_aliases and 'dns_name' in port)): port['dns_assignment'] = self._get_dns_name_for_port_get(context, port) return self._make_port_dict(port, fields) def _get_ports_query(self, context, filters=None, sorts=None, limit=None, marker_obj=None, page_reverse=False): Port = models_v2.Port IPAllocation = models_v2.IPAllocation if not filters: filters = {} query = self._model_query(context, Port) fixed_ips = filters.pop('fixed_ips', {}) ip_addresses = fixed_ips.get('ip_address') subnet_ids = fixed_ips.get('subnet_id') if ip_addresses or subnet_ids: query = query.join(Port.fixed_ips) if ip_addresses: query = query.filter(IPAllocation.ip_address.in_(ip_addresses)) if subnet_ids: query = query.filter(IPAllocation.subnet_id.in_(subnet_ids)) query = self._apply_filters_to_query(query, Port, filters, context) if limit and page_reverse and sorts: sorts = [(s[0], not s[1]) for s in sorts] query = sqlalchemyutils.paginate_query(query, Port, limit, sorts, marker_obj) return query def get_ports(self, context, filters=None, fields=None, sorts=None, limit=None, marker=None, page_reverse=False): marker_obj = self._get_marker_obj(context, 'port', limit, marker) query = self._get_ports_query(context, filters=filters, sorts=sorts, limit=limit, marker_obj=marker_obj, page_reverse=page_reverse) items = [] for c in query: if (('dns-integration' in self.supported_extension_aliases and 'dns_name' in c)): c['dns_assignment'] = self._get_dns_name_for_port_get(context, c) items.append(self._make_port_dict(c, fields)) if limit and page_reverse: items.reverse() return items def get_ports_count(self, context, filters=None): return self._get_ports_query(context, filters).count() def _enforce_device_owner_not_router_intf_or_device_id(self, context, device_owner, device_id, tenant_id): """Prevent tenants from replacing the device id of router ports with a router uuid belonging to another tenant. """ if device_owner not in constants.ROUTER_INTERFACE_OWNERS: return if not context.is_admin: # check to make sure device_id does not match another tenants # router. if device_id: if hasattr(self, 'get_router'): try: ctx_admin = context.elevated() router = self.get_router(ctx_admin, device_id) except l3.RouterNotFound: return else: l3plugin = ( manager.NeutronManager.get_service_plugins().get( service_constants.L3_ROUTER_NAT)) if l3plugin: try: ctx_admin = context.elevated() router = l3plugin.get_router(ctx_admin, device_id) except l3.RouterNotFound: return else: # raise as extension doesn't support L3 anyways. raise n_exc.DeviceIDNotOwnedByTenant( device_id=device_id) if tenant_id != router['tenant_id']: raise n_exc.DeviceIDNotOwnedByTenant(device_id=device_id) db_base_plugin_common.DbBasePluginCommon.register_model_query_hook( models_v2.Port, "port", '_port_query_hook', '_port_filter_hook', None) neutron-8.4.0/neutron/db/dvr_mac_db.py0000664000567000056710000002203413044372760021076 0ustar jenkinsjenkins00000000000000# Copyright 2014 Hewlett-Packard Development Company, L.P. # All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import cfg from oslo_db import exception as db_exc from oslo_log import helpers as log_helpers from oslo_log import log as logging import sqlalchemy as sa from sqlalchemy import or_ from sqlalchemy.orm import exc from neutron._i18n import _, _LE from neutron.callbacks import events from neutron.callbacks import registry from neutron.callbacks import resources from neutron.common import constants from neutron.common import exceptions as n_exc from neutron.common import utils from neutron.db import model_base from neutron.db import models_v2 from neutron.extensions import dvr as ext_dvr from neutron.extensions import portbindings from neutron import manager LOG = logging.getLogger(__name__) dvr_mac_address_opts = [ cfg.StrOpt('dvr_base_mac', default="fa:16:3f:00:00:00", help=_("The base mac address used for unique " "DVR instances by Neutron. The first 3 octets will " "remain unchanged. If the 4th octet is not 00, it will " "also be used. The others will be randomly generated. " "The 'dvr_base_mac' *must* be different from " "'base_mac' to avoid mixing them up with MAC's " "allocated for tenant ports. A 4 octet example would be " "dvr_base_mac = fa:16:3f:4f:00:00. The default is 3 " "octet")), ] cfg.CONF.register_opts(dvr_mac_address_opts) class DistributedVirtualRouterMacAddress(model_base.BASEV2): """Represents a v2 neutron distributed virtual router mac address.""" __tablename__ = 'dvr_host_macs' host = sa.Column(sa.String(255), primary_key=True, nullable=False) mac_address = sa.Column(sa.String(32), nullable=False, unique=True) def _delete_mac_associated_with_agent(resource, event, trigger, context, agent, **kwargs): host = agent['host'] plugin = manager.NeutronManager.get_plugin() if [a for a in plugin.get_agents(context, filters={'host': [host]}) if a['id'] != agent['id']]: # there are still agents on this host, don't mess with the mac entry # until they are all deleted. return try: with context.session.begin(subtransactions=True): entry = (context.session.query(DistributedVirtualRouterMacAddress). filter(DistributedVirtualRouterMacAddress.host == host). 
one()) context.session.delete(entry) except exc.NoResultFound: return # notify remaining agents so they cleanup flows dvr_macs = plugin.get_dvr_mac_address_list(context) plugin.notifier.dvr_mac_address_update(context, dvr_macs) class DVRDbMixin(ext_dvr.DVRMacAddressPluginBase): """Mixin class to add dvr mac address to db_plugin_base_v2.""" def __new__(cls, *args, **kwargs): registry.subscribe(_delete_mac_associated_with_agent, resources.AGENT, events.BEFORE_DELETE) return super(DVRDbMixin, cls).__new__(cls) @property def plugin(self): try: if self._plugin is not None: return self._plugin except AttributeError: pass self._plugin = manager.NeutronManager.get_plugin() return self._plugin def _get_dvr_mac_address_by_host(self, context, host): try: query = context.session.query(DistributedVirtualRouterMacAddress) dvrma = query.filter( DistributedVirtualRouterMacAddress.host == host).one() except exc.NoResultFound: raise ext_dvr.DVRMacAddressNotFound(host=host) return dvrma def _create_dvr_mac_address(self, context, host): """Create DVR mac address for a given host.""" base_mac = cfg.CONF.dvr_base_mac.split(':') max_retries = cfg.CONF.mac_generation_retries for attempt in reversed(range(max_retries)): try: with context.session.begin(subtransactions=True): mac_address = utils.get_random_mac(base_mac) dvr_mac_binding = DistributedVirtualRouterMacAddress( host=host, mac_address=mac_address) context.session.add(dvr_mac_binding) LOG.debug("Generated DVR mac for host %(host)s " "is %(mac_address)s", {'host': host, 'mac_address': mac_address}) dvr_macs = self.get_dvr_mac_address_list(context) # TODO(vivek): improve scalability of this fanout by # sending a single mac address rather than the entire set self.notifier.dvr_mac_address_update(context, dvr_macs) return self._make_dvr_mac_address_dict(dvr_mac_binding) except db_exc.DBDuplicateEntry: LOG.debug("Generated DVR mac %(mac)s exists." " Remaining attempts %(attempts_left)s.", {'mac': mac_address, 'attempts_left': attempt}) LOG.error(_LE("MAC generation error after %s attempts"), max_retries) raise ext_dvr.MacAddressGenerationFailure(host=host) def get_dvr_mac_address_list(self, context): with context.session.begin(subtransactions=True): return (context.session. query(DistributedVirtualRouterMacAddress).all()) def get_dvr_mac_address_by_host(self, context, host): """Determine the MAC for the DVR port associated to host.""" if not host: return try: return self._get_dvr_mac_address_by_host(context, host) except ext_dvr.DVRMacAddressNotFound: return self._create_dvr_mac_address(context, host) def _make_dvr_mac_address_dict(self, dvr_mac_entry, fields=None): return {'host': dvr_mac_entry['host'], 'mac_address': dvr_mac_entry['mac_address']} @log_helpers.log_method_call def get_ports_on_host_by_subnet(self, context, host, subnet): """Returns DVR serviced ports on a given subnet in the input host This method returns ports that need to be serviced by DVR. 
:param context: rpc request context :param host: host id to match and extract ports of interest :param subnet: subnet id to match and extract ports of interest :returns list -- Ports on the given subnet in the input host """ filters = {'fixed_ips': {'subnet_id': [subnet]}, portbindings.HOST_ID: [host]} ports_query = self.plugin._get_ports_query(context, filters=filters) owner_filter = or_( models_v2.Port.device_owner.startswith( constants.DEVICE_OWNER_COMPUTE_PREFIX), models_v2.Port.device_owner.in_( utils.get_other_dvr_serviced_device_owners())) ports_query = ports_query.filter(owner_filter) ports = [ self.plugin._make_port_dict(port, process_extensions=False) for port in ports_query.all() ] LOG.debug("Returning list of dvr serviced ports on host %(host)s" " for subnet %(subnet)s ports %(ports)s", {'host': host, 'subnet': subnet, 'ports': ports}) return ports @log_helpers.log_method_call def get_subnet_for_dvr(self, context, subnet, fixed_ips=None): if fixed_ips: subnet_data = fixed_ips[0]['subnet_id'] else: subnet_data = subnet try: subnet_info = self.plugin.get_subnet( context, subnet_data) except n_exc.SubnetNotFound: return {} else: # retrieve the gateway port on this subnet if fixed_ips: ip_address = fixed_ips[0]['ip_address'] else: ip_address = subnet_info['gateway_ip'] filter = {'fixed_ips': {'subnet_id': [subnet], 'ip_address': [ip_address]}} internal_gateway_ports = self.plugin.get_ports( context, filters=filter) if not internal_gateway_ports: LOG.error(_LE("Could not retrieve gateway port " "for subnet %s"), subnet_info) return {} internal_port = internal_gateway_ports[0] subnet_info['gateway_mac'] = internal_port['mac_address'] return subnet_info neutron-8.4.0/neutron/db/qos/0000775000567000056710000000000013044373210017234 5ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/db/qos/__init__.py0000664000567000056710000000000013044372736021347 0ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/db/qos/models.py0000775000567000056710000000645613044372760021120 0ustar jenkinsjenkins00000000000000# Copyright 2015 Huawei Technologies India Pvt Ltd, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
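# A minimal standalone sketch (not part of the original module) of the
# dvr_base_mac behaviour documented in dvr_mac_db.py above: the first
# three octets of the base MAC are always kept, the fourth octet is kept
# only when it is not '00', and the remaining octets are randomized. It
# mirrors what neutron.common.utils.get_random_mac is expected to do;
# the function name and default value below are illustrative only.
import random


def _demo_random_dvr_mac(base_mac='fa:16:3f:4f:00:00'):
    octets = base_mac.split(':')
    mac = [int(octets[0], 16), int(octets[1], 16), int(octets[2], 16),
           random.randint(0x00, 0xff),
           random.randint(0x00, 0xff),
           random.randint(0x00, 0xff)]
    if octets[3] != '00':
        # a non-zero fourth octet in dvr_base_mac is preserved as well
        mac[3] = int(octets[3], 16)
    return ':'.join('%02x' % octet for octet in mac)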
import sqlalchemy as sa from neutron.api.v2 import attributes as attrs from neutron.db import model_base from neutron.db import models_v2 from neutron.db import rbac_db_models class QosPolicy(model_base.BASEV2, model_base.HasId, model_base.HasTenant): __tablename__ = 'qos_policies' name = sa.Column(sa.String(attrs.NAME_MAX_LEN)) description = sa.Column(sa.String(attrs.DESCRIPTION_MAX_LEN)) rbac_entries = sa.orm.relationship(rbac_db_models.QosPolicyRBAC, backref='qos_policy', lazy='joined', cascade='all, delete, delete-orphan') class QosNetworkPolicyBinding(model_base.BASEV2): __tablename__ = 'qos_network_policy_bindings' policy_id = sa.Column(sa.String(36), sa.ForeignKey('qos_policies.id', ondelete='CASCADE'), nullable=False, primary_key=True) network_id = sa.Column(sa.String(36), sa.ForeignKey('networks.id', ondelete='CASCADE'), nullable=False, unique=True, primary_key=True) network = sa.orm.relationship( models_v2.Network, backref=sa.orm.backref("qos_policy_binding", uselist=False, cascade='delete', lazy='joined')) class QosPortPolicyBinding(model_base.BASEV2): __tablename__ = 'qos_port_policy_bindings' policy_id = sa.Column(sa.String(36), sa.ForeignKey('qos_policies.id', ondelete='CASCADE'), nullable=False, primary_key=True) port_id = sa.Column(sa.String(36), sa.ForeignKey('ports.id', ondelete='CASCADE'), nullable=False, unique=True, primary_key=True) port = sa.orm.relationship( models_v2.Port, backref=sa.orm.backref("qos_policy_binding", uselist=False, cascade='delete', lazy='joined')) class QosBandwidthLimitRule(model_base.HasId, model_base.BASEV2): __tablename__ = 'qos_bandwidth_limit_rules' qos_policy_id = sa.Column(sa.String(36), sa.ForeignKey('qos_policies.id', ondelete='CASCADE'), nullable=False, unique=True) max_kbps = sa.Column(sa.Integer) max_burst_kbps = sa.Column(sa.Integer) neutron-8.4.0/neutron/db/qos/api.py0000664000567000056710000000551613044372760020377 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
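# The binding models in qos/models.py above mark network_id/port_id as
# both primary key and unique, so the schema itself guarantees at most
# one QoS policy binding per network or per port. A self-contained
# sketch of that guarantee against an in-memory SQLite database (the
# table name and ids are illustrative; the demo table is a simplified
# stand-in for QosNetworkPolicyBinding):
import sqlalchemy as sa


def _demo_one_policy_per_network():
    engine = sa.create_engine('sqlite://')
    metadata = sa.MetaData()
    bindings = sa.Table(
        'demo_qos_network_policy_bindings', metadata,
        sa.Column('policy_id', sa.String(36), primary_key=True),
        sa.Column('network_id', sa.String(36), primary_key=True,
                  unique=True))
    metadata.create_all(engine)
    conn = engine.connect()
    conn.execute(bindings.insert().values(policy_id='p1', network_id='n1'))
    try:
        # a second policy bound to the same network violates the UNIQUE
        # constraint and is rejected by the database
        conn.execute(bindings.insert().values(policy_id='p2',
                                              network_id='n1'))
    except sa.exc.IntegrityError:
        pass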
from oslo_db import exception as oslo_db_exception from sqlalchemy.orm import exc as orm_exc from neutron.common import exceptions as n_exc from neutron.db import common_db_mixin as db from neutron.db.qos import models def create_policy_network_binding(context, policy_id, network_id): try: with context.session.begin(subtransactions=True): db_obj = models.QosNetworkPolicyBinding(policy_id=policy_id, network_id=network_id) context.session.add(db_obj) except oslo_db_exception.DBReferenceError: raise n_exc.NetworkQosBindingNotFound(net_id=network_id, policy_id=policy_id) def delete_policy_network_binding(context, policy_id, network_id): try: with context.session.begin(subtransactions=True): db_object = (db.model_query(context, models.QosNetworkPolicyBinding) .filter_by(policy_id=policy_id, network_id=network_id).one()) context.session.delete(db_object) except orm_exc.NoResultFound: raise n_exc.NetworkQosBindingNotFound(net_id=network_id, policy_id=policy_id) def create_policy_port_binding(context, policy_id, port_id): try: with context.session.begin(subtransactions=True): db_obj = models.QosPortPolicyBinding(policy_id=policy_id, port_id=port_id) context.session.add(db_obj) except oslo_db_exception.DBReferenceError: raise n_exc.PortQosBindingNotFound(port_id=port_id, policy_id=policy_id) def delete_policy_port_binding(context, policy_id, port_id): try: with context.session.begin(subtransactions=True): db_object = (db.model_query(context, models.QosPortPolicyBinding) .filter_by(policy_id=policy_id, port_id=port_id).one()) context.session.delete(db_object) except orm_exc.NoResultFound: raise n_exc.PortQosBindingNotFound(port_id=port_id, policy_id=policy_id) neutron-8.4.0/neutron/db/servicetype_db.py0000664000567000056710000001112713044372760022026 0ustar jenkinsjenkins00000000000000# Copyright 2013 OpenStack Foundation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
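# Example usage of the qos.api helpers above from plugin code (a sketch;
# 'context', 'policy' and 'net' are assumed to be supplied by the caller):
#
#     from neutron.db.qos import api as qos_api
#
#     qos_api.create_policy_network_binding(
#         context, policy_id=policy['id'], network_id=net['id'])
#     ...
#     qos_api.delete_policy_network_binding(
#         context, policy_id=policy['id'], network_id=net['id'])
#
# Both calls raise NetworkQosBindingNotFound when the referenced policy or
# network does not exist (or, for the delete, when no binding is present).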
from itertools import chain from oslo_log import log as logging import sqlalchemy as sa from neutron.api.v2 import attributes as attr from neutron.db import model_base from neutron.services import provider_configuration as pconf LOG = logging.getLogger(__name__) class ProviderResourceAssociation(model_base.BASEV2): provider_name = sa.Column(sa.String(attr.NAME_MAX_LEN), nullable=False, primary_key=True) # should be manually deleted on resource deletion resource_id = sa.Column(sa.String(36), nullable=False, primary_key=True, unique=True) class ServiceTypeManager(object): """Manage service type objects in Neutron.""" _instance = None @classmethod def get_instance(cls): if cls._instance is None: cls._instance = cls() return cls._instance def __init__(self): self.config = {} def add_provider_configuration(self, service_type, configuration): """Add or update the provider configuration for the service type.""" LOG.debug('Adding provider configuration for service %s', service_type) self.config.update({service_type: configuration}) def get_service_providers(self, context, filters=None, fields=None): if filters and 'service_type' in filters: return list( chain.from_iterable(self.config[svc_type]. get_service_providers(filters, fields) for svc_type in filters['service_type'] if svc_type in self.config) ) return list( chain.from_iterable( self.config[p].get_service_providers(filters, fields) for p in self.config) ) def get_default_service_provider(self, context, service_type): """Return the default provider for a given service type.""" filters = {'service_type': [service_type], 'default': [True]} providers = self.get_service_providers(context, filters=filters) # By construction we expect at most a single item in provider if not providers: raise pconf.DefaultServiceProviderNotFound( service_type=service_type ) return providers[0] def add_resource_association(self, context, service_type, provider_name, resource_id): r = self.get_service_providers(context, filters={'service_type': [service_type], 'name': [provider_name]}) if not r: raise pconf.ServiceProviderNotFound(provider=provider_name, service_type=service_type) with context.session.begin(subtransactions=True): # we don't actually need service type for association. # resource_id is unique and belongs to specific service # which knows its type assoc = ProviderResourceAssociation(provider_name=provider_name, resource_id=resource_id) context.session.add(assoc) # NOTE(blogan): the ProviderResourceAssociation relationship will not # be populated if a resource was created before this. The expire_all # will force the session to go retrieve the new data when that # resource will be read again. It has been suggested that we can # crawl through everything in the mapper to find the resource with # the ID that matches resource_id and expire that one, but we can # just start with this. context.session.expire_all() def del_resource_associations(self, context, resource_ids): if not resource_ids: return with context.session.begin(subtransactions=True): (context.session.query(ProviderResourceAssociation). filter( ProviderResourceAssociation.resource_id.in_(resource_ids)). delete(synchronize_session='fetch')) neutron-8.4.0/neutron/db/rbac_db_models.py0000664000567000056710000000713713044372760021744 0ustar jenkinsjenkins00000000000000# Copyright (c) 2015 Mirantis, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import abc import sqlalchemy as sa from sqlalchemy.ext import declarative from sqlalchemy.orm import validates from neutron._i18n import _ from neutron.api.v2 import attributes as attr from neutron.common import exceptions as n_exc from neutron.db import model_base from neutron import manager ACCESS_SHARED = 'access_as_shared' ACCESS_EXTERNAL = 'access_as_external' class InvalidActionForType(n_exc.InvalidInput): message = _("Invalid action '%(action)s' for object type " "'%(object_type)s'. Valid actions: %(valid_actions)s") class RBACColumns(model_base.HasId, model_base.HasTenant): """Mixin that object-specific RBAC tables should inherit. All RBAC tables should inherit directly from this one because the RBAC code uses the __subclasses__() method to discover the RBAC types. """ # the target_tenant is the subject that the policy will affect. this may # also be a wildcard '*' to indicate all tenants or it may be a role if # neutron gets better integration with keystone target_tenant = sa.Column(sa.String(attr.TENANT_ID_MAX_LEN), nullable=False) action = sa.Column(sa.String(255), nullable=False) @abc.abstractproperty def object_type(self): # this determines the name that users will use in the API # to reference the type. sub-classes should set their own pass @declarative.declared_attr def __table_args__(cls): return ( sa.UniqueConstraint('target_tenant', 'object_id', 'action'), model_base.BASEV2.__table_args__ ) @validates('action') def _validate_action(self, key, action): if action not in self.get_valid_actions(): raise InvalidActionForType( action=action, object_type=self.object_type, valid_actions=self.get_valid_actions()) return action @abc.abstractmethod def get_valid_actions(self): # object table needs to override this to return an iterable # with the valid actions for rbac entries pass def get_type_model_map(): return {table.object_type: table for table in RBACColumns.__subclasses__()} def _object_id_column(foreign_key): return sa.Column(sa.String(36), sa.ForeignKey(foreign_key, ondelete="CASCADE"), nullable=False) class NetworkRBAC(RBACColumns, model_base.BASEV2): """RBAC table for networks.""" object_id = _object_id_column('networks.id') object_type = 'network' def get_valid_actions(self): actions = (ACCESS_SHARED,) pl = manager.NeutronManager.get_plugin() if 'external-net' in pl.supported_extension_aliases: actions += (ACCESS_EXTERNAL,) return actions class QosPolicyRBAC(RBACColumns, model_base.BASEV2): """RBAC table for qos policies.""" object_id = _object_id_column('qos_policies.id') object_type = 'qos_policy' def get_valid_actions(self): return (ACCESS_SHARED,) neutron-8.4.0/neutron/db/vlantransparent_db.py0000664000567000056710000000231313044372736022706 0ustar jenkinsjenkins00000000000000# Copyright (c) 2015 Cisco Systems, Inc. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License.
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron.api.v2 import attributes from neutron.db import db_base_plugin_v2 from neutron.extensions import vlantransparent class Vlantransparent_db_mixin(object): """Mixin class to add vlan transparent methods to db_base_plugin_v2.""" def _extend_network_dict_vlan_transparent(self, network_res, network_db): network_res[vlantransparent.VLANTRANSPARENT] = ( network_db.vlan_transparent) return network_res db_base_plugin_v2.NeutronDbPluginV2.register_dict_extend_funcs( attributes.NETWORKS, ['_extend_network_dict_vlan_transparent']) neutron-8.4.0/neutron/db/l3_hascheduler_db.py0000664000567000056710000001145613044372760022356 0ustar jenkinsjenkins00000000000000# Copyright (C) 2014 eNovance SAS # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron_lib import constants from sqlalchemy import func from sqlalchemy import sql from neutron.callbacks import events from neutron.callbacks import registry from neutron.callbacks import resources from neutron.db import agents_db from neutron.db import l3_agentschedulers_db as l3_sch_db from neutron.db import l3_attrs_db from neutron.db import l3_db from neutron.extensions import portbindings from neutron import manager from neutron.plugins.common import constants as service_constants class L3_HA_scheduler_db_mixin(l3_sch_db.AZL3AgentSchedulerDbMixin): def get_ha_routers_l3_agents_count(self, context): """Return a map between HA routers and how many agents every router is scheduled to. """ # Postgres requires every column in the select to be present in # the group by statement when using an aggregate function. # One solution is to generate a subquery and join it with the desired # columns. binding_model = l3_sch_db.RouterL3AgentBinding sub_query = (context.session.query( binding_model.router_id, func.count(binding_model.router_id).label('count')). join(l3_attrs_db.RouterExtraAttributes, binding_model.router_id == l3_attrs_db.RouterExtraAttributes.router_id). join(l3_db.Router). filter(l3_attrs_db.RouterExtraAttributes.ha == sql.true()). group_by(binding_model.router_id).subquery()) query = (context.session.query(l3_db.Router, sub_query.c.count). join(sub_query)) return [(self._make_router_dict(router), agent_count) for router, agent_count in query] def get_l3_agents_ordered_by_num_routers(self, context, agent_ids): if not agent_ids: return [] query = (context.session.query(agents_db.Agent, func.count( l3_sch_db.RouterL3AgentBinding.router_id).label('count')). outerjoin(l3_sch_db.RouterL3AgentBinding). group_by(agents_db.Agent.id). filter(agents_db.Agent.id.in_(agent_ids)). 
order_by('count')) return [record[0] for record in query] def _get_agents_dict_for_router(self, agents_and_states): agents = [] for agent, ha_state in agents_and_states: l3_agent_dict = self._make_agent_dict(agent) l3_agent_dict['ha_state'] = ha_state agents.append(l3_agent_dict) return {'agents': agents} def list_l3_agents_hosting_router(self, context, router_id): with context.session.begin(subtransactions=True): router_db = self._get_router(context, router_id) if router_db.extra_attributes.ha: bindings = self.get_l3_bindings_hosting_router_with_ha_states( context, router_id) else: bindings = self._get_l3_bindings_hosting_routers( context, [router_id]) bindings = [(binding.l3_agent, None) for binding in bindings] return self._get_agents_dict_for_router(bindings) def _notify_l3_agent_ha_port_update(resource, event, trigger, **kwargs): # An HA router on an agent has to spawn keepalived only when the HA network # port is active. So notify the agent when the HA network port becomes active. # 'update_device_up' will be set only when the port status changes to active. if not kwargs.get('update_device_up'): return port_db = kwargs.get('port') context = kwargs.get('context') core_plugin = manager.NeutronManager.get_plugin() new_port = core_plugin._make_port_dict(port_db) host = new_port.get(portbindings.HOST_ID) if new_port and host: new_device_owner = new_port.get('device_owner', '') if (new_device_owner == constants.DEVICE_OWNER_ROUTER_HA_INTF and new_port['status'] == constants.PORT_STATUS_ACTIVE): l3plugin = manager.NeutronManager.get_service_plugins().get( service_constants.L3_ROUTER_NAT) l3plugin.l3_rpc_notifier.routers_updated_on_host( context, [new_port['device_id']], host) def subscribe(): registry.subscribe( _notify_l3_agent_ha_port_update, resources.PORT, events.AFTER_UPDATE) neutron-8.4.0/neutron/db/allowedaddresspairs_db.py0000664000567000056710000001465013044372760023524 0ustar jenkinsjenkins00000000000000# Copyright 2013 VMware, Inc. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License.
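# The subscribe() helper at the end of l3_hascheduler_db.py above uses
# the neutron.callbacks registry. A minimal sketch of the same
# publish/subscribe pattern -- the handler and function names below are
# illustrative, not part of this module:
from neutron.callbacks import events
from neutron.callbacks import registry
from neutron.callbacks import resources


def _demo_port_after_update(resource, event, trigger, **kwargs):
    # invoked with whatever kwargs the notifier passed to
    # registry.notify; for PORT/AFTER_UPDATE that includes the
    # updated port
    return kwargs.get('port')


def demo_subscribe():
    registry.subscribe(
        _demo_port_after_update, resources.PORT, events.AFTER_UPDATE)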
# import sqlalchemy as sa from oslo_db import exception as db_exc from sqlalchemy import orm from neutron.api.v2 import attributes as attr from neutron.db import db_base_plugin_v2 from neutron.db import model_base from neutron.db import models_v2 from neutron.extensions import allowedaddresspairs as addr_pair class AllowedAddressPair(model_base.BASEV2): port_id = sa.Column(sa.String(36), sa.ForeignKey('ports.id', ondelete="CASCADE"), primary_key=True) mac_address = sa.Column(sa.String(32), nullable=False, primary_key=True) ip_address = sa.Column(sa.String(64), nullable=False, primary_key=True) port = orm.relationship( models_v2.Port, backref=orm.backref("allowed_address_pairs", lazy="joined", cascade="delete")) class AllowedAddressPairsMixin(object): """Mixin class for allowed address pairs.""" def _process_create_allowed_address_pairs(self, context, port, allowed_address_pairs): if not attr.is_attr_set(allowed_address_pairs): return [] try: with context.session.begin(subtransactions=True): for address_pair in allowed_address_pairs: # use port.mac_address if no mac address in address pair if 'mac_address' not in address_pair: address_pair['mac_address'] = port['mac_address'] db_pair = AllowedAddressPair( port_id=port['id'], mac_address=address_pair['mac_address'], ip_address=address_pair['ip_address']) context.session.add(db_pair) except db_exc.DBDuplicateEntry: raise addr_pair.DuplicateAddressPairInRequest( mac_address=address_pair['mac_address'], ip_address=address_pair['ip_address']) return allowed_address_pairs def get_allowed_address_pairs(self, context, port_id): pairs = (context.session.query(AllowedAddressPair). filter_by(port_id=port_id)) return [self._make_allowed_address_pairs_dict(pair) for pair in pairs] def _extend_port_dict_allowed_address_pairs(self, port_res, port_db): # If port_db is provided, allowed address pairs will be accessed via # sqlalchemy models. As they're loaded together with ports this # will not cause an extra query. allowed_address_pairs = [ self._make_allowed_address_pairs_dict(address_pair) for address_pair in port_db.allowed_address_pairs] port_res[addr_pair.ADDRESS_PAIRS] = allowed_address_pairs return port_res # Register dict extend functions for ports db_base_plugin_v2.NeutronDbPluginV2.register_dict_extend_funcs( attr.PORTS, ['_extend_port_dict_allowed_address_pairs']) def _delete_allowed_address_pairs(self, context, id): query = self._model_query(context, AllowedAddressPair) with context.session.begin(subtransactions=True): query.filter(AllowedAddressPair.port_id == id).delete() def _make_allowed_address_pairs_dict(self, allowed_address_pairs, fields=None): res = {'mac_address': allowed_address_pairs['mac_address'], 'ip_address': allowed_address_pairs['ip_address']} return self._fields(res, fields) def _has_address_pairs(self, port): return (attr.is_attr_set(port['port'][addr_pair.ADDRESS_PAIRS]) and port['port'][addr_pair.ADDRESS_PAIRS] != []) def _check_update_has_allowed_address_pairs(self, port): """Determine if request has an allowed address pair. Return True if the port parameter has a non-empty 'allowed_address_pairs' attribute. Otherwise returns False. """ return (addr_pair.ADDRESS_PAIRS in port['port'] and self._has_address_pairs(port)) def _check_update_deletes_allowed_address_pairs(self, port): """Determine if request deletes address pair. 
Return True if port has an allowed address pair and its value is either [] or not is_attr_set, otherwise return False """ return (addr_pair.ADDRESS_PAIRS in port['port'] and not self._has_address_pairs(port)) def is_address_pairs_attribute_updated(self, port, update_attrs): """Check if the address pairs attribute is being updated. Returns True if there is an update. This can be used to decide if a port update notification should be sent to agents or third party controllers. """ new_pairs = update_attrs.get(addr_pair.ADDRESS_PAIRS) if new_pairs is None: return False old_pairs = port.get(addr_pair.ADDRESS_PAIRS) # Missing or unchanged address pairs in attributes mean no update return new_pairs != old_pairs def update_address_pairs_on_port(self, context, port_id, port, original_port, updated_port): """Update allowed address pairs on port. Returns True if an update notification is required. Notification is not done here because other changes on the port may need notification. This method is expected to be called within a transaction. """ new_pairs = port['port'].get(addr_pair.ADDRESS_PAIRS) if self.is_address_pairs_attribute_updated(original_port, port['port']): updated_port[addr_pair.ADDRESS_PAIRS] = new_pairs self._delete_allowed_address_pairs(context, port_id) self._process_create_allowed_address_pairs( context, updated_port, new_pairs) return True return False neutron-8.4.0/neutron/db/migration/0000775000567000056710000000000013044373210020423 5ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/db/migration/cli.py0000664000567000056710000006421213044372760021562 0ustar jenkinsjenkins00000000000000# Copyright 2012 New Dream Network, LLC (DreamHost) # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import os from alembic import command as alembic_command from alembic import config as alembic_config from alembic import environment from alembic import migration as alembic_migration from alembic import script as alembic_script from alembic import util as alembic_util import debtcollector from oslo_config import cfg from oslo_utils import fileutils from oslo_utils import importutils import pkg_resources import six from neutron._i18n import _ from neutron.common import utils from neutron.db import migration from neutron.db.migration.connection import DBConnection HEAD_FILENAME = 'HEAD' HEADS_FILENAME = 'HEADS' CONTRACT_HEAD_FILENAME = 'CONTRACT_HEAD' EXPAND_HEAD_FILENAME = 'EXPAND_HEAD' CURRENT_RELEASE = migration.MITAKA RELEASES = ( migration.LIBERTY, migration.MITAKA, ) EXPAND_BRANCH = 'expand' CONTRACT_BRANCH = 'contract' MIGRATION_BRANCHES = (EXPAND_BRANCH, CONTRACT_BRANCH) MIGRATION_ENTRYPOINTS = 'neutron.db.alembic_migrations' migration_entrypoints = { entrypoint.name: entrypoint for entrypoint in pkg_resources.iter_entry_points(MIGRATION_ENTRYPOINTS) } BRANCHLESS_WARNING = 'Branchless migration chains are deprecated as of Mitaka.' 
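# Typical invocations of this module through the neutron-db-manage
# console script (the revision message below is a placeholder):
#
#   neutron-db-manage upgrade heads
#   neutron-db-manage upgrade --expand
#   neutron-db-manage upgrade --contract
#   neutron-db-manage revision -m "description of change" --expand
#   neutron-db-manage has_offline_migrations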
neutron_alembic_ini = os.path.join(os.path.dirname(__file__), 'alembic.ini') VALID_SERVICES = ['fwaas', 'lbaas', 'vpnaas'] INSTALLED_SERVICES = [service_ for service_ in VALID_SERVICES if 'neutron-%s' % service_ in migration_entrypoints] INSTALLED_SUBPROJECTS = [project_ for project_ in migration_entrypoints] _core_opts = [ cfg.StrOpt('core_plugin', default='', help=_('Neutron plugin provider module'), deprecated_for_removal=True), cfg.StrOpt('service', choices=INSTALLED_SERVICES, help=(_("(Deprecated. Use '--subproject neutron-SERVICE' " "instead.) The advanced service to execute the " "command against.")), deprecated_for_removal=True), cfg.StrOpt('subproject', choices=INSTALLED_SUBPROJECTS, help=(_("The subproject to execute the command against. " "Can be one of: '%s'.") % "', '".join(INSTALLED_SUBPROJECTS))), cfg.BoolOpt('split_branches', default=False, help=_("Enforce using split branches file structure.")) ] _quota_opts = [ cfg.StrOpt('quota_driver', default='', help=_('Neutron quota driver class'), deprecated_for_removal=True), ] _db_opts = [ cfg.StrOpt('connection', deprecated_name='sql_connection', default='', secret=True, help=_('URL to database')), cfg.StrOpt('engine', default='', help=_('Database engine for which script will be generated ' 'when using offline migration.')), ] CONF = cfg.ConfigOpts() CONF.register_cli_opts(_core_opts) CONF.register_cli_opts(_db_opts, 'database') CONF.register_opts(_quota_opts, 'QUOTAS') def do_alembic_command(config, cmd, revision=None, desc=None, **kwargs): args = [] if revision: args.append(revision) project = config.get_main_option('neutron_project') if desc: alembic_util.msg(_('Running %(cmd)s (%(desc)s) for %(project)s ...') % {'cmd': cmd, 'desc': desc, 'project': project}) else: alembic_util.msg(_('Running %(cmd)s for %(project)s ...') % {'cmd': cmd, 'project': project}) try: getattr(alembic_command, cmd)(config, *args, **kwargs) except alembic_util.CommandError as e: alembic_util.err(six.text_type(e)) alembic_util.msg(_('OK')) def _get_alembic_entrypoint(project): if project not in migration_entrypoints: alembic_util.err(_('Sub-project %s not installed.') % project) return migration_entrypoints[project] def do_generic_show(config, cmd): kwargs = {'verbose': CONF.command.verbose} do_alembic_command(config, cmd, **kwargs) def do_check_migration(config, cmd): do_alembic_command(config, 'branches') validate_revisions(config) validate_head_file(config) def add_alembic_subparser(sub, cmd): return sub.add_parser(cmd, help=getattr(alembic_command, cmd).__doc__) def add_branch_options(parser): group = parser.add_mutually_exclusive_group() group.add_argument('--expand', action='store_true') group.add_argument('--contract', action='store_true') return group def _find_milestone_revisions(config, milestone, branch=None): """Return the revision(s) for a given milestone.""" script = alembic_script.ScriptDirectory.from_config(config) return [ (m.revision, label) for m in _get_revisions(script) for label in (m.branch_labels or [None]) if milestone in getattr(m.module, 'neutron_milestone', []) and (branch is None or branch in m.branch_labels) ] def do_upgrade(config, cmd): branch = None if ((CONF.command.revision or CONF.command.delta) and (CONF.command.expand or CONF.command.contract)): raise SystemExit(_( 'Phase upgrade options do not accept revision specification')) if CONF.command.expand: branch = EXPAND_BRANCH revision = _get_branch_head(EXPAND_BRANCH) elif CONF.command.contract: branch = CONTRACT_BRANCH revision = _get_branch_head(CONTRACT_BRANCH) 
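# NOTE: --expand is meant for the additive, online-safe part of a
# migration, while --contract applies the remaining schema changes; as
# has_offline_migrations() below points out, unapplied contract
# revisions require neutron-server instances to be shut down first.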
elif not CONF.command.revision and not CONF.command.delta: raise SystemExit(_('You must provide a revision or relative delta')) else: revision = CONF.command.revision or '' if '-' in revision: raise SystemExit(_('Negative relative revision (downgrade) not ' 'supported')) delta = CONF.command.delta if delta: if '+' in revision: raise SystemExit(_('Use either --delta or relative revision, ' 'not both')) if delta < 0: raise SystemExit(_('Negative delta (downgrade) not supported')) revision = '%s+%d' % (revision, delta) # leave branchless 'head' revision request backward compatible by # applying all heads in all available branches. if revision == 'head': revision = 'heads' if revision in migration.NEUTRON_MILESTONES: expand_revisions = _find_milestone_revisions(config, revision, EXPAND_BRANCH) contract_revisions = _find_milestone_revisions(config, revision, CONTRACT_BRANCH) # Expand revisions must be run before contract revisions revisions = expand_revisions + contract_revisions else: revisions = [(revision, branch)] for revision, branch in revisions: if not CONF.command.sql: run_sanity_checks(config, revision) do_alembic_command(config, cmd, revision=revision, desc=branch, sql=CONF.command.sql) def no_downgrade(config, cmd): raise SystemExit(_("Downgrade no longer supported")) def do_stamp(config, cmd): do_alembic_command(config, cmd, revision=CONF.command.revision, sql=CONF.command.sql) def _get_branch_head(branch): '''Get the latest @head specification for a branch.''' return '%s@head' % branch def _check_bootstrap_new_branch(branch, version_path, addn_kwargs): addn_kwargs['version_path'] = version_path addn_kwargs['head'] = _get_branch_head(branch) if not os.path.exists(version_path): # Bootstrap initial directory structure utils.ensure_dir(version_path) def do_revision(config, cmd): kwargs = { 'message': CONF.command.message, 'autogenerate': CONF.command.autogenerate, 'sql': CONF.command.sql, } if CONF.command.expand: kwargs['head'] = 'expand@head' elif CONF.command.contract: kwargs['head'] = 'contract@head' do_alembic_command(config, cmd, **kwargs) if _use_separate_migration_branches(config): update_head_files(config) else: update_head_file(config) def _get_release_labels(labels): result = set() for label in labels: # release labels were introduced Liberty for a short time and dropped # in that same release cycle result.add('%s_%s' % (migration.LIBERTY, label)) return result def _compare_labels(revision, expected_labels): # validate that the script has expected labels only bad_labels = revision.branch_labels - expected_labels if bad_labels: # NOTE(ihrachyshka): this hack is temporary to accommodate those # projects that already initialized their branches with liberty_* # labels. Let's notify them about the deprecation for now and drop it # later. bad_labels_with_release = (revision.branch_labels - _get_release_labels(expected_labels)) if not bad_labels_with_release: alembic_util.warn( _('Release aware branch labels (%s) are deprecated. 
' 'Please switch to expand@ and contract@ ' 'labels.') % bad_labels) return script_name = os.path.basename(revision.path) alembic_util.err( _('Unexpected label for script %(script_name)s: %(labels)s') % {'script_name': script_name, 'labels': bad_labels} ) def _validate_single_revision_labels(script_dir, revision, label=None): expected_labels = set() if label is not None: expected_labels.add(label) _compare_labels(revision, expected_labels) # if it's not the root element of the branch, expect the parent of the # script to have the same label if revision.down_revision is not None: down_revision = script_dir.get_revision(revision.down_revision) _compare_labels(down_revision, expected_labels) def _validate_revision(script_dir, revision): for branch in MIGRATION_BRANCHES: if branch in revision.path: _validate_single_revision_labels( script_dir, revision, label=branch) return # validate script from branchless part of migration rules _validate_single_revision_labels(script_dir, revision) def validate_revisions(config): script_dir = alembic_script.ScriptDirectory.from_config(config) revisions = _get_revisions(script_dir) for revision in revisions: _validate_revision(script_dir, revision) branchpoints = _get_branch_points(script_dir) if len(branchpoints) > 1: branchpoints = ', '.join(p.revision for p in branchpoints) alembic_util.err( _('Unexpected number of alembic branch points: %(branchpoints)s') % {'branchpoints': branchpoints} ) def _get_revisions(script): return list(script.walk_revisions(base='base', head='heads')) def _get_branch_points(script): branchpoints = [] for revision in _get_revisions(script): if revision.is_branch_point: branchpoints.append(revision) return branchpoints def validate_head_file(config): '''Check that HEAD file contains the latest head for the branch.''' if _use_separate_migration_branches(config): _validate_head_files(config) else: _validate_head_file(config) @debtcollector.removals.remove(message=BRANCHLESS_WARNING) def _validate_head_file(config): '''Check that HEAD file contains the latest head for the branch.''' script = alembic_script.ScriptDirectory.from_config(config) expected_head = script.get_heads() head_path = _get_head_file_path(config) try: with open(head_path) as file_: observed_head = file_.read().split() if observed_head == expected_head: return except IOError: pass alembic_util.err( _('HEAD file does not match migration timeline head, expected: %s') % expected_head) def _get_heads_map(config): script = alembic_script.ScriptDirectory.from_config(config) heads = script.get_heads() head_map = {} for head in heads: if CONTRACT_BRANCH in script.get_revision(head).branch_labels: head_map[CONTRACT_BRANCH] = head else: head_map[EXPAND_BRANCH] = head return head_map def _check_head(branch_name, head_file, head): try: with open(head_file) as file_: observed_head = file_.read().strip() except IOError: pass else: if observed_head != head: alembic_util.err( _('%(branch)s HEAD file does not match migration timeline ' 'head, expected: %(head)s') % {'branch': branch_name.title(), 'head': head}) def _validate_head_files(config): '''Check that HEAD files contain the latest head for the branch.''' contract_head = _get_contract_head_file_path(config) expand_head = _get_expand_head_file_path(config) if not os.path.exists(contract_head) or not os.path.exists(expand_head): alembic_util.warn(_("Repository does not contain HEAD files for " "contract and expand branches.")) return head_map = _get_heads_map(config) _check_head(CONTRACT_BRANCH, contract_head, 
head_map[CONTRACT_BRANCH]) _check_head(EXPAND_BRANCH, expand_head, head_map[EXPAND_BRANCH]) def update_head_files(config): '''Update HEAD files with the latest branch heads.''' head_map = _get_heads_map(config) contract_head = _get_contract_head_file_path(config) expand_head = _get_expand_head_file_path(config) with open(contract_head, 'w+') as f: f.write(head_map[CONTRACT_BRANCH] + '\n') with open(expand_head, 'w+') as f: f.write(head_map[EXPAND_BRANCH] + '\n') old_head_file = _get_head_file_path(config) old_heads_file = _get_heads_file_path(config) for file_ in (old_head_file, old_heads_file): fileutils.delete_if_exists(file_) @debtcollector.removals.remove(message=BRANCHLESS_WARNING) def update_head_file(config): script = alembic_script.ScriptDirectory.from_config(config) head = script.get_heads() with open(_get_head_file_path(config), 'w+') as f: f.write('\n'.join(head)) def _get_current_database_heads(config): with DBConnection(config.neutron_config.database.connection) as conn: opts = { 'version_table': get_alembic_version_table(config) } context = alembic_migration.MigrationContext.configure( conn, opts=opts) return context.get_current_heads() def has_offline_migrations(config, cmd): heads_map = _get_heads_map(config) if heads_map[CONTRACT_BRANCH] not in _get_current_database_heads(config): # If there is at least one contract revision not applied to database, # it means we should shut down all neutron-server instances before # proceeding with upgrade. project = config.get_main_option('neutron_project') alembic_util.msg(_('Need to apply migrations from %(project)s ' 'contract branch. This will require all Neutron ' 'server instances to be shutdown before ' 'proceeding with the upgrade.') % {"project": project}) return True return False def add_command_parsers(subparsers): for name in ['current', 'history', 'branches', 'heads']: parser = add_alembic_subparser(subparsers, name) parser.set_defaults(func=do_generic_show) parser.add_argument('--verbose', action='store_true', help='Display more verbose output for the ' 'specified command') help_text = (getattr(alembic_command, 'branches').__doc__ + ' and validate head file') parser = subparsers.add_parser('check_migration', help=help_text) parser.set_defaults(func=do_check_migration) parser = add_alembic_subparser(subparsers, 'upgrade') parser.add_argument('--delta', type=int) parser.add_argument('--sql', action='store_true') parser.add_argument('revision', nargs='?') parser.add_argument('--mysql-engine', default='', help='Change MySQL storage engine of current ' 'existing tables') add_branch_options(parser) parser.set_defaults(func=do_upgrade) parser = subparsers.add_parser('downgrade', help="(No longer supported)") parser.add_argument('None', nargs='?', help="Downgrade not supported") parser.set_defaults(func=no_downgrade) parser = add_alembic_subparser(subparsers, 'stamp') parser.add_argument('--sql', action='store_true') parser.add_argument('revision') parser.set_defaults(func=do_stamp) parser = add_alembic_subparser(subparsers, 'revision') parser.add_argument('-m', '--message') parser.add_argument('--sql', action='store_true') group = add_branch_options(parser) group.add_argument('--autogenerate', action='store_true') parser.set_defaults(func=do_revision) parser = subparsers.add_parser( 'has_offline_migrations', help='Determine whether there are pending migration scripts that ' 'require full shutdown for all services that directly access ' 'database.') parser.set_defaults(func=has_offline_migrations) command_opt = 
cfg.SubCommandOpt('command', title='Command', help=_('Available commands'), handler=add_command_parsers) CONF.register_cli_opt(command_opt) def _get_project_base(config): '''Return the base python namespace name for a project.''' script_location = config.get_main_option('script_location') return script_location.split(':')[0].split('.')[0] def _get_package_root_dir(config): root_module = importutils.try_import(_get_project_base(config)) if not root_module: project = config.get_main_option('neutron_project') alembic_util.err(_("Failed to locate source for %s.") % project) # The root_module.__file__ property is a path like # '/opt/stack/networking-foo/networking_foo/__init__.py' # We return just # '/opt/stack/networking-foo' return os.path.dirname(os.path.dirname(root_module.__file__)) def _get_root_versions_dir(config): '''Return root directory that contains all migration rules.''' root_dir = _get_package_root_dir(config) script_location = config.get_main_option('script_location') # Script location is something like: # 'project_base.db.migration:alembic_migrations' # Convert it to: # 'project_base/db/migration/alembic_migrations/versions' part1, part2 = script_location.split(':') parts = part1.split('.') + part2.split('.') + ['versions'] # Return the absolute path to the versions dir return os.path.join(root_dir, *parts) def _get_head_file_path(config): '''Return the path of the file that contains single head.''' return os.path.join( _get_root_versions_dir(config), HEAD_FILENAME) def _get_heads_file_path(config): ''' Return the path of the file that was once used to maintain the list of latest heads. ''' return os.path.join( _get_root_versions_dir(config), HEADS_FILENAME) def _get_contract_head_file_path(config): ''' Return the path of the file that is used to maintain contract head ''' return os.path.join( _get_root_versions_dir(config), CONTRACT_HEAD_FILENAME) def _get_expand_head_file_path(config): ''' Return the path of the file that is used to maintain expand head ''' return os.path.join( _get_root_versions_dir(config), EXPAND_HEAD_FILENAME) def _get_version_branch_path(config, release=None, branch=None): version_path = _get_root_versions_dir(config) if branch and release: return os.path.join(version_path, release, branch) return version_path def _use_separate_migration_branches(config): '''Detect whether split migration branches should be used.''' if CONF.split_branches: return True script_dir = alembic_script.ScriptDirectory.from_config(config) if _get_branch_points(script_dir): return True return False def _set_version_locations(config): '''Make alembic see all revisions in all migration branches.''' split_branches = False version_paths = [_get_version_branch_path(config)] for release in RELEASES: for branch in MIGRATION_BRANCHES: version_path = _get_version_branch_path(config, release, branch) if split_branches or os.path.exists(version_path): split_branches = True version_paths.append(version_path) config.set_main_option('version_locations', ' '.join(version_paths)) def _get_installed_entrypoint(subproject): '''Get the entrypoint for the subproject, which must be installed.''' if subproject not in migration_entrypoints: alembic_util.err(_('Package %s not installed') % subproject) return migration_entrypoints[subproject] def _get_subproject_script_location(subproject): '''Get the script location for the installed subproject.''' entrypoint = _get_installed_entrypoint(subproject) return ':'.join([entrypoint.module_name, entrypoint.attrs[0]]) def _get_service_script_location(service): 
'''Get the script location for the service, which must be installed.''' return _get_subproject_script_location('neutron-%s' % service) def _get_subproject_base(subproject): '''Get the import base name for the installed subproject.''' entrypoint = _get_installed_entrypoint(subproject) return entrypoint.module_name.split('.')[0] def get_alembic_version_table(config): script_dir = alembic_script.ScriptDirectory.from_config(config) alembic_version_table = [None] def alembic_version_table_from_env(rev, context): alembic_version_table[0] = context.version_table return [] with environment.EnvironmentContext(config, script_dir, fn=alembic_version_table_from_env): script_dir.run_env() return alembic_version_table[0] def get_alembic_configs(): '''Return a list of alembic configs, one per project. ''' # Get the script locations for the specified or installed projects. # Which projects to get script locations for is determined by the CLI # options as follows: # --service X # only subproject neutron-X (deprecated) # --subproject Y # only subproject Y (where Y can be neutron) # (none specified) # neutron and all installed subprojects script_locations = {} if CONF.service: script_location = _get_service_script_location(CONF.service) script_locations['neutron-%s' % CONF.service] = script_location elif CONF.subproject: script_location = _get_subproject_script_location(CONF.subproject) script_locations[CONF.subproject] = script_location else: for subproject, ep in migration_entrypoints.items(): script_locations[subproject] = _get_subproject_script_location( subproject) # Return a list of alembic configs from the projects in the # script_locations dict. If neutron is in the list it is first. configs = [] project_seq = sorted(script_locations.keys()) # Core neutron must be the first project if there is more than one if len(project_seq) > 1 and 'neutron' in project_seq: project_seq.insert(0, project_seq.pop(project_seq.index('neutron'))) for project in project_seq: config = alembic_config.Config(neutron_alembic_ini) config.set_main_option('neutron_project', project) script_location = script_locations[project] config.set_main_option('script_location', script_location) _set_version_locations(config) config.neutron_config = CONF configs.append(config) return configs def get_neutron_config(): # Neutron's alembic config is always the first one return get_alembic_configs()[0] def run_sanity_checks(config, revision): script_dir = alembic_script.ScriptDirectory.from_config(config) def check_sanity(rev, context): # TODO(ihrachyshka): here we use internal API for alembic; we may need # alembic to expose implicit_base= argument into public # iterate_revisions() call for script in script_dir.revision_map.iterate_revisions( revision, rev, implicit_base=True): if hasattr(script.module, 'check_sanity'): script.module.check_sanity(context.connection) return [] with environment.EnvironmentContext(config, script_dir, fn=check_sanity, starting_rev=None, destination_rev=revision): script_dir.run_env() def validate_cli_options(): if CONF.subproject and CONF.service: alembic_util.err(_("Cannot specify both --service and --subproject.")) def get_engine_config(): return [obj for obj in _db_opts if obj.name == 'engine'] def main(): CONF(project='neutron') validate_cli_options() return_val = False for config in get_alembic_configs(): #TODO(gongysh) enable logging return_val |= bool(CONF.command.func(config, CONF.command.name)) if CONF.command.name == 'has_offline_migrations' and not return_val: alembic_util.msg(_('No offline migrations 
pending.')) return return_val neutron-8.4.0/neutron/db/migration/alembic_migrations/0000775000567000056710000000000013044373210024253 5ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/db/migration/alembic_migrations/ml2_init_ops.py0000664000567000056710000001762313044372736027250 0ustar jenkinsjenkins00000000000000# Copyright 2014 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # # Initial operations for ML2 plugin and drivers from alembic import op import sqlalchemy as sa def upgrade(): op.create_table( 'ml2_vlan_allocations', sa.Column('physical_network', sa.String(length=64), nullable=False), sa.Column('vlan_id', sa.Integer(), autoincrement=False, nullable=False), sa.Column('allocated', sa.Boolean(), nullable=False), sa.PrimaryKeyConstraint('physical_network', 'vlan_id'), sa.Index(op.f('ix_ml2_vlan_allocations_physical_network_allocated'), 'physical_network', 'allocated')) op.create_table( 'ml2_vxlan_endpoints', sa.Column('ip_address', sa.String(length=64), nullable=False), sa.Column('udp_port', sa.Integer(), autoincrement=False, nullable=False), sa.Column('host', sa.String(length=255), nullable=True), sa.UniqueConstraint('host', name='unique_ml2_vxlan_endpoints0host'), sa.PrimaryKeyConstraint('ip_address')) op.create_table( 'ml2_gre_endpoints', sa.Column('ip_address', sa.String(length=64), nullable=False), sa.Column('host', sa.String(length=255), nullable=True), sa.UniqueConstraint('host', name='unique_ml2_gre_endpoints0host'), sa.PrimaryKeyConstraint('ip_address')) op.create_table( 'ml2_vxlan_allocations', sa.Column('vxlan_vni', sa.Integer(), autoincrement=False, nullable=False), sa.Column('allocated', sa.Boolean(), nullable=False, server_default=sa.sql.false(), index=True), sa.PrimaryKeyConstraint('vxlan_vni')) op.create_table( 'ml2_gre_allocations', sa.Column('gre_id', sa.Integer(), autoincrement=False, nullable=False), sa.Column('allocated', sa.Boolean(), nullable=False, server_default=sa.sql.false(), index=True), sa.PrimaryKeyConstraint('gre_id')) op.create_table( 'ml2_flat_allocations', sa.Column('physical_network', sa.String(length=64), nullable=False), sa.PrimaryKeyConstraint('physical_network')) op.create_table( 'ml2_network_segments', sa.Column('id', sa.String(length=36), nullable=False), sa.Column('network_id', sa.String(length=36), nullable=False), sa.Column('network_type', sa.String(length=32), nullable=False), sa.Column('physical_network', sa.String(length=64), nullable=True), sa.Column('segmentation_id', sa.Integer(), nullable=True), sa.Column('is_dynamic', sa.Boolean(), nullable=False, server_default=sa.sql.false()), sa.Column('segment_index', sa.Integer(), nullable=False, server_default='0'), sa.ForeignKeyConstraint(['network_id'], ['networks.id'], ondelete='CASCADE'), sa.PrimaryKeyConstraint('id')) op.create_table( 'ml2_port_bindings', sa.Column('port_id', sa.String(length=36), nullable=False), sa.Column('host', sa.String(length=255), nullable=False, server_default=''), sa.Column('vif_type', sa.String(length=64), nullable=False), sa.Column('vnic_type', 
sa.String(length=64), nullable=False, server_default='normal'), sa.Column('profile', sa.String(length=4095), nullable=False, server_default=''), sa.Column('vif_details', sa.String(length=4095), nullable=False, server_default=''), sa.ForeignKeyConstraint(['port_id'], ['ports.id'], ondelete='CASCADE'), sa.PrimaryKeyConstraint('port_id')) op.create_table( 'ml2_port_binding_levels', sa.Column('port_id', sa.String(length=36), nullable=False), sa.Column('host', sa.String(length=255), nullable=False), sa.Column('level', sa.Integer(), autoincrement=False, nullable=False), sa.Column('driver', sa.String(length=64), nullable=True), sa.Column('segment_id', sa.String(length=36), nullable=True), sa.ForeignKeyConstraint(['port_id'], ['ports.id'], ondelete='CASCADE'), sa.ForeignKeyConstraint(['segment_id'], ['ml2_network_segments.id'], ondelete='SET NULL'), sa.PrimaryKeyConstraint('port_id', 'host', 'level') ) op.create_table( 'cisco_ml2_nexusport_bindings', sa.Column('binding_id', sa.Integer(), nullable=False), sa.Column('port_id', sa.String(length=255), nullable=True), sa.Column('vlan_id', sa.Integer(), autoincrement=False, nullable=False), sa.Column('switch_ip', sa.String(length=255), nullable=True), sa.Column('instance_id', sa.String(length=255), nullable=True), sa.Column('vni', sa.Integer(), nullable=True), sa.Column('is_provider_vlan', sa.Boolean(), nullable=False, server_default=sa.sql.false()), sa.PrimaryKeyConstraint('binding_id'), ) op.create_table( 'arista_provisioned_nets', sa.Column('tenant_id', sa.String(length=255), nullable=True, index=True), sa.Column('id', sa.String(length=36), nullable=False), sa.Column('network_id', sa.String(length=36), nullable=True), sa.Column('segmentation_id', sa.Integer(), autoincrement=False, nullable=True), sa.PrimaryKeyConstraint('id')) op.create_table( 'arista_provisioned_vms', sa.Column('tenant_id', sa.String(length=255), nullable=True, index=True), sa.Column('id', sa.String(length=36), nullable=False), sa.Column('vm_id', sa.String(length=255), nullable=True), sa.Column('host_id', sa.String(length=255), nullable=True), sa.Column('port_id', sa.String(length=36), nullable=True), sa.Column('network_id', sa.String(length=36), nullable=True), sa.PrimaryKeyConstraint('id')) op.create_table( 'arista_provisioned_tenants', sa.Column('tenant_id', sa.String(length=255), nullable=True, index=True), sa.Column('id', sa.String(length=36), nullable=False), sa.PrimaryKeyConstraint('id')) op.create_table( 'ml2_nexus_vxlan_allocations', sa.Column('vxlan_vni', sa.Integer(), nullable=False, autoincrement=False), sa.Column('allocated', sa.Boolean(), nullable=False, server_default=sa.sql.false()), sa.PrimaryKeyConstraint('vxlan_vni') ) op.create_table( 'ml2_nexus_vxlan_mcast_groups', sa.Column('id', sa.String(length=36), nullable=False), sa.Column('mcast_group', sa.String(length=64), nullable=False), sa.Column('associated_vni', sa.Integer(), nullable=False), sa.PrimaryKeyConstraint('id'), sa.ForeignKeyConstraint(['associated_vni'], ['ml2_nexus_vxlan_allocations.vxlan_vni'], ondelete='CASCADE') ) op.create_table( 'cisco_ml2_nexus_nve', sa.Column('vni', sa.Integer(), nullable=False), sa.Column('switch_ip', sa.String(length=255), nullable=True), sa.Column('device_id', sa.String(length=255), nullable=True), sa.Column('mcast_group', sa.String(length=255), nullable=True), sa.PrimaryKeyConstraint('vni', 'switch_ip', 'device_id')) neutron-8.4.0/neutron/db/migration/alembic_migrations/vpn_init_ops.py0000664000567000056710000001425213044372736027354 0ustar jenkinsjenkins00000000000000# 
Copyright 2014 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # # Initial schema operations for IPSEC VPN service plugin from alembic import op import sqlalchemy as sa auth_algorithms = sa.Enum('sha1', name='vpn_auth_algorithms') encryption_algorithms = sa.Enum('3des', 'aes-128', 'aes-256', 'aes-192', name='vpn_encrypt_algorithms') encapsulation_modes = sa.Enum('tunnel', 'transport', name='ipsec_encapsulations') lifetime_unit_types = sa.Enum('seconds', 'kilobytes', name='vpn_lifetime_units') transform_protocols = sa.Enum('esp', 'ah', 'ah-esp', name='ipsec_transform_protocols') pfs_types = sa.Enum('group2', 'group5', 'group14', name='vpn_pfs') phase1_negotiation_modes = sa.Enum('main', name='ike_phase1_mode') ike_versions = sa.Enum('v1', 'v2', name='ike_versions') initiator_types = sa.Enum('bi-directional', 'response-only', name='vpn_initiators') dpd_actions = sa.Enum('hold', 'clear', 'restart', 'disabled', 'restart-by-peer', name='vpn_dpd_actions') def upgrade(): op.create_table( 'ipsecpolicies', sa.Column('tenant_id', sa.String(length=255), nullable=True), sa.Column('id', sa.String(length=36), nullable=False), sa.Column('name', sa.String(length=255), nullable=True), sa.Column('description', sa.String(length=255), nullable=True), sa.Column('transform_protocol', transform_protocols, nullable=False), sa.Column('auth_algorithm', auth_algorithms, nullable=False), sa.Column('encryption_algorithm', encryption_algorithms, nullable=False), sa.Column('encapsulation_mode', encapsulation_modes, nullable=False), sa.Column('lifetime_units', lifetime_unit_types, nullable=False), sa.Column('lifetime_value', sa.Integer(), nullable=False), sa.Column('pfs', pfs_types, nullable=False), sa.PrimaryKeyConstraint('id')) op.create_table( 'ikepolicies', sa.Column('tenant_id', sa.String(length=255), nullable=True), sa.Column('id', sa.String(length=36), nullable=False), sa.Column('name', sa.String(length=255), nullable=True), sa.Column('description', sa.String(length=255), nullable=True), sa.Column('auth_algorithm', auth_algorithms, nullable=False), sa.Column('encryption_algorithm', encryption_algorithms, nullable=False), sa.Column('phase1_negotiation_mode', phase1_negotiation_modes, nullable=False), sa.Column('lifetime_units', lifetime_unit_types, nullable=False), sa.Column('lifetime_value', sa.Integer(), nullable=False), sa.Column('ike_version', ike_versions, nullable=False), sa.Column('pfs', pfs_types, nullable=False), sa.PrimaryKeyConstraint('id')) op.create_table( 'vpnservices', sa.Column('tenant_id', sa.String(length=255), nullable=True), sa.Column('id', sa.String(length=36), nullable=False), sa.Column('name', sa.String(length=255), nullable=True), sa.Column('description', sa.String(length=255), nullable=True), sa.Column('status', sa.String(length=16), nullable=False), sa.Column('admin_state_up', sa.Boolean(), nullable=False), sa.Column('subnet_id', sa.String(length=36), nullable=False), sa.Column('router_id', sa.String(length=36), nullable=False), sa.ForeignKeyConstraint(['subnet_id'], ['subnets.id'], 
), sa.ForeignKeyConstraint(['router_id'], ['routers.id'], ), sa.PrimaryKeyConstraint('id')) op.create_table( 'ipsec_site_connections', sa.Column('tenant_id', sa.String(length=255), nullable=True), sa.Column('id', sa.String(length=36), nullable=False), sa.Column('name', sa.String(length=255), nullable=True), sa.Column('description', sa.String(length=255), nullable=True), sa.Column('peer_address', sa.String(length=255), nullable=False), sa.Column('peer_id', sa.String(length=255), nullable=False), sa.Column('route_mode', sa.String(length=8), nullable=False), sa.Column('mtu', sa.Integer(), nullable=False), sa.Column('initiator', initiator_types, nullable=False), sa.Column('auth_mode', sa.String(length=16), nullable=False), sa.Column('psk', sa.String(length=255), nullable=False), sa.Column('dpd_action', dpd_actions, nullable=False), sa.Column('dpd_interval', sa.Integer(), nullable=False), sa.Column('dpd_timeout', sa.Integer(), nullable=False), sa.Column('status', sa.String(length=16), nullable=False), sa.Column('admin_state_up', sa.Boolean(), nullable=False), sa.Column('vpnservice_id', sa.String(length=36), nullable=False), sa.Column('ipsecpolicy_id', sa.String(length=36), nullable=False), sa.Column('ikepolicy_id', sa.String(length=36), nullable=False), sa.ForeignKeyConstraint(['vpnservice_id'], ['vpnservices.id'], ), sa.ForeignKeyConstraint(['ipsecpolicy_id'], ['ipsecpolicies.id'], ), sa.ForeignKeyConstraint(['ikepolicy_id'], ['ikepolicies.id'], ), sa.PrimaryKeyConstraint('id')) op.create_table( 'ipsecpeercidrs', sa.Column('cidr', sa.String(length=32), nullable=False), sa.Column('ipsec_site_connection_id', sa.String(length=36), nullable=False), sa.ForeignKeyConstraint(['ipsec_site_connection_id'], ['ipsec_site_connections.id'], ondelete='CASCADE'), sa.PrimaryKeyConstraint('cidr', 'ipsec_site_connection_id')) neutron-8.4.0/neutron/db/migration/alembic_migrations/cisco_init_ops.py0000664000567000056710000003663613044372736027663 0ustar jenkinsjenkins00000000000000# Copyright 2014 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
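# Hedged sketch, not part of the upstream tree: the module-level sa.Enum
# definitions in vpn_init_ops.py above create *named* types. On PostgreSQL a
# named Enum becomes a standalone TYPE that op.drop_table() leaves behind, so
# a symmetric downgrade would have to drop each type explicitly; MySQL inlines
# ENUM in the column and needs no such step. A helper of this shape could be
# passed the vpn_init_ops enums (auth_algorithms, encryption_algorithms, ...):
import sqlalchemy as sa  # noqa: F401  (kept for symmetry with the modules above)


def _drop_named_enums(bind, enums):
    """Drop standalone Enum TYPEs on backends that materialize them."""
    for enum in enums:
        enum.drop(bind, checkfirst=True)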
# # Initial schema operations for cisco plugin from alembic import op import sqlalchemy as sa segment_type = sa.Enum('vlan', 'overlay', 'trunk', 'multi-segment', name='segment_type') profile_type = sa.Enum('network', 'policy', name='profile_type') network_profile_type = sa.Enum('vlan', 'vxlan', name='network_profile_type') def upgrade(): op.create_table( 'cisco_policy_profiles', sa.Column('id', sa.String(length=36), nullable=False), sa.Column('name', sa.String(length=255), nullable=True), sa.PrimaryKeyConstraint('id')) op.create_table( 'cisco_network_profiles', sa.Column('id', sa.String(length=36), nullable=False), sa.Column('name', sa.String(length=255), nullable=True), sa.Column('segment_type', segment_type, nullable=False), sa.Column('sub_type', sa.String(length=255), nullable=True), sa.Column('segment_range', sa.String(length=255), nullable=True), sa.Column('multicast_ip_index', sa.Integer(), nullable=True, server_default='0'), sa.Column('multicast_ip_range', sa.String(length=255), nullable=True), sa.Column('physical_network', sa.String(length=255), nullable=True), sa.PrimaryKeyConstraint('id')) op.create_table( 'cisco_n1kv_vxlan_allocations', sa.Column('vxlan_id', sa.Integer(), autoincrement=False, nullable=False), sa.Column('allocated', sa.Boolean(), nullable=False, server_default=sa.sql.false()), sa.Column('network_profile_id', sa.String(length=36), nullable=False), sa.ForeignKeyConstraint(['network_profile_id'], ['cisco_network_profiles.id'], ondelete='CASCADE', name='cisco_n1kv_vxlan_allocations_ibfk_1'), sa.PrimaryKeyConstraint('vxlan_id')) op.create_table( 'cisco_n1kv_vlan_allocations', sa.Column('physical_network', sa.String(length=64), nullable=False), sa.Column('vlan_id', sa.Integer(), autoincrement=False, nullable=False), sa.Column('allocated', sa.Boolean(), autoincrement=False, nullable=False, server_default=sa.sql.false()), sa.Column('network_profile_id', sa.String(length=36), nullable=False), sa.PrimaryKeyConstraint('physical_network', 'vlan_id'), sa.ForeignKeyConstraint(['network_profile_id'], ['cisco_network_profiles.id'], ondelete='CASCADE', name='cisco_n1kv_vlan_allocations_ibfk_1')) op.create_table( 'cisco_credentials', sa.Column('credential_id', sa.String(length=255), nullable=True), sa.Column('credential_name', sa.String(length=255), nullable=False), sa.Column('user_name', sa.String(length=255), nullable=True), sa.Column('password', sa.String(length=255), nullable=True), sa.Column('type', sa.String(length=255), nullable=True), sa.PrimaryKeyConstraint('credential_name')) op.create_table( 'cisco_qos_policies', sa.Column('qos_id', sa.String(length=255), nullable=True), sa.Column('tenant_id', sa.String(length=255), nullable=False), sa.Column('qos_name', sa.String(length=255), nullable=False), sa.Column('qos_desc', sa.String(length=255), nullable=True), sa.PrimaryKeyConstraint('tenant_id', 'qos_name')) op.create_table( 'cisco_n1kv_profile_bindings', sa.Column('profile_type', profile_type, nullable=True), sa.Column('tenant_id', sa.String(length=36), nullable=False, server_default='TENANT_ID_NOT_SET'), sa.Column('profile_id', sa.String(length=36), nullable=False), sa.PrimaryKeyConstraint('tenant_id', 'profile_id')) op.create_table( 'cisco_n1kv_vmnetworks', sa.Column('name', sa.String(length=80), nullable=False), sa.Column('profile_id', sa.String(length=36), nullable=True), sa.Column('network_id', sa.String(length=36), nullable=True), sa.Column('port_count', sa.Integer(), nullable=True), sa.ForeignKeyConstraint(['profile_id'], ['cisco_policy_profiles.id'], ), 
sa.PrimaryKeyConstraint('name')) op.create_table( 'cisco_n1kv_trunk_segments', sa.Column('trunk_segment_id', sa.String(length=36), nullable=False), sa.Column('segment_id', sa.String(length=36), nullable=False), sa.Column('dot1qtag', sa.String(length=36), nullable=False), sa.ForeignKeyConstraint(['trunk_segment_id'], ['networks.id'], ondelete='CASCADE'), sa.PrimaryKeyConstraint('trunk_segment_id', 'segment_id', 'dot1qtag')) op.create_table( 'cisco_provider_networks', sa.Column('network_id', sa.String(length=36), nullable=False), sa.Column('network_type', sa.String(length=255), nullable=False), sa.Column('segmentation_id', sa.Integer(), nullable=False), sa.ForeignKeyConstraint(['network_id'], ['networks.id'], ondelete='CASCADE'), sa.PrimaryKeyConstraint('network_id')) op.create_table( 'cisco_n1kv_multi_segments', sa.Column('multi_segment_id', sa.String(length=36), nullable=False), sa.Column('segment1_id', sa.String(length=36), nullable=False), sa.Column('segment2_id', sa.String(length=36), nullable=False), sa.Column('encap_profile_name', sa.String(length=36), nullable=True), sa.ForeignKeyConstraint(['multi_segment_id'], ['networks.id'], ondelete='CASCADE'), sa.PrimaryKeyConstraint('multi_segment_id', 'segment1_id', 'segment2_id')) op.create_table( 'cisco_n1kv_network_bindings', sa.Column('network_id', sa.String(length=36), nullable=False), sa.Column('network_type', sa.String(length=32), nullable=False), sa.Column('physical_network', sa.String(length=64), nullable=True), sa.Column('segmentation_id', sa.Integer(), nullable=True), sa.Column('multicast_ip', sa.String(length=32), nullable=True), sa.Column('profile_id', sa.String(length=36), nullable=True), sa.ForeignKeyConstraint(['network_id'], ['networks.id'], ondelete='CASCADE'), sa.ForeignKeyConstraint(['profile_id'], ['cisco_network_profiles.id']), sa.PrimaryKeyConstraint('network_id')) op.create_table( 'cisco_n1kv_port_bindings', sa.Column('port_id', sa.String(length=36), nullable=False), sa.Column('profile_id', sa.String(length=36), nullable=True), sa.ForeignKeyConstraint(['port_id'], ['ports.id'], ondelete='CASCADE'), sa.ForeignKeyConstraint(['profile_id'], ['cisco_policy_profiles.id']), sa.PrimaryKeyConstraint('port_id')) op.create_table( 'cisco_csr_identifier_map', sa.Column('tenant_id', sa.String(length=255), nullable=True), sa.Column('ipsec_site_conn_id', sa.String(length=36), primary_key=True), sa.Column('csr_tunnel_id', sa.Integer(), nullable=False), sa.Column('csr_ike_policy_id', sa.Integer(), nullable=False), sa.Column('csr_ipsec_policy_id', sa.Integer(), nullable=False), sa.ForeignKeyConstraint(['ipsec_site_conn_id'], ['ipsec_site_connections.id'], ondelete='CASCADE') ) op.create_table( 'cisco_ml2_apic_host_links', sa.Column('host', sa.String(length=255), nullable=False), sa.Column('ifname', sa.String(length=64), nullable=False), sa.Column('ifmac', sa.String(length=32), nullable=True), sa.Column('swid', sa.String(length=32), nullable=False), sa.Column('module', sa.String(length=32), nullable=False), sa.Column('port', sa.String(length=32), nullable=False), sa.PrimaryKeyConstraint('host', 'ifname')) op.create_table( 'cisco_ml2_apic_names', sa.Column('neutron_id', sa.String(length=36), nullable=False), sa.Column('neutron_type', sa.String(length=32), nullable=False), sa.Column('apic_name', sa.String(length=255), nullable=False), sa.PrimaryKeyConstraint('neutron_id', 'neutron_type')) op.create_table( 'cisco_ml2_apic_contracts', sa.Column('tenant_id', sa.String(length=255), index=True), sa.Column('router_id', sa.String(length=36), 
nullable=False), sa.ForeignKeyConstraint(['router_id'], ['routers.id']), sa.PrimaryKeyConstraint('router_id')) op.create_table('cisco_hosting_devices', sa.Column('tenant_id', sa.String(length=255), nullable=True, index=True), sa.Column('id', sa.String(length=36), nullable=False), sa.Column('complementary_id', sa.String(length=36), nullable=True), sa.Column('device_id', sa.String(length=255), nullable=True), sa.Column('admin_state_up', sa.Boolean(), nullable=False), sa.Column('management_port_id', sa.String(length=36), nullable=True), sa.Column('protocol_port', sa.Integer(), nullable=True), sa.Column('cfg_agent_id', sa.String(length=36), nullable=True), sa.Column('created_at', sa.DateTime(), nullable=False), sa.Column('status', sa.String(length=16), nullable=True), sa.ForeignKeyConstraint(['cfg_agent_id'], ['agents.id'], ), sa.ForeignKeyConstraint(['management_port_id'], ['ports.id'], ondelete='SET NULL'), sa.PrimaryKeyConstraint('id') ) op.create_table('cisco_port_mappings', sa.Column('logical_resource_id', sa.String(length=36), nullable=False), sa.Column('logical_port_id', sa.String(length=36), nullable=False), sa.Column('port_type', sa.String(length=32), nullable=True), sa.Column('network_type', sa.String(length=32), nullable=True), sa.Column('hosting_port_id', sa.String(length=36), nullable=True), sa.Column('segmentation_id', sa.Integer(), autoincrement=False, nullable=True), sa.ForeignKeyConstraint(['hosting_port_id'], ['ports.id'], ondelete='CASCADE'), sa.ForeignKeyConstraint(['logical_port_id'], ['ports.id'], ondelete='CASCADE'), sa.PrimaryKeyConstraint('logical_resource_id', 'logical_port_id') ) op.create_table('cisco_router_mappings', sa.Column('router_id', sa.String(length=36), nullable=False), sa.Column('auto_schedule', sa.Boolean(), nullable=False), sa.Column('hosting_device_id', sa.String(length=36), nullable=True), sa.ForeignKeyConstraint(['hosting_device_id'], ['cisco_hosting_devices.id'], ondelete='SET NULL'), sa.ForeignKeyConstraint(['router_id'], ['routers.id'], ondelete='CASCADE'), sa.PrimaryKeyConstraint('router_id') ) op.create_table( 'cisco_ml2_n1kv_policy_profiles', sa.Column('id', sa.String(length=36), nullable=False), sa.Column('name', sa.String(length=255), nullable=False), sa.Column('vsm_ip', sa.String(length=16), nullable=False), sa.PrimaryKeyConstraint('id', 'vsm_ip'), ) op.create_table( 'cisco_ml2_n1kv_network_profiles', sa.Column('id', sa.String(length=36), nullable=False), sa.Column('name', sa.String(length=255), nullable=False), sa.Column('segment_type', network_profile_type, nullable=False), sa.Column('segment_range', sa.String(length=255), nullable=True), sa.Column('multicast_ip_index', sa.Integer(), nullable=True), sa.Column('multicast_ip_range', sa.String(length=255), nullable=True), sa.Column('sub_type', sa.String(length=255), nullable=True), sa.Column('physical_network', sa.String(length=255), nullable=True), sa.PrimaryKeyConstraint('id'), ) op.create_table( 'cisco_ml2_n1kv_port_bindings', sa.Column('port_id', sa.String(length=36), nullable=False), sa.Column('profile_id', sa.String(length=36), nullable=False), sa.ForeignKeyConstraint(['port_id'], ['ports.id'], ondelete='CASCADE'), sa.PrimaryKeyConstraint('port_id'), ) op.create_table( 'cisco_ml2_n1kv_network_bindings', sa.Column('network_id', sa.String(length=36), nullable=False), sa.Column('network_type', sa.String(length=32), nullable=False), sa.Column('segmentation_id', sa.Integer(), autoincrement=False), sa.Column('profile_id', sa.String(length=36), nullable=False), 
sa.ForeignKeyConstraint(['network_id'], ['networks.id'], ondelete='CASCADE'), sa.ForeignKeyConstraint(['profile_id'], ['cisco_ml2_n1kv_network_profiles.id']), sa.PrimaryKeyConstraint('network_id') ) op.create_table( 'cisco_ml2_n1kv_vxlan_allocations', sa.Column('vxlan_id', sa.Integer(), autoincrement=False, nullable=False), sa.Column('allocated', sa.Boolean(), nullable=False), sa.Column('network_profile_id', sa.String(length=36), nullable=False), sa.ForeignKeyConstraint(['network_profile_id'], ['cisco_ml2_n1kv_network_profiles.id'], ondelete='CASCADE'), sa.PrimaryKeyConstraint('vxlan_id') ) op.create_table( 'cisco_ml2_n1kv_vlan_allocations', sa.Column('physical_network', sa.String(length=64), nullable=False), sa.Column('vlan_id', sa.Integer(), autoincrement=False, nullable=False), sa.Column('allocated', sa.Boolean(), autoincrement=False, nullable=False), sa.Column('network_profile_id', sa.String(length=36), nullable=False), sa.ForeignKeyConstraint(['network_profile_id'], ['cisco_ml2_n1kv_network_profiles.id'], ondelete='CASCADE'), sa.PrimaryKeyConstraint('physical_network', 'vlan_id') ) op.create_table( 'cisco_ml2_n1kv_profile_bindings', sa.Column('profile_type', profile_type, nullable=True), sa.Column('tenant_id', sa.String(length=36), nullable=False, server_default='tenant_id_not_set'), sa.Column('profile_id', sa.String(length=36), nullable=False), sa.PrimaryKeyConstraint('tenant_id', 'profile_id') ) op.create_table( 'ml2_ucsm_port_profiles', sa.Column('vlan_id', sa.Integer(), nullable=False), sa.Column('profile_id', sa.String(length=64), nullable=False), sa.Column('created_on_ucs', sa.Boolean(), nullable=False), sa.PrimaryKeyConstraint('vlan_id') ) neutron-8.4.0/neutron/db/migration/alembic_migrations/env.py0000664000567000056710000000741313044372760025433 0ustar jenkinsjenkins00000000000000# Copyright 2012 New Dream Network, LLC (DreamHost) # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from logging import config as logging_config from alembic import context from oslo_config import cfg import sqlalchemy as sa from sqlalchemy import event from neutron.db.migration.alembic_migrations import external from neutron.db.migration import autogen from neutron.db.migration.connection import DBConnection from neutron.db.migration.models import head # noqa from neutron.db import model_base try: # NOTE(mriedem): This is to register the DB2 alembic code which # is an optional runtime dependency. from ibm_db_alembic.ibm_db import IbmDbImpl # noqa # pylint: disable=unused-import except ImportError: pass MYSQL_ENGINE = None # this is the Alembic Config object, which provides # access to the values within the .ini file in use. config = context.config neutron_config = config.neutron_config # Interpret the config file for Python logging. # This line sets up loggers basically. 
logging_config.fileConfig(config.config_file_name) # set the target for 'autogenerate' support target_metadata = model_base.BASEV2.metadata def set_mysql_engine(): try: mysql_engine = neutron_config.command.mysql_engine except cfg.NoSuchOptError: mysql_engine = None global MYSQL_ENGINE MYSQL_ENGINE = (mysql_engine or model_base.BASEV2.__table_args__['mysql_engine']) def include_object(object_, name, type_, reflected, compare_to): if type_ == 'table' and name in external.TABLES: return False elif type_ == 'index' and reflected and name.startswith("idx_autoinc_"): # skip indexes created by SQLAlchemy autoincrement=True # on composite PK integer columns return False else: return True def run_migrations_offline(): """Run migrations in 'offline' mode. This configures the context with either a URL or an Engine. Calls to context.execute() here emit the given string to the script output. """ set_mysql_engine() kwargs = dict() if neutron_config.database.connection: kwargs['url'] = neutron_config.database.connection else: kwargs['dialect_name'] = neutron_config.database.engine kwargs['include_object'] = include_object context.configure(**kwargs) with context.begin_transaction(): context.run_migrations() @event.listens_for(sa.Table, 'after_parent_attach') def set_storage_engine(target, parent): if MYSQL_ENGINE: target.kwargs['mysql_engine'] = MYSQL_ENGINE def run_migrations_online(): """Run migrations in 'online' mode. In this scenario we need to create an Engine and associate a connection with the context. """ set_mysql_engine() connection = config.attributes.get('connection') with DBConnection(neutron_config.database.connection, connection) as conn: context.configure( connection=conn, target_metadata=target_metadata, include_object=include_object, process_revision_directives=autogen.process_revision_directives ) with context.begin_transaction(): context.run_migrations() if context.is_offline_mode(): run_migrations_offline() else: run_migrations_online() neutron-8.4.0/neutron/db/migration/alembic_migrations/__init__.py0000664000567000056710000000000013044372736026366 0ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/db/migration/alembic_migrations/nuage_init_opts.py0000664000567000056710000000667013044372736030041 0ustar jenkinsjenkins00000000000000# Copyright 2015 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
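# Hedged, self-contained illustration, not from the upstream tree, of the
# 'after_parent_attach' listener defined in env.py above: once MYSQL_ENGINE is
# set, every Table subsequently attached to a MetaData picks up the
# mysql_engine kwarg, so migrated tables default to e.g. InnoDB without
# per-table boilerplate. Assumes the SQLAlchemy 1.0.x era this tree targets.
import sqlalchemy as sa
from sqlalchemy import event

_DEMO_MYSQL_ENGINE = 'InnoDB'  # assumed value for the sketch


@event.listens_for(sa.Table, 'after_parent_attach')
def _demo_set_engine(target, parent):
    # Mirrors set_storage_engine() above.
    if _DEMO_MYSQL_ENGINE:
        target.kwargs['mysql_engine'] = _DEMO_MYSQL_ENGINE


_md = sa.MetaData()
_t = sa.Table('demo', _md, sa.Column('id', sa.Integer, primary_key=True))
assert _t.kwargs['mysql_engine'] == 'InnoDB'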
# # Initial operations for Nuage plugin from alembic import op import sqlalchemy as sa def upgrade(): op.create_table( 'nuage_net_partitions', sa.Column('id', sa.String(length=36), nullable=False), sa.Column('name', sa.String(length=64), nullable=True), sa.Column('l3dom_tmplt_id', sa.String(length=36), nullable=True), sa.Column('l2dom_tmplt_id', sa.String(length=36), nullable=True), sa.Column('isolated_zone', sa.String(length=64), nullable=True), sa.Column('shared_zone', sa.String(length=64), nullable=True), sa.PrimaryKeyConstraint('id'), ) op.create_table( 'nuage_subnet_l2dom_mapping', sa.Column('subnet_id', sa.String(length=36), nullable=False), sa.Column('net_partition_id', sa.String(length=36), nullable=True), sa.Column('nuage_subnet_id', sa.String(length=36), nullable=True, unique=True), sa.Column('nuage_l2dom_tmplt_id', sa.String(length=36), nullable=True), sa.Column('nuage_user_id', sa.String(length=36), nullable=True), sa.Column('nuage_group_id', sa.String(length=36), nullable=True), sa.Column('nuage_managed_subnet', sa.Boolean(), nullable=True), sa.ForeignKeyConstraint(['subnet_id'], ['subnets.id'], ondelete='CASCADE'), sa.ForeignKeyConstraint(['net_partition_id'], ['nuage_net_partitions.id'], ondelete='CASCADE'), sa.PrimaryKeyConstraint('subnet_id'), ) op.create_table( 'nuage_net_partition_router_mapping', sa.Column('net_partition_id', sa.String(length=36), nullable=False), sa.Column('router_id', sa.String(length=36), nullable=False), sa.Column('nuage_router_id', sa.String(length=36), nullable=True, unique=True), sa.Column('nuage_rtr_rd', sa.String(length=36), nullable=True), sa.Column('nuage_rtr_rt', sa.String(length=36), nullable=True), sa.ForeignKeyConstraint(['net_partition_id'], ['nuage_net_partitions.id'], ondelete='CASCADE'), sa.ForeignKeyConstraint(['router_id'], ['routers.id'], ondelete='CASCADE'), sa.PrimaryKeyConstraint('net_partition_id', 'router_id'), ) op.create_table( 'nuage_provider_net_bindings', sa.Column('network_id', sa.String(length=36), nullable=False), sa.Column('network_type', sa.String(length=32), nullable=False), sa.Column('physical_network', sa.String(length=64), nullable=False), sa.Column('vlan_id', sa.Integer(), nullable=False), sa.ForeignKeyConstraint( ['network_id'], ['networks.id'], ondelete='CASCADE'), sa.PrimaryKeyConstraint('network_id') ) neutron-8.4.0/neutron/db/migration/alembic_migrations/l3_init_ops.py0000664000567000056710000001553513044372736027074 0ustar jenkinsjenkins00000000000000# Copyright 2014 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
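# Hedged, self-contained demo, not from the upstream tree, of the
# ondelete='CASCADE' pattern used by the mapping tables above: deleting the
# parent row removes dependent mapping rows at the database level. Table names
# here are invented for the sketch; SQLite needs foreign keys switched on.
import sqlalchemy as sa

_engine = sa.create_engine('sqlite://')
_engine.execute('PRAGMA foreign_keys=ON')
_md = sa.MetaData()
_parents = sa.Table('demo_parents', _md,
                    sa.Column('id', sa.String(36), primary_key=True))
_maps = sa.Table('demo_mappings', _md,
                 sa.Column('parent_id', sa.String(36),
                           sa.ForeignKey('demo_parents.id',
                                         ondelete='CASCADE'),
                           primary_key=True))
_md.create_all(_engine)
_engine.execute(_parents.insert(), id='p1')
_engine.execute(_maps.insert(), parent_id='p1')
_engine.execute(_parents.delete())
# The mapping row cascaded away with its parent.
assert _engine.execute(sa.select([_maps])).fetchall() == []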
# # Initial operations for l3 extension from alembic import op import sqlalchemy as sa l3_ha_states = sa.Enum('active', 'standby', name='l3_ha_states') def create_routerroutes(): op.create_table( 'routerroutes', sa.Column('destination', sa.String(length=64), nullable=False), sa.Column('nexthop', sa.String(length=64), nullable=False), sa.Column('router_id', sa.String(length=36), nullable=False), sa.ForeignKeyConstraint(['router_id'], ['routers.id'], ondelete='CASCADE'), sa.PrimaryKeyConstraint('destination', 'nexthop', 'router_id')) def upgrade(): op.create_table( 'externalnetworks', sa.Column('network_id', sa.String(length=36), nullable=False), sa.ForeignKeyConstraint(['network_id'], ['networks.id'], ondelete='CASCADE'), sa.PrimaryKeyConstraint('network_id')) op.create_table( 'routers', sa.Column('tenant_id', sa.String(length=255), nullable=True, index=True), sa.Column('id', sa.String(length=36), nullable=False), sa.Column('name', sa.String(length=255), nullable=True), sa.Column('status', sa.String(length=16), nullable=True), sa.Column('admin_state_up', sa.Boolean(), nullable=True), sa.Column('gw_port_id', sa.String(length=36), nullable=True), sa.Column('enable_snat', sa.Boolean(), nullable=False, server_default=sa.sql.true()), sa.ForeignKeyConstraint(['gw_port_id'], ['ports.id'], ), sa.PrimaryKeyConstraint('id')) op.create_table( 'floatingips', sa.Column('tenant_id', sa.String(length=255), nullable=True, index=True), sa.Column('id', sa.String(length=36), nullable=False), sa.Column('floating_ip_address', sa.String(length=64), nullable=False), sa.Column('floating_network_id', sa.String(length=36), nullable=False), sa.Column('floating_port_id', sa.String(length=36), nullable=False), sa.Column('fixed_port_id', sa.String(length=36), nullable=True), sa.Column('fixed_ip_address', sa.String(length=64), nullable=True), sa.Column('router_id', sa.String(length=36), nullable=True), sa.Column('last_known_router_id', sa.String(length=36), nullable=True), sa.Column('status', sa.String(length=16), nullable=True), sa.ForeignKeyConstraint(['fixed_port_id'], ['ports.id'], ), sa.ForeignKeyConstraint(['floating_port_id'], ['ports.id'], ondelete='CASCADE'), sa.ForeignKeyConstraint(['router_id'], ['routers.id'], ), sa.PrimaryKeyConstraint('id')) create_routerroutes() op.create_table( 'routerl3agentbindings', sa.Column('router_id', sa.String(length=36), nullable=True), sa.Column('l3_agent_id', sa.String(length=36), nullable=True), sa.ForeignKeyConstraint(['l3_agent_id'], ['agents.id'], ondelete='CASCADE'), sa.ForeignKeyConstraint(['router_id'], ['routers.id'], ondelete='CASCADE'), sa.PrimaryKeyConstraint('router_id', 'l3_agent_id')) op.create_table( 'router_extra_attributes', sa.Column('router_id', sa.String(length=36), nullable=False), sa.Column('distributed', sa.Boolean(), nullable=False, server_default=sa.sql.false()), sa.Column('service_router', sa.Boolean(), nullable=False, server_default=sa.sql.false()), sa.Column('ha', sa.Boolean(), nullable=False, server_default=sa.sql.false()), sa.Column('ha_vr_id', sa.Integer()), sa.ForeignKeyConstraint( ['router_id'], ['routers.id'], ondelete='CASCADE'), sa.PrimaryKeyConstraint('router_id') ) op.create_table('ha_router_agent_port_bindings', sa.Column('port_id', sa.String(length=36), nullable=False), sa.Column('router_id', sa.String(length=36), nullable=False), sa.Column('l3_agent_id', sa.String(length=36), nullable=True), sa.Column('state', l3_ha_states, server_default='standby'), sa.PrimaryKeyConstraint('port_id'), sa.ForeignKeyConstraint(['port_id'], ['ports.id'], 
ondelete='CASCADE'), sa.ForeignKeyConstraint(['router_id'], ['routers.id'], ondelete='CASCADE'), sa.ForeignKeyConstraint(['l3_agent_id'], ['agents.id'], ondelete='CASCADE')) op.create_table('ha_router_networks', sa.Column('tenant_id', sa.String(length=255), nullable=False, primary_key=True), sa.Column('network_id', sa.String(length=36), nullable=False, primary_key=True), sa.ForeignKeyConstraint(['network_id'], ['networks.id'], ondelete='CASCADE')) op.create_table('ha_router_vrid_allocations', sa.Column('network_id', sa.String(length=36), nullable=False, primary_key=True), sa.Column('vr_id', sa.Integer(), nullable=False, primary_key=True), sa.ForeignKeyConstraint(['network_id'], ['networks.id'], ondelete='CASCADE')) op.create_table( 'routerports', sa.Column('router_id', sa.String(length=36), nullable=False), sa.Column('port_id', sa.String(length=36), nullable=False), sa.Column('port_type', sa.String(length=255)), sa.PrimaryKeyConstraint('router_id', 'port_id'), sa.ForeignKeyConstraint( ['router_id'], ['routers.id'], ondelete='CASCADE' ), sa.ForeignKeyConstraint( ['port_id'], ['ports.id'], ondelete='CASCADE' ), ) neutron-8.4.0/neutron/db/migration/alembic_migrations/secgroup_init_ops.py0000664000567000056710000000612713044372736030402 0ustar jenkinsjenkins00000000000000# Copyright 2014 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
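# Hedged note, not from the upstream tree: 'ha_router_networks' and
# 'ha_router_vrid_allocations' above declare composite primary keys by
# flagging each column primary_key=True, which is equivalent to an explicit
# sa.PrimaryKeyConstraint over the same columns. Minimal check:
import sqlalchemy as sa

_md = sa.MetaData()
_t = sa.Table('ha_demo', _md,
              sa.Column('tenant_id', sa.String(255), primary_key=True),
              sa.Column('network_id', sa.String(36), primary_key=True))
assert [c.name for c in _t.primary_key.columns] == ['tenant_id', 'network_id']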
# # Initial operations for security group extension from alembic import op import sqlalchemy as sa rule_direction_enum = sa.Enum('ingress', 'egress', name='securitygrouprules_direction') def upgrade(): op.create_table( 'securitygroups', sa.Column('tenant_id', sa.String(length=255), nullable=True, index=True), sa.Column('id', sa.String(length=36), nullable=False), sa.Column('name', sa.String(length=255), nullable=True), sa.Column('description', sa.String(length=255), nullable=True), sa.PrimaryKeyConstraint('id')) op.create_table( 'securitygrouprules', sa.Column('tenant_id', sa.String(length=255), nullable=True, index=True), sa.Column('id', sa.String(length=36), nullable=False), sa.Column('security_group_id', sa.String(length=36), nullable=False), sa.Column('remote_group_id', sa.String(length=36), nullable=True), sa.Column('direction', rule_direction_enum, nullable=True), sa.Column('ethertype', sa.String(length=40), nullable=True), sa.Column('protocol', sa.String(length=40), nullable=True), sa.Column('port_range_min', sa.Integer(), nullable=True), sa.Column('port_range_max', sa.Integer(), nullable=True), sa.Column('remote_ip_prefix', sa.String(length=255), nullable=True), sa.ForeignKeyConstraint(['security_group_id'], ['securitygroups.id'], ondelete='CASCADE'), sa.ForeignKeyConstraint(['remote_group_id'], ['securitygroups.id'], ondelete='CASCADE'), sa.PrimaryKeyConstraint('id')) op.create_table( 'securitygroupportbindings', sa.Column('port_id', sa.String(length=36), nullable=False), sa.Column('security_group_id', sa.String(length=36), nullable=False), sa.ForeignKeyConstraint(['port_id'], ['ports.id'], ondelete='CASCADE'), sa.ForeignKeyConstraint(['security_group_id'], ['securitygroups.id']), sa.PrimaryKeyConstraint('port_id', 'security_group_id')) op.create_table( 'default_security_group', sa.Column('tenant_id', sa.String(length=255), nullable=False), sa.Column('security_group_id', sa.String(length=36), nullable=False), sa.PrimaryKeyConstraint('tenant_id'), sa.ForeignKeyConstraint(['security_group_id'], ['securitygroups.id'], ondelete="CASCADE")) neutron-8.4.0/neutron/db/migration/alembic_migrations/firewall_init_ops.py0000664000567000056710000000672013044372736030357 0ustar jenkinsjenkins00000000000000# Copyright 2014 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
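# Hedged, self-contained demo, not from the upstream tree, of why
# 'default_security_group' above is keyed on tenant_id alone: the primary key
# guarantees at most one default group per tenant at the schema level.
import sqlalchemy as sa

_engine = sa.create_engine('sqlite://')
_md = sa.MetaData()
_dsg = sa.Table('default_security_group_demo', _md,
                sa.Column('tenant_id', sa.String(255), primary_key=True),
                sa.Column('security_group_id', sa.String(36), nullable=False))
_md.create_all(_engine)
_engine.execute(_dsg.insert(), tenant_id='t1', security_group_id='sg1')
try:
    _engine.execute(_dsg.insert(), tenant_id='t1', security_group_id='sg2')
    raise AssertionError('second default group should have been rejected')
except sa.exc.IntegrityError:
    pass  # duplicate tenant_id rejected by the primary key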
# # Initial schema operations for firewall service plugin from alembic import op import sqlalchemy as sa action_types = sa.Enum('allow', 'deny', name='firewallrules_action') def upgrade(): op.create_table( 'firewall_policies', sa.Column('tenant_id', sa.String(length=255), nullable=True), sa.Column('id', sa.String(length=36), nullable=False), sa.Column('name', sa.String(length=255), nullable=True), sa.Column('description', sa.String(length=1024), nullable=True), sa.Column('shared', sa.Boolean(), nullable=True), sa.Column('audited', sa.Boolean(), nullable=True), sa.PrimaryKeyConstraint('id')) op.create_table( 'firewalls', sa.Column('tenant_id', sa.String(length=255), nullable=True), sa.Column('id', sa.String(length=36), nullable=False), sa.Column('name', sa.String(length=255), nullable=True), sa.Column('description', sa.String(length=1024), nullable=True), sa.Column('shared', sa.Boolean(), nullable=True), sa.Column('admin_state_up', sa.Boolean(), nullable=True), sa.Column('status', sa.String(length=16), nullable=True), sa.Column('firewall_policy_id', sa.String(length=36), nullable=True), sa.ForeignKeyConstraint(['firewall_policy_id'], ['firewall_policies.id'], name='firewalls_ibfk_1'), sa.PrimaryKeyConstraint('id')) op.create_table( 'firewall_rules', sa.Column('tenant_id', sa.String(length=255), nullable=True), sa.Column('id', sa.String(length=36), nullable=False), sa.Column('name', sa.String(length=255), nullable=True), sa.Column('description', sa.String(length=1024), nullable=True), sa.Column('firewall_policy_id', sa.String(length=36), nullable=True), sa.Column('shared', sa.Boolean(), nullable=True), sa.Column('protocol', sa.String(length=40), nullable=True), sa.Column('ip_version', sa.Integer(), nullable=False), sa.Column('source_ip_address', sa.String(length=46), nullable=True), sa.Column('destination_ip_address', sa.String(length=46), nullable=True), sa.Column('source_port_range_min', sa.Integer(), nullable=True), sa.Column('source_port_range_max', sa.Integer(), nullable=True), sa.Column('destination_port_range_min', sa.Integer(), nullable=True), sa.Column('destination_port_range_max', sa.Integer(), nullable=True), sa.Column('action', action_types, nullable=True), sa.Column('enabled', sa.Boolean(), nullable=True), sa.Column('position', sa.Integer(), nullable=True), sa.ForeignKeyConstraint(['firewall_policy_id'], ['firewall_policies.id'], name='firewall_rules_ibfk_1'), sa.PrimaryKeyConstraint('id')) neutron-8.4.0/neutron/db/migration/alembic_migrations/nsxv_initial_opts.py0000664000567000056710000001542513044372736030424 0ustar jenkinsjenkins00000000000000# Copyright 2015 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
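# Hedged sketch, not from the upstream tree: the foreign keys above carry
# explicit names ('firewalls_ibfk_1', 'firewall_rules_ibfk_1') so that a later
# migration can drop or rebuild them portably by name, along these lines:
from alembic import op


def _later_migration_sketch():
    # Would run inside an alembic upgrade()/downgrade() context.
    op.drop_constraint('firewalls_ibfk_1', 'firewalls', type_='foreignkey')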
# from alembic import op import sqlalchemy as sa appliance_sizes_enum = sa.Enum('compact', 'large', 'xlarge', 'quadlarge', name='nsxv_router_bindings_appliance_size') edge_types_enum = sa.Enum('service', 'vdr', name='nsxv_router_bindings_edge_type') internal_network_purpose_enum = sa.Enum('inter_edge_net', name='nsxv_internal_networks_purpose') internal_edge_purpose_enum = sa.Enum('inter_edge_net', name='nsxv_internal_edges_purpose') tz_binding_type_enum = sa.Enum('flat', 'vlan', 'portgroup', name='nsxv_tz_network_bindings_binding_type') router_types_enum = sa.Enum('shared', 'exclusive', name='nsxv_router_type') def upgrade(): op.create_table( 'nsxv_router_bindings', sa.Column('status', sa.String(length=16), nullable=False), sa.Column('status_description', sa.String(length=255), nullable=True), sa.Column('router_id', sa.String(length=36), nullable=False), sa.Column('edge_id', sa.String(length=36), nullable=True), sa.Column('lswitch_id', sa.String(length=36), nullable=True), sa.Column('appliance_size', appliance_sizes_enum, nullable=True), sa.Column('edge_type', edge_types_enum, nullable=True), sa.PrimaryKeyConstraint('router_id')) op.create_table( 'nsxv_internal_networks', sa.Column('network_purpose', internal_network_purpose_enum, nullable=False), sa.Column('network_id', sa.String(length=36), nullable=True), sa.ForeignKeyConstraint(['network_id'], ['networks.id'], ondelete='CASCADE'), sa.PrimaryKeyConstraint('network_purpose')) op.create_table( 'nsxv_internal_edges', sa.Column('ext_ip_address', sa.String(length=64), nullable=False), sa.Column('router_id', sa.String(length=36), nullable=True), sa.Column('purpose', internal_edge_purpose_enum, nullable=True), sa.PrimaryKeyConstraint('ext_ip_address')) op.create_table( 'nsxv_firewall_rule_bindings', sa.Column('rule_id', sa.String(length=36), nullable=False), sa.Column('edge_id', sa.String(length=36), nullable=False), sa.Column('rule_vse_id', sa.String(length=36), nullable=True), sa.PrimaryKeyConstraint('rule_id', 'edge_id')) op.create_table( 'nsxv_edge_dhcp_static_bindings', sa.Column('edge_id', sa.String(length=36), nullable=False), sa.Column('mac_address', sa.String(length=32), nullable=False), sa.Column('binding_id', sa.String(length=36), nullable=False), sa.PrimaryKeyConstraint('edge_id', 'mac_address')) op.create_table( 'nsxv_edge_vnic_bindings', sa.Column('edge_id', sa.String(length=36), nullable=False), sa.Column('vnic_index', sa.Integer(), nullable=False), sa.Column('tunnel_index', sa.Integer(), nullable=False), sa.Column('network_id', sa.String(length=36), nullable=True), sa.PrimaryKeyConstraint('edge_id', 'vnic_index', 'tunnel_index')) op.create_table( 'nsxv_spoofguard_policy_network_mappings', sa.Column('network_id', sa.String(length=36), nullable=False), sa.Column('policy_id', sa.String(length=36), nullable=False), sa.ForeignKeyConstraint(['network_id'], ['networks.id'], ondelete='CASCADE'), sa.PrimaryKeyConstraint('network_id')) op.create_table( 'nsxv_security_group_section_mappings', sa.Column('neutron_id', sa.String(length=36), nullable=False), sa.Column('ip_section_id', sa.String(length=100), nullable=True), sa.ForeignKeyConstraint(['neutron_id'], ['securitygroups.id'], ondelete='CASCADE'), sa.PrimaryKeyConstraint('neutron_id')) op.create_table( 'nsxv_tz_network_bindings', sa.Column('network_id', sa.String(length=36), nullable=False), sa.Column('binding_type', tz_binding_type_enum, nullable=False), sa.Column('phy_uuid', sa.String(length=36), nullable=True), sa.Column('vlan_id', sa.Integer(), autoincrement=False, 
nullable=True), sa.ForeignKeyConstraint(['network_id'], ['networks.id'], ondelete='CASCADE'), sa.PrimaryKeyConstraint('network_id', 'binding_type', 'phy_uuid', 'vlan_id')) op.create_table( 'nsxv_port_vnic_mappings', sa.Column('neutron_id', sa.String(length=36), nullable=False), sa.Column('nsx_id', sa.String(length=42), nullable=False), sa.ForeignKeyConstraint(['neutron_id'], ['ports.id'], ondelete='CASCADE'), sa.PrimaryKeyConstraint('neutron_id', 'nsx_id')) op.create_table( 'nsxv_port_index_mappings', sa.Column('port_id', sa.String(length=36), nullable=False), sa.Column('device_id', sa.String(length=255), nullable=False), sa.Column('index', sa.Integer(), nullable=False), sa.ForeignKeyConstraint(['port_id'], ['ports.id'], ondelete='CASCADE'), sa.PrimaryKeyConstraint('port_id'), sa.UniqueConstraint('device_id', 'index')) op.create_table( 'nsxv_rule_mappings', sa.Column('neutron_id', sa.String(length=36), nullable=False), sa.Column('nsx_rule_id', sa.String(length=36), nullable=False), sa.ForeignKeyConstraint(['neutron_id'], ['securitygrouprules.id'], ondelete='CASCADE'), sa.PrimaryKeyConstraint('neutron_id', 'nsx_rule_id')) op.create_table( 'nsxv_router_ext_attributes', sa.Column('router_id', sa.String(length=36), nullable=False), sa.Column('distributed', sa.Boolean(), nullable=False), sa.Column('router_type', router_types_enum, default='exclusive', nullable=False), sa.Column('service_router', sa.Boolean(), nullable=False), sa.ForeignKeyConstraint(['router_id'], ['routers.id'], ondelete='CASCADE'), sa.PrimaryKeyConstraint('router_id')) neutron-8.4.0/neutron/db/migration/alembic_migrations/script.py.mako0000664000567000056710000000202713044372736027074 0ustar jenkinsjenkins00000000000000# Copyright ${create_date.year} OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # """${message} Revision ID: ${up_revision} Revises: ${down_revision} Create Date: ${create_date} """ # revision identifiers, used by Alembic. revision = ${repr(up_revision)} down_revision = ${repr(down_revision)} % if branch_labels: branch_labels = ${repr(branch_labels)} % endif from alembic import op import sqlalchemy as sa ${imports if imports else ""} def upgrade(): ${upgrades if upgrades else "pass"} neutron-8.4.0/neutron/db/migration/alembic_migrations/other_extensions_init_ops.py0000664000567000056710000001043513044372736032150 0ustar jenkinsjenkins00000000000000# Copyright 2014 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
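# Hedged note, not from the upstream tree: in 'nsxv_router_ext_attributes'
# above, default='exclusive' is a Python-side column default and has no effect
# on the DDL a migration emits; only server_default becomes part of the
# schema. A schema-level default would be spelled like this (demo enum name
# invented for the sketch):
import sqlalchemy as sa

_router_types_demo = sa.Enum('shared', 'exclusive',
                             name='nsxv_router_type_demo')
sa.Column('router_type', _router_types_demo, server_default='exclusive',
          nullable=False)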
# # Initial operations for extensions: # allowedaddresspairs # extradhcpopts # portbindings # quotas # routedserviceinsertion # servicetype from alembic import op import sqlalchemy as sa def upgrade(): op.create_table( 'providerresourceassociations', sa.Column('provider_name', sa.String(length=255), nullable=False), sa.Column('resource_id', sa.String(length=36), nullable=False), sa.PrimaryKeyConstraint('provider_name', 'resource_id'), sa.UniqueConstraint('resource_id')) op.create_table( 'quotas', sa.Column('id', sa.String(length=36), nullable=False), sa.Column('tenant_id', sa.String(length=255), nullable=True, index=True), sa.Column('resource', sa.String(length=255), nullable=True), sa.Column('limit', sa.Integer(), nullable=True), sa.PrimaryKeyConstraint('id')) op.create_table( 'allowedaddresspairs', sa.Column('port_id', sa.String(length=36), nullable=False), sa.Column('mac_address', sa.String(length=32), nullable=False), sa.Column('ip_address', sa.String(length=64), nullable=False), sa.ForeignKeyConstraint(['port_id'], ['ports.id'], ondelete='CASCADE'), sa.PrimaryKeyConstraint('port_id', 'mac_address', 'ip_address')) op.create_table( 'portbindingports', sa.Column('port_id', sa.String(length=36), nullable=False), sa.Column('host', sa.String(length=255), nullable=False), sa.ForeignKeyConstraint(['port_id'], ['ports.id'], ondelete='CASCADE'), sa.PrimaryKeyConstraint('port_id')) op.create_table( 'extradhcpopts', sa.Column('id', sa.String(length=36), nullable=False), sa.Column('port_id', sa.String(length=36), nullable=False), sa.Column('opt_name', sa.String(length=64), nullable=False), sa.Column('opt_value', sa.String(length=255), nullable=False), sa.Column('ip_version', sa.Integer(), server_default='4', nullable=False), sa.ForeignKeyConstraint(['port_id'], ['ports.id'], ondelete='CASCADE'), sa.PrimaryKeyConstraint('id'), sa.UniqueConstraint( 'port_id', 'opt_name', 'ip_version', name='uniq_extradhcpopts0portid0optname0ipversion')) op.create_table('subnetpools', sa.Column('tenant_id', sa.String(length=255), nullable=True, index=True), sa.Column('id', sa.String(length=36), nullable=False), sa.Column('name', sa.String(length=255), nullable=True), sa.Column('ip_version', sa.Integer(), nullable=False), sa.Column('default_prefixlen', sa.Integer(), nullable=False), sa.Column('min_prefixlen', sa.Integer(), nullable=False), sa.Column('max_prefixlen', sa.Integer(), nullable=False), sa.Column('shared', sa.Boolean(), nullable=False), sa.Column('default_quota', sa.Integer(), nullable=True), sa.PrimaryKeyConstraint('id')) op.create_table('subnetpoolprefixes', sa.Column('cidr', sa.String(length=64), nullable=False), sa.Column('subnetpool_id', sa.String(length=36), nullable=False), sa.ForeignKeyConstraint(['subnetpool_id'], ['subnetpools.id'], ondelete='CASCADE'), sa.PrimaryKeyConstraint('cidr', 'subnetpool_id')) neutron-8.4.0/neutron/db/migration/alembic_migrations/external.py0000664000567000056710000000737413044372760026473 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
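# Hedged note, not from the upstream tree, on the constraint-naming convention
# visible above ('uniq_extradhcpopts0portid0optname0ipversion'): neutron joins
# table and column names with '0' so constraint names stay unique and can be
# dropped by name in later migrations. A helper of this shape (hypothetical,
# not a real neutron API) reproduces the pattern:
def _uniq_name(table, *columns):
    return 'uniq_%s0%s' % (table, '0'.join(columns))


assert (_uniq_name('extradhcpopts', 'portid', 'optname', 'ipversion') ==
        'uniq_extradhcpopts0portid0optname0ipversion')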
# These tables are in the neutron database, but their models have moved # to separate repositories. We skip the migration checks for these tables. VPNAAS_TABLES = ['vpnservices', 'ipsecpolicies', 'ipsecpeercidrs', 'ipsec_site_connections', 'cisco_csr_identifier_map', 'ikepolicies'] LBAAS_TABLES = ['vips', 'sessionpersistences', 'pools', 'healthmonitors', 'poolstatisticss', 'members', 'poolloadbalanceragentbindings', 'poolmonitorassociations'] FWAAS_TABLES = ['firewall_rules', 'firewalls', 'firewall_policies'] # Arista ML2 driver Models moved to openstack/networking-arista REPO_ARISTA_TABLES = [ 'arista_provisioned_nets', 'arista_provisioned_vms', 'arista_provisioned_tenants', ] # Models moved to openstack/networking-cisco REPO_CISCO_TABLES = [ 'cisco_ml2_apic_contracts', 'cisco_ml2_apic_names', 'cisco_ml2_apic_host_links', 'cisco_ml2_n1kv_policy_profiles', 'cisco_ml2_n1kv_network_profiles', 'cisco_ml2_n1kv_port_bindings', 'cisco_ml2_n1kv_network_bindings', 'cisco_ml2_n1kv_vxlan_allocations', 'cisco_ml2_n1kv_vlan_allocations', 'cisco_ml2_n1kv_profile_bindings', 'cisco_ml2_nexusport_bindings', 'cisco_ml2_nexus_nve', 'ml2_nexus_vxlan_allocations', 'ml2_nexus_vxlan_mcast_groups', 'ml2_ucsm_port_profiles', 'cisco_hosting_devices', 'cisco_port_mappings', 'cisco_router_mappings', ] # VMware-NSX models moved to openstack/vmware-nsx REPO_VMWARE_TABLES = [ 'tz_network_bindings', 'neutron_nsx_network_mappings', 'neutron_nsx_security_group_mappings', 'neutron_nsx_port_mappings', 'neutron_nsx_router_mappings', 'multi_provider_networks', 'networkconnections', 'networkgatewaydevicereferences', 'networkgatewaydevices', 'networkgateways', 'maclearningstates', 'qosqueues', 'portqueuemappings', 'networkqueuemappings', 'lsn_port', 'lsn', 'nsxv_router_bindings', 'nsxv_edge_vnic_bindings', 'nsxv_edge_dhcp_static_bindings', 'nsxv_internal_networks', 'nsxv_internal_edges', 'nsxv_security_group_section_mappings', 'nsxv_rule_mappings', 'nsxv_port_vnic_mappings', 'nsxv_router_ext_attributes', 'nsxv_tz_network_bindings', 'nsxv_port_index_mappings', 'nsxv_firewall_rule_bindings', 'nsxv_spoofguard_policy_network_mappings', 'nsxv_vdr_dhcp_bindings', 'vcns_router_bindings', ] # Brocade models are in openstack/networking-brocade REPO_BROCADE_TABLES = [ 'brocadenetworks', 'brocadeports', 'ml2_brocadenetworks', 'ml2_brocadeports', ] # BigSwitch models are in openstack/networking-bigswitch REPO_BIGSWITCH_TABLES = [ 'consistencyhashes', 'routerrules', 'nexthops', ] # Nuage models are in github.com/nuagenetworks/nuage-openstack-neutron REPO_NUAGE_TABLES = [ 'nuage_net_partitions', 'nuage_net_partition_router_mapping', 'nuage_provider_net_bindings', 'nuage_subnet_l2dom_mapping', ] TABLES = (FWAAS_TABLES + LBAAS_TABLES + VPNAAS_TABLES + REPO_ARISTA_TABLES + REPO_CISCO_TABLES + REPO_VMWARE_TABLES + REPO_BROCADE_TABLES + REPO_BIGSWITCH_TABLES + REPO_NUAGE_TABLES) neutron-8.4.0/neutron/db/migration/alembic_migrations/portsec_init_ops.py0000664000567000056710000000265413044372736030233 0ustar jenkinsjenkins00000000000000# Copyright 2014 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. # # Initial operations for the port security extension from alembic import op import sqlalchemy as sa def upgrade(): op.create_table( 'networksecuritybindings', sa.Column('network_id', sa.String(length=36), nullable=False), sa.Column('port_security_enabled', sa.Boolean(), nullable=False), sa.ForeignKeyConstraint(['network_id'], ['networks.id'], ondelete='CASCADE'), sa.PrimaryKeyConstraint('network_id')) op.create_table( 'portsecuritybindings', sa.Column('port_id', sa.String(length=36), nullable=False), sa.Column('port_security_enabled', sa.Boolean(), nullable=False), sa.ForeignKeyConstraint(['port_id'], ['ports.id'], ondelete='CASCADE'), sa.PrimaryKeyConstraint('port_id')) neutron-8.4.0/neutron/db/migration/alembic_migrations/ovs_init_ops.py0000664000567000056710000000414213044372736027355 0ustar jenkinsjenkins00000000000000# Copyright 2014 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # # Initial operations for the OVS plugin from alembic import op import sqlalchemy as sa def upgrade(): op.create_table( 'ovs_tunnel_endpoints', sa.Column('ip_address', sa.String(length=64), nullable=False), sa.Column('id', sa.Integer(), nullable=False), sa.PrimaryKeyConstraint('ip_address'), sa.UniqueConstraint('id', name='uniq_ovs_tunnel_endpoints0id')) op.create_table( 'ovs_tunnel_allocations', sa.Column('tunnel_id', sa.Integer(), autoincrement=False, nullable=False), sa.Column('allocated', sa.Boolean(), nullable=False), sa.PrimaryKeyConstraint('tunnel_id')) op.create_table( 'ovs_vlan_allocations', sa.Column('physical_network', sa.String(length=64), nullable=False), sa.Column('vlan_id', sa.Integer(), autoincrement=False, nullable=False), sa.Column('allocated', sa.Boolean(), nullable=False), sa.PrimaryKeyConstraint('physical_network', 'vlan_id')) op.create_table( 'ovs_network_bindings', sa.Column('network_id', sa.String(length=36), nullable=False), sa.Column('network_type', sa.String(length=32), nullable=False), sa.Column('physical_network', sa.String(length=64), nullable=True), sa.Column('segmentation_id', sa.Integer(), nullable=True), sa.ForeignKeyConstraint(['network_id'], ['networks.id'], ondelete='CASCADE'), sa.PrimaryKeyConstraint('network_id')) neutron-8.4.0/neutron/db/migration/alembic_migrations/other_plugins_init_ops.py0000664000567000056710000000473213044372736031435 0ustar jenkinsjenkins00000000000000# Copyright 2014 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
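# Hedged sketch, not from the upstream tree, tying external.TABLES above to
# the include_object() hook in env.py: during autogenerate, tables whose
# models moved to separate repositories are filtered out so alembic does not
# propose dropping them from the neutron database. The placeholder default
# below stands in for external.TABLES.
def _include_object_sketch(object_, name, type_, reflected, compare_to,
                           external_tables=('vpnservices', 'firewalls')):
    if type_ == 'table' and name in external_tables:
        return False
    return True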
# # Initial operations for plugins: # bigswitch # metaplugin from alembic import op import sqlalchemy as sa def upgrade(): # metaplugin op.create_table( 'networkflavors', sa.Column('flavor', sa.String(length=255), nullable=True), sa.Column('network_id', sa.String(length=36), nullable=False), sa.ForeignKeyConstraint(['network_id'], ['networks.id'], ondelete='CASCADE'), sa.PrimaryKeyConstraint('network_id')) op.create_table( 'routerflavors', sa.Column('flavor', sa.String(length=255), nullable=True), sa.Column('router_id', sa.String(length=36), nullable=False), sa.ForeignKeyConstraint(['router_id'], ['routers.id'], ondelete='CASCADE'), sa.PrimaryKeyConstraint('router_id')) # big switch op.create_table( 'routerrules', sa.Column('id', sa.Integer(), nullable=False), sa.Column('source', sa.String(length=64), nullable=False), sa.Column('destination', sa.String(length=64), nullable=False), sa.Column('action', sa.String(length=10), nullable=False), sa.Column('router_id', sa.String(length=36), nullable=True), sa.ForeignKeyConstraint(['router_id'], ['routers.id'], ondelete='CASCADE'), sa.PrimaryKeyConstraint('id')) op.create_table( 'nexthops', sa.Column('rule_id', sa.Integer(), nullable=False), sa.Column('nexthop', sa.String(length=64), nullable=False), sa.ForeignKeyConstraint(['rule_id'], ['routerrules.id'], ondelete='CASCADE'), sa.PrimaryKeyConstraint('rule_id', 'nexthop')) op.create_table( 'consistencyhashes', sa.Column('hash_id', sa.String(255), primary_key=True), sa.Column('hash', sa.String(255), nullable=False) ) neutron-8.4.0/neutron/db/migration/alembic_migrations/core_init_ops.py0000664000567000056710000001547413044372736027510 0ustar jenkinsjenkins00000000000000# Copyright 2014 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
# # Initial operations for core resources from alembic import op import sqlalchemy as sa def upgrade(): op.create_table( 'networks', sa.Column('tenant_id', sa.String(length=255), nullable=True, index=True), sa.Column('id', sa.String(length=36), nullable=False), sa.Column('name', sa.String(length=255), nullable=True), sa.Column('status', sa.String(length=16), nullable=True), sa.Column('admin_state_up', sa.Boolean(), nullable=True), sa.Column('shared', sa.Boolean(), nullable=True), sa.Column('mtu', sa.Integer(), nullable=True), sa.Column('vlan_transparent', sa.Boolean(), nullable=True), sa.PrimaryKeyConstraint('id')) op.create_table( 'ports', sa.Column('tenant_id', sa.String(length=255), nullable=True, index=True), sa.Column('id', sa.String(length=36), nullable=False), sa.Column('name', sa.String(length=255), nullable=True), sa.Column('network_id', sa.String(length=36), nullable=False), sa.Column('mac_address', sa.String(length=32), nullable=False), sa.Column('admin_state_up', sa.Boolean(), nullable=False), sa.Column('status', sa.String(length=16), nullable=False), sa.Column('device_id', sa.String(length=255), nullable=False), sa.Column('device_owner', sa.String(length=255), nullable=False), sa.ForeignKeyConstraint(['network_id'], ['networks.id']), sa.UniqueConstraint('network_id', 'mac_address', name='uniq_ports0network_id0mac_address'), sa.PrimaryKeyConstraint('id'), sa.Index(op.f('ix_ports_network_id_device_owner'), 'network_id', 'device_owner'), sa.Index(op.f('ix_ports_network_id_mac_address'), 'network_id', 'mac_address')) op.create_table( 'subnets', sa.Column('tenant_id', sa.String(length=255), nullable=True, index=True), sa.Column('id', sa.String(length=36), nullable=False), sa.Column('name', sa.String(length=255), nullable=True), sa.Column('network_id', sa.String(length=36), nullable=True), sa.Column('ip_version', sa.Integer(), nullable=False), sa.Column('cidr', sa.String(length=64), nullable=False), sa.Column('gateway_ip', sa.String(length=64), nullable=True), sa.Column('enable_dhcp', sa.Boolean(), nullable=True), sa.Column('shared', sa.Boolean(), nullable=True), sa.Column('ipv6_ra_mode', sa.Enum('slaac', 'dhcpv6-stateful', 'dhcpv6-stateless', name='ipv6_ra_modes'), nullable=True), sa.Column('ipv6_address_mode', sa.Enum('slaac', 'dhcpv6-stateful', 'dhcpv6-stateless', name='ipv6_address_modes'), nullable=True), sa.Column('subnetpool_id', sa.String(length=36), nullable=True, index=True), sa.ForeignKeyConstraint(['network_id'], ['networks.id'], ), sa.PrimaryKeyConstraint('id')) op.create_table( 'dnsnameservers', sa.Column('address', sa.String(length=128), nullable=False), sa.Column('subnet_id', sa.String(length=36), nullable=False), sa.ForeignKeyConstraint(['subnet_id'], ['subnets.id'], ondelete='CASCADE'), sa.PrimaryKeyConstraint('address', 'subnet_id')) op.create_table( 'ipallocationpools', sa.Column('id', sa.String(length=36), nullable=False), sa.Column('subnet_id', sa.String(length=36), nullable=True), sa.Column('first_ip', sa.String(length=64), nullable=False), sa.Column('last_ip', sa.String(length=64), nullable=False), sa.ForeignKeyConstraint(['subnet_id'], ['subnets.id'], ondelete='CASCADE'), sa.PrimaryKeyConstraint('id')) op.create_table( 'subnetroutes', sa.Column('destination', sa.String(length=64), nullable=False), sa.Column('nexthop', sa.String(length=64), nullable=False), sa.Column('subnet_id', sa.String(length=36), nullable=False), sa.ForeignKeyConstraint(['subnet_id'], ['subnets.id'], ondelete='CASCADE'), sa.PrimaryKeyConstraint('destination', 'nexthop', 'subnet_id')) 
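    # NOTE (illustrative aside, not part of the original migration): each
    # op.create_table() call above is compiled to plain DDL at upgrade
    # time. For example, the 'dnsnameservers' definition corresponds
    # roughly to the following (MySQL dialect assumed):
    #
    #   CREATE TABLE dnsnameservers (
    #       address VARCHAR(128) NOT NULL,
    #       subnet_id VARCHAR(36) NOT NULL,
    #       PRIMARY KEY (address, subnet_id),
    #       FOREIGN KEY (subnet_id) REFERENCES subnets (id)
    #           ON DELETE CASCADE
    #   );
    #
    # Unique constraint names in these modules follow the
    # 'uniq_<table>0<column>[0<column>...]' convention, as in
    # 'uniq_ports0network_id0mac_address' above.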
op.create_table( 'ipallocations', sa.Column('port_id', sa.String(length=36), nullable=True), sa.Column('ip_address', sa.String(length=64), nullable=False), sa.Column('subnet_id', sa.String(length=36), nullable=False), sa.Column('network_id', sa.String(length=36), nullable=False), sa.ForeignKeyConstraint(['network_id'], ['networks.id'], ondelete='CASCADE'), sa.ForeignKeyConstraint(['port_id'], ['ports.id'], ondelete='CASCADE'), sa.ForeignKeyConstraint(['subnet_id'], ['subnets.id'], ondelete='CASCADE'), sa.PrimaryKeyConstraint('ip_address', 'subnet_id', 'network_id')) op.create_table( 'ipavailabilityranges', sa.Column('allocation_pool_id', sa.String(length=36), nullable=False), sa.Column('first_ip', sa.String(length=64), nullable=False), sa.Column('last_ip', sa.String(length=64), nullable=False), sa.ForeignKeyConstraint(['allocation_pool_id'], ['ipallocationpools.id'], ondelete='CASCADE'), sa.PrimaryKeyConstraint('allocation_pool_id', 'first_ip', 'last_ip'), sa.UniqueConstraint( 'first_ip', 'allocation_pool_id', name='uniq_ipavailabilityranges0first_ip0allocation_pool_id'), sa.UniqueConstraint( 'last_ip', 'allocation_pool_id', name='uniq_ipavailabilityranges0last_ip0allocation_pool_id')) op.create_table( 'networkdhcpagentbindings', sa.Column('network_id', sa.String(length=36), nullable=False), sa.Column('dhcp_agent_id', sa.String(length=36), nullable=False), sa.ForeignKeyConstraint(['dhcp_agent_id'], ['agents.id'], ondelete='CASCADE'), sa.ForeignKeyConstraint(['network_id'], ['networks.id'], ondelete='CASCADE'), sa.PrimaryKeyConstraint('network_id', 'dhcp_agent_id')) neutron-8.4.0/neutron/db/migration/alembic_migrations/vmware_init_ops.py0000664000567000056710000002220013044372736030042 0ustar jenkinsjenkins00000000000000# Copyright 2014 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
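# NOTE (explanatory aside): the module-level sa.Enum definitions that
# follow (net_binding_type, l2gw_segmentation_type, qos_marking) carry
# explicit 'name' values. On PostgreSQL, SQLAlchemy creates each of them
# as a standalone named ENUM type alongside the table; on MySQL the
# allowed values are inlined into the column definition instead. This is
# stock SQLAlchemy behavior, mentioned here only because the resulting
# schema differs per backend.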
# # Initial schema operations for VMware plugins from alembic import op import sqlalchemy as sa net_binding_type = sa.Enum('flat', 'vlan', 'stt', 'gre', 'l3_ext', name='tz_network_bindings_binding_type') l2gw_segmentation_type = sa.Enum('flat', 'vlan', name='networkconnections_segmentation_type') qos_marking = sa.Enum('untrusted', 'trusted', name='qosqueues_qos_marking') def upgrade(): op.create_table( 'tz_network_bindings', sa.Column('network_id', sa.String(length=36), nullable=False), sa.Column('binding_type', net_binding_type, nullable=False), sa.Column('phy_uuid', sa.String(length=36), nullable=True), sa.Column('vlan_id', sa.Integer(), autoincrement=False, nullable=True), sa.ForeignKeyConstraint(['network_id'], ['networks.id'], ondelete='CASCADE'), sa.PrimaryKeyConstraint('network_id', 'binding_type', 'phy_uuid', 'vlan_id')) op.create_table( 'multi_provider_networks', sa.Column('network_id', sa.String(length=36), nullable=False), sa.ForeignKeyConstraint(['network_id'], ['networks.id'], ondelete='CASCADE'), sa.PrimaryKeyConstraint('network_id')) op.create_table( 'vcns_router_bindings', sa.Column('status', sa.String(length=16), nullable=False), sa.Column('status_description', sa.String(length=255), nullable=True), sa.Column('router_id', sa.String(length=36), nullable=False), sa.Column('edge_id', sa.String(length=16), nullable=True), sa.Column('lswitch_id', sa.String(length=36), nullable=False), sa.PrimaryKeyConstraint('router_id')) op.create_table( 'networkgateways', sa.Column('id', sa.String(length=36), nullable=False), sa.Column('name', sa.String(length=255), nullable=True), sa.Column('tenant_id', sa.String(length=36), nullable=True), sa.Column('default', sa.Boolean(), nullable=True), sa.PrimaryKeyConstraint('id')) op.create_table( 'networkconnections', sa.Column('tenant_id', sa.String(length=255), nullable=True, index=True), sa.Column('network_gateway_id', sa.String(length=36), nullable=True), sa.Column('network_id', sa.String(length=36), nullable=True), sa.Column('segmentation_type', l2gw_segmentation_type, nullable=True), sa.Column('segmentation_id', sa.Integer(), nullable=True), sa.Column('port_id', sa.String(length=36), nullable=False), sa.ForeignKeyConstraint(['network_gateway_id'], ['networkgateways.id'], ondelete='CASCADE'), sa.ForeignKeyConstraint(['network_id'], ['networks.id'], ondelete='CASCADE'), sa.ForeignKeyConstraint(['port_id'], ['ports.id'], ondelete='CASCADE'), sa.PrimaryKeyConstraint('port_id'), sa.UniqueConstraint('network_gateway_id', 'segmentation_type', 'segmentation_id')) op.create_table( 'qosqueues', sa.Column('tenant_id', sa.String(length=255), nullable=True, index=True), sa.Column('id', sa.String(length=36), nullable=False), sa.Column('name', sa.String(length=255), nullable=True), sa.Column('default', sa.Boolean(), nullable=True, server_default=sa.sql.false()), sa.Column('min', sa.Integer(), nullable=False), sa.Column('max', sa.Integer(), nullable=True), sa.Column('qos_marking', qos_marking, nullable=True), sa.Column('dscp', sa.Integer(), nullable=True), sa.PrimaryKeyConstraint('id')) op.create_table( 'networkqueuemappings', sa.Column('network_id', sa.String(length=36), nullable=False), sa.Column('queue_id', sa.String(length=36), nullable=True), sa.ForeignKeyConstraint(['network_id'], ['networks.id'], ondelete='CASCADE'), sa.ForeignKeyConstraint(['queue_id'], ['qosqueues.id'], ondelete='CASCADE'), sa.PrimaryKeyConstraint('network_id')) op.create_table( 'portqueuemappings', sa.Column('port_id', sa.String(length=36), nullable=False), sa.Column('queue_id', 
sa.String(length=36), nullable=False), sa.ForeignKeyConstraint(['port_id'], ['ports.id'], ondelete='CASCADE'), sa.ForeignKeyConstraint(['queue_id'], ['qosqueues.id'], ), sa.PrimaryKeyConstraint('port_id', 'queue_id')) op.create_table( 'maclearningstates', sa.Column('port_id', sa.String(length=36), nullable=False), sa.Column('mac_learning_enabled', sa.Boolean(), nullable=False), sa.ForeignKeyConstraint(['port_id'], ['ports.id'], ondelete='CASCADE'), sa.PrimaryKeyConstraint('port_id')) op.create_table('neutron_nsx_port_mappings', sa.Column('neutron_id', sa.String(length=36), nullable=False), sa.Column('nsx_port_id', sa.String(length=36), nullable=False), sa.Column('nsx_switch_id', sa.String(length=36), nullable=True), sa.ForeignKeyConstraint(['neutron_id'], ['ports.id'], ondelete='CASCADE'), sa.PrimaryKeyConstraint('neutron_id')) op.create_table( 'lsn', sa.Column('net_id', sa.String(length=36), nullable=False), sa.Column('lsn_id', sa.String(length=36), nullable=False), sa.PrimaryKeyConstraint('lsn_id')) op.create_table( 'lsn_port', sa.Column('lsn_port_id', sa.String(length=36), nullable=False), sa.Column('lsn_id', sa.String(length=36), nullable=False), sa.Column('sub_id', sa.String(length=36), nullable=False, unique=True), sa.Column('mac_addr', sa.String(length=32), nullable=False, unique=True), sa.ForeignKeyConstraint(['lsn_id'], ['lsn.lsn_id'], ondelete='CASCADE'), sa.PrimaryKeyConstraint('lsn_port_id')) op.create_table( 'neutron_nsx_network_mappings', sa.Column('neutron_id', sa.String(length=36), nullable=False), sa.Column('nsx_id', sa.String(length=36), nullable=True), sa.ForeignKeyConstraint(['neutron_id'], ['networks.id'], ondelete='CASCADE'), # There might be multiple switches for a neutron network sa.PrimaryKeyConstraint('neutron_id', 'nsx_id'), ) op.create_table( 'neutron_nsx_router_mappings', sa.Column('neutron_id', sa.String(length=36), nullable=False), sa.Column('nsx_id', sa.String(length=36), nullable=True), sa.ForeignKeyConstraint(['neutron_id'], ['routers.id'], ondelete='CASCADE'), sa.PrimaryKeyConstraint('neutron_id'), ) op.create_table( 'neutron_nsx_security_group_mappings', sa.Column('neutron_id', sa.String(length=36), nullable=False), sa.Column('nsx_id', sa.String(length=36), nullable=False), sa.ForeignKeyConstraint(['neutron_id'], ['securitygroups.id'], ondelete='CASCADE'), sa.PrimaryKeyConstraint('neutron_id', 'nsx_id')) op.create_table( 'networkgatewaydevicereferences', sa.Column('id', sa.String(length=36), nullable=False), sa.Column('network_gateway_id', sa.String(length=36), nullable=True), sa.Column('interface_name', sa.String(length=64), nullable=True), sa.ForeignKeyConstraint(['network_gateway_id'], ['networkgateways.id'], ondelete='CASCADE'), sa.PrimaryKeyConstraint('id', 'network_gateway_id', 'interface_name')) op.create_table( 'networkgatewaydevices', sa.Column('tenant_id', sa.String(length=255), nullable=True, index=True), sa.Column('id', sa.String(length=36), nullable=False), sa.Column('nsx_id', sa.String(length=36), nullable=True), sa.Column('name', sa.String(length=255), nullable=True), sa.Column('connector_type', sa.String(length=10), nullable=True), sa.Column('connector_ip', sa.String(length=64), nullable=True), sa.Column('status', sa.String(length=16), nullable=True), sa.PrimaryKeyConstraint('id')) neutron-8.4.0/neutron/db/migration/alembic_migrations/lb_init_ops.py0000664000567000056710000000274013044372736027145 0ustar jenkinsjenkins00000000000000# Copyright 2014 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); 
you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # # Initial operations for the linuxbridge plugin from alembic import op import sqlalchemy as sa def upgrade(): op.create_table( 'network_states', sa.Column('physical_network', sa.String(length=64), nullable=False), sa.Column('vlan_id', sa.Integer(), autoincrement=False, nullable=False), sa.Column('allocated', sa.Boolean(), nullable=False), sa.PrimaryKeyConstraint('physical_network', 'vlan_id')) op.create_table( 'network_bindings', sa.Column('network_id', sa.String(length=36), nullable=False), sa.Column('physical_network', sa.String(length=64), nullable=True), sa.Column('vlan_id', sa.Integer(), nullable=False), sa.ForeignKeyConstraint(['network_id'], ['networks.id'], ondelete='CASCADE'), sa.PrimaryKeyConstraint('network_id')) neutron-8.4.0/neutron/db/migration/alembic_migrations/nec_init_ops.py0000664000567000056710000001044713044372736027310 0ustar  jenkinsjenkins00000000000000# Copyright 2014 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License.
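# NOTE (reading aid): most tables below share one ID-mapping pattern:
# each ofc*mappings table pairs a controller-side identifier ('ofc_id')
# with a neutron-side UUID ('neutron_id'), using 'neutron_id' as the
# primary key plus a unique constraint on 'ofc_id', so that the mapping
# stays one-to-one in both directions.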
# # Initial operations for NEC plugin from alembic import op import sqlalchemy as sa def upgrade(): op.create_table( 'ofcportmappings', sa.Column('ofc_id', sa.String(length=255), nullable=False), sa.Column('neutron_id', sa.String(length=36), nullable=False), sa.PrimaryKeyConstraint('neutron_id'), sa.UniqueConstraint('ofc_id')) op.create_table( 'ofcroutermappings', sa.Column('ofc_id', sa.String(length=255), nullable=False), sa.Column('neutron_id', sa.String(length=36), nullable=False), sa.PrimaryKeyConstraint('neutron_id'), sa.UniqueConstraint('ofc_id')) op.create_table( 'routerproviders', sa.Column('provider', sa.String(length=255), nullable=True), sa.Column('router_id', sa.String(length=36), nullable=False), sa.ForeignKeyConstraint(['router_id'], ['routers.id'], ondelete='CASCADE'), sa.PrimaryKeyConstraint('router_id')) op.create_table( 'ofctenantmappings', sa.Column('ofc_id', sa.String(length=255), nullable=False), sa.Column('neutron_id', sa.String(length=36), nullable=False), sa.PrimaryKeyConstraint('neutron_id'), sa.UniqueConstraint('ofc_id')) op.create_table( 'ofcfiltermappings', sa.Column('ofc_id', sa.String(length=255), nullable=False), sa.Column('neutron_id', sa.String(length=36), nullable=False), sa.PrimaryKeyConstraint('neutron_id'), sa.UniqueConstraint('ofc_id')) op.create_table( 'ofcnetworkmappings', sa.Column('ofc_id', sa.String(length=255), nullable=False), sa.Column('neutron_id', sa.String(length=36), nullable=False), sa.PrimaryKeyConstraint('neutron_id'), sa.UniqueConstraint('ofc_id')) op.create_table( 'packetfilters', sa.Column('tenant_id', sa.String(length=255), nullable=True, index=True), sa.Column('id', sa.String(length=36), nullable=False), sa.Column('name', sa.String(length=255), nullable=True), sa.Column('network_id', sa.String(length=36), nullable=False), sa.Column('priority', sa.Integer(), nullable=False), sa.Column('action', sa.String(length=16), nullable=False), sa.Column('in_port', sa.String(length=36), nullable=True), sa.Column('src_mac', sa.String(length=32), nullable=False), sa.Column('dst_mac', sa.String(length=32), nullable=False), sa.Column('eth_type', sa.Integer(), nullable=False), sa.Column('src_cidr', sa.String(length=64), nullable=False), sa.Column('dst_cidr', sa.String(length=64), nullable=False), sa.Column('protocol', sa.String(length=16), nullable=False), sa.Column('src_port', sa.Integer(), nullable=False), sa.Column('dst_port', sa.Integer(), nullable=False), sa.Column('admin_state_up', sa.Boolean(), nullable=False), sa.Column('status', sa.String(length=16), nullable=False), sa.ForeignKeyConstraint(['network_id'], ['networks.id'], ondelete='CASCADE'), sa.ForeignKeyConstraint(['in_port'], ['ports.id'], ondelete='CASCADE'), sa.PrimaryKeyConstraint('id')) op.create_table( 'portinfos', sa.Column('id', sa.String(length=36), nullable=False), sa.Column('datapath_id', sa.String(length=36), nullable=False), sa.Column('port_no', sa.Integer(), nullable=False), sa.Column('vlan_id', sa.Integer(), nullable=False), sa.Column('mac', sa.String(length=32), nullable=False), sa.ForeignKeyConstraint(['id'], ['ports.id'], ondelete='CASCADE'), sa.PrimaryKeyConstraint('id')) neutron-8.4.0/neutron/db/migration/alembic_migrations/dvr_init_opts.py0000664000567000056710000000507313044372736027531 0ustar jenkinsjenkins00000000000000# Copyright 2015 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # # Initial operations for dvr from alembic import op import sqlalchemy as sa def upgrade(): op.create_table( 'dvr_host_macs', sa.Column('host', sa.String(length=255), nullable=False), sa.Column('mac_address', sa.String(length=32), nullable=False, unique=True), sa.PrimaryKeyConstraint('host') ) op.create_table( 'ml2_dvr_port_bindings', sa.Column('port_id', sa.String(length=36), nullable=False), sa.Column('host', sa.String(length=255), nullable=False), sa.Column('router_id', sa.String(length=36), nullable=True), sa.Column('vif_type', sa.String(length=64), nullable=False), sa.Column('vif_details', sa.String(length=4095), nullable=False, server_default=''), sa.Column('vnic_type', sa.String(length=64), nullable=False, server_default='normal'), sa.Column('profile', sa.String(length=4095), nullable=False, server_default=''), sa.Column(u'status', sa.String(16), nullable=False), sa.ForeignKeyConstraint(['port_id'], ['ports.id'], ondelete='CASCADE'), sa.PrimaryKeyConstraint('port_id', 'host') ) op.create_table( 'csnat_l3_agent_bindings', sa.Column('router_id', sa.String(length=36), nullable=False), sa.Column('l3_agent_id', sa.String(length=36), nullable=False), sa.Column('host_id', sa.String(length=255), nullable=True), sa.Column('csnat_gw_port_id', sa.String(length=36), nullable=True), sa.ForeignKeyConstraint(['l3_agent_id'], ['agents.id'], ondelete='CASCADE'), sa.ForeignKeyConstraint(['router_id'], ['routers.id'], ondelete='CASCADE'), sa.ForeignKeyConstraint(['csnat_gw_port_id'], ['ports.id'], ondelete='CASCADE'), sa.PrimaryKeyConstraint('router_id', 'l3_agent_id') ) neutron-8.4.0/neutron/db/migration/alembic_migrations/brocade_init_ops.py0000664000567000056710000000511213044372736030143 0ustar jenkinsjenkins00000000000000# Copyright 2014 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
# # Initial operations for the Brocade plugin from alembic import op import sqlalchemy as sa def upgrade(): op.create_table( 'brocadenetworks', sa.Column('id', sa.String(length=36), nullable=False), sa.Column('vlan', sa.String(length=10), nullable=True), sa.PrimaryKeyConstraint('id')) op.create_table( 'brocadeports', sa.Column('port_id', sa.String(length=36), nullable=False, server_default=''), sa.Column('network_id', sa.String(length=36), nullable=False), sa.Column('admin_state_up', sa.Boolean(), nullable=False), sa.Column('physical_interface', sa.String(length=36), nullable=True), sa.Column('vlan_id', sa.String(length=36), nullable=True), sa.Column('tenant_id', sa.String(length=36), nullable=True), sa.ForeignKeyConstraint(['network_id'], ['brocadenetworks.id'], ), sa.PrimaryKeyConstraint('port_id')) op.create_table( 'ml2_brocadenetworks', sa.Column('id', sa.String(length=36), nullable=False), sa.Column('vlan', sa.String(length=10), nullable=True), sa.Column('segment_id', sa.String(length=36), nullable=True), sa.Column('network_type', sa.String(length=10), nullable=True), sa.Column('tenant_id', sa.String(length=255), nullable=True, index=True), sa.PrimaryKeyConstraint('id')) op.create_table( 'ml2_brocadeports', sa.Column('id', sa.String(length=36), nullable=False), sa.Column('network_id', sa.String(length=36), nullable=False), sa.Column('admin_state_up', sa.Boolean(), nullable=False), sa.Column('physical_interface', sa.String(length=36), nullable=True), sa.Column('vlan_id', sa.String(length=36), nullable=True), sa.Column('tenant_id', sa.String(length=255), nullable=True, index=True), sa.PrimaryKeyConstraint('id'), sa.ForeignKeyConstraint(['network_id'], ['ml2_brocadenetworks.id'])) neutron-8.4.0/neutron/db/migration/alembic_migrations/metering_init_ops.py0000664000567000056710000000374513044372736030360 0ustar  jenkinsjenkins00000000000000# Copyright 2014 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License.
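# NOTE (explanatory aside): the sa.sql.false() server defaults used
# below compile to a backend-appropriate false literal (for example
# 'false' on PostgreSQL, or 0 on backends without a boolean literal), so
# newly inserted labels default to shared=False and rules to
# excluded=False at the database level, independently of application
# code.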
# # Initial operations for the metering service plugin from alembic import op import sqlalchemy as sa direction = sa.Enum('ingress', 'egress', name='meteringlabels_direction') def create_meteringlabels(): op.create_table( 'meteringlabels', sa.Column('tenant_id', sa.String(length=255), nullable=True, index=True), sa.Column('id', sa.String(length=36), nullable=False), sa.Column('name', sa.String(length=255), nullable=True), sa.Column('description', sa.String(length=1024), nullable=True), sa.Column('shared', sa.Boolean(), server_default=sa.sql.false(), nullable=True), sa.PrimaryKeyConstraint('id')) def upgrade(): create_meteringlabels() op.create_table( 'meteringlabelrules', sa.Column('id', sa.String(length=36), nullable=False), sa.Column('direction', direction, nullable=True), sa.Column('remote_ip_prefix', sa.String(length=64), nullable=True), sa.Column('metering_label_id', sa.String(length=36), nullable=False), sa.Column('excluded', sa.Boolean(), nullable=True, server_default=sa.sql.false()), sa.ForeignKeyConstraint(['metering_label_id'], ['meteringlabels.id'], ondelete='CASCADE'), sa.PrimaryKeyConstraint('id')) neutron-8.4.0/neutron/db/migration/alembic_migrations/agent_init_ops.py0000664000567000056710000000357613044372736027656 0ustar jenkinsjenkins00000000000000# Copyright 2014 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # # Initial operations for agent management extension # This module only manages the 'agents' table. Binding tables are created # in the modules for relevant resources from alembic import op import sqlalchemy as sa def upgrade(): op.create_table( 'agents', sa.Column('id', sa.String(length=36), nullable=False), sa.Column('agent_type', sa.String(length=255), nullable=False), sa.Column('binary', sa.String(length=255), nullable=False), sa.Column('topic', sa.String(length=255), nullable=False), sa.Column('host', sa.String(length=255), nullable=False), sa.Column('admin_state_up', sa.Boolean(), nullable=False, server_default=sa.sql.true()), sa.Column('created_at', sa.DateTime(), nullable=False), sa.Column('started_at', sa.DateTime(), nullable=False), sa.Column('heartbeat_timestamp', sa.DateTime(), nullable=False), sa.Column('description', sa.String(length=255), nullable=True), sa.Column('configurations', sa.String(length=4095), nullable=False), sa.Column('load', sa.Integer(), server_default='0', nullable=False), sa.PrimaryKeyConstraint('id'), sa.UniqueConstraint('agent_type', 'host', name='uniq_agents0agent_type0host')) neutron-8.4.0/neutron/db/migration/alembic_migrations/loadbalancer_init_ops.py0000664000567000056710000001561113044372736031160 0ustar jenkinsjenkins00000000000000# Copyright 2014 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # # Initial schema operations for the load balancer service plugin from alembic import op import sqlalchemy as sa protocols = sa.Enum('HTTP', 'HTTPS', 'TCP', name='lb_protocols') session_persistence_type = sa.Enum('SOURCE_IP', 'HTTP_COOKIE', 'APP_COOKIE', name='sesssionpersistences_type') lb_methods = sa.Enum('ROUND_ROBIN', 'LEAST_CONNECTIONS', 'SOURCE_IP', name='pools_lb_method') health_monitor_type = sa.Enum('PING', 'TCP', 'HTTP', 'HTTPS', name='healthmontiors_type') def upgrade(): op.create_table( 'healthmonitors', sa.Column('tenant_id', sa.String(length=255), nullable=True), sa.Column('id', sa.String(length=36), nullable=False), sa.Column('type', health_monitor_type, nullable=False), sa.Column('delay', sa.Integer(), nullable=False), sa.Column('timeout', sa.Integer(), nullable=False), sa.Column('max_retries', sa.Integer(), nullable=False), sa.Column('http_method', sa.String(length=16), nullable=True), sa.Column('url_path', sa.String(length=255), nullable=True), sa.Column('expected_codes', sa.String(length=64), nullable=True), sa.Column('admin_state_up', sa.Boolean(), nullable=False), sa.PrimaryKeyConstraint('id')) op.create_table( 'vips', sa.Column('tenant_id', sa.String(length=255), nullable=True), sa.Column('id', sa.String(length=36), nullable=False), sa.Column('status', sa.String(length=16), nullable=False), sa.Column('status_description', sa.String(length=255), nullable=True), sa.Column('name', sa.String(length=255), nullable=True), sa.Column('description', sa.String(length=255), nullable=True), sa.Column('port_id', sa.String(length=36), nullable=True), sa.Column('protocol_port', sa.Integer(), nullable=False), sa.Column('protocol', protocols, nullable=False), sa.Column('pool_id', sa.String(length=36), nullable=False), sa.Column('admin_state_up', sa.Boolean(), nullable=False), sa.Column('connection_limit', sa.Integer(), nullable=True), sa.ForeignKeyConstraint(['port_id'], ['ports.id'], ), sa.PrimaryKeyConstraint('id'), sa.UniqueConstraint('pool_id')) op.create_table( 'pools', sa.Column('tenant_id', sa.String(length=255), nullable=True), sa.Column('id', sa.String(length=36), nullable=False), sa.Column('status', sa.String(length=16), nullable=False), sa.Column('status_description', sa.String(length=255), nullable=True), sa.Column('vip_id', sa.String(length=36), nullable=True), sa.Column('name', sa.String(length=255), nullable=True), sa.Column('description', sa.String(length=255), nullable=True), sa.Column('subnet_id', sa.String(length=36), nullable=False), sa.Column('protocol', protocols, nullable=False), sa.Column('lb_method', lb_methods, nullable=False), sa.Column('admin_state_up', sa.Boolean(), nullable=False), sa.ForeignKeyConstraint(['vip_id'], ['vips.id'], ), sa.PrimaryKeyConstraint('id')) op.create_table( 'sessionpersistences', sa.Column('vip_id', sa.String(length=36), nullable=False), sa.Column('type', session_persistence_type, nullable=False), sa.Column('cookie_name', sa.String(length=1024), nullable=True), sa.ForeignKeyConstraint(['vip_id'], ['vips.id'], ), sa.PrimaryKeyConstraint('vip_id')) op.create_table( 'poolloadbalanceragentbindings', sa.Column('pool_id', 
sa.String(length=36), nullable=False), sa.Column('agent_id', sa.String(length=36), nullable=False), sa.ForeignKeyConstraint(['pool_id'], ['pools.id'], ondelete='CASCADE'), sa.ForeignKeyConstraint(['agent_id'], ['agents.id'], ondelete='CASCADE'), sa.PrimaryKeyConstraint('pool_id')) op.create_table( 'members', sa.Column('tenant_id', sa.String(length=255), nullable=True), sa.Column('id', sa.String(length=36), nullable=False), sa.Column('status', sa.String(length=16), nullable=False), sa.Column('status_description', sa.String(length=255), nullable=True), sa.Column('pool_id', sa.String(length=36), nullable=False), sa.Column('address', sa.String(length=64), nullable=False), sa.Column('protocol_port', sa.Integer(), nullable=False), sa.Column('weight', sa.Integer(), nullable=False), sa.Column('admin_state_up', sa.Boolean(), nullable=False), sa.ForeignKeyConstraint(['pool_id'], ['pools.id'], ), sa.PrimaryKeyConstraint('id'), sa.UniqueConstraint('pool_id', 'address', 'protocol_port', name='uniq_member0pool_id0address0port')) op.create_table( 'poolmonitorassociations', sa.Column('status', sa.String(length=16), nullable=False), sa.Column('status_description', sa.String(length=255), nullable=True), sa.Column('pool_id', sa.String(length=36), nullable=False), sa.Column('monitor_id', sa.String(length=36), nullable=False), sa.ForeignKeyConstraint(['pool_id'], ['pools.id'], ), sa.ForeignKeyConstraint(['monitor_id'], ['healthmonitors.id'], ), sa.PrimaryKeyConstraint('pool_id', 'monitor_id')) op.create_table( 'poolstatisticss', sa.Column('pool_id', sa.String(length=36), nullable=False), sa.Column('bytes_in', sa.BigInteger(), nullable=False), sa.Column('bytes_out', sa.BigInteger(), nullable=False), sa.Column('active_connections', sa.BigInteger(), nullable=False), sa.Column('total_connections', sa.BigInteger(), nullable=False), sa.ForeignKeyConstraint(['pool_id'], ['pools.id'], ), sa.PrimaryKeyConstraint('pool_id')) op.create_table( u'embrane_pool_port', sa.Column(u'pool_id', sa.String(length=36), nullable=False), sa.Column(u'port_id', sa.String(length=36), nullable=False), sa.ForeignKeyConstraint(['pool_id'], [u'pools.id'], name=u'embrane_pool_port_ibfk_1'), sa.ForeignKeyConstraint(['port_id'], [u'ports.id'], name=u'embrane_pool_port_ibfk_2'), sa.PrimaryKeyConstraint(u'pool_id')) neutron-8.4.0/neutron/db/migration/alembic_migrations/versions/0000775000567000056710000000000013044373210026123 5ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/db/migration/alembic_migrations/versions/EXPAND_HEAD0000664000567000056710000000001513044372760027613 0ustar jenkinsjenkins000000000000000e66c5227a8a neutron-8.4.0/neutron/db/migration/alembic_migrations/versions/CONTRACT_HEAD0000664000567000056710000000001413044372760030050 0ustar jenkinsjenkins000000000000004ffceebfcdc neutron-8.4.0/neutron/db/migration/alembic_migrations/versions/README0000664000567000056710000000024313044372736027016 0ustar jenkinsjenkins00000000000000This directory contains the migration scripts for the Neutron project. Please see the README in neutron/db/migration on how to use and generate new migrations. 
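As a quick, non-authoritative reference (the README mentioned above is
the canonical guide), the scripts in this tree are normally driven
through the neutron-db-manage wrapper around alembic, along these lines:

    # apply all expand and contract revisions up to the latest heads
    neutron-db-manage upgrade heads

    # show the revision(s) the database is currently at
    neutron-db-manage current

    # start a new expand migration from a template
    neutron-db-manage revision -m "description of change" --expand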
neutron-8.4.0/neutron/db/migration/alembic_migrations/versions/mitaka/0000775000567000056710000000000013044373210027371 5ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/db/migration/alembic_migrations/versions/mitaka/contract/0000775000567000056710000000000013044373210031206 5ustar jenkinsjenkins00000000000000././@LongLink0000000000000000000000000000017600000000000011221 Lustar 00000000000000neutron-8.4.0/neutron/db/migration/alembic_migrations/versions/mitaka/contract/8a6d8bdae39_migrate_neutron_resources_table.pyneutron-8.4.0/neutron/db/migration/alembic_migrations/versions/mitaka/contract/8a6d8bdae39_migrate_n0000664000567000056710000000643013044372736035017 0ustar jenkinsjenkins00000000000000# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # """standardattributes migration Revision ID: 8a6d8bdae39 Revises: 1b294093239c Create Date: 2015-09-10 03:12:04.012457 """ # revision identifiers, used by Alembic. revision = '8a6d8bdae39' down_revision = '1b294093239c' depends_on = ('32e5974ada25',) from alembic import op import sqlalchemy as sa # basic model of the tables with required field for migration TABLES = ('ports', 'networks', 'subnets', 'subnetpools', 'securitygroups', 'floatingips', 'routers', 'securitygrouprules') TABLE_MODELS = [ (table, sa.Table(table, sa.MetaData(), sa.Column('id', sa.String(length=36), nullable=False), sa.Column('standard_attr_id', sa.BigInteger(), nullable=True))) for table in TABLES ] standardattrs = sa.Table( 'standardattributes', sa.MetaData(), sa.Column('id', sa.BigInteger(), primary_key=True, autoincrement=True), sa.Column('resource_type', sa.String(length=255), nullable=False)) def upgrade(): generate_records_for_existing() for table, model in TABLE_MODELS: # add constraint(s) now that everything is populated on that table. # note that some MariaDB versions will *not* allow the ALTER to # NOT NULL on a column that has an FK constraint, so we set NOT NULL # first, then the FK constraint. op.alter_column(table, 'standard_attr_id', nullable=False, existing_type=sa.BigInteger(), existing_nullable=True, existing_server_default=False) op.create_foreign_key( constraint_name=None, source_table=table, referent_table='standardattributes', local_cols=['standard_attr_id'], remote_cols=['id'], ondelete='CASCADE') op.create_unique_constraint( constraint_name='uniq_%s0standard_attr_id' % table, table_name=table, columns=['standard_attr_id']) def generate_records_for_existing(): session = sa.orm.Session(bind=op.get_bind()) values = [] with session.begin(subtransactions=True): for table, model in TABLE_MODELS: for row in session.query(model): # NOTE(kevinbenton): without this disabled, pylint complains # about a missing 'dml' argument. 
#pylint: disable=no-value-for-parameter res = session.execute( standardattrs.insert().values(resource_type=table)) session.execute( model.update().values( standard_attr_id=res.inserted_primary_key[0]).where( model.c.id == row[0])) # this commit is necessary to allow further operations session.commit() return values ././@LongLink0000000000000000000000000000016400000000000011216 Lustar 00000000000000neutron-8.4.0/neutron/db/migration/alembic_migrations/versions/mitaka/contract/5ffceebfada_rbac_network_external.pyneutron-8.4.0/neutron/db/migration/alembic_migrations/versions/mitaka/contract/5ffceebfada_rbac_netw0000664000567000056710000000501113044372760035276 0ustar jenkinsjenkins00000000000000# Copyright 2015 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # """network_rbac_external Revision ID: 5ffceebfada Revises: c6c112992c9 Create Date: 2015-06-14 13:12:04.012457 """ # revision identifiers, used by Alembic. revision = '5ffceebfada' down_revision = 'c6c112992c9' depends_on = () from alembic import op from oslo_utils import uuidutils import sqlalchemy as sa from neutron.api.v2 import attributes # A simple model of the external network table with only the fields needed for # the migration. external = sa.Table('externalnetworks', sa.MetaData(), sa.Column('network_id', sa.String(length=36), nullable=False)) TENANT_ID_MAX_LEN = attributes.TENANT_ID_MAX_LEN network = sa.Table('networks', sa.MetaData(), sa.Column('id', sa.String(length=36), nullable=False), sa.Column('tenant_id', sa.String(length=TENANT_ID_MAX_LEN))) networkrbacs = sa.Table( 'networkrbacs', sa.MetaData(), sa.Column('id', sa.String(length=36), nullable=False), sa.Column('object_id', sa.String(length=36), nullable=False), sa.Column('tenant_id', sa.String(length=TENANT_ID_MAX_LEN), nullable=True, index=True), sa.Column('target_tenant', sa.String(length=TENANT_ID_MAX_LEN), nullable=False), sa.Column('action', sa.String(length=255), nullable=False)) def upgrade(): op.bulk_insert(networkrbacs, get_values()) def get_values(): session = sa.orm.Session(bind=op.get_bind()) values = [] net_to_tenant_id = {} for row in session.query(network).all(): net_to_tenant_id[row[0]] = row[1] for row in session.query(external).all(): values.append({'id': uuidutils.generate_uuid(), 'object_id': row[0], 'tenant_id': net_to_tenant_id[row[0]], 'target_tenant': '*', 'action': 'access_as_external'}) # this commit appears to be necessary to allow further operations session.commit() return values ././@LongLink0000000000000000000000000000016600000000000011220 Lustar 00000000000000neutron-8.4.0/neutron/db/migration/alembic_migrations/versions/mitaka/contract/e3278ee65050_drop_nec_plugin_tables.pyneutron-8.4.0/neutron/db/migration/alembic_migrations/versions/mitaka/contract/e3278ee65050_drop_nec0000664000567000056710000000222413044372736034500 0ustar jenkinsjenkins00000000000000# Copyright 2016 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # """Drop NEC plugin tables Revision ID: e3278ee65050 Revises: 2b4c2465d44b Create Date: 2016-02-15 18:50:56.870043 """ # revision identifiers, used by Alembic. revision = 'e3278ee65050' down_revision = '2b4c2465d44b' from alembic import op def upgrade(): op.drop_table('ofcnetworkmappings') op.drop_table('ofcportmappings') op.drop_table('ofcroutermappings') op.drop_table('ofcfiltermappings') op.drop_table('ofctenantmappings') op.drop_table('portinfos') op.drop_table('routerproviders') op.drop_table('packetfilters') ././@LongLink0000000000000000000000000000017100000000000011214 Lustar 00000000000000neutron-8.4.0/neutron/db/migration/alembic_migrations/versions/mitaka/contract/2b4c2465d44b_dvr_sheduling_refactoring.pyneutron-8.4.0/neutron/db/migration/alembic_migrations/versions/mitaka/contract/2b4c2465d44b_dvr_shed0000664000567000056710000000534213044372736034560 0ustar  jenkinsjenkins00000000000000# Copyright 2015 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # """DVR scheduling refactoring Revision ID: 2b4c2465d44b Revises: 8a6d8bdae39 Create Date: 2015-12-23 07:39:49.062767 """ # revision identifiers, used by Alembic.
revision = '2b4c2465d44b' down_revision = '8a6d8bdae39' from alembic import op import sqlalchemy as sa ROUTER_ATTR_TABLE = 'router_extra_attributes' ROUTER_BINDING_TABLE = 'routerl3agentbindings' CSNAT_BINDING_TABLE = 'csnat_l3_agent_bindings' def upgrade(): transfer_snat_bindings() op.drop_table(CSNAT_BINDING_TABLE) def transfer_snat_bindings(): router_attr_table = sa.Table(ROUTER_ATTR_TABLE, sa.MetaData(), sa.Column('router_id', sa.String(36)), sa.Column('distributed', sa.Boolean),) csnat_binding = sa.Table(CSNAT_BINDING_TABLE, sa.MetaData(), sa.Column('router_id', sa.String(36)), sa.Column('l3_agent_id', sa.String(36))) router_binding = sa.Table(ROUTER_BINDING_TABLE, sa.MetaData(), sa.Column('router_id', sa.String(36)), sa.Column('l3_agent_id', sa.String(36))) session = sa.orm.Session(bind=op.get_bind()) with session.begin(subtransactions=True): # first delete all bindings for dvr routers from # routerl3agentbindings as this might be bindings with l3 agents # on compute nodes for router_attr in session.query( router_attr_table).filter(router_attr_table.c.distributed): session.execute(router_binding.delete( router_binding.c.router_id == router_attr.router_id)) # now routerl3agentbindings will only contain bindings for snat # portion of the router for csnat_binding in session.query(csnat_binding): session.execute( router_binding.insert().values( router_id=csnat_binding.router_id, l3_agent_id=csnat_binding.l3_agent_id)) # this commit is necessary to allow further operations session.commit() ././@LongLink0000000000000000000000000000015600000000000011217 Lustar 00000000000000neutron-8.4.0/neutron/db/migration/alembic_migrations/versions/mitaka/contract/c6c112992c9_rbac_qos_policy.pyneutron-8.4.0/neutron/db/migration/alembic_migrations/versions/mitaka/contract/c6c112992c9_rbac_qos_0000664000567000056710000000454713044372760034563 0ustar  jenkinsjenkins00000000000000# Copyright 2015 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # """rbac_qos_policy Revision ID: c6c112992c9 Revises: e3278ee65050 Create Date: 2015-11-25 18:45:03.831359 """ from alembic import op from oslo_utils import uuidutils import sqlalchemy as sa from neutron.api.v2 import attributes as attrs from neutron.db import rbac_db_models # revision identifiers, used by Alembic. revision = 'c6c112992c9' down_revision = 'e3278ee65050' depends_on = ('15e43b934f81',) qos_rbacs = sa.Table( 'qospolicyrbacs', sa.MetaData(), sa.Column('id', sa.String(length=36), nullable=False), sa.Column('tenant_id', sa.String(length=attrs.TENANT_ID_MAX_LEN), nullable=True), sa.Column('target_tenant', sa.String(length=attrs.TENANT_ID_MAX_LEN), nullable=False), sa.Column('action', sa.String(length=255), nullable=False), sa.Column('object_id', sa.String(length=36), nullable=False)) # A simple model of the qos_policies table with only the fields needed for # the migration.
qos_policy = sa.Table('qos_policies', sa.MetaData(), sa.Column('id', sa.String(length=36), nullable=False), sa.Column('tenant_id', sa.String(length=attrs.TENANT_ID_MAX_LEN)), sa.Column('shared', sa.Boolean(), nullable=False)) def upgrade(): op.bulk_insert(qos_rbacs, get_values()) op.drop_column('qos_policies', 'shared') def get_values(): session = sa.orm.Session(bind=op.get_bind()) values = [] for row in session.query(qos_policy).filter(qos_policy.c.shared).all(): values.append({'id': uuidutils.generate_uuid(), 'object_id': row[0], 'tenant_id': row[1], 'target_tenant': '*', 'action': rbac_db_models.ACCESS_SHARED}) session.commit() return values ././@LongLink0000000000000000000000000000015400000000000011215 Lustar 00000000000000neutron-8.4.0/neutron/db/migration/alembic_migrations/versions/mitaka/contract/4ffceebfcdc_standard_desc.pyneutron-8.4.0/neutron/db/migration/alembic_migrations/versions/mitaka/contract/4ffceebfcdc_standard_0000664000567000056710000000422613044372736035306 0ustar jenkinsjenkins00000000000000# Copyright 2015 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # """standard_desc Revision ID: 4ffceebfcdc Revises: 5ffceebfada Create Date: 2016-02-10 23:12:04.012457 """ from alembic import op import sqlalchemy as sa from neutron.db import migration # revision identifiers, used by Alembic. revision = '4ffceebfcdc' down_revision = '5ffceebfada' depends_on = ('0e66c5227a8a',) neutron_milestone = [migration.MITAKA] # A simple model of the security groups table with only the fields needed for # the migration. securitygroups = sa.Table('securitygroups', sa.MetaData(), sa.Column('standard_attr_id', sa.BigInteger(), nullable=False), sa.Column('description', sa.String(length=255))) standardattr = sa.Table( 'standardattributes', sa.MetaData(), sa.Column('id', sa.BigInteger(), primary_key=True, autoincrement=True), sa.Column('description', sa.String(length=255))) def upgrade(): migrate_values() op.drop_column('securitygroups', 'description') def migrate_values(): session = sa.orm.Session(bind=op.get_bind()) values = [] for row in session.query(securitygroups): values.append({'id': row[0], 'description': row[1]}) with session.begin(subtransactions=True): for value in values: session.execute( standardattr.update().values( description=value['description']).where( standardattr.c.id == value['id'])) # this commit appears to be necessary to allow further operations session.commit() ././@LongLink0000000000000000000000000000016500000000000011217 Lustar 00000000000000neutron-8.4.0/neutron/db/migration/alembic_migrations/versions/mitaka/contract/1b294093239c_remove_embrane_plugin.pyneutron-8.4.0/neutron/db/migration/alembic_migrations/versions/mitaka/contract/1b294093239c_remove_e0000664000567000056710000000155513044372736034432 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # """Drop embrane plugin table Revision ID: 1b294093239c Revises: 4af11ca47297 Create Date: 2015-10-09 14:07:59.968597 """ # revision identifiers, used by Alembic. revision = '1b294093239c' down_revision = '4af11ca47297' from alembic import op def upgrade(): op.drop_table('embrane_pool_port') neutron-8.4.0/neutron/db/migration/alembic_migrations/versions/mitaka/expand/0000775000567000056710000000000013044373210030650 5ustar jenkinsjenkins00000000000000././@LongLink0000000000000000000000000000016500000000000011217 Lustar 00000000000000neutron-8.4.0/neutron/db/migration/alembic_migrations/versions/mitaka/expand/19f26505c74f_auto_allocated_topology.pyneutron-8.4.0/neutron/db/migration/alembic_migrations/versions/mitaka/expand/19f26505c74f_auto_alloc0000664000567000056710000000320513044372736034476 0ustar jenkinsjenkins00000000000000# Copyright 2015-2016 Hewlett Packard Enterprise Development Company, LP # # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # """ Auto Allocated Topology - aka Get-Me-A-Network Revision ID: 19f26505c74f Revises: 1df244e556f5 Create Date: 2015-11-20 11:27:53.419742 """ from alembic import op import sqlalchemy as sa from sqlalchemy import sql # revision identifiers, used by Alembic. revision = '19f26505c74f' down_revision = '1df244e556f5' def upgrade(): op.create_table( 'auto_allocated_topologies', sa.Column('tenant_id', sa.String(length=255), primary_key=True), sa.Column('network_id', sa.String(length=36), nullable=False), sa.Column('router_id', sa.String(length=36), nullable=True), sa.ForeignKeyConstraint(['network_id'], ['networks.id'], ondelete='CASCADE'), sa.ForeignKeyConstraint(['router_id'], ['routers.id'], ondelete='SET NULL'), ) op.add_column('externalnetworks', sa.Column('is_default', sa.Boolean(), nullable=False, server_default=sql.false())) ././@LongLink0000000000000000000000000000022000000000000011207 Lustar 00000000000000neutron-8.4.0/neutron/db/migration/alembic_migrations/versions/mitaka/expand/659bf3d90664_add_attributes_to_support_external_dns_integration.pyneutron-8.4.0/neutron/db/migration/alembic_migrations/versions/mitaka/expand/659bf3d90664_add_attrib0000664000567000056710000000722113044372736034461 0ustar jenkinsjenkins00000000000000# Copyright 2016 IBM # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # """ Auto Allocated Topology - aka Get-Me-A-Network Revision ID: 19f26505c74f Revises: 1df244e556f5 Create Date: 2015-11-20 11:27:53.419742 """ from alembic import op import sqlalchemy as sa from sqlalchemy import sql # revision identifiers, used by Alembic. revision = '19f26505c74f' down_revision = '1df244e556f5' def upgrade(): op.create_table( 'auto_allocated_topologies', sa.Column('tenant_id', sa.String(length=255), primary_key=True), sa.Column('network_id', sa.String(length=36), nullable=False), sa.Column('router_id', sa.String(length=36), nullable=True), sa.ForeignKeyConstraint(['network_id'], ['networks.id'], ondelete='CASCADE'), sa.ForeignKeyConstraint(['router_id'], ['routers.id'], ondelete='SET NULL'), ) op.add_column('externalnetworks', sa.Column('is_default', sa.Boolean(), nullable=False, server_default=sql.false())) ././@LongLink0000000000000000000000000000022000000000000011207 Lustar 00000000000000neutron-8.4.0/neutron/db/migration/alembic_migrations/versions/mitaka/expand/659bf3d90664_add_attributes_to_support_external_dns_integration.pyneutron-8.4.0/neutron/db/migration/alembic_migrations/versions/mitaka/expand/659bf3d90664_add_attrib0000664000567000056710000000722113044372736034461 0ustar  jenkinsjenkins00000000000000# Copyright 2016 IBM # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # """Add tables and attributes to support external DNS integration Revision ID: 659bf3d90664 Revises: c3a73f615e4 Create Date: 2015-09-11 00:22:47.618593 """ # revision identifiers, used by Alembic. revision = '659bf3d90664' down_revision = 'c3a73f615e4' from alembic import op import sqlalchemy as sa from neutron.extensions import dns def upgrade(): op.create_table('networkdnsdomains', sa.Column('network_id', sa.String(length=36), nullable=False, index=True), sa.Column('dns_domain', sa.String(length=dns.FQDN_MAX_LEN), nullable=False), sa.ForeignKeyConstraint(['network_id'], ['networks.id'], ondelete='CASCADE'), sa.PrimaryKeyConstraint('network_id')) op.create_table('floatingipdnses', sa.Column('floatingip_id', sa.String(length=36), nullable=False, index=True), sa.Column('dns_name', sa.String(length=dns.FQDN_MAX_LEN), nullable=False), sa.Column('dns_domain', sa.String(length=dns.FQDN_MAX_LEN), nullable=False), sa.Column('published_dns_name', sa.String(length=dns.FQDN_MAX_LEN), nullable=False), sa.Column('published_dns_domain', sa.String(length=dns.FQDN_MAX_LEN), nullable=False), sa.ForeignKeyConstraint(['floatingip_id'], ['floatingips.id'], ondelete='CASCADE'), sa.PrimaryKeyConstraint('floatingip_id')) op.create_table('portdnses', sa.Column('port_id', sa.String(length=36), nullable=False, index=True), sa.Column('current_dns_name', sa.String(length=dns.FQDN_MAX_LEN), nullable=False), sa.Column('current_dns_domain', sa.String(length=dns.FQDN_MAX_LEN), nullable=False), sa.Column('previous_dns_name', sa.String(length=dns.FQDN_MAX_LEN), nullable=False), sa.Column('previous_dns_domain', sa.String(length=dns.FQDN_MAX_LEN), nullable=False), sa.ForeignKeyConstraint(['port_id'], ['ports.id'], ondelete='CASCADE'), sa.PrimaryKeyConstraint('port_id')) ././@LongLink0000000000000000000000000000017400000000000011217 Lustar 00000000000000neutron-8.4.0/neutron/db/migration/alembic_migrations/versions/mitaka/expand/c3a73f615e4_add_ip_version_to_address_scope.pyneutron-8.4.0/neutron/db/migration/alembic_migrations/versions/mitaka/expand/c3a73f615e4_add_ip_vers0000664000567000056710000000172013044372736034621 0ustar  jenkinsjenkins00000000000000# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # """Add ip_version to AddressScope Revision ID: c3a73f615e4 Revises: dce3ec7a25c9 Create Date: 2015-10-08 17:34:32.231256 """ # revision identifiers, used by Alembic.
revision = 'c3a73f615e4' down_revision = 'dce3ec7a25c9' from alembic import op import sqlalchemy as sa def upgrade(): op.add_column('address_scopes', sa.Column('ip_version', sa.Integer(), nullable=False)) ././@LongLink0000000000000000000000000000015000000000000011211 Lustar 00000000000000neutron-8.4.0/neutron/db/migration/alembic_migrations/versions/mitaka/expand/ec7fcfbf72ee_network_az.pyneutron-8.4.0/neutron/db/migration/alembic_migrations/versions/mitaka/expand/ec7fcfbf72ee_network_az0000664000567000056710000000172113044372736035203 0ustar jenkinsjenkins00000000000000# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # """Add network availability zone Revision ID: ec7fcfbf72ee Revises: 32e5974ada25 Create Date: 2015-09-17 09:21:51.257579 """ # revision identifiers, used by Alembic. revision = 'ec7fcfbf72ee' down_revision = '32e5974ada25' from alembic import op import sqlalchemy as sa def upgrade(): op.add_column('networks', sa.Column('availability_zone_hints', sa.String(length=255))) ././@LongLink0000000000000000000000000000015600000000000011217 Lustar 00000000000000neutron-8.4.0/neutron/db/migration/alembic_migrations/versions/mitaka/expand/59cb5b6cf4d_availability_zone.pyneutron-8.4.0/neutron/db/migration/alembic_migrations/versions/mitaka/expand/59cb5b6cf4d_availabilit0000664000567000056710000000167713044372736035003 0ustar jenkinsjenkins00000000000000# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # """Add availability zone Revision ID: 59cb5b6cf4d Revises: 34af2b5c5a59 Create Date: 2015-01-20 14:38:47.156574 """ # revision identifiers, used by Alembic. revision = '59cb5b6cf4d' down_revision = '34af2b5c5a59' from alembic import op import sqlalchemy as sa def upgrade(): op.add_column('agents', sa.Column('availability_zone', sa.String(length=255))) ././@LongLink0000000000000000000000000000015500000000000011216 Lustar 00000000000000neutron-8.4.0/neutron/db/migration/alembic_migrations/versions/mitaka/expand/15e43b934f81_rbac_qos_policy.pyneutron-8.4.0/neutron/db/migration/alembic_migrations/versions/mitaka/expand/15e43b934f81_rbac_qos_p0000664000567000056710000000401213044372760034453 0ustar jenkinsjenkins00000000000000# Copyright 2015 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # """rbac_qos_policy Revision ID: 15e43b934f81 Revises: b4caf27aae4 Create Date: 2015-11-25 18:45:03.819115 """ from alembic import op import sqlalchemy as sa from neutron.api.v2 import attributes as attrs # revision identifiers, used by Alembic. revision = '15e43b934f81' down_revision = 'b4caf27aae4' def upgrade(): op.create_table('qospolicyrbacs', sa.Column('id', sa.String(length=36), nullable=False), sa.Column('tenant_id', sa.String(length=attrs.TENANT_ID_MAX_LEN), nullable=True), sa.Column('target_tenant', sa.String(length=attrs.TENANT_ID_MAX_LEN), nullable=False), sa.Column('action', sa.String(length=255), nullable=False), sa.Column('object_id', sa.String(length=36), nullable=False), sa.ForeignKeyConstraint(['object_id'], ['qos_policies.id'], ondelete='CASCADE'), sa.PrimaryKeyConstraint('id'), sa.UniqueConstraint('target_tenant', 'object_id', 'action')) op.create_index(op.f('ix_qospolicyrbacs_tenant_id'), 'qospolicyrbacs', ['tenant_id'], unique=False) ././@LongLink0000000000000000000000000000015100000000000011212 Lustar 00000000000000neutron-8.4.0/neutron/db/migration/alembic_migrations/versions/mitaka/expand/2f9e956e7532_tag_support.pyneutron-8.4.0/neutron/db/migration/alembic_migrations/versions/mitaka/expand/2f9e956e7532_tag_suppor0000664000567000056710000000223213044372736034550 0ustar jenkinsjenkins00000000000000# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # """tag support Revision ID: 2f9e956e7532 Revises: 31ed664953e6 Create Date: 2016-01-21 08:11:49.604182 """ # revision identifiers, used by Alembic. revision = '2f9e956e7532' down_revision = '31ed664953e6' from alembic import op import sqlalchemy as sa def upgrade(): op.create_table( 'tags', sa.Column('standard_attr_id', sa.BigInteger(), sa.ForeignKey('standardattributes.id', ondelete='CASCADE'), nullable=False, primary_key=True), sa.Column('tag', sa.String(length=60), nullable=False, primary_key=True) ) ././@LongLink0000000000000000000000000000016700000000000011221 Lustar 00000000000000neutron-8.4.0/neutron/db/migration/alembic_migrations/versions/mitaka/expand/b4caf27aae4_add_bgp_dragent_model_data.pyneutron-8.4.0/neutron/db/migration/alembic_migrations/versions/mitaka/expand/b4caf27aae4_add_bgp_dra0000664000567000056710000000264113044372736034763 0ustar jenkinsjenkins00000000000000# Copyright 2016 Huawei Technologies India Pvt. Ltd. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # """add_bgp_dragent_model_data Revision ID: b4caf27aae4 Revises: 15be73214821 Create Date: 2015-08-20 17:05:31.038704 """ # revision identifiers, used by Alembic. revision = 'b4caf27aae4' down_revision = '15be73214821' from alembic import op import sqlalchemy as sa def upgrade(): op.create_table( 'bgp_speaker_dragent_bindings', sa.Column('agent_id', sa.String(length=36), primary_key=True), sa.Column('bgp_speaker_id', sa.String(length=36), nullable=False), sa.ForeignKeyConstraint(['agent_id'], ['agents.id'], ondelete='CASCADE'), sa.ForeignKeyConstraint(['bgp_speaker_id'], ['bgp_speakers.id'], ondelete='CASCADE'), ) ././@LongLink0000000000000000000000000000017200000000000011215 Lustar 00000000000000neutron-8.4.0/neutron/db/migration/alembic_migrations/versions/mitaka/expand/13cfb89f881a_add_is_default_to_subnetpool.pyneutron-8.4.0/neutron/db/migration/alembic_migrations/versions/mitaka/expand/13cfb89f881a_add_is_def0000664000567000056710000000216513044372736034563 0ustar jenkinsjenkins00000000000000# Copyright 2015 Cisco Systems # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # """add is_default to subnetpool Revision ID: 13cfb89f881a Revises: 59cb5b6cf4d Create Date: 2015-09-30 15:58:31.170153 """ # revision identifiers, used by Alembic. revision = '13cfb89f881a' down_revision = '59cb5b6cf4d' from alembic import op import sqlalchemy as sa from sqlalchemy import sql def upgrade(): op.add_column('subnetpools', sa.Column('is_default', sa.Boolean(), server_default=sql.false(), nullable=False)) ././@LongLink0000000000000000000000000000016700000000000011221 Lustar 00000000000000neutron-8.4.0/neutron/db/migration/alembic_migrations/versions/mitaka/expand/0e66c5227a8a_add_desc_to_standard_attr.pyneutron-8.4.0/neutron/db/migration/alembic_migrations/versions/mitaka/expand/0e66c5227a8a_add_desc_t0000664000567000056710000000212113044372736034471 0ustar jenkinsjenkins00000000000000# Copyright 2016 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
# """Add desc to standard attr table Revision ID: 0e66c5227a8a Revises: 3894bccad37f Create Date: 2016-02-02 10:50:34.238563 """ from alembic import op import sqlalchemy as sa from neutron.db import migration # revision identifiers, used by Alembic. revision = '0e66c5227a8a' down_revision = '3894bccad37f' neutron_milestone = [migration.MITAKA] def upgrade(): op.add_column('standardattributes', sa.Column('description', sa.String(length=255), nullable=True)) ././@LongLink0000000000000000000000000000020600000000000011213 Lustar 00000000000000neutron-8.4.0/neutron/db/migration/alembic_migrations/versions/mitaka/expand/1df244e556f5_add_unique_ha_router_agent_port_bindings.pyneutron-8.4.0/neutron/db/migration/alembic_migrations/versions/mitaka/expand/1df244e556f5_add_unique0000664000567000056710000000462613044372760034556 0ustar jenkinsjenkins00000000000000# Copyright 2015 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # """add_unique_ha_router_agent_port_bindings Revision ID: 1df244e556f5 Revises: 34af2b5c5a59 Create Date: 2015-10-02 18:06:01.696742 """ # revision identifiers, used by Alembic. revision = '1df244e556f5' down_revision = '659bf3d90664' from alembic import op import sqlalchemy as sa from neutron._i18n import _ from neutron.common import exceptions UNIQUE_NAME = 'uniq_ha_router_agent_port_bindings0port_id0l3_agent_id' TABLE_NAME = 'ha_router_agent_port_bindings' ha_router_agent_port_bindings = sa.Table( 'ha_router_agent_port_bindings', sa.MetaData(), sa.Column('port_id', sa.String(36)), sa.Column('router_id', sa.String(36)), sa.Column('l3_agent_id', sa.String(36))) class DuplicateL3HARouterAgentPortBinding(exceptions.Conflict): message = _("Duplicate L3HARouterAgentPortBinding is created for " "router(s) %(router)s. Database cannot be upgraded. Please, " "remove all duplicates before upgrading the database.") def upgrade(): op.create_unique_constraint(UNIQUE_NAME, TABLE_NAME, ['router_id', 'l3_agent_id']) def check_sanity(connection): res = get_duplicate_l3_ha_port_bindings(connection) if res: raise DuplicateL3HARouterAgentPortBinding(router=", ".join(res)) def get_duplicate_l3_ha_port_bindings(connection): insp = sa.engine.reflection.Inspector.from_engine(connection) if 'ha_router_agent_port_bindings' not in insp.get_table_names(): return {} session = sa.orm.Session(bind=connection.connect()) query = (session.query(ha_router_agent_port_bindings.c.router_id) .group_by(ha_router_agent_port_bindings.c.router_id, ha_router_agent_port_bindings.c.l3_agent_id) .having(sa.func.count() > 1)).all() return [q[0] for q in query] ././@LongLink0000000000000000000000000000017500000000000011220 Lustar 00000000000000neutron-8.4.0/neutron/db/migration/alembic_migrations/versions/mitaka/expand/3894bccad37f_add_timestamp_to_base_resources.pyneutron-8.4.0/neutron/db/migration/alembic_migrations/versions/mitaka/expand/3894bccad37f_add_timest0000664000567000056710000000210513044372736034706 0ustar jenkinsjenkins00000000000000# Copyright 2015 HuaWei Technologies. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # """add_timestamp_to_base_resources Revision ID: 3894bccad37f Revises: 2f9e956e7532 Create Date: 2016-03-01 04:19:58.852612 """ # revision identifiers, used by Alembic. revision = '3894bccad37f' down_revision = '2f9e956e7532' from alembic import op import sqlalchemy as sa def upgrade(): for column_name in ['created_at', 'updated_at']: op.add_column( 'standardattributes', sa.Column(column_name, sa.DateTime(), nullable=True) ) ././@LongLink0000000000000000000000000000017100000000000011214 Lustar 00000000000000neutron-8.4.0/neutron/db/migration/alembic_migrations/versions/mitaka/expand/32e5974ada25_add_neutron_resources_table.pyneutron-8.4.0/neutron/db/migration/alembic_migrations/versions/mitaka/expand/32e5974ada25_add_neutro0000664000567000056710000000255613044372736034562 0ustar jenkinsjenkins00000000000000# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # """Add standard attribute table Revision ID: 32e5974ada25 Revises: 13cfb89f881a Create Date: 2015-09-10 00:22:47.618593 """ # revision identifiers, used by Alembic. revision = '32e5974ada25' down_revision = '13cfb89f881a' from alembic import op import sqlalchemy as sa TABLES = ('ports', 'networks', 'subnets', 'subnetpools', 'securitygroups', 'floatingips', 'routers', 'securitygrouprules') def upgrade(): op.create_table( 'standardattributes', sa.Column('id', sa.BigInteger(), autoincrement=True), sa.Column('resource_type', sa.String(length=255), nullable=False), sa.PrimaryKeyConstraint('id') ) for table in TABLES: op.add_column(table, sa.Column('standard_attr_id', sa.BigInteger(), nullable=True)) ././@LongLink0000000000000000000000000000016000000000000011212 Lustar 00000000000000neutron-8.4.0/neutron/db/migration/alembic_migrations/versions/mitaka/expand/15be73214821_add_bgp_model_data.pyneutron-8.4.0/neutron/db/migration/alembic_migrations/versions/mitaka/expand/15be73214821_add_bgp_mo0000664000567000056710000000730513044372736034337 0ustar jenkinsjenkins00000000000000# Copyright 2016 Hewlett Packard Enterprise Development Company LP # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. # """add dynamic routing model data Revision ID: 15be73214821 Revises: 19f26505c74f Create Date: 2015-07-29 13:16:08.604175 """ # revision identifiers, used by Alembic. revision = '15be73214821' down_revision = '19f26505c74f' from alembic import op import sqlalchemy as sa def upgrade(): op.create_table( 'bgp_speakers', sa.Column('id', sa.String(length=36), nullable=False), sa.Column('name', sa.String(length=255), nullable=False), sa.Column('local_as', sa.Integer, nullable=False, autoincrement=False), sa.Column('ip_version', sa.Integer, nullable=False, autoincrement=False), sa.Column('tenant_id', sa.String(length=255), nullable=True, index=True), sa.Column('advertise_floating_ip_host_routes', sa.Boolean(), nullable=False), sa.Column('advertise_tenant_networks', sa.Boolean(), nullable=False), sa.PrimaryKeyConstraint('id') ) op.create_table( 'bgp_peers', sa.Column('id', sa.String(length=36), nullable=False), sa.Column('name', sa.String(length=255), nullable=False), sa.Column('auth_type', sa.String(length=16), nullable=False), sa.Column('password', sa.String(length=255), nullable=True), sa.Column('peer_ip', sa.String(length=64), nullable=False), sa.Column('remote_as', sa.Integer, nullable=False, autoincrement=False), sa.Column('tenant_id', sa.String(length=255), nullable=True, index=True), sa.PrimaryKeyConstraint('id') ) op.create_table( 'bgp_speaker_network_bindings', sa.Column('bgp_speaker_id', sa.String(length=36), nullable=False), sa.Column('network_id', sa.String(length=36), nullable=True), sa.Column('ip_version', sa.Integer, nullable=False, autoincrement=False), sa.ForeignKeyConstraint(['bgp_speaker_id'], ['bgp_speakers.id'], ondelete='CASCADE'), sa.ForeignKeyConstraint(['network_id'], ['networks.id'], ondelete='CASCADE'), sa.PrimaryKeyConstraint('network_id', 'bgp_speaker_id', 'ip_version') ) op.create_table( 'bgp_speaker_peer_bindings', sa.Column('bgp_speaker_id', sa.String(length=36), nullable=False), sa.Column('bgp_peer_id', sa.String(length=36), nullable=False), sa.ForeignKeyConstraint(['bgp_speaker_id'], ['bgp_speakers.id'], ondelete='CASCADE'), sa.ForeignKeyConstraint(['bgp_peer_id'], ['bgp_peers.id'], ondelete='CASCADE'), sa.PrimaryKeyConstraint('bgp_speaker_id', 'bgp_peer_id') ) ././@LongLink0000000000000000000000000000020600000000000011213 Lustar 00000000000000neutron-8.4.0/neutron/db/migration/alembic_migrations/versions/mitaka/expand/31ed664953e6_add_resource_versions_row_to_agent_table.pyneutron-8.4.0/neutron/db/migration/alembic_migrations/versions/mitaka/expand/31ed664953e6_add_resour0000664000567000056710000000177213044372736034513 0ustar jenkinsjenkins00000000000000# Copyright 2016 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # """Add resource_versions row to agent table Revision ID: 31ed664953e6 Revises: 15e43b934f81 Create Date: 2016-01-15 13:41:30.016915 """ # revision identifiers, used by Alembic. 
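# A minimal post-upgrade sanity check for the column added below (a
# sketch; `engine` stands for any configured SQLAlchemy engine and is
# not provided by this module):
#
#     import sqlalchemy as sa
#     insp = sa.engine.reflection.Inspector.from_engine(engine)
#     assert 'resource_versions' in [
#         col['name'] for col in insp.get_columns('agents')]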
revision = '31ed664953e6' down_revision = '15e43b934f81' from alembic import op import sqlalchemy as sa def upgrade(): op.add_column('agents', sa.Column('resource_versions', sa.String(length=8191))) ././@LongLink0000000000000000000000000000014700000000000011217 Lustar 00000000000000neutron-8.4.0/neutron/db/migration/alembic_migrations/versions/mitaka/expand/dce3ec7a25c9_router_az.pyneutron-8.4.0/neutron/db/migration/alembic_migrations/versions/mitaka/expand/dce3ec7a25c9_router_az.0000664000567000056710000000173713044372736034750 0ustar jenkinsjenkins00000000000000# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # """Add router availability zone Revision ID: dce3ec7a25c9 Revises: ec7fcfbf72ee Create Date: 2015-09-17 09:36:17.468901 """ # revision identifiers, used by Alembic. revision = 'dce3ec7a25c9' down_revision = 'ec7fcfbf72ee' from alembic import op import sqlalchemy as sa def upgrade(): op.add_column('router_extra_attributes', sa.Column('availability_zone_hints', sa.String(length=255))) neutron-8.4.0/neutron/db/migration/alembic_migrations/versions/liberty/0000775000567000056710000000000013044373210027575 5ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/db/migration/alembic_migrations/versions/liberty/contract/0000775000567000056710000000000013044373210031412 5ustar jenkinsjenkins00000000000000././@LongLink0000000000000000000000000000017500000000000011220 Lustar 00000000000000neutron-8.4.0/neutron/db/migration/alembic_migrations/versions/liberty/contract/4af11ca47297_drop_cisco_monolithic_tables.pyneutron-8.4.0/neutron/db/migration/alembic_migrations/versions/liberty/contract/4af11ca47297_drop_ci0000664000567000056710000000307513044372736034612 0ustar jenkinsjenkins00000000000000# Copyright 2015 Cisco Systems, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # """Drop cisco monolithic tables Revision ID: 4af11ca47297 Revises: 11926bcfe72d Create Date: 2015-08-13 08:01:19.709839 """ from alembic import op from neutron.db import migration # revision identifiers, used by Alembic. 
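# Contract revisions such as this one drop data irreversibly, so the
# statements are worth previewing first; alembic-style offline mode
# prints the DDL instead of executing it -- a sketch, assuming the --sql
# option of the Mitaka-era CLI and a standard neutron.conf:
#
#     neutron-db-manage --config-file /etc/neutron/neutron.conf \
#         upgrade 4af11ca47297 --sql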
revision = '4af11ca47297' down_revision = '11926bcfe72d' # milestone identifier, used by neutron-db-manage neutron_milestone = [migration.LIBERTY] def upgrade(): op.drop_table('cisco_n1kv_port_bindings') op.drop_table('cisco_n1kv_network_bindings') op.drop_table('cisco_n1kv_multi_segments') op.drop_table('cisco_provider_networks') op.drop_table('cisco_n1kv_trunk_segments') op.drop_table('cisco_n1kv_vmnetworks') op.drop_table('cisco_n1kv_profile_bindings') op.drop_table('cisco_qos_policies') op.drop_table('cisco_credentials') op.drop_table('cisco_n1kv_vlan_allocations') op.drop_table('cisco_n1kv_vxlan_allocations') op.drop_table('cisco_network_profiles') op.drop_table('cisco_policy_profiles') ././@LongLink0000000000000000000000000000016300000000000011215 Lustar 00000000000000neutron-8.4.0/neutron/db/migration/alembic_migrations/versions/liberty/contract/2a16083502f3_metaplugin_removal.pyneutron-8.4.0/neutron/db/migration/alembic_migrations/versions/liberty/contract/2a16083502f3_metaplu0000664000567000056710000000165613044372736034474 0ustar jenkinsjenkins00000000000000# Copyright 2015 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # """Metaplugin removal Revision ID: 2a16083502f3 Revises: 5498d17be016 Create Date: 2015-06-16 09:11:10.488566 """ # revision identifiers, used by Alembic. revision = '2a16083502f3' down_revision = '5498d17be016' from alembic import op def upgrade(): op.drop_table('networkflavors') op.drop_table('routerflavors') ././@LongLink0000000000000000000000000000016700000000000011221 Lustar 00000000000000neutron-8.4.0/neutron/db/migration/alembic_migrations/versions/liberty/contract/5498d17be016_drop_legacy_ovs_and_lb.pyneutron-8.4.0/neutron/db/migration/alembic_migrations/versions/liberty/contract/5498d17be016_drop_le0000664000567000056710000000215313044372736034541 0ustar jenkinsjenkins00000000000000# Copyright 2015 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # """Drop legacy OVS and LB plugin tables Revision ID: 5498d17be016 Revises: 4ffceebfada Create Date: 2015-06-25 14:08:30.984419 """ # revision identifiers, used by Alembic. 
revision = '5498d17be016' down_revision = '4ffceebfada' from alembic import op def upgrade(): op.drop_table('ovs_network_bindings') op.drop_table('ovs_vlan_allocations') op.drop_table('network_bindings') op.drop_table('ovs_tunnel_allocations') op.drop_table('network_states') op.drop_table('ovs_tunnel_endpoints') ././@LongLink0000000000000000000000000000015400000000000011215 Lustar 00000000000000neutron-8.4.0/neutron/db/migration/alembic_migrations/versions/liberty/contract/4ffceebfada_rbac_network.pyneutron-8.4.0/neutron/db/migration/alembic_migrations/versions/liberty/contract/4ffceebfada_rbac_net0000664000567000056710000000467413044372736035333 0ustar jenkinsjenkins00000000000000# Copyright 2015 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # """network_rbac Revision ID: 4ffceebfada Revises: 30018084ec99 Create Date: 2015-06-14 13:12:04.012457 """ # revision identifiers, used by Alembic. revision = '4ffceebfada' down_revision = '30018084ec99' depends_on = ('8675309a5c4f',) from alembic import op from oslo_utils import uuidutils import sqlalchemy as sa # A simple model of the networks table with only the fields needed for # the migration. network = sa.Table('networks', sa.MetaData(), sa.Column('id', sa.String(length=36), nullable=False), sa.Column('tenant_id', sa.String(length=255)), sa.Column('shared', sa.Boolean(), nullable=False)) networkrbacs = sa.Table( 'networkrbacs', sa.MetaData(), sa.Column('id', sa.String(length=36), nullable=False), sa.Column('object_id', sa.String(length=36), nullable=False), sa.Column('tenant_id', sa.String(length=255), nullable=True, index=True), sa.Column('target_tenant', sa.String(length=255), nullable=False), sa.Column('action', sa.String(length=255), nullable=False)) def upgrade(): op.bulk_insert(networkrbacs, get_values()) op.drop_column('networks', 'shared') # the shared column on subnets was just an internal representation of the # shared status of the network it was related to. This is now handled by # other logic so we just drop it. op.drop_column('subnets', 'shared') def get_values(): session = sa.orm.Session(bind=op.get_bind()) values = [] for row in session.query(network).filter(network.c.shared).all(): values.append({'id': uuidutils.generate_uuid(), 'object_id': row[0], 'tenant_id': row[1], 'target_tenant': '*', 'action': 'access_as_shared'}) # this commit appears to be necessary to allow further operations session.commit() return values ././@LongLink0000000000000000000000000000017300000000000011216 Lustar 00000000000000neutron-8.4.0/neutron/db/migration/alembic_migrations/versions/liberty/contract/11926bcfe72d_add_geneve_ml2_type_driver.pyneutron-8.4.0/neutron/db/migration/alembic_migrations/versions/liberty/contract/11926bcfe72d_add_gen0000664000567000056710000000322413044372760034627 0ustar jenkinsjenkins00000000000000# Copyright 2015 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
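# The fixed drop list below assumes all six legacy tables exist, which
# holds for any schema built from the 'kilo' baseline; a more defensive
# variant (a sketch, not what this revision does) could reuse the helper
# from neutron.db.migration:
#
#     from neutron.db import migration
#     for table in ('ovs_network_bindings', 'ovs_vlan_allocations'):
#         migration.drop_table_if_exists(table)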
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # """add geneve ml2 type driver Revision ID: 11926bcfe72d Revises: 2e5352a0ad4d Create Date: 2015-08-27 19:56:16.356522 """ # revision identifiers, used by Alembic. revision = '11926bcfe72d' down_revision = '2e5352a0ad4d' from alembic import op import sqlalchemy as sa def upgrade(): op.create_table( 'ml2_geneve_allocations', sa.Column('geneve_vni', sa.Integer(), autoincrement=False, nullable=False), sa.Column('allocated', sa.Boolean(), server_default=sa.sql.false(), nullable=False), sa.PrimaryKeyConstraint('geneve_vni'), ) op.create_index(op.f('ix_ml2_geneve_allocations_allocated'), 'ml2_geneve_allocations', ['allocated'], unique=False) op.create_table( 'ml2_geneve_endpoints', sa.Column('ip_address', sa.String(length=64), nullable=False), sa.Column('host', sa.String(length=255), nullable=True), sa.PrimaryKeyConstraint('ip_address'), sa.UniqueConstraint('host', name='unique_ml2_geneve_endpoints0host'), ) ././@LongLink0000000000000000000000000000017100000000000011214 Lustar 00000000000000neutron-8.4.0/neutron/db/migration/alembic_migrations/versions/liberty/contract/2e5352a0ad4d_add_missing_foreign_keys.pyneutron-8.4.0/neutron/db/migration/alembic_migrations/versions/liberty/contract/2e5352a0ad4d_add_mis0000664000567000056710000000232713044372760034635 0ustar jenkinsjenkins00000000000000# Copyright 2015 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # """Add missing foreign keys Revision ID: 2e5352a0ad4d Revises: 2a16083502f3 Create Date: 2015-08-20 12:43:09.110427 """ # revision identifiers, used by Alembic. revision = '2e5352a0ad4d' down_revision = '2a16083502f3' from alembic import op from sqlalchemy.engine import reflection from neutron.db import migration TABLE_NAME = 'flavorserviceprofilebindings' def upgrade(): inspector = reflection.Inspector.from_engine(op.get_bind()) fk_constraints = inspector.get_foreign_keys(TABLE_NAME) migration.remove_foreign_keys(TABLE_NAME, fk_constraints) migration.create_foreign_keys(TABLE_NAME, fk_constraints) ././@LongLink0000000000000000000000000000015000000000000011211 Lustar 00000000000000neutron-8.4.0/neutron/db/migration/alembic_migrations/versions/liberty/contract/30018084ec99_initial.pyneutron-8.4.0/neutron/db/migration/alembic_migrations/versions/liberty/contract/30018084ec99_initial0000664000567000056710000000160013044372736034460 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # """Initial no-op Liberty contract rule. Revision ID: 30018084ec99 Revises: kilo Create Date: 2015-06-22 00:00:00.000000 """ from neutron.db.migration import cli # revision identifiers, used by Alembic. revision = '30018084ec99' down_revision = 'kilo' branch_labels = (cli.CONTRACT_BRANCH,) def upgrade(): pass neutron-8.4.0/neutron/db/migration/alembic_migrations/versions/liberty/expand/0000775000567000056710000000000013044373210031054 5ustar jenkinsjenkins00000000000000././@LongLink0000000000000000000000000000016000000000000011212 Lustar 00000000000000neutron-8.4.0/neutron/db/migration/alembic_migrations/versions/liberty/expand/9859ac9c136_quota_reservations.pyneutron-8.4.0/neutron/db/migration/alembic_migrations/versions/liberty/expand/9859ac9c136_quota_rese0000664000567000056710000000311513044372736034652 0ustar jenkinsjenkins00000000000000# Copyright 2015 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # """quota_reservations Revision ID: 9859ac9c136 Revises: 48153cb5f051 Create Date: 2015-03-11 06:40:56.775075 """ # revision identifiers, used by Alembic. revision = '9859ac9c136' down_revision = '48153cb5f051' from alembic import op import sqlalchemy as sa def upgrade(): op.create_table( 'reservations', sa.Column('id', sa.String(length=36), nullable=False), sa.Column('tenant_id', sa.String(length=255), nullable=True), sa.Column('expiration', sa.DateTime(), nullable=True), sa.PrimaryKeyConstraint('id')) op.create_table( 'resourcedeltas', sa.Column('resource', sa.String(length=255), nullable=False), sa.Column('reservation_id', sa.String(length=36), nullable=False), sa.Column('amount', sa.Integer(), nullable=True), sa.ForeignKeyConstraint(['reservation_id'], ['reservations.id'], ondelete='CASCADE'), sa.PrimaryKeyConstraint('resource', 'reservation_id')) ././@LongLink0000000000000000000000000000020200000000000011207 Lustar 00000000000000neutron-8.4.0/neutron/db/migration/alembic_migrations/versions/liberty/expand/1b4c6e320f79_address_scope_support_in_subnetpool.pyneutron-8.4.0/neutron/db/migration/alembic_migrations/versions/liberty/expand/1b4c6e320f79_address_s0000664000567000056710000000211613044372736034601 0ustar jenkinsjenkins00000000000000# Copyright 2015 Huawei Technologies India Pvt. Ltd. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # """address scope support in subnetpool Revision ID: 1b4c6e320f79 Revises: 1c844d1677f7 Create Date: 2015-07-03 09:48:39.491058 """ # revision identifiers, used by Alembic. revision = '1b4c6e320f79' down_revision = '1c844d1677f7' from alembic import op import sqlalchemy as sa def upgrade(): op.add_column('subnetpools', sa.Column('address_scope_id', sa.String(length=36), nullable=True)) ././@LongLink0000000000000000000000000000015500000000000011216 Lustar 00000000000000neutron-8.4.0/neutron/db/migration/alembic_migrations/versions/liberty/expand/48153cb5f051_qos_db_changes.pyneutron-8.4.0/neutron/db/migration/alembic_migrations/versions/liberty/expand/48153cb5f051_qos_db_ch0000775000567000056710000000476113044372760034476 0ustar jenkinsjenkins00000000000000# Copyright 2015 Huawei Technologies India Pvt Ltd, Inc # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # """qos db changes Revision ID: 48153cb5f051 Revises: 1b4c6e320f79 Create Date: 2015-06-24 17:03:34.965101 """ # revision identifiers, used by Alembic. 
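# The binding tables created below declare network_id and port_id unique,
# so at most one QoS policy can be attached to a given network or port at
# a time; as a rough sketch (MySQL spelling, CASCADE details omitted),
# the port binding reduces to:
#
#     CREATE TABLE qos_port_policy_bindings (
#         policy_id VARCHAR(36) NOT NULL REFERENCES qos_policies (id),
#         port_id VARCHAR(36) NOT NULL UNIQUE REFERENCES ports (id));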
revision = '48153cb5f051' down_revision = '1b4c6e320f79' from alembic import op import sqlalchemy as sa from neutron.api.v2 import attributes as attrs def upgrade(): op.create_table( 'qos_policies', sa.Column('id', sa.String(length=36), primary_key=True), sa.Column('name', sa.String(length=attrs.NAME_MAX_LEN)), sa.Column('description', sa.String(length=attrs.DESCRIPTION_MAX_LEN)), sa.Column('shared', sa.Boolean(), nullable=False), sa.Column('tenant_id', sa.String(length=attrs.TENANT_ID_MAX_LEN), index=True)) op.create_table( 'qos_network_policy_bindings', sa.Column('policy_id', sa.String(length=36), sa.ForeignKey('qos_policies.id', ondelete='CASCADE'), nullable=False), sa.Column('network_id', sa.String(length=36), sa.ForeignKey('networks.id', ondelete='CASCADE'), nullable=False, unique=True)) op.create_table( 'qos_port_policy_bindings', sa.Column('policy_id', sa.String(length=36), sa.ForeignKey('qos_policies.id', ondelete='CASCADE'), nullable=False), sa.Column('port_id', sa.String(length=36), sa.ForeignKey('ports.id', ondelete='CASCADE'), nullable=False, unique=True)) op.create_table( 'qos_bandwidth_limit_rules', sa.Column('id', sa.String(length=36), primary_key=True), sa.Column('qos_policy_id', sa.String(length=36), sa.ForeignKey('qos_policies.id', ondelete='CASCADE'), nullable=False, unique=True), sa.Column('max_kbps', sa.Integer()), sa.Column('max_burst_kbps', sa.Integer())) ././@LongLink0000000000000000000000000000016300000000000011215 Lustar 00000000000000neutron-8.4.0/neutron/db/migration/alembic_migrations/versions/liberty/expand/34af2b5c5a59_add_dns_name_to_port.pyneutron-8.4.0/neutron/db/migration/alembic_migrations/versions/liberty/expand/34af2b5c5a59_add_dns_n0000664000567000056710000000230413044372736034620 0ustar jenkinsjenkins00000000000000# Copyright 2015 Rackspace # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # """Add dns_name to Port Revision ID: 34af2b5c5a59 Revises: 9859ac9c136 Create Date: 2015-08-23 00:22:47.618593 """ from alembic import op import sqlalchemy as sa from neutron.db import migration from neutron.extensions import dns # revision identifiers, used by Alembic. revision = '34af2b5c5a59' down_revision = '9859ac9c136' # milestone identifier, used by neutron-db-manage neutron_milestone = [migration.LIBERTY] def upgrade(): op.add_column('ports', sa.Column('dns_name', sa.String(length=dns.FQDN_MAX_LEN), nullable=True)) ././@LongLink0000000000000000000000000000015300000000000011214 Lustar 00000000000000neutron-8.4.0/neutron/db/migration/alembic_migrations/versions/liberty/expand/8675309a5c4f_rbac_network.pyneutron-8.4.0/neutron/db/migration/alembic_migrations/versions/liberty/expand/8675309a5c4f_rbac_netw0000664000567000056710000000316113044372736034530 0ustar jenkinsjenkins00000000000000# Copyright 2015 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # """network_rbac Revision ID: 8675309a5c4f Revises: 313373c0ffee Create Date: 2015-06-14 13:12:04.012457 """ # revision identifiers, used by Alembic. revision = '8675309a5c4f' down_revision = '313373c0ffee' from alembic import op import sqlalchemy as sa def upgrade(): op.create_table( 'networkrbacs', sa.Column('id', sa.String(length=36), nullable=False), sa.Column('object_id', sa.String(length=36), nullable=False), sa.Column('tenant_id', sa.String(length=255), nullable=True, index=True), sa.Column('target_tenant', sa.String(length=255), nullable=False), sa.Column('action', sa.String(length=255), nullable=False), sa.ForeignKeyConstraint(['object_id'], ['networks.id'], ondelete='CASCADE'), sa.PrimaryKeyConstraint('id'), sa.UniqueConstraint( 'action', 'object_id', 'target_tenant', name='uniq_networkrbacs0tenant_target0object_id0action')) ././@LongLink0000000000000000000000000000016000000000000011212 Lustar 00000000000000neutron-8.4.0/neutron/db/migration/alembic_migrations/versions/liberty/expand/354db87e3225_nsxv_vdr_metadata.pyneutron-8.4.0/neutron/db/migration/alembic_migrations/versions/liberty/expand/354db87e3225_nsxv_vdr_0000664000567000056710000000277213044372736034574 0ustar jenkinsjenkins00000000000000# Copyright 2015 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # """nsxv_vdr_metadata.py Revision ID: 354db87e3225 Revises: kilo Create Date: 2015-04-19 14:59:15.102609 """ from alembic import op import sqlalchemy as sa from neutron.db.migration import cli # revision identifiers, used by Alembic. 
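# This revision is the root of the Liberty expand branch (see the
# branch_labels assignment below); its contract counterpart is
# 30018084ec99 above. With the split-chain layout the two branches are
# upgraded separately -- a sketch, assuming the branch-aware options of
# the Mitaka-era CLI:
#
#     neutron-db-manage upgrade --expand    # additive, online-safe
#     neutron-db-manage upgrade --contract  # destructive changes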
revision = '354db87e3225' down_revision = 'kilo' branch_labels = (cli.EXPAND_BRANCH,) def upgrade(): op.create_table( 'nsxv_vdr_dhcp_bindings', sa.Column('vdr_router_id', sa.String(length=36), nullable=False), sa.Column('dhcp_router_id', sa.String(length=36), nullable=False), sa.Column('dhcp_edge_id', sa.String(length=36), nullable=False), sa.PrimaryKeyConstraint('vdr_router_id'), sa.UniqueConstraint( 'dhcp_router_id', name='unique_nsxv_vdr_dhcp_bindings0dhcp_router_id'), sa.UniqueConstraint( 'dhcp_edge_id', name='unique_nsxv_vdr_dhcp_bindings0dhcp_edge_id')) ././@LongLink0000000000000000000000000000015400000000000011215 Lustar 00000000000000neutron-8.4.0/neutron/db/migration/alembic_migrations/versions/liberty/expand/599c6a226151_neutrodb_ipam.pyneutron-8.4.0/neutron/db/migration/alembic_migrations/versions/liberty/expand/599c6a226151_neutrodb_0000664000567000056710000000551113044372736034457 0ustar jenkinsjenkins00000000000000# Copyright 2015 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # """neutrodb_ipam Revision ID: 599c6a226151 Revises: 354db87e3225 Create Date: 2015-03-08 18:12:08.962378 """ # revision identifiers, used by Alembic. revision = '599c6a226151' down_revision = '354db87e3225' from alembic import op import sqlalchemy as sa def upgrade(): op.create_table( 'ipamsubnets', sa.Column('id', sa.String(length=36), nullable=False), sa.Column('neutron_subnet_id', sa.String(length=36), nullable=True), sa.PrimaryKeyConstraint('id')) op.create_table( 'ipamallocations', sa.Column('ip_address', sa.String(length=64), nullable=False), sa.Column('status', sa.String(length=36), nullable=True), sa.Column('ipam_subnet_id', sa.String(length=36), nullable=False), sa.ForeignKeyConstraint(['ipam_subnet_id'], ['ipamsubnets.id'], ondelete='CASCADE'), sa.PrimaryKeyConstraint('ip_address', 'ipam_subnet_id')) op.create_table( 'ipamallocationpools', sa.Column('id', sa.String(length=36), nullable=False), sa.Column('ipam_subnet_id', sa.String(length=36), nullable=False), sa.Column('first_ip', sa.String(length=64), nullable=False), sa.Column('last_ip', sa.String(length=64), nullable=False), sa.ForeignKeyConstraint(['ipam_subnet_id'], ['ipamsubnets.id'], ondelete='CASCADE'), sa.PrimaryKeyConstraint('id')) op.create_table( 'ipamavailabilityranges', sa.Column('allocation_pool_id', sa.String(length=36), nullable=False), sa.Column('first_ip', sa.String(length=64), nullable=False), sa.Column('last_ip', sa.String(length=64), nullable=False), sa.ForeignKeyConstraint(['allocation_pool_id'], ['ipamallocationpools.id'], ondelete='CASCADE'), sa.PrimaryKeyConstraint('allocation_pool_id', 'first_ip', 'last_ip'), sa.Index('ix_ipamavailabilityranges_first_ip_allocation_pool_id', 'first_ip', 'allocation_pool_id'), sa.Index('ix_ipamavailabilityranges_last_ip_allocation_pool_id', 'last_ip', 'allocation_pool_id')) ././@LongLink0000000000000000000000000000015500000000000011216 Lustar 
00000000000000neutron-8.4.0/neutron/db/migration/alembic_migrations/versions/liberty/expand/52c5312f6baf_address_scopes.pyneutron-8.4.0/neutron/db/migration/alembic_migrations/versions/liberty/expand/52c5312f6baf_address_s0000664000567000056710000000226613044372736034661 0ustar jenkinsjenkins00000000000000# Copyright (c) 2015 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # """Initial operations in support of address scopes """ # revision identifiers, used by Alembic. revision = '52c5312f6baf' down_revision = '599c6a226151' from alembic import op import sqlalchemy as sa def upgrade(): op.create_table( 'address_scopes', sa.Column('id', sa.String(length=36), nullable=False), sa.Column('name', sa.String(length=255), nullable=False), sa.Column('tenant_id', sa.String(length=255), nullable=True, index=True), sa.Column('shared', sa.Boolean(), nullable=False), sa.PrimaryKeyConstraint('id')) ././@LongLink0000000000000000000000000000015200000000000011213 Lustar 00000000000000neutron-8.4.0/neutron/db/migration/alembic_migrations/versions/liberty/expand/45f955889773_quota_usage.pyneutron-8.4.0/neutron/db/migration/alembic_migrations/versions/liberty/expand/45f955889773_quota_usa0000664000567000056710000000273013044372736034451 0ustar jenkinsjenkins00000000000000# Copyright 2015 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # """quota_usage Revision ID: 45f955889773 Revises: 8675309a5c4f Create Date: 2015-04-17 08:09:37.611546 """ # revision identifiers, used by Alembic. 
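# The quotausages table below keys on (tenant_id, resource), one row per
# tenant and tracked resource kind; an illustrative read of the counters
# (not part of the migration, and assuming the dirty flag marks rows
# pending recalculation):
#
#     SELECT in_use, reserved FROM quotausages
#      WHERE tenant_id = :tenant AND resource = 'port' AND NOT dirty;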
revision = '45f955889773' down_revision = '8675309a5c4f' from alembic import op import sqlalchemy as sa from sqlalchemy import sql def upgrade(): op.create_table( 'quotausages', sa.Column('tenant_id', sa.String(length=255), nullable=False, primary_key=True, index=True), sa.Column('resource', sa.String(length=255), nullable=False, primary_key=True, index=True), sa.Column('dirty', sa.Boolean(), nullable=False, server_default=sql.false()), sa.Column('in_use', sa.Integer(), nullable=False, server_default='0'), sa.Column('reserved', sa.Integer(), nullable=False, server_default='0')) ././@LongLink0000000000000000000000000000015600000000000011217 Lustar 00000000000000neutron-8.4.0/neutron/db/migration/alembic_migrations/versions/liberty/expand/26c371498592_subnetpool_hash.pyneutron-8.4.0/neutron/db/migration/alembic_migrations/versions/liberty/expand/26c371498592_subnetpoo0000664000567000056710000000176613044372736034455 0ustar jenkinsjenkins00000000000000# Copyright (c) 2015 Thales Services SAS # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # """subnetpool hash Revision ID: 26c371498592 Revises: 45f955889773 Create Date: 2015-06-02 21:18:19.942076 """ # revision identifiers, used by Alembic. revision = '26c371498592' down_revision = '45f955889773' from alembic import op import sqlalchemy as sa def upgrade(): op.add_column( 'subnetpools', sa.Column('hash', sa.String(36), nullable=False, server_default='')) ././@LongLink0000000000000000000000000000014600000000000011216 Lustar 00000000000000neutron-8.4.0/neutron/db/migration/alembic_migrations/versions/liberty/expand/31337ec0ffee_flavors.pyneutron-8.4.0/neutron/db/migration/alembic_migrations/versions/liberty/expand/31337ec0ffee_flavors.p0000664000567000056710000000412113044372736034701 0ustar jenkinsjenkins00000000000000# Copyright 2014-2015 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # """Flavor framework Revision ID: 313373c0ffee Revises: 52c5312f6baf Create Date: 2014-07-17 03:00:00.00 """ # revision identifiers, used by Alembic. 
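# Note: the file is named 31337ec0ffee_flavors.py while the revision id
# assigned below is 313373c0ffee; alembic keys on the revision string,
# not the file name, so the mismatch is harmless. The plain foreign keys
# created here on flavorserviceprofilebindings are the ones later dropped
# and re-created by contract revision 2e5352a0ad4d above.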
revision = '313373c0ffee' down_revision = '52c5312f6baf' from alembic import op import sqlalchemy as sa def upgrade(): op.create_table( 'flavors', sa.Column('id', sa.String(36)), sa.Column('name', sa.String(255)), sa.Column('description', sa.String(1024)), sa.Column('enabled', sa.Boolean, nullable=False, server_default=sa.sql.true()), sa.Column('service_type', sa.String(36), nullable=True), sa.PrimaryKeyConstraint('id') ) op.create_table( 'serviceprofiles', sa.Column('id', sa.String(36)), sa.Column('description', sa.String(1024)), sa.Column('driver', sa.String(1024), nullable=False), sa.Column('enabled', sa.Boolean, nullable=False, server_default=sa.sql.true()), sa.Column('metainfo', sa.String(4096)), sa.PrimaryKeyConstraint('id') ) op.create_table( 'flavorserviceprofilebindings', sa.Column('service_profile_id', sa.String(36), nullable=False), sa.Column('flavor_id', sa.String(36), nullable=False), sa.ForeignKeyConstraint(['service_profile_id'], ['serviceprofiles.id']), sa.ForeignKeyConstraint(['flavor_id'], ['flavors.id']), sa.PrimaryKeyConstraint('service_profile_id', 'flavor_id') ) ././@LongLink0000000000000000000000000000016400000000000011216 Lustar 00000000000000neutron-8.4.0/neutron/db/migration/alembic_migrations/versions/liberty/expand/1c844d1677f7_dns_nameservers_order.pyneutron-8.4.0/neutron/db/migration/alembic_migrations/versions/liberty/expand/1c844d1677f7_dns_names0000664000567000056710000000204013044372736034527 0ustar jenkinsjenkins00000000000000# Copyright 2015 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # """add order to dnsnameservers Revision ID: 1c844d1677f7 Revises: 26c371498592 Create Date: 2015-07-21 22:59:03.383850 """ # revision identifiers, used by Alembic. revision = '1c844d1677f7' down_revision = '26c371498592' from alembic import op import sqlalchemy as sa def upgrade(): op.add_column('dnsnameservers', sa.Column('order', sa.Integer(), server_default='0', nullable=False)) neutron-8.4.0/neutron/db/migration/alembic_migrations/versions/kilo_initial.py0000664000567000056710000000543113044372760031160 0ustar jenkinsjenkins00000000000000# Copyright 2014 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # """kilo_initial Revision ID: kilo Revises: None """ # revision identifiers, used by Alembic. 
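# The squashed 'kilo' revision replaces the entire pre-Kilo migration
# chain: upgrade() simply fans out to the per-area *_init_ops modules
# imported below. A fresh schema is built by starting here and walking
# both branches to their heads -- a sketch, assuming a configured
# neutron.conf:
#
#     neutron-db-manage --config-file /etc/neutron/neutron.conf \
#         upgrade heads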
revision = 'kilo' down_revision = None from neutron.db.migration.alembic_migrations import agent_init_ops from neutron.db.migration.alembic_migrations import brocade_init_ops from neutron.db.migration.alembic_migrations import cisco_init_ops from neutron.db.migration.alembic_migrations import core_init_ops from neutron.db.migration.alembic_migrations import dvr_init_opts from neutron.db.migration.alembic_migrations import firewall_init_ops from neutron.db.migration.alembic_migrations import l3_init_ops from neutron.db.migration.alembic_migrations import lb_init_ops from neutron.db.migration.alembic_migrations import loadbalancer_init_ops from neutron.db.migration.alembic_migrations import metering_init_ops from neutron.db.migration.alembic_migrations import ml2_init_ops from neutron.db.migration.alembic_migrations import nec_init_ops from neutron.db.migration.alembic_migrations import nsxv_initial_opts from neutron.db.migration.alembic_migrations import nuage_init_opts from neutron.db.migration.alembic_migrations import other_extensions_init_ops from neutron.db.migration.alembic_migrations import other_plugins_init_ops from neutron.db.migration.alembic_migrations import ovs_init_ops from neutron.db.migration.alembic_migrations import portsec_init_ops from neutron.db.migration.alembic_migrations import secgroup_init_ops from neutron.db.migration.alembic_migrations import vmware_init_ops from neutron.db.migration.alembic_migrations import vpn_init_ops def upgrade(): agent_init_ops.upgrade() core_init_ops.upgrade() l3_init_ops.upgrade() secgroup_init_ops.upgrade() portsec_init_ops.upgrade() other_extensions_init_ops.upgrade() lb_init_ops.upgrade() ovs_init_ops.upgrade() ml2_init_ops.upgrade() dvr_init_opts.upgrade() firewall_init_ops.upgrade() loadbalancer_init_ops.upgrade() vpn_init_ops.upgrade() metering_init_ops.upgrade() brocade_init_ops.upgrade() cisco_init_ops.upgrade() nec_init_ops.upgrade() other_plugins_init_ops.upgrade() vmware_init_ops.upgrade() nuage_init_opts.upgrade() nsxv_initial_opts.upgrade() neutron-8.4.0/neutron/db/migration/__init__.py0000664000567000056710000001355713044372760022560 0ustar jenkinsjenkins00000000000000# Copyright 2012 New Dream Network, LLC (DreamHost) # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
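# ---------------------------------------------------------------------------
# Illustrative note (not part of the original tree): the "kilo" initial
# revision above is normally applied through the neutron-db-manage CLI
# rather than by calling upgrade() directly; the config-file path below is
# an assumed example, not a value taken from this repository:
#
#   $ neutron-db-manage --config-file /etc/neutron/neutron.conf upgrade kilo
#
# The milestone aliases defined just below in this module (LIBERTY, MITAKA)
# can be used as upgrade targets in the same way, e.g. "upgrade liberty".
# ---------------------------------------------------------------------------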
import contextlib import functools import alembic from alembic import context from alembic import op import sqlalchemy as sa from sqlalchemy.engine import reflection from neutron._i18n import _ # Neutron milestones for upgrade aliases LIBERTY = 'liberty' MITAKA = 'mitaka' NEUTRON_MILESTONES = [ # earlier milestones were not tagged LIBERTY, MITAKA, ] CREATION_OPERATIONS = (sa.sql.ddl.CreateIndex, sa.sql.ddl.CreateTable, sa.sql.ddl.CreateColumn, ) DROP_OPERATIONS = (sa.sql.ddl.DropConstraint, sa.sql.ddl.DropIndex, sa.sql.ddl.DropTable, alembic.ddl.base.DropColumn) def skip_if_offline(func): """Decorator for skipping migrations in offline mode.""" @functools.wraps(func) def decorator(*args, **kwargs): if context.is_offline_mode(): return return func(*args, **kwargs) return decorator def raise_if_offline(func): """Decorator for raising if a function is called in offline mode.""" @functools.wraps(func) def decorator(*args, **kwargs): if context.is_offline_mode(): raise RuntimeError(_("%s cannot be called while in offline mode") % func.__name__) return func(*args, **kwargs) return decorator @raise_if_offline def schema_has_table(table_name): """Check whether the specified table exists in the current schema. This method cannot be executed in offline mode. """ bind = op.get_bind() insp = sa.engine.reflection.Inspector.from_engine(bind) return table_name in insp.get_table_names() @raise_if_offline def schema_has_column(table_name, column_name): """Check whether the specified column exists in the current schema. This method cannot be executed in offline mode. """ bind = op.get_bind() insp = sa.engine.reflection.Inspector.from_engine(bind) # first check that the table exists if not schema_has_table(table_name): return # check whether column_name exists in table columns return column_name in [column['name'] for column in insp.get_columns(table_name)] @raise_if_offline def alter_column_if_exists(table_name, column_name, **kwargs): """Alter a column only if it exists in the schema.""" if schema_has_column(table_name, column_name): op.alter_column(table_name, column_name, **kwargs) @raise_if_offline def drop_table_if_exists(table_name): if schema_has_table(table_name): op.drop_table(table_name) @raise_if_offline def rename_table_if_exists(old_table_name, new_table_name): if schema_has_table(old_table_name): op.rename_table(old_table_name, new_table_name) def alter_enum(table, column, enum_type, nullable): bind = op.get_bind() engine = bind.engine if engine.name == 'postgresql': values = {'table': table, 'column': column, 'name': enum_type.name} op.execute("ALTER TYPE %(name)s RENAME TO old_%(name)s" % values) enum_type.create(bind, checkfirst=False) op.execute("ALTER TABLE %(table)s RENAME COLUMN %(column)s TO " "old_%(column)s" % values) op.add_column(table, sa.Column(column, enum_type, nullable=nullable)) op.execute("UPDATE %(table)s SET %(column)s = " "old_%(column)s::text::%(name)s" % values) op.execute("ALTER TABLE %(table)s DROP COLUMN old_%(column)s" % values) op.execute("DROP TYPE old_%(name)s" % values) else: op.alter_column(table, column, type_=enum_type, existing_nullable=nullable) def create_table_if_not_exist_psql(table_name, values): if op.get_bind().engine.dialect.server_version_info < (9, 1, 0): op.execute("CREATE LANGUAGE plpgsql") op.execute("CREATE OR REPLACE FUNCTION execute(TEXT) RETURNS VOID AS $$" "BEGIN EXECUTE $1; END;" "$$ LANGUAGE plpgsql STRICT;") op.execute("CREATE OR REPLACE FUNCTION table_exist(TEXT) RETURNS bool as " "$$ SELECT exists(select 1 from pg_class where 
relname=$1);" "$$ language sql STRICT;") op.execute("SELECT execute($$CREATE TABLE %(name)s %(columns)s $$) " "WHERE NOT table_exist(%(name)r);" % {'name': table_name, 'columns': values}) def remove_foreign_keys(table, foreign_keys): for fk in foreign_keys: op.drop_constraint( constraint_name=fk['name'], table_name=table, type_='foreignkey' ) def create_foreign_keys(table, foreign_keys): for fk in foreign_keys: op.create_foreign_key( constraint_name=fk['name'], source_table=table, referent_table=fk['referred_table'], local_cols=fk['constrained_columns'], remote_cols=fk['referred_columns'], ondelete='CASCADE' ) @contextlib.contextmanager def remove_fks_from_table(table): try: inspector = reflection.Inspector.from_engine(op.get_bind()) foreign_keys = inspector.get_foreign_keys(table) remove_foreign_keys(table, foreign_keys) yield finally: create_foreign_keys(table, foreign_keys) neutron-8.4.0/neutron/db/migration/README0000664000567000056710000000020513044372736021314 0ustar jenkinsjenkins00000000000000See doc/source/devref/alembic_migrations.rst Rendered at http://docs.openstack.org/developer/neutron/devref/alembic_migrations.html neutron-8.4.0/neutron/db/migration/models/0000775000567000056710000000000013044373210021706 5ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/db/migration/models/head.py0000664000567000056710000000521113044372760023171 0ustar jenkinsjenkins00000000000000# Copyright (c) 2014 OpenStack Foundation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ The module provides all database models at current HEAD. Its purpose is to create comparable metadata with current database schema. Based on this comparison database can be healed with healing migration. 
""" from neutron.db import address_scope_db # noqa from neutron.db import agents_db # noqa from neutron.db import agentschedulers_db # noqa from neutron.db import allowedaddresspairs_db # noqa from neutron.db import bgp_db # noqa from neutron.db import bgp_dragentscheduler_db # noqa from neutron.db import dns_db # noqa from neutron.db import dvr_mac_db # noqa from neutron.db import external_net_db # noqa from neutron.db import extradhcpopt_db # noqa from neutron.db import extraroute_db # noqa from neutron.db import flavors_db # noqa from neutron.db import l3_agentschedulers_db # noqa from neutron.db import l3_attrs_db # noqa from neutron.db import l3_db # noqa from neutron.db import l3_dvrscheduler_db # noqa from neutron.db import l3_gwmode_db # noqa from neutron.db import l3_hamode_db # noqa from neutron.db.metering import metering_db # noqa from neutron.db import model_base from neutron.db import models_v2 # noqa from neutron.db import portbindings_db # noqa from neutron.db import portsecurity_db # noqa from neutron.db.qos import models as qos_models # noqa from neutron.db.quota import models # noqa from neutron.db import rbac_db_models # noqa from neutron.db import securitygroups_db # noqa from neutron.db import servicetype_db # noqa from neutron.db import tag_db # noqa from neutron.ipam.drivers.neutrondb_ipam import db_models # noqa from neutron.plugins.ml2.drivers import type_flat # noqa from neutron.plugins.ml2.drivers import type_geneve # noqa from neutron.plugins.ml2.drivers import type_gre # noqa from neutron.plugins.ml2.drivers import type_vlan # noqa from neutron.plugins.ml2.drivers import type_vxlan # noqa from neutron.plugins.ml2 import models # noqa from neutron.services.auto_allocate import models # noqa def get_metadata(): return model_base.BASEV2.metadata neutron-8.4.0/neutron/db/migration/models/__init__.py0000664000567000056710000000000013044372736024021 0ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/db/migration/alembic.ini0000664000567000056710000000177513044372736022546 0ustar jenkinsjenkins00000000000000# A generic, single database configuration. [alembic] # path to migration scripts script_location = %(here)s/alembic_migrations # template used to generate migration files # file_template = %%(rev)s_%%(slug)s # set to 'true' to run the environment during # the 'revision' command, regardless of autogenerate # revision_environment = false # default to an empty string because the Neutron migration cli will # extract the correct value and set it programmatically before alembic is fully # invoked. sqlalchemy.url = # Logging configuration [loggers] keys = root,sqlalchemy,alembic [handlers] keys = console [formatters] keys = generic [logger_root] level = WARN handlers = console qualname = [logger_sqlalchemy] level = WARN handlers = qualname = sqlalchemy.engine [logger_alembic] level = INFO handlers = qualname = alembic [handler_console] class = StreamHandler args = (sys.stderr,) level = NOTSET formatter = generic [formatter_generic] format = %(levelname)-5.5s [%(name)s] %(message)s datefmt = %H:%M:%S neutron-8.4.0/neutron/db/migration/connection.py0000664000567000056710000000273613044372736023160 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_db.sqlalchemy import session class DBConnection(object): """Context manager class which handles a DB connection. An existing connection can be passed as a parameter. When nested block is complete the new connection will be closed. This class is not thread safe. """ def __init__(self, connection_url, connection=None): self.connection = connection self.connection_url = connection_url self.new_engine = False def __enter__(self): self.new_engine = self.connection is None if self.new_engine: self.engine = session.create_engine(self.connection_url) self.connection = self.engine.connect() return self.connection def __exit__(self, type, value, traceback): if self.new_engine: try: self.connection.close() finally: self.engine.dispose() neutron-8.4.0/neutron/db/migration/autogen.py0000664000567000056710000000763213044372760022460 0ustar jenkinsjenkins00000000000000# Copyright (c) 2015 Red Hat # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from alembic.operations import ops from alembic.util import Dispatcher from alembic.util import rev_id as new_rev_id from neutron.db.migration import cli _ec_dispatcher = Dispatcher() def process_revision_directives(context, revision, directives): if cli._use_separate_migration_branches(context.config): directives[:] = [ directive for directive in _assign_directives(context, directives) ] def _assign_directives(context, directives, phase=None): for directive in directives: decider = _ec_dispatcher.dispatch(directive) if phase is None: phases = cli.MIGRATION_BRANCHES else: phases = (phase,) for phase in phases: decided = decider(context, directive, phase) if decided: yield decided @_ec_dispatcher.dispatch_for(ops.MigrationScript) def _migration_script_ops(context, directive, phase): """Generate a new ops.MigrationScript() for a given phase. E.g. given an ops.MigrationScript() directive from a vanilla autogenerate and an expand/contract phase name, produce a new ops.MigrationScript() which contains only those sub-directives appropriate to "expand" or "contract". Also ensure that the branch directory exists and that the correct branch labels/depends_on/head revision are set up. 
""" version_path = cli._get_version_branch_path( context.config, release=cli.CURRENT_RELEASE, branch=phase) autogen_kwargs = {} cli._check_bootstrap_new_branch(phase, version_path, autogen_kwargs) op = ops.MigrationScript( new_rev_id(), ops.UpgradeOps(ops=[ d for d in _assign_directives( context, directive.upgrade_ops.ops, phase) ]), ops.DowngradeOps(ops=[]), message=directive.message, **autogen_kwargs ) if not op.upgrade_ops.is_empty(): return op @_ec_dispatcher.dispatch_for(ops.AddConstraintOp) @_ec_dispatcher.dispatch_for(ops.CreateIndexOp) @_ec_dispatcher.dispatch_for(ops.CreateTableOp) @_ec_dispatcher.dispatch_for(ops.AddColumnOp) def _expands(context, directive, phase): if phase == 'expand': return directive else: return None @_ec_dispatcher.dispatch_for(ops.DropConstraintOp) @_ec_dispatcher.dispatch_for(ops.DropIndexOp) @_ec_dispatcher.dispatch_for(ops.DropTableOp) @_ec_dispatcher.dispatch_for(ops.DropColumnOp) def _contracts(context, directive, phase): if phase == 'contract': return directive else: return None @_ec_dispatcher.dispatch_for(ops.AlterColumnOp) def _alter_column(context, directive, phase): is_expand = phase == 'expand' if is_expand and ( directive.modify_nullable is True ): return directive elif not is_expand and ( directive.modify_nullable is False ): return directive else: raise NotImplementedError( "Don't know if operation is an expand or " "contract at the moment: %s" % directive) @_ec_dispatcher.dispatch_for(ops.ModifyTableOps) def _modify_table_ops(context, directive, phase): op = ops.ModifyTableOps( directive.table_name, ops=[ d for d in _assign_directives(context, directive.ops, phase) ], schema=directive.schema) if not op.is_empty(): return op neutron-8.4.0/neutron/db/quota_db.py0000664000567000056710000000153113044372736020616 0ustar jenkinsjenkins00000000000000# Copyright 2011 OpenStack Foundation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import sys from neutron.db.quota import driver # noqa # This module has been preserved for backward compatibility, and will be # deprecated in the future sys.modules[__name__] = sys.modules['neutron.db.quota.driver'] neutron-8.4.0/neutron/db/availability_zone/0000775000567000056710000000000013044373210022137 5ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/db/availability_zone/router.py0000664000567000056710000000367313044372760024053 0ustar jenkinsjenkins00000000000000# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from neutron.common import utils from neutron.db import l3_attrs_db from neutron.extensions import availability_zone as az_ext class RouterAvailabilityZoneMixin(l3_attrs_db.ExtraAttributesMixin): """Mixin class to enable router's availability zone attributes.""" extra_attributes = [{'name': az_ext.AZ_HINTS, 'default': "[]"}] def _extend_extra_router_dict(self, router_res, router_db): super(RouterAvailabilityZoneMixin, self)._extend_extra_router_dict( router_res, router_db) if not utils.is_extension_supported(self, 'router_availability_zone'): return router_res[az_ext.AZ_HINTS] = az_ext.convert_az_string_to_list( router_res[az_ext.AZ_HINTS]) router_res['availability_zones'] = ( self.get_router_availability_zones(router_db)) def _process_extra_attr_router_create( self, context, router_db, router_req): if az_ext.AZ_HINTS in router_req: self.validate_availability_zones(context, 'router', router_req[az_ext.AZ_HINTS]) router_req[az_ext.AZ_HINTS] = az_ext.convert_az_list_to_string( router_req[az_ext.AZ_HINTS]) super(RouterAvailabilityZoneMixin, self)._process_extra_attr_router_create(context, router_db, router_req) neutron-8.4.0/neutron/db/availability_zone/network.py0000664000567000056710000000245213044372736024221 0ustar jenkinsjenkins00000000000000# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron.api.v2 import attributes from neutron.db import common_db_mixin from neutron.extensions import availability_zone as az_ext from neutron.extensions import network_availability_zone as net_az class NetworkAvailabilityZoneMixin(net_az.NetworkAvailabilityZonePluginBase): """Mixin class to enable network's availability zone attributes.""" def _extend_availability_zone(self, net_res, net_db): net_res[az_ext.AZ_HINTS] = az_ext.convert_az_string_to_list( net_db[az_ext.AZ_HINTS]) net_res[az_ext.AVAILABILITY_ZONES] = ( self.get_network_availability_zones(net_db)) common_db_mixin.CommonDbMixin.register_dict_extend_funcs( attributes.NETWORKS, ['_extend_availability_zone']) neutron-8.4.0/neutron/db/availability_zone/__init__.py0000664000567000056710000000000013044372736024252 0ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/db/api.py0000664000567000056710000001210213044372760017562 0ustar jenkinsjenkins00000000000000# Copyright 2011 VMware, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
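# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original tree): the dict-extend hook
# pattern used by NetworkAvailabilityZoneMixin above. A mixin registers a
# method name which is then invoked for every response dict built for that
# resource. "MyExtensionMixin" and the 'foo' attribute are hypothetical:
#
#   from neutron.api.v2 import attributes
#   from neutron.db import common_db_mixin
#
#   class MyExtensionMixin(object):
#       def _extend_foo(self, net_res, net_db):
#           net_res['foo'] = net_db.get('foo')
#
#       common_db_mixin.CommonDbMixin.register_dict_extend_funcs(
#           attributes.NETWORKS, ['_extend_foo'])
# ---------------------------------------------------------------------------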
import contextlib import debtcollector from oslo_config import cfg from oslo_db import api as oslo_db_api from oslo_db import exception as db_exc from oslo_db.sqlalchemy import session from oslo_utils import excutils from oslo_utils import uuidutils from sqlalchemy.orm import exc from neutron.common import exceptions as n_exc from neutron.db import common_db_mixin _FACADE = None MAX_RETRIES = 10 def is_deadlock(exc): return _is_nested_instance(exc, db_exc.DBDeadlock) def is_retriable(e): return _is_nested_instance(e, (db_exc.DBDeadlock, exc.StaleDataError)) retry_db_errors = oslo_db_api.wrap_db_retry( max_retries=MAX_RETRIES, retry_on_request=True, exception_checker=is_retriable ) def _is_nested_instance(e, etypes): """Check if exception or its inner excepts are an instance of etypes.""" return (isinstance(e, etypes) or isinstance(e, n_exc.MultipleExceptions) and any(_is_nested_instance(i, etypes) for i in e.inner_exceptions)) @contextlib.contextmanager def exc_to_retry(exceptions): try: yield except Exception as e: with excutils.save_and_reraise_exception() as ctx: if _is_nested_instance(e, exceptions): ctx.reraise = False raise db_exc.RetryRequest(e) def _create_facade_lazily(): global _FACADE if _FACADE is None: _FACADE = session.EngineFacade.from_config(cfg.CONF, sqlite_fk=True) return _FACADE def get_engine(): """Helper method to grab engine.""" facade = _create_facade_lazily() return facade.get_engine() def dispose(): # Don't need to do anything if an enginefacade hasn't been created if _FACADE is not None: get_engine().pool.dispose() def get_session(autocommit=True, expire_on_commit=False, use_slave=False): """Helper method to grab session.""" facade = _create_facade_lazily() return facade.get_session(autocommit=autocommit, expire_on_commit=expire_on_commit, use_slave=use_slave) @contextlib.contextmanager def autonested_transaction(sess): """This is a convenience method to not bother with 'nested' parameter.""" if sess.is_active: session_context = sess.begin(nested=True) else: session_context = sess.begin(subtransactions=True) with session_context as tx: yield tx # Common database operation implementations @debtcollector.removals.remove(message="This will be removed in the N cycle.") def get_object(context, model, **kwargs): with context.session.begin(subtransactions=True): return (common_db_mixin.model_query(context, model) .filter_by(**kwargs) .first()) @debtcollector.removals.remove(message="This will be removed in the N cycle.") def get_objects(context, model, **kwargs): with context.session.begin(subtransactions=True): return (common_db_mixin.model_query(context, model) .filter_by(**kwargs) .all()) @debtcollector.removals.remove(message="This will be removed in the N cycle.") def create_object(context, model, values): with context.session.begin(subtransactions=True): if 'id' not in values and hasattr(model, 'id'): values['id'] = uuidutils.generate_uuid() db_obj = model(**values) context.session.add(db_obj) return db_obj.__dict__ @debtcollector.removals.remove(message="This will be removed in the N cycle.") def _safe_get_object(context, model, id, key='id'): db_obj = get_object(context, model, **{key: id}) if db_obj is None: raise n_exc.ObjectNotFound(id=id) return db_obj @debtcollector.removals.remove(message="This will be removed in the N cycle.") def update_object(context, model, id, values, key=None): with context.session.begin(subtransactions=True): kwargs = {} if key: kwargs['key'] = key db_obj = _safe_get_object(context, model, id, **kwargs) db_obj.update(values) 
db_obj.save(session=context.session) return db_obj.__dict__ @debtcollector.removals.remove(message="This will be removed in the N cycle.") def delete_object(context, model, id, key=None): with context.session.begin(subtransactions=True): kwargs = {} if key: kwargs['key'] = key db_obj = _safe_get_object(context, model, id, **kwargs) context.session.delete(db_obj) neutron-8.4.0/neutron/db/sqlalchemytypes.py0000664000567000056710000000446413044372760022244 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Custom SQLAlchemy types.""" import netaddr from sqlalchemy import types from neutron._i18n import _ class IPAddress(types.TypeDecorator): impl = types.String(64) def process_result_value(self, value, dialect): return netaddr.IPAddress(value) def process_bind_param(self, value, dialect): if not isinstance(value, netaddr.IPAddress): raise AttributeError(_("Received type '%(type)s' and value " "'%(value)s'. Expecting netaddr.IPAddress " "type.") % {'type': type(value), 'value': value}) return str(value) class CIDR(types.TypeDecorator): impl = types.String(64) def process_result_value(self, value, dialect): return netaddr.IPNetwork(value) def process_bind_param(self, value, dialect): if not isinstance(value, netaddr.IPNetwork): raise AttributeError(_("Received type '%(type)s' and value " "'%(value)s'. Expecting netaddr.IPNetwork " "type.") % {'type': type(value), 'value': value}) return str(value) class MACAddress(types.TypeDecorator): impl = types.String(64) def process_result_value(self, value, dialect): return netaddr.EUI(value) def process_bind_param(self, value, dialect): if not isinstance(value, netaddr.EUI): raise AttributeError(_("Received type '%(type)s' and value " "'%(value)s'. Expecting netaddr.EUI " "type.") % {'type': type(value), 'value': value}) return str(value) neutron-8.4.0/neutron/db/db_base_plugin_common.py0000664000567000056710000003473313044372760023324 0ustar jenkinsjenkins00000000000000# Copyright (c) 2015 OpenStack Foundation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License.
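# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original tree): using the custom
# column types from neutron/db/sqlalchemytypes.py above in a model
# definition. "ExampleEntry" and its table name are hypothetical:
#
#   import sqlalchemy as sa
#
#   from neutron.db import model_base
#   from neutron.db import sqlalchemytypes
#
#   class ExampleEntry(model_base.BASEV2):
#       __tablename__ = 'example_entries'
#       address = sa.Column(sqlalchemytypes.IPAddress, primary_key=True)
#
# Values read back as netaddr.IPAddress objects, and binding anything that
# is not a netaddr.IPAddress raises AttributeError, per process_bind_param.
# ---------------------------------------------------------------------------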
import functools from oslo_config import cfg from oslo_log import log as logging from sqlalchemy.orm import exc from neutron.api.v2 import attributes from neutron.common import constants from neutron.common import exceptions as n_exc from neutron.common import utils from neutron.db import common_db_mixin from neutron.db import models_v2 LOG = logging.getLogger(__name__) def convert_result_to_dict(f): @functools.wraps(f) def inner(*args, **kwargs): result = f(*args, **kwargs) if result is None: return None elif isinstance(result, list): return [r.to_dict() for r in result] else: return result.to_dict() return inner def filter_fields(f): @functools.wraps(f) def inner_filter(*args, **kwargs): result = f(*args, **kwargs) fields = kwargs.get('fields') if not fields: try: pos = f.__code__.co_varnames.index('fields') fields = args[pos] except (IndexError, ValueError): return result do_filter = lambda d: {k: v for k, v in d.items() if k in fields} if isinstance(result, list): return [do_filter(obj) for obj in result] else: return do_filter(result) return inner_filter class DbBasePluginCommon(common_db_mixin.CommonDbMixin): """Stores getters and helper methods for db_base_plugin_v2 All private getters and simple helpers like _make_*_dict were moved from db_base_plugin_v2. More complicated logic and public methods left in db_base_plugin_v2. Main purpose of this class is to make getters accessible for Ipam backends. """ @staticmethod def _generate_mac(): return utils.get_random_mac(cfg.CONF.base_mac.split(':')) @staticmethod def _delete_ip_allocation(context, network_id, subnet_id, ip_address): # Delete the IP address from the IPAllocate table LOG.debug("Delete allocated IP %(ip_address)s " "(%(network_id)s/%(subnet_id)s)", {'ip_address': ip_address, 'network_id': network_id, 'subnet_id': subnet_id}) context.session.query(models_v2.IPAllocation).filter_by( network_id=network_id, ip_address=ip_address, subnet_id=subnet_id).delete() @staticmethod def _store_ip_allocation(context, ip_address, network_id, subnet_id, port_id): LOG.debug("Allocated IP %(ip_address)s " "(%(network_id)s/%(subnet_id)s/%(port_id)s)", {'ip_address': ip_address, 'network_id': network_id, 'subnet_id': subnet_id, 'port_id': port_id}) allocated = models_v2.IPAllocation( network_id=network_id, port_id=port_id, ip_address=ip_address, subnet_id=subnet_id ) context.session.add(allocated) # NOTE(kevinbenton): We add this to the session info so the sqlalchemy # object isn't immediately garbage collected. Otherwise when the # fixed_ips relationship is referenced a new persistent object will be # added to the session that will interfere with retry operations. # See bug 1556178 for details. 
context.session.info.setdefault('allocated_ips', []).append(allocated) def _make_subnet_dict(self, subnet, fields=None, context=None): res = {'id': subnet['id'], 'name': subnet['name'], 'tenant_id': subnet['tenant_id'], 'network_id': subnet['network_id'], 'ip_version': subnet['ip_version'], 'cidr': subnet['cidr'], 'subnetpool_id': subnet.get('subnetpool_id'), 'allocation_pools': [{'start': pool['first_ip'], 'end': pool['last_ip']} for pool in subnet['allocation_pools']], 'gateway_ip': subnet['gateway_ip'], 'enable_dhcp': subnet['enable_dhcp'], 'ipv6_ra_mode': subnet['ipv6_ra_mode'], 'ipv6_address_mode': subnet['ipv6_address_mode'], 'dns_nameservers': [dns['address'] for dns in subnet['dns_nameservers']], 'host_routes': [{'destination': route['destination'], 'nexthop': route['nexthop']} for route in subnet['routes']], } # The shared attribute for a subnet is the same as its parent network res['shared'] = self._is_network_shared(context, subnet.rbac_entries) # Call auxiliary extend functions, if any self._apply_dict_extend_functions(attributes.SUBNETS, res, subnet) return self._fields(res, fields) def _make_subnetpool_dict(self, subnetpool, fields=None): default_prefixlen = str(subnetpool['default_prefixlen']) min_prefixlen = str(subnetpool['min_prefixlen']) max_prefixlen = str(subnetpool['max_prefixlen']) res = {'id': subnetpool['id'], 'name': subnetpool['name'], 'tenant_id': subnetpool['tenant_id'], 'default_prefixlen': default_prefixlen, 'min_prefixlen': min_prefixlen, 'max_prefixlen': max_prefixlen, 'is_default': subnetpool['is_default'], 'shared': subnetpool['shared'], 'prefixes': [prefix['cidr'] for prefix in subnetpool['prefixes']], 'ip_version': subnetpool['ip_version'], 'default_quota': subnetpool['default_quota'], 'address_scope_id': subnetpool['address_scope_id']} self._apply_dict_extend_functions(attributes.SUBNETPOOLS, res, subnetpool) return self._fields(res, fields) def _make_port_dict(self, port, fields=None, process_extensions=True): res = {"id": port["id"], 'name': port['name'], "network_id": port["network_id"], 'tenant_id': port['tenant_id'], "mac_address": port["mac_address"], "admin_state_up": port["admin_state_up"], "status": port["status"], "fixed_ips": [{'subnet_id': ip["subnet_id"], 'ip_address': ip["ip_address"]} for ip in port["fixed_ips"]], "device_id": port["device_id"], "device_owner": port["device_owner"]} if "dns_name" in port: res["dns_name"] = port["dns_name"] if "dns_assignment" in port: res["dns_assignment"] = [{"ip_address": a["ip_address"], "hostname": a["hostname"], "fqdn": a["fqdn"]} for a in port["dns_assignment"]] # Call auxiliary extend functions, if any if process_extensions: self._apply_dict_extend_functions( attributes.PORTS, res, port) return self._fields(res, fields) def _get_network(self, context, id): try: network = self._get_by_id(context, models_v2.Network, id) except exc.NoResultFound: raise n_exc.NetworkNotFound(net_id=id) return network def _get_subnet(self, context, id): try: subnet = self._get_by_id(context, models_v2.Subnet, id) except exc.NoResultFound: raise n_exc.SubnetNotFound(subnet_id=id) return subnet def _get_subnetpool(self, context, id): try: return self._get_by_id(context, models_v2.SubnetPool, id) except exc.NoResultFound: raise n_exc.SubnetPoolNotFound(subnetpool_id=id) def _get_all_subnetpools(self, context): # NOTE(tidwellr): see note in _get_all_subnets() return context.session.query(models_v2.SubnetPool).all() def _get_subnetpools_by_address_scope_id(self, context, address_scope_id): # NOTE(vikram.choudhary): see note 
in _get_all_subnets() subnetpool_qry = context.session.query(models_v2.SubnetPool) return subnetpool_qry.filter_by( address_scope_id=address_scope_id).all() def _get_port(self, context, id): try: port = self._get_by_id(context, models_v2.Port, id) except exc.NoResultFound: raise n_exc.PortNotFound(port_id=id) return port def _get_dns_by_subnet(self, context, subnet_id): dns_qry = context.session.query(models_v2.DNSNameServer) return dns_qry.filter_by(subnet_id=subnet_id).order_by( models_v2.DNSNameServer.order).all() def _get_route_by_subnet(self, context, subnet_id): route_qry = context.session.query(models_v2.SubnetRoute) return route_qry.filter_by(subnet_id=subnet_id).all() def _get_router_gw_ports_by_network(self, context, network_id): port_qry = context.session.query(models_v2.Port) return port_qry.filter_by(network_id=network_id, device_owner=constants.DEVICE_OWNER_ROUTER_GW).all() def _get_subnets_by_network(self, context, network_id): subnet_qry = context.session.query(models_v2.Subnet) return subnet_qry.filter_by(network_id=network_id).all() def _get_subnets_by_subnetpool(self, context, subnetpool_id): subnet_qry = context.session.query(models_v2.Subnet) return subnet_qry.filter_by(subnetpool_id=subnetpool_id).all() def _get_all_subnets(self, context): # NOTE(salvatore-orlando): This query might end up putting # a lot of stress on the db. Consider adding a cache layer return context.session.query(models_v2.Subnet).all() def _get_subnets(self, context, filters=None, fields=None, sorts=None, limit=None, marker=None, page_reverse=False): marker_obj = self._get_marker_obj(context, 'subnet', limit, marker) make_subnet_dict = functools.partial(self._make_subnet_dict, context=context) return self._get_collection(context, models_v2.Subnet, make_subnet_dict, filters=filters, fields=fields, sorts=sorts, limit=limit, marker_obj=marker_obj, page_reverse=page_reverse) def _make_network_dict(self, network, fields=None, process_extensions=True, context=None): res = {'id': network['id'], 'name': network['name'], 'tenant_id': network['tenant_id'], 'admin_state_up': network['admin_state_up'], 'mtu': network.get('mtu', constants.DEFAULT_NETWORK_MTU), 'status': network['status'], 'subnets': [subnet['id'] for subnet in network['subnets']]} res['shared'] = self._is_network_shared(context, network.rbac_entries) # Call auxiliary extend functions, if any if process_extensions: self._apply_dict_extend_functions( attributes.NETWORKS, res, network) return self._fields(res, fields) def _is_network_shared(self, context, rbac_entries): # The shared attribute for a network now reflects if the network # is shared to the calling tenant via an RBAC entry. 
matches = ('*',) + ((context.tenant_id,) if context else ()) for entry in rbac_entries: if (entry.action == 'access_as_shared' and entry.target_tenant in matches): return True return False def _make_subnet_args(self, detail, subnet, subnetpool_id): gateway_ip = str(detail.gateway_ip) if detail.gateway_ip else None args = {'tenant_id': detail.tenant_id, 'id': detail.subnet_id, 'name': subnet['name'], 'network_id': subnet['network_id'], 'ip_version': subnet['ip_version'], 'cidr': str(detail.subnet_cidr), 'subnetpool_id': subnetpool_id, 'enable_dhcp': subnet['enable_dhcp'], 'gateway_ip': gateway_ip, 'description': subnet.get('description')} if subnet['ip_version'] == 6 and subnet['enable_dhcp']: if attributes.is_attr_set(subnet['ipv6_ra_mode']): args['ipv6_ra_mode'] = subnet['ipv6_ra_mode'] if attributes.is_attr_set(subnet['ipv6_address_mode']): args['ipv6_address_mode'] = subnet['ipv6_address_mode'] return args def _make_fixed_ip_dict(self, ips): # Excludes from dict all keys except subnet_id and ip_address return [{'subnet_id': ip["subnet_id"], 'ip_address': ip["ip_address"]} for ip in ips] def _port_filter_hook(self, context, original_model, conditions): # Apply the port filter only in non-admin and non-advsvc context if self.model_query_scope(context, original_model): conditions |= ( (context.tenant_id == models_v2.Network.tenant_id) & (models_v2.Network.id == models_v2.Port.network_id)) return conditions def _port_query_hook(self, context, original_model, query): # we need to outerjoin to networks if the model query scope # is necessary so we can filter based on network id. without # this the conditions in the filter hook cause the networks # table to be added to the FROM statement so we get lots of # duplicated rows that break the COUNT operation if self.model_query_scope(context, original_model): query = query.outerjoin(models_v2.Network) return query neutron-8.4.0/neutron/tests/0000775000567000056710000000000013044373210017207 5ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/tests/tools.py0000664000567000056710000002042413044372760020734 0ustar jenkinsjenkins00000000000000# Copyright (c) 2013 NEC Corporation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import importlib import os import platform import random import string import sys import time import warnings import fixtures import mock import six import neutron from neutron.api.v2 import attributes class AttributeMapMemento(fixtures.Fixture): """Create a copy of the resource attribute map so it can be restored during test cleanup. There are a few reasons why this is not included in a class derived from BaseTestCase: - Test cases may need more control about when the backup is made, especially if they are not direct descendants of BaseTestCase. - Inheritance is a bit of overkill for this facility and it's a stretch to rationalize the "is a" criteria. 
""" def _setUp(self): # Shallow copy is not a proper choice for keeping a backup copy as # the RESOURCE_ATTRIBUTE_MAP map is modified in place through the # 0th level keys. Ideally deepcopy() would be used but this seems # to result in test failures. A compromise is to copy one level # deeper than a shallow copy. self.contents_backup = {} for res, attrs in six.iteritems(attributes.RESOURCE_ATTRIBUTE_MAP): self.contents_backup[res] = attrs.copy() self.addCleanup(self.restore) def restore(self): attributes.RESOURCE_ATTRIBUTE_MAP = self.contents_backup class WarningsFixture(fixtures.Fixture): """Filters out warnings during test runs.""" warning_types = ( DeprecationWarning, PendingDeprecationWarning, ImportWarning ) def _setUp(self): self.addCleanup(warnings.resetwarnings) for wtype in self.warning_types: warnings.filterwarnings( "always", category=wtype, module='^neutron\\.') class OpenFixture(fixtures.Fixture): """Mock access to a specific file while preserving open for others.""" def __init__(self, filepath, contents=''): self.path = filepath self.contents = contents def _setUp(self): self.mock_open = mock.mock_open(read_data=self.contents) self._orig_open = open def replacement_open(name, *args, **kwargs): if name == self.path: return self.mock_open(name, *args, **kwargs) return self._orig_open(name, *args, **kwargs) self._patch = mock.patch('six.moves.builtins.open', new=replacement_open) self._patch.start() self.addCleanup(self._patch.stop) class SafeCleanupFixture(fixtures.Fixture): """Catch errors in daughter fixture cleanup.""" def __init__(self, fixture): self.fixture = fixture def _setUp(self): def cleanUp(): try: self.fixture.cleanUp() except Exception: pass self.fixture.setUp() self.addCleanup(cleanUp) import unittest from neutron.common import utils def setup_mock_calls(mocked_call, expected_calls_and_values): """A convenient method to setup a sequence of mock calls. expected_calls_and_values is a list of (expected_call, return_value): expected_calls_and_values = [ (mock.call(["ovs-vsctl", self.TO, '--', "--may-exist", "add-port", self.BR_NAME, pname]), None), (mock.call(["ovs-vsctl", self.TO, "set", "Interface", pname, "type=gre"]), None), .... ] * expected_call should be mock.call(expected_arg, ....) * return_value is passed to side_effect of a mocked call. A return value or an exception can be specified. """ return_values = [call[1] for call in expected_calls_and_values] mocked_call.side_effect = return_values def verify_mock_calls(mocked_call, expected_calls_and_values, any_order=False): """A convenient method to setup a sequence of mock calls. expected_calls_and_values is a list of (expected_call, return_value): expected_calls_and_values = [ (mock.call(["ovs-vsctl", self.TO, '--', "--may-exist", "add-port", self.BR_NAME, pname]), None), (mock.call(["ovs-vsctl", self.TO, "set", "Interface", pname, "type=gre"]), None), .... ] * expected_call should be mock.call(expected_arg, ....) * return_value is passed to side_effect of a mocked call. A return value or an exception can be specified. """ expected_calls = [call[0] for call in expected_calls_and_values] mocked_call.assert_has_calls(expected_calls, any_order=any_order) def fail(msg=None): """Fail immediately, with the given message. This method is equivalent to TestCase.fail without requiring a testcase instance (usefully for reducing coupling). 
""" raise unittest.TestCase.failureException(msg) class UnorderedList(list): """A list that is equals to any permutation of itself.""" def __eq__(self, other): if not isinstance(other, list): return False return (sorted(self, key=utils.safe_sort_key) == sorted(other, key=utils.safe_sort_key)) def __neq__(self, other): return not self == other def import_modules_recursively(topdir): '''Import and return all modules below the topdir directory.''' modules = [] for root, dirs, files in os.walk(topdir): for file_ in files: if file_[-3:] != '.py': continue module = file_[:-3] if module == '__init__': continue import_base = root.replace('/', '.') # NOTE(ihrachys): in Python3, or when we are not located in the # directory containing neutron code, __file__ is absolute, so we # should truncate it to exclude PYTHONPATH prefix prefixlen = len(os.path.dirname(neutron.__file__)) import_base = 'neutron' + import_base[prefixlen:] module = '.'.join([import_base, module]) if module not in sys.modules: importlib.import_module(module) modules.append(module) for dir_ in dirs: modules.extend(import_modules_recursively(dir_)) return modules def get_random_string(n=10): return ''.join(random.choice(string.ascii_lowercase) for _ in range(n)) def get_random_boolean(): return bool(random.getrandbits(1)) def get_random_integer(range_begin=0, range_end=1000): return random.randint(range_begin, range_end) def get_random_cidr(version=4): if version == 4: return '10.%d.%d.0/%d' % (random.randint(3, 254), random.randint(3, 254), 24) return '2001:db8:%x::/&d' % (random.getrandbits(16), 64) def get_random_mac(): """Generate a random mac address starting with fe:16:3e""" mac = [0xfe, 0x16, 0x3e, random.randint(0x00, 0xff), random.randint(0x00, 0xff), random.randint(0x00, 0xff)] return ':'.join(map(lambda x: "%02x" % x, mac)) def is_bsd(): """Return True on BSD-based systems.""" system = platform.system() if system == 'Darwin': return True if 'bsd' in system.lower(): return True return False def reset_random_seed(): # reset random seed to make sure other processes extracting values from RNG # don't get the same results (useful especially when you then use the # random values to allocate system resources from global pool, like ports # to listen). Use both current time and pid to make sure no tests started # at the same time get the same values from RNG seed = time.time() + os.getpid() random.seed(seed) neutron-8.4.0/neutron/tests/fullstack/0000775000567000056710000000000013044373210021177 5ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/tests/fullstack/test_connectivity.py0000664000567000056710000000702113044372760025337 0ustar jenkinsjenkins00000000000000# Copyright 2015 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from oslo_utils import uuidutils import testscenarios from neutron.common import constants from neutron.tests.fullstack import base from neutron.tests.fullstack.resources import environment from neutron.tests.fullstack.resources import machine load_tests = testscenarios.load_tests_apply_scenarios class BaseConnectivitySameNetworkTest(base.BaseFullStackTestCase): def setUp(self): host_descriptions = [ # There's value in enabling L3 agents registration when l2pop # is enabled, because l2pop code makes assumptions about the # agent types present on machines. environment.HostDescription( l3_agent=self.l2_pop, of_interface=self.of_interface, l2_agent_type=self.l2_agent_type) for _ in range(3)] env = environment.Environment( environment.EnvironmentDescription( network_type=self.network_type, l2_pop=self.l2_pop), host_descriptions) super(BaseConnectivitySameNetworkTest, self).setUp(env) def _test_connectivity(self): tenant_uuid = uuidutils.generate_uuid() network = self.safe_client.create_network(tenant_uuid) self.safe_client.create_subnet( tenant_uuid, network['id'], '20.0.0.0/24') vms = [ self.useFixture( machine.FakeFullstackMachine( self.environment.hosts[i], network['id'], tenant_uuid, self.safe_client)) for i in range(3)] for vm in vms: vm.block_until_boot() vms[0].block_until_ping(vms[1].ip) vms[0].block_until_ping(vms[2].ip) vms[1].block_until_ping(vms[2].ip) class TestOvsConnectivitySameNetwork(BaseConnectivitySameNetworkTest): l2_agent_type = constants.AGENT_TYPE_OVS network_scenarios = [ ('VXLAN', {'network_type': 'vxlan', 'l2_pop': False}), ('GRE and l2pop', {'network_type': 'gre', 'l2_pop': True}), ('VLANs', {'network_type': 'vlan', 'l2_pop': False})] interface_scenarios = [ ('Ofctl', {'of_interface': 'ovs-ofctl'}), ('Native', {'of_interface': 'native'})] scenarios = testscenarios.multiply_scenarios( network_scenarios, interface_scenarios) def test_connectivity(self): self._test_connectivity() class TestLinuxBridgeConnectivitySameNetwork(BaseConnectivitySameNetworkTest): l2_agent_type = constants.AGENT_TYPE_LINUXBRIDGE scenarios = [ ('VXLAN', {'network_type': 'vxlan', 'l2_pop': False}), ('VLANs', {'network_type': 'vlan', 'l2_pop': False}), ('VXLAN and l2pop', {'network_type': 'vxlan', 'l2_pop': True}) ] of_interface = None def test_connectivity(self): self._test_connectivity() neutron-8.4.0/neutron/tests/fullstack/resources/0000775000567000056710000000000013044373210023211 5ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/tests/fullstack/resources/config.py0000664000567000056710000002436613044372760025054 0ustar jenkinsjenkins00000000000000# Copyright 2015 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
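# ---------------------------------------------------------------------------
# Illustrative note (not part of the original tree): in
# test_connectivity.py above, testscenarios.multiply_scenarios() builds the
# cross product of the network and of_interface scenario lists, so the OVS
# test class fans out into 3 x 2 = 6 concrete test variants. A minimal
# sketch of the mechanism, with names copied from that file:
#
#   import testscenarios
#
#   combined = testscenarios.multiply_scenarios(
#       [('VXLAN', {'network_type': 'vxlan', 'l2_pop': False})],
#       [('Ofctl', {'of_interface': 'ovs-ofctl'}),
#        ('Native', {'of_interface': 'native'})])
#   # scenario names are joined with ',', e.g. 'VXLAN,Ofctl'
# ---------------------------------------------------------------------------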
import tempfile import fixtures from neutron.common import constants from neutron.plugins.ml2.extensions import qos as qos_ext from neutron.tests import base from neutron.tests.common import config_fixtures from neutron.tests.common import helpers as c_helpers from neutron.tests.common import net_helpers def _generate_port(): """Get a free TCP port from the Operating System and return it. This might fail if some other process occupies this port after this function finished but before the neutron-server process started. """ return str(net_helpers.get_free_namespace_port( constants.PROTO_NAME_TCP)) class ConfigFixture(fixtures.Fixture): """A fixture that holds an actual Neutron configuration. Note that 'self.config' is intended to only be updated once, during the constructor, so if this fixture is re-used (setUp is called twice), then the dynamic configuration values won't change. The correct usage is initializing a new instance of the class. """ def __init__(self, env_desc, host_desc, temp_dir, base_filename): super(ConfigFixture, self).__init__() self.config = config_fixtures.ConfigDict() self.env_desc = env_desc self.host_desc = host_desc self.temp_dir = temp_dir self.base_filename = base_filename def _setUp(self): cfg_fixture = config_fixtures.ConfigFileFixture( self.base_filename, self.config, self.temp_dir) self.useFixture(cfg_fixture) self.filename = cfg_fixture.filename class NeutronConfigFixture(ConfigFixture): def __init__(self, env_desc, host_desc, temp_dir, connection, rabbitmq_environment): super(NeutronConfigFixture, self).__init__( env_desc, host_desc, temp_dir, base_filename='neutron.conf') service_plugins = ['router'] if env_desc.qos: service_plugins.append('qos') self.config.update({ 'DEFAULT': { 'host': self._generate_host(), 'state_path': self._generate_state_path(self.temp_dir), 'lock_path': '$state_path/lock', 'bind_port': _generate_port(), 'api_paste_config': self._generate_api_paste(), 'policy_file': self._generate_policy_json(), 'core_plugin': 'neutron.plugins.ml2.plugin.Ml2Plugin', 'service_plugins': ','.join(service_plugins), 'auth_strategy': 'noauth', 'verbose': 'True', 'debug': 'True', }, 'database': { 'connection': connection, }, 'oslo_messaging_rabbit': { 'rabbit_userid': rabbitmq_environment.user, 'rabbit_password': rabbitmq_environment.password, 'rabbit_hosts': rabbitmq_environment.host, 'rabbit_virtual_host': rabbitmq_environment.vhost, } }) def _generate_host(self): return base.get_rand_name(prefix='host-') def _generate_state_path(self, temp_dir): # Assume that temp_dir will be removed by the caller self.state_path = tempfile.mkdtemp(prefix='state_path', dir=temp_dir) return self.state_path def _generate_api_paste(self): return c_helpers.find_sample_file('api-paste.ini') def _generate_policy_json(self): return c_helpers.find_sample_file('policy.json') class ML2ConfigFixture(ConfigFixture): def __init__(self, env_desc, host_desc, temp_dir, tenant_network_types): super(ML2ConfigFixture, self).__init__( env_desc, host_desc, temp_dir, base_filename='ml2_conf.ini') mechanism_drivers = 'openvswitch,linuxbridge' if self.env_desc.l2_pop: mechanism_drivers += ',l2population' self.config.update({ 'ml2': { 'tenant_network_types': tenant_network_types, 'mechanism_drivers': mechanism_drivers, }, 'ml2_type_vlan': { 'network_vlan_ranges': 'physnet1:1000:2999', }, 'ml2_type_gre': { 'tunnel_id_ranges': '1:1000', }, 'ml2_type_vxlan': { 'vni_ranges': '1001:2000', }, }) if env_desc.qos: self.config['ml2']['extension_drivers'] =\ qos_ext.QOS_EXT_DRIVER_ALIAS class 
OVSConfigFixture(ConfigFixture): def __init__(self, env_desc, host_desc, temp_dir, local_ip): super(OVSConfigFixture, self).__init__( env_desc, host_desc, temp_dir, base_filename='openvswitch_agent.ini') self.tunneling_enabled = self.env_desc.tunneling_enabled self.config.update({ 'ovs': { 'local_ip': local_ip, 'integration_bridge': self._generate_integration_bridge(), 'of_interface': host_desc.of_interface, }, 'securitygroup': { 'firewall_driver': 'noop', }, 'agent': { 'l2_population': str(self.env_desc.l2_pop), } }) if self.config['ovs']['of_interface'] == 'native': self.config['ovs'].update({ 'of_listen_port': _generate_port()}) if self.tunneling_enabled: self.config['agent'].update({ 'tunnel_types': self.env_desc.network_type}) self.config['ovs'].update({ 'tunnel_bridge': self._generate_tunnel_bridge(), 'int_peer_patch_port': self._generate_int_peer(), 'tun_peer_patch_port': self._generate_tun_peer()}) else: self.config['ovs']['bridge_mappings'] = ( self._generate_bridge_mappings()) if env_desc.qos: self.config['agent']['extensions'] = 'qos' def _generate_bridge_mappings(self): return 'physnet1:%s' % base.get_rand_device_name(prefix='br-eth') def _generate_integration_bridge(self): return base.get_rand_device_name(prefix='br-int') def _generate_tunnel_bridge(self): return base.get_rand_device_name(prefix='br-tun') def _generate_int_peer(self): return base.get_rand_device_name(prefix='patch-tun') def _generate_tun_peer(self): return base.get_rand_device_name(prefix='patch-int') def get_br_int_name(self): return self.config.ovs.integration_bridge def get_br_phys_name(self): return self.config.ovs.bridge_mappings.split(':')[1] def get_br_tun_name(self): return self.config.ovs.tunnel_bridge class LinuxBridgeConfigFixture(ConfigFixture): def __init__(self, env_desc, host_desc, temp_dir, local_ip, physical_device_name): super(LinuxBridgeConfigFixture, self).__init__( env_desc, host_desc, temp_dir, base_filename="linuxbridge_agent.ini" ) self.config.update({ 'VXLAN': { 'enable_vxlan': str(self.env_desc.tunneling_enabled), 'local_ip': local_ip, 'l2_population': str(self.env_desc.l2_pop), } }) if env_desc.qos: self.config.update({ 'AGENT': { 'extensions': 'qos' } }) if self.env_desc.tunneling_enabled: self.config.update({ 'LINUX_BRIDGE': { 'bridge_mappings': self._generate_bridge_mappings( physical_device_name ) } }) else: self.config.update({ 'LINUX_BRIDGE': { 'physical_interface_mappings': self._generate_bridge_mappings( physical_device_name ) } }) def _generate_bridge_mappings(self, device_name): return 'physnet1:%s' % device_name class L3ConfigFixture(ConfigFixture): def __init__(self, env_desc, host_desc, temp_dir, integration_bridge=None): super(L3ConfigFixture, self).__init__( env_desc, host_desc, temp_dir, base_filename='l3_agent.ini') if host_desc.l2_agent_type == constants.AGENT_TYPE_OVS: self._prepare_config_with_ovs_agent(integration_bridge) elif host_desc.l2_agent_type == constants.AGENT_TYPE_LINUXBRIDGE: self._prepare_config_with_linuxbridge_agent() self.config['DEFAULT'].update({ 'debug': 'True', 'verbose': 'True', 'test_namespace_suffix': self._generate_namespace_suffix(), }) def _prepare_config_with_ovs_agent(self, integration_bridge): self.config.update({ 'DEFAULT': { 'l3_agent_manager': ('neutron.agent.l3_agent.' 'L3NATAgentWithStateReport'), 'interface_driver': ('neutron.agent.linux.interface.' 
class L3ConfigFixture(ConfigFixture):

    def __init__(self, env_desc, host_desc, temp_dir, integration_bridge=None):
        super(L3ConfigFixture, self).__init__(
            env_desc, host_desc, temp_dir, base_filename='l3_agent.ini')
        if host_desc.l2_agent_type == constants.AGENT_TYPE_OVS:
            self._prepare_config_with_ovs_agent(integration_bridge)
        elif host_desc.l2_agent_type == constants.AGENT_TYPE_LINUXBRIDGE:
            self._prepare_config_with_linuxbridge_agent()
        self.config['DEFAULT'].update({
            'debug': 'True',
            'verbose': 'True',
            'test_namespace_suffix': self._generate_namespace_suffix(),
        })

    def _prepare_config_with_ovs_agent(self, integration_bridge):
        self.config.update({
            'DEFAULT': {
                'l3_agent_manager': ('neutron.agent.l3_agent.'
                                     'L3NATAgentWithStateReport'),
                'interface_driver': ('neutron.agent.linux.interface.'
                                     'OVSInterfaceDriver'),
                'ovs_integration_bridge': integration_bridge,
                'external_network_bridge': self._generate_external_bridge(),
            }
        })

    def _prepare_config_with_linuxbridge_agent(self):
        self.config.update({
            'DEFAULT': {
                'interface_driver': ('neutron.agent.linux.interface.'
                                     'BridgeInterfaceDriver'),
            }
        })

    def _generate_external_bridge(self):
        return base.get_rand_device_name(prefix='br-ex')

    def get_external_bridge(self):
        return self.config.DEFAULT.external_network_bridge

    def _generate_namespace_suffix(self):
        return base.get_rand_name(prefix='test')
neutron-8.4.0/neutron/tests/fullstack/resources/environment.py0000664000567000056710000003346713044372760026145 0ustar  jenkinsjenkins00000000000000# Copyright 2015 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import random

import fixtures
import netaddr
from neutronclient.common import exceptions as nc_exc
from oslo_config import cfg

from neutron.agent.linux import ip_lib
from neutron.agent.linux import utils
from neutron.common import constants
from neutron.common import utils as common_utils
from neutron.plugins.ml2.drivers.linuxbridge.agent import \
    linuxbridge_neutron_agent as lb_agent
from neutron.tests.common import net_helpers
from neutron.tests.fullstack.resources import config
from neutron.tests.fullstack.resources import process


class EnvironmentDescription(object):
    """A set of characteristics of an environment setup.

    Does the setup, as a whole, support tunneling? How about l2pop?
    """
    def __init__(self, network_type='vxlan', l2_pop=True, qos=False):
        self.network_type = network_type
        self.l2_pop = l2_pop
        self.qos = qos
        self.network_range = None

    @property
    def tunneling_enabled(self):
        return self.network_type in ('vxlan', 'gre')


class HostDescription(object):
    """A set of characteristics of an environment Host.

    What agents should the host spawn? What mode should each agent operate
    under?
    """
    def __init__(self, l3_agent=False, of_interface='ovs-ofctl',
                 l2_agent_type=constants.AGENT_TYPE_OVS):
        self.l2_agent_type = l2_agent_type
        self.l3_agent = l3_agent
        self.of_interface = of_interface
""" def __init__(self, env_desc, host_desc, test_name, neutron_config, central_data_bridge, central_external_bridge): self.env_desc = env_desc self.host_desc = host_desc self.test_name = test_name self.neutron_config = neutron_config # Use reserved class E addresses self.local_ip = self.allocate_local_ip() self.central_data_bridge = central_data_bridge self.central_external_bridge = central_external_bridge self.host_namespace = None self.agents = {} # we need to cache already created "per network" bridges if linuxbridge # agent is used on host: self.network_bridges = {} def _setUp(self): if self.host_desc.l2_agent_type == constants.AGENT_TYPE_OVS: self.setup_host_with_ovs_agent() elif self.host_desc.l2_agent_type == constants.AGENT_TYPE_LINUXBRIDGE: self.setup_host_with_linuxbridge_agent() if self.host_desc.l3_agent: self.l3_agent = self.useFixture( process.L3AgentFixture( self.env_desc, self.host_desc, self.test_name, self.neutron_config, self.l3_agent_cfg_fixture)) def setup_host_with_ovs_agent(self): agent_cfg_fixture = config.OVSConfigFixture( self.env_desc, self.host_desc, self.neutron_config.temp_dir, self.local_ip) self.useFixture(agent_cfg_fixture) if self.env_desc.tunneling_enabled: self.useFixture( net_helpers.OVSBridgeFixture( agent_cfg_fixture.get_br_tun_name())).bridge self.connect_to_internal_network_via_tunneling() else: br_phys = self.useFixture( net_helpers.OVSBridgeFixture( agent_cfg_fixture.get_br_phys_name())).bridge self.connect_to_internal_network_via_vlans(br_phys) self.ovs_agent = self.useFixture( process.OVSAgentFixture( self.env_desc, self.host_desc, self.test_name, self.neutron_config, agent_cfg_fixture)) if self.host_desc.l3_agent: self.l3_agent_cfg_fixture = self.useFixture( config.L3ConfigFixture( self.env_desc, self.host_desc, self.neutron_config.temp_dir, self.ovs_agent.agent_cfg_fixture.get_br_int_name())) br_ex = self.useFixture( net_helpers.OVSBridgeFixture( self.l3_agent_cfg_fixture.get_external_bridge())).bridge self.connect_to_external_network(br_ex) def setup_host_with_linuxbridge_agent(self): #First we need to provide connectivity for agent to prepare proper #bridge mappings in agent's config: self.host_namespace = self.useFixture( net_helpers.NamespaceFixture(prefix="host-") ).name self.connect_namespace_to_control_network() agent_cfg_fixture = config.LinuxBridgeConfigFixture( self.env_desc, self.host_desc, self.neutron_config.temp_dir, self.local_ip, physical_device_name=self.host_port.name ) self.useFixture(agent_cfg_fixture) self.linuxbridge_agent = self.useFixture( process.LinuxBridgeAgentFixture( self.env_desc, self.host_desc, self.test_name, self.neutron_config, agent_cfg_fixture, namespace=self.host_namespace ) ) if self.host_desc.l3_agent: self.l3_agent_cfg_fixture = self.useFixture( config.L3ConfigFixture( self.env_desc, self.host_desc, self.neutron_config.temp_dir)) def _connect_ovs_port(self, cidr_address): ovs_device = self.useFixture( net_helpers.OVSPortFixture( bridge=self.central_data_bridge, namespace=self.host_namespace)).port # NOTE: This sets an IP address on the host's root namespace # which is cleaned up when the device is deleted. 
    def _connect_ovs_port(self, cidr_address):
        ovs_device = self.useFixture(
            net_helpers.OVSPortFixture(
                bridge=self.central_data_bridge,
                namespace=self.host_namespace)).port
        # NOTE: This sets an IP address on the host's root namespace
        # which is cleaned up when the device is deleted.
        ovs_device.addr.add(cidr_address)
        return ovs_device

    def connect_namespace_to_control_network(self):
        self.host_port = self._connect_ovs_port(
            common_utils.ip_to_cidr(self.local_ip, 24)
        )
        self.host_port.link.set_up()

    def connect_to_internal_network_via_tunneling(self):
        veth_1, veth_2 = self.useFixture(
            net_helpers.VethFixture()).ports

        # NOTE: This sets an IP address on the host's root namespace
        # which is cleaned up when the device is deleted.
        veth_1.addr.add(common_utils.ip_to_cidr(self.local_ip, 32))

        veth_1.link.set_up()
        veth_2.link.set_up()

    def connect_to_internal_network_via_vlans(self, host_data_bridge):
        # When using VLANs for segmentation, the provider bridge needs to
        # be connected to a centralized, shared bridge.
        net_helpers.create_patch_ports(
            self.central_data_bridge, host_data_bridge)

    def connect_to_external_network(self, host_external_bridge):
        net_helpers.create_patch_ports(
            self.central_external_bridge, host_external_bridge)

    def allocate_local_ip(self):
        if not self.env_desc.network_range:
            return self.get_random_ip('240.0.0.1', '240.255.255.254')
        return self.get_random_ip(
            str(self.env_desc.network_range[2]),
            str(self.env_desc.network_range[-1])
        )

    def get_bridge(self, network_id):
        if "ovs" in self.agents.keys():
            return self.ovs_agent.br_int
        elif "linuxbridge" in self.agents.keys():
            bridge = self.network_bridges.get(network_id, None)
            if not bridge:
                br_prefix = lb_agent.LinuxBridgeManager.get_bridge_name(
                    network_id)
                bridge = self.useFixture(
                    net_helpers.LinuxBridgeFixture(
                        prefix=br_prefix,
                        namespace=self.host_namespace,
                        prefix_is_full_name=True)).bridge
                self.network_bridges[network_id] = bridge
            return bridge

    @staticmethod
    def get_random_ip(low, high):
        parent_range = netaddr.IPRange(low, high)
        return str(random.choice(parent_range))

    @property
    def hostname(self):
        return self.neutron_config.config.DEFAULT.host

    @property
    def l3_agent(self):
        return self.agents['l3']

    @l3_agent.setter
    def l3_agent(self, agent):
        self.agents['l3'] = agent

    @property
    def ovs_agent(self):
        return self.agents['ovs']

    @ovs_agent.setter
    def ovs_agent(self, agent):
        self.agents['ovs'] = agent

    @property
    def linuxbridge_agent(self):
        return self.agents['linuxbridge']

    @linuxbridge_agent.setter
    def linuxbridge_agent(self, agent):
        self.agents['linuxbridge'] = agent
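
# NOTE(editor): a small, self-contained illustration of the random endpoint
# allocation performed by Host.allocate_local_ip() above. The 240.0.0.0/4
# ("class E") block is reserved address space, so picking per-host tunnel
# endpoints from it avoids colliding with real infrastructure. This helper
# is an editorial sketch, not part of the original module.
def _sketch_pick_class_e_ip():
    parent_range = netaddr.IPRange('240.0.0.1', '240.255.255.254')
    return str(random.choice(parent_range))
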
""" super(Environment, self).__init__() self.env_desc = env_desc self.hosts_desc = hosts_desc self.hosts = [] def wait_until_env_is_up(self): utils.wait_until_true(self._processes_are_ready) def _processes_are_ready(self): try: running_agents = self.neutron_server.client.list_agents()['agents'] agents_count = sum(len(host.agents) for host in self.hosts) return len(running_agents) == agents_count except nc_exc.NeutronClientException: return False def _create_host(self, host_desc): temp_dir = self.useFixture(fixtures.TempDir()).path neutron_config = config.NeutronConfigFixture( self.env_desc, host_desc, temp_dir, cfg.CONF.database.connection, self.rabbitmq_environment) self.useFixture(neutron_config) return self.useFixture( Host(self.env_desc, host_desc, self.test_name, neutron_config, self.central_data_bridge, self.central_external_bridge)) def _setUp(self): self.temp_dir = self.useFixture(fixtures.TempDir()).path #we need this bridge before rabbit and neutron service will start self.central_data_bridge = self.useFixture( net_helpers.OVSBridgeFixture('cnt-data')).bridge self.central_external_bridge = self.useFixture( net_helpers.OVSBridgeFixture('cnt-ex')).bridge #Get rabbitmq address (and cnt-data network) rabbitmq_ip_address = self._configure_port_for_rabbitmq() self.rabbitmq_environment = self.useFixture( process.RabbitmqEnvironmentFixture(host=rabbitmq_ip_address) ) plugin_cfg_fixture = self.useFixture( config.ML2ConfigFixture( self.env_desc, self.hosts_desc, self.temp_dir, self.env_desc.network_type)) neutron_cfg_fixture = self.useFixture( config.NeutronConfigFixture( self.env_desc, None, self.temp_dir, cfg.CONF.database.connection, self.rabbitmq_environment)) self.neutron_server = self.useFixture( process.NeutronServerFixture( self.env_desc, None, self.test_name, neutron_cfg_fixture, plugin_cfg_fixture)) self.hosts = [self._create_host(desc) for desc in self.hosts_desc] self.wait_until_env_is_up() def _configure_port_for_rabbitmq(self): self.env_desc.network_range = self._get_network_range() if not self.env_desc.network_range: return "127.0.0.1" rabbitmq_ip = str(self.env_desc.network_range[1]) rabbitmq_port = ip_lib.IPDevice(self.central_data_bridge.br_name) rabbitmq_port.addr.add(common_utils.ip_to_cidr(rabbitmq_ip, 24)) rabbitmq_port.link.set_up() return rabbitmq_ip def _get_network_range(self): #NOTE(slaweq): We need to choose IP address on which rabbitmq will be # available because LinuxBridge agents are spawned in their own # namespaces and need to know where the rabbitmq server is listening. # For ovs agent it is not necessary because agents are spawned in # globalscope together with rabbitmq server so default localhost # address is fine for them for desc in self.hosts_desc: if desc.l2_agent_type == constants.AGENT_TYPE_LINUXBRIDGE: return self.get_random_network( "240.0.0.0", "240.255.255.255", "24") @staticmethod def get_random_network(low, high, netmask): ip = Host.get_random_ip(low, high) return netaddr.IPNetwork("%s/%s" % (ip, netmask)) neutron-8.4.0/neutron/tests/fullstack/resources/__init__.py0000664000567000056710000000000013044372736025324 0ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/tests/fullstack/resources/client.py0000664000567000056710000001151013044372760025050 0ustar jenkinsjenkins00000000000000# Copyright (c) 2015 Thales Services SAS # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
neutron-8.4.0/neutron/tests/fullstack/resources/__init__.py0000664000567000056710000000000013044372736025324 0ustar  jenkinsjenkins00000000000000neutron-8.4.0/neutron/tests/fullstack/resources/client.py0000664000567000056710000001151013044372760025050 0ustar  jenkinsjenkins00000000000000# Copyright (c) 2015 Thales Services SAS
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#

import functools

import fixtures
from neutronclient.common import exceptions

from neutron.extensions import portbindings
from neutron.tests import base


def _safe_method(f):
    @functools.wraps(f)
    def delete(*args, **kwargs):
        try:
            return f(*args, **kwargs)
        except exceptions.NotFound:
            pass
    return delete


class ClientFixture(fixtures.Fixture):
    """Manage and cleanup neutron resources."""

    def __init__(self, client):
        super(ClientFixture, self).__init__()
        self.client = client

    def _create_resource(self, resource_type, spec):
        create = getattr(self.client, 'create_%s' % resource_type)
        delete = getattr(self.client, 'delete_%s' % resource_type)

        body = {resource_type: spec}
        resp = create(body=body)
        data = resp[resource_type]
        self.addCleanup(_safe_method(delete), data['id'])
        return data

    def create_router(self, tenant_id, name=None, ha=False):
        resource_type = 'router'

        name = name or base.get_rand_name(prefix=resource_type)
        spec = {'tenant_id': tenant_id, 'name': name, 'ha': ha}

        return self._create_resource(resource_type, spec)

    def create_network(self, tenant_id, name=None):
        resource_type = 'network'

        name = name or base.get_rand_name(prefix=resource_type)
        spec = {'tenant_id': tenant_id, 'name': name}

        return self._create_resource(resource_type, spec)

    def create_subnet(self, tenant_id, network_id,
                      cidr, gateway_ip=None, ip_version=4,
                      name=None, enable_dhcp=True):
        resource_type = 'subnet'

        name = name or base.get_rand_name(prefix=resource_type)
        spec = {'tenant_id': tenant_id, 'network_id': network_id,
                'name': name, 'cidr': cidr, 'ip_version': ip_version,
                'enable_dhcp': enable_dhcp}
        if gateway_ip:
            spec['gateway_ip'] = gateway_ip

        return self._create_resource(resource_type, spec)

    def create_port(self, tenant_id, network_id, hostname,
                    qos_policy_id=None):
        spec = {
            'network_id': network_id,
            'tenant_id': tenant_id,
            portbindings.HOST_ID: hostname,
        }
        if qos_policy_id:
            spec['qos_policy_id'] = qos_policy_id
        return self._create_resource('port', spec)

    def add_router_interface(self, router_id, subnet_id):
        body = {'subnet_id': subnet_id}
        self.client.add_interface_router(router=router_id, body=body)
        self.addCleanup(_safe_method(self.client.remove_interface_router),
                        router=router_id, body=body)

    def create_qos_policy(self, tenant_id, name, description, shared):
        policy = self.client.create_qos_policy(
            body={'policy': {'name': name,
                             'description': description,
                             'shared': shared,
                             'tenant_id': tenant_id}})

        def detach_and_delete_policy():
            qos_policy_id = policy['policy']['id']
            ports_with_policy = self.client.list_ports(
                qos_policy_id=qos_policy_id)['ports']
            for port in ports_with_policy:
                self.client.update_port(
                    port['id'],
                    body={'port': {'qos_policy_id': None}})
            self.client.delete_qos_policy(qos_policy_id)

        # NOTE: We'll need to add support for detaching from network once
        # create_network() supports qos_policy_id.
        self.addCleanup(_safe_method(detach_and_delete_policy))
        return policy['policy']

    def create_bandwidth_limit_rule(self, tenant_id, qos_policy_id,
                                    limit=None, burst=None):
        rule = {'tenant_id': tenant_id}
        if limit:
            rule['max_kbps'] = limit
        if burst:
            rule['max_burst_kbps'] = burst
        rule = self.client.create_bandwidth_limit_rule(
            policy=qos_policy_id,
            body={'bandwidth_limit_rule': rule})
        self.addCleanup(_safe_method(self.client.delete_bandwidth_limit_rule),
                        rule['bandwidth_limit_rule']['id'],
                        qos_policy_id)
        return rule['bandwidth_limit_rule']
neutron-8.4.0/neutron/tests/fullstack/resources/process.py0000664000567000056710000002045113044372760025254 0ustar  jenkinsjenkins00000000000000# Copyright 2015 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import datetime
from distutils import spawn
import os
import signal

import fixtures
from neutronclient.common import exceptions as nc_exc
from neutronclient.v2_0 import client
from oslo_log import log as logging

from neutron.agent.linux import async_process
from neutron.agent.linux import utils
from neutron.common import utils as common_utils
from neutron.tests import base
from neutron.tests.common import net_helpers

LOG = logging.getLogger(__name__)

# This is the directory from which infra fetches log files for fullstack tests
DEFAULT_LOG_DIR = '/tmp/dsvm-fullstack-logs/'


class ProcessFixture(fixtures.Fixture):
    def __init__(self, test_name, process_name, exec_name, config_filenames,
                 namespace=None, kill_signal=signal.SIGKILL):
        super(ProcessFixture, self).__init__()
        self.test_name = test_name
        self.process_name = process_name
        self.exec_name = exec_name
        self.config_filenames = config_filenames
        self.process = None
        self.kill_signal = kill_signal
        self.namespace = namespace

    def _setUp(self):
        self.start()
        self.addCleanup(self.stop)

    def start(self):
        test_name = base.sanitize_log_path(self.test_name)

        log_dir = os.path.join(DEFAULT_LOG_DIR, test_name)
        common_utils.ensure_dir(log_dir)

        timestamp = datetime.datetime.now().strftime("%Y-%m-%d--%H-%M-%S-%f")
        log_file = "%s--%s.log" % (self.process_name, timestamp)
        cmd = [spawn.find_executable(self.exec_name),
               '--log-dir', log_dir,
               '--log-file', log_file]
        for filename in self.config_filenames:
            cmd += ['--config-file', filename]
        run_as_root = bool(self.namespace)
        self.process = async_process.AsyncProcess(
            cmd, run_as_root=run_as_root, namespace=self.namespace
        )
        self.process.start(block=True)

    def stop(self):
        self.process.stop(block=True, kill_signal=self.kill_signal)
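
# NOTE(editor): a hedged usage sketch for ProcessFixture. kill_signal
# controls how stop() terminates the child: the SIGKILL default is an
# immediate, non-graceful kill, while NeutronServerFixture below passes
# SIGTERM so the server can shut down cleanly. The config filenames here
# are placeholders.
#
#     self.process_fixture = self.useFixture(ProcessFixture(
#         test_name=self.test_name,
#         process_name='neutron-server',
#         exec_name='neutron-server',
#         config_filenames=[neutron_conf_path, plugin_conf_path],
#         kill_signal=signal.SIGTERM))
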
class RabbitmqEnvironmentFixture(fixtures.Fixture):

    def __init__(self, host="127.0.0.1"):
        super(RabbitmqEnvironmentFixture, self).__init__()
        self.host = host

    def _setUp(self):
        self.user = base.get_rand_name(prefix='user')
        self.password = base.get_rand_name(prefix='pass')
        self.vhost = base.get_rand_name(prefix='vhost')

        self._execute('add_user', self.user, self.password)
        self.addCleanup(self._execute, 'delete_user', self.user)

        self._execute('add_vhost', self.vhost)
        self.addCleanup(self._execute, 'delete_vhost', self.vhost)

        self._execute('set_permissions', '-p', self.vhost, self.user,
                      '.*', '.*', '.*')

    def _execute(self, *args):
        cmd = ['rabbitmqctl']
        cmd.extend(args)
        utils.execute(cmd, run_as_root=True)


class NeutronServerFixture(fixtures.Fixture):

    NEUTRON_SERVER = "neutron-server"

    def __init__(self, env_desc, host_desc,
                 test_name, neutron_cfg_fixture, plugin_cfg_fixture):
        super(NeutronServerFixture, self).__init__()
        self.env_desc = env_desc
        self.host_desc = host_desc
        self.test_name = test_name
        self.neutron_cfg_fixture = neutron_cfg_fixture
        self.plugin_cfg_fixture = plugin_cfg_fixture

    def _setUp(self):
        config_filenames = [self.neutron_cfg_fixture.filename,
                            self.plugin_cfg_fixture.filename]

        self.process_fixture = self.useFixture(ProcessFixture(
            test_name=self.test_name,
            process_name=self.NEUTRON_SERVER,
            exec_name=self.NEUTRON_SERVER,
            config_filenames=config_filenames,
            kill_signal=signal.SIGTERM))

        utils.wait_until_true(self.server_is_live)

    def server_is_live(self):
        try:
            self.client.list_networks()
            return True
        except nc_exc.NeutronClientException:
            return False

    @property
    def client(self):
        url = ("http://127.0.0.1:%s" %
               self.neutron_cfg_fixture.config.DEFAULT.bind_port)
        return client.Client(auth_strategy="noauth", endpoint_url=url)


class OVSAgentFixture(fixtures.Fixture):

    NEUTRON_OVS_AGENT = "neutron-openvswitch-agent"

    def __init__(self, env_desc, host_desc,
                 test_name, neutron_cfg_fixture, agent_cfg_fixture):
        super(OVSAgentFixture, self).__init__()
        self.env_desc = env_desc
        self.host_desc = host_desc
        self.test_name = test_name
        self.neutron_cfg_fixture = neutron_cfg_fixture
        self.neutron_config = self.neutron_cfg_fixture.config
        self.agent_cfg_fixture = agent_cfg_fixture
        self.agent_config = agent_cfg_fixture.config

    def _setUp(self):
        self.br_int = self.useFixture(
            net_helpers.OVSBridgeFixture(
                self.agent_cfg_fixture.get_br_int_name())).bridge

        config_filenames = [self.neutron_cfg_fixture.filename,
                            self.agent_cfg_fixture.filename]

        self.process_fixture = self.useFixture(ProcessFixture(
            test_name=self.test_name,
            process_name=self.NEUTRON_OVS_AGENT,
            exec_name=spawn.find_executable(
                'ovs_agent.py',
                path=os.path.join(base.ROOTDIR, 'common', 'agents')),
            config_filenames=config_filenames))


class LinuxBridgeAgentFixture(fixtures.Fixture):

    NEUTRON_LINUXBRIDGE_AGENT = "neutron-linuxbridge-agent"

    def __init__(self, env_desc, host_desc, test_name,
                 neutron_cfg_fixture, agent_cfg_fixture,
                 namespace=None):
        super(LinuxBridgeAgentFixture, self).__init__()
        self.env_desc = env_desc
        self.host_desc = host_desc
        self.test_name = test_name
        self.neutron_cfg_fixture = neutron_cfg_fixture
        self.neutron_config = self.neutron_cfg_fixture.config
        self.agent_cfg_fixture = agent_cfg_fixture
        self.agent_config = agent_cfg_fixture.config
        self.namespace = namespace

    def _setUp(self):
        config_filenames = [self.neutron_cfg_fixture.filename,
                            self.agent_cfg_fixture.filename]

        self.process_fixture = self.useFixture(
            ProcessFixture(
                test_name=self.test_name,
                process_name=self.NEUTRON_LINUXBRIDGE_AGENT,
                exec_name=self.NEUTRON_LINUXBRIDGE_AGENT,
                config_filenames=config_filenames,
                namespace=self.namespace
            )
        )
class L3AgentFixture(fixtures.Fixture):

    NEUTRON_L3_AGENT = "neutron-l3-agent"

    def __init__(self, env_desc, host_desc, test_name,
                 neutron_cfg_fixture, l3_agent_cfg_fixture,
                 namespace=None):
        super(L3AgentFixture, self).__init__()
        self.env_desc = env_desc
        self.host_desc = host_desc
        self.test_name = test_name
        self.neutron_cfg_fixture = neutron_cfg_fixture
        self.l3_agent_cfg_fixture = l3_agent_cfg_fixture
        self.namespace = namespace

    def _setUp(self):
        self.plugin_config = self.l3_agent_cfg_fixture.config

        config_filenames = [self.neutron_cfg_fixture.filename,
                            self.l3_agent_cfg_fixture.filename]
        self.process_fixture = self.useFixture(
            ProcessFixture(
                test_name=self.test_name,
                process_name=self.NEUTRON_L3_AGENT,
                exec_name=spawn.find_executable(
                    'l3_agent.py',
                    path=os.path.join(base.ROOTDIR, 'common', 'agents')),
                config_filenames=config_filenames,
                namespace=self.namespace
            )
        )

    def get_namespace_suffix(self):
        return self.plugin_config.DEFAULT.test_namespace_suffix
neutron-8.4.0/neutron/tests/fullstack/resources/machine.py0000664000567000056710000000515213044372760025203 0ustar  jenkinsjenkins00000000000000# Copyright 2015 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import netaddr

from neutron.agent.linux import utils
from neutron.tests.common import machine_fixtures
from neutron.tests.common import net_helpers


class FakeFullstackMachine(machine_fixtures.FakeMachineBase):
    def __init__(self, host, network_id, tenant_id, safe_client,
                 neutron_port=None):
        super(FakeFullstackMachine, self).__init__()
        self.host = host
        self.tenant_id = tenant_id
        self.network_id = network_id
        self.safe_client = safe_client
        self.neutron_port = neutron_port

    def _setUp(self):
        super(FakeFullstackMachine, self)._setUp()

        self.bridge = self.host.get_bridge(self.network_id)

        if not self.neutron_port:
            self.neutron_port = self.safe_client.create_port(
                network_id=self.network_id,
                tenant_id=self.tenant_id,
                hostname=self.host.hostname)
        mac_address = self.neutron_port['mac_address']

        self.port = self.useFixture(
            net_helpers.PortFixture.get(
                self.bridge, self.namespace, mac_address,
                self.neutron_port['id'])).port

        self._ip = self.neutron_port['fixed_ips'][0]['ip_address']
        subnet_id = self.neutron_port['fixed_ips'][0]['subnet_id']
        subnet = self.safe_client.client.show_subnet(subnet_id)
        prefixlen = netaddr.IPNetwork(subnet['subnet']['cidr']).prefixlen
        self._ip_cidr = '%s/%s' % (self._ip, prefixlen)

        # TODO(amuller): Support DHCP
        self.port.addr.add(self.ip_cidr)

        self.gateway_ip = subnet['subnet']['gateway_ip']
        if self.gateway_ip:
            net_helpers.set_namespace_gateway(self.port, self.gateway_ip)

    @property
    def ip(self):
        return self._ip

    @property
    def ip_cidr(self):
        return self._ip_cidr

    def block_until_boot(self):
        utils.wait_until_true(
            lambda: (self.safe_client.client.show_port(
                self.neutron_port['id'])['port']['status'] == 'ACTIVE'),
            sleep=3)
neutron-8.4.0/neutron/tests/fullstack/__init__.py0000664000567000056710000000000013044372760023307 0ustar  jenkinsjenkins00000000000000
neutron-8.4.0/neutron/tests/fullstack/test_qos.py0000664000567000056710000001522113044372760023424 0ustar  jenkinsjenkins00000000000000# Copyright 2015 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from oslo_utils import uuidutils
import testscenarios

from neutron.agent.common import ovs_lib
from neutron.agent.linux import bridge_lib
from neutron.agent.linux import tc_lib
from neutron.agent.linux import utils
from neutron.common import constants
from neutron.services.qos import qos_consts
from neutron.tests.fullstack import base
from neutron.tests.fullstack.resources import environment
from neutron.tests.fullstack.resources import machine
from neutron.plugins.ml2.drivers.linuxbridge.agent.common import \
    config as linuxbridge_agent_config
from neutron.plugins.ml2.drivers.linuxbridge.agent import \
    linuxbridge_neutron_agent as linuxbridge_agent
from neutron.plugins.ml2.drivers.openvswitch.mech_driver import \
    mech_openvswitch as mech_ovs


load_tests = testscenarios.load_tests_apply_scenarios

BANDWIDTH_LIMIT = 500
BANDWIDTH_BURST = 100


def _wait_for_rule_applied_ovs_agent(vm, limit, burst):
    utils.wait_until_true(
        lambda: vm.bridge.get_egress_bw_limit_for_port(
            vm.port.name) == (limit, burst))


def _wait_for_rule_applied_linuxbridge_agent(vm, limit, burst):
    port_name = linuxbridge_agent.LinuxBridgeManager.get_tap_device_name(
        vm.neutron_port['id'])
    tc = tc_lib.TcCommand(
        port_name,
        linuxbridge_agent_config.DEFAULT_KERNEL_HZ_VALUE,
        namespace=vm.host.host_namespace
    )
    utils.wait_until_true(
        lambda: tc.get_filters_bw_limits() == (limit, burst))


def _wait_for_rule_applied(vm, limit, burst):
    if isinstance(vm.bridge, ovs_lib.OVSBridge):
        _wait_for_rule_applied_ovs_agent(vm, limit, burst)
    if isinstance(vm.bridge, bridge_lib.BridgeDevice):
        _wait_for_rule_applied_linuxbridge_agent(vm, limit, burst)


def _wait_for_rule_removed(vm):
    # No values are provided when port doesn't have qos policy
    _wait_for_rule_applied(vm, None, None)
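
# NOTE(editor): a minimal sketch of the scenario machinery used below, for
# readers unfamiliar with testscenarios. The load_tests hook at the top of
# this module multiplies every test method by each (name, attrs) pair in
# the class's `scenarios` list, so TestQoSWithL2Agent runs once per L2
# agent type. Illustration only, not part of the original module:
#
#     class _ScenarioSketch(testtools.TestCase):
#         scenarios = [('a', {'value': 1}), ('b', {'value': 2})]
#
#         def test_value(self):      # runs twice: value=1, then value=2
#             self.assertIn(self.value, (1, 2))
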
class TestQoSWithL2Agent(base.BaseFullStackTestCase):

    scenarios = [
        ("ovs", {'l2_agent_type': constants.AGENT_TYPE_OVS}),
        ("linuxbridge", {'l2_agent_type': constants.AGENT_TYPE_LINUXBRIDGE})
    ]

    def setUp(self):
        host_desc = [environment.HostDescription(
            l3_agent=False,
            l2_agent_type=self.l2_agent_type)]
        env_desc = environment.EnvironmentDescription(qos=True)
        env = environment.Environment(env_desc, host_desc)
        super(TestQoSWithL2Agent, self).setUp(env)

    def _create_qos_policy(self):
        return self.safe_client.create_qos_policy(
            self.tenant_id, 'fs_policy', 'Fullstack testing policy',
            shared='False')

    def _prepare_vm_with_qos_policy(self, limit, burst):
        qos_policy = self._create_qos_policy()
        qos_policy_id = qos_policy['id']

        rule = self.safe_client.create_bandwidth_limit_rule(
            self.tenant_id, qos_policy_id, limit, burst)
        # Make it consistent with GET reply
        qos_policy['rules'].append(rule)
        rule['type'] = qos_consts.RULE_TYPE_BANDWIDTH_LIMIT
        rule['qos_policy_id'] = qos_policy_id

        port = self.safe_client.create_port(
            self.tenant_id, self.network['id'],
            self.environment.hosts[0].hostname,
            qos_policy_id)

        vm = self.useFixture(
            machine.FakeFullstackMachine(
                self.environment.hosts[0],
                self.network['id'],
                self.tenant_id,
                self.safe_client,
                neutron_port=port))

        return vm, qos_policy

    def test_qos_policy_rule_lifecycle(self):
        new_limit = BANDWIDTH_LIMIT + 100

        self.tenant_id = uuidutils.generate_uuid()
        self.network = self.safe_client.create_network(self.tenant_id,
                                                       'network-test')
        self.subnet = self.safe_client.create_subnet(
            self.tenant_id, self.network['id'],
            cidr='10.0.0.0/24',
            gateway_ip='10.0.0.1',
            name='subnet-test',
            enable_dhcp=False)

        # Create port with qos policy attached
        vm, qos_policy = self._prepare_vm_with_qos_policy(BANDWIDTH_LIMIT,
                                                          BANDWIDTH_BURST)
        _wait_for_rule_applied(vm, BANDWIDTH_LIMIT, BANDWIDTH_BURST)
        qos_policy_id = qos_policy['id']
        rule = qos_policy['rules'][0]

        # Remove rule from qos policy
        self.client.delete_bandwidth_limit_rule(rule['id'], qos_policy_id)
        _wait_for_rule_removed(vm)

        # Create a new rule with no burst value given; in that case the ovs
        # and lb agents should apply a burst value of
        # bandwidth_limit * qos_consts.DEFAULT_BURST_RATE
        new_expected_burst = int(
            new_limit * qos_consts.DEFAULT_BURST_RATE
        )
        new_rule = self.safe_client.create_bandwidth_limit_rule(
            self.tenant_id, qos_policy_id, new_limit)
        _wait_for_rule_applied(vm, new_limit, new_expected_burst)

        # Update the qos policy rule
        self.client.update_bandwidth_limit_rule(
            new_rule['id'], qos_policy_id,
            body={'bandwidth_limit_rule': {'max_kbps': BANDWIDTH_LIMIT,
                                           'max_burst_kbps': BANDWIDTH_BURST}})
        _wait_for_rule_applied(vm, BANDWIDTH_LIMIT, BANDWIDTH_BURST)

        # Remove qos policy from port
        self.client.update_port(
            vm.neutron_port['id'],
            body={'port': {'qos_policy_id': None}})
        _wait_for_rule_removed(vm)


class TestQoSWithL2Population(base.BaseFullStackTestCase):

    def setUp(self):
        host_desc = []  # No need to register agents for this test case
        env_desc = environment.EnvironmentDescription(qos=True, l2_pop=True)
        env = environment.Environment(env_desc, host_desc)
        super(TestQoSWithL2Population, self).setUp(env)

    def test_supported_qos_rule_types(self):
        res = self.client.list_qos_rule_types()
        rule_types = {t['type'] for t in res['rule_types']}
        expected_rules = (
            set(mech_ovs.OpenvswitchMechanismDriver.supported_qos_rule_types))
        self.assertEqual(expected_rules, rule_types)
neutron-8.4.0/neutron/tests/fullstack/README0000664000567000056710000000003713044372736022073 0ustar  jenkinsjenkins00000000000000Please see neutron/TESTING.rst.neutron-8.4.0/neutron/tests/fullstack/test_l3_agent.py0000664000567000056710000000655313044372760024326 0ustar  jenkinsjenkins00000000000000# Copyright 2015 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import functools

from oslo_utils import uuidutils

from neutron.agent.l3 import agent as l3_agent
from neutron.agent.l3 import namespaces
from neutron.agent.linux import ip_lib
from neutron.agent.linux import utils
from neutron.tests.fullstack import base
from neutron.tests.fullstack.resources import environment


class TestLegacyL3Agent(base.BaseFullStackTestCase):

    def setUp(self):
        host_descriptions = [environment.HostDescription(l3_agent=True)]
        env = environment.Environment(
            environment.EnvironmentDescription(
                network_type='vlan', l2_pop=False),
            host_descriptions)
        super(TestLegacyL3Agent, self).setUp(env)

    def _get_namespace(self, router_id):
        return namespaces.build_ns_name(l3_agent.NS_PREFIX, router_id)

    def _assert_namespace_exists(self, ns_name):
        ip = ip_lib.IPWrapper(ns_name)
        utils.wait_until_true(lambda: ip.netns.exists(ns_name))

    def test_namespace_exists(self):
        tenant_id = uuidutils.generate_uuid()

        router = self.safe_client.create_router(tenant_id)
        network = self.safe_client.create_network(tenant_id)
        subnet = self.safe_client.create_subnet(
            tenant_id, network['id'], '20.0.0.0/24', gateway_ip='20.0.0.1')
        self.safe_client.add_router_interface(router['id'], subnet['id'])

        namespace = "%s@%s" % (
            self._get_namespace(router['id']),
            self.environment.hosts[0].l3_agent.get_namespace_suffix(),
        )
        self._assert_namespace_exists(namespace)
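
# NOTE(editor): an illustrative helper showing how the asserted namespace
# name above is put together: the standard l3-agent namespace name
# ("qrouter-<router_id>") plus the per-test suffix that fullstack injects
# through the test_namespace_suffix option. Sketch only, not part of the
# original module.
def _sketch_full_ns_name(router_id, suffix):
    return "%s@%s" % (
        namespaces.build_ns_name(l3_agent.NS_PREFIX, router_id), suffix)
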
class TestHAL3Agent(base.BaseFullStackTestCase):

    def setUp(self):
        host_descriptions = [
            environment.HostDescription(l3_agent=True) for _ in range(2)]
        env = environment.Environment(
            environment.EnvironmentDescription(
                network_type='vxlan', l2_pop=True),
            host_descriptions)
        super(TestHAL3Agent, self).setUp(env)

    def _is_ha_router_active_on_one_agent(self, router_id):
        agents = self.client.list_l3_agent_hosting_routers(router_id)
        return (
            agents['agents'][0]['ha_state'] !=
            agents['agents'][1]['ha_state'])

    def test_ha_router(self):
        # TODO(amuller): Test external connectivity before and after a
        # failover, see: https://review.openstack.org/#/c/196393/

        tenant_id = uuidutils.generate_uuid()
        router = self.safe_client.create_router(tenant_id, ha=True)
        agents = self.client.list_l3_agent_hosting_routers(router['id'])
        self.assertEqual(2, len(agents['agents']),
                         'HA router must be scheduled to both nodes')

        utils.wait_until_true(
            functools.partial(
                self._is_ha_router_active_on_one_agent,
                router['id']),
            timeout=90)
neutron-8.4.0/neutron/tests/fullstack/base.py0000664000567000056710000000607513044372760022504 0ustar  jenkinsjenkins00000000000000# Copyright 2015 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from oslo_config import cfg
from oslo_db.sqlalchemy import test_base

from neutron.db.migration import cli as migration
from neutron.tests.common import base
from neutron.tests.fullstack.resources import client as client_resource
from neutron.tests import tools


class BaseFullStackTestCase(base.MySQLTestCase):
    """Base test class for full-stack tests."""

    def setUp(self, environment):
        super(BaseFullStackTestCase, self).setUp()

        # NOTE(ihrachys): seed should be reset before environment fixture
        # below since the latter starts services that may rely on generated
        # port numbers
        tools.reset_random_seed()

        self.create_db_tables()
        self.environment = environment
        self.environment.test_name = self.get_name()
        self.useFixture(self.environment)
        self.client = self.environment.neutron_server.client
        self.safe_client = self.useFixture(
            client_resource.ClientFixture(self.client))

    def get_name(self):
        class_name, test_name = self.id().split(".")[-2:]
        return "%s.%s" % (class_name, test_name)

    def create_db_tables(self):
        """Populate the new database.

        MySQLTestCase creates a new database for each test, but these need
        to be populated with the appropriate tables. Before we can do that,
        we must change the 'connection' option which the Neutron code knows
        to look at.

        Currently, the username and password options are hard-coded by
        oslo.db and neutron/tests/functional/contrib/gate_hook.sh. Also,
        we only support MySQL for now, but the groundwork for adding
        Postgres is already laid.
        """
        conn = ("mysql+pymysql://%(username)s:%(password)s"
                "@127.0.0.1/%(db_name)s" % {
                    'username': test_base.DbFixture.USERNAME,
                    'password': test_base.DbFixture.PASSWORD,
                    'db_name': self.engine.url.database})

        alembic_config = migration.get_neutron_config()
        alembic_config.neutron_config = cfg.CONF
        self.original_conn = cfg.CONF.database.connection
        self.addCleanup(self._revert_connection_address)
        cfg.CONF.set_override('connection', conn, group='database')

        migration.do_alembic_command(alembic_config, 'upgrade', 'heads')

    def _revert_connection_address(self):
        cfg.CONF.set_override('connection',
                              self.original_conn,
                              group='database')
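
# NOTE(editor): for clarity, the URL assembled by create_db_tables() above
# has this shape (values are illustrative; the real username/password come
# from oslo.db's DbFixture and the db_name is generated per test):
#
#     mysql+pymysql://openstack_citest:openstack_citest@127.0.0.1/<db_name>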
neutron-8.4.0/neutron/tests/contrib/0000775000567000056710000000000013044373210020647 5ustar  jenkinsjenkins00000000000000neutron-8.4.0/neutron/tests/contrib/functional-testing.filters0000664000567000056710000000346613044372760026100 0ustar  jenkinsjenkins00000000000000# neutron-rootwrap command filters to support functional testing.  It
# is NOT intended to be used outside of a test environment.
#
# This file should be owned by (and only-writeable by) the root user

[Filters]

# enable ping from namespace
ping_filter: CommandFilter, ping, root
ping6_filter: CommandFilter, ping6, root
ping_kill: KillFilter, root, ping, -2

# enable curl from namespace
curl_filter: RegExpFilter, /usr/bin/curl, root, curl, --max-time, \d+, -D-, http://[0-9a-z:./-]+
nc_filter: CommandFilter, nc, root

# netcat has different binaries depending on linux distribution
nc_kill: KillFilter, root, nc, -9
ncbsd_kill: KillFilter, root, nc.openbsd, -9
ncat_kill: KillFilter, root, ncat, -9
ss_filter: CommandFilter, ss, root

# enable neutron-linuxbridge-cleanup from namespace
lb_cleanup_filter: RegExpFilter, neutron-linuxbridge-cleanup, root, neutron-linuxbridge-cleanup, --config-file, .*

# enable dhclient from namespace
dhclient_filter: CommandFilter, dhclient, root
dhclient_kill: KillFilter, root, dhclient, -9

# dhclient is used to test the dhcp-agent and runs in the dhcp-agent
# namespace. If resolv.conf does not exist in that namespace, dhclient
# will override the system /etc/resolv.conf. The filters below restrict
# mkdir, rm and touch so that they can only create and delete the
# resolv.conf file in that namespace.
mkdir_filter: RegExpFilter, /bin/mkdir, root, mkdir, -p, /etc/netns/qdhcp-[0-9a-z./-]+
rm_filter: RegExpFilter, /bin/rm, root, rm, -r, /etc/netns/qdhcp-[0-9a-z./-]+
touch_filter: RegExpFilter, /bin/touch, root, touch, /etc/netns/qdhcp-[0-9a-z./-]+/resolv.conf
touch_filter2: RegExpFilter, /usr/bin/touch, root, touch, /etc/netns/qdhcp-[0-9a-z./-]+/resolv.conf

# needed for TestGetRootHelperChildPid
bash_filter: RegExpFilter, /bin/bash, root, bash, -c, \(sleep 100\)
sleep_kill: KillFilter, root, sleep, -9
neutron-8.4.0/neutron/tests/contrib/README0000664000567000056710000000020413044372736021537 0ustar  jenkinsjenkins00000000000000The files in this directory are intended for use by the Neutron infra jobs
that run the various functional test suites in the gate.
neutron-8.4.0/neutron/tests/contrib/gate_hook.sh0000664000567000056710000000434413044372760023161 0ustar  jenkinsjenkins00000000000000#!/usr/bin/env bash

set -ex

VENV=${1:-"dsvm-functional"}

GATE_DEST=$BASE/new
DEVSTACK_PATH=$GATE_DEST/devstack

if [ "$VENV" == "dsvm-functional" ] || [ "$VENV" == "dsvm-fullstack" ]
then
    # The following need to be set before sourcing
    # configure_for_func_testing.
    GATE_STACK_USER=stack
    NEUTRON_PATH=$GATE_DEST/neutron
    PROJECT_NAME=neutron
    IS_GATE=True

    source $DEVSTACK_PATH/functions
    source $NEUTRON_PATH/devstack/lib/ovs
    source $NEUTRON_PATH/tools/configure_for_func_testing.sh

    configure_host_for_func_testing

    if [[ "$VENV" =~ "dsvm-functional" ]]; then
        # The OVS_BRANCH variable is used by git checkout. In the case below
        # we use a commit on branch-2.5 that fixes compilation with the
        # latest ubuntu trusty kernel.
        OVS_BRANCH="a35342879f1a7d8b1503d4945bd0791c58f5fc87"
        for package in openvswitch openvswitch-switch openvswitch-common; do
            if is_package_installed $package; then
                uninstall_package $package
            fi
        done
        compile_ovs True /usr /var
        start_new_ovs
    fi

    # Make the workspace owned by the stack user
    sudo chown -R $STACK_USER:$STACK_USER $BASE

elif [ "$VENV" == "api" -o "$VENV" == "api-pecan" -o "$VENV" == "full-pecan" ]
then
    if [ "$VENV" == "api-pecan" -o "$VENV" == "full-pecan" ]
    then
        cat >> $DEVSTACK_PATH/local.conf < ./testrepository.subunit
        $SCRIPTS_DIR/subunit2html ./testrepository.subunit testr_results.html
        gzip -9 ./testrepository.subunit
        gzip -9 ./testr_results.html
        sudo mv ./*.gz /opt/stack/logs/
    fi

    if [[ "$venv" == dsvm-functional* ]] || [[ "$venv" == dsvm-fullstack* ]]
    then
        generate_test_logs $log_dir
    fi
}

if [ "$venv" == "api-pecan" ]; then
    # api-pecan is the same as the regular api job
    venv='api'
fi

if [[ "$venv" == dsvm-functional* ]] || [[ "$venv" == dsvm-fullstack* ]]
then
    owner=stack
    sudo_env=
    log_dir="/tmp/${venv}-logs"
elif [ "$venv" == "api" ]
then
    owner=tempest
    # Configure the api tests to use the tempest.conf set by devstack.
    sudo_env="TEMPEST_CONFIG_DIR=$TEMPEST_DIR/etc"
fi

# Set owner permissions according to job's requirements.
cd $NEUTRON_DIR
sudo chown -R $owner:stack $NEUTRON_DIR

# Run tests
echo "Running neutron $venv test suite"
set +e
sudo -H -u $owner $sudo_env tox -e $venv
testr_exit_code=$?
set -e # Collect and parse results generate_testr_results exit $testr_exit_code neutron-8.4.0/neutron/tests/__init__.py0000664000567000056710000000115313044372760021331 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron.common import eventlet_utils eventlet_utils.monkey_patch() neutron-8.4.0/neutron/tests/var/0000775000567000056710000000000013044373210017777 5ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/tests/var/certandkey.pem0000664000567000056710000001175513044372736022660 0ustar jenkinsjenkins00000000000000-----BEGIN CERTIFICATE----- MIIFLjCCAxYCAQEwDQYJKoZIhvcNAQEFBQAwYTELMAkGA1UEBhMCQVUxEzARBgNV BAgTClNvbWUtU3RhdGUxFTATBgNVBAoTDE9wZW5zdGFjayBDQTESMBAGA1UECxMJ R2xhbmNlIENBMRIwEAYDVQQDEwlHbGFuY2UgQ0EwHhcNMTIwMjA5MTcxMDUzWhcN MjIwMjA2MTcxMDUzWjBZMQswCQYDVQQGEwJBVTETMBEGA1UECBMKU29tZS1TdGF0 ZTESMBAGA1UEChMJT3BlbnN0YWNrMQ8wDQYDVQQLEwZHbGFuY2UxEDAOBgNVBAMT BzAuMC4wLjAwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQDXpUkQN6pu avo+gz3o1K4krVdPl1m7NjNJDyD/+ZH0EGNcEN7iag1qPE7JsjqGPNZsQK1dMoXb Sz+OSi9qvNeJnBcfwUx5qTAtwyAb9AxGkwuMafIU+lWbsclo+dPGsja01ywbXTCZ bF32iqnpOMYhfxWUdoQYiBkhxxhW9eMPKLS/KkP8/bx+Vaa2XJiAebqkd9nrksAA BeGc9mlafYBEmiChPdJEPw+1ePA4QVq9aPepDsqAKtGN8JLpmoC3BdxQQTbbwL3Q 8fTXK4tCNUaVk4AbDy/McFq6y0ocQoBPJjihOY35mWG/OLtcI99yPOpWGnps/5aG /64DDJ2D67Fnaj6gKHV+6TXFO8KZxlnxtgtiZDJBZkneTBt9ArSOv+l6NBsumRz0 iEJ4o4H1S2TSMnprAvX7WnGtc6Xi9gXahYcDHEelwwYzqAiTBv6hxSp4MZ2dNXa+ KzOitC7ZbV2qsg0au0wjfE/oSQ3NvsvUr8nOmfutJTvHRAwbC1v4G/tuAsO7O0w2 0u2B3u+pG06m5+rnEqp+rB9hmukRYTfgEFRRsVIvpFl/cwvPXKRcX03UIMx+lLr9 Ft+ep7YooBhY3wY2kwCxD4lRYNmbwsCIVywZt40f/4ad98TkufR9NhsfycxGeqbr mTMFlZ8TTlmP82iohekKCOvoyEuTIWL2+wIDAQABMA0GCSqGSIb3DQEBBQUAA4IC AQBMUBgV0R+Qltf4Du7u/8IFmGAoKR/mktB7R1gRRAqsvecUt7kIwBexGdavGg1y 0pU0+lgUZjJ20N1SlPD8gkNHfXE1fL6fmMjWz4dtYJjzRVhpufHPeBW4tl8DgHPN rBGAYQ+drDSXaEjiPQifuzKx8WS+DGA3ki4co5mPjVnVH1xvLIdFsk89z3b3YD1k yCJ/a9K36x6Z/c67JK7s6MWtrdRF9+MVnRKJ2PK4xznd1kBz16V+RA466wBDdARY vFbtkafbEqOb96QTonIZB7+fAldKDPZYnwPqasreLmaGOaM8sxtlPYAJ5bjDONbc AaXG8BMRQyO4FyH237otDKlxPyHOFV66BaffF5S8OlwIMiZoIvq+IcTZOdtDUSW2 KHNLfe5QEDZdKjWCBrfqAfvNuG13m03WqfmcMHl3o/KiPJlx8l9Z4QEzZ9xcyQGL cncgeHM9wJtzi2cD/rTDNFsx/gxvoyutRmno7I3NRbKmpsXF4StZioU3USRspB07 hYXOVnG3pS+PjVby7ThT3gvFHSocguOsxClx1epdUJAmJUbmM7NmOp5WVBVtMtC2 Su4NG/xJciXitKzw+btb7C7RjO6OEqv/1X/oBDzKBWQAwxUC+lqmnM7W6oqWJFEM YfTLnrjs7Hj6ThMGcEnfvc46dWK3dz0RjsQzUxugPuEkLA== -----END CERTIFICATE----- -----BEGIN RSA PRIVATE KEY----- MIIJKAIBAAKCAgEA16VJEDeqbmr6PoM96NSuJK1XT5dZuzYzSQ8g//mR9BBjXBDe 4moNajxOybI6hjzWbECtXTKF20s/jkovarzXiZwXH8FMeakwLcMgG/QMRpMLjGny FPpVm7HJaPnTxrI2tNcsG10wmWxd9oqp6TjGIX8VlHaEGIgZIccYVvXjDyi0vypD /P28flWmtlyYgHm6pHfZ65LAAAXhnPZpWn2ARJogoT3SRD8PtXjwOEFavWj3qQ7K gCrRjfCS6ZqAtwXcUEE228C90PH01yuLQjVGlZOAGw8vzHBaustKHEKATyY4oTmN +Zlhvzi7XCPfcjzqVhp6bP+Whv+uAwydg+uxZ2o+oCh1fuk1xTvCmcZZ8bYLYmQy QWZJ3kwbfQK0jr/pejQbLpkc9IhCeKOB9Utk0jJ6awL1+1pxrXOl4vYF2oWHAxxH pcMGM6gIkwb+ocUqeDGdnTV2viszorQu2W1dqrINGrtMI3xP6EkNzb7L1K/Jzpn7 rSU7x0QMGwtb+Bv7bgLDuztMNtLtgd7vqRtOpufq5xKqfqwfYZrpEWE34BBUUbFS 
L6RZf3MLz1ykXF9N1CDMfpS6/Rbfnqe2KKAYWN8GNpMAsQ+JUWDZm8LAiFcsGbeN H/+GnffE5Ln0fTYbH8nMRnqm65kzBZWfE05Zj/NoqIXpCgjr6MhLkyFi9vsCAwEA AQKCAgAA96baQcWr9SLmQOR4NOwLEhQAMWefpWCZhU3amB4FgEVR1mmJjnw868RW t0v36jH0Dl44us9K6o2Ab+jCi9JTtbWM2Osk6JNkwSlVtsSPVH2KxbbmTTExH50N sYE3tPj12rlB7isXpRrOzlRwzWZmJBHOtrFlAsdKFYCQc03vdXlKGkBv1BuSXYP/ 8W5ltSYXMspxehkOZvhaIejbFREMPbzDvGlDER1a7Q320qQ7kUr7ISvbY1XJUzj1 f1HwgEA6w/AhED5Jv6wfgvx+8Yo9hYnflTPbsO1XRS4x7kJxGHTMlFuEsSF1ICYH Bcos0wUiGcBO2N6uAFuhe98BBn+nOwAPZYWwGkmVuK2psm2mXAHx94GT/XqgK/1r VWGSoOV7Fhjauc2Nv8/vJU18DXT3OY5hc4iXVeEBkuZwRb/NVUtnFoHxVO/Mp5Fh /W5KZaLWVrLghzvSQ/KUIM0k4lfKDZpY9ZpOdNgWDyZY8tNrXumUZZimzWdXZ9vR dBssmd8qEKs1AHGFnMDt56IjLGou6j0qnWsLdR1e/WEFsYzGXLVHCv6vXRNkbjqh WFw5nA+2Dw1YAsy+YkTfgx2pOe+exM/wxsVPa7tG9oZ374dywUi1k6VoHw5dkmJw 1hbXqSLZtx2N51G+SpGmNAV4vLUF0y3dy2wnrzFkFT4uxh1w8QKCAQEA+h6LwHTK hgcJx6CQQ6zYRqXo4wdvMooY1FcqJOq7LvJUA2CX5OOLs8qN1TyFrOCuAUTurOrM ABlQ0FpsIaP8TOGz72dHe2eLB+dD6Bqjn10sEFMn54zWd/w9ympQrO9jb5X3ViTh sCcdYyXVS9Hz8nzbbIF+DaKlxF2Hh71uRDxXpMPxRcGbOIuKZXUj6RkTIulzqT6o uawlegWxch05QSgzq/1ASxtjTzo4iuDCAii3N45xqxnB+fV9NXEt4R2oOGquBRPJ LxKcOnaQKBD0YNX4muTq+zPlv/kOb8/ys2WGWDUrNkpyJXqhTve4KONjqM7+iL/U 4WdJuiCjonzk/QKCAQEA3Lc+kNq35FNLxMcnCVcUgkmiCWZ4dyGZZPdqjOPww1+n bbudGPzY1nxOvE60dZM4or/tm6qlXYfb2UU3+OOJrK9s297EQybZ8DTZu2GHyitc NSFV3Gl4cgvKdbieGKkk9X2dV9xSNesNvX9lJEnQxuwHDTeo8ubLHtV88Ml1xokn 7W+IFiyEuUIL4e5/fadbrI3EwMrbCF4+9VcfABx4PTNMzdc8LsncCMXE+jFX8AWp TsT2JezTe5o2WpvBoKMAYhJQNQiaWATn00pDVY/70H1vK3ljomAa1IUdOr/AhAF7 3jL0MYMgXSHzXZOKAtc7yf+QfFWF1Ls8+sen1clJVwKCAQEAp59rB0r+Iz56RmgL 5t7ifs5XujbURemY5E2aN+18DuVmenD0uvfoO1DnJt4NtCNLWhxpXEdq+jH9H/VJ fG4a+ydT4IC1vjVRTrWlo9qeh4H4suQX3S1c2kKY4pvHf25blH/Lp9bFzbkZD8Ze IRcOxxb4MsrBwL+dGnGYD9dbG63ZCtoqSxaKQSX7VS1hKKmeUopj8ivFBdIht5oz JogBQ/J+Vqg9u1gagRFCrYgdXTcOOtRix0lW336vL+6u0ax/fXe5MjvlW3+8Zc3p pIBgVrlvh9ccx8crFTIDg9m4DJRgqaLQV+0ifI2np3WK3RQvSQWYPetZ7sm69ltD bvUGvQKCAQAz5CEhjUqOs8asjOXwnDiGKSmfbCgGWi/mPQUf+rcwN9z1P5a/uTKB utgIDbj/q401Nkp2vrgCNV7KxitSqKxFnTjKuKUL5KZ4gvRtyZBTR751/1BgcauP pJYE91K0GZBG5zGG5pWtd4XTd5Af5/rdycAeq2ddNEWtCiRFuBeohbaNbBtimzTZ GV4R0DDJKf+zoeEQMqEsZnwG0mTHceoS+WylOGU92teQeG7HI7K5C5uymTwFzpgq ByegRd5QFgKRDB0vWsZuyzh1xI/wHdnmOpdYcUGre0zTijhFB7ALWQ32P6SJv3ps av78kSNxZ4j3BM7DbJf6W8sKasZazOghAoIBAHekpBcLq9gRv2+NfLYxWN2sTZVB 1ldwioG7rWvk5YQR2akukecI3NRjtC5gG2vverawG852Y4+oLfgRMHxgp0qNStwX juTykzPkCwZn8AyR+avC3mkrtJyM3IigcYOu4/UoaRDFa0xvCC1EfumpnKXIpHag miSQZf2sVbgqb3/LWvHIg/ceOP9oGJve87/HVfQtBoLaIe5RXCWkqB7mcI/exvTS 8ShaW6v2Fe5Bzdvawj7sbsVYRWe93Aq2tmIgSX320D2RVepb6mjD4nr0IUaM3Yed TFT7e2ikWXyDLLgVkDTU4Qe8fr3ZKGfanCIDzvgNw6H1gRi+2WQgOmjilMQ= -----END RSA PRIVATE KEY----- neutron-8.4.0/neutron/tests/var/certificate.crt0000664000567000056710000000350213044372736023007 0ustar jenkinsjenkins00000000000000-----BEGIN CERTIFICATE----- MIIFLjCCAxYCAQEwDQYJKoZIhvcNAQEFBQAwYTELMAkGA1UEBhMCQVUxEzARBgNV BAgTClNvbWUtU3RhdGUxFTATBgNVBAoTDE9wZW5zdGFjayBDQTESMBAGA1UECxMJ R2xhbmNlIENBMRIwEAYDVQQDEwlHbGFuY2UgQ0EwHhcNMTIwMjA5MTcxMDUzWhcN MjIwMjA2MTcxMDUzWjBZMQswCQYDVQQGEwJBVTETMBEGA1UECBMKU29tZS1TdGF0 ZTESMBAGA1UEChMJT3BlbnN0YWNrMQ8wDQYDVQQLEwZHbGFuY2UxEDAOBgNVBAMT BzAuMC4wLjAwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQDXpUkQN6pu avo+gz3o1K4krVdPl1m7NjNJDyD/+ZH0EGNcEN7iag1qPE7JsjqGPNZsQK1dMoXb Sz+OSi9qvNeJnBcfwUx5qTAtwyAb9AxGkwuMafIU+lWbsclo+dPGsja01ywbXTCZ bF32iqnpOMYhfxWUdoQYiBkhxxhW9eMPKLS/KkP8/bx+Vaa2XJiAebqkd9nrksAA BeGc9mlafYBEmiChPdJEPw+1ePA4QVq9aPepDsqAKtGN8JLpmoC3BdxQQTbbwL3Q 8fTXK4tCNUaVk4AbDy/McFq6y0ocQoBPJjihOY35mWG/OLtcI99yPOpWGnps/5aG 
/64DDJ2D67Fnaj6gKHV+6TXFO8KZxlnxtgtiZDJBZkneTBt9ArSOv+l6NBsumRz0 iEJ4o4H1S2TSMnprAvX7WnGtc6Xi9gXahYcDHEelwwYzqAiTBv6hxSp4MZ2dNXa+ KzOitC7ZbV2qsg0au0wjfE/oSQ3NvsvUr8nOmfutJTvHRAwbC1v4G/tuAsO7O0w2 0u2B3u+pG06m5+rnEqp+rB9hmukRYTfgEFRRsVIvpFl/cwvPXKRcX03UIMx+lLr9 Ft+ep7YooBhY3wY2kwCxD4lRYNmbwsCIVywZt40f/4ad98TkufR9NhsfycxGeqbr mTMFlZ8TTlmP82iohekKCOvoyEuTIWL2+wIDAQABMA0GCSqGSIb3DQEBBQUAA4IC AQBMUBgV0R+Qltf4Du7u/8IFmGAoKR/mktB7R1gRRAqsvecUt7kIwBexGdavGg1y 0pU0+lgUZjJ20N1SlPD8gkNHfXE1fL6fmMjWz4dtYJjzRVhpufHPeBW4tl8DgHPN rBGAYQ+drDSXaEjiPQifuzKx8WS+DGA3ki4co5mPjVnVH1xvLIdFsk89z3b3YD1k yCJ/a9K36x6Z/c67JK7s6MWtrdRF9+MVnRKJ2PK4xznd1kBz16V+RA466wBDdARY vFbtkafbEqOb96QTonIZB7+fAldKDPZYnwPqasreLmaGOaM8sxtlPYAJ5bjDONbc AaXG8BMRQyO4FyH237otDKlxPyHOFV66BaffF5S8OlwIMiZoIvq+IcTZOdtDUSW2 KHNLfe5QEDZdKjWCBrfqAfvNuG13m03WqfmcMHl3o/KiPJlx8l9Z4QEzZ9xcyQGL cncgeHM9wJtzi2cD/rTDNFsx/gxvoyutRmno7I3NRbKmpsXF4StZioU3USRspB07 hYXOVnG3pS+PjVby7ThT3gvFHSocguOsxClx1epdUJAmJUbmM7NmOp5WVBVtMtC2 Su4NG/xJciXitKzw+btb7C7RjO6OEqv/1X/oBDzKBWQAwxUC+lqmnM7W6oqWJFEM YfTLnrjs7Hj6ThMGcEnfvc46dWK3dz0RjsQzUxugPuEkLA== -----END CERTIFICATE----- neutron-8.4.0/neutron/tests/var/ca.crt0000664000567000056710000000415713044372736021117 0ustar jenkinsjenkins00000000000000-----BEGIN CERTIFICATE----- MIIGDDCCA/SgAwIBAgIJAPSvwQYk4qI4MA0GCSqGSIb3DQEBBQUAMGExCzAJBgNV BAYTAkFVMRMwEQYDVQQIEwpTb21lLVN0YXRlMRUwEwYDVQQKEwxPcGVuc3RhY2sg Q0ExEjAQBgNVBAsTCUdsYW5jZSBDQTESMBAGA1UEAxMJR2xhbmNlIENBMB4XDTEy MDIwOTE3MTAwMloXDTIyMDIwNjE3MTAwMlowYTELMAkGA1UEBhMCQVUxEzARBgNV BAgTClNvbWUtU3RhdGUxFTATBgNVBAoTDE9wZW5zdGFjayBDQTESMBAGA1UECxMJ R2xhbmNlIENBMRIwEAYDVQQDEwlHbGFuY2UgQ0EwggIiMA0GCSqGSIb3DQEBAQUA A4ICDwAwggIKAoICAQDmf+fapWfzy1Uylus0KGalw4X/5xZ+ltPVOr+IdCPbstvi RTC5g+O+TvXeOP32V/cnSY4ho/+f2q730za+ZA/cgWO252rcm3Q7KTJn3PoqzJvX /l3EXe3/TCrbzgZ7lW3QLTCTEE2eEzwYG3wfDTOyoBq+F6ct6ADh+86gmpbIRfYI N+ixB0hVyz9427PTof97fL7qxxkjAayB28OfwHrkEBl7iblNhUC0RoH+/H9r5GEl GnWiebxfNrONEHug6PHgiaGq7/Dj+u9bwr7J3/NoS84I08ajMnhlPZxZ8bS/O8If ceWGZv7clPozyhABT/otDfgVcNH1UdZ4zLlQwc1MuPYN7CwxrElxc8Quf94ttGjb tfGTl4RTXkDofYdG1qBWW962PsGl2tWmbYDXV0q5JhV/IwbrE1X9f+OksJQne1/+ dZDxMhdf2Q1V0P9hZZICu4+YhmTMs5Mc9myKVnzp4NYdX5fXoB/uNYph+G7xG5IK WLSODKhr1wFGTTcuaa8LhOH5UREVenGDJuc6DdgX9a9PzyJGIi2ngQ03TJIkCiU/ 4J/r/vsm81ezDiYZSp2j5JbME+ixW0GBLTUWpOIxUSHgUFwH5f7lQwbXWBOgwXQk BwpZTmdQx09MfalhBtWeu4/6BnOCOj7e/4+4J0eVxXST0AmVyv8YjJ2nz1F9oQID AQABo4HGMIHDMB0GA1UdDgQWBBTk7Krj4bEsTjHXaWEtI2GZ5ACQyTCBkwYDVR0j BIGLMIGIgBTk7Krj4bEsTjHXaWEtI2GZ5ACQyaFlpGMwYTELMAkGA1UEBhMCQVUx EzARBgNVBAgTClNvbWUtU3RhdGUxFTATBgNVBAoTDE9wZW5zdGFjayBDQTESMBAG A1UECxMJR2xhbmNlIENBMRIwEAYDVQQDEwlHbGFuY2UgQ0GCCQD0r8EGJOKiODAM BgNVHRMEBTADAQH/MA0GCSqGSIb3DQEBBQUAA4ICAQA8Zrss/MiwFHGmDlercE0h UvzA54n/EvKP9nP3jHM2qW/VPfKdnFw99nEPFLhb+lN553vdjOpCYFm+sW0Z5Mi4 qsFkk4AmXIIEFOPt6zKxMioLYDQ9Sw/BUv6EZGeANWr/bhmaE+dMcKJt5le/0jJm 2ahsVB9fbFu9jBFeYb7Ba/x2aLkEGMxaDLla+6EQhj148fTnS1wjmX9G2cNzJvj/ +C2EfKJIuDJDqw2oS2FGVpP37FA2Bz2vga0QatNneLkGKCFI3ZTenBznoN+fmurX TL3eJE4IFNrANCcdfMpdyLAtXz4KpjcehqpZMu70er3d30zbi1l0Ajz4dU+WKz/a NQES+vMkT2wqjXHVTjrNwodxw3oLK/EuTgwoxIHJuplx5E5Wrdx9g7Gl1PBIJL8V xiOYS5N7CakyALvdhP7cPubA2+TPAjNInxiAcmhdASS/Vrmpvrkat6XhGn8h9liv ysDOpMQmYQkmgZBpW8yBKK7JABGGsJADJ3E6J5MMWBX2RR4kFoqVGAzdOU3oyaTy I0kz5sfuahaWpdYJVlkO+esc0CRXw8fLDYivabK2tOgUEWeZsZGZ9uK6aV1VxTAY 9Guu3BJ4Rv/KP/hk7mP8rIeCwotV66/2H8nq72ImQhzSVyWcxbFf2rJiFQJ3BFwA WoRMgEwjGJWqzhJZUYpUAQ== -----END CERTIFICATE----- neutron-8.4.0/neutron/tests/var/privatekey.key0000664000567000056710000000625313044372736022716 0ustar jenkinsjenkins00000000000000-----BEGIN RSA PRIVATE 
KEY----- MIIJKAIBAAKCAgEA16VJEDeqbmr6PoM96NSuJK1XT5dZuzYzSQ8g//mR9BBjXBDe 4moNajxOybI6hjzWbECtXTKF20s/jkovarzXiZwXH8FMeakwLcMgG/QMRpMLjGny FPpVm7HJaPnTxrI2tNcsG10wmWxd9oqp6TjGIX8VlHaEGIgZIccYVvXjDyi0vypD /P28flWmtlyYgHm6pHfZ65LAAAXhnPZpWn2ARJogoT3SRD8PtXjwOEFavWj3qQ7K gCrRjfCS6ZqAtwXcUEE228C90PH01yuLQjVGlZOAGw8vzHBaustKHEKATyY4oTmN +Zlhvzi7XCPfcjzqVhp6bP+Whv+uAwydg+uxZ2o+oCh1fuk1xTvCmcZZ8bYLYmQy QWZJ3kwbfQK0jr/pejQbLpkc9IhCeKOB9Utk0jJ6awL1+1pxrXOl4vYF2oWHAxxH pcMGM6gIkwb+ocUqeDGdnTV2viszorQu2W1dqrINGrtMI3xP6EkNzb7L1K/Jzpn7 rSU7x0QMGwtb+Bv7bgLDuztMNtLtgd7vqRtOpufq5xKqfqwfYZrpEWE34BBUUbFS L6RZf3MLz1ykXF9N1CDMfpS6/Rbfnqe2KKAYWN8GNpMAsQ+JUWDZm8LAiFcsGbeN H/+GnffE5Ln0fTYbH8nMRnqm65kzBZWfE05Zj/NoqIXpCgjr6MhLkyFi9vsCAwEA AQKCAgAA96baQcWr9SLmQOR4NOwLEhQAMWefpWCZhU3amB4FgEVR1mmJjnw868RW t0v36jH0Dl44us9K6o2Ab+jCi9JTtbWM2Osk6JNkwSlVtsSPVH2KxbbmTTExH50N sYE3tPj12rlB7isXpRrOzlRwzWZmJBHOtrFlAsdKFYCQc03vdXlKGkBv1BuSXYP/ 8W5ltSYXMspxehkOZvhaIejbFREMPbzDvGlDER1a7Q320qQ7kUr7ISvbY1XJUzj1 f1HwgEA6w/AhED5Jv6wfgvx+8Yo9hYnflTPbsO1XRS4x7kJxGHTMlFuEsSF1ICYH Bcos0wUiGcBO2N6uAFuhe98BBn+nOwAPZYWwGkmVuK2psm2mXAHx94GT/XqgK/1r VWGSoOV7Fhjauc2Nv8/vJU18DXT3OY5hc4iXVeEBkuZwRb/NVUtnFoHxVO/Mp5Fh /W5KZaLWVrLghzvSQ/KUIM0k4lfKDZpY9ZpOdNgWDyZY8tNrXumUZZimzWdXZ9vR dBssmd8qEKs1AHGFnMDt56IjLGou6j0qnWsLdR1e/WEFsYzGXLVHCv6vXRNkbjqh WFw5nA+2Dw1YAsy+YkTfgx2pOe+exM/wxsVPa7tG9oZ374dywUi1k6VoHw5dkmJw 1hbXqSLZtx2N51G+SpGmNAV4vLUF0y3dy2wnrzFkFT4uxh1w8QKCAQEA+h6LwHTK hgcJx6CQQ6zYRqXo4wdvMooY1FcqJOq7LvJUA2CX5OOLs8qN1TyFrOCuAUTurOrM ABlQ0FpsIaP8TOGz72dHe2eLB+dD6Bqjn10sEFMn54zWd/w9ympQrO9jb5X3ViTh sCcdYyXVS9Hz8nzbbIF+DaKlxF2Hh71uRDxXpMPxRcGbOIuKZXUj6RkTIulzqT6o uawlegWxch05QSgzq/1ASxtjTzo4iuDCAii3N45xqxnB+fV9NXEt4R2oOGquBRPJ LxKcOnaQKBD0YNX4muTq+zPlv/kOb8/ys2WGWDUrNkpyJXqhTve4KONjqM7+iL/U 4WdJuiCjonzk/QKCAQEA3Lc+kNq35FNLxMcnCVcUgkmiCWZ4dyGZZPdqjOPww1+n bbudGPzY1nxOvE60dZM4or/tm6qlXYfb2UU3+OOJrK9s297EQybZ8DTZu2GHyitc NSFV3Gl4cgvKdbieGKkk9X2dV9xSNesNvX9lJEnQxuwHDTeo8ubLHtV88Ml1xokn 7W+IFiyEuUIL4e5/fadbrI3EwMrbCF4+9VcfABx4PTNMzdc8LsncCMXE+jFX8AWp TsT2JezTe5o2WpvBoKMAYhJQNQiaWATn00pDVY/70H1vK3ljomAa1IUdOr/AhAF7 3jL0MYMgXSHzXZOKAtc7yf+QfFWF1Ls8+sen1clJVwKCAQEAp59rB0r+Iz56RmgL 5t7ifs5XujbURemY5E2aN+18DuVmenD0uvfoO1DnJt4NtCNLWhxpXEdq+jH9H/VJ fG4a+ydT4IC1vjVRTrWlo9qeh4H4suQX3S1c2kKY4pvHf25blH/Lp9bFzbkZD8Ze IRcOxxb4MsrBwL+dGnGYD9dbG63ZCtoqSxaKQSX7VS1hKKmeUopj8ivFBdIht5oz JogBQ/J+Vqg9u1gagRFCrYgdXTcOOtRix0lW336vL+6u0ax/fXe5MjvlW3+8Zc3p pIBgVrlvh9ccx8crFTIDg9m4DJRgqaLQV+0ifI2np3WK3RQvSQWYPetZ7sm69ltD bvUGvQKCAQAz5CEhjUqOs8asjOXwnDiGKSmfbCgGWi/mPQUf+rcwN9z1P5a/uTKB utgIDbj/q401Nkp2vrgCNV7KxitSqKxFnTjKuKUL5KZ4gvRtyZBTR751/1BgcauP pJYE91K0GZBG5zGG5pWtd4XTd5Af5/rdycAeq2ddNEWtCiRFuBeohbaNbBtimzTZ GV4R0DDJKf+zoeEQMqEsZnwG0mTHceoS+WylOGU92teQeG7HI7K5C5uymTwFzpgq ByegRd5QFgKRDB0vWsZuyzh1xI/wHdnmOpdYcUGre0zTijhFB7ALWQ32P6SJv3ps av78kSNxZ4j3BM7DbJf6W8sKasZazOghAoIBAHekpBcLq9gRv2+NfLYxWN2sTZVB 1ldwioG7rWvk5YQR2akukecI3NRjtC5gG2vverawG852Y4+oLfgRMHxgp0qNStwX juTykzPkCwZn8AyR+avC3mkrtJyM3IigcYOu4/UoaRDFa0xvCC1EfumpnKXIpHag miSQZf2sVbgqb3/LWvHIg/ceOP9oGJve87/HVfQtBoLaIe5RXCWkqB7mcI/exvTS 8ShaW6v2Fe5Bzdvawj7sbsVYRWe93Aq2tmIgSX320D2RVepb6mjD4nr0IUaM3Yed TFT7e2ikWXyDLLgVkDTU4Qe8fr3ZKGfanCIDzvgNw6H1gRi+2WQgOmjilMQ= -----END RSA PRIVATE KEY----- neutron-8.4.0/neutron/tests/common/0000775000567000056710000000000013044373210020477 5ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/tests/common/helpers.py0000664000567000056710000001456513044372760022537 0ustar jenkinsjenkins00000000000000# Copyright 2015 Red Hat, Inc. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import datetime import os from oslo_utils import timeutils import six import testtools import neutron from neutron.common import constants from neutron.common import topics from neutron import context from neutron.db import agents_db from neutron.db import common_db_mixin from neutron.services.bgp.common import constants as bgp_const HOST = 'localhost' DEFAULT_AZ = 'nova' def find_file(filename, path): """Find a file with name 'filename' located in 'path'.""" for root, _, files in os.walk(path): if filename in files: return os.path.abspath(os.path.join(root, filename)) def find_sample_file(filename): """Find a file with name 'filename' located in the sample directory.""" return find_file( filename, path=os.path.join(neutron.__path__[0], '..', 'etc')) class FakePlugin(common_db_mixin.CommonDbMixin, agents_db.AgentDbMixin): pass def _get_l3_agent_dict(host, agent_mode, internal_only=True, ext_net_id='', ext_bridge='', router_id=None, az=DEFAULT_AZ): return { 'agent_type': constants.AGENT_TYPE_L3, 'binary': 'neutron-l3-agent', 'host': host, 'topic': topics.L3_AGENT, 'availability_zone': az, 'configurations': {'agent_mode': agent_mode, 'handle_internal_only_routers': internal_only, 'external_network_bridge': ext_bridge, 'gateway_external_network_id': ext_net_id, 'router_id': router_id}} def _register_agent(agent): plugin = FakePlugin() admin_context = context.get_admin_context() plugin.create_or_update_agent(admin_context, agent) return plugin._get_agent_by_type_and_host( admin_context, agent['agent_type'], agent['host']) def register_l3_agent(host=HOST, agent_mode=constants.L3_AGENT_MODE_LEGACY, internal_only=True, ext_net_id='', ext_bridge='', router_id=None, az=DEFAULT_AZ): agent = _get_l3_agent_dict(host, agent_mode, internal_only, ext_net_id, ext_bridge, router_id, az) return _register_agent(agent) def _get_dhcp_agent_dict(host, networks=0, az=DEFAULT_AZ): agent = { 'binary': 'neutron-dhcp-agent', 'host': host, 'topic': topics.DHCP_AGENT, 'agent_type': constants.AGENT_TYPE_DHCP, 'availability_zone': az, 'configurations': {'dhcp_driver': 'dhcp_driver', 'networks': networks}} return agent def register_dhcp_agent(host=HOST, networks=0, admin_state_up=True, alive=True, az=DEFAULT_AZ): agent = _register_agent( _get_dhcp_agent_dict(host, networks, az=az)) if not admin_state_up: set_agent_admin_state(agent['id']) if not alive: kill_agent(agent['id']) return FakePlugin()._get_agent_by_type_and_host( context.get_admin_context(), agent['agent_type'], agent['host']) def _get_bgp_dragent_dict(host): agent = { 'binary': 'neutron-bgp-dragent', 'host': host, 'topic': 'q-bgp_dragent', 'agent_type': bgp_const.AGENT_TYPE_BGP_ROUTING, 'configurations': {'bgp_speakers': 1}} return agent def register_bgp_dragent(host=HOST, admin_state_up=True, alive=True): agent = _register_agent( _get_bgp_dragent_dict(host)) if not admin_state_up: set_agent_admin_state(agent['id']) if not alive: kill_agent(agent['id']) return FakePlugin()._get_agent_by_type_and_host( 
context.get_admin_context(), agent['agent_type'], agent['host']) def kill_agent(agent_id): hour_ago = timeutils.utcnow() - datetime.timedelta(hours=1) FakePlugin().update_agent( context.get_admin_context(), agent_id, {'agent': { 'started_at': hour_ago, 'heartbeat_timestamp': hour_ago}}) def revive_agent(agent_id): now = timeutils.utcnow() FakePlugin().update_agent( context.get_admin_context(), agent_id, {'agent': {'started_at': now, 'heartbeat_timestamp': now}}) def set_agent_admin_state(agent_id, admin_state_up=False): FakePlugin().update_agent( context.get_admin_context(), agent_id, {'agent': {'admin_state_up': admin_state_up}}) def _get_ovs_agent_dict(host, agent_type, binary, tunnel_types, tunneling_ip='20.0.0.1', interface_mappings=None, l2pop_network_types=None): agent = { 'binary': binary, 'host': host, 'topic': constants.L2_AGENT_TOPIC, 'configurations': {'tunneling_ip': tunneling_ip, 'tunnel_types': tunnel_types}, 'agent_type': agent_type, 'tunnel_type': [], 'start_flag': True} if interface_mappings is not None: agent['configurations']['interface_mappings'] = interface_mappings if l2pop_network_types is not None: agent['configurations']['l2pop_network_types'] = l2pop_network_types return agent def register_ovs_agent(host=HOST, agent_type=constants.AGENT_TYPE_OVS, binary='neutron-openvswitch-agent', tunnel_types=['vxlan'], tunneling_ip='20.0.0.1', interface_mappings=None, l2pop_network_types=None): agent = _get_ovs_agent_dict(host, agent_type, binary, tunnel_types, tunneling_ip, interface_mappings, l2pop_network_types) return _register_agent(agent) def requires_py2(testcase): return testtools.skipUnless(six.PY2, "requires python 2.x")(testcase) def requires_py3(testcase): return testtools.skipUnless(six.PY3, "requires python 3.x")(testcase) neutron-8.4.0/neutron/tests/common/config_fixtures.py0000664000567000056710000000472513044372760024270 0ustar jenkinsjenkins00000000000000# Copyright 2015 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import os.path import fixtures import six from neutron.tests import base class ConfigDict(base.AttributeDict): def update(self, other): self.convert_to_attr_dict(other) super(ConfigDict, self).update(other) def convert_to_attr_dict(self, other): """Convert nested dicts to AttributeDict. :param other: dictionary to be directly modified. """ for key, value in six.iteritems(other): if isinstance(value, dict): if not isinstance(value, base.AttributeDict): other[key] = base.AttributeDict(value) self.convert_to_attr_dict(value) class ConfigFileFixture(fixtures.Fixture): """A fixture that knows how to translate configurations to files. :param base_filename: the filename to use on disk. :param config: a ConfigDict instance. :param temp_dir: an existing temporary directory to use for storage. 
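A minimal usage sketch (illustrative only; ``temp_dir`` is assumed to be
the path of an already-created temporary directory)::

    config = ConfigDict({'DEFAULT': {'debug': 'True'},
                         'agent': {'report_interval': '30'}})
    conf_file = ConfigFileFixture('neutron.conf', config, temp_dir)
    # inside a test: self.useFixture(conf_file); conf_file.filename then
    # holds the on-disk path of the rendered file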
""" def __init__(self, base_filename, config, temp_dir): super(ConfigFileFixture, self).__init__() self.base_filename = base_filename self.config = config self.temp_dir = temp_dir def _setUp(self): config_parser = self.dict_to_config_parser(self.config) # Need to randomly generate a unique folder to put the file in self.filename = os.path.join(self.temp_dir, self.base_filename) with open(self.filename, 'w') as f: config_parser.write(f) f.flush() def dict_to_config_parser(self, config_dict): config_parser = six.moves.configparser.SafeConfigParser() for section, section_dict in six.iteritems(config_dict): if section != 'DEFAULT': config_parser.add_section(section) for option, value in six.iteritems(section_dict): config_parser.set(section, option, value) return config_parser neutron-8.4.0/neutron/tests/common/__init__.py0000664000567000056710000000000013044372760022607 0ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/tests/common/l3_test_common.py0000664000567000056710000002652013044372760024014 0ustar jenkinsjenkins00000000000000# Copyright (c) 2015 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import copy import netaddr from oslo_utils import uuidutils from six import moves from neutron.common import constants as l3_constants _uuid = uuidutils.generate_uuid class FakeDev(object): def __init__(self, name): self.name = name def get_ha_interface(ip='169.254.192.1', mac='12:34:56:78:2b:5d'): subnet_id = _uuid() return {'admin_state_up': True, 'device_id': _uuid(), 'device_owner': l3_constants.DEVICE_OWNER_ROUTER_HA_INTF, 'fixed_ips': [{'ip_address': ip, 'prefixlen': 18, 'subnet_id': subnet_id}], 'id': _uuid(), 'mac_address': mac, 'name': u'L3 HA Admin port 0', 'network_id': _uuid(), 'status': u'ACTIVE', 'subnets': [{'cidr': '169.254.192.0/18', 'gateway_ip': '169.254.255.254', 'id': subnet_id}], 'tenant_id': '', 'agent_id': _uuid(), 'agent_host': 'aaa', 'priority': 1} def prepare_router_data(ip_version=4, enable_snat=None, num_internal_ports=1, enable_floating_ip=False, enable_ha=False, extra_routes=False, dual_stack=False, enable_gw=True, v6_ext_gw_with_sub=True, **kwargs): fixed_ips = [] subnets = [] gateway_mac = kwargs.get('gateway_mac', 'ca:fe:de:ad:be:ee') extra_subnets = [] for loop_version in (4, 6): if loop_version == 4 and (ip_version == 4 or dual_stack): ip_address = kwargs.get('ip_address', '19.4.4.4') prefixlen = 24 subnet_cidr = kwargs.get('subnet_cidr', '19.4.4.0/24') gateway_ip = kwargs.get('gateway_ip', '19.4.4.1') _extra_subnet = {'cidr': '9.4.5.0/24'} elif (loop_version == 6 and (ip_version == 6 or dual_stack) and v6_ext_gw_with_sub): ip_address = kwargs.get('ip_address', 'fd00::4') prefixlen = 64 subnet_cidr = kwargs.get('subnet_cidr', 'fd00::/64') gateway_ip = kwargs.get('gateway_ip', 'fd00::1') _extra_subnet = {'cidr': 'fd01::/64'} else: continue subnet_id = _uuid() fixed_ips.append({'ip_address': ip_address, 'subnet_id': subnet_id, 'prefixlen': prefixlen}) subnets.append({'id': subnet_id, 'cidr': subnet_cidr, 'gateway_ip': gateway_ip}) 
extra_subnets.append(_extra_subnet) if not fixed_ips and v6_ext_gw_with_sub: raise ValueError("Invalid ip_version: %s" % ip_version) router_id = _uuid() ex_gw_port = {} if enable_gw: ex_gw_port = {'id': _uuid(), 'mac_address': gateway_mac, 'network_id': _uuid(), 'fixed_ips': fixed_ips, 'subnets': subnets, 'extra_subnets': extra_subnets} routes = [] if extra_routes: routes = [{'destination': '8.8.8.0/24', 'nexthop': '19.4.4.4'}] router = { 'id': router_id, 'distributed': False, l3_constants.INTERFACE_KEY: [], 'routes': routes, 'gw_port': ex_gw_port} if enable_floating_ip: router[l3_constants.FLOATINGIP_KEY] = [{ 'id': _uuid(), 'port_id': _uuid(), 'status': 'DOWN', 'floating_ip_address': '19.4.4.2', 'fixed_ip_address': '10.0.0.1'}] router_append_interface(router, count=num_internal_ports, ip_version=ip_version, dual_stack=dual_stack) if enable_ha: router['ha'] = True router['ha_vr_id'] = 1 router[l3_constants.HA_INTERFACE_KEY] = (get_ha_interface()) if enable_snat is not None: router['enable_snat'] = enable_snat return router def get_subnet_id(port): return port['fixed_ips'][0]['subnet_id'] def router_append_interface(router, count=1, ip_version=4, ra_mode=None, addr_mode=None, dual_stack=False): interfaces = router[l3_constants.INTERFACE_KEY] current = sum( [netaddr.IPNetwork(subnet['cidr']).version == ip_version for p in interfaces for subnet in p['subnets']]) mac_address = netaddr.EUI('ca:fe:de:ad:be:ef') mac_address.dialect = netaddr.mac_unix for i in range(current, current + count): fixed_ips = [] subnets = [] for loop_version in (4, 6): if loop_version == 4 and (ip_version == 4 or dual_stack): ip_pool = '35.4.%i.4' cidr_pool = '35.4.%i.0/24' prefixlen = 24 gw_pool = '35.4.%i.1' elif loop_version == 6 and (ip_version == 6 or dual_stack): ip_pool = 'fd01:%x:1::6' cidr_pool = 'fd01:%x:1::/64' prefixlen = 64 gw_pool = 'fd01:%x:1::1' else: continue subnet_id = _uuid() fixed_ips.append({'ip_address': ip_pool % i, 'subnet_id': subnet_id, 'prefixlen': prefixlen}) subnets.append({'id': subnet_id, 'cidr': cidr_pool % i, 'gateway_ip': gw_pool % i, 'ipv6_ra_mode': ra_mode, 'ipv6_address_mode': addr_mode}) if not fixed_ips: raise ValueError("Invalid ip_version: %s" % ip_version) interfaces.append( {'id': _uuid(), 'network_id': _uuid(), 'admin_state_up': True, 'fixed_ips': fixed_ips, 'mac_address': str(mac_address), 'subnets': subnets}) mac_address.value += 1 def router_append_subnet(router, count=1, ip_version=4, ipv6_subnet_modes=None, interface_id=None, dns_nameservers=None, network_mtu=0): if ip_version == 6: subnet_mode_none = {'ra_mode': None, 'address_mode': None} if not ipv6_subnet_modes: ipv6_subnet_modes = [subnet_mode_none] * count elif len(ipv6_subnet_modes) != count: ipv6_subnet_modes.extend([subnet_mode_none for i in moves.range(len(ipv6_subnet_modes), count)]) if ip_version == 4: ip_pool = '35.4.%i.4' cidr_pool = '35.4.%i.0/24' prefixlen = 24 gw_pool = '35.4.%i.1' elif ip_version == 6: ip_pool = 'fd01:%x::6' cidr_pool = 'fd01:%x::/64' prefixlen = 64 gw_pool = 'fd01:%x::1' else: raise ValueError("Invalid ip_version: %s" % ip_version) interfaces = copy.deepcopy(router.get(l3_constants.INTERFACE_KEY, [])) if interface_id: try: interface = next(i for i in interfaces if i['id'] == interface_id) except StopIteration: raise ValueError("interface_id not found") fixed_ips, subnets = interface['fixed_ips'], interface['subnets'] else: interface = None fixed_ips, subnets = [], [] num_existing_subnets = len(subnets) for i in moves.range(count): subnet_id = _uuid() fixed_ips.append( {'ip_address': 
ip_pool % (i + num_existing_subnets), 'subnet_id': subnet_id, 'prefixlen': prefixlen}) subnets.append( {'id': subnet_id, 'cidr': cidr_pool % (i + num_existing_subnets), 'gateway_ip': gw_pool % (i + num_existing_subnets), 'dns_nameservers': dns_nameservers, 'ipv6_ra_mode': ipv6_subnet_modes[i]['ra_mode'], 'ipv6_address_mode': ipv6_subnet_modes[i]['address_mode']}) if interface: # Update old interface index = interfaces.index(interface) interfaces[index].update({'fixed_ips': fixed_ips, 'subnets': subnets}) else: # New interface appended to interfaces list mac_address = netaddr.EUI('ca:fe:de:ad:be:ef') mac_address.dialect = netaddr.mac_unix interfaces.append( {'id': _uuid(), 'mtu': network_mtu, 'network_id': _uuid(), 'admin_state_up': True, 'mac_address': str(mac_address), 'fixed_ips': fixed_ips, 'subnets': subnets}) router[l3_constants.INTERFACE_KEY] = interfaces def router_append_pd_enabled_subnet(router, count=1): interfaces = router[l3_constants.INTERFACE_KEY] current = sum(netaddr.IPNetwork(subnet['cidr']).version == 6 for p in interfaces for subnet in p['subnets']) mac_address = netaddr.EUI('ca:fe:de:ad:be:ef') mac_address.dialect = netaddr.mac_unix pd_intfs = [] for i in range(current, current + count): subnet_id = _uuid() intf = {'id': _uuid(), 'network_id': _uuid(), 'admin_state_up': True, 'fixed_ips': [{'ip_address': '::1', 'prefixlen': 64, 'subnet_id': subnet_id}], 'mac_address': str(mac_address), 'subnets': [{'id': subnet_id, 'cidr': l3_constants.PROVISIONAL_IPV6_PD_PREFIX, 'gateway_ip': '::1', 'ipv6_ra_mode': l3_constants.IPV6_SLAAC, 'subnetpool_id': l3_constants.IPV6_PD_POOL_ID}]} interfaces.append(intf) pd_intfs.append(intf) mac_address.value += 1 return pd_intfs def prepare_ext_gw_test(context, ri, dual_stack=False): subnet_id = _uuid() fixed_ips = [{'subnet_id': subnet_id, 'ip_address': '20.0.0.30', 'prefixlen': 24}] subnets = [{'id': subnet_id, 'cidr': '20.0.0.0/24', 'gateway_ip': '20.0.0.1'}] if dual_stack: subnet_id_v6 = _uuid() fixed_ips.append({'subnet_id': subnet_id_v6, 'ip_address': '2001:192:168:100::2', 'prefixlen': 64}) subnets.append({'id': subnet_id_v6, 'cidr': '2001:192:168:100::/64', 'gateway_ip': '2001:192:168:100::1'}) ex_gw_port = {'fixed_ips': fixed_ips, 'subnets': subnets, 'extra_subnets': [{'cidr': '172.16.0.0/24'}], 'id': _uuid(), 'network_id': _uuid(), 'mac_address': 'ca:fe:de:ad:be:ef'} interface_name = ri.get_external_device_name(ex_gw_port['id']) context.device_exists.return_value = True return interface_name, ex_gw_port neutron-8.4.0/neutron/tests/common/machine_fixtures.py0000664000567000056710000001047413044372760024425 0ustar jenkinsjenkins00000000000000# Copyright (c) 2015 Thales Services SAS # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # import functools import fixtures from neutron.agent.linux import ip_lib from neutron.agent.linux import utils from neutron.tests.common import net_helpers class FakeMachineBase(fixtures.Fixture): """Create a fake machine. 
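A typical use is to plug two machines into the same bridge and check
connectivity between them (an illustrative sketch, assuming a fixtures
based test case and an existing ``bridge``)::

    vm1 = self.useFixture(FakeMachine(bridge, '192.168.0.2/24'))
    vm2 = self.useFixture(FakeMachine(bridge, '192.168.0.3/24'))
    vm1.assert_ping(vm2.ip)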
:ivar bridge: bridge on which the fake machine is bound :ivar ip_cidr: fake machine ip_cidr :type ip_cidr: str :ivar ip: fake machine ip :type ip: str :ivar gateway_ip: fake machine gateway ip :type gateway_ip: str :ivar namespace: namespace emulating the machine :type namespace: str :ivar port: port binding the namespace to the bridge :type port: IPDevice """ def __init__(self): self.port = None def _setUp(self): ns_fixture = self.useFixture( net_helpers.NamespaceFixture()) self.namespace = ns_fixture.name def execute(self, *args, **kwargs): ns_ip_wrapper = ip_lib.IPWrapper(self.namespace) return ns_ip_wrapper.netns.execute(*args, **kwargs) def ping_predicate(self, dst_ip): try: self.assert_ping(dst_ip) except RuntimeError: return False return True def block_until_ping(self, dst_ip): predicate = functools.partial(self.ping_predicate, dst_ip) utils.wait_until_true(predicate) def assert_ping(self, dst_ip): net_helpers.assert_ping(self.namespace, dst_ip) def assert_no_ping(self, dst_ip): net_helpers.assert_no_ping(self.namespace, dst_ip) @property def ip(self): raise NotImplementedError() @property def ip_cidr(self): raise NotImplementedError() @property def mac_address(self): return self.port.link.address class FakeMachine(FakeMachineBase): def __init__(self, bridge, ip_cidr, gateway_ip=None): super(FakeMachine, self).__init__() self.bridge = bridge self._ip_cidr = ip_cidr self.gateway_ip = gateway_ip def _setUp(self): super(FakeMachine, self)._setUp() self.port = self.useFixture( net_helpers.PortFixture.get(self.bridge, self.namespace)).port self.port.addr.add(self._ip_cidr) if self.gateway_ip: net_helpers.set_namespace_gateway(self.port, self.gateway_ip) @property def ip(self): return self._ip_cidr.partition('/')[0] @property def ip_cidr(self): return self._ip_cidr @ip_cidr.setter def ip_cidr(self, ip_cidr): self.port.addr.add(ip_cidr) self.port.addr.delete(self._ip_cidr) self._ip_cidr = ip_cidr @FakeMachineBase.mac_address.setter def mac_address(self, mac_address): self.port.link.set_down() self.port.link.set_address(mac_address) self.port.link.set_up() def set_default_gateway(self, default_gw): self.port.route.add_gateway(default_gw) class PeerMachines(fixtures.Fixture): """Create 'amount' peered machines on an ip_cidr. :ivar bridge: bridge on which peer machines are bound :ivar ip_cidr: ip_cidr on which peer machines have ips :type ip_cidr: str :ivar machines: fake machines :type machines: FakeMachine list """ CIDR = '192.168.0.1/24' def __init__(self, bridge, ip_cidr=None, gateway_ip=None, amount=2): super(PeerMachines, self).__init__() self.bridge = bridge self.ip_cidr = ip_cidr or self.CIDR self.gateway_ip = gateway_ip self.amount = amount def _setUp(self): self.machines = [] for index in range(self.amount): ip_cidr = net_helpers.increment_ip_cidr(self.ip_cidr, index) self.machines.append( self.useFixture( FakeMachine(self.bridge, ip_cidr, self.gateway_ip))) neutron-8.4.0/neutron/tests/common/conn_testers.py0000664000567000056710000003545013044372760023577 0ustar jenkinsjenkins00000000000000# All Rights Reserved. # # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. import functools import fixtures from neutron_lib import constants as n_consts from oslo_utils import uuidutils from neutron.agent import firewall from neutron.agent.linux import ip_lib from neutron.common import constants from neutron.tests.common import machine_fixtures from neutron.tests.common import net_helpers # NOTE: IPv6 uses NDP for obtaining destination endpoints link address that # extends round-trip packet time in ICMP tests. The timeout value should be # sufficient for correct scenarios but not too high because of negative # tests. ICMP_VERSION_TIMEOUTS = { n_consts.IP_VERSION_4: 1, n_consts.IP_VERSION_6: 2, } class ConnectionTesterException(Exception): pass def _validate_direction(f): @functools.wraps(f) def wrap(self, direction, *args, **kwargs): if direction not in (firewall.INGRESS_DIRECTION, firewall.EGRESS_DIRECTION): raise ConnectionTesterException('Unknown direction %s' % direction) return f(self, direction, *args, **kwargs) return wrap class ConnectionTester(fixtures.Fixture): """Base class for testers This class implements API for various methods for testing connectivity. The concrete implementation relies on how encapsulated resources are configured. That means child classes should define resources by themselves (e.g. endpoints connected through linux bridge or ovs bridge). """ UDP = net_helpers.NetcatTester.UDP TCP = net_helpers.NetcatTester.TCP ICMP = constants.PROTO_NAME_ICMP ARP = constants.ETHERTYPE_NAME_ARP INGRESS = firewall.INGRESS_DIRECTION EGRESS = firewall.EGRESS_DIRECTION def __init__(self, ip_cidr): self.ip_cidr = ip_cidr def _setUp(self): self._protocol_to_method = { self.UDP: self._test_transport_connectivity, self.TCP: self._test_transport_connectivity, self.ICMP: self._test_icmp_connectivity, self.ARP: self._test_arp_connectivity} self._nc_testers = {} self._pingers = {} self.addCleanup(self.cleanup) def cleanup(self): for nc in self._nc_testers.values(): nc.stop_processes() for pinger in self._pingers.values(): pinger.stop() @property def vm_namespace(self): return self._vm.namespace @property def vm_ip_address(self): return self._vm.ip @property def vm_ip_cidr(self): return self._vm.ip_cidr @vm_ip_cidr.setter def vm_ip_cidr(self, ip_cidr): self._vm.ip_cidr = ip_cidr @property def vm_mac_address(self): return self._vm.port.link.address @vm_mac_address.setter def vm_mac_address(self, mac_address): self._vm.mac_address = mac_address @property def peer_mac_address(self): return self._peer.port.link.address @peer_mac_address.setter def peer_mac_address(self, mac_address): self._peer.mac_address = mac_address @property def peer_namespace(self): return self._peer.namespace @property def peer_ip_address(self): return self._peer.ip def set_vm_default_gateway(self, default_gw): self._vm.set_default_gateway(default_gw) def flush_arp_tables(self): """Flush arptables in all used namespaces""" for machine in (self._peer, self._vm): machine.port.neigh.flush(4, 'all') def _test_transport_connectivity(self, direction, protocol, src_port, dst_port): nc_tester = self._create_nc_tester(direction, protocol, src_port, dst_port) try: nc_tester.test_connectivity() except RuntimeError as exc: nc_tester.stop_processes() raise ConnectionTesterException( "%s connection over %s protocol with %s source port and " "%s destination port can't be established: %s" % ( direction, protocol, src_port, dst_port, exc)) @_validate_direction def _get_namespace_and_address(self, 
direction): if direction == self.INGRESS: return self.peer_namespace, self.vm_ip_address return self.vm_namespace, self.peer_ip_address def _test_icmp_connectivity(self, direction, protocol, src_port, dst_port): src_namespace, ip_address = self._get_namespace_and_address(direction) ip_version = ip_lib.get_ip_version(ip_address) icmp_timeout = ICMP_VERSION_TIMEOUTS[ip_version] try: net_helpers.assert_ping(src_namespace, ip_address, timeout=icmp_timeout) except RuntimeError: raise ConnectionTesterException( "ICMP packets can't get from %s namespace to %s address" % ( src_namespace, ip_address)) def _test_arp_connectivity(self, direction, protocol, src_port, dst_port): src_namespace, ip_address = self._get_namespace_and_address(direction) try: net_helpers.assert_arping(src_namespace, ip_address) except RuntimeError: raise ConnectionTesterException( "ARP queries to %s address have no response from %s namespace" % (ip_address, src_namespace)) @_validate_direction def assert_connection(self, direction, protocol, src_port=None, dst_port=None): testing_method = self._protocol_to_method[protocol] testing_method(direction, protocol, src_port, dst_port) def assert_no_connection(self, direction, protocol, src_port=None, dst_port=None): try: self.assert_connection(direction, protocol, src_port, dst_port) except ConnectionTesterException: pass else: dst_port_info = str() src_port_info = str() if dst_port is not None: dst_port_info = " and destination port %d" % dst_port if src_port is not None: src_port_info = " and source port %d" % src_port raise ConnectionTesterException("%s connection with protocol %s, " "source port %s, destination " "port %s was established but it " "shouldn't be possible" % ( direction, protocol, src_port_info, dst_port_info)) @_validate_direction def assert_established_connection(self, direction, protocol, src_port=None, dst_port=None): nc_params = (direction, protocol, src_port, dst_port) nc_tester = self._nc_testers.get(nc_params) if nc_tester: if nc_tester.is_established: try: nc_tester.test_connectivity() except RuntimeError: raise ConnectionTesterException( "Established %s connection with protocol %s, source " "port %s and destination port %s can no longer " "communicate") else: nc_tester.stop_processes() raise ConnectionTesterException( '%s connection with protocol %s, source port %s and ' 'destination port %s is not established' % nc_params) else: raise ConnectionTesterException( "Attempting to test established %s connection with protocol %s" ", source port %s and destination port %s that hasn't been " "established yet by calling establish_connection()" % nc_params) def assert_no_established_connection(self, direction, protocol, src_port=None, dst_port=None): try: self.assert_established_connection(direction, protocol, src_port, dst_port) except ConnectionTesterException: pass else: raise ConnectionTesterException( 'Established %s connection with protocol %s, source port %s, ' 'destination port %s can still send packets through' % ( direction, protocol, src_port, dst_port)) @_validate_direction def establish_connection(self, direction, protocol, src_port=None, dst_port=None): nc_tester = self._create_nc_tester(direction, protocol, src_port, dst_port) nc_tester.establish_connection() self.addCleanup(nc_tester.stop_processes) def _create_nc_tester(self, direction, protocol, src_port, dst_port): """Create netcat tester If there already exists a netcat tester that has established connection, exception is raised. 
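Testers are tracked per (direction, protocol, src_port, dst_port) key;
a new tester always replaces the cached one, but only after verifying
that no established connection would be clobbered.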
""" nc_key = (direction, protocol, src_port, dst_port) nc_tester = self._nc_testers.get(nc_key) if nc_tester and nc_tester.is_established: raise ConnectionTesterException( '%s connection using %s protocol, source port %s and ' 'destination port %s is already established' % ( direction, protocol, src_port, dst_port)) if direction == self.INGRESS: client_ns = self.peer_namespace server_ns = self.vm_namespace server_addr = self.vm_ip_address else: client_ns = self.vm_namespace server_ns = self.peer_namespace server_addr = self.peer_ip_address server_port = dst_port or net_helpers.get_free_namespace_port( protocol, server_ns) nc_tester = net_helpers.NetcatTester(client_namespace=client_ns, server_namespace=server_ns, address=server_addr, protocol=protocol, src_port=src_port, dst_port=server_port) self._nc_testers[nc_key] = nc_tester return nc_tester def _get_pinger(self, direction): try: pinger = self._pingers[direction] except KeyError: src_namespace, dst_address = self._get_namespace_and_address( direction) pinger = net_helpers.Pinger(src_namespace, dst_address) self._pingers[direction] = pinger return pinger def start_sending_icmp(self, direction): pinger = self._get_pinger(direction) pinger.start() def stop_sending_icmp(self, direction): pinger = self._get_pinger(direction) pinger.stop() def get_sent_icmp_packets(self, direction): pinger = self._get_pinger(direction) return pinger.sent def get_received_icmp_packets(self, direction): pinger = self._get_pinger(direction) return pinger.received def assert_net_unreachable(self, direction, destination): src_namespace, dst_address = self._get_namespace_and_address( direction) pinger = net_helpers.Pinger(src_namespace, destination, count=5) pinger.start() pinger.wait() if not pinger.destination_unreachable: raise ConnectionTesterException( 'No Host Destination Unreachable packets were received when ' 'sending icmp packets to %s' % destination) class OVSConnectionTester(ConnectionTester): """Tester with OVS bridge in the middle The endpoints are created as OVS ports attached to the OVS bridge. NOTE: The OVS ports are connected from the namespace. 
This connection is currently not supported in OVS and may lead to unpredicted behavior: https://bugzilla.redhat.com/show_bug.cgi?id=1160340 """ def _setUp(self): super(OVSConnectionTester, self)._setUp() self.bridge = self.useFixture(net_helpers.OVSBridgeFixture()).bridge self._peer, self._vm = self.useFixture( machine_fixtures.PeerMachines( self.bridge, self.ip_cidr)).machines self._set_port_attrs(self._peer.port) self._set_port_attrs(self._vm.port) def _set_port_attrs(self, port): port.id = uuidutils.generate_uuid() attrs = [('type', 'internal'), ('external_ids', { 'iface-id': port.id, 'iface-status': 'active', 'attached-mac': port.link.address})] for column, value in attrs: self.bridge.set_db_attribute('Interface', port.name, column, value) @property def peer_port_id(self): return self._peer.port.id @property def vm_port_id(self): return self._vm.port.id def set_tag(self, port_name, tag): self.bridge.set_db_attribute('Port', port_name, 'tag', tag) other_config = self.bridge.db_get_val( 'Port', port_name, 'other_config') other_config['tag'] = tag self.bridge.set_db_attribute( 'Port', port_name, 'other_config', other_config) def set_vm_tag(self, tag): self.set_tag(self._vm.port.name, tag) def set_peer_tag(self, tag): self.set_tag(self._peer.port.name, tag) class LinuxBridgeConnectionTester(ConnectionTester): """Tester with linux bridge in the middle Both endpoints are placed in their separated namespace connected to bridge's namespace via veth pair. """ def _setUp(self): super(LinuxBridgeConnectionTester, self)._setUp() self.bridge = self.useFixture(net_helpers.LinuxBridgeFixture()).bridge self._peer, self._vm = self.useFixture( machine_fixtures.PeerMachines( self.bridge, self.ip_cidr)).machines @property def bridge_namespace(self): return self.bridge.namespace @property def vm_port_id(self): return net_helpers.VethFixture.get_peer_name(self._vm.port.name) @property def peer_port_id(self): return net_helpers.VethFixture.get_peer_name(self._peer.port.name) def flush_arp_tables(self): self.bridge.neigh.flush(4, 'all') super(LinuxBridgeConnectionTester, self).flush_arp_tables() neutron-8.4.0/neutron/tests/common/base.py0000664000567000056710000000624613044372760022004 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # import functools import unittest.case from oslo_db.sqlalchemy import test_base import testtools.testcase from neutron.common import constants as n_const from neutron.tests import base from neutron.tests import tools def create_resource(prefix, creation_func, *args, **kwargs): """Create a new resource that does not already exist. If prefix isn't 'max_length' in size, a random suffix is concatenated to ensure it is random. Otherwise, 'prefix' is used as is. :param prefix: The prefix for a randomly generated name :param creation_func: A function taking the name of the resource to be created as it's first argument. An error is assumed to indicate a name collision. :param *args *kwargs: These will be passed to the create function. 
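A sketch mirroring the callers in net_helpers (``ip_wrapper`` and
``peer_name`` are assumed to exist)::

    ports = create_resource(
        'test-veth0',
        lambda name: ip_wrapper.add_veth(name, peer_name))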
""" # Don't generate a random name if prefix is already full-length. if len(prefix) == n_const.DEVICE_NAME_MAX_LEN: return creation_func(prefix, *args, **kwargs) while True: name = base.get_rand_name( max_length=n_const.DEVICE_NAME_MAX_LEN, prefix=prefix) try: return creation_func(name, *args, **kwargs) except RuntimeError: pass def no_skip_on_missing_deps(wrapped): """Do not allow a method/test to skip on missing dependencies. This decorator raises an error if a skip is raised by wrapped method when OS_FAIL_ON_MISSING_DEPS is evaluated to True. This decorator should be used only for missing dependencies (including missing system requirements). """ @functools.wraps(wrapped) def wrapper(*args, **kwargs): try: return wrapped(*args, **kwargs) except (testtools.TestCase.skipException, unittest.case.SkipTest) as e: if base.bool_from_env('OS_FAIL_ON_MISSING_DEPS'): tools.fail( '%s cannot be skipped because OS_FAIL_ON_MISSING_DEPS ' 'is enabled, skip reason: %s' % (wrapped.__name__, e)) raise return wrapper class MySQLTestCase(test_base.MySQLOpportunisticTestCase): """Base test class for MySQL tests. If the MySQL db is unavailable then this test is skipped, unless OS_FAIL_ON_MISSING_DEPS is enabled. """ SKIP_ON_UNAVAILABLE_DB = not base.bool_from_env('OS_FAIL_ON_MISSING_DEPS') class PostgreSQLTestCase(test_base.PostgreSQLOpportunisticTestCase): """Base test class for PostgreSQL tests. If the PostgreSQL db is unavailable then this test is skipped, unless OS_FAIL_ON_MISSING_DEPS is enabled. """ SKIP_ON_UNAVAILABLE_DB = not base.bool_from_env('OS_FAIL_ON_MISSING_DEPS') neutron-8.4.0/neutron/tests/common/net_helpers.py0000664000567000056710000007052313044372760023401 0ustar jenkinsjenkins00000000000000# Copyright (c) 2015 Thales Services SAS # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
# import abc from concurrent import futures import contextlib import functools import os import random import re import select import shlex import signal import subprocess import time import fixtures import netaddr from oslo_config import cfg from oslo_utils import uuidutils import six from neutron.agent.common import config from neutron.agent.common import ovs_lib from neutron.agent.linux import bridge_lib from neutron.agent.linux import interface from neutron.agent.linux import ip_lib from neutron.agent.linux import utils from neutron.common import constants as n_const from neutron.db import db_base_plugin_common from neutron.plugins.ml2.drivers.linuxbridge.agent import \ linuxbridge_neutron_agent as linuxbridge_agent from neutron.tests import base as tests_base from neutron.tests.common import base as common_base from neutron.tests import tools UNDEFINED = object() NS_PREFIX = 'test-' BR_PREFIX = 'test-br' PORT_PREFIX = 'port' VETH0_PREFIX = 'test-veth0' VETH1_PREFIX = 'test-veth1' PATCH_PREFIX = 'patch' MACVTAP_PREFIX = 'macvtap' # port name should be shorter than DEVICE_NAME_MAX_LEN because if this # port is used to provide vlan connection between two linuxbridge # agents then place for vlan ID is also required, Vlan ID can take max 4 digits # and there is also additional "." in device name so it will in overall gives # DEVICE_NAME_MAX_LEN = 15 chars LB_DEVICE_NAME_MAX_LEN = 10 SS_SOURCE_PORT_PATTERN = re.compile( r'^.*\s+\d+\s+.*:(?P\d+)\s+[^\s]+:.*') READ_TIMEOUT = os.environ.get('OS_TEST_READ_TIMEOUT', 5) CHILD_PROCESS_TIMEOUT = os.environ.get('OS_TEST_CHILD_PROCESS_TIMEOUT', 20) CHILD_PROCESS_SLEEP = os.environ.get('OS_TEST_CHILD_PROCESS_SLEEP', 0.5) TRANSPORT_PROTOCOLS = (n_const.PROTO_NAME_TCP, n_const.PROTO_NAME_UDP) def increment_ip_cidr(ip_cidr, offset=1): """Increment ip_cidr offset times. example: increment_ip_cidr("1.2.3.4/24", 2) ==> "1.2.3.6/24" """ net0 = netaddr.IPNetwork(ip_cidr) net = netaddr.IPNetwork(ip_cidr) net.value += offset if not net0.network < net.ip < net0[-1]: tools.fail( 'Incorrect ip_cidr,offset tuple (%s,%s): "incremented" ip_cidr is ' 'outside ip_cidr' % (ip_cidr, offset)) return str(net) def set_namespace_gateway(port_dev, gateway_ip): """Set gateway for the namespace associated to the port.""" if not port_dev.namespace: tools.fail('tests should not change test machine gateway') port_dev.route.add_gateway(gateway_ip) def assert_ping(src_namespace, dst_ip, timeout=1, count=1): ipversion = netaddr.IPAddress(dst_ip).version ping_command = 'ping' if ipversion == 4 else 'ping6' ns_ip_wrapper = ip_lib.IPWrapper(src_namespace) ns_ip_wrapper.netns.execute([ping_command, '-c', count, '-W', timeout, dst_ip]) def assert_async_ping(src_namespace, dst_ip, timeout=1, count=1, interval=1): ipversion = netaddr.IPAddress(dst_ip).version ping_command = 'ping' if ipversion == 4 else 'ping6' ns_ip_wrapper = ip_lib.IPWrapper(src_namespace) # See bug 1588731 for explanation why using -c count ping option # cannot be used and it needs to be done using the following workaround. 
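# Each iteration sends exactly one probe ('-c 1') and then sleeps away
# whatever is left of 'interval', so consecutive probes are spaced at
# least 'interval' seconds apart even when replies come back quickly.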
for _index in range(count): start_time = time.time() ns_ip_wrapper.netns.execute([ping_command, '-c', '1', '-W', timeout, dst_ip]) end_time = time.time() diff = end_time - start_time if 0 < diff < interval: # wait at most "interval" seconds between individual pings time.sleep(interval - diff) @contextlib.contextmanager def async_ping(namespace, ips): with futures.ThreadPoolExecutor(max_workers=len(ips)) as executor: fs = [executor.submit(assert_async_ping, namespace, ip, count=10) for ip in ips] yield lambda: all(f.done() for f in fs) futures.wait(fs) for f in fs: f.result() def assert_no_ping(src_namespace, dst_ip, timeout=1, count=1): try: assert_ping(src_namespace, dst_ip, timeout, count) except RuntimeError: pass else: tools.fail("destination ip %(destination)s is replying to ping from " "namespace %(ns)s, but it shouldn't" % {'ns': src_namespace, 'destination': dst_ip}) def assert_arping(src_namespace, dst_ip, source=None, timeout=1, count=1): """Send arp request using arping executable. NOTE: ARP protocol is used in IPv4 only. IPv6 uses Neighbour Discovery Protocol instead. """ ns_ip_wrapper = ip_lib.IPWrapper(src_namespace) arping_cmd = ['arping', '-c', count, '-w', timeout] if source: arping_cmd.extend(['-s', source]) arping_cmd.append(dst_ip) ns_ip_wrapper.netns.execute(arping_cmd) def assert_no_arping(src_namespace, dst_ip, source=None, timeout=1, count=1): try: assert_arping(src_namespace, dst_ip, source, timeout, count) except RuntimeError: pass else: tools.fail("destination ip %(destination)s is replying to arp from " "namespace %(ns)s, but it shouldn't" % {'ns': src_namespace, 'destination': dst_ip}) def _get_source_ports_from_ss_output(output): ports = set() for line in output.splitlines(): match = SS_SOURCE_PORT_PATTERN.match(line) if match: ports.add(int(match.group('port'))) return ports def get_unused_port(used, start=1024, end=65535): candidates = set(range(start, end + 1)) return random.choice(list(candidates - used)) def get_free_namespace_port(protocol, namespace=None): """Return an unused port from given namespace WARNING: This function returns a port that is free at the execution time of this function. If this port is used later for binding then there is a potential danger that port will be no longer free. It's up to the programmer to handle error if port is already in use. :param protocol: Return free port for given protocol. Supported protocols are 'tcp' and 'udp'. """ if protocol == n_const.PROTO_NAME_TCP: param = '-tna' elif protocol == n_const.PROTO_NAME_UDP: param = '-una' else: raise ValueError("Unsupported procotol %s" % protocol) ip_wrapper = ip_lib.IPWrapper(namespace=namespace) output = ip_wrapper.netns.execute(['ss', param]) used_ports = _get_source_ports_from_ss_output(output) return get_unused_port(used_ports) def create_patch_ports(source, destination): """Hook up two OVS bridges. The result is two patch ports, each end connected to a bridge. The two patch port names will start with 'patch-', followed by identical four characters. For example patch-xyzw-fedora, and patch-xyzw-ubuntu, where fedora and ubuntu are random strings. 
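A usage sketch (``br_int`` and ``br_tun`` are assumed to be existing
OVSBridge instances)::

    create_patch_ports(br_int, br_tun)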
:param source: Instance of OVSBridge :param destination: Instance of OVSBridge """ common = tests_base.get_rand_name(max_length=4, prefix='') prefix = '%s-%s-' % (PATCH_PREFIX, common) source_name = tests_base.get_rand_device_name(prefix=prefix) destination_name = tests_base.get_rand_device_name(prefix=prefix) source.add_patch_port(source_name, destination_name) destination.add_patch_port(destination_name, source_name) class RootHelperProcess(subprocess.Popen): def __init__(self, cmd, *args, **kwargs): for arg in ('stdin', 'stdout', 'stderr'): kwargs.setdefault(arg, subprocess.PIPE) self.namespace = kwargs.pop('namespace', None) self.cmd = cmd if self.namespace is not None: cmd = ['ip', 'netns', 'exec', self.namespace] + cmd root_helper = config.get_root_helper(utils.cfg.CONF) cmd = shlex.split(root_helper) + cmd self.child_pid = None super(RootHelperProcess, self).__init__(cmd, *args, **kwargs) self._wait_for_child_process() def kill(self, sig=signal.SIGKILL): pid = self.child_pid or str(self.pid) utils.execute(['kill', '-%d' % sig, pid], run_as_root=True) def read_stdout(self, timeout=None): return self._read_stream(self.stdout, timeout) @staticmethod def _read_stream(stream, timeout): if timeout: poller = select.poll() poller.register(stream.fileno()) poll_predicate = functools.partial(poller.poll, 1) utils.wait_until_true(poll_predicate, timeout, 0.1, RuntimeError( 'No output in %.2f seconds' % timeout)) return stream.readline() def writeline(self, data): self.stdin.write(data + os.linesep) self.stdin.flush() def _wait_for_child_process(self, timeout=CHILD_PROCESS_TIMEOUT, sleep=CHILD_PROCESS_SLEEP): def child_is_running(): child_pid = utils.get_root_helper_child_pid( self.pid, self.cmd, run_as_root=True) if utils.pid_invoked_with_cmdline(child_pid, self.cmd): return True utils.wait_until_true( child_is_running, timeout, exception=RuntimeError("Process %s hasn't been spawned " "in %d seconds" % (self.cmd, timeout))) self.child_pid = utils.get_root_helper_child_pid( self.pid, self.cmd, run_as_root=True) @property def is_running(self): return self.poll() is None class Pinger(object): """Class for sending ICMP packets asynchronously The aim is to keep sending ICMP packets on background while executing other code. After background 'ping' command is stopped, statistics are available. Difference to assert_(no_)ping() functions located in this module is that these methods send given count of ICMP packets while they wait for the exit code of 'ping' command. >>> pinger = Pinger('pinger_test', '192.168.0.2') >>> pinger.start(); time.sleep(5); pinger.stop() >>> pinger.sent, pinger.received 7 7 """ stats_pattern = re.compile( r'^(?P\d+) packets transmitted,.*(?P\d+) received.*$') unreachable_pattern = re.compile( r'.* Destination .* Unreachable') TIMEOUT = 15 def __init__(self, namespace, address, count=None, timeout=1): self.proc = None self.namespace = namespace self.address = address self.count = count self.timeout = timeout self.destination_unreachable = False self.sent = 0 self.received = 0 def _wait_for_death(self): is_dead = lambda: self.proc.poll() is not None utils.wait_until_true( is_dead, timeout=self.TIMEOUT, exception=RuntimeError( "Ping command hasn't ended after %d seconds." 
% self.TIMEOUT)) def _parse_stats(self): for line in self.proc.stdout: if (not self.destination_unreachable and self.unreachable_pattern.match(line)): self.destination_unreachable = True continue result = self.stats_pattern.match(line) if result: self.sent = int(result.group('trans')) self.received = int(result.group('recv')) break else: raise RuntimeError("Didn't find ping statistics.") def start(self): if self.proc and self.proc.is_running: raise RuntimeError("This pinger has already a running process") ip_version = ip_lib.get_ip_version(self.address) ping_exec = 'ping' if ip_version == 4 else 'ping6' cmd = [ping_exec, self.address, '-W', str(self.timeout)] if self.count: cmd.extend(['-c', str(self.count)]) self.proc = RootHelperProcess(cmd, namespace=self.namespace) def stop(self): if self.proc and self.proc.is_running: self.proc.kill(signal.SIGINT) self._wait_for_death() self._parse_stats() def wait(self): if self.count: self._wait_for_death() self._parse_stats() else: raise RuntimeError("Pinger is running infinitelly, use stop() " "first") class NetcatTester(object): TCP = n_const.PROTO_NAME_TCP UDP = n_const.PROTO_NAME_UDP VERSION_TO_ALL_ADDRESS = { 4: '0.0.0.0', 6: '::', } def __init__(self, client_namespace, server_namespace, address, dst_port, protocol, server_address=None, src_port=None): """ Tool for testing connectivity on transport layer using netcat executable. The processes are spawned lazily. :param client_namespace: Namespace in which netcat process that connects to other netcat will be spawned :param server_namespace: Namespace in which listening netcat process will be spawned :param address: Server address from client point of view :param dst_port: Port on which netcat listens :param protocol: Transport protocol, either 'tcp' or 'udp' :param server_address: Address in server namespace on which netcat should listen :param src_port: Source port of netcat process spawned in client namespace - packet will have src_port in TCP/UDP header with this value """ self.client_namespace = client_namespace self.server_namespace = server_namespace self._client_process = None self._server_process = None self.address = address self.dst_port = str(dst_port) self.src_port = str(src_port) if src_port else None if protocol not in TRANSPORT_PROTOCOLS: raise ValueError("Unsupported protocol %s" % protocol) self.protocol = protocol ip_version = netaddr.IPAddress(address).version self.server_address = ( server_address or self.VERSION_TO_ALL_ADDRESS[ip_version]) @property def client_process(self): if not self._client_process: self.establish_connection() return self._client_process @property def server_process(self): if not self._server_process: self._spawn_server_process() return self._server_process def _spawn_server_process(self): self._server_process = self._spawn_nc_in_namespace( self.server_namespace, address=self.server_address, listen=True) @property def is_established(self): return bool(self._client_process and not self._client_process.poll()) def establish_connection(self): if self.is_established: raise RuntimeError('%(proto)s connection to %(ip_addr)s is already' ' established' % {'proto': self.protocol, 'ip_addr': self.address}) if not self._server_process: self._spawn_server_process() self._client_process = self._spawn_nc_in_namespace( self.client_namespace, address=self.address) if self.protocol == self.UDP: # Create an ASSURED entry in conntrack table for UDP packets, # that requires 3-way communication # 1st transmission creates UNREPLIED # 2nd transmission removes UNREPLIED # 3rd 
transmission creates ASSURED data = 'foo' self.client_process.writeline(data) self.server_process.read_stdout(READ_TIMEOUT) self.server_process.writeline(data) self.client_process.read_stdout(READ_TIMEOUT) self.client_process.writeline(data) self.server_process.read_stdout(READ_TIMEOUT) def test_connectivity(self, respawn=False): testing_string = uuidutils.generate_uuid() if respawn: self.stop_processes() self.client_process.writeline(testing_string) message = self.server_process.read_stdout(READ_TIMEOUT).strip() self.server_process.writeline(message) message = self.client_process.read_stdout(READ_TIMEOUT).strip() return message == testing_string def _spawn_nc_in_namespace(self, namespace, address, listen=False): cmd = ['nc', address, self.dst_port] if self.protocol == self.UDP: cmd.append('-u') if listen: cmd.append('-l') if self.protocol == self.TCP: cmd.append('-k') else: cmd.extend(['-w', '20']) if self.src_port: cmd.extend(['-p', self.src_port]) proc = RootHelperProcess(cmd, namespace=namespace) return proc def stop_processes(self): for proc_attr in ('_client_process', '_server_process'): proc = getattr(self, proc_attr) if proc: if proc.poll() is None: proc.kill() proc.wait() setattr(self, proc_attr, None) class NamespaceFixture(fixtures.Fixture): """Create a namespace. :ivar ip_wrapper: created namespace :type ip_wrapper: IPWrapper :ivar name: created namespace name :type name: str """ def __init__(self, prefix=NS_PREFIX): super(NamespaceFixture, self).__init__() self.prefix = prefix def _setUp(self): ip = ip_lib.IPWrapper() self.name = self.prefix + uuidutils.generate_uuid() self.addCleanup(self.destroy) self.ip_wrapper = ip.ensure_namespace(self.name) def destroy(self): if self.ip_wrapper.netns.exists(self.name): self.ip_wrapper.netns.delete(self.name) class VethFixture(fixtures.Fixture): """Create a veth. :ivar ports: created veth ports :type ports: tuple of 2 IPDevice """ def _setUp(self): ip_wrapper = ip_lib.IPWrapper() self.ports = common_base.create_resource( VETH0_PREFIX, lambda name: ip_wrapper.add_veth(name, self.get_peer_name(name))) self.addCleanup(self.destroy) def destroy(self): for port in self.ports: ip_wrapper = ip_lib.IPWrapper(port.namespace) if (ip_wrapper.netns.exists(port.namespace) or port.namespace is None): try: ip_wrapper.del_veth(port.name) break except RuntimeError: # NOTE(cbrandily): It seems a veth is automagically deleted # when a namespace owning a veth endpoint is deleted. pass @staticmethod def get_peer_name(name): if name.startswith(VETH0_PREFIX): return name.replace(VETH0_PREFIX, VETH1_PREFIX) elif name.startswith(VETH1_PREFIX): return name.replace(VETH1_PREFIX, VETH0_PREFIX) else: tools.fail('%s is not a valid VethFixture veth endpoint' % name) class NamedVethFixture(VethFixture): """Create a veth with at least one specified name of a device :ivar ports: created veth ports :type ports: tuple of 2 IPDevice """ def __init__(self, veth0_prefix=VETH0_PREFIX, veth1_prefix=VETH1_PREFIX): super(NamedVethFixture, self).__init__() self.veth0_name = self.get_veth_name(veth0_prefix) self.veth1_name = self.get_veth_name(veth1_prefix) def _setUp(self): ip_wrapper = ip_lib.IPWrapper() self.ports = ip_wrapper.add_veth(self.veth0_name, self.veth1_name) self.addCleanup(self.destroy) @staticmethod def get_veth_name(name): if name.startswith(VETH0_PREFIX): return tests_base.get_rand_device_name(VETH0_PREFIX) if name.startswith(VETH1_PREFIX): return tests_base.get_rand_device_name(VETH1_PREFIX) return name class MacvtapFixture(fixtures.Fixture): """Create a macvtap. 
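A usage sketch (the source device name 'eth0' is an assumption)::

    macvtap = self.useFixture(
        MacvtapFixture('eth0', mode='bridge')).ip_dev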
:param src_dev: source device for macvtap :type src_dev: IPDevice :param mode: mode of macvtap :type mode: string :ivar ip_dev: created macvtap :type ip_dev: IPDevice """ def __init__(self, src_dev=None, mode=None, prefix=MACVTAP_PREFIX): super(MacvtapFixture, self).__init__() self.src_dev = src_dev self.mode = mode self.prefix = prefix def _setUp(self): ip_wrapper = ip_lib.IPWrapper() self.ip_dev = common_base.create_resource( self.prefix, ip_wrapper.add_macvtap, self.src_dev, mode=self.mode) self.addCleanup(self.destroy) def destroy(self): ip_wrapper = ip_lib.IPWrapper(self.ip_dev.namespace) if (ip_wrapper.netns.exists(self.ip_dev.namespace) or self.ip_dev.namespace is None): try: self.ip_dev.link.delete() except RuntimeError: pass @six.add_metaclass(abc.ABCMeta) class PortFixture(fixtures.Fixture): """Create a port. :ivar port: created port :type port: IPDevice :ivar bridge: port bridge """ def __init__(self, bridge=None, namespace=None, mac=None, port_id=None): super(PortFixture, self).__init__() self.bridge = bridge self.namespace = namespace self.mac = ( mac or db_base_plugin_common.DbBasePluginCommon._generate_mac()) self.port_id = port_id or uuidutils.generate_uuid() @abc.abstractmethod def _create_bridge_fixture(self): pass @abc.abstractmethod def _setUp(self): super(PortFixture, self)._setUp() if not self.bridge: self.bridge = self.useFixture(self._create_bridge_fixture()).bridge @classmethod def get(cls, bridge, namespace=None, mac=None, port_id=None): """Deduce PortFixture class from bridge type and instantiate it.""" if isinstance(bridge, ovs_lib.OVSBridge): return OVSPortFixture(bridge, namespace, mac, port_id) if isinstance(bridge, bridge_lib.BridgeDevice): return LinuxBridgePortFixture(bridge, namespace, mac, port_id) if isinstance(bridge, VethBridge): return VethPortFixture(bridge, namespace) tools.fail('Unexpected bridge type: %s' % type(bridge)) class OVSBridgeFixture(fixtures.Fixture): """Create an OVS bridge. :ivar prefix: bridge name prefix :type prefix: str :ivar bridge: created bridge :type bridge: OVSBridge """ def __init__(self, prefix=BR_PREFIX): super(OVSBridgeFixture, self).__init__() self.prefix = prefix def _setUp(self): ovs = ovs_lib.BaseOVS() self.bridge = common_base.create_resource(self.prefix, ovs.add_bridge) self.addCleanup(self.bridge.destroy) class OVSPortFixture(PortFixture): def _create_bridge_fixture(self): return OVSBridgeFixture() def _setUp(self): super(OVSPortFixture, self)._setUp() interface_config = cfg.ConfigOpts() interface_config.register_opts(interface.OPTS) ovs_interface = interface.OVSInterfaceDriver(interface_config) # because in some tests this port can be used to providing connection # between linuxbridge agents and vlan_id can be also added to this # device name it has to be max LB_DEVICE_NAME_MAX_LEN long port_name = tests_base.get_rand_name( LB_DEVICE_NAME_MAX_LEN, PORT_PREFIX ) ovs_interface.plug_new( None, self.port_id, port_name, self.mac, bridge=self.bridge.br_name, namespace=self.namespace) self.addCleanup(self.bridge.delete_port, port_name) self.port = ip_lib.IPDevice(port_name, self.namespace) class LinuxBridgeFixture(fixtures.Fixture): """Create a linux bridge. 
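Example, as used by LinuxBridgeConnectionTester::

    bridge = self.useFixture(LinuxBridgeFixture()).bridge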
:ivar bridge: created bridge :type bridge: BridgeDevice :ivar namespace: created bridge namespace :type namespace: str """ def __init__(self, prefix=BR_PREFIX, namespace=UNDEFINED, prefix_is_full_name=False): super(LinuxBridgeFixture, self).__init__() self.prefix = prefix self.prefix_is_full_name = prefix_is_full_name self.namespace = namespace def _setUp(self): if self.namespace is UNDEFINED: self.namespace = self.useFixture(NamespaceFixture()).name self.bridge = self._create_bridge() self.addCleanup(self.safe_delete) self.bridge.link.set_up() self.addCleanup(self.safe_set_down) def safe_set_down(self): try: self.bridge.link.set_down() except RuntimeError: pass def safe_delete(self): try: self.bridge.delbr() except RuntimeError: pass def _create_bridge(self): if self.prefix_is_full_name: return bridge_lib.BridgeDevice.addbr( name=self.prefix, namespace=self.namespace ) else: return common_base.create_resource( self.prefix, bridge_lib.BridgeDevice.addbr, namespace=self.namespace) class LinuxBridgePortFixture(PortFixture): """Create a linux bridge port. :ivar port: created port :type port: IPDevice :ivar br_port: bridge side veth peer port :type br_port: IPDevice """ def __init__(self, bridge, namespace=None, mac=None, port_id=None): super(LinuxBridgePortFixture, self).__init__( bridge, namespace, mac, port_id) # we need to override port_id value here because in Port() class it is # always generated as random. In LinuxBridgePortFixture we need to have # it empty if it was not give because then proper veth_pair will be # created (for example in some functional tests) self.port_id = port_id def _create_bridge_fixture(self): return LinuxBridgeFixture() def _setUp(self): super(LinuxBridgePortFixture, self)._setUp() br_port_name = self._get_port_name() if br_port_name: self.br_port, self.port = self.useFixture( NamedVethFixture(veth0_prefix=br_port_name)).ports else: self.br_port, self.port = self.useFixture(VethFixture()).ports if self.mac: self.port.link.set_address(self.mac) # bridge side br_ip_wrapper = ip_lib.IPWrapper(self.bridge.namespace) br_ip_wrapper.add_device_to_namespace(self.br_port) self.bridge.addif(self.br_port) self.br_port.link.set_up() # port side ns_ip_wrapper = ip_lib.IPWrapper(self.namespace) ns_ip_wrapper.add_device_to_namespace(self.port) self.port.link.set_up() def _get_port_name(self): if self.port_id: return linuxbridge_agent.LinuxBridgeManager.get_tap_device_name( self.port_id) return None class VethBridge(object): def __init__(self, ports): self.ports = ports self.unallocated_ports = set(self.ports) def allocate_port(self): try: return self.unallocated_ports.pop() except KeyError: tools.fail('All FakeBridge ports (%s) are already allocated.' % len(self.ports)) class VethBridgeFixture(fixtures.Fixture): """Simulate a bridge with a veth. :ivar bridge: created bridge :type bridge: FakeBridge """ def _setUp(self): ports = self.useFixture(VethFixture()).ports self.bridge = VethBridge(ports) class VethPortFixture(PortFixture): """Create a veth bridge port. 
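The port is taken from the bridge's preallocated veth pair and moved
into the requested namespace before being set up.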
:ivar port: created port :type port: IPDevice """ def _create_bridge_fixture(self): return VethBridgeFixture() def _setUp(self): super(VethPortFixture, self)._setUp() self.port = self.bridge.allocate_port() ns_ip_wrapper = ip_lib.IPWrapper(self.namespace) ns_ip_wrapper.add_device_to_namespace(self.port) self.port.link.set_up() neutron-8.4.0/neutron/tests/common/agents/0000775000567000056710000000000013044373210021760 5ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/tests/common/agents/ovs_agent.py0000775000567000056710000000335313044372760024337 0ustar jenkinsjenkins00000000000000#!/usr/bin/env python # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import hashlib import sys from oslo_utils import encodeutils from neutron.cmd.eventlet.plugins.ovs_neutron_agent import main as _main from neutron.common import constants as n_const from neutron.plugins.ml2.drivers.openvswitch.agent.ovs_neutron_agent \ import OVSNeutronAgent def get_tunnel_name_full(cls, network_type, local_ip, remote_ip): network_type = network_type[:3] # Remove length of network_type and two dashes hashlen = (n_const.DEVICE_NAME_MAX_LEN - len(network_type) - 2) // 2 remote_tunnel_hash = cls.get_tunnel_hash(remote_ip, hashlen) if not remote_tunnel_hash: return None remote_tunnel_hash = encodeutils.to_utf8(remote_tunnel_hash) remote_ip_hash = hashlib.sha1(remote_tunnel_hash).hexdigest()[:hashlen] local_tunnel_hash = cls.get_tunnel_hash(local_ip, hashlen) local_tunnel_hash = encodeutils.to_utf8(local_tunnel_hash) source_ip_hash = hashlib.sha1(local_tunnel_hash).hexdigest()[:hashlen] return '%s-%s-%s' % (network_type, source_ip_hash, remote_ip_hash) OVSNeutronAgent.get_tunnel_name = get_tunnel_name_full def main(): _main() if __name__ == "__main__": sys.exit(main()) neutron-8.4.0/neutron/tests/common/agents/__init__.py0000664000567000056710000000000013044372736024073 0ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/tests/common/agents/l2_extensions.py0000664000567000056710000000205213044372760025136 0ustar jenkinsjenkins00000000000000# Copyright (c) 2015 Red Hat, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from neutron.agent.linux import utils as agent_utils def wait_until_bandwidth_limit_rule_applied(bridge, port_vif, rule): def _bandwidth_limit_rule_applied(): bw_rule = bridge.get_egress_bw_limit_for_port(port_vif) expected = None, None if rule: expected = rule.max_kbps, rule.max_burst_kbps return bw_rule == expected agent_utils.wait_until_true(_bandwidth_limit_rule_applied) neutron-8.4.0/neutron/tests/common/agents/l3_agent.py0000775000567000056710000000504613044372736024052 0ustar jenkinsjenkins00000000000000#!/usr/bin/env python # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import sys import mock from oslo_config import cfg from neutron._i18n import _ from neutron.agent.l3 import agent from neutron.agent.l3 import namespaces from neutron.agent import l3_agent class L3NATAgentForTest(agent.L3NATAgentWithStateReport): def __init__(self, host, conf=None): ns_suffix = '@%s' % cfg.CONF.test_namespace_suffix # Mock out building of namespace names orig_build_ns_name = namespaces.build_ns_name def build_ns_name(prefix, identifier): return "%s%s" % (orig_build_ns_name(prefix, identifier), ns_suffix) build_ns = mock.patch.object(namespaces, 'build_ns_name').start() build_ns.side_effect = build_ns_name # Mock the parsing prefix from namespace names orig_get_prefix = namespaces.get_prefix_from_ns_name def get_prefix_from_ns_name(ns_name): if ns_name.endswith(ns_suffix): return orig_get_prefix(ns_name[:-len(ns_suffix)]) parse_prefix = mock.patch.object(namespaces, 'get_prefix_from_ns_name').start() parse_prefix.side_effect = get_prefix_from_ns_name # Mock the parsing id from namespace names orig_get_id = namespaces.get_id_from_ns_name def get_id_from_ns_name(ns_name): if ns_name.endswith(ns_suffix): return orig_get_id(ns_name[:-len(ns_suffix)]) parse_id = mock.patch.object(namespaces, 'get_id_from_ns_name').start() parse_id.side_effect = get_id_from_ns_name super(L3NATAgentForTest, self).__init__(host, conf) OPTS = [ cfg.StrOpt('test_namespace_suffix', default='testprefix', help=_("Suffix to append to all namespace names.")), ] def register_opts(conf): conf.register_opts(OPTS) def main(manager='neutron.tests.common.agents.l3_agent.L3NATAgentForTest'): register_opts(cfg.CONF) l3_agent.main(manager=manager) if __name__ == "__main__": sys.exit(main()) neutron-8.4.0/neutron/tests/etc/0000775000567000056710000000000013044373210017762 5ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/tests/etc/api-paste.ini.test0000664000567000056710000000044213044372736023340 0ustar jenkinsjenkins00000000000000[pipeline:extensions_app_with_filter] pipeline = extensions extensions_test_app [filter:extensions] paste.filter_factory = neutron.common.extensions:plugin_aware_extension_middleware_factory [app:extensions_test_app] paste.app_factory = neutron.tests.unit.api.test_extensions:app_factory neutron-8.4.0/neutron/tests/etc/neutron_test2.conf.example0000664000567000056710000000005113044372736025106 0ustar jenkinsjenkins00000000000000[service_providers] service_provider=zzz 
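# NOTE: 'zzz' above is a deliberately bogus provider used by the config
# tests. For reference, the full service_provider format expected in a
# real deployment is assumed to be (the driver path below is
# hypothetical):
#   service_provider = <service_type>:<name>:<driver>[:default]
#   service_provider = LOADBALANCERV2:Haproxy:some.path.HaproxyDriver:default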
neutron-8.4.0/neutron/tests/etc/neutron_test.conf0000664000567000056710000000007613044372736023401 0ustar jenkinsjenkins00000000000000[service_providers] service_provider=foo service_provider=bar neutron-8.4.0/neutron/tests/etc/policy.json0000664000567000056710000002503313044372760022170 0ustar jenkinsjenkins00000000000000{ "context_is_admin": "role:admin", "owner": "tenant_id:%(tenant_id)s", "admin_or_owner": "rule:context_is_admin or rule:owner", "context_is_advsvc": "role:advsvc", "admin_or_network_owner": "rule:context_is_admin or tenant_id:%(network:tenant_id)s", "admin_owner_or_network_owner": "rule:owner or rule:admin_or_network_owner", "admin_only": "rule:context_is_admin", "regular_user": "", "shared": "field:networks:shared=True", "shared_firewalls": "field:firewalls:shared=True", "shared_firewall_policies": "field:firewall_policies:shared=True", "shared_subnetpools": "field:subnetpools:shared=True", "shared_address_scopes": "field:address_scopes:shared=True", "external": "field:networks:router:external=True", "default": "rule:admin_or_owner", "create_subnet": "rule:admin_or_network_owner", "get_subnet": "rule:admin_or_owner or rule:shared", "update_subnet": "rule:admin_or_network_owner", "delete_subnet": "rule:admin_or_network_owner", "create_subnetpool": "", "create_subnetpool:shared": "rule:admin_only", "create_subnetpool:is_default": "rule:admin_only", "get_subnetpool": "rule:admin_or_owner or rule:shared_subnetpools", "update_subnetpool": "rule:admin_or_owner", "update_subnetpool:is_default": "rule:admin_only", "delete_subnetpool": "rule:admin_or_owner", "create_address_scope": "", "create_address_scope:shared": "rule:admin_only", "get_address_scope": "rule:admin_or_owner or rule:shared_address_scopes", "update_address_scope": "rule:admin_or_owner", "update_address_scope:shared": "rule:admin_only", "delete_address_scope": "rule:admin_or_owner", "create_network": "", "get_network": "rule:admin_or_owner or rule:shared or rule:external or rule:context_is_advsvc", "get_network:router:external": "rule:regular_user", "get_network:segments": "rule:admin_only", "get_network:provider:network_type": "rule:admin_only", "get_network:provider:physical_network": "rule:admin_only", "get_network:provider:segmentation_id": "rule:admin_only", "get_network:queue_id": "rule:admin_only", "get_network_ip_availabilities": "rule:admin_only", "get_network_ip_availability": "rule:admin_only", "create_network:shared": "rule:admin_only", "create_network:router:external": "rule:admin_only", "create_network:is_default": "rule:admin_only", "create_network:segments": "rule:admin_only", "create_network:provider:network_type": "rule:admin_only", "create_network:provider:physical_network": "rule:admin_only", "create_network:provider:segmentation_id": "rule:admin_only", "update_network": "rule:admin_or_owner", "update_network:segments": "rule:admin_only", "update_network:shared": "rule:admin_only", "update_network:provider:network_type": "rule:admin_only", "update_network:provider:physical_network": "rule:admin_only", "update_network:provider:segmentation_id": "rule:admin_only", "update_network:router:external": "rule:admin_only", "delete_network": "rule:admin_or_owner", "network_device": "field:port:device_owner=~^network:", "create_port": "", "create_port:device_owner": "not rule:network_device or rule:context_is_advsvc or rule:admin_or_network_owner", "create_port:mac_address": "rule:context_is_advsvc or rule:admin_or_network_owner", "create_port:fixed_ips": "rule:context_is_advsvc or 
rule:admin_or_network_owner", "create_port:port_security_enabled": "rule:context_is_advsvc or rule:admin_or_network_owner", "create_port:binding:host_id": "rule:admin_only", "create_port:binding:profile": "rule:admin_only", "create_port:mac_learning_enabled": "rule:context_is_advsvc or rule:admin_or_network_owner", "create_port:allowed_address_pairs": "rule:admin_or_network_owner", "get_port": "rule:context_is_advsvc or rule:admin_owner_or_network_owner", "get_port:queue_id": "rule:admin_only", "get_port:binding:vif_type": "rule:admin_only", "get_port:binding:vif_details": "rule:admin_only", "get_port:binding:host_id": "rule:admin_only", "get_port:binding:profile": "rule:admin_only", "update_port": "rule:admin_or_owner or rule:context_is_advsvc", "update_port:device_owner": "not rule:network_device or rule:context_is_advsvc or rule:admin_or_network_owner", "update_port:mac_address": "rule:admin_only or rule:context_is_advsvc", "update_port:fixed_ips": "rule:context_is_advsvc or rule:admin_or_network_owner", "update_port:port_security_enabled": "rule:context_is_advsvc or rule:admin_or_network_owner", "update_port:binding:host_id": "rule:admin_only", "update_port:binding:profile": "rule:admin_only", "update_port:mac_learning_enabled": "rule:context_is_advsvc or rule:admin_or_network_owner", "update_port:allowed_address_pairs": "rule:admin_or_network_owner", "delete_port": "rule:context_is_advsvc or rule:admin_owner_or_network_owner", "get_router:ha": "rule:admin_only", "create_router": "rule:regular_user", "create_router:external_gateway_info:enable_snat": "rule:admin_only", "create_router:distributed": "rule:admin_only", "create_router:ha": "rule:admin_only", "get_router": "rule:admin_or_owner", "get_router:distributed": "rule:admin_only", "update_router:external_gateway_info:enable_snat": "rule:admin_only", "update_router:distributed": "rule:admin_only", "update_router:ha": "rule:admin_only", "delete_router": "rule:admin_or_owner", "add_router_interface": "rule:admin_or_owner", "remove_router_interface": "rule:admin_or_owner", "create_router:external_gateway_info:external_fixed_ips": "rule:admin_only", "update_router:external_gateway_info:external_fixed_ips": "rule:admin_only", "create_firewall": "", "get_firewall": "rule:admin_or_owner", "create_firewall:shared": "rule:admin_only", "get_firewall:shared": "rule:admin_only", "update_firewall": "rule:admin_or_owner", "update_firewall:shared": "rule:admin_only", "delete_firewall": "rule:admin_or_owner", "create_firewall_policy": "", "get_firewall_policy": "rule:admin_or_owner or rule:shared_firewall_policies", "create_firewall_policy:shared": "rule:admin_or_owner", "update_firewall_policy": "rule:admin_or_owner", "delete_firewall_policy": "rule:admin_or_owner", "insert_rule": "rule:admin_or_owner", "remove_rule": "rule:admin_or_owner", "create_firewall_rule": "", "get_firewall_rule": "rule:admin_or_owner or rule:shared_firewalls", "update_firewall_rule": "rule:admin_or_owner", "delete_firewall_rule": "rule:admin_or_owner", "create_qos_queue": "rule:admin_only", "get_qos_queue": "rule:admin_only", "update_agent": "rule:admin_only", "delete_agent": "rule:admin_only", "get_agent": "rule:admin_only", "create_dhcp-network": "rule:admin_only", "delete_dhcp-network": "rule:admin_only", "get_dhcp-networks": "rule:admin_only", "create_l3-router": "rule:admin_only", "delete_l3-router": "rule:admin_only", "get_l3-routers": "rule:admin_only", "get_dhcp-agents": "rule:admin_only", "get_l3-agents": "rule:admin_only", "get_loadbalancer-agent": 
"rule:admin_only", "get_loadbalancer-pools": "rule:admin_only", "get_agent-loadbalancers": "rule:admin_only", "get_loadbalancer-hosting-agent": "rule:admin_only", "create_floatingip": "rule:regular_user", "create_floatingip:floating_ip_address": "rule:admin_only", "update_floatingip": "rule:admin_or_owner", "delete_floatingip": "rule:admin_or_owner", "get_floatingip": "rule:admin_or_owner", "create_network_profile": "rule:admin_only", "update_network_profile": "rule:admin_only", "delete_network_profile": "rule:admin_only", "get_network_profiles": "", "get_network_profile": "", "update_policy_profiles": "rule:admin_only", "get_policy_profiles": "", "get_policy_profile": "", "create_metering_label": "rule:admin_only", "delete_metering_label": "rule:admin_only", "get_metering_label": "rule:admin_only", "create_metering_label_rule": "rule:admin_only", "delete_metering_label_rule": "rule:admin_only", "get_metering_label_rule": "rule:admin_only", "get_service_provider": "rule:regular_user", "get_lsn": "rule:admin_only", "create_lsn": "rule:admin_only", "create_flavor": "rule:admin_only", "update_flavor": "rule:admin_only", "delete_flavor": "rule:admin_only", "get_flavors": "rule:regular_user", "get_flavor": "rule:regular_user", "create_service_profile": "rule:admin_only", "update_service_profile": "rule:admin_only", "delete_service_profile": "rule:admin_only", "get_service_profiles": "rule:admin_only", "get_service_profile": "rule:admin_only", "get_policy": "rule:regular_user", "create_policy": "rule:admin_only", "update_policy": "rule:admin_only", "delete_policy": "rule:admin_only", "get_policy_bandwidth_limit_rule": "rule:regular_user", "create_policy_bandwidth_limit_rule": "rule:admin_only", "delete_policy_bandwidth_limit_rule": "rule:admin_only", "update_policy_bandwidth_limit_rule": "rule:admin_only", "get_rule_type": "rule:regular_user", "restrict_wildcard": "(not field:rbac_policy:target_tenant=*) or rule:admin_only", "create_rbac_policy": "", "create_rbac_policy:target_tenant": "rule:restrict_wildcard", "update_rbac_policy": "rule:admin_or_owner", "update_rbac_policy:target_tenant": "rule:restrict_wildcard and rule:admin_or_owner", "get_rbac_policy": "rule:admin_or_owner", "delete_rbac_policy": "rule:admin_or_owner", "create_flavor_service_profile": "rule:admin_only", "delete_flavor_service_profile": "rule:admin_only", "get_flavor_service_profile": "rule:regular_user", "get_auto_allocated_topology": "rule:admin_or_owner", "get_bgp_speaker": "rule:admin_only", "create_bgp_speaker": "rule:admin_only", "update_bgp_speaker": "rule:admin_only", "delete_bgp_speaker": "rule:admin_only", "get_bgp_peer": "rule:admin_only", "create_bgp_peer": "rule:admin_only", "update_bgp_peer": "rule:admin_only", "delete_bgp_peer": "rule:admin_only", "add_bgp_peer": "rule:admin_only", "remove_bgp_peer": "rule:admin_only", "add_gateway_network": "rule:admin_only", "remove_gateway_network": "rule:admin_only", "get_advertised_routes":"rule:admin_only", "add_bgp_speaker_to_dragent": "rule:admin_only", "remove_bgp_speaker_from_dragent": "rule:admin_only", "list_bgp_speaker_on_dragent": "rule:admin_only", "list_dragent_hosting_bgp_speaker": "rule:admin_only" } neutron-8.4.0/neutron/tests/etc/neutron.conf0000664000567000056710000000104613044372760022335 0ustar jenkinsjenkins00000000000000[DEFAULT] # Show more verbose log output (sets INFO log level output) verbose = True # Show debugging output in logs (sets DEBUG log level output) debug = False # Address to bind the API server bind_host = 0.0.0.0 # Port the bind the 
API server to bind_port = 9696 # Path to the extensions api_extensions_path = neutron/tests/unit/extensions # Paste configuration file api_paste_config = api-paste.ini.test # The messaging module to use, defaults to kombu. rpc_backend = fake lock_path = $state_path/lock [database] connection = 'sqlite://' neutron-8.4.0/neutron/tests/unit/0000775000567000056710000000000013044373210020166 5ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/tests/unit/test_service.py0000664000567000056710000000160713044372760023254 0ustar jenkinsjenkins00000000000000# Copyright 2015 Mirantis Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import mock from neutron import service from neutron.tests.unit import test_wsgi class TestRpcWorker(test_wsgi.TestServiceBase): def test_reset(self): _plugin = mock.Mock() rpc_worker = service.RpcWorker(_plugin) self._test_reset(rpc_worker) neutron-8.4.0/neutron/tests/unit/plugins/0000775000567000056710000000000013044373210021647 5ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/tests/unit/plugins/__init__.py0000664000567000056710000000000013044372736023762 0ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/tests/unit/plugins/common/0000775000567000056710000000000013044373210023137 5ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/tests/unit/plugins/common/__init__.py0000664000567000056710000000000013044372736025252 0ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/tests/unit/plugins/common/test_utils.py0000664000567000056710000000557613044372760025736 0ustar jenkinsjenkins00000000000000# Copyright (c) 2015 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
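# The tests below exercise utils.get_interface_name(), which shortens long
# interface names to the kernel device-name limit by truncating the
# requested name and appending a short hash of the original one. A minimal,
# self-contained sketch of that truncate-plus-hash scheme (an illustration
# under assumed parameters -- a 15-character limit and a 6-character hash --
# not the actual neutron.plugins.common.utils code):

import hashlib as _hashlib


def _get_interface_name_sketch(name, prefix='', max_len=15, hash_len=6):
    requested_name = prefix + name
    if len(requested_name) <= max_len:
        return requested_name
    if len(prefix) > max_len - hash_len:
        raise ValueError('prefix is too long for the device name limit')
    # Hash the original (long) name so that two names sharing a prefix
    # still map to distinct, deterministic device names.
    digest = _hashlib.sha1(name.encode('utf-8')).hexdigest()[:hash_len]
    return requested_name[:max_len - hash_len] + digest
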
import hashlib import mock from neutron.common import constants from neutron.plugins.common import utils from neutron.tests import base LONG_NAME1 = "A_REALLY_LONG_INTERFACE_NAME1" LONG_NAME2 = "A_REALLY_LONG_INTERFACE_NAME2" SHORT_NAME = "SHORT" MOCKED_HASH = "mockedhash" class MockSHA(object): def hexdigest(self): return MOCKED_HASH class TestUtils(base.BaseTestCase): @mock.patch.object(hashlib, 'sha1', return_value=MockSHA()) def test_get_interface_name(self, mock_sha1): prefix = "pre-" prefix_long = "long_prefix" prefix_exceeds_max_dev_len = "much_too_long_prefix" hash_used = MOCKED_HASH[0:6] self.assertEqual("A_REALLY_" + hash_used, utils.get_interface_name(LONG_NAME1)) self.assertEqual("SHORT", utils.get_interface_name(SHORT_NAME)) self.assertEqual("pre-A_REA" + hash_used, utils.get_interface_name(LONG_NAME1, prefix=prefix)) self.assertEqual("pre-SHORT", utils.get_interface_name(SHORT_NAME, prefix=prefix)) # len(prefix) > max_device_len - len(hash_used) self.assertRaises(ValueError, utils.get_interface_name, SHORT_NAME, prefix_long) # len(prefix) > max_device_len self.assertRaises(ValueError, utils.get_interface_name, SHORT_NAME, prefix=prefix_exceeds_max_dev_len) def test_get_interface_uniqueness(self): prefix = "prefix-" if_prefix1 = utils.get_interface_name(LONG_NAME1, prefix=prefix) if_prefix2 = utils.get_interface_name(LONG_NAME2, prefix=prefix) self.assertNotEqual(if_prefix1, if_prefix2) @mock.patch.object(hashlib, 'sha1', return_value=MockSHA()) def test_get_interface_max_len(self, mock_sha1): self.assertEqual(constants.DEVICE_NAME_MAX_LEN, len(utils.get_interface_name(LONG_NAME1))) self.assertEqual(10, len(utils.get_interface_name(LONG_NAME1, max_len=10))) self.assertEqual(12, len(utils.get_interface_name(LONG_NAME1, prefix="pre-", max_len=12))) neutron-8.4.0/neutron/tests/unit/plugins/ml2/0000775000567000056710000000000013044373210022341 5ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/tests/unit/plugins/ml2/test_managers.py0000664000567000056710000000436613044372760025571 0ustar jenkinsjenkins00000000000000# Copyright (c) 2016 IBM Corp. # # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
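# TestMechManager below pins down MechanismManager's error handling for
# *_precommit calls: a DBDeadlock raised by a driver must propagate
# unchanged (so the DB retry machinery can replay the transaction), while
# any other driver exception is reported as MechanismDriverError. A
# minimal sketch of that dispatch pattern (an illustration only; the
# helper name and signature are assumptions, not the actual managers.py
# code):

from oslo_db import exception as _db_exc

from neutron.plugins.ml2.common import exceptions as _ml2_exc


def _call_drivers_sketch(drivers, method_name, context):
    for driver in drivers:
        try:
            getattr(driver, method_name)(context)
        except _db_exc.DBDeadlock:
            # Deadlocks are retriable; re-raise them untranslated.
            raise
        except Exception:
            # Everything else is masked behind a single ml2 error type.
            raise _ml2_exc.MechanismDriverError(method=method_name)
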
import mock from oslo_db import exception as db_exc from neutron.plugins.ml2.common import exceptions as ml2_exc from neutron.plugins.ml2 import config as config from neutron.plugins.ml2 import managers from neutron.tests import base from neutron.tests.unit.plugins.ml2.drivers import mechanism_test class TestMechManager(base.BaseTestCase): def setUp(self): config.cfg.CONF.set_override('mechanism_drivers', ['test'], group='ml2') super(TestMechManager, self).setUp() self._manager = managers.MechanismManager() def _check_precommit(self, resource, operation): meth_name = "%s_%s_precommit" % (operation, resource) method = getattr(self._manager, meth_name) fake_ctxt = mock.Mock() fake_ctxt.current = {} with mock.patch.object(mechanism_test.TestMechanismDriver, meth_name, side_effect=db_exc.DBDeadlock()): self.assertRaises(db_exc.DBDeadlock, method, fake_ctxt) with mock.patch.object(mechanism_test.TestMechanismDriver, meth_name, side_effect=RuntimeError()): self.assertRaises(ml2_exc.MechanismDriverError, method, fake_ctxt) def _check_resource(self, resource): self._check_precommit(resource, 'create') self._check_precommit(resource, 'update') self._check_precommit(resource, 'delete') def test_network_precommit(self): self._check_resource('network') def test_subnet_precommit(self): self._check_resource('subnet') def test_port_precommit(self): self._check_resource('port') neutron-8.4.0/neutron/tests/unit/plugins/ml2/test_driver_context.py0000664000567000056710000000761313044372760027025 0ustar jenkinsjenkins00000000000000# Copyright (c) 2013 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import mock from neutron.common import constants from neutron.extensions import portbindings from neutron.plugins.ml2 import driver_context from neutron.tests import base class TestPortContext(base.BaseTestCase): # REVISIT(rkukura): These were originally for DvrPortContext tests, # but DvrPortContext functionality has been folded into the # regular PortContext class. Tests for non-DVR-specific # functionality are needed here as well. 
def test_host(self): plugin = mock.Mock() plugin_context = mock.Mock() network = mock.MagicMock() binding = mock.Mock() port = {'device_owner': constants.DEVICE_OWNER_DVR_INTERFACE} binding.host = 'foohost' with mock.patch.object(driver_context.db, 'get_network_segments'): ctx = driver_context.PortContext(plugin, plugin_context, port, network, binding, None) self.assertEqual('foohost', ctx.host) def test_host_super(self): plugin = mock.Mock() plugin_context = mock.Mock() network = mock.MagicMock() binding = mock.Mock() port = {'device_owner': 'compute', portbindings.HOST_ID: 'host'} binding.host = 'foohost' with mock.patch.object(driver_context.db, 'get_network_segments'): ctx = driver_context.PortContext(plugin, plugin_context, port, network, binding, None) self.assertEqual('host', ctx.host) def test_status(self): plugin = mock.Mock() plugin_context = mock.Mock() network = mock.MagicMock() binding = mock.Mock() port = {'device_owner': constants.DEVICE_OWNER_DVR_INTERFACE} binding.status = 'foostatus' with mock.patch.object(driver_context.db, 'get_network_segments'): ctx = driver_context.PortContext(plugin, plugin_context, port, network, binding, None) self.assertEqual('foostatus', ctx.status) def test_status_super(self): plugin = mock.Mock() plugin_context = mock.Mock() network = mock.MagicMock() binding = mock.Mock() port = {'device_owner': 'compute', 'status': 'status'} binding.status = 'foostatus' with mock.patch.object(driver_context.db, 'get_network_segments'): ctx = driver_context.PortContext(plugin, plugin_context, port, network, binding, None) self.assertEqual('status', ctx.status) neutron-8.4.0/neutron/tests/unit/plugins/ml2/_test_mech_agent.py0000664000567000056710000002275713044372760026231 0ustar jenkinsjenkins00000000000000# Copyright (c) 2013 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
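# The fakes below implement just enough of the ml2 driver_api context
# interface to exercise agent-based mechanism drivers. For orientation,
# here is a minimal sketch of the bind_port() flow that the test cases at
# the bottom of this module drive (a simplification with assumed names,
# not any specific driver's code):

def _bind_port_sketch(context, supported_types, vif_type, vif_details):
    # Try each candidate segment and bind the first one whose network
    # type the fake agent supports; 'id' and 'network_type' correspond
    # to the driver_api.ID and driver_api.NETWORK_TYPE keys used below.
    for segment in context.segments_to_bind:
        if segment['network_type'] in supported_types:
            context.set_binding(segment['id'], vif_type, vif_details)
            return
    # Falling through leaves the context unbound, which is exactly what
    # _check_unbound() in AgentMechanismBaseTestCase asserts.
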
from neutron.extensions import portbindings from neutron.plugins.ml2 import driver_api as api from neutron.tests import base NETWORK_ID = "fake_network" PORT_ID = "fake_port" class FakeNetworkContext(api.NetworkContext): def __init__(self, segments): self._network_segments = segments @property def current(self): return {'id': NETWORK_ID} @property def original(self): return None @property def network_segments(self): return self._network_segments class FakePortContext(api.PortContext): def __init__(self, agent_type, agents, segments, vnic_type=portbindings.VNIC_NORMAL): self._agent_type = agent_type self._agents = agents self._network_context = FakeNetworkContext(segments) self._bound_vnic_type = vnic_type self._bound_segment_id = None self._bound_vif_type = None self._bound_vif_details = None @property def current(self): return {'id': PORT_ID, portbindings.VNIC_TYPE: self._bound_vnic_type} @property def original(self): return None @property def status(self): return 'DOWN' @property def original_status(self): return None @property def network(self): return self._network_context @property def binding_levels(self): if self._bound_segment: return [{ api.BOUND_DRIVER: 'fake_driver', api.BOUND_SEGMENT: self._expand_segment(self._bound_segment) }] @property def original_binding_levels(self): return None @property def top_bound_segment(self): return self._expand_segment(self._bound_segment) @property def original_top_bound_segment(self): return None @property def bottom_bound_segment(self): return self._expand_segment(self._bound_segment) @property def original_bottom_bound_segment(self): return None def _expand_segment(self, segment_id): for segment in self._network_context.network_segments: if segment[api.ID] == self._bound_segment_id: return segment @property def host(self): return '' @property def original_host(self): return None @property def vif_type(self): return portbindings.UNBOUND @property def original_vif_type(self): return portbindings.UNBOUND @property def vif_details(self): return None @property def original_vif_details(self): return None @property def segments_to_bind(self): return self._network_context.network_segments def host_agents(self, agent_type): if agent_type == self._agent_type: return self._agents else: return [] def set_binding(self, segment_id, vif_type, vif_details): self._bound_segment_id = segment_id self._bound_vif_type = vif_type self._bound_vif_details = vif_details def continue_binding(self, segment_id, next_segments_to_bind): pass def allocate_dynamic_segment(self, segment): pass def release_dynamic_segment(self, segment_id): pass class AgentMechanismBaseTestCase(base.BaseTestCase): # The following must be overridden for the specific mechanism # driver being tested: VIF_TYPE = None VIF_DETAILS = None AGENT_TYPE = None AGENTS = None AGENTS_DEAD = None AGENTS_BAD = None VNIC_TYPE = portbindings.VNIC_NORMAL def _check_unbound(self, context): self.assertIsNone(context._bound_segment_id) self.assertIsNone(context._bound_vif_type) self.assertIsNone(context._bound_vif_details) def _check_bound(self, context, segment): self.assertEqual(context._bound_segment_id, segment[api.ID]) self.assertEqual(context._bound_vif_type, self.VIF_TYPE) vif_details = context._bound_vif_details self.assertIsNotNone(vif_details) # NOTE(r-mibu): The following five lines are just for backward # compatibility. 
In this class, HAS_PORT_FILTER has been replaced # by VIF_DETAILS, which can be set to the expected vif_details to # check; however, the replacement of HAS_PORT_FILTER has not yet been # completed in all subclasses. if self.VIF_DETAILS is None: expected = getattr(self, 'CAP_PORT_FILTER', None) port_filter = vif_details[portbindings.CAP_PORT_FILTER] self.assertEqual(expected, port_filter) return self.assertEqual(self.VIF_DETAILS, vif_details) class AgentMechanismGenericTestCase(AgentMechanismBaseTestCase): UNKNOWN_TYPE_SEGMENTS = [{api.ID: 'unknown_segment_id', api.NETWORK_TYPE: 'no_such_type'}] def test_unknown_type(self): context = FakePortContext(self.AGENT_TYPE, self.AGENTS, self.UNKNOWN_TYPE_SEGMENTS, vnic_type=self.VNIC_TYPE) self.driver.bind_port(context) self._check_unbound(context) class AgentMechanismLocalTestCase(AgentMechanismBaseTestCase): LOCAL_SEGMENTS = [{api.ID: 'unknown_segment_id', api.NETWORK_TYPE: 'no_such_type'}, {api.ID: 'local_segment_id', api.NETWORK_TYPE: 'local'}] def test_type_local(self): context = FakePortContext(self.AGENT_TYPE, self.AGENTS, self.LOCAL_SEGMENTS, vnic_type=self.VNIC_TYPE) self.driver.bind_port(context) self._check_bound(context, self.LOCAL_SEGMENTS[1]) def test_type_local_dead(self): context = FakePortContext(self.AGENT_TYPE, self.AGENTS_DEAD, self.LOCAL_SEGMENTS, vnic_type=self.VNIC_TYPE) self.driver.bind_port(context) self._check_unbound(context) class AgentMechanismFlatTestCase(AgentMechanismBaseTestCase): FLAT_SEGMENTS = [{api.ID: 'unknown_segment_id', api.NETWORK_TYPE: 'no_such_type'}, {api.ID: 'flat_segment_id', api.NETWORK_TYPE: 'flat', api.PHYSICAL_NETWORK: 'fake_physical_network'}] def test_type_flat(self): context = FakePortContext(self.AGENT_TYPE, self.AGENTS, self.FLAT_SEGMENTS, vnic_type=self.VNIC_TYPE) self.driver.bind_port(context) self._check_bound(context, self.FLAT_SEGMENTS[1]) def test_type_flat_bad(self): context = FakePortContext(self.AGENT_TYPE, self.AGENTS_BAD, self.FLAT_SEGMENTS, vnic_type=self.VNIC_TYPE) self.driver.bind_port(context) self._check_unbound(context) class AgentMechanismVlanTestCase(AgentMechanismBaseTestCase): VLAN_SEGMENTS = [{api.ID: 'unknown_segment_id', api.NETWORK_TYPE: 'no_such_type'}, {api.ID: 'vlan_segment_id', api.NETWORK_TYPE: 'vlan', api.PHYSICAL_NETWORK: 'fake_physical_network', api.SEGMENTATION_ID: 1234}] def test_type_vlan(self): context = FakePortContext(self.AGENT_TYPE, self.AGENTS, self.VLAN_SEGMENTS, vnic_type=self.VNIC_TYPE) self.driver.bind_port(context) self._check_bound(context, self.VLAN_SEGMENTS[1]) def test_type_vlan_bad(self): context = FakePortContext(self.AGENT_TYPE, self.AGENTS_BAD, self.VLAN_SEGMENTS, vnic_type=self.VNIC_TYPE) self.driver.bind_port(context) self._check_unbound(context) class AgentMechanismGreTestCase(AgentMechanismBaseTestCase): GRE_SEGMENTS = [{api.ID: 'unknown_segment_id', api.NETWORK_TYPE: 'no_such_type'}, {api.ID: 'gre_segment_id', api.NETWORK_TYPE: 'gre', api.SEGMENTATION_ID: 1234}] def test_type_gre(self): context = FakePortContext(self.AGENT_TYPE, self.AGENTS, self.GRE_SEGMENTS) self.driver.bind_port(context) self._check_bound(context, self.GRE_SEGMENTS[1]) def test_type_gre_bad(self): context = FakePortContext(self.AGENT_TYPE, self.AGENTS_BAD, self.GRE_SEGMENTS) self.driver.bind_port(context) self._check_unbound(context) neutron-8.4.0/neutron/tests/unit/plugins/ml2/__init__.py0000664000567000056710000000000013044372736024454 0ustar 
jenkinsjenkins00000000000000neutron-8.4.0/neutron/tests/unit/plugins/ml2/test_tracked_resources.py0000664000567000056710000003604113044372736027501 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import mock from oslo_utils import uuidutils from neutron import context from neutron.db.quota import api as quota_db_api from neutron.tests.unit.api import test_extensions from neutron.tests.unit.extensions import test_l3 from neutron.tests.unit.extensions import test_securitygroup from neutron.tests.unit.plugins.ml2 import base as ml2_base from neutron.tests.unit.plugins.ml2 import test_plugin class SgTestCaseWrapper(test_securitygroup.SecurityGroupDBTestCase): # This wrapper class enables Ml2PluginV2TestCase to correctly call the # setup method in SecurityGroupDBTestCase which does not accept the # service_plugins keyword parameter. def setUp(self, plugin, **kwargs): super(SgTestCaseWrapper, self).setUp(plugin) class BaseTestTrackedResources(test_plugin.Ml2PluginV2TestCase, SgTestCaseWrapper): def setUp(self): self.ctx = context.get_admin_context() # Prevent noise from default security group operations def_sec_group_patch = mock.patch( 'neutron.db.securitygroups_db.SecurityGroupDbMixin.' '_ensure_default_security_group') def_sec_group_patch.start() get_sec_group_port_patch = mock.patch( 'neutron.db.securitygroups_db.SecurityGroupDbMixin.' 
'_get_security_groups_on_port') get_sec_group_port_patch.start() super(BaseTestTrackedResources, self).setUp() self._tenant_id = uuidutils.generate_uuid() def _test_init(self, resource_name): quota_db_api.set_quota_usage( self.ctx, resource_name, self._tenant_id) class BaseTestEventHandler(object): def setUp(self): handler_patch = mock.patch( 'neutron.quota.resource.TrackedResource._db_event_handler') self.handler_mock = handler_patch.start() super(BaseTestEventHandler, self).setUp() def _verify_event_handler_calls(self, data, expected_call_count=1): if not hasattr(data, '__iter__') or isinstance(data, dict): data = [data] self.assertEqual(expected_call_count, self.handler_mock.call_count) call_idx = -1 for item in data: if item: model = self.handler_mock.call_args_list[call_idx][0][-1] self.assertEqual(model['id'], item['id']) self.assertEqual(model['tenant_id'], item['tenant_id']) call_idx = call_idx - 1 class TestTrackedResourcesEventHandler(BaseTestEventHandler, BaseTestTrackedResources): def test_create_delete_network_triggers_event(self): self._test_init('network') net = self._make_network('json', 'meh', True)['network'] self._verify_event_handler_calls(net) self._delete('networks', net['id']) self._verify_event_handler_calls(net, expected_call_count=2) def test_create_delete_port_triggers_event(self): self._test_init('port') net = self._make_network('json', 'meh', True)['network'] port = self._make_port('json', net['id'])['port'] # Expecting 2 calls - 1 for the network, 1 for the port self._verify_event_handler_calls(port, expected_call_count=2) self._delete('ports', port['id']) self._verify_event_handler_calls(port, expected_call_count=3) def test_create_delete_subnet_triggers_event(self): self._test_init('subnet') net = self._make_network('json', 'meh', True) subnet = self._make_subnet('json', net, '10.0.0.1', '10.0.0.0/24')['subnet'] # Expecting 2 calls - 1 for the network, 1 for the subnet self._verify_event_handler_calls([subnet, net['network']], expected_call_count=2) self._delete('subnets', subnet['id']) self._verify_event_handler_calls(subnet, expected_call_count=3) def test_create_delete_network_with_subnet_triggers_event(self): self._test_init('network') self._test_init('subnet') net = self._make_network('json', 'meh', True) subnet = self._make_subnet('json', net, '10.0.0.1', '10.0.0.0/24')['subnet'] # Expecting 2 calls - 1 for the network, 1 for the subnet self._verify_event_handler_calls([subnet, net['network']], expected_call_count=2) self._delete('networks', net['network']['id']) # Expecting 2 more calls - 1 for the network, 1 for the subnet self._verify_event_handler_calls([net['network'], subnet], expected_call_count=4) def test_create_delete_subnetpool_triggers_event(self): self._test_init('subnetpool') pool = self._make_subnetpool('json', ['10.0.0.0/8'], name='meh', tenant_id=self._tenant_id)['subnetpool'] self._verify_event_handler_calls(pool) self._delete('subnetpools', pool['id']) self._verify_event_handler_calls(pool, expected_call_count=2) def test_create_delete_securitygroup_triggers_event(self): self._test_init('security_group') sec_group = self._make_security_group( 'json', 'meh', 'meh', tenant_id=self._tenant_id)['security_group'] # When a security group is created it also creates 2 rules, therefore # there will be three calls and we need to verify the first self._verify_event_handler_calls([None, None, sec_group], expected_call_count=3) self._delete('security-groups', sec_group['id']) # When a security group is deleted it also removes the 2 rules # 
generated upon creation self._verify_event_handler_calls(sec_group, expected_call_count=6) def test_create_delete_securitygrouprule_triggers_event(self): self._test_init('security_group_rule') sec_group = self._make_security_group( 'json', 'meh', 'meh', tenant_id=self._tenant_id)['security_group'] rule_req = self._build_security_group_rule( sec_group['id'], 'ingress', 'TCP', tenant_id=self._tenant_id) sec_group_rule = self._make_security_group_rule( 'json', rule_req)['security_group_rule'] # When a security group is created it also creates 2 rules, therefore # there will be four calls in total to the event handler self._verify_event_handler_calls(sec_group_rule, expected_call_count=4) self._delete('security-group-rules', sec_group_rule['id']) self._verify_event_handler_calls(sec_group_rule, expected_call_count=5) class TestL3ResourcesEventHandler(BaseTestEventHandler, ml2_base.ML2TestFramework, test_l3.L3NatTestCaseMixin): def setUp(self): super(TestL3ResourcesEventHandler, self).setUp() ext_mgr = test_l3.L3TestExtensionManager() self.ext_api = test_extensions.setup_extensions_middleware(ext_mgr) def test_create_delete_floating_ip_triggers_event(self): net = self._make_network('json', 'meh', True) subnet = self._make_subnet('json', net, '14.0.0.1', '14.0.0.0/24')['subnet'] self._set_net_external(subnet['network_id']) floatingip = self._make_floatingip('json', subnet['network_id']) internal_port = self._show( 'ports', floatingip['floatingip']['port_id'])['ports'][0] # When a floatingip is created it also creates port, therefore # there will be four calls in total to the event handler self._verify_event_handler_calls(floatingip['floatingip'], expected_call_count=4) self._delete('floatingips', floatingip['floatingip']['id']) # Expecting 2 more calls - 1 for the port, 1 for the floatingip self._verify_event_handler_calls( [internal_port, floatingip['floatingip']], expected_call_count=6) class TestTrackedResources(BaseTestTrackedResources): def _verify_dirty_bit(self, resource_name, expected_value=True): usage = quota_db_api.get_quota_usage_by_resource_and_tenant( self.ctx, resource_name, self._tenant_id) self.assertEqual(expected_value, usage.dirty) def test_create_delete_network_marks_dirty(self): self._test_init('network') net = self._make_network('json', 'meh', True)['network'] self._verify_dirty_bit('network') # Clear the dirty bit quota_db_api.set_quota_usage_dirty( self.ctx, 'network', self._tenant_id, dirty=False) self._delete('networks', net['id']) self._verify_dirty_bit('network') def test_list_networks_clears_dirty(self): self._test_init('network') net = self._make_network('json', 'meh', True)['network'] self.ctx.tenant_id = net['tenant_id'] self._list('networks', neutron_context=self.ctx) self._verify_dirty_bit('network', expected_value=False) def test_create_delete_port_marks_dirty(self): self._test_init('port') net = self._make_network('json', 'meh', True)['network'] port = self._make_port('json', net['id'])['port'] self._verify_dirty_bit('port') # Clear the dirty bit quota_db_api.set_quota_usage_dirty( self.ctx, 'port', self._tenant_id, dirty=False) self._delete('ports', port['id']) self._verify_dirty_bit('port') def test_list_ports_clears_dirty(self): self._test_init('port') net = self._make_network('json', 'meh', True)['network'] port = self._make_port('json', net['id'])['port'] self.ctx.tenant_id = port['tenant_id'] self._list('ports', neutron_context=self.ctx) self._verify_dirty_bit('port', expected_value=False) def test_create_delete_subnet_marks_dirty(self): 
self._test_init('subnet') net = self._make_network('json', 'meh', True) subnet = self._make_subnet('json', net, '10.0.0.1', '10.0.0.0/24')['subnet'] self._verify_dirty_bit('subnet') # Clear the dirty bit quota_db_api.set_quota_usage_dirty( self.ctx, 'subnet', self._tenant_id, dirty=False) self._delete('subnets', subnet['id']) self._verify_dirty_bit('subnet') def test_create_delete_network_with_subnet_marks_dirty(self): self._test_init('network') self._test_init('subnet') net = self._make_network('json', 'meh', True) self._make_subnet('json', net, '10.0.0.1', '10.0.0.0/24')['subnet'] self._verify_dirty_bit('subnet') # Clear the dirty bit quota_db_api.set_quota_usage_dirty( self.ctx, 'subnet', self._tenant_id, dirty=False) self._delete('networks', net['network']['id']) self._verify_dirty_bit('network') self._verify_dirty_bit('subnet') def test_list_subnets_clears_dirty(self): self._test_init('subnet') net = self._make_network('json', 'meh', True) subnet = self._make_subnet('json', net, '10.0.0.1', '10.0.0.0/24')['subnet'] self.ctx.tenant_id = subnet['tenant_id'] self._list('subnets', neutron_context=self.ctx) self._verify_dirty_bit('subnet', expected_value=False) def test_create_delete_subnetpool_marks_dirty(self): self._test_init('subnetpool') pool = self._make_subnetpool('json', ['10.0.0.0/8'], name='meh', tenant_id=self._tenant_id)['subnetpool'] self._verify_dirty_bit('subnetpool') # Clear the dirty bit quota_db_api.set_quota_usage_dirty( self.ctx, 'subnetpool', self._tenant_id, dirty=False) self._delete('subnetpools', pool['id']) self._verify_dirty_bit('subnetpool') def test_list_subnetpools_clears_dirty(self): self._test_init('subnetpool') pool = self._make_subnetpool('json', ['10.0.0.0/8'], name='meh', tenant_id=self._tenant_id)['subnetpool'] self.ctx.tenant_id = pool['tenant_id'] self._list('subnetpools', neutron_context=self.ctx) self._verify_dirty_bit('subnetpool', expected_value=False) def test_create_delete_securitygroup_marks_dirty(self): self._test_init('security_group') sec_group = self._make_security_group( 'json', 'meh', 'meh', tenant_id=self._tenant_id)['security_group'] self._verify_dirty_bit('security_group') # Clear the dirty bit quota_db_api.set_quota_usage_dirty( self.ctx, 'security_group', self._tenant_id, dirty=False) self._delete('security-groups', sec_group['id']) self._verify_dirty_bit('security_group') def test_list_securitygroups_clears_dirty(self): self._test_init('security_group') self._make_security_group( 'json', 'meh', 'meh', tenant_id=self._tenant_id)['security_group'] self.ctx.tenant_id = self._tenant_id self._list('security-groups', neutron_context=self.ctx) self._verify_dirty_bit('security_group', expected_value=False) def test_create_delete_securitygrouprule_marks_dirty(self): self._test_init('security_group_rule') sec_group = self._make_security_group( 'json', 'meh', 'meh', tenant_id=self._tenant_id)['security_group'] rule_req = self._build_security_group_rule( sec_group['id'], 'ingress', 'TCP', tenant_id=self._tenant_id) sec_group_rule = self._make_security_group_rule( 'json', rule_req)['security_group_rule'] self._verify_dirty_bit('security_group_rule') # Clear the dirty bit quota_db_api.set_quota_usage_dirty( self.ctx, 'security_group_rule', self._tenant_id, dirty=False) self._delete('security-group-rules', sec_group_rule['id']) self._verify_dirty_bit('security_group_rule') def test_list_securitygrouprules_clears_dirty(self): self._test_init('security_group_rule') self._make_security_group( 'json', 'meh', 'meh', 
tenant_id=self._tenant_id)['security_group'] # As the security group create operation also creates 2 security group # rules there is no need to explicitly create any rule self.ctx.tenant_id = self._tenant_id self._list('security-group-rules', neutron_context=self.ctx) self._verify_dirty_bit('security_group_rule', expected_value=False) neutron-8.4.0/neutron/tests/unit/plugins/ml2/test_security_group.py0000664000567000056710000001550413044372760027053 0ustar jenkinsjenkins00000000000000# Copyright (c) 2013 OpenStack Foundation # Copyright 2013, Nachi Ueno, NTT MCL, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import math import mock from neutron.common import constants as const from neutron import context from neutron.extensions import securitygroup as ext_sg from neutron import manager from neutron.tests import tools from neutron.tests.unit.agent import test_securitygroups_rpc as test_sg_rpc from neutron.tests.unit.api.v2 import test_base from neutron.tests.unit.extensions import test_securitygroup as test_sg PLUGIN_NAME = 'neutron.plugins.ml2.plugin.Ml2Plugin' NOTIFIER = 'neutron.plugins.ml2.rpc.AgentNotifierApi' class Ml2SecurityGroupsTestCase(test_sg.SecurityGroupDBTestCase): _plugin_name = PLUGIN_NAME def setUp(self, plugin=None): test_sg_rpc.set_firewall_driver(test_sg_rpc.FIREWALL_HYBRID_DRIVER) notifier_p = mock.patch(NOTIFIER) notifier_cls = notifier_p.start() self.notifier = mock.Mock() notifier_cls.return_value = self.notifier self.useFixture(tools.AttributeMapMemento()) super(Ml2SecurityGroupsTestCase, self).setUp(PLUGIN_NAME) def tearDown(self): super(Ml2SecurityGroupsTestCase, self).tearDown() class TestMl2SecurityGroups(Ml2SecurityGroupsTestCase, test_sg.TestSecurityGroups, test_sg_rpc.SGNotificationTestMixin): def setUp(self): super(TestMl2SecurityGroups, self).setUp() self.ctx = context.get_admin_context() plugin = manager.NeutronManager.get_plugin() plugin.start_rpc_listeners() def _make_port_with_new_sec_group(self, net_id): sg = self._make_security_group(self.fmt, 'name', 'desc') port = self._make_port( self.fmt, net_id, security_groups=[sg['security_group']['id']]) return port['port'] def _make_port_without_sec_group(self, net_id): port = self._make_port( self.fmt, net_id, security_groups=[]) return port['port'] def test_security_group_get_ports_from_devices(self): with self.network() as n: with self.subnet(n): orig_ports = [ self._make_port_with_new_sec_group(n['network']['id']), self._make_port_with_new_sec_group(n['network']['id']), self._make_port_without_sec_group(n['network']['id']) ] plugin = manager.NeutronManager.get_plugin() # should match full ID and starting chars ports = plugin.get_ports_from_devices(self.ctx, [orig_ports[0]['id'], orig_ports[1]['id'][0:8], orig_ports[2]['id']]) self.assertEqual(len(orig_ports), len(ports)) for port_dict in ports: p = next(p for p in orig_ports if p['id'] == port_dict['id']) self.assertEqual(p['id'], port_dict['id']) self.assertEqual(p['security_groups'], port_dict[ext_sg.SECURITYGROUPS]) self.assertEqual([], 
port_dict['security_group_rules']) self.assertEqual([p['fixed_ips'][0]['ip_address']], port_dict['fixed_ips']) self._delete('ports', p['id']) def test_security_group_get_ports_from_devices_with_bad_id(self): plugin = manager.NeutronManager.get_plugin() ports = plugin.get_ports_from_devices(self.ctx, ['bad_device_id']) self.assertFalse(ports) def test_security_group_no_db_calls_with_no_ports(self): plugin = manager.NeutronManager.get_plugin() with mock.patch( 'neutron.plugins.ml2.db.get_sg_ids_grouped_by_port' ) as get_mock: self.assertFalse(plugin.get_ports_from_devices(self.ctx, [])) self.assertFalse(get_mock.called) def test_large_port_count_broken_into_parts(self): plugin = manager.NeutronManager.get_plugin() max_ports_per_query = 5 ports_to_query = 73 for max_ports_per_query in (1, 2, 5, 7, 9, 31): with mock.patch('neutron.plugins.ml2.db.MAX_PORTS_PER_QUERY', new=max_ports_per_query),\ mock.patch( 'neutron.plugins.ml2.db.get_sg_ids_grouped_by_port', return_value={}) as get_mock: plugin.get_ports_from_devices(self.ctx, ['%s%s' % (const.TAP_DEVICE_PREFIX, i) for i in range(ports_to_query)]) all_call_args = [x[1][1] for x in get_mock.mock_calls] last_call_args = all_call_args.pop() # all but last should be getting MAX_PORTS_PER_QUERY ports self.assertTrue( all(map(lambda x: len(x) == max_ports_per_query, all_call_args)) ) remaining = ports_to_query % max_ports_per_query if remaining: self.assertEqual(remaining, len(last_call_args)) # should be broken into ceil(total/MAX_PORTS_PER_QUERY) calls self.assertEqual( math.ceil(ports_to_query / float(max_ports_per_query)), get_mock.call_count ) def test_full_uuids_skip_port_id_lookup(self): plugin = manager.NeutronManager.get_plugin() # when full UUIDs are provided, the _or statement should only # have one matching 'IN' criteria for all of the IDs with mock.patch('neutron.plugins.ml2.db.or_') as or_mock,\ mock.patch('sqlalchemy.orm.Session.query') as qmock: fmock = qmock.return_value.outerjoin.return_value.filter # return no ports to exit the method early since we are mocking # the query fmock.return_value = [] plugin.get_ports_from_devices(self.ctx, [test_base._uuid(), test_base._uuid()]) # the or_ function should only have one argument or_mock.assert_called_once_with(mock.ANY) class TestMl2SGServerRpcCallBack( Ml2SecurityGroupsTestCase, test_sg_rpc.SGServerRpcCallBackTestCase): pass neutron-8.4.0/neutron/tests/unit/plugins/ml2/test_db.py0000664000567000056710000004115413044372760024355 0ustar jenkinsjenkins00000000000000# Copyright (c) 2014 OpenStack Foundation, all rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
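# Several ml2_db helpers exercised below (get_port, get_port_binding_host,
# get_segment_by_id, ...) are expected to return None both when no row
# matches and when a lookup unexpectedly matches several rows. A minimal
# sketch of that defensive SQLAlchemy lookup pattern (an illustration
# only, not the actual neutron/plugins/ml2/db.py code):

from sqlalchemy.orm import exc as _sa_exc


def _get_one_or_none_sketch(session, model, **filters):
    try:
        return session.query(model).filter_by(**filters).one()
    except (_sa_exc.NoResultFound, _sa_exc.MultipleResultsFound):
        # Callers treat "missing" and "ambiguous" the same way: absent.
        return None
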
import warnings import mock from oslo_utils import uuidutils from sqlalchemy.orm import query from neutron.common import constants from neutron import context from neutron.db import db_base_plugin_v2 from neutron.db import l3_db from neutron.db import models_v2 from neutron.extensions import portbindings from neutron.plugins.ml2 import db as ml2_db from neutron.plugins.ml2 import driver_api as api from neutron.plugins.ml2 import models from neutron.tests.unit import testlib_api class Ml2DBTestCase(testlib_api.SqlTestCase): def setUp(self): super(Ml2DBTestCase, self).setUp() self.ctx = context.get_admin_context() def _setup_neutron_network(self, network_id): with self.ctx.session.begin(subtransactions=True): self.ctx.session.add(models_v2.Network(id=network_id)) def _setup_neutron_port(self, network_id, port_id): mac_address = db_base_plugin_v2.NeutronDbPluginV2._generate_mac() with self.ctx.session.begin(subtransactions=True): port = models_v2.Port(id=port_id, network_id=network_id, mac_address=mac_address, admin_state_up=True, status='DOWN', device_id='', device_owner='') self.ctx.session.add(port) return port def _setup_neutron_portbinding(self, port_id, vif_type, host): with self.ctx.session.begin(subtransactions=True): self.ctx.session.add(models.PortBinding(port_id=port_id, vif_type=vif_type, host=host)) def _create_segments(self, segments, is_seg_dynamic=False, network_id='foo-network-id'): self._setup_neutron_network(network_id) for segment in segments: ml2_db.add_network_segment( self.ctx.session, network_id, segment, is_dynamic=is_seg_dynamic) net_segments = ml2_db.get_network_segments( self.ctx.session, network_id, filter_dynamic=is_seg_dynamic) for segment_index, segment in enumerate(segments): self.assertEqual(segment, net_segments[segment_index]) return net_segments def test_network_segments_for_provider_network(self): segment = {api.NETWORK_TYPE: 'vlan', api.PHYSICAL_NETWORK: 'physnet1', api.SEGMENTATION_ID: 1} self._create_segments([segment]) def test_network_segments_is_dynamic_true(self): segment = {api.NETWORK_TYPE: 'vlan', api.PHYSICAL_NETWORK: 'physnet1', api.SEGMENTATION_ID: 1} self._create_segments([segment], is_seg_dynamic=True) def test_network_segments_for_multiprovider_network(self): segments = [{api.NETWORK_TYPE: 'vlan', api.PHYSICAL_NETWORK: 'physnet1', api.SEGMENTATION_ID: 1}, {api.NETWORK_TYPE: 'vlan', api.PHYSICAL_NETWORK: 'physnet1', api.SEGMENTATION_ID: 2}] self._create_segments(segments) def test_get_networks_segments(self): segments1 = [{api.NETWORK_TYPE: 'vlan', api.PHYSICAL_NETWORK: 'physnet1', api.SEGMENTATION_ID: 1}, {api.NETWORK_TYPE: 'vlan', api.PHYSICAL_NETWORK: 'physnet1', api.SEGMENTATION_ID: 2}] segments2 = [{api.NETWORK_TYPE: 'vlan', api.PHYSICAL_NETWORK: 'physnet1', api.SEGMENTATION_ID: 3}, {api.NETWORK_TYPE: 'vlan', api.PHYSICAL_NETWORK: 'physnet1', api.SEGMENTATION_ID: 4}] net1segs = self._create_segments(segments1, network_id='net1') net2segs = self._create_segments(segments2, network_id='net2') segs = ml2_db.get_networks_segments(self.ctx.session, ['net1', 'net2']) self.assertEqual(net1segs, segs['net1']) self.assertEqual(net2segs, segs['net2']) def test_get_networks_segments_no_segments(self): self._create_segments([], network_id='net1') self._create_segments([], network_id='net2') segs = ml2_db.get_networks_segments(self.ctx.session, ['net1', 'net2']) self.assertEqual([], segs['net1']) self.assertEqual([], segs['net2']) def test_get_segment_by_id(self): segment = {api.NETWORK_TYPE: 'vlan', api.PHYSICAL_NETWORK: 'physnet1', 
api.SEGMENTATION_ID: 1} net_segment = self._create_segments([segment])[0] segment_uuid = net_segment[api.ID] net_segment = ml2_db.get_segment_by_id(self.ctx.session, segment_uuid) self.assertEqual(segment, net_segment) def test_get_segment_by_id_result_not_found(self): segment_uuid = uuidutils.generate_uuid() net_segment = ml2_db.get_segment_by_id(self.ctx.session, segment_uuid) self.assertIsNone(net_segment) def test_delete_network_segment(self): segment = {api.NETWORK_TYPE: 'vlan', api.PHYSICAL_NETWORK: 'physnet1', api.SEGMENTATION_ID: 1} net_segment = self._create_segments([segment])[0] segment_uuid = net_segment[api.ID] ml2_db.delete_network_segment(self.ctx.session, segment_uuid) # Get segment and verify its empty net_segment = ml2_db.get_segment_by_id(self.ctx.session, segment_uuid) self.assertIsNone(net_segment) def test_add_port_binding(self): network_id = 'foo-network-id' port_id = 'foo-port-id' self._setup_neutron_network(network_id) self._setup_neutron_port(network_id, port_id) port = ml2_db.add_port_binding(self.ctx.session, port_id) self.assertEqual(port_id, port.port_id) self.assertEqual(portbindings.VIF_TYPE_UNBOUND, port.vif_type) def test_get_port_binding_host(self): network_id = 'foo-network-id' port_id = 'foo-port-id' host = 'fake_host' vif_type = portbindings.VIF_TYPE_UNBOUND self._setup_neutron_network(network_id) self._setup_neutron_port(network_id, port_id) self._setup_neutron_portbinding(port_id, vif_type, host) port_host = ml2_db.get_port_binding_host(self.ctx.session, port_id) self.assertEqual(host, port_host) def test_get_port_binding_host_multiple_results_found(self): network_id = 'foo-network-id' port_id = 'foo-port-id' port_id_one = 'foo-port-id-one' port_id_two = 'foo-port-id-two' host = 'fake_host' vif_type = portbindings.VIF_TYPE_UNBOUND self._setup_neutron_network(network_id) self._setup_neutron_port(network_id, port_id_one) self._setup_neutron_portbinding(port_id_one, vif_type, host) self._setup_neutron_port(network_id, port_id_two) self._setup_neutron_portbinding(port_id_two, vif_type, host) port_host = ml2_db.get_port_binding_host(self.ctx.session, port_id) self.assertIsNone(port_host) def test_get_port_binding_host_result_not_found(self): port_id = uuidutils.generate_uuid() port_host = ml2_db.get_port_binding_host(self.ctx.session, port_id) self.assertIsNone(port_host) def test_get_port(self): network_id = 'foo-network-id' port_id = 'foo-port-id' self._setup_neutron_network(network_id) self._setup_neutron_port(network_id, port_id) port = ml2_db.get_port(self.ctx.session, port_id) self.assertEqual(port_id, port.id) def test_get_port_multiple_results_found(self): network_id = 'foo-network-id' port_id = 'foo-port-id' port_id_one = 'foo-port-id-one' port_id_two = 'foo-port-id-two' self._setup_neutron_network(network_id) self._setup_neutron_port(network_id, port_id_one) self._setup_neutron_port(network_id, port_id_two) port = ml2_db.get_port(self.ctx.session, port_id) self.assertIsNone(port) def test_get_port_result_not_found(self): port_id = uuidutils.generate_uuid() port = ml2_db.get_port(self.ctx.session, port_id) self.assertIsNone(port) def test_get_port_from_device_mac(self): network_id = 'foo-network-id' port_id = 'foo-port-id' self._setup_neutron_network(network_id) port = self._setup_neutron_port(network_id, port_id) observed_port = ml2_db.get_port_from_device_mac(self.ctx, port['mac_address']) self.assertEqual(port_id, observed_port.id) def test_get_locked_port_and_binding(self): network_id = 'foo-network-id' port_id = 'foo-port-id' host = 
'fake_host' vif_type = portbindings.VIF_TYPE_UNBOUND self._setup_neutron_network(network_id) self._setup_neutron_port(network_id, port_id) self._setup_neutron_portbinding(port_id, vif_type, host) port, binding = ml2_db.get_locked_port_and_binding(self.ctx.session, port_id) self.assertEqual(port_id, port.id) self.assertEqual(port_id, binding.port_id) def test_get_locked_port_and_binding_result_not_found(self): port_id = uuidutils.generate_uuid() port, binding = ml2_db.get_locked_port_and_binding(self.ctx.session, port_id) self.assertIsNone(port) self.assertIsNone(binding) class Ml2DvrDBTestCase(testlib_api.SqlTestCase): def setUp(self): super(Ml2DvrDBTestCase, self).setUp() self.ctx = context.get_admin_context() def _setup_neutron_network(self, network_id, port_ids): with self.ctx.session.begin(subtransactions=True): self.ctx.session.add(models_v2.Network(id=network_id)) ports = [] for port_id in port_ids: mac_address = (db_base_plugin_v2.NeutronDbPluginV2. _generate_mac()) port = models_v2.Port(id=port_id, network_id=network_id, mac_address=mac_address, admin_state_up=True, status='ACTIVE', device_id='', device_owner='') self.ctx.session.add(port) ports.append(port) return ports def _setup_neutron_router(self): with self.ctx.session.begin(subtransactions=True): router = l3_db.Router() self.ctx.session.add(router) return router def _setup_dvr_binding(self, network_id, port_id, router_id, host_id): with self.ctx.session.begin(subtransactions=True): record = models.DVRPortBinding( port_id=port_id, host=host_id, router_id=router_id, vif_type=portbindings.VIF_TYPE_UNBOUND, vnic_type=portbindings.VNIC_NORMAL, status='DOWN') self.ctx.session.add(record) return record def test_ensure_dvr_port_binding_deals_with_db_duplicate(self): network_id = 'foo_network_id' port_id = 'foo_port_id' router_id = 'foo_router_id' host_id = 'foo_host_id' self._setup_neutron_network(network_id, [port_id]) self._setup_dvr_binding(network_id, port_id, router_id, host_id) with mock.patch.object(query.Query, 'first') as query_first: query_first.return_value = [] with mock.patch.object(ml2_db.LOG, 'debug') as log_trace: binding = ml2_db.ensure_dvr_port_binding( self.ctx.session, port_id, host_id, router_id) self.assertTrue(query_first.called) self.assertTrue(log_trace.called) self.assertEqual(port_id, binding.port_id) def test_ensure_dvr_port_binding(self): network_id = 'foo_network_id' port_id = 'foo_port_id' self._setup_neutron_network(network_id, [port_id]) router = self._setup_neutron_router() ml2_db.ensure_dvr_port_binding( self.ctx.session, port_id, 'foo_host', router.id) expected = (self.ctx.session.query(models.DVRPortBinding). filter_by(port_id=port_id).one()) self.assertEqual(port_id, expected.port_id) def test_ensure_dvr_port_binding_multiple_bindings(self): network_id = 'foo_network_id' port_id = 'foo_port_id' self._setup_neutron_network(network_id, [port_id]) router = self._setup_neutron_router() ml2_db.ensure_dvr_port_binding( self.ctx.session, port_id, 'foo_host_1', router.id) ml2_db.ensure_dvr_port_binding( self.ctx.session, port_id, 'foo_host_2', router.id) bindings = (self.ctx.session.query(models.DVRPortBinding). 
filter_by(port_id=port_id).all()) self.assertEqual(2, len(bindings)) def test_delete_dvr_port_binding_if_stale(self): network_id = 'foo_network_id' port_id = 'foo_port_id' self._setup_neutron_network(network_id, [port_id]) binding = self._setup_dvr_binding( network_id, port_id, None, 'foo_host_id') ml2_db.delete_dvr_port_binding_if_stale(self.ctx.session, binding) count = (self.ctx.session.query(models.DVRPortBinding). filter_by(port_id=binding.port_id).count()) self.assertFalse(count) def test_get_dvr_port_binding_by_host_not_found(self): port = ml2_db.get_dvr_port_binding_by_host( self.ctx.session, 'foo_port_id', 'foo_host_id') self.assertIsNone(port) def test_get_dvr_port_bindings_not_found(self): port = ml2_db.get_dvr_port_bindings(self.ctx.session, 'foo_port_id') self.assertFalse(len(port)) def test_get_dvr_port_bindings(self): network_id = 'foo_network_id' port_id_1 = 'foo_port_id_1' port_id_2 = 'foo_port_id_2' self._setup_neutron_network(network_id, [port_id_1, port_id_2]) router = self._setup_neutron_router() self._setup_dvr_binding( network_id, port_id_1, router.id, 'foo_host_id_1') self._setup_dvr_binding( network_id, port_id_1, router.id, 'foo_host_id_2') ports = ml2_db.get_dvr_port_bindings(self.ctx.session, 'foo_port_id') self.assertEqual(2, len(ports)) def test_dvr_port_binding_deleted_by_port_deletion(self): with self.ctx.session.begin(subtransactions=True): self.ctx.session.add(models_v2.Network(id='network_id')) device_owner = constants.DEVICE_OWNER_DVR_INTERFACE port = models_v2.Port( id='port_id', network_id='network_id', mac_address='00:11:22:33:44:55', admin_state_up=True, status=constants.PORT_STATUS_ACTIVE, device_id='device_id', device_owner=device_owner) self.ctx.session.add(port) binding_kwarg = { 'port_id': 'port_id', 'host': 'host', 'vif_type': portbindings.VIF_TYPE_UNBOUND, 'vnic_type': portbindings.VNIC_NORMAL, 'router_id': 'router_id', 'status': constants.PORT_STATUS_DOWN } self.ctx.session.add(models.DVRPortBinding(**binding_kwarg)) binding_kwarg['host'] = 'another-host' self.ctx.session.add(models.DVRPortBinding(**binding_kwarg)) with warnings.catch_warnings(record=True) as warning_list: with self.ctx.session.begin(subtransactions=True): self.ctx.session.delete(port) self.assertEqual([], warning_list) ports = ml2_db.get_dvr_port_bindings(self.ctx.session, 'port_id') self.assertEqual(0, len(ports)) neutron-8.4.0/neutron/tests/unit/plugins/ml2/test_agent_scheduler.py0000664000567000056710000000251313044372736027123 0ustar jenkinsjenkins00000000000000# Copyright (c) 2013 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron.tests.unit.db import test_agentschedulers_db from neutron.tests.unit.plugins.ml2 import test_plugin class Ml2AgentSchedulerTestCase( test_agentschedulers_db.OvsAgentSchedulerTestCase): plugin_str = test_plugin.PLUGIN_NAME l3_plugin = ('neutron.services.l3_router.' 
'l3_router_plugin.L3RouterPlugin') class Ml2L3AgentNotifierTestCase( test_agentschedulers_db.OvsL3AgentNotifierTestCase): plugin_str = test_plugin.PLUGIN_NAME l3_plugin = ('neutron.services.l3_router.' 'l3_router_plugin.L3RouterPlugin') class Ml2DhcpAgentNotifierTestCase( test_agentschedulers_db.OvsDhcpAgentNotifierTestCase): plugin_str = test_plugin.PLUGIN_NAME neutron-8.4.0/neutron/tests/unit/plugins/ml2/extensions/0000775000567000056710000000000013044373210024540 5ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/tests/unit/plugins/ml2/extensions/__init__.py0000664000567000056710000000000013044372736026653 0ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/tests/unit/plugins/ml2/extensions/test_port_security.py0000664000567000056710000000310413044372736031076 0ustar jenkinsjenkins00000000000000# Copyright (c) 2015 OpenStack Foundation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import mock from neutron.extensions import portsecurity as psec from neutron.plugins.ml2.extensions import port_security from neutron.tests.unit.plugins.ml2 import test_plugin class TestML2ExtensionPortSecurity(test_plugin.Ml2PluginV2TestCase): def _test_extend_dict_no_port_security(self, func): """Test extend_*_dict won't crash if port_security item is None.""" for db_data in ({'port_security': None, 'name': 'net1'}, {}): response_data = {} session = mock.Mock() driver = port_security.PortSecurityExtensionDriver() getattr(driver, func)(session, db_data, response_data) self.assertTrue(response_data[psec.PORTSECURITY]) def test_extend_port_dict_no_port_security(self): self._test_extend_dict_no_port_security('extend_port_dict') def test_extend_network_dict_no_port_security(self): self._test_extend_dict_no_port_security('extend_network_dict') neutron-8.4.0/neutron/tests/unit/plugins/ml2/extensions/test_dns_integration.py0000664000567000056710000006270413044372760031362 0ustar jenkinsjenkins00000000000000# Copyright (c) 2016 IBM # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
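# The test case that follows drives port create/update through the API and
# then checks the resulting Designate calls on two module-level mocks handed
# out by a patched client factory. A minimal, self-contained sketch of that
# mocking pattern; _SketchPublisher and make_clients are illustrative names
# only, not Neutron or Designate APIs:

import mock as _sketch_mock

_sketch_client = _sketch_mock.Mock()
_sketch_admin_client = _sketch_mock.Mock()


class _SketchPublisher(object):
    """Resolve API clients through a factory so tests can patch it."""

    def publish(self, make_clients):
        client, _admin_client = make_clients()
        client.recordsets.create('example.org.', 'host', 'A', ['10.0.0.1'])


_SketchPublisher().publish(
    _sketch_mock.Mock(return_value=(_sketch_client, _sketch_admin_client)))
# Assertions then inspect the shared mock, as _verify_port_dns does below.
_sketch_client.recordsets.create.assert_called_once_with(
    'example.org.', 'host', 'A', ['10.0.0.1'])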
import uuid

import mock
import netaddr
import testtools

from neutron import context
from neutron.db import dns_db
from neutron.extensions import dns
from neutron.extensions import providernet as pnet
from neutron.plugins.ml2 import config
from neutron.plugins.ml2.extensions import dns_integration
from neutron.services.externaldns.drivers.designate import driver
from neutron.tests.unit.plugins.ml2 import test_plugin

mock_client = mock.Mock()
mock_admin_client = mock.Mock()
mock_config = {'return_value': (mock_client, mock_admin_client)}
DNSDOMAIN = 'domain.com.'
DNSNAME = 'port-dns-name'
NEWDNSNAME = 'new-port-dns-name'
V4UUID = 'v4_uuid'
V6UUID = 'v6_uuid'


@mock.patch(
    'neutron.services.externaldns.drivers.designate.driver.get_clients',
    **mock_config)
class DNSIntegrationTestCase(test_plugin.Ml2PluginV2TestCase):
    _extension_drivers = ['dns']

    def setUp(self):
        config.cfg.CONF.set_override('extension_drivers',
                                     self._extension_drivers,
                                     group='ml2')
        config.cfg.CONF.set_override('external_dns_driver', 'designate')
        mock_client.reset_mock()
        mock_admin_client.reset_mock()
        super(DNSIntegrationTestCase, self).setUp()
        dns_integration.DNS_DRIVER = None
        dns_integration.subscribe()

    def _create_port_for_test(self, provider_net=True, dns_domain=True,
                              dns_name=True, ipv4=True, ipv6=True):
        net_kwargs = {}
        if provider_net:
            net_kwargs = {
                'arg_list': (pnet.NETWORK_TYPE, pnet.SEGMENTATION_ID,),
                pnet.NETWORK_TYPE: 'vxlan',
                pnet.SEGMENTATION_ID: '2016',
            }
        if dns_domain:
            net_kwargs[dns.DNSDOMAIN] = DNSDOMAIN
            net_kwargs['arg_list'] = \
                net_kwargs.get('arg_list', ()) + (dns.DNSDOMAIN,)
        res = self._create_network(self.fmt, 'test_network', True,
                                   **net_kwargs)
        network = self.deserialize(self.fmt, res)
        if ipv4:
            self._create_subnet(self.fmt, network['network']['id'],
                                '10.0.0.0/24', ip_version=4)
        if ipv6:
            self._create_subnet(self.fmt, network['network']['id'],
                                'fd3d:bdd4:da60::/64', ip_version=6)
        port_kwargs = {}
        if dns_name:
            port_kwargs = {
                'arg_list': (dns.DNSNAME,),
                dns.DNSNAME: DNSNAME
            }
        res = self._create_port('json', network['network']['id'],
                                **port_kwargs)
        self.assertEqual(201, res.status_int)
        port = self.deserialize(self.fmt, res)['port']
        ctx = context.get_admin_context()
        dns_data_db = ctx.session.query(dns_db.PortDNS).filter_by(
            port_id=port['id']).one_or_none()
        return network['network'], port, dns_data_db

    def _update_port_for_test(self, port, new_dns_name=NEWDNSNAME, **kwargs):
        mock_client.reset_mock()
        records_v4 = [ip['ip_address'] for ip in port['fixed_ips']
                      if netaddr.IPAddress(ip['ip_address']).version == 4]
        records_v6 = [ip['ip_address'] for ip in port['fixed_ips']
                      if netaddr.IPAddress(ip['ip_address']).version == 6]
        recordsets = []
        if records_v4:
            recordsets.append({'id': V4UUID, 'records': records_v4})
        if records_v6:
            recordsets.append({'id': V6UUID, 'records': records_v6})
        mock_client.recordsets.list.return_value = recordsets
        mock_admin_client.reset_mock()
        body = {}
        if new_dns_name is not None:
            body['dns_name'] = new_dns_name
        body.update(kwargs)
        data = {'port': body}
        req = self.new_update_request('ports', data, port['id'])
        res = req.get_response(self.api)
        self.assertEqual(200, res.status_int)
        port = self.deserialize(self.fmt, res)['port']
        ctx = context.get_admin_context()
        dns_data_db = ctx.session.query(dns_db.PortDNS).filter_by(
            port_id=port['id']).one_or_none()
        return port, dns_data_db

    def _verify_port_dns(self, net, port, dns_data_db, dns_name=True,
                         dns_domain=True, ptr_zones=True,
                         delete_records=False, provider_net=True,
                         dns_driver=True, original_ips=None,
                         current_dns_name=DNSNAME, previous_dns_name=''):
        if dns_name:
            self.assertEqual(current_dns_name, port[dns.DNSNAME])
        if dns_name and dns_domain and provider_net and dns_driver:
            self.assertEqual(current_dns_name,
                             dns_data_db['current_dns_name'])
            self.assertEqual(previous_dns_name,
                             dns_data_db['previous_dns_name'])
            if current_dns_name:
                self.assertEqual(net[dns.DNSDOMAIN],
                                 dns_data_db['current_dns_domain'])
            else:
                self.assertFalse(dns_data_db['current_dns_domain'])
            records_v4 = [ip['ip_address'] for ip in port['fixed_ips']
                          if netaddr.IPAddress(ip['ip_address']).version == 4]
            records_v6 = [ip['ip_address'] for ip in port['fixed_ips']
                          if netaddr.IPAddress(ip['ip_address']).version == 6]
            expected = []
            expected_delete = []
            if records_v4:
                if current_dns_name:
                    expected.append(
                        mock.call(net[dns.DNSDOMAIN], current_dns_name, 'A',
                                  records_v4))
                if delete_records:
                    expected_delete.append(mock.call(net[dns.DNSDOMAIN],
                                                     V4UUID))
            if records_v6:
                if current_dns_name:
                    expected.append(
                        mock.call(net[dns.DNSDOMAIN], current_dns_name,
                                  'AAAA', records_v6))
                if delete_records:
                    expected_delete.append(mock.call(net[dns.DNSDOMAIN],
                                                     V6UUID))
            mock_client.recordsets.create.assert_has_calls(expected,
                                                           any_order=True)
            self.assertEqual(
                len(expected),
                len(mock_client.recordsets.create.call_args_list))
            mock_client.recordsets.delete.assert_has_calls(expected_delete,
                                                           any_order=True)
            self.assertEqual(
                len(expected_delete),
                len(mock_client.recordsets.delete.call_args_list))
            expected = []
            expected_delete = []
            if ptr_zones:
                records = records_v4 + records_v6
                recordset_name = '%s.%s' % (current_dns_name,
                                            net[dns.DNSDOMAIN])
                for record in records:
                    in_addr_name = netaddr.IPAddress(record).reverse_dns
                    in_addr_zone_name = self._get_in_addr_zone_name(
                        in_addr_name)
                    if current_dns_name:
                        expected.append(mock.call(in_addr_zone_name,
                                                  in_addr_name, 'PTR',
                                                  [recordset_name]))
                    if delete_records and not original_ips:
                        expected_delete.append(mock.call(in_addr_zone_name,
                                                         in_addr_name))
                if delete_records and original_ips:
                    for record in original_ips:
                        in_addr_name = netaddr.IPAddress(record).reverse_dns
                        in_addr_zone_name = self._get_in_addr_zone_name(
                            in_addr_name)
                        expected_delete.append(mock.call(in_addr_zone_name,
                                                         in_addr_name))
            mock_admin_client.recordsets.create.assert_has_calls(
                expected, any_order=True)
            self.assertEqual(
                len(expected),
                len(mock_admin_client.recordsets.create.call_args_list))
            mock_admin_client.recordsets.delete.assert_has_calls(
                expected_delete, any_order=True)
            self.assertEqual(
                len(expected_delete),
                len(mock_admin_client.recordsets.delete.call_args_list))
        else:
            self.assertIsNone(dns_data_db)
            self.assertFalse(mock_client.recordsets.create.call_args_list)
            self.assertFalse(
                mock_admin_client.recordsets.create.call_args_list)
            self.assertFalse(mock_client.recordsets.delete.call_args_list)
            self.assertFalse(
                mock_admin_client.recordsets.delete.call_args_list)

    def _get_in_addr_zone_name(self, in_addr_name):
        units = self._get_bytes_or_nybles_to_skip(in_addr_name)
        return '.'.join(in_addr_name.split('.')[int(units):])

    def _get_bytes_or_nybles_to_skip(self, in_addr_name):
        if 'in-addr.arpa' in in_addr_name:
            return ((32 - config.cfg.CONF.designate.ipv4_ptr_zone_prefix_size)
                    / 8)
        return (128 - config.cfg.CONF.designate.ipv6_ptr_zone_prefix_size) / 4

    def test_create_port(self, *mocks):
        config.cfg.CONF.set_override('dns_domain', DNSDOMAIN)
        net, port, dns_data_db = self._create_port_for_test()
        self._verify_port_dns(net, port, dns_data_db)

    def test_create_port_tenant_network(self, *mocks):
        config.cfg.CONF.set_override('dns_domain', DNSDOMAIN)
        net, port, dns_data_db = \
self._create_port_for_test(provider_net=False) self._verify_port_dns(net, port, dns_data_db, provider_net=False) def test_create_port_no_dns_name(self, *mocks): config.cfg.CONF.set_override('dns_domain', DNSDOMAIN) net, port, dns_data_db = self._create_port_for_test(dns_name=False) self._verify_port_dns(net, port, dns_data_db, dns_name=False) def test_create_port_no_dns_domain(self, *mocks): config.cfg.CONF.set_override('dns_domain', DNSDOMAIN) net, port, dns_data_db = self._create_port_for_test(dns_domain=False) self._verify_port_dns(net, port, dns_data_db, dns_domain=False) def test_create_port_no_dns_driver(self, *mocks): config.cfg.CONF.set_override('external_dns_driver', '') config.cfg.CONF.set_override('dns_domain', DNSDOMAIN) net, port, dns_data_db = self._create_port_for_test() self._verify_port_dns(net, port, dns_data_db, dns_driver=False) def test_create_port_no_ipv6(self, *mocks): config.cfg.CONF.set_override('dns_domain', DNSDOMAIN) net, port, dns_data_db = self._create_port_for_test(ipv6=False) self._verify_port_dns(net, port, dns_data_db) def test_create_port_no_ipv4(self, *mocks): config.cfg.CONF.set_override('dns_domain', DNSDOMAIN) net, port, dns_data_db = self._create_port_for_test(ipv4=False) self._verify_port_dns(net, port, dns_data_db) def test_create_port_no_ptr_zones(self, *mocks): config.cfg.CONF.set_override('dns_domain', DNSDOMAIN) config.cfg.CONF.set_override('allow_reverse_dns_lookup', False, group='designate') net, port, dns_data_db = self._create_port_for_test() self._verify_port_dns(net, port, dns_data_db, ptr_zones=False) config.cfg.CONF.set_override('allow_reverse_dns_lookup', True, group='designate') def test_update_port(self, *mocks): config.cfg.CONF.set_override('dns_domain', DNSDOMAIN) net, port, dns_data_db = self._create_port_for_test() port, dns_data_db = self._update_port_for_test(port) self._verify_port_dns(net, port, dns_data_db, delete_records=True, current_dns_name=NEWDNSNAME, previous_dns_name=DNSNAME) def test_update_port_with_current_dns_name(self, *mocks): config.cfg.CONF.set_override('dns_domain', DNSDOMAIN) net, port, dns_data_db = self._create_port_for_test() port, dns_data_db = self._update_port_for_test(port, new_dns_name=DNSNAME) self.assertEqual(DNSNAME, dns_data_db['current_dns_name']) self.assertEqual(DNSDOMAIN, dns_data_db['current_dns_domain']) self.assertEqual('', dns_data_db['previous_dns_name']) self.assertEqual('', dns_data_db['previous_dns_domain']) self.assertFalse(mock_client.recordsets.create.call_args_list) self.assertFalse( mock_admin_client.recordsets.create.call_args_list) self.assertFalse(mock_client.recordsets.delete.call_args_list) self.assertFalse( mock_admin_client.recordsets.delete.call_args_list) def test_update_port_tenant_network(self, *mocks): config.cfg.CONF.set_override('dns_domain', DNSDOMAIN) net, port, dns_data_db = self._create_port_for_test(provider_net=False) port, dns_data_db = self._update_port_for_test(port) self._verify_port_dns(net, port, dns_data_db, delete_records=True, current_dns_name=NEWDNSNAME, previous_dns_name=DNSNAME, provider_net=False) def test_update_port_no_dns_domain(self, *mocks): config.cfg.CONF.set_override('dns_domain', DNSDOMAIN) net, port, dns_data_db = self._create_port_for_test(dns_domain=False) port, dns_data_db = self._update_port_for_test(port) self._verify_port_dns(net, port, dns_data_db, delete_records=True, current_dns_name=NEWDNSNAME, previous_dns_name=DNSNAME, dns_domain=False) def test_update_port_add_dns_name(self, *mocks): 
config.cfg.CONF.set_override('dns_domain', DNSDOMAIN) net, port, dns_data_db = self._create_port_for_test(dns_name=False) port, dns_data_db = self._update_port_for_test(port) self._verify_port_dns(net, port, dns_data_db, delete_records=False, current_dns_name=NEWDNSNAME, previous_dns_name='') def test_update_port_clear_dns_name(self, *mocks): config.cfg.CONF.set_override('dns_domain', DNSDOMAIN) net, port, dns_data_db = self._create_port_for_test() port, dns_data_db = self._update_port_for_test(port, new_dns_name='') self._verify_port_dns(net, port, dns_data_db, delete_records=True, current_dns_name='', previous_dns_name=DNSNAME) def test_update_port_non_dns_name_attribute(self, *mocks): config.cfg.CONF.set_override('dns_domain', DNSDOMAIN) net, port, dns_data_db = self._create_port_for_test() port_name = 'port_name' kwargs = {'name': port_name} port, dns_data_db = self._update_port_for_test(port, new_dns_name=None, **kwargs) self.assertEqual(DNSNAME, dns_data_db['current_dns_name']) self.assertEqual(DNSDOMAIN, dns_data_db['current_dns_domain']) self.assertEqual('', dns_data_db['previous_dns_name']) self.assertEqual('', dns_data_db['previous_dns_domain']) self.assertFalse(mock_client.recordsets.create.call_args_list) self.assertFalse( mock_admin_client.recordsets.create.call_args_list) self.assertFalse(mock_client.recordsets.delete.call_args_list) self.assertFalse( mock_admin_client.recordsets.delete.call_args_list) self.assertEqual(port_name, port['name']) def test_update_port_fixed_ips(self, *mocks): config.cfg.CONF.set_override('dns_domain', DNSDOMAIN) net, port, dns_data_db = self._create_port_for_test() original_ips = [ip['ip_address'] for ip in port['fixed_ips']] kwargs = {'fixed_ips': []} for ip in port['fixed_ips']: kwargs['fixed_ips'].append( {'subnet_id': ip['subnet_id'], 'ip_address': str(netaddr.IPAddress(ip['ip_address']) + 1)}) port, dns_data_db = self._update_port_for_test(port, new_dns_name=None, **kwargs) self._verify_port_dns(net, port, dns_data_db, delete_records=True, current_dns_name=DNSNAME, previous_dns_name=DNSNAME, original_ips=original_ips) def test_update_port_fixed_ips_with_subnet_ids(self, *mocks): config.cfg.CONF.set_override('dns_domain', DNSDOMAIN) net, port, dns_data_db = self._create_port_for_test() original_ips = [ip['ip_address'] for ip in port['fixed_ips']] kwargs = {'fixed_ips': []} for ip in port['fixed_ips']: kwargs['fixed_ips'].append( {'subnet_id': ip['subnet_id']}) port, dns_data_db = self._update_port_for_test(port, new_dns_name=None, **kwargs) self._verify_port_dns(net, port, dns_data_db, delete_records=True, current_dns_name=DNSNAME, previous_dns_name=DNSNAME, original_ips=original_ips) def test_update_port_fixed_ips_with_new_dns_name(self, *mocks): config.cfg.CONF.set_override('dns_domain', DNSDOMAIN) net, port, dns_data_db = self._create_port_for_test() original_ips = [ip['ip_address'] for ip in port['fixed_ips']] kwargs = {'fixed_ips': []} for ip in port['fixed_ips']: kwargs['fixed_ips'].append( {'subnet_id': ip['subnet_id'], 'ip_address': str(netaddr.IPAddress(ip['ip_address']) + 1)}) port, dns_data_db = self._update_port_for_test(port, new_dns_name=NEWDNSNAME, **kwargs) self._verify_port_dns(net, port, dns_data_db, delete_records=True, current_dns_name=NEWDNSNAME, previous_dns_name=DNSNAME, original_ips=original_ips) def test_update_port_fixed_ips_with_current_dns_name(self, *mocks): config.cfg.CONF.set_override('dns_domain', DNSDOMAIN) net, port, dns_data_db = self._create_port_for_test() original_ips = [ip['ip_address'] for ip in 
port['fixed_ips']] kwargs = {'fixed_ips': []} for ip in port['fixed_ips']: kwargs['fixed_ips'].append( {'subnet_id': ip['subnet_id'], 'ip_address': str(netaddr.IPAddress(ip['ip_address']) + 1)}) port, dns_data_db = self._update_port_for_test(port, new_dns_name=DNSNAME, **kwargs) self._verify_port_dns(net, port, dns_data_db, delete_records=True, current_dns_name=DNSNAME, previous_dns_name=DNSNAME, original_ips=original_ips) def test_update_port_fixed_ips_clearing_dns_name(self, *mocks): config.cfg.CONF.set_override('dns_domain', DNSDOMAIN) net, port, dns_data_db = self._create_port_for_test() original_ips = [ip['ip_address'] for ip in port['fixed_ips']] kwargs = {'fixed_ips': []} for ip in port['fixed_ips']: kwargs['fixed_ips'].append( {'subnet_id': ip['subnet_id'], 'ip_address': str(netaddr.IPAddress(ip['ip_address']) + 1)}) port, dns_data_db = self._update_port_for_test(port, new_dns_name='', **kwargs) self._verify_port_dns(net, port, dns_data_db, delete_records=True, current_dns_name='', previous_dns_name=DNSNAME, original_ips=original_ips) def test_update_fixed_ips_no_effect_after_clearing_dns_name(self, *mocks): config.cfg.CONF.set_override('dns_domain', DNSDOMAIN) net, port, dns_data_db = self._create_port_for_test() port, dns_data_db_1 = self._update_port_for_test(port, new_dns_name='') kwargs = {'fixed_ips': []} for ip in port['fixed_ips']: kwargs['fixed_ips'].append( {'subnet_id': ip['subnet_id'], 'ip_address': str(netaddr.IPAddress(ip['ip_address']) + 1)}) mock_client.reset_mock() mock_admin_client.reset_mock() port, dns_data_db_2 = self._update_port_for_test(port, new_dns_name='', **kwargs) self.assertEqual('', dns_data_db_2['current_dns_name']) self.assertEqual('', dns_data_db_2['current_dns_domain']) self.assertEqual(dns_data_db_1['current_dns_name'], dns_data_db_2['current_dns_name']) self.assertEqual(dns_data_db_1['current_dns_domain'], dns_data_db_2['current_dns_domain']) self.assertEqual(dns_data_db_1['previous_dns_name'], dns_data_db_2['previous_dns_name']) self.assertEqual(dns_data_db_1['previous_dns_domain'], dns_data_db_2['previous_dns_domain']) self.assertFalse(mock_client.recordsets.create.call_args_list) self.assertFalse( mock_admin_client.recordsets.create.call_args_list) self.assertFalse(mock_client.recordsets.delete.call_args_list) self.assertFalse( mock_admin_client.recordsets.delete.call_args_list) def test_dns_driver_loaded_after_server_restart(self, *mocks): dns_integration.DNS_DRIVER = None config.cfg.CONF.set_override('dns_domain', DNSDOMAIN) net, port, dns_data_db = self._create_port_for_test() self._verify_port_dns(net, port, dns_data_db) class TestDesignateClient(testtools.TestCase): """Test case for designate clients """ TEST_URL = 'http://127.0.0.1:9001/v2' TEST_ADMIN_USERNAME = uuid.uuid4().hex TEST_ADMIN_PASSWORD = uuid.uuid4().hex TEST_ADMIN_TENANT_NAME = uuid.uuid4().hex TEST_ADMIN_TENANT_ID = uuid.uuid4().hex TEST_ADMIN_AUTH_URL = 'http://127.0.0.1:35357/v2.0' TEST_CA_CERT = uuid.uuid4().hex TEST_CONTEXT = mock.Mock() TEST_CONTEXT.auth_token = uuid.uuid4().hex def setUp(self): super(TestDesignateClient, self).setUp() config.cfg.CONF.set_override('url', self.TEST_URL, group='designate') config.cfg.CONF.set_override('admin_username', self.TEST_ADMIN_USERNAME, group='designate') config.cfg.CONF.set_override('admin_password', self.TEST_ADMIN_PASSWORD, group='designate') config.cfg.CONF.set_override('admin_auth_url', self.TEST_ADMIN_AUTH_URL, group='designate') config.cfg.CONF.set_override('admin_tenant_id', self.TEST_ADMIN_TENANT_ID, group='designate') 
config.cfg.CONF.set_override('admin_tenant_name', self.TEST_ADMIN_TENANT_NAME, group='designate') # enforce session recalculation mock.patch.object(driver, '_SESSION', new=None).start() self.driver_session = ( mock.patch.object(driver.session, 'Session').start() ) def test_insecure_client(self): config.cfg.CONF.set_override('insecure', True, group='designate') driver.get_clients(self.TEST_CONTEXT) self.driver_session.assert_called_with(verify=False) def test_secure_client(self): config.cfg.CONF.set_override('insecure', False, group='designate') config.cfg.CONF.set_override('ca_cert', self.TEST_CA_CERT, group='designate') driver.get_clients(self.TEST_CONTEXT) self.driver_session.assert_called_with(verify=self.TEST_CA_CERT) neutron-8.4.0/neutron/tests/unit/plugins/ml2/extensions/fake_extension.py0000664000567000056710000000412513044372760030127 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron._i18n import _ from neutron.api import extensions from neutron.api.v2 import attributes as attr EXTENDED_ATTRIBUTES_2_0 = { 'networks': { 'network_extension': {'allow_post': True, 'allow_put': True, 'default': attr.ATTR_NOT_SPECIFIED, 'is_visible': True, 'enforce_policy': True}, }, 'subnets': { 'subnet_extension': {'allow_post': True, 'allow_put': True, 'default': attr.ATTR_NOT_SPECIFIED, 'is_visible': True, 'enforce_policy': True}, }, 'ports': { 'port_extension': {'allow_post': True, 'allow_put': True, 'default': attr.ATTR_NOT_SPECIFIED, 'is_visible': True, 'enforce_policy': True}, }, } class Fake_extension(extensions.ExtensionDescriptor): @classmethod def get_name(cls): return "ML2 fake extension" @classmethod def get_alias(cls): return "fake_extension" @classmethod def get_description(cls): return _("Adds test attributes to core resources.") @classmethod def get_updated(cls): return "2014-07-16T10:00:00-00:00" def get_extended_resources(self, version): if version == "2.0": return EXTENDED_ATTRIBUTES_2_0 else: return {} neutron-8.4.0/neutron/tests/unit/plugins/ml2/test_extension_driver_api.py0000664000567000056710000003123013044372760030202 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
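# The cases below exercise three extension-driver hooks by mocking them on
# the test driver: process_create_network/process_update_network persist
# request data, while extend_network_dict decorates responses. A compact
# sketch of how such hooks pair up, using a plain object in place of the
# real ML2 driver base class (illustrative only):


class _SketchExtensionDriver(object):

    def process_create_network(self, plugin_context, data, result):
        # Copy the custom attribute from the request body into the result.
        result['network_extension'] = data.get('network_extension', '')

    def process_update_network(self, plugin_context, data, result):
        if 'network_extension' in data:
            result['network_extension'] = data['network_extension']

    def extend_network_dict(self, session, base_model, result):
        # Runs on show/list so the attribute appears in every response.
        result['network_extension'] = getattr(
            base_model, 'network_extension', '')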
import uuid import mock from neutron import context from neutron import manager from neutron.plugins.ml2 import config from neutron.tests.unit.plugins.ml2.drivers import ext_test from neutron.tests.unit.plugins.ml2 import test_plugin class ExtensionDriverTestCase(test_plugin.Ml2PluginV2TestCase): _extension_drivers = ['test'] def setUp(self): config.cfg.CONF.set_override('extension_drivers', self._extension_drivers, group='ml2') super(ExtensionDriverTestCase, self).setUp() self._plugin = manager.NeutronManager.get_plugin() self._ctxt = context.get_admin_context() def _verify_network_create(self, code, exc_reason): tenant_id = str(uuid.uuid4()) data = {'network': {'name': 'net1', 'tenant_id': tenant_id}} req = self.new_create_request('networks', data) res = req.get_response(self.api) self.assertEqual(code, res.status_int) network = self.deserialize(self.fmt, res) if exc_reason: self.assertEqual(exc_reason, network['NeutronError']['type']) return (network, tenant_id) def _verify_network_update(self, network, code, exc_reason): net_id = network['network']['id'] new_name = 'a_brand_new_name' data = {'network': {'name': new_name}} req = self.new_update_request('networks', data, net_id) res = req.get_response(self.api) self.assertEqual(code, res.status_int) error = self.deserialize(self.fmt, res) self.assertEqual(exc_reason, error['NeutronError']['type']) def test_faulty_process_create(self): with mock.patch.object(ext_test.TestExtensionDriver, 'process_create_network', side_effect=TypeError): net, tenant_id = self._verify_network_create(500, 'HTTPInternalServerError') # Verify the operation is rolled back query_params = "tenant_id=%s" % tenant_id nets = self._list('networks', query_params=query_params) self.assertFalse(nets['networks']) def test_faulty_process_update(self): with mock.patch.object(ext_test.TestExtensionDriver, 'process_update_network', side_effect=TypeError): network, tid = self._verify_network_create(201, None) self._verify_network_update(network, 500, 'HTTPInternalServerError') def test_faulty_extend_dict(self): with mock.patch.object(ext_test.TestExtensionDriver, 'extend_network_dict', side_effect=[None, None, TypeError]): network, tid = self._verify_network_create(201, None) self._verify_network_update(network, 400, 'ExtensionDriverError') def test_network_attr(self): with self.network() as network: # Test create network ent = network['network'].get('network_extension') self.assertIsNotNone(ent) # Test list networks res = self._list('networks') val = res['networks'][0].get('network_extension') self.assertEqual('Test_Network_Extension_extend', val) # Test network update data = {'network': {'network_extension': 'Test_Network_Extension_Update'}} res = self._update('networks', network['network']['id'], data) val = res['network'].get('network_extension') self.assertEqual('Test_Network_Extension_Update_update', val) def test_subnet_attr(self): with self.subnet() as subnet: # Test create subnet ent = subnet['subnet'].get('subnet_extension') self.assertIsNotNone(ent) # Test list subnets res = self._list('subnets') val = res['subnets'][0].get('subnet_extension') self.assertEqual('Test_Subnet_Extension_extend', val) # Test subnet update data = {'subnet': {'subnet_extension': 'Test_Subnet_Extension_Update'}} res = self._update('subnets', subnet['subnet']['id'], data) val = res['subnet'].get('subnet_extension') self.assertEqual('Test_Subnet_Extension_Update_update', val) def test_port_attr(self): with self.port() as port: # Test create port ent = port['port'].get('port_extension') 
self.assertIsNotNone(ent) # Test list ports res = self._list('ports') val = res['ports'][0].get('port_extension') self.assertEqual('Test_Port_Extension_extend', val) # Test port update data = {'port': {'port_extension': 'Test_Port_Extension_Update'}} res = self._update('ports', port['port']['id'], data) val = res['port'].get('port_extension') self.assertEqual('Test_Port_Extension_Update_update', val) def test_extend_network_dict(self): with mock.patch.object(ext_test.TestExtensionDriver, 'process_update_network') as ext_update_net,\ mock.patch.object(ext_test.TestExtensionDriver, 'extend_network_dict') as ext_net_dict,\ self.network() as network: net_id = network['network']['id'] net_data = {'network': {'id': net_id}} self._plugin.update_network(self._ctxt, net_id, net_data) self.assertTrue(ext_update_net.called) self.assertTrue(ext_net_dict.called) def test_extend_subnet_dict(self): with mock.patch.object(ext_test.TestExtensionDriver, 'process_update_subnet') as ext_update_subnet,\ mock.patch.object(ext_test.TestExtensionDriver, 'extend_subnet_dict') as ext_subnet_dict,\ self.subnet() as subnet: subnet_id = subnet['subnet']['id'] subnet_data = {'subnet': {'id': subnet_id}} self._plugin.update_subnet(self._ctxt, subnet_id, subnet_data) self.assertTrue(ext_update_subnet.called) self.assertTrue(ext_subnet_dict.called) def test_extend_port_dict(self): with mock.patch.object(ext_test.TestExtensionDriver, 'process_update_port') as ext_update_port,\ mock.patch.object(ext_test.TestExtensionDriver, 'extend_port_dict') as ext_port_dict,\ self.port() as port: port_id = port['port']['id'] port_data = {'port': {'id': port_id}} self._plugin.update_port(self._ctxt, port_id, port_data) self.assertTrue(ext_update_port.called) self.assertTrue(ext_port_dict.called) class DBExtensionDriverTestCase(test_plugin.Ml2PluginV2TestCase): _extension_drivers = ['testdb'] def setUp(self): config.cfg.CONF.set_override('extension_drivers', self._extension_drivers, group='ml2') super(DBExtensionDriverTestCase, self).setUp() self._plugin = manager.NeutronManager.get_plugin() self._ctxt = context.get_admin_context() def test_network_attr(self): with self.network() as network: # Test create with default value. net_id = network['network']['id'] val = network['network']['network_extension'] self.assertEqual("", val) res = self._show('networks', net_id) val = res['network']['network_extension'] self.assertEqual("", val) # Test list. res = self._list('networks') val = res['networks'][0]['network_extension'] self.assertEqual("", val) # Test create with explicit value. res = self._create_network(self.fmt, 'test-network', True, arg_list=('network_extension', ), network_extension="abc") network = self.deserialize(self.fmt, res) net_id = network['network']['id'] val = network['network']['network_extension'] self.assertEqual("abc", val) res = self._show('networks', net_id) val = res['network']['network_extension'] self.assertEqual("abc", val) # Test update. data = {'network': {'network_extension': "def"}} res = self._update('networks', net_id, data) val = res['network']['network_extension'] self.assertEqual("def", val) res = self._show('networks', net_id) val = res['network']['network_extension'] self.assertEqual("def", val) def test_subnet_attr(self): with self.subnet() as subnet: # Test create with default value. net_id = subnet['subnet']['id'] val = subnet['subnet']['subnet_extension'] self.assertEqual("", val) res = self._show('subnets', net_id) val = res['subnet']['subnet_extension'] self.assertEqual("", val) # Test list. 
res = self._list('subnets') val = res['subnets'][0]['subnet_extension'] self.assertEqual("", val) with self.network() as network: # Test create with explicit value. data = {'subnet': {'network_id': network['network']['id'], 'cidr': '10.1.0.0/24', 'ip_version': '4', 'tenant_id': self._tenant_id, 'subnet_extension': 'abc'}} req = self.new_create_request('subnets', data, self.fmt) res = req.get_response(self.api) subnet = self.deserialize(self.fmt, res) subnet_id = subnet['subnet']['id'] val = subnet['subnet']['subnet_extension'] self.assertEqual("abc", val) res = self._show('subnets', subnet_id) val = res['subnet']['subnet_extension'] self.assertEqual("abc", val) # Test update. data = {'subnet': {'subnet_extension': "def"}} res = self._update('subnets', subnet_id, data) val = res['subnet']['subnet_extension'] self.assertEqual("def", val) res = self._show('subnets', subnet_id) val = res['subnet']['subnet_extension'] self.assertEqual("def", val) def test_port_attr(self): with self.port() as port: # Test create with default value. net_id = port['port']['id'] val = port['port']['port_extension'] self.assertEqual("", val) res = self._show('ports', net_id) val = res['port']['port_extension'] self.assertEqual("", val) # Test list. res = self._list('ports') val = res['ports'][0]['port_extension'] self.assertEqual("", val) with self.network() as network: # Test create with explicit value. res = self._create_port(self.fmt, network['network']['id'], arg_list=('port_extension', ), port_extension="abc") port = self.deserialize(self.fmt, res) port_id = port['port']['id'] val = port['port']['port_extension'] self.assertEqual("abc", val) res = self._show('ports', port_id) val = res['port']['port_extension'] self.assertEqual("abc", val) # Test update. data = {'port': {'port_extension': "def"}} res = self._update('ports', port_id, data) val = res['port']['port_extension'] self.assertEqual("def", val) res = self._show('ports', port_id) val = res['port']['port_extension'] self.assertEqual("def", val) neutron-8.4.0/neutron/tests/unit/plugins/ml2/test_ext_portsecurity.py0000664000567000056710000000542113044372760027421 0ustar jenkinsjenkins00000000000000# Copyright (c) 2015 OpenStack Foundation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
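# test_create_net_port_security_default below calls the core plugin
# directly and cleans up in a finally block. A condensed sketch of that
# pattern; _sketch_create_and_inspect and its arguments are illustrative
# stand-ins rather than part of this module:


def _sketch_create_and_inspect(plugin, ctx, args):
    network = None  # bind first so the guard in finally cannot raise
    try:
        network = plugin.create_network(ctx, args)
        value = network['port_security_enabled']
    finally:
        if network:
            plugin.delete_network(ctx, network['id'])
    return value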
from neutron import context
from neutron.extensions import portsecurity as psec
from neutron import manager
from neutron.plugins.ml2 import config
from neutron.tests.unit.extensions import test_portsecurity as test_psec
from neutron.tests.unit.plugins.ml2 import test_plugin


class PSExtDriverTestCase(test_plugin.Ml2PluginV2TestCase,
                          test_psec.TestPortSecurity):
    _extension_drivers = ['port_security']

    def setUp(self):
        config.cfg.CONF.set_override('extension_drivers',
                                     self._extension_drivers,
                                     group='ml2')
        super(PSExtDriverTestCase, self).setUp()

    def test_create_net_port_security_default(self):
        _core_plugin = manager.NeutronManager.get_plugin()
        admin_ctx = context.get_admin_context()
        args = {'network':
                {'name': 'test', 'tenant_id': '', 'shared': False,
                 'admin_state_up': True, 'status': 'ACTIVE'}}
        # Bind the name before the try block so the cleanup guard cannot
        # raise NameError if create_network itself fails.
        network = None
        try:
            network = _core_plugin.create_network(admin_ctx, args)
            _value = network[psec.PORTSECURITY]
        finally:
            if network:
                _core_plugin.delete_network(admin_ctx, network['id'])
        self.assertEqual(psec.DEFAULT_PORT_SECURITY, _value)

    def test_create_port_with_secgroup_none_and_port_security_false(self):
        if self._skip_security_group:
            self.skipTest("Plugin does not support security groups")
        with self.network() as net:
            with self.subnet(network=net):
                res = self._create_port(
                    'json', net['network']['id'],
                    arg_list=('security_groups', 'port_security_enabled'),
                    security_groups=[],
                    port_security_enabled=False)
                self.assertEqual(201, res.status_int)
                port = self.deserialize('json', res)
                self.assertFalse(port['port'][psec.PORTSECURITY])
                self.assertEqual([], port['port']['security_groups'])
neutron-8.4.0/neutron/tests/unit/plugins/ml2/base.py0000664000567000056710000000302613044372760023637 0ustar jenkinsjenkins00000000000000# Copyright (c) 2015 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from neutron import manager
from neutron.plugins.common import constants as plugin_constants
from neutron.tests.unit.plugins.ml2 import test_plugin


class ML2TestFramework(test_plugin.Ml2PluginV2TestCase):
    l3_plugin = ('neutron.services.l3_router.l3_router_plugin.'
                 'L3RouterPlugin')
    _mechanism_drivers = ['openvswitch']

    def setUp(self):
        super(ML2TestFramework, self).setUp()
        self.core_plugin = manager.NeutronManager.get_instance().get_plugin()
        self.l3_plugin = manager.NeutronManager.get_service_plugins().get(
            plugin_constants.L3_ROUTER_NAT)

    def _create_router(self, distributed=False, ha=False):
        return self.l3_plugin.create_router(
            self.context,
            {'router': {'name': 'router', 'admin_state_up': True,
                        'tenant_id': self._tenant_id,
                        'ha': ha, 'distributed': distributed}})
neutron-8.4.0/neutron/tests/unit/plugins/ml2/drivers/0000775000567000056710000000000013044373210024017 5ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/tests/unit/plugins/ml2/drivers/mech_fake_agent.py0000664000567000056710000000510113044372760027457 0ustar jenkinsjenkins00000000000000# Copyright (C) 2014,2015 VA Linux Systems Japan K.K.
# Copyright (C) 2014 Fumihiko Kakuma
# Copyright (C) 2014,2015 YAMAMOTO Takashi
# All Rights Reserved.
# # Based on openvswitch mechanism driver. # # Copyright (c) 2013 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron.agent import securitygroups_rpc from neutron.common import constants from neutron.extensions import portbindings from neutron.plugins.common import constants as p_constants from neutron.plugins.ml2.drivers import mech_agent class FakeAgentMechanismDriver(mech_agent.SimpleAgentMechanismDriverBase): """ML2 mechanism driver for testing. This is a ML2 mechanism driver used by UTs in test_l2population. This driver implements minimum requirements for L2pop mech driver. As there are some agent-based mechanism drivers and OVS agent mech driver is not the only one to support L2pop, it is useful to test L2pop with multiple drivers like this to check the minimum requirements. NOTE(yamamoto): This is a modified copy of ofagent mechanism driver as of writing this. There's no need to keep this synced with the "real" ofagent mechansim driver or its agent. """ def __init__(self): sg_enabled = securitygroups_rpc.is_firewall_enabled() vif_details = {portbindings.CAP_PORT_FILTER: sg_enabled, portbindings.OVS_HYBRID_PLUG: sg_enabled} super(FakeAgentMechanismDriver, self).__init__( # NOTE(yamamoto): l2pop driver has a hardcoded list of # supported agent types. constants.AGENT_TYPE_OFA, portbindings.VIF_TYPE_OVS, vif_details) def get_allowed_network_types(self, agent): return (agent['configurations'].get('tunnel_types', []) + [p_constants.TYPE_LOCAL, p_constants.TYPE_FLAT, p_constants.TYPE_VLAN]) def get_mappings(self, agent): return dict(agent['configurations'].get('interface_mappings', {})) neutron-8.4.0/neutron/tests/unit/plugins/ml2/drivers/test_type_local.py0000664000567000056710000000524013044372760027575 0ustar jenkinsjenkins00000000000000# Copyright (c) 2014 Thales Services SAS # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
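# The LocalTypeTest cases below check that a 'local' provider segment may
# carry nothing beyond its network type: a physical network or a
# segmentation ID makes validation fail. A tiny sketch of that rule with
# hypothetical names (the real driver raises InvalidInput):


def _sketch_validate_local_segment(segment):
    extra = set(segment) - {'network_type'}
    if extra:
        raise ValueError('local segments accept no %s' % sorted(extra))

# _sketch_validate_local_segment({'network_type': 'local'}) passes, while
# adding 'physical_network' or 'segmentation_id' raises ValueError.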
from neutron.common import exceptions as exc from neutron.plugins.common import constants as p_const from neutron.plugins.ml2 import driver_api as api from neutron.plugins.ml2.drivers import type_local from neutron.tests import base class LocalTypeTest(base.BaseTestCase): def setUp(self): super(LocalTypeTest, self).setUp() self.driver = type_local.LocalTypeDriver() self.session = None def test_is_partial_segment(self): segment = {api.NETWORK_TYPE: p_const.TYPE_LOCAL} self.assertFalse(self.driver.is_partial_segment(segment)) def test_validate_provider_segment(self): segment = {api.NETWORK_TYPE: p_const.TYPE_LOCAL} self.driver.validate_provider_segment(segment) def test_validate_provider_segment_with_unallowed_physical_network(self): segment = {api.NETWORK_TYPE: p_const.TYPE_LOCAL, api.PHYSICAL_NETWORK: 'phys_net'} self.assertRaises(exc.InvalidInput, self.driver.validate_provider_segment, segment) def test_validate_provider_segment_with_unallowed_segmentation_id(self): segment = {api.NETWORK_TYPE: p_const.TYPE_LOCAL, api.SEGMENTATION_ID: 2} self.assertRaises(exc.InvalidInput, self.driver.validate_provider_segment, segment) def test_reserve_provider_segment(self): segment = {api.NETWORK_TYPE: p_const.TYPE_LOCAL} observed = self.driver.reserve_provider_segment(self.session, segment) self.assertEqual(segment, observed) def test_release_provider_segment(self): segment = {api.NETWORK_TYPE: p_const.TYPE_LOCAL} observed = self.driver.reserve_provider_segment(self.session, segment) self.driver.release_segment(self.session, observed) def test_allocate_tenant_segment(self): expected = {api.NETWORK_TYPE: p_const.TYPE_LOCAL} observed = self.driver.allocate_tenant_segment(self.session) self.assertEqual(expected, observed) neutron-8.4.0/neutron/tests/unit/plugins/ml2/drivers/mech_sriov/0000775000567000056710000000000013044373210026155 5ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/tests/unit/plugins/ml2/drivers/mech_sriov/__init__.py0000664000567000056710000000000013044372736030270 0ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/tests/unit/plugins/ml2/drivers/mech_sriov/mech_driver/0000775000567000056710000000000013044373210030444 5ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/tests/unit/plugins/ml2/drivers/mech_sriov/mech_driver/__init__.py0000664000567000056710000000000013044372736032557 0ustar jenkinsjenkins00000000000000././@LongLink0000000000000000000000000000015200000000000011213 Lustar 00000000000000neutron-8.4.0/neutron/tests/unit/plugins/ml2/drivers/mech_sriov/mech_driver/test_mech_sriov_nic_switch.pyneutron-8.4.0/neutron/tests/unit/plugins/ml2/drivers/mech_sriov/mech_driver/test_mech_sriov_nic_swit0000664000567000056710000003061713044372760035503 0ustar jenkinsjenkins00000000000000# Copyright 2014 Mellanox Technologies, Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
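# SriovSwitchMechConfigTestCase at the end of this file feeds malformed
# supported_pci_vendor_devs entries ('wrong_entry', 'vendor_id:',
# ':product_id', 'foo:bar:baz', '') and expects driver initialization to
# fail. The accepted shape is 'vendor_id:product_id'; here is a small
# equivalent check, illustrative only rather than the driver's actual
# parsing code:


def _sketch_parse_pci_vendor_devs(entries):
    parsed = []
    for entry in entries:
        parts = entry.split(':')
        if len(parts) != 2 or not parts[0] or not parts[1]:
            raise ValueError('malformed pci vendor entry: %r' % entry)
        parsed.append(tuple(parts))
    return parsed

# _sketch_parse_pci_vendor_devs(['15b3:1004', '8086:10ca']) succeeds;
# each malformed example above raises ValueError.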
import mock from oslo_config import cfg import testtools from neutron.common import constants from neutron.extensions import portbindings from neutron.plugins.common import constants as p_const from neutron.plugins.ml2 import config # noqa from neutron.plugins.ml2 import driver_api as api from neutron.plugins.ml2.drivers.mech_sriov.mech_driver \ import exceptions as exc from neutron.plugins.ml2.drivers.mech_sriov.mech_driver import mech_driver from neutron.tests.unit.plugins.ml2 import _test_mech_agent as base MELLANOX_CONNECTX3_PCI_INFO = '15b3:1004' DEFAULT_PCI_INFO = ['15b3:1004', '8086:10ca'] class TestFakePortContext(base.FakePortContext): def __init__(self, agent_type, agents, segments, vnic_type=portbindings.VNIC_NORMAL, profile={'pci_vendor_info': MELLANOX_CONNECTX3_PCI_INFO}): super(TestFakePortContext, self).__init__(agent_type, agents, segments, vnic_type) self._bound_profile = profile @property def current(self): return {'id': base.PORT_ID, portbindings.VNIC_TYPE: self._bound_vnic_type, portbindings.PROFILE: self._bound_profile} def set_binding(self, segment_id, vif_type, vif_details, state): self._bound_segment_id = segment_id self._bound_vif_type = vif_type self._bound_vif_details = vif_details self._bound_state = state class SriovNicSwitchMechanismBaseTestCase(base.AgentMechanismBaseTestCase): VIF_TYPE = mech_driver.VIF_TYPE_HW_VEB CAP_PORT_FILTER = False AGENT_TYPE = constants.AGENT_TYPE_NIC_SWITCH VLAN_SEGMENTS = base.AgentMechanismVlanTestCase.VLAN_SEGMENTS GOOD_MAPPINGS = {'fake_physical_network': ['fake_device']} GOOD_CONFIGS = {'device_mappings': GOOD_MAPPINGS} BAD_MAPPINGS = {'wrong_physical_network': ['wrong_device']} BAD_CONFIGS = {'device_mappings': BAD_MAPPINGS} AGENTS = [{'alive': True, 'configurations': GOOD_CONFIGS}] AGENTS_DEAD = [{'alive': False, 'configurations': GOOD_CONFIGS}] AGENTS_BAD = [{'alive': False, 'configurations': GOOD_CONFIGS}, {'alive': True, 'configurations': BAD_CONFIGS}] def setUp(self): cfg.CONF.set_override('supported_pci_vendor_devs', DEFAULT_PCI_INFO, 'ml2_sriov') super(SriovNicSwitchMechanismBaseTestCase, self).setUp() self.driver = mech_driver.SriovNicSwitchMechanismDriver() self.driver.initialize() class SriovSwitchMechGenericTestCase(SriovNicSwitchMechanismBaseTestCase, base.AgentMechanismGenericTestCase): def test_check_segment(self): """Validate the check_segment call.""" segment = {'api.NETWORK_TYPE': ""} segment[api.NETWORK_TYPE] = p_const.TYPE_VLAN self.assertTrue(self.driver.check_segment(segment)) # Validate a network type not currently supported segment[api.NETWORK_TYPE] = p_const.TYPE_GRE self.assertFalse(self.driver.check_segment(segment)) def test_check_segment_allows_supported_network_types(self): for network_type in self.driver.supported_network_types: segment = {api.NETWORK_TYPE: network_type} self.assertTrue(self.driver.check_segment(segment)) class SriovMechVlanTestCase(SriovNicSwitchMechanismBaseTestCase, base.AgentMechanismBaseTestCase): VLAN_SEGMENTS = [{api.ID: 'unknown_segment_id', api.NETWORK_TYPE: 'no_such_type'}, {api.ID: 'vlan_segment_id', api.NETWORK_TYPE: 'vlan', api.PHYSICAL_NETWORK: 'fake_physical_network', api.SEGMENTATION_ID: 1234}] def test_type_vlan(self): context = TestFakePortContext(self.AGENT_TYPE, self.AGENTS, self.VLAN_SEGMENTS, portbindings.VNIC_DIRECT) self.driver.bind_port(context) self._check_bound(context, self.VLAN_SEGMENTS[1]) def test_type_vlan_bad(self): context = TestFakePortContext(self.AGENT_TYPE, self.AGENTS_BAD, self.VLAN_SEGMENTS, portbindings.VNIC_DIRECT) 
self.driver.bind_port(context) self._check_unbound(context) class SriovSwitchMechVnicTypeTestCase(SriovNicSwitchMechanismBaseTestCase): def _check_vif_type_for_vnic_type(self, vnic_type, expected_vif_type): context = TestFakePortContext(self.AGENT_TYPE, self.AGENTS, self.VLAN_SEGMENTS, vnic_type) self.driver.bind_port(context) self.assertEqual(expected_vif_type, context._bound_vif_type) vlan = int(context._bound_vif_details[portbindings.VIF_DETAILS_VLAN]) self.assertEqual(1234, vlan) def test_vnic_type_direct(self): self._check_vif_type_for_vnic_type(portbindings.VNIC_DIRECT, mech_driver.VIF_TYPE_HW_VEB) def test_vnic_type_macvtap(self): self._check_vif_type_for_vnic_type(portbindings.VNIC_MACVTAP, mech_driver.VIF_TYPE_HW_VEB) def test_vnic_type_direct_physical(self): self._check_vif_type_for_vnic_type(portbindings.VNIC_DIRECT_PHYSICAL, mech_driver.VIF_TYPE_HOSTDEV_PHY) class SriovSwitchMechProfileTestCase(SriovNicSwitchMechanismBaseTestCase): def _check_vif_for_pci_info(self, pci_vendor_info, expected_vif_type): context = TestFakePortContext(self.AGENT_TYPE, self.AGENTS, self.VLAN_SEGMENTS, portbindings.VNIC_DIRECT, {'pci_vendor_info': pci_vendor_info}) self.driver.bind_port(context) self.assertEqual(expected_vif_type, context._bound_vif_type) def test_profile_supported_pci_info(self): self._check_vif_for_pci_info(MELLANOX_CONNECTX3_PCI_INFO, mech_driver.VIF_TYPE_HW_VEB) def test_profile_unsupported_pci_info(self): with mock.patch('neutron.plugins.ml2.drivers.mech_sriov.' 'mech_driver.mech_driver.LOG') as log_mock: self._check_vif_for_pci_info('xxxx:yyyy', None) log_mock.debug.assert_called_with('Refusing to bind due to ' 'unsupported pci_vendor device') class SriovSwitchMechProfileFailTestCase(SriovNicSwitchMechanismBaseTestCase): def _check_for_pci_vendor_info(self, pci_vendor_info): context = TestFakePortContext(self.AGENT_TYPE, self.AGENTS, self.VLAN_SEGMENTS, portbindings.VNIC_DIRECT, pci_vendor_info) self.driver._check_supported_pci_vendor_device(context) def test_profile_missing_profile(self): with mock.patch('neutron.plugins.ml2.drivers.mech_sriov.' 'mech_driver.mech_driver.LOG') as log_mock: self._check_for_pci_vendor_info({}) log_mock.debug.assert_called_with("Missing profile in port" " binding") def test_profile_missing_pci_vendor_info(self): with mock.patch('neutron.plugins.ml2.drivers.mech_sriov.' 
'mech_driver.mech_driver.LOG') as log_mock: self._check_for_pci_vendor_info({'aa': 'bb'}) log_mock.debug.assert_called_with("Missing pci vendor" " info in profile") class SriovSwitchMechVifDetailsTestCase(SriovNicSwitchMechanismBaseTestCase): VLAN_SEGMENTS = [{api.ID: 'vlan_segment_id', api.NETWORK_TYPE: 'vlan', api.PHYSICAL_NETWORK: 'fake_physical_network', api.SEGMENTATION_ID: 1234}] def test_vif_details_contains_vlan_id(self): context = TestFakePortContext(self.AGENT_TYPE, self.AGENTS, self.VLAN_SEGMENTS, portbindings.VNIC_DIRECT) self.driver.bind_port(context) vif_details = context._bound_vif_details self.assertIsNotNone(vif_details) vlan_id = int(vif_details.get(portbindings.VIF_DETAILS_VLAN)) self.assertEqual(1234, vlan_id) def test_get_vif_details_for_flat_network(self): segment = {api.NETWORK_TYPE: p_const.TYPE_FLAT} vif_details = self.driver._get_vif_details(segment) vlan_id = vif_details[portbindings.VIF_DETAILS_VLAN] self.assertEqual('0', vlan_id) def test_get_vif_details_unsupported_net(self): segment = {api.NETWORK_TYPE: 'foo'} with testtools.ExpectedException(exc.SriovUnsupportedNetworkType): self.driver._get_vif_details(segment) def test_get_vif_details_with_agent(self): context = TestFakePortContext(self.AGENT_TYPE, self.AGENTS, self.VLAN_SEGMENTS, portbindings.VNIC_DIRECT) self.driver.bind_port(context) self.assertEqual(constants.PORT_STATUS_DOWN, context._bound_state) def test_get_vif_details_with_agent_direct_physical(self): context = TestFakePortContext(self.AGENT_TYPE, self.AGENTS, self.VLAN_SEGMENTS, portbindings.VNIC_DIRECT_PHYSICAL) self.driver.bind_port(context) self.assertEqual(constants.PORT_STATUS_ACTIVE, context._bound_state) class SriovSwitchMechConfigTestCase(SriovNicSwitchMechanismBaseTestCase): def _set_config(self, pci_devs=['aa:bb']): cfg.CONF.set_override('mechanism_drivers', ['logger', 'sriovnicswitch'], 'ml2') cfg.CONF.set_override('supported_pci_vendor_devs', pci_devs, 'ml2_sriov') def test_pci_vendor_config_single_entry(self): self._set_config() self.driver.initialize() self.assertEqual(['aa:bb'], self.driver.pci_vendor_info) def test_pci_vendor_config_multiple_entry(self): self._set_config(['x:y', 'a:b']) self.driver.initialize() self.assertEqual(['x:y', 'a:b'], self.driver.pci_vendor_info) def test_pci_vendor_config_default_entry(self): self.driver.initialize() self.assertEqual(DEFAULT_PCI_INFO, self.driver.pci_vendor_info) def test_pci_vendor_config_wrong_entry(self): self._set_config(['wrong_entry']) self.assertRaises(cfg.Error, self.driver.initialize) def test_initialize_missing_product_id(self): self._set_config(['vendor_id:']) self.assertRaises(cfg.Error, self.driver.initialize) def test_initialize_missing_vendor_id(self): self._set_config([':product_id']) self.assertRaises(cfg.Error, self.driver.initialize) def test_initialize_multiple_colons(self): self._set_config(['foo:bar:baz']) self.assertRaises(cfg.Error, self.driver.initialize) def test_initialize_empty_string(self): self._set_config(['']) self.assertRaises(cfg.Error, self.driver.initialize) neutron-8.4.0/neutron/tests/unit/plugins/ml2/drivers/mech_sriov/agent/0000775000567000056710000000000013044373210027253 5ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/tests/unit/plugins/ml2/drivers/mech_sriov/agent/test_sriov_nic_agent.py0000664000567000056710000005001113044372760034043 0ustar jenkinsjenkins00000000000000# Copyright 2014 Mellanox Technologies, Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the 
License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import mock from oslo_config import cfg from oslo_utils import uuidutils from neutron.agent.l2.extensions import manager as l2_ext_manager from neutron.agent import rpc as agent_rpc from neutron.extensions import portbindings from neutron.plugins.ml2.drivers.mech_sriov.agent.common import config # noqa from neutron.plugins.ml2.drivers.mech_sriov.agent.common import exceptions from neutron.plugins.ml2.drivers.mech_sriov.agent import sriov_nic_agent from neutron.tests import base DEVICE_MAC = '11:22:33:44:55:66' PCI_SLOT = "0000:06:00.1" class TestSriovAgent(base.BaseTestCase): def setUp(self): super(TestSriovAgent, self).setUp() # disable setting up periodic state reporting cfg.CONF.set_override('report_interval', 0, 'AGENT') cfg.CONF.set_default('firewall_driver', 'neutron.agent.firewall.NoopFirewallDriver', group='SECURITYGROUP') cfg.CONF.set_default('enable_security_group', False, group='SECURITYGROUP') class MockFixedIntervalLoopingCall(object): def __init__(self, f): self.f = f def start(self, interval=0): self.f() mock.patch('oslo_service.loopingcall.' 'FixedIntervalLoopingCall', new=MockFixedIntervalLoopingCall) self.agent = sriov_nic_agent.SriovNicSwitchAgent({}, {}, 0) @mock.patch("neutron.plugins.ml2.drivers.mech_sriov.agent.eswitch_manager" ".ESwitchManager.get_assigned_devices_info", return_value=set()) @mock.patch.object(agent_rpc.PluginReportStateAPI, 'report_state') def test_cached_device_count_report_state(self, report_state, get_dev): self.agent._report_state() agent_conf = self.agent.agent_state['configurations'] # ensure devices aren't calculated until first scan_devices call self.assertNotIn('devices', agent_conf) self.agent.scan_devices(set(), set()) self.assertEqual(0, agent_conf['devices']) # ensure report_state doesn't call get_dev get_dev.reset_mock() get_dev.return_value = set(['dev1', 'dev2']) self.agent._report_state() self.assertEqual(0, agent_conf['devices']) # after a device scan, conf should bump to 2 self.agent.scan_devices(set(), set()) self.assertEqual(2, agent_conf['devices']) @mock.patch("neutron.plugins.ml2.drivers.mech_sriov.agent.pci_lib." "PciDeviceIPWrapper.get_assigned_macs", return_value=[(DEVICE_MAC, PCI_SLOT)]) @mock.patch("neutron.plugins.ml2.drivers.mech_sriov.agent." "eswitch_manager.PciOsWrapper.is_assigned_vf", return_value=True) def test_treat_devices_removed_with_existed_device(self, *args): agent = sriov_nic_agent.SriovNicSwitchAgent({}, {}, 0) devices = [(DEVICE_MAC, PCI_SLOT)] with mock.patch.object(agent.plugin_rpc, "update_device_down") as fn_udd: fn_udd.return_value = {'device': DEVICE_MAC, 'exists': True} resync = agent.treat_devices_removed(devices) self.assertFalse(resync) self.assertTrue(fn_udd.called) @mock.patch("neutron.plugins.ml2.drivers.mech_sriov.agent.pci_lib." "PciDeviceIPWrapper.get_assigned_macs", return_value=[(DEVICE_MAC, PCI_SLOT)]) @mock.patch("neutron.plugins.ml2.drivers.mech_sriov.agent." 
"eswitch_manager.PciOsWrapper.is_assigned_vf", return_value=True) def test_treat_devices_removed_with_not_existed_device(self, *args): agent = sriov_nic_agent.SriovNicSwitchAgent({}, {}, 0) devices = [(DEVICE_MAC, PCI_SLOT)] with mock.patch.object(agent.plugin_rpc, "update_device_down") as fn_udd: fn_udd.return_value = {'device': DEVICE_MAC, 'exists': False} with mock.patch.object(sriov_nic_agent.LOG, 'debug') as log: resync = agent.treat_devices_removed(devices) self.assertEqual(1, log.call_count) self.assertFalse(resync) self.assertTrue(fn_udd.called) @mock.patch("neutron.plugins.ml2.drivers.mech_sriov.agent.pci_lib." "PciDeviceIPWrapper.get_assigned_macs", return_value=[(DEVICE_MAC, PCI_SLOT)]) @mock.patch("neutron.plugins.ml2.drivers.mech_sriov.agent." "eswitch_manager.PciOsWrapper.is_assigned_vf", return_value=True) def test_treat_devices_removed_failed(self, *args): agent = sriov_nic_agent.SriovNicSwitchAgent({}, {}, 0) devices = [(DEVICE_MAC, PCI_SLOT)] with mock.patch.object(agent.plugin_rpc, "update_device_down") as fn_udd: fn_udd.side_effect = Exception() with mock.patch.object(sriov_nic_agent.LOG, 'debug') as log: resync = agent.treat_devices_removed(devices) self.assertEqual(1, log.call_count) self.assertTrue(resync) self.assertTrue(fn_udd.called) def mock_scan_devices(self, expected, mock_current, registered_devices, updated_devices): self.agent.eswitch_mgr = mock.Mock() self.agent.eswitch_mgr.get_assigned_devices_info.return_value = ( mock_current) results = self.agent.scan_devices(registered_devices, updated_devices) self.assertEqual(expected, results) def test_scan_devices_returns_empty_sets(self): registered = set() updated = set() mock_current = set() expected = {'current': set(), 'updated': set(), 'added': set(), 'removed': set()} self.mock_scan_devices(expected, mock_current, registered, updated) def test_scan_devices_no_changes(self): registered = set(['1', '2']) updated = set() mock_current = set(['1', '2']) expected = {'current': set(['1', '2']), 'updated': set(), 'added': set(), 'removed': set()} self.mock_scan_devices(expected, mock_current, registered, updated) def test_scan_devices_new_and_removed(self): registered = set(['1', '2']) updated = set() mock_current = set(['2', '3']) expected = {'current': set(['2', '3']), 'updated': set(), 'added': set(['3']), 'removed': set(['1'])} self.mock_scan_devices(expected, mock_current, registered, updated) def test_scan_devices_updated_and_removed(self): registered = set(['1', '2']) # '1' is in removed and updated tuple updated = set(['1']) mock_current = set(['2', '3']) expected = {'current': set(['2', '3']), 'updated': set(), 'added': set(['3']), 'removed': set(['1'])} self.mock_scan_devices(expected, mock_current, registered, updated) def test_scan_devices_new_updates(self): registered = set(['1']) updated = set(['2']) mock_current = set(['1', '2']) expected = {'current': set(['1', '2']), 'updated': set(['2']), 'added': set(['2']), 'removed': set()} self.mock_scan_devices(expected, mock_current, registered, updated) def test_scan_devices_updated_missing(self): registered = set(['1']) updated = set(['2']) mock_current = set(['1']) expected = {'current': set(['1']), 'updated': set(), 'added': set(), 'removed': set()} self.mock_scan_devices(expected, mock_current, registered, updated) def test_process_network_devices(self): agent = self.agent device_info = {'current': set(), 'added': set(['mac3', 'mac4']), 'updated': set(['mac2', 'mac3']), 'removed': set(['mac1'])} agent.sg_agent.prepare_devices_filter = mock.Mock() 
agent.sg_agent.refresh_firewall = mock.Mock() agent.treat_devices_added_updated = mock.Mock(return_value=False) agent.treat_devices_removed = mock.Mock(return_value=False) agent.process_network_devices(device_info) agent.sg_agent.prepare_devices_filter.assert_called_with( set(['mac3', 'mac4'])) self.assertTrue(agent.sg_agent.refresh_firewall.called) agent.treat_devices_added_updated.assert_called_with(set(['mac2', 'mac3', 'mac4'])) agent.treat_devices_removed.assert_called_with(set(['mac1'])) def test_treat_devices_added_updated_and_removed(self): agent = self.agent MAC1 = 'aa:bb:cc:dd:ee:ff' SLOT1 = '1:2:3.0' MAC2 = 'aa:bb:cc:dd:ee:fe' SLOT2 = '1:3:3.0' mac_pci_slot_device1 = (MAC1, SLOT1) mac_pci_slot_device2 = (MAC2, SLOT2) mock_device1_details = {'device': MAC1, 'port_id': 'port123', 'network_id': 'net123', 'admin_state_up': True, 'network_type': 'vlan', 'segmentation_id': 100, 'profile': {'pci_slot': SLOT1}, 'physical_network': 'physnet1', 'port_security_enabled': False} mock_device2_details = {'device': MAC2, 'port_id': 'port124', 'network_id': 'net123', 'admin_state_up': True, 'network_type': 'vlan', 'segmentation_id': 100, 'profile': {'pci_slot': SLOT2}, 'physical_network': 'physnet1', 'port_security_enabled': False} agent.plugin_rpc = mock.Mock() agent.plugin_rpc.get_devices_details_list.return_value = ( [mock_device1_details]) agent.treat_devices_added_updated(set([MAC1])) self.assertEqual({'net123': [{'port_id': 'port123', 'device': mac_pci_slot_device1}]}, agent.network_ports) agent.plugin_rpc.get_devices_details_list.return_value = ( [mock_device2_details]) # add the second device and check the network_ports dict agent.treat_devices_added_updated(set([MAC2])) self.assertEqual( {'net123': [{'port_id': 'port123', 'device': mac_pci_slot_device1}, {'port_id': 'port124', 'device': mac_pci_slot_device2}]}, agent.network_ports) with mock.patch.object(agent.plugin_rpc, "update_device_down"): agent.treat_devices_removed([mac_pci_slot_device2]) # remove the second device and check the network_ports dict self.assertEqual({'net123': [{'port_id': 'port123', 'device': mac_pci_slot_device1}]}, agent.network_ports) def test_treat_devices_added_updated_admin_state_up_true(self): agent = self.agent mock_details = {'device': 'aa:bb:cc:dd:ee:ff', 'port_id': 'port123', 'network_id': 'net123', 'admin_state_up': True, 'network_type': 'vlan', 'segmentation_id': 100, 'profile': {'pci_slot': '1:2:3.0'}, 'physical_network': 'physnet1', 'port_security_enabled': False} agent.plugin_rpc = mock.Mock() agent.plugin_rpc.get_devices_details_list.return_value = [mock_details] agent.eswitch_mgr = mock.Mock() agent.eswitch_mgr.device_exists.return_value = True agent.set_device_state = mock.Mock() agent.set_device_spoofcheck = mock.Mock() resync_needed = agent.treat_devices_added_updated( set(['aa:bb:cc:dd:ee:ff'])) self.assertFalse(resync_needed) agent.eswitch_mgr.device_exists.assert_called_with('aa:bb:cc:dd:ee:ff', '1:2:3.0') agent.eswitch_mgr.set_device_state.assert_called_with( 'aa:bb:cc:dd:ee:ff', '1:2:3.0', True) agent.eswitch_mgr.set_device_spoofcheck.assert_called_with( 'aa:bb:cc:dd:ee:ff', '1:2:3.0', False) self.assertTrue(agent.plugin_rpc.update_device_up.called) def test_treat_device_ip_link_state_not_supported(self): agent = self.agent agent.plugin_rpc = mock.Mock() agent.eswitch_mgr = mock.Mock() agent.eswitch_mgr.device_exists.return_value = True agent.eswitch_mgr.set_device_state.side_effect = ( exceptions.IpCommandOperationNotSupportedError( dev_name='aa:bb:cc:dd:ee:ff')) 
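        # IpCommandOperationNotSupportedError means the NIC simply cannot
        # toggle VF link state via ``ip link``; the agent treats this as
        # non-fatal and still reports the device up, which is what the
        # assertion below checks.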
agent.treat_device('aa:bb:cc:dd:ee:ff', '1:2:3:0', admin_state_up=True) self.assertTrue(agent.plugin_rpc.update_device_up.called) def test_treat_device_set_device_state_exception(self): agent = self.agent agent.plugin_rpc = mock.Mock() agent.eswitch_mgr = mock.Mock() agent.eswitch_mgr.device_exists.return_value = True agent.eswitch_mgr.set_device_state.side_effect = ( exceptions.SriovNicError()) agent.treat_device('aa:bb:cc:dd:ee:ff', '1:2:3:0', admin_state_up=True) self.assertFalse(agent.plugin_rpc.update_device_up.called) def test_treat_devices_added_updated_admin_state_up_false(self): agent = self.agent mock_details = {'device': 'aa:bb:cc:dd:ee:ff', 'port_id': 'port123', 'network_id': 'net123', 'admin_state_up': False, 'network_type': 'vlan', 'segmentation_id': 100, 'profile': {'pci_slot': '1:2:3.0'}, 'physical_network': 'physnet1'} agent.plugin_rpc = mock.Mock() agent.plugin_rpc.get_devices_details_list.return_value = [mock_details] agent.remove_port_binding = mock.Mock() resync_needed = agent.treat_devices_added_updated( set(['aa:bb:cc:dd:ee:ff'])) self.assertFalse(resync_needed) self.assertFalse(agent.plugin_rpc.update_device_up.called) def test_update_and_clean_network_ports(self): network_id1 = 'network_id1' network_id2 = 'network_id2' port_id1 = 'port_id1' port_id2 = 'port_id2' mac_slot_1 = ('mac1', 'slot1') mac_slot_2 = ('mac2', 'slot2') self.agent.network_ports[network_id1] = [{'port_id': port_id1, 'device': mac_slot_1}, {'port_id': port_id2, 'device': mac_slot_2}] self.agent._update_network_ports(network_id2, port_id1, mac_slot_1) self.assertEqual({network_id1: [{'port_id': port_id2, 'device': mac_slot_2}], network_id2: [ {'port_id': port_id1, 'device': mac_slot_1}]}, self.agent.network_ports) cleaned_port_id = self.agent._clean_network_ports(mac_slot_1) self.assertEqual(cleaned_port_id, port_id1) self.assertEqual({network_id1: [{'port_id': port_id2, 'device': mac_slot_2}]}, self.agent.network_ports) cleaned_port_id = self.agent._clean_network_ports(mac_slot_2) self.assertEqual({}, self.agent.network_ports) class FakeAgent(object): def __init__(self): self.updated_devices = set() class TestSriovNicSwitchRpcCallbacks(base.BaseTestCase): def setUp(self): super(TestSriovNicSwitchRpcCallbacks, self).setUp() self.context = object() self.agent = FakeAgent() sg_agent = object() self.sriov_rpc_callback = sriov_nic_agent.SriovNicSwitchRpcCallbacks( self.context, self.agent, sg_agent) def _create_fake_port(self): return {'id': uuidutils.generate_uuid(), portbindings.PROFILE: {'pci_slot': PCI_SLOT}, 'mac_address': DEVICE_MAC} def test_port_update_with_pci_slot(self): port = self._create_fake_port() kwargs = {'context': self.context, 'port': port} self.sriov_rpc_callback.port_update(**kwargs) self.assertEqual(set([(DEVICE_MAC, PCI_SLOT)]), self.agent.updated_devices) def test_port_update_with_vnic_physical_direct(self): port = self._create_fake_port() port[portbindings.VNIC_TYPE] = portbindings.VNIC_DIRECT_PHYSICAL kwargs = {'context': self.context, 'port': port} self.sriov_rpc_callback.port_update(**kwargs) self.assertEqual(set(), self.agent.updated_devices) def test_port_update_without_pci_slot(self): port = self._create_fake_port() port[portbindings.PROFILE] = None kwargs = {'context': self.context, 'port': port} self.sriov_rpc_callback.port_update(**kwargs) self.assertEqual(set(), self.agent.updated_devices) def test_network_update(self): TEST_NETWORK_ID1 = "n1" TEST_NETWORK_ID2 = "n2" TEST_PORT_ID1 = 'p1' TEST_PORT_ID2 = 'p2' network1 = {'id': TEST_NETWORK_ID1} port1 = {'id': 
TEST_PORT_ID1, 'network_id': TEST_NETWORK_ID1} port2 = {'id': TEST_PORT_ID2, 'network_id': TEST_NETWORK_ID2} self.agent.network_ports = { TEST_NETWORK_ID1: [{'port_id': port1['id'], 'device': ('mac1', 'slot1')}], TEST_NETWORK_ID2: [{'port_id': port2['id'], 'device': ('mac2', 'slot2')}]} kwargs = {'context': self.context, 'network': network1} self.sriov_rpc_callback.network_update(**kwargs) self.assertEqual(set([('mac1', 'slot1')]), self.agent.updated_devices) class TestSRIOVAgentExtensionConfig(base.BaseTestCase): def setUp(self): super(TestSRIOVAgentExtensionConfig, self).setUp() l2_ext_manager.register_opts(cfg.CONF) # disable setting up periodic state reporting cfg.CONF.set_override('report_interval', 0, group='AGENT') cfg.CONF.set_override('extensions', ['qos'], group='agent') @mock.patch("neutron.plugins.ml2.drivers.mech_sriov.agent.eswitch_manager" ".ESwitchManager.get_assigned_devices_info", return_value=[]) def test_report_loaded_extension(self, *args): with mock.patch.object(agent_rpc.PluginReportStateAPI, 'report_state') as mock_report_state: agent = sriov_nic_agent.SriovNicSwitchAgent({}, {}, 0) agent._report_state() mock_report_state.assert_called_with( agent.context, agent.agent_state) self.assertEqual( ['qos'], agent.agent_state['configurations']['extensions']) neutron-8.4.0/neutron/tests/unit/plugins/ml2/drivers/mech_sriov/agent/__init__.py0000664000567000056710000000000013044372736031366 0ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/tests/unit/plugins/ml2/drivers/mech_sriov/agent/test_pci_lib.py0000664000567000056710000001722013044372760032300 0ustar jenkinsjenkins00000000000000# Copyright 2014 Mellanox Technologies, Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
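# The fixtures in TestPciLib below replay raw ``ip link show`` output for a
# PF with three VFs. As a rough, self-contained sketch of the kind of
# parsing these tests exercise -- the regex and helper name here are
# illustrative assumptions, not pci_lib's actual implementation:
import re

_VF_LINE_SKETCH = re.compile(r'vf (?P<index>\d+) MAC (?P<mac>[0-9a-f:]{17})',
                             re.IGNORECASE)


def _parse_vf_macs_sketch(link_show_output):
    """Illustrative only: map VF index -> MAC from ``ip link show`` text."""
    return {int(match.group('index')): match.group('mac')
            for match in _VF_LINE_SKETCH.finditer(link_show_output)}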
import mock from neutron.plugins.ml2.drivers.mech_sriov.agent.common \ import exceptions as exc from neutron.plugins.ml2.drivers.mech_sriov.agent import pci_lib from neutron.tests import base class TestPciLib(base.BaseTestCase): DEV_NAME = "p7p1" VF_INDEX = 1 VF_INDEX_DISABLE = 0 PF_LINK_SHOW = ('122: p7p1: mtu 1500 qdisc noop' ' state DOWN mode DEFAULT group default qlen 1000') PF_MAC = ' link/ether f4:52:14:2a:3e:c0 brd ff:ff:ff:ff:ff:ff' VF_0_LINK_SHOW = (' vf 0 MAC fa:16:3e:b4:81:ac, vlan 4095, spoof' ' checking off, link-state disable') VF_1_LINK_SHOW = (' vf 1 MAC 00:00:00:00:00:11, vlan 4095, spoof' ' checking off, link-state enable') VF_2_LINK_SHOW = (' vf 2 MAC fa:16:3e:68:4e:79, vlan 4095, spoof' ' checking off, link-state enable') VF_LINK_SHOW = '\n'.join((PF_LINK_SHOW, PF_MAC, VF_0_LINK_SHOW, VF_1_LINK_SHOW, VF_2_LINK_SHOW)) MACVTAP_LINK_SHOW = ('63: macvtap1@enp129s0f1: mtu ' '1500 qdisc noop state DOWN mode DEFAULT group ' 'default qlen 500 link/ether 4a:9b:6d:de:65:b5 brd ' 'ff:ff:ff:ff:ff:ff') MACVTAP_LINK_SHOW2 = ('64: macvtap2@p1p2_1: mtu ' '1500 qdisc noop state DOWN mode DEFAULT group ' 'default qlen 500 link/ether 4a:9b:6d:de:65:b5 brd ' 'ff:ff:ff:ff:ff:ff') IP_LINK_SHOW_WITH_MACVTAP = '\n'.join((VF_LINK_SHOW, MACVTAP_LINK_SHOW)) IP_LINK_SHOW_WITH_MACVTAP2 = '\n'.join((VF_LINK_SHOW, MACVTAP_LINK_SHOW2)) MAC_MAPPING = { 0: "fa:16:3e:b4:81:ac", 1: "00:00:00:00:00:11", 2: "fa:16:3e:68:4e:79", } def setUp(self): super(TestPciLib, self).setUp() self.pci_wrapper = pci_lib.PciDeviceIPWrapper(self.DEV_NAME) def test_get_assigned_macs(self): with mock.patch.object(self.pci_wrapper, "_as_root") as mock_as_root: mock_as_root.return_value = self.VF_LINK_SHOW result = self.pci_wrapper.get_assigned_macs([self.VF_INDEX]) self.assertEqual( {self.VF_INDEX: self.MAC_MAPPING[self.VF_INDEX]}, result) def test_get_assigned_macs_fail(self): with mock.patch.object(self.pci_wrapper, "_as_root") as mock_as_root: mock_as_root.side_effect = Exception() self.assertRaises(exc.IpCommandDeviceError, self.pci_wrapper.get_assigned_macs, [self.VF_INDEX]) def test_get_vf_state_enable(self): with mock.patch.object(self.pci_wrapper, "_as_root") as mock_as_root: mock_as_root.return_value = self.VF_LINK_SHOW result = self.pci_wrapper.get_vf_state(self.VF_INDEX) self.assertTrue(result) def test_get_vf_state_disable(self): with mock.patch.object(self.pci_wrapper, "_as_root") as mock_as_root: mock_as_root.return_value = self.VF_LINK_SHOW result = self.pci_wrapper.get_vf_state(self.VF_INDEX_DISABLE) self.assertFalse(result) def test_get_vf_state_fail(self): with mock.patch.object(self.pci_wrapper, "_as_root") as mock_as_root: mock_as_root.side_effect = Exception() self.assertRaises(exc.IpCommandDeviceError, self.pci_wrapper.get_vf_state, self.VF_INDEX) def test_set_vf_state(self): with mock.patch.object(self.pci_wrapper, "_as_root"): result = self.pci_wrapper.set_vf_state(self.VF_INDEX, True) self.assertIsNone(result) def test_set_vf_state_fail(self): with mock.patch.object(self.pci_wrapper, "_as_root") as mock_as_root: mock_as_root.side_effect = Exception() self.assertRaises(exc.IpCommandDeviceError, self.pci_wrapper.set_vf_state, self.VF_INDEX, True) def test_set_vf_spoofcheck(self): with mock.patch.object(self.pci_wrapper, "_execute"): result = self.pci_wrapper.set_vf_spoofcheck(self.VF_INDEX, True) self.assertIsNone(result) def test_set_vf_spoofcheck_fail(self): with mock.patch.object(self.pci_wrapper, "_execute") as mock_exec: mock_exec.side_effect = Exception() self.assertRaises(exc.IpCommandDeviceError, 
self.pci_wrapper.set_vf_spoofcheck, self.VF_INDEX, True) def test_set_vf_max_rate(self): with mock.patch.object(self.pci_wrapper, "_as_root") \ as mock_as_root: result = self.pci_wrapper.set_vf_max_rate(self.VF_INDEX, 1000) self.assertIsNone(result) mock_as_root.assert_called_once_with([], "link", ("set", self.DEV_NAME, "vf", str(self.VF_INDEX), "rate", '1000')) def test_set_vf_max_rate_fail(self): with mock.patch.object(self.pci_wrapper, "_execute") as mock_exec: mock_exec.side_effect = Exception() self.assertRaises(exc.IpCommandDeviceError, self.pci_wrapper.set_vf_max_rate, self.VF_INDEX, 1000) def test_set_vf_state_not_supported(self): with mock.patch.object(self.pci_wrapper, "_execute") as mock_exec: mock_exec.side_effect = Exception( pci_lib.PciDeviceIPWrapper.IP_LINK_OP_NOT_SUPPORTED) self.assertRaises(exc.IpCommandOperationNotSupportedError, self.pci_wrapper.set_vf_state, self.VF_INDEX, state=True) def test_is_macvtap_assigned(self): self.assertTrue(pci_lib.PciDeviceIPWrapper.is_macvtap_assigned( 'enp129s0f1', self.IP_LINK_SHOW_WITH_MACVTAP)) def test_is_macvtap_assigned_interface_with_underscore(self): self.assertTrue(pci_lib.PciDeviceIPWrapper.is_macvtap_assigned( 'p1p2_1', self.IP_LINK_SHOW_WITH_MACVTAP2)) def test_is_macvtap_assigned_not_assigned(self): self.assertFalse(pci_lib.PciDeviceIPWrapper.is_macvtap_assigned( 'enp129s0f2', self.IP_LINK_SHOW_WITH_MACVTAP)) def test_link_show_command_failed(self): with mock.patch.object(pci_lib.PciDeviceIPWrapper, "_execute") as mock_exec: mock_exec.side_effect = Exception() self.assertRaises(exc.IpCommandError, pci_lib.PciDeviceIPWrapper.link_show) neutron-8.4.0/neutron/tests/unit/plugins/ml2/drivers/mech_sriov/agent/common/0000775000567000056710000000000013044373210030543 5ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/tests/unit/plugins/ml2/drivers/mech_sriov/agent/common/__init__.py0000664000567000056710000000000013044372736032656 0ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/tests/unit/plugins/ml2/drivers/mech_sriov/agent/common/test_config.py0000664000567000056710000001277513044372760033446 0ustar jenkinsjenkins00000000000000# Copyright 2014 Mellanox Technologies, Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
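# TestSriovAgentConfig below pins down the SRIOV_NIC config syntax:
# physical_device_mappings takes "physnet:device" pairs, and exclude_devices
# takes "device:pci_addr;pci_addr" entries, with surrounding whitespace
# tolerated. A rough illustrative parser for the latter -- an assumption for
# readability, not the shape of config.parse_exclude_devices:


def _parse_exclude_devices_sketch(entries):
    """Illustrative only: 'dev:pci;pci' strings -> {dev: set of PCI addrs}."""
    parsed = {}
    for entry in entries:
        device, sep, pci_list = entry.partition(':')
        if not sep or not pci_list.strip():
            raise ValueError("invalid exclude_devices entry: %r" % entry)
        parsed[device.strip()] = {pci.strip() for pci in pci_list.split(';')}
    return parsed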
from oslo_config import cfg from neutron.common import utils as n_utils from neutron.plugins.ml2.drivers.mech_sriov.agent.common import config from neutron.plugins.ml2.drivers.mech_sriov.agent \ import sriov_nic_agent as agent from neutron.tests import base class TestSriovAgentConfig(base.BaseTestCase): EXCLUDE_DEVICES_LIST = ['p7p1:0000:07:00.1;0000:07:00.2', 'p3p1:0000:04:00.3'] EXCLUDE_DEVICES_LIST_INVALID = ['p7p2:0000:07:00.1;0000:07:00.2'] EXCLUDE_DEVICES_WITH_SPACES_LIST = ['p7p1: 0000:07:00.1 ; 0000:07:00.2', 'p3p1:0000:04:00.3 '] EXCLUDE_DEVICES_WITH_SPACES_ERROR = ['p7p1', 'p3p1:0000:04:00.3 '] EXCLUDE_DEVICES = {'p7p1': set(['0000:07:00.1', '0000:07:00.2']), 'p3p1': set(['0000:04:00.3'])} DEVICE_MAPPING_LIST = ['physnet7:p7p1', 'physnet3:p3p1'] DEVICE_MAPPING_WITH_ERROR_LIST = ['physnet7', 'physnet3:p3p1'] DEVICE_MAPPING_WITH_SPACES_LIST = ['physnet7 : p7p1', 'physnet3 : p3p1 '] DEVICE_MAPPING = {'physnet7': ['p7p1'], 'physnet3': ['p3p1']} def test_defaults(self): self.assertEqual(config.DEFAULT_DEVICE_MAPPINGS, cfg.CONF.SRIOV_NIC.physical_device_mappings) self.assertEqual(config.DEFAULT_EXCLUDE_DEVICES, cfg.CONF.SRIOV_NIC.exclude_devices) self.assertEqual(2, cfg.CONF.AGENT.polling_interval) def test_device_mappings(self): cfg.CONF.set_override('physical_device_mappings', self.DEVICE_MAPPING_LIST, 'SRIOV_NIC') device_mappings = n_utils.parse_mappings( cfg.CONF.SRIOV_NIC.physical_device_mappings, unique_keys=False) self.assertEqual(self.DEVICE_MAPPING, device_mappings) def test_device_mappings_with_error(self): cfg.CONF.set_override('physical_device_mappings', self.DEVICE_MAPPING_WITH_ERROR_LIST, 'SRIOV_NIC') self.assertRaises(ValueError, n_utils.parse_mappings, cfg.CONF.SRIOV_NIC.physical_device_mappings, unique_keys=False) def test_device_mappings_with_spaces(self): cfg.CONF.set_override('physical_device_mappings', self.DEVICE_MAPPING_WITH_SPACES_LIST, 'SRIOV_NIC') device_mappings = n_utils.parse_mappings( cfg.CONF.SRIOV_NIC.physical_device_mappings, unique_keys=False) self.assertEqual(self.DEVICE_MAPPING, device_mappings) def test_exclude_devices(self): cfg.CONF.set_override('exclude_devices', self.EXCLUDE_DEVICES_LIST, 'SRIOV_NIC') exclude_devices = config.parse_exclude_devices( cfg.CONF.SRIOV_NIC.exclude_devices) self.assertEqual(self.EXCLUDE_DEVICES, exclude_devices) def test_exclude_devices_with_spaces(self): cfg.CONF.set_override('exclude_devices', self.EXCLUDE_DEVICES_WITH_SPACES_LIST, 'SRIOV_NIC') exclude_devices = config.parse_exclude_devices( cfg.CONF.SRIOV_NIC.exclude_devices) self.assertEqual(self.EXCLUDE_DEVICES, exclude_devices) def test_exclude_devices_with_error(self): cfg.CONF.set_override('exclude_devices', self.EXCLUDE_DEVICES_WITH_SPACES_ERROR, 'SRIOV_NIC') self.assertRaises(ValueError, config.parse_exclude_devices, cfg.CONF.SRIOV_NIC.exclude_devices) def test_validate_config_ok(self): cfg.CONF.set_override('physical_device_mappings', self.DEVICE_MAPPING_LIST, 'SRIOV_NIC') cfg.CONF.set_override('exclude_devices', self.EXCLUDE_DEVICES_LIST, 'SRIOV_NIC') config_parser = agent.SriovNicAgentConfigParser() config_parser.parse() device_mappings = config_parser.device_mappings exclude_devices = config_parser.exclude_devices self.assertEqual(self.EXCLUDE_DEVICES, exclude_devices) self.assertEqual(self.DEVICE_MAPPING, device_mappings) def test_validate_config_fail(self): cfg.CONF.set_override('physical_device_mappings', self.DEVICE_MAPPING_LIST, 'SRIOV_NIC') cfg.CONF.set_override('exclude_devices', self.EXCLUDE_DEVICES_LIST_INVALID, 'SRIOV_NIC') config_parser = 
agent.SriovNicAgentConfigParser() self.assertRaises(ValueError, config_parser.parse) neutron-8.4.0/neutron/tests/unit/plugins/ml2/drivers/mech_sriov/agent/test_eswitch_manager.py0000664000567000056710000006371413044372760034050 0ustar jenkinsjenkins00000000000000# Copyright 2014 Mellanox Technologies, Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import os import mock from neutron.plugins.ml2.drivers.mech_sriov.agent.common \ import exceptions as exc from neutron.plugins.ml2.drivers.mech_sriov.agent import eswitch_manager as esm from neutron.tests import base class TestCreateESwitchManager(base.BaseTestCase): SCANNED_DEVICES = [('0000:06:00.1', 0), ('0000:06:00.2', 1), ('0000:06:00.3', 2)] @staticmethod def cleanup(): if hasattr(esm.ESwitchManager, '_instance'): del esm.ESwitchManager._instance def test_create_eswitch_mgr_fail(self): device_mappings = {'physnet1': ['p6p1']} with mock.patch("neutron.plugins.ml2.drivers.mech_sriov.agent." "eswitch_manager.PciOsWrapper.scan_vf_devices", side_effect=exc.InvalidDeviceError( dev_name="p6p1", reason="device" " not found")),\ mock.patch("neutron.plugins.ml2.drivers.mech_sriov.agent." "eswitch_manager.PciOsWrapper.is_assigned_vf", return_value=True): eswitch_mgr = esm.ESwitchManager() self.addCleanup(self.cleanup) self.assertRaises(exc.InvalidDeviceError, eswitch_mgr.discover_devices, device_mappings, None) def test_create_eswitch_mgr_ok(self): device_mappings = {'physnet1': ['p6p1']} with mock.patch("neutron.plugins.ml2.drivers.mech_sriov.agent." "eswitch_manager.PciOsWrapper.scan_vf_devices", return_value=self.SCANNED_DEVICES),\ mock.patch("neutron.plugins.ml2.drivers.mech_sriov.agent." "eswitch_manager.PciOsWrapper.is_assigned_vf", return_value=True): eswitch_mgr = esm.ESwitchManager() self.addCleanup(self.cleanup) eswitch_mgr.discover_devices(device_mappings, None) class TestESwitchManagerApi(base.BaseTestCase): SCANNED_DEVICES = [('0000:06:00.1', 0), ('0000:06:00.2', 1), ('0000:06:00.3', 2)] ASSIGNED_MAC = '00:00:00:00:00:66' PCI_SLOT = '0000:06:00.1' WRONG_MAC = '00:00:00:00:00:67' WRONG_PCI = "0000:06:00.6" def setUp(self): super(TestESwitchManagerApi, self).setUp() device_mappings = {'physnet1': ['p6p1']} self.eswitch_mgr = esm.ESwitchManager() self.addCleanup(self.cleanup) self._set_eswitch_manager(self.eswitch_mgr, device_mappings) @staticmethod def cleanup(): if hasattr(esm.ESwitchManager, '_instance'): del esm.ESwitchManager._instance def _set_eswitch_manager(self, eswitch_mgr, device_mappings): with mock.patch("neutron.plugins.ml2.drivers.mech_sriov.agent." "eswitch_manager.PciOsWrapper.scan_vf_devices", return_value=self.SCANNED_DEVICES), \ mock.patch("neutron.plugins.ml2.drivers.mech_sriov.agent." "eswitch_manager.PciOsWrapper.is_assigned_vf", return_value=True): eswitch_mgr.discover_devices(device_mappings, None) def test_get_assigned_devices_info(self): with mock.patch("neutron.plugins.ml2.drivers.mech_sriov.agent." 
"eswitch_manager.EmbSwitch.get_assigned_devices_info", return_value=[(self.ASSIGNED_MAC, self.PCI_SLOT)]),\ mock.patch("neutron.plugins.ml2.drivers.mech_sriov.agent." "pci_lib.PciDeviceIPWrapper.link_show", return_value=''): result = self.eswitch_mgr.get_assigned_devices_info() self.assertIn(self.ASSIGNED_MAC, list(result)[0]) self.assertIn(self.PCI_SLOT, list(result)[0]) def test_get_assigned_devices_info_multiple_nics_for_physnet(self): device_mappings = {'physnet1': ['p6p1', 'p6p2']} devices_info = { 'p6p1': [(self.ASSIGNED_MAC, self.PCI_SLOT)], 'p6p2': [(self.WRONG_MAC, self.WRONG_PCI)], } def get_assigned_devices_info(self): return devices_info[self.dev_name] self._set_eswitch_manager(self.eswitch_mgr, device_mappings) with mock.patch("neutron.plugins.ml2.drivers.mech_sriov.agent." "eswitch_manager.EmbSwitch.get_assigned_devices_info", side_effect=get_assigned_devices_info, autospec=True): result = self.eswitch_mgr.get_assigned_devices_info() self.assertIn(devices_info['p6p1'][0], list(result)) self.assertIn(devices_info['p6p2'][0], list(result)) def test_get_device_status_true(self): with mock.patch("neutron.plugins.ml2.drivers.mech_sriov.agent." "eswitch_manager.EmbSwitch.get_pci_device", return_value=self.ASSIGNED_MAC),\ mock.patch("neutron.plugins.ml2.drivers.mech_sriov.agent." "eswitch_manager.EmbSwitch.get_device_state", return_value=True): result = self.eswitch_mgr.get_device_state(self.ASSIGNED_MAC, self.PCI_SLOT) self.assertTrue(result) def test_get_device_status_false(self): with mock.patch("neutron.plugins.ml2.drivers.mech_sriov.agent." "eswitch_manager.EmbSwitch.get_pci_device", return_value=self.ASSIGNED_MAC),\ mock.patch("neutron.plugins.ml2.drivers.mech_sriov.agent." "eswitch_manager.EmbSwitch.get_device_state", return_value=False): result = self.eswitch_mgr.get_device_state(self.ASSIGNED_MAC, self.PCI_SLOT) self.assertFalse(result) def test_get_device_status_mismatch(self): with mock.patch("neutron.plugins.ml2.drivers.mech_sriov.agent." "eswitch_manager.EmbSwitch.get_pci_device", return_value=self.ASSIGNED_MAC),\ mock.patch("neutron.plugins.ml2.drivers.mech_sriov.agent." "eswitch_manager.EmbSwitch.get_device_state", return_value=True): with mock.patch("neutron.plugins.ml2.drivers.mech_sriov.agent." "eswitch_manager.LOG.warning") as log_mock: result = self.eswitch_mgr.get_device_state(self.WRONG_MAC, self.PCI_SLOT) log_mock.assert_called_with('device pci mismatch: ' '%(device_mac)s - %(pci_slot)s', {'pci_slot': self.PCI_SLOT, 'device_mac': self.WRONG_MAC}) self.assertFalse(result) def test_set_device_status(self): with mock.patch("neutron.plugins.ml2.drivers.mech_sriov.agent." "eswitch_manager.EmbSwitch.get_pci_device", return_value=self.ASSIGNED_MAC),\ mock.patch("neutron.plugins.ml2.drivers.mech_sriov.agent." "eswitch_manager.EmbSwitch.set_device_state"): self.eswitch_mgr.set_device_state(self.ASSIGNED_MAC, self.PCI_SLOT, True) def test_set_device_max_rate(self): with mock.patch("neutron.plugins.ml2.drivers.mech_sriov.agent." "eswitch_manager.EmbSwitch.get_pci_device", return_value=self.ASSIGNED_MAC) as get_pci_mock,\ mock.patch("neutron.plugins.ml2.drivers.mech_sriov.agent." "eswitch_manager.EmbSwitch.set_device_max_rate")\ as set_device_max_rate_mock: self.eswitch_mgr.set_device_max_rate(self.ASSIGNED_MAC, self.PCI_SLOT, 1000) get_pci_mock.assert_called_once_with(self.PCI_SLOT) set_device_max_rate_mock.assert_called_once_with( self.PCI_SLOT, 1000) def test_set_device_status_mismatch(self): with mock.patch("neutron.plugins.ml2.drivers.mech_sriov.agent." 
"eswitch_manager.EmbSwitch.get_pci_device", return_value=self.ASSIGNED_MAC),\ mock.patch("neutron.plugins.ml2.drivers.mech_sriov.agent." "eswitch_manager.EmbSwitch.set_device_state"): with mock.patch("neutron.plugins.ml2.drivers.mech_sriov.agent." "eswitch_manager.LOG.warning") as log_mock: self.eswitch_mgr.set_device_state(self.WRONG_MAC, self.PCI_SLOT, True) log_mock.assert_called_with('device pci mismatch: ' '%(device_mac)s - %(pci_slot)s', {'pci_slot': self.PCI_SLOT, 'device_mac': self.WRONG_MAC}) def _mock_device_exists(self, pci_slot, mac_address, expected_result): with mock.patch("neutron.plugins.ml2.drivers.mech_sriov.agent." "eswitch_manager.EmbSwitch.get_pci_device", return_value=self.ASSIGNED_MAC): result = self.eswitch_mgr.device_exists(mac_address, pci_slot) self.assertEqual(expected_result, result) def test_device_exists_true(self): self._mock_device_exists(self.PCI_SLOT, self.ASSIGNED_MAC, True) def test_device_exists_false(self): self._mock_device_exists(self.WRONG_PCI, self.WRONG_MAC, False) def test_device_exists_mismatch(self): with mock.patch("neutron.plugins.ml2.drivers.mech_sriov.agent." "eswitch_manager.EmbSwitch.get_pci_device", return_value=self.ASSIGNED_MAC): with mock.patch("neutron.plugins.ml2.drivers.mech_sriov.agent." "eswitch_manager.LOG.warning") as log_mock: result = self.eswitch_mgr.device_exists(self.WRONG_MAC, self.PCI_SLOT) log_mock.assert_called_with('device pci mismatch: ' '%(device_mac)s - %(pci_slot)s', {'pci_slot': self.PCI_SLOT, 'device_mac': self.WRONG_MAC}) self.assertFalse(result) @mock.patch("neutron.plugins.ml2.drivers.mech_sriov.agent.pci_lib." "PciDeviceIPWrapper.get_assigned_macs", return_value={}) @mock.patch("neutron.plugins.ml2.drivers.mech_sriov.agent." "pci_lib.PciDeviceIPWrapper.link_show", return_value='') @mock.patch("neutron.plugins.ml2.drivers.mech_sriov.agent." "eswitch_manager.EmbSwitch.set_device_max_rate") def test_clear_max_rate_existing_pci_slot(self, max_rate_mock, *args): self.eswitch_mgr.clear_max_rate(self.PCI_SLOT) max_rate_mock.assert_called_once_with(self.PCI_SLOT, 0) @mock.patch("neutron.plugins.ml2.drivers.mech_sriov.agent.pci_lib." "PciDeviceIPWrapper.get_assigned_macs", return_value={0: ASSIGNED_MAC}) @mock.patch("neutron.plugins.ml2.drivers.mech_sriov.agent." "pci_lib.PciDeviceIPWrapper.link_show", return_value='') @mock.patch("neutron.plugins.ml2.drivers.mech_sriov.agent." "eswitch_manager.EmbSwitch.set_device_max_rate") def test_clear_max_rate_exist_and_assigned_pci( self, max_rate_mock, *args): self.eswitch_mgr.clear_max_rate(self.PCI_SLOT) self.assertFalse(max_rate_mock.called) @mock.patch("neutron.plugins.ml2.drivers.mech_sriov.agent." "eswitch_manager.EmbSwitch.set_device_max_rate") def test_clear_max_rate_nonexisting_pci_slot(self, max_rate_mock): self.eswitch_mgr.clear_max_rate(self.WRONG_PCI) self.assertFalse(max_rate_mock.called) class TestEmbSwitch(base.BaseTestCase): DEV_NAME = "eth2" PHYS_NET = "default" ASSIGNED_MAC = '00:00:00:00:00:66' PCI_SLOT = "0000:06:00.1" WRONG_PCI_SLOT = "0000:06:00.4" SCANNED_DEVICES = [('0000:06:00.1', 0), ('0000:06:00.2', 1), ('0000:06:00.3', 2)] VF_TO_MAC_MAPPING = {0: '00:00:00:00:00:11', 1: '00:00:00:00:00:22', 2: '00:00:00:00:00:33'} EXPECTED_MAC_TO_PCI = { '00:00:00:00:00:11': '0000:06:00.1', '00:00:00:00:00:22': '0000:06:00.2', '00:00:00:00:00:33': '0000:06:00.3'} def setUp(self): super(TestEmbSwitch, self).setUp() exclude_devices = set() with mock.patch("neutron.plugins.ml2.drivers.mech_sriov.agent." 
"eswitch_manager.PciOsWrapper.scan_vf_devices", return_value=self.SCANNED_DEVICES): self.emb_switch = esm.EmbSwitch(self.PHYS_NET, self.DEV_NAME, exclude_devices) @mock.patch("neutron.plugins.ml2.drivers.mech_sriov.agent." "eswitch_manager.PciOsWrapper.scan_vf_devices", return_value=[(PCI_SLOT, 0)]) def test_get_assigned_devices_info(self, *args): emb_switch = esm.EmbSwitch(self.PHYS_NET, self.DEV_NAME, ()) with mock.patch("neutron.plugins.ml2.drivers.mech_sriov.agent.pci_lib." "PciDeviceIPWrapper.get_assigned_macs", return_value={0: self.ASSIGNED_MAC}),\ mock.patch("neutron.plugins.ml2.drivers.mech_sriov.agent." "eswitch_manager.PciOsWrapper.is_assigned_vf", return_value=True), \ mock.patch("neutron.plugins.ml2.drivers.mech_sriov.agent." "pci_lib.PciDeviceIPWrapper.link_show", return_value=''): result = emb_switch.get_assigned_devices_info() self.assertIn(self.ASSIGNED_MAC, list(result)[0]) self.assertIn(self.PCI_SLOT, list(result)[0]) @mock.patch("neutron.plugins.ml2.drivers.mech_sriov.agent." "eswitch_manager.PciOsWrapper.scan_vf_devices", return_value=SCANNED_DEVICES) def test_get_assigned_devices_info_multiple_slots(self, *args): emb_switch = esm.EmbSwitch(self.PHYS_NET, self.DEV_NAME, ()) with mock.patch("neutron.plugins.ml2.drivers.mech_sriov.agent.pci_lib." "PciDeviceIPWrapper.get_assigned_macs", return_value=self.VF_TO_MAC_MAPPING),\ mock.patch("neutron.plugins.ml2.drivers.mech_sriov.agent." "eswitch_manager.PciOsWrapper.is_assigned_vf", return_value=True),\ mock.patch("neutron.plugins.ml2.drivers.mech_sriov.agent." "pci_lib.PciDeviceIPWrapper.link_show", return_value=''): devices_info = emb_switch.get_assigned_devices_info() for device_info in devices_info: mac = device_info[0] pci_slot = device_info[1] self.assertEqual( self.EXPECTED_MAC_TO_PCI[mac], pci_slot) def test_get_assigned_devices_empty(self): with mock.patch("neutron.plugins.ml2.drivers.mech_sriov.agent." "eswitch_manager.PciOsWrapper.is_assigned_vf", return_value=False), \ mock.patch("neutron.plugins.ml2.drivers.mech_sriov.agent." "pci_lib.PciDeviceIPWrapper.link_show", return_value=''): result = self.emb_switch.get_assigned_devices_info() self.assertFalse(result) def test_get_device_state_ok(self): with mock.patch("neutron.plugins.ml2.drivers.mech_sriov.agent.pci_lib." "PciDeviceIPWrapper.get_vf_state", return_value=False): result = self.emb_switch.get_device_state(self.PCI_SLOT) self.assertFalse(result) def test_get_device_state_fail(self): with mock.patch("neutron.plugins.ml2.drivers.mech_sriov.agent.pci_lib." "PciDeviceIPWrapper.get_vf_state", return_value=False): self.assertRaises(exc.InvalidPciSlotError, self.emb_switch.get_device_state, self.WRONG_PCI_SLOT) def test_set_device_state_ok(self): with mock.patch("neutron.plugins.ml2.drivers.mech_sriov.agent.pci_lib." "PciDeviceIPWrapper.set_vf_state"): with mock.patch("neutron.plugins.ml2.drivers.mech_sriov.agent." "pci_lib.LOG.warning") as log_mock: self.emb_switch.set_device_state(self.PCI_SLOT, True) self.assertEqual(0, log_mock.call_count) def test_set_device_state_fail(self): with mock.patch("neutron.plugins.ml2.drivers.mech_sriov.agent.pci_lib." "PciDeviceIPWrapper.set_vf_state"): self.assertRaises(exc.InvalidPciSlotError, self.emb_switch.set_device_state, self.WRONG_PCI_SLOT, True) def test_set_device_spoofcheck_ok(self): with mock.patch("neutron.plugins.ml2.drivers.mech_sriov.agent.pci_lib." 
"PciDeviceIPWrapper.set_vf_spoofcheck") as \ set_vf_spoofcheck_mock: self.emb_switch.set_device_spoofcheck(self.PCI_SLOT, True) self.assertTrue(set_vf_spoofcheck_mock.called) def test_set_device_spoofcheck_fail(self): with mock.patch("neutron.plugins.ml2.drivers.mech_sriov.agent.pci_lib." "PciDeviceIPWrapper.set_vf_spoofcheck"): self.assertRaises(exc.InvalidPciSlotError, self.emb_switch.set_device_spoofcheck, self.WRONG_PCI_SLOT, True) def test_set_device_max_rate_ok(self): with mock.patch("neutron.plugins.ml2.drivers.mech_sriov.agent.pci_lib." "PciDeviceIPWrapper.set_vf_max_rate") as pci_lib_mock: self.emb_switch.set_device_max_rate(self.PCI_SLOT, 2000) pci_lib_mock.assert_called_with(0, 2) def test_set_device_max_rate_ok2(self): with mock.patch("neutron.plugins.ml2.drivers.mech_sriov.agent.pci_lib." "PciDeviceIPWrapper.set_vf_max_rate") as pci_lib_mock: self.emb_switch.set_device_max_rate(self.PCI_SLOT, 99) pci_lib_mock.assert_called_with(0, 1) def test_set_device_max_rate_rounded_ok(self): with mock.patch("neutron.plugins.ml2.drivers.mech_sriov.agent.pci_lib." "PciDeviceIPWrapper.set_vf_max_rate") as pci_lib_mock: self.emb_switch.set_device_max_rate(self.PCI_SLOT, 2001) pci_lib_mock.assert_called_with(0, 2) def test_set_device_max_rate_rounded_ok2(self): with mock.patch("neutron.plugins.ml2.drivers.mech_sriov.agent.pci_lib." "PciDeviceIPWrapper.set_vf_max_rate") as pci_lib_mock: self.emb_switch.set_device_max_rate(self.PCI_SLOT, 2499) pci_lib_mock.assert_called_with(0, 2) def test_set_device_max_rate_rounded_ok3(self): with mock.patch("neutron.plugins.ml2.drivers.mech_sriov.agent.pci_lib." "PciDeviceIPWrapper.set_vf_max_rate") as pci_lib_mock: self.emb_switch.set_device_max_rate(self.PCI_SLOT, 2500) pci_lib_mock.assert_called_with(0, 3) def test_set_device_max_rate_disable(self): with mock.patch("neutron.plugins.ml2.drivers.mech_sriov.agent.pci_lib." "PciDeviceIPWrapper.set_vf_max_rate") as pci_lib_mock: self.emb_switch.set_device_max_rate(self.PCI_SLOT, 0) pci_lib_mock.assert_called_with(0, 0) def test_set_device_max_rate_fail(self): with mock.patch("neutron.plugins.ml2.drivers.mech_sriov.agent.pci_lib." "PciDeviceIPWrapper.set_vf_max_rate"): self.assertRaises(exc.InvalidPciSlotError, self.emb_switch.set_device_max_rate, self.WRONG_PCI_SLOT, 1000) def test_get_pci_device(self): with mock.patch("neutron.plugins.ml2.drivers.mech_sriov.agent.pci_lib." "PciDeviceIPWrapper.get_assigned_macs", return_value={0: self.ASSIGNED_MAC}),\ mock.patch("neutron.plugins.ml2.drivers.mech_sriov.agent." "eswitch_manager.PciOsWrapper.is_assigned_vf", return_value=True),\ mock.patch("neutron.plugins.ml2.drivers.mech_sriov.agent." "pci_lib.PciDeviceIPWrapper.link_show", return_value=''): result = self.emb_switch.get_pci_device(self.PCI_SLOT) self.assertEqual(self.ASSIGNED_MAC, result) def test_get_pci_device_fail(self): with mock.patch("neutron.plugins.ml2.drivers.mech_sriov.agent.pci_lib." "PciDeviceIPWrapper.get_assigned_macs", return_value=[self.ASSIGNED_MAC]),\ mock.patch("neutron.plugins.ml2.drivers.mech_sriov.agent." 
"eswitch_manager.PciOsWrapper.is_assigned_vf", return_value=True): result = self.emb_switch.get_pci_device(self.WRONG_PCI_SLOT) self.assertIsNone(result) def test_get_pci_list(self): result = self.emb_switch.get_pci_slot_list() self.assertEqual([tup[0] for tup in self.SCANNED_DEVICES], sorted(result)) class TestPciOsWrapper(base.BaseTestCase): DEV_NAME = "p7p1" VF_INDEX = 1 DIR_CONTENTS = [ "mlx4_port1", "virtfn0", "virtfn1", "virtfn2" ] DIR_CONTENTS_NO_MATCH = [ "mlx4_port1", "mlx4_port1" ] LINKS = { "virtfn0": "../0000:04:00.1", "virtfn1": "../0000:04:00.2", "virtfn2": "../0000:04:00.3" } PCI_SLOTS = [ ('0000:04:00.1', 0), ('0000:04:00.2', 1), ('0000:04:00.3', 2) ] def test_scan_vf_devices(self): def _get_link(file_path): file_name = os.path.basename(file_path) return self.LINKS[file_name] with mock.patch("os.path.isdir", return_value=True),\ mock.patch("os.listdir", return_value=self.DIR_CONTENTS),\ mock.patch("os.path.islink", return_value=True),\ mock.patch("os.readlink", side_effect=_get_link): result = esm.PciOsWrapper.scan_vf_devices(self.DEV_NAME) self.assertEqual(self.PCI_SLOTS, result) def test_scan_vf_devices_no_dir(self): with mock.patch("os.path.isdir", return_value=False): self.assertRaises(exc.InvalidDeviceError, esm.PciOsWrapper.scan_vf_devices, self.DEV_NAME) def test_scan_vf_devices_no_content(self): with mock.patch("os.path.isdir", return_value=True),\ mock.patch("os.listdir", return_value=[]): self.assertEqual([], esm.PciOsWrapper.scan_vf_devices(self.DEV_NAME)) def test_scan_vf_devices_no_match(self): with mock.patch("os.path.isdir", return_value=True),\ mock.patch("os.listdir", return_value=self.DIR_CONTENTS_NO_MATCH): self.assertEqual([], esm.PciOsWrapper.scan_vf_devices(self.DEV_NAME)) @mock.patch("os.listdir", side_effect=OSError()) def test_is_assigned_vf_true(self, *args): self.assertTrue(esm.PciOsWrapper.is_assigned_vf( self.DEV_NAME, self.VF_INDEX, '')) @mock.patch("os.listdir", return_value=[DEV_NAME, "eth1"]) @mock.patch("neutron.plugins.ml2.drivers.mech_sriov.agent.pci_lib." "PciDeviceIPWrapper.is_macvtap_assigned", return_value=False) def test_is_assigned_vf_false(self, *args): self.assertFalse(esm.PciOsWrapper.is_assigned_vf( self.DEV_NAME, self.VF_INDEX, '')) @mock.patch("os.listdir", return_value=["eth0", "eth1"]) @mock.patch("neutron.plugins.ml2.drivers.mech_sriov.agent.pci_lib." "PciDeviceIPWrapper.is_macvtap_assigned", return_value=True) def test_is_assigned_vf_macvtap( self, mock_is_macvtap_assigned, *args): esm.PciOsWrapper.is_assigned_vf(self.DEV_NAME, self.VF_INDEX, '') mock_is_macvtap_assigned.called_with(self.VF_INDEX, "eth0") @mock.patch("os.listdir", side_effect=OSError()) @mock.patch("neutron.plugins.ml2.drivers.mech_sriov.agent.pci_lib." 
"PciDeviceIPWrapper.is_macvtap_assigned") def test_is_assigned_vf_macvtap_failure( self, mock_is_macvtap_assigned, *args): esm.PciOsWrapper.is_assigned_vf(self.DEV_NAME, self.VF_INDEX, '') self.assertFalse(mock_is_macvtap_assigned.called) neutron-8.4.0/neutron/tests/unit/plugins/ml2/drivers/mech_sriov/agent/extension_drivers/0000775000567000056710000000000013044373210033025 5ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/tests/unit/plugins/ml2/drivers/mech_sriov/agent/extension_drivers/__init__.py0000775000567000056710000000000013044372760035140 0ustar jenkinsjenkins00000000000000././@LongLink0000000000000000000000000000015300000000000011214 Lustar 00000000000000neutron-8.4.0/neutron/tests/unit/plugins/ml2/drivers/mech_sriov/agent/extension_drivers/test_qos_driver.pyneutron-8.4.0/neutron/tests/unit/plugins/ml2/drivers/mech_sriov/agent/extension_drivers/test_qos_dri0000775000567000056710000001044113044372760035463 0ustar jenkinsjenkins00000000000000# Copyright 2015 Mellanox Technologies, Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import mock from oslo_utils import uuidutils from neutron import context from neutron.objects.qos import policy from neutron.objects.qos import rule from neutron.plugins.ml2.drivers.mech_sriov.agent.common import exceptions from neutron.plugins.ml2.drivers.mech_sriov.agent.extension_drivers import ( qos_driver) from neutron.services.qos import qos_consts from neutron.tests import base class QosSRIOVAgentDriverTestCase(base.BaseTestCase): ASSIGNED_MAC = '00:00:00:00:00:66' PCI_SLOT = '0000:06:00.1' def setUp(self): super(QosSRIOVAgentDriverTestCase, self).setUp() self.context = context.get_admin_context() self.qos_driver = qos_driver.QosSRIOVAgentDriver() self.qos_driver.initialize() self.qos_driver.eswitch_mgr = mock.Mock() self.qos_driver.eswitch_mgr.set_device_max_rate = mock.Mock() self.qos_driver.eswitch_mgr.clear_max_rate = mock.Mock() self.max_rate_mock = self.qos_driver.eswitch_mgr.set_device_max_rate self.clear_max_rate_mock = self.qos_driver.eswitch_mgr.clear_max_rate self.rule = self._create_bw_limit_rule_obj() self.qos_policy = self._create_qos_policy_obj([self.rule]) self.port = self._create_fake_port(self.qos_policy.id) def _create_bw_limit_rule_obj(self): rule_obj = rule.QosBandwidthLimitRule() rule_obj.id = uuidutils.generate_uuid() rule_obj.max_kbps = 2 rule_obj.max_burst_kbps = 200 rule_obj.obj_reset_changes() return rule_obj def _create_qos_policy_obj(self, rules): policy_dict = {'id': uuidutils.generate_uuid(), 'tenant_id': uuidutils.generate_uuid(), 'name': 'test', 'description': 'test', 'shared': False, 'rules': rules} policy_obj = policy.QosPolicy(self.context, **policy_dict) policy_obj.obj_reset_changes() for policy_rule in policy_obj.rules: policy_rule.qos_policy_id = policy_obj.id policy_rule.obj_reset_changes() return policy_obj def _create_fake_port(self, qos_policy_id): return {'port_id': uuidutils.generate_uuid(), 'profile': {'pci_slot': self.PCI_SLOT}, 'device': self.ASSIGNED_MAC, qos_consts.QOS_POLICY_ID: qos_policy_id, 
'device_owner': uuidutils.generate_uuid()} def test_create_rule(self): self.qos_driver.create(self.port, self.qos_policy) self.max_rate_mock.assert_called_once_with( self.ASSIGNED_MAC, self.PCI_SLOT, self.rule.max_kbps) def test_update_rule(self): self.qos_driver.update(self.port, self.qos_policy) self.max_rate_mock.assert_called_once_with( self.ASSIGNED_MAC, self.PCI_SLOT, self.rule.max_kbps) def test_delete_rules_on_assigned_vf(self): self.qos_driver.delete(self.port, self.qos_policy) self.max_rate_mock.assert_called_once_with( self.ASSIGNED_MAC, self.PCI_SLOT, 0) def test_delete_rules_on_released_vf(self): del self.port['device_owner'] self.qos_driver.delete(self.port, self.qos_policy) self.clear_max_rate_mock.assert_called_once_with(self.PCI_SLOT) def test__set_vf_max_rate_captures_sriov_failure(self): self.max_rate_mock.side_effect = exceptions.SriovNicError() self.qos_driver._set_vf_max_rate(self.ASSIGNED_MAC, self.PCI_SLOT) def test__set_vf_max_rate_unknown_device(self): with mock.patch.object(self.qos_driver.eswitch_mgr, 'device_exists', return_value=False): self.qos_driver._set_vf_max_rate(self.ASSIGNED_MAC, self.PCI_SLOT) self.assertFalse(self.max_rate_mock.called) neutron-8.4.0/neutron/tests/unit/plugins/ml2/drivers/test_type_vxlan.py0000664000567000056710000001053513044372760027636 0ustar jenkinsjenkins00000000000000# Copyright (c) 2013 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from neutron.plugins.common import constants as p_const from neutron.plugins.ml2 import config from neutron.plugins.ml2.drivers import type_vxlan from neutron.tests.unit.plugins.ml2.drivers import base_type_tunnel from neutron.tests.unit.plugins.ml2 import test_rpc from neutron.tests.unit import testlib_api VXLAN_UDP_PORT_ONE = 9999 VXLAN_UDP_PORT_TWO = 8888 class VxlanTypeTest(base_type_tunnel.TunnelTypeTestMixin, testlib_api.SqlTestCase): DRIVER_MODULE = type_vxlan DRIVER_CLASS = type_vxlan.VxlanTypeDriver TYPE = p_const.TYPE_VXLAN def add_endpoint(self, ip=base_type_tunnel.TUNNEL_IP_ONE, host=base_type_tunnel.HOST_ONE): if ip == base_type_tunnel.TUNNEL_IP_ONE: port = VXLAN_UDP_PORT_ONE else: port = VXLAN_UDP_PORT_TWO return self.driver.add_endpoint(ip, host, port) def test_add_endpoint(self): endpoint = super(VxlanTypeTest, self).test_add_endpoint() self.assertEqual(VXLAN_UDP_PORT_ONE, endpoint.udp_port) def test_get_endpoint_by_host(self): endpoint = super(VxlanTypeTest, self).test_get_endpoint_by_host() self.assertEqual(VXLAN_UDP_PORT_ONE, endpoint.udp_port) def test_get_endpoint_by_ip(self): endpoint = super(VxlanTypeTest, self).test_get_endpoint_by_ip() self.assertEqual(VXLAN_UDP_PORT_ONE, endpoint.udp_port) def test_get_endpoints(self): self.add_endpoint() self.add_endpoint(base_type_tunnel.TUNNEL_IP_TWO, base_type_tunnel.HOST_TWO) endpoints = self.driver.get_endpoints() for endpoint in endpoints: if endpoint['ip_address'] == base_type_tunnel.TUNNEL_IP_ONE: self.assertEqual(VXLAN_UDP_PORT_ONE, endpoint['udp_port']) self.assertEqual(base_type_tunnel.HOST_ONE, endpoint['host']) elif endpoint['ip_address'] == base_type_tunnel.TUNNEL_IP_TWO: self.assertEqual(VXLAN_UDP_PORT_TWO, endpoint['udp_port']) self.assertEqual(base_type_tunnel.HOST_TWO, endpoint['host']) def test_get_mtu(self): config.cfg.CONF.set_override('global_physnet_mtu', 1500) config.cfg.CONF.set_override('path_mtu', 1475, group='ml2') self.driver.physnet_mtus = {'physnet1': 1450, 'physnet2': 1400} self.assertEqual(1475 - p_const.VXLAN_ENCAP_OVERHEAD, self.driver.get_mtu('physnet1')) config.cfg.CONF.set_override('global_physnet_mtu', 1450) config.cfg.CONF.set_override('path_mtu', 1475, group='ml2') self.driver.physnet_mtus = {'physnet1': 1400, 'physnet2': 1425} self.assertEqual(1450 - p_const.VXLAN_ENCAP_OVERHEAD, self.driver.get_mtu('physnet1')) config.cfg.CONF.set_override('global_physnet_mtu', 0) config.cfg.CONF.set_override('path_mtu', 1450, group='ml2') self.driver.physnet_mtus = {'physnet1': 1425, 'physnet2': 1400} self.assertEqual(1450 - p_const.VXLAN_ENCAP_OVERHEAD, self.driver.get_mtu('physnet1')) config.cfg.CONF.set_override('global_physnet_mtu', 0) config.cfg.CONF.set_override('path_mtu', 0, group='ml2') self.driver.physnet_mtus = {} self.assertEqual(0, self.driver.get_mtu('physnet1')) class VxlanTypeMultiRangeTest(base_type_tunnel.TunnelTypeMultiRangeTestMixin, testlib_api.SqlTestCase): DRIVER_CLASS = type_vxlan.VxlanTypeDriver class VxlanTypeRpcCallbackTest(base_type_tunnel.TunnelRpcCallbackTestMixin, test_rpc.RpcCallbacksTestCase, testlib_api.SqlTestCase): DRIVER_CLASS = type_vxlan.VxlanTypeDriver TYPE = p_const.TYPE_VXLAN neutron-8.4.0/neutron/tests/unit/plugins/ml2/drivers/test_type_flat.py0000664000567000056710000001624213044372760027435 0ustar jenkinsjenkins00000000000000# Copyright (c) 2014 Thales Services SAS # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron.common import exceptions as exc import neutron.db.api as db from neutron.plugins.common import constants as p_const from neutron.plugins.ml2 import config from neutron.plugins.ml2 import driver_api as api from neutron.plugins.ml2.drivers import type_flat from neutron.tests import base from neutron.tests.unit import testlib_api FLAT_NETWORKS = ['flat_net1', 'flat_net2'] class FlatTypeTest(testlib_api.SqlTestCase): def setUp(self): super(FlatTypeTest, self).setUp() config.cfg.CONF.set_override('flat_networks', FLAT_NETWORKS, group='ml2_type_flat') self.driver = type_flat.FlatTypeDriver() self.session = db.get_session() self.driver.physnet_mtus = [] def _get_allocation(self, session, segment): return session.query(type_flat.FlatAllocation).filter_by( physical_network=segment[api.PHYSICAL_NETWORK]).first() def test_is_partial_segment(self): segment = {api.NETWORK_TYPE: p_const.TYPE_FLAT, api.PHYSICAL_NETWORK: 'flat_net1'} self.assertFalse(self.driver.is_partial_segment(segment)) def test_validate_provider_segment(self): segment = {api.NETWORK_TYPE: p_const.TYPE_FLAT, api.PHYSICAL_NETWORK: 'flat_net1'} self.driver.validate_provider_segment(segment) def test_validate_provider_phynet_name(self): self.driver._parse_networks([]) segment = {api.NETWORK_TYPE: p_const.TYPE_FLAT, api.PHYSICAL_NETWORK: 'flat_net1'} self.assertRaises(exc.InvalidInput, self.driver.validate_provider_segment, segment=segment) def test_validate_provider_phynet_name_multiple(self): self.driver._parse_networks(['flat_net1', 'flat_net2']) segment = {api.NETWORK_TYPE: p_const.TYPE_FLAT, api.PHYSICAL_NETWORK: 'flat_net1'} self.driver.validate_provider_segment(segment) segment = {api.NETWORK_TYPE: p_const.TYPE_FLAT, api.PHYSICAL_NETWORK: 'flat_net2'} self.driver.validate_provider_segment(segment) def test_validate_provider_segment_without_physnet_restriction(self): self.driver._parse_networks('*') segment = {api.NETWORK_TYPE: p_const.TYPE_FLAT, api.PHYSICAL_NETWORK: 'other_flat_net'} self.driver.validate_provider_segment(segment) def test_validate_provider_segment_with_missing_physical_network(self): segment = {api.NETWORK_TYPE: p_const.TYPE_FLAT} self.assertRaises(exc.InvalidInput, self.driver.validate_provider_segment, segment) def test_validate_provider_segment_with_unsupported_physical_network(self): segment = {api.NETWORK_TYPE: p_const.TYPE_FLAT, api.PHYSICAL_NETWORK: 'other_flat_net'} self.assertRaises(exc.InvalidInput, self.driver.validate_provider_segment, segment) def test_validate_provider_segment_with_unallowed_segmentation_id(self): segment = {api.NETWORK_TYPE: p_const.TYPE_FLAT, api.PHYSICAL_NETWORK: 'flat_net1', api.SEGMENTATION_ID: 1234} self.assertRaises(exc.InvalidInput, self.driver.validate_provider_segment, segment) def test_reserve_provider_segment(self): segment = {api.NETWORK_TYPE: p_const.TYPE_FLAT, api.PHYSICAL_NETWORK: 'flat_net1'} observed = self.driver.reserve_provider_segment(self.session, segment) alloc = self._get_allocation(self.session, observed) self.assertEqual(segment[api.PHYSICAL_NETWORK], alloc.physical_network) def test_release_segment(self): segment = {api.NETWORK_TYPE: 
p_const.TYPE_FLAT, api.PHYSICAL_NETWORK: 'flat_net1'} self.driver.reserve_provider_segment(self.session, segment) self.driver.release_segment(self.session, segment) alloc = self._get_allocation(self.session, segment) self.assertIsNone(alloc) def test_reserve_provider_segment_already_reserved(self): segment = {api.NETWORK_TYPE: p_const.TYPE_FLAT, api.PHYSICAL_NETWORK: 'flat_net1'} self.driver.reserve_provider_segment(self.session, segment) self.assertRaises(exc.FlatNetworkInUse, self.driver.reserve_provider_segment, self.session, segment) def test_allocate_tenant_segment(self): observed = self.driver.allocate_tenant_segment(self.session) self.assertIsNone(observed) def test_get_mtu(self): config.cfg.CONF.set_override('global_physnet_mtu', 1475) config.cfg.CONF.set_override('path_mtu', 1400, group='ml2') self.driver.physnet_mtus = {'physnet1': 1450, 'physnet2': 1400} self.assertEqual(1450, self.driver.get_mtu('physnet1')) config.cfg.CONF.set_override('global_physnet_mtu', 1375) config.cfg.CONF.set_override('path_mtu', 1400, group='ml2') self.driver.physnet_mtus = {'physnet1': 1450, 'physnet2': 1400} self.assertEqual(1375, self.driver.get_mtu('physnet1')) config.cfg.CONF.set_override('global_physnet_mtu', 0) config.cfg.CONF.set_override('path_mtu', 1425, group='ml2') self.driver.physnet_mtus = {'physnet1': 1450, 'physnet2': 1400} self.assertEqual(1400, self.driver.get_mtu('physnet2')) config.cfg.CONF.set_override('global_physnet_mtu', 0) config.cfg.CONF.set_override('path_mtu', 0, group='ml2') self.driver.physnet_mtus = {} self.assertEqual(0, self.driver.get_mtu('physnet1')) def test_parse_physical_network_mtus(self): config.cfg.CONF.set_override( 'physical_network_mtus', ['physnet1:1500', 'physnet2:1500', 'physnet3:9000'], group='ml2') driver = type_flat.FlatTypeDriver() self.assertEqual('1500', driver.physnet_mtus['physnet1']) self.assertEqual('1500', driver.physnet_mtus['physnet2']) self.assertEqual('9000', driver.physnet_mtus['physnet3']) class FlatTypeDefaultTest(base.BaseTestCase): def setUp(self): super(FlatTypeDefaultTest, self).setUp() self.driver = type_flat.FlatTypeDriver() self.driver.physnet_mtus = [] def test_validate_provider_segment_default(self): segment = {api.NETWORK_TYPE: p_const.TYPE_FLAT, api.PHYSICAL_NETWORK: 'other_flat_net'} self.driver.validate_provider_segment(segment) neutron-8.4.0/neutron/tests/unit/plugins/ml2/drivers/openvswitch/0000775000567000056710000000000013044373210026370 5ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/tests/unit/plugins/ml2/drivers/openvswitch/__init__.py0000664000567000056710000000000013044372736030503 0ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/tests/unit/plugins/ml2/drivers/openvswitch/mech_driver/0000775000567000056710000000000013044373210030657 5ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/tests/unit/plugins/ml2/drivers/openvswitch/mech_driver/__init__.py0000664000567000056710000000000013044372736032772 0ustar jenkinsjenkins00000000000000././@LongLink0000000000000000000000000000014600000000000011216 Lustar 00000000000000neutron-8.4.0/neutron/tests/unit/plugins/ml2/drivers/openvswitch/mech_driver/test_mech_openvswitch.pyneutron-8.4.0/neutron/tests/unit/plugins/ml2/drivers/openvswitch/mech_driver/test_mech_openvswitch.p0000664000567000056710000001445213044372760035463 0ustar jenkinsjenkins00000000000000# Copyright (c) 2013 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import cfg from neutron.common import constants from neutron.extensions import portbindings from neutron.plugins.ml2 import driver_api as api from neutron.plugins.ml2.drivers.openvswitch.mech_driver \ import mech_openvswitch from neutron.tests.unit.plugins.ml2 import _test_mech_agent as base class OpenvswitchMechanismBaseTestCase(base.AgentMechanismBaseTestCase): VIF_TYPE = portbindings.VIF_TYPE_OVS VIF_DETAILS = {portbindings.CAP_PORT_FILTER: True, portbindings.OVS_HYBRID_PLUG: True} AGENT_TYPE = constants.AGENT_TYPE_OVS GOOD_MAPPINGS = {'fake_physical_network': 'fake_bridge'} GOOD_TUNNEL_TYPES = ['gre', 'vxlan'] GOOD_CONFIGS = {'bridge_mappings': GOOD_MAPPINGS, 'tunnel_types': GOOD_TUNNEL_TYPES} BAD_MAPPINGS = {'wrong_physical_network': 'wrong_bridge'} BAD_TUNNEL_TYPES = ['bad_tunnel_type'] BAD_CONFIGS = {'bridge_mappings': BAD_MAPPINGS, 'tunnel_types': BAD_TUNNEL_TYPES} AGENTS = [{'alive': True, 'configurations': GOOD_CONFIGS, 'host': 'host'}] AGENTS_DEAD = [{'alive': False, 'configurations': GOOD_CONFIGS, 'host': 'dead_host'}] AGENTS_BAD = [{'alive': False, 'configurations': GOOD_CONFIGS, 'host': 'bad_host_1'}, {'alive': True, 'configurations': BAD_CONFIGS, 'host': 'bad_host_2'}] def setUp(self): super(OpenvswitchMechanismBaseTestCase, self).setUp() cfg.CONF.set_override('firewall_driver', 'iptables_hybrid', 'SECURITYGROUP') self.driver = mech_openvswitch.OpenvswitchMechanismDriver() self.driver.initialize() class OpenvswitchMechanismSGDisabledBaseTestCase( OpenvswitchMechanismBaseTestCase): VIF_DETAILS = {portbindings.CAP_PORT_FILTER: False, portbindings.OVS_HYBRID_PLUG: False} def setUp(self): cfg.CONF.set_override('enable_security_group', False, group='SECURITYGROUP') super(OpenvswitchMechanismSGDisabledBaseTestCase, self).setUp() class OpenvswitchMechanismHybridPlugTestCase(OpenvswitchMechanismBaseTestCase): def _make_port_ctx(self, agents): segments = [{api.ID: 'local_segment_id', api.NETWORK_TYPE: 'local'}] return base.FakePortContext(self.AGENT_TYPE, agents, segments, vnic_type=self.VNIC_TYPE) def test_backward_compat_with_unreporting_agent(self): hybrid = portbindings.OVS_HYBRID_PLUG # agent didn't report so it should be hybrid based on server config context = self._make_port_ctx(self.AGENTS) self.driver.bind_port(context) self.assertTrue(context._bound_vif_details[hybrid]) self.driver.vif_details[hybrid] = False context = self._make_port_ctx(self.AGENTS) self.driver.bind_port(context) self.assertFalse(context._bound_vif_details[hybrid]) def test_hybrid_plug_true_if_agent_requests(self): hybrid = portbindings.OVS_HYBRID_PLUG # set server side default to false and ensure that hybrid becomes # true if requested by the agent self.driver.vif_details[hybrid] = False agents = [{'alive': True, 'configurations': {hybrid: True}, 'host': 'host'}] context = self._make_port_ctx(agents) self.driver.bind_port(context) self.assertTrue(context._bound_vif_details[hybrid]) def test_hybrid_plug_false_if_agent_requests(self): hybrid = portbindings.OVS_HYBRID_PLUG # set server side default to true and ensure that hybrid becomes # false if requested by the agent 
self.driver.vif_details[hybrid] = True agents = [{'alive': True, 'configurations': {hybrid: False}, 'host': 'host'}] context = self._make_port_ctx(agents) self.driver.bind_port(context) self.assertFalse(context._bound_vif_details[hybrid]) class OpenvswitchMechanismGenericTestCase(OpenvswitchMechanismBaseTestCase, base.AgentMechanismGenericTestCase): pass class OpenvswitchMechanismLocalTestCase(OpenvswitchMechanismBaseTestCase, base.AgentMechanismLocalTestCase): pass class OpenvswitchMechanismFlatTestCase(OpenvswitchMechanismBaseTestCase, base.AgentMechanismFlatTestCase): pass class OpenvswitchMechanismVlanTestCase(OpenvswitchMechanismBaseTestCase, base.AgentMechanismVlanTestCase): pass class OpenvswitchMechanismGreTestCase(OpenvswitchMechanismBaseTestCase, base.AgentMechanismGreTestCase): pass class OpenvswitchMechanismSGDisabledLocalTestCase( OpenvswitchMechanismSGDisabledBaseTestCase, base.AgentMechanismLocalTestCase): pass class OpenvswitchMechanismFirewallUndefinedTestCase( OpenvswitchMechanismBaseTestCase, base.AgentMechanismLocalTestCase): VIF_DETAILS = {portbindings.CAP_PORT_FILTER: True, portbindings.OVS_HYBRID_PLUG: True} def setUp(self): # this simple test case just ensures backward compatibility where # the server has no firewall driver configured, which should result # in hybrid plugging. super(OpenvswitchMechanismFirewallUndefinedTestCase, self).setUp() cfg.CONF.set_override('firewall_driver', '', 'SECURITYGROUP') self.driver = mech_openvswitch.OpenvswitchMechanismDriver() self.driver.initialize() neutron-8.4.0/neutron/tests/unit/plugins/ml2/drivers/openvswitch/agent/0000775000567000056710000000000013044373210027466 5ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/tests/unit/plugins/ml2/drivers/openvswitch/agent/openflow/0000775000567000056710000000000013044373210031317 5ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/tests/unit/plugins/ml2/drivers/openvswitch/agent/openflow/test_br_cookie.py0000664000567000056710000000452513044372760034703 0ustar jenkinsjenkins00000000000000# Copyright 2016 Intel Corporation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
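# --- Illustrative sketch; not part of the original test module --------------
# The cookie tests below pin down how an agent bridge tracks flow cookies.
# This is a minimal stand-in, assuming only the behaviour the tests assert;
# "CookieRegistrySketch" and its random 64-bit cookies are hypothetical, not
# the real OVSAgentBridge implementation.
import random


class CookieRegistrySketch(object):
    def __init__(self):
        # Every bridge starts with one random default cookie, already
        # counted as reserved.
        self.default_cookie = random.getrandbits(64)
        self._reserved = set([self.default_cookie])

    @property
    def reserved_cookies(self):
        return set(self._reserved)

    def request_cookie(self):
        # Hand out an extra cookie and remember it; the default stays put.
        cookie = random.getrandbits(64)
        self._reserved.add(cookie)
        return cookie

    def set_agent_uuid_stamp(self, cookie):
        # Swap the default: the old default is dropped from the reserved
        # set, the new one is kept (it may already be reserved if it came
        # from request_cookie()).
        self._reserved.discard(self.default_cookie)
        self.default_cookie = cookie
        self._reserved.add(cookie)
# -----------------------------------------------------------------------------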
from neutron.agent.common import ovs_lib from neutron.plugins.ml2.drivers.openvswitch.agent.openflow.ovs_ofctl \ import ovs_bridge from neutron.tests import base class TestBRCookieOpenflow(base.BaseTestCase): def setUp(self): super(TestBRCookieOpenflow, self).setUp() self.br = ovs_bridge.OVSAgentBridge('br-int') def test_reserved_cookies(self): def_cookie = self.br.default_cookie self.assertIn(def_cookie, self.br.reserved_cookies) def test_request_cookie(self): default_cookie = self.br.default_cookie requested_cookie = self.br.request_cookie() self.assertEqual(default_cookie, self.br.default_cookie) self.assertIn(default_cookie, self.br.reserved_cookies) self.assertIn(requested_cookie, self.br.reserved_cookies) def test_set_agent_uuid_stamp(self): self.br = ovs_bridge.OVSAgentBridge('br-int') def_cookie = self.br.default_cookie new_cookie = ovs_lib.generate_random_cookie() self.br.set_agent_uuid_stamp(new_cookie) self.assertEqual(new_cookie, self.br.default_cookie) self.assertIn(new_cookie, self.br.reserved_cookies) self.assertNotIn(def_cookie, self.br.reserved_cookies) def test_set_agent_uuid_stamp_with_reserved_cookie(self): self.br = ovs_bridge.OVSAgentBridge('br-int') def_cookie = self.br.default_cookie new_cookie = self.br.request_cookie() self.br.set_agent_uuid_stamp(new_cookie) self.assertEqual(new_cookie, self.br.default_cookie) self.assertIn(new_cookie, self.br.reserved_cookies) self.assertNotIn(def_cookie, self.br.reserved_cookies) self.assertEqual(set([new_cookie]), self.br.reserved_cookies) neutron-8.4.0/neutron/tests/unit/plugins/ml2/drivers/openvswitch/agent/openflow/ovs_ofctl/0000775000567000056710000000000013044373210033315 5ustar jenkinsjenkins00000000000000././@LongLink0000000000000000000000000000014600000000000011216 Lustar 00000000000000neutron-8.4.0/neutron/tests/unit/plugins/ml2/drivers/openvswitch/agent/openflow/ovs_ofctl/__init__.pyneutron-8.4.0/neutron/tests/unit/plugins/ml2/drivers/openvswitch/agent/openflow/ovs_ofctl/__init__.p0000664000567000056710000000000013044372736035237 0ustar jenkinsjenkins00000000000000././@LongLink0000000000000000000000000000015200000000000011213 Lustar 00000000000000neutron-8.4.0/neutron/tests/unit/plugins/ml2/drivers/openvswitch/agent/openflow/ovs_ofctl/test_br_phys.pyneutron-8.4.0/neutron/tests/unit/plugins/ml2/drivers/openvswitch/agent/openflow/ovs_ofctl/test_br_ph0000664000567000056710000000704513044372736035413 0ustar jenkinsjenkins00000000000000# Copyright (C) 2014,2015 VA Linux Systems Japan K.K. # Copyright (C) 2014,2015 YAMAMOTO Takashi # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
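# --- Illustrative sketch; not part of the original test module --------------
# br-phys translates the local VLAN id back to the provider segmentation id
# on egress.  A tiny helper, assuming only the two flow shapes asserted in
# test_provision_local_vlan and test_provision_local_vlan_novlan below, that
# rebuilds the expected ovs-ofctl action string:
def _expected_phys_vlan_actions(segmentation_id):
    if segmentation_id is not None:
        # Tagged provider network: rewrite the local vid to the provider vid.
        return 'mod_vlan_vid:%s,normal' % segmentation_id
    # Flat provider network: there is no provider tag, so strip the local one.
    return 'strip_vlan,normal'


assert _expected_phys_vlan_actions(777) == 'mod_vlan_vid:777,normal'
assert _expected_phys_vlan_actions(None) == 'strip_vlan,normal'
# -----------------------------------------------------------------------------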
import mock import neutron.plugins.ml2.drivers.openvswitch.agent.common.constants \ as ovs_const from neutron.tests.unit.plugins.ml2.drivers.openvswitch.agent.\ openflow.ovs_ofctl import ovs_bridge_test_base call = mock.call # short hand class OVSPhysicalBridgeTest(ovs_bridge_test_base.OVSBridgeTestBase, ovs_bridge_test_base.OVSDVRProcessTestMixin): dvr_process_table_id = ovs_const.DVR_PROCESS_VLAN dvr_process_next_table_id = ovs_const.LOCAL_VLAN_TRANSLATION def setUp(self): super(OVSPhysicalBridgeTest, self).setUp() self.setup_bridge_mock('br-phys', self.br_phys_cls) def test_setup_default_table(self): self.br.setup_default_table() expected = [ call.add_flow(priority=0, table=0, actions='normal'), ] self.assertEqual(expected, self.mock.mock_calls) def test_provision_local_vlan(self): port = 999 lvid = 888 segmentation_id = 777 distributed = False self.br.provision_local_vlan(port=port, lvid=lvid, segmentation_id=segmentation_id, distributed=distributed) expected = [ call.add_flow(priority=4, table=0, dl_vlan=lvid, in_port=port, actions='mod_vlan_vid:%s,normal' % segmentation_id), ] self.assertEqual(expected, self.mock.mock_calls) def test_provision_local_vlan_novlan(self): port = 999 lvid = 888 segmentation_id = None distributed = False self.br.provision_local_vlan(port=port, lvid=lvid, segmentation_id=segmentation_id, distributed=distributed) expected = [ call.add_flow(priority=4, table=0, dl_vlan=lvid, in_port=port, actions='strip_vlan,normal') ] self.assertEqual(expected, self.mock.mock_calls) def test_reclaim_local_vlan(self): port = 999 lvid = 888 self.br.reclaim_local_vlan(port=port, lvid=lvid) expected = [ call.delete_flows(dl_vlan=lvid, in_port=port), ] self.assertEqual(expected, self.mock.mock_calls) def test_add_dvr_mac_vlan(self): mac = '00:02:b3:13:fe:3d' port = 8888 self.br.add_dvr_mac_vlan(mac=mac, port=port) expected = [ call.add_flow(priority=2, table=3, dl_src=mac, actions='output:%s' % port), ] self.assertEqual(expected, self.mock.mock_calls) def test_remove_dvr_mac_vlan(self): mac = '00:02:b3:13:fe:3d' self.br.remove_dvr_mac_vlan(mac=mac) expected = [ call.delete_flows(eth_src=mac, table_id=3), ] self.assertEqual(expected, self.mock.mock_calls) ././@LongLink0000000000000000000000000000016200000000000011214 Lustar 00000000000000neutron-8.4.0/neutron/tests/unit/plugins/ml2/drivers/openvswitch/agent/openflow/ovs_ofctl/ovs_bridge_test_base.pyneutron-8.4.0/neutron/tests/unit/plugins/ml2/drivers/openvswitch/agent/openflow/ovs_ofctl/ovs_bridge0000664000567000056710000001566013044372760035404 0ustar jenkinsjenkins00000000000000# Copyright (C) 2014,2015 VA Linux Systems Japan K.K. # Copyright (C) 2014,2015 YAMAMOTO Takashi # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
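# --- Illustrative sketch; not part of the original test module --------------
# setup_bridge_mock() below attaches several per-method mocks to one parent
# Mock so that a single mock_calls list records add_flow/mod_flow/
# delete_flows invocations in order.  A self-contained demo of that pattern;
# the Bridge class here is a hypothetical stand-in:
import mock


def _demo_attach_mock_pattern():
    class Bridge(object):
        def add_flow(self, **kwargs):
            pass

        def delete_flows(self, **kwargs):
            pass

    br = Bridge()
    parent = mock.Mock()
    parent.attach_mock(mock.patch.object(br, 'add_flow').start(), 'add_flow')
    parent.attach_mock(mock.patch.object(br, 'delete_flows').start(),
                       'delete_flows')
    br.add_flow(table=0, actions='normal')
    br.delete_flows(table=0)
    # One ordered record spanning both mocked methods:
    assert parent.mock_calls == [
        mock.call.add_flow(table=0, actions='normal'),
        mock.call.delete_flows(table=0),
    ]
    mock.patch.stopall()
# -----------------------------------------------------------------------------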
import mock from neutron.common import constants from neutron.tests.unit.plugins.ml2.drivers.openvswitch.agent \ import ovs_test_base call = mock.call # short hand class OVSBridgeTestBase(ovs_test_base.OVSOFCtlTestBase): def setup_bridge_mock(self, name, cls): self.br = cls(name) mock_add_flow = mock.patch.object(self.br, 'add_flow').start() mock_mod_flow = mock.patch.object(self.br, 'mod_flow').start() mock_delete_flows = mock.patch.object(self.br, 'delete_flows').start() self.mock = mock.Mock() self.mock.attach_mock(mock_add_flow, 'add_flow') self.mock.attach_mock(mock_mod_flow, 'mod_flow') self.mock.attach_mock(mock_delete_flows, 'delete_flows') def test_drop_port(self): in_port = 2345 self.br.drop_port(in_port=in_port) expected = [ call.add_flow(priority=2, table=0, actions='drop', in_port=in_port), ] self.assertEqual(expected, self.mock.mock_calls) def test_install_goto(self): dest_table_id = 123 priority = 99 in_port = 666 self.br.install_goto(dest_table_id=dest_table_id, priority=priority, in_port=in_port) expected = [ call.add_flow(priority=priority, table=0, actions='resubmit(,%s)' % dest_table_id, in_port=in_port), ] self.assertEqual(expected, self.mock.mock_calls) def test_install_drop(self): priority = 99 in_port = 666 self.br.install_drop(priority=priority, in_port=in_port) expected = [ call.add_flow(priority=priority, table=0, actions='drop', in_port=in_port), ] self.assertEqual(expected, self.mock.mock_calls) def test_install_normal(self): priority = 99 in_port = 666 self.br.install_normal(priority=priority, in_port=in_port) expected = [ call.add_flow(priority=priority, table=0, actions='normal', in_port=in_port), ] self.assertEqual(expected, self.mock.mock_calls) def test_dump_flows_for_table(self): table = 23 with mock.patch.object(self.br, 'run_ofctl') as run_ofctl: self.br.dump_flows(table) run_ofctl.assert_has_calls([mock.call("dump-flows", mock.ANY)]) def test_dump_all_flows(self): with mock.patch.object(self.br, 'run_ofctl') as run_ofctl: self.br.dump_flows_all_tables() run_ofctl.assert_has_calls([mock.call("dump-flows", [])]) class OVSDVRProcessTestMixin(object): def test_install_dvr_process_ipv4(self): vlan_tag = 999 gateway_ip = '192.0.2.1' self.br.install_dvr_process_ipv4(vlan_tag=vlan_tag, gateway_ip=gateway_ip) expected = [ call.add_flow(table=self.dvr_process_table_id, proto='arp', nw_dst=gateway_ip, actions='drop', priority=3, dl_vlan=vlan_tag), ] self.assertEqual(expected, self.mock.mock_calls) def test_delete_dvr_process_ipv4(self): vlan_tag = 999 gateway_ip = '192.0.2.1' self.br.delete_dvr_process_ipv4(vlan_tag=vlan_tag, gateway_ip=gateway_ip) expected = [ call.delete_flows(table=self.dvr_process_table_id, dl_vlan=vlan_tag, proto='arp', nw_dst=gateway_ip), ] self.assertEqual(expected, self.mock.mock_calls) def test_install_dvr_process_ipv6(self): vlan_tag = 999 gateway_mac = '08:60:6e:7f:74:e7' self.br.install_dvr_process_ipv6(vlan_tag=vlan_tag, gateway_mac=gateway_mac) expected = [ call.add_flow(table=self.dvr_process_table_id, proto='icmp6', dl_src=gateway_mac, actions='drop', priority=3, dl_vlan=vlan_tag, icmp_type=constants.ICMPV6_TYPE_RA), ] self.assertEqual(expected, self.mock.mock_calls) def test_delete_dvr_process_ipv6(self): vlan_tag = 999 gateway_mac = '08:60:6e:7f:74:e7' self.br.delete_dvr_process_ipv6(vlan_tag=vlan_tag, gateway_mac=gateway_mac) expected = [ call.delete_flows(table=self.dvr_process_table_id, dl_vlan=vlan_tag, dl_src=gateway_mac, proto='icmp6', icmp_type=constants.ICMPV6_TYPE_RA), ] self.assertEqual(expected, self.mock.mock_calls) 
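    # The two tests that follow pin down the DVR "process" flow pair.  For
    # vlan_tag=999, the expected flows, written roughly as ovs-ofctl
    # strings, would be:
    #   priority=2,dl_vlan=999,dl_dst=<vif_mac> actions=drop
    #   priority=1,dl_vlan=999,dl_src=<vif_mac>
    #       actions=mod_dl_src:<dvr_mac>,resubmit(,<next_table>)
    # i.e. frames addressed to the router port's own MAC are dropped here,
    # while frames sourced from it get the per-node DVR MAC before moving on.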
def test_install_dvr_process(self): vlan_tag = 999 vif_mac = '00:0e:0c:5e:95:d0' dvr_mac_address = 'f2:0b:a4:5b:b2:ab' self.br.install_dvr_process(vlan_tag=vlan_tag, vif_mac=vif_mac, dvr_mac_address=dvr_mac_address) expected = [ call.add_flow(priority=2, table=self.dvr_process_table_id, dl_dst=vif_mac, dl_vlan=vlan_tag, actions='drop'), call.add_flow(priority=1, table=self.dvr_process_table_id, dl_vlan=vlan_tag, dl_src=vif_mac, actions='mod_dl_src:%(mac)s,resubmit(,%(next)s)' % { 'mac': dvr_mac_address, 'next': self.dvr_process_next_table_id, }), ] self.assertEqual(expected, self.mock.mock_calls) def test_delete_dvr_process(self): vlan_tag = 999 vif_mac = '00:0e:0c:5e:95:d0' self.br.delete_dvr_process(vlan_tag=vlan_tag, vif_mac=vif_mac) expected = [ call.delete_flows(table=self.dvr_process_table_id, dl_dst=vif_mac, dl_vlan=vlan_tag), call.delete_flows(table=self.dvr_process_table_id, dl_vlan=vlan_tag, dl_src=vif_mac), ] self.assertEqual(expected, self.mock.mock_calls) ././@LongLink0000000000000000000000000000015100000000000011212 Lustar 00000000000000neutron-8.4.0/neutron/tests/unit/plugins/ml2/drivers/openvswitch/agent/openflow/ovs_ofctl/test_br_int.pyneutron-8.4.0/neutron/tests/unit/plugins/ml2/drivers/openvswitch/agent/openflow/ovs_ofctl/test_br_in0000664000567000056710000002275413044372760035413 0ustar jenkinsjenkins00000000000000# Copyright (C) 2014,2015 VA Linux Systems Japan K.K. # Copyright (C) 2014,2015 YAMAMOTO Takashi # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
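# --- Illustrative sketch; not part of the original test module --------------
# The integration-bridge tests below assert the ARP anti-spoofing layout:
# table 0 funnels all ARP traffic from the port into table 24, and table 24
# resubmits to table 25 only when the sender IP is one of the port's allowed
# addresses.  A minimal generator of the expected add_flow() kwargs, assuming
# just the shapes asserted in test_install_arp_spoofing_protection:
def _expected_arp_protection_flows(port, ip_addresses):
    flows = [dict(proto='arp', actions='resubmit(,25)', arp_spa=ip,
                  priority=2, table=24, in_port=port)
             for ip in ip_addresses]
    # The catch-all that steers ARP from this port into the check table:
    flows.append(dict(priority=10, table=0, in_port=port,
                      actions='resubmit(,24)', proto='arp'))
    return flows
# -----------------------------------------------------------------------------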
import mock from neutron.common import constants as const from neutron.tests.unit.plugins.ml2.drivers.openvswitch.agent.\ openflow.ovs_ofctl import ovs_bridge_test_base call = mock.call # short hand class OVSIntegrationBridgeTest(ovs_bridge_test_base.OVSBridgeTestBase): def setUp(self): super(OVSIntegrationBridgeTest, self).setUp() self.setup_bridge_mock('br-int', self.br_int_cls) def test_setup_default_table(self): self.br.setup_default_table() expected = [ call.add_flow(priority=0, table=0, actions='normal'), call.add_flow(priority=0, table=23, actions='drop'), call.add_flow(priority=0, table=24, actions='drop'), ] self.assertEqual(expected, self.mock.mock_calls) def test_provision_local_vlan(self): port = 999 lvid = 888 segmentation_id = 777 self.br.provision_local_vlan(port=port, lvid=lvid, segmentation_id=segmentation_id) expected = [ call.add_flow(priority=3, dl_vlan=segmentation_id, in_port=port, actions='mod_vlan_vid:%s,normal' % lvid), ] self.assertEqual(expected, self.mock.mock_calls) def test_provision_local_vlan_novlan(self): port = 999 lvid = 888 segmentation_id = None self.br.provision_local_vlan(port=port, lvid=lvid, segmentation_id=segmentation_id) expected = [ call.add_flow(priority=3, dl_vlan=0xffff, in_port=port, actions='mod_vlan_vid:%s,normal' % lvid), ] self.assertEqual(expected, self.mock.mock_calls) def test_reclaim_local_vlan(self): port = 999 segmentation_id = 777 self.br.reclaim_local_vlan(port=port, segmentation_id=segmentation_id) expected = [ call.delete_flows(dl_vlan=segmentation_id, in_port=port), ] self.assertEqual(expected, self.mock.mock_calls) def test_reclaim_local_vlan_novlan(self): port = 999 segmentation_id = None self.br.reclaim_local_vlan(port=port, segmentation_id=segmentation_id) expected = [ call.delete_flows(dl_vlan=0xffff, in_port=port), ] self.assertEqual(expected, self.mock.mock_calls) def test_install_dvr_to_src_mac(self): network_type = 'vxlan' vlan_tag = 1111 gateway_mac = '08:60:6e:7f:74:e7' dst_mac = '00:02:b3:13:fe:3d' dst_port = 6666 self.br.install_dvr_to_src_mac(network_type=network_type, vlan_tag=vlan_tag, gateway_mac=gateway_mac, dst_mac=dst_mac, dst_port=dst_port) expected = [ call.add_flow(priority=4, table=1, dl_dst=dst_mac, dl_vlan=vlan_tag, actions='strip_vlan,mod_dl_src:%(mac)s,' 'output:%(port)s' % { 'mac': gateway_mac, 'port': dst_port, }), ] self.assertEqual(expected, self.mock.mock_calls) def test_delete_dvr_to_src_mac(self): network_type = 'vxlan' vlan_tag = 1111 dst_mac = '00:02:b3:13:fe:3d' self.br.delete_dvr_to_src_mac(network_type=network_type, vlan_tag=vlan_tag, dst_mac=dst_mac) expected = [ call.delete_flows(table=1, dl_dst=dst_mac, dl_vlan=vlan_tag), ] self.assertEqual(expected, self.mock.mock_calls) def test_install_dvr_to_src_mac_vlan(self): network_type = 'vlan' vlan_tag = 1111 gateway_mac = '08:60:6e:7f:74:e7' dst_mac = '00:02:b3:13:fe:3d' dst_port = 6666 self.br.install_dvr_to_src_mac(network_type=network_type, vlan_tag=vlan_tag, gateway_mac=gateway_mac, dst_mac=dst_mac, dst_port=dst_port) expected = [ call.add_flow(priority=4, table=2, dl_dst=dst_mac, dl_vlan=vlan_tag, actions='strip_vlan,mod_dl_src:%(mac)s,' 'output:%(port)s' % { 'mac': gateway_mac, 'port': dst_port, }), ] self.assertEqual(expected, self.mock.mock_calls) def test_delete_dvr_to_src_mac_vlan(self): network_type = 'vlan' vlan_tag = 1111 dst_mac = '00:02:b3:13:fe:3d' self.br.delete_dvr_to_src_mac(network_type=network_type, vlan_tag=vlan_tag, dst_mac=dst_mac) expected = [ call.delete_flows(table=2, dl_dst=dst_mac, dl_vlan=vlan_tag), ] 
self.assertEqual(expected, self.mock.mock_calls) def test_add_dvr_mac_vlan(self): mac = '00:02:b3:13:fe:3d' port = 8888 self.br.add_dvr_mac_vlan(mac=mac, port=port) expected = [ call.add_flow(priority=4, table=0, actions='resubmit(,2)', dl_src=mac, in_port=port), ] self.assertEqual(expected, self.mock.mock_calls) def test_remove_dvr_mac_vlan(self): mac = '00:02:b3:13:fe:3d' self.br.remove_dvr_mac_vlan(mac=mac) expected = [ call.delete_flows(eth_src=mac, table_id=0), ] self.assertEqual(expected, self.mock.mock_calls) def test_add_dvr_mac_tun(self): mac = '00:02:b3:13:fe:3d' port = 8888 self.br.add_dvr_mac_tun(mac=mac, port=port) expected = [ call.add_flow(priority=2, table=0, actions='resubmit(,1)', dl_src=mac, in_port=port), ] self.assertEqual(expected, self.mock.mock_calls) def test_remove_dvr_mac_tun(self): mac = '00:02:b3:13:fe:3d' port = 8888 self.br.remove_dvr_mac_tun(mac=mac, port=port) expected = [ call.delete_flows(eth_src=mac, table_id=0, in_port=port), ] self.assertEqual(expected, self.mock.mock_calls) def test_install_icmpv6_na_spoofing_protection(self): port = 8888 ip_addresses = ['2001:db8::1', 'fdf8:f53b:82e4::1/128'] self.br.install_icmpv6_na_spoofing_protection(port, ip_addresses) expected = [ call.add_flow(dl_type=const.ETHERTYPE_IPV6, actions='normal', icmp_type=const.ICMPV6_TYPE_NA, nw_proto=const.PROTO_NUM_IPV6_ICMP, nd_target='2001:db8::1', priority=2, table=24, in_port=8888), call.add_flow(dl_type=const.ETHERTYPE_IPV6, actions='normal', icmp_type=const.ICMPV6_TYPE_NA, nw_proto=const.PROTO_NUM_IPV6_ICMP, nd_target='fdf8:f53b:82e4::1/128', priority=2, table=24, in_port=8888), call.add_flow(dl_type=const.ETHERTYPE_IPV6, icmp_type=const.ICMPV6_TYPE_NA, nw_proto=const.PROTO_NUM_IPV6_ICMP, priority=10, table=0, in_port=8888, actions='resubmit(,24)') ] self.assertEqual(expected, self.mock.mock_calls) def test_install_arp_spoofing_protection(self): port = 8888 ip_addresses = ['192.0.2.1', '192.0.2.2/32'] self.br.install_arp_spoofing_protection(port, ip_addresses) expected = [ call.add_flow(proto='arp', actions='resubmit(,25)', arp_spa='192.0.2.1', priority=2, table=24, in_port=8888), call.add_flow(proto='arp', actions='resubmit(,25)', arp_spa='192.0.2.2/32', priority=2, table=24, in_port=8888), call.add_flow(priority=10, table=0, in_port=8888, actions='resubmit(,24)', proto='arp') ] self.assertEqual(expected, self.mock.mock_calls) def test_delete_arp_spoofing_protection(self): port = 8888 self.br.delete_arp_spoofing_protection(port) expected = [ call.delete_flows(table_id=0, in_port=8888, proto='arp'), call.delete_flows(table_id=0, in_port=8888, icmp_type=136, nw_proto=58), call.delete_flows(table_id=24, in_port=8888), ] self.assertEqual(expected, self.mock.mock_calls) ././@LongLink0000000000000000000000000000015100000000000011212 Lustar 00000000000000neutron-8.4.0/neutron/tests/unit/plugins/ml2/drivers/openvswitch/agent/openflow/ovs_ofctl/test_br_tun.pyneutron-8.4.0/neutron/tests/unit/plugins/ml2/drivers/openvswitch/agent/openflow/ovs_ofctl/test_br_tu0000664000567000056710000003277013044372736035437 0ustar jenkinsjenkins00000000000000# Copyright (C) 2014,2015 VA Linux Systems Japan K.K. # Copyright (C) 2014,2015 YAMAMOTO Takashi # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import mock import netaddr import neutron.plugins.ml2.drivers.openvswitch.agent.common.constants \ as ovs_const from neutron.tests.unit.plugins.ml2.drivers.openvswitch.agent.\ openflow.ovs_ofctl import ovs_bridge_test_base call = mock.call # short hand class OVSTunnelBridgeTest(ovs_bridge_test_base.OVSBridgeTestBase, ovs_bridge_test_base.OVSDVRProcessTestMixin): dvr_process_table_id = ovs_const.DVR_PROCESS dvr_process_next_table_id = ovs_const.PATCH_LV_TO_TUN def setUp(self): super(OVSTunnelBridgeTest, self).setUp() self.setup_bridge_mock('br-tun', self.br_tun_cls) self.stamp = self.br.default_cookie def test_setup_default_table(self): patch_int_ofport = 5555 mock_do_action_flows = mock.patch.object(self.br, 'do_action_flows').start() self.mock.attach_mock(mock_do_action_flows, 'do_action_flows') self.br.setup_default_table(patch_int_ofport=patch_int_ofport, arp_responder_enabled=False) flow_args = [{'priority': 1, 'in_port': patch_int_ofport, 'actions': 'resubmit(,2)'}, {'priority': 0, 'actions': 'drop'}, {'priority': 0, 'table': 2, 'dl_dst': '00:00:00:00:00:00/01:00:00:00:00:00', 'actions': 'resubmit(,20)'}, {'priority': 0, 'table': 2, 'dl_dst': '01:00:00:00:00:00/01:00:00:00:00:00', 'actions': 'resubmit(,22)'}, {'priority': 0, 'table': 3, 'actions': 'drop'}, {'priority': 0, 'table': 4, 'actions': 'drop'}, {'priority': 0, 'table': 6, 'actions': 'drop'}, {'priority': 1, 'table': 10, 'actions': 'learn(cookie=' + str(self.stamp) + ',table=20,priority=1,hard_timeout=300,' 'NXM_OF_VLAN_TCI[0..11],' 'NXM_OF_ETH_DST[]=NXM_OF_ETH_SRC[],' 'load:0->NXM_OF_VLAN_TCI[],' 'load:NXM_NX_TUN_ID[]->NXM_NX_TUN_ID[],' 'output:NXM_OF_IN_PORT[]),' 'output:%s' % patch_int_ofport}, {'priority': 0, 'table': 20, 'actions': 'resubmit(,22)'} ] expected = [call.do_action_flows('add', flow_args), call.add_flow(priority=0, table=22, actions='drop')] self.assertEqual(expected, self.mock.mock_calls) def test_setup_default_table_arp_responder_enabled(self): patch_int_ofport = 5555 mock_do_action_flows = mock.patch.object(self.br, 'do_action_flows').start() self.mock.attach_mock(mock_do_action_flows, 'do_action_flows') self.br.setup_default_table(patch_int_ofport=patch_int_ofport, arp_responder_enabled=True) flow_args = [{'priority': 1, 'in_port': patch_int_ofport, 'actions': 'resubmit(,2)'}, {'priority': 0, 'actions': 'drop'}, {'priority': 1, 'table': 2, 'dl_dst': 'ff:ff:ff:ff:ff:ff', 'actions': 'resubmit(,21)', 'proto': 'arp'}, {'priority': 0, 'table': 2, 'dl_dst': '00:00:00:00:00:00/01:00:00:00:00:00', 'actions': 'resubmit(,20)'}, {'priority': 0, 'table': 2, 'dl_dst': '01:00:00:00:00:00/01:00:00:00:00:00', 'actions': 'resubmit(,22)'}, {'priority': 0, 'table': 3, 'actions': 'drop'}, {'priority': 0, 'table': 4, 'actions': 'drop'}, {'priority': 0, 'table': 6, 'actions': 'drop'}, {'priority': 1, 'table': 10, 'actions': 'learn(cookie=' + str(self.stamp) + ',table=20,priority=1,hard_timeout=300,' 'NXM_OF_VLAN_TCI[0..11],' 'NXM_OF_ETH_DST[]=NXM_OF_ETH_SRC[],' 'load:0->NXM_OF_VLAN_TCI[],' 'load:NXM_NX_TUN_ID[]->NXM_NX_TUN_ID[],' 'output:NXM_OF_IN_PORT[]),' 'output:%s' % patch_int_ofport}, {'priority': 0, 'table': 20, 'actions': 
'resubmit(,22)'}, {'priority': 0, 'table': 21, 'actions': 'resubmit(,22)'} ] expected = [call.do_action_flows('add', flow_args), call.add_flow(priority=0, table=22, actions='drop')] self.assertEqual(expected, self.mock.mock_calls) def test_provision_local_vlan(self): network_type = 'vxlan' lvid = 888 segmentation_id = 777 distributed = False self.br.provision_local_vlan(network_type=network_type, lvid=lvid, segmentation_id=segmentation_id, distributed=distributed) expected = [ call.add_flow(priority=1, tun_id=segmentation_id, actions='mod_vlan_vid:%s,resubmit(,10)' % lvid, table=4), ] self.assertEqual(expected, self.mock.mock_calls) def test_reclaim_local_vlan(self): network_type = 'vxlan' segmentation_id = 777 self.br.reclaim_local_vlan(network_type=network_type, segmentation_id=segmentation_id) expected = [ call.delete_flows(tun_id=segmentation_id, table=4), ] self.assertEqual(expected, self.mock.mock_calls) def test_install_flood_to_tun(self): vlan = 3333 tun_id = 2222 ports = [11, 44, 22, 33] self.br.install_flood_to_tun(vlan=vlan, tun_id=tun_id, ports=ports) expected = [ call.mod_flow(table=22, dl_vlan=vlan, actions='strip_vlan,set_tunnel:%(tun)s,' 'output:%(ports)s' % { 'tun': tun_id, 'ports': ','.join(map(str, ports)), }), ] self.assertEqual(expected, self.mock.mock_calls) def test_delete_flood_to_tun(self): vlan = 3333 self.br.delete_flood_to_tun(vlan=vlan) expected = [ call.delete_flows(table=22, dl_vlan=vlan), ] self.assertEqual(expected, self.mock.mock_calls) def test_install_unicast_to_tun(self): vlan = 3333 port = 55 mac = '08:60:6e:7f:74:e7' tun_id = 2222 self.br.install_unicast_to_tun(vlan=vlan, tun_id=tun_id, port=port, mac=mac) expected = [ call.add_flow(priority=2, table=20, dl_dst=mac, dl_vlan=vlan, actions='strip_vlan,set_tunnel:%(tun)s,' 'output:%(port)s' % { 'tun': tun_id, 'port': port, }), ] self.assertEqual(expected, self.mock.mock_calls) def test_delete_unicast_to_tun(self): vlan = 3333 mac = '08:60:6e:7f:74:e7' self.br.delete_unicast_to_tun(vlan=vlan, mac=mac) expected = [ call.delete_flows(table=20, dl_dst=mac, dl_vlan=vlan), ] self.assertEqual(expected, self.mock.mock_calls) def test_delete_unicast_to_tun_without_mac(self): vlan = 3333 mac = None self.br.delete_unicast_to_tun(vlan=vlan, mac=mac) expected = [ call.delete_flows(table=20, dl_vlan=vlan), ] self.assertEqual(expected, self.mock.mock_calls) def test_install_arp_responder(self): vlan = 3333 ip = '192.0.2.1' mac = '08:60:6e:7f:74:e7' self.br.install_arp_responder(vlan=vlan, ip=ip, mac=mac) expected = [ call.add_flow(proto='arp', nw_dst=ip, actions='move:NXM_OF_ETH_SRC[]->NXM_OF_ETH_DST[],' 'mod_dl_src:%(mac)s,load:0x2->NXM_OF_ARP_OP[],' 'move:NXM_NX_ARP_SHA[]->NXM_NX_ARP_THA[],' 'move:NXM_OF_ARP_SPA[]->NXM_OF_ARP_TPA[],' 'load:%(mac)#x->NXM_NX_ARP_SHA[],' 'load:%(ip)#x->NXM_OF_ARP_SPA[],in_port' % { 'mac': netaddr.EUI(mac, dialect=netaddr.mac_unix), 'ip': netaddr.IPAddress(ip), }, priority=1, table=21, dl_vlan=vlan), ] self.assertEqual(expected, self.mock.mock_calls) def test_delete_arp_responder(self): vlan = 3333 ip = '192.0.2.1' self.br.delete_arp_responder(vlan=vlan, ip=ip) expected = [ call.delete_flows(table=21, dl_vlan=vlan, proto='arp', nw_dst=ip), ] self.assertEqual(expected, self.mock.mock_calls) def test_delete_arp_responder_without_ip(self): vlan = 3333 ip = None self.br.delete_arp_responder(vlan=vlan, ip=ip) expected = [ call.delete_flows(table=21, dl_vlan=vlan, proto='arp'), ] self.assertEqual(expected, self.mock.mock_calls) def test_setup_tunnel_port(self): network_type = 'vxlan' port = 
11111 self.br.setup_tunnel_port(network_type=network_type, port=port) expected = [ call.add_flow(priority=1, in_port=port, actions='resubmit(,4)'), ] self.assertEqual(expected, self.mock.mock_calls) def test_cleanup_tunnel_port(self): port = 11111 self.br.cleanup_tunnel_port(port=port) expected = [ call.delete_flows(in_port=port), ] self.assertEqual(expected, self.mock.mock_calls) def test_add_dvr_mac_tun(self): mac = '00:02:b3:13:fe:3d' port = 8888 self.br.add_dvr_mac_tun(mac=mac, port=port) expected = [ call.add_flow(priority=1, table=9, dl_src=mac, actions='output:%s' % port), ] self.assertEqual(expected, self.mock.mock_calls) def test_remove_dvr_mac_tun(self): mac = '00:02:b3:13:fe:3d' self.br.remove_dvr_mac_tun(mac=mac) expected = [ call.delete_flows(eth_src=mac, table_id=9), ] self.assertEqual(expected, self.mock.mock_calls) def _mock_add_tunnel_port(self, deferred_br=False): port_name = 'fake_port' remote_ip = '192.168.1.3' local_ip = '192.168.1.2' tunnel_type = 'vxlan' vxlan_udp_port = '4789' dont_fragment = True if deferred_br: with mock.patch('neutron.agent.common.ovs_lib.OVSBridge.add_port', return_value=9999) as add_port, \ self.br.deferred() as deferred_br: ofport = deferred_br.add_tunnel_port(port_name, remote_ip, local_ip, tunnel_type, vxlan_udp_port, dont_fragment) else: with mock.patch('neutron.agent.common.ovs_lib.OVSBridge.add_port', return_value=9999) as add_port: ofport = self.br.add_tunnel_port(port_name, remote_ip, local_ip, tunnel_type, vxlan_udp_port, dont_fragment) self.assertEqual(9999, ofport) self.assertEqual(1, add_port.call_count) self.assertEqual(port_name, add_port.call_args[0][0]) def _mock_delete_port(self, deferred_br=False): port_name = 'fake_port' if deferred_br: with mock.patch('neutron.agent.common.ovs_lib.OVSBridge.' 'delete_port') as delete_port, \ self.br.deferred() as deferred_br: deferred_br.delete_port(port_name) else: with mock.patch('neutron.agent.common.ovs_lib.OVSBridge.' 'delete_port') as delete_port: self.br.delete_port(port_name) self.assertEqual([call(port_name)], delete_port.mock_calls) def test_add_tunnel_port(self): self._mock_add_tunnel_port() def test_delete_port(self): self._mock_delete_port() def test_deferred_br_add_tunnel_port(self): self._mock_add_tunnel_port(True) def test_deferred_br_delete_port(self): self._mock_delete_port(True) neutron-8.4.0/neutron/tests/unit/plugins/ml2/drivers/openvswitch/agent/openflow/native/0000775000567000056710000000000013044373210032605 5ustar jenkinsjenkins00000000000000././@LongLink0000000000000000000000000000015200000000000011213 Lustar 00000000000000neutron-8.4.0/neutron/tests/unit/plugins/ml2/drivers/openvswitch/agent/openflow/native/test_ovs_bridge.pyneutron-8.4.0/neutron/tests/unit/plugins/ml2/drivers/openvswitch/agent/openflow/native/test_ovs_brid0000664000567000056710000000263213044372736035415 0ustar jenkinsjenkins00000000000000# Copyright (c) 2016 Mirantis, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
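# --- Illustrative sketch; not part of the original test module --------------
# test__get_dp below is a regression test for LP bug 1588042: a datapath
# lookup failure must surface as RuntimeError, not as UnboundLocalError.  A
# distilled, hypothetical reproduction of that class of bug:
def _get_dp_buggy(lookup):
    try:
        dp = lookup()
    except RuntimeError:
        pass      # bug: swallow the error and fall through
    return dp     # UnboundLocalError whenever lookup() raised


def _get_dp_fixed(lookup):
    try:
        return lookup()
    except RuntimeError:
        raise     # equivalent to letting the real failure propagate
# -----------------------------------------------------------------------------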
import mock from neutron.tests.unit.plugins.ml2.drivers.openvswitch.agent \ import ovs_test_base class OVSAgentBridgeTestCase(ovs_test_base.OVSRyuTestBase): def test__get_dp(self): mock.patch( 'neutron.agent.common.ovs_lib.OVSBridge.get_datapath_id', return_value="3e9").start() mock.patch( "neutron.plugins.ml2.drivers.openvswitch.agent.openflow.native." "ofswitch.OpenFlowSwitchMixin._get_dp_by_dpid", side_effect=RuntimeError).start() br = self.br_int_cls('br-int') br._cached_dpid = int("3e9", 16) # make sure it correctly raises RuntimeError, not UnboundLocalError as # in LP https://bugs.launchpad.net/neutron/+bug/1588042 self.assertRaises(RuntimeError, br._get_dp) neutron-8.4.0/neutron/tests/unit/plugins/ml2/drivers/openvswitch/agent/openflow/native/__init__.py0000664000567000056710000000000013044372736034720 0ustar jenkinsjenkins00000000000000././@LongLink0000000000000000000000000000014700000000000011217 Lustar 00000000000000neutron-8.4.0/neutron/tests/unit/plugins/ml2/drivers/openvswitch/agent/openflow/native/test_br_phys.pyneutron-8.4.0/neutron/tests/unit/plugins/ml2/drivers/openvswitch/agent/openflow/native/test_br_phys.0000664000567000056710000001253413044372760035331 0ustar jenkinsjenkins00000000000000# Copyright (C) 2014,2015 VA Linux Systems Japan K.K. # Copyright (C) 2014,2015 YAMAMOTO Takashi # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
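# --- Illustrative sketch; not part of the original test module --------------
# Same br-phys behaviour as the ovs_ofctl variant above, expressed as
# OpenFlow 1.3 structures instead of action strings.  The mapping the tests
# below encode, in rough pseudo-constructors (OFPVID_PRESENT marks a frame
# as tagged):
#   mod_vlan_vid:SEG,normal -> [OFPActionSetField(vlan_vid=SEG | OFPVID_PRESENT),
#                               OFPActionOutput(OFPP_NORMAL, 0)]
#   strip_vlan,normal       -> [OFPActionPopVlan(),
#                               OFPActionOutput(OFPP_NORMAL, 0)]
# -----------------------------------------------------------------------------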
import mock import neutron.plugins.ml2.drivers.openvswitch.agent.common.constants \ as ovs_const from neutron.tests.unit.plugins.ml2.drivers.openvswitch.agent.openflow.native \ import ovs_bridge_test_base call = mock.call # short hand class OVSPhysicalBridgeTest(ovs_bridge_test_base.OVSBridgeTestBase, ovs_bridge_test_base.OVSDVRProcessTestMixin): dvr_process_table_id = ovs_const.DVR_PROCESS_VLAN dvr_process_next_table_id = ovs_const.LOCAL_VLAN_TRANSLATION def setUp(self): super(OVSPhysicalBridgeTest, self).setUp() self.setup_bridge_mock('br-phys', self.br_phys_cls) self.stamp = self.br.default_cookie def test_setup_default_table(self): self.br.setup_default_table() (dp, ofp, ofpp) = self._get_dp() expected = [ call._send_msg(ofpp.OFPFlowMod(dp, cookie=self.stamp, instructions=[ ofpp.OFPInstructionActions(ofp.OFPIT_APPLY_ACTIONS, [ ofpp.OFPActionOutput(ofp.OFPP_NORMAL, 0), ]), ], match=ofpp.OFPMatch(), priority=0, table_id=0)), ] self.assertEqual(expected, self.mock.mock_calls) def test_provision_local_vlan(self): port = 999 lvid = 888 segmentation_id = 777 distributed = False self.br.provision_local_vlan(port=port, lvid=lvid, segmentation_id=segmentation_id, distributed=distributed) (dp, ofp, ofpp) = self._get_dp() expected = [ call._send_msg(ofpp.OFPFlowMod(dp, cookie=self.stamp, instructions=[ ofpp.OFPInstructionActions(ofp.OFPIT_APPLY_ACTIONS, [ ofpp.OFPActionSetField( vlan_vid=segmentation_id | ofp.OFPVID_PRESENT), ofpp.OFPActionOutput(ofp.OFPP_NORMAL, 0), ]), ], match=ofpp.OFPMatch( in_port=port, vlan_vid=lvid | ofp.OFPVID_PRESENT), priority=4, table_id=0)), ] self.assertEqual(expected, self.mock.mock_calls) def test_provision_local_vlan_novlan(self): port = 999 lvid = 888 segmentation_id = None distributed = False self.br.provision_local_vlan(port=port, lvid=lvid, segmentation_id=segmentation_id, distributed=distributed) (dp, ofp, ofpp) = self._get_dp() expected = [ call._send_msg(ofpp.OFPFlowMod(dp, cookie=self.stamp, instructions=[ ofpp.OFPInstructionActions(ofp.OFPIT_APPLY_ACTIONS, [ ofpp.OFPActionPopVlan(), ofpp.OFPActionOutput(ofp.OFPP_NORMAL, 0), ]), ], match=ofpp.OFPMatch( in_port=port, vlan_vid=lvid | ofp.OFPVID_PRESENT), priority=4, table_id=0)), ] self.assertEqual(expected, self.mock.mock_calls) def test_reclaim_local_vlan(self): port = 999 lvid = 888 self.br.reclaim_local_vlan(port=port, lvid=lvid) (dp, ofp, ofpp) = self._get_dp() expected = [ call.delete_flows( match=ofpp.OFPMatch( in_port=port, vlan_vid=lvid | ofp.OFPVID_PRESENT)), ] self.assertEqual(expected, self.mock.mock_calls) def test_add_dvr_mac_vlan(self): mac = '00:02:b3:13:fe:3d' port = 8888 self.br.add_dvr_mac_vlan(mac=mac, port=port) (dp, ofp, ofpp) = self._get_dp() expected = [ call._send_msg(ofpp.OFPFlowMod(dp, cookie=self.stamp, instructions=[ ofpp.OFPInstructionActions(ofp.OFPIT_APPLY_ACTIONS, [ ofpp.OFPActionOutput(port, 0), ]), ], match=ofpp.OFPMatch(eth_src=mac), priority=2, table_id=3)), ] self.assertEqual(expected, self.mock.mock_calls) def test_remove_dvr_mac_vlan(self): mac = '00:02:b3:13:fe:3d' self.br.remove_dvr_mac_vlan(mac=mac) (dp, ofp, ofpp) = self._get_dp() expected = [ call.delete_flows(eth_src=mac, table_id=3), ] self.assertEqual(expected, self.mock.mock_calls) ././@LongLink0000000000000000000000000000015700000000000011220 Lustar 
00000000000000neutron-8.4.0/neutron/tests/unit/plugins/ml2/drivers/openvswitch/agent/openflow/native/ovs_bridge_test_base.pyneutron-8.4.0/neutron/tests/unit/plugins/ml2/drivers/openvswitch/agent/openflow/native/ovs_bridge_te0000664000567000056710000002515713044372736035371 0ustar jenkinsjenkins00000000000000# Copyright (C) 2014,2015 VA Linux Systems Japan K.K. # Copyright (C) 2014,2015 YAMAMOTO Takashi # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import mock from oslo_utils import importutils from neutron.tests.unit.plugins.ml2.drivers.openvswitch.agent \ import ovs_test_base call = mock.call # short hand class OVSBridgeTestBase(ovs_test_base.OVSRyuTestBase): _ARP_MODULE = 'ryu.lib.packet.arp' _ETHER_TYPES_MODULE = 'ryu.lib.packet.ether_types' _ICMPV6_MODULE = 'ryu.lib.packet.icmpv6' _IN_PROTO_MODULE = 'ryu.lib.packet.in_proto' _OFP_MODULE = 'ryu.ofproto.ofproto_v1_3' _OFPP_MODULE = 'ryu.ofproto.ofproto_v1_3_parser' def setup_bridge_mock(self, name, cls): self.br = cls(name) self.stamp = self.br.default_cookie self.dp = mock.Mock() self.ofp = importutils.import_module(self._OFP_MODULE) self.ofpp = importutils.import_module(self._OFPP_MODULE) self.arp = importutils.import_module(self._ARP_MODULE) self.ether_types = importutils.import_module(self._ETHER_TYPES_MODULE) self.icmpv6 = importutils.import_module(self._ICMPV6_MODULE) self.in_proto = importutils.import_module(self._IN_PROTO_MODULE) mock.patch.object(self.br, '_get_dp', autospec=True, return_value=self._get_dp()).start() mock__send_msg = mock.patch.object(self.br, '_send_msg').start() mock_delete_flows = mock.patch.object(self.br, 'delete_flows').start() self.mock = mock.Mock() self.mock.attach_mock(mock__send_msg, '_send_msg') self.mock.attach_mock(mock_delete_flows, 'delete_flows') def _get_dp(self): return self.dp, self.ofp, self.ofpp def test_drop_port(self): in_port = 2345 self.br.drop_port(in_port=in_port) (dp, ofp, ofpp) = self._get_dp() expected = [ call._send_msg( ofpp.OFPFlowMod(dp, cookie=self.stamp, instructions=[], match=ofpp.OFPMatch(in_port=in_port), priority=2, table_id=0)), ] self.assertEqual(expected, self.mock.mock_calls) def test_install_goto(self): dest_table_id = 123 priority = 99 in_port = 666 self.br.install_goto(dest_table_id=dest_table_id, priority=priority, in_port=in_port) (dp, ofp, ofpp) = self._get_dp() expected = [ call._send_msg( ofpp.OFPFlowMod(dp, cookie=self.stamp, instructions=[ ofpp.OFPInstructionGotoTable(table_id=dest_table_id), ], match=ofpp.OFPMatch(in_port=in_port), priority=priority, table_id=0)), ] self.assertEqual(expected, self.mock.mock_calls) def test_install_drop(self): priority = 99 in_port = 666 self.br.install_drop(priority=priority, in_port=in_port) (dp, ofp, ofpp) = self._get_dp() expected = [ call._send_msg( ofpp.OFPFlowMod(dp, cookie=self.stamp, instructions=[], match=ofpp.OFPMatch(in_port=in_port), priority=priority, table_id=0)), ] self.assertEqual(expected, self.mock.mock_calls) def test_install_normal(self): priority = 99 in_port = 666 
self.br.install_normal(priority=priority, in_port=in_port) (dp, ofp, ofpp) = self._get_dp() expected = [ call._send_msg( ofpp.OFPFlowMod(dp, cookie=self.stamp, instructions=[ ofpp.OFPInstructionActions(ofp.OFPIT_APPLY_ACTIONS, [ ofpp.OFPActionOutput(ofp.OFPP_NORMAL, 0) ]), ], match=ofpp.OFPMatch(in_port=in_port), priority=priority, table_id=0)), ] self.assertEqual(expected, self.mock.mock_calls) def test__cidr_to_ryu(self): f = self.br._cidr_to_ryu self.assertEqual('192.168.0.1', f('192.168.0.1')) self.assertEqual('192.168.0.1', f('192.168.0.1/32')) self.assertEqual(('192.168.0.0', '255.255.255.0'), f('192.168.0.0/24')) def test__setup_controllers__out_of_band(self): cfg = mock.MagicMock() cfg.OVS.of_listen_address = "" cfg.OVS.of_listen_port = "" m_set_protocols = mock.patch.object(self.br, 'set_protocols') m_set_controller = mock.patch.object(self.br, 'set_controller') m_set_ccm = mock.patch.object(self.br, 'set_controllers_connection_mode') with m_set_ccm as set_ccm, m_set_controller, m_set_protocols: self.br.setup_controllers(cfg) set_ccm.assert_called_once_with("out-of-band") class OVSDVRProcessTestMixin(object): def test_install_dvr_process_ipv4(self): vlan_tag = 999 gateway_ip = '192.0.2.1' self.br.install_dvr_process_ipv4(vlan_tag=vlan_tag, gateway_ip=gateway_ip) (dp, ofp, ofpp) = self._get_dp() expected = [ call._send_msg(ofpp.OFPFlowMod(dp, cookie=self.stamp, instructions=[], match=ofpp.OFPMatch( eth_type=self.ether_types.ETH_TYPE_ARP, arp_tpa=gateway_ip, vlan_vid=vlan_tag | ofp.OFPVID_PRESENT), priority=3, table_id=self.dvr_process_table_id)), ] self.assertEqual(expected, self.mock.mock_calls) def test_delete_dvr_process_ipv4(self): vlan_tag = 999 gateway_ip = '192.0.2.1' self.br.delete_dvr_process_ipv4(vlan_tag=vlan_tag, gateway_ip=gateway_ip) (dp, ofp, ofpp) = self._get_dp() expected = [ call.delete_flows(table_id=self.dvr_process_table_id, match=ofpp.OFPMatch( eth_type=self.ether_types.ETH_TYPE_ARP, arp_tpa=gateway_ip, vlan_vid=vlan_tag | ofp.OFPVID_PRESENT)), ] self.assertEqual(expected, self.mock.mock_calls) def test_install_dvr_process_ipv6(self): vlan_tag = 999 gateway_mac = '08:60:6e:7f:74:e7' self.br.install_dvr_process_ipv6(vlan_tag=vlan_tag, gateway_mac=gateway_mac) (dp, ofp, ofpp) = self._get_dp() expected = [ call._send_msg(ofpp.OFPFlowMod(dp, cookie=self.stamp, instructions=[], match=ofpp.OFPMatch( eth_src=gateway_mac, eth_type=self.ether_types.ETH_TYPE_IPV6, icmpv6_type=self.icmpv6.ND_ROUTER_ADVERT, ip_proto=self.in_proto.IPPROTO_ICMPV6, vlan_vid=vlan_tag | ofp.OFPVID_PRESENT), priority=3, table_id=self.dvr_process_table_id)), ] self.assertEqual(expected, self.mock.mock_calls) def test_delete_dvr_process_ipv6(self): vlan_tag = 999 gateway_mac = '08:60:6e:7f:74:e7' self.br.delete_dvr_process_ipv6(vlan_tag=vlan_tag, gateway_mac=gateway_mac) (dp, ofp, ofpp) = self._get_dp() expected = [ call.delete_flows(table_id=self.dvr_process_table_id, match=ofpp.OFPMatch( eth_src=gateway_mac, eth_type=self.ether_types.ETH_TYPE_IPV6, icmpv6_type=self.icmpv6.ND_ROUTER_ADVERT, ip_proto=self.in_proto.IPPROTO_ICMPV6, vlan_vid=vlan_tag | ofp.OFPVID_PRESENT)), ] self.assertEqual(expected, self.mock.mock_calls) def test_install_dvr_process(self): vlan_tag = 999 vif_mac = '00:0e:0c:5e:95:d0' dvr_mac_address = 'f2:0b:a4:5b:b2:ab' self.br.install_dvr_process(vlan_tag=vlan_tag, vif_mac=vif_mac, dvr_mac_address=dvr_mac_address) (dp, ofp, ofpp) = self._get_dp() expected = [ call._send_msg(ofpp.OFPFlowMod(dp, cookie=self.stamp, instructions=[], match=ofpp.OFPMatch( eth_dst=vif_mac, 
vlan_vid=vlan_tag | ofp.OFPVID_PRESENT), priority=2, table_id=self.dvr_process_table_id)), call._send_msg(ofpp.OFPFlowMod(dp, cookie=self.stamp, instructions=[ ofpp.OFPInstructionActions(ofp.OFPIT_APPLY_ACTIONS, [ ofpp.OFPActionSetField(eth_src=dvr_mac_address), ]), ofpp.OFPInstructionGotoTable( table_id=self.dvr_process_next_table_id), ], match=ofpp.OFPMatch( eth_src=vif_mac, vlan_vid=vlan_tag | ofp.OFPVID_PRESENT), priority=1, table_id=self.dvr_process_table_id)), ] self.assertEqual(expected, self.mock.mock_calls) def test_delete_dvr_process(self): vlan_tag = 999 vif_mac = '00:0e:0c:5e:95:d0' self.br.delete_dvr_process(vlan_tag=vlan_tag, vif_mac=vif_mac) (dp, ofp, ofpp) = self._get_dp() expected = [ call.delete_flows(table_id=self.dvr_process_table_id, match=ofpp.OFPMatch( eth_dst=vif_mac, vlan_vid=vlan_tag | ofp.OFPVID_PRESENT)), call.delete_flows(table_id=self.dvr_process_table_id, match=ofpp.OFPMatch( eth_src=vif_mac, vlan_vid=vlan_tag | ofp.OFPVID_PRESENT)), ] self.assertEqual(expected, self.mock.mock_calls) ././@LongLink0000000000000000000000000000014600000000000011216 Lustar 00000000000000neutron-8.4.0/neutron/tests/unit/plugins/ml2/drivers/openvswitch/agent/openflow/native/test_br_int.pyneutron-8.4.0/neutron/tests/unit/plugins/ml2/drivers/openvswitch/agent/openflow/native/test_br_int.p0000664000567000056710000003564013044372760035323 0ustar jenkinsjenkins00000000000000# Copyright (C) 2014,2015 VA Linux Systems Japan K.K. # Copyright (C) 2014,2015 YAMAMOTO Takashi # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
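# --- Illustrative sketch; not part of the original test module --------------
# In OpenFlow 1.3 the vlan_vid match field carries a presence bit, and the
# integration-bridge tests below lean on that encoding: a tagged match is
# vid | OFPVID_PRESENT, an explicitly-untagged match is OFPVID_NONE.  A
# minimal helper using the spec values directly instead of importing ryu:
OFPVID_PRESENT = 0x1000  # bit 12 set: a VLAN tag is present
OFPVID_NONE = 0x0000     # match only packets without any VLAN tag


def _match_vlan_vid(segmentation_id):
    if segmentation_id is None:
        return OFPVID_NONE
    return segmentation_id | OFPVID_PRESENT


assert _match_vlan_vid(None) == 0x0000
assert _match_vlan_vid(777) == 777 | 0x1000
# -----------------------------------------------------------------------------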
import mock from neutron.tests.unit.plugins.ml2.drivers.openvswitch.agent.openflow.native \ import ovs_bridge_test_base call = mock.call # short hand class OVSIntegrationBridgeTest(ovs_bridge_test_base.OVSBridgeTestBase): def setUp(self): super(OVSIntegrationBridgeTest, self).setUp() self.setup_bridge_mock('br-int', self.br_int_cls) self.stamp = self.br.default_cookie def test_setup_default_table(self): self.br.setup_default_table() (dp, ofp, ofpp) = self._get_dp() expected = [ call._send_msg(ofpp.OFPFlowMod(dp, cookie=self.stamp, instructions=[ ofpp.OFPInstructionActions( ofp.OFPIT_APPLY_ACTIONS, [ ofpp.OFPActionOutput(ofp.OFPP_NORMAL, 0) ]), ], match=ofpp.OFPMatch(), priority=0, table_id=0)), call._send_msg(ofpp.OFPFlowMod(dp, cookie=self.stamp, instructions=[], match=ofpp.OFPMatch(), priority=0, table_id=23)), call._send_msg(ofpp.OFPFlowMod(dp, cookie=self.stamp, instructions=[], match=ofpp.OFPMatch(), priority=0, table_id=24)), ] self.assertEqual(expected, self.mock.mock_calls) def test_provision_local_vlan(self): port = 999 lvid = 888 segmentation_id = 777 self.br.provision_local_vlan(port=port, lvid=lvid, segmentation_id=segmentation_id) (dp, ofp, ofpp) = self._get_dp() expected = [ call._send_msg(ofpp.OFPFlowMod(dp, cookie=self.stamp, instructions=[ ofpp.OFPInstructionActions(ofp.OFPIT_APPLY_ACTIONS, [ ofpp.OFPActionSetField( vlan_vid=lvid | ofp.OFPVID_PRESENT), ofpp.OFPActionOutput(ofp.OFPP_NORMAL, 0) ]), ], match=ofpp.OFPMatch( in_port=port, vlan_vid=segmentation_id | ofp.OFPVID_PRESENT), priority=3, table_id=0)), ] self.assertEqual(expected, self.mock.mock_calls) def test_provision_local_vlan_novlan(self): port = 999 lvid = 888 segmentation_id = None self.br.provision_local_vlan(port=port, lvid=lvid, segmentation_id=segmentation_id) (dp, ofp, ofpp) = self._get_dp() expected = [ call._send_msg(ofpp.OFPFlowMod(dp, cookie=self.stamp, instructions=[ ofpp.OFPInstructionActions(ofp.OFPIT_APPLY_ACTIONS, [ ofpp.OFPActionPushVlan(), ofpp.OFPActionSetField( vlan_vid=lvid | ofp.OFPVID_PRESENT), ofpp.OFPActionOutput(ofp.OFPP_NORMAL, 0), ]), ], match=ofpp.OFPMatch( in_port=port, vlan_vid=ofp.OFPVID_NONE), priority=3, table_id=0)), ] self.assertEqual(expected, self.mock.mock_calls) def test_reclaim_local_vlan(self): port = 999 segmentation_id = 777 self.br.reclaim_local_vlan(port=port, segmentation_id=segmentation_id) (dp, ofp, ofpp) = self._get_dp() expected = [ call.delete_flows( match=ofpp.OFPMatch( in_port=port, vlan_vid=segmentation_id | ofp.OFPVID_PRESENT)), ] self.assertEqual(expected, self.mock.mock_calls) def test_reclaim_local_vlan_novlan(self): port = 999 segmentation_id = None self.br.reclaim_local_vlan(port=port, segmentation_id=segmentation_id) (dp, ofp, ofpp) = self._get_dp() expected = [ call.delete_flows( match=ofpp.OFPMatch( in_port=port, vlan_vid=ofp.OFPVID_NONE)), ] self.assertEqual(expected, self.mock.mock_calls) def test_install_dvr_to_src_mac(self): network_type = 'vxlan' vlan_tag = 1111 gateway_mac = '08:60:6e:7f:74:e7' dst_mac = '00:02:b3:13:fe:3d' dst_port = 6666 self.br.install_dvr_to_src_mac(network_type=network_type, vlan_tag=vlan_tag, gateway_mac=gateway_mac, dst_mac=dst_mac, dst_port=dst_port) (dp, ofp, ofpp) = self._get_dp() expected = [ call._send_msg(ofpp.OFPFlowMod(dp, cookie=self.stamp, instructions=[ ofpp.OFPInstructionActions(ofp.OFPIT_APPLY_ACTIONS, [ ofpp.OFPActionPopVlan(), ofpp.OFPActionSetField(eth_src=gateway_mac), ofpp.OFPActionOutput(6666, 0), ]), ], match=ofpp.OFPMatch( eth_dst=dst_mac, vlan_vid=vlan_tag | ofp.OFPVID_PRESENT), priority=4, 
table_id=1)), ] self.assertEqual(expected, self.mock.mock_calls) def test_delete_dvr_to_src_mac(self): network_type = 'vxlan' vlan_tag = 1111 dst_mac = '00:02:b3:13:fe:3d' self.br.delete_dvr_to_src_mac(network_type=network_type, vlan_tag=vlan_tag, dst_mac=dst_mac) (dp, ofp, ofpp) = self._get_dp() expected = [ call.delete_flows(table_id=1, match=ofpp.OFPMatch( eth_dst=dst_mac, vlan_vid=vlan_tag | ofp.OFPVID_PRESENT)), ] self.assertEqual(expected, self.mock.mock_calls) def test_install_dvr_to_src_mac_vlan(self): network_type = 'vlan' vlan_tag = 1111 gateway_mac = '08:60:6e:7f:74:e7' dst_mac = '00:02:b3:13:fe:3d' dst_port = 6666 self.br.install_dvr_to_src_mac(network_type=network_type, vlan_tag=vlan_tag, gateway_mac=gateway_mac, dst_mac=dst_mac, dst_port=dst_port) (dp, ofp, ofpp) = self._get_dp() expected = [ call._send_msg(ofpp.OFPFlowMod(dp, cookie=self.stamp, instructions=[ ofpp.OFPInstructionActions(ofp.OFPIT_APPLY_ACTIONS, [ ofpp.OFPActionPopVlan(), ofpp.OFPActionSetField(eth_src=gateway_mac), ofpp.OFPActionOutput(dst_port, 0), ]), ], match=ofpp.OFPMatch( eth_dst=dst_mac, vlan_vid=vlan_tag | ofp.OFPVID_PRESENT), priority=4, table_id=2)), ] self.assertEqual(expected, self.mock.mock_calls) def test_delete_dvr_to_src_mac_vlan(self): network_type = 'vlan' vlan_tag = 1111 dst_mac = '00:02:b3:13:fe:3d' self.br.delete_dvr_to_src_mac(network_type=network_type, vlan_tag=vlan_tag, dst_mac=dst_mac) (dp, ofp, ofpp) = self._get_dp() expected = [ call.delete_flows(table_id=2, match=ofpp.OFPMatch( eth_dst=dst_mac, vlan_vid=vlan_tag | ofp.OFPVID_PRESENT)), ] self.assertEqual(expected, self.mock.mock_calls) def test_add_dvr_mac_vlan(self): mac = '00:02:b3:13:fe:3d' port = 8888 self.br.add_dvr_mac_vlan(mac=mac, port=port) (dp, ofp, ofpp) = self._get_dp() expected = [ call._send_msg(ofpp.OFPFlowMod(dp, cookie=self.stamp, instructions=[ ofpp.OFPInstructionGotoTable(table_id=2), ], match=ofpp.OFPMatch( eth_src=mac, in_port=port), priority=4, table_id=0)) ] self.assertEqual(expected, self.mock.mock_calls) def test_remove_dvr_mac_vlan(self): mac = '00:02:b3:13:fe:3d' self.br.remove_dvr_mac_vlan(mac=mac) (dp, ofp, ofpp) = self._get_dp() expected = [ call.delete_flows(eth_src=mac, table_id=0), ] self.assertEqual(expected, self.mock.mock_calls) def test_add_dvr_mac_tun(self): mac = '00:02:b3:13:fe:3d' port = 8888 self.br.add_dvr_mac_tun(mac=mac, port=port) (dp, ofp, ofpp) = self._get_dp() expected = [ call._send_msg(ofpp.OFPFlowMod(dp, cookie=self.stamp, instructions=[ ofpp.OFPInstructionGotoTable(table_id=1), ], match=ofpp.OFPMatch( eth_src=mac, in_port=port), priority=2, table_id=0)) ] self.assertEqual(expected, self.mock.mock_calls) def test_remove_dvr_mac_tun(self): mac = '00:02:b3:13:fe:3d' port = 8888 self.br.remove_dvr_mac_tun(mac=mac, port=port) expected = [ call.delete_flows(eth_src=mac, in_port=port, table_id=0), ] self.assertEqual(expected, self.mock.mock_calls) def test_install_icmpv6_na_spoofing_protection(self): port = 8888 ip_addresses = ['2001:db8::1', 'fdf8:f53b:82e4::1/128'] self.br.install_icmpv6_na_spoofing_protection(port, ip_addresses) (dp, ofp, ofpp) = self._get_dp() expected = [ call._send_msg(ofpp.OFPFlowMod(dp, cookie=self.stamp, instructions=[ ofpp.OFPInstructionActions(ofp.OFPIT_APPLY_ACTIONS, [ ofpp.OFPActionOutput(ofp.OFPP_NORMAL, 0), ]), ], match=ofpp.OFPMatch( eth_type=self.ether_types.ETH_TYPE_IPV6, icmpv6_type=self.icmpv6.ND_NEIGHBOR_ADVERT, ip_proto=self.in_proto.IPPROTO_ICMPV6, ipv6_nd_target='2001:db8::1', in_port=8888, ), priority=2, table_id=24)), 
call._send_msg(ofpp.OFPFlowMod(dp, cookie=self.stamp, instructions=[ ofpp.OFPInstructionActions(ofp.OFPIT_APPLY_ACTIONS, [ ofpp.OFPActionOutput(ofp.OFPP_NORMAL, 0), ]), ], match=ofpp.OFPMatch( eth_type=self.ether_types.ETH_TYPE_IPV6, icmpv6_type=self.icmpv6.ND_NEIGHBOR_ADVERT, ip_proto=self.in_proto.IPPROTO_ICMPV6, ipv6_nd_target='fdf8:f53b:82e4::1', in_port=8888, ), priority=2, table_id=24)), call._send_msg(ofpp.OFPFlowMod(dp, cookie=self.stamp, instructions=[ ofpp.OFPInstructionGotoTable(table_id=24), ], match=ofpp.OFPMatch( eth_type=self.ether_types.ETH_TYPE_IPV6, icmpv6_type=self.icmpv6.ND_NEIGHBOR_ADVERT, ip_proto=self.in_proto.IPPROTO_ICMPV6, in_port=8888, ), priority=10, table_id=0)), ] self.assertEqual(expected, self.mock.mock_calls) def test_install_arp_spoofing_protection(self): port = 8888 ip_addresses = ['192.0.2.1', '192.0.2.2/32'] self.br.install_arp_spoofing_protection(port, ip_addresses) (dp, ofp, ofpp) = self._get_dp() expected = [ call._send_msg(ofpp.OFPFlowMod(dp, cookie=self.stamp, instructions=[ ofpp.OFPInstructionGotoTable(table_id=25), ], match=ofpp.OFPMatch( eth_type=self.ether_types.ETH_TYPE_ARP, arp_spa='192.0.2.1', in_port=8888, ), priority=2, table_id=24)), call._send_msg(ofpp.OFPFlowMod(dp, cookie=self.stamp, instructions=[ ofpp.OFPInstructionGotoTable(table_id=25), ], match=ofpp.OFPMatch( eth_type=self.ether_types.ETH_TYPE_ARP, arp_spa='192.0.2.2', in_port=8888 ), priority=2, table_id=24)), call._send_msg(ofpp.OFPFlowMod(dp, cookie=self.stamp, instructions=[ ofpp.OFPInstructionGotoTable(table_id=24), ], match=ofpp.OFPMatch( eth_type=self.ether_types.ETH_TYPE_ARP, in_port=8888, ), priority=10, table_id=0)), ] self.assertEqual(expected, self.mock.mock_calls) def test_delete_arp_spoofing_protection(self): port = 8888 self.br.delete_arp_spoofing_protection(port) (dp, ofp, ofpp) = self._get_dp() expected = [ call.delete_flows(table_id=0, match=ofpp.OFPMatch( eth_type=self.ether_types.ETH_TYPE_ARP, in_port=8888)), call.delete_flows(table_id=0, match=ofpp.OFPMatch( eth_type=self.ether_types.ETH_TYPE_IPV6, icmpv6_type=self.icmpv6.ND_NEIGHBOR_ADVERT, in_port=8888, ip_proto=self.in_proto.IPPROTO_ICMPV6)), call.delete_flows(table_id=24, in_port=port), ] self.assertEqual(expected, self.mock.mock_calls) ././@LongLink0000000000000000000000000000014600000000000011216 Lustar 00000000000000neutron-8.4.0/neutron/tests/unit/plugins/ml2/drivers/openvswitch/agent/openflow/native/test_br_tun.pyneutron-8.4.0/neutron/tests/unit/plugins/ml2/drivers/openvswitch/agent/openflow/native/test_br_tun.p0000664000567000056710000004646013044372760035341 0ustar jenkinsjenkins00000000000000# Copyright (C) 2014,2015 VA Linux Systems Japan K.K. # Copyright (C) 2014,2015 YAMAMOTO Takashi # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
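# The expectations in this file spell out the br-tun OpenFlow pipeline that
# setup_default_table() installs.  Roughly: table 0 classifies on in_port,
# and table 2 splits unicast from broadcast/multicast traffic by masking
# the multicast bit of eth_dst:
#
#     eth_dst=('00:00:00:00:00:00', '01:00:00:00:00:00')   # unicast   -> table 20
#     eth_dst=('01:00:00:00:00:00', '01:00:00:00:00:00')   # multicast -> table 22
#
# Table 10 holds an NXActionLearn flow that teaches table 20 the MACs seen
# on tunnel ports, and table 21 (the ARP responder) is only wired into the
# pipeline when arp_responder_enabled is True.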
import mock import neutron.plugins.ml2.drivers.openvswitch.agent.common.constants \ as ovs_const from neutron.tests.unit.plugins.ml2.drivers.openvswitch.agent.openflow.native \ import ovs_bridge_test_base call = mock.call # short hand class OVSTunnelBridgeTest(ovs_bridge_test_base.OVSBridgeTestBase, ovs_bridge_test_base.OVSDVRProcessTestMixin): dvr_process_table_id = ovs_const.DVR_PROCESS dvr_process_next_table_id = ovs_const.PATCH_LV_TO_TUN def setUp(self): super(OVSTunnelBridgeTest, self).setUp() self.setup_bridge_mock('br-tun', self.br_tun_cls) self.stamp = self.br.default_cookie def test_setup_default_table(self): patch_int_ofport = 5555 arp_responder_enabled = False self.br.setup_default_table(patch_int_ofport=patch_int_ofport, arp_responder_enabled=arp_responder_enabled) (dp, ofp, ofpp) = self._get_dp() expected = [ call._send_msg(ofpp.OFPFlowMod(dp, cookie=self.stamp, instructions=[ofpp.OFPInstructionGotoTable(table_id=2)], match=ofpp.OFPMatch(in_port=patch_int_ofport), priority=1, table_id=0)), call._send_msg(ofpp.OFPFlowMod(dp, cookie=self.stamp, instructions=[], match=ofpp.OFPMatch(), priority=0, table_id=0)), call._send_msg(ofpp.OFPFlowMod(dp, cookie=self.stamp, instructions=[ofpp.OFPInstructionGotoTable(table_id=20)], match=ofpp.OFPMatch( eth_dst=('00:00:00:00:00:00', '01:00:00:00:00:00')), priority=0, table_id=2)), call._send_msg(ofpp.OFPFlowMod(dp, cookie=self.stamp, instructions=[ofpp.OFPInstructionGotoTable(table_id=22)], match=ofpp.OFPMatch( eth_dst=('01:00:00:00:00:00', '01:00:00:00:00:00')), priority=0, table_id=2)), call._send_msg(ofpp.OFPFlowMod(dp, cookie=self.stamp, instructions=[], match=ofpp.OFPMatch(), priority=0, table_id=3)), call._send_msg(ofpp.OFPFlowMod(dp, cookie=self.stamp, instructions=[], match=ofpp.OFPMatch(), priority=0, table_id=4)), call._send_msg(ofpp.OFPFlowMod(dp, cookie=self.stamp, instructions=[], match=ofpp.OFPMatch(), priority=0, table_id=6)), call._send_msg(ofpp.OFPFlowMod(dp, cookie=self.stamp, instructions=[ ofpp.OFPInstructionActions(ofp.OFPIT_APPLY_ACTIONS, [ ofpp.NXActionLearn( cookie=self.stamp, hard_timeout=300, priority=1, specs=[ ofpp.NXFlowSpecMatch( dst=('vlan_vid', 0), n_bits=12, src=('vlan_vid', 0)), ofpp.NXFlowSpecMatch( dst=('eth_dst', 0), n_bits=48, src=('eth_src', 0)), ofpp.NXFlowSpecLoad( dst=('vlan_vid', 0), n_bits=12, src=0), ofpp.NXFlowSpecLoad( dst=('tunnel_id', 0), n_bits=64, src=('tunnel_id', 0)), ofpp.NXFlowSpecOutput( dst='', n_bits=32, src=('in_port', 0)), ], table_id=20), ofpp.OFPActionOutput(patch_int_ofport, 0), ]), ], match=ofpp.OFPMatch(), priority=1, table_id=10)), call._send_msg(ofpp.OFPFlowMod(dp, cookie=self.stamp, instructions=[ofpp.OFPInstructionGotoTable(table_id=22)], match=ofpp.OFPMatch(), priority=0, table_id=20)), call._send_msg(ofpp.OFPFlowMod(dp, cookie=self.stamp, instructions=[], match=ofpp.OFPMatch(), priority=0, table_id=22)) ] self.assertEqual(expected, self.mock.mock_calls) def test_setup_default_table_arp_responder_enabled(self): patch_int_ofport = 5555 arp_responder_enabled = True self.br.setup_default_table(patch_int_ofport=patch_int_ofport, arp_responder_enabled=arp_responder_enabled) (dp, ofp, ofpp) = self._get_dp() expected = [ call._send_msg(ofpp.OFPFlowMod(dp, cookie=self.stamp, instructions=[ofpp.OFPInstructionGotoTable(table_id=2)], match=ofpp.OFPMatch(in_port=patch_int_ofport), priority=1, table_id=0)), call._send_msg(ofpp.OFPFlowMod(dp, cookie=self.stamp, instructions=[], match=ofpp.OFPMatch(), priority=0, table_id=0)), call._send_msg(ofpp.OFPFlowMod(dp, cookie=self.stamp, 
instructions=[ofpp.OFPInstructionGotoTable(table_id=21)], match=ofpp.OFPMatch( eth_dst='ff:ff:ff:ff:ff:ff', eth_type=self.ether_types.ETH_TYPE_ARP), priority=1, table_id=2)), call._send_msg(ofpp.OFPFlowMod(dp, cookie=self.stamp, instructions=[ofpp.OFPInstructionGotoTable(table_id=20)], match=ofpp.OFPMatch( eth_dst=('00:00:00:00:00:00', '01:00:00:00:00:00')), priority=0, table_id=2)), call._send_msg(ofpp.OFPFlowMod(dp, cookie=self.stamp, instructions=[ofpp.OFPInstructionGotoTable(table_id=22)], match=ofpp.OFPMatch( eth_dst=('01:00:00:00:00:00', '01:00:00:00:00:00')), priority=0, table_id=2)), call._send_msg(ofpp.OFPFlowMod(dp, cookie=self.stamp, instructions=[], match=ofpp.OFPMatch(), priority=0, table_id=3)), call._send_msg(ofpp.OFPFlowMod(dp, cookie=self.stamp, instructions=[], match=ofpp.OFPMatch(), priority=0, table_id=4)), call._send_msg(ofpp.OFPFlowMod(dp, cookie=self.stamp, instructions=[], match=ofpp.OFPMatch(), priority=0, table_id=6)), call._send_msg(ofpp.OFPFlowMod(dp, cookie=self.stamp, instructions=[ ofpp.OFPInstructionActions(ofp.OFPIT_APPLY_ACTIONS, [ ofpp.NXActionLearn( cookie=self.stamp, hard_timeout=300, priority=1, specs=[ ofpp.NXFlowSpecMatch( dst=('vlan_vid', 0), n_bits=12, src=('vlan_vid', 0)), ofpp.NXFlowSpecMatch( dst=('eth_dst', 0), n_bits=48, src=('eth_src', 0)), ofpp.NXFlowSpecLoad( dst=('vlan_vid', 0), n_bits=12, src=0), ofpp.NXFlowSpecLoad( dst=('tunnel_id', 0), n_bits=64, src=('tunnel_id', 0)), ofpp.NXFlowSpecOutput( dst='', n_bits=32, src=('in_port', 0)), ], table_id=20), ofpp.OFPActionOutput(patch_int_ofport, 0), ]), ], match=ofpp.OFPMatch(), priority=1, table_id=10)), call._send_msg(ofpp.OFPFlowMod(dp, cookie=self.stamp, instructions=[ofpp.OFPInstructionGotoTable(table_id=22)], match=ofpp.OFPMatch(), priority=0, table_id=20)), call._send_msg(ofpp.OFPFlowMod(dp, cookie=self.stamp, instructions=[ofpp.OFPInstructionGotoTable(table_id=22)], match=ofpp.OFPMatch(), priority=0, table_id=21)), call._send_msg(ofpp.OFPFlowMod(dp, cookie=self.stamp, instructions=[], match=ofpp.OFPMatch(), priority=0, table_id=22)) ] self.assertEqual(expected, self.mock.mock_calls) def test_provision_local_vlan(self): network_type = 'vxlan' lvid = 888 segmentation_id = 777 distributed = False self.br.provision_local_vlan(network_type=network_type, lvid=lvid, segmentation_id=segmentation_id, distributed=distributed) (dp, ofp, ofpp) = self._get_dp() expected = [ call._send_msg(ofpp.OFPFlowMod(dp, cookie=self.stamp, instructions=[ ofpp.OFPInstructionActions(ofp.OFPIT_APPLY_ACTIONS, [ ofpp.OFPActionPushVlan(), ofpp.OFPActionSetField( vlan_vid=lvid | ofp.OFPVID_PRESENT) ]), ofpp.OFPInstructionGotoTable(table_id=10), ], match=ofpp.OFPMatch(tunnel_id=segmentation_id), priority=1, table_id=4)), ] self.assertEqual(expected, self.mock.mock_calls) def test_reclaim_local_vlan(self): network_type = 'vxlan' segmentation_id = 777 self.br.reclaim_local_vlan(network_type=network_type, segmentation_id=segmentation_id) (dp, ofp, ofpp) = self._get_dp() expected = [ call.delete_flows( table_id=4, match=ofpp.OFPMatch(tunnel_id=segmentation_id)), ] self.assertEqual(expected, self.mock.mock_calls) def test_install_flood_to_tun(self): vlan = 3333 tun_id = 2222 ports = [11, 44, 22, 33] self.br.install_flood_to_tun(vlan=vlan, tun_id=tun_id, ports=ports) (dp, ofp, ofpp) = self._get_dp() expected = [ call._send_msg(ofpp.OFPFlowMod(dp, cookie=self.stamp, instructions=[ ofpp.OFPInstructionActions(ofp.OFPIT_APPLY_ACTIONS, [ ofpp.OFPActionPopVlan(), ofpp.OFPActionSetField(tunnel_id=tun_id), ] + [ofpp.OFPActionOutput(p, 
0) for p in ports]), ], match=ofpp.OFPMatch(vlan_vid=vlan | ofp.OFPVID_PRESENT), priority=1, table_id=22)), ] self.assertEqual(expected, self.mock.mock_calls) def test_delete_flood_to_tun(self): vlan = 3333 self.br.delete_flood_to_tun(vlan=vlan) (dp, ofp, ofpp) = self._get_dp() expected = [ call.delete_flows(table_id=22, match=ofpp.OFPMatch(vlan_vid=vlan | ofp.OFPVID_PRESENT)), ] self.assertEqual(expected, self.mock.mock_calls) def test_install_unicast_to_tun(self): vlan = 3333 port = 55 mac = '08:60:6e:7f:74:e7' tun_id = 2222 self.br.install_unicast_to_tun(vlan=vlan, tun_id=tun_id, port=port, mac=mac) (dp, ofp, ofpp) = self._get_dp() expected = [ call._send_msg(ofpp.OFPFlowMod(dp, cookie=self.stamp, instructions=[ ofpp.OFPInstructionActions(ofp.OFPIT_APPLY_ACTIONS, [ ofpp.OFPActionPopVlan(), ofpp.OFPActionSetField(tunnel_id=tun_id), ofpp.OFPActionOutput(port, 0), ]), ], match=ofpp.OFPMatch( eth_dst=mac, vlan_vid=vlan | ofp.OFPVID_PRESENT), priority=2, table_id=20)), ] self.assertEqual(expected, self.mock.mock_calls) def test_delete_unicast_to_tun(self): vlan = 3333 mac = '08:60:6e:7f:74:e7' self.br.delete_unicast_to_tun(vlan=vlan, mac=mac) (dp, ofp, ofpp) = self._get_dp() expected = [ call.delete_flows(table_id=20, match=ofpp.OFPMatch( eth_dst=mac, vlan_vid=vlan | ofp.OFPVID_PRESENT)), ] self.assertEqual(expected, self.mock.mock_calls) def test_delete_unicast_to_tun_without_mac(self): vlan = 3333 mac = None self.br.delete_unicast_to_tun(vlan=vlan, mac=mac) (dp, ofp, ofpp) = self._get_dp() expected = [ call.delete_flows(table_id=20, match=ofpp.OFPMatch(vlan_vid=vlan | ofp.OFPVID_PRESENT)), ] self.assertEqual(expected, self.mock.mock_calls) def test_install_arp_responder(self): vlan = 3333 ip = '192.0.2.1' mac = '08:60:6e:7f:74:e7' self.br.install_arp_responder(vlan=vlan, ip=ip, mac=mac) (dp, ofp, ofpp) = self._get_dp() expected = [ call._send_msg(ofpp.OFPFlowMod(dp, cookie=self.stamp, instructions=[ ofpp.OFPInstructionActions(ofp.OFPIT_APPLY_ACTIONS, [ ofpp.OFPActionSetField(arp_op=self.arp.ARP_REPLY), ofpp.NXActionRegMove( dst_field='arp_tha', n_bits=48, src_field='arp_sha'), ofpp.NXActionRegMove( dst_field='arp_tpa', n_bits=32, src_field='arp_spa'), ofpp.OFPActionSetField(arp_sha=mac), ofpp.OFPActionSetField(arp_spa=ip), ofpp.NXActionRegMove(src_field='eth_src', dst_field='eth_dst', n_bits=48), ofpp.OFPActionSetField(eth_src_nxm=mac), ofpp.OFPActionOutput(ofp.OFPP_IN_PORT, 0), ]), ], match=ofpp.OFPMatch( eth_type=self.ether_types.ETH_TYPE_ARP, arp_tpa=ip, vlan_vid=vlan | ofp.OFPVID_PRESENT), priority=1, table_id=21)), ] self.assertEqual(expected, self.mock.mock_calls) def test_delete_arp_responder(self): vlan = 3333 ip = '192.0.2.1' self.br.delete_arp_responder(vlan=vlan, ip=ip) (dp, ofp, ofpp) = self._get_dp() expected = [ call.delete_flows( match=ofpp.OFPMatch( eth_type=self.ether_types.ETH_TYPE_ARP, arp_tpa=ip, vlan_vid=vlan | ofp.OFPVID_PRESENT), table_id=21), ] self.assertEqual(expected, self.mock.mock_calls) def test_delete_arp_responder_without_ip(self): vlan = 3333 ip = None self.br.delete_arp_responder(vlan=vlan, ip=ip) (dp, ofp, ofpp) = self._get_dp() expected = [ call.delete_flows( match=ofpp.OFPMatch( eth_type=self.ether_types.ETH_TYPE_ARP, vlan_vid=vlan | ofp.OFPVID_PRESENT), table_id=21), ] self.assertEqual(expected, self.mock.mock_calls) def test_setup_tunnel_port(self): network_type = 'vxlan' port = 11111 self.br.setup_tunnel_port(network_type=network_type, port=port) (dp, ofp, ofpp) = self._get_dp() expected = [ call._send_msg(ofpp.OFPFlowMod(dp, cookie=self.stamp, 
instructions=[ ofpp.OFPInstructionGotoTable(table_id=4), ], match=ofpp.OFPMatch(in_port=port), priority=1, table_id=0)), ] self.assertEqual(expected, self.mock.mock_calls) def test_cleanup_tunnel_port(self): port = 11111 self.br.cleanup_tunnel_port(port=port) (dp, ofp, ofpp) = self._get_dp() expected = [ call.delete_flows(in_port=port), ] self.assertEqual(expected, self.mock.mock_calls) def test_add_dvr_mac_tun(self): mac = '00:02:b3:13:fe:3d' port = 8888 self.br.add_dvr_mac_tun(mac=mac, port=port) (dp, ofp, ofpp) = self._get_dp() expected = [ call._send_msg(ofpp.OFPFlowMod(dp, cookie=self.stamp, instructions=[ ofpp.OFPInstructionActions(ofp.OFPIT_APPLY_ACTIONS, [ ofpp.OFPActionOutput(port, 0), ]), ], match=ofpp.OFPMatch(eth_src=mac), priority=1, table_id=9)), ] self.assertEqual(expected, self.mock.mock_calls) def test_remove_dvr_mac_tun(self): mac = '00:02:b3:13:fe:3d' self.br.remove_dvr_mac_tun(mac=mac) (dp, ofp, ofpp) = self._get_dp() expected = [ call.delete_flows(eth_src=mac, table_id=9), ] self.assertEqual(expected, self.mock.mock_calls) neutron-8.4.0/neutron/tests/unit/plugins/ml2/drivers/openvswitch/agent/openflow/__init__.py0000664000567000056710000000000013044372736033432 0ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/tests/unit/plugins/ml2/drivers/openvswitch/agent/__init__.py0000664000567000056710000000000013044372736031601 0ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/tests/unit/plugins/ml2/drivers/openvswitch/agent/ovs_test_base.py0000664000567000056710000000575713044372760032727 0ustar jenkinsjenkins00000000000000# Copyright (C) 2014,2015 VA Linux Systems Japan K.K. # Copyright (C) 2014 Fumihiko Kakuma # Copyright (C) 2014,2015 YAMAMOTO Takashi # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import functools import mock from oslo_utils import importutils from neutron.tests import base from neutron.tests.unit.plugins.ml2.drivers.openvswitch.agent \ import fake_oflib _AGENT_PACKAGE = 'neutron.plugins.ml2.drivers.openvswitch.agent' _AGENT_NAME = _AGENT_PACKAGE + '.ovs_neutron_agent' _DVR_AGENT_NAME = ('neutron.plugins.ml2.drivers.openvswitch.agent.' 
'ovs_dvr_neutron_agent') class OVSAgentConfigTestBase(base.BaseTestCase): def setUp(self): super(OVSAgentConfigTestBase, self).setUp() self.mod_agent = importutils.import_module(_AGENT_NAME) self.mod_dvr_agent = importutils.import_module(_DVR_AGENT_NAME) class OVSAgentTestBase(OVSAgentConfigTestBase): def setUp(self): super(OVSAgentTestBase, self).setUp() self.br_int_cls = importutils.import_class(self._BR_INT_CLASS) self.br_phys_cls = importutils.import_class(self._BR_PHYS_CLASS) self.br_tun_cls = importutils.import_class(self._BR_TUN_CLASS) def _bridge_classes(self): return { 'br_int': self.br_int_cls, 'br_phys': self.br_phys_cls, 'br_tun': self.br_tun_cls, } class OVSOFCtlTestBase(OVSAgentTestBase): _DRIVER_PACKAGE = _AGENT_PACKAGE + '.openflow.ovs_ofctl' _BR_INT_CLASS = _DRIVER_PACKAGE + '.br_int.OVSIntegrationBridge' _BR_TUN_CLASS = _DRIVER_PACKAGE + '.br_tun.OVSTunnelBridge' _BR_PHYS_CLASS = _DRIVER_PACKAGE + '.br_phys.OVSPhysicalBridge' class OVSRyuTestBase(OVSAgentTestBase): _DRIVER_PACKAGE = _AGENT_PACKAGE + '.openflow.native' _BR_INT_CLASS = _DRIVER_PACKAGE + '.br_int.OVSIntegrationBridge' _BR_TUN_CLASS = _DRIVER_PACKAGE + '.br_tun.OVSTunnelBridge' _BR_PHYS_CLASS = _DRIVER_PACKAGE + '.br_phys.OVSPhysicalBridge' def setUp(self): self.fake_oflib_of = fake_oflib.patch_fake_oflib_of() self.fake_oflib_of.start() self.addCleanup(self.fake_oflib_of.stop) super(OVSRyuTestBase, self).setUp() ryu_app = mock.Mock() self.br_int_cls = functools.partial(self.br_int_cls, ryu_app=ryu_app) self.br_phys_cls = functools.partial(self.br_phys_cls, ryu_app=ryu_app) self.br_tun_cls = functools.partial(self.br_tun_cls, ryu_app=ryu_app) neutron-8.4.0/neutron/tests/unit/plugins/ml2/drivers/openvswitch/agent/fake_oflib.py0000664000567000056710000001200013044372736032126 0ustar jenkinsjenkins00000000000000# Copyright (C) 2014 VA Linux Systems Japan K.K. # Copyright (C) 2014 Fumihiko Kakuma # Copyright (C) 2014 YAMAMOTO Takashi # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
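# fake_oflib fakes just enough of the ryu OpenFlow library for these unit
# tests to run without ryu installed: _Mod manufactures classes and
# constants lazily on attribute access, and _Eq makes instances compare
# equal whenever their repr() strings match.  Rough usage sketch:
#
#     ofpp = _Mod('ryu.ofproto.ofproto_v1_3_parser')
#     ofp = _Mod('ryu.ofproto.ofproto_v1_3')
#     ofpp.OFPMatch(in_port=1) == ofpp.OFPMatch(in_port=1)  # True, via repr()
#     ofp.OFPVID_PRESENT | 1    # a symbolic _Op, also repr-comparable
#
# patch_fake_oflib_of() then swaps the whole fake module tree into
# sys.modules with mock.patch.dict.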
import mock class _Eq(object): def __eq__(self, other): return repr(self) == repr(other) def __ne__(self, other): return not self.__eq__(other) class _Value(_Eq): def __or__(self, b): return _Op('|', self, b) def __ror__(self, a): return _Op('|', a, self) class _SimpleValue(_Value): def __init__(self, name): self.name = name def __repr__(self): return self.name class _Op(_Value): def __init__(self, op, a, b): self.op = op self.a = a self.b = b def __repr__(self): return '%s%s%s' % (self.a, self.op, self.b) def _mkcls(name): class Cls(_Eq): _name = name def __init__(self, *args, **kwargs): self._args = args self._kwargs = kwargs self._hist = [] def __getattr__(self, name): return self._kwargs[name] def __repr__(self): args = list(map(repr, self._args)) kwargs = sorted(['%s=%s' % (x, y) for x, y in self._kwargs.items()]) return '%s(%s)' % (self._name, ', '.join(args + kwargs)) return Cls class _Mod(object): _cls_cache = {} def __init__(self, name): self._name = name def __getattr__(self, name): fullname = '%s.%s' % (self._name, name) if '_' in name: # constants are named like OFPxxx_yyy_zzz return _SimpleValue(fullname) try: return self._cls_cache[fullname] except KeyError: pass cls = _mkcls(fullname) self._cls_cache[fullname] = cls return cls def __repr__(self): return 'Mod(%s)' % (self._name,) def patch_fake_oflib_of(): ryu_mod = mock.Mock() ryu_base_mod = ryu_mod.base ryu_exc_mod = ryu_mod.exception ryu_ctrl_mod = ryu_mod.controller handler = _Mod('ryu.controller.handler') handler.set_ev_cls = mock.Mock() ofp_event = _Mod('ryu.controller.ofp_event') ryu_ctrl_mod.handler = handler ryu_ctrl_mod.ofp_event = ofp_event ryu_lib_mod = ryu_mod.lib ryu_lib_hub = ryu_lib_mod.hub ryu_packet_mod = ryu_lib_mod.packet packet = _Mod('ryu.lib.packet.packet') arp = _Mod('ryu.lib.packet.arp') ethernet = _Mod('ryu.lib.packet.ethernet') ether_types = _Mod('ryu.lib.packet.ether_types') in_proto = _Mod('ryu.lib.packet.in_proto') icmpv6 = _Mod('ryu.lib.packet.icmpv6') vlan = _Mod('ryu.lib.packet.vlan') ryu_packet_mod.packet = packet packet.Packet = mock.Mock() ryu_packet_mod.arp = arp ryu_packet_mod.ethernet = ethernet ryu_packet_mod.ether_types = ether_types ryu_packet_mod.icmpv6 = icmpv6 ryu_packet_mod.in_proto = in_proto ryu_packet_mod.vlan = vlan ryu_ofproto_mod = ryu_mod.ofproto ofp = _Mod('ryu.ofproto.ofproto_v1_3') ofpp = _Mod('ryu.ofproto.ofproto_v1_3_parser') ryu_ofproto_mod.ofproto_v1_3 = ofp ryu_ofproto_mod.ofproto_v1_3_parser = ofpp ryu_app_mod = ryu_mod.app ryu_app_ofctl_mod = ryu_app_mod.ofctl ryu_ofctl_api = ryu_app_ofctl_mod.api modules = {'ryu': ryu_mod, 'ryu.base': ryu_base_mod, 'ryu.controller': ryu_ctrl_mod, 'ryu.controller.handler': handler, 'ryu.controller.handler.set_ev_cls': handler.set_ev_cls, 'ryu.controller.ofp_event': ofp_event, 'ryu.exception': ryu_exc_mod, 'ryu.lib': ryu_lib_mod, 'ryu.lib.hub': ryu_lib_hub, 'ryu.lib.packet': ryu_packet_mod, 'ryu.lib.packet.packet': packet, 'ryu.lib.packet.packet.Packet': packet.Packet, 'ryu.lib.packet.arp': arp, 'ryu.lib.packet.ethernet': ethernet, 'ryu.lib.packet.ether_types': ether_types, 'ryu.lib.packet.icmpv6': icmpv6, 'ryu.lib.packet.in_proto': in_proto, 'ryu.lib.packet.vlan': vlan, 'ryu.ofproto': ryu_ofproto_mod, 'ryu.ofproto.ofproto_v1_3': ofp, 'ryu.ofproto.ofproto_v1_3_parser': ofpp, 'ryu.app': ryu_app_mod, 'ryu.app.ofctl': ryu_app_ofctl_mod, 'ryu.app.ofctl.api': ryu_ofctl_api} return mock.patch.dict('sys.modules', modules) ././@LongLink0000000000000000000000000000014700000000000011217 Lustar 
00000000000000neutron-8.4.0/neutron/tests/unit/plugins/ml2/drivers/openvswitch/agent/test_ovs_agent_extension_api.pyneutron-8.4.0/neutron/tests/unit/plugins/ml2/drivers/openvswitch/agent/test_ovs_agent_extension_api.0000664000567000056710000001512113044372760035451 0ustar jenkinsjenkins00000000000000# Copyright 2012 VMware, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # import mock from neutron.plugins.ml2.drivers.openvswitch.agent.openflow.ovs_ofctl \ import ovs_bridge from neutron.plugins.ml2.drivers.openvswitch.agent \ import ovs_agent_extension_api as ovs_ext_agt from neutron.tests import base class TestOVSAgentExtensionAPI(base.BaseTestCase): def setUp(self): super(TestOVSAgentExtensionAPI, self).setUp() self.br_int = ovs_bridge.OVSAgentBridge("br-int") self.br_tun = ovs_bridge.OVSAgentBridge("br-tun") def _test_bridge(self, orig_bridge, new_bridge): self.assertIsNotNone(new_bridge) self.assertEqual(orig_bridge.br_name, new_bridge.br_name) self.assertIn(new_bridge.default_cookie, orig_bridge.reserved_cookies) self.assertNotEqual(orig_bridge.default_cookie, new_bridge.default_cookie) def test_request_int_br(self): agent_extension_api = ovs_ext_agt.OVSAgentExtensionAPI(self.br_int, self.br_tun) new_int_br = agent_extension_api.request_int_br() self._test_bridge(self.br_int, new_int_br) def test_request_tun_br(self): agent_extension_api = ovs_ext_agt.OVSAgentExtensionAPI(self.br_int, self.br_tun) new_tun_br = agent_extension_api.request_tun_br() self._test_bridge(self.br_tun, new_tun_br) def test_request_tun_br_tunneling_disabled(self): agent_extension_api = ovs_ext_agt.OVSAgentExtensionAPI(self.br_int, None) self.assertIsNone(agent_extension_api.request_tun_br()) class TestOVSCookieBridge(base.DietTestCase): def setUp(self): super(TestOVSCookieBridge, self).setUp() self.bridge = ovs_bridge.OVSAgentBridge("br-foo") self.bridge.do_action_flows = mock.Mock() self.tested_bridge = ovs_ext_agt.OVSCookieBridge(self.bridge) def test_reserved(self): self.assertIn(self.tested_bridge.default_cookie, self.bridge.reserved_cookies) def test_add_flow_without_cookie(self): self.tested_bridge.add_flow(in_port=1, actions="output:2") self.bridge.do_action_flows.assert_called_once_with( 'add', [{"in_port": 1, "actions": "output:2", "cookie": self.tested_bridge.default_cookie}] ) def test_mod_flow_without_cookie(self): self.tested_bridge.mod_flow(in_port=1, actions="output:2") self.bridge.do_action_flows.assert_called_once_with( 'mod', [{"in_port": 1, "actions": "output:2", "cookie": str(self.tested_bridge.default_cookie) + '/-1'}] ) def test_del_flows_without_cookie(self): self.tested_bridge.delete_flows(in_port=1) self.bridge.do_action_flows.assert_called_once_with( 'del', [{"in_port": 1, "cookie": str(self.tested_bridge.default_cookie) + '/-1'}] ) def test_add_flow_with_cookie(self): self.tested_bridge.add_flow(cookie=1234, in_port=1, actions="output:2") self.bridge.do_action_flows.assert_called_once_with( 'add', [{"in_port": 1, "actions": "output:2", "cookie": 1234}] ) def
test_mod_flow_with_cookie(self): self.tested_bridge.mod_flow(cookie='1234', in_port=1, actions="output:2") self.bridge.do_action_flows.assert_called_once_with( 'mod', [{"in_port": 1, "actions": "output:2", "cookie": str(1234) + '/-1'}] ) def test_del_flows_with_cookie(self): self.tested_bridge.delete_flows(cookie=1234, in_port=1) self.bridge.do_action_flows.assert_called_once_with( 'del', [{"in_port": 1, "cookie": str(1234) + '/-1'}] ) def test_mod_flow_with_mask(self): self.tested_bridge.mod_flow(cookie='1234/3', in_port=1, actions="output:2") self.bridge.do_action_flows.assert_called_once_with( 'mod', [{"in_port": 1, "actions": "output:2", "cookie": str(1234) + '/3'}] ) def test_del_flows_with_mask(self): self.tested_bridge.delete_flows(cookie='1234/7', in_port=1) self.bridge.do_action_flows.assert_called_once_with( 'del', [{"in_port": 1, "cookie": str(1234) + '/7'}] ) class TestOVSDeferredCookieBridge(base.DietTestCase): def setUp(self): super(TestOVSDeferredCookieBridge, self).setUp() self.bridge = ovs_bridge.OVSAgentBridge("br-foo") self.bridge.do_action_flows = mock.Mock() self.cookie_bridge = ovs_ext_agt.OVSCookieBridge(self.bridge) self.tested_bridge = self.cookie_bridge.deferred() def test_add_flow(self): self.tested_bridge.add_flow(in_port=1, actions="output:2") self.tested_bridge.apply_flows() self.bridge.do_action_flows.assert_called_once_with( 'add', [{"in_port": 1, "actions": "output:2", "cookie": self.cookie_bridge.default_cookie}] ) def test_mod_flow(self): self.tested_bridge.mod_flow(in_port=1, actions="output:2") self.tested_bridge.apply_flows() self.bridge.do_action_flows.assert_called_once_with( 'mod', [{"in_port": 1, "actions": "output:2", "cookie": str(self.cookie_bridge.default_cookie) + '/-1'}] ) def test_del_flows(self): self.tested_bridge.delete_flows(in_port=1) self.tested_bridge.apply_flows() self.bridge.do_action_flows.assert_called_once_with( 'del', [{"in_port": 1, "cookie": str(self.cookie_bridge.default_cookie) + '/-1'}] ) neutron-8.4.0/neutron/tests/unit/plugins/ml2/drivers/openvswitch/agent/test_ovs_tunnel.py0000664000567000056710000007265613044372760033324 0ustar jenkinsjenkins00000000000000# Copyright 2012 VMware, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # import time import mock from oslo_config import cfg from oslo_log import log import six from neutron.agent.common import ip_lib from neutron.agent.common import ovs_lib from neutron.common import constants as n_const from neutron.plugins.common import constants as p_const from neutron.plugins.ml2.drivers.openvswitch.agent.common import constants from neutron.tests.unit.plugins.ml2.drivers.openvswitch.agent \ import ovs_test_base def nonzero(f): if six.PY3: return f.__bool__() else: return f.__nonzero__() # Useful global dummy variables. 
NET_UUID = '3faeebfe-5d37-11e1-a64b-000c29d5f0a7' LS_ID = 420 LV_ID = 42 LV_IDS = [42, 43] VIF_ID = '404deaec-5d37-11e1-a64b-000c29d5f0a8' VIF_MAC = '3c:09:24:1e:78:23' OFPORT_NUM = 1 VIF_PORT = ovs_lib.VifPort('port', OFPORT_NUM, VIF_ID, VIF_MAC, 'switch') VIF_PORTS = {VIF_ID: VIF_PORT} FIXED_IPS = [{'subnet_id': 'my-subnet-uuid', 'ip_address': '1.1.1.1'}] VM_DEVICE_OWNER = n_const.DEVICE_OWNER_COMPUTE_PREFIX + 'fake' TUN_OFPORTS = {p_const.TYPE_GRE: {'ip1': '11', 'ip2': '12'}} BCAST_MAC = "01:00:00:00:00:00/01:00:00:00:00:00" UCAST_MAC = "00:00:00:00:00:00/01:00:00:00:00:00" class DummyPort(object): def __init__(self, interface_id): self.interface_id = interface_id class DummyVlanBinding(object): def __init__(self, network_id, vlan_id): self.network_id = network_id self.vlan_id = vlan_id class TunnelTest(object): USE_VETH_INTERCONNECTION = False VETH_MTU = None def setUp(self): super(TunnelTest, self).setUp() cfg.CONF.set_default('firewall_driver', 'neutron.agent.firewall.NoopFirewallDriver', group='SECURITYGROUP') cfg.CONF.set_override('report_interval', 0, 'AGENT') self.INT_BRIDGE = 'integration_bridge' self.TUN_BRIDGE = 'tunnel_bridge' self.MAP_TUN_BRIDGE = 'tun_br_map' self.AUX_BRIDGE = 'ancillary_bridge' self.NET_MAPPING = ['net1:%s' % self.MAP_TUN_BRIDGE] self.INT_OFPORT = 11111 self.TUN_OFPORT = 22222 self.MAP_TUN_INT_OFPORT = 33333 self.MAP_TUN_PHY_OFPORT = 44444 self.LVM = self.mod_agent.LocalVLANMapping( LV_ID, 'gre', None, LS_ID, VIF_PORTS) self.LVM_FLAT = self.mod_agent.LocalVLANMapping( LV_ID, 'flat', 'net1', LS_ID, VIF_PORTS) self.LVM_VLAN = self.mod_agent.LocalVLANMapping( LV_ID, 'vlan', 'net1', LS_ID, VIF_PORTS) self.inta = mock.Mock() self.intb = mock.Mock() mock.patch.object(ovs_lib.BaseOVS, 'config', new_callable=mock.PropertyMock, return_value={}).start() self.ovs_bridges = { self.INT_BRIDGE: mock.create_autospec( self.br_int_cls('br-int')), self.TUN_BRIDGE: mock.create_autospec( self.br_tun_cls('br-tun')), self.MAP_TUN_BRIDGE: mock.create_autospec( self.br_phys_cls('br-phys')), self.AUX_BRIDGE: mock.create_autospec( ovs_lib.OVSBridge('br-aux')), } self.ovs_int_ofports = { 'patch-tun': self.TUN_OFPORT, 'int-%s' % self.MAP_TUN_BRIDGE: self.MAP_TUN_INT_OFPORT } def lookup_br(br_name, *args, **kwargs): return self.ovs_bridges[br_name] self.mock_int_bridge_cls = mock.patch(self._BR_INT_CLASS, autospec=True).start() self.mock_int_bridge_cls.side_effect = lookup_br self.mock_phys_bridge_cls = mock.patch(self._BR_PHYS_CLASS, autospec=True).start() self.mock_phys_bridge_cls.side_effect = lookup_br self.mock_tun_bridge_cls = mock.patch(self._BR_TUN_CLASS, autospec=True).start() self.mock_tun_bridge_cls.side_effect = lookup_br self.mock_aux_bridge_cls = mock.patch( 'neutron.agent.common.ovs_lib.OVSBridge', autospec=True).start() self.mock_aux_bridge_cls.side_effect = lookup_br self.mock_int_bridge = self.ovs_bridges[self.INT_BRIDGE] self.mock_int_bridge.add_port.return_value = self.MAP_TUN_INT_OFPORT self.mock_int_bridge.add_patch_port.side_effect = ( lambda tap, peer: self.ovs_int_ofports[tap]) self.mock_int_bridge.port_exists.return_value = False self.mock_int_bridge.get_vif_ports.return_value = [] self.mock_int_bridge.get_ports_attributes.return_value = [] self.mock_int_bridge.db_get_val.return_value = {} self.mock_map_tun_bridge = self.ovs_bridges[self.MAP_TUN_BRIDGE] self.mock_map_tun_bridge.br_name = self.MAP_TUN_BRIDGE self.mock_map_tun_bridge.add_port.return_value = ( self.MAP_TUN_PHY_OFPORT) self.mock_map_tun_bridge.add_patch_port.return_value = ( 
self.MAP_TUN_PHY_OFPORT) self.mock_map_tun_bridge.port_exists.return_value = False self.mock_tun_bridge = self.ovs_bridges[self.TUN_BRIDGE] self.mock_tun_bridge.add_port.return_value = self.INT_OFPORT self.mock_tun_bridge.add_patch_port.return_value = self.INT_OFPORT self.ipdevice = mock.patch.object(ip_lib, 'IPDevice').start() self.ipwrapper = mock.patch.object(ip_lib, 'IPWrapper').start() add_veth = self.ipwrapper.return_value.add_veth add_veth.return_value = [self.inta, self.intb] self.get_bridges = mock.patch.object(ovs_lib.BaseOVS, 'get_bridges').start() self.get_bridges.return_value = [self.INT_BRIDGE, self.TUN_BRIDGE, self.MAP_TUN_BRIDGE, self.AUX_BRIDGE] self.get_bridge_external_bridge_id = mock.patch.object( ovs_lib.BaseOVS, 'get_bridge_external_bridge_id').start() self.get_bridge_external_bridge_id.side_effect = ( lambda bridge: bridge if bridge in self.ovs_bridges else None) self.execute = mock.patch('neutron.agent.common.utils.execute').start() self._define_expected_calls() def _define_expected_calls(self, arp_responder=False): self.mock_int_bridge_cls_expected = [ mock.call(self.INT_BRIDGE, datapath_type=mock.ANY), ] self.mock_phys_bridge_cls_expected = [ mock.call(self.MAP_TUN_BRIDGE, datapath_type=mock.ANY), ] self.mock_tun_bridge_cls_expected = [ mock.call(self.TUN_BRIDGE, datapath_type=mock.ANY), ] self.mock_int_bridge = self.ovs_bridges[self.INT_BRIDGE] self.mock_int_bridge_expected = [ mock.call.create(), mock.call.set_secure_mode(), mock.call.setup_controllers(mock.ANY), mock.call.setup_default_table(), ] self.mock_map_tun_bridge_expected = [ mock.call.create(), mock.call.set_secure_mode(), mock.call.setup_controllers(mock.ANY), mock.call.setup_default_table(), mock.call.port_exists('phy-%s' % self.MAP_TUN_BRIDGE), mock.call.add_patch_port('phy-%s' % self.MAP_TUN_BRIDGE, constants.NONEXISTENT_PEER), ] self.mock_int_bridge_expected += [ mock.call.db_get_val('Interface', 'int-%s' % self.MAP_TUN_BRIDGE, 'type'), mock.call.port_exists('int-%s' % self.MAP_TUN_BRIDGE), mock.call.add_patch_port('int-%s' % self.MAP_TUN_BRIDGE, constants.NONEXISTENT_PEER), ] self.mock_int_bridge_expected += [ mock.call.drop_port(in_port=self.MAP_TUN_INT_OFPORT), mock.call.set_db_attribute( 'Interface', 'int-%s' % self.MAP_TUN_BRIDGE, 'options', {'peer': 'phy-%s' % self.MAP_TUN_BRIDGE}), ] self.mock_map_tun_bridge_expected += [ mock.call.drop_port(in_port=self.MAP_TUN_PHY_OFPORT), mock.call.set_db_attribute( 'Interface', 'phy-%s' % self.MAP_TUN_BRIDGE, 'options', {'peer': 'int-%s' % self.MAP_TUN_BRIDGE}), ] self.mock_aux_bridge = self.ovs_bridges[self.AUX_BRIDGE] self.mock_aux_bridge_expected = [ ] self.mock_tun_bridge_expected = [ mock.call.create(secure_mode=True), mock.call.setup_controllers(mock.ANY), mock.call.port_exists('patch-int'), nonzero(mock.call.port_exists()), mock.call.add_patch_port('patch-int', 'patch-tun'), ] self.mock_int_bridge_expected += [ mock.call.port_exists('patch-tun'), mock.call.add_patch_port('patch-tun', 'patch-int'), ] self.mock_int_bridge_expected += [ mock.call.get_vif_ports((ovs_lib.INVALID_OFPORT, ovs_lib.UNASSIGNED_OFPORT)), mock.call.get_ports_attributes( 'Port', columns=['name', 'other_config', 'tag'], ports=[]) ] self.mock_tun_bridge_expected += [ mock.call.setup_default_table(self.INT_OFPORT, arp_responder), ] self.ipdevice_expected = [] self.ipwrapper_expected = [mock.call()] self.get_bridges_expected = [mock.call(), mock.call()] self.inta_expected = [] self.intb_expected = [] self.execute_expected = [] def _build_agent(self, **config_opts_agent): 
"""Configure and initialize OVS agent. :param config_opts_agent: a dict with options to override the default values for the AGENT group. """ bridge_classes = { 'br_int': self.mock_int_bridge_cls, 'br_phys': self.mock_phys_bridge_cls, 'br_tun': self.mock_tun_bridge_cls, } cfg.CONF.set_override('integration_bridge', self.INT_BRIDGE, 'OVS') cfg.CONF.set_override('tunnel_bridge', self.TUN_BRIDGE, 'OVS') cfg.CONF.set_override('local_ip', '10.0.0.1', 'OVS') cfg.CONF.set_override('bridge_mappings', self.NET_MAPPING, 'OVS') cfg.CONF.set_override('polling_interval', 2, 'AGENT') cfg.CONF.set_override('tunnel_types', ['gre'], 'AGENT') cfg.CONF.set_override('veth_mtu', self.VETH_MTU, 'AGENT') cfg.CONF.set_override('minimize_polling', False, 'AGENT') cfg.CONF.set_override('use_veth_interconnection', self.USE_VETH_INTERCONNECTION, 'OVS') for k, v in config_opts_agent.items(): cfg.CONF.set_override(k, v, 'AGENT') return self.mod_agent.OVSNeutronAgent(bridge_classes, cfg.CONF) def _verify_mock_call(self, mock_obj, expected): mock_obj.assert_has_calls(expected) self.assertEqual(expected, mock_obj.mock_calls) def _verify_mock_calls(self): self._verify_mock_call(self.mock_int_bridge_cls, self.mock_int_bridge_cls_expected) self._verify_mock_call(self.mock_tun_bridge_cls, self.mock_tun_bridge_cls_expected) self._verify_mock_call(self.mock_phys_bridge_cls, self.mock_phys_bridge_cls_expected) self._verify_mock_call(self.mock_int_bridge, self.mock_int_bridge_expected) self._verify_mock_call(self.mock_map_tun_bridge, self.mock_map_tun_bridge_expected) self._verify_mock_call(self.mock_tun_bridge, self.mock_tun_bridge_expected) self._verify_mock_call(self.mock_aux_bridge, self.mock_aux_bridge_expected) self._verify_mock_call(self.ipdevice, self.ipdevice_expected) self._verify_mock_call(self.ipwrapper, self.ipwrapper_expected) self._verify_mock_call(self.get_bridges, self.get_bridges_expected) self._verify_mock_call(self.inta, self.inta_expected) self._verify_mock_call(self.intb, self.intb_expected) self._verify_mock_call(self.execute, self.execute_expected) def test_construct(self): agent = self._build_agent() self.assertEqual(agent.agent_id, 'ovs-agent-%s' % cfg.CONF.host) self._verify_mock_calls() # TODO(ethuleau): Initially, local ARP responder is be dependent to the # ML2 l2 population mechanism driver. 
# The next two tests use l2_pop flag to test ARP responder def test_construct_with_arp_responder(self): self._build_agent(l2_population=True, arp_responder=True) self._define_expected_calls(True) self._verify_mock_calls() def test_construct_without_arp_responder(self): self._build_agent(l2_population=False, arp_responder=True) self._verify_mock_calls() def test_construct_vxlan(self): self._build_agent(tunnel_types=['vxlan']) self._verify_mock_calls() def test_provision_local_vlan(self): ofports = list(TUN_OFPORTS[p_const.TYPE_GRE].values()) self.mock_tun_bridge_expected += [ mock.call.install_flood_to_tun(LV_ID, LS_ID, ofports), mock.call.provision_local_vlan( network_type=p_const.TYPE_GRE, lvid=LV_ID, segmentation_id=LS_ID), ] a = self._build_agent() a.available_local_vlans = set([LV_ID]) a.tun_br_ofports = TUN_OFPORTS a.provision_local_vlan(NET_UUID, p_const.TYPE_GRE, None, LS_ID) self._verify_mock_calls() def test_provision_local_vlan_flat(self): self.mock_map_tun_bridge_expected.append( mock.call.provision_local_vlan( port=self.MAP_TUN_PHY_OFPORT, lvid=LV_ID, segmentation_id=None, distributed=False)) self.mock_int_bridge_expected.append( mock.call.provision_local_vlan( port=self.INT_OFPORT, lvid=LV_ID, segmentation_id=None)) a = self._build_agent() a.available_local_vlans = set([LV_ID]) a.phys_brs['net1'] = self.mock_map_tun_bridge a.phys_ofports['net1'] = self.MAP_TUN_PHY_OFPORT a.int_ofports['net1'] = self.INT_OFPORT a.provision_local_vlan(NET_UUID, p_const.TYPE_FLAT, 'net1', LS_ID) self._verify_mock_calls() def test_provision_local_vlan_flat_fail(self): a = self._build_agent() a.provision_local_vlan(NET_UUID, p_const.TYPE_FLAT, 'net2', LS_ID) self._verify_mock_calls() def test_provision_local_vlan_vlan(self): self.mock_map_tun_bridge_expected.append( mock.call.provision_local_vlan( port=self.MAP_TUN_PHY_OFPORT, lvid=LV_ID, segmentation_id=LS_ID, distributed=False)) self.mock_int_bridge_expected.append( mock.call.provision_local_vlan( port=self.INT_OFPORT, lvid=LV_ID, segmentation_id=LS_ID)) a = self._build_agent() a.available_local_vlans = set([LV_ID]) a.phys_brs['net1'] = self.mock_map_tun_bridge a.phys_ofports['net1'] = self.MAP_TUN_PHY_OFPORT a.int_ofports['net1'] = self.INT_OFPORT a.provision_local_vlan(NET_UUID, p_const.TYPE_VLAN, 'net1', LS_ID) self._verify_mock_calls() def test_provision_local_vlan_vlan_fail(self): a = self._build_agent() a.provision_local_vlan(NET_UUID, p_const.TYPE_VLAN, 'net2', LS_ID) self._verify_mock_calls() def test_reclaim_local_vlan(self): self.mock_tun_bridge_expected += [ mock.call.reclaim_local_vlan(network_type='gre', segmentation_id=LS_ID), mock.call.delete_flood_to_tun(LV_ID), mock.call.delete_unicast_to_tun(LV_ID, None), mock.call.delete_arp_responder(LV_ID, None), ] a = self._build_agent() a.available_local_vlans = set() a.local_vlan_map[NET_UUID] = self.LVM a.reclaim_local_vlan(NET_UUID) self.assertIn(self.LVM.vlan, a.available_local_vlans) self._verify_mock_calls() def test_reclaim_local_vlan_flat(self): self.mock_map_tun_bridge_expected.append( mock.call.reclaim_local_vlan( port=self.MAP_TUN_PHY_OFPORT, lvid=self.LVM_FLAT.vlan)) self.mock_int_bridge_expected.append( mock.call.reclaim_local_vlan( port=self.INT_OFPORT, segmentation_id=None)) a = self._build_agent() a.phys_brs['net1'] = self.mock_map_tun_bridge a.phys_ofports['net1'] = self.MAP_TUN_PHY_OFPORT a.int_ofports['net1'] = self.INT_OFPORT a.available_local_vlans = set() a.local_vlan_map[NET_UUID] = self.LVM_FLAT a.reclaim_local_vlan(NET_UUID) self.assertIn(self.LVM_FLAT.vlan, 
a.available_local_vlans) self._verify_mock_calls() def test_reclaim_local_vlan_vlan(self): self.mock_map_tun_bridge_expected.append( mock.call.reclaim_local_vlan( port=self.MAP_TUN_PHY_OFPORT, lvid=self.LVM_VLAN.vlan)) self.mock_int_bridge_expected.append( mock.call.reclaim_local_vlan( port=self.INT_OFPORT, segmentation_id=LS_ID)) a = self._build_agent() a.phys_brs['net1'] = self.mock_map_tun_bridge a.phys_ofports['net1'] = self.MAP_TUN_PHY_OFPORT a.int_ofports['net1'] = self.INT_OFPORT a.available_local_vlans = set() a.local_vlan_map[NET_UUID] = self.LVM_VLAN a.reclaim_local_vlan(NET_UUID) self.assertIn(self.LVM_VLAN.vlan, a.available_local_vlans) self._verify_mock_calls() def test_port_bound(self): vlan_mapping = {'segmentation_id': str(LS_ID), 'physical_network': 'None', 'net_uuid': NET_UUID, 'network_type': 'gre'} self.mock_int_bridge_expected += [ mock.call.db_get_val('Port', 'port', 'other_config'), mock.call.set_db_attribute('Port', VIF_PORT.port_name, 'other_config', vlan_mapping)] a = self._build_agent() a.local_vlan_map[NET_UUID] = self.LVM a.local_dvr_map = {} self.ovs_bridges[self.INT_BRIDGE].db_get_val.return_value = {} a.port_bound(VIF_PORT, NET_UUID, 'gre', None, LS_ID, FIXED_IPS, VM_DEVICE_OWNER, False) self._verify_mock_calls() def test_port_unbound(self): with mock.patch.object(self.mod_agent.OVSNeutronAgent, 'reclaim_local_vlan') as reclaim_local_vlan: a = self._build_agent() a.local_vlan_map[NET_UUID] = self.LVM a.port_unbound(VIF_ID, NET_UUID) reclaim_local_vlan.assert_called_once_with(NET_UUID) self._verify_mock_calls() def test_port_dead(self): self.mock_int_bridge_expected += [ mock.call.db_get_val('Port', VIF_PORT.port_name, 'tag', log_errors=True), mock.call.set_db_attribute( 'Port', VIF_PORT.port_name, 'tag', constants.DEAD_VLAN_TAG, log_errors=True), mock.call.drop_port(in_port=VIF_PORT.ofport), ] a = self._build_agent() a.available_local_vlans = set([LV_ID]) a.local_vlan_map[NET_UUID] = self.LVM self.ovs_bridges[self.INT_BRIDGE].db_get_val.return_value = mock.Mock() a.port_dead(VIF_PORT) self._verify_mock_calls() def test_tunnel_update(self): tunnel_port = '9999' self.mock_tun_bridge.add_tunnel_port.return_value = tunnel_port self.mock_tun_bridge_expected += [ mock.call.add_tunnel_port('gre-0a000a01', '10.0.10.1', '10.0.0.1', 'gre', 4789, True, False), mock.call.setup_tunnel_port('gre', tunnel_port), ] a = self._build_agent() a.tunnel_update( mock.sentinel.ctx, tunnel_ip='10.0.10.1', tunnel_type=p_const.TYPE_GRE) self._verify_mock_calls() def test_tunnel_update_self(self): a = self._build_agent() a.tunnel_update( mock.sentinel.ctx, tunnel_ip='10.0.0.1') self._verify_mock_calls() def test_daemon_loop(self): reply_ge_1 = {'added': [{'name': 'tap0', 'ofport': 3, 'external_ids': { 'attached-mac': 'test_mac'}}], 'removed': []} reply_ge_2 = {'added': [], 'removed': [{'name': 'tap0', 'ofport': 3, 'external_ids': { 'attached-mac': 'test_mac'}}]} reply_pe_1 = {'current': set(['tap0']), 'added': set(['tap0']), 'removed': set([])} reply_pe_2 = {'current': set([]), 'added': set([]), 'removed': set(['tap0'])} reply_ancillary = {'current': set([]), 'added': set([]), 'removed': set([])} self.mock_int_bridge_expected += [ mock.call.check_canary_table(), mock.call.cleanup_flows(), mock.call.check_canary_table() ] self.mock_tun_bridge_expected += [ mock.call.cleanup_flows() ] self.mock_map_tun_bridge_expected += [ mock.call.cleanup_flows() ] # No cleanup is expected on ancillary bridge self.ovs_bridges[self.INT_BRIDGE].check_canary_table.return_value = \ constants.OVS_NORMAL 
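# rpc_loop() has no exit condition the test can reach, so the block below
# escapes it by making the patched log adapter's exception() raise: two
# rounds of canned port/interface events are fed in, and the injected
# failure from process_network_ports breaks the loop on the second pass.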
with mock.patch.object(log.KeywordArgumentAdapter, 'exception') as log_exception,\ mock.patch.object(self.mod_agent.OVSNeutronAgent, 'process_ports_events') as process_p_events,\ mock.patch.object( self.mod_agent.OVSNeutronAgent, 'process_network_ports') as process_network_ports,\ mock.patch.object(self.mod_agent.OVSNeutronAgent, 'tunnel_sync'),\ mock.patch.object(time, 'sleep'),\ mock.patch.object( self.mod_agent.OVSNeutronAgent, 'update_stale_ofport_rules') as update_stale: log_exception.side_effect = Exception( 'Fake exception to get out of the loop') update_stale.return_value = [] devices_not_ready = set() process_p_events.side_effect = [ (reply_pe_1, reply_ancillary, devices_not_ready), (reply_pe_2, reply_ancillary, devices_not_ready)] interface_polling = mock.Mock() interface_polling.get_events.side_effect = [reply_ge_1, reply_ge_2] failed_devices = {'removed': set([]), 'added': set([])} failed_ancillary_devices = {'removed': set([]), 'added': set([])} process_network_ports.side_effect = [ failed_devices, Exception('Fake exception to get out of the loop')] n_agent = self._build_agent() # Hack to test loop # We start method and expect it will raise after 2nd loop # If something goes wrong, assert_has_calls below will catch it try: n_agent.rpc_loop(interface_polling) except Exception: pass # FIXME(salv-orlando): There should not be assertions on log # messages log_exception.assert_called_once_with( "Error while processing VIF ports") process_p_events.assert_has_calls([ mock.call(reply_ge_1, set(), set(), devices_not_ready, failed_devices, failed_ancillary_devices, set()), mock.call(reply_ge_2, set(['tap0']), set(), devices_not_ready, failed_devices, failed_ancillary_devices, set()) ]) process_network_ports.assert_has_calls([ mock.call({'current': set(['tap0']), 'removed': set([]), 'added': set(['tap0'])}, False), ]) self.assertTrue(update_stale.called) self._verify_mock_calls() class TunnelTestOFCtl(TunnelTest, ovs_test_base.OVSOFCtlTestBase): pass class TunnelTestRyu(TunnelTest, ovs_test_base.OVSRyuTestBase): pass class TunnelTestUseVethInterco(TunnelTest): USE_VETH_INTERCONNECTION = True def _define_expected_calls(self, arp_responder=False): self.mock_int_bridge_cls_expected = [ mock.call(self.INT_BRIDGE, datapath_type=mock.ANY), ] self.mock_phys_bridge_cls_expected = [ mock.call(self.MAP_TUN_BRIDGE, datapath_type=mock.ANY), ] self.mock_tun_bridge_cls_expected = [ mock.call(self.TUN_BRIDGE, datapath_type=mock.ANY), ] self.mock_int_bridge_expected = [ mock.call.create(), mock.call.set_secure_mode(), mock.call.setup_controllers(mock.ANY), mock.call.setup_default_table(), ] self.mock_map_tun_bridge_expected = [ mock.call.create(), mock.call.set_secure_mode(), mock.call.setup_controllers(mock.ANY), mock.call.setup_default_table(), mock.call.add_port('phy-%s' % self.MAP_TUN_BRIDGE), ] self.mock_int_bridge_expected += [ mock.call.db_get_val('Interface', 'int-%s' % self.MAP_TUN_BRIDGE, 'type'), mock.call.add_port('int-%s' % self.MAP_TUN_BRIDGE) ] self.mock_int_bridge_expected += [ mock.call.drop_port(in_port=self.MAP_TUN_INT_OFPORT), ] self.mock_map_tun_bridge_expected += [ mock.call.drop_port(in_port=self.MAP_TUN_PHY_OFPORT), ] self.mock_aux_bridge = self.ovs_bridges[self.AUX_BRIDGE] self.mock_aux_bridge_expected = [ ] self.mock_tun_bridge_expected = [ mock.call.create(secure_mode=True), mock.call.setup_controllers(mock.ANY), mock.call.port_exists('patch-int'), nonzero(mock.call.port_exists()), mock.call.add_patch_port('patch-int', 'patch-tun'), ] self.mock_int_bridge_expected += [ 
mock.call.port_exists('patch-tun'), mock.call.add_patch_port('patch-tun', 'patch-int') ] self.mock_int_bridge_expected += [ mock.call.get_vif_ports((ovs_lib.INVALID_OFPORT, ovs_lib.UNASSIGNED_OFPORT)), mock.call.get_ports_attributes( 'Port', columns=['name', 'other_config', 'tag'], ports=[]) ] self.mock_tun_bridge_expected += [ mock.call.setup_default_table(self.INT_OFPORT, arp_responder), ] self.ipdevice_expected = [ mock.call('int-%s' % self.MAP_TUN_BRIDGE), mock.call().exists(), nonzero(mock.call().exists()), mock.call().link.delete() ] self.ipwrapper_expected = [ mock.call(), mock.call().add_veth('int-%s' % self.MAP_TUN_BRIDGE, 'phy-%s' % self.MAP_TUN_BRIDGE) ] self.get_bridges_expected = [mock.call(), mock.call()] self.inta_expected = [mock.call.link.set_up()] self.intb_expected = [mock.call.link.set_up()] self.execute_expected = [mock.call(['udevadm', 'settle', '--timeout=10'])] class TunnelTestUseVethIntercoOFCtl(TunnelTestUseVethInterco, ovs_test_base.OVSOFCtlTestBase): pass class TunnelTestUseVethIntercoRyu(TunnelTestUseVethInterco, ovs_test_base.OVSRyuTestBase): pass class TunnelTestWithMTU(TunnelTestUseVethInterco): VETH_MTU = 1500 def _define_expected_calls(self, arp_responder=False): super(TunnelTestWithMTU, self)._define_expected_calls(arp_responder) self.inta_expected.append(mock.call.link.set_mtu(self.VETH_MTU)) self.intb_expected.append(mock.call.link.set_mtu(self.VETH_MTU)) class TunnelTestWithMTUOFCtl(TunnelTestWithMTU, ovs_test_base.OVSOFCtlTestBase): pass class TunnelTestWithMTURyu(TunnelTestWithMTU, ovs_test_base.OVSRyuTestBase): pass neutron-8.4.0/neutron/tests/unit/plugins/ml2/drivers/openvswitch/agent/test_ovs_neutron_agent.py0000664000567000056710000046372513044372760034670 0ustar jenkinsjenkins00000000000000# Copyright (c) 2012 OpenStack Foundation. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
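# A recurring trick in the fixtures below: oslo_service's
# FixedIntervalLoopingCall is replaced with MockFixedIntervalLoopingCall
# (defined further down), whose start() just invokes the wrapped function
# once, synchronously -- no green thread, no timer -- so agent construction
# in setUp() stays deterministic and never spawns a real state-report loop.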
import sys import time import mock from oslo_config import cfg from oslo_log import log import oslo_messaging import testtools from neutron._i18n import _ from neutron.agent.common import ovs_lib from neutron.agent.common import utils from neutron.agent.linux import async_process from neutron.agent.linux import ip_lib from neutron.common import constants as n_const from neutron.plugins.common import constants as p_const from neutron.plugins.ml2.drivers.l2pop import rpc as l2pop_rpc from neutron.plugins.ml2.drivers.openvswitch.agent.common import constants from neutron.plugins.ml2.drivers.openvswitch.agent import ovs_neutron_agent \ as ovs_agent from neutron.tests import base from neutron.tests.unit.plugins.ml2.drivers.openvswitch.agent \ import ovs_test_base NOTIFIER = 'neutron.plugins.ml2.rpc.AgentNotifierApi' OVS_LINUX_KERN_VERS_WITHOUT_VXLAN = "3.12.0" FAKE_MAC = '00:11:22:33:44:55' FAKE_IP1 = '10.0.0.1' FAKE_IP2 = '10.0.0.2' FAKE_IP6 = '2001:db8:42:42::10' TEST_PORT_ID1 = 'port-id-1' TEST_PORT_ID2 = 'port-id-2' TEST_PORT_ID3 = 'port-id-3' TEST_NETWORK_ID1 = 'net-id-1' TEST_NETWORK_ID2 = 'net-id-2' DEVICE_OWNER_COMPUTE = n_const.DEVICE_OWNER_COMPUTE_PREFIX + 'fake' class FakeVif(object): ofport = 99 port_name = 'name' vif_mac = 'aa:bb:cc:11:22:33' class MockFixedIntervalLoopingCall(object): def __init__(self, f): self.f = f def start(self, interval=0): self.f() class ValidateTunnelTypes(ovs_test_base.OVSAgentConfigTestBase): def setUp(self): super(ValidateTunnelTypes, self).setUp() self.mock_validate_local_ip = mock.patch.object( self.mod_agent, 'validate_local_ip').start() def test_validate_tunnel_types_succeeds(self): cfg.CONF.set_override('local_ip', '10.10.10.10', group='OVS') cfg.CONF.set_override('tunnel_types', [p_const.TYPE_GRE], group='AGENT') self.mod_agent.validate_tunnel_config(cfg.CONF.AGENT.tunnel_types, cfg.CONF.OVS.local_ip) self.mock_validate_local_ip.assert_called_once_with('10.10.10.10') def test_validate_tunnel_types_fails_for_invalid_tunnel_type(self): cfg.CONF.set_override('local_ip', '10.10.10.10', group='OVS') cfg.CONF.set_override('tunnel_types', ['foobar'], group='AGENT') with testtools.ExpectedException(SystemExit): self.mod_agent.validate_tunnel_config(cfg.CONF.AGENT.tunnel_types, cfg.CONF.OVS.local_ip) class TestOvsNeutronAgent(object): def setUp(self): super(TestOvsNeutronAgent, self).setUp() notifier_p = mock.patch(NOTIFIER) notifier_cls = notifier_p.start() self.notifier = mock.Mock() notifier_cls.return_value = self.notifier systemd_patch = mock.patch('oslo_service.systemd.notify_once') self.systemd_notify = systemd_patch.start() cfg.CONF.set_default('firewall_driver', 'neutron.agent.firewall.NoopFirewallDriver', group='SECURITYGROUP') cfg.CONF.set_default('quitting_rpc_timeout', 10, 'AGENT') cfg.CONF.set_default('prevent_arp_spoofing', False, 'AGENT') cfg.CONF.set_default('local_ip', '127.0.0.1', 'OVS') mock.patch( 'neutron.agent.common.ovs_lib.OVSBridge.get_ports_attributes', return_value=[]).start() mock.patch('neutron.agent.common.ovs_lib.BaseOVS.config', new_callable=mock.PropertyMock, return_value={}).start() self.agent = self._make_agent() self.agent.sg_agent = mock.Mock() def _make_agent(self): with mock.patch.object(self.mod_agent.OVSNeutronAgent, 'setup_integration_br'),\ mock.patch.object(self.mod_agent.OVSNeutronAgent, 'setup_ancillary_bridges', return_value=[]),\ mock.patch('neutron.agent.linux.utils.get_interface_mac', return_value='00:00:00:00:00:01'),\ mock.patch( 'neutron.agent.common.ovs_lib.BaseOVS.get_bridges'),\ 
mock.patch('oslo_service.loopingcall.FixedIntervalLoopingCall', new=MockFixedIntervalLoopingCall),\ mock.patch( 'neutron.agent.common.ovs_lib.OVSBridge.' 'get_vif_ports', return_value=[]): agent = self.mod_agent.OVSNeutronAgent(self._bridge_classes(), cfg.CONF) agent.tun_br = self.br_tun_cls(br_name='br-tun') return agent def _mock_port_bound(self, ofport=None, new_local_vlan=None, old_local_vlan=None, db_get_val=None): port = mock.Mock() port.ofport = ofport net_uuid = 'my-net-uuid' fixed_ips = [{'subnet_id': 'my-subnet-uuid', 'ip_address': '1.1.1.1'}] if old_local_vlan is not None: self.agent.local_vlan_map[net_uuid] = ( self.mod_agent.LocalVLANMapping( old_local_vlan, None, None, None)) with mock.patch.object(self.agent, 'int_br', autospec=True) as int_br: int_br.db_get_val.return_value = db_get_val int_br.set_db_attribute.return_value = True needs_binding = self.agent.port_bound( port, net_uuid, 'local', None, None, fixed_ips, DEVICE_OWNER_COMPUTE, False) if db_get_val is None: self.assertEqual(0, int_br.set_db_attribute.call_count) self.assertFalse(needs_binding) else: vlan_mapping = {'net_uuid': net_uuid, 'network_type': 'local', 'physical_network': 'None'} int_br.set_db_attribute.assert_called_once_with( "Port", mock.ANY, "other_config", vlan_mapping) self.assertTrue(needs_binding) def test_datapath_type_system(self): # verify kernel datapath is default expected = constants.OVS_DATAPATH_SYSTEM self.assertEqual(expected, self.agent.int_br.datapath_type) def test_datapath_type_netdev(self): with mock.patch.object(self.mod_agent.OVSNeutronAgent, 'setup_integration_br'), \ mock.patch.object(self.mod_agent.OVSNeutronAgent, 'setup_ancillary_bridges', return_value=[]), \ mock.patch('neutron.agent.linux.utils.get_interface_mac', return_value='00:00:00:00:00:01'), \ mock.patch( 'neutron.agent.common.ovs_lib.BaseOVS.get_bridges'), \ mock.patch('oslo_service.loopingcall.FixedIntervalLoopingCall', new=MockFixedIntervalLoopingCall), \ mock.patch( 'neutron.agent.common.ovs_lib.OVSBridge.' 'get_vif_ports', return_value=[]), \ mock.patch('neutron.agent.common.ovs_lib.BaseOVS.config', new_callable=mock.PropertyMock, return_value={'datapath_types': ['netdev']}): # validate setting non default datapath expected = constants.OVS_DATAPATH_NETDEV cfg.CONF.set_override('datapath_type', expected, group='OVS') self.agent = self.mod_agent.OVSNeutronAgent(self._bridge_classes(), cfg.CONF) self.assertEqual(expected, self.agent.int_br.datapath_type) def test_agent_type_ovs(self): # verify agent_type is default expected = n_const.AGENT_TYPE_OVS self.assertEqual(expected, self.agent.agent_state['agent_type']) def test_agent_type_alt(self): with mock.patch.object(self.mod_agent.OVSNeutronAgent, 'setup_integration_br'),\ mock.patch.object(self.mod_agent.OVSNeutronAgent, 'setup_ancillary_bridges', return_value=[]), \ mock.patch('neutron.agent.linux.utils.get_interface_mac', return_value='00:00:00:00:00:01'), \ mock.patch( 'neutron.agent.common.ovs_lib.BaseOVS.get_bridges'), \ mock.patch('oslo_service.loopingcall.FixedIntervalLoopingCall', new=MockFixedIntervalLoopingCall), \ mock.patch( 'neutron.agent.common.ovs_lib.OVSBridge.' 
'get_vif_ports', return_value=[]): # validate setting non default agent_type expected = 'alt agent type' cfg.CONF.set_override('agent_type', expected, group='AGENT') self.agent = self.mod_agent.OVSNeutronAgent(self._bridge_classes(), cfg.CONF) self.assertEqual(expected, self.agent.agent_state['agent_type']) def _test_restore_local_vlan_maps(self, tag, segmentation_id='1'): port = mock.Mock() port.port_name = 'fake_port' net_uuid = 'fake_network_id' local_vlan_map = {'net_uuid': net_uuid, 'network_type': 'vlan', 'physical_network': 'fake_network'} if segmentation_id is not None: local_vlan_map['segmentation_id'] = segmentation_id # this is for the call inside get_vif_ports() get_interfaces = [{'name': port.port_name, 'ofport': '1', 'external_ids': { 'iface-id': '1', 'attached-mac': 'mac1'}}, {'name': 'invalid', 'ofport': ovs_lib.INVALID_OFPORT, 'external_ids': { 'iface-id': '2', 'attached-mac': 'mac2'}}, {'name': 'unassigned', 'ofport': ovs_lib.UNASSIGNED_OFPORT, 'external_ids': { 'iface-id': '3', 'attached-mac': 'mac3'}}] # this is for the call inside _restore_local_vlan_map() get_ports = [{'name': port.port_name, 'other_config': local_vlan_map, 'tag': tag}] with mock.patch.object(self.agent.int_br, 'get_ports_attributes', side_effect=[get_interfaces, get_ports]) as gpa: self.agent._restore_local_vlan_map() expected_hints = {} if tag: expected_hints[net_uuid] = tag self.assertEqual(expected_hints, self.agent._local_vlan_hints) # make sure invalid and unassigned ports were skipped gpa.assert_has_calls([ mock.call('Interface', columns=mock.ANY, if_exists=True), mock.call('Port', columns=mock.ANY, ports=['fake_port']) ]) def test_restore_local_vlan_map_with_device_has_tag(self): self._test_restore_local_vlan_maps(2) def test_restore_local_vlan_map_with_device_no_tag(self): self._test_restore_local_vlan_maps([]) def test_restore_local_vlan_map_no_segmentation_id(self): self._test_restore_local_vlan_maps(2, segmentation_id=None) def test_restore_local_vlan_map_segmentation_id_compat(self): self._test_restore_local_vlan_maps(2, segmentation_id='None') def test_check_agent_configurations_for_dvr_raises(self): self.agent.enable_distributed_routing = True self.agent.enable_tunneling = True self.agent.l2_pop = False self.assertRaises(ValueError, self.agent._check_agent_configurations) def test_check_agent_configurations_for_dvr(self): self.agent.enable_distributed_routing = True self.agent.enable_tunneling = True self.agent.l2_pop = True self.assertIsNone(self.agent._check_agent_configurations()) def test_check_agent_configurations_for_dvr_with_vlan(self): self.agent.enable_distributed_routing = True self.agent.enable_tunneling = False self.agent.l2_pop = False self.assertIsNone(self.agent._check_agent_configurations()) def test_port_bound_deletes_flows_for_valid_ofport(self): self._mock_port_bound(ofport=1, new_local_vlan=1, db_get_val={}) def test_port_bound_ignores_flows_for_invalid_ofport(self): self._mock_port_bound(ofport=-1, new_local_vlan=1, db_get_val={}) def test_port_bound_does_not_rewire_if_already_bound(self): self._mock_port_bound( ofport=-1, new_local_vlan=1, old_local_vlan=1, db_get_val={}) def test_port_bound_not_found(self): self._mock_port_bound(ofport=1, new_local_vlan=1, db_get_val=None) def _test_port_dead(self, cur_tag=None): port = mock.Mock() port.ofport = 1 with mock.patch.object(self.agent, 'int_br') as int_br: int_br.db_get_val.return_value = cur_tag self.agent.port_dead(port) if cur_tag is None or cur_tag == constants.DEAD_VLAN_TAG: 
self.assertFalse(int_br.set_db_attribute.called) self.assertFalse(int_br.drop_port.called) else: int_br.assert_has_calls([ mock.call.set_db_attribute("Port", mock.ANY, "tag", constants.DEAD_VLAN_TAG, log_errors=True), mock.call.drop_port(in_port=port.ofport), ]) def test_port_dead(self): self._test_port_dead() def test_port_dead_with_port_already_dead(self): self._test_port_dead(constants.DEAD_VLAN_TAG) def test_port_dead_with_valid_tag(self): self._test_port_dead(cur_tag=1) def mock_scan_ports(self, vif_port_set=None, registered_ports=None, updated_ports=None, port_tags_dict=None, sync=False): if port_tags_dict is None: # Because empty dicts evaluate as False. port_tags_dict = {} with mock.patch.object(self.agent.int_br, 'get_vif_port_set', return_value=vif_port_set),\ mock.patch.object(self.agent.int_br, 'get_port_tag_dict', return_value=port_tags_dict): return self.agent.scan_ports(registered_ports, sync, updated_ports) def test_scan_ports_returns_current_only_for_unchanged_ports(self): vif_port_set = set([1, 3]) registered_ports = set([1, 3]) expected = {'current': vif_port_set} actual = self.mock_scan_ports(vif_port_set, registered_ports) self.assertEqual(expected, actual) def test_scan_ports_returns_port_changes(self): vif_port_set = set([1, 3]) registered_ports = set([1, 2]) expected = dict(current=vif_port_set, added=set([3]), removed=set([2])) actual = self.mock_scan_ports(vif_port_set, registered_ports) self.assertEqual(expected, actual) def test_scan_ports_returns_port_changes_with_sync(self): vif_port_set = set([1, 3]) registered_ports = set([1, 2]) expected = dict(current=vif_port_set, added=vif_port_set, removed=set([2])) actual = self.mock_scan_ports(vif_port_set, registered_ports, sync=True) self.assertEqual(expected, actual) def _test_scan_ports_with_updated_ports(self, updated_ports): vif_port_set = set([1, 3, 4]) registered_ports = set([1, 2, 4]) expected = dict(current=vif_port_set, added=set([3]), removed=set([2]), updated=set([4])) actual = self.mock_scan_ports(vif_port_set, registered_ports, updated_ports) self.assertEqual(expected, actual) def test_scan_ports_finds_known_updated_ports(self): self._test_scan_ports_with_updated_ports(set([4])) def test_scan_ports_ignores_unknown_updated_ports(self): # the port '5' was not seen on current ports. 
Hence it has either # never been wired or already removed and should be ignored self._test_scan_ports_with_updated_ports(set([4, 5])) def test_scan_ports_ignores_updated_port_if_removed(self): vif_port_set = set([1, 3]) registered_ports = set([1, 2]) updated_ports = set([1, 2]) expected = dict(current=vif_port_set, added=set([3]), removed=set([2]), updated=set([1])) actual = self.mock_scan_ports(vif_port_set, registered_ports, updated_ports) self.assertEqual(expected, actual) def test_scan_ports_no_vif_changes_returns_updated_port_only(self): vif_port_set = set([1, 2, 3]) registered_ports = set([1, 2, 3]) updated_ports = set([2]) expected = dict(current=vif_port_set, updated=set([2])) actual = self.mock_scan_ports(vif_port_set, registered_ports, updated_ports) self.assertEqual(expected, actual) def _test_process_ports_events(self, events, registered_ports, ancillary_ports, expected_ports, expected_ancillary, updated_ports=None, ): with mock.patch.object(self.agent, 'check_changed_vlans', return_value=set()): devices_not_ready_yet = set() failed_devices = {'added': set(), 'removed': set()} failed_ancillary_devices = { 'added': set(), 'removed': set()} actual = self.agent.process_ports_events( events, registered_ports, ancillary_ports, devices_not_ready_yet, failed_devices, failed_ancillary_devices, updated_ports) self.assertEqual( (expected_ports, expected_ancillary, devices_not_ready_yet), actual) def test_process_ports_events_port_removed_and_added(self): port_id = 'f6f104bd-37c7-4f7b-9d70-53a6bb42728f' events = { 'removed': [{'ofport': 1, 'external_ids': {'iface-id': port_id, 'attached-mac': 'fa:16:3e:f6:1b:fb'}, 'name': 'qvof6f104bd-37'}], 'added': [{'ofport': 2, 'external_ids': {'iface-id': port_id, 'attached-mac': 'fa:16:3e:f6:1b:fb'}, 'name': 'qvof6f104bd-37'}] } registered_ports = {port_id} expected_ancillary = dict(current=set(), added=set(), removed=set()) # port was removed and then added expected_ports = dict(current={port_id}, added={port_id}, removed=set()) with mock.patch.object(ovs_lib.BaseOVS, "port_exists", return_value=True): self._test_process_ports_events(events.copy(), registered_ports, set(), expected_ports, expected_ancillary) # port was added and then removed expected_ports = dict(current=set(), added=set(), removed={port_id}) with mock.patch.object(ovs_lib.BaseOVS, "port_exists", return_value=False): self._test_process_ports_events(events.copy(), registered_ports, set(), expected_ports, expected_ancillary) def test_process_ports_events_returns_current_for_unchanged_ports(self): events = {'added': [], 'removed': []} registered_ports = {1, 3} ancillary_ports = {2, 5} expected_ports = {'current': registered_ports, 'added': set(), 'removed': set()} expected_ancillary = {'current': ancillary_ports, 'added': set(), 'removed': set()} self._test_process_ports_events(events, registered_ports, ancillary_ports, expected_ports, expected_ancillary) def test_process_port_events_no_vif_changes_return_updated_port_only(self): events = {'added': [], 'removed': []} registered_ports = {1, 2, 3} updated_ports = {2} expected_ports = dict(current=registered_ports, updated={2}, added=set(), removed=set()) expected_ancillary = dict(current=set(), added=set(), removed=set()) self._test_process_ports_events(events, registered_ports, set(), expected_ports, expected_ancillary, updated_ports) def test_process_port_events_ignores_removed_port_if_never_added(self): events = {'added': [], 'removed': [{'name': 'port2', 'ofport': 2, 'external_ids': {'attached-mac': 'test-mac'}}]} registered_ports = 
{1} expected_ports = dict(current=registered_ports, added=set(), removed=set()) expected_ancillary = dict(current=set(), added=set(), removed=set()) devices_not_ready_yet = set() with mock.patch.object(self.agent.int_br, 'portid_from_external_ids', side_effect=[2]), \ mock.patch.object(self.agent, 'check_changed_vlans', return_value=set()): failed_devices = {'added': set(), 'removed': set()} failed_ancillary_devices = { 'added': set(), 'removed': set()} ports_not_ready_yet = set() actual = self.agent.process_ports_events( events, registered_ports, set(), ports_not_ready_yet, failed_devices, failed_ancillary_devices) self.assertEqual( (expected_ports, expected_ancillary, devices_not_ready_yet), actual) def test_process_port_events_port_not_ready_yet(self): events = {'added': [{'name': 'port5', 'ofport': [], 'external_ids': {'attached-mac': 'test-mac'}}], 'removed': []} old_devices_not_ready = {'port4'} registered_ports = set([1, 2, 3]) expected_ports = dict(current=set([1, 2, 3, 4]), added=set([4]), removed=set()) self.agent.ancillary_brs = [] expected_ancillary = dict(current=set(), added=set(), removed=set()) with mock.patch.object(self.agent.int_br, 'portid_from_external_ids', side_effect=[5, 4]), \ mock.patch.object(self.agent, 'check_changed_vlans', return_value=set()), \ mock.patch.object(self.agent.int_br, 'get_ports_attributes', return_value=[{'name': 'port4', 'ofport': 4, 'external_ids': { 'attached-mac': 'mac4'}}]): expected_devices_not_ready = {'port5'} failed_devices = {'added': set(), 'removed': set()} failed_ancillary_devices = { 'added': set(), 'removed': set()} actual = self.agent.process_ports_events( events, registered_ports, set(), old_devices_not_ready, failed_devices, failed_ancillary_devices) self.assertEqual( (expected_ports, expected_ancillary, expected_devices_not_ready), actual) def _test_process_port_events_with_updated_ports(self, updated_ports): events = {'added': [{'name': 'port3', 'ofport': 3, 'external_ids': {'attached-mac': 'test-mac'}}, {'name': 'qg-port2', 'ofport': 6, 'external_ids': {'attached-mac': 'test-mac'}}], 'removed': [{'name': 'port2', 'ofport': 2, 'external_ids': {'attached-mac': 'test-mac'}}, {'name': 'qg-port1', 'ofport': 5, 'external_ids': {'attached-mac': 'test-mac'}}]} registered_ports = {1, 2, 4} ancillary_ports = {5, 8} expected_ports = dict(current={1, 3, 4}, added={3}, removed={2}) if updated_ports: expected_ports['updated'] = updated_ports expected_ancillary = dict(current={6, 8}, added={6}, removed={5}) ancillary_bridge = mock.Mock() ancillary_bridge.get_vif_port_set.return_value = {5, 6, 8} self.agent.ancillary_brs = [ancillary_bridge] with mock.patch.object(self.agent.int_br, 'portid_from_external_ids', side_effect=[3, 6, 2, 5]), \ mock.patch.object(self.agent, 'check_changed_vlans', return_value=set()): devices_not_ready_yet = set() failed_devices = {'added': set(), 'removed': set()} failed_ancillary_devices = { 'added': set(), 'removed': set()} actual = self.agent.process_ports_events( events, registered_ports, ancillary_ports, devices_not_ready_yet, failed_devices, failed_ancillary_devices, updated_ports) self.assertEqual( (expected_ports, expected_ancillary, devices_not_ready_yet), actual) def test_process_port_events_returns_port_changes(self): self._test_process_port_events_with_updated_ports(set()) def test_process_port_events_finds_known_updated_ports(self): self._test_process_port_events_with_updated_ports({4}) def test_process_port_events_ignores_unknown_updated_ports(self): # the port '10' was not seen on current ports. 
Hence it has either # never been wired or already removed and should be ignored self._test_process_port_events_with_updated_ports({4, 10}) def test_process_port_events_ignores_updated_port_if_removed(self): self._test_process_port_events_with_updated_ports({4, 5}) def test_update_ports_returns_changed_vlan(self): br = self.br_int_cls('br-int') mac = "ca:fe:de:ad:be:ef" port = ovs_lib.VifPort(1, 1, 1, mac, br) lvm = self.mod_agent.LocalVLANMapping( 1, '1', None, 1, {port.vif_id: port}) local_vlan_map = {'1': lvm} vif_port_set = set([1, 3]) registered_ports = set([1, 2]) port_tags_dict = {1: []} expected = dict( added=set([3]), current=vif_port_set, removed=set([2]), updated=set([1]) ) with mock.patch.dict(self.agent.local_vlan_map, local_vlan_map),\ mock.patch.object(self.agent, 'tun_br', autospec=True): actual = self.mock_scan_ports( vif_port_set, registered_ports, port_tags_dict=port_tags_dict) self.assertEqual(expected, actual) def test_update_retries_map_and_remove_devs_not_to_retry(self): failed_devices_retries_map = { 'device_not_to_retry': constants.MAX_DEVICE_RETRIES, 'device_to_retry': 2, 'ancillary_not_to_retry': constants.MAX_DEVICE_RETRIES, 'ancillary_to_retry': 1} failed_devices = { 'added': set(['device_not_to_retry']), 'removed': set(['device_to_retry', 'new_device'])} failed_ancillary_devices = {'added': set(['ancillary_to_retry']), 'removed': set(['ancillary_not_to_retry'])} expected_failed_devices_retries_map = { 'device_to_retry': 3, 'new_device': 1, 'ancillary_to_retry': 2} (new_failed_devices_retries_map, devices_not_to_retry, ancillary_devices_not_to_retry) = self.agent._get_devices_not_to_retry( failed_devices, failed_ancillary_devices, failed_devices_retries_map) self.agent._remove_devices_not_to_retry( failed_devices, failed_ancillary_devices, devices_not_to_retry, ancillary_devices_not_to_retry) self.assertIn('device_to_retry', failed_devices['removed']) self.assertNotIn('device_not_to_retry', failed_devices['added']) self.assertEqual( expected_failed_devices_retries_map, new_failed_devices_retries_map) def test_add_port_tag_info(self): self.agent.local_vlan_map["net1"] = mock.Mock() self.agent.local_vlan_map["net1"].vlan = "1" ovs_db_list = [{'name': 'tap1', 'tag': [], 'other_config': {'segmentation_id': '1'}}, {'name': 'tap2', 'tag': [], 'other_config': {}}, {'name': 'tap3', 'tag': [], 'other_config': None}] vif_port1 = mock.Mock() vif_port1.port_name = 'tap1' vif_port2 = mock.Mock() vif_port2.port_name = 'tap2' vif_port3 = mock.Mock() vif_port3.port_name = 'tap3' port_details = [ {'network_id': 'net1', 'vif_port': vif_port1}, {'network_id': 'net1', 'vif_port': vif_port2}, {'network_id': 'net1', 'vif_port': vif_port3}] with mock.patch.object(self.agent, 'int_br') as int_br: int_br.get_ports_attributes.return_value = ovs_db_list self.agent._add_port_tag_info(port_details) set_db_attribute_calls = \ [mock.call.set_db_attribute("Port", "tap1", "other_config", {"segmentation_id": "1", "tag": "1"}), mock.call.set_db_attribute("Port", "tap2", "other_config", {"tag": "1"}), mock.call.set_db_attribute("Port", "tap3", "other_config", {"tag": "1"})] int_br.assert_has_calls(set_db_attribute_calls, any_order=True) def test_bind_devices(self): devices_up = ['tap1'] devices_down = ['tap2'] self.agent.local_vlan_map["net1"] = mock.Mock() ovs_db_list = [{'name': 'tap1', 'tag': []}, {'name': 'tap2', 'tag': []}] vif_port1 = mock.Mock() vif_port1.port_name = 'tap1' vif_port2 = mock.Mock() vif_port2.port_name = 'tap2' port_details = [ {'network_id': 'net1', 'vif_port': vif_port1,
'device': devices_up[0], 'admin_state_up': True}, {'network_id': 'net1', 'vif_port': vif_port2, 'device': devices_down[0], 'admin_state_up': False}] with mock.patch.object( self.agent.plugin_rpc, 'update_device_list', return_value={'devices_up': devices_up, 'devices_down': devices_down, 'failed_devices_up': [], 'failed_devices_down': []}) as update_devices, \ mock.patch.object(self.agent, 'int_br') as int_br: int_br.get_ports_attributes.return_value = ovs_db_list self.agent._bind_devices(port_details) update_devices.assert_called_once_with(mock.ANY, devices_up, devices_down, mock.ANY, mock.ANY) def _test_arp_spoofing(self, enable_prevent_arp_spoofing): self.agent.prevent_arp_spoofing = enable_prevent_arp_spoofing ovs_db_list = [{'name': 'fake_device', 'tag': []}] self.agent.local_vlan_map = { 'fake_network': ovs_agent.LocalVLANMapping(1, None, None, 1)} vif_port = mock.Mock() vif_port.port_name = 'fake_device' vif_port.ofport = 1 need_binding_ports = [{'network_id': 'fake_network', 'vif_port': vif_port, 'device': 'fake_device', 'admin_state_up': True}] with mock.patch.object( self.agent.plugin_rpc, 'update_device_list', return_value={'devices_up': [], 'devices_down': [], 'failed_devices_up': [], 'failed_devices_down': []}), \ mock.patch.object(self.agent, 'int_br') as int_br, \ mock.patch.object( self.agent, 'setup_arp_spoofing_protection') as setup_arp: int_br.get_ports_attributes.return_value = ovs_db_list self.agent._bind_devices(need_binding_ports) self.assertEqual(enable_prevent_arp_spoofing, setup_arp.called) def test_setup_arp_spoofing_protection_enable(self): self._test_arp_spoofing(True) def test_setup_arp_spoofing_protection_disabled(self): self._test_arp_spoofing(False) def _mock_treat_devices_added_updated(self, details, port, func_name): """Mock treat devices added or updated. 
:param details: the details to return for the device :param port: the port that get_vif_port_by_id should return :param func_name: the function that should be called :returns: whether the named function was called """ with mock.patch.object(self.agent.plugin_rpc, 'get_devices_details_list_and_failed_devices', return_value={'devices': [details], 'failed_devices': []}),\ mock.patch.object(self.agent.int_br, 'get_vifs_by_ids', return_value={details['device']: port}),\ mock.patch.object(self.agent.plugin_rpc, 'update_device_list', return_value={'devices_up': [], 'devices_down': details, 'failed_devices_up': [], 'failed_devices_down': []}),\ mock.patch.object(self.agent.int_br, 'get_port_tag_dict', return_value={}),\ mock.patch.object(self.agent, func_name) as func: skip_devs, need_bound_devices, insecure_ports, _ = ( self.agent.treat_devices_added_or_updated([], False)) # The function should not raise self.assertFalse(skip_devs) return func.called def test_treat_devices_added_updated_ignores_invalid_ofport(self): port = mock.Mock() port.ofport = -1 self.assertFalse(self._mock_treat_devices_added_updated( mock.MagicMock(), port, 'port_dead')) def test_treat_devices_added_updated_marks_unknown_port_as_dead(self): port = mock.Mock() port.ofport = 1 self.assertTrue(self._mock_treat_devices_added_updated( mock.MagicMock(), port, 'port_dead')) def test_treat_devices_added_does_not_process_missing_port(self): with mock.patch.object( self.agent.plugin_rpc, 'get_devices_details_list_and_failed_devices') as get_dev_fn,\ mock.patch.object(self.agent.int_br, 'get_vif_port_by_id', return_value=None): self.assertFalse(get_dev_fn.called) def test_treat_devices_added_updated_updates_known_port(self): details = mock.MagicMock() details.__contains__.side_effect = lambda x: True self.assertTrue(self._mock_treat_devices_added_updated( details, mock.Mock(), 'treat_vif_port')) def test_treat_devices_added_updated_sends_vif_port_into_extension_manager( self, *args): details = mock.MagicMock() details.__contains__.side_effect = lambda x: True port = mock.MagicMock() def fake_handle_port(context, port): self.assertIn('vif_port', port) with mock.patch.object(self.agent.plugin_rpc, 'get_devices_details_list_and_failed_devices', return_value={'devices': [details], 'failed_devices': []}),\ mock.patch.object(self.agent.ext_manager, 'handle_port', new=fake_handle_port),\ mock.patch.object(self.agent.int_br, 'get_vifs_by_ids', return_value={details['device']: port}),\ mock.patch.object(self.agent, 'treat_vif_port', return_value=False): self.agent.treat_devices_added_or_updated([], False) def test_treat_devices_added_updated_skips_if_port_not_found(self): dev_mock = mock.MagicMock() dev_mock.__getitem__.return_value = 'the_skipped_one' with mock.patch.object(self.agent.plugin_rpc, 'get_devices_details_list_and_failed_devices', return_value={'devices': [dev_mock], 'failed_devices': []}),\ mock.patch.object(self.agent.int_br, 'get_port_tag_dict', return_value={}),\ mock.patch.object(self.agent.int_br, 'get_vifs_by_ids', return_value={}),\ mock.patch.object(self.agent, 'treat_vif_port') as treat_vif_port: skip_devs = self.agent.treat_devices_added_or_updated([], False) # The function should return False for resync and no device # processed self.assertEqual((['the_skipped_one'], [], [], set()), skip_devs) self.assertFalse(treat_vif_port.called) def test_treat_devices_added_failed_devices(self): dev_mock = 'the_failed_one' with mock.patch.object(self.agent.plugin_rpc, 'get_devices_details_list_and_failed_devices', 
return_value={'devices': [], 'failed_devices': [dev_mock]}),\ mock.patch.object(self.agent.int_br, 'get_vifs_by_ids', return_value={}),\ mock.patch.object(self.agent, 'treat_vif_port') as treat_vif_port: failed_devices = {'added': set(), 'removed': set()} (_, _, _, failed_devices['added']) = ( self.agent.treat_devices_added_or_updated([], False)) # The function should return False for resync and no device # processed self.assertEqual(set([dev_mock]), failed_devices.get('added')) self.assertFalse(treat_vif_port.called) def test_treat_devices_added_updated_put_port_down(self): fake_details_dict = {'admin_state_up': False, 'port_id': 'xxx', 'device': 'xxx', 'network_id': 'yyy', 'physical_network': 'foo', 'segmentation_id': 'bar', 'network_type': 'baz', 'fixed_ips': [{'subnet_id': 'my-subnet-uuid', 'ip_address': '1.1.1.1'}], 'device_owner': DEVICE_OWNER_COMPUTE, 'port_security_enabled': True } with mock.patch.object(self.agent.plugin_rpc, 'get_devices_details_list_and_failed_devices', return_value={'devices': [fake_details_dict], 'failed_devices': []}),\ mock.patch.object(self.agent.int_br, 'get_vifs_by_ids', return_value={'xxx': mock.MagicMock()}),\ mock.patch.object(self.agent.int_br, 'get_port_tag_dict', return_value={}),\ mock.patch.object(self.agent, 'treat_vif_port') as treat_vif_port: skip_devs, need_bound_devices, insecure_ports, _ = ( self.agent.treat_devices_added_or_updated([], False)) # The function should return False for resync self.assertFalse(skip_devs) self.assertTrue(treat_vif_port.called) def _mock_treat_devices_removed(self, port_exists): details = dict(exists=port_exists) with mock.patch.object(self.agent.plugin_rpc, 'update_device_list', return_value={'devices_up': [], 'devices_down': details, 'failed_devices_up': [], 'failed_devices_down': []}): with mock.patch.object(self.agent, 'port_unbound') as port_unbound: self.assertFalse(self.agent.treat_devices_removed([{}])) self.assertTrue(port_unbound.called) def test_treat_devices_removed_unbinds_port(self): self._mock_treat_devices_removed(True) def test_treat_devices_removed_ignores_missing_port(self): self._mock_treat_devices_removed(False) def test_treat_devices_removed_failed_devices(self): dev_mock = 'the_failed_one' with mock.patch.object(self.agent.plugin_rpc, 'update_device_list', return_value={'devices_up': [], 'devices_down': [], 'failed_devices_up': [], 'failed_devices_down': [ dev_mock]}): failed_devices = {'added': set(), 'removed': set()} failed_devices['removed'] = self.agent.treat_devices_removed([{}]) self.assertEqual(set([dev_mock]), failed_devices.get('removed')) def test_treat_devices_removed_ext_delete_port(self): port_id = 'fake-id' m_delete = mock.patch.object(self.agent.ext_manager, 'delete_port') m_rpc = mock.patch.object(self.agent.plugin_rpc, 'update_device_list', return_value={'devices_up': [], 'devices_down': [], 'failed_devices_up': [], 'failed_devices_down': []}) m_unbound = mock.patch.object(self.agent, 'port_unbound') with m_delete as delete, m_rpc, m_unbound: self.agent.treat_devices_removed([port_id]) delete.assert_called_with(mock.ANY, {'port_id': port_id}) def test_bind_port_with_missing_network(self): vif_port = mock.Mock() vif_port.name.return_value = 'port' self.agent._bind_devices([{'network_id': 'non-existent', 'vif_port': vif_port}]) def _test_process_network_ports(self, port_info): failed_devices = {'added': set(), 'removed': set()} with mock.patch.object(self.agent.sg_agent, "setup_port_filters") as setup_port_filters,\ mock.patch.object( self.agent, 
"treat_devices_added_or_updated", return_value=( [], [], [], failed_devices['added'])) as device_added_updated,\ mock.patch.object(self.agent.int_br, "get_ports_attributes", return_value=[]),\ mock.patch.object(self.agent, "treat_devices_removed", return_value=( failed_devices[ 'removed'])) as device_removed: self.assertEqual( failed_devices, self.agent.process_network_ports(port_info, False)) setup_port_filters.assert_called_once_with( port_info.get('added', set()), port_info.get('updated', set())) devices_added_updated = (port_info.get('added', set()) | port_info.get('updated', set())) if devices_added_updated: device_added_updated.assert_called_once_with( devices_added_updated, False) if port_info.get('removed', set()): device_removed.assert_called_once_with(port_info['removed']) def test_process_network_ports(self): self._test_process_network_ports( {'current': set(['tap0']), 'removed': set(['eth0']), 'added': set(['eth1'])}) def test_process_network_port_with_updated_ports(self): self._test_process_network_ports( {'current': set(['tap0', 'tap1']), 'updated': set(['tap1', 'eth1']), 'removed': set(['eth0']), 'added': set(['eth1'])}) def test_process_network_port_with_empty_port(self): self._test_process_network_ports({}) def test_process_network_ports_with_insecure_ports(self): port_info = {'current': set(['tap0', 'tap1']), 'updated': set(['tap1']), 'removed': set([]), 'added': set(['eth1'])} failed_dev = {'added': set(), 'removed': set()} with mock.patch.object(self.agent.sg_agent, "setup_port_filters") as setup_port_filters,\ mock.patch.object( self.agent, "treat_devices_added_or_updated", return_value=( [], [], ['eth1'], failed_dev['added'])) as device_added_updated: self.assertEqual( failed_dev, self.agent.process_network_ports(port_info, False)) device_added_updated.assert_called_once_with( set(['eth1', 'tap1']), False) setup_port_filters.assert_called_once_with( set(), port_info.get('updated', set())) def test_hybrid_plug_flag_based_on_firewall(self): cfg.CONF.set_default( 'firewall_driver', 'neutron.agent.firewall.NoopFirewallDriver', group='SECURITYGROUP') agt = self._make_agent() self.assertFalse(agt.agent_state['configurations']['ovs_hybrid_plug']) cfg.CONF.set_default( 'firewall_driver', 'neutron.agent.linux.openvswitch_firewall.OVSFirewallDriver', group='SECURITYGROUP') with mock.patch('neutron.agent.linux.openvswitch_firewall.' 'OVSFirewallDriver.initialize_bridge'): agt = self._make_agent() self.assertFalse(agt.agent_state['configurations']['ovs_hybrid_plug']) cfg.CONF.set_default( 'firewall_driver', 'neutron.agent.linux.iptables_firewall.' 'OVSHybridIptablesFirewallDriver', group='SECURITYGROUP') with mock.patch('neutron.agent.linux.iptables_firewall.' 
'IptablesFirewallDriver._populate_initial_zone_map'): agt = self._make_agent() self.assertTrue(agt.agent_state['configurations']['ovs_hybrid_plug']) def test_report_state(self): with mock.patch.object(self.agent.state_rpc, "report_state") as report_st: self.agent.int_br_device_count = 5 self.systemd_notify.assert_not_called() self.agent._report_state() report_st.assert_called_with(self.agent.context, self.agent.agent_state, True) self.systemd_notify.assert_called_once_with() self.systemd_notify.reset_mock() self.assertNotIn("start_flag", self.agent.agent_state) self.assertEqual( self.agent.agent_state["configurations"]["devices"], self.agent.int_br_device_count ) self.agent._report_state() report_st.assert_called_with(self.agent.context, self.agent.agent_state, True) self.systemd_notify.assert_not_called() def test_report_state_fail(self): with mock.patch.object(self.agent.state_rpc, "report_state") as report_st: report_st.side_effect = Exception() self.agent._report_state() report_st.assert_called_with(self.agent.context, self.agent.agent_state, True) self.agent._report_state() report_st.assert_called_with(self.agent.context, self.agent.agent_state, True) self.systemd_notify.assert_not_called() def test_report_state_revived(self): with mock.patch.object(self.agent.state_rpc, "report_state") as report_st: report_st.return_value = n_const.AGENT_REVIVED self.agent._report_state() self.assertTrue(self.agent.fullsync) def test_port_update(self): port = {"id": TEST_PORT_ID1, "network_id": TEST_NETWORK_ID1, "admin_state_up": False} self.agent.port_update("unused_context", port=port, network_type="vlan", segmentation_id="1", physical_network="physnet") self.assertEqual(set([TEST_PORT_ID1]), self.agent.updated_ports) def test_port_delete_after_update(self): """Make sure a port is not marked for delete and update.""" port = {'id': TEST_PORT_ID1} self.agent.port_update(context=None, port=port) self.agent.port_delete(context=None, port_id=port['id']) self.assertEqual(set(), self.agent.updated_ports) self.assertEqual(set([port['id']]), self.agent.deleted_ports) def test_process_deleted_ports_cleans_network_ports(self): self.agent._update_port_network(TEST_PORT_ID1, TEST_NETWORK_ID1) self.agent.port_delete(context=None, port_id=TEST_PORT_ID1) self.agent.sg_agent = mock.Mock() self.agent.int_br = mock.Mock() self.agent.process_deleted_ports(port_info={}) self.assertEqual(set(), self.agent.network_ports[TEST_NETWORK_ID1]) def test_network_update(self): """Network update marks port for update. """ network = {'id': TEST_NETWORK_ID1} port = {'id': TEST_PORT_ID1, 'network_id': network['id']} self.agent._update_port_network(port['id'], port['network_id']) self.agent.network_update(context=None, network=network) self.assertEqual(set([port['id']]), self.agent.updated_ports) def test_network_update_outoforder(self): """Network update arrives later than port_delete. But the main agent loop still didn't process the ports, so we ensure the port is not marked for update. 
""" network = {'id': TEST_NETWORK_ID1} port = {'id': TEST_PORT_ID1, 'network_id': network['id']} self.agent._update_port_network(port['id'], port['network_id']) self.agent.port_delete(context=None, port_id=port['id']) self.agent.network_update(context=None, network=network) self.assertEqual(set(), self.agent.updated_ports) def test_update_port_network(self): """Ensure ports are associated and moved across networks correctly.""" self.agent._update_port_network(TEST_PORT_ID1, TEST_NETWORK_ID1) self.agent._update_port_network(TEST_PORT_ID2, TEST_NETWORK_ID1) self.agent._update_port_network(TEST_PORT_ID3, TEST_NETWORK_ID2) self.agent._update_port_network(TEST_PORT_ID1, TEST_NETWORK_ID2) self.assertEqual(set([TEST_PORT_ID2]), self.agent.network_ports[TEST_NETWORK_ID1]) self.assertEqual(set([TEST_PORT_ID1, TEST_PORT_ID3]), self.agent.network_ports[TEST_NETWORK_ID2]) def test_port_delete(self): vif = FakeVif() with mock.patch.object(self.agent, 'int_br') as int_br: int_br.get_vif_by_port_id.return_value = vif.port_name int_br.get_vif_port_by_id.return_value = vif self.agent.port_delete("unused_context", port_id='id') self.agent.process_deleted_ports(port_info={}) # the main things we care about are that it gets put in the # dead vlan and gets blocked int_br.set_db_attribute.assert_any_call( 'Port', vif.port_name, 'tag', constants.DEAD_VLAN_TAG, log_errors=False) int_br.drop_port.assert_called_once_with(in_port=vif.ofport) def test_port_delete_removed_port(self): with mock.patch.object(self.agent, 'int_br') as int_br: self.agent.port_delete("unused_context", port_id='id') # if it was removed from the bridge, we shouldn't be processing it self.agent.process_deleted_ports(port_info={'removed': {'id', }}) self.assertFalse(int_br.set_db_attribute.called) self.assertFalse(int_br.drop_port.called) def _test_setup_physical_bridges(self, port_exists=False): with mock.patch.object(ip_lib.IPDevice, "exists") as devex_fn,\ mock.patch.object(sys, "exit"),\ mock.patch.object(utils, "execute"),\ mock.patch.object(self.agent, 'br_phys_cls') as phys_br_cls,\ mock.patch.object(self.agent, 'int_br') as int_br: devex_fn.return_value = True parent = mock.MagicMock() phys_br = phys_br_cls() parent.attach_mock(phys_br_cls, 'phys_br_cls') parent.attach_mock(phys_br, 'phys_br') parent.attach_mock(int_br, 'int_br') if port_exists: phys_br.get_port_ofport.return_value = "phy_ofport" int_br.get_port_ofport.return_value = "int_ofport" else: phys_br.add_patch_port.return_value = "phy_ofport" int_br.add_patch_port.return_value = "int_ofport" phys_br.port_exists.return_value = port_exists int_br.port_exists.return_value = port_exists self.agent.setup_physical_bridges({"physnet1": "br-eth"}) expected_calls = [ mock.call.phys_br_cls('br-eth'), mock.call.phys_br.create(), mock.call.phys_br.set_secure_mode(), mock.call.phys_br.setup_controllers(mock.ANY), mock.call.phys_br.setup_default_table(), mock.call.int_br.db_get_val('Interface', 'int-br-eth', 'type'), # Have to use __getattr__ here to avoid mock._Call.__eq__ # method being called mock.call.int_br.db_get_val().__getattr__('__eq__')('veth'), mock.call.int_br.port_exists('int-br-eth'), ] if port_exists: expected_calls += [ mock.call.int_br.get_port_ofport('int-br-eth'), ] else: expected_calls += [ mock.call.int_br.add_patch_port( 'int-br-eth', constants.NONEXISTENT_PEER), ] expected_calls += [ mock.call.phys_br.port_exists('phy-br-eth'), ] if port_exists: expected_calls += [ mock.call.phys_br.get_port_ofport('phy-br-eth'), ] else: expected_calls += [ 
mock.call.phys_br.add_patch_port( 'phy-br-eth', constants.NONEXISTENT_PEER), ] expected_calls += [ mock.call.int_br.drop_port(in_port='int_ofport'), mock.call.phys_br.drop_port(in_port='phy_ofport'), mock.call.int_br.set_db_attribute('Interface', 'int-br-eth', 'options', {'peer': 'phy-br-eth'}), mock.call.phys_br.set_db_attribute('Interface', 'phy-br-eth', 'options', {'peer': 'int-br-eth'}), ] parent.assert_has_calls(expected_calls) self.assertEqual("int_ofport", self.agent.int_ofports["physnet1"]) self.assertEqual("phy_ofport", self.agent.phys_ofports["physnet1"]) def test_setup_physical_bridges(self): self._test_setup_physical_bridges() def test_setup_physical_bridges_port_exists(self): self._test_setup_physical_bridges(port_exists=True) def test_setup_physical_bridges_using_veth_interconnection(self): self.agent.use_veth_interconnection = True with mock.patch.object(ip_lib.IPDevice, "exists") as devex_fn,\ mock.patch.object(sys, "exit"),\ mock.patch.object(utils, "execute") as utilsexec_fn,\ mock.patch.object(self.agent, 'br_phys_cls') as phys_br_cls,\ mock.patch.object(self.agent, 'int_br') as int_br,\ mock.patch.object(ip_lib.IPWrapper, "add_veth") as addveth_fn,\ mock.patch.object(ip_lib.IpLinkCommand, "delete") as linkdel_fn,\ mock.patch.object(ip_lib.IpLinkCommand, "set_up"),\ mock.patch.object(ip_lib.IpLinkCommand, "set_mtu"),\ mock.patch.object(ovs_lib.BaseOVS, "get_bridges") as get_br_fn: devex_fn.return_value = True parent = mock.MagicMock() parent.attach_mock(utilsexec_fn, 'utils_execute') parent.attach_mock(linkdel_fn, 'link_delete') parent.attach_mock(addveth_fn, 'add_veth') addveth_fn.return_value = (ip_lib.IPDevice("int-br-eth1"), ip_lib.IPDevice("phy-br-eth1")) phys_br = phys_br_cls() phys_br.add_port.return_value = "phys_veth_ofport" int_br.add_port.return_value = "int_veth_ofport" get_br_fn.return_value = ["br-eth"] self.agent.setup_physical_bridges({"physnet1": "br-eth"}) expected_calls = [mock.call.link_delete(), mock.call.utils_execute(['udevadm', 'settle', '--timeout=10']), mock.call.add_veth('int-br-eth', 'phy-br-eth')] parent.assert_has_calls(expected_calls, any_order=False) self.assertEqual("int_veth_ofport", self.agent.int_ofports["physnet1"]) self.assertEqual("phys_veth_ofport", self.agent.phys_ofports["physnet1"]) int_br.add_port.assert_called_with("int-br-eth") phys_br.add_port.assert_called_with("phy-br-eth") def _test_setup_physical_bridges_change_from_veth_to_patch_conf( self, port_exists=False): with mock.patch.object(sys, "exit"),\ mock.patch.object(utils, "execute"),\ mock.patch.object(self.agent, 'br_phys_cls') as phys_br_cls,\ mock.patch.object(self.agent, 'int_br') as int_br,\ mock.patch.object(self.agent.int_br, 'db_get_val', return_value='veth'): phys_br = phys_br_cls() parent = mock.MagicMock() parent.attach_mock(phys_br_cls, 'phys_br_cls') parent.attach_mock(phys_br, 'phys_br') parent.attach_mock(int_br, 'int_br') if port_exists: phys_br.get_port_ofport.return_value = "phy_ofport" int_br.get_port_ofport.return_value = "int_ofport" else: phys_br.add_patch_port.return_value = "phy_ofport" int_br.add_patch_port.return_value = "int_ofport" phys_br.port_exists.return_value = port_exists int_br.port_exists.return_value = port_exists self.agent.setup_physical_bridges({"physnet1": "br-eth"}) expected_calls = [ mock.call.phys_br_cls('br-eth'), mock.call.phys_br.create(), mock.call.phys_br.set_secure_mode(), mock.call.phys_br.setup_controllers(mock.ANY), mock.call.phys_br.setup_default_table(), mock.call.int_br.delete_port('int-br-eth'), 
mock.call.phys_br.delete_port('phy-br-eth'), mock.call.int_br.port_exists('int-br-eth'), ] if port_exists: expected_calls += [ mock.call.int_br.get_port_ofport('int-br-eth'), ] else: expected_calls += [ mock.call.int_br.add_patch_port( 'int-br-eth', constants.NONEXISTENT_PEER), ] expected_calls += [ mock.call.phys_br.port_exists('phy-br-eth'), ] if port_exists: expected_calls += [ mock.call.phys_br.get_port_ofport('phy-br-eth'), ] else: expected_calls += [ mock.call.phys_br.add_patch_port( 'phy-br-eth', constants.NONEXISTENT_PEER), ] expected_calls += [ mock.call.int_br.drop_port(in_port='int_ofport'), mock.call.phys_br.drop_port(in_port='phy_ofport'), mock.call.int_br.set_db_attribute('Interface', 'int-br-eth', 'options', {'peer': 'phy-br-eth'}), mock.call.phys_br.set_db_attribute('Interface', 'phy-br-eth', 'options', {'peer': 'int-br-eth'}), ] parent.assert_has_calls(expected_calls) self.assertEqual("int_ofport", self.agent.int_ofports["physnet1"]) self.assertEqual("phy_ofport", self.agent.phys_ofports["physnet1"]) def test_setup_physical_bridges_change_from_veth_to_patch_conf(self): self._test_setup_physical_bridges_change_from_veth_to_patch_conf() def test_setup_physical_bridges_change_from_veth_to_patch_conf_port_exists( self): self._test_setup_physical_bridges_change_from_veth_to_patch_conf( port_exists=True) def test_setup_tunnel_br(self): self.tun_br = mock.Mock() with mock.patch.object(self.agent.int_br, "add_patch_port", return_value=1) as int_patch_port,\ mock.patch.object(self.agent.tun_br, "add_patch_port", return_value=1) as tun_patch_port,\ mock.patch.object(self.agent.tun_br, 'bridge_exists', return_value=False),\ mock.patch.object(self.agent.tun_br, 'create') as create_tun,\ mock.patch.object(self.agent.tun_br, 'setup_controllers') as setup_controllers,\ mock.patch.object(self.agent.tun_br, 'port_exists', return_value=False),\ mock.patch.object(self.agent.int_br, 'port_exists', return_value=False),\ mock.patch.object(sys, "exit"): self.agent.setup_tunnel_br(None) self.agent.setup_tunnel_br() self.assertTrue(create_tun.called) self.assertTrue(setup_controllers.called) self.assertTrue(int_patch_port.called) self.assertTrue(tun_patch_port.called) def test_setup_tunnel_br_ports_exits_drop_flows(self): cfg.CONF.set_override('drop_flows_on_start', True, 'AGENT') with mock.patch.object(self.agent.tun_br, 'port_exists', return_value=True),\ mock.patch.object(self.agent, 'tun_br'),\ mock.patch.object(self.agent.int_br, 'port_exists', return_value=True),\ mock.patch.object(self.agent.tun_br, 'setup_controllers'),\ mock.patch.object(self.agent, 'patch_tun_ofport', new=2),\ mock.patch.object(self.agent, 'patch_int_ofport', new=2),\ mock.patch.object(self.agent.tun_br, 'delete_flows') as delete,\ mock.patch.object(self.agent.int_br, "add_patch_port") as int_patch_port,\ mock.patch.object(self.agent.tun_br, "add_patch_port") as tun_patch_port,\ mock.patch.object(sys, "exit"): self.agent.setup_tunnel_br(None) self.agent.setup_tunnel_br() self.assertFalse(int_patch_port.called) self.assertFalse(tun_patch_port.called) self.assertTrue(delete.called) def test_setup_tunnel_port(self): self.agent.tun_br = mock.Mock() self.agent.l2_pop = False self.agent.udp_vxlan_port = 8472 self.agent.tun_br_ofports['vxlan'] = {} self.agent.local_ip = '2.3.4.5' with mock.patch.object(self.agent.tun_br, "add_tunnel_port", return_value='6') as add_tun_port_fn,\ mock.patch.object(self.agent.tun_br, "add_flow"): self.agent._setup_tunnel_port(self.agent.tun_br, 'portname', '1.2.3.4', 'vxlan') 
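# add_tunnel_port is mocked above to return ofport '6', so a successful
# setup is signalled simply by the bridge method having been invoked.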
self.assertTrue(add_tun_port_fn.called) def test_port_unbound(self): with mock.patch.object(self.agent, "reclaim_local_vlan") as reclvl_fn: self.agent.enable_tunneling = True lvm = mock.Mock() lvm.network_type = "gre" lvm.vif_ports = {"vif1": mock.Mock()} self.agent.local_vlan_map["netuid12345"] = lvm self.agent.port_unbound("vif1", "netuid12345") self.assertTrue(reclvl_fn.called) lvm.vif_ports = {} self.agent.port_unbound("vif1", "netuid12345") self.assertEqual(2, reclvl_fn.call_count) lvm.vif_ports = {"vif1": mock.Mock()} self.agent.port_unbound("vif3", "netuid12345") self.assertEqual(2, reclvl_fn.call_count) def _prepare_l2_pop_ofports(self): lvm1 = mock.Mock() lvm1.network_type = 'gre' lvm1.vlan = 'vlan1' lvm1.segmentation_id = 'seg1' lvm1.tun_ofports = set(['1']) lvm2 = mock.Mock() lvm2.network_type = 'gre' lvm2.vlan = 'vlan2' lvm2.segmentation_id = 'seg2' lvm2.tun_ofports = set(['1', '2']) self.agent.local_vlan_map = {'net1': lvm1, 'net2': lvm2} self.agent.tun_br_ofports = {'gre': {'1.1.1.1': '1', '2.2.2.2': '2'}} self.agent.arp_responder_enabled = True def test_fdb_ignore_network(self): self._prepare_l2_pop_ofports() fdb_entry = {'net3': {}} with mock.patch.object(self.agent.tun_br, 'add_flow') as add_flow_fn,\ mock.patch.object(self.agent.tun_br, 'delete_flows') as del_flow_fn,\ mock.patch.object(self.agent, '_setup_tunnel_port') as add_tun_fn,\ mock.patch.object(self.agent, 'cleanup_tunnel_port') as clean_tun_fn: self.agent.fdb_add(None, fdb_entry) self.assertFalse(add_flow_fn.called) self.assertFalse(add_tun_fn.called) self.agent.fdb_remove(None, fdb_entry) self.assertFalse(del_flow_fn.called) self.assertFalse(clean_tun_fn.called) def test_fdb_ignore_self(self): self._prepare_l2_pop_ofports() self.agent.local_ip = 'agent_ip' fdb_entry = {'net2': {'network_type': 'gre', 'segment_id': 'tun2', 'ports': {'agent_ip': [l2pop_rpc.PortInfo(FAKE_MAC, FAKE_IP1), n_const.FLOODING_ENTRY]}}} with mock.patch.object(self.agent.tun_br, "deferred") as defer_fn: self.agent.fdb_add(None, fdb_entry) self.assertFalse(defer_fn.called) self.agent.fdb_remove(None, fdb_entry) self.assertFalse(defer_fn.called) def test_fdb_add_flows(self): self._prepare_l2_pop_ofports() fdb_entry = {'net1': {'network_type': 'gre', 'segment_id': 'tun1', 'ports': {'2.2.2.2': [l2pop_rpc.PortInfo(FAKE_MAC, FAKE_IP1), n_const.FLOODING_ENTRY]}}} with mock.patch.object(self.agent, 'tun_br', autospec=True) as tun_br,\ mock.patch.object(self.agent, '_setup_tunnel_port', autospec=True) as add_tun_fn: self.agent.fdb_add(None, fdb_entry) self.assertFalse(add_tun_fn.called) deferred_br_call = mock.call.deferred().__enter__() expected_calls = [ deferred_br_call.install_arp_responder('vlan1', FAKE_IP1, FAKE_MAC), deferred_br_call.install_unicast_to_tun('vlan1', 'seg1', '2', FAKE_MAC), deferred_br_call.install_flood_to_tun('vlan1', 'seg1', set(['1', '2'])), ] tun_br.assert_has_calls(expected_calls) def test_fdb_del_flows(self): self._prepare_l2_pop_ofports() fdb_entry = {'net2': {'network_type': 'gre', 'segment_id': 'tun2', 'ports': {'2.2.2.2': [l2pop_rpc.PortInfo(FAKE_MAC, FAKE_IP1), n_const.FLOODING_ENTRY]}}} with mock.patch.object(self.agent, 'tun_br', autospec=True) as br_tun: self.agent.fdb_remove(None, fdb_entry) deferred_br_call = mock.call.deferred().__enter__() expected_calls = [ mock.call.deferred(), mock.call.deferred().__enter__(), deferred_br_call.delete_arp_responder('vlan2', FAKE_IP1), deferred_br_call.delete_unicast_to_tun('vlan2', FAKE_MAC), deferred_br_call.install_flood_to_tun('vlan2', 'seg2', set(['1'])), 
deferred_br_call.delete_port('gre-02020202'), deferred_br_call.cleanup_tunnel_port('2'), mock.call.deferred().__exit__(None, None, None), ] br_tun.assert_has_calls(expected_calls) def test_fdb_add_port(self): self._prepare_l2_pop_ofports() fdb_entry = {'net1': {'network_type': 'gre', 'segment_id': 'tun1', 'ports': {'1.1.1.1': [l2pop_rpc.PortInfo(FAKE_MAC, FAKE_IP1)]}}} with mock.patch.object(self.agent, 'tun_br', autospec=True) as tun_br,\ mock.patch.object(self.agent, '_setup_tunnel_port') as add_tun_fn: self.agent.fdb_add(None, fdb_entry) self.assertFalse(add_tun_fn.called) fdb_entry['net1']['ports']['10.10.10.10'] = [ l2pop_rpc.PortInfo(FAKE_MAC, FAKE_IP1)] self.agent.fdb_add(None, fdb_entry) deferred_br = tun_br.deferred().__enter__() add_tun_fn.assert_called_with( deferred_br, 'gre-0a0a0a0a', '10.10.10.10', 'gre') def test_fdb_del_port(self): self._prepare_l2_pop_ofports() fdb_entry = {'net2': {'network_type': 'gre', 'segment_id': 'tun2', 'ports': {'2.2.2.2': [n_const.FLOODING_ENTRY]}}} with mock.patch.object(self.agent.tun_br, 'deferred') as defer_fn,\ mock.patch.object(self.agent.tun_br, 'delete_port') as delete_port_fn: self.agent.fdb_remove(None, fdb_entry) deferred_br = defer_fn().__enter__() deferred_br.delete_port.assert_called_once_with('gre-02020202') self.assertFalse(delete_port_fn.called) def test_fdb_update_chg_ip(self): self._prepare_l2_pop_ofports() fdb_entries = {'chg_ip': {'net1': {'agent_ip': {'before': [l2pop_rpc.PortInfo(FAKE_MAC, FAKE_IP1)], 'after': [l2pop_rpc.PortInfo(FAKE_MAC, FAKE_IP2)]}}}} with mock.patch.object(self.agent.tun_br, 'deferred') as deferred_fn: self.agent.fdb_update(None, fdb_entries) deferred_br = deferred_fn().__enter__() deferred_br.assert_has_calls([ mock.call.install_arp_responder('vlan1', FAKE_IP2, FAKE_MAC), mock.call.delete_arp_responder('vlan1', FAKE_IP1) ]) def test_del_fdb_flow_idempotency(self): lvm = mock.Mock() lvm.network_type = 'gre' lvm.vlan = 'vlan1' lvm.segmentation_id = 'seg1' lvm.tun_ofports = set(['1', '2']) with mock.patch.object(self.agent.tun_br, 'mod_flow') as mod_flow_fn,\ mock.patch.object(self.agent.tun_br, 'delete_flows') as delete_flows_fn: self.agent.del_fdb_flow(self.agent.tun_br, n_const.FLOODING_ENTRY, '1.1.1.1', lvm, '3') self.assertFalse(mod_flow_fn.called) self.assertFalse(delete_flows_fn.called) def test_recl_lv_port_to_preserve(self): self._prepare_l2_pop_ofports() self.agent.l2_pop = True self.agent.enable_tunneling = True with mock.patch.object(self.agent, 'tun_br', autospec=True) as tun_br: self.agent.reclaim_local_vlan('net1') self.assertFalse(tun_br.cleanup_tunnel_port.called) def test_recl_lv_port_to_remove(self): self._prepare_l2_pop_ofports() self.agent.l2_pop = True self.agent.enable_tunneling = True with mock.patch.object(self.agent, 'tun_br', autospec=True) as tun_br: self.agent.reclaim_local_vlan('net2') tun_br.delete_port.assert_called_once_with('gre-02020202') def test_daemon_loop_uses_polling_manager(self): with mock.patch( 'neutron.agent.common.polling.get_polling_manager') as mock_get_pm: with mock.patch.object(self.agent, 'rpc_loop') as mock_loop: self.agent.daemon_loop() mock_get_pm.assert_called_with(True, constants.DEFAULT_OVSDBMON_RESPAWN) mock_loop.assert_called_once_with(polling_manager=mock.ANY) def test_setup_tunnel_port_invalid_ofport(self): remote_ip = '1.2.3.4' with mock.patch.object( self.agent.tun_br, 'add_tunnel_port', return_value=ovs_lib.INVALID_OFPORT) as add_tunnel_port_fn,\ mock.patch.object(self.mod_agent.LOG, 'error') as log_error_fn: self.agent.local_ip = '1.2.3.4' 
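# With add_tunnel_port mocked to return INVALID_OFPORT, the helper is
# expected to log an error and report ofport 0 instead of raising.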
ofport = self.agent._setup_tunnel_port( self.agent.tun_br, 'gre-1', remote_ip, p_const.TYPE_GRE) add_tunnel_port_fn.assert_called_once_with( 'gre-1', remote_ip, self.agent.local_ip, p_const.TYPE_GRE, self.agent.vxlan_udp_port, self.agent.dont_fragment, self.agent.tunnel_csum) log_error_fn.assert_called_once_with( _("Failed to set-up %(type)s tunnel port to %(ip)s"), {'type': p_const.TYPE_GRE, 'ip': remote_ip}) self.assertEqual(0, ofport) def test_setup_tunnel_port_invalid_address_mismatch(self): remote_ip = '2001:db8::2' with mock.patch.object(self.mod_agent.LOG, 'error') as log_error_fn: self.agent.local_ip = '1.2.3.4' ofport = self.agent._setup_tunnel_port( self.agent.tun_br, 'gre-1', remote_ip, p_const.TYPE_GRE) log_error_fn.assert_called_once_with( _("IP version mismatch, cannot create tunnel: " "local_ip=%(lip)s remote_ip=%(rip)s"), {'lip': self.agent.local_ip, 'rip': remote_ip}) self.assertEqual(0, ofport) def test_setup_tunnel_port_invalid_netaddr_exception(self): remote_ip = '2001:db8::2' with mock.patch.object(self.mod_agent.LOG, 'error') as log_error_fn: self.agent.local_ip = '1.2.3.4.5' ofport = self.agent._setup_tunnel_port( self.agent.tun_br, 'gre-1', remote_ip, p_const.TYPE_GRE) log_error_fn.assert_called_once_with( _("Invalid local or remote IP, cannot create tunnel: " "local_ip=%(lip)s remote_ip=%(rip)s"), {'lip': self.agent.local_ip, 'rip': remote_ip}) self.assertEqual(0, ofport) def test_setup_tunnel_port_error_negative_df_disabled(self): remote_ip = '1.2.3.4' with mock.patch.object( self.agent.tun_br, 'add_tunnel_port', return_value=ovs_lib.INVALID_OFPORT) as add_tunnel_port_fn,\ mock.patch.object(self.mod_agent.LOG, 'error') as log_error_fn: self.agent.dont_fragment = False self.agent.tunnel_csum = False self.agent.local_ip = '2.3.4.5' ofport = self.agent._setup_tunnel_port( self.agent.tun_br, 'gre-1', remote_ip, p_const.TYPE_GRE) add_tunnel_port_fn.assert_called_once_with( 'gre-1', remote_ip, self.agent.local_ip, p_const.TYPE_GRE, self.agent.vxlan_udp_port, self.agent.dont_fragment, self.agent.tunnel_csum) log_error_fn.assert_called_once_with( _("Failed to set-up %(type)s tunnel port to %(ip)s"), {'type': p_const.TYPE_GRE, 'ip': remote_ip}) self.assertEqual(0, ofport) def test_setup_tunnel_port_error_negative_tunnel_csum(self): remote_ip = '1.2.3.4' with mock.patch.object( self.agent.tun_br, 'add_tunnel_port', return_value=ovs_lib.INVALID_OFPORT) as add_tunnel_port_fn,\ mock.patch.object(self.mod_agent.LOG, 'error') as log_error_fn: self.agent.dont_fragment = True self.agent.tunnel_csum = True self.agent.local_ip = '2.3.4.5' ofport = self.agent._setup_tunnel_port( self.agent.tun_br, 'gre-1', remote_ip, p_const.TYPE_GRE) add_tunnel_port_fn.assert_called_once_with( 'gre-1', remote_ip, self.agent.local_ip, p_const.TYPE_GRE, self.agent.vxlan_udp_port, self.agent.dont_fragment, self.agent.tunnel_csum) log_error_fn.assert_called_once_with( _("Failed to set-up %(type)s tunnel port to %(ip)s"), {'type': p_const.TYPE_GRE, 'ip': remote_ip}) self.assertEqual(0, ofport) def test_tunnel_sync_with_ml2_plugin(self): fake_tunnel_details = {'tunnels': [{'ip_address': '100.101.31.15'}]} with mock.patch.object(self.agent.plugin_rpc, 'tunnel_sync', return_value=fake_tunnel_details),\ mock.patch.object( self.agent, '_setup_tunnel_port') as _setup_tunnel_port_fn,\ mock.patch.object(self.agent, 'cleanup_stale_flows') as cleanup: self.agent.tunnel_types = ['vxlan'] self.agent.tunnel_sync() expected_calls = [mock.call(self.agent.tun_br, 'vxlan-64651f0f', '100.101.31.15', 'vxlan')] 
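# NOTE: 'vxlan-64651f0f' follows the agent's tunnel-port naming scheme:
# the tunnel type plus the remote IP with each octet rendered as two hex
# digits (100.101.31.15 -> 64 65 1f 0f); the same scheme yields
# 'vxlan-64646464' for 100.100.100.100 in the test below.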
_setup_tunnel_port_fn.assert_has_calls(expected_calls) self.assertEqual([], cleanup.mock_calls) def test_tunnel_sync_invalid_ip_address(self): fake_tunnel_details = {'tunnels': [{'ip_address': '300.300.300.300'}, {'ip_address': '100.100.100.100'}]} with mock.patch.object(self.agent.plugin_rpc, 'tunnel_sync', return_value=fake_tunnel_details),\ mock.patch.object( self.agent, '_setup_tunnel_port') as _setup_tunnel_port_fn,\ mock.patch.object(self.agent, 'cleanup_stale_flows') as cleanup: self.agent.tunnel_types = ['vxlan'] self.agent.tunnel_sync() _setup_tunnel_port_fn.assert_called_once_with(self.agent.tun_br, 'vxlan-64646464', '100.100.100.100', 'vxlan') self.assertEqual([], cleanup.mock_calls) def test_tunnel_update(self): kwargs = {'tunnel_ip': '10.10.10.10', 'tunnel_type': 'gre'} self.agent._setup_tunnel_port = mock.Mock() self.agent.enable_tunneling = True self.agent.tunnel_types = ['gre'] self.agent.l2_pop = False self.agent.tunnel_update(context=None, **kwargs) expected_calls = [ mock.call(self.agent.tun_br, 'gre-0a0a0a0a', '10.10.10.10', 'gre')] self.agent._setup_tunnel_port.assert_has_calls(expected_calls) def test_tunnel_delete(self): kwargs = {'tunnel_ip': '10.10.10.10', 'tunnel_type': 'gre'} self.agent.enable_tunneling = True self.agent.tunnel_types = ['gre'] self.agent.tun_br_ofports = {'gre': {'10.10.10.10': '1'}} with mock.patch.object( self.agent, 'cleanup_tunnel_port' ) as clean_tun_fn: self.agent.tunnel_delete(context=None, **kwargs) self.assertTrue(clean_tun_fn.called) def test_reset_tunnel_ofports(self): tunnel_handles = self.agent.tun_br_ofports self.agent.tun_br_ofports = {'gre': {'10.10.10.10': '1'}} self.agent._reset_tunnel_ofports() self.assertEqual(self.agent.tun_br_ofports, tunnel_handles) def _test_ovs_status(self, *args): reply2 = {'current': set(['tap0']), 'added': set(['tap2']), 'removed': set([])} reply3 = {'current': set(['tap2']), 'added': set([]), 'removed': set(['tap0'])} reply_ancillary = {'current': set([]), 'added': set([]), 'removed': set([])} self.agent.enable_tunneling = True with mock.patch.object(async_process.AsyncProcess, "_spawn"),\ mock.patch.object(async_process.AsyncProcess, "start"),\ mock.patch.object(async_process.AsyncProcess, "stop"),\ mock.patch.object(log.KeywordArgumentAdapter, 'exception') as log_exception,\ mock.patch.object(self.mod_agent.OVSNeutronAgent, 'process_ports_events') as process_p_events,\ mock.patch.object( self.mod_agent.OVSNeutronAgent, 'process_network_ports') as process_network_ports,\ mock.patch.object(self.mod_agent.OVSNeutronAgent, 'check_ovs_status') as check_ovs_status,\ mock.patch.object(self.mod_agent.OVSNeutronAgent, 'setup_integration_br') as setup_int_br,\ mock.patch.object(self.mod_agent.OVSNeutronAgent, 'setup_physical_bridges') as setup_phys_br,\ mock.patch.object(time, 'sleep'),\ mock.patch.object( self.mod_agent.OVSNeutronAgent, 'update_stale_ofport_rules') as update_stale, \ mock.patch.object(self.mod_agent.OVSNeutronAgent, 'cleanup_stale_flows') as cleanup, \ mock.patch.object(self.mod_agent.OVSNeutronAgent, 'setup_tunnel_br') as setup_tunnel_br,\ mock.patch.object( self.mod_agent.OVSNeutronAgent, 'setup_tunnel_br_flows') as setup_tunnel_br_flows,\ mock.patch.object( self.mod_agent.OVSNeutronAgent, '_reset_tunnel_ofports') as reset_tunnel_ofports: log_exception.side_effect = Exception( 'Fake exception to get out of the loop') devices_not_ready = set() process_p_events.side_effect = [(reply2, reply_ancillary, devices_not_ready), (reply3, reply_ancillary, devices_not_ready)] failed_devices = 
                {'added': set(), 'removed': set()}
            failed_ancillary_devices = {'added': set(), 'removed': set()}
            process_network_ports.side_effect = [
                failed_devices,
                Exception('Fake exception to get out of the loop')]
            check_ovs_status.side_effect = args
            try:
                self.agent.daemon_loop()
            except Exception:
                pass
            process_p_events.assert_has_calls([
                mock.call({'removed': [], 'added': []}, set(), set(), set(),
                          failed_devices, failed_ancillary_devices, set()),
                mock.call({'removed': [], 'added': []}, set(['tap0']), set(),
                          set(), failed_devices, failed_ancillary_devices,
                          set())
            ])
            process_network_ports.assert_has_calls([
                mock.call(reply2, False),
                mock.call(reply3, True)
            ])
            cleanup.assert_called_once_with()
            self.assertTrue(update_stale.called)
            # Verify that the OVS restart we triggered in the loop
            # re-set up the bridges.
            setup_int_br.assert_has_calls([mock.call()])
            setup_phys_br.assert_has_calls([mock.call({})])
            # Ensure that tunnel handles are reset and that the bridge
            # and flows are reconfigured.
            self.assertTrue(reset_tunnel_ofports.called)
            self.assertTrue(setup_tunnel_br_flows.called)
            self.assertTrue(setup_tunnel_br.called)

    def test_ovs_status(self):
        self._test_ovs_status(constants.OVS_NORMAL,
                              constants.OVS_DEAD,
                              constants.OVS_RESTARTED)
        # check_ovs_status will not report OVS as DEAD for some
        # exceptions (e.g. DBConnectionError), so NORMAL followed
        # directly by RESTARTED is also a valid sequence.
        self._test_ovs_status(constants.OVS_NORMAL,
                              constants.OVS_RESTARTED)

    def test_rpc_loop_fail_to_process_network_ports_keep_flows(self):
        with mock.patch.object(async_process.AsyncProcess, "_spawn"),\
                mock.patch.object(async_process.AsyncProcess, "start"),\
                mock.patch.object(async_process.AsyncProcess, "stop"),\
                mock.patch.object(
                    self.mod_agent.OVSNeutronAgent,
                    'process_network_ports') as process_network_ports,\
                mock.patch.object(self.mod_agent.OVSNeutronAgent,
                                  'check_ovs_status') as check_ovs_status,\
                mock.patch.object(time, 'sleep'),\
                mock.patch.object(
                    self.mod_agent.OVSNeutronAgent,
                    'update_stale_ofport_rules') as update_stale, \
                mock.patch.object(self.mod_agent.OVSNeutronAgent,
                                  'cleanup_stale_flows') as cleanup,\
                mock.patch.object(
                    self.mod_agent.OVSNeutronAgent,
                    '_check_and_handle_signal') as check_and_handle_signal:
            process_network_ports.side_effect = Exception("Trigger resync")
            check_ovs_status.return_value = constants.OVS_NORMAL
            check_and_handle_signal.side_effect = [True, False]
            self.agent.daemon_loop()
            self.assertTrue(update_stale.called)
            self.assertFalse(cleanup.called)

    def test_set_rpc_timeout(self):
        self.agent._handle_sigterm(None, None)
        for rpc_client in (self.agent.plugin_rpc.client,
                           self.agent.sg_plugin_rpc.client,
                           self.agent.dvr_plugin_rpc.client,
                           self.agent.state_rpc.client):
            self.assertEqual(10, rpc_client.timeout)

    def test_set_rpc_timeout_no_value(self):
        self.agent.quitting_rpc_timeout = None
        with mock.patch.object(self.agent, 'set_rpc_timeout') as mock_set_rpc:
            self.agent._handle_sigterm(None, None)
            self.assertFalse(mock_set_rpc.called)

    def test_arp_spoofing_network_port(self):
        int_br = mock.create_autospec(self.agent.int_br)
        self.agent.setup_arp_spoofing_protection(
            int_br, FakeVif(),
            {'device_owner': n_const.DEVICE_OWNER_ROUTER_INTF})
        self.assertTrue(int_br.delete_arp_spoofing_protection.called)
        self.assertFalse(int_br.install_arp_spoofing_protection.called)

    def test_arp_spoofing_port_security_disabled(self):
        int_br = mock.create_autospec(self.agent.int_br)
        self.agent.setup_arp_spoofing_protection(
            int_br, FakeVif(), {'port_security_enabled': False})
        self.assertTrue(int_br.delete_arp_spoofing_protection.called)
        self.assertFalse(int_br.install_arp_spoofing_protection.called)

    def
test_arp_spoofing_basic_rule_setup(self): vif = FakeVif() fake_details = {'fixed_ips': [], 'device_owner': 'nobody'} self.agent.prevent_arp_spoofing = True int_br = mock.create_autospec(self.agent.int_br) self.agent.setup_arp_spoofing_protection(int_br, vif, fake_details) self.assertEqual( [mock.call(port=vif.ofport)], int_br.delete_arp_spoofing_allow_rules.mock_calls) self.assertEqual( [mock.call(ip_addresses=set(), port=vif.ofport)], int_br.install_arp_spoofing_protection.mock_calls) def test_arp_spoofing_basic_rule_setup_fixed_ipv6(self): vif = FakeVif() fake_details = {'fixed_ips': [{'ip_address': 'fdf8:f53b:82e4::1'}], 'device_owner': 'nobody'} self.agent.prevent_arp_spoofing = True br = mock.create_autospec(self.agent.int_br) self.agent.setup_arp_spoofing_protection(br, vif, fake_details) self.assertEqual( [mock.call(port=vif.ofport)], br.delete_arp_spoofing_allow_rules.mock_calls) self.assertTrue(br.install_icmpv6_na_spoofing_protection.called) def test_arp_spoofing_fixed_and_allowed_addresses(self): vif = FakeVif() fake_details = { 'device_owner': 'nobody', 'fixed_ips': [{'ip_address': '192.168.44.100'}, {'ip_address': '192.168.44.101'}], 'allowed_address_pairs': [{'ip_address': '192.168.44.102/32'}, {'ip_address': '192.168.44.103/32'}] } self.agent.prevent_arp_spoofing = True int_br = mock.create_autospec(self.agent.int_br) self.agent.setup_arp_spoofing_protection(int_br, vif, fake_details) # make sure all addresses are allowed addresses = {'192.168.44.100', '192.168.44.101', '192.168.44.102/32', '192.168.44.103/32'} self.assertEqual( [mock.call(port=vif.ofport, ip_addresses=addresses)], int_br.install_arp_spoofing_protection.mock_calls) def test_arp_spoofing_fixed_and_allowed_addresses_ipv6(self): vif = FakeVif() fake_details = { 'device_owner': 'nobody', 'fixed_ips': [{'ip_address': '2001:db8::1'}, {'ip_address': '2001:db8::2'}], 'allowed_address_pairs': [{'ip_address': '2001:db8::200', 'mac_address': 'aa:22:33:44:55:66'}] } self.agent.prevent_arp_spoofing = True int_br = mock.create_autospec(self.agent.int_br) self.agent.setup_arp_spoofing_protection(int_br, vif, fake_details) # make sure all addresses are allowed including ipv6 LLAs addresses = {'2001:db8::1', '2001:db8::2', '2001:db8::200', 'fe80::a822:33ff:fe44:5566', 'fe80::a8bb:ccff:fe11:2233'} self.assertEqual( [mock.call(port=vif.ofport, ip_addresses=addresses)], int_br.install_icmpv6_na_spoofing_protection.mock_calls) def test__get_ofport_moves(self): previous = {'port1': 1, 'port2': 2} current = {'port1': 5, 'port2': 2} # we expect it to tell us port1 moved expected = ['port1'] self.assertEqual(expected, self.agent._get_ofport_moves(current, previous)) def test_update_stale_ofport_rules_clears_old(self): self.agent.prevent_arp_spoofing = True self.agent.vifname_to_ofport_map = {'port1': 1, 'port2': 2} self.agent.int_br = mock.Mock() # simulate port1 was removed newmap = {'port2': 2} self.agent.int_br.get_vif_port_to_ofport_map.return_value = newmap self.agent.update_stale_ofport_rules() # rules matching port 1 should have been deleted self.assertEqual( [mock.call(port=1)], self.agent.int_br.delete_arp_spoofing_protection.mock_calls) # make sure the state was updated with the new map self.assertEqual(newmap, self.agent.vifname_to_ofport_map) def test_update_stale_ofport_rules_treats_moved(self): self.agent.prevent_arp_spoofing = True self.agent.vifname_to_ofport_map = {'port1': 1, 'port2': 2} self.agent.treat_devices_added_or_updated = mock.Mock() self.agent.int_br = mock.Mock() # simulate port1 was moved newmap = 
{'port2': 2, 'port1': 90} self.agent.int_br.get_vif_port_to_ofport_map.return_value = newmap ofport_changed_ports = self.agent.update_stale_ofport_rules() self.assertEqual(['port1'], ofport_changed_ports) def test__setup_tunnel_port_while_new_mapping_is_added(self): """ Test that _setup_tunnel_port doesn't fail if new vlan mapping is added in a different coroutine while iterating over existing mappings. See bug 1449944 for more info. """ def add_new_vlan_mapping(*args, **kwargs): self.agent.local_vlan_map['bar'] = ( self.mod_agent.LocalVLANMapping(1, 2, 3, 4)) bridge = mock.Mock() tunnel_type = 'vxlan' self.agent.tun_br_ofports = {tunnel_type: dict()} self.agent.l2_pop = False self.agent.local_vlan_map = { 'foo': self.mod_agent.LocalVLANMapping(4, tunnel_type, 2, 1)} self.agent.local_ip = '2.3.4.5' bridge.install_flood_to_tun.side_effect = add_new_vlan_mapping self.agent._setup_tunnel_port(bridge, 1, '1.2.3.4', tunnel_type=tunnel_type) self.assertIn('bar', self.agent.local_vlan_map) def test_setup_entry_for_arp_reply_ignores_ipv6_addresses(self): self.agent.arp_responder_enabled = True ip = '2001:db8::1' br = mock.Mock() self.agent.setup_entry_for_arp_reply( br, 'add', mock.Mock(), mock.Mock(), ip) self.assertFalse(br.install_arp_responder.called) class TestOvsNeutronAgentOFCtl(TestOvsNeutronAgent, ovs_test_base.OVSOFCtlTestBase): def test_cleanup_stale_flows(self): with mock.patch.object(self.agent.int_br, 'dump_flows_all_tables') as dump_flows,\ mock.patch.object(self.agent.int_br, 'delete_flows') as del_flow: self.agent.int_br.set_agent_uuid_stamp(1234) dump_flows.return_value = [ 'cookie=0x4d2, duration=50.156s, table=0,actions=drop', 'cookie=0x4321, duration=54.143s, table=2, priority=0', 'cookie=0x2345, duration=50.125s, table=2, priority=0', 'cookie=0x4d2, duration=52.112s, table=3, actions=drop', ] self.agent.iter_num = 3 self.agent.cleanup_stale_flows() expected = [ mock.call(cookie='0x4321/-1', table='2'), mock.call(cookie='0x2345/-1', table='2'), ] self.assertEqual(expected, del_flow.mock_calls) class TestOvsNeutronAgentRyu(TestOvsNeutronAgent, ovs_test_base.OVSRyuTestBase): def test_cleanup_stale_flows(self): uint64_max = (1 << 64) - 1 with mock.patch.object(self.agent.int_br, 'dump_flows') as dump_flows,\ mock.patch.object(self.agent.int_br, 'delete_flows') as del_flow: self.agent.int_br.set_agent_uuid_stamp(1234) dump_flows.return_value = [ # mock ryu.ofproto.ofproto_v1_3_parser.OFPFlowStats mock.Mock(cookie=1234, table_id=0), mock.Mock(cookie=17185, table_id=2), mock.Mock(cookie=9029, table_id=2), mock.Mock(cookie=1234, table_id=3), ] self.agent.iter_num = 3 self.agent.cleanup_stale_flows() expected = [mock.call(cookie=17185, cookie_mask=uint64_max), mock.call(cookie=9029, cookie_mask=uint64_max)] del_flow.assert_has_calls(expected, any_order=True) self.assertEqual(len(expected), len(del_flow.mock_calls)) class AncillaryBridgesTest(object): def setUp(self): super(AncillaryBridgesTest, self).setUp() notifier_p = mock.patch(NOTIFIER) notifier_cls = notifier_p.start() self.notifier = mock.Mock() notifier_cls.return_value = self.notifier cfg.CONF.set_default('firewall_driver', 'neutron.agent.firewall.NoopFirewallDriver', group='SECURITYGROUP') cfg.CONF.set_override('report_interval', 0, 'AGENT') mock.patch('neutron.agent.common.ovs_lib.BaseOVS.config', new_callable=mock.PropertyMock, return_value={}).start() def _test_ancillary_bridges(self, bridges, ancillary): device_ids = ancillary[:] def pullup_side_effect(*args): # Check that the device_id exists, if it does return it # if 
it does not return None try: device_ids.remove(args[0]) return args[0] except Exception: return None with mock.patch.object(self.mod_agent.OVSNeutronAgent, 'setup_integration_br'),\ mock.patch('neutron.agent.linux.utils.get_interface_mac', return_value='00:00:00:00:00:01'),\ mock.patch('neutron.agent.common.ovs_lib.BaseOVS.get_bridges', return_value=bridges),\ mock.patch('neutron.agent.common.ovs_lib.BaseOVS.' 'get_bridge_external_bridge_id', side_effect=pullup_side_effect),\ mock.patch( 'neutron.agent.common.ovs_lib.OVSBridge.' 'get_ports_attributes', return_value=[]),\ mock.patch( 'neutron.agent.common.ovs_lib.OVSBridge.' 'get_vif_ports', return_value=[]): self.agent = self.mod_agent.OVSNeutronAgent(self._bridge_classes(), cfg.CONF) self.assertEqual(len(ancillary), len(self.agent.ancillary_brs)) if ancillary: bridges = [br.br_name for br in self.agent.ancillary_brs] for br in ancillary: self.assertIn(br, bridges) def test_ancillary_bridges_single(self): bridges = ['br-int', 'br-ex'] self._test_ancillary_bridges(bridges, ['br-ex']) def test_ancillary_bridges_none(self): bridges = ['br-int'] self._test_ancillary_bridges(bridges, []) def test_ancillary_bridges_multiple(self): bridges = ['br-int', 'br-ex1', 'br-ex2'] self._test_ancillary_bridges(bridges, ['br-ex1', 'br-ex2']) def mock_scan_ancillary_ports(self, vif_port_set=None, registered_ports=None, sync=False): bridges = ['br-int', 'br-ex'] ancillary = ['br-ex'] with mock.patch.object(self.mod_agent.OVSNeutronAgent, 'setup_integration_br'), \ mock.patch.object(self.mod_agent.OVSNeutronAgent, '_restore_local_vlan_map'), \ mock.patch('neutron.agent.common.ovs_lib.BaseOVS.get_bridges', return_value=bridges), \ mock.patch('neutron.agent.common.ovs_lib.BaseOVS.' 'get_bridge_external_bridge_id', side_effect=ancillary), \ mock.patch('neutron.agent.common.ovs_lib.OVSBridge.' 
'get_vif_port_set', return_value=vif_port_set): self.agent = self.mod_agent.OVSNeutronAgent(self._bridge_classes(), cfg.CONF) return self.agent.scan_ancillary_ports(registered_ports, sync) def test_scan_ancillary_ports_returns_cur_only_for_unchanged_ports(self): vif_port_set = set([1, 2]) registered_ports = set([1, 2]) expected = dict(current=vif_port_set) actual = self.mock_scan_ancillary_ports(vif_port_set, registered_ports) self.assertEqual(expected, actual) def test_scan_ancillary_ports_returns_port_changes(self): vif_port_set = set([1, 3]) registered_ports = set([1, 2]) expected = dict(current=vif_port_set, added=set([3]), removed=set([2])) actual = self.mock_scan_ancillary_ports(vif_port_set, registered_ports) self.assertEqual(expected, actual) def test_scan_ancillary_ports_returns_port_changes_with_sync(self): vif_port_set = set([1, 3]) registered_ports = set([1, 2]) expected = dict(current=vif_port_set, added=vif_port_set, removed=set([2])) actual = self.mock_scan_ancillary_ports(vif_port_set, registered_ports, sync=True) self.assertEqual(expected, actual) class AncillaryBridgesTestOFCtl(AncillaryBridgesTest, ovs_test_base.OVSOFCtlTestBase): pass class AncillaryBridgesTestRyu(AncillaryBridgesTest, ovs_test_base.OVSRyuTestBase): pass class TestOvsDvrNeutronAgent(object): def setUp(self): super(TestOvsDvrNeutronAgent, self).setUp() notifier_p = mock.patch(NOTIFIER) notifier_cls = notifier_p.start() self.notifier = mock.Mock() notifier_cls.return_value = self.notifier cfg.CONF.set_default('firewall_driver', 'neutron.agent.firewall.NoopFirewallDriver', group='SECURITYGROUP') mock.patch('neutron.agent.common.ovs_lib.BaseOVS.config', new_callable=mock.PropertyMock, return_value={}).start() with mock.patch.object(self.mod_agent.OVSNeutronAgent, 'setup_integration_br'),\ mock.patch.object(self.mod_agent.OVSNeutronAgent, 'setup_ancillary_bridges', return_value=[]),\ mock.patch('neutron.agent.linux.utils.get_interface_mac', return_value='00:00:00:00:00:01'),\ mock.patch( 'neutron.agent.common.ovs_lib.BaseOVS.get_bridges'),\ mock.patch('oslo_service.loopingcall.' 'FixedIntervalLoopingCall', new=MockFixedIntervalLoopingCall),\ mock.patch( 'neutron.agent.common.ovs_lib.OVSBridge.' 'get_ports_attributes', return_value=[]),\ mock.patch( 'neutron.agent.common.ovs_lib.OVSBridge.' 
'get_vif_ports', return_value=[]): self.agent = self.mod_agent.OVSNeutronAgent(self._bridge_classes(), cfg.CONF) self.agent.tun_br = self.br_tun_cls(br_name='br-tun') self.agent.sg_agent = mock.Mock() def _setup_for_dvr_test(self): self._port = mock.Mock() self._port.ofport = 10 self._port.vif_id = "1234-5678-90" self._physical_network = 'physeth1' self._old_local_vlan = None self._segmentation_id = 2001 self.agent.enable_distributed_routing = True self.agent.enable_tunneling = True self.agent.patch_tun_ofport = 1 self.agent.patch_int_ofport = 2 self.agent.dvr_agent.local_ports = {} self.agent.local_vlan_map = {} self.agent.dvr_agent.enable_distributed_routing = True self.agent.dvr_agent.enable_tunneling = True self.agent.dvr_agent.patch_tun_ofport = 1 self.agent.dvr_agent.patch_int_ofport = 2 self.agent.dvr_agent.tun_br = mock.Mock() self.agent.dvr_agent.phys_brs[self._physical_network] = mock.Mock() self.agent.dvr_agent.bridge_mappings = {self._physical_network: 'br-eth1'} self.agent.dvr_agent.int_ofports[self._physical_network] = 30 self.agent.dvr_agent.phys_ofports[self._physical_network] = 40 self.agent.dvr_agent.local_dvr_map = {} self.agent.dvr_agent.registered_dvr_macs = set() self.agent.dvr_agent.dvr_mac_address = 'aa:22:33:44:55:66' self._net_uuid = 'my-net-uuid' self._fixed_ips = [{'subnet_id': 'my-subnet-uuid', 'ip_address': '1.1.1.1'}] self._compute_port = mock.Mock() self._compute_port.ofport = 20 self._compute_port.vif_id = "1234-5678-91" self._compute_fixed_ips = [{'subnet_id': 'my-subnet-uuid', 'ip_address': '1.1.1.3'}] @staticmethod def _expected_port_bound(port, lvid, is_dvr=True): resp = [ mock.call.db_get_val('Port', port.port_name, 'other_config'), mock.call.set_db_attribute('Port', port.port_name, 'other_config', mock.ANY), ] if is_dvr: resp = [mock.call.get_vifs_by_ids([])] + resp return resp def _expected_install_dvr_process(self, lvid, port, ip_version, gateway_ip, gateway_mac): if ip_version == 4: ipvx_calls = [ mock.call.install_dvr_process_ipv4( vlan_tag=lvid, gateway_ip=gateway_ip), ] else: ipvx_calls = [ mock.call.install_dvr_process_ipv6( vlan_tag=lvid, gateway_mac=gateway_mac), ] return ipvx_calls + [ mock.call.install_dvr_process( vlan_tag=lvid, dvr_mac_address=self.agent.dvr_agent.dvr_mac_address, vif_mac=port.vif_mac, ), ] def _test_port_bound_for_dvr_on_vlan_network(self, device_owner, ip_version=4): self._setup_for_dvr_test() if ip_version == 4: gateway_ip = '1.1.1.1' cidr = '1.1.1.0/24' else: gateway_ip = '2001:100::1' cidr = '2001:100::0/64' self._port.vif_mac = gateway_mac = 'aa:bb:cc:11:22:33' self._compute_port.vif_mac = '77:88:99:00:11:22' physical_network = self._physical_network segmentation_id = self._segmentation_id network_type = p_const.TYPE_VLAN int_br = mock.create_autospec(self.agent.int_br) tun_br = mock.create_autospec(self.agent.tun_br) phys_br = mock.create_autospec(self.br_phys_cls('br-phys')) int_br.set_db_attribute.return_value = True int_br.db_get_val.return_value = {} with mock.patch.object(self.agent.dvr_agent.plugin_rpc, 'get_subnet_for_dvr', return_value={'gateway_ip': gateway_ip, 'cidr': cidr, 'ip_version': ip_version, 'gateway_mac': gateway_mac}),\ mock.patch.object(self.agent.dvr_agent.plugin_rpc, 'get_ports_on_host_by_subnet', return_value=[]),\ mock.patch.object(self.agent.dvr_agent.int_br, 'get_vif_port_by_id', return_value=self._port),\ mock.patch.object(self.agent, 'int_br', new=int_br),\ mock.patch.object(self.agent, 'tun_br', new=tun_br),\ mock.patch.dict(self.agent.phys_brs, {physical_network: phys_br}),\ 
                mock.patch.object(self.agent.dvr_agent, 'int_br',
                                  new=int_br),\
                mock.patch.object(self.agent.dvr_agent, 'tun_br',
                                  new=tun_br),\
                mock.patch.dict(self.agent.dvr_agent.phys_brs,
                                {physical_network: phys_br}):
            self.agent.port_bound(
                self._port, self._net_uuid, network_type,
                physical_network, segmentation_id, self._fixed_ips,
                n_const.DEVICE_OWNER_DVR_INTERFACE, False)
            phy_ofp = self.agent.dvr_agent.phys_ofports[physical_network]
            int_ofp = self.agent.dvr_agent.int_ofports[physical_network]
            lvid = self.agent.local_vlan_map[self._net_uuid].vlan
            expected_on_phys_br = [
                mock.call.provision_local_vlan(
                    port=phy_ofp,
                    lvid=lvid,
                    segmentation_id=segmentation_id,
                    distributed=True,
                ),
            ] + self._expected_install_dvr_process(
                port=self._port, lvid=lvid, ip_version=ip_version,
                gateway_ip=gateway_ip, gateway_mac=gateway_mac)
            expected_on_int_br = [
                mock.call.provision_local_vlan(
                    port=int_ofp,
                    lvid=lvid,
                    segmentation_id=segmentation_id,
                ),
            ] + self._expected_port_bound(self._port, lvid)
            self.assertEqual(expected_on_int_br, int_br.mock_calls)
            self.assertEqual([], tun_br.mock_calls)
            self.assertEqual(expected_on_phys_br, phys_br.mock_calls)
            int_br.reset_mock()
            tun_br.reset_mock()
            phys_br.reset_mock()
            self.agent.port_bound(self._compute_port, self._net_uuid,
                                  network_type, physical_network,
                                  segmentation_id,
                                  self._compute_fixed_ips,
                                  device_owner, False)
            expected_on_int_br = [
                mock.call.install_dvr_to_src_mac(
                    network_type=network_type,
                    gateway_mac=gateway_mac,
                    dst_mac=self._compute_port.vif_mac,
                    dst_port=self._compute_port.ofport,
                    vlan_tag=segmentation_id,
                ),
            ] + self._expected_port_bound(self._compute_port, lvid, False)
            self.assertEqual(expected_on_int_br, int_br.mock_calls)
            self.assertEqual([], tun_br.mock_calls)
            self.assertEqual([], phys_br.mock_calls)

    def _test_port_bound_for_dvr_on_vxlan_network(self, device_owner,
                                                  ip_version=4):
        self._setup_for_dvr_test()
        if ip_version == 4:
            gateway_ip = '1.1.1.1'
            cidr = '1.1.1.0/24'
        else:
            gateway_ip = '2001:100::1'
            cidr = '2001:100::0/64'
        network_type = p_const.TYPE_VXLAN
        self._port.vif_mac = gateway_mac = 'aa:bb:cc:11:22:33'
        self._compute_port.vif_mac = '77:88:99:00:11:22'
        physical_network = self._physical_network
        segmentation_id = self._segmentation_id
        int_br = mock.create_autospec(self.agent.int_br)
        tun_br = mock.create_autospec(self.agent.tun_br)
        phys_br = mock.create_autospec(self.br_phys_cls('br-phys'))
        int_br.set_db_attribute.return_value = True
        int_br.db_get_val.return_value = {}
        with mock.patch.object(self.agent.dvr_agent.plugin_rpc,
                               'get_subnet_for_dvr',
                               return_value={'gateway_ip': gateway_ip,
                                             'cidr': cidr,
                                             'ip_version': ip_version,
                                             'gateway_mac': gateway_mac}),\
                mock.patch.object(self.agent.dvr_agent.plugin_rpc,
                                  'get_ports_on_host_by_subnet',
                                  return_value=[]),\
                mock.patch.object(self.agent.dvr_agent.int_br,
                                  'get_vif_port_by_id',
                                  return_value=self._port),\
                mock.patch.object(self.agent, 'int_br', new=int_br),\
                mock.patch.object(self.agent, 'tun_br', new=tun_br),\
                mock.patch.dict(self.agent.phys_brs,
                                {physical_network: phys_br}),\
                mock.patch.object(self.agent.dvr_agent, 'int_br',
                                  new=int_br),\
                mock.patch.object(self.agent.dvr_agent, 'tun_br',
                                  new=tun_br),\
                mock.patch.dict(self.agent.dvr_agent.phys_brs,
                                {physical_network: phys_br}):
            self.agent.port_bound(
                self._port, self._net_uuid, network_type,
                physical_network, segmentation_id, self._fixed_ips,
                n_const.DEVICE_OWNER_DVR_INTERFACE, False)
            lvid = self.agent.local_vlan_map[self._net_uuid].vlan
            expected_on_int_br = self._expected_port_bound(
                self._port, lvid)
            expected_on_tun_br = [
                mock.call.provision_local_vlan(
network_type=network_type, segmentation_id=segmentation_id, lvid=lvid, distributed=True), ] + self._expected_install_dvr_process( port=self._port, lvid=lvid, ip_version=ip_version, gateway_ip=gateway_ip, gateway_mac=gateway_mac) self.assertEqual(expected_on_int_br, int_br.mock_calls) self.assertEqual(expected_on_tun_br, tun_br.mock_calls) self.assertEqual([], phys_br.mock_calls) int_br.reset_mock() tun_br.reset_mock() phys_br.reset_mock() self.agent.port_bound(self._compute_port, self._net_uuid, network_type, physical_network, segmentation_id, self._compute_fixed_ips, device_owner, False) expected_on_int_br = [ mock.call.install_dvr_to_src_mac( network_type=network_type, gateway_mac=gateway_mac, dst_mac=self._compute_port.vif_mac, dst_port=self._compute_port.ofport, vlan_tag=lvid, ), ] + self._expected_port_bound(self._compute_port, lvid, False) self.assertEqual(expected_on_int_br, int_br.mock_calls) self.assertEqual([], tun_br.mock_calls) self.assertEqual([], phys_br.mock_calls) def test_port_bound_for_dvr_with_compute_ports(self): self._test_port_bound_for_dvr_on_vlan_network( device_owner=DEVICE_OWNER_COMPUTE) self._test_port_bound_for_dvr_on_vlan_network( device_owner=DEVICE_OWNER_COMPUTE, ip_version=6) self._test_port_bound_for_dvr_on_vxlan_network( device_owner=DEVICE_OWNER_COMPUTE) self._test_port_bound_for_dvr_on_vxlan_network( device_owner=DEVICE_OWNER_COMPUTE, ip_version=6) def test_port_bound_for_dvr_with_lbaas_vip_ports(self): self._test_port_bound_for_dvr_on_vlan_network( device_owner=n_const.DEVICE_OWNER_LOADBALANCER) self._test_port_bound_for_dvr_on_vlan_network( device_owner=n_const.DEVICE_OWNER_LOADBALANCER, ip_version=6) self._test_port_bound_for_dvr_on_vxlan_network( device_owner=n_const.DEVICE_OWNER_LOADBALANCER) self._test_port_bound_for_dvr_on_vxlan_network( device_owner=n_const.DEVICE_OWNER_LOADBALANCER, ip_version=6) def test_port_bound_for_dvr_with_lbaasv2_vip_ports(self): self._test_port_bound_for_dvr_on_vlan_network( device_owner=n_const.DEVICE_OWNER_LOADBALANCERV2) self._test_port_bound_for_dvr_on_vlan_network( device_owner=n_const.DEVICE_OWNER_LOADBALANCERV2, ip_version=6) self._test_port_bound_for_dvr_on_vxlan_network( device_owner=n_const.DEVICE_OWNER_LOADBALANCERV2) self._test_port_bound_for_dvr_on_vxlan_network( device_owner=n_const.DEVICE_OWNER_LOADBALANCERV2, ip_version=6) def test_port_bound_for_dvr_with_dhcp_ports(self): self._test_port_bound_for_dvr_on_vlan_network( device_owner=n_const.DEVICE_OWNER_DHCP) self._test_port_bound_for_dvr_on_vlan_network( device_owner=n_const.DEVICE_OWNER_DHCP, ip_version=6) self._test_port_bound_for_dvr_on_vxlan_network( device_owner=n_const.DEVICE_OWNER_DHCP) self._test_port_bound_for_dvr_on_vxlan_network( device_owner=n_const.DEVICE_OWNER_DHCP, ip_version=6) def test_port_bound_for_dvr_with_csnat_ports(self): self._setup_for_dvr_test() int_br, tun_br = self._port_bound_for_dvr_with_csnat_ports() lvid = self.agent.local_vlan_map[self._net_uuid].vlan expected_on_int_br = [ mock.call.install_dvr_to_src_mac( network_type='vxlan', gateway_mac='aa:bb:cc:11:22:33', dst_mac=self._port.vif_mac, dst_port=self._port.ofport, vlan_tag=lvid, ), ] + self._expected_port_bound(self._port, lvid, is_dvr=False) self.assertEqual(expected_on_int_br, int_br.mock_calls) expected_on_tun_br = [ mock.call.provision_local_vlan( network_type='vxlan', lvid=lvid, segmentation_id=None, distributed=True, ), ] self.assertEqual(expected_on_tun_br, tun_br.mock_calls) def test_port_bound_for_dvr_with_csnat_ports_ofport_change(self): 
self._setup_for_dvr_test() self._port_bound_for_dvr_with_csnat_ports() # simulate a replug self._port.ofport = 12 int_br, tun_br = self._port_bound_for_dvr_with_csnat_ports() lvid = self.agent.local_vlan_map[self._net_uuid].vlan expected_on_int_br = [ mock.call.delete_dvr_to_src_mac( network_type='vxlan', dst_mac=self._port.vif_mac, vlan_tag=lvid, ), mock.call.install_dvr_to_src_mac( network_type='vxlan', gateway_mac='aa:bb:cc:11:22:33', dst_mac=self._port.vif_mac, dst_port=self._port.ofport, vlan_tag=lvid, ), ] + self._expected_port_bound(self._port, lvid, is_dvr=False) self.assertEqual(expected_on_int_br, int_br.mock_calls) # a local vlan was already provisioned so there should be no new # calls to tunbr self.assertEqual([], tun_br.mock_calls) # make sure ofport was updated self.assertEqual(12, self.agent.dvr_agent.local_ports[self._port.vif_id].ofport) def _port_bound_for_dvr_with_csnat_ports(self): int_br = mock.create_autospec(self.agent.int_br) tun_br = mock.create_autospec(self.agent.tun_br) int_br.set_db_attribute.return_value = True int_br.db_get_val.return_value = {} with mock.patch.object(self.agent.dvr_agent.plugin_rpc, 'get_subnet_for_dvr', return_value={'gateway_ip': '1.1.1.1', 'cidr': '1.1.1.0/24', 'ip_version': 4, 'gateway_mac': 'aa:bb:cc:11:22:33'}),\ mock.patch.object(self.agent.dvr_agent.plugin_rpc, 'get_ports_on_host_by_subnet', return_value=[]),\ mock.patch.object(self.agent.dvr_agent.int_br, 'get_vif_port_by_id', return_value=self._port),\ mock.patch.object(self.agent, 'int_br', new=int_br),\ mock.patch.object(self.agent, 'tun_br', new=tun_br),\ mock.patch.object(self.agent.dvr_agent, 'int_br', new=int_br),\ mock.patch.object(self.agent.dvr_agent, 'tun_br', new=tun_br): self.agent.port_bound( self._port, self._net_uuid, 'vxlan', None, None, self._fixed_ips, n_const.DEVICE_OWNER_ROUTER_SNAT, False) return int_br, tun_br def test_port_bound_for_dvr_with_csnat_ports_without_subnet(self): self._setup_for_dvr_test() int_br = mock.create_autospec(self.agent.int_br) tun_br = mock.create_autospec(self.agent.tun_br) # get_subnet_for_dvr RPC returns {} on error with mock.patch.object(self.agent.dvr_agent.plugin_rpc, 'get_subnet_for_dvr', return_value={}),\ mock.patch.object(self.agent, 'int_br', new=int_br),\ mock.patch.object(self.agent, 'tun_br', new=tun_br),\ mock.patch.object(self.agent.dvr_agent, 'int_br', new=int_br),\ mock.patch.object(self.agent.dvr_agent, 'tun_br', new=tun_br): self.agent.port_bound( self._port, self._net_uuid, 'vxlan', None, None, self._fixed_ips, n_const.DEVICE_OWNER_ROUTER_SNAT, False) self.assertFalse(int_br.install_dvr_to_src_mac.called) def test_treat_devices_removed_for_dvr_interface(self): self._test_treat_devices_removed_for_dvr_interface() self._test_treat_devices_removed_for_dvr_interface(ip_version=6) self._test_treat_devices_removed_for_dvr_interface(network_type='vlan') self._test_treat_devices_removed_for_dvr_interface(ip_version=6, network_type='vlan') def _test_treat_devices_removed_for_dvr_interface( self, ip_version=4, network_type='vxlan'): self._setup_for_dvr_test() if ip_version == 4: gateway_ip = '1.1.1.1' cidr = '1.1.1.0/24' else: gateway_ip = '2001:100::1' cidr = '2001:100::0/64' gateway_mac = 'aa:bb:cc:11:22:33' int_br = mock.create_autospec(self.agent.int_br) tun_br = mock.create_autospec(self.agent.tun_br) int_br.set_db_attribute.return_value = True int_br.db_get_val.return_value = {} with mock.patch.object(self.agent.dvr_agent.plugin_rpc, 'get_subnet_for_dvr', return_value={'gateway_ip': gateway_ip, 'cidr': cidr, 
'ip_version': ip_version, 'gateway_mac': gateway_mac}),\ mock.patch.object(self.agent.dvr_agent.plugin_rpc, 'get_ports_on_host_by_subnet', return_value=[]),\ mock.patch.object(self.agent, 'int_br', new=int_br),\ mock.patch.object(self.agent, 'tun_br', new=tun_br),\ mock.patch.object(self.agent.dvr_agent, 'int_br', new=int_br),\ mock.patch.object(self.agent.dvr_agent, 'tun_br', new=tun_br),\ mock.patch.object(self.agent.dvr_agent.int_br, 'get_vif_port_by_id', return_value=self._port): if network_type == 'vlan': self.agent.port_bound(self._port, self._net_uuid, network_type, self._physical_network, self._segmentation_id, self._compute_fixed_ips, n_const.DEVICE_OWNER_DVR_INTERFACE, False) else: self.agent.port_bound( self._port, self._net_uuid, 'vxlan', None, None, self._fixed_ips, n_const.DEVICE_OWNER_DVR_INTERFACE, False) lvid = self.agent.local_vlan_map[self._net_uuid].vlan self.assertEqual(self._expected_port_bound(self._port, lvid), int_br.mock_calls) expected_on_tun_br = [ mock.call.provision_local_vlan(network_type='vxlan', lvid=lvid, segmentation_id=None, distributed=True), ] + self._expected_install_dvr_process( port=self._port, lvid=lvid, ip_version=ip_version, gateway_ip=gateway_ip, gateway_mac=gateway_mac) self.assertEqual(expected_on_tun_br, tun_br.mock_calls) int_br.reset_mock() tun_br.reset_mock() phys_br = mock.create_autospec(self.br_phys_cls('br-phys')) with mock.patch.object(self.agent, 'reclaim_local_vlan'),\ mock.patch.object(self.agent.plugin_rpc, 'update_device_list', return_value={ 'devices_up': [], 'devices_down': [self._port.vif_id], 'failed_devices_up': [], 'failed_devices_down': []}),\ mock.patch.object(self.agent, 'int_br', new=int_br),\ mock.patch.object(self.agent, 'tun_br', new=tun_br),\ mock.patch.dict(self.agent.phys_brs, {self._physical_network: phys_br}),\ mock.patch.object(self.agent.dvr_agent, 'int_br', new=int_br),\ mock.patch.object(self.agent.dvr_agent, 'tun_br', new=tun_br),\ mock.patch.dict(self.agent.dvr_agent.phys_brs, {self._physical_network: phys_br}): failed_devices = {'added': set(), 'removed': set()} failed_devices['removed'] = self.agent.treat_devices_removed( [self._port.vif_id]) lvid = self.agent.local_vlan_map[self._net_uuid].vlan if ip_version == 4: expected = [ mock.call.delete_dvr_process_ipv4( vlan_tag=lvid, gateway_ip=gateway_ip), ] else: expected = [ mock.call.delete_dvr_process_ipv6( vlan_tag=lvid, gateway_mac=gateway_mac), ] expected.extend([ mock.call.delete_dvr_process( vlan_tag=lvid, vif_mac=self._port.vif_mac), ]) if network_type == 'vlan': self.assertEqual([], int_br.mock_calls) self.assertEqual([], tun_br.mock_calls) self.assertEqual(expected, phys_br.mock_calls) self.assertEqual({}, self.agent.dvr_agent.local_ports) else: self.assertEqual([], int_br.mock_calls) self.assertEqual(expected, tun_br.mock_calls) self.assertEqual([], phys_br.mock_calls) def _test_treat_devices_removed_for_dvr(self, device_owner, ip_version=4): self._setup_for_dvr_test() if ip_version == 4: gateway_ip = '1.1.1.1' cidr = '1.1.1.0/24' else: gateway_ip = '2001:100::1' cidr = '2001:100::0/64' gateway_mac = 'aa:bb:cc:11:22:33' int_br = mock.create_autospec(self.agent.int_br) tun_br = mock.create_autospec(self.agent.tun_br) int_br.set_db_attribute.return_value = True int_br.db_get_val.return_value = {} with mock.patch.object(self.agent.dvr_agent.plugin_rpc, 'get_subnet_for_dvr', return_value={'gateway_ip': gateway_ip, 'cidr': cidr, 'ip_version': ip_version, 'gateway_mac': gateway_mac}),\ mock.patch.object(self.agent.dvr_agent.plugin_rpc, 
'get_ports_on_host_by_subnet', return_value=[]),\ mock.patch.object(self.agent.dvr_agent.int_br, 'get_vif_port_by_id', return_value=self._port),\ mock.patch.object(self.agent, 'int_br', new=int_br),\ mock.patch.object(self.agent, 'tun_br', new=tun_br),\ mock.patch.object(self.agent.dvr_agent, 'int_br', new=int_br),\ mock.patch.object(self.agent.dvr_agent, 'tun_br', new=tun_br): self.agent.port_bound( self._port, self._net_uuid, 'vxlan', None, None, self._fixed_ips, n_const.DEVICE_OWNER_DVR_INTERFACE, False) lvid = self.agent.local_vlan_map[self._net_uuid].vlan self.assertEqual( self._expected_port_bound(self._port, lvid), int_br.mock_calls) expected_on_tun_br = [ mock.call.provision_local_vlan( network_type='vxlan', segmentation_id=None, lvid=lvid, distributed=True), ] + self._expected_install_dvr_process( port=self._port, lvid=lvid, ip_version=ip_version, gateway_ip=gateway_ip, gateway_mac=gateway_mac) self.assertEqual(expected_on_tun_br, tun_br.mock_calls) int_br.reset_mock() tun_br.reset_mock() self.agent.port_bound(self._compute_port, self._net_uuid, 'vxlan', None, None, self._compute_fixed_ips, device_owner, False) self.assertEqual( [ mock.call.install_dvr_to_src_mac( network_type='vxlan', gateway_mac='aa:bb:cc:11:22:33', dst_mac=self._compute_port.vif_mac, dst_port=self._compute_port.ofport, vlan_tag=lvid, ), ] + self._expected_port_bound(self._compute_port, lvid, False), int_br.mock_calls) self.assertEqual([], tun_br.mock_calls) int_br.reset_mock() tun_br.reset_mock() with mock.patch.object(self.agent, 'reclaim_local_vlan'),\ mock.patch.object(self.agent.plugin_rpc, 'update_device_list', return_value={ 'devices_up': [], 'devices_down': [ self._compute_port.vif_id], 'failed_devices_up': [], 'failed_devices_down': []}),\ mock.patch.object(self.agent, 'int_br', new=int_br),\ mock.patch.object(self.agent, 'tun_br', new=tun_br),\ mock.patch.object(self.agent.dvr_agent, 'int_br', new=int_br),\ mock.patch.object(self.agent.dvr_agent, 'tun_br', new=tun_br): failed_devices = {'added': set(), 'removed': set()} failed_devices['removed'] = self.agent.treat_devices_removed( [self._compute_port.vif_id]) int_br.assert_has_calls([ mock.call.delete_dvr_to_src_mac( network_type='vxlan', vlan_tag=lvid, dst_mac=self._compute_port.vif_mac, ), ]) self.assertEqual([], tun_br.mock_calls) def test_treat_devices_removed_for_dvr_with_compute_ports(self): self._test_treat_devices_removed_for_dvr( device_owner=DEVICE_OWNER_COMPUTE) self._test_treat_devices_removed_for_dvr( device_owner=DEVICE_OWNER_COMPUTE, ip_version=6) def test_treat_devices_removed_for_dvr_with_lbaas_vip_ports(self): self._test_treat_devices_removed_for_dvr( device_owner=n_const.DEVICE_OWNER_LOADBALANCER) self._test_treat_devices_removed_for_dvr( device_owner=n_const.DEVICE_OWNER_LOADBALANCER, ip_version=6) def test_treat_devices_removed_for_dvr_with_lbaasv2_vip_ports(self): self._test_treat_devices_removed_for_dvr( device_owner=n_const.DEVICE_OWNER_LOADBALANCERV2) self._test_treat_devices_removed_for_dvr( device_owner=n_const.DEVICE_OWNER_LOADBALANCERV2, ip_version=6) def test_treat_devices_removed_for_dvr_with_dhcp_ports(self): self._test_treat_devices_removed_for_dvr( device_owner=n_const.DEVICE_OWNER_DHCP) self._test_treat_devices_removed_for_dvr( device_owner=n_const.DEVICE_OWNER_DHCP, ip_version=6) def test_treat_devices_removed_for_dvr_csnat_port(self): self._setup_for_dvr_test() gateway_mac = 'aa:bb:cc:11:22:33' int_br = mock.create_autospec(self.agent.int_br) tun_br = mock.create_autospec(self.agent.tun_br) 
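        # Note: mock.create_autospec mirrors the real bridge classes, so
        # every expected call asserted below is also signature-checked
        # against the actual br-int/br-tun APIs.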
int_br.set_db_attribute.return_value = True int_br.db_get_val.return_value = {} with mock.patch.object(self.agent.dvr_agent.plugin_rpc, 'get_subnet_for_dvr', return_value={'gateway_ip': '1.1.1.1', 'cidr': '1.1.1.0/24', 'ip_version': 4, 'gateway_mac': gateway_mac}),\ mock.patch.object(self.agent.dvr_agent.plugin_rpc, 'get_ports_on_host_by_subnet', return_value=[]),\ mock.patch.object(self.agent.dvr_agent.int_br, 'get_vif_port_by_id', return_value=self._port),\ mock.patch.object(self.agent, 'int_br', new=int_br),\ mock.patch.object(self.agent, 'tun_br', new=tun_br),\ mock.patch.object(self.agent.dvr_agent, 'int_br', new=int_br),\ mock.patch.object(self.agent.dvr_agent, 'tun_br', new=tun_br): self.agent.port_bound( self._port, self._net_uuid, 'vxlan', None, None, self._fixed_ips, n_const.DEVICE_OWNER_ROUTER_SNAT, False) lvid = self.agent.local_vlan_map[self._net_uuid].vlan expected_on_int_br = [ mock.call.install_dvr_to_src_mac( network_type='vxlan', gateway_mac=gateway_mac, dst_mac=self._port.vif_mac, dst_port=self._port.ofport, vlan_tag=lvid, ), ] + self._expected_port_bound(self._port, lvid, is_dvr=False) self.assertEqual(expected_on_int_br, int_br.mock_calls) expected_on_tun_br = [ mock.call.provision_local_vlan( network_type='vxlan', lvid=lvid, segmentation_id=None, distributed=True, ), ] self.assertEqual(expected_on_tun_br, tun_br.mock_calls) int_br.reset_mock() tun_br.reset_mock() with mock.patch.object(self.agent, 'reclaim_local_vlan'),\ mock.patch.object(self.agent.plugin_rpc, 'update_device_list', return_value={ 'devices_up': [], 'devices_down': [self._port.vif_id], 'failed_devices_up': [], 'failed_devices_down': []}),\ mock.patch.object(self.agent, 'int_br', new=int_br),\ mock.patch.object(self.agent, 'tun_br', new=tun_br),\ mock.patch.object(self.agent.dvr_agent, 'int_br', new=int_br),\ mock.patch.object(self.agent.dvr_agent, 'tun_br', new=tun_br): failed_devices = {'added': set(), 'removed': set()} failed_devices['removed'] = self.agent.treat_devices_removed( [self._port.vif_id]) expected_on_int_br = [ mock.call.delete_dvr_to_src_mac( network_type='vxlan', dst_mac=self._port.vif_mac, vlan_tag=lvid, ), ] self.assertEqual(expected_on_int_br, int_br.mock_calls) expected_on_tun_br = [] self.assertEqual(expected_on_tun_br, tun_br.mock_calls) def test_setup_dvr_flows_on_int_br(self): self._setup_for_dvr_test() int_br = mock.create_autospec(self.agent.int_br) tun_br = mock.create_autospec(self.agent.tun_br) with mock.patch.object(self.agent, 'int_br', new=int_br),\ mock.patch.object(self.agent, 'tun_br', new=tun_br),\ mock.patch.object(self.agent.dvr_agent, 'int_br', new=int_br),\ mock.patch.object(self.agent.dvr_agent, 'tun_br', new=tun_br),\ mock.patch.object(self.agent.dvr_agent.plugin_rpc, 'get_dvr_mac_address_list', return_value=[{'host': 'cn1', 'mac_address': 'aa:bb:cc:dd:ee:ff'}, {'host': 'cn2', 'mac_address': '11:22:33:44:55:66'}]): self.agent.dvr_agent.setup_dvr_flows_on_integ_br() self.assertTrue(self.agent.dvr_agent.in_distributed_mode()) physical_networks = list( self.agent.dvr_agent.bridge_mappings.keys()) ioport = self.agent.dvr_agent.int_ofports[physical_networks[0]] expected_on_int_br = [ # setup_dvr_flows_on_integ_br mock.call.setup_canary_table(), mock.call.install_drop(table_id=constants.DVR_TO_SRC_MAC, priority=1), mock.call.install_drop(table_id=constants.DVR_TO_SRC_MAC_VLAN, priority=1), mock.call.install_normal(table_id=constants.LOCAL_SWITCHING, priority=1), mock.call.install_drop(table_id=constants.LOCAL_SWITCHING, priority=2, in_port=ioport), ] 
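            # In words: the DVR bootstrap on br-int sets up the canary
            # table (used by check_ovs_status to detect OVS restarts),
            # default drops in both DVR-to-source-MAC tables, NORMAL
            # switching as the lowest-priority fallback, and a
            # higher-priority drop on the patch port from the physical
            # bridge, presumably so physnet traffic is not switched
            # before the per-host DVR MAC rules are in place.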
self.assertEqual(expected_on_int_br, int_br.mock_calls) self.assertEqual([], tun_br.mock_calls) def test_get_dvr_mac_address(self): self._setup_for_dvr_test() self.agent.dvr_agent.dvr_mac_address = None with mock.patch.object(self.agent.dvr_agent.plugin_rpc, 'get_dvr_mac_address_by_host', return_value={'host': 'cn1', 'mac_address': 'aa:22:33:44:55:66'}): self.agent.dvr_agent.get_dvr_mac_address() self.assertEqual('aa:22:33:44:55:66', self.agent.dvr_agent.dvr_mac_address) self.assertTrue(self.agent.dvr_agent.in_distributed_mode()) def test_get_dvr_mac_address_exception(self): self._setup_for_dvr_test() self.agent.dvr_agent.dvr_mac_address = None int_br = mock.create_autospec(self.agent.int_br) with mock.patch.object(self.agent.dvr_agent.plugin_rpc, 'get_dvr_mac_address_by_host', side_effect=oslo_messaging.RemoteError),\ mock.patch.object(self.agent, 'int_br', new=int_br),\ mock.patch.object(self.agent.dvr_agent, 'int_br', new=int_br): with testtools.ExpectedException(SystemExit): self.agent.dvr_agent.get_dvr_mac_address() self.assertIsNone(self.agent.dvr_agent.dvr_mac_address) self.assertFalse(self.agent.dvr_agent.in_distributed_mode()) def test_get_dvr_mac_address_retried(self): valid_entry = {'host': 'cn1', 'mac_address': 'aa:22:33:44:55:66'} raise_timeout = oslo_messaging.MessagingTimeout() # Raise a timeout the first 2 times it calls get_dvr_mac_address() self._setup_for_dvr_test() self.agent.dvr_agent.dvr_mac_address = None with mock.patch.object(self.agent.dvr_agent.plugin_rpc, 'get_dvr_mac_address_by_host', side_effect=(raise_timeout, raise_timeout, valid_entry)): self.agent.dvr_agent.get_dvr_mac_address() self.assertEqual('aa:22:33:44:55:66', self.agent.dvr_agent.dvr_mac_address) self.assertTrue(self.agent.dvr_agent.in_distributed_mode()) self.assertEqual(self.agent.dvr_agent.plugin_rpc. get_dvr_mac_address_by_host.call_count, 3) def test_get_dvr_mac_address_retried_max(self): raise_timeout = oslo_messaging.MessagingTimeout() # Raise a timeout every time until we give up, currently 5 tries self._setup_for_dvr_test() self.agent.dvr_agent.dvr_mac_address = None int_br = mock.create_autospec(self.agent.int_br) with mock.patch.object(self.agent.dvr_agent.plugin_rpc, 'get_dvr_mac_address_by_host', side_effect=raise_timeout),\ mock.patch.object(utils, "execute"),\ mock.patch.object(self.agent, 'int_br', new=int_br),\ mock.patch.object(self.agent.dvr_agent, 'int_br', new=int_br): with testtools.ExpectedException(SystemExit): self.agent.dvr_agent.get_dvr_mac_address() self.assertIsNone(self.agent.dvr_agent.dvr_mac_address) self.assertFalse(self.agent.dvr_agent.in_distributed_mode()) self.assertEqual(self.agent.dvr_agent.plugin_rpc. 
get_dvr_mac_address_by_host.call_count, 5) def test_dvr_mac_address_update(self): self._setup_for_dvr_test() newhost = 'cn2' newmac = 'aa:bb:cc:dd:ee:ff' int_br = mock.create_autospec(self.agent.int_br) tun_br = mock.create_autospec(self.agent.tun_br) phys_br = mock.create_autospec(self.br_phys_cls('br-phys')) physical_network = 'physeth1' with mock.patch.object(self.agent, 'int_br', new=int_br),\ mock.patch.object(self.agent, 'tun_br', new=tun_br),\ mock.patch.dict(self.agent.phys_brs, {physical_network: phys_br}),\ mock.patch.object(self.agent.dvr_agent, 'int_br', new=int_br),\ mock.patch.object(self.agent.dvr_agent, 'tun_br', new=tun_br),\ mock.patch.dict(self.agent.dvr_agent.phys_brs, {physical_network: phys_br}): self.agent.dvr_agent.\ dvr_mac_address_update( dvr_macs=[{'host': newhost, 'mac_address': newmac}]) expected_on_int_br = [ mock.call.add_dvr_mac_vlan( mac=newmac, port=self.agent.int_ofports[physical_network]), mock.call.add_dvr_mac_tun( mac=newmac, port=self.agent.patch_tun_ofport), ] expected_on_tun_br = [ mock.call.add_dvr_mac_tun( mac=newmac, port=self.agent.patch_int_ofport), ] expected_on_phys_br = [ mock.call.add_dvr_mac_vlan( mac=newmac, port=self.agent.phys_ofports[physical_network]), ] self.assertEqual(expected_on_int_br, int_br.mock_calls) self.assertEqual(expected_on_tun_br, tun_br.mock_calls) self.assertEqual(expected_on_phys_br, phys_br.mock_calls) int_br.reset_mock() tun_br.reset_mock() phys_br.reset_mock() with mock.patch.object(self.agent, 'int_br', new=int_br),\ mock.patch.object(self.agent, 'tun_br', new=tun_br),\ mock.patch.dict(self.agent.phys_brs, {physical_network: phys_br}),\ mock.patch.object(self.agent.dvr_agent, 'int_br', new=int_br),\ mock.patch.object(self.agent.dvr_agent, 'tun_br', new=tun_br),\ mock.patch.dict(self.agent.dvr_agent.phys_brs, {physical_network: phys_br}): self.agent.dvr_agent.dvr_mac_address_update(dvr_macs=[]) expected_on_int_br = [ mock.call.remove_dvr_mac_vlan( mac=newmac), mock.call.remove_dvr_mac_tun( mac=newmac, port=self.agent.patch_tun_ofport), ] expected_on_tun_br = [ mock.call.remove_dvr_mac_tun( mac=newmac), ] expected_on_phys_br = [ mock.call.remove_dvr_mac_vlan( mac=newmac), ] self.assertEqual(expected_on_int_br, int_br.mock_calls) self.assertEqual(expected_on_tun_br, tun_br.mock_calls) self.assertEqual(expected_on_phys_br, phys_br.mock_calls) def test_ovs_restart(self): self._setup_for_dvr_test() reset_methods = ( 'reset_ovs_parameters', 'reset_dvr_parameters', 'setup_dvr_flows_on_integ_br', 'setup_dvr_flows_on_tun_br', 'setup_dvr_flows_on_phys_br', 'setup_dvr_mac_flows_on_all_brs') reset_mocks = [mock.patch.object(self.agent.dvr_agent, method).start() for method in reset_methods] tun_br = mock.create_autospec(self.agent.tun_br) with mock.patch.object(self.agent, 'check_ovs_status', return_value=constants.OVS_RESTARTED),\ mock.patch.object(self.agent, '_agent_has_updates', side_effect=TypeError('loop exit')),\ mock.patch.object(self.agent, 'tun_br', new=tun_br),\ mock.patch.object(self.agent, 'setup_physical_bridges'),\ mock.patch.object(self.agent, 'setup_integration_br'),\ mock.patch.object(self.agent, 'setup_tunnel_br'),\ mock.patch.object(self.agent, 'state_rpc'): try: self.agent.rpc_loop(polling_manager=mock.Mock()) except TypeError: pass self.assertTrue(all([x.called for x in reset_mocks])) def _test_scan_ports_failure(self, scan_method_name): with mock.patch.object(self.agent, 'check_ovs_status', return_value=constants.OVS_RESTARTED),\ mock.patch.object(self.agent, scan_method_name, 
side_effect=TypeError('broken')),\ mock.patch.object(self.agent, '_agent_has_updates', return_value=True),\ mock.patch.object(self.agent, '_check_and_handle_signal', side_effect=[True, False]),\ mock.patch.object(self.agent, 'setup_physical_bridges'),\ mock.patch.object(self.agent, 'setup_integration_br'),\ mock.patch.object(self.agent, 'state_rpc'): # block RPC calls and bridge calls self.agent.rpc_loop(polling_manager=mock.Mock()) def test_scan_ports_failure(self): self._test_scan_ports_failure('scan_ports') def test_scan_ancillary_ports_failure(self): with mock.patch.object(self.agent, 'scan_ports'): with mock.patch.object(self.agent, 'update_stale_ofport_rules'): self.agent.ancillary_brs = mock.Mock() self._test_scan_ports_failure('scan_ancillary_ports') class TestOvsDvrNeutronAgentOFCtl(TestOvsDvrNeutronAgent, ovs_test_base.OVSOFCtlTestBase): pass class TestOvsDvrNeutronAgentRyu(TestOvsDvrNeutronAgent, ovs_test_base.OVSRyuTestBase): pass class TestValidateTunnelLocalIP(base.BaseTestCase): def test_validate_local_ip_with_valid_ip(self): mock_get_device_by_ip = mock.patch.object( ip_lib.IPWrapper, 'get_device_by_ip').start() ovs_agent.validate_local_ip(FAKE_IP1) mock_get_device_by_ip.assert_called_once_with(FAKE_IP1) def test_validate_local_ip_with_valid_ipv6(self): mock_get_device_by_ip = mock.patch.object( ip_lib.IPWrapper, 'get_device_by_ip').start() ovs_agent.validate_local_ip(FAKE_IP6) mock_get_device_by_ip.assert_called_once_with(FAKE_IP6) def test_validate_local_ip_with_none_ip(self): with testtools.ExpectedException(SystemExit): ovs_agent.validate_local_ip(None) def test_validate_local_ip_with_invalid_ip(self): mock_get_device_by_ip = mock.patch.object( ip_lib.IPWrapper, 'get_device_by_ip').start() mock_get_device_by_ip.return_value = None with testtools.ExpectedException(SystemExit): ovs_agent.validate_local_ip(FAKE_IP1) mock_get_device_by_ip.assert_called_once_with(FAKE_IP1) def test_validate_local_ip_with_invalid_ipv6(self): mock_get_device_by_ip = mock.patch.object( ip_lib.IPWrapper, 'get_device_by_ip').start() mock_get_device_by_ip.return_value = None with testtools.ExpectedException(SystemExit): ovs_agent.validate_local_ip(FAKE_IP6) mock_get_device_by_ip.assert_called_once_with(FAKE_IP6) class TestOvsAgentTunnelName(base.BaseTestCase): def test_get_tunnel_hash_invalid_address(self): hashlen = n_const.DEVICE_NAME_MAX_LEN self.assertIsNone( ovs_agent.OVSNeutronAgent.get_tunnel_hash('a.b.c.d', hashlen)) def test_get_tunnel_name_vxlan(self): self.assertEqual( 'vxlan-7f000002', ovs_agent.OVSNeutronAgent.get_tunnel_name( 'vxlan', '127.0.0.1', '127.0.0.2')) def test_get_tunnel_name_gre(self): self.assertEqual( 'gre-7f000002', ovs_agent.OVSNeutronAgent.get_tunnel_name( 'gre', '127.0.0.1', '127.0.0.2')) def test_get_tunnel_name_vxlan_ipv6(self): self.assertEqual( 'vxlan-pehtjzksi', ovs_agent.OVSNeutronAgent.get_tunnel_name( 'vxlan', '2001:db8::1', '2001:db8::2')) def test_get_tunnel_name_gre_ipv6(self): self.assertEqual( 'gre-pehtjzksiqr', ovs_agent.OVSNeutronAgent.get_tunnel_name( 'gre', '2001:db8::1', '2001:db8::2')) neutron-8.4.0/neutron/tests/unit/plugins/ml2/drivers/openvswitch/agent/extension_drivers/0000775000567000056710000000000013044373210033240 5ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/tests/unit/plugins/ml2/drivers/openvswitch/agent/extension_drivers/__init__.py0000664000567000056710000000000013044372736035353 0ustar jenkinsjenkins00000000000000././@LongLink0000000000000000000000000000015400000000000011215 Lustar 
00000000000000neutron-8.4.0/neutron/tests/unit/plugins/ml2/drivers/openvswitch/agent/extension_drivers/test_qos_driver.pyneutron-8.4.0/neutron/tests/unit/plugins/ml2/drivers/openvswitch/agent/extension_drivers/test_qos_dr0000664000567000056710000001140213044372760035520 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import copy import mock from oslo_utils import uuidutils from neutron import context from neutron.objects.qos import policy from neutron.objects.qos import rule from neutron.plugins.ml2.drivers.openvswitch.agent.extension_drivers import ( qos_driver) from neutron.tests.unit.plugins.ml2.drivers.openvswitch.agent import ( ovs_test_base) class QosOVSAgentDriverTestCase(ovs_test_base.OVSAgentConfigTestBase): def setUp(self): super(QosOVSAgentDriverTestCase, self).setUp() self.context = context.get_admin_context() self.qos_driver = qos_driver.QosOVSAgentDriver() self.qos_driver.initialize() self.qos_driver.br_int = mock.Mock() self.qos_driver.br_int.get_egress_bw_limit_for_port = mock.Mock( return_value=(1000, 10)) self.get = self.qos_driver.br_int.get_egress_bw_limit_for_port self.qos_driver.br_int.del_egress_bw_limit_for_port = mock.Mock() self.delete = self.qos_driver.br_int.delete_egress_bw_limit_for_port self.qos_driver.br_int.create_egress_bw_limit_for_port = mock.Mock() self.create = self.qos_driver.br_int.create_egress_bw_limit_for_port self.rule = self._create_bw_limit_rule_obj() self.qos_policy = self._create_qos_policy_obj([self.rule]) self.port = self._create_fake_port(self.qos_policy.id) def _create_bw_limit_rule_obj(self): rule_obj = rule.QosBandwidthLimitRule() rule_obj.id = uuidutils.generate_uuid() rule_obj.max_kbps = 2 rule_obj.max_burst_kbps = 200 rule_obj.obj_reset_changes() return rule_obj def _create_qos_policy_obj(self, rules): policy_dict = {'id': uuidutils.generate_uuid(), 'tenant_id': uuidutils.generate_uuid(), 'name': 'test', 'description': 'test', 'shared': False, 'rules': rules} policy_obj = policy.QosPolicy(self.context, **policy_dict) policy_obj.obj_reset_changes() for policy_rule in policy_obj.rules: policy_rule.qos_policy_id = policy_obj.id policy_rule.obj_reset_changes() return policy_obj def _create_fake_port(self, policy_id): self.port_name = 'fakeport' class FakeVifPort(object): port_name = self.port_name return {'vif_port': FakeVifPort(), 'qos_policy_id': policy_id, 'network_qos_policy_id': None, 'device_owner': uuidutils.generate_uuid()} def test_create_new_rule(self): self.qos_driver.br_int.get_egress_bw_limit_for_port = mock.Mock( return_value=(None, None)) self.qos_driver.create(self.port, self.qos_policy) # Assert create is the last call self.assertEqual( 'create_egress_bw_limit_for_port', self.qos_driver.br_int.method_calls[-1][0]) self.assertEqual(0, self.delete.call_count) self.create.assert_called_once_with( self.port_name, self.rule.max_kbps, self.rule.max_burst_kbps) def test_create_existing_rules(self): self.qos_driver.create(self.port, self.qos_policy) self._assert_rule_create_updated() def test_update_rules(self): 
self.qos_driver.update(self.port, self.qos_policy) self._assert_rule_create_updated() def test_update_rules_no_vif_port(self): port = copy.copy(self.port) port.pop("vif_port") self.qos_driver.update(port, self.qos_policy) self.create.assert_not_called() def test_delete_rules(self): self.qos_driver.delete(self.port, self.qos_policy) self.delete.assert_called_once_with(self.port_name) def test_delete_rules_no_vif_port(self): port = copy.copy(self.port) port.pop("vif_port") self.qos_driver.delete(port, self.qos_policy) self.delete.assert_not_called() def _assert_rule_create_updated(self): # Assert create is the last call self.assertEqual( 'create_egress_bw_limit_for_port', self.qos_driver.br_int.method_calls[-1][0]) self.create.assert_called_once_with( self.port_name, self.rule.max_kbps, self.rule.max_burst_kbps) neutron-8.4.0/neutron/tests/unit/plugins/ml2/drivers/__init__.py0000664000567000056710000000000013044372736026132 0ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/tests/unit/plugins/ml2/drivers/ext_test.py0000664000567000056710000002072313044372760026245 0ustar jenkinsjenkins00000000000000# Copyright 2015 Intel Corporation. # Copyright 2015 Isaku Yamahata # # All Rights Reserved. # # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import oslo_db.sqlalchemy.session import sqlalchemy as sa from sqlalchemy import orm from neutron.api import extensions from neutron.api.v2 import attributes from neutron.db import model_base from neutron.db import models_v2 from neutron.plugins.ml2 import driver_api from neutron.tests.unit.plugins.ml2 import extensions as test_extensions class TestExtensionDriverBase(driver_api.ExtensionDriver): _supported_extension_aliases = 'fake_extension' def initialize(self): extensions.append_api_extensions_path(test_extensions.__path__) @property def extension_alias(self): return self._supported_extension_aliases class TestExtensionDriver(TestExtensionDriverBase): def initialize(self): super(TestExtensionDriver, self).initialize() self.network_extension = 'Test_Network_Extension' self.subnet_extension = 'Test_Subnet_Extension' self.port_extension = 'Test_Port_Extension' def _check_create(self, session, data, result): assert(isinstance(session, oslo_db.sqlalchemy.session.Session)) assert(isinstance(data, dict)) assert('id' not in data) assert(isinstance(result, dict)) assert(result['id'] is not None) def _check_update(self, session, data, result): assert(isinstance(session, oslo_db.sqlalchemy.session.Session)) assert(isinstance(data, dict)) assert(isinstance(result, dict)) assert(result['id'] is not None) def _check_extend(self, session, result, db_entry, expected_db_entry_class): assert(isinstance(session, oslo_db.sqlalchemy.session.Session)) assert(isinstance(result, dict)) assert(result['id'] is not None) assert(isinstance(db_entry, expected_db_entry_class)) assert(db_entry.id == result['id']) def process_create_network(self, plugin_context, data, result): session = plugin_context.session self._check_create(session, data, result) result['network_extension'] = 
self.network_extension + '_create' def process_update_network(self, plugin_context, data, result): session = plugin_context.session self._check_update(session, data, result) self.network_extension = data['network_extension'] result['network_extension'] = self.network_extension + '_update' def extend_network_dict(self, session, net_db, result): self._check_extend(session, result, net_db, models_v2.Network) result['network_extension'] = self.network_extension + '_extend' def process_create_subnet(self, plugin_context, data, result): session = plugin_context.session self._check_create(session, data, result) result['subnet_extension'] = self.subnet_extension + '_create' def process_update_subnet(self, plugin_context, data, result): session = plugin_context.session self._check_update(session, data, result) self.subnet_extension = data['subnet_extension'] result['subnet_extension'] = self.subnet_extension + '_update' def extend_subnet_dict(self, session, subnet_db, result): self._check_extend(session, result, subnet_db, models_v2.Subnet) result['subnet_extension'] = self.subnet_extension + '_extend' def process_create_port(self, plugin_context, data, result): session = plugin_context.session self._check_create(session, data, result) result['port_extension'] = self.port_extension + '_create' def process_update_port(self, plugin_context, data, result): session = plugin_context.session self._check_update(session, data, result) self.port_extension = data['port_extension'] result['port_extension'] = self.port_extension + '_update' def extend_port_dict(self, session, port_db, result): self._check_extend(session, result, port_db, models_v2.Port) result['port_extension'] = self.port_extension + '_extend' class TestNetworkExtension(model_base.BASEV2): network_id = sa.Column(sa.String(36), sa.ForeignKey('networks.id', ondelete="CASCADE"), primary_key=True) value = sa.Column(sa.String(64)) network = orm.relationship( models_v2.Network, backref=orm.backref('extension', cascade='delete', uselist=False)) class TestSubnetExtension(model_base.BASEV2): subnet_id = sa.Column(sa.String(36), sa.ForeignKey('subnets.id', ondelete="CASCADE"), primary_key=True) value = sa.Column(sa.String(64)) subnet = orm.relationship( models_v2.Subnet, backref=orm.backref('extension', cascade='delete', uselist=False)) class TestPortExtension(model_base.BASEV2): port_id = sa.Column(sa.String(36), sa.ForeignKey('ports.id', ondelete="CASCADE"), primary_key=True) value = sa.Column(sa.String(64)) port = orm.relationship( models_v2.Port, backref=orm.backref('extension', cascade='delete', uselist=False)) class TestDBExtensionDriver(TestExtensionDriverBase): def _get_value(self, data, key): value = data[key] if not attributes.is_attr_set(value): value = '' return value def process_create_network(self, plugin_context, data, result): session = plugin_context.session value = self._get_value(data, 'network_extension') record = TestNetworkExtension(network_id=result['id'], value=value) session.add(record) result['network_extension'] = value def process_update_network(self, plugin_context, data, result): session = plugin_context.session record = (session.query(TestNetworkExtension). 
filter_by(network_id=result['id']).one()) value = data.get('network_extension') if value and value != record.value: record.value = value result['network_extension'] = record.value def extend_network_dict(self, session, net_db, result): result['network_extension'] = net_db.extension.value def process_create_subnet(self, plugin_context, data, result): session = plugin_context.session value = self._get_value(data, 'subnet_extension') record = TestSubnetExtension(subnet_id=result['id'], value=value) session.add(record) result['subnet_extension'] = value def process_update_subnet(self, plugin_context, data, result): session = plugin_context.session record = (session.query(TestSubnetExtension). filter_by(subnet_id=result['id']).one()) value = data.get('subnet_extension') if value and value != record.value: record.value = value result['subnet_extension'] = record.value def extend_subnet_dict(self, session, subnet_db, result): value = subnet_db.extension.value if subnet_db.extension else '' result['subnet_extension'] = value def process_create_port(self, plugin_context, data, result): session = plugin_context.session value = self._get_value(data, 'port_extension') record = TestPortExtension(port_id=result['id'], value=value) session.add(record) result['port_extension'] = value def process_update_port(self, plugin_context, data, result): session = plugin_context.session record = (session.query(TestPortExtension). filter_by(port_id=result['id']).one()) value = data.get('port_extension') if value and value != record.value: record.value = value result['port_extension'] = record.value def extend_port_dict(self, session, port_db, result): value = port_db.extension.value if port_db.extension else '' result['port_extension'] = value neutron-8.4.0/neutron/tests/unit/plugins/ml2/drivers/test_type_gre.py0000664000567000056710000000726313044372760027267 0ustar jenkinsjenkins00000000000000# Copyright (c) 2013 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
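# The get_mtu() cases below pin the driver's MTU rule: ignore unset (zero)
# ceilings among global_physnet_mtu and the ml2 path_mtu, take the smallest
# remaining one, and subtract the GRE encapsulation overhead (with no ceiling
# configured the result is 0). A minimal standalone sketch of that selection
# rule; the helper name and the default overhead value here are illustrative
# assumptions, not part of the driver API:
def _effective_gre_mtu_sketch(global_physnet_mtu, path_mtu, overhead=42):
    # Zero means "not configured" and must not win the min().
    ceilings = [mtu for mtu in (global_physnet_mtu, path_mtu) if mtu > 0]
    return (min(ceilings) - overhead) if ceilings else 0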
from neutron.plugins.common import constants as p_const from neutron.plugins.ml2 import config from neutron.plugins.ml2.drivers import type_gre from neutron.tests.unit.plugins.ml2.drivers import base_type_tunnel from neutron.tests.unit.plugins.ml2 import test_rpc from neutron.tests.unit import testlib_api TUNNEL_IP_ONE = "10.10.10.10" TUNNEL_IP_TWO = "10.10.10.20" HOST_ONE = 'fake_host_one' HOST_TWO = 'fake_host_two' def _add_allocation(session, gre_id, allocated=False): allocation = type_gre.GreAllocation(gre_id=gre_id, allocated=allocated) allocation.save(session) def _get_allocation(session, gre_id): return session.query(type_gre.GreAllocation).filter_by( gre_id=gre_id).one() class GreTypeTest(base_type_tunnel.TunnelTypeTestMixin, testlib_api.SqlTestCase): DRIVER_MODULE = type_gre DRIVER_CLASS = type_gre.GreTypeDriver TYPE = p_const.TYPE_GRE def test_get_endpoints(self): self.add_endpoint() self.add_endpoint( base_type_tunnel.TUNNEL_IP_TWO, base_type_tunnel.HOST_TWO) endpoints = self.driver.get_endpoints() for endpoint in endpoints: if endpoint['ip_address'] == base_type_tunnel.TUNNEL_IP_ONE: self.assertEqual(base_type_tunnel.HOST_ONE, endpoint['host']) elif endpoint['ip_address'] == base_type_tunnel.TUNNEL_IP_TWO: self.assertEqual(base_type_tunnel.HOST_TWO, endpoint['host']) def test_get_mtu(self): config.cfg.CONF.set_override('global_physnet_mtu', 1500) config.cfg.CONF.set_override('path_mtu', 1475, group='ml2') self.driver.physnet_mtus = {'physnet1': 1450, 'physnet2': 1400} self.assertEqual(1475 - p_const.GRE_ENCAP_OVERHEAD, self.driver.get_mtu('physnet1')) config.cfg.CONF.set_override('global_physnet_mtu', 1425) config.cfg.CONF.set_override('path_mtu', 1475, group='ml2') self.driver.physnet_mtus = {'physnet1': 1400, 'physnet2': 1400} self.assertEqual(1425 - p_const.GRE_ENCAP_OVERHEAD, self.driver.get_mtu('physnet1')) config.cfg.CONF.set_override('global_physnet_mtu', 0) config.cfg.CONF.set_override('path_mtu', 1475, group='ml2') self.driver.physnet_mtus = {'physnet1': 1450, 'physnet2': 1425} self.assertEqual(1475 - p_const.GRE_ENCAP_OVERHEAD, self.driver.get_mtu('physnet2')) config.cfg.CONF.set_override('global_physnet_mtu', 0) config.cfg.CONF.set_override('path_mtu', 0, group='ml2') self.driver.physnet_mtus = {} self.assertEqual(0, self.driver.get_mtu('physnet1')) class GreTypeMultiRangeTest(base_type_tunnel.TunnelTypeMultiRangeTestMixin, testlib_api.SqlTestCase): DRIVER_CLASS = type_gre.GreTypeDriver class GreTypeRpcCallbackTest(base_type_tunnel.TunnelRpcCallbackTestMixin, test_rpc.RpcCallbacksTestCase, testlib_api.SqlTestCase): DRIVER_CLASS = type_gre.GreTypeDriver TYPE = p_const.TYPE_GRE neutron-8.4.0/neutron/tests/unit/plugins/ml2/drivers/test_type_geneve.py0000664000567000056710000000415013044372760027753 0ustar jenkinsjenkins00000000000000# Copyright (c) 2015 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
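# GeneveTypeTest below reuses the shared tunnel-type mixins and only binds the
# driver under test; the endpoint assertions mirror the GRE variant with
# Geneve-specific fixtures. The composition pattern, schematically (the class
# name and TYPE string in this sketch are placeholders, not a real driver):
#
#     class SomeTypeTest(base_type_tunnel.TunnelTypeTestMixin,
#                        testlib_api.SqlTestCase):
#         DRIVER_CLASS = some_module.SomeTypeDriver   # driver under test
#         TYPE = 'some-type'                          # network type string
#
# The mixin contributes the test methods, SqlTestCase contributes the database
# fixture, and the subclass supplies nothing beyond these two bindings.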
from neutron.plugins.common import constants as p_const from neutron.plugins.ml2.drivers import type_geneve from neutron.tests.unit.plugins.ml2.drivers import base_type_tunnel from neutron.tests.unit.plugins.ml2 import test_rpc from neutron.tests.unit import testlib_api TUNNEL_IP_ONE = "10.10.10.77" TUNNEL_IP_TWO = "10.10.10.78" HOST_ONE = 'fake_host_one1' HOST_TWO = 'fake_host_two2' class GeneveTypeTest(base_type_tunnel.TunnelTypeTestMixin, testlib_api.SqlTestCase): DRIVER_CLASS = type_geneve.GeneveTypeDriver TYPE = p_const.TYPE_GENEVE def test_get_endpoints(self): self.driver.add_endpoint(TUNNEL_IP_ONE, HOST_ONE) self.driver.add_endpoint(TUNNEL_IP_TWO, HOST_TWO) endpoints = self.driver.get_endpoints() for endpoint in endpoints: if endpoint['ip_address'] == TUNNEL_IP_ONE: self.assertEqual(HOST_ONE, endpoint['host']) elif endpoint['ip_address'] == TUNNEL_IP_TWO: self.assertEqual(HOST_TWO, endpoint['host']) class GeneveTypeMultiRangeTest(base_type_tunnel.TunnelTypeMultiRangeTestMixin, testlib_api.SqlTestCase): DRIVER_CLASS = type_geneve.GeneveTypeDriver class GeneveTypeRpcCallbackTest(base_type_tunnel.TunnelRpcCallbackTestMixin, test_rpc.RpcCallbacksTestCase, testlib_api.SqlTestCase): DRIVER_CLASS = type_geneve.GeneveTypeDriver TYPE = p_const.TYPE_GENEVE neutron-8.4.0/neutron/tests/unit/plugins/ml2/drivers/base_type_tunnel.py0000664000567000056710000003706113044372760027751 0ustar jenkinsjenkins00000000000000# Copyright (c) 2014 OpenStack Foundation, all rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
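# The TunnelTypeTestMixin below exercises sync_allocations(), which reconciles
# the driver's allocation table with the configured tunnel ranges: in-range
# ids gain rows, out-of-range unallocated rows are purged, and ids that are
# already allocated survive a range change. A minimal sketch of the underlying
# set arithmetic (the helper name is hypothetical and allocated-row handling
# is elided):
def _diff_tunnel_ids_sketch(existing_ids, tunnel_ranges):
    wanted = set()
    for tun_min, tun_max in tunnel_ranges:
        wanted.update(range(tun_min, tun_max + 1))
    to_add = wanted - set(existing_ids)
    to_purge = set(existing_ids) - wanted  # skip rows still allocated
    return to_add, to_purge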
import mock from six import moves import testtools from testtools import matchers from neutron.common import exceptions as exc from neutron.db import api as db from neutron.plugins.ml2 import driver_api as api from neutron.plugins.ml2.drivers import type_tunnel TUNNEL_IP_ONE = "10.10.10.10" TUNNEL_IP_TWO = "10.10.10.20" HOST_ONE = 'fake_host_one' HOST_TWO = 'fake_host_two' TUN_MIN = 100 TUN_MAX = 109 TUNNEL_RANGES = [(TUN_MIN, TUN_MAX)] UPDATED_TUNNEL_RANGES = [(TUN_MIN + 5, TUN_MAX + 5)] class TunnelTypeTestMixin(object): DRIVER_CLASS = None TYPE = None def setUp(self): super(TunnelTypeTestMixin, self).setUp() self.driver = self.DRIVER_CLASS() self.driver.tunnel_ranges = TUNNEL_RANGES self.driver.sync_allocations() self.session = db.get_session() def test_tunnel_type(self): self.assertEqual(self.TYPE, self.driver.get_type()) def test_validate_provider_segment(self): segment = {api.NETWORK_TYPE: self.TYPE, api.PHYSICAL_NETWORK: 'phys_net', api.SEGMENTATION_ID: None} with testtools.ExpectedException(exc.InvalidInput): self.driver.validate_provider_segment(segment) segment[api.PHYSICAL_NETWORK] = None self.driver.validate_provider_segment(segment) segment[api.SEGMENTATION_ID] = 1 self.driver.validate_provider_segment(segment) def test_sync_tunnel_allocations(self): self.assertIsNone( self.driver.get_allocation(self.session, (TUN_MIN - 1))) self.assertFalse( self.driver.get_allocation(self.session, (TUN_MIN)).allocated) self.assertFalse( self.driver.get_allocation(self.session, (TUN_MIN + 1)).allocated) self.assertFalse( self.driver.get_allocation(self.session, (TUN_MAX - 1)).allocated) self.assertFalse( self.driver.get_allocation(self.session, (TUN_MAX)).allocated) self.assertIsNone( self.driver.get_allocation(self.session, (TUN_MAX + 1))) self.driver.tunnel_ranges = UPDATED_TUNNEL_RANGES self.driver.sync_allocations() self.assertIsNone( self.driver.get_allocation(self.session, (TUN_MIN + 5 - 1))) self.assertFalse( self.driver.get_allocation(self.session, (TUN_MIN + 5)).allocated) self.assertFalse( self.driver.get_allocation(self.session, (TUN_MIN + 5 + 1)).allocated) self.assertFalse( self.driver.get_allocation(self.session, (TUN_MAX + 5 - 1)).allocated) self.assertFalse( self.driver.get_allocation(self.session, (TUN_MAX + 5)).allocated) self.assertIsNone( self.driver.get_allocation(self.session, (TUN_MAX + 5 + 1))) def _test_sync_allocations_and_allocated(self, tunnel_id): segment = {api.NETWORK_TYPE: self.TYPE, api.PHYSICAL_NETWORK: None, api.SEGMENTATION_ID: tunnel_id} self.driver.reserve_provider_segment(self.session, segment) self.driver.tunnel_ranges = UPDATED_TUNNEL_RANGES self.driver.sync_allocations() self.assertTrue( self.driver.get_allocation(self.session, tunnel_id).allocated) def test_sync_allocations_and_allocated_in_initial_range(self): self._test_sync_allocations_and_allocated(TUN_MIN + 2) def test_sync_allocations_and_allocated_in_final_range(self): self._test_sync_allocations_and_allocated(TUN_MAX + 2) def test_sync_allocations_no_op(self): def verify_no_chunk(iterable, chunk_size): # no segment removed/added self.assertEqual(0, len(list(iterable))) return [] with mock.patch.object( type_tunnel, 'chunks', side_effect=verify_no_chunk) as chunks: self.driver.sync_allocations() self.assertEqual(2, len(chunks.mock_calls)) def test_partial_segment_is_partial_segment(self): segment = {api.NETWORK_TYPE: self.TYPE, api.PHYSICAL_NETWORK: None, api.SEGMENTATION_ID: None} self.assertTrue(self.driver.is_partial_segment(segment)) def test_specific_segment_is_not_partial_segment(self): 
segment = {api.NETWORK_TYPE: self.TYPE, api.PHYSICAL_NETWORK: None, api.SEGMENTATION_ID: 101} self.assertFalse(self.driver.is_partial_segment(segment)) def test_reserve_provider_segment_full_specs(self): segment = {api.NETWORK_TYPE: self.TYPE, api.PHYSICAL_NETWORK: None, api.SEGMENTATION_ID: 101} observed = self.driver.reserve_provider_segment(self.session, segment) alloc = self.driver.get_allocation(self.session, observed[api.SEGMENTATION_ID]) self.assertTrue(alloc.allocated) with testtools.ExpectedException(exc.TunnelIdInUse): self.driver.reserve_provider_segment(self.session, segment) self.driver.release_segment(self.session, segment) alloc = self.driver.get_allocation(self.session, observed[api.SEGMENTATION_ID]) self.assertFalse(alloc.allocated) segment[api.SEGMENTATION_ID] = 1000 observed = self.driver.reserve_provider_segment(self.session, segment) alloc = self.driver.get_allocation(self.session, observed[api.SEGMENTATION_ID]) self.assertTrue(alloc.allocated) self.driver.release_segment(self.session, segment) alloc = self.driver.get_allocation(self.session, observed[api.SEGMENTATION_ID]) self.assertIsNone(alloc) def test_reserve_provider_segment(self): tunnel_ids = set() specs = {api.NETWORK_TYPE: self.TYPE, api.PHYSICAL_NETWORK: 'None', api.SEGMENTATION_ID: None} for x in moves.range(TUN_MIN, TUN_MAX + 1): segment = self.driver.reserve_provider_segment(self.session, specs) self.assertEqual(self.TYPE, segment[api.NETWORK_TYPE]) self.assertThat(segment[api.SEGMENTATION_ID], matchers.GreaterThan(TUN_MIN - 1)) self.assertThat(segment[api.SEGMENTATION_ID], matchers.LessThan(TUN_MAX + 1)) tunnel_ids.add(segment[api.SEGMENTATION_ID]) with testtools.ExpectedException(exc.NoNetworkAvailable): segment = self.driver.reserve_provider_segment(self.session, specs) segment = {api.NETWORK_TYPE: self.TYPE, api.PHYSICAL_NETWORK: 'None', api.SEGMENTATION_ID: tunnel_ids.pop()} self.driver.release_segment(self.session, segment) segment = self.driver.reserve_provider_segment(self.session, specs) self.assertThat(segment[api.SEGMENTATION_ID], matchers.GreaterThan(TUN_MIN - 1)) self.assertThat(segment[api.SEGMENTATION_ID], matchers.LessThan(TUN_MAX + 1)) tunnel_ids.add(segment[api.SEGMENTATION_ID]) for tunnel_id in tunnel_ids: segment[api.SEGMENTATION_ID] = tunnel_id self.driver.release_segment(self.session, segment) def test_allocate_tenant_segment(self): tunnel_ids = set() for x in moves.range(TUN_MIN, TUN_MAX + 1): segment = self.driver.allocate_tenant_segment(self.session) self.assertThat(segment[api.SEGMENTATION_ID], matchers.GreaterThan(TUN_MIN - 1)) self.assertThat(segment[api.SEGMENTATION_ID], matchers.LessThan(TUN_MAX + 1)) tunnel_ids.add(segment[api.SEGMENTATION_ID]) segment = self.driver.allocate_tenant_segment(self.session) self.assertIsNone(segment) segment = {api.NETWORK_TYPE: self.TYPE, api.PHYSICAL_NETWORK: 'None', api.SEGMENTATION_ID: tunnel_ids.pop()} self.driver.release_segment(self.session, segment) segment = self.driver.allocate_tenant_segment(self.session) self.assertThat(segment[api.SEGMENTATION_ID], matchers.GreaterThan(TUN_MIN - 1)) self.assertThat(segment[api.SEGMENTATION_ID], matchers.LessThan(TUN_MAX + 1)) tunnel_ids.add(segment[api.SEGMENTATION_ID]) for tunnel_id in tunnel_ids: segment[api.SEGMENTATION_ID] = tunnel_id self.driver.release_segment(self.session, segment) def add_endpoint(self, ip=TUNNEL_IP_ONE, host=HOST_ONE): return self.driver.add_endpoint(ip, host) def test_add_endpoint(self): endpoint = self.add_endpoint() self.assertEqual(TUNNEL_IP_ONE, endpoint.ip_address) 
self.assertEqual(HOST_ONE, endpoint.host) return endpoint def test_add_endpoint_for_existing_tunnel_ip(self): self.add_endpoint() with mock.patch.object(type_tunnel.LOG, 'warning') as log_warn: self.add_endpoint() log_warn.assert_called_once_with(mock.ANY, TUNNEL_IP_ONE) def test_get_endpoint_by_host(self): self.add_endpoint() host_endpoint = self.driver.get_endpoint_by_host(HOST_ONE) self.assertEqual(TUNNEL_IP_ONE, host_endpoint.ip_address) return host_endpoint def test_get_endpoint_by_host_for_not_existing_host(self): ip_endpoint = self.driver.get_endpoint_by_host(HOST_TWO) self.assertIsNone(ip_endpoint) def test_get_endpoint_by_ip(self): self.add_endpoint() ip_endpoint = self.driver.get_endpoint_by_ip(TUNNEL_IP_ONE) self.assertEqual(HOST_ONE, ip_endpoint.host) return ip_endpoint def test_get_endpoint_by_ip_for_not_existing_tunnel_ip(self): ip_endpoint = self.driver.get_endpoint_by_ip(TUNNEL_IP_TWO) self.assertIsNone(ip_endpoint) def test_delete_endpoint(self): self.add_endpoint() self.assertIsNone(self.driver.delete_endpoint(TUNNEL_IP_ONE)) # Get all the endpoints and verify the deleted one is gone endpoints = self.driver.get_endpoints() self.assertNotIn(TUNNEL_IP_ONE, endpoints) class TunnelTypeMultiRangeTestMixin(object): DRIVER_CLASS = None TUN_MIN0 = 100 TUN_MAX0 = 101 TUN_MIN1 = 200 TUN_MAX1 = 201 TUNNEL_MULTI_RANGES = [(TUN_MIN0, TUN_MAX0), (TUN_MIN1, TUN_MAX1)] def setUp(self): super(TunnelTypeMultiRangeTestMixin, self).setUp() self.driver = self.DRIVER_CLASS() self.driver.tunnel_ranges = self.TUNNEL_MULTI_RANGES self.driver.sync_allocations() self.session = db.get_session() def test_release_segment(self): segments = [self.driver.allocate_tenant_segment(self.session) for i in range(4)] # Release them in random order. No special meaning. for i in (0, 2, 1, 3): self.driver.release_segment(self.session, segments[i]) for key in (self.TUN_MIN0, self.TUN_MAX0, self.TUN_MIN1, self.TUN_MAX1): alloc = self.driver.get_allocation(self.session, key) self.assertFalse(alloc.allocated) class TunnelRpcCallbackTestMixin(object): DRIVER_CLASS = None TYPE = None def setUp(self): super(TunnelRpcCallbackTestMixin, self).setUp() self.driver = self.DRIVER_CLASS() def _test_tunnel_sync(self, kwargs, delete_tunnel=False): with mock.patch.object(self.notifier, 'tunnel_update') as tunnel_update,\ mock.patch.object(self.notifier, 'tunnel_delete') as tunnel_delete: details = self.callbacks.tunnel_sync('fake_context', **kwargs) tunnels = details['tunnels'] for tunnel in tunnels: self.assertEqual(kwargs['tunnel_ip'], tunnel['ip_address']) self.assertEqual(kwargs['host'], tunnel['host']) self.assertTrue(tunnel_update.called) if delete_tunnel: self.assertTrue(tunnel_delete.called) else: self.assertFalse(tunnel_delete.called) def _test_tunnel_sync_raises(self, kwargs): with mock.patch.object(self.notifier, 'tunnel_update') as tunnel_update,\ mock.patch.object(self.notifier, 'tunnel_delete') as tunnel_delete: self.assertRaises(exc.InvalidInput, self.callbacks.tunnel_sync, 'fake_context', **kwargs) self.assertFalse(tunnel_update.called) self.assertFalse(tunnel_delete.called) def test_tunnel_sync_called_without_host_passed(self): kwargs = {'tunnel_ip': TUNNEL_IP_ONE, 'tunnel_type': self.TYPE, 'host': None} self._test_tunnel_sync(kwargs) def test_tunnel_sync_called_with_host_passed_for_existing_tunnel_ip(self): self.driver.add_endpoint(TUNNEL_IP_ONE, None) kwargs = {'tunnel_ip': TUNNEL_IP_ONE, 'tunnel_type': self.TYPE, 'host': HOST_ONE} self._test_tunnel_sync(kwargs) def test_tunnel_sync_called_with_host_passed(self): kwargs = 
{'tunnel_ip': TUNNEL_IP_ONE, 'tunnel_type': self.TYPE, 'host': HOST_ONE} self._test_tunnel_sync(kwargs) def test_tunnel_sync_called_for_existing_endpoint(self): self.driver.add_endpoint(TUNNEL_IP_ONE, HOST_ONE) kwargs = {'tunnel_ip': TUNNEL_IP_ONE, 'tunnel_type': self.TYPE, 'host': HOST_ONE} self._test_tunnel_sync(kwargs) def test_tunnel_sync_called_for_existing_host_with_tunnel_ip_changed(self): self.driver.add_endpoint(TUNNEL_IP_ONE, HOST_ONE) kwargs = {'tunnel_ip': TUNNEL_IP_TWO, 'tunnel_type': self.TYPE, 'host': HOST_ONE} self._test_tunnel_sync(kwargs, True) def test_tunnel_sync_called_with_used_tunnel_ip_host_roaming(self): self.driver.add_endpoint(TUNNEL_IP_ONE, HOST_ONE) kwargs = {'tunnel_ip': TUNNEL_IP_ONE, 'tunnel_type': self.TYPE, 'host': HOST_TWO} self._test_tunnel_sync(kwargs, False) def test_tunnel_sync_called_with_used_tunnel_ip_roaming_case_two(self): self.driver.add_endpoint(TUNNEL_IP_ONE, None) self.driver.add_endpoint(TUNNEL_IP_TWO, HOST_TWO) kwargs = {'tunnel_ip': TUNNEL_IP_ONE, 'tunnel_type': self.TYPE, 'host': HOST_TWO} self._test_tunnel_sync(kwargs, False) def test_tunnel_sync_called_without_tunnel_ip(self): kwargs = {'tunnel_type': self.TYPE, 'host': None} self._test_tunnel_sync_raises(kwargs) def test_tunnel_sync_called_without_tunnel_type(self): kwargs = {'tunnel_ip': TUNNEL_IP_ONE, 'host': None} self._test_tunnel_sync_raises(kwargs) neutron-8.4.0/neutron/tests/unit/plugins/ml2/drivers/linuxbridge/0000775000567000056710000000000013044373210026333 5ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/tests/unit/plugins/ml2/drivers/linuxbridge/__init__.py0000664000567000056710000000000013044372736030446 0ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/tests/unit/plugins/ml2/drivers/linuxbridge/mech_driver/0000775000567000056710000000000013044373210030622 5ustar jenkinsjenkins00000000000000././@LongLink0000000000000000000000000000014600000000000011216 Lustar 00000000000000neutron-8.4.0/neutron/tests/unit/plugins/ml2/drivers/linuxbridge/mech_driver/test_mech_linuxbridge.pyneutron-8.4.0/neutron/tests/unit/plugins/ml2/drivers/linuxbridge/mech_driver/test_mech_linuxbridge.p0000664000567000056710000000564413044372760035374 0ustar jenkinsjenkins00000000000000# Copyright (c) 2013 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
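# The GOOD_*/BAD_* fixtures below drive the generic agent-binding checks: a
# segment can bind only through a live agent whose interface_mappings cover
# the segment's physical network or whose tunnel_types include the segment's
# network type. An illustrative predicate capturing that check (the function
# name is hypothetical; the real logic lives in the shared agent-based
# mechanism driver base class):
def _agent_can_bind_sketch(agent, physnet, network_type):
    conf = agent['configurations']
    return agent['alive'] and (
        physnet in conf.get('interface_mappings', {}) or
        network_type in conf.get('tunnel_types', []))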
from neutron.common import constants from neutron.extensions import portbindings from neutron.plugins.ml2.drivers.linuxbridge.mech_driver \ import mech_linuxbridge from neutron.tests.unit.plugins.ml2 import _test_mech_agent as base class LinuxbridgeMechanismBaseTestCase(base.AgentMechanismBaseTestCase): VIF_TYPE = portbindings.VIF_TYPE_BRIDGE CAP_PORT_FILTER = True AGENT_TYPE = constants.AGENT_TYPE_LINUXBRIDGE GOOD_MAPPINGS = {'fake_physical_network': 'fake_interface'} GOOD_TUNNEL_TYPES = ['gre', 'vxlan'] GOOD_CONFIGS = {'interface_mappings': GOOD_MAPPINGS, 'tunnel_types': GOOD_TUNNEL_TYPES} BAD_MAPPINGS = {'wrong_physical_network': 'wrong_interface'} BAD_TUNNEL_TYPES = ['bad_tunnel_type'] BAD_CONFIGS = {'interface_mappings': BAD_MAPPINGS, 'tunnel_types': BAD_TUNNEL_TYPES} AGENTS = [{'alive': True, 'configurations': GOOD_CONFIGS, 'host': 'host'}] AGENTS_DEAD = [{'alive': False, 'configurations': GOOD_CONFIGS, 'host': 'dead_host'}] AGENTS_BAD = [{'alive': False, 'configurations': GOOD_CONFIGS, 'host': 'bad_host_1'}, {'alive': True, 'configurations': BAD_CONFIGS, 'host': 'bad_host_2'}] def setUp(self): super(LinuxbridgeMechanismBaseTestCase, self).setUp() self.driver = mech_linuxbridge.LinuxbridgeMechanismDriver() self.driver.initialize() class LinuxbridgeMechanismGenericTestCase(LinuxbridgeMechanismBaseTestCase, base.AgentMechanismGenericTestCase): pass class LinuxbridgeMechanismLocalTestCase(LinuxbridgeMechanismBaseTestCase, base.AgentMechanismLocalTestCase): pass class LinuxbridgeMechanismFlatTestCase(LinuxbridgeMechanismBaseTestCase, base.AgentMechanismFlatTestCase): pass class LinuxbridgeMechanismVlanTestCase(LinuxbridgeMechanismBaseTestCase, base.AgentMechanismVlanTestCase): pass class LinuxbridgeMechanismGreTestCase(LinuxbridgeMechanismBaseTestCase, base.AgentMechanismGreTestCase): pass neutron-8.4.0/neutron/tests/unit/plugins/ml2/drivers/linuxbridge/mech_driver/__init__.py0000664000567000056710000000000013044372736032735 0ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/tests/unit/plugins/ml2/drivers/linuxbridge/agent/0000775000567000056710000000000013044373210027431 5ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/tests/unit/plugins/ml2/drivers/linuxbridge/agent/__init__.py0000664000567000056710000000000013044372736031544 0ustar jenkinsjenkins00000000000000././@LongLink0000000000000000000000000000015100000000000011212 Lustar 00000000000000neutron-8.4.0/neutron/tests/unit/plugins/ml2/drivers/linuxbridge/agent/test_linuxbridge_neutron_agent.pyneutron-8.4.0/neutron/tests/unit/plugins/ml2/drivers/linuxbridge/agent/test_linuxbridge_neutron_agen0000664000567000056710000014011013044372760035501 0ustar jenkinsjenkins00000000000000# Copyright (c) 2012 OpenStack Foundation. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
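# Several cases below (test_get_vxlan_group and its IPv6 variant) pin how a
# VNI is folded into the configured multicast CIDR: the group address is the
# CIDR's network address plus the VNI modulo the CIDR size. A standalone
# sketch of that arithmetic, inferred from the expected values in those tests
# (the function name is hypothetical):
import netaddr


def _vxlan_group_for_vni_sketch(group_cidr, vni):
    net = netaddr.IPNetwork(group_cidr)
    # e.g. '239.1.2.3/24' with vni 257 -> 239.1.2.0 + (257 % 256) = 239.1.2.1
    return str(net.network + (vni % net.size))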
import collections import sys import mock from oslo_config import cfg from neutron.agent.linux import bridge_lib from neutron.agent.linux import ip_lib from neutron.agent.linux import utils from neutron.common import constants from neutron.common import exceptions from neutron.plugins.common import constants as p_const from neutron.plugins.ml2.drivers.agent import _agent_manager_base as amb from neutron.plugins.ml2.drivers.linuxbridge.agent.common \ import constants as lconst from neutron.plugins.ml2.drivers.linuxbridge.agent \ import linuxbridge_neutron_agent from neutron.tests import base LOCAL_IP = '192.168.0.33' LOCAL_IPV6 = '2001:db8:1::33' VXLAN_GROUPV6 = 'ff05::/120' PORT_1 = 'abcdef01-12ddssdfds-fdsfsd' DEVICE_1 = 'tapabcdef01-12' NETWORK_ID = '57653b20-ed5b-4ed0-a31d-06f84e3fd909' BRIDGE_MAPPING_VALUE = 'br-eth2' BRIDGE_MAPPINGS = {'physnet0': BRIDGE_MAPPING_VALUE} INTERFACE_MAPPINGS = {'physnet1': 'eth1'} FAKE_DEFAULT_DEV = mock.Mock() FAKE_DEFAULT_DEV.name = 'eth1' PORT_DATA = { "port_id": PORT_1, "device": DEVICE_1 } class FakeIpLinkCommand(object): def set_up(self): pass class FakeIpDevice(object): def __init__(self): self.link = FakeIpLinkCommand() def disable_ipv6(self): pass def get_linuxbridge_manager(bridge_mappings, interface_mappings): with mock.patch.object(ip_lib.IPWrapper, 'get_device_by_ip', return_value=FAKE_DEFAULT_DEV),\ mock.patch.object(ip_lib, 'device_exists', return_value=True),\ mock.patch.object(linuxbridge_neutron_agent.LinuxBridgeManager, 'check_vxlan_support'): cfg.CONF.set_override('local_ip', LOCAL_IP, 'VXLAN') return linuxbridge_neutron_agent.LinuxBridgeManager( bridge_mappings, interface_mappings) class TestLinuxBridge(base.BaseTestCase): def setUp(self): super(TestLinuxBridge, self).setUp() self.linux_bridge = get_linuxbridge_manager( BRIDGE_MAPPINGS, INTERFACE_MAPPINGS) def test_ensure_physical_in_bridge_invalid(self): result = self.linux_bridge.ensure_physical_in_bridge('network_id', p_const.TYPE_VLAN, 'physnetx', 7) self.assertFalse(result) def test_ensure_physical_in_bridge_flat(self): with mock.patch.object(self.linux_bridge, 'ensure_flat_bridge') as flat_bridge_func: self.linux_bridge.ensure_physical_in_bridge( 'network_id', p_const.TYPE_FLAT, 'physnet1', None) self.assertTrue(flat_bridge_func.called) def test_ensure_physical_in_bridge_vlan(self): with mock.patch.object(self.linux_bridge, 'ensure_vlan_bridge') as vlan_bridge_func: self.linux_bridge.ensure_physical_in_bridge( 'network_id', p_const.TYPE_VLAN, 'physnet1', 7) self.assertTrue(vlan_bridge_func.called) def test_ensure_physical_in_bridge_vxlan(self): self.linux_bridge.vxlan_mode = lconst.VXLAN_UCAST with mock.patch.object(self.linux_bridge, 'ensure_vxlan_bridge') as vxlan_bridge_func: self.linux_bridge.ensure_physical_in_bridge( 'network_id', 'vxlan', 'physnet1', 7) self.assertTrue(vxlan_bridge_func.called) class TestLinuxBridgeManager(base.BaseTestCase): def setUp(self): super(TestLinuxBridgeManager, self).setUp() self.lbm = get_linuxbridge_manager( BRIDGE_MAPPINGS, INTERFACE_MAPPINGS) def test_local_ip_validation_with_valid_ip(self): with mock.patch.object(ip_lib.IPWrapper, 'get_device_by_ip', return_value=FAKE_DEFAULT_DEV): self.lbm.local_ip = LOCAL_IP result = self.lbm.get_local_ip_device() self.assertEqual(FAKE_DEFAULT_DEV, result) def test_local_ip_validation_with_invalid_ip(self): with mock.patch.object(ip_lib.IPWrapper, 'get_device_by_ip', return_value=None),\ mock.patch.object(sys, 'exit') as exit,\ mock.patch.object(linuxbridge_neutron_agent.LOG, 'error') as log: 
self.lbm.local_ip = LOCAL_IP self.lbm.get_local_ip_device() self.assertEqual(1, log.call_count) exit.assert_called_once_with(1) def _test_vxlan_group_validation(self, bad_local_ip, bad_vxlan_group): with mock.patch.object(ip_lib.IPWrapper, 'get_device_by_ip', return_value=FAKE_DEFAULT_DEV),\ mock.patch.object(sys, 'exit') as exit,\ mock.patch.object(linuxbridge_neutron_agent.LOG, 'error') as log: self.lbm.local_ip = bad_local_ip cfg.CONF.set_override('vxlan_group', bad_vxlan_group, 'VXLAN') self.lbm.validate_vxlan_group_with_local_ip() self.assertEqual(1, log.call_count) exit.assert_called_once_with(1) def test_vxlan_group_validation_with_mismatched_local_ip(self): self._test_vxlan_group_validation(LOCAL_IP, VXLAN_GROUPV6) def test_vxlan_group_validation_with_unicast_group(self): self._test_vxlan_group_validation(LOCAL_IP, '240.0.0.0') def test_vxlan_group_validation_with_invalid_cidr(self): self._test_vxlan_group_validation(LOCAL_IP, '224.0.0.1/') def test_vxlan_group_validation_with_v6_unicast_group(self): self._test_vxlan_group_validation(LOCAL_IPV6, '2001:db8::') def test_get_existing_bridge_name(self): phy_net = 'physnet0' self.assertEqual('br-eth2', self.lbm.get_existing_bridge_name(phy_net)) phy_net = '' self.assertIsNone(self.lbm.get_existing_bridge_name(phy_net)) def test_get_bridge_name(self): nw_id = "123456789101112" self.assertEqual("brq" + nw_id[0:11], self.lbm.get_bridge_name(nw_id)) nw_id = "" self.assertEqual("brq", self.lbm.get_bridge_name(nw_id)) def test_get_subinterface_name(self): self.assertEqual("eth0.0", self.lbm.get_subinterface_name("eth0", "0")) self.assertEqual("eth0.", self.lbm.get_subinterface_name("eth0", "")) def test_get_tap_device_name(self): if_id = "123456789101112" self.assertEqual(constants.TAP_DEVICE_PREFIX + if_id[0:11], self.lbm.get_tap_device_name(if_id)) if_id = "" self.assertEqual(constants.TAP_DEVICE_PREFIX, self.lbm.get_tap_device_name(if_id)) def test_get_vxlan_device_name(self): vn_id = p_const.MAX_VXLAN_VNI self.assertEqual("vxlan-" + str(vn_id), self.lbm.get_vxlan_device_name(vn_id)) self.assertIsNone(self.lbm.get_vxlan_device_name(vn_id + 1)) def test_get_vxlan_group(self): cfg.CONF.set_override('vxlan_group', '239.1.2.3/24', 'VXLAN') vn_id = p_const.MAX_VXLAN_VNI self.assertEqual('239.1.2.255', self.lbm.get_vxlan_group(vn_id)) vn_id = 256 self.assertEqual('239.1.2.0', self.lbm.get_vxlan_group(vn_id)) vn_id = 257 self.assertEqual('239.1.2.1', self.lbm.get_vxlan_group(vn_id)) def test_get_vxlan_group_with_ipv6(self): cfg.CONF.set_override('local_ip', LOCAL_IPV6, 'VXLAN') self.lbm.local_ip = LOCAL_IPV6 cfg.CONF.set_override('vxlan_group', VXLAN_GROUPV6, 'VXLAN') vn_id = p_const.MAX_VXLAN_VNI self.assertEqual('ff05::ff', self.lbm.get_vxlan_group(vn_id)) vn_id = 256 self.assertEqual('ff05::', self.lbm.get_vxlan_group(vn_id)) vn_id = 257 self.assertEqual('ff05::1', self.lbm.get_vxlan_group(vn_id)) def test_get_deletable_bridges(self): br_list = ["br-int", "brq1", "brq2", "brq-user"] expected = set(br_list[1:3]) lbm = get_linuxbridge_manager( bridge_mappings={"physnet0": "brq-user"}, interface_mappings={}) with mock.patch.object( bridge_lib, 'get_bridge_names', return_value=br_list): self.assertEqual(expected, lbm.get_deletable_bridges()) def test_get_tap_devices_count(self): with mock.patch.object( bridge_lib.BridgeDevice, 'get_interfaces') as get_ifs_fn: get_ifs_fn.return_value = ['tap2101', 'eth0.100', 'vxlan-1000'] self.assertEqual(1, self.lbm.get_tap_devices_count('br0')) def test_get_interface_details(self): with 
mock.patch.object(ip_lib.IpAddrCommand, 'list') as list_fn,\ mock.patch.object(ip_lib.IpRouteCommand, 'get_gateway') as getgw_fn: gwdict = dict(gateway='1.1.1.1') getgw_fn.return_value = gwdict ipdict = dict(cidr='1.1.1.1/24', broadcast='1.1.1.255', scope='global', ip_version=4, dynamic=False) list_fn.return_value = ipdict ret = self.lbm.get_interface_details("eth0") self.assertTrue(list_fn.called) self.assertTrue(getgw_fn.called) self.assertEqual(ret, (ipdict, gwdict)) def test_ensure_flat_bridge(self): with mock.patch.object(ip_lib.IpAddrCommand, 'list') as list_fn,\ mock.patch.object(ip_lib.IpRouteCommand, 'get_gateway') as getgw_fn: gwdict = dict(gateway='1.1.1.1') getgw_fn.return_value = gwdict ipdict = dict(cidr='1.1.1.1/24', broadcast='1.1.1.255', scope='global', ip_version=4, dynamic=False) list_fn.return_value = ipdict with mock.patch.object(self.lbm, 'ensure_bridge') as ens: self.assertEqual( "eth0", self.lbm.ensure_flat_bridge("123", None, "eth0")) self.assertTrue(list_fn.called) self.assertTrue(getgw_fn.called) ens.assert_called_once_with("brq123", "eth0", ipdict, gwdict) def test_ensure_flat_bridge_with_existed_brq(self): with mock.patch.object(self.lbm, 'ensure_bridge') as ens: ens.return_value = "br-eth2" self.assertEqual("br-eth2", self.lbm.ensure_flat_bridge("123", "br-eth2", None)) ens.assert_called_with("br-eth2") def test_ensure_vlan_bridge(self): with mock.patch.object(self.lbm, 'ensure_vlan') as ens_vl_fn,\ mock.patch.object(self.lbm, 'ensure_bridge') as ens,\ mock.patch.object(self.lbm, 'get_interface_details') as get_int_det_fn: ens_vl_fn.return_value = "eth0.1" get_int_det_fn.return_value = (None, None) self.assertEqual("eth0.1", self.lbm.ensure_vlan_bridge("123", None, "eth0", "1")) ens.assert_called_with("brq123", "eth0.1", None, None) get_int_det_fn.return_value = ("ips", "gateway") self.assertEqual("eth0.1", self.lbm.ensure_vlan_bridge("123", None, "eth0", "1")) ens.assert_called_with("brq123", "eth0.1", "ips", "gateway") def test_ensure_vlan_bridge_with_existed_brq(self): with mock.patch.object(self.lbm, 'ensure_vlan') as ens_vl_fn,\ mock.patch.object(self.lbm, 'ensure_bridge') as ens: ens_vl_fn.return_value = None ens.return_value = "br-eth2" self.assertEqual("br-eth2", self.lbm.ensure_vlan_bridge("123", "br-eth2", None, None)) ens.assert_called_with("br-eth2") def test_ensure_local_bridge(self): with mock.patch.object(self.lbm, 'ensure_bridge') as ens_fn: self.lbm.ensure_local_bridge("54321", None) ens_fn.assert_called_once_with("brq54321") def test_ensure_local_bridge_with_existed_brq(self): with mock.patch.object(self.lbm, 'ensure_bridge') as ens_fn: ens_fn.return_value = "br-eth2" self.lbm.ensure_local_bridge("54321", 'br-eth2') ens_fn.assert_called_once_with("br-eth2") def test_ensure_vlan(self): with mock.patch.object(ip_lib, 'device_exists') as de_fn: de_fn.return_value = True self.assertEqual("eth0.1", self.lbm.ensure_vlan("eth0", "1")) de_fn.return_value = False vlan_dev = FakeIpDevice() with mock.patch.object(vlan_dev, 'disable_ipv6') as dv6_fn,\ mock.patch.object(self.lbm.ip, 'add_vlan', return_value=vlan_dev) as add_vlan_fn: retval = self.lbm.ensure_vlan("eth0", "1") self.assertEqual("eth0.1", retval) add_vlan_fn.assert_called_with('eth0.1', 'eth0', '1') dv6_fn.assert_called_once_with() def test_ensure_vxlan(self, expected_proxy=False): seg_id = "12345678" self.lbm.local_int = 'eth0' self.lbm.vxlan_mode = lconst.VXLAN_MCAST with mock.patch.object(ip_lib, 'device_exists') as de_fn: de_fn.return_value = True self.assertEqual("vxlan-" + seg_id, 
self.lbm.ensure_vxlan(seg_id)) de_fn.return_value = False vxlan_dev = FakeIpDevice() with mock.patch.object(vxlan_dev, 'disable_ipv6') as dv6_fn,\ mock.patch.object(self.lbm.ip, 'add_vxlan', return_value=vxlan_dev) as add_vxlan_fn: retval = self.lbm.ensure_vxlan(seg_id) self.assertEqual("vxlan-" + seg_id, retval) add_vxlan_fn.assert_called_with("vxlan-" + seg_id, seg_id, group="224.0.0.1", dev=self.lbm.local_int) dv6_fn.assert_called_once_with() cfg.CONF.set_override('l2_population', 'True', 'VXLAN') self.assertEqual("vxlan-" + seg_id, self.lbm.ensure_vxlan(seg_id)) add_vxlan_fn.assert_called_with("vxlan-" + seg_id, seg_id, group="224.0.0.1", dev=self.lbm.local_int, proxy=expected_proxy) def test_ensure_vxlan_arp_responder_enabled(self): cfg.CONF.set_override('arp_responder', True, 'VXLAN') self.test_ensure_vxlan(expected_proxy=True) def test_update_interface_ip_details(self): gwdict = dict(gateway='1.1.1.1', metric=50) ipdict = dict(cidr='1.1.1.1/24', broadcast='1.1.1.255', scope='global', ip_version=4, dynamic=False) with mock.patch.object(ip_lib.IpAddrCommand, 'add') as add_fn,\ mock.patch.object(ip_lib.IpAddrCommand, 'delete') as del_fn: self.lbm.update_interface_ip_details("br0", "eth0", [ipdict], None) self.assertTrue(add_fn.called) self.assertTrue(del_fn.called) with mock.patch.object(ip_lib.IpRouteCommand, 'add_gateway') as addgw_fn,\ mock.patch.object(ip_lib.IpRouteCommand, 'delete_gateway') as delgw_fn: self.lbm.update_interface_ip_details("br0", "eth0", None, gwdict) self.assertTrue(addgw_fn.called) self.assertTrue(delgw_fn.called) def test_bridge_exists_and_ensure_up(self): ip_lib_mock = mock.Mock() with mock.patch.object(ip_lib, 'IPDevice', return_value=ip_lib_mock): # device exists self.assertTrue(self.lbm._bridge_exists_and_ensure_up("br0")) self.assertTrue(ip_lib_mock.link.set_up.called) # device doesn't exist ip_lib_mock.link.set_up.side_effect = RuntimeError self.assertFalse(self.lbm._bridge_exists_and_ensure_up("br0")) def test_ensure_bridge(self): bridge_device = mock.Mock() bridge_device_old = mock.Mock() with mock.patch.object(self.lbm, '_bridge_exists_and_ensure_up') as de_fn,\ mock.patch.object(bridge_lib, "BridgeDevice", return_value=bridge_device) as br_fn,\ mock.patch.object(self.lbm, 'update_interface_ip_details') as upd_fn,\ mock.patch.object(bridge_lib, 'is_bridged_interface'),\ mock.patch.object(bridge_lib.BridgeDevice, 'get_interface_bridge') as get_if_br_fn: de_fn.return_value = False br_fn.addbr.return_value = bridge_device bridge_device.setfd.return_value = False bridge_device.disable_stp.return_value = False bridge_device.disable_ipv6.return_value = False bridge_device.link.set_up.return_value = False self.assertEqual("br0", self.lbm.ensure_bridge("br0", None)) bridge_device.owns_interface.return_value = False self.lbm.ensure_bridge("br0", "eth0") upd_fn.assert_called_with("br0", "eth0", None, None) bridge_device.owns_interface.assert_called_with("eth0") self.lbm.ensure_bridge("br0", "eth0", "ips", "gateway") upd_fn.assert_called_with("br0", "eth0", "ips", "gateway") bridge_device.owns_interface.assert_called_with("eth0") de_fn.return_value = True bridge_device.delif.side_effect = Exception() self.lbm.ensure_bridge("br0", "eth0") bridge_device.owns_interface.assert_called_with("eth0") de_fn.return_value = True bridge_device.owns_interface.return_value = False get_if_br_fn.return_value = bridge_device_old bridge_device.addif.reset_mock() self.lbm.ensure_bridge("br0", "eth0") bridge_device_old.delif.assert_called_once_with('eth0') 
bridge_device.addif.assert_called_once_with('eth0') def test_ensure_physical_in_bridge(self): self.assertFalse( self.lbm.ensure_physical_in_bridge("123", p_const.TYPE_VLAN, "phys", "1") ) with mock.patch.object(self.lbm, "ensure_flat_bridge") as flbr_fn: self.assertTrue( self.lbm.ensure_physical_in_bridge("123", p_const.TYPE_FLAT, "physnet1", None) ) self.assertTrue(flbr_fn.called) with mock.patch.object(self.lbm, "ensure_vlan_bridge") as vlbr_fn: self.assertTrue( self.lbm.ensure_physical_in_bridge("123", p_const.TYPE_VLAN, "physnet1", "1") ) self.assertTrue(vlbr_fn.called) with mock.patch.object(self.lbm, "ensure_vxlan_bridge") as vlbr_fn: self.lbm.vxlan_mode = lconst.VXLAN_MCAST self.assertTrue( self.lbm.ensure_physical_in_bridge("123", p_const.TYPE_VXLAN, "physnet1", "1") ) self.assertTrue(vlbr_fn.called) def test_ensure_physical_in_bridge_with_existed_brq(self): with mock.patch.object(linuxbridge_neutron_agent.LOG, 'error') as log: self.lbm.ensure_physical_in_bridge("123", p_const.TYPE_FLAT, "physnet9", "1") self.assertEqual(1, log.call_count) @mock.patch.object(ip_lib, "device_exists", return_value=False) def test_add_tap_interface_with_interface_disappearing(self, exists): with mock.patch.object(self.lbm, "_add_tap_interface", side_effect=RuntimeError("No such dev")): self.assertFalse(self.lbm.add_tap_interface("123", p_const.TYPE_VLAN, "physnet1", None, "tap1", "foo")) @mock.patch.object(ip_lib, "device_exists", return_value=True) def test_add_tap_interface_with_other_error(self, exists): with mock.patch.object(self.lbm, "_add_tap_interface", side_effect=RuntimeError("No more fuel")): self.assertRaises(RuntimeError, self.lbm.add_tap_interface, "123", p_const.TYPE_VLAN, "physnet1", None, "tap1", "foo") def test_add_tap_interface_owner_other(self): with mock.patch.object(ip_lib, "device_exists"): with mock.patch.object(self.lbm, "ensure_local_bridge"): self.assertTrue(self.lbm.add_tap_interface("123", p_const.TYPE_LOCAL, "physnet1", None, "tap1", "foo")) def _test_add_tap_interface(self, dev_owner_prefix): with mock.patch.object(ip_lib, "device_exists") as de_fn: de_fn.return_value = False self.assertFalse( self.lbm.add_tap_interface("123", p_const.TYPE_VLAN, "physnet1", "1", "tap1", dev_owner_prefix)) de_fn.return_value = True bridge_device = mock.Mock() with mock.patch.object(self.lbm, "ensure_local_bridge") as en_fn,\ mock.patch.object(bridge_lib, "BridgeDevice", return_value=bridge_device), \ mock.patch.object(bridge_lib.BridgeDevice, "get_interface_bridge") as get_br: bridge_device.addif.return_value = False get_br.return_value = True self.assertTrue(self.lbm.add_tap_interface("123", p_const.TYPE_LOCAL, "physnet1", None, "tap1", dev_owner_prefix)) en_fn.assert_called_with("123", "brq123") self.lbm.bridge_mappings = {"physnet1": "brq999"} self.assertTrue(self.lbm.add_tap_interface("123", p_const.TYPE_LOCAL, "physnet1", None, "tap1", dev_owner_prefix)) en_fn.assert_called_with("123", "brq999") get_br.return_value = False bridge_device.addif.return_value = True self.assertFalse(self.lbm.add_tap_interface("123", p_const.TYPE_LOCAL, "physnet1", None, "tap1", dev_owner_prefix)) with mock.patch.object(self.lbm, "ensure_physical_in_bridge") as ens_fn,\ mock.patch.object(self.lbm, "ensure_tap_mtu") as en_mtu_fn,\ mock.patch.object(bridge_lib.BridgeDevice, "get_interface_bridge") as get_br: ens_fn.return_value = False self.assertFalse(self.lbm.add_tap_interface("123", p_const.TYPE_VLAN, "physnet1", "1", "tap1", dev_owner_prefix)) ens_fn.return_value = "eth0.1" get_br.return_value = "brq123" 
self.lbm.add_tap_interface("123", p_const.TYPE_VLAN, "physnet1", "1", "tap1", dev_owner_prefix) en_mtu_fn.assert_called_once_with("tap1", "eth0.1") bridge_device.addif.assert_called_once_with("tap1") def test_add_tap_interface_owner_network(self): self._test_add_tap_interface(constants.DEVICE_OWNER_NETWORK_PREFIX) def test_add_tap_interface_owner_neutron(self): self._test_add_tap_interface(constants.DEVICE_OWNER_NEUTRON_PREFIX) def test_plug_interface(self): segment = amb.NetworkSegment(p_const.TYPE_VLAN, "physnet-1", "1") with mock.patch.object(self.lbm, "add_tap_interface") as add_tap: self.lbm.plug_interface("123", segment, "tap234", constants.DEVICE_OWNER_NETWORK_PREFIX) add_tap.assert_called_with("123", p_const.TYPE_VLAN, "physnet-1", "1", "tap234", constants.DEVICE_OWNER_NETWORK_PREFIX) def test_delete_bridge(self): with mock.patch.object(ip_lib.IPDevice, "exists") as de_fn,\ mock.patch.object(ip_lib, "IpLinkCommand") as link_cmd,\ mock.patch.object(bridge_lib.BridgeDevice, "get_interfaces") as getif_fn,\ mock.patch.object(self.lbm, "remove_interface"),\ mock.patch.object(self.lbm, "get_interface_details") as if_det_fn,\ mock.patch.object(self.lbm, "update_interface_ip_details") as updif_fn,\ mock.patch.object(self.lbm, "delete_interface") as delif_fn: de_fn.return_value = False self.lbm.delete_bridge("br0") self.assertFalse(getif_fn.called) de_fn.return_value = True getif_fn.return_value = ["eth0", "eth1", "vxlan-1002"] if_det_fn.return_value = ("ips", "gateway") link_cmd.set_down.return_value = False self.lbm.delete_bridge("br0") updif_fn.assert_called_with("eth1", "br0", "ips", "gateway") delif_fn.assert_called_with("vxlan-1002") def test_delete_bridge_not_exist(self): self.lbm.interface_mappings.update({}) bridge_device = mock.Mock() with mock.patch.object(bridge_lib, "BridgeDevice", return_value=bridge_device): bridge_device.exists.side_effect = [True, False] bridge_device.get_interfaces.return_value = [] bridge_device.link.set_down.side_effect = RuntimeError self.lbm.delete_bridge("br0") self.assertEqual(2, bridge_device.exists.call_count) bridge_device.exists.side_effect = [True, True] self.assertRaises(RuntimeError, self.lbm.delete_bridge, "br0") def test_delete_bridge_with_ip(self): bridge_device = mock.Mock() with mock.patch.object(ip_lib, "device_exists") as de_fn,\ mock.patch.object(self.lbm, "remove_interface"),\ mock.patch.object(self.lbm, "get_interface_details") as if_det_fn,\ mock.patch.object(self.lbm, "update_interface_ip_details") as updif_fn,\ mock.patch.object(self.lbm, "delete_interface") as del_interface,\ mock.patch.object(bridge_lib, "BridgeDevice", return_value=bridge_device): de_fn.return_value = True bridge_device.get_interfaces.return_value = ["eth0", "eth1.1"] if_det_fn.return_value = ("ips", "gateway") bridge_device.link.set_down.return_value = False self.lbm.delete_bridge("br0") updif_fn.assert_called_with("eth1.1", "br0", "ips", "gateway") self.assertFalse(del_interface.called) def test_delete_bridge_no_ip(self): bridge_device = mock.Mock() with mock.patch.object(ip_lib, "device_exists") as de_fn,\ mock.patch.object(self.lbm, "remove_interface"),\ mock.patch.object(self.lbm, "get_interface_details") as if_det_fn,\ mock.patch.object(self.lbm, "update_interface_ip_details") as updif_fn,\ mock.patch.object(self.lbm, "delete_interface") as del_interface,\ mock.patch.object(bridge_lib, "BridgeDevice", return_value=bridge_device): de_fn.return_value = True bridge_device.get_interfaces.return_value = ["eth0", "eth1.1"] 
bridge_device.link.set_down.return_value = False if_det_fn.return_value = ([], None) self.lbm.delete_bridge("br0") del_interface.assert_called_with("eth1.1") self.assertFalse(updif_fn.called) def test_delete_bridge_no_int_mappings(self): lbm = get_linuxbridge_manager( bridge_mappings={}, interface_mappings={}) with mock.patch.object(ip_lib.IPDevice, "exists") as de_fn,\ mock.patch.object(ip_lib, "IpLinkCommand") as link_cmd,\ mock.patch.object(bridge_lib.BridgeDevice, "get_interfaces") as getif_fn,\ mock.patch.object(lbm, "remove_interface"),\ mock.patch.object(lbm, "delete_interface") as del_interface: de_fn.return_value = False lbm.delete_bridge("br0") self.assertFalse(getif_fn.called) de_fn.return_value = True getif_fn.return_value = ["vxlan-1002"] link_cmd.set_down.return_value = False lbm.delete_bridge("br0") del_interface.assert_called_with("vxlan-1002") def test_delete_bridge_with_physical_vlan(self): self.lbm.interface_mappings.update({"physnet2": "eth1.4000"}) bridge_device = mock.Mock() with mock.patch.object(ip_lib, "device_exists") as de_fn,\ mock.patch.object(self.lbm, "remove_interface"),\ mock.patch.object(self.lbm, "get_interface_details") as if_det_fn,\ mock.patch.object(self.lbm, "delete_interface") as del_int,\ mock.patch.object(bridge_lib, "BridgeDevice", return_value=bridge_device): de_fn.return_value = True bridge_device.get_interfaces.return_value = ["eth1.1", "eth1.4000"] if_det_fn.return_value = ([], None) bridge_device.link.set_down.return_value = False self.lbm.delete_bridge("br0") del_int.assert_called_once_with("eth1.1") def test_remove_interface(self): with mock.patch.object(ip_lib.IPDevice, "exists") as de_fn,\ mock.patch.object(bridge_lib, 'is_bridged_interface') as isdev_fn,\ mock.patch.object(bridge_lib.BridgeDevice, "delif") as delif_fn: de_fn.return_value = False self.assertFalse(self.lbm.remove_interface("br0", "eth0")) self.assertFalse(isdev_fn.called) de_fn.return_value = True isdev_fn.return_value = False self.assertTrue(self.lbm.remove_interface("br0", "eth0")) isdev_fn.return_value = True delif_fn.return_value = True self.assertFalse(self.lbm.remove_interface("br0", "eth0")) delif_fn.return_value = False self.assertTrue(self.lbm.remove_interface("br0", "eth0")) def test_delete_interface(self): with mock.patch.object(ip_lib.IPDevice, "exists") as de_fn,\ mock.patch.object(ip_lib.IpLinkCommand, "set_down") as down_fn,\ mock.patch.object(ip_lib.IpLinkCommand, "delete") as delete_fn: de_fn.return_value = False self.lbm.delete_interface("eth1.1") self.assertFalse(down_fn.called) self.assertFalse(delete_fn.called) de_fn.return_value = True self.lbm.delete_interface("eth1.1") self.assertTrue(down_fn.called) self.assertTrue(delete_fn.called) def _check_vxlan_support(self, expected, vxlan_ucast_supported, vxlan_mcast_supported): with mock.patch.object(self.lbm, 'vxlan_ucast_supported', return_value=vxlan_ucast_supported),\ mock.patch.object(self.lbm, 'vxlan_mcast_supported', return_value=vxlan_mcast_supported): if expected == lconst.VXLAN_NONE: self.assertRaises(exceptions.VxlanNetworkUnsupported, self.lbm.check_vxlan_support) self.assertEqual(expected, self.lbm.vxlan_mode) else: self.lbm.check_vxlan_support() self.assertEqual(expected, self.lbm.vxlan_mode) def test_check_vxlan_support(self): self._check_vxlan_support(expected=lconst.VXLAN_UCAST, vxlan_ucast_supported=True, vxlan_mcast_supported=True) self._check_vxlan_support(expected=lconst.VXLAN_MCAST, vxlan_ucast_supported=False, vxlan_mcast_supported=True) 
self._check_vxlan_support(expected=lconst.VXLAN_NONE, vxlan_ucast_supported=False, vxlan_mcast_supported=False) self._check_vxlan_support(expected=lconst.VXLAN_NONE, vxlan_ucast_supported=False, vxlan_mcast_supported=False) def _check_vxlan_ucast_supported( self, expected, l2_population, iproute_arg_supported, fdb_append): cfg.CONF.set_override('l2_population', l2_population, 'VXLAN') with mock.patch.object(ip_lib, 'device_exists', return_value=False),\ mock.patch.object(ip_lib, 'vxlan_in_use', return_value=False),\ mock.patch.object(self.lbm, 'delete_interface', return_value=None),\ mock.patch.object(self.lbm, 'ensure_vxlan', return_value=None),\ mock.patch.object( utils, 'execute', side_effect=None if fdb_append else RuntimeError()),\ mock.patch.object(ip_lib, 'iproute_arg_supported', return_value=iproute_arg_supported): self.assertEqual(expected, self.lbm.vxlan_ucast_supported()) def test_vxlan_ucast_supported(self): self._check_vxlan_ucast_supported( expected=False, l2_population=False, iproute_arg_supported=True, fdb_append=True) self._check_vxlan_ucast_supported( expected=False, l2_population=True, iproute_arg_supported=False, fdb_append=True) self._check_vxlan_ucast_supported( expected=False, l2_population=True, iproute_arg_supported=True, fdb_append=False) self._check_vxlan_ucast_supported( expected=True, l2_population=True, iproute_arg_supported=True, fdb_append=True) def _check_vxlan_mcast_supported( self, expected, vxlan_group, iproute_arg_supported): cfg.CONF.set_override('vxlan_group', vxlan_group, 'VXLAN') with mock.patch.object( ip_lib, 'iproute_arg_supported', return_value=iproute_arg_supported): self.assertEqual(expected, self.lbm.vxlan_mcast_supported()) def test_vxlan_mcast_supported(self): self._check_vxlan_mcast_supported( expected=False, vxlan_group='', iproute_arg_supported=True) self._check_vxlan_mcast_supported( expected=False, vxlan_group='224.0.0.1', iproute_arg_supported=False) self._check_vxlan_mcast_supported( expected=True, vxlan_group='224.0.0.1', iproute_arg_supported=True) def _test_ensure_port_admin_state(self, admin_state): port_id = 'fake_id' with mock.patch.object(ip_lib, 'IPDevice') as dev_mock: self.lbm.ensure_port_admin_state(port_id, admin_state) tap_name = self.lbm.get_tap_device_name(port_id) self.assertEqual(admin_state, dev_mock(tap_name).link.set_up.called) self.assertNotEqual(admin_state, dev_mock(tap_name).link.set_down.called) def test_ensure_port_admin_state_up(self): self._test_ensure_port_admin_state(True) def test_ensure_port_admin_state_down(self): self._test_ensure_port_admin_state(False) def test_get_agent_id_bridge_mappings(self): lbm = get_linuxbridge_manager(BRIDGE_MAPPINGS, INTERFACE_MAPPINGS) with mock.patch.object(utils, "get_interface_mac", return_value='16:63:69:10:a0:59') as mock_gim: agent_id = lbm.get_agent_id() self.assertEqual("lb16636910a059", agent_id) mock_gim.assert_called_with(BRIDGE_MAPPING_VALUE) def test_get_agent_id_no_bridge_mappings(self): devices_mock = [ mock.MagicMock(), mock.MagicMock() ] devices_mock[0].name = "eth1" devices_mock[1].name = "eth2" bridge_mappings = {} lbm = get_linuxbridge_manager(bridge_mappings, INTERFACE_MAPPINGS) with mock.patch.object(ip_lib.IPWrapper, 'get_devices', return_value=devices_mock), \ mock.patch.object( utils, "get_interface_mac", return_value='16:63:69:10:a0:59') as mock_gim: agent_id = lbm.get_agent_id() self.assertEqual("lb16636910a059", agent_id) mock_gim.assert_called_with("eth1") class TestLinuxBridgeRpcCallbacks(base.BaseTestCase): def setUp(self): 
super(TestLinuxBridgeRpcCallbacks, self).setUp() class FakeLBAgent(object): def __init__(self): self.agent_id = 1 self.mgr = get_linuxbridge_manager( BRIDGE_MAPPINGS, INTERFACE_MAPPINGS) self.mgr.vxlan_mode = lconst.VXLAN_UCAST self.network_ports = collections.defaultdict(list) self.lb_rpc = linuxbridge_neutron_agent.LinuxBridgeRpcCallbacks( object(), FakeLBAgent(), object() ) segment = mock.Mock() segment.network_type = 'vxlan' segment.segmentation_id = 1 self.lb_rpc.network_map['net_id'] = segment def test_network_delete(self): mock_net = mock.Mock() mock_net.physical_network = None self.lb_rpc.network_map = {NETWORK_ID: mock_net} with mock.patch.object(self.lb_rpc.agent.mgr, "get_bridge_name") as get_br_fn,\ mock.patch.object(self.lb_rpc.agent.mgr, "delete_bridge") as del_fn: get_br_fn.return_value = "br0" self.lb_rpc.network_delete("anycontext", network_id=NETWORK_ID) get_br_fn.assert_called_with(NETWORK_ID) del_fn.assert_called_with("br0") def test_port_update(self): port = {'id': PORT_1} self.lb_rpc.port_update(context=None, port=port) self.assertEqual(set([DEVICE_1]), self.lb_rpc.updated_devices) def test_network_update(self): updated_network = {'id': NETWORK_ID} self.lb_rpc.agent.network_ports = { NETWORK_ID: [PORT_DATA] } self.lb_rpc.network_update(context=None, network=updated_network) self.assertEqual(set([DEVICE_1]), self.lb_rpc.updated_devices) def test_network_delete_with_existed_brq(self): mock_net = mock.Mock() mock_net.physical_network = 'physnet0' self.lb_rpc.network_map = {'123': mock_net} with mock.patch.object(linuxbridge_neutron_agent.LOG, 'info') as log,\ mock.patch.object(self.lb_rpc.agent.mgr, "delete_bridge") as del_fn: self.lb_rpc.network_delete("anycontext", network_id="123") self.assertEqual(0, del_fn.call_count) self.assertEqual(1, log.call_count) def _test_fdb_add(self, proxy_enabled=False): fdb_entries = {'net_id': {'ports': {'agent_ip': [constants.FLOODING_ENTRY, ['port_mac', 'port_ip']]}, 'network_type': 'vxlan', 'segment_id': 1}} with mock.patch.object(utils, 'execute', return_value='') as execute_fn, \ mock.patch.object(ip_lib.IpNeighCommand, 'add', return_value='') as add_fn: self.lb_rpc.fdb_add(None, fdb_entries) expected = [ mock.call(['bridge', 'fdb', 'show', 'dev', 'vxlan-1'], run_as_root=True), mock.call(['bridge', 'fdb', 'add', constants.FLOODING_ENTRY[0], 'dev', 'vxlan-1', 'dst', 'agent_ip'], run_as_root=True, check_exit_code=False), mock.call(['bridge', 'fdb', 'replace', 'port_mac', 'dev', 'vxlan-1', 'dst', 'agent_ip'], run_as_root=True, check_exit_code=False), ] execute_fn.assert_has_calls(expected) if proxy_enabled: add_fn.assert_called_with('port_ip', 'port_mac') else: add_fn.assert_not_called() def test_fdb_add(self): self._test_fdb_add(proxy_enabled=False) def test_fdb_add_with_arp_responder(self): cfg.CONF.set_override('arp_responder', True, 'VXLAN') self._test_fdb_add(proxy_enabled=True) def test_fdb_ignore(self): fdb_entries = {'net_id': {'ports': {LOCAL_IP: [constants.FLOODING_ENTRY, ['port_mac', 'port_ip']]}, 'network_type': 'vxlan', 'segment_id': 1}} with mock.patch.object(utils, 'execute', return_value='') as execute_fn: self.lb_rpc.fdb_add(None, fdb_entries) self.lb_rpc.fdb_remove(None, fdb_entries) self.assertFalse(execute_fn.called) fdb_entries = {'other_net_id': {'ports': {'192.168.0.67': [constants.FLOODING_ENTRY, ['port_mac', 'port_ip']]}, 'network_type': 'vxlan', 'segment_id': 1}} with mock.patch.object(utils, 'execute', return_value='') as execute_fn: self.lb_rpc.fdb_add(None, fdb_entries) self.lb_rpc.fdb_remove(None, 
fdb_entries) self.assertFalse(execute_fn.called) def _test_fdb_remove(self, proxy_enabled=False): fdb_entries = {'net_id': {'ports': {'agent_ip': [constants.FLOODING_ENTRY, ['port_mac', 'port_ip']]}, 'network_type': 'vxlan', 'segment_id': 1}} with mock.patch.object(utils, 'execute', return_value='') as execute_fn, \ mock.patch.object(ip_lib.IpNeighCommand, 'delete', return_value='') as del_fn: self.lb_rpc.fdb_remove(None, fdb_entries) expected = [ mock.call(['bridge', 'fdb', 'del', constants.FLOODING_ENTRY[0], 'dev', 'vxlan-1', 'dst', 'agent_ip'], run_as_root=True, check_exit_code=False), mock.call(['bridge', 'fdb', 'del', 'port_mac', 'dev', 'vxlan-1', 'dst', 'agent_ip'], run_as_root=True, check_exit_code=False), ] execute_fn.assert_has_calls(expected) if proxy_enabled: del_fn.assert_called_with('port_ip', 'port_mac') else: del_fn.assert_not_called() def test_fdb_remove(self): self._test_fdb_remove(proxy_enabled=False) def test_fdb_remove_with_arp_responder(self): cfg.CONF.set_override('arp_responder', True, 'VXLAN') self._test_fdb_remove(proxy_enabled=True) def _test_fdb_update_chg_ip(self, proxy_enabled=False): fdb_entries = {'chg_ip': {'net_id': {'agent_ip': {'before': [['port_mac', 'port_ip_1']], 'after': [['port_mac', 'port_ip_2']]}}}} with mock.patch.object(ip_lib.IpNeighCommand, 'add', return_value='') as add_fn, \ mock.patch.object(ip_lib.IpNeighCommand, 'delete', return_value='') as del_fn: self.lb_rpc.fdb_update(None, fdb_entries) if proxy_enabled: del_fn.assert_called_with('port_ip_1', 'port_mac') add_fn.assert_called_with('port_ip_2', 'port_mac') else: del_fn.assert_not_called() add_fn.assert_not_called() def test_fdb_update_chg_ip(self): self._test_fdb_update_chg_ip(proxy_enabled=False) def test_fdb_update_chg_ip_with_arp_responder(self): cfg.CONF.set_override('arp_responder', True, 'VXLAN') self._test_fdb_update_chg_ip(proxy_enabled=True) def test_fdb_update_chg_ip_empty_lists(self): fdb_entries = {'chg_ip': {'net_id': {'agent_ip': {}}}} self.lb_rpc.fdb_update(None, fdb_entries) neutron-8.4.0/neutron/tests/unit/plugins/ml2/drivers/linuxbridge/agent/extension_drivers/0000775000567000056710000000000013044373210033203 5ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/tests/unit/plugins/ml2/drivers/linuxbridge/agent/extension_drivers/__init__.py0000664000567000056710000000000013044372736035316 0ustar jenkinsjenkins00000000000000././@LongLink0000000000000000000000000000015400000000000011215 Lustar 00000000000000neutron-8.4.0/neutron/tests/unit/plugins/ml2/drivers/linuxbridge/agent/extension_drivers/test_qos_driver.pyneutron-8.4.0/neutron/tests/unit/plugins/ml2/drivers/linuxbridge/agent/extension_drivers/test_qos_dr0000664000567000056710000000542013044372760035466 0ustar jenkinsjenkins00000000000000# Copyright 2016 OVH SAS # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
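# The cases below pin a 1:1 dispatch from the qos driver onto tc_lib:
# create/update/delete_bandwidth_limit call TcCommand's
# set/update/delete_filters_bw_limit with the rule's max_kbps and
# max_burst_kbps. Schematically, the shape being asserted (paraphrased for
# illustration, not the driver's actual source; the helper name below is
# hypothetical):
#
#     def create_bandwidth_limit(self, port, rule):
#         tc = self._get_tc_command(port)   # hypothetical helper
#         tc.set_filters_bw_limit(rule.max_kbps, rule.max_burst_kbps)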
import mock
from oslo_config import cfg
from oslo_utils import uuidutils

from neutron.agent.linux import tc_lib
from neutron.objects.qos import rule
from neutron.plugins.ml2.drivers.linuxbridge.agent.common import config  # noqa
from neutron.plugins.ml2.drivers.linuxbridge.agent.extension_drivers import (
    qos_driver)
from neutron.tests import base


TEST_LATENCY_VALUE = 100


class QosLinuxbridgeAgentDriverTestCase(base.BaseTestCase):

    def setUp(self):
        super(QosLinuxbridgeAgentDriverTestCase, self).setUp()
        cfg.CONF.set_override("tbf_latency", TEST_LATENCY_VALUE, "QOS")
        self.qos_driver = qos_driver.QosLinuxbridgeAgentDriver()
        self.qos_driver.initialize()
        self.rule = self._create_bw_limit_rule_obj()
        self.port = self._create_fake_port(uuidutils.generate_uuid())

    def _create_bw_limit_rule_obj(self):
        rule_obj = rule.QosBandwidthLimitRule()
        rule_obj.id = uuidutils.generate_uuid()
        rule_obj.max_kbps = 2
        rule_obj.max_burst_kbps = 200
        rule_obj.obj_reset_changes()
        return rule_obj

    def _create_fake_port(self, policy_id):
        return {'qos_policy_id': policy_id,
                'network_qos_policy_id': None,
                'device': 'fake_tap'}

    def test_create_rule(self):
        with mock.patch.object(
            tc_lib.TcCommand, "set_filters_bw_limit"
        ) as set_bw_limit:
            self.qos_driver.create_bandwidth_limit(self.port, self.rule)
            set_bw_limit.assert_called_once_with(
                self.rule.max_kbps, self.rule.max_burst_kbps,
            )

    def test_update_rule(self):
        with mock.patch.object(
            tc_lib.TcCommand, "update_filters_bw_limit"
        ) as update_bw_limit:
            self.qos_driver.update_bandwidth_limit(self.port, self.rule)
            update_bw_limit.assert_called_once_with(
                self.rule.max_kbps, self.rule.max_burst_kbps,
            )

    def test_delete_rule(self):
        with mock.patch.object(
            tc_lib.TcCommand, "delete_filters_bw_limit"
        ) as delete_bw_limit:
            self.qos_driver.delete_bandwidth_limit(self.port)
            delete_bw_limit.assert_called_once_with()
neutron-8.4.0/neutron/tests/unit/plugins/ml2/drivers/agent/
neutron-8.4.0/neutron/tests/unit/plugins/ml2/drivers/agent/test__common_agent.py
# Copyright (c) 2016 IBM Corp.
#
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
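# Editor's note: an illustrative sketch (not part of the original
# distribution) of the contract pinned down by the QoS driver tests above:
# a bandwidth-limit rule carries max_kbps/max_burst_kbps, and the driver is
# expected to forward exactly those two values to tc_lib.TcCommand. The
# helper below is hypothetical and only restates the test assertions.
def example_expected_tc_call(rule_obj):
    """Return the (method, args) pair asserted in test_create_rule()."""
    return ("set_filters_bw_limit",
            (rule_obj.max_kbps, rule_obj.max_burst_kbps))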
import mock from oslo_config import cfg from neutron.agent.linux import bridge_lib from neutron.common import constants from neutron.plugins.ml2.drivers.agent import _agent_manager_base as amb from neutron.plugins.ml2.drivers.agent import _common_agent as ca from neutron.tests import base LOCAL_IP = '192.168.0.33' LOCAL_IPV6 = '2001:db8:1::33' VXLAN_GROUPV6 = 'ff05::/120' PORT_1 = 'abcdef01-12ddssdfds-fdsfsd' DEVICE_1 = 'tapabcdef01-12' NETWORK_ID = '57653b20-ed5b-4ed0-a31d-06f84e3fd909' BRIDGE_MAPPING_VALUE = 'br-eth2' BRIDGE_MAPPINGS = {'physnet0': BRIDGE_MAPPING_VALUE} INTERFACE_MAPPINGS = {'physnet1': 'eth1'} FAKE_DEFAULT_DEV = mock.Mock() FAKE_DEFAULT_DEV.name = 'eth1' PORT_DATA = { "port_id": PORT_1, "device": DEVICE_1 } class TestCommonAgentLoop(base.BaseTestCase): def setUp(self): super(TestCommonAgentLoop, self).setUp() # disable setting up periodic state reporting cfg.CONF.set_override('report_interval', 0, 'AGENT') cfg.CONF.set_override('prevent_arp_spoofing', False, 'AGENT') cfg.CONF.set_default('firewall_driver', 'neutron.agent.firewall.NoopFirewallDriver', group='SECURITYGROUP') cfg.CONF.set_override('local_ip', LOCAL_IP, 'VXLAN') self.get_bridge_names_p = mock.patch.object(bridge_lib, 'get_bridge_names') self.get_bridge_names = self.get_bridge_names_p.start() self.get_bridge_names.return_value = ["br-int", "brq1"] manager = mock.Mock() manager.get_all_devices.return_value = [] manager.get_agent_configurations.return_value = {} manager.get_rpc_consumers.return_value = [] with mock.patch.object(ca.CommonAgentLoop, '_validate_manager_class'),\ mock.patch.object(ca.CommonAgentLoop, '_validate_rpc_endpoints'): self.agent = ca.CommonAgentLoop(manager, 0, 10, 'fake_agent', 'foo-binary') with mock.patch.object(self.agent, "daemon_loop"): self.agent.start() def test_treat_devices_removed_with_existed_device(self): agent = self.agent agent.mgr.ensure_port_admin_state = mock.Mock() devices = [DEVICE_1] agent.network_ports[NETWORK_ID].append(PORT_DATA) with mock.patch.object(agent.plugin_rpc, "update_device_down") as fn_udd,\ mock.patch.object(agent.sg_agent, "remove_devices_filter") as fn_rdf,\ mock.patch.object(agent.ext_manager, "delete_port") as ext_mgr_delete_port: fn_udd.return_value = {'device': DEVICE_1, 'exists': True} with mock.patch.object(ca.LOG, 'info') as log: resync = agent.treat_devices_removed(devices) self.assertEqual(2, log.call_count) self.assertFalse(resync) self.assertTrue(fn_udd.called) self.assertTrue(fn_rdf.called) self.assertTrue(ext_mgr_delete_port.called) self.assertTrue( PORT_DATA not in agent.network_ports[NETWORK_ID] ) def test_treat_devices_removed_with_not_existed_device(self): agent = self.agent devices = [DEVICE_1] agent.network_ports[NETWORK_ID].append(PORT_DATA) with mock.patch.object(agent.plugin_rpc, "update_device_down") as fn_udd,\ mock.patch.object(agent.sg_agent, "remove_devices_filter") as fn_rdf,\ mock.patch.object(agent.ext_manager, "delete_port") as ext_mgr_delete_port: fn_udd.return_value = {'device': DEVICE_1, 'exists': False} with mock.patch.object(ca.LOG, 'debug') as log: resync = agent.treat_devices_removed(devices) self.assertEqual(1, log.call_count) self.assertFalse(resync) self.assertTrue(fn_udd.called) self.assertTrue(fn_rdf.called) self.assertTrue(ext_mgr_delete_port.called) self.assertTrue( PORT_DATA not in agent.network_ports[NETWORK_ID] ) def test_treat_devices_removed_failed(self): agent = self.agent devices = [DEVICE_1] agent.network_ports[NETWORK_ID].append(PORT_DATA) with mock.patch.object(agent.plugin_rpc, 
"update_device_down") as fn_udd,\ mock.patch.object(agent.sg_agent, "remove_devices_filter") as fn_rdf,\ mock.patch.object(agent.ext_manager, "delete_port") as ext_mgr_delete_port: fn_udd.side_effect = Exception() resync = agent.treat_devices_removed(devices) self.assertTrue(resync) self.assertTrue(fn_udd.called) self.assertTrue(fn_rdf.called) self.assertTrue(ext_mgr_delete_port.called) self.assertTrue( PORT_DATA not in agent.network_ports[NETWORK_ID] ) def test_treat_devices_removed_with_prevent_arp_spoofing_true(self): agent = self.agent agent.prevent_arp_spoofing = True agent._ensure_port_admin_state = mock.Mock() devices = [DEVICE_1] with mock.patch.object(agent.plugin_rpc, "update_device_down") as fn_udd,\ mock.patch.object(agent.sg_agent, "remove_devices_filter"): fn_udd.return_value = {'device': DEVICE_1, 'exists': True} with mock.patch.object(agent.mgr, 'delete_arp_spoofing_protection') as de_arp: agent.treat_devices_removed(devices) de_arp.assert_called_with(devices) def test__get_devices_locally_modified(self): new_ts = {1: 1000, 2: 2000, 3: 3000} old_ts = {1: 10, 2: 2000, 4: 900} # 3 and 4 are not returned because 3 is a new device and 4 is a # removed device self.assertEqual( set([1]), self.agent._get_devices_locally_modified(new_ts, old_ts)) def _test_scan_devices(self, previous, updated, fake_current, expected, sync, fake_ts_current=None): self.agent.mgr = mock.Mock() self.agent.mgr.get_all_devices.return_value = fake_current self.agent.mgr.get_devices_modified_timestamps.return_value = ( fake_ts_current or {}) self.agent.rpc_callbacks.get_and_clear_updated_devices.return_value =\ updated results = self.agent.scan_devices(previous, sync) self.assertEqual(expected, results) def test_scan_devices_no_changes(self): previous = {'current': set([1, 2]), 'updated': set(), 'added': set(), 'removed': set(), 'timestamps': {}} fake_current = set([1, 2]) updated = set() expected = {'current': set([1, 2]), 'updated': set(), 'added': set(), 'removed': set(), 'timestamps': {}} self._test_scan_devices(previous, updated, fake_current, expected, sync=False) def test_scan_devices_timestamp_triggers_updated(self): previous = {'current': set([1, 2]), 'updated': set(), 'added': set(), 'removed': set(), 'timestamps': {2: 600}} fake_current = set([1, 2]) updated = set() expected = {'current': set([1, 2]), 'updated': set([2]), 'added': set(), 'removed': set(), 'timestamps': {2: 1000}} self._test_scan_devices(previous, updated, fake_current, expected, sync=False, fake_ts_current={2: 1000}) def test_scan_devices_added_removed(self): previous = {'current': set([1, 2]), 'updated': set(), 'added': set(), 'removed': set(), 'timestamps': {}} fake_current = set([2, 3]) updated = set() expected = {'current': set([2, 3]), 'updated': set(), 'added': set([3]), 'removed': set([1]), 'timestamps': {}} self._test_scan_devices(previous, updated, fake_current, expected, sync=False) def test_scan_devices_removed_retried_on_sync(self): previous = {'current': set([2, 3]), 'updated': set(), 'added': set(), 'removed': set([1]), 'timestamps': {}} fake_current = set([2, 3]) updated = set() expected = {'current': set([2, 3]), 'updated': set(), 'added': set([2, 3]), 'removed': set([1]), 'timestamps': {}} self._test_scan_devices(previous, updated, fake_current, expected, sync=True) def test_scan_devices_vanished_removed_on_sync(self): previous = {'current': set([2, 3]), 'updated': set(), 'added': set(), 'removed': set([1]), 'timestamps': {}} # Device 2 disappeared. 
        fake_current = set([3])
        updated = set()
        # Device 1 should be retried.
        expected = {'current': set([3]),
                    'updated': set(),
                    'added': set([3]),
                    'removed': set([1, 2]),
                    'timestamps': {}}

        self._test_scan_devices(previous, updated, fake_current, expected,
                                sync=True)

    def test_scan_devices_updated(self):
        previous = {'current': set([1, 2]),
                    'updated': set(),
                    'added': set(),
                    'removed': set(),
                    'timestamps': {}}
        fake_current = set([1, 2])
        updated = set([1])
        expected = {'current': set([1, 2]),
                    'updated': set([1]),
                    'added': set(),
                    'removed': set(),
                    'timestamps': {}}

        self._test_scan_devices(previous, updated, fake_current, expected,
                                sync=False)

    def test_scan_devices_updated_non_existing(self):
        previous = {'current': set([1, 2]),
                    'updated': set(),
                    'added': set(),
                    'removed': set(),
                    'timestamps': {}}
        fake_current = set([1, 2])
        updated = set([3])
        expected = {'current': set([1, 2]),
                    'updated': set(),
                    'added': set(),
                    'removed': set(),
                    'timestamps': {}}

        self._test_scan_devices(previous, updated, fake_current, expected,
                                sync=False)

    def test_scan_devices_updated_deleted_concurrently(self):
        previous = {
            'current': set([1, 2]),
            'updated': set(),
            'added': set(),
            'removed': set(),
            'timestamps': {}
        }
        # Device 2 disappeared.
        fake_current = set([1])
        # Device 2 got a concurrent update via network_update.
        updated = set([2])
        expected = {
            'current': set([1]),
            'updated': set(),
            'added': set(),
            'removed': set([2]),
            'timestamps': {}
        }
        self._test_scan_devices(
            previous, updated, fake_current, expected, sync=False
        )

    def test_scan_devices_updated_on_sync(self):
        previous = {'current': set([1, 2]),
                    'updated': set([1]),
                    'added': set(),
                    'removed': set(),
                    'timestamps': {}}
        fake_current = set([1, 2])
        updated = set([2])
        expected = {'current': set([1, 2]),
                    'updated': set([1, 2]),
                    'added': set([1, 2]),
                    'removed': set(),
                    'timestamps': {}}

        self._test_scan_devices(previous, updated, fake_current, expected,
                                sync=True)

    def test_scan_devices_with_prevent_arp_spoofing_true(self):
        self.agent.prevent_arp_spoofing = True
        previous = None
        fake_current = set([1, 2])
        updated = set()
        expected = {'current': set([1, 2]),
                    'updated': set(),
                    'added': set([1, 2]),
                    'removed': set(),
                    'timestamps': {}}

        self._test_scan_devices(previous, updated, fake_current, expected,
                                sync=False)
        self.agent.mgr.delete_unreferenced_arp_protection.assert_called_with(
            fake_current)

    def test_process_network_devices(self):
        agent = self.agent
        device_info = {'current': set(),
                       'added': set(['tap3', 'tap4']),
                       'updated': set(['tap2', 'tap3']),
                       'removed': set(['tap1'])}
        agent.sg_agent.setup_port_filters = mock.Mock()
        agent.treat_devices_added_updated = mock.Mock(return_value=False)
        agent.treat_devices_removed = mock.Mock(return_value=False)

        agent.process_network_devices(device_info)

        agent.sg_agent.setup_port_filters.assert_called_with(
            device_info['added'], device_info['updated'])
        agent.treat_devices_added_updated.assert_called_with(set(['tap2',
                                                                  'tap3',
                                                                  'tap4']))
        agent.treat_devices_removed.assert_called_with(set(['tap1']))

    def test_treat_devices_added_updated_no_local_interface(self):
        agent = self.agent
        mock_details = {'device': 'dev123',
                        'port_id': 'port123',
                        'network_id': 'net123',
                        'admin_state_up': True,
                        'network_type': 'vlan',
                        'segmentation_id': 100,
                        'physical_network': 'physnet1',
                        'device_owner': constants.DEVICE_OWNER_NETWORK_PREFIX}
        agent.ext_manager = mock.Mock()
        agent.plugin_rpc = mock.Mock()
        agent.plugin_rpc.get_devices_details_list.return_value = [mock_details]
        agent.mgr = mock.Mock()
        agent.mgr.plug_interface.return_value = False
        agent.mgr.ensure_port_admin_state = mock.Mock()
agent.treat_devices_added_updated(set(['tap1'])) self.assertFalse(agent.mgr.ensure_port_admin_state.called) def test_treat_devices_added_updated_admin_state_up_true(self): agent = self.agent mock_details = {'device': 'dev123', 'port_id': 'port123', 'network_id': 'net123', 'admin_state_up': True, 'network_type': 'vlan', 'segmentation_id': 100, 'physical_network': 'physnet1', 'device_owner': constants.DEVICE_OWNER_NETWORK_PREFIX} mock_port_data = { 'port_id': mock_details['port_id'], 'device': mock_details['device'] } agent.ext_manager = mock.Mock() agent.plugin_rpc = mock.Mock() agent.plugin_rpc.get_devices_details_list.return_value = [mock_details] agent.mgr = mock.Mock() agent.mgr.plug_interface.return_value = True agent.mgr.ensure_port_admin_state = mock.Mock() mock_segment = amb.NetworkSegment(mock_details['network_type'], mock_details['physical_network'], mock_details['segmentation_id']) with mock.patch('neutron.plugins.ml2.drivers.agent.' '_agent_manager_base.NetworkSegment', return_value=mock_segment): resync_needed = agent.treat_devices_added_updated(set(['tap1'])) self.assertFalse(resync_needed) agent.rpc_callbacks.add_network.assert_called_with('net123', mock_segment) agent.mgr.plug_interface.assert_called_with( 'net123', mock_segment, 'dev123', constants.DEVICE_OWNER_NETWORK_PREFIX) self.assertTrue(agent.plugin_rpc.update_device_up.called) self.assertTrue(agent.ext_manager.handle_port.called) self.assertTrue(mock_port_data in agent.network_ports[ mock_details['network_id']] ) def test_treat_devices_added_updated_prevent_arp_spoofing_true(self): agent = self.agent agent.prevent_arp_spoofing = True mock_details = {'device': 'dev123', 'port_id': 'port123', 'network_id': 'net123', 'admin_state_up': True, 'network_type': 'vlan', 'segmentation_id': 100, 'physical_network': 'physnet1', 'device_owner': constants.DEVICE_OWNER_NETWORK_PREFIX} agent.plugin_rpc = mock.Mock() agent.plugin_rpc.get_devices_details_list.return_value = [mock_details] agent.mgr = mock.Mock() agent.mgr.plug_interface.return_value = True with mock.patch.object(agent.mgr, 'setup_arp_spoofing_protection') as set_arp: agent.treat_devices_added_updated(set(['tap1'])) set_arp.assert_called_with(mock_details['device'], mock_details) def test_set_rpc_timeout(self): self.agent.stop() for rpc_client in (self.agent.plugin_rpc.client, self.agent.sg_plugin_rpc.client, self.agent.state_rpc.client): self.assertEqual(cfg.CONF.AGENT.quitting_rpc_timeout, rpc_client.timeout) def test_set_rpc_timeout_no_value(self): self.agent.quitting_rpc_timeout = None with mock.patch.object(self.agent, 'set_rpc_timeout') as mock_set_rpc: self.agent.stop() self.assertFalse(mock_set_rpc.called) def test_report_state_revived(self): with mock.patch.object(self.agent.state_rpc, "report_state") as report_st: report_st.return_value = constants.AGENT_REVIVED self.agent._report_state() self.assertTrue(self.agent.fullsync) def test_update_network_ports(self): port_1_data = PORT_DATA NETWORK_2_ID = 'fake_second_network' port_2_data = { 'port_id': 'fake_port_2', 'device': 'fake_port_2_device_name' } self.agent.network_ports[NETWORK_ID].append( port_1_data ) self.agent.network_ports[NETWORK_ID].append( port_2_data ) #check update port: self.agent._update_network_ports( NETWORK_2_ID, port_2_data['port_id'], port_2_data['device'] ) self.assertTrue( port_2_data not in self.agent.network_ports[NETWORK_ID] ) self.assertTrue( port_2_data in self.agent.network_ports[NETWORK_2_ID] ) def test_clean_network_ports(self): port_1_data = PORT_DATA port_2_data = { 'port_id': 
'fake_port_2', 'device': 'fake_port_2_device_name' } self.agent.network_ports[NETWORK_ID].append( port_1_data ) self.agent.network_ports[NETWORK_ID].append( port_2_data ) #check removing port from network when other ports are still there: cleaned_port_id = self.agent._clean_network_ports(DEVICE_1) self.assertTrue( NETWORK_ID in self.agent.network_ports.keys() ) self.assertTrue( port_1_data not in self.agent.network_ports[NETWORK_ID] ) self.assertTrue( port_2_data in self.agent.network_ports[NETWORK_ID] ) self.assertEqual(PORT_1, cleaned_port_id) #and now remove last port from network: cleaned_port_id = self.agent._clean_network_ports( port_2_data['device'] ) self.assertTrue( NETWORK_ID not in self.agent.network_ports.keys() ) self.assertEqual(port_2_data['port_id'], cleaned_port_id) neutron-8.4.0/neutron/tests/unit/plugins/ml2/drivers/agent/__init__.py0000664000567000056710000000000013044372736027230 0ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/tests/unit/plugins/ml2/drivers/agent/test__agent_manager_base.py0000664000567000056710000000350013044372736032461 0ustar jenkinsjenkins00000000000000# Copyright (c) 2016 IBM Corp. # # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron.plugins.ml2.drivers.agent import _agent_manager_base as amb from neutron.tests import base class RPCCallBackImpl(amb.CommonAgentManagerRpcCallBackBase): def security_groups_rule_updated(self, context, **kwargs): pass def security_groups_member_updated(self, context, **kwargs): pass def security_groups_provider_updated(self, context, **kwargs): pass class Test_CommonAgentManagerRpcCallBackBase(base.BaseTestCase): def setUp(self): super(Test_CommonAgentManagerRpcCallBackBase, self).setUp() self.rpc_callbacks = RPCCallBackImpl(None, None, None) def test_get_and_clear_updated_devices(self): updated_devices = ['tap1', 'tap2'] self.rpc_callbacks.updated_devices = updated_devices self.assertEqual(updated_devices, self.rpc_callbacks.get_and_clear_updated_devices()) self.assertEqual(set(), self.rpc_callbacks.updated_devices) def test_add_network(self): segment = amb.NetworkSegment('vlan', 'physnet1', 100) network_id = "foo" self.rpc_callbacks.add_network(network_id, segment) self.assertEqual(segment, self.rpc_callbacks.network_map[network_id]) neutron-8.4.0/neutron/tests/unit/plugins/ml2/drivers/test_type_vlan.py0000664000567000056710000002715613044372760027455 0ustar jenkinsjenkins00000000000000# Copyright (c) 2014 Thales Services SAS # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
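# Editor's note: an illustrative sketch (not part of the original
# distribution) of the accumulate-then-drain contract checked by
# Test_CommonAgentManagerRpcCallBackBase above: updated_devices collects
# device names between polling iterations, and get_and_clear_updated_devices
# hands them to the agent loop while atomically resetting the set. This
# stand-alone mimic is hypothetical and only restates that contract.
class _UpdatedDevicesMimic(object):
    def __init__(self):
        self.updated_devices = set()

    def get_and_clear_updated_devices(self):
        # Swap in a fresh set so updates arriving after the call are kept
        # for the next iteration rather than lost.
        devices, self.updated_devices = self.updated_devices, set()
        return devices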
import mock from testtools import matchers from neutron.common import exceptions as exc import neutron.db.api as db from neutron.plugins.common import constants as p_const from neutron.plugins.common import utils as plugin_utils from neutron.plugins.ml2 import config from neutron.plugins.ml2 import driver_api as api from neutron.plugins.ml2.drivers import type_vlan from neutron.tests.unit import testlib_api PROVIDER_NET = 'phys_net1' TENANT_NET = 'phys_net2' VLAN_MIN = 200 VLAN_MAX = 209 NETWORK_VLAN_RANGES = [PROVIDER_NET, "%s:%s:%s" % (TENANT_NET, VLAN_MIN, VLAN_MAX)] UPDATED_VLAN_RANGES = { PROVIDER_NET: [], TENANT_NET: [(VLAN_MIN + 5, VLAN_MAX + 5)], } class VlanTypeTest(testlib_api.SqlTestCase): def setUp(self): super(VlanTypeTest, self).setUp() config.cfg.CONF.set_override('network_vlan_ranges', NETWORK_VLAN_RANGES, group='ml2_type_vlan') self.network_vlan_ranges = plugin_utils.parse_network_vlan_ranges( NETWORK_VLAN_RANGES) self.driver = type_vlan.VlanTypeDriver() self.driver._sync_vlan_allocations() self.session = db.get_session() self.driver.physnet_mtus = [] def test_parse_network_exception_handling(self): with mock.patch.object(plugin_utils, 'parse_network_vlan_ranges') as parse_ranges: parse_ranges.side_effect = Exception('any exception') self.assertRaises(SystemExit, self.driver._parse_network_vlan_ranges) def _get_allocation(self, session, segment): return session.query(type_vlan.VlanAllocation).filter_by( physical_network=segment[api.PHYSICAL_NETWORK], vlan_id=segment[api.SEGMENTATION_ID]).first() def test_partial_segment_is_partial_segment(self): segment = {api.NETWORK_TYPE: p_const.TYPE_VLAN} self.assertTrue(self.driver.is_partial_segment(segment)) def test_specific_segment_is_not_partial_segment(self): segment = {api.NETWORK_TYPE: p_const.TYPE_VLAN, api.PHYSICAL_NETWORK: PROVIDER_NET, api.SEGMENTATION_ID: 1} self.assertFalse(self.driver.is_partial_segment(segment)) def test_validate_provider_segment(self): segment = {api.NETWORK_TYPE: p_const.TYPE_VLAN, api.PHYSICAL_NETWORK: PROVIDER_NET, api.SEGMENTATION_ID: 1} self.assertIsNone(self.driver.validate_provider_segment(segment)) def test_validate_provider_segment_without_segmentation_id(self): segment = {api.NETWORK_TYPE: p_const.TYPE_VLAN, api.PHYSICAL_NETWORK: TENANT_NET} self.driver.validate_provider_segment(segment) def test_validate_provider_segment_without_physical_network(self): segment = {api.NETWORK_TYPE: p_const.TYPE_VLAN} self.driver.validate_provider_segment(segment) def test_validate_provider_segment_with_missing_physical_network(self): segment = {api.NETWORK_TYPE: p_const.TYPE_VLAN, api.SEGMENTATION_ID: 1} self.assertRaises(exc.InvalidInput, self.driver.validate_provider_segment, segment) def test_validate_provider_segment_with_invalid_physical_network(self): segment = {api.NETWORK_TYPE: p_const.TYPE_VLAN, api.PHYSICAL_NETWORK: 'other_phys_net', api.SEGMENTATION_ID: 1} self.assertRaises(exc.InvalidInput, self.driver.validate_provider_segment, segment) def test_validate_provider_segment_with_invalid_segmentation_id(self): segment = {api.NETWORK_TYPE: p_const.TYPE_VLAN, api.PHYSICAL_NETWORK: PROVIDER_NET, api.SEGMENTATION_ID: 5000} self.assertRaises(exc.InvalidInput, self.driver.validate_provider_segment, segment) def test_validate_provider_segment_with_invalid_input(self): segment = {api.NETWORK_TYPE: p_const.TYPE_VLAN, api.PHYSICAL_NETWORK: PROVIDER_NET, api.SEGMENTATION_ID: 1, 'invalid': 1} self.assertRaises(exc.InvalidInput, self.driver.validate_provider_segment, segment) def 
test_sync_vlan_allocations(self): def check_in_ranges(network_vlan_ranges): vlan_min, vlan_max = network_vlan_ranges[TENANT_NET][0] segment = {api.NETWORK_TYPE: p_const.TYPE_VLAN, api.PHYSICAL_NETWORK: TENANT_NET} segment[api.SEGMENTATION_ID] = vlan_min - 1 self.assertIsNone( self._get_allocation(self.session, segment)) segment[api.SEGMENTATION_ID] = vlan_max + 1 self.assertIsNone( self._get_allocation(self.session, segment)) segment[api.SEGMENTATION_ID] = vlan_min self.assertFalse( self._get_allocation(self.session, segment).allocated) segment[api.SEGMENTATION_ID] = vlan_max self.assertFalse( self._get_allocation(self.session, segment).allocated) check_in_ranges(self.network_vlan_ranges) self.driver.network_vlan_ranges = UPDATED_VLAN_RANGES self.driver._sync_vlan_allocations() check_in_ranges(UPDATED_VLAN_RANGES) def test_reserve_provider_segment(self): segment = {api.NETWORK_TYPE: p_const.TYPE_VLAN, api.PHYSICAL_NETWORK: PROVIDER_NET, api.SEGMENTATION_ID: 101} alloc = self._get_allocation(self.session, segment) self.assertIsNone(alloc) observed = self.driver.reserve_provider_segment(self.session, segment) alloc = self._get_allocation(self.session, observed) self.assertTrue(alloc.allocated) def test_reserve_provider_segment_already_allocated(self): segment = {api.NETWORK_TYPE: p_const.TYPE_VLAN, api.PHYSICAL_NETWORK: PROVIDER_NET, api.SEGMENTATION_ID: 101} observed = self.driver.reserve_provider_segment(self.session, segment) self.assertRaises(exc.VlanIdInUse, self.driver.reserve_provider_segment, self.session, observed) def test_reserve_provider_segment_in_tenant_pools(self): segment = {api.NETWORK_TYPE: p_const.TYPE_VLAN, api.PHYSICAL_NETWORK: TENANT_NET, api.SEGMENTATION_ID: VLAN_MIN} alloc = self._get_allocation(self.session, segment) self.assertFalse(alloc.allocated) observed = self.driver.reserve_provider_segment(self.session, segment) alloc = self._get_allocation(self.session, observed) self.assertTrue(alloc.allocated) def test_reserve_provider_segment_without_segmentation_id(self): segment = {api.NETWORK_TYPE: p_const.TYPE_VLAN, api.PHYSICAL_NETWORK: TENANT_NET} observed = self.driver.reserve_provider_segment(self.session, segment) alloc = self._get_allocation(self.session, observed) self.assertTrue(alloc.allocated) vlan_id = observed[api.SEGMENTATION_ID] self.assertThat(vlan_id, matchers.GreaterThan(VLAN_MIN - 1)) self.assertThat(vlan_id, matchers.LessThan(VLAN_MAX + 1)) def test_reserve_provider_segment_without_physical_network(self): segment = {api.NETWORK_TYPE: p_const.TYPE_VLAN} observed = self.driver.reserve_provider_segment(self.session, segment) alloc = self._get_allocation(self.session, observed) self.assertTrue(alloc.allocated) vlan_id = observed[api.SEGMENTATION_ID] self.assertThat(vlan_id, matchers.GreaterThan(VLAN_MIN - 1)) self.assertThat(vlan_id, matchers.LessThan(VLAN_MAX + 1)) self.assertEqual(TENANT_NET, observed[api.PHYSICAL_NETWORK]) def test_reserve_provider_segment_all_allocateds(self): for __ in range(VLAN_MIN, VLAN_MAX + 1): self.driver.allocate_tenant_segment(self.session) segment = {api.NETWORK_TYPE: p_const.TYPE_VLAN} self.assertRaises(exc.NoNetworkAvailable, self.driver.reserve_provider_segment, self.session, segment) def test_get_mtu(self): config.cfg.CONF.set_override('global_physnet_mtu', 1475) config.cfg.CONF.set_override('path_mtu', 1400, group='ml2') self.driver.physnet_mtus = {'physnet1': 1450, 'physnet2': 1400} self.assertEqual(1450, self.driver.get_mtu('physnet1')) config.cfg.CONF.set_override('global_physnet_mtu', 1375) 
config.cfg.CONF.set_override('path_mtu', 1400, group='ml2') self.driver.physnet_mtus = {'physnet1': 1450, 'physnet2': 1400} self.assertEqual(1375, self.driver.get_mtu('physnet1')) config.cfg.CONF.set_override('global_physnet_mtu', 0) config.cfg.CONF.set_override('path_mtu', 1400, group='ml2') self.driver.physnet_mtus = {'physnet1': 1450, 'physnet2': 1400} self.assertEqual(1450, self.driver.get_mtu('physnet1')) config.cfg.CONF.set_override('global_physnet_mtu', 0) config.cfg.CONF.set_override('path_mtu', 0, group='ml2') self.driver.physnet_mtus = {} self.assertEqual(0, self.driver.get_mtu('physnet1')) def test_allocate_tenant_segment(self): for __ in range(VLAN_MIN, VLAN_MAX + 1): segment = self.driver.allocate_tenant_segment(self.session) alloc = self._get_allocation(self.session, segment) self.assertTrue(alloc.allocated) vlan_id = segment[api.SEGMENTATION_ID] self.assertThat(vlan_id, matchers.GreaterThan(VLAN_MIN - 1)) self.assertThat(vlan_id, matchers.LessThan(VLAN_MAX + 1)) self.assertEqual(TENANT_NET, segment[api.PHYSICAL_NETWORK]) def test_allocate_tenant_segment_no_available(self): for __ in range(VLAN_MIN, VLAN_MAX + 1): self.driver.allocate_tenant_segment(self.session) segment = self.driver.allocate_tenant_segment(self.session) self.assertIsNone(segment) def test_release_segment(self): segment = self.driver.allocate_tenant_segment(self.session) self.driver.release_segment(self.session, segment) alloc = self._get_allocation(self.session, segment) self.assertFalse(alloc.allocated) def test_release_segment_unallocated(self): segment = {api.NETWORK_TYPE: p_const.TYPE_VLAN, api.PHYSICAL_NETWORK: PROVIDER_NET, api.SEGMENTATION_ID: 101} with mock.patch.object(type_vlan.LOG, 'warning') as log_warn: self.driver.release_segment(self.session, segment) log_warn.assert_called_once_with( "No vlan_id %(vlan_id)s found on physical network " "%(physical_network)s", {'vlan_id': 101, 'physical_network': PROVIDER_NET}) neutron-8.4.0/neutron/tests/unit/plugins/ml2/drivers/mechanism_test.py0000664000567000056710000002475013044372760027415 0ustar jenkinsjenkins00000000000000# Copyright (c) 2013 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
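# Editor's note: a minimal sketch (not part of the original distribution) of
# the MTU-selection behaviour pinned down by VlanTypeTest.test_get_mtu()
# above: for a VLAN physnet the effective MTU is the smallest non-zero value
# among global_physnet_mtu and the per-physnet override, and the ml2
# path_mtu option plays no role in those assertions. The helper below is
# hypothetical and only reproduces the four asserted cases.
def example_vlan_mtu(global_physnet_mtu, physnet_mtu):
    # Zero/absent values mean "no limit configured" and are ignored.
    candidates = [m for m in (global_physnet_mtu, physnet_mtu) if m]
    return min(candidates) if candidates else 0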
from neutron.common import constants as const from neutron.extensions import portbindings from neutron.plugins.ml2 import driver_api as api class TestMechanismDriver(api.MechanismDriver): """Test mechanism driver for testing mechanism driver api.""" def initialize(self): self.bound_ports = set() def _check_network_context(self, context, original_expected): assert(isinstance(context, api.NetworkContext)) assert(isinstance(context.current, dict)) assert(context.current['id'] is not None) if original_expected: assert(isinstance(context.original, dict)) assert(context.current['id'] == context.original['id']) else: assert(not context.original) assert(context.network_segments) def create_network_precommit(self, context): self._check_network_context(context, False) def create_network_postcommit(self, context): self._check_network_context(context, False) def update_network_precommit(self, context): self._check_network_context(context, True) def update_network_postcommit(self, context): self._check_network_context(context, True) def delete_network_precommit(self, context): self._check_network_context(context, False) def delete_network_postcommit(self, context): self._check_network_context(context, False) def _check_subnet_context(self, context, original_expected): assert(isinstance(context, api.SubnetContext)) assert(isinstance(context.current, dict)) assert(context.current['id'] is not None) if original_expected: assert(isinstance(context.original, dict)) assert(context.current['id'] == context.original['id']) else: assert(not context.original) network_context = context.network assert(isinstance(network_context, api.NetworkContext)) self._check_network_context(network_context, False) def create_subnet_precommit(self, context): self._check_subnet_context(context, False) def create_subnet_postcommit(self, context): self._check_subnet_context(context, False) def update_subnet_precommit(self, context): self._check_subnet_context(context, True) def update_subnet_postcommit(self, context): self._check_subnet_context(context, True) def delete_subnet_precommit(self, context): self._check_subnet_context(context, False) def delete_subnet_postcommit(self, context): self._check_subnet_context(context, False) def _check_port_context(self, context, original_expected): assert(isinstance(context, api.PortContext)) self._check_port_info(context.current, context.host, context.vif_type, context.vif_details) if context.vif_type in (portbindings.VIF_TYPE_UNBOUND, portbindings.VIF_TYPE_BINDING_FAILED): if (context.segments_to_bind and context.segments_to_bind[0][api.NETWORK_TYPE] == 'vlan'): # Partially bound. 
self._check_bound(context.binding_levels, context.top_bound_segment, context.bottom_bound_segment) else: self._check_unbound(context.binding_levels, context.top_bound_segment, context.bottom_bound_segment) assert((context.current['id'], context.host) not in self.bound_ports) else: self._check_bound(context.binding_levels, context.top_bound_segment, context.bottom_bound_segment) assert((context.current['id'], context.host) in self.bound_ports) if original_expected: self._check_port_info(context.original, context.original_host, context.original_vif_type, context.original_vif_details) assert(context.current['id'] == context.original['id']) if (context.original_vif_type in (portbindings.VIF_TYPE_UNBOUND, portbindings.VIF_TYPE_BINDING_FAILED)): self._check_unbound(context.original_binding_levels, context.original_top_bound_segment, context.original_bottom_bound_segment) else: self._check_bound(context.original_binding_levels, context.original_top_bound_segment, context.original_bottom_bound_segment) else: assert(context.original is None) assert(context.original_host is None) assert(context.original_vif_type is None) assert(context.original_vif_details is None) assert(context.original_status is None) self._check_unbound(context.original_binding_levels, context.original_top_bound_segment, context.original_bottom_bound_segment) network_context = context.network assert(isinstance(network_context, api.NetworkContext)) self._check_network_context(network_context, False) def _check_port_info(self, port, host, vif_type, vif_details): assert(isinstance(port, dict)) assert(port['id'] is not None) assert(vif_type in (portbindings.VIF_TYPE_UNBOUND, portbindings.VIF_TYPE_BINDING_FAILED, portbindings.VIF_TYPE_DISTRIBUTED, portbindings.VIF_TYPE_OVS, portbindings.VIF_TYPE_BRIDGE)) if port['device_owner'] == const.DEVICE_OWNER_DVR_INTERFACE: assert(port[portbindings.HOST_ID] == '') assert(port[portbindings.VIF_TYPE] == portbindings.VIF_TYPE_DISTRIBUTED) assert(port[portbindings.VIF_DETAILS] == {}) else: assert(port[portbindings.HOST_ID] == host) assert(port[portbindings.VIF_TYPE] != portbindings.VIF_TYPE_DISTRIBUTED) assert(port[portbindings.VIF_TYPE] == vif_type) assert(isinstance(vif_details, dict)) assert(port[portbindings.VIF_DETAILS] == vif_details) def _check_unbound(self, levels, top_segment, bottom_segment): assert(levels is None) assert(top_segment is None) assert(bottom_segment is None) def _check_bound(self, levels, top_segment, bottom_segment): assert(isinstance(levels, list)) top_level = levels[0] assert(isinstance(top_level, dict)) assert(isinstance(top_segment, dict)) assert(top_segment == top_level[api.BOUND_SEGMENT]) assert('test' == top_level[api.BOUND_DRIVER]) bottom_level = levels[-1] assert(isinstance(bottom_level, dict)) assert(isinstance(bottom_segment, dict)) assert(bottom_segment == bottom_level[api.BOUND_SEGMENT]) assert('test' == bottom_level[api.BOUND_DRIVER]) def create_port_precommit(self, context): self._check_port_context(context, False) def create_port_postcommit(self, context): self._check_port_context(context, False) def update_port_precommit(self, context): if ((context.original_top_bound_segment and not context.top_bound_segment) or (context.host == "host-fail")): self.bound_ports.remove((context.original['id'], context.original_host)) self._check_port_context(context, True) def update_port_postcommit(self, context): self._check_port_context(context, True) def delete_port_precommit(self, context): self._check_port_context(context, False) def delete_port_postcommit(self, 
context): self._check_port_context(context, False) def bind_port(self, context): self._check_port_context(context, False) host = context.host segment = context.segments_to_bind[0] segment_id = segment[api.ID] if host == "host-ovs-no_filter": context.set_binding(segment_id, portbindings.VIF_TYPE_OVS, {portbindings.CAP_PORT_FILTER: False}) self.bound_ports.add((context.current['id'], host)) elif host == "host-bridge-filter": context.set_binding(segment_id, portbindings.VIF_TYPE_BRIDGE, {portbindings.CAP_PORT_FILTER: True}) self.bound_ports.add((context.current['id'], host)) elif host == "host-ovs-filter-active": context.set_binding(segment_id, portbindings.VIF_TYPE_OVS, {portbindings.CAP_PORT_FILTER: True}, status=const.PORT_STATUS_ACTIVE) self.bound_ports.add((context.current['id'], host)) elif host == "host-hierarchical": segment_type = segment[api.NETWORK_TYPE] if segment_type == 'local': next_segment = context.allocate_dynamic_segment( {api.NETWORK_TYPE: 'vlan', api.PHYSICAL_NETWORK: 'physnet1'} ) context.continue_binding(segment_id, [next_segment]) elif segment_type == 'vlan': context.set_binding(segment_id, portbindings.VIF_TYPE_OVS, {portbindings.CAP_PORT_FILTER: False}) self.bound_ports.add((context.current['id'], host)) elif host == "host-fail": context.set_binding(None, portbindings.VIF_TYPE_BINDING_FAILED, {portbindings.CAP_PORT_FILTER: False}) self.bound_ports.add((context.current['id'], host)) neutron-8.4.0/neutron/tests/unit/plugins/ml2/drivers/l2pop/0000775000567000056710000000000013044373210025053 5ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/tests/unit/plugins/ml2/drivers/l2pop/test_mech_driver.py0000664000567000056710000016362613044372760031002 0ustar jenkinsjenkins00000000000000# Copyright (c) 2013 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
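# Editor's note: an illustrative sketch (not part of the original
# distribution) of the hierarchical binding flow exercised by
# TestMechanismDriver.bind_port() above for the "host-hierarchical" host:
# a dynamic VLAN segment is allocated beneath the local segment, binding
# continues one level down, and the VLAN level completes the binding. The
# function below is hypothetical pseudo-glue that mirrors the calls made in
# that test driver; 'ovs' and 'port_filter' stand in for the portbindings
# constants used there.
def example_hierarchical_bind(context, segment):
    if segment['network_type'] == 'local':
        # Top level: allocate a dynamic VLAN segment and descend.
        next_segment = context.allocate_dynamic_segment(
            {'network_type': 'vlan', 'physical_network': 'physnet1'})
        context.continue_binding(segment['id'], [next_segment])
    elif segment['network_type'] == 'vlan':
        # Bottom level: complete the binding on the dynamic segment.
        context.set_binding(segment['id'], 'ovs', {'port_filter': False})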
import mock from oslo_serialization import jsonutils import testtools from neutron.api.v2 import attributes from neutron.common import constants from neutron.common import topics from neutron import context from neutron.db import agents_db from neutron.db import common_db_mixin from neutron.db import l3_agentschedulers_db from neutron.db import l3_hamode_db from neutron.extensions import portbindings from neutron.extensions import providernet as pnet from neutron import manager from neutron.plugins.common import constants as service_constants from neutron.plugins.ml2.common import exceptions as ml2_exc from neutron.plugins.ml2 import driver_context from neutron.plugins.ml2.drivers.l2pop import db as l2pop_db from neutron.plugins.ml2.drivers.l2pop import mech_driver as l2pop_mech_driver from neutron.plugins.ml2.drivers.l2pop import rpc as l2pop_rpc from neutron.plugins.ml2.drivers.l2pop.rpc_manager import l2population_rpc from neutron.plugins.ml2 import managers from neutron.plugins.ml2 import rpc from neutron.scheduler import l3_agent_scheduler from neutron.tests import base from neutron.tests.common import helpers from neutron.tests.unit.plugins.ml2 import test_plugin HOST = 'my_l2_host' HOST_2 = HOST + '_2' HOST_3 = HOST + '_3' HOST_4 = HOST + '_4' HOST_5 = HOST + '_5' TEST_ROUTER_ID = 'router_id' NOTIFIER = 'neutron.plugins.ml2.rpc.AgentNotifierApi' DEVICE_OWNER_COMPUTE = constants.DEVICE_OWNER_COMPUTE_PREFIX + 'fake' class FakeL3PluginWithAgents(common_db_mixin.CommonDbMixin, l3_hamode_db.L3_HA_NAT_db_mixin, l3_agentschedulers_db.L3AgentSchedulerDbMixin, agents_db.AgentDbMixin): pass class TestL2PopulationRpcTestCase(test_plugin.Ml2PluginV2TestCase): _mechanism_drivers = ['openvswitch', 'fake_agent', 'l2population'] def setUp(self): super(TestL2PopulationRpcTestCase, self).setUp() self.adminContext = context.get_admin_context() self.type_manager = managers.TypeManager() self.notifier = rpc.AgentNotifierApi(topics.AGENT) self.callbacks = rpc.RpcCallbacks(self.notifier, self.type_manager) net_arg = {pnet.NETWORK_TYPE: 'vxlan', pnet.SEGMENTATION_ID: '1'} self._network = self._make_network(self.fmt, 'net1', True, arg_list=(pnet.NETWORK_TYPE, pnet.SEGMENTATION_ID,), **net_arg) net_arg = {pnet.NETWORK_TYPE: 'vlan', pnet.PHYSICAL_NETWORK: 'physnet1', pnet.SEGMENTATION_ID: '2'} self._network2 = self._make_network(self.fmt, 'net2', True, arg_list=(pnet.NETWORK_TYPE, pnet.PHYSICAL_NETWORK, pnet.SEGMENTATION_ID,), **net_arg) net_arg = {pnet.NETWORK_TYPE: 'flat', pnet.PHYSICAL_NETWORK: 'noagent'} self._network3 = self._make_network(self.fmt, 'net3', True, arg_list=(pnet.NETWORK_TYPE, pnet.PHYSICAL_NETWORK,), **net_arg) notifier_patch = mock.patch(NOTIFIER) notifier_patch.start() self.fanout_topic = topics.get_topic_name(topics.AGENT, topics.L2POPULATION, topics.UPDATE) fanout = ('neutron.plugins.ml2.drivers.l2pop.rpc.' 'L2populationAgentNotifyAPI._notification_fanout') fanout_patch = mock.patch(fanout) self.mock_fanout = fanout_patch.start() cast = ('neutron.plugins.ml2.drivers.l2pop.rpc.' 
'L2populationAgentNotifyAPI._notification_host') cast_patch = mock.patch(cast) self.mock_cast = cast_patch.start() uptime = ('neutron.plugins.ml2.drivers.l2pop.db.get_agent_uptime') uptime_patch = mock.patch(uptime, return_value=190) uptime_patch.start() def _setup_l3(self): notif_p = mock.patch.object(l3_hamode_db.L3_HA_NAT_db_mixin, '_notify_ha_interfaces_updated') self.notif_m = notif_p.start() self.plugin = FakeL3PluginWithAgents() self._register_ml2_agents() self._register_l3_agents() def _register_l3_agents(self): self.agent1 = helpers.register_l3_agent(host=HOST) self.agent2 = helpers.register_l3_agent(host=HOST_2) def _register_ml2_agents(self): helpers.register_ovs_agent(host=HOST, tunneling_ip='20.0.0.1') helpers.register_ovs_agent(host=HOST_2, tunneling_ip='20.0.0.2') helpers.register_ovs_agent(host=HOST_3, tunneling_ip='20.0.0.3', tunnel_types=[]) helpers.register_ovs_agent(host=HOST_4, tunneling_ip='20.0.0.4') helpers.register_ovs_agent(host=HOST_5, tunneling_ip='20.0.0.5', binary='neutron-fake-agent', tunnel_types=[], interface_mappings={'physnet1': 'eth9'}, agent_type=constants.AGENT_TYPE_OFA, l2pop_network_types=['vlan']) def test_port_info_compare(self): # An assumption the code makes is that PortInfo compares equal to # equivalent regular tuples. self.assertEqual(("mac", "ip"), l2pop_rpc.PortInfo("mac", "ip")) flooding_entry = l2pop_rpc.PortInfo(*constants.FLOODING_ENTRY) self.assertEqual(constants.FLOODING_ENTRY, flooding_entry) def test__unmarshall_fdb_entries(self): entries = {'foouuid': { 'segment_id': 1001, 'ports': {'192.168.0.10': [['00:00:00:00:00:00', '0.0.0.0'], ['fa:16:3e:ff:8c:0f', '10.0.0.6']]}, 'network_type': 'vxlan'}} entries['chg_ip'] = { 'foouuid': { '192.168.0.1': {'before': [['fa:16:3e:ff:8c:0f', '10.0.0.6']], 'after': [['fa:16:3e:ff:8c:0f', '10.0.0.7']]}, '192.168.0.2': {'before': [['fa:16:3e:ff:8c:0e', '10.0.0.8']]} }, 'foouuid2': { '192.168.0.1': {'before': [['ff:16:3e:ff:8c:0e', '1.0.0.8']]} } } mixin = l2population_rpc.L2populationRpcCallBackMixin entries = mixin._unmarshall_fdb_entries(entries) port_info_list = entries['foouuid']['ports']['192.168.0.10'] # Check that the lists have been properly converted to PortInfo self.assertIsInstance(port_info_list[0], l2pop_rpc.PortInfo) self.assertIsInstance(port_info_list[1], l2pop_rpc.PortInfo) self.assertEqual(('00:00:00:00:00:00', '0.0.0.0'), port_info_list[0]) self.assertEqual(('fa:16:3e:ff:8c:0f', '10.0.0.6'), port_info_list[1]) agt1 = entries['chg_ip']['foouuid']['192.168.0.1'] self.assertIsInstance(agt1['before'][0], l2pop_rpc.PortInfo) self.assertIsInstance(agt1['after'][0], l2pop_rpc.PortInfo) self.assertEqual(('fa:16:3e:ff:8c:0f', '10.0.0.6'), agt1['before'][0]) self.assertEqual(('fa:16:3e:ff:8c:0f', '10.0.0.7'), agt1['after'][0]) agt1_net2 = entries['chg_ip']['foouuid2']['192.168.0.1'] self.assertEqual(('ff:16:3e:ff:8c:0e', '1.0.0.8'), agt1_net2['before'][0]) self.assertIsInstance(agt1_net2['before'][0], l2pop_rpc.PortInfo) agt2 = entries['chg_ip']['foouuid']['192.168.0.2'] self.assertIsInstance(agt2['before'][0], l2pop_rpc.PortInfo) self.assertEqual(('fa:16:3e:ff:8c:0e', '10.0.0.8'), agt2['before'][0]) def test_portinfo_marshalled_as_list(self): entry = ['fa:16:3e:ff:8c:0f', '10.0.0.6'] payload = {'netuuid': {'ports': {'1': [l2pop_rpc.PortInfo(*entry)]}}} result = jsonutils.loads(jsonutils.dumps(payload)) self.assertEqual(entry, result['netuuid']['ports']['1'][0]) def _create_router(self, ha=True, tenant_id='tenant1', distributed=None, ctx=None): if ctx is None: ctx = self.adminContext 
ctx.tenant_id = tenant_id router = {'name': TEST_ROUTER_ID, 'admin_state_up': True, 'tenant_id': ctx.tenant_id} if ha is not None: router['ha'] = ha if distributed is not None: router['distributed'] = distributed return self.plugin.create_router(ctx, {'router': router}) def _bind_router(self, router_id): with self.adminContext.session.begin(subtransactions=True): scheduler = l3_agent_scheduler.ChanceScheduler() filters = {'agent_type': [constants.AGENT_TYPE_L3]} agents_db = self.plugin.get_agents_db(self.adminContext, filters=filters) scheduler._bind_ha_router_to_agents( self.plugin, self.adminContext, router_id, agents_db) self._bind_ha_network_ports(router_id) def _bind_ha_network_ports(self, router_id): port_bindings = self.plugin.get_ha_router_port_bindings( self.adminContext, [router_id]) plugin = manager.NeutronManager.get_plugin() for port_binding in port_bindings: filters = {'id': [port_binding.port_id]} port = plugin.get_ports(self.adminContext, filters=filters)[0] if port_binding.l3_agent_id == self.agent1['id']: port[portbindings.HOST_ID] = self.agent1['host'] else: port[portbindings.HOST_ID] = self.agent2['host'] plugin.update_port(self.adminContext, port['id'], {attributes.PORT: port}) def _get_first_interface(self, net_id, router_id): plugin = manager.NeutronManager.get_plugin() device_filter = {'device_id': [router_id], 'device_owner': [constants.DEVICE_OWNER_ROUTER_INTF]} return plugin.get_ports(self.adminContext, filters=device_filter)[0] def _add_router_interface(self, subnet, router, host): interface_info = {'subnet_id': subnet['id']} self.plugin.add_router_interface(self.adminContext, router['id'], interface_info) self.plugin.update_routers_states( self.adminContext, {router['id']: constants.HA_ROUTER_STATE_ACTIVE}, host) port = self._get_first_interface(subnet['network_id'], router['id']) self.mock_cast.reset_mock() self.mock_fanout.reset_mock() self.callbacks.update_device_up(self.adminContext, agent_id=host, device=port['id'], host=host) return port def _create_ha_router(self): self._setup_l3() router = self._create_router() self._bind_router(router['id']) return router def _verify_remove_fdb(self, expected, agent_id, device, host=None): self.mock_fanout.reset_mock() self.callbacks.update_device_down(self.adminContext, agent_id=host, device=device, host=host) self.mock_fanout.assert_called_with( mock.ANY, 'remove_fdb_entries', expected) def test_other_agents_get_flood_entries_for_ha_agents(self): # First HA router port is added on HOST and HOST2, then network port # is added on HOST4. 
# HOST4 should get flood entries for HOST1 and HOST2 router = self._create_ha_router() service_plugins = manager.NeutronManager.get_service_plugins() service_plugins[service_constants.L3_ROUTER_NAT] = self.plugin with self.subnet(network=self._network, enable_dhcp=False) as snet, \ mock.patch('neutron.manager.NeutronManager.get_service_plugins', return_value=service_plugins): subnet = snet['subnet'] port = self._add_router_interface(subnet, router, HOST) host_arg = {portbindings.HOST_ID: HOST_4, 'admin_state_up': True} with self.port(subnet=snet, device_owner=DEVICE_OWNER_COMPUTE, arg_list=(portbindings.HOST_ID,), **host_arg) as port1: p1 = port1['port'] device1 = 'tap' + p1['id'] self.mock_cast.reset_mock() self.mock_fanout.reset_mock() self.callbacks.update_device_up( self.adminContext, agent_id=HOST_4, device=device1) cast_expected = { port['network_id']: { 'ports': {'20.0.0.1': [constants.FLOODING_ENTRY], '20.0.0.2': [constants.FLOODING_ENTRY]}, 'network_type': 'vxlan', 'segment_id': 1}} self.assertEqual(1, self.mock_cast.call_count) self.mock_cast.assert_called_with( mock.ANY, 'add_fdb_entries', cast_expected, HOST_4) def test_delete_ha_port(self): # First network port is added on HOST, and then HA router port # is added on HOST and HOST2. # Remove_fdb should carry flood entry of only HOST2 and not HOST router = self._create_ha_router() service_plugins = manager.NeutronManager.get_service_plugins() service_plugins[service_constants.L3_ROUTER_NAT] = self.plugin with self.subnet(network=self._network, enable_dhcp=False) as snet, \ mock.patch('neutron.manager.NeutronManager.get_service_plugins', return_value=service_plugins): host_arg = {portbindings.HOST_ID: HOST, 'admin_state_up': True} with self.port(subnet=snet, device_owner=DEVICE_OWNER_COMPUTE, arg_list=(portbindings.HOST_ID,), **host_arg) as port1: p1 = port1['port'] device1 = 'tap' + p1['id'] self.callbacks.update_device_up(self.adminContext, agent_id=HOST, device=device1) subnet = snet['subnet'] port = self._add_router_interface(subnet, router, HOST) expected = {port['network_id']: {'ports': {'20.0.0.2': [constants.FLOODING_ENTRY]}, 'network_type': 'vxlan', 'segment_id': 1}} self.mock_fanout.reset_mock() interface_info = {'subnet_id': subnet['id']} self.plugin.remove_router_interface(self.adminContext, router['id'], interface_info) self.mock_fanout.assert_called_with( mock.ANY, 'remove_fdb_entries', expected) def test_ha_agents_get_other_fdb(self): # First network port is added on HOST4, then HA router port is # added on HOST and HOST2. # Both HA agents should create tunnels to HOST4 and among themselves. # Both HA agents should be notified to other agents. 
router = self._create_ha_router() service_plugins = manager.NeutronManager.get_service_plugins() service_plugins[service_constants.L3_ROUTER_NAT] = self.plugin with self.subnet(network=self._network, enable_dhcp=False) as snet, \ mock.patch('neutron.manager.NeutronManager.get_service_plugins', return_value=service_plugins): host_arg = {portbindings.HOST_ID: HOST_4, 'admin_state_up': True} with self.port(subnet=snet, device_owner=DEVICE_OWNER_COMPUTE, arg_list=(portbindings.HOST_ID,), **host_arg) as port1: p1 = port1['port'] device1 = 'tap' + p1['id'] self.callbacks.update_device_up( self.adminContext, agent_id=HOST_4, device=device1) p1_ips = [p['ip_address'] for p in p1['fixed_ips']] subnet = snet['subnet'] port = self._add_router_interface(subnet, router, HOST) fanout_expected = {port['network_id']: { 'ports': {'20.0.0.1': [constants.FLOODING_ENTRY]}, 'network_type': 'vxlan', 'segment_id': 1}} cast_expected_host = {port['network_id']: { 'ports': { '20.0.0.4': [constants.FLOODING_ENTRY, l2pop_rpc.PortInfo(p1['mac_address'], p1_ips[0])], '20.0.0.2': [constants.FLOODING_ENTRY]}, 'network_type': 'vxlan', 'segment_id': 1}} self.mock_cast.assert_called_with( mock.ANY, 'add_fdb_entries', cast_expected_host, HOST) self.mock_fanout.assert_called_with( mock.ANY, 'add_fdb_entries', fanout_expected) self.mock_cast.reset_mock() self.mock_fanout.reset_mock() self.callbacks.update_device_up( self.adminContext, agent_id=HOST_2, device=port['id'], host=HOST_2) cast_expected_host2 = {port['network_id']: { 'ports': { '20.0.0.4': [constants.FLOODING_ENTRY, l2pop_rpc.PortInfo(p1['mac_address'], p1_ips[0])], '20.0.0.1': [constants.FLOODING_ENTRY]}, 'network_type': 'vxlan', 'segment_id': 1}} fanout_expected = {port['network_id']: { 'ports': {'20.0.0.2': [constants.FLOODING_ENTRY]}, 'network_type': 'vxlan', 'segment_id': 1}} self.mock_cast.assert_called_with( mock.ANY, 'add_fdb_entries', cast_expected_host2, HOST_2) self.mock_fanout.assert_called_with( mock.ANY, 'add_fdb_entries', fanout_expected) def test_fdb_add_called(self): self._register_ml2_agents() with self.subnet(network=self._network) as subnet: host_arg = {portbindings.HOST_ID: HOST} with self.port(subnet=subnet, device_owner=DEVICE_OWNER_COMPUTE, arg_list=(portbindings.HOST_ID,), **host_arg) as port1: with self.port(subnet=subnet, arg_list=(portbindings.HOST_ID,), **host_arg): p1 = port1['port'] device = 'tap' + p1['id'] self.mock_fanout.reset_mock() self.callbacks.update_device_up(self.adminContext, agent_id=HOST, device=device) p1_ips = [p['ip_address'] for p in p1['fixed_ips']] expected = {p1['network_id']: {'ports': {'20.0.0.1': [constants.FLOODING_ENTRY, l2pop_rpc.PortInfo( p1['mac_address'], p1_ips[0])]}, 'network_type': 'vxlan', 'segment_id': 1}} self.mock_fanout.assert_called_with( mock.ANY, 'add_fdb_entries', expected) def test_fdb_add_not_called_type_local(self): self._register_ml2_agents() with self.subnet(network=self._network) as subnet: host_arg = {portbindings.HOST_ID: HOST + '_3'} with self.port(subnet=subnet, arg_list=(portbindings.HOST_ID,), **host_arg) as port1: with self.port(subnet=subnet, arg_list=(portbindings.HOST_ID,), **host_arg): p1 = port1['port'] device = 'tap' + p1['id'] self.mock_fanout.reset_mock() self.callbacks.update_device_up(self.adminContext, agent_id=HOST, device=device) self.assertFalse(self.mock_fanout.called) def test_fdb_add_called_for_l2pop_network_types(self): self._register_ml2_agents() host = HOST + '_5' with self.subnet(network=self._network2) as subnet: host_arg = {portbindings.HOST_ID: host} with 
self.port(subnet=subnet, device_owner=DEVICE_OWNER_COMPUTE, arg_list=(portbindings.HOST_ID,), **host_arg) as port1: with self.port(subnet=subnet, arg_list=(portbindings.HOST_ID,), **host_arg): p1 = port1['port'] device = 'tap' + p1['id'] self.mock_fanout.reset_mock() self.callbacks.update_device_up(self.adminContext, agent_id=host, device=device) p1_ips = [p['ip_address'] for p in p1['fixed_ips']] expected = {p1['network_id']: {'ports': {'20.0.0.5': [constants.FLOODING_ENTRY, l2pop_rpc.PortInfo( p1['mac_address'], p1_ips[0])]}, 'network_type': 'vlan', 'segment_id': 2}} self.mock_fanout.assert_called_with( mock.ANY, 'add_fdb_entries', expected) def test_fdb_called_for_active_ports(self): self._register_ml2_agents() with self.subnet(network=self._network) as subnet: host_arg = {portbindings.HOST_ID: HOST} with self.port(subnet=subnet, device_owner=DEVICE_OWNER_COMPUTE, arg_list=(portbindings.HOST_ID,), **host_arg) as port1: host_arg = {portbindings.HOST_ID: HOST + '_2'} with self.port(subnet=subnet, device_owner=DEVICE_OWNER_COMPUTE, arg_list=(portbindings.HOST_ID,), **host_arg): p1 = port1['port'] device1 = 'tap' + p1['id'] self.mock_cast.reset_mock() self.mock_fanout.reset_mock() self.callbacks.update_device_up(self.adminContext, agent_id=HOST, device=device1) p1_ips = [p['ip_address'] for p in p1['fixed_ips']] self.assertFalse(self.mock_cast.called) expected2 = {p1['network_id']: {'ports': {'20.0.0.1': [constants.FLOODING_ENTRY, l2pop_rpc.PortInfo( p1['mac_address'], p1_ips[0])]}, 'network_type': 'vxlan', 'segment_id': 1}} self.mock_fanout.assert_called_with( mock.ANY, 'add_fdb_entries', expected2) def test_fdb_add_two_agents(self): self._register_ml2_agents() with self.subnet(network=self._network) as subnet: host_arg = {portbindings.HOST_ID: HOST, 'admin_state_up': True} with self.port(subnet=subnet, device_owner=DEVICE_OWNER_COMPUTE, arg_list=(portbindings.HOST_ID, 'admin_state_up',), **host_arg) as port1: host_arg = {portbindings.HOST_ID: HOST + '_2', 'admin_state_up': True} with self.port(subnet=subnet, device_owner=DEVICE_OWNER_COMPUTE, arg_list=(portbindings.HOST_ID, 'admin_state_up',), **host_arg) as port2: p1 = port1['port'] p2 = port2['port'] device1 = 'tap' + p1['id'] device2 = 'tap' + p2['id'] self.mock_cast.reset_mock() self.mock_fanout.reset_mock() self.callbacks.update_device_up(self.adminContext, agent_id=HOST + '_2', device=device2) self.callbacks.update_device_up(self.adminContext, agent_id=HOST, device=device1) p1_ips = [p['ip_address'] for p in p1['fixed_ips']] p2_ips = [p['ip_address'] for p in p2['fixed_ips']] expected1 = {p1['network_id']: {'ports': {'20.0.0.2': [constants.FLOODING_ENTRY, l2pop_rpc.PortInfo( p2['mac_address'], p2_ips[0])]}, 'network_type': 'vxlan', 'segment_id': 1}} self.mock_cast.assert_called_with(mock.ANY, 'add_fdb_entries', expected1, HOST) expected2 = {p1['network_id']: {'ports': {'20.0.0.1': [constants.FLOODING_ENTRY, l2pop_rpc.PortInfo( p1['mac_address'], p1_ips[0])]}, 'network_type': 'vxlan', 'segment_id': 1}} self.mock_fanout.assert_called_with( mock.ANY, 'add_fdb_entries', expected2) def test_fdb_add_called_two_networks(self): self._register_ml2_agents() with self.subnet(network=self._network) as subnet: host_arg = {portbindings.HOST_ID: HOST + '_2'} with self.port(subnet=subnet, device_owner=DEVICE_OWNER_COMPUTE, arg_list=(portbindings.HOST_ID,), **host_arg) as port1: with self.subnet(cidr='10.1.0.0/24') as subnet2: with self.port(subnet=subnet2, device_owner=DEVICE_OWNER_COMPUTE, arg_list=(portbindings.HOST_ID,), **host_arg): host_arg = 
{portbindings.HOST_ID: HOST} with self.port(subnet=subnet, device_owner=DEVICE_OWNER_COMPUTE, arg_list=(portbindings.HOST_ID,), **host_arg) as port3: p1 = port1['port'] p3 = port3['port'] device1 = 'tap' + p1['id'] device3 = 'tap' + p3['id'] self.mock_cast.reset_mock() self.mock_fanout.reset_mock() self.callbacks.update_device_up( self.adminContext, agent_id=HOST + '_2', device=device1) self.callbacks.update_device_up( self.adminContext, agent_id=HOST, device=device3) p1_ips = [p['ip_address'] for p in p1['fixed_ips']] expected1 = {p1['network_id']: {'ports': {'20.0.0.2': [constants.FLOODING_ENTRY, l2pop_rpc.PortInfo( p1['mac_address'], p1_ips[0])]}, 'network_type': 'vxlan', 'segment_id': 1}} self.mock_cast.assert_called_with( mock.ANY, 'add_fdb_entries', expected1, HOST) p3_ips = [p['ip_address'] for p in p3['fixed_ips']] expected2 = {p1['network_id']: {'ports': {'20.0.0.1': [constants.FLOODING_ENTRY, l2pop_rpc.PortInfo( p3['mac_address'], p3_ips[0])]}, 'network_type': 'vxlan', 'segment_id': 1}} self.mock_fanout.assert_called_with( mock.ANY, 'add_fdb_entries', expected2) def test_fdb_add_called_dualstack(self): self._register_ml2_agents() host_arg = {portbindings.HOST_ID: HOST, 'admin_state_up': True} with self.subnet(self._network) as subnet,\ self.subnet( self._network, cidr='2001:db8::/64', ip_version=6, gateway_ip='fe80::1', ipv6_address_mode=constants.IPV6_SLAAC) as subnet2: with self.port( subnet, fixed_ips=[{'subnet_id': subnet['subnet']['id']}, {'subnet_id': subnet2['subnet']['id']}], device_owner=DEVICE_OWNER_COMPUTE, arg_list=(portbindings.HOST_ID,), **host_arg ) as port: p1 = port['port'] device = 'tap' + p1['id'] self.mock_fanout.reset_mock() self.callbacks.update_device_up(self.adminContext, agent_id=HOST, device=device) p1_ips = [p['ip_address'] for p in p1['fixed_ips']] expected = {p1['network_id']: {'ports': {'20.0.0.1': [constants.FLOODING_ENTRY, l2pop_rpc.PortInfo( p1['mac_address'], p1_ips[0]), l2pop_rpc.PortInfo( p1['mac_address'], p1_ips[1])]}, 'network_type': 'vxlan', 'segment_id': 1}} self.mock_fanout.assert_called_with( mock.ANY, 'add_fdb_entries', expected) def test_update_port_down(self): self._register_ml2_agents() with self.subnet(network=self._network) as subnet: host_arg = {portbindings.HOST_ID: HOST} with self.port(subnet=subnet, device_owner=DEVICE_OWNER_COMPUTE, arg_list=(portbindings.HOST_ID,), **host_arg) as port1: with self.port(subnet=subnet, device_owner=DEVICE_OWNER_COMPUTE, arg_list=(portbindings.HOST_ID,), **host_arg) as port2: p2 = port2['port'] device2 = 'tap' + p2['id'] self.mock_fanout.reset_mock() self.callbacks.update_device_up(self.adminContext, agent_id=HOST, device=device2) p1 = port1['port'] device1 = 'tap' + p1['id'] self.callbacks.update_device_up(self.adminContext, agent_id=HOST, device=device1) self.mock_fanout.reset_mock() self.callbacks.update_device_down(self.adminContext, agent_id=HOST, device=device2) p2_ips = [p['ip_address'] for p in p2['fixed_ips']] expected = {p2['network_id']: {'ports': {'20.0.0.1': [l2pop_rpc.PortInfo( p2['mac_address'], p2_ips[0])]}, 'network_type': 'vxlan', 'segment_id': 1}} self.mock_fanout.assert_called_with( mock.ANY, 'remove_fdb_entries', expected) def test_update_port_down_last_port_up(self): self._register_ml2_agents() with self.subnet(network=self._network) as subnet: host_arg = {portbindings.HOST_ID: HOST} with self.port(subnet=subnet, device_owner=DEVICE_OWNER_COMPUTE, arg_list=(portbindings.HOST_ID,), **host_arg): with self.port(subnet=subnet, device_owner=DEVICE_OWNER_COMPUTE, 
arg_list=(portbindings.HOST_ID,), **host_arg) as port2: p2 = port2['port'] device2 = 'tap' + p2['id'] self.mock_fanout.reset_mock() self.callbacks.update_device_up(self.adminContext, agent_id=HOST, device=device2) self.callbacks.update_device_down(self.adminContext, agent_id=HOST, device=device2) p2_ips = [p['ip_address'] for p in p2['fixed_ips']] expected = {p2['network_id']: {'ports': {'20.0.0.1': [constants.FLOODING_ENTRY, l2pop_rpc.PortInfo( p2['mac_address'], p2_ips[0])]}, 'network_type': 'vxlan', 'segment_id': 1}} self.mock_fanout.assert_called_with( mock.ANY, 'remove_fdb_entries', expected) def test_delete_port(self): self._register_ml2_agents() with self.subnet(network=self._network) as subnet: host_arg = {portbindings.HOST_ID: HOST} with self.port(subnet=subnet, device_owner=DEVICE_OWNER_COMPUTE, arg_list=(portbindings.HOST_ID,), **host_arg) as port: p1 = port['port'] device = 'tap' + p1['id'] self.mock_fanout.reset_mock() self.callbacks.update_device_up(self.adminContext, agent_id=HOST, device=device) with self.port(subnet=subnet, device_owner=DEVICE_OWNER_COMPUTE, arg_list=(portbindings.HOST_ID,), **host_arg) as port2: p2 = port2['port'] device1 = 'tap' + p2['id'] self.mock_fanout.reset_mock() self.callbacks.update_device_up(self.adminContext, agent_id=HOST, device=device1) self._delete('ports', port2['port']['id']) p2_ips = [p['ip_address'] for p in p2['fixed_ips']] expected = {p2['network_id']: {'ports': {'20.0.0.1': [l2pop_rpc.PortInfo( p2['mac_address'], p2_ips[0])]}, 'network_type': 'vxlan', 'segment_id': 1}} self.mock_fanout.assert_any_call( mock.ANY, 'remove_fdb_entries', expected) def test_delete_port_last_port_up(self): self._register_ml2_agents() with self.subnet(network=self._network) as subnet: host_arg = {portbindings.HOST_ID: HOST} with self.port(subnet=subnet, device_owner=DEVICE_OWNER_COMPUTE, arg_list=(portbindings.HOST_ID,), **host_arg): with self.port(subnet=subnet, device_owner=DEVICE_OWNER_COMPUTE, arg_list=(portbindings.HOST_ID,), **host_arg) as port: p1 = port['port'] device = 'tap' + p1['id'] self.callbacks.update_device_up(self.adminContext, agent_id=HOST, device=device) self._delete('ports', port['port']['id']) p1_ips = [p['ip_address'] for p in p1['fixed_ips']] expected = {p1['network_id']: {'ports': {'20.0.0.1': [constants.FLOODING_ENTRY, l2pop_rpc.PortInfo( p1['mac_address'], p1_ips[0])]}, 'network_type': 'vxlan', 'segment_id': 1}} self.mock_fanout.assert_any_call( mock.ANY, 'remove_fdb_entries', expected) def test_mac_addr_changed(self): self._register_ml2_agents() with self.subnet(network=self._network) as subnet: host_arg = {portbindings.HOST_ID: HOST + '_5'} with self.port(subnet=subnet, device_owner=DEVICE_OWNER_COMPUTE, arg_list=(portbindings.HOST_ID,), **host_arg) as port1: p1 = port1['port'] self.mock_fanout.reset_mock() device = 'tap' + p1['id'] old_mac = p1['mac_address'] mac = old_mac.split(':') mac[5] = '01' if mac[5] != '01' else '00' new_mac = ':'.join(mac) data = {'port': {'mac_address': new_mac, portbindings.HOST_ID: HOST}} req = self.new_update_request('ports', data, p1['id']) res = self.deserialize(self.fmt, req.get_response(self.api)) self.assertIn('port', res) self.assertEqual(new_mac, res['port']['mac_address']) # port was not bound before, so no fdb call expected yet self.assertFalse(self.mock_fanout.called) self.callbacks.update_device_up(self.adminContext, agent_id=HOST, device=device) self.assertEqual(1, self.mock_fanout.call_count) add_expected = { p1['network_id']: { 'segment_id': 1, 'network_type': 'vxlan', 'ports': { 
'20.0.0.1': [ l2pop_rpc.PortInfo('00:00:00:00:00:00', '0.0.0.0'), l2pop_rpc.PortInfo(new_mac, '10.0.0.2') ] } } } self.mock_fanout.assert_called_with( mock.ANY, 'add_fdb_entries', add_expected) def test_fixed_ips_changed(self): self._register_ml2_agents() with self.subnet(network=self._network) as subnet: host_arg = {portbindings.HOST_ID: HOST} with self.port(subnet=subnet, cidr='10.0.0.0/24', device_owner=DEVICE_OWNER_COMPUTE, arg_list=(portbindings.HOST_ID,), **host_arg) as port1: p1 = port1['port'] device = 'tap' + p1['id'] self.callbacks.update_device_up(self.adminContext, agent_id=HOST, device=device) self.mock_fanout.reset_mock() data = {'port': {'fixed_ips': [{'ip_address': '10.0.0.2'}, {'ip_address': '10.0.0.10'}]}} req = self.new_update_request('ports', data, p1['id']) res = self.deserialize(self.fmt, req.get_response(self.api)) ips = res['port']['fixed_ips'] self.assertEqual(2, len(ips)) add_expected = {'chg_ip': {p1['network_id']: {'20.0.0.1': {'after': [(p1['mac_address'], '10.0.0.10')]}}}} self.mock_fanout.assert_any_call( mock.ANY, 'update_fdb_entries', add_expected) self.mock_fanout.reset_mock() data = {'port': {'fixed_ips': [{'ip_address': '10.0.0.2'}, {'ip_address': '10.0.0.16'}]}} req = self.new_update_request('ports', data, p1['id']) res = self.deserialize(self.fmt, req.get_response(self.api)) ips = res['port']['fixed_ips'] self.assertEqual(2, len(ips)) upd_expected = {'chg_ip': {p1['network_id']: {'20.0.0.1': {'before': [(p1['mac_address'], '10.0.0.10')], 'after': [(p1['mac_address'], '10.0.0.16')]}}}} self.mock_fanout.assert_any_call( mock.ANY, 'update_fdb_entries', upd_expected) self.mock_fanout.reset_mock() data = {'port': {'fixed_ips': [{'ip_address': '10.0.0.16'}]}} req = self.new_update_request('ports', data, p1['id']) res = self.deserialize(self.fmt, req.get_response(self.api)) ips = res['port']['fixed_ips'] self.assertEqual(1, len(ips)) del_expected = {'chg_ip': {p1['network_id']: {'20.0.0.1': {'before': [(p1['mac_address'], '10.0.0.2')]}}}} self.mock_fanout.assert_any_call( mock.ANY, 'update_fdb_entries', del_expected) def test_no_fdb_updates_without_port_updates(self): self._register_ml2_agents() with self.subnet(network=self._network) as subnet: host_arg = {portbindings.HOST_ID: HOST} with self.port(subnet=subnet, cidr='10.0.0.0/24', device_owner=DEVICE_OWNER_COMPUTE, arg_list=(portbindings.HOST_ID,), **host_arg) as port1: p1 = port1['port'] device = 'tap' + p1['id'] self.callbacks.update_device_up(self.adminContext, agent_id=HOST, device=device) p1['status'] = 'ACTIVE' self.mock_fanout.reset_mock() plugin = manager.NeutronManager.get_plugin() plugin.update_port(self.adminContext, p1['id'], port1) self.assertFalse(self.mock_fanout.called) def test_get_device_details_port_id(self): self._register_ml2_agents() host_arg = {portbindings.HOST_ID: HOST} with self.port(arg_list=(portbindings.HOST_ID,), **host_arg) as port: port_id = port['port']['id'] # ensure various formats all result in correct port_id formats = ['tap' + port_id[0:8], port_id, port['port']['mac_address']] for device in formats: details = self.callbacks.get_device_details( self.adminContext, device=device, agent_id=HOST_2) self.assertEqual(port_id, details['port_id']) def _update_and_check_portbinding(self, port_id, host_id): data = {'port': {portbindings.HOST_ID: host_id}} req = self.new_update_request('ports', data, port_id) res = self.deserialize(self.fmt, req.get_response(self.api)) self.assertEqual(host_id, res['port'][portbindings.HOST_ID]) def _test_host_changed(self, twice): 
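        # Overview (added comment; inferred from the body below, not from
        # upstream docs): simulate a port migrating between L2 agent hosts.
        # The port first comes up on HOST; with twice=True it is also rebound
        # to HOST_4 and brought up there, so the final rebind to HOST_2 must
        # fan out remove_fdb_entries against the tunnel IP of whichever host
        # last reported the device up ('20.0.0.1' for HOST, '20.0.0.4' for
        # HOST_4).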
self._register_ml2_agents() with self.subnet(network=self._network) as subnet: host_arg = {portbindings.HOST_ID: HOST} with self.port(subnet=subnet, cidr='10.0.0.0/24', device_owner=DEVICE_OWNER_COMPUTE, arg_list=(portbindings.HOST_ID,), **host_arg) as port1: tunnel_ip = '20.0.0.1' p1 = port1['port'] device1 = 'tap' + p1['id'] self.callbacks.update_device_up( self.adminContext, agent_id=HOST, device=device1) if twice: tunnel_ip = '20.0.0.4' self._update_and_check_portbinding(p1['id'], HOST_4) self.callbacks.update_device_up(self.adminContext, agent_id=HOST_4, device=device1) self.mock_fanout.reset_mock() self._update_and_check_portbinding(p1['id'], HOST_2) p1_ips = [p['ip_address'] for p in p1['fixed_ips']] expected = {p1['network_id']: {'ports': {tunnel_ip: [constants.FLOODING_ENTRY, l2pop_rpc.PortInfo( p1['mac_address'], p1_ips[0])]}, 'network_type': 'vxlan', 'segment_id': 1}} self.mock_fanout.assert_called_with( mock.ANY, 'remove_fdb_entries', expected) def test_host_changed(self): self._test_host_changed(twice=False) def test_host_changed_twice(self): self._test_host_changed(twice=True) def test_delete_port_no_fdb_entries_with_ha_port(self): l2pop_mech = l2pop_mech_driver.L2populationMechanismDriver() l2pop_mech.L2PopulationAgentNotify = mock.Mock() l2pop_mech.rpc_ctx = mock.Mock() port = {'device_owner': l2pop_db.HA_ROUTER_PORTS[0]} context = mock.Mock() context.current = port with mock.patch.object(l2pop_mech, '_get_agent_fdb', return_value=None) as upd_port_down,\ mock.patch.object(l2pop_mech.L2PopulationAgentNotify, 'remove_fdb_entries'): l2pop_mech.delete_port_postcommit(context) self.assertTrue(upd_port_down.called) def test_delete_port_invokes_update_device_down(self): l2pop_mech = l2pop_mech_driver.L2populationMechanismDriver() l2pop_mech.L2PopulationAgentNotify = mock.Mock() l2pop_mech.rpc_ctx = mock.Mock() port = {'device_owner': ''} context = mock.Mock() context.current = port with mock.patch.object(l2pop_mech, '_get_agent_fdb', return_value=None) as upd_port_down,\ mock.patch.object(l2pop_mech.L2PopulationAgentNotify, 'remove_fdb_entries'): l2pop_mech.delete_port_postcommit(context) self.assertTrue(upd_port_down.called) def test_delete_unbound_port(self): l2pop_mech = l2pop_mech_driver.L2populationMechanismDriver() l2pop_mech.initialize() with self.port() as port: port_context = driver_context.PortContext( self.driver, self.context, port['port'], self.driver.get_network( self.context, port['port']['network_id']), None, None) # The point is to provide coverage and to assert that no exceptions # are raised. l2pop_mech.delete_port_postcommit(port_context) def test_fixed_ips_change_unbound_port_no_rpc(self): l2pop_mech = l2pop_mech_driver.L2populationMechanismDriver() l2pop_mech.initialize() l2pop_mech.L2populationAgentNotify = mock.Mock() with self.port() as port: port_context = driver_context.PortContext( self.driver, self.context, port['port'], self.driver.get_network( self.context, port['port']['network_id']), None, None) l2pop_mech._fixed_ips_changed( port_context, None, port['port'], (set(['10.0.0.1']), set())) # There's no need to send an RPC update if the IP address for an # unbound port changed. 
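        # (Added clarification, consistent with the l2pop behavior exercised
        # elsewhere in this file: an unbound port has no host binding and
        # therefore no agent FDB table that an update_fdb_entries RPC could
        # correct.)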
self.assertFalse( l2pop_mech.L2populationAgentNotify.update_fdb_entries.called) class TestL2PopulationMechDriver(base.BaseTestCase): def _test_get_tunnels(self, agent_ip, exclude_host=True): mech_driver = l2pop_mech_driver.L2populationMechanismDriver() agent = mock.Mock() agent.host = HOST network_ports = ((None, agent),) with mock.patch.object(l2pop_db, 'get_agent_ip', return_value=agent_ip): excluded_host = HOST + '-EXCLUDE' if exclude_host else HOST return mech_driver._get_tunnels(network_ports, excluded_host) def test_get_tunnels(self): tunnels = self._test_get_tunnels('20.0.0.1') self.assertIn('20.0.0.1', tunnels) def test_get_tunnels_no_ip(self): tunnels = self._test_get_tunnels(None) self.assertEqual(0, len(tunnels)) def test_get_tunnels_dont_exclude_host(self): tunnels = self._test_get_tunnels(None, exclude_host=False) self.assertEqual(0, len(tunnels)) def _test_create_agent_fdb(self, fdb_network_ports, agent_ips): mech_driver = l2pop_mech_driver.L2populationMechanismDriver() tunnel_network_ports, tunnel_agent = ( self._mock_network_ports(HOST + '1', [None])) agent_ips[tunnel_agent] = '10.0.0.1' def agent_ip_side_effect(agent): return agent_ips[agent] with mock.patch.object(l2pop_db, 'get_agent_ip', side_effect=agent_ip_side_effect),\ mock.patch.object(l2pop_db, 'get_nondistributed_active_network_ports', return_value=fdb_network_ports),\ mock.patch.object(l2pop_db, 'get_distributed_active_network_ports', return_value=tunnel_network_ports): session = mock.Mock() agent = mock.Mock() agent.host = HOST segment = {'segmentation_id': 1, 'network_type': 'vxlan'} return mech_driver._create_agent_fdb(session, agent, segment, 'network_id') def _mock_network_ports(self, host_name, bindings): agent = mock.Mock() agent.host = host_name return [(binding, agent) for binding in bindings], agent def test_create_agent_fdb(self): binding = mock.Mock() binding.port = {'mac_address': '00:00:DE:AD:BE:EF', 'fixed_ips': [{'ip_address': '1.1.1.1'}]} fdb_network_ports, fdb_agent = ( self._mock_network_ports(HOST + '2', [binding])) agent_ips = {fdb_agent: '20.0.0.1'} agent_fdb = self._test_create_agent_fdb(fdb_network_ports, agent_ips) result = agent_fdb['network_id'] expected_result = {'segment_id': 1, 'network_type': 'vxlan', 'ports': {'10.0.0.1': [constants.FLOODING_ENTRY], '20.0.0.1': [constants.FLOODING_ENTRY, l2pop_rpc.PortInfo( mac_address='00:00:DE:AD:BE:EF', ip_address='1.1.1.1')]}} self.assertEqual(expected_result, result) def test_create_agent_fdb_only_tunnels(self): agent_fdb = self._test_create_agent_fdb([], {}) result = agent_fdb['network_id'] expected_result = {'segment_id': 1, 'network_type': 'vxlan', 'ports': {'10.0.0.1': [constants.FLOODING_ENTRY]}} self.assertEqual(expected_result, result) def test_create_agent_fdb_concurrent_port_deletion(self): binding = mock.Mock() binding.port = {'mac_address': '00:00:DE:AD:BE:EF', 'fixed_ips': [{'ip_address': '1.1.1.1'}]} binding2 = mock.Mock() # the port was deleted binding2.port = None fdb_network_ports, fdb_agent = ( self._mock_network_ports(HOST + '2', [binding, binding2])) agent_ips = {fdb_agent: '20.0.0.1'} agent_fdb = self._test_create_agent_fdb(fdb_network_ports, agent_ips) result = agent_fdb['network_id'] expected_result = {'segment_id': 1, 'network_type': 'vxlan', 'ports': {'10.0.0.1': [constants.FLOODING_ENTRY], '20.0.0.1': [constants.FLOODING_ENTRY, l2pop_rpc.PortInfo( mac_address='00:00:DE:AD:BE:EF', ip_address='1.1.1.1')]}} self.assertEqual(expected_result, result) def test_update_port_precommit_mac_address_changed_raises(self): port = 
{'status': u'ACTIVE', 'device_owner': DEVICE_OWNER_COMPUTE, 'mac_address': u'12:34:56:78:4b:0e', 'id': u'1'} original_port = port.copy() original_port['mac_address'] = u'12:34:56:78:4b:0f' with mock.patch.object(driver_context.db, 'get_network_segments'): ctx = driver_context.PortContext(mock.Mock(), mock.Mock(), port, mock.MagicMock(), mock.Mock(), None, original_port=original_port) mech_driver = l2pop_mech_driver.L2populationMechanismDriver() with testtools.ExpectedException(ml2_exc.MechanismDriverError): mech_driver.update_port_precommit(ctx) neutron-8.4.0/neutron/tests/unit/plugins/ml2/drivers/l2pop/__init__.py0000664000567000056710000000000013044372736027166 0ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/tests/unit/plugins/ml2/drivers/l2pop/rpc_manager/0000775000567000056710000000000013044373210027331 5ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/tests/unit/plugins/ml2/drivers/l2pop/rpc_manager/test_l2population_rpc.py0000664000567000056710000003013513044372760034251 0ustar jenkinsjenkins00000000000000# Copyright (C) 2014 VA Linux Systems Japan K.K. # Copyright (C) 2014 Fumihiko Kakuma # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import mock from neutron.common import constants as n_const from neutron.tests.unit.plugins.ml2.drivers.l2pop.rpc_manager \ import l2population_rpc_base class TestL2populationRpcCallBackTunnelMixin( l2population_rpc_base.TestL2populationRpcCallBackTunnelMixinBase): def test_get_agent_ports_no_data(self): self.assertFalse( list(self.fakeagent.get_agent_ports(self.fdb_entries1, {}))) def test_get_agent_ports_non_existence_key_in_lvm(self): results = {} del self.local_vlan_map1[self.lvms[1].net] for lvm, agent_ports in self.fakeagent.get_agent_ports( self.fdb_entries1, self.local_vlan_map1): results[lvm] = agent_ports expected = { self.lvm1: { self.ports[0].ip: [(self.lvms[0].mac, self.lvms[0].ip)], self.local_ip: []}, self.lvm3: { self.ports[2].ip: [(self.lvms[2].mac, self.lvms[2].ip)], self.local_ip: []}, } self.assertEqual(expected, results) def test_get_agent_ports_no_agent_ports(self): results = {} self.fdb_entries1[self.lvms[1].net]['ports'] = {} for lvm, agent_ports in self.fakeagent.get_agent_ports( self.fdb_entries1, self.local_vlan_map1): results[lvm] = agent_ports expected = { self.lvm1: { self.ports[0].ip: [(self.lvms[0].mac, self.lvms[0].ip)], self.local_ip: []}, self.lvm2: {}, self.lvm3: { self.ports[2].ip: [(self.lvms[2].mac, self.lvms[2].ip)], self.local_ip: []}, } self.assertEqual(expected, results) def test_fdb_add_tun(self): with mock.patch.object(self.fakeagent, 'setup_tunnel_port'),\ mock.patch.object(self.fakeagent, 'add_fdb_flow' ) as mock_add_fdb_flow: self.fakeagent.fdb_add_tun('context', self.fakebr, self.lvm1, self.agent_ports, self._tunnel_port_lookup) expected = [ mock.call(self.fakebr, (self.lvms[0].mac, self.lvms[0].ip), self.ports[0].ip, self.lvm1, self.ports[0].ofport), mock.call(self.fakebr, (self.lvms[1].mac, self.lvms[1].ip), self.ports[1].ip, self.lvm1, self.ports[1].ofport), 
mock.call(self.fakebr, (self.lvms[2].mac, self.lvms[2].ip), self.ports[2].ip, self.lvm1, self.ports[2].ofport), ] self.assertEqual(sorted(expected), sorted(mock_add_fdb_flow.call_args_list)) def test_fdb_add_tun_non_existence_key_in_ofports(self): ofport = self.lvm1.network_type + '0a0a0a0a' del self.ofports[self.type_gre][self.ports[1].ip] with mock.patch.object(self.fakeagent, 'setup_tunnel_port', return_value=ofport ) as mock_setup_tunnel_port,\ mock.patch.object(self.fakeagent, 'add_fdb_flow' ) as mock_add_fdb_flow: self.fakeagent.fdb_add_tun('context', self.fakebr, self.lvm1, self.agent_ports, self._tunnel_port_lookup) mock_setup_tunnel_port.assert_called_once_with( self.fakebr, self.ports[1].ip, self.lvm1.network_type) expected = [ mock.call(self.fakebr, (self.lvms[0].mac, self.lvms[0].ip), self.ports[0].ip, self.lvm1, self.ports[0].ofport), mock.call(self.fakebr, (self.lvms[1].mac, self.lvms[1].ip), self.ports[1].ip, self.lvm1, ofport), mock.call(self.fakebr, (self.lvms[2].mac, self.lvms[2].ip), self.ports[2].ip, self.lvm1, self.ports[2].ofport), ] self.assertEqual(sorted(expected), sorted(mock_add_fdb_flow.call_args_list)) def test_fdb_add_tun_unavailable_ofport(self): del self.ofports[self.type_gre][self.ports[1].ip] with mock.patch.object(self.fakeagent, 'setup_tunnel_port', return_value=0 ) as mock_setup_tunnel_port,\ mock.patch.object(self.fakeagent, 'add_fdb_flow' ) as mock_add_fdb_flow: self.fakeagent.fdb_add_tun('context', self.fakebr, self.lvm1, self.agent_ports, self._tunnel_port_lookup) mock_setup_tunnel_port.assert_called_once_with( self.fakebr, self.ports[1].ip, self.lvm1.network_type) expected = [ mock.call(self.fakebr, (self.lvms[0].mac, self.lvms[0].ip), self.ports[0].ip, self.lvm1, self.ports[0].ofport), mock.call(self.fakebr, (self.lvms[2].mac, self.lvms[2].ip), self.ports[2].ip, self.lvm1, self.ports[2].ofport), ] self.assertEqual(sorted(expected), sorted(mock_add_fdb_flow.call_args_list)) def test_fdb_remove_tun(self): with mock.patch.object( self.fakeagent, 'del_fdb_flow') as mock_del_fdb_flow: self.fakeagent.fdb_remove_tun('context', self.fakebr, self.lvm1, self.agent_ports, self._tunnel_port_lookup) expected = [ mock.call(self.fakebr, (self.lvms[0].mac, self.lvms[0].ip), self.ports[0].ip, self.lvm1, self.ports[0].ofport), mock.call(self.fakebr, (self.lvms[1].mac, self.lvms[1].ip), self.ports[1].ip, self.lvm1, self.ports[1].ofport), mock.call(self.fakebr, (self.lvms[2].mac, self.lvms[2].ip), self.ports[2].ip, self.lvm1, self.ports[2].ofport), ] self.assertEqual(sorted(expected), sorted(mock_del_fdb_flow.call_args_list)) def test_fdb_remove_tun_flooding_entry(self): self.agent_ports[self.ports[1].ip] = [n_const.FLOODING_ENTRY] with mock.patch.object(self.fakeagent, 'del_fdb_flow' ) as mock_del_fdb_flow,\ mock.patch.object(self.fakeagent, 'cleanup_tunnel_port' ) as mock_cleanup_tunnel_port: self.fakeagent.fdb_remove_tun('context', self.fakebr, self.lvm1, self.agent_ports, self._tunnel_port_lookup) expected = [ mock.call(self.fakebr, (self.lvms[0].mac, self.lvms[0].ip), self.ports[0].ip, self.lvm1, self.ports[0].ofport), mock.call(self.fakebr, (n_const.FLOODING_ENTRY[0], n_const.FLOODING_ENTRY[1]), self.ports[1].ip, self.lvm1, self.ports[1].ofport), mock.call(self.fakebr, (self.lvms[2].mac, self.lvms[2].ip), self.ports[2].ip, self.lvm1, self.ports[2].ofport), ] self.assertEqual(sorted(expected), sorted(mock_del_fdb_flow.call_args_list)) mock_cleanup_tunnel_port.assert_called_once_with( self.fakebr, self.ports[1].ofport, self.lvm1.network_type) def 
test_fdb_remove_tun_non_existence_key_in_ofports(self): del self.ofports[self.type_gre][self.ports[1].ip] with mock.patch.object( self.fakeagent, 'del_fdb_flow') as mock_del_fdb_flow: self.fakeagent.fdb_remove_tun('context', self.fakebr, self.lvm1, self.agent_ports, self._tunnel_port_lookup) expected = [ mock.call(self.fakebr, (self.lvms[0].mac, self.lvms[0].ip), self.ports[0].ip, self.lvm1, self.ports[0].ofport), mock.call(self.fakebr, (self.lvms[2].mac, self.lvms[2].ip), self.ports[2].ip, self.lvm1, self.ports[2].ofport), ] self.assertEqual(sorted(expected), sorted(mock_del_fdb_flow.call_args_list)) def test_fdb_update(self): fake__fdb_chg_ip = mock.Mock() self.fakeagent._fdb_chg_ip = fake__fdb_chg_ip self.fakeagent.fdb_update('context', self.upd_fdb_entry1) fake__fdb_chg_ip.assert_called_once_with( 'context', self.upd_fdb_entry1_val) def test_fdb_update_non_existence_method(self): self.assertRaises(NotImplementedError, self.fakeagent.fdb_update, 'context', self.upd_fdb_entry1) def test__fdb_chg_ip(self): m_setup_entry_for_arp_reply = mock.Mock() self.fakeagent.setup_entry_for_arp_reply = m_setup_entry_for_arp_reply self.fakeagent.fdb_chg_ip_tun('context', self.fakebr, self.upd_fdb_entry1_val, self.local_ip, self.local_vlan_map1) expected = [ mock.call(self.fakebr, 'remove', self.lvm1.vlan, self.lvms[0].mac, self.lvms[0].ip), mock.call(self.fakebr, 'add', self.lvm1.vlan, self.lvms[1].mac, self.lvms[1].ip), mock.call(self.fakebr, 'remove', self.lvm1.vlan, self.lvms[0].mac, self.lvms[0].ip), mock.call(self.fakebr, 'add', self.lvm1.vlan, self.lvms[1].mac, self.lvms[1].ip), mock.call(self.fakebr, 'remove', self.lvm2.vlan, self.lvms[0].mac, self.lvms[0].ip), mock.call(self.fakebr, 'add', self.lvm2.vlan, self.lvms[2].mac, self.lvms[2].ip), ] m_setup_entry_for_arp_reply.assert_has_calls(expected, any_order=True) def test__fdb_chg_ip_no_lvm(self): m_setup_entry_for_arp_reply = mock.Mock() self.fakeagent.setup_entry_for_arp_reply = m_setup_entry_for_arp_reply self.fakeagent.fdb_chg_ip_tun( 'context', self.fakebr, self.upd_fdb_entry1, self.local_ip, {}) self.assertFalse(m_setup_entry_for_arp_reply.call_count) def test__fdb_chg_ip_ip_is_local_ip(self): upd_fdb_entry_val = { self.lvms[0].net: { self.local_ip: { 'before': [(self.lvms[0].mac, self.lvms[0].ip)], 'after': [(self.lvms[1].mac, self.lvms[1].ip)], }, }, } m_setup_entry_for_arp_reply = mock.Mock() self.fakeagent.setup_entry_for_arp_reply = m_setup_entry_for_arp_reply self.fakeagent.fdb_chg_ip_tun('context', self.fakebr, upd_fdb_entry_val, self.local_ip, self.local_vlan_map1) self.assertFalse(m_setup_entry_for_arp_reply.call_count) def test_fdb_chg_ip_tun_empty_before_after(self): upd_fdb_entry_val = { self.lvms[0].net: { self.local_ip: {}, }, } m_setup_entry_for_arp_reply = mock.Mock() self.fakeagent.setup_entry_for_arp_reply = m_setup_entry_for_arp_reply # passing non-local ip self.fakeagent.fdb_chg_ip_tun('context', self.fakebr, upd_fdb_entry_val, "8.8.8.8", self.local_vlan_map1) self.assertFalse(m_setup_entry_for_arp_reply.call_count) neutron-8.4.0/neutron/tests/unit/plugins/ml2/drivers/l2pop/rpc_manager/__init__.py0000664000567000056710000000000013044372736031444 0ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/tests/unit/plugins/ml2/drivers/l2pop/rpc_manager/l2population_rpc_base.py0000664000567000056710000001402613044372760034205 0ustar jenkinsjenkins00000000000000# Copyright (C) 2014 VA Linux Systems Japan K.K. # Copyright (C) 2014 Fumihiko Kakuma # All Rights Reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import collections import mock from neutron.plugins.ml2.drivers.l2pop import rpc as l2pop_rpc from neutron.plugins.ml2.drivers.l2pop.rpc_manager import l2population_rpc from neutron.plugins.ml2.drivers.openvswitch.agent import ovs_neutron_agent from neutron.tests import base class FakeNeutronAgent(l2population_rpc.L2populationRpcCallBackTunnelMixin): def fdb_add(self, context, fdb_entries): pass def fdb_remove(self, context, fdb_entries): pass def add_fdb_flow(self, br, port_info, remote_ip, lvm, ofport): pass def del_fdb_flow(self, br, port_info, remote_ip, lvm, ofport): pass def setup_tunnel_port(self, br, remote_ip, network_type): pass def cleanup_tunnel_port(self, br, tun_ofport, tunnel_type): pass def setup_entry_for_arp_reply(self, br, action, local_vid, mac_address, ip_address): pass class TestL2populationRpcCallBackTunnelMixinBase(base.BaseTestCase): def setUp(self): super(TestL2populationRpcCallBackTunnelMixinBase, self).setUp() self.fakeagent = FakeNeutronAgent() self.fakebr = mock.Mock() Port = collections.namedtuple('Port', 'ip, ofport') LVM = collections.namedtuple( 'LVM', 'net, vlan, phys, segid, mac, ip, vif, port') self.local_ip = '127.0.0.1' self.type_gre = 'gre' self.ports = [Port(ip='10.1.0.1', ofport='ofport1'), Port(ip='10.1.0.2', ofport='ofport2'), Port(ip='10.1.0.3', ofport='ofport3')] self.ofports = { self.type_gre: { self.ports[0].ip: self.ports[0].ofport, self.ports[1].ip: self.ports[1].ofport, self.ports[2].ip: self.ports[2].ofport, } } self.lvms = [LVM(net='net1', vlan=1, phys='phys1', segid='tun1', mac='mac1', ip='1.1.1.1', vif='vifid1', port='port1'), LVM(net='net2', vlan=2, phys='phys2', segid='tun2', mac='mac2', ip='2.2.2.2', vif='vifid2', port='port2'), LVM(net='net3', vlan=3, phys='phys3', segid='tun3', mac='mac3', ip='3.3.3.3', vif='vifid3', port='port3')] self.agent_ports = { self.ports[0].ip: [(self.lvms[0].mac, self.lvms[0].ip)], self.ports[1].ip: [(self.lvms[1].mac, self.lvms[1].ip)], self.ports[2].ip: [(self.lvms[2].mac, self.lvms[2].ip)], } self.fdb_entries1 = { self.lvms[0].net: { 'network_type': self.type_gre, 'segment_id': self.lvms[0].segid, 'ports': { self.local_ip: [], self.ports[0].ip: [(self.lvms[0].mac, self.lvms[0].ip)]}, }, self.lvms[1].net: { 'network_type': self.type_gre, 'segment_id': self.lvms[1].segid, 'ports': { self.local_ip: [], self.ports[1].ip: [(self.lvms[1].mac, self.lvms[1].ip)]}, }, self.lvms[2].net: { 'network_type': self.type_gre, 'segment_id': self.lvms[2].segid, 'ports': { self.local_ip: [], self.ports[2].ip: [(self.lvms[2].mac, self.lvms[2].ip)]}, }, } self.lvm1 = ovs_neutron_agent.LocalVLANMapping( self.lvms[0].vlan, self.type_gre, self.lvms[0].phys, self.lvms[0].segid, {self.lvms[0].vif: self.lvms[0].port}) self.lvm2 = ovs_neutron_agent.LocalVLANMapping( self.lvms[1].vlan, self.type_gre, self.lvms[1].phys, self.lvms[1].segid, {self.lvms[1].vif: self.lvms[1].port}) self.lvm3 = ovs_neutron_agent.LocalVLANMapping( self.lvms[2].vlan, self.type_gre, self.lvms[2].phys, self.lvms[2].segid, 
            {self.lvms[2].vif: self.lvms[2].port})
        self.local_vlan_map1 = {
            self.lvms[0].net: self.lvm1,
            self.lvms[1].net: self.lvm2,
            self.lvms[2].net: self.lvm3,
        }
        self.upd_fdb_entry1_val = {
            self.lvms[0].net: {
                self.ports[0].ip: {
                    'before': [l2pop_rpc.PortInfo(self.lvms[0].mac,
                                                  self.lvms[0].ip)],
                    'after': [l2pop_rpc.PortInfo(self.lvms[1].mac,
                                                 self.lvms[1].ip)],
                },
                self.ports[1].ip: {
                    'before': [l2pop_rpc.PortInfo(self.lvms[0].mac,
                                                  self.lvms[0].ip)],
                    'after': [l2pop_rpc.PortInfo(self.lvms[1].mac,
                                                 self.lvms[1].ip)],
                },
            },
            self.lvms[1].net: {
                self.ports[2].ip: {
                    'before': [l2pop_rpc.PortInfo(self.lvms[0].mac,
                                                  self.lvms[0].ip)],
                    'after': [l2pop_rpc.PortInfo(self.lvms[2].mac,
                                                 self.lvms[2].ip)],
                },
            },
        }
        self.upd_fdb_entry1 = {'chg_ip': self.upd_fdb_entry1_val}

    def _tunnel_port_lookup(self, network_type, remote_ip):
        return self.ofports[network_type].get(remote_ip)
neutron-8.4.0/neutron/tests/unit/plugins/ml2/drivers/l2pop/test_db.py0000664000567000056710000003060313044372760027064 0ustar jenkinsjenkins00000000000000# Copyright 2015 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from neutron.common import constants
from oslo_utils import uuidutils

from neutron.common import constants as n_const
from neutron.common import utils
from neutron import context
from neutron.db import l3_attrs_db
from neutron.db import l3_db
from neutron.db import l3_hamode_db
from neutron.db import models_v2
from neutron.extensions import portbindings
from neutron.plugins.ml2.drivers.l2pop import db as l2pop_db
from neutron.plugins.ml2 import models
from neutron.tests.common import helpers
from neutron.tests import tools
from neutron.tests.unit import testlib_api

HOST = helpers.HOST
HOST_2 = 'HOST_2'
HOST_3 = 'HOST_3'
HOST_2_TUNNELING_IP = '20.0.0.2'
HOST_3_TUNNELING_IP = '20.0.0.3'
TEST_ROUTER_ID = 'router_id'
TEST_NETWORK_ID = 'network_id'
TEST_HA_NETWORK_ID = 'ha_network_id'


class TestL2PopulationDBTestCase(testlib_api.SqlTestCase):
    def setUp(self):
        super(TestL2PopulationDBTestCase, self).setUp()
        self.ctx = context.get_admin_context()
        self._create_network()

    def _create_network(self, network_id=TEST_NETWORK_ID):
        with self.ctx.session.begin(subtransactions=True):
            self.ctx.session.add(models_v2.Network(id=network_id))

    def _create_router(self, distributed=True, ha=False):
        with self.ctx.session.begin(subtransactions=True):
            self.ctx.session.add(l3_db.Router(id=TEST_ROUTER_ID))
            self.ctx.session.add(l3_attrs_db.RouterExtraAttributes(
                router_id=TEST_ROUTER_ID, distributed=distributed, ha=ha))

    def _create_ha_router(self, distributed=False):
        helpers.register_l3_agent(HOST_2)
        helpers.register_ovs_agent(HOST_2, tunneling_ip=HOST_2_TUNNELING_IP)
        # Register the l3 agent on host3, which doesn't host any HA router.
        # Tests should verify that host3 is not an HA agent host.
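        # Resulting fixture topology (added sketch; derived from the
        # assertions in the tests below rather than from upstream docs):
        #   HOST   - agents registered by the calling test; HA port "active"
        #   HOST_2 - L3/OVS agents registered here; HA port "standby"
        #   HOST_3 - L3/OVS agents registered here; hosts no HA router port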
        helpers.register_l3_agent(HOST_3)
        helpers.register_ovs_agent(HOST_3, tunneling_ip=HOST_3_TUNNELING_IP)
        with self.ctx.session.begin(subtransactions=True):
            self.ctx.session.add(models_v2.Network(id=TEST_HA_NETWORK_ID))
        self._create_router(distributed=distributed, ha=True)
        for state, host in [(n_const.HA_ROUTER_STATE_ACTIVE, HOST),
                            (n_const.HA_ROUTER_STATE_STANDBY, HOST_2)]:
            self._setup_port_binding(
                network_id=TEST_HA_NETWORK_ID,
                device_owner=constants.DEVICE_OWNER_ROUTER_HA_INTF,
                device_id=TEST_ROUTER_ID,
                host_state=state,
                host=host)

    def get_l3_agent_by_host(self, agent_host):
        plugin = helpers.FakePlugin()
        return plugin._get_agent_by_type_and_host(
            self.ctx, constants.AGENT_TYPE_L3, agent_host)

    def test_get_agent_by_host(self):
        helpers.register_l3_agent()
        helpers.register_dhcp_agent()
        helpers.register_ovs_agent()
        agent = l2pop_db.get_agent_by_host(
            self.ctx.session, helpers.HOST)
        self.assertEqual(constants.AGENT_TYPE_OVS, agent.agent_type)

    def test_get_agent_by_host_no_candidate(self):
        helpers.register_l3_agent()
        helpers.register_dhcp_agent()
        agent = l2pop_db.get_agent_by_host(
            self.ctx.session, helpers.HOST)
        self.assertIsNone(agent)

    def _setup_port_binding(self, **kwargs):
        with self.ctx.session.begin(subtransactions=True):
            mac = utils.get_random_mac('fa:16:3e:00:00:00'.split(':'))
            port_id = uuidutils.generate_uuid()
            network_id = kwargs.get('network_id', TEST_NETWORK_ID)
            device_owner = kwargs.get('device_owner', '')
            device_id = kwargs.get('device_id', '')
            host = kwargs.get('host', helpers.HOST)
            self.ctx.session.add(models_v2.Port(
                id=port_id, network_id=network_id, mac_address=mac,
                admin_state_up=True, status=constants.PORT_STATUS_ACTIVE,
                device_id=device_id, device_owner=device_owner))
            port_binding_cls = models.PortBinding
            binding_kwarg = {'port_id': port_id,
                             'host': host,
                             'vif_type': portbindings.VIF_TYPE_UNBOUND,
                             'vnic_type': portbindings.VNIC_NORMAL}
            if device_owner == constants.DEVICE_OWNER_DVR_INTERFACE:
                port_binding_cls = models.DVRPortBinding
                binding_kwarg['router_id'] = TEST_ROUTER_ID
                binding_kwarg['status'] = constants.PORT_STATUS_DOWN
            self.ctx.session.add(port_binding_cls(**binding_kwarg))
            if network_id == TEST_HA_NETWORK_ID:
                agent = self.get_l3_agent_by_host(host)
                haport_bindings_cls = l3_hamode_db.L3HARouterAgentPortBinding
                habinding_kwarg = {'port_id': port_id,
                                   'router_id': device_id,
                                   'l3_agent_id': agent['id'],
                                   'state': kwargs.get(
                                       'host_state',
                                       n_const.HA_ROUTER_STATE_ACTIVE)}
                self.ctx.session.add(haport_bindings_cls(**habinding_kwarg))

    def test_get_dvr_active_network_ports(self):
        self._setup_port_binding(
            device_owner=constants.DEVICE_OWNER_DVR_INTERFACE)
        # Register an L2 agent plus a bunch of other agents on the same host
        helpers.register_l3_agent()
        helpers.register_dhcp_agent()
        helpers.register_ovs_agent()
        tunnel_network_ports = l2pop_db.get_distributed_active_network_ports(
            self.ctx.session, TEST_NETWORK_ID)
        self.assertEqual(1, len(tunnel_network_ports))
        _, agent = tunnel_network_ports[0]
        self.assertEqual(constants.AGENT_TYPE_OVS, agent.agent_type)

    def test_get_dvr_active_network_ports_no_candidate(self):
        self._setup_port_binding(
            device_owner=constants.DEVICE_OWNER_DVR_INTERFACE)
        # Register a bunch of non-L2 agents on the same host
        helpers.register_l3_agent()
        helpers.register_dhcp_agent()
        tunnel_network_ports = l2pop_db.get_distributed_active_network_ports(
            self.ctx.session, TEST_NETWORK_ID)
        self.assertEqual(0, len(tunnel_network_ports))

    def test_get_nondvr_active_network_ports(self):
        self._setup_port_binding(dvr=False)
        # Register an L2 agent plus a bunch of other agents on the same host
        helpers.register_l3_agent()
        helpers.register_dhcp_agent()
        helpers.register_ovs_agent()
        fdb_network_ports = l2pop_db.get_nondistributed_active_network_ports(
            self.ctx.session, TEST_NETWORK_ID)
        self.assertEqual(1, len(fdb_network_ports))
        _, agent = fdb_network_ports[0]
        self.assertEqual(constants.AGENT_TYPE_OVS, agent.agent_type)

    def test_get_nondvr_active_network_ports_no_candidate(self):
        self._setup_port_binding(dvr=False)
        # Register a bunch of non-L2 agents on the same host
        helpers.register_l3_agent()
        helpers.register_dhcp_agent()
        fdb_network_ports = l2pop_db.get_nondistributed_active_network_ports(
            self.ctx.session, TEST_NETWORK_ID)
        self.assertEqual(0, len(fdb_network_ports))

    def test__get_ha_router_interface_ids_with_ha_dvr_snat_port(self):
        helpers.register_dhcp_agent()
        helpers.register_l3_agent()
        helpers.register_ovs_agent()
        self._create_ha_router()
        self._setup_port_binding(
            device_owner=constants.DEVICE_OWNER_ROUTER_SNAT,
            device_id=TEST_ROUTER_ID)
        ha_iface_ids = l2pop_db._get_ha_router_interface_ids(
            self.ctx.session, TEST_NETWORK_ID)
        self.assertEqual(1, len(list(ha_iface_ids)))

    def test__get_ha_router_interface_ids_with_ha_replicated_port(self):
        helpers.register_dhcp_agent()
        helpers.register_l3_agent()
        helpers.register_ovs_agent()
        self._create_ha_router()
        self._setup_port_binding(
            device_owner=constants.DEVICE_OWNER_ROUTER_INTF,
            device_id=TEST_ROUTER_ID)
        ha_iface_ids = l2pop_db._get_ha_router_interface_ids(
            self.ctx.session, TEST_NETWORK_ID)
        self.assertEqual(1, len(list(ha_iface_ids)))

    def test__get_ha_router_interface_ids_with_no_ha_port(self):
        self._create_router()
        self._setup_port_binding(
            device_owner=constants.DEVICE_OWNER_ROUTER_SNAT,
            device_id=TEST_ROUTER_ID)
        ha_iface_ids = l2pop_db._get_ha_router_interface_ids(
            self.ctx.session, TEST_NETWORK_ID)
        self.assertEqual(0, len(list(ha_iface_ids)))

    def test_active_network_ports_with_dvr_snat_port(self):
        # Test to get agent hosting dvr snat port
        helpers.register_l3_agent()
        helpers.register_dhcp_agent()
        helpers.register_ovs_agent()
        # create DVR router
        self._create_router()
        # setup DVR snat port
        self._setup_port_binding(
            device_owner=constants.DEVICE_OWNER_ROUTER_SNAT,
            device_id=TEST_ROUTER_ID)
        helpers.register_dhcp_agent()
        fdb_network_ports = l2pop_db.get_nondistributed_active_network_ports(
            self.ctx.session, TEST_NETWORK_ID)
        self.assertEqual(1, len(fdb_network_ports))

    def test_active_network_ports_with_ha_dvr_snat_port(self):
        # Test to get HA agents hosting HA+DVR snat port
        helpers.register_dhcp_agent()
        helpers.register_l3_agent()
        helpers.register_ovs_agent()
        # create HA+DVR router
        self._create_ha_router()
        # setup HA snat port
        self._setup_port_binding(
            device_owner=constants.DEVICE_OWNER_ROUTER_SNAT,
            device_id=TEST_ROUTER_ID)
        fdb_network_ports = l2pop_db.get_nondistributed_active_network_ports(
            self.ctx.session, TEST_NETWORK_ID)
        self.assertEqual(0, len(fdb_network_ports))
        ha_ports = l2pop_db.get_ha_active_network_ports(
            self.ctx.session, TEST_NETWORK_ID)
        self.assertEqual(2, len(ha_ports))

    def test_active_port_count_with_dvr_snat_port(self):
        helpers.register_l3_agent()
        helpers.register_dhcp_agent()
        helpers.register_ovs_agent()
        self._create_router()
        self._setup_port_binding(
            device_owner=constants.DEVICE_OWNER_ROUTER_SNAT,
            device_id=TEST_ROUTER_ID)
        helpers.register_dhcp_agent()
        port_count = l2pop_db.get_agent_network_active_port_count(
            self.ctx.session, HOST, TEST_NETWORK_ID)
        self.assertEqual(1, port_count)
        port_count = l2pop_db.get_agent_network_active_port_count(
            self.ctx.session, HOST_2, TEST_NETWORK_ID)
        self.assertEqual(0,
                         port_count)

    def test_active_port_count_with_ha_dvr_snat_port(self):
        helpers.register_dhcp_agent()
        helpers.register_l3_agent()
        helpers.register_ovs_agent()
        self._create_ha_router()
        self._setup_port_binding(
            device_owner=constants.DEVICE_OWNER_ROUTER_SNAT,
            device_id=TEST_ROUTER_ID)
        port_count = l2pop_db.get_agent_network_active_port_count(
            self.ctx.session, HOST, TEST_NETWORK_ID)
        self.assertEqual(1, port_count)
        port_count = l2pop_db.get_agent_network_active_port_count(
            self.ctx.session, HOST_2, TEST_NETWORK_ID)
        self.assertEqual(1, port_count)

    def test_get_ha_agents_by_router_id(self):
        helpers.register_dhcp_agent()
        helpers.register_l3_agent()
        helpers.register_ovs_agent()
        self._create_ha_router()
        self._setup_port_binding(
            device_owner=constants.DEVICE_OWNER_ROUTER_SNAT,
            device_id=TEST_ROUTER_ID)
        agents = l2pop_db.get_ha_agents_by_router_id(
            self.ctx.session, TEST_ROUTER_ID)
        ha_agents = [agent.host for agent in agents]
        self.assertEqual(tools.UnorderedList([HOST, HOST_2]), ha_agents)
neutron-8.4.0/neutron/tests/unit/plugins/ml2/drivers/mechanism_logger.py0000664000567000056710000001240213044372760027704 0ustar jenkinsjenkins00000000000000# Copyright (c) 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from oslo_log import log

from neutron._i18n import _
from neutron.plugins.ml2 import driver_api as api

LOG = log.getLogger(__name__)


class LoggerMechanismDriver(api.MechanismDriver):
    """Mechanism driver that logs all calls and parameters made.

    Generally used for testing and debugging.
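
    Usage sketch (added; this assumes the 'logger' alias that this tree
    registers for the driver via a setup.cfg entry point -- verify the alias
    before relying on it). The driver can be stacked next to a real mechanism
    driver, e.g. in ml2_conf.ini::

        [ml2]
        mechanism_drivers = logger,openvswitch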
""" def initialize(self): pass def _log_network_call(self, method_name, context): LOG.info(_("%(method)s called with network settings %(current)s " "(original settings %(original)s) and " "network segments %(segments)s"), {'method': method_name, 'current': context.current, 'original': context.original, 'segments': context.network_segments}) def create_network_precommit(self, context): self._log_network_call("create_network_precommit", context) def create_network_postcommit(self, context): self._log_network_call("create_network_postcommit", context) def update_network_precommit(self, context): self._log_network_call("update_network_precommit", context) def update_network_postcommit(self, context): self._log_network_call("update_network_postcommit", context) def delete_network_precommit(self, context): self._log_network_call("delete_network_precommit", context) def delete_network_postcommit(self, context): self._log_network_call("delete_network_postcommit", context) def _log_subnet_call(self, method_name, context): LOG.info(_("%(method)s called with subnet settings %(current)s " "(original settings %(original)s)"), {'method': method_name, 'current': context.current, 'original': context.original}) def create_subnet_precommit(self, context): self._log_subnet_call("create_subnet_precommit", context) def create_subnet_postcommit(self, context): self._log_subnet_call("create_subnet_postcommit", context) def update_subnet_precommit(self, context): self._log_subnet_call("update_subnet_precommit", context) def update_subnet_postcommit(self, context): self._log_subnet_call("update_subnet_postcommit", context) def delete_subnet_precommit(self, context): self._log_subnet_call("delete_subnet_precommit", context) def delete_subnet_postcommit(self, context): self._log_subnet_call("delete_subnet_postcommit", context) def _log_port_call(self, method_name, context): network_context = context.network LOG.info(_("%(method)s called with port settings %(current)s " "(original settings %(original)s) " "host %(host)s " "(original host %(original_host)s) " "vif type %(vif_type)s " "(original vif type %(original_vif_type)s) " "vif details %(vif_details)s " "(original vif details %(original_vif_details)s) " "binding levels %(levels)s " "(original binding levels %(original_levels)s) " "on network %(network)s " "with segments to bind %(segments_to_bind)s"), {'method': method_name, 'current': context.current, 'original': context.original, 'host': context.host, 'original_host': context.original_host, 'vif_type': context.vif_type, 'original_vif_type': context.original_vif_type, 'vif_details': context.vif_details, 'original_vif_details': context.original_vif_details, 'levels': context.binding_levels, 'original_levels': context.original_binding_levels, 'network': network_context.current, 'segments_to_bind': context.segments_to_bind}) def create_port_precommit(self, context): self._log_port_call("create_port_precommit", context) def create_port_postcommit(self, context): self._log_port_call("create_port_postcommit", context) def update_port_precommit(self, context): self._log_port_call("update_port_precommit", context) def update_port_postcommit(self, context): self._log_port_call("update_port_postcommit", context) def delete_port_precommit(self, context): self._log_port_call("delete_port_precommit", context) def delete_port_postcommit(self, context): self._log_port_call("delete_port_postcommit", context) def bind_port(self, context): self._log_port_call("bind_port", context) 
neutron-8.4.0/neutron/tests/unit/plugins/ml2/drivers/macvtap/0000775000567000056710000000000013044373210025452 5ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/tests/unit/plugins/ml2/drivers/macvtap/__init__.py0000664000567000056710000000000013044372736027565 0ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/tests/unit/plugins/ml2/drivers/macvtap/test_macvtap_common.py0000664000567000056710000000463613044372736032103 0ustar jenkinsjenkins00000000000000# Copyright (c) 2016 IBM Corp.
#
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import hashlib

import mock

from neutron.plugins.ml2.drivers.macvtap import macvtap_common as m_common
from neutron.tests import base

MOCKED_HASH = "MOCKEDHASH"


class MockSHA(object):
    def hexdigest(self):
        return MOCKED_HASH


class MacvtapCommonTestCase(base.BaseTestCase):
    @mock.patch.object(hashlib, 'sha1', return_value=MockSHA())
    def test_get_vlan_device_name(self, mocked_hash):
        # only the first six chars of the hash are being used in the algorithm
        hash_used = MOCKED_HASH[0:6]
        self.assertEqual('10charrrrr.1',
                         m_common.get_vlan_device_name('10charrrrr', "1"))
        self.assertEqual('11ch' + hash_used + '.1',
                         m_common.get_vlan_device_name('11charrrrrr', "1"))
        self.assertEqual('14ch' + hash_used + '.1',
                         m_common.get_vlan_device_name('14charrrrrrrrr', "1"))
        self.assertEqual('14ch' + hash_used + '.1111',
                         m_common.get_vlan_device_name('14charrrrrrrrr',
                                                       "1111"))

    def test_get_vlan_subinterface_name_advanced(self):
        """Ensure the same hash is used for long interface names.

        If the generated vlan device name would be too long, make sure that
        everything before the '.' is equal. This might be helpful when
        debugging problems.
        """
        max_device_name = "15charrrrrrrrrr"
        vlan_dev_name1 = m_common.get_vlan_device_name(max_device_name, "1")
        vlan_dev_name2 = m_common.get_vlan_device_name(max_device_name,
                                                       "1111")
        self.assertEqual(vlan_dev_name1.partition(".")[0],
                         vlan_dev_name2.partition(".")[0])
neutron-8.4.0/neutron/tests/unit/plugins/ml2/drivers/macvtap/mech_driver/0000775000567000056710000000000013044373210027741 5ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/tests/unit/plugins/ml2/drivers/macvtap/mech_driver/test_mech_macvtap.py0000664000567000056710000000764513044372760034022 0ustar jenkinsjenkins00000000000000# Copyright (c) 2015 IBM Corp.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
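
# Added orientation note (an inference from test_macvtap_common.py above and
# the vif_details assertions below, not from upstream docs): for a VLAN
# segment the macvtap driver is expected to report the VLAN subinterface of
# the mapped physical interface as the macvtap source, roughly:
#
#     macvtap_common.get_vlan_device_name('fake_if', '1234')
#     # -> 'fake_if.1234' ('fake_if' is short enough that no hash-based
#     #    truncation is needed)
#
# whereas a flat segment uses the physical interface 'fake_if' directly.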
from neutron.common import constants from neutron.extensions import portbindings from neutron.plugins.ml2.drivers.macvtap.mech_driver import mech_macvtap from neutron.tests.unit.plugins.ml2 import _test_mech_agent as base class MacvtapMechanismBaseTestCase(base.AgentMechanismBaseTestCase): VIF_TYPE = portbindings.VIF_TYPE_MACVTAP CAP_PORT_FILTER = False AGENT_TYPE = constants.AGENT_TYPE_MACVTAP GOOD_MAPPINGS = {'fake_physical_network': 'fake_if'} GOOD_CONFIGS = {'interface_mappings': GOOD_MAPPINGS} BAD_MAPPINGS = {'wrong_physical_network': 'wrong_if'} BAD_CONFIGS = {'interface_mappings': BAD_MAPPINGS} AGENTS = [{'alive': True, 'configurations': GOOD_CONFIGS, 'host': 'host'}] AGENTS_DEAD = [{'alive': False, 'configurations': GOOD_CONFIGS, 'host': 'dead_host'}] AGENTS_BAD = [{'alive': False, 'configurations': GOOD_CONFIGS, 'host': 'bad_host_1'}, {'alive': True, 'configurations': BAD_CONFIGS, 'host': 'bad_host_2'}] def setUp(self): super(MacvtapMechanismBaseTestCase, self).setUp() self.driver = mech_macvtap.MacvtapMechanismDriver() self.driver.initialize() class MacvtapMechanismGenericTestCase(MacvtapMechanismBaseTestCase, base.AgentMechanismGenericTestCase): pass class MacvtapMechanismFlatTestCase(MacvtapMechanismBaseTestCase, base.AgentMechanismFlatTestCase): def test_type_flat_vif_details(self): context = base.FakePortContext(self.AGENT_TYPE, self.AGENTS, self.FLAT_SEGMENTS, vnic_type=self.VNIC_TYPE) self.driver.bind_port(context) vif_details = context._bound_vif_details self.assertIsNone(vif_details.get(portbindings.VIF_DETAILS_VLAN)) self.assertEqual("bridge", vif_details.get( portbindings.VIF_DETAILS_MACVTAP_MODE)) self.assertEqual("fake_if", vif_details.get( portbindings.VIF_DETAILS_PHYSICAL_INTERFACE)) self.assertEqual("fake_if", vif_details.get( portbindings.VIF_DETAILS_MACVTAP_SOURCE)) class MacvtapMechanismVlanTestCase(MacvtapMechanismBaseTestCase, base.AgentMechanismVlanTestCase): def test_type_vlan_vif_details(self): context = base.FakePortContext(self.AGENT_TYPE, self.AGENTS, self.VLAN_SEGMENTS, vnic_type=self.VNIC_TYPE) self.driver.bind_port(context) vif_details = context._bound_vif_details self.assertEqual(1234, vif_details.get(portbindings.VIF_DETAILS_VLAN)) self.assertEqual("bridge", vif_details.get( portbindings.VIF_DETAILS_MACVTAP_MODE)) self.assertEqual("fake_if", vif_details.get( portbindings.VIF_DETAILS_PHYSICAL_INTERFACE)) self.assertEqual("fake_if.1234", vif_details.get( portbindings.VIF_DETAILS_MACVTAP_SOURCE)) neutron-8.4.0/neutron/tests/unit/plugins/ml2/drivers/macvtap/mech_driver/__init__.py0000664000567000056710000000000013044372736032054 0ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/tests/unit/plugins/ml2/drivers/macvtap/agent/0000775000567000056710000000000013044373210026550 5ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/tests/unit/plugins/ml2/drivers/macvtap/agent/test_macvtap_neutron_agent.py0000664000567000056710000002520013044372760034554 0ustar jenkinsjenkins00000000000000# Copyright (c) 2016 IBM Corp. # # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. import os import sys import mock from oslo_config import cfg from oslo_service import service from neutron.agent.linux import ip_lib from neutron.agent.linux import utils from neutron.common import config as common_config from neutron.common import topics from neutron.common import utils as n_utils from neutron.plugins.ml2.drivers.agent import _agent_manager_base as amb from neutron.plugins.ml2.drivers.macvtap.agent import macvtap_neutron_agent from neutron.plugins.ml2.drivers.macvtap import macvtap_common from neutron.tests import base INTERFACE_MAPPINGS = {'physnet1': 'eth1'} NETWORK_ID = 'net-id123' NETWORK_SEGMENT_VLAN = amb.NetworkSegment('vlan', 'physnet1', 1) NETWORK_SEGMENT_FLAT = amb.NetworkSegment('flat', 'physnet1', None) class TestMacvtapRPCCallbacks(base.BaseTestCase): def setUp(self): super(TestMacvtapRPCCallbacks, self).setUp() agent = mock.Mock() agent.mgr = mock.Mock() agent.mgr.interface_mappings = INTERFACE_MAPPINGS self.rpc = macvtap_neutron_agent.MacvtapRPCCallBack(mock.Mock(), agent, mock.Mock()) def test_network_delete_vlan(self): self.rpc.network_map = {NETWORK_ID: NETWORK_SEGMENT_VLAN} with mock.patch.object(ip_lib.IpLinkCommand, 'delete') as mock_del,\ mock.patch.object(macvtap_common, 'get_vlan_device_name', return_value='vlan1'),\ mock.patch.object(ip_lib.IPDevice, 'exists', return_value=True): self.rpc.network_delete("anycontext", network_id=NETWORK_ID) self.assertTrue(mock_del.called) def test_network_delete_flat(self): self.rpc.network_map = {NETWORK_ID: NETWORK_SEGMENT_FLAT} with mock.patch.object(ip_lib.IpLinkCommand, 'delete') as mock_del: self.rpc.network_delete( "anycontext", network_id=NETWORK_SEGMENT_FLAT.segmentation_id) self.assertFalse(mock_del.called) def test_port_update(self): port = {'id': 'port-id123', 'mac_address': 'mac1'} self.rpc.port_update(context=None, port=port) self.assertEqual(set(['mac1']), self.rpc.updated_devices) class TestMacvtapManager(base.BaseTestCase): def setUp(self): super(TestMacvtapManager, self).setUp() with mock.patch.object(ip_lib, 'device_exists', return_value=True): self.mgr = macvtap_neutron_agent.MacvtapManager(INTERFACE_MAPPINGS) def test_validate_interface_mappings_dev_exists(self): good_mapping = {'physnet1': 'eth1', 'physnet2': 'eth2'} self.mgr.interface_mappings = good_mapping with mock.patch.object(ip_lib, 'device_exists', return_value=True)\ as mock_de: self.mgr.validate_interface_mappings() mock_de.assert_any_call('eth1') mock_de.assert_any_call('eth2') self.assertEqual(2, mock_de.call_count) def test_validate_interface_mappings_dev_not_exists(self): bad_mapping = {'physnet1': 'foo'} self.mgr.interface_mappings = bad_mapping with mock.patch.object(ip_lib, 'device_exists', return_value=False)\ as mock_de, mock.patch.object(sys, 'exit') as mock_exit: self.mgr.validate_interface_mappings() mock_de.assert_called_with('foo') mock_exit.assert_called_once_with(1) def _test_ensure_port_admin_state(self, admin_state): dev = 'macvtap1' mac = 'mac1' self.mgr.mac_device_name_mappings = {mac: dev} with mock.patch.object(ip_lib, 'IPDevice') as mock_ip_dev: self.mgr.ensure_port_admin_state(mac, admin_state) self.assertEqual(admin_state, mock_ip_dev(dev).link.set_up.called) self.assertNotEqual(admin_state, mock_ip_dev(dev).link.set_down.called) def test_ensure_port_admin_state_up(self): self._test_ensure_port_admin_state(True) def test_ensure_port_admin_state_down(self): self._test_ensure_port_admin_state(False) def 
test_get_all_devices(self): listing = ['foo', 'macvtap0', 'macvtap1', 'bar'] # set some mac mappings to make sure they are cleaned up self.mgr.mac_device_name_mappings = {'foo': 'bar'} with mock.patch.object(os, 'listdir', return_value=listing)\ as mock_ld,\ mock.patch.object(utils, 'get_interface_mac') as mock_gdn: mock_gdn.side_effect = ['mac0', 'mac1'] result = self.mgr.get_all_devices() mock_ld.assert_called_once_with(macvtap_neutron_agent.MACVTAP_FS) self.assertEqual(set(['mac0', 'mac1']), result) self.assertEqual({'mac0': 'macvtap0', 'mac1': 'macvtap1'}, self.mgr.mac_device_name_mappings) def test_get_agent_configurations(self): expected = {'interface_mappings': INTERFACE_MAPPINGS} self.assertEqual(expected, self.mgr.get_agent_configurations()) def test_get_agent_id_ok(self): mock_devices = [ip_lib.IPDevice('macvtap1')] with mock.patch.object(ip_lib.IPWrapper, 'get_devices', return_value=mock_devices),\ mock.patch.object(utils, 'get_interface_mac', return_value='foo:bar'): self.assertEqual('macvtapfoobar', self.mgr.get_agent_id()) def test_get_agent_id_fail(self): mock_devices = [] with mock.patch.object(ip_lib.IPWrapper, 'get_devices', return_value=mock_devices),\ mock.patch.object(sys, 'exit') as mock_exit: self.mgr.get_agent_id() mock_exit.assert_called_once_with(1) def test_get_extension_driver_type(self): self.assertEqual('macvtap', self.mgr.get_extension_driver_type()) def test_get_rpc_callbacks(self): context = mock.Mock() agent = mock.Mock() sg_agent = mock.Mock() obj = self.mgr.get_rpc_callbacks(context, agent, sg_agent) self.assertIsInstance(obj, macvtap_neutron_agent.MacvtapRPCCallBack) def test_get_rpc_consumers(self): consumers = [[topics.PORT, topics.UPDATE], [topics.NETWORK, topics.DELETE], [topics.SECURITY_GROUP, topics.UPDATE]] self.assertEqual(consumers, self.mgr.get_rpc_consumers()) def test_plug_interface(self): self.mgr.mac_device_name_mappings['mac1'] = 'macvtap0' with mock.patch.object(ip_lib.IpLinkCommand, 'set_allmulticast_on')\ as mock_sao: self.mgr.plug_interface('network_id', 'network_segment', 'mac1', 'device_owner') self.assertTrue(mock_sao.called) class TestMacvtapMain(base.BaseTestCase): def test_parse_interface_mappings_good(self): cfg.CONF.set_override('physical_interface_mappings', 'good_mapping', 'macvtap') with mock.patch.object(n_utils, 'parse_mappings', return_value=INTERFACE_MAPPINGS): mappings = macvtap_neutron_agent.parse_interface_mappings() self.assertEqual(INTERFACE_MAPPINGS, mappings) def test_parse_interface_mappings_bad(self): cfg.CONF.set_override('physical_interface_mappings', 'bad_mapping', 'macvtap') with mock.patch.object(n_utils, 'parse_mappings', side_effect=ValueError('bad mapping')),\ mock.patch.object(sys, 'exit') as mock_exit: macvtap_neutron_agent.parse_interface_mappings() mock_exit.assert_called_with(1) def test_validate_firewall_driver_noop_long(self): cfg.CONF.set_override('firewall_driver', 'neutron.agent.firewall.NoopFirewallDriver', 'SECURITYGROUP') macvtap_neutron_agent.validate_firewall_driver() def test_validate_firewall_driver_noop(self): cfg.CONF.set_override('firewall_driver', 'noop', 'SECURITYGROUP') macvtap_neutron_agent.validate_firewall_driver() def test_validate_firewall_driver_other(self): cfg.CONF.set_override('firewall_driver', 'foo', 'SECURITYGROUP') with mock.patch.object(sys, 'exit')as mock_exit: macvtap_neutron_agent.validate_firewall_driver() mock_exit.assert_called_with(1) def test_main(self): cfg.CONF.set_override('quitting_rpc_timeout', 1, 'AGENT') cfg.CONF.set_override('polling_interval', 2, 
'AGENT') mock_manager_return = mock.Mock(spec=amb.CommonAgentManagerBase) mock_launch_return = mock.Mock() with mock.patch.object(common_config, 'init'),\ mock.patch.object(common_config, 'setup_logging'),\ mock.patch.object(service, 'launch', return_value=mock_launch_return) as mock_launch,\ mock.patch.object(macvtap_neutron_agent, 'parse_interface_mappings', return_value=INTERFACE_MAPPINGS) as mock_pim,\ mock.patch.object(macvtap_neutron_agent, 'validate_firewall_driver') as mock_vfd,\ mock.patch('neutron.plugins.ml2.drivers.agent._common_agent.' 'CommonAgentLoop') as mock_loop,\ mock.patch('neutron.plugins.ml2.drivers.macvtap.agent.' 'macvtap_neutron_agent.MacvtapManager', return_value=mock_manager_return) as mock_manager: macvtap_neutron_agent.main() self.assertTrue(mock_vfd.called) self.assertTrue(mock_pim.called) mock_manager.assert_called_with(INTERFACE_MAPPINGS) mock_loop.assert_called_with(mock_manager_return, 2, 1, 'Macvtap agent', 'neutron-macvtap-agent') self.assertTrue(mock_launch.called) self.assertTrue(mock_launch_return.wait.called) neutron-8.4.0/neutron/tests/unit/plugins/ml2/drivers/macvtap/agent/__init__.py0000664000567000056710000000000013044372736030663 0ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/tests/unit/plugins/ml2/drivers/test_helpers.py0000664000567000056710000001345313044372760027111 0ustar jenkinsjenkins00000000000000# Copyright (c) 2014 Thales Services SAS # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
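# The cases below exercise the shared segment-allocation helpers through the
# VLAN type driver. A minimal usage sketch, assuming the same fixtures this
# module defines (the range and vlan_id values are illustrative):
#
#     driver = type_vlan.VlanTypeDriver()
#     driver.network_vlan_ranges = {'phys_net2': [(200, 209)]}
#     driver._sync_vlan_allocations()
#     session = db.get_session()
#     # Returns the allocation record, or None if the segment is taken.
#     segment = driver.allocate_fully_specified_segment(
#         session, physical_network='phys_net2', vlan_id=200)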
import mock from oslo_db import exception as exc from sqlalchemy.orm import query import neutron.db.api as db from neutron.plugins.ml2.drivers import type_vlan from neutron.tests.unit import testlib_api TENANT_NET = 'phys_net2' VLAN_MIN = 200 VLAN_MAX = 209 VLAN_OUTSIDE = 100 NETWORK_VLAN_RANGES = { TENANT_NET: [(VLAN_MIN, VLAN_MAX)], } class HelpersTest(testlib_api.SqlTestCase): def setUp(self): super(HelpersTest, self).setUp() self.driver = type_vlan.VlanTypeDriver() self.driver.network_vlan_ranges = NETWORK_VLAN_RANGES self.driver._sync_vlan_allocations() self.session = db.get_session() def check_raw_segment(self, expected, observed): for key, value in expected.items(): self.assertEqual(value, observed[key]) def test_primary_keys(self): self.assertEqual(set(['physical_network', 'vlan_id']), self.driver.primary_keys) def test_allocate_specific_unallocated_segment_in_pools(self): expected = dict(physical_network=TENANT_NET, vlan_id=VLAN_MIN) observed = self.driver.allocate_fully_specified_segment(self.session, **expected) self.check_raw_segment(expected, observed) def test_allocate_specific_allocated_segment_in_pools(self): raw_segment = dict(physical_network=TENANT_NET, vlan_id=VLAN_MIN) self.driver.allocate_fully_specified_segment(self.session, **raw_segment) observed = self.driver.allocate_fully_specified_segment(self.session, **raw_segment) self.assertIsNone(observed) def test_allocate_specific_finally_allocated_segment_in_pools(self): # Test case: allocate a specific unallocated segment in pools but # the segment is allocated concurrently between select and update raw_segment = dict(physical_network=TENANT_NET, vlan_id=VLAN_MIN) with mock.patch.object(query.Query, 'update', return_value=0): observed = self.driver.allocate_fully_specified_segment( self.session, **raw_segment) self.assertIsNone(observed) def test_allocate_specific_unallocated_segment_outside_pools(self): expected = dict(physical_network=TENANT_NET, vlan_id=VLAN_OUTSIDE) observed = self.driver.allocate_fully_specified_segment(self.session, **expected) self.check_raw_segment(expected, observed) def test_allocate_specific_allocated_segment_outside_pools(self): raw_segment = dict(physical_network=TENANT_NET, vlan_id=VLAN_OUTSIDE) self.driver.allocate_fully_specified_segment(self.session, **raw_segment) observed = self.driver.allocate_fully_specified_segment(self.session, **raw_segment) self.assertIsNone(observed) def test_allocate_specific_finally_unallocated_segment_outside_pools(self): # Test case: allocate a specific allocated segment in pools but # the segment is concurrently unallocated after select or update expected = dict(physical_network=TENANT_NET, vlan_id=VLAN_MIN) with mock.patch.object(self.driver.model, 'save'): observed = self.driver.allocate_fully_specified_segment( self.session, **expected) self.check_raw_segment(expected, observed) def test_allocate_partial_segment_without_filters(self): expected = dict(physical_network=TENANT_NET) observed = self.driver.allocate_partially_specified_segment( self.session) self.check_raw_segment(expected, observed) def test_allocate_partial_segment_with_filter(self): expected = dict(physical_network=TENANT_NET) observed = self.driver.allocate_partially_specified_segment( self.session, **expected) self.check_raw_segment(expected, observed) def test_allocate_partial_segment_no_resource_available(self): for i in range(VLAN_MIN, VLAN_MAX + 1): self.driver.allocate_partially_specified_segment(self.session) observed = self.driver.allocate_partially_specified_segment( 
self.session) self.assertIsNone(observed) def test_allocate_partial_segment_outside_pools(self): raw_segment = dict(physical_network='other_phys_net') observed = self.driver.allocate_partially_specified_segment( self.session, **raw_segment) self.assertIsNone(observed) def test_allocate_partial_segment_first_attempt_fails(self): expected = dict(physical_network=TENANT_NET) with mock.patch.object(query.Query, 'update', side_effect=[0, 1]): self.assertRaises( exc.RetryRequest, self.driver.allocate_partially_specified_segment, self.session, **expected) observed = self.driver.allocate_partially_specified_segment( self.session, **expected) self.check_raw_segment(expected, observed) neutron-8.4.0/neutron/tests/unit/plugins/ml2/test_rpc.py0000664000567000056710000005532313044372760024557 0ustar jenkinsjenkins00000000000000# Copyright (c) 2013 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Unit Tests for ml2 rpc """ import collections import mock from oslo_config import cfg from oslo_context import context as oslo_context import oslo_messaging from sqlalchemy.orm import exc from neutron.agent import rpc as agent_rpc from neutron.common import constants from neutron.common import exceptions from neutron.common import topics from neutron.plugins.ml2.drivers import type_tunnel from neutron.plugins.ml2 import managers from neutron.plugins.ml2 import rpc as plugin_rpc from neutron.services.qos import qos_consts from neutron.tests import base cfg.CONF.import_group('ml2', 'neutron.plugins.ml2.config') class RpcCallbacksTestCase(base.BaseTestCase): def setUp(self): super(RpcCallbacksTestCase, self).setUp() self.type_manager = managers.TypeManager() self.notifier = plugin_rpc.AgentNotifierApi(topics.AGENT) self.callbacks = plugin_rpc.RpcCallbacks(self.notifier, self.type_manager) self.manager = mock.patch.object( plugin_rpc.manager, 'NeutronManager').start() self.plugin = self.manager.get_plugin() def _test_update_device_up(self): kwargs = { 'agent_id': 'foo_agent', 'device': 'foo_device' } with mock.patch('neutron.plugins.ml2.plugin.Ml2Plugin' '._device_to_port_id'): with mock.patch('neutron.callbacks.registry.notify') as notify,\ mock.patch.object(self.callbacks, 'notify_ha_port_status'): self.callbacks.update_device_up(mock.Mock(), **kwargs) return notify def test_update_device_up_notify(self): notify = self._test_update_device_up() kwargs = { 'context': mock.ANY, 'port': mock.ANY, 'update_device_up': True } notify.assert_called_once_with( 'port', 'after_update', self.plugin, **kwargs) def test_update_device_up_notify_not_sent_with_port_not_found(self): self.plugin._get_port.side_effect = ( exceptions.PortNotFound(port_id='foo_port_id')) notify = self._test_update_device_up() self.assertFalse(notify.call_count) def test_get_device_details_without_port_context(self): self.plugin.get_bound_port_context.return_value = None self.assertEqual( {'device': 'fake_device'}, self.callbacks.get_device_details(mock.Mock(), device='fake_device')) def 
test_get_device_details_port_context_without_bounded_segment(self): self.plugin.get_bound_port_context().bottom_bound_segment = None self.assertEqual( {'device': 'fake_device'}, self.callbacks.get_device_details(mock.Mock(), device='fake_device')) def test_get_device_details_port_status_equal_new_status(self): port = collections.defaultdict(lambda: 'fake') self.plugin.get_bound_port_context().current = port self.plugin.port_bound_to_host = mock.MagicMock(return_value=True) for admin_state_up in (True, False): new_status = (constants.PORT_STATUS_BUILD if admin_state_up else constants.PORT_STATUS_DOWN) for status in (constants.PORT_STATUS_ACTIVE, constants.PORT_STATUS_BUILD, constants.PORT_STATUS_DOWN, constants.PORT_STATUS_ERROR): port['admin_state_up'] = admin_state_up port['status'] = status self.plugin.update_port_status.reset_mock() self.callbacks.get_device_details(mock.Mock()) self.assertEqual(status == new_status, not self.plugin.update_port_status.called) def test_get_device_details_caching(self): port = collections.defaultdict(lambda: 'fake_port') cached_networks = {} self.plugin.get_bound_port_context().current = port self.plugin.get_bound_port_context().network.current = ( {"id": "fake_network"}) self.callbacks.get_device_details(mock.Mock(), host='fake_host', cached_networks=cached_networks) self.assertIn('fake_port', cached_networks) def test_get_device_details_wrong_host(self): port = collections.defaultdict(lambda: 'fake') port_context = self.plugin.get_bound_port_context() port_context.current = port port_context.host = 'fake' self.plugin.update_port_status.reset_mock() self.callbacks.get_device_details(mock.Mock(), host='fake_host') self.assertFalse(self.plugin.update_port_status.called) def test_get_device_details_port_no_host(self): port = collections.defaultdict(lambda: 'fake') port_context = self.plugin.get_bound_port_context() port_context.current = port self.plugin.update_port_status.reset_mock() self.callbacks.get_device_details(mock.Mock()) self.assertTrue(self.plugin.update_port_status.called) def test_get_device_details_qos_policy_id_none(self): port = collections.defaultdict(lambda: 'fake_port') self.plugin.get_bound_port_context().current = port self.plugin.get_bound_port_context().network._network = ( {"id": "fake_network"}) res = self.callbacks.get_device_details(mock.Mock(), host='fake') self.assertIsNone(res['qos_policy_id']) def test_get_device_details_network_qos_policy_id(self): port = collections.defaultdict(lambda: 'fake_port') self.plugin.get_bound_port_context().current = port self.plugin.get_bound_port_context().network._network = ( {"id": "fake_network", qos_consts.QOS_POLICY_ID: 'test-policy-id'}) res = self.callbacks.get_device_details(mock.Mock(), host='fake') self.assertEqual('test-policy-id', res['network_qos_policy_id']) def test_get_device_details_qos_policy_id_from_port(self): port = collections.defaultdict( lambda: 'fake_port', {qos_consts.QOS_POLICY_ID: 'test-port-policy-id'}) self.plugin.get_bound_port_context().current = port self.plugin.get_bound_port_context().network._network = ( {"id": "fake_network", qos_consts.QOS_POLICY_ID: 'test-net-policy-id'}) res = self.callbacks.get_device_details(mock.Mock(), host='fake') self.assertEqual('test-port-policy-id', res['qos_policy_id']) def _test_get_devices_list(self, callback, side_effect, expected): devices = [1, 2, 3, 4, 5] kwargs = {'host': 'fake_host', 'agent_id': 'fake_agent_id'} with mock.patch.object(self.callbacks, 'get_device_details', side_effect=side_effect) as f: res = 
callback('fake_context', devices=devices, **kwargs) self.assertEqual(expected, res) self.assertEqual(len(devices), f.call_count) calls = [mock.call('fake_context', device=i, cached_networks={}, **kwargs) for i in devices] f.assert_has_calls(calls) def test_get_devices_details_list(self): devices = [1, 2, 3, 4, 5] expected = devices callback = self.callbacks.get_devices_details_list self._test_get_devices_list(callback, devices, expected) def test_get_devices_details_list_with_empty_devices(self): with mock.patch.object(self.callbacks, 'get_device_details') as f: res = self.callbacks.get_devices_details_list('fake_context') self.assertFalse(f.called) self.assertEqual([], res) def test_get_devices_details_list_and_failed_devices(self): devices = [1, 2, 3, 4, 5] expected = {'devices': devices, 'failed_devices': []} callback = ( self.callbacks.get_devices_details_list_and_failed_devices) self._test_get_devices_list(callback, devices, expected) def test_get_devices_details_list_and_failed_devices_failures(self): devices = [1, Exception('testdevice'), 3, Exception('testdevice'), 5] expected = {'devices': [1, 3, 5], 'failed_devices': [2, 4]} callback = ( self.callbacks.get_devices_details_list_and_failed_devices) self._test_get_devices_list(callback, devices, expected) def test_get_devices_details_list_and_failed_devices_empty_dev(self): with mock.patch.object(self.callbacks, 'get_device_details') as f: res = self.callbacks.get_devices_details_list_and_failed_devices( 'fake_context') self.assertFalse(f.called) self.assertEqual({'devices': [], 'failed_devices': []}, res) def _test_update_device_not_bound_to_host(self, func): self.plugin.port_bound_to_host.return_value = False self.callbacks.notify_ha_port_status = mock.Mock() self.plugin._device_to_port_id.return_value = 'fake_port_id' res = func(mock.Mock(), device='fake_device', host='fake_host') self.plugin.port_bound_to_host.assert_called_once_with(mock.ANY, 'fake_port_id', 'fake_host') return res def test_update_device_up_with_device_not_bound_to_host(self): self.assertIsNone(self._test_update_device_not_bound_to_host( self.callbacks.update_device_up)) def test_update_device_down_with_device_not_bound_to_host(self): self.assertEqual( {'device': 'fake_device', 'exists': True}, self._test_update_device_not_bound_to_host( self.callbacks.update_device_down)) def test_update_device_down_call_update_port_status(self): self.plugin.update_port_status.return_value = False self.callbacks.notify_ha_port_status = mock.Mock() self.plugin._device_to_port_id.return_value = 'fake_port_id' self.assertEqual( {'device': 'fake_device', 'exists': False}, self.callbacks.update_device_down(mock.Mock(), device='fake_device', host='fake_host')) self.plugin.update_port_status.assert_called_once_with( mock.ANY, 'fake_port_id', constants.PORT_STATUS_DOWN, 'fake_host') def test_update_device_down_call_update_port_status_failed(self): self.plugin.update_port_status.side_effect = exc.StaleDataError self.assertEqual({'device': 'fake_device', 'exists': False}, self.callbacks.update_device_down( mock.Mock(), device='fake_device')) def _test_update_device_list(self, devices_up_side_effect, devices_down_side_effect, expected): devices_up = [1, 2, 3] devices_down = [4, 5] kwargs = {'host': 'fake_host', 'agent_id': 'fake_agent_id'} with mock.patch.object(self.callbacks, 'update_device_up', side_effect=devices_up_side_effect) as f_up, \ mock.patch.object(self.callbacks, 'update_device_down', side_effect=devices_down_side_effect) as f_down: res = self.callbacks.update_device_list( 
'fake_context', devices_up=devices_up, devices_down=devices_down, **kwargs) self.assertEqual(expected, res) self.assertEqual(len(devices_up), f_up.call_count) self.assertEqual(len(devices_down), f_down.call_count) def test_update_device_list_no_failure(self): devices_up_side_effect = [1, 2, 3] devices_down_side_effect = [ {'device': 4, 'exists': True}, {'device': 5, 'exists': True}] expected = {'devices_up': devices_up_side_effect, 'failed_devices_up': [], 'devices_down': [{'device': 4, 'exists': True}, {'device': 5, 'exists': True}], 'failed_devices_down': []} self._test_update_device_list(devices_up_side_effect, devices_down_side_effect, expected) def test_update_device_list_failed_devices(self): devices_up_side_effect = [1, Exception('testdevice'), 3] devices_down_side_effect = [{'device': 4, 'exists': True}, Exception('testdevice')] expected = {'devices_up': [1, 3], 'failed_devices_up': [2], 'devices_down': [{'device': 4, 'exists': True}], 'failed_devices_down': [5]} self._test_update_device_list(devices_up_side_effect, devices_down_side_effect, expected) def test_update_device_list_empty_devices(self): expected = {'devices_up': [], 'failed_devices_up': [], 'devices_down': [], 'failed_devices_down': []} kwargs = {'host': 'fake_host', 'agent_id': 'fake_agent_id'} res = self.callbacks.update_device_list( 'fake_context', devices_up=[], devices_down=[], **kwargs) self.assertEqual(expected, res) class RpcApiTestCase(base.BaseTestCase): def _test_rpc_api(self, rpcapi, topic, method, rpc_method, **kwargs): ctxt = oslo_context.RequestContext('fake_user', 'fake_project') expected_retval = 'foo' if rpc_method == 'call' else None expected_version = kwargs.pop('version', None) fanout = kwargs.pop('fanout', False) with mock.patch.object(rpcapi.client, rpc_method) as rpc_mock,\ mock.patch.object(rpcapi.client, 'prepare') as prepare_mock: prepare_mock.return_value = rpcapi.client rpc_mock.return_value = expected_retval retval = getattr(rpcapi, method)(ctxt, **kwargs) prepare_args = {} if expected_version: prepare_args['version'] = expected_version if fanout: prepare_args['fanout'] = fanout if topic: prepare_args['topic'] = topic prepare_mock.assert_called_once_with(**prepare_args) self.assertEqual(retval, expected_retval) rpc_mock.assert_called_once_with(ctxt, method, **kwargs) def test_delete_network(self): rpcapi = plugin_rpc.AgentNotifierApi(topics.AGENT) self._test_rpc_api( rpcapi, topics.get_topic_name(topics.AGENT, topics.NETWORK, topics.DELETE), 'network_delete', rpc_method='cast', fanout=True, network_id='fake_request_spec') def test_port_update(self): rpcapi = plugin_rpc.AgentNotifierApi(topics.AGENT) self._test_rpc_api( rpcapi, topics.get_topic_name(topics.AGENT, topics.PORT, topics.UPDATE), 'port_update', rpc_method='cast', fanout=True, port='fake_port', network_type='fake_network_type', segmentation_id='fake_segmentation_id', physical_network='fake_physical_network') def test_port_delete(self): rpcapi = plugin_rpc.AgentNotifierApi(topics.AGENT) self._test_rpc_api( rpcapi, topics.get_topic_name(topics.AGENT, topics.PORT, topics.DELETE), 'port_delete', rpc_method='cast', fanout=True, port_id='fake_port') def test_tunnel_update(self): rpcapi = plugin_rpc.AgentNotifierApi(topics.AGENT) self._test_rpc_api( rpcapi, topics.get_topic_name(topics.AGENT, type_tunnel.TUNNEL, topics.UPDATE), 'tunnel_update', rpc_method='cast', fanout=True, tunnel_ip='fake_ip', tunnel_type='gre') def test_tunnel_delete(self): rpcapi = plugin_rpc.AgentNotifierApi(topics.AGENT) self._test_rpc_api( rpcapi, 
topics.get_topic_name(topics.AGENT, type_tunnel.TUNNEL, topics.DELETE), 'tunnel_delete', rpc_method='cast', fanout=True, tunnel_ip='fake_ip', tunnel_type='gre') def test_device_details(self): rpcapi = agent_rpc.PluginApi(topics.PLUGIN) self._test_rpc_api(rpcapi, None, 'get_device_details', rpc_method='call', device='fake_device', agent_id='fake_agent_id', host='fake_host') def test_devices_details_list(self): rpcapi = agent_rpc.PluginApi(topics.PLUGIN) self._test_rpc_api(rpcapi, None, 'get_devices_details_list', rpc_method='call', devices=['fake_device1', 'fake_device2'], agent_id='fake_agent_id', host='fake_host', version='1.3') def test_update_device_down(self): rpcapi = agent_rpc.PluginApi(topics.PLUGIN) self._test_rpc_api(rpcapi, None, 'update_device_down', rpc_method='call', device='fake_device', agent_id='fake_agent_id', host='fake_host') def test_tunnel_sync(self): rpcapi = agent_rpc.PluginApi(topics.PLUGIN) self._test_rpc_api(rpcapi, None, 'tunnel_sync', rpc_method='call', tunnel_ip='fake_tunnel_ip', tunnel_type=None, host='fake_host', version='1.4') def test_update_device_up(self): rpcapi = agent_rpc.PluginApi(topics.PLUGIN) self._test_rpc_api(rpcapi, None, 'update_device_up', rpc_method='call', device='fake_device', agent_id='fake_agent_id', host='fake_host') def test_update_device_list(self): rpcapi = agent_rpc.PluginApi(topics.PLUGIN) self._test_rpc_api(rpcapi, None, 'update_device_list', rpc_method='call', devices_up=['fake_device1', 'fake_device2'], devices_down=['fake_device3', 'fake_device4'], agent_id='fake_agent_id', host='fake_host', version='1.5') def test_update_device_list_unsupported(self): rpcapi = agent_rpc.PluginApi(topics.PLUGIN) ctxt = oslo_context.RequestContext('fake_user', 'fake_project') devices_up = ['fake_device1', 'fake_device2'] devices_down = ['fake_device3', 'fake_device4'] expected_ret_val = {'devices_up': ['fake_device2'], 'failed_devices_up': ['fake_device1'], 'devices_down': [ {'device': 'fake_device3', 'exists': True}], 'failed_devices_down': ['fake_device4']} rpcapi.update_device_up = mock.Mock( side_effect=[Exception('fake_device1 fails'), None]) rpcapi.update_device_down = mock.Mock( side_effect=[{'device': 'fake_device3', 'exists': True}, Exception('fake_device4 fails')]) with mock.patch.object(rpcapi.client, 'call'),\ mock.patch.object(rpcapi.client, 'prepare') as prepare_mock: prepare_mock.side_effect = oslo_messaging.UnsupportedVersion( 'test') res = rpcapi.update_device_list(ctxt, devices_up, devices_down, 'fake_agent_id', 'fake_host') self.assertEqual(expected_ret_val, res) def test_get_devices_details_list_and_failed_devices(self): rpcapi = agent_rpc.PluginApi(topics.PLUGIN) self._test_rpc_api(rpcapi, None, 'get_devices_details_list_and_failed_devices', rpc_method='call', devices=['fake_device1', 'fake_device2'], agent_id='fake_agent_id', host='fake_host', version='1.5') def test_devices_details_list_and_failed_devices(self): rpcapi = agent_rpc.PluginApi(topics.PLUGIN) self._test_rpc_api(rpcapi, None, 'get_devices_details_list_and_failed_devices', rpc_method='call', devices=['fake_device1', 'fake_device2'], agent_id='fake_agent_id', host='fake_host', version='1.5') def test_get_devices_details_list_and_failed_devices_unsupported(self): rpcapi = agent_rpc.PluginApi(topics.PLUGIN) ctxt = oslo_context.RequestContext('fake_user', 'fake_project') devices = ['fake_device1', 'fake_device2'] dev2_details = {'device': 'fake_device2', 'network_id': 'net_id', 'port_id': 'port_id', 'admin_state_up': True} expected_ret_val = {'devices': 
[dev2_details], 'failed_devices': ['fake_device1']} rpcapi.get_device_details = mock.Mock( side_effect=[Exception('fake_device1 fails'), dev2_details]) with mock.patch.object(rpcapi.client, 'call'),\ mock.patch.object(rpcapi.client, 'prepare') as prepare_mock: prepare_mock.side_effect = oslo_messaging.UnsupportedVersion( 'test') res = rpcapi.get_devices_details_list_and_failed_devices( ctxt, devices, 'fake_agent_id', 'fake_host') self.assertEqual(expected_ret_val, res) neutron-8.4.0/neutron/tests/unit/plugins/ml2/test_port_binding.py0000664000567000056710000003541013044372760026444 0ustar jenkinsjenkins00000000000000# Copyright (c) 2013 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import mock from neutron.common import constants as const from neutron import context from neutron.extensions import portbindings from neutron import manager from neutron.plugins.ml2 import config as config from neutron.plugins.ml2 import driver_context from neutron.plugins.ml2 import models as ml2_models from neutron.tests.unit.db import test_db_base_plugin_v2 as test_plugin PLUGIN_NAME = 'neutron.plugins.ml2.plugin.Ml2Plugin' class PortBindingTestCase(test_plugin.NeutronDbPluginV2TestCase): _plugin_name = PLUGIN_NAME def setUp(self): # Enable the test mechanism driver to ensure that # we can successfully call through to all mechanism # driver apis. 
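# (These overrides emulate what a deployment would set in ml2_conf.ini:
# mechanism_drivers under [ml2] and network_vlan_ranges under
# [ml2_type_vlan]. The 'logger' and 'test' entries are fake mechanism
# drivers that exist only for the test suite.)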
config.cfg.CONF.set_override('mechanism_drivers', ['logger', 'test'], 'ml2') config.cfg.CONF.set_override('network_vlan_ranges', ['physnet1:1000:1099'], group='ml2_type_vlan') super(PortBindingTestCase, self).setUp(PLUGIN_NAME) self.port_create_status = 'DOWN' self.plugin = manager.NeutronManager.get_plugin() self.plugin.start_rpc_listeners() def _check_response(self, port, vif_type, has_port_filter, bound, status): self.assertEqual(vif_type, port[portbindings.VIF_TYPE]) vif_details = port[portbindings.VIF_DETAILS] port_status = port['status'] if bound: # TODO(rkukura): Replace with new VIF security details self.assertEqual(has_port_filter, vif_details[portbindings.CAP_PORT_FILTER]) self.assertEqual(status or 'DOWN', port_status) else: self.assertEqual('DOWN', port_status) def _test_port_binding(self, host, vif_type, has_port_filter, bound, status=None, network_type='local'): mac_address = 'aa:aa:aa:aa:aa:aa' host_arg = {portbindings.HOST_ID: host, 'mac_address': mac_address} with self.port(name='name', arg_list=(portbindings.HOST_ID,), **host_arg) as port: self._check_response(port['port'], vif_type, has_port_filter, bound, status) port_id = port['port']['id'] neutron_context = context.get_admin_context() details = self.plugin.endpoints[0].get_device_details( neutron_context, agent_id="theAgentId", device=port_id) if bound: self.assertEqual(network_type, details['network_type']) self.assertEqual(mac_address, details['mac_address']) else: self.assertNotIn('network_type', details) self.assertNotIn('mac_address', details) def test_unbound(self): self._test_port_binding("", portbindings.VIF_TYPE_UNBOUND, False, False) def test_binding_failed(self): self._test_port_binding("host-fail", portbindings.VIF_TYPE_BINDING_FAILED, False, False) def test_binding_no_filter(self): self._test_port_binding("host-ovs-no_filter", portbindings.VIF_TYPE_OVS, False, True) def test_binding_filter(self): self._test_port_binding("host-bridge-filter", portbindings.VIF_TYPE_BRIDGE, True, True) def test_binding_status_active(self): self._test_port_binding("host-ovs-filter-active", portbindings.VIF_TYPE_OVS, True, True, 'ACTIVE') def test_update_port_binding_no_binding(self): ctx = context.get_admin_context() with self.port(name='name') as port: # emulating concurrent binding deletion (ctx.session.query(ml2_models.PortBinding). 
filter_by(port_id=port['port']['id']).delete()) self.assertIsNone( self.plugin.get_bound_port_context(ctx, port['port']['id'])) def test_hierarchical_binding(self): self._test_port_binding("host-hierarchical", portbindings.VIF_TYPE_OVS, False, True, network_type='vlan') def test_get_bound_port_context_cache_hit(self): ctx = context.get_admin_context() with self.port(name='name') as port: cached_network_id = port['port']['network_id'] some_network = {'id': cached_network_id} cached_networks = {cached_network_id: some_network} self.plugin.get_network = mock.Mock(return_value=some_network) self.plugin.get_bound_port_context(ctx, port['port']['id'], cached_networks=cached_networks) self.assertFalse(self.plugin.get_network.called) def test_get_bound_port_context_cache_miss(self): ctx = context.get_admin_context() with self.port(name='name') as port: some_network = {'id': u'2ac23560-7638-44e2-9875-c1888b02af72'} self.plugin.get_network = mock.Mock(return_value=some_network) self.plugin.get_bound_port_context(ctx, port['port']['id'], cached_networks={}) self.assertEqual(1, self.plugin.get_network.call_count) def _test_update_port_binding(self, host, new_host=None): with mock.patch.object(self.plugin, '_notify_port_updated') as notify_mock: host_arg = {portbindings.HOST_ID: host} update_body = {'name': 'test_update'} if new_host is not None: update_body[portbindings.HOST_ID] = new_host with self.port(name='name', arg_list=(portbindings.HOST_ID,), **host_arg) as port: neutron_context = context.get_admin_context() updated_port = self._update('ports', port['port']['id'], {'port': update_body}, neutron_context=neutron_context) port_data = updated_port['port'] if new_host is not None: self.assertEqual(new_host, port_data[portbindings.HOST_ID]) else: self.assertEqual(host, port_data[portbindings.HOST_ID]) if new_host is not None and new_host != host: notify_mock.assert_called_once_with(mock.ANY) else: self.assertFalse(notify_mock.called) def test_update_with_new_host_binding_notifies_agent(self): self._test_update_port_binding('host-ovs-no_filter', 'host-bridge-filter') def test_update_with_same_host_binding_does_not_notify(self): self._test_update_port_binding('host-ovs-no_filter', 'host-ovs-no_filter') def test_update_without_binding_does_not_notify(self): self._test_update_port_binding('host-ovs-no_filter') def test_update_from_empty_to_host_binding_notifies_agent(self): self._test_update_port_binding('', 'host-ovs-no_filter') def test_update_from_host_to_empty_binding_notifies_agent(self): self._test_update_port_binding('host-ovs-no_filter', '') def test_process_binding_port_host_id_changed(self): ctx = context.get_admin_context() plugin = manager.NeutronManager.get_plugin() host_id = {portbindings.HOST_ID: 'host1'} with self.port(**host_id) as port: # Since the port is DOWN at first, it's necessary to make its # status ACTIVE for this test. plugin.update_port_status(ctx, port['port']['id'], const.PORT_STATUS_ACTIVE) attrs = port['port'] attrs['status'] = const.PORT_STATUS_ACTIVE original_port = attrs.copy() attrs['binding:host_id'] = 'host2' updated_port = attrs.copy() network = {'id': attrs['network_id']} binding = ml2_models.PortBinding( port_id=original_port['id'], host=original_port['binding:host_id'], vnic_type=original_port['binding:vnic_type'], profile=original_port['binding:profile'], vif_type=original_port['binding:vif_type'], vif_details=original_port['binding:vif_details']) levels = 1 mech_context = driver_context.PortContext( plugin, ctx, updated_port, network, binding, levels, 
original_port=original_port) plugin._process_port_binding(mech_context, port['port']) self.assertEqual(const.PORT_STATUS_DOWN, updated_port['status']) port_dict = plugin.get_port(ctx, port['port']['id']) self.assertEqual(const.PORT_STATUS_DOWN, port_dict['status']) def test_dvr_binding(self): ctx = context.get_admin_context() with self.port(device_owner=const.DEVICE_OWNER_DVR_INTERFACE) as port: port_id = port['port']['id'] # Verify port's VIF type and status. self.assertEqual(portbindings.VIF_TYPE_DISTRIBUTED, port['port'][portbindings.VIF_TYPE]) self.assertEqual('DOWN', port['port']['status']) # Update port to bind for a host. self.plugin.update_dvr_port_binding(ctx, port_id, {'port': { portbindings.HOST_ID: 'host-ovs-no_filter', 'device_id': 'router1'}}) # Get port and verify VIF type and status unchanged. port = self._show('ports', port_id) self.assertEqual(portbindings.VIF_TYPE_DISTRIBUTED, port['port'][portbindings.VIF_TYPE]) self.assertEqual('DOWN', port['port']['status']) # Get and verify binding details for host details = self.plugin.endpoints[0].get_device_details( ctx, agent_id="theAgentId", device=port_id, host='host-ovs-no_filter') self.assertEqual('local', details['network_type']) # Get port and verify VIF type and changed status. port = self._show('ports', port_id) self.assertEqual(portbindings.VIF_TYPE_DISTRIBUTED, port['port'][portbindings.VIF_TYPE]) self.assertEqual('BUILD', port['port']['status']) # Mark device up. self.plugin.endpoints[0].update_device_up( ctx, agent_id="theAgentId", device=port_id, host='host-ovs-no_filter') # Get port and verify VIF type and changed status. port = self._show('ports', port_id) self.assertEqual(portbindings.VIF_TYPE_DISTRIBUTED, port['port'][portbindings.VIF_TYPE]) self.assertEqual('ACTIVE', port['port']['status']) # Mark device down. self.plugin.endpoints[0].update_device_down( ctx, agent_id="theAgentId", device=port_id, host='host-ovs-no_filter') # Get port and verify VIF type and changed status. port = self._show('ports', port_id) self.assertEqual(portbindings.VIF_TYPE_DISTRIBUTED, port['port'][portbindings.VIF_TYPE]) self.assertEqual('DOWN', port['port']['status']) def test_dvr_binding_multi_host_status(self): ctx = context.get_admin_context() with self.port(device_owner=const.DEVICE_OWNER_DVR_INTERFACE) as port: port_id = port['port']['id'] # Update port to bind for 1st host. self.plugin.update_dvr_port_binding(ctx, port_id, {'port': { portbindings.HOST_ID: 'host-ovs-no_filter', 'device_id': 'router1'}}) # Mark 1st device up. self.plugin.endpoints[0].update_device_up( ctx, agent_id="theAgentId", device=port_id, host='host-ovs-no_filter') # Get port and verify status is ACTIVE. port = self._show('ports', port_id) self.assertEqual('ACTIVE', port['port']['status']) # Update port to bind for a 2nd host. self.plugin.update_dvr_port_binding(ctx, port_id, {'port': { portbindings.HOST_ID: 'host-bridge-filter', 'device_id': 'router1'}}) # Mark 2nd device up. self.plugin.endpoints[0].update_device_up( ctx, agent_id="the2ndAgentId", device=port_id, host='host-bridge-filter') # Get port and verify status unchanged. port = self._show('ports', port_id) self.assertEqual('ACTIVE', port['port']['status']) # Mark 1st device down. self.plugin.endpoints[0].update_device_down( ctx, agent_id="theAgentId", device=port_id, host='host-ovs-no_filter') # Get port and verify status unchanged. port = self._show('ports', port_id) self.assertEqual('ACTIVE', port['port']['status']) # Mark 2nd device down. 
self.plugin.endpoints[0].update_device_down( ctx, agent_id="the2ndAgentId", device=port_id, host='host-bridge-filter') # Get port and verify status is DOWN. port = self._show('ports', port_id) self.assertEqual('DOWN', port['port']['status']) def test_dvr_binding_update_unbound_host(self): ctx = context.get_admin_context() with self.port(device_owner=const.DEVICE_OWNER_DVR_INTERFACE) as port: port_id = port['port']['id'] # Mark device up without first binding on host. self.plugin.endpoints[0].update_device_up( ctx, agent_id="theAgentId", device=port_id, host='host-ovs-no_filter') # Get port and verify status is still DOWN. port = self._show('ports', port_id) self.assertEqual('DOWN', port['port']['status']) neutron-8.4.0/neutron/tests/unit/plugins/ml2/test_plugin.py0000664000567000056710000030517713044372760025276 0ustar jenkinsjenkins00000000000000# Copyright (c) 2013 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import functools import uuid import fixtures import mock import six import testtools import webob from oslo_db import exception as db_exc from oslo_utils import uuidutils from sqlalchemy.orm import exc as sqla_exc from neutron._i18n import _ from neutron.api.v2 import attributes as attrs from neutron.callbacks import events from neutron.callbacks import registry from neutron.callbacks import resources from neutron.common import constants from neutron.common import exceptions as exc from neutron.common import topics from neutron.common import utils from neutron import context from neutron.db import agents_db from neutron.db import api as db_api from neutron.db import db_base_plugin_v2 as base_plugin from neutron.db import l3_db from neutron.db import l3_hascheduler_db from neutron.db import models_v2 from neutron.extensions import availability_zone as az_ext from neutron.extensions import external_net from neutron.extensions import multiprovidernet as mpnet from neutron.extensions import portbindings from neutron.extensions import providernet as pnet from neutron import manager from neutron.plugins.common import constants as p_const from neutron.plugins.ml2.common import exceptions as ml2_exc from neutron.plugins.ml2 import config from neutron.plugins.ml2 import db as ml2_db from neutron.plugins.ml2 import driver_api from neutron.plugins.ml2 import driver_context from neutron.plugins.ml2.drivers import type_vlan from neutron.plugins.ml2 import models from neutron.plugins.ml2 import plugin as ml2_plugin from neutron.plugins.ml2 import rpc from neutron.services.l3_router import l3_router_plugin from neutron.services.qos import qos_consts from neutron.tests import base from neutron.tests.unit import _test_extension_portbindings as test_bindings from neutron.tests.unit.agent import test_securitygroups_rpc as test_sg_rpc from neutron.tests.unit.db import test_allowedaddresspairs_db as test_pair from neutron.tests.unit.db import test_db_base_plugin_v2 as test_plugin from neutron.tests.unit.db import test_ipam_pluggable_backend as test_ipam from 
neutron.tests.unit.extensions import test_extra_dhcp_opt as test_dhcpopts from neutron.tests.unit.plugins.ml2.drivers import mechanism_logger as \ mech_logger from neutron.tests.unit.plugins.ml2.drivers import mechanism_test as mech_test config.cfg.CONF.import_opt('network_vlan_ranges', 'neutron.plugins.ml2.drivers.type_vlan', group='ml2_type_vlan') PLUGIN_NAME = 'neutron.plugins.ml2.plugin.Ml2Plugin' DEVICE_OWNER_COMPUTE = constants.DEVICE_OWNER_COMPUTE_PREFIX + 'fake' HOST = 'fake_host' TEST_ROUTER_ID = 'router_id' # TODO(marun) - Move to somewhere common for reuse class PluginConfFixture(fixtures.Fixture): """Plugin configuration shared across the unit and functional tests.""" def __init__(self, plugin_name, parent_setup=None): super(PluginConfFixture, self).__init__() self.plugin_name = plugin_name self.parent_setup = parent_setup def _setUp(self): if self.parent_setup: self.parent_setup() class Ml2ConfFixture(PluginConfFixture): def __init__(self, parent_setup=None): super(Ml2ConfFixture, self).__init__(PLUGIN_NAME, parent_setup) class Ml2PluginV2TestCase(test_plugin.NeutronDbPluginV2TestCase): _mechanism_drivers = ['logger', 'test'] l3_plugin = ('neutron.tests.unit.extensions.test_l3.' 'TestL3NatServicePlugin') def setup_parent(self): """Perform parent setup with the common plugin configuration class.""" service_plugins = {'l3_plugin_name': self.l3_plugin} # Ensure that the parent setup can be called without arguments # by the common configuration setUp. parent_setup = functools.partial( super(Ml2PluginV2TestCase, self).setUp, plugin=PLUGIN_NAME, service_plugins=service_plugins, ) self.useFixture(Ml2ConfFixture(parent_setup)) self.port_create_status = 'DOWN' def setUp(self): # Enable the test mechanism driver to ensure that # we can successfully call through to all mechanism # driver apis. 
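# Unlike the port-binding fixture above, the driver list here comes from
# the _mechanism_drivers class attribute, so subclasses (for example the
# vlan-transparency cases below, which use ['test'] alone) can vary it
# without redefining setUp.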
config.cfg.CONF.set_override('mechanism_drivers', self._mechanism_drivers, group='ml2') self.physnet = 'physnet1' self.vlan_range = '1:100' self.vlan_range2 = '200:300' self.physnet2 = 'physnet2' self.phys_vrange = ':'.join([self.physnet, self.vlan_range]) self.phys2_vrange = ':'.join([self.physnet2, self.vlan_range2]) config.cfg.CONF.set_override('network_vlan_ranges', [self.phys_vrange, self.phys2_vrange], group='ml2_type_vlan') self.setup_parent() self.driver = ml2_plugin.Ml2Plugin() self.context = context.get_admin_context() class TestMl2BulkToggleWithoutBulkless(Ml2PluginV2TestCase): _mechanism_drivers = ['logger', 'test'] def test_bulk_enabled_with_bulk_drivers(self): self.assertFalse(self._skip_native_bulk) class TestMl2SupportedQosRuleTypes(Ml2PluginV2TestCase): def test_empty_driver_list(self, *mocks): mech_drivers_mock = mock.PropertyMock(return_value=[]) with mock.patch.object(self.driver.mechanism_manager, 'ordered_mech_drivers', new_callable=mech_drivers_mock): self.assertEqual( [], self.driver.mechanism_manager.supported_qos_rule_types) def test_no_rule_types_in_common(self): self.assertEqual( [], self.driver.mechanism_manager.supported_qos_rule_types) @mock.patch.object(mech_logger.LoggerMechanismDriver, 'supported_qos_rule_types', new_callable=mock.PropertyMock, create=True) @mock.patch.object(mech_test.TestMechanismDriver, 'supported_qos_rule_types', new_callable=mock.PropertyMock, create=True) def test_rule_type_in_common(self, *mocks): # make sure both plugins have the same supported qos rule types for mock_ in mocks: mock_.return_value = qos_consts.VALID_RULE_TYPES self.assertEqual( qos_consts.VALID_RULE_TYPES, self.driver.mechanism_manager.supported_qos_rule_types) @mock.patch.object(mech_test.TestMechanismDriver, 'supported_qos_rule_types', new_callable=mock.PropertyMock, return_value=qos_consts.VALID_RULE_TYPES, create=True) @mock.patch.object(mech_logger.LoggerMechanismDriver, '_supports_port_binding', new_callable=mock.PropertyMock, return_value=False) def test_rule_types_with_driver_that_does_not_implement_binding(self, *mocks): self.assertEqual( qos_consts.VALID_RULE_TYPES, self.driver.mechanism_manager.supported_qos_rule_types) class TestMl2BasicGet(test_plugin.TestBasicGet, Ml2PluginV2TestCase): pass class TestMl2V2HTTPResponse(test_plugin.TestV2HTTPResponse, Ml2PluginV2TestCase): pass class TestMl2NetworksV2(test_plugin.TestNetworksV2, Ml2PluginV2TestCase): def setUp(self, plugin=None): super(TestMl2NetworksV2, self).setUp() # provider networks self.pnets = [{'name': 'net1', pnet.NETWORK_TYPE: 'vlan', pnet.PHYSICAL_NETWORK: 'physnet1', pnet.SEGMENTATION_ID: 1, 'tenant_id': 'tenant_one'}, {'name': 'net2', pnet.NETWORK_TYPE: 'vlan', pnet.PHYSICAL_NETWORK: 'physnet2', pnet.SEGMENTATION_ID: 210, 'tenant_id': 'tenant_one'}, {'name': 'net3', pnet.NETWORK_TYPE: 'vlan', pnet.PHYSICAL_NETWORK: 'physnet2', pnet.SEGMENTATION_ID: 220, 'tenant_id': 'tenant_one'} ] # multiprovider networks self.mp_nets = [{'name': 'net4', mpnet.SEGMENTS: [{pnet.NETWORK_TYPE: 'vlan', pnet.PHYSICAL_NETWORK: 'physnet2', pnet.SEGMENTATION_ID: 1}, {pnet.NETWORK_TYPE: 'vlan', pnet.PHYSICAL_NETWORK: 'physnet2', pnet.SEGMENTATION_ID: 202}], 'tenant_id': 'tenant_one'} ] self.nets = self.mp_nets + self.pnets def test_port_delete_helper_tolerates_failure(self): plugin = manager.NeutronManager.get_plugin() with mock.patch.object(plugin, "delete_port", side_effect=exc.PortNotFound(port_id="123")): plugin._delete_ports(mock.MagicMock(), [mock.MagicMock()]) with mock.patch.object(plugin, "delete_port", 
side_effect=sqla_exc.ObjectDeletedError(None)): plugin._delete_ports(mock.MagicMock(), [mock.MagicMock()]) def test_subnet_delete_helper_tolerates_failure(self): plugin = manager.NeutronManager.get_plugin() with mock.patch.object(plugin, "delete_subnet", side_effect=exc.SubnetNotFound(subnet_id="1")): plugin._delete_subnets(mock.MagicMock(), [mock.MagicMock()]) with mock.patch.object(plugin, "delete_subnet", side_effect=sqla_exc.ObjectDeletedError(None)): plugin._delete_subnets(mock.MagicMock(), [mock.MagicMock()]) def _create_and_verify_networks(self, networks): for net_idx, net in enumerate(networks): # create req = self.new_create_request('networks', {'network': net}) # verify network = self.deserialize(self.fmt, req.get_response(self.api))['network'] if mpnet.SEGMENTS not in net: for k, v in six.iteritems(net): self.assertEqual(net[k], network[k]) self.assertNotIn(mpnet.SEGMENTS, network) else: segments = network[mpnet.SEGMENTS] expected_segments = net[mpnet.SEGMENTS] self.assertEqual(len(expected_segments), len(segments)) for expected, actual in zip(expected_segments, segments): self.assertEqual(expected, actual) def _lookup_network_by_segmentation_id(self, seg_id, num_expected_nets): params_str = "%s=%s" % (pnet.SEGMENTATION_ID, seg_id) net_req = self.new_list_request('networks', None, params=params_str) networks = self.deserialize(self.fmt, net_req.get_response(self.api)) if num_expected_nets: self.assertIsNotNone(networks) self.assertEqual(num_expected_nets, len(networks['networks'])) else: self.assertIsNone(networks) return networks def test_list_networks_with_segmentation_id(self): self._create_and_verify_networks(self.pnets) # verify we can find the network that we expect lookup_vlan_id = 1 expected_net = [n for n in self.pnets if n[pnet.SEGMENTATION_ID] == lookup_vlan_id].pop() networks = self._lookup_network_by_segmentation_id(lookup_vlan_id, 1) # verify all provider attributes network = networks['networks'][0] for attr in pnet.ATTRIBUTES: self.assertEqual(expected_net[attr], network[attr]) def test_list_mpnetworks_with_segmentation_id(self): self._create_and_verify_networks(self.nets) # get all networks with seg_id=1 (including multisegment networks) lookup_vlan_id = 1 networks = self._lookup_network_by_segmentation_id(lookup_vlan_id, 2) # get the mpnet networks = [n for n in networks['networks'] if mpnet.SEGMENTS in n] network = networks.pop() # verify attributes of the looked up item segments = network[mpnet.SEGMENTS] expected_segments = self.mp_nets[0][mpnet.SEGMENTS] self.assertEqual(len(expected_segments), len(segments)) for expected, actual in zip(expected_segments, segments): self.assertEqual(expected, actual) def test_create_network_segment_allocation_fails(self): plugin = manager.NeutronManager.get_plugin() with mock.patch.object( plugin.type_manager, 'create_network_segments', side_effect=db_exc.RetryRequest(ValueError()) ) as f: data = {'network': {'tenant_id': 'sometenant', 'name': 'dummy', 'admin_state_up': True, 'shared': False}} req = self.new_create_request('networks', data) res = req.get_response(self.api) self.assertEqual(500, res.status_int) self.assertEqual(db_api.MAX_RETRIES + 1, f.call_count) class TestExternalNetwork(Ml2PluginV2TestCase): def _create_external_network(self): data = {'network': {'name': 'net1', 'router:external': 'True', 'tenant_id': 'tenant_one'}} network_req = self.new_create_request('networks', data) network = self.deserialize(self.fmt, network_req.get_response(self.api)) return network def test_external_network_type_none(self): 
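# Note that set_default (rather than set_override) is used below: it only
# replaces the option's registered default, which is sufficient here since
# nothing else assigns external_network_type in these tests.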
config.cfg.CONF.set_default('external_network_type', None, group='ml2') network = self._create_external_network() # For external network, expected network type to be # tenant_network_types which is by default 'local'. self.assertEqual(p_const.TYPE_LOCAL, network['network'][pnet.NETWORK_TYPE]) # No physical network specified, expected 'None'. self.assertIsNone(network['network'][pnet.PHYSICAL_NETWORK]) # External network will not have a segmentation id. self.assertIsNone(network['network'][pnet.SEGMENTATION_ID]) # External network will not have multiple segments. self.assertNotIn(mpnet.SEGMENTS, network['network']) def test_external_network_type_vlan(self): config.cfg.CONF.set_default('external_network_type', p_const.TYPE_VLAN, group='ml2') network = self._create_external_network() # For external network, expected network type to be 'vlan'. self.assertEqual(p_const.TYPE_VLAN, network['network'][pnet.NETWORK_TYPE]) # Physical network is expected. self.assertIsNotNone(network['network'][pnet.PHYSICAL_NETWORK]) # External network will have a segmentation id. self.assertIsNotNone(network['network'][pnet.SEGMENTATION_ID]) # External network will not have multiple segments. self.assertNotIn(mpnet.SEGMENTS, network['network']) class TestMl2NetworksWithVlanTransparencyBase(TestMl2NetworksV2): data = {'network': {'name': 'net1', mpnet.SEGMENTS: [{pnet.NETWORK_TYPE: 'vlan', pnet.PHYSICAL_NETWORK: 'physnet1'}], 'tenant_id': 'tenant_one', 'vlan_transparent': 'True'}} def setUp(self, plugin=None): config.cfg.CONF.set_override('vlan_transparent', True) super(TestMl2NetworksWithVlanTransparencyBase, self).setUp(plugin) class TestMl2NetworksWithVlanTransparency( TestMl2NetworksWithVlanTransparencyBase): _mechanism_drivers = ['test'] def test_create_network_vlan_transparent_fail(self): with mock.patch.object(mech_test.TestMechanismDriver, 'check_vlan_transparency', return_value=False): network_req = self.new_create_request('networks', self.data) res = network_req.get_response(self.api) self.assertEqual(500, res.status_int) error_result = self.deserialize(self.fmt, res)['NeutronError'] self.assertEqual("VlanTransparencyDriverError", error_result['type']) def test_create_network_vlan_transparent(self): with mock.patch.object(mech_test.TestMechanismDriver, 'check_vlan_transparency', return_value=True): network_req = self.new_create_request('networks', self.data) res = network_req.get_response(self.api) self.assertEqual(201, res.status_int) network = self.deserialize(self.fmt, res)['network'] self.assertIn('vlan_transparent', network) class TestMl2NetworksWithVlanTransparencyAndMTU( TestMl2NetworksWithVlanTransparencyBase): _mechanism_drivers = ['test'] def test_create_network_vlan_transparent_and_mtu(self): with mock.patch.object(mech_test.TestMechanismDriver, 'check_vlan_transparency', return_value=True): config.cfg.CONF.set_override('path_mtu', 1000, group='ml2') config.cfg.CONF.set_override('global_physnet_mtu', 1000) config.cfg.CONF.set_override('advertise_mtu', True) network_req = self.new_create_request('networks', self.data) res = network_req.get_response(self.api) self.assertEqual(201, res.status_int) network = self.deserialize(self.fmt, res)['network'] self.assertEqual(1000, network['mtu']) self.assertIn('vlan_transparent', network) self.assertTrue(network['vlan_transparent']) class TestMl2NetworksWithAvailabilityZone(TestMl2NetworksV2): def test_create_network_availability_zone(self): az_hints = ['az1', 'az2'] data = {'network': {'name': 'net1', 
az_ext.AZ_HINTS: az_hints, 'tenant_id': 'tenant_one'}} with mock.patch.object(agents_db.AgentAvailabilityZoneMixin, 'validate_availability_zones'): network_req = self.new_create_request('networks', data) res = network_req.get_response(self.api) self.assertEqual(201, res.status_int) network = self.deserialize(self.fmt, res)['network'] self.assertEqual(az_hints, network[az_ext.AZ_HINTS]) class TestMl2SubnetsV2(test_plugin.TestSubnetsV2, Ml2PluginV2TestCase): def test_delete_subnet_race_with_dhcp_port_creation(self): with self.network() as network: with self.subnet(network=network) as subnet: subnet_id = subnet['subnet']['id'] attempt = [0] def check_and_create_ports(context, subnet_id): """A method to emulate a race condition. Adds a DHCP port in the middle of the subnet delete. """ if attempt[0] > 0: return False attempt[0] += 1 data = {'port': {'network_id': network['network']['id'], 'tenant_id': network['network']['tenant_id'], 'name': 'port1', 'admin_state_up': 1, 'device_owner': constants.DEVICE_OWNER_DHCP, 'fixed_ips': [{'subnet_id': subnet_id}]}} port_req = self.new_create_request('ports', data) port_res = port_req.get_response(self.api) self.assertEqual(201, port_res.status_int) return (context.session.query(models_v2.IPAllocation). filter_by(subnet_id=subnet_id). join(models_v2.Port).first()) plugin = manager.NeutronManager.get_plugin() # We mock _subnet_check_ip_allocations with a method that creates # a DHCP port 'in the middle' of subnet_delete, causing a retry; # this way the subnet is deleted on the second attempt. with mock.patch.object(plugin, '_subnet_check_ip_allocations', side_effect=check_and_create_ports): req = self.new_delete_request('subnets', subnet_id) res = req.get_response(self.api) self.assertEqual(204, res.status_int) # Validate that the check is called twice, i.e. after the # first check the transaction gets restarted. calls = [mock.call(mock.ANY, subnet_id), mock.call(mock.ANY, subnet_id)] plugin._subnet_check_ip_allocations.assert_has_calls( calls) class TestMl2DbOperationBounds(test_plugin.DbOperationBoundMixin, Ml2PluginV2TestCase): """Test cases to assert constant query count for list operations. These test cases assert that an increase in the number of objects does not result in an increase of the number of db operations. All database lookups during a list operation should be performed in bulk so the number of queries required for 2 objects instead of 1 should stay the same. 
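For example, listing two networks must not issue more queries than listing one; a per-object lookup inside the listing loop would make the query count grow with the result size and trip these assertions. 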
""" def setUp(self): super(TestMl2DbOperationBounds, self).setUp() self.kwargs = self.get_api_kwargs() def make_network(self): return self._make_network(self.fmt, 'name', True, **self.kwargs) def make_subnet(self): net = self.make_network() setattr(self, '_subnet_count', getattr(self, '_subnet_count', 0) + 1) cidr = '1.%s.0.0/24' % self._subnet_count return self._make_subnet(self.fmt, net, None, cidr, **self.kwargs) def make_port(self): net = self.make_network() return self._make_port(self.fmt, net['network']['id'], **self.kwargs) def test_network_list_queries_constant(self): self._assert_object_list_queries_constant(self.make_network, 'networks') def test_subnet_list_queries_constant(self): self._assert_object_list_queries_constant(self.make_subnet, 'subnets') def test_port_list_queries_constant(self): self._assert_object_list_queries_constant(self.make_port, 'ports') class TestMl2DbOperationBoundsTenant(TestMl2DbOperationBounds): admin = False class TestMl2PortsV2(test_plugin.TestPortsV2, Ml2PluginV2TestCase): def test_update_port_status_build(self): with self.port() as port: self.assertEqual('DOWN', port['port']['status']) self.assertEqual('DOWN', self.port_create_status) def test_update_port_status_short_id(self): ctx = context.get_admin_context() plugin = manager.NeutronManager.get_plugin() with self.port() as port: with mock.patch.object(ml2_db, 'get_binding_levels', return_value=[]) as mock_gbl: port_id = port['port']['id'] short_id = port_id[:11] plugin.update_port_status(ctx, short_id, 'UP') mock_gbl.assert_called_once_with(mock.ANY, port_id, mock.ANY) def test_update_port_fixed_ip_changed(self): ctx = context.get_admin_context() plugin = manager.NeutronManager.get_plugin() with self.port() as port, mock.patch.object( plugin.notifier, 'security_groups_member_updated') as sg_member_update: port['port']['fixed_ips'][0]['ip_address'] = '10.0.0.3' plugin.update_port(ctx, port['port']['id'], port) self.assertTrue(sg_member_update.called) def test_update_port_status_with_network(self): ctx = context.get_admin_context() plugin = manager.NeutronManager.get_plugin() with self.port() as port: net = plugin.get_network(ctx, port['port']['network_id']) with mock.patch.object(plugin, 'get_network') as get_net: plugin.update_port_status(ctx, port['port']['id'], 'UP', network=net) self.assertFalse(get_net.called) def test_update_port_mac(self): self.check_update_port_mac( host_arg={portbindings.HOST_ID: HOST}, arg_list=(portbindings.HOST_ID,)) def test_update_non_existent_port(self): ctx = context.get_admin_context() plugin = manager.NeutronManager.get_plugin() data = {'port': {'admin_state_up': False}} self.assertRaises(exc.PortNotFound, plugin.update_port, ctx, 'invalid-uuid', data) def test_delete_non_existent_port(self): ctx = context.get_admin_context() plugin = manager.NeutronManager.get_plugin() with mock.patch.object(ml2_plugin.LOG, 'debug') as log_debug: plugin.delete_port(ctx, 'invalid-uuid', l3_port_check=False) log_debug.assert_has_calls([ mock.call(_("Deleting port %s"), 'invalid-uuid'), mock.call(_("The port '%s' was deleted"), 'invalid-uuid') ]) def test_l3_cleanup_on_net_delete(self): l3plugin = manager.NeutronManager.get_service_plugins().get( p_const.L3_ROUTER_NAT) kwargs = {'arg_list': (external_net.EXTERNAL,), external_net.EXTERNAL: True} with self.network(**kwargs) as n: with self.subnet(network=n, cidr='200.0.0.0/22'): l3plugin.create_floatingip( context.get_admin_context(), {'floatingip': {'floating_network_id': n['network']['id'], 'tenant_id': n['network']['tenant_id'], 
'dns_name': '', 'dns_domain': ''}} ) self._delete('networks', n['network']['id']) flips = l3plugin.get_floatingips(context.get_admin_context()) self.assertFalse(flips) def test_create_ports_bulk_port_binding_failure(self): ctx = context.get_admin_context() with self.network() as net: plugin = manager.NeutronManager.get_plugin() with mock.patch.object(plugin, '_bind_port_if_needed', side_effect=ml2_exc.MechanismDriverError( method='create_port_bulk')) as _bind_port_if_needed: res = self._create_port_bulk(self.fmt, 2, net['network']['id'], 'test', True, context=ctx) self.assertTrue(_bind_port_if_needed.called) # We expect a 500 as we injected a fault in the plugin self._validate_behavior_on_bulk_failure( res, 'ports', webob.exc.HTTPServerError.code) def test_create_ports_bulk_with_sec_grp(self): ctx = context.get_admin_context() plugin = manager.NeutronManager.get_plugin() with self.network() as net,\ mock.patch.object(plugin.notifier, 'security_groups_member_updated') as m_upd,\ mock.patch.object(plugin.notifier, 'security_groups_provider_updated') as p_upd: res = self._create_port_bulk(self.fmt, 3, net['network']['id'], 'test', True, context=ctx) ports = self.deserialize(self.fmt, res) used_sg = ports['ports'][0]['security_groups'] m_upd.assert_called_once_with(ctx, used_sg) self.assertFalse(p_upd.called) def _check_security_groups_provider_updated_args(self, p_upd_mock, net_id): query_params = "network_id=%s" % net_id network_ports = self._list('ports', query_params=query_params) network_ports_ids = [port['id'] for port in network_ports['ports']] self.assertTrue(p_upd_mock.called) p_upd_args = p_upd_mock.call_args ports_ids = p_upd_args[0][1] self.assertEqual(sorted(network_ports_ids), sorted(ports_ids)) def test_create_ports_bulk_with_sec_grp_member_provider_update(self): ctx = context.get_admin_context() plugin = manager.NeutronManager.get_plugin() with self.network() as net,\ mock.patch.object(plugin.notifier, 'security_groups_member_updated') as m_upd,\ mock.patch.object(plugin.notifier, 'security_groups_provider_updated') as p_upd: net_id = net['network']['id'] data = [{ 'network_id': net_id, 'tenant_id': self._tenant_id }, { 'network_id': net_id, 'tenant_id': self._tenant_id, 'device_owner': constants.DEVICE_OWNER_DHCP } ] res = self._create_bulk_from_list(self.fmt, 'port', data, context=ctx) ports = self.deserialize(self.fmt, res) used_sg = ports['ports'][0]['security_groups'] m_upd.assert_called_once_with(ctx, used_sg) self._check_security_groups_provider_updated_args(p_upd, net_id) m_upd.reset_mock() p_upd.reset_mock() data[0]['device_owner'] = constants.DEVICE_OWNER_DHCP self._create_bulk_from_list(self.fmt, 'port', data, context=ctx) self.assertFalse(m_upd.called) self._check_security_groups_provider_updated_args(p_upd, net_id) def test_create_ports_bulk_with_sec_grp_provider_update_ipv6(self): ctx = context.get_admin_context() plugin = manager.NeutronManager.get_plugin() fake_prefix = '2001:db8::/64' fake_gateway = 'fe80::1' with self.network() as net: with self.subnet(net, gateway_ip=fake_gateway, cidr=fake_prefix, ip_version=6) as snet_v6,\ mock.patch.object( plugin.notifier, 'security_groups_member_updated') as m_upd,\ mock.patch.object( plugin.notifier, 'security_groups_provider_updated') as p_upd: net_id = net['network']['id'] data = [{ 'network_id': net_id, 'tenant_id': self._tenant_id, 'fixed_ips': [{'subnet_id': snet_v6['subnet']['id']}], 'device_owner': constants.DEVICE_OWNER_ROUTER_INTF } ] self._create_bulk_from_list(self.fmt, 'port', data, context=ctx) 
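# Note: a router interface port is not a security-group member, so no
# member_updated notification is expected here; the provider rules for
# the IPv6 subnet do change (plausibly the RA/DHCP-related provider
# rules sourced from the router interface), hence the provider_updated
# check that follows.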
self.assertFalse(m_upd.called) self._check_security_groups_provider_updated_args( p_upd, net_id) def test_delete_port_no_notify_in_disassociate_floatingips(self): ctx = context.get_admin_context() plugin = manager.NeutronManager.get_plugin() l3plugin = manager.NeutronManager.get_service_plugins().get( p_const.L3_ROUTER_NAT) with self.port() as port,\ mock.patch.object( l3plugin, 'disassociate_floatingips') as disassociate_floatingips,\ mock.patch.object(registry, 'notify') as notify: port_id = port['port']['id'] plugin.delete_port(ctx, port_id) # check that no notification was requested while under # transaction disassociate_floatingips.assert_has_calls([ mock.call(ctx, port_id, do_notify=False) ]) # check that the notifier was still triggered self.assertTrue(notify.call_count) def test_check_if_compute_port_serviced_by_dvr(self): self.assertTrue(utils.is_dvr_serviced(DEVICE_OWNER_COMPUTE)) def test_check_if_lbaas_vip_port_serviced_by_dvr(self): self.assertTrue(utils.is_dvr_serviced( constants.DEVICE_OWNER_LOADBALANCER)) def test_check_if_lbaasv2_vip_port_serviced_by_dvr(self): self.assertTrue(utils.is_dvr_serviced( constants.DEVICE_OWNER_LOADBALANCERV2)) def test_check_if_dhcp_port_serviced_by_dvr(self): self.assertTrue(utils.is_dvr_serviced(constants.DEVICE_OWNER_DHCP)) def test_check_if_port_not_serviced_by_dvr(self): self.assertFalse(utils.is_dvr_serviced( constants.DEVICE_OWNER_ROUTER_INTF)) def test_disassociate_floatingips_do_notify_returns_nothing(self): ctx = context.get_admin_context() l3plugin = manager.NeutronManager.get_service_plugins().get( p_const.L3_ROUTER_NAT) with self.port() as port: port_id = port['port']['id'] # check that nothing is returned when notifications are handled # by the called method self.assertIsNone(l3plugin.disassociate_floatingips(ctx, port_id)) def test_create_port_tolerates_db_deadlock(self): with self.network() as net: with self.subnet(network=net) as subnet: _orig = ml2_db.get_locked_port_and_binding self._failed = False def fail_once(*args, **kwargs): if not self._failed: self._failed = True raise db_exc.DBDeadlock() return _orig(*args, **kwargs) with mock.patch('neutron.plugins.ml2.plugin.' 'db.get_locked_port_and_binding', side_effect=fail_once) as get_port_mock: port_kwargs = {portbindings.HOST_ID: 'host1', 'subnet': subnet, 'device_id': 'deadlocktest'} with self.port(arg_list=(portbindings.HOST_ID,), **port_kwargs) as port: self.assertTrue(port['port']['id']) self.assertTrue(get_port_mock.called) # make sure that we didn't create more than one port on # the retry query_params = "network_id=%s" % net['network']['id'] query_params += "&device_id=%s" % 'deadlocktest' ports = self._list('ports', query_params=query_params) self.assertEqual(1, len(ports['ports'])) def test_delete_port_tolerates_db_deadlock(self): ctx = context.get_admin_context() plugin = manager.NeutronManager.get_plugin() with self.port() as port: port_db, binding = ml2_db.get_locked_port_and_binding( ctx.session, port['port']['id']) with mock.patch('neutron.plugins.ml2.plugin.'
'db.get_locked_port_and_binding') as lock: lock.side_effect = [db_exc.DBDeadlock, (port_db, binding)] req = self.new_delete_request('ports', port['port']['id']) res = req.get_response(self.api) self.assertEqual(204, res.status_int) self.assertEqual(2, lock.call_count) self.assertRaises( exc.PortNotFound, plugin.get_port, ctx, port['port']['id']) def test_port_create_resilient_to_duplicate_records(self): def make_port(): with self.port(): pass self._test_operation_resilient_to_ipallocation_failure(make_port) def test_port_update_resilient_to_duplicate_records(self): with self.port() as p: data = {'port': {'fixed_ips': [{'ip_address': '10.0.0.9'}]}} req = self.new_update_request('ports', data, p['port']['id']) def do_request(): self.assertEqual(200, req.get_response(self.api).status_int) self._test_operation_resilient_to_ipallocation_failure(do_request) def _test_operation_resilient_to_ipallocation_failure(self, func): from sqlalchemy import event class IPAllocationsGrenade(object): insert_ip_called = False except_raised = False def execute(self, con, curs, stmt, *args, **kwargs): if 'INSERT INTO ipallocations' in stmt: self.insert_ip_called = True def commit(self, con): # we blow up on commit to simulate another thread/server # stealing our IP before our transaction was done if self.insert_ip_called and not self.except_raised: self.except_raised = True raise db_exc.DBDuplicateEntry() listener = IPAllocationsGrenade() engine = db_api.get_engine() event.listen(engine, 'before_cursor_execute', listener.execute) event.listen(engine, 'commit', listener.commit) self.addCleanup(event.remove, engine, 'before_cursor_execute', listener.execute) self.addCleanup(event.remove, engine, 'commit', listener.commit) func() # make sure that the grenade went off during the commit self.assertTrue(listener.except_raised) class TestMl2PortsV2WithL3(test_plugin.TestPortsV2, Ml2PluginV2TestCase): """For testing methods that require the L3 service plugin.""" def test_update_port_status_notify_port_event_after_update(self): # Check whether _notify_l3_agent_ha_port_update raises an # exception (leaves a traceback) _orig = l3_hascheduler_db._notify_l3_agent_ha_port_update self.raised = False def _mock_notify_l3_agent_ha_port_update(*args, **kwargs): try: _orig(*args, **kwargs) except Exception: self.raised = True # register a wrapper for _notify_l3_agent_ha_port_update to catch # any exception raised in that function def mock_subscribe(): registry.subscribe( _mock_notify_l3_agent_ha_port_update, resources.PORT, events.AFTER_UPDATE) mock.patch.object(l3_hascheduler_db, 'subscribe', side_effect=mock_subscribe).start() ctx = context.get_admin_context() plugin = manager.NeutronManager.get_plugin() notifier = rpc.AgentNotifierApi(topics.AGENT) self.plugin_rpc = rpc.RpcCallbacks(notifier, plugin.type_manager) # enable subscription for events l3_router_plugin.L3RouterPlugin() l3plugin = manager.NeutronManager.get_service_plugins().get( p_const.L3_ROUTER_NAT) with self.subnet() as subnet,\ mock.patch.object(l3plugin.l3_rpc_notifier, 'routers_updated_on_host') as mock_updated: port_data = { 'port': {'name': 'test1', 'network_id': subnet['subnet']['network_id'], 'tenant_id': self._tenant_id, 'admin_state_up': True, 'device_id': TEST_ROUTER_ID, 'device_owner': constants.DEVICE_OWNER_ROUTER_HA_INTF, 'mac_address': attrs.ATTR_NOT_SPECIFIED, 'fixed_ips': attrs.ATTR_NOT_SPECIFIED}} port = self.plugin.create_port(ctx, port_data) self.plugin.update_port( ctx, port['id'], {'port': {portbindings.HOST_ID: HOST}}) self.plugin_rpc.update_device_up( ctx, agent_id="theAgentId",
device=port['id'], host=HOST) mock_updated.assert_called_once_with( mock.ANY, [TEST_ROUTER_ID], HOST) # Fail if _notify_l3_agent_ha_port_update raised an exception self.assertFalse(self.raised) class TestMl2PluginOnly(Ml2PluginV2TestCase): """For testing methods that don't call drivers""" def test__verify_service_plugins_requirements(self): plugin = manager.NeutronManager.get_plugin() with mock.patch.dict(ml2_plugin.SERVICE_PLUGINS_REQUIRED_DRIVERS, {self.l3_plugin: self._mechanism_drivers}),\ mock.patch.object(plugin.extension_manager, 'names', return_value=self._mechanism_drivers): plugin._verify_service_plugins_requirements() def test__verify_service_plugins_requirements_missing_driver(self): plugin = manager.NeutronManager.get_plugin() with mock.patch.dict(ml2_plugin.SERVICE_PLUGINS_REQUIRED_DRIVERS, {self.l3_plugin: ['test_required_driver']}),\ mock.patch.object(plugin.extension_manager, 'names', return_value=self._mechanism_drivers): self.assertRaises( ml2_exc.ExtensionDriverNotFound, plugin._verify_service_plugins_requirements ) def _test_check_mac_update_allowed(self, vif_type, expect_change=True): plugin = manager.NeutronManager.get_plugin() port = {'mac_address': "fake_mac", 'id': "fake_id"} if expect_change: new_attrs = {"mac_address": "dummy_mac"} else: new_attrs = {"mac_address": port['mac_address']} binding = mock.Mock() binding.vif_type = vif_type mac_changed = plugin._check_mac_update_allowed(port, new_attrs, binding) self.assertEqual(expect_change, mac_changed) def test_check_mac_update_allowed_if_no_mac_change(self): self._test_check_mac_update_allowed(portbindings.VIF_TYPE_UNBOUND, expect_change=False) def test_check_mac_update_allowed_unless_bound(self): with testtools.ExpectedException(exc.PortBound): self._test_check_mac_update_allowed(portbindings.VIF_TYPE_OVS) def test__device_to_port_id_prefix_names(self): input_output = [('sg-abcdefg', 'abcdefg'), ('tap123456', '123456'), ('qvo567890', '567890')] for device, expected in input_output: self.assertEqual(expected, ml2_plugin.Ml2Plugin._device_to_port_id( self.context, device)) def test__device_to_port_id_mac_address(self): with self.port() as p: mac = p['port']['mac_address'] port_id = p['port']['id'] self.assertEqual(port_id, ml2_plugin.Ml2Plugin._device_to_port_id( self.context, mac)) def test__device_to_port_id_not_uuid_not_mac(self): dev = '1234567' self.assertEqual(dev, ml2_plugin.Ml2Plugin._device_to_port_id( self.context, dev)) def test__device_to_port_id_UUID(self): port_id = uuidutils.generate_uuid() self.assertEqual(port_id, ml2_plugin.Ml2Plugin._device_to_port_id( self.context, port_id)) class TestMl2DvrPortsV2(TestMl2PortsV2): def setUp(self): super(TestMl2DvrPortsV2, self).setUp() extensions = ['router', constants.L3_AGENT_SCHEDULER_EXT_ALIAS, constants.L3_DISTRIBUTED_EXT_ALIAS] self.plugin = manager.NeutronManager.get_plugin() self.l3plugin = mock.Mock() type(self.l3plugin).supported_extension_aliases = ( mock.PropertyMock(return_value=extensions)) self.service_plugins = {'L3_ROUTER_NAT': self.l3plugin} def test_delete_port_notifies_l3_plugin(self, floating_ip=False): ns_to_delete = {'host': 'myhost', 'agent_id': 'vm_l3_agent', 'router_id': 'my_router'} router_ids = set() if floating_ip: router_ids.add(ns_to_delete['router_id']) with mock.patch.object(manager.NeutronManager, 'get_service_plugins', return_value=self.service_plugins),\ self.port() as port,\ mock.patch.object(registry, 'notify') as notify,\ mock.patch.object(self.l3plugin, 'disassociate_floatingips', return_value=router_ids): port_id =
port['port']['id'] self.plugin.delete_port(self.context, port_id) self.assertEqual(2, notify.call_count) # needed for a full match in the assertion below port['port']['extra_dhcp_opts'] = [] expected = [mock.call(resources.PORT, events.BEFORE_DELETE, mock.ANY, context=self.context, port_id=port['port']['id'], port_check=True), mock.call(resources.PORT, events.AFTER_DELETE, mock.ANY, context=self.context, port=port['port'], router_ids=router_ids)] notify.assert_has_calls(expected) def test_delete_port_with_floatingip_notifies_l3_plugin(self): self.test_delete_port_notifies_l3_plugin(floating_ip=True) def test_concurrent_csnat_port_delete(self): plugin = manager.NeutronManager.get_service_plugins()[ p_const.L3_ROUTER_NAT] r = plugin.create_router( self.context, {'router': {'name': 'router', 'admin_state_up': True, 'tenant_id': self.context.tenant_id}}) with self.subnet() as s: p = plugin.add_router_interface(self.context, r['id'], {'subnet_id': s['subnet']['id']}) # lie to turn the port into an SNAT interface with self.context.session.begin(): rp = self.context.session.query(l3_db.RouterPort).filter_by( port_id=p['port_id']).first() rp.port_type = constants.DEVICE_OWNER_ROUTER_SNAT # take the port away before csnat gets a chance to delete it # to simulate a concurrent delete orig_get_ports = plugin._core_plugin.get_ports def get_ports_with_delete_first(*args, **kwargs): plugin._core_plugin.delete_port(self.context, p['port_id'], l3_port_check=False) return orig_get_ports(*args, **kwargs) plugin._core_plugin.get_ports = get_ports_with_delete_first # This should be able to handle a concurrent delete without raising # an exception router = plugin._get_router(self.context, r['id']) plugin.delete_csnat_router_interface_ports(self.context, router) class TestMl2PortBinding(Ml2PluginV2TestCase, test_bindings.PortBindingsTestCase): # Test case does not set binding:host_id, so ml2 does not attempt # to bind port VIF_TYPE = portbindings.VIF_TYPE_UNBOUND HAS_PORT_FILTER = False ENABLE_SG = True FIREWALL_DRIVER = test_sg_rpc.FIREWALL_HYBRID_DRIVER def setUp(self, firewall_driver=None): test_sg_rpc.set_firewall_driver(self.FIREWALL_DRIVER) config.cfg.CONF.set_override( 'enable_security_group', self.ENABLE_SG, group='SECURITYGROUP') super(TestMl2PortBinding, self).setUp() def _check_port_binding_profile(self, port, profile=None): self.assertIn('id', port) self.assertIn(portbindings.PROFILE, port) value = port[portbindings.PROFILE] self.assertEqual(profile or {}, value) def test_create_port_binding_profile(self): self._test_create_port_binding_profile({'a': 1, 'b': 2}) def test_update_port_binding_profile(self): self._test_update_port_binding_profile({'c': 3}) def test_create_port_binding_profile_too_big(self): s = 'x' * 5000 profile_arg = {portbindings.PROFILE: {'d': s}} try: with self.port(expected_res_status=400, arg_list=(portbindings.PROFILE,), **profile_arg): pass except webob.exc.HTTPClientError: pass def test_remove_port_binding_profile(self): profile = {'e': 5} profile_arg = {portbindings.PROFILE: profile} with self.port(arg_list=(portbindings.PROFILE,), **profile_arg) as port: self._check_port_binding_profile(port['port'], profile) port_id = port['port']['id'] profile_arg = {portbindings.PROFILE: None} port = self._update('ports', port_id, {'port': profile_arg})['port'] self._check_port_binding_profile(port) port = self._show('ports', port_id)['port'] self._check_port_binding_profile(port) def test_return_on_concurrent_delete_and_binding(self): # create a port and delete it so we have an expired 
mechanism context with self.port() as port: plugin = manager.NeutronManager.get_plugin() binding = ml2_db.get_locked_port_and_binding(self.context.session, port['port']['id'])[1] binding['host'] = 'test' mech_context = driver_context.PortContext( plugin, self.context, port['port'], plugin.get_network(self.context, port['port']['network_id']), binding, None) with mock.patch( 'neutron.plugins.ml2.plugin.' 'db.get_locked_port_and_binding', return_value=(None, None)) as glpab_mock,\ mock.patch('neutron.plugins.ml2.plugin.Ml2Plugin.' '_make_port_dict') as mpd_mock: plugin._bind_port_if_needed(mech_context) # called during deletion to get port self.assertTrue(glpab_mock.mock_calls) # should have returned before calling _make_port_dict self.assertFalse(mpd_mock.mock_calls) def _create_port_and_bound_context(self, port_vif_type, bound_vif_type): with self.port() as port: plugin = manager.NeutronManager.get_plugin() binding = ml2_db.get_locked_port_and_binding(self.context.session, port['port']['id'])[1] binding['host'] = 'fake_host' binding['vif_type'] = port_vif_type # Generates port context to be used before the bind. port_context = driver_context.PortContext( plugin, self.context, port['port'], plugin.get_network(self.context, port['port']['network_id']), binding, None) bound_context = mock.MagicMock() # Bound context is how port_context is expected to look # after _bind_port. bound_context.vif_type = bound_vif_type return plugin, port_context, bound_context def test__attempt_binding(self): # Simulate a successful binding for vif_type unbound # and keep the same binding state for other vif types. vif_types = [(portbindings.VIF_TYPE_BINDING_FAILED, portbindings.VIF_TYPE_BINDING_FAILED), (portbindings.VIF_TYPE_UNBOUND, portbindings.VIF_TYPE_OVS), (portbindings.VIF_TYPE_OVS, portbindings.VIF_TYPE_OVS)] for port_vif_type, bound_vif_type in vif_types: plugin, port_context, bound_context = ( self._create_port_and_bound_context(port_vif_type, bound_vif_type)) with mock.patch('neutron.plugins.ml2.plugin.Ml2Plugin._bind_port', return_value=bound_context) as bd_mock: context, need_notify, try_again = (plugin._attempt_binding( port_context, False)) expected_need_notify = port_vif_type not in ( portbindings.VIF_TYPE_BINDING_FAILED, portbindings.VIF_TYPE_OVS) if bound_vif_type == portbindings.VIF_TYPE_BINDING_FAILED: expected_vif_type = port_vif_type expected_try_again = True expected_bd_mock_called = True else: expected_vif_type = portbindings.VIF_TYPE_OVS expected_try_again = False expected_bd_mock_called = (port_vif_type == portbindings.VIF_TYPE_UNBOUND) self.assertEqual(expected_need_notify, need_notify) self.assertEqual(expected_vif_type, context.vif_type) self.assertEqual(expected_try_again, try_again) self.assertEqual(expected_bd_mock_called, bd_mock.called) def test__attempt_binding_retries(self): # Simulate cases of both successful and failed binding states for # vif_type unbound vif_types = [(portbindings.VIF_TYPE_UNBOUND, portbindings.VIF_TYPE_BINDING_FAILED), (portbindings.VIF_TYPE_UNBOUND, portbindings.VIF_TYPE_OVS)] for port_vif_type, bound_vif_type in vif_types: plugin, port_context, bound_context = ( self._create_port_and_bound_context(port_vif_type, bound_vif_type)) with mock.patch( 'neutron.plugins.ml2.plugin.Ml2Plugin._bind_port', return_value=bound_context),\ mock.patch('neutron.plugins.ml2.plugin.Ml2Plugin._commit_' 'port_binding', return_value=(bound_context, True, False)),\ mock.patch('neutron.plugins.ml2.plugin.Ml2Plugin.' 
'_attempt_binding', side_effect=plugin._attempt_binding) as at_mock: plugin._bind_port_if_needed(port_context) if bound_vif_type == portbindings.VIF_TYPE_BINDING_FAILED: # An unsuccessful binding attempt should be retried # MAX_BIND_TRIES times. self.assertEqual(ml2_plugin.MAX_BIND_TRIES, at_mock.call_count) else: # Successful binding should only be attempted once. self.assertEqual(1, at_mock.call_count) def test_port_binding_profile_not_changed(self): profile = {'e': 5} profile_arg = {portbindings.PROFILE: profile} with self.port(arg_list=(portbindings.PROFILE,), **profile_arg) as port: self._check_port_binding_profile(port['port'], profile) port_id = port['port']['id'] state_arg = {'admin_state_up': True} port = self._update('ports', port_id, {'port': state_arg})['port'] self._check_port_binding_profile(port, profile) port = self._show('ports', port_id)['port'] self._check_port_binding_profile(port, profile) def test_update_port_binding_host_id_none(self): with self.port() as port: plugin = manager.NeutronManager.get_plugin() binding = ml2_db.get_locked_port_and_binding(self.context.session, port['port']['id'])[1] binding['host'] = 'test' mech_context = driver_context.PortContext( plugin, self.context, port['port'], plugin.get_network(self.context, port['port']['network_id']), binding, None) with mock.patch('neutron.plugins.ml2.plugin.Ml2Plugin.' '_update_port_dict_binding') as update_mock: attrs = {portbindings.HOST_ID: None} plugin._process_port_binding(mech_context, attrs) self.assertTrue(update_mock.mock_calls) self.assertEqual('', binding.host) def test_update_port_binding_host_id_not_changed(self): with self.port() as port: plugin = manager.NeutronManager.get_plugin() binding = ml2_db.get_locked_port_and_binding(self.context.session, port['port']['id'])[1] binding['host'] = 'test' mech_context = driver_context.PortContext( plugin, self.context, port['port'], plugin.get_network(self.context, port['port']['network_id']), binding, None) with mock.patch('neutron.plugins.ml2.plugin.Ml2Plugin.'
'_update_port_dict_binding') as update_mock: attrs = {portbindings.PROFILE: {'e': 5}} plugin._process_port_binding(mech_context, attrs) self.assertTrue(update_mock.mock_calls) self.assertEqual('test', binding.host) def test_process_dvr_port_binding_update_router_id(self): host_id = 'host' binding = models.DVRPortBinding( port_id='port_id', host=host_id, router_id='old_router_id', vif_type=portbindings.VIF_TYPE_OVS, vnic_type=portbindings.VNIC_NORMAL, status=constants.PORT_STATUS_DOWN) plugin = manager.NeutronManager.get_plugin() mock_network = {'id': 'net_id'} mock_port = {'id': 'port_id'} context = mock.Mock() new_router_id = 'new_router' attrs = {'device_id': new_router_id, portbindings.HOST_ID: host_id} with mock.patch.object(plugin, '_update_port_dict_binding'): with mock.patch.object(ml2_db, 'get_network_segments', return_value=[]): mech_context = driver_context.PortContext( self, context, mock_port, mock_network, binding, None) plugin._process_dvr_port_binding(mech_context, context, attrs) self.assertEqual(new_router_id, mech_context._binding.router_id) self.assertEqual(host_id, mech_context._binding.host) def test_update_dvr_port_binding_on_concurrent_port_delete(self): plugin = manager.NeutronManager.get_plugin() with self.port() as port: port = { 'id': port['port']['id'], portbindings.HOST_ID: 'foo_host', } with mock.patch.object(plugin, 'get_port', new=plugin.delete_port): res = plugin.update_dvr_port_binding( self.context, 'foo_port_id', {'port': port}) self.assertIsNone(res) def test_update_dvr_port_binding_on_non_existent_port(self): plugin = manager.NeutronManager.get_plugin() port = { 'id': 'foo_port_id', portbindings.HOST_ID: 'foo_host', } with mock.patch.object(ml2_db, 'ensure_dvr_port_binding') as mock_dvr: plugin.update_dvr_port_binding( self.context, 'foo_port_id', {'port': port}) self.assertFalse(mock_dvr.called) class TestMl2PortBindingNoSG(TestMl2PortBinding): HAS_PORT_FILTER = False ENABLE_SG = False FIREWALL_DRIVER = test_sg_rpc.FIREWALL_NOOP_DRIVER class TestMl2PortBindingHost(Ml2PluginV2TestCase, test_bindings.PortBindingsHostTestCaseMixin): pass class TestMl2PortBindingVnicType(Ml2PluginV2TestCase, test_bindings.PortBindingsVnicTestCaseMixin): pass class TestMultiSegmentNetworks(Ml2PluginV2TestCase): def setUp(self, plugin=None): super(TestMultiSegmentNetworks, self).setUp() def test_allocate_dynamic_segment(self): data = {'network': {'name': 'net1', 'tenant_id': 'tenant_one'}} network_req = self.new_create_request('networks', data) network = self.deserialize(self.fmt, network_req.get_response(self.api)) segment = {driver_api.NETWORK_TYPE: 'vlan', driver_api.PHYSICAL_NETWORK: 'physnet1'} network_id = network['network']['id'] self.driver.type_manager.allocate_dynamic_segment( self.context.session, network_id, segment) dynamic_segment = ml2_db.get_dynamic_segment(self.context.session, network_id, 'physnet1') self.assertEqual('vlan', dynamic_segment[driver_api.NETWORK_TYPE]) self.assertEqual('physnet1', dynamic_segment[driver_api.PHYSICAL_NETWORK]) self.assertTrue(dynamic_segment[driver_api.SEGMENTATION_ID] > 0) segment2 = {driver_api.NETWORK_TYPE: 'vlan', driver_api.SEGMENTATION_ID: 1234, driver_api.PHYSICAL_NETWORK: 'physnet3'} self.driver.type_manager.allocate_dynamic_segment( self.context.session, network_id, segment2) dynamic_segment = ml2_db.get_dynamic_segment(self.context.session, network_id, segmentation_id='1234') self.assertEqual('vlan', dynamic_segment[driver_api.NETWORK_TYPE]) self.assertEqual('physnet3', dynamic_segment[driver_api.PHYSICAL_NETWORK]) 
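# For reference, the dynamic-segment lookup used throughout these
# tests, as inferable from the calls in this file (a sketch, not an
# authoritative signature):
#   ml2_db.get_dynamic_segment(session, network_id,
#                              physical_network=None,
#                              segmentation_id=None)
# i.e. a dynamic segment can be looked up either by physical network
# or by segmentation id.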
self.assertEqual(1234, dynamic_segment[driver_api.SEGMENTATION_ID]) def test_allocate_dynamic_segment_multiple_physnets(self): data = {'network': {'name': 'net1', 'tenant_id': 'tenant_one'}} network_req = self.new_create_request('networks', data) network = self.deserialize(self.fmt, network_req.get_response(self.api)) segment = {driver_api.NETWORK_TYPE: 'vlan', driver_api.PHYSICAL_NETWORK: 'physnet1'} network_id = network['network']['id'] self.driver.type_manager.allocate_dynamic_segment( self.context.session, network_id, segment) dynamic_segment = ml2_db.get_dynamic_segment(self.context.session, network_id, 'physnet1') self.assertEqual('vlan', dynamic_segment[driver_api.NETWORK_TYPE]) self.assertEqual('physnet1', dynamic_segment[driver_api.PHYSICAL_NETWORK]) dynamic_segmentation_id = dynamic_segment[driver_api.SEGMENTATION_ID] self.assertTrue(dynamic_segmentation_id > 0) dynamic_segment1 = ml2_db.get_dynamic_segment(self.context.session, network_id, 'physnet1') dynamic_segment1_id = dynamic_segment1[driver_api.SEGMENTATION_ID] self.assertEqual(dynamic_segmentation_id, dynamic_segment1_id) segment2 = {driver_api.NETWORK_TYPE: 'vlan', driver_api.PHYSICAL_NETWORK: 'physnet2'} self.driver.type_manager.allocate_dynamic_segment( self.context.session, network_id, segment2) dynamic_segment2 = ml2_db.get_dynamic_segment(self.context.session, network_id, 'physnet2') dynamic_segmentation2_id = dynamic_segment2[driver_api.SEGMENTATION_ID] self.assertNotEqual(dynamic_segmentation_id, dynamic_segmentation2_id) def test_allocate_release_dynamic_segment(self): data = {'network': {'name': 'net1', 'tenant_id': 'tenant_one'}} network_req = self.new_create_request('networks', data) network = self.deserialize(self.fmt, network_req.get_response(self.api)) segment = {driver_api.NETWORK_TYPE: 'vlan', driver_api.PHYSICAL_NETWORK: 'physnet1'} network_id = network['network']['id'] self.driver.type_manager.allocate_dynamic_segment( self.context.session, network_id, segment) dynamic_segment = ml2_db.get_dynamic_segment(self.context.session, network_id, 'physnet1') self.assertEqual('vlan', dynamic_segment[driver_api.NETWORK_TYPE]) self.assertEqual('physnet1', dynamic_segment[driver_api.PHYSICAL_NETWORK]) dynamic_segmentation_id = dynamic_segment[driver_api.SEGMENTATION_ID] self.assertTrue(dynamic_segmentation_id > 0) self.driver.type_manager.release_dynamic_segment( self.context.session, dynamic_segment[driver_api.ID]) self.assertIsNone(ml2_db.get_dynamic_segment( self.context.session, network_id, 'physnet1')) def test_create_network_provider(self): data = {'network': {'name': 'net1', pnet.NETWORK_TYPE: 'vlan', pnet.PHYSICAL_NETWORK: 'physnet1', pnet.SEGMENTATION_ID: 1, 'tenant_id': 'tenant_one'}} network_req = self.new_create_request('networks', data) network = self.deserialize(self.fmt, network_req.get_response(self.api)) self.assertEqual('vlan', network['network'][pnet.NETWORK_TYPE]) self.assertEqual('physnet1', network['network'][pnet.PHYSICAL_NETWORK]) self.assertEqual(1, network['network'][pnet.SEGMENTATION_ID]) self.assertNotIn(mpnet.SEGMENTS, network['network']) def test_create_network_single_multiprovider(self): data = {'network': {'name': 'net1', mpnet.SEGMENTS: [{pnet.NETWORK_TYPE: 'vlan', pnet.PHYSICAL_NETWORK: 'physnet1', pnet.SEGMENTATION_ID: 1}], 'tenant_id': 'tenant_one'}} net_req = self.new_create_request('networks', data) network = self.deserialize(self.fmt, net_req.get_response(self.api)) self.assertEqual('vlan', network['network'][pnet.NETWORK_TYPE]) self.assertEqual('physnet1',
network['network'][pnet.PHYSICAL_NETWORK]) self.assertEqual(1, network['network'][pnet.SEGMENTATION_ID]) self.assertNotIn(mpnet.SEGMENTS, network['network']) # Tests get_network() net_req = self.new_show_request('networks', network['network']['id']) network = self.deserialize(self.fmt, net_req.get_response(self.api)) self.assertEqual('vlan', network['network'][pnet.NETWORK_TYPE]) self.assertEqual('physnet1', network['network'][pnet.PHYSICAL_NETWORK]) self.assertEqual(1, network['network'][pnet.SEGMENTATION_ID]) self.assertNotIn(mpnet.SEGMENTS, network['network']) def test_create_network_multiprovider(self): data = {'network': {'name': 'net1', mpnet.SEGMENTS: [{pnet.NETWORK_TYPE: 'vlan', pnet.PHYSICAL_NETWORK: 'physnet1', pnet.SEGMENTATION_ID: 1}, {pnet.NETWORK_TYPE: 'vlan', pnet.PHYSICAL_NETWORK: 'physnet1', pnet.SEGMENTATION_ID: 2}], 'tenant_id': 'tenant_one'}} network_req = self.new_create_request('networks', data) network = self.deserialize(self.fmt, network_req.get_response(self.api)) segments = network['network'][mpnet.SEGMENTS] for segment_index, segment in enumerate(data['network'] [mpnet.SEGMENTS]): for field in [pnet.NETWORK_TYPE, pnet.PHYSICAL_NETWORK, pnet.SEGMENTATION_ID]: self.assertEqual(segment.get(field), segments[segment_index][field]) # Tests get_network() net_req = self.new_show_request('networks', network['network']['id']) network = self.deserialize(self.fmt, net_req.get_response(self.api)) segments = network['network'][mpnet.SEGMENTS] for segment_index, segment in enumerate(data['network'] [mpnet.SEGMENTS]): for field in [pnet.NETWORK_TYPE, pnet.PHYSICAL_NETWORK, pnet.SEGMENTATION_ID]: self.assertEqual(segment.get(field), segments[segment_index][field]) def test_create_network_with_provider_and_multiprovider_fail(self): data = {'network': {'name': 'net1', mpnet.SEGMENTS: [{pnet.NETWORK_TYPE: 'vlan', pnet.PHYSICAL_NETWORK: 'physnet1', pnet.SEGMENTATION_ID: 1}], pnet.NETWORK_TYPE: 'vlan', pnet.PHYSICAL_NETWORK: 'physnet1', pnet.SEGMENTATION_ID: 1, 'tenant_id': 'tenant_one'}} network_req = self.new_create_request('networks', data) res = network_req.get_response(self.api) self.assertEqual(400, res.status_int) def test_create_network_duplicate_full_segments(self): data = {'network': {'name': 'net1', mpnet.SEGMENTS: [{pnet.NETWORK_TYPE: 'vlan', pnet.PHYSICAL_NETWORK: 'physnet1', pnet.SEGMENTATION_ID: 1}, {pnet.NETWORK_TYPE: 'vlan', pnet.PHYSICAL_NETWORK: 'physnet1', pnet.SEGMENTATION_ID: 1}], 'tenant_id': 'tenant_one'}} network_req = self.new_create_request('networks', data) res = network_req.get_response(self.api) self.assertEqual(400, res.status_int) def test_create_network_duplicate_partial_segments(self): data = {'network': {'name': 'net1', mpnet.SEGMENTS: [{pnet.NETWORK_TYPE: 'vlan', pnet.PHYSICAL_NETWORK: 'physnet1'}, {pnet.NETWORK_TYPE: 'vlan', pnet.PHYSICAL_NETWORK: 'physnet1'}], 'tenant_id': 'tenant_one'}} network_req = self.new_create_request('networks', data) res = network_req.get_response(self.api) self.assertEqual(201, res.status_int) def test_release_network_segments(self): data = {'network': {'name': 'net1', 'admin_state_up': True, 'shared': False, pnet.NETWORK_TYPE: 'vlan', pnet.PHYSICAL_NETWORK: 'physnet1', pnet.SEGMENTATION_ID: 1, 'tenant_id': 'tenant_one'}} network_req = self.new_create_request('networks', data) res = network_req.get_response(self.api) network = self.deserialize(self.fmt, res) network_id = network['network']['id'] segment = {driver_api.NETWORK_TYPE: 'vlan', driver_api.PHYSICAL_NETWORK: 'physnet2'} 
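# The static provider segment created above lives on physnet1, while
# the dynamic segment below is allocated on physnet2; deleting the
# network must release both, which is why release_segment is expected
# to be called exactly twice further down.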
self.driver.type_manager.allocate_dynamic_segment( self.context.session, network_id, segment) dynamic_segment = ml2_db.get_dynamic_segment(self.context.session, network_id, 'physnet2') self.assertEqual('vlan', dynamic_segment[driver_api.NETWORK_TYPE]) self.assertEqual('physnet2', dynamic_segment[driver_api.PHYSICAL_NETWORK]) self.assertTrue(dynamic_segment[driver_api.SEGMENTATION_ID] > 0) with mock.patch.object(type_vlan.VlanTypeDriver, 'release_segment') as rs: req = self.new_delete_request('networks', network_id) res = req.get_response(self.api) self.assertEqual(2, rs.call_count) self.assertEqual([], ml2_db.get_network_segments( self.context.session, network_id)) self.assertIsNone(ml2_db.get_dynamic_segment( self.context.session, network_id, 'physnet2')) def test_release_segment_no_type_driver(self): data = {'network': {'name': 'net1', 'admin_state_up': True, 'shared': False, pnet.NETWORK_TYPE: 'vlan', pnet.PHYSICAL_NETWORK: 'physnet1', pnet.SEGMENTATION_ID: 1, 'tenant_id': 'tenant_one'}} network_req = self.new_create_request('networks', data) res = network_req.get_response(self.api) network = self.deserialize(self.fmt, res) network_id = network['network']['id'] segment = {driver_api.NETWORK_TYPE: 'faketype', driver_api.PHYSICAL_NETWORK: 'physnet1', driver_api.ID: 1} with mock.patch('neutron.plugins.ml2.managers.LOG') as log: with mock.patch('neutron.plugins.ml2.managers.db') as db: db.get_network_segments.return_value = (segment,) self.driver.type_manager.release_network_segments( self.context.session, network_id) log.error.assert_called_once_with( "Failed to release segment '%s' because " "network type is not supported.", segment) def test_create_provider_fail(self): segment = {pnet.NETWORK_TYPE: None, pnet.PHYSICAL_NETWORK: 'phys_net', pnet.SEGMENTATION_ID: None} with testtools.ExpectedException(exc.InvalidInput): self.driver.type_manager._process_provider_create(segment) def test_create_network_plugin(self): data = {'network': {'name': 'net1', 'admin_state_up': True, 'shared': False, pnet.NETWORK_TYPE: 'vlan', pnet.PHYSICAL_NETWORK: 'physnet1', pnet.SEGMENTATION_ID: 1, 'tenant_id': 'tenant_one'}} def raise_mechanism_exc(*args, **kwargs): raise ml2_exc.MechanismDriverError( method='create_network_postcommit') with mock.patch('neutron.plugins.ml2.managers.MechanismManager.' 'create_network_precommit', new=raise_mechanism_exc): with testtools.ExpectedException(ml2_exc.MechanismDriverError): self.driver.create_network(self.context, data) def test_extend_dictionary_no_segments(self): network = dict(name='net_no_segment', id='5', tenant_id='tenant_one') self.driver.type_manager.extend_network_dict_provider(self.context, network) self.assertIsNone(network[pnet.NETWORK_TYPE]) self.assertIsNone(network[pnet.PHYSICAL_NETWORK]) self.assertIsNone(network[pnet.SEGMENTATION_ID]) class TestMl2AllowedAddressPairs(Ml2PluginV2TestCase, test_pair.TestAllowedAddressPairs): _extension_drivers = ['port_security'] def setUp(self, plugin=None): config.cfg.CONF.set_override('extension_drivers', self._extension_drivers, group='ml2') super(test_pair.TestAllowedAddressPairs, self).setUp( plugin=PLUGIN_NAME) class DHCPOptsTestCase(test_dhcpopts.TestExtraDhcpOpt): def setUp(self, plugin=None): super(test_dhcpopts.ExtraDhcpOptDBTestCase, self).setUp( plugin=PLUGIN_NAME) class Ml2PluginV2FaultyDriverTestCase(test_plugin.NeutronDbPluginV2TestCase): def setUp(self): # Enable the test mechanism driver to ensure that # we can successfully call through to all mechanism # driver apis. 
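# 'test' and 'logger' correspond to the TestMechanismDriver and
# LoggerMechanismDriver exercised in the tests below (as the patch
# targets suggest); the logger driver doubles as a witness that the
# remaining drivers still run when one of them raises.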
config.cfg.CONF.set_override('mechanism_drivers', ['test', 'logger'], group='ml2') super(Ml2PluginV2FaultyDriverTestCase, self).setUp(PLUGIN_NAME) self.port_create_status = 'DOWN' class TestFaultyMechanismDriver(Ml2PluginV2FaultyDriverTestCase): def test_create_network_faulty(self): with mock.patch.object(mech_test.TestMechanismDriver, 'create_network_postcommit', side_effect=ml2_exc.MechanismDriverError): tenant_id = str(uuid.uuid4()) data = {'network': {'name': 'net1', 'tenant_id': tenant_id}} req = self.new_create_request('networks', data) res = req.get_response(self.api) self.assertEqual(500, res.status_int) error = self.deserialize(self.fmt, res) self.assertEqual('MechanismDriverError', error['NeutronError']['type']) query_params = "tenant_id=%s" % tenant_id nets = self._list('networks', query_params=query_params) self.assertFalse(nets['networks']) def test_delete_network_faulty(self): with mock.patch.object(mech_test.TestMechanismDriver, 'delete_network_postcommit', side_effect=ml2_exc.MechanismDriverError): with mock.patch.object(mech_logger.LoggerMechanismDriver, 'delete_network_postcommit') as dnp: data = {'network': {'name': 'net1', 'tenant_id': 'tenant_one'}} network_req = self.new_create_request('networks', data) network_res = network_req.get_response(self.api) self.assertEqual(201, network_res.status_int) network = self.deserialize(self.fmt, network_res) net_id = network['network']['id'] req = self.new_delete_request('networks', net_id) res = req.get_response(self.api) self.assertEqual(204, res.status_int) # Test if the other mechanism driver was called self.assertTrue(dnp.called) self._show('networks', net_id, expected_code=webob.exc.HTTPNotFound.code) def test_update_network_faulty(self): with mock.patch.object(mech_test.TestMechanismDriver, 'update_network_postcommit', side_effect=ml2_exc.MechanismDriverError): with mock.patch.object(mech_logger.LoggerMechanismDriver, 'update_network_postcommit') as unp: data = {'network': {'name': 'net1', 'tenant_id': 'tenant_one'}} network_req = self.new_create_request('networks', data) network_res = network_req.get_response(self.api) self.assertEqual(201, network_res.status_int) network = self.deserialize(self.fmt, network_res) net_id = network['network']['id'] new_name = 'a_brand_new_name' data = {'network': {'name': new_name}} req = self.new_update_request('networks', data, net_id) res = req.get_response(self.api) self.assertEqual(500, res.status_int) error = self.deserialize(self.fmt, res) self.assertEqual('MechanismDriverError', error['NeutronError']['type']) # Test if the other mechanism driver was called self.assertTrue(unp.called) net = self._show('networks', net_id) self.assertEqual(new_name, net['network']['name']) self._delete('networks', net_id) def test_create_subnet_faulty(self): with mock.patch.object(mech_test.TestMechanismDriver, 'create_subnet_postcommit', side_effect=ml2_exc.MechanismDriverError): with self.network() as network: net_id = network['network']['id'] data = {'subnet': {'network_id': net_id, 'cidr': '10.0.20.0/24', 'ip_version': '4', 'name': 'subnet1', 'tenant_id': network['network']['tenant_id'], 'gateway_ip': '10.0.20.1'}} req = self.new_create_request('subnets', data) res = req.get_response(self.api) self.assertEqual(500, res.status_int) error = self.deserialize(self.fmt, res) self.assertEqual('MechanismDriverError', error['NeutronError']['type']) query_params = "network_id=%s" % net_id subnets = self._list('subnets', query_params=query_params) self.assertFalse(subnets['subnets']) def 
test_delete_subnet_faulty(self): with mock.patch.object(mech_test.TestMechanismDriver, 'delete_subnet_postcommit', side_effect=ml2_exc.MechanismDriverError): with mock.patch.object(mech_logger.LoggerMechanismDriver, 'delete_subnet_postcommit') as dsp: with self.network() as network: data = {'subnet': {'network_id': network['network']['id'], 'cidr': '10.0.20.0/24', 'ip_version': '4', 'name': 'subnet1', 'tenant_id': network['network']['tenant_id'], 'gateway_ip': '10.0.20.1'}} subnet_req = self.new_create_request('subnets', data) subnet_res = subnet_req.get_response(self.api) self.assertEqual(201, subnet_res.status_int) subnet = self.deserialize(self.fmt, subnet_res) subnet_id = subnet['subnet']['id'] req = self.new_delete_request('subnets', subnet_id) res = req.get_response(self.api) self.assertEqual(204, res.status_int) # Test if other mechanism driver was called self.assertTrue(dsp.called) self._show('subnets', subnet_id, expected_code=webob.exc.HTTPNotFound.code) def test_update_subnet_faulty(self): with mock.patch.object(mech_test.TestMechanismDriver, 'update_subnet_postcommit', side_effect=ml2_exc.MechanismDriverError): with mock.patch.object(mech_logger.LoggerMechanismDriver, 'update_subnet_postcommit') as usp: with self.network() as network: data = {'subnet': {'network_id': network['network']['id'], 'cidr': '10.0.20.0/24', 'ip_version': '4', 'name': 'subnet1', 'tenant_id': network['network']['tenant_id'], 'gateway_ip': '10.0.20.1'}} subnet_req = self.new_create_request('subnets', data) subnet_res = subnet_req.get_response(self.api) self.assertEqual(201, subnet_res.status_int) subnet = self.deserialize(self.fmt, subnet_res) subnet_id = subnet['subnet']['id'] new_name = 'a_brand_new_name' data = {'subnet': {'name': new_name}} req = self.new_update_request('subnets', data, subnet_id) res = req.get_response(self.api) self.assertEqual(500, res.status_int) error = self.deserialize(self.fmt, res) self.assertEqual('MechanismDriverError', error['NeutronError']['type']) # Test if other mechanism driver was called self.assertTrue(usp.called) subnet = self._show('subnets', subnet_id) self.assertEqual(new_name, subnet['subnet']['name']) self._delete('subnets', subnet['subnet']['id']) def test_create_port_faulty(self): with mock.patch.object(mech_test.TestMechanismDriver, 'create_port_postcommit', side_effect=ml2_exc.MechanismDriverError): with self.network() as network: net_id = network['network']['id'] data = {'port': {'network_id': net_id, 'tenant_id': network['network']['tenant_id'], 'name': 'port1', 'admin_state_up': 1, 'fixed_ips': []}} req = self.new_create_request('ports', data) res = req.get_response(self.api) self.assertEqual(500, res.status_int) error = self.deserialize(self.fmt, res) self.assertEqual('MechanismDriverError', error['NeutronError']['type']) query_params = "network_id=%s" % net_id ports = self._list('ports', query_params=query_params) self.assertFalse(ports['ports']) def test_update_port_faulty(self): with mock.patch.object(mech_test.TestMechanismDriver, 'update_port_postcommit', side_effect=ml2_exc.MechanismDriverError): with mock.patch.object(mech_logger.LoggerMechanismDriver, 'update_port_postcommit') as upp: with self.network() as network: data = {'port': {'network_id': network['network']['id'], 'tenant_id': network['network']['tenant_id'], 'name': 'port1', 'admin_state_up': 1, 'fixed_ips': []}} port_req = self.new_create_request('ports', data) port_res = port_req.get_response(self.api) self.assertEqual(201, port_res.status_int) port = self.deserialize(self.fmt, 
port_res) port_id = port['port']['id'] new_name = 'a_brand_new_name' data = {'port': {'name': new_name}} req = self.new_update_request('ports', data, port_id) res = req.get_response(self.api) self.assertEqual(200, res.status_int) # Test if the other mechanism driver was called self.assertTrue(upp.called) port = self._show('ports', port_id) self.assertEqual(new_name, port['port']['name']) self._delete('ports', port['port']['id']) def test_update_dvr_router_interface_port(self): """Validate that a DVR router interface update succeeds.""" host_id = 'host' binding = models.DVRPortBinding( port_id='port_id', host=host_id, router_id='old_router_id', vif_type=portbindings.VIF_TYPE_OVS, vnic_type=portbindings.VNIC_NORMAL, status=constants.PORT_STATUS_DOWN) with mock.patch.object( mech_test.TestMechanismDriver, 'update_port_postcommit', side_effect=ml2_exc.MechanismDriverError) as port_post,\ mock.patch.object( mech_test.TestMechanismDriver, 'update_port_precommit') as port_pre,\ mock.patch.object(ml2_db, 'get_dvr_port_bindings') as dvr_bindings: dvr_bindings.return_value = [binding] port_pre.return_value = True with self.network() as network: with self.subnet(network=network) as subnet: subnet_id = subnet['subnet']['id'] data = {'port': { 'network_id': network['network']['id'], 'tenant_id': network['network']['tenant_id'], 'name': 'port1', 'device_owner': constants.DEVICE_OWNER_DVR_INTERFACE, 'admin_state_up': 1, 'fixed_ips': [{'subnet_id': subnet_id}]}} port_req = self.new_create_request('ports', data) port_res = port_req.get_response(self.api) self.assertEqual(201, port_res.status_int) port = self.deserialize(self.fmt, port_res) port_id = port['port']['id'] new_name = 'a_brand_new_name' data = {'port': {'name': new_name}} req = self.new_update_request('ports', data, port_id) res = req.get_response(self.api) self.assertEqual(200, res.status_int) self.assertTrue(dvr_bindings.called) self.assertTrue(port_pre.called) self.assertTrue(port_post.called) port = self._show('ports', port_id) self.assertEqual(new_name, port['port']['name']) class TestML2PluggableIPAM(test_ipam.UseIpamMixin, TestMl2SubnetsV2): def test_create_subnet_delete_subnet_call_ipam_driver(self): driver = 'neutron.ipam.drivers.neutrondb_ipam.driver.NeutronDbPool' gateway_ip = '10.0.0.1' cidr = '10.0.0.0/24' with mock.patch(driver) as driver_mock: request = mock.Mock() request.subnet_id = uuidutils.generate_uuid() request.subnet_cidr = cidr request.allocation_pools = [] request.gateway_ip = gateway_ip request.tenant_id = uuidutils.generate_uuid() ipam_subnet = mock.Mock() ipam_subnet.get_details.return_value = request driver_mock().allocate_subnet.return_value = ipam_subnet self._test_create_subnet(gateway_ip=gateway_ip, cidr=cidr) driver_mock().allocate_subnet.assert_called_with(mock.ANY) driver_mock().remove_subnet.assert_called_with(request.subnet_id) def test_delete_subnet_deallocates_slaac_correctly(self): driver = 'neutron.ipam.drivers.neutrondb_ipam.driver.NeutronDbPool' with self.network() as network: with self.subnet(network=network, cidr='2001:100::0/64', ip_version=6, ipv6_ra_mode=constants.IPV6_SLAAC) as subnet: with self.port(subnet=subnet) as port: with mock.patch(driver) as driver_mock: # Validate that deletion of the SLAAC allocation happens # via the IPAM interface, i.e. ipam_subnet.deallocate is # called prior to subnet deletion from the db.
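# driver_mock patches the NeutronDbPool class itself, and a
# MagicMock memoizes its return_value, so
# driver_mock().get_subnet().deallocate below is the very mock
# the plugin called during the delete; the assertions inspect
# it instead of a real IPAM driver.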
self._delete('subnets', subnet['subnet']['id']) dealloc = driver_mock().get_subnet().deallocate dealloc.assert_called_with( port['port']['fixed_ips'][0]['ip_address']) driver_mock().remove_subnet.assert_called_with( subnet['subnet']['id']) class TestMl2PluginCreateUpdateDeletePort(base.BaseTestCase): def setUp(self): super(TestMl2PluginCreateUpdateDeletePort, self).setUp() self.context = mock.MagicMock() self.notify_p = mock.patch('neutron.callbacks.registry.notify') self.notify = self.notify_p.start() def _ensure_transaction_is_closed(self): transaction = self.context.session.begin(subtransactions=True) enter = transaction.__enter__.call_count exit = transaction.__exit__.call_count self.assertEqual(enter, exit) def _create_plugin_for_create_update_port(self): plugin = ml2_plugin.Ml2Plugin() plugin.extension_manager = mock.Mock() plugin.type_manager = mock.Mock() plugin.mechanism_manager = mock.Mock() plugin.notifier = mock.Mock() plugin._check_mac_update_allowed = mock.Mock(return_value=True) plugin._extend_availability_zone = mock.Mock() self.notify.side_effect = ( lambda r, e, t, **kwargs: self._ensure_transaction_is_closed()) return plugin def test_create_port_rpc_outside_transaction(self): with mock.patch.object(ml2_plugin.Ml2Plugin, '__init__') as init,\ mock.patch.object(base_plugin.NeutronDbPluginV2, '_make_port_dict') as make_port, \ mock.patch.object(base_plugin.NeutronDbPluginV2, 'update_port'),\ mock.patch.object(base_plugin.NeutronDbPluginV2, 'create_port_db'): init.return_value = None new_port = mock.MagicMock() make_port.return_value = new_port plugin = self._create_plugin_for_create_update_port() plugin.create_port(self.context, mock.MagicMock()) kwargs = {'context': self.context, 'port': new_port} self.notify.assert_called_once_with('port', 'after_create', plugin, **kwargs) def test_update_port_rpc_outside_transaction(self): port_id = 'fake_id' net_id = 'mynet' original_port_db = models_v2.Port( id=port_id, tenant_id='tenant', network_id=net_id, mac_address='08:00:01:02:03:04', admin_state_up=True, status='ACTIVE', device_id='vm_id', device_owner=DEVICE_OWNER_COMPUTE) binding = mock.Mock() binding.port_id = port_id binding.host = 'vm_host' binding.vnic_type = portbindings.VNIC_NORMAL binding.profile = '' binding.vif_type = '' binding.vif_details = '' with mock.patch.object(ml2_plugin.Ml2Plugin, '__init__') as init,\ mock.patch.object(ml2_db, 'get_locked_port_and_binding', return_value=(original_port_db, binding)),\ mock.patch.object(base_plugin.NeutronDbPluginV2, 'update_port') as db_update_port: init.return_value = None updated_port = mock.MagicMock() db_update_port.return_value = updated_port plugin = self._create_plugin_for_create_update_port() original_port = plugin._make_port_dict(original_port_db) plugin.update_port(self.context, port_id, mock.MagicMock()) kwargs = { 'context': self.context, 'port': updated_port, 'mac_address_updated': True, 'original_port': original_port, } self.notify.assert_called_once_with('port', 'after_update', plugin, **kwargs) def test_notify_outside_of_delete_transaction(self): self.notify.side_effect = ( lambda r, e, t, **kwargs: self._ensure_transaction_is_closed()) l3plugin = mock.Mock() l3plugin.supported_extension_aliases = [ 'router', constants.L3_AGENT_SCHEDULER_EXT_ALIAS, constants.L3_DISTRIBUTED_EXT_ALIAS ] with mock.patch.object(ml2_plugin.Ml2Plugin, '__init__', return_value=None),\ mock.patch.object(manager.NeutronManager, 'get_service_plugins', return_value={'L3_ROUTER_NAT': l3plugin}): plugin = 
self._create_plugin_for_create_update_port() # Set backend manually here since __init__ was mocked plugin.set_ipam_backend() # deleting the port will call registry.notify, which will # run the transaction balancing function defined in this test plugin.delete_port(self.context, 'fake_id') self.assertTrue(self.notify.call_count) class TestTransactionGuard(Ml2PluginV2TestCase): def test_delete_network_guard(self): plugin = ml2_plugin.Ml2Plugin() ctx = context.get_admin_context() with ctx.session.begin(subtransactions=True): with testtools.ExpectedException(RuntimeError): plugin.delete_network(ctx, 'id') def test_delete_subnet_guard(self): plugin = ml2_plugin.Ml2Plugin() ctx = context.get_admin_context() with ctx.session.begin(subtransactions=True): with testtools.ExpectedException(RuntimeError): plugin.delete_subnet(ctx, 'id') neutron-8.4.0/neutron/tests/unit/scheduler/0000775000567000056710000000000013044373210022144 5ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/tests/unit/scheduler/__init__.py0000664000567000056710000000000013044372736024257 0ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/tests/unit/scheduler/test_l3_agent_scheduler.py0000664000567000056710000025152213044372760027327 0ustar jenkinsjenkins00000000000000# Copyright (c) 2013 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import collections import contextlib import datetime import uuid import mock from oslo_config import cfg from oslo_utils import importutils from oslo_utils import timeutils from sqlalchemy import orm import testscenarios from neutron.common import constants from neutron import context as n_context from neutron.db import agents_db from neutron.db import common_db_mixin from neutron.db import db_base_plugin_v2 as db_v2 from neutron.db import l3_agentschedulers_db from neutron.db import l3_db from neutron.db import l3_dvr_ha_scheduler_db from neutron.db import l3_dvrscheduler_db from neutron.db import l3_hamode_db from neutron.db import l3_hascheduler_db from neutron.extensions import l3 from neutron.extensions import l3_ext_ha_mode as l3_ha from neutron.extensions import l3agentscheduler as l3agent from neutron.extensions import portbindings from neutron import manager from neutron.scheduler import l3_agent_scheduler from neutron.tests import base from neutron.tests.common import helpers from neutron.tests.unit.db import test_db_base_plugin_v2 from neutron.tests.unit.extensions import test_l3 from neutron.tests.unit import testlib_api # the below code is required for the following reason # (as documented in testscenarios) """Multiply tests depending on their 'scenarios' attribute. This can be assigned to 'load_tests' in any test module to make this automatically work across tests in the module. 
""" load_tests = testscenarios.load_tests_apply_scenarios HOST_DVR = 'my_l3_host_dvr' HOST_DVR_SNAT = 'my_l3_host_dvr_snat' DEVICE_OWNER_COMPUTE = constants.DEVICE_OWNER_COMPUTE_PREFIX + 'fake' DEVICE_OWNER_COMPUTE_NOVA = constants.DEVICE_OWNER_COMPUTE_PREFIX + 'nova' class FakeL3Scheduler(l3_agent_scheduler.L3Scheduler): def schedule(self): pass def _choose_router_agent(self): pass def _choose_router_agents_for_ha(self): pass class FakePortDB(object): def __init__(self, port_list): self._port_list = port_list def _get_query_answer(self, port_list, filters): answers = [] for port in port_list: matched = True for key, search_values in filters.items(): port_value = port.get(key, None) if not port_value: matched = False break if isinstance(port_value, list): sub_answers = self._get_query_answer(port_value, search_values) matched = len(sub_answers) > 0 else: matched = port_value in search_values if not matched: break if matched: answers.append(port) return answers def get_port(self, context, port_id): for port in self._port_list: if port['id'] == port_id: if port['tenant_id'] == context.tenant_id or context.is_admin: return port break return None def get_ports(self, context, filters=None): query_filters = dict() if filters: query_filters.update(filters) if not context.is_admin: query_filters['tenant_id'] = [context.tenant_id] result = self._get_query_answer(self._port_list, query_filters) return result class L3SchedulerBaseTestCase(base.BaseTestCase): def setUp(self): super(L3SchedulerBaseTestCase, self).setUp() self.scheduler = FakeL3Scheduler() self.plugin = mock.Mock() def test_auto_schedule_routers(self): self.plugin.get_enabled_agent_on_host.return_value = [mock.ANY] with mock.patch.object(self.scheduler, '_get_routers_to_schedule') as gs,\ mock.patch.object(self.scheduler, '_get_routers_can_schedule') as gr: result = self.scheduler.auto_schedule_routers( self.plugin, mock.ANY, mock.ANY, mock.ANY) self.assertTrue(self.plugin.get_enabled_agent_on_host.called) self.assertTrue(result) self.assertTrue(gs.called) self.assertTrue(gr.called) def test_auto_schedule_routers_no_agents(self): self.plugin.get_enabled_agent_on_host.return_value = None result = self.scheduler.auto_schedule_routers( self.plugin, mock.ANY, mock.ANY, mock.ANY) self.assertTrue(self.plugin.get_enabled_agent_on_host.called) self.assertFalse(result) def test_auto_schedule_routers_no_unscheduled_routers(self): type(self.plugin).supported_extension_aliases = ( mock.PropertyMock(return_value=[])) with mock.patch.object(self.scheduler, '_get_routers_to_schedule') as mock_routers: mock_routers.return_value = [] result = self.scheduler.auto_schedule_routers( self.plugin, mock.ANY, mock.ANY, mock.ANY) self.assertTrue(self.plugin.get_enabled_agent_on_host.called) self.assertFalse(result) def test_auto_schedule_routers_no_target_routers(self): self.plugin.get_enabled_agent_on_host.return_value = [mock.ANY] with mock.patch.object( self.scheduler, '_get_routers_to_schedule') as mock_unscheduled_routers,\ mock.patch.object( self.scheduler, '_get_routers_can_schedule') as mock_target_routers: mock_unscheduled_routers.return_value = mock.ANY mock_target_routers.return_value = [] result = self.scheduler.auto_schedule_routers( self.plugin, mock.ANY, mock.ANY, mock.ANY) self.assertTrue(self.plugin.get_enabled_agent_on_host.called) self.assertFalse(result) def test__get_routers_to_schedule_with_router_ids(self): router_ids = ['foo_router_1', 'foo_router_2'] expected_routers = [ {'id': 'foo_router1'}, {'id': 'foo_router_2'} ] 
self.plugin.get_routers.return_value = expected_routers with mock.patch.object(self.scheduler, '_filter_unscheduled_routers') as mock_filter: mock_filter.return_value = expected_routers unscheduled_routers = self.scheduler._get_routers_to_schedule( mock.ANY, self.plugin, router_ids) mock_filter.assert_called_once_with( mock.ANY, self.plugin, expected_routers) self.assertEqual(expected_routers, unscheduled_routers) def test__get_routers_to_schedule_without_router_ids(self): expected_routers = [ {'id': 'foo_router1'}, {'id': 'foo_router_2'} ] with mock.patch.object(self.scheduler, '_get_unscheduled_routers') as mock_get: mock_get.return_value = expected_routers unscheduled_routers = self.scheduler._get_routers_to_schedule( mock.ANY, self.plugin) mock_get.assert_called_once_with(mock.ANY, self.plugin) self.assertEqual(expected_routers, unscheduled_routers) def _test__get_routers_can_schedule(self, routers, agent, target_routers): self.plugin.get_l3_agent_candidates.return_value = agent result = self.scheduler._get_routers_can_schedule( mock.ANY, self.plugin, routers, mock.ANY) self.assertEqual(target_routers, result) def _test__filter_unscheduled_routers(self, routers, agents, expected): self.plugin.get_l3_agents_hosting_routers.return_value = agents unscheduled_routers = self.scheduler._filter_unscheduled_routers( mock.ANY, self.plugin, routers) self.assertEqual(expected, unscheduled_routers) def test__filter_unscheduled_routers_already_scheduled(self): self._test__filter_unscheduled_routers( [{'id': 'foo_router1'}, {'id': 'foo_router_2'}], [{'id': 'foo_agent_id'}], []) def test__filter_unscheduled_routers_non_scheduled(self): self._test__filter_unscheduled_routers( [{'id': 'foo_router1'}, {'id': 'foo_router_2'}], None, [{'id': 'foo_router1'}, {'id': 'foo_router_2'}]) def test__get_routers_can_schedule_with_compat_agent(self): routers = [{'id': 'foo_router'}] self._test__get_routers_can_schedule(routers, mock.ANY, routers) def test__get_routers_can_schedule_with_no_compat_agent(self): routers = [{'id': 'foo_router'}] self._test__get_routers_can_schedule(routers, None, []) def test__bind_routers_centralized(self): routers = [{'id': 'foo_router'}] with mock.patch.object(self.scheduler, 'bind_router') as mock_bind: self.scheduler._bind_routers(mock.ANY, mock.ANY, routers, mock.ANY) mock_bind.assert_called_once_with(mock.ANY, 'foo_router', mock.ANY) def _test__bind_routers_ha(self, has_binding): routers = [{'id': 'foo_router', 'ha': True, 'tenant_id': '42'}] agent = agents_db.Agent(id='foo_agent') with mock.patch.object(self.scheduler, '_router_has_binding', return_value=has_binding) as mock_has_binding,\ mock.patch.object(self.scheduler, 'create_ha_port_and_bind') as mock_bind: self.scheduler._bind_routers(mock.ANY, mock.ANY, routers, agent) mock_has_binding.assert_called_once_with(mock.ANY, 'foo_router', 'foo_agent') self.assertEqual(not has_binding, mock_bind.called) def test__bind_routers_ha_has_binding(self): self._test__bind_routers_ha(has_binding=True) def test__bind_routers_ha_no_binding(self): self._test__bind_routers_ha(has_binding=False) def test__get_candidates_iterable_on_early_returns(self): plugin = mock.MagicMock() # non-distributed router already hosted plugin.get_l3_agents_hosting_routers.return_value = [{'id': 'a1'}] router = {'distributed': False, 'id': 'falafel'} iter(self.scheduler._get_candidates(plugin, mock.MagicMock(), router)) # distributed router but no agents router['distributed'] = True plugin.get_l3_agents.return_value = [] 
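        # The iter() calls below only assert that _get_candidates() is
        # still iterable on its early-return paths (router already
        # hosted, or distributed router with no agents); if it returned
        # None instead, iter() would raise TypeError and fail the test.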
iter(self.scheduler._get_candidates(plugin, mock.MagicMock(), router)) self.assertFalse(plugin.get_l3_agent_candidates.called) def test__get_candidates_skips_get_l3_agent_candidates_if_dvr_scheduled( self): plugin = mock.MagicMock() # distributed router already hosted plugin.get_l3_agents_hosting_routers.return_value = [{'id': 'a1'}] router = {'distributed': True, 'id': str(uuid.uuid4())} plugin.get_l3_agents.return_value = ['a1'] self.scheduler._get_candidates(plugin, mock.MagicMock(), router) self.assertFalse(plugin.get_l3_agent_candidates.called) class L3SchedulerBaseMixin(object): def _register_l3_agents(self, plugin=None): self.agent1 = helpers.register_l3_agent( 'host_1', constants.L3_AGENT_MODE_LEGACY) self.agent_id1 = self.agent1.id self.agent2 = helpers.register_l3_agent( 'host_2', constants.L3_AGENT_MODE_LEGACY) self.agent_id2 = self.agent2.id def _register_l3_dvr_agents(self): self.l3_dvr_agent = helpers.register_l3_agent( HOST_DVR, constants.L3_AGENT_MODE_DVR) self.l3_dvr_agent_id = self.l3_dvr_agent.id self.l3_dvr_snat_agent = helpers.register_l3_agent( HOST_DVR_SNAT, constants.L3_AGENT_MODE_DVR_SNAT) self.l3_dvr_snat_id = self.l3_dvr_snat_agent.id def _set_l3_agent_admin_state(self, context, agent_id, state=True): update = {'agent': {'admin_state_up': state}} self.plugin.update_agent(context, agent_id, update) def _set_l3_agent_dead(self, agent_id): update = { 'agent': { 'heartbeat_timestamp': timeutils.utcnow() - datetime.timedelta(hours=1)}} self.plugin.update_agent(self.adminContext, agent_id, update) @contextlib.contextmanager def router_with_ext_gw(self, name='router1', admin_state_up=True, fmt=None, tenant_id=str(uuid.uuid4()), external_gateway_info=None, subnet=None, set_context=False, **kwargs): router = self._make_router(fmt or self.fmt, tenant_id, name, admin_state_up, external_gateway_info, set_context, **kwargs) self._add_external_gateway_to_router( router['router']['id'], subnet['subnet']['network_id']) yield router self._remove_external_gateway_from_router( router['router']['id'], subnet['subnet']['network_id']) self._delete('routers', router['router']['id']) class L3SchedulerTestBaseMixin(object): def _test_add_router_to_l3_agent(self, distributed=False, already_scheduled=False, external_gw=None): agent_id = self.agent_id1 agent = self.agent1 if distributed: self._register_l3_dvr_agents() agent_id = self.l3_dvr_snat_id agent = self.l3_dvr_snat_agent router = self._make_router(self.fmt, tenant_id=str(uuid.uuid4()), name='r1') router['router']['distributed'] = distributed router['router']['external_gateway_info'] = external_gw if already_scheduled: self._test_schedule_bind_router(agent, router) with mock.patch.object(self, "validate_agent_router_combination"),\ mock.patch.object(self, "create_router_to_agent_binding") as auto_s,\ mock.patch('neutron.db.l3_db.L3_NAT_db_mixin.get_router', return_value=router['router']): self.add_router_to_l3_agent(self.adminContext, agent_id, router['router']['id']) self.assertNotEqual(already_scheduled, auto_s.called) def test__unbind_router_removes_binding(self): agent_id = self.agent_id1 agent = self.agent1 router = self._make_router(self.fmt, tenant_id=str(uuid.uuid4()), name='r1') self._test_schedule_bind_router(agent, router) self._unbind_router(self.adminContext, router['router']['id'], agent_id) bindings = self._get_l3_bindings_hosting_routers( self.adminContext, [router['router']['id']]) self.assertEqual(0, len(bindings)) def _create_router_for_l3_agent_dvr_test(self, distributed=False, external_gw=None): router = 
self._make_router(self.fmt, tenant_id=str(uuid.uuid4()), name='r1') router['router']['distributed'] = distributed router['router']['external_gateway_info'] = external_gw return router def _prepare_l3_agent_dvr_move_exceptions(self, distributed=False, external_gw=None, agent_id=None, expected_exception=None): router = self._create_router_for_l3_agent_dvr_test( distributed=distributed, external_gw=external_gw) with mock.patch.object(self, "create_router_to_agent_binding"),\ mock.patch('neutron.db.l3_db.L3_NAT_db_mixin.get_router', return_value=router['router']): self.assertRaises(expected_exception, self.add_router_to_l3_agent, self.adminContext, agent_id, router['router']['id']) def test_add_router_to_l3_agent_mismatch_error_dvr_to_legacy(self): self._register_l3_agents() self._prepare_l3_agent_dvr_move_exceptions( distributed=True, agent_id=self.agent_id1, expected_exception=l3agent.RouterL3AgentMismatch) def test_add_router_to_l3_agent_mismatch_error_legacy_to_dvr(self): self._register_l3_dvr_agents() self._prepare_l3_agent_dvr_move_exceptions( agent_id=self.l3_dvr_agent_id, expected_exception=l3agent.DVRL3CannotAssignToDvrAgent) def test_add_router_to_l3_agent_mismatch_error_dvr_to_dvr(self): self._register_l3_dvr_agents() self._prepare_l3_agent_dvr_move_exceptions( distributed=True, agent_id=self.l3_dvr_agent_id, expected_exception=l3agent.DVRL3CannotAssignToDvrAgent) def test_add_router_to_l3_agent_dvr_to_snat(self): external_gw_info = { "network_id": str(uuid.uuid4()), "enable_snat": True } self._register_l3_dvr_agents() agent_id = self.l3_dvr_snat_id router = self._create_router_for_l3_agent_dvr_test( distributed=True, external_gw=external_gw_info) with mock.patch.object(self, "validate_agent_router_combination"),\ mock.patch.object( self, "create_router_to_agent_binding") as rtr_agent_binding,\ mock.patch('neutron.db.l3_db.L3_NAT_db_mixin.get_router', return_value=router['router']): self.add_router_to_l3_agent(self.adminContext, agent_id, router['router']['id']) rtr_agent_binding.assert_called_once_with( self.adminContext, mock.ANY, router['router']) def test_add_router_to_l3_agent(self): self._test_add_router_to_l3_agent() def test_add_distributed_router_to_l3_agent(self): external_gw_info = { "network_id": str(uuid.uuid4()), "enable_snat": True } self._test_add_router_to_l3_agent(distributed=True, external_gw=external_gw_info) def test_add_router_to_l3_agent_already_scheduled(self): self._test_add_router_to_l3_agent(already_scheduled=True) def test_add_distributed_router_to_l3_agent_already_scheduled(self): external_gw_info = { "network_id": str(uuid.uuid4()), "enable_snat": True } self._test_add_router_to_l3_agent(distributed=True, already_scheduled=True, external_gw=external_gw_info) def test_remove_router_from_l3_agent_in_dvr_mode(self): self._register_l3_dvr_agents() self.assertRaises(l3agent.DVRL3CannotRemoveFromDvrAgent, self.remove_router_from_l3_agent, self.adminContext, self.l3_dvr_agent_id, mock.ANY) def test_remove_router_from_l3_agent_in_dvr_snat_mode(self): self._register_l3_dvr_agents() router = self._create_router_for_l3_agent_dvr_test( distributed=True) agent_id = self.l3_dvr_snat_id l3_notifier = mock.Mock() self.agent_notifiers = {constants.AGENT_TYPE_L3: l3_notifier} self.remove_router_from_l3_agent(self.adminContext, agent_id, router['router']['id']) l3_notifier.router_removed_from_agent.assert_called_once_with( self.adminContext, router['router']['id'], self.l3_dvr_snat_agent.host) def _prepare_schedule_dvr_tests(self): scheduler = 
l3_agent_scheduler.ChanceScheduler() agent = agents_db.Agent() agent.admin_state_up = True agent.heartbeat_timestamp = timeutils.utcnow() plugin = mock.Mock() plugin.get_l3_agents_hosting_routers.return_value = [] plugin.get_l3_agents.return_value = [agent] plugin.get_l3_agent_candidates.return_value = [agent] return scheduler, agent, plugin def test_schedule_dvr_router_without_snatbinding_and_no_gw(self): scheduler, agent, plugin = self._prepare_schedule_dvr_tests() sync_router = { 'id': 'foo_router_id', 'distributed': True } plugin.get_router.return_value = sync_router with mock.patch.object(scheduler, 'bind_router'),\ mock.patch.object(plugin, 'get_snat_bindings', return_value=False): scheduler._schedule_router( plugin, self.adminContext, 'foo_router_id', None) expected_calls = [ mock.call.get_router(mock.ANY, 'foo_router_id'), mock.call.get_l3_agents_hosting_routers( mock.ANY, ['foo_router_id'], admin_state_up=True), mock.call.get_l3_agents(mock.ANY, active=True), mock.call.get_l3_agent_candidates(mock.ANY, sync_router, [agent]), ] plugin.assert_has_calls(expected_calls) def test_schedule_router_distributed(self): scheduler, agent, plugin = self._prepare_schedule_dvr_tests() sync_router = { 'id': 'foo_router_id', 'distributed': True, 'external_gateway_info': { 'network_id': str(uuid.uuid4()), 'enable_snat': True } } plugin.get_router.return_value = sync_router with mock.patch.object(scheduler, 'bind_router'): scheduler._schedule_router( plugin, self.adminContext, 'foo_router_id', None) expected_calls = [ mock.call.get_router(mock.ANY, 'foo_router_id'), mock.call.get_l3_agents_hosting_routers( mock.ANY, ['foo_router_id'], admin_state_up=True), mock.call.get_l3_agents(mock.ANY, active=True), mock.call.get_l3_agent_candidates(mock.ANY, sync_router, [agent]), ] plugin.assert_has_calls(expected_calls) def _test_schedule_bind_router(self, agent, router): ctx = self.adminContext session = ctx.session db = l3_agentschedulers_db.RouterL3AgentBinding scheduler = l3_agent_scheduler.ChanceScheduler() rid = router['router']['id'] scheduler.bind_router(ctx, rid, agent) results = (session.query(db).filter_by(router_id=rid).all()) self.assertTrue(len(results) > 0) self.assertIn(agent.id, [bind.l3_agent_id for bind in results]) def test_bind_new_router(self): router = self._make_router(self.fmt, tenant_id=str(uuid.uuid4()), name='r1') with mock.patch.object(l3_agent_scheduler.LOG, 'debug') as flog: self._test_schedule_bind_router(self.agent1, router) self.assertEqual(1, flog.call_count) args, kwargs = flog.call_args self.assertIn('is scheduled', args[0]) def test_bind_absent_router(self): scheduler = l3_agent_scheduler.ChanceScheduler() # checking that bind_router() is not throwing # when supplied with router_id of non-existing router scheduler.bind_router(self.adminContext, "dummyID", self.agent1) def test_bind_existing_router(self): router = self._make_router(self.fmt, tenant_id=str(uuid.uuid4()), name='r2') self._test_schedule_bind_router(self.agent1, router) with mock.patch.object(l3_agent_scheduler.LOG, 'debug') as flog: self._test_schedule_bind_router(self.agent1, router) self.assertEqual(1, flog.call_count) args, kwargs = flog.call_args self.assertIn('has already been scheduled', args[0]) def _check_get_l3_agent_candidates( self, router, agent_list, exp_host, count=1): candidates = self.get_l3_agent_candidates(self.adminContext, router, agent_list) self.assertEqual(count, len(candidates)) if count: self.assertEqual(exp_host, candidates[0]['host']) def test_get_l3_agent_candidates_legacy(self): 
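        # The candidate tests that follow register legacy, dvr and
        # dvr_snat agents and check which of them
        # get_l3_agent_candidates() returns: a centralized router
        # matches only the legacy agent, while a distributed router
        # yields candidates only among dvr_snat agents (plain dvr
        # agents are bound through port-based DVR scheduling rather
        # than through this candidate list).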
self._register_l3_dvr_agents() router = self._make_router(self.fmt, tenant_id=str(uuid.uuid4()), name='r2') router['external_gateway_info'] = None router['id'] = str(uuid.uuid4()) agent_list = [self.agent1, self.l3_dvr_agent] # test legacy agent_mode case: only legacy agent should be candidate router['distributed'] = False exp_host = 'host_1' self._check_get_l3_agent_candidates(router, agent_list, exp_host) def test_get_l3_agent_candidates_dvr(self): self._register_l3_dvr_agents() router = self._make_router(self.fmt, tenant_id=str(uuid.uuid4()), name='r2') router['external_gateway_info'] = None router['id'] = str(uuid.uuid4()) agent_list = [self.agent1, self.l3_dvr_agent] # test dvr agent_mode case no candidates router['distributed'] = True self.get_subnet_ids_on_router = mock.Mock() self._check_dvr_serviceable_ports_on_host = mock.Mock( return_value=True) self._check_get_l3_agent_candidates(router, agent_list, None, count=0) def test_get_l3_agent_candidates_dvr_no_vms(self): self._register_l3_dvr_agents() router = self._make_router(self.fmt, tenant_id=str(uuid.uuid4()), name='r2') router['external_gateway_info'] = None router['id'] = str(uuid.uuid4()) agent_list = [self.agent1, self.l3_dvr_agent] router['distributed'] = True # Test no VMs present case self.get_subnet_ids_on_router = mock.Mock() self._check_dvr_serviceable_ports_on_host = mock.Mock( return_value=False) self._check_get_l3_agent_candidates( router, agent_list, HOST_DVR, count=0) def test_get_l3_agent_candidates_dvr_snat(self): self._register_l3_dvr_agents() router = self._make_router(self.fmt, tenant_id=str(uuid.uuid4()), name='r2') router['external_gateway_info'] = None router['id'] = str(uuid.uuid4()) router['distributed'] = True agent_list = [self.l3_dvr_snat_agent] self.get_subnet_ids_on_router = mock.Mock() self._check_dvr_serviceable_ports_on_host = mock.Mock( return_value=True) self._check_get_l3_agent_candidates(router, agent_list, HOST_DVR_SNAT) def test_get_l3_agent_candidates_dvr_snat_no_vms(self): self._register_l3_dvr_agents() router = self._make_router(self.fmt, tenant_id=str(uuid.uuid4()), name='r2') router['external_gateway_info'] = None router['id'] = str(uuid.uuid4()) router['distributed'] = True agent_list = [self.l3_dvr_snat_agent] self._check_dvr_serviceable_ports_on_host = mock.Mock( return_value=False) # Test no VMs present case self.get_subnet_ids_on_router = mock.Mock() self._check_dvr_serviceable_ports_on_host.return_value = False self._check_get_l3_agent_candidates( router, agent_list, HOST_DVR_SNAT, count=1) def test_get_l3_agent_candidates_dvr_ha_snat_no_vms(self): self._register_l3_dvr_agents() router = self._make_router(self.fmt, tenant_id=str(uuid.uuid4()), name='r2') router['external_gateway_info'] = None router['id'] = str(uuid.uuid4()) router['distributed'] = True router['ha'] = True agent_list = [self.l3_dvr_snat_agent] self.check_ports_exist_on_l3agent = mock.Mock(return_value=False) # Test no VMs present case self.check_ports_exist_on_l3agent.return_value = False self.get_subnet_ids_on_router = mock.Mock(return_value=set()) self._check_get_l3_agent_candidates( router, agent_list, HOST_DVR_SNAT, count=1) def test_get_l3_agent_candidates_centralized(self): self._register_l3_dvr_agents() router = self._make_router(self.fmt, tenant_id=str(uuid.uuid4()), name='r2') router['external_gateway_info'] = None router['id'] = str(uuid.uuid4()) # check centralized test case router['distributed'] = False agent_list = [self.l3_dvr_snat_agent] self._check_get_l3_agent_candidates(router, agent_list, 
HOST_DVR_SNAT) def test_get_l3_agents_hosting_routers(self): agent = helpers.register_l3_agent('host_6') router = self._make_router(self.fmt, tenant_id=str(uuid.uuid4()), name='r1') ctx = self.adminContext router_id = router['router']['id'] self.plugin.router_scheduler.bind_router(ctx, router_id, agent) agents = self.get_l3_agents_hosting_routers(ctx, [router_id]) self.assertEqual([agent.id], [agt.id for agt in agents]) agents = self.get_l3_agents_hosting_routers(ctx, [router_id], admin_state_up=True) self.assertEqual([agent.id], [agt.id for agt in agents]) self._set_l3_agent_admin_state(ctx, agent.id, False) agents = self.get_l3_agents_hosting_routers(ctx, [router_id]) self.assertEqual([agent.id], [agt.id for agt in agents]) agents = self.get_l3_agents_hosting_routers(ctx, [router_id], admin_state_up=True) self.assertEqual([], agents) class L3SchedulerTestCaseMixin(l3_agentschedulers_db.L3AgentSchedulerDbMixin, l3_db.L3_NAT_db_mixin, common_db_mixin.CommonDbMixin, test_l3.L3NatTestCaseMixin, L3SchedulerBaseMixin, L3SchedulerTestBaseMixin): def setUp(self): self.mock_rescheduling = False ext_mgr = test_l3.L3TestExtensionManager() plugin_str = ('neutron.tests.unit.extensions.test_l3.' 'TestL3NatIntAgentSchedulingPlugin') super(L3SchedulerTestCaseMixin, self).setUp(plugin=plugin_str, ext_mgr=ext_mgr) self.adminContext = n_context.get_admin_context() self.plugin = manager.NeutronManager.get_plugin() self.plugin.router_scheduler = importutils.import_object( 'neutron.scheduler.l3_agent_scheduler.ChanceScheduler' ) self._register_l3_agents() class L3AgentChanceSchedulerTestCase(L3SchedulerTestCaseMixin, test_db_base_plugin_v2. NeutronDbPluginV2TestCase): def test_random_scheduling(self): random_patch = mock.patch('random.choice') random_mock = random_patch.start() def side_effect(seq): return seq[0] random_mock.side_effect = side_effect with self.subnet() as subnet: self._set_net_external(subnet['subnet']['network_id']) with self.router_with_ext_gw(name='r1', subnet=subnet) as r1: agents = self.get_l3_agents_hosting_routers( self.adminContext, [r1['router']['id']], admin_state_up=True) self.assertEqual(1, len(agents)) self.assertEqual(1, random_mock.call_count) with self.router_with_ext_gw(name='r2', subnet=subnet) as r2: agents = self.get_l3_agents_hosting_routers( self.adminContext, [r2['router']['id']], admin_state_up=True) self.assertEqual(len(agents), 1) self.assertEqual(2, random_mock.call_count) random_patch.stop() def test_scheduler_auto_schedule_when_agent_added(self): self._set_l3_agent_admin_state(self.adminContext, self.agent_id1, False) self._set_l3_agent_admin_state(self.adminContext, self.agent_id2, False) with self.subnet() as subnet: self._set_net_external(subnet['subnet']['network_id']) with self.router_with_ext_gw(name='r1', subnet=subnet) as r1: agents = self.get_l3_agents_hosting_routers( self.adminContext, [r1['router']['id']], admin_state_up=True) self.assertEqual(0, len(agents)) self._set_l3_agent_admin_state(self.adminContext, self.agent_id1, True) self.plugin.auto_schedule_routers(self.adminContext, 'host_1', [r1['router']['id']]) agents = self.get_l3_agents_hosting_routers( self.adminContext, [r1['router']['id']], admin_state_up=True) self.assertEqual('host_1', agents[0]['host']) class L3AgentLeastRoutersSchedulerTestCase(L3SchedulerTestCaseMixin, test_db_base_plugin_v2. 
NeutronDbPluginV2TestCase): def setUp(self): super(L3AgentLeastRoutersSchedulerTestCase, self).setUp() self.plugin.router_scheduler = importutils.import_object( 'neutron.scheduler.l3_agent_scheduler.LeastRoutersScheduler' ) def test_scheduler(self): # disable one agent to force the scheduling to the only one. self._set_l3_agent_admin_state(self.adminContext, self.agent_id2, False) with self.subnet() as subnet: self._set_net_external(subnet['subnet']['network_id']) with self.router_with_ext_gw(name='r1', subnet=subnet) as r1: agents = self.get_l3_agents_hosting_routers( self.adminContext, [r1['router']['id']], admin_state_up=True) self.assertEqual(1, len(agents)) agent_id1 = agents[0]['id'] with self.router_with_ext_gw(name='r2', subnet=subnet) as r2: agents = self.get_l3_agents_hosting_routers( self.adminContext, [r2['router']['id']], admin_state_up=True) self.assertEqual(1, len(agents)) agent_id2 = agents[0]['id'] self.assertEqual(agent_id1, agent_id2) # re-enable the second agent to see whether the next router # spawned will be on this one. self._set_l3_agent_admin_state(self.adminContext, self.agent_id2, True) with self.router_with_ext_gw(name='r3', subnet=subnet) as r3: agents = self.get_l3_agents_hosting_routers( self.adminContext, [r3['router']['id']], admin_state_up=True) self.assertEqual(1, len(agents)) agent_id3 = agents[0]['id'] self.assertNotEqual(agent_id1, agent_id3) class L3DvrScheduler(l3_db.L3_NAT_db_mixin, l3_dvrscheduler_db.L3_DVRsch_db_mixin): pass class L3DvrSchedulerTestCase(testlib_api.SqlTestCase): def setUp(self): plugin = 'neutron.plugins.ml2.plugin.Ml2Plugin' self.setup_coreplugin(plugin) super(L3DvrSchedulerTestCase, self).setUp() self.adminContext = n_context.get_admin_context() self.dut = L3DvrScheduler() def test__notify_l3_agent_update_port_with_allowed_address_pairs_revert( self): port_id = str(uuid.uuid4()) kwargs = { 'context': self.adminContext, 'port': { 'id': port_id, 'admin_state_up': False, portbindings.HOST_ID: 'vm-host', 'device_id': 'vm-id', 'allowed_address_pairs': [ {'ip_address': '10.1.0.201', 'mac_address': 'aa:bb:cc:dd:ee:ff'}], 'device_owner': DEVICE_OWNER_COMPUTE, }, 'original_port': { 'id': port_id, 'admin_state_up': True, portbindings.HOST_ID: 'vm-host', 'device_id': 'vm-id', 'allowed_address_pairs': [ {'ip_address': '10.1.0.201', 'mac_address': 'aa:bb:cc:dd:ee:ff'}], 'device_owner': DEVICE_OWNER_COMPUTE, }, } port = kwargs.get('original_port') port_addr_pairs = port['allowed_address_pairs'] l3plugin = mock.Mock() with mock.patch.object(manager.NeutronManager, 'get_service_plugins', return_value={'L3_ROUTER_NAT': l3plugin}): l3_dvrscheduler_db._notify_l3_agent_port_update( 'port', 'after_update', mock.ANY, **kwargs) l3plugin._get_allowed_address_pair_fixed_ips.return_value = ( ['10.1.0.21']) self.assertTrue( l3plugin.remove_unbound_allowed_address_pair_port_binding. 
called) l3plugin.remove_unbound_allowed_address_pair_port_binding.\ assert_called_once_with( self.adminContext, port, port_addr_pairs[0]) self.assertFalse( l3plugin.update_arp_entry_for_dvr_service_port.called) l3plugin.delete_arp_entry_for_dvr_service_port.\ assert_called_once_with( self.adminContext, port, fixed_ips_to_delete=mock.ANY) self.assertFalse(l3plugin.dvr_handle_new_service_port.called) def test__notify_l3_agent_update_port_with_allowed_address_pairs(self): port_id = str(uuid.uuid4()) kwargs = { 'context': self.adminContext, 'port': { 'id': port_id, portbindings.HOST_ID: 'vm-host', 'allowed_address_pairs': [ {'ip_address': '10.1.0.201', 'mac_address': 'aa:bb:cc:dd:ee:ff'}], 'device_id': 'vm-id', 'device_owner': DEVICE_OWNER_COMPUTE, 'admin_state_up': True, }, 'original_port': { 'id': port_id, portbindings.HOST_ID: 'vm-host', 'device_id': 'vm-id', 'device_owner': DEVICE_OWNER_COMPUTE, 'admin_state_up': True, }, } port = kwargs.get('port') port_addr_pairs = port['allowed_address_pairs'] l3plugin = mock.Mock() with mock.patch.object(manager.NeutronManager, 'get_service_plugins', return_value={'L3_ROUTER_NAT': l3plugin}): l3_dvrscheduler_db._notify_l3_agent_port_update( 'port', 'after_update', mock.ANY, **kwargs) self.assertTrue( l3plugin.update_unbound_allowed_address_pair_port_binding. called) l3plugin.update_unbound_allowed_address_pair_port_binding.\ assert_called_once_with( self.adminContext, port, port_addr_pairs[0]) self.assertTrue( l3plugin.update_arp_entry_for_dvr_service_port.called) self.assertTrue(l3plugin.dvr_handle_new_service_port.called) def test__notify_l3_agent_update_port_no_removing_routers(self): port_id = 'fake-port' kwargs = { 'context': self.adminContext, 'port': None, 'original_port': { 'id': port_id, portbindings.HOST_ID: 'vm-host', 'device_id': 'vm-id', 'device_owner': DEVICE_OWNER_COMPUTE, 'mac_address': '02:04:05:17:18:19' }, 'mac_address_updated': True } plugin = manager.NeutronManager.get_plugin() l3plugin = mock.Mock() l3plugin.supported_extension_aliases = [ 'router', constants.L3_AGENT_SCHEDULER_EXT_ALIAS, constants.L3_DISTRIBUTED_EXT_ALIAS ] with mock.patch.object(manager.NeutronManager, 'get_service_plugins', return_value={'L3_ROUTER_NAT': l3plugin}): l3_dvrscheduler_db._notify_l3_agent_port_update( 'port', 'after_update', plugin, **kwargs) self.assertFalse( l3plugin.update_arp_entry_for_dvr_service_port.called) self.assertFalse( l3plugin.dvr_handle_new_service_port.called) self.assertFalse(l3plugin.remove_router_from_l3_agent.called) self.assertFalse(l3plugin.get_dvr_routers_to_remove.called) def test__notify_l3_agent_new_port_action(self): kwargs = { 'context': self.adminContext, 'original_port': None, 'port': { 'device_owner': DEVICE_OWNER_COMPUTE, }, } l3plugin = mock.Mock() with mock.patch.object(manager.NeutronManager, 'get_service_plugins', return_value={'L3_ROUTER_NAT': l3plugin}): l3_dvrscheduler_db._notify_l3_agent_new_port( 'port', 'after_create', mock.ANY, **kwargs) l3plugin.update_arp_entry_for_dvr_service_port.\ assert_called_once_with( self.adminContext, kwargs.get('port')) l3plugin.dvr_handle_new_service_port.assert_called_once_with( self.adminContext, kwargs.get('port')) def test__notify_l3_agent_new_port_no_action(self): kwargs = { 'context': self.adminContext, 'original_port': None, 'port': { 'device_owner': 'network:None', } } l3plugin = mock.Mock() with mock.patch.object(manager.NeutronManager, 'get_service_plugins', return_value={'L3_ROUTER_NAT': l3plugin}): l3_dvrscheduler_db._notify_l3_agent_new_port( 'port', 
'after_create', mock.ANY, **kwargs) self.assertFalse( l3plugin.update_arp_entry_for_dvr_service_port.called) self.assertFalse( l3plugin.dvr_handle_new_service_port.called) def test__notify_l3_agent_update_port_with_migration_port_profile(self): kwargs = { 'context': self.adminContext, 'original_port': { portbindings.HOST_ID: 'vm-host', 'device_owner': DEVICE_OWNER_COMPUTE, }, 'port': { portbindings.HOST_ID: 'vm-host', 'device_owner': DEVICE_OWNER_COMPUTE, portbindings.PROFILE: {'migrating_to': 'vm-host2'}, }, } l3plugin = mock.Mock() with mock.patch.object(manager.NeutronManager, 'get_service_plugins', return_value={'L3_ROUTER_NAT': l3plugin}): l3_dvrscheduler_db._notify_l3_agent_port_update( 'port', 'after_update', mock.ANY, **kwargs) l3plugin.dvr_handle_new_service_port.assert_called_once_with( self.adminContext, kwargs.get('port'), dest_host='vm-host2') l3plugin.update_arp_entry_for_dvr_service_port.\ assert_called_once_with( self.adminContext, kwargs.get('port')) def test__notify_l3_agent_update_port_no_action(self): kwargs = { 'context': self.adminContext, 'original_port': { portbindings.HOST_ID: 'vm-host', 'device_owner': DEVICE_OWNER_COMPUTE, }, 'port': { portbindings.HOST_ID: 'vm-host', 'device_owner': DEVICE_OWNER_COMPUTE, }, } l3plugin = mock.Mock() with mock.patch.object(manager.NeutronManager, 'get_service_plugins', return_value={'L3_ROUTER_NAT': l3plugin}): l3_dvrscheduler_db._notify_l3_agent_port_update( 'port', 'after_update', mock.ANY, **kwargs) self.assertFalse( l3plugin.update_arp_entry_for_dvr_service_port.called) self.assertFalse( l3plugin.dvr_handle_new_service_port.called) self.assertFalse(l3plugin.remove_router_from_l3_agent.called) self.assertFalse(l3plugin.get_dvr_routers_to_remove.called) def test__notify_l3_agent_update_port_with_mac_address_update(self): kwargs = { 'context': self.adminContext, 'original_port': { portbindings.HOST_ID: 'vm-host', 'mac_address': '02:04:05:17:18:19' }, 'port': { portbindings.HOST_ID: 'vm-host', 'mac_address': '02:04:05:17:18:29' }, 'mac_address_updated': True } l3plugin = mock.Mock() with mock.patch.object(manager.NeutronManager, 'get_service_plugins', return_value={'L3_ROUTER_NAT': l3plugin}): l3_dvrscheduler_db._notify_l3_agent_port_update( 'port', 'after_update', mock.ANY, **kwargs) l3plugin.update_arp_entry_for_dvr_service_port.\ assert_called_once_with( self.adminContext, kwargs.get('port')) self.assertFalse(l3plugin.dvr_handle_new_service_port.called) def test__notify_l3_agent_port_binding_change(self): self._test__notify_l3_agent_port_binding_change() def test__notify_l3_agent_port_binding_change_removed_routers(self): router_to_remove = [{'agent_id': 'foo_agent', 'router_id': 'foo_id', 'host': 'vm-host1'}] self._test__notify_l3_agent_port_binding_change(router_to_remove) def test__notify_l3_agent_port_binding_change_removed_routers_fip(self): fip = {'router_id': 'router_id'} router_to_remove = [{'agent_id': 'foo_agent', 'router_id': 'foo_id', 'host': 'vm-host1'}] self._test__notify_l3_agent_port_binding_change(router_to_remove, fip) def test__notify_l3_agent_port_binding_change_with_fip(self): fip = {'router_id': 'router_id'} self._test__notify_l3_agent_port_binding_change(None, fip) def _test__notify_l3_agent_port_binding_change(self, routers_to_remove=None, fip=None): source_host = 'vm-host1' kwargs = { 'context': self.adminContext, 'original_port': { 'id': str(uuid.uuid4()), portbindings.HOST_ID: source_host, 'device_owner': DEVICE_OWNER_COMPUTE, }, 'port': { portbindings.HOST_ID: 'vm-host2', 'device_owner': 
DEVICE_OWNER_COMPUTE, }, } l3plugin = mock.Mock() with mock.patch.object(manager.NeutronManager, 'get_service_plugins', return_value={'L3_ROUTER_NAT': l3plugin}),\ mock.patch.object(l3plugin, 'get_dvr_routers_to_remove', return_value=routers_to_remove),\ mock.patch.object(l3plugin, '_get_floatingip_on_port', return_value=fip): l3_dvrscheduler_db._notify_l3_agent_port_update( 'port', 'after_update', mock.ANY, **kwargs) if routers_to_remove: (l3plugin.l3_rpc_notifier.router_removed_from_agent. assert_called_once_with(mock.ANY, 'foo_id', source_host)) self.assertEqual( 1, l3plugin.delete_arp_entry_for_dvr_service_port.call_count) if fip and not routers_to_remove: (l3plugin.l3_rpc_notifier.routers_updated_on_host. assert_called_once_with(mock.ANY, ['router_id'], source_host)) self.assertEqual( 1, l3plugin.update_arp_entry_for_dvr_service_port.call_count) l3plugin.dvr_handle_new_service_port.assert_called_once_with( self.adminContext, kwargs.get('port'), dest_host=None) def test__notify_l3_agent_update_port_removing_routers(self): port_id = 'fake-port' source_host = 'vm-host' kwargs = { 'context': self.adminContext, 'port': { 'id': port_id, portbindings.HOST_ID: None, 'device_id': '', 'device_owner': '' }, 'mac_address_updated': False, 'original_port': { 'id': port_id, portbindings.HOST_ID: source_host, 'device_id': 'vm-id', 'device_owner': DEVICE_OWNER_COMPUTE } } plugin = manager.NeutronManager.get_plugin() l3plugin = mock.Mock() l3plugin.supported_extension_aliases = [ 'router', constants.L3_AGENT_SCHEDULER_EXT_ALIAS, constants.L3_DISTRIBUTED_EXT_ALIAS ] with mock.patch.object(manager.NeutronManager, 'get_service_plugins', return_value={'L3_ROUTER_NAT': l3plugin}),\ mock.patch.object(l3plugin, 'get_dvr_routers_to_remove', return_value=[{'agent_id': 'foo_agent', 'router_id': 'foo_id', 'host': source_host}]),\ mock.patch.object(l3plugin, '_get_floatingip_on_port', return_value=None): l3_dvrscheduler_db._notify_l3_agent_port_update( 'port', 'after_update', plugin, **kwargs) self.assertEqual( 1, l3plugin.delete_arp_entry_for_dvr_service_port.call_count) l3plugin.delete_arp_entry_for_dvr_service_port.\ assert_called_once_with( self.adminContext, mock.ANY) self.assertFalse( l3plugin.dvr_handle_new_service_port.called) (l3plugin.l3_rpc_notifier.router_removed_from_agent. assert_called_once_with(mock.ANY, 'foo_id', source_host)) def test__notify_port_delete(self): plugin = manager.NeutronManager.get_plugin() l3plugin = mock.Mock() l3plugin.supported_extension_aliases = [ 'router', constants.L3_AGENT_SCHEDULER_EXT_ALIAS, constants.L3_DISTRIBUTED_EXT_ALIAS ] port = { 'id': str(uuid.uuid4()), 'device_id': 'abcd', 'device_owner': DEVICE_OWNER_COMPUTE_NOVA, portbindings.HOST_ID: 'host1', } with mock.patch.object(manager.NeutronManager, 'get_service_plugins', return_value={'L3_ROUTER_NAT': l3plugin}): kwargs = { 'context': self.adminContext, 'port': port, 'removed_routers': [ {'agent_id': 'foo_agent', 'router_id': 'foo_id'}, ], } removed_routers = [{'agent_id': 'foo_agent', 'router_id': 'foo_id', 'host': 'foo_host'}] l3plugin.get_dvr_routers_to_remove.return_value = removed_routers l3_dvrscheduler_db._notify_port_delete( 'port', 'after_delete', plugin, **kwargs) l3plugin.delete_arp_entry_for_dvr_service_port.\ assert_called_once_with( self.adminContext, mock.ANY) (l3plugin.l3_rpc_notifier.router_removed_from_agent. 
assert_called_once_with(mock.ANY, 'foo_id', 'foo_host')) def test_dvr_handle_new_service_port(self): port = { 'id': 'port1', 'device_id': 'abcd', 'device_owner': DEVICE_OWNER_COMPUTE_NOVA, portbindings.HOST_ID: 'host1', 'fixed_ips': [ { 'subnet_id': '80947d4a-fbc8-484b-9f92-623a6bfcf3e0', 'ip_address': '10.10.10.3' } ] } dvr_ports = [ { 'id': 'dvr_port1', 'device_id': 'r1', 'device_owner': constants.DEVICE_OWNER_DVR_INTERFACE, 'fixed_ips': [ { 'subnet_id': '80947d4a-fbc8-484b-9f92-623a6bfcf3e0', 'ip_address': '10.10.10.1' } ] }, { 'id': 'dvr_port2', 'device_id': 'r2', 'device_owner': constants.DEVICE_OWNER_DVR_INTERFACE, 'fixed_ips': [ { 'subnet_id': '80947d4a-fbc8-484b-9f92-623a6bfcf3e0', 'ip_address': '10.10.10.123' } ] } ] agent_on_host = {'id': 'agent1'} with mock.patch( 'neutron.db.db_base_plugin_v2.NeutronDbPluginV2' '.get_ports', return_value=dvr_ports),\ mock.patch('neutron.api.rpc.agentnotifiers.l3_rpc_agent_api' '.L3AgentNotifyAPI'),\ mock.patch.object( self.dut, 'get_l3_agents', return_value=[agent_on_host]) as get_l3_agents: self.dut.dvr_handle_new_service_port( self.adminContext, port) get_l3_agents.assert_called_once_with( self.adminContext, filters={'host': [port[portbindings.HOST_ID]]}) (self.dut.l3_rpc_notifier.routers_updated_on_host. assert_called_once_with( self.adminContext, {'r1', 'r2'}, 'host1')) self.assertFalse(self.dut.l3_rpc_notifier.routers_updated.called) def test_get_dvr_routers_by_subnet_ids(self): subnet_id = '80947d4a-fbc8-484b-9f92-623a6bfcf3e0' dvr_port = { 'id': 'dvr_port1', 'device_id': 'r1', 'device_owner': constants.DEVICE_OWNER_DVR_INTERFACE, 'fixed_ips': [ { 'subnet_id': subnet_id, 'ip_address': '10.10.10.1' } ] } r1 = { 'id': 'r1', 'distributed': True, } with mock.patch( 'neutron.db.db_base_plugin_v2.NeutronDbPluginV2' '.get_port', return_value=dvr_port),\ mock.patch('neutron.db.db_base_plugin_v2.NeutronDbPluginV2' '.get_ports', return_value=[dvr_port]): router_id = self.dut.get_dvr_routers_by_subnet_ids( self.adminContext, [subnet_id]) self.assertEqual(r1['id'], router_id.pop()) def test_get_subnet_ids_on_router(self): dvr_port = { 'id': 'dvr_port1', 'device_id': 'r1', 'device_owner': constants.DEVICE_OWNER_DVR_INTERFACE, 'fixed_ips': [ { 'subnet_id': '80947d4a-fbc8-484b-9f92-623a6bfcf3e0', 'ip_address': '10.10.10.1' } ] } r1 = { 'id': 'r1', 'distributed': True, } with mock.patch( 'neutron.db.db_base_plugin_v2.NeutronDbPluginV2' '.get_ports', return_value=[dvr_port]): sub_ids = self.dut.get_subnet_ids_on_router(self.adminContext, r1['id']) self.assertEqual(sub_ids.pop(), dvr_port.get('fixed_ips').pop(0).get('subnet_id')) def test_get_subnet_ids_on_router_no_subnet(self): dvr_port = { 'id': 'dvr_port1', 'device_id': 'r1', 'device_owner': 'network:router_interface_distributed', 'fixed_ips': [] } r1 = { 'id': 'r1', 'distributed': True, } with mock.patch.object(db_v2.NeutronDbPluginV2, 'get_ports', return_value=[dvr_port]): sub_ids = self.dut.get_subnet_ids_on_router(self.adminContext, r1['id']) self.assertEqual(0, len(sub_ids)) def _prepare_schedule_snat_tests(self): agent = agents_db.Agent() agent.admin_state_up = True agent.heartbeat_timestamp = timeutils.utcnow() router = { 'id': 'foo_router_id', 'distributed': True, 'external_gateway_info': { 'network_id': str(uuid.uuid4()), 'enable_snat': True } } return agent, router class L3HAPlugin(db_v2.NeutronDbPluginV2, l3_hamode_db.L3_HA_NAT_db_mixin, l3_hascheduler_db.L3_HA_scheduler_db_mixin): supported_extension_aliases = ["l3-ha", "router_availability_zone"] class 
L3HATestCaseMixin(testlib_api.SqlTestCase, L3SchedulerBaseMixin): def setUp(self): super(L3HATestCaseMixin, self).setUp() self.adminContext = n_context.get_admin_context() mock.patch('neutron.common.rpc.get_client').start() self.plugin = L3HAPlugin() self.setup_coreplugin('neutron.plugins.ml2.plugin.Ml2Plugin') cfg.CONF.set_override('service_plugins', ['neutron.services.l3_router.' 'l3_router_plugin.L3RouterPlugin']) cfg.CONF.set_override('max_l3_agents_per_router', 0) self.plugin.router_scheduler = importutils.import_object( 'neutron.scheduler.l3_agent_scheduler.ChanceScheduler' ) self._register_l3_agents() def _create_ha_router(self, ha=True, tenant_id='tenant1', az_hints=None): self.adminContext.tenant_id = tenant_id router = {'name': 'router1', 'admin_state_up': True, 'tenant_id': tenant_id} if ha is not None: router['ha'] = ha if az_hints is None: az_hints = [] router['availability_zone_hints'] = az_hints return self.plugin.create_router(self.adminContext, {'router': router}) def test_create_ha_port_and_bind_catch_integrity_error(self): router = self._create_ha_router(tenant_id='foo_tenant') agent = {'id': 'foo_agent'} orig_fn = orm.Session.add def db_ref_err_for_add_haportbinding(s, instance): if instance.__class__.__name__ == 'L3HARouterAgentPortBinding': instance.router_id = 'nonexistent_router' return orig_fn(s, instance) with mock.patch.object(self.plugin.router_scheduler, 'bind_router') as bind_router: with mock.patch.object( orm.Session, 'add', side_effect=db_ref_err_for_add_haportbinding, autospec=True): self.plugin.router_scheduler.create_ha_port_and_bind( self.plugin, self.adminContext, router['id'], router['tenant_id'], agent) self.assertFalse(bind_router.called) def test_create_ha_port_and_bind_catch_router_not_found(self): router = self._create_ha_router(tenant_id='foo_tenant') agent = {'id': 'foo_agent'} with mock.patch.object(self.plugin.router_scheduler, 'bind_router') as bind_router: with mock.patch.object( self.plugin, 'add_ha_port', side_effect=l3.RouterNotFound(router_id='foo_router')): self.plugin.router_scheduler.create_ha_port_and_bind( self.plugin, self.adminContext, router['id'], router['tenant_id'], agent) self.assertFalse(bind_router.called) class L3_HA_scheduler_db_mixinTestCase(L3HATestCaseMixin): def _register_l3_agents(self, plugin=None): super(L3_HA_scheduler_db_mixinTestCase, self)._register_l3_agents(plugin=plugin) self.agent3 = helpers.register_l3_agent(host='host_3') self.agent_id3 = self.agent3.id self.agent4 = helpers.register_l3_agent(host='host_4') self.agent_id4 = self.agent4.id def test_get_ha_routers_l3_agents_count(self): router1 = self._create_ha_router() router2 = self._create_ha_router() router3 = self._create_ha_router(ha=False) result = self.plugin.get_ha_routers_l3_agents_count(self.adminContext) self.assertEqual(2, len(result)) check_result = [(router['id'], agents) for router, agents in result] self.assertIn((router1['id'], 4), check_result) self.assertIn((router2['id'], 4), check_result) self.assertNotIn((router3['id'], mock.ANY), check_result) def test_get_ordered_l3_agents_by_num_routers(self): # Mock scheduling so that the test can control it explicitly mock.patch.object(l3_hamode_db.L3_HA_NAT_db_mixin, '_notify_ha_interfaces_updated').start() with mock.patch.object(self.plugin, 'schedule_router'): router1 = self._create_ha_router() router2 = self._create_ha_router() router3 = self._create_ha_router(ha=False) router4 = self._create_ha_router(ha=False) # Agent 1 will host 0 routers, agent 2 will host 1, agent 3 will # host 2, and 
agent 4 will host 3. self.plugin.schedule_router(self.adminContext, router1['id'], candidates=[self.agent2, self.agent4]) self.plugin.schedule_router(self.adminContext, router2['id'], candidates=[self.agent3, self.agent4]) self.plugin.schedule_router(self.adminContext, router3['id'], candidates=[self.agent3]) self.plugin.schedule_router(self.adminContext, router4['id'], candidates=[self.agent4]) agent_ids = [self.agent_id1, self.agent_id2, self.agent_id3, self.agent_id4] result = self.plugin.get_l3_agents_ordered_by_num_routers( self.adminContext, agent_ids) self.assertEqual(agent_ids, [record['id'] for record in result]) class L3AgentSchedulerDbMixinTestCase(L3HATestCaseMixin): def _setup_ha_router(self): router = self._create_ha_router() agents = self._get_agents_scheduled_for_router(router) return router, agents def test_reschedule_ha_routers_from_down_agents(self): agents = self._setup_ha_router()[1] self.assertEqual(2, len(agents)) self._set_l3_agent_dead(self.agent_id1) with mock.patch.object(self.plugin, 'reschedule_router') as reschedule: self.plugin.reschedule_routers_from_down_agents() self.assertFalse(reschedule.called) def test_list_l3_agents_hosting_ha_router(self): router = self._create_ha_router() agents = self.plugin.list_l3_agents_hosting_router( self.adminContext, router['id'])['agents'] for agent in agents: self.assertEqual('standby', agent['ha_state']) self.plugin.update_routers_states( self.adminContext, {router['id']: 'active'}, self.agent1.host) agents = self.plugin.list_l3_agents_hosting_router( self.adminContext, router['id'])['agents'] for agent in agents: expected_state = ('active' if agent['host'] == self.agent1.host else 'standby') self.assertEqual(expected_state, agent['ha_state']) def test_list_l3_agents_hosting_legacy_router(self): router = self._create_ha_router(ha=False) self.plugin.schedule_router(self.adminContext, router['id']) agent = self.plugin.list_l3_agents_hosting_router( self.adminContext, router['id'])['agents'][0] self.assertIsNone(agent['ha_state']) def test_get_agents_dict_for_router_unscheduled_returns_empty_list(self): self.assertEqual({'agents': []}, self.plugin._get_agents_dict_for_router([])) def test_manual_add_ha_router_to_agent(self): cfg.CONF.set_override('max_l3_agents_per_router', 2) router, agents = self._setup_ha_router() self.assertEqual(2, len(agents)) agent = helpers.register_l3_agent(host='myhost_3') # We allow to exceed max l3 agents per router via manual scheduling self.plugin.add_router_to_l3_agent( self.adminContext, agent.id, router['id']) agents = self._get_agents_scheduled_for_router(router) self.assertIn(agent.id, [_agent.id for _agent in agents]) self.assertEqual(3, len(agents)) def test_manual_remove_ha_router_from_agent(self): router, agents = self._setup_ha_router() self.assertEqual(2, len(agents)) agent = agents.pop() # Remove router from agent and make sure it is removed self.plugin.remove_router_from_l3_agent( self.adminContext, agent.id, router['id']) agents = self._get_agents_scheduled_for_router(router) self.assertEqual(1, len(agents)) self.assertNotIn(agent.id, [_agent.id for _agent in agents]) def test_manual_remove_ha_router_from_all_agents(self): router, agents = self._setup_ha_router() self.assertEqual(2, len(agents)) agent = agents.pop() self.plugin.remove_router_from_l3_agent( self.adminContext, agent.id, router['id']) agent = agents.pop() self.plugin.remove_router_from_l3_agent( self.adminContext, agent.id, router['id']) agents = self._get_agents_scheduled_for_router(router) self.assertEqual(0, 
len(agents)) def _get_agents_scheduled_for_router(self, router): return self.plugin.get_l3_agents_hosting_routers( self.adminContext, [router['id']], admin_state_up=True) def test_delete_ha_interfaces_from_agent(self): router, agents = self._setup_ha_router() agent = agents.pop() self.plugin.remove_router_from_l3_agent( self.adminContext, agent.id, router['id']) session = self.adminContext.session db = l3_hamode_db.L3HARouterAgentPortBinding results = session.query(db).filter_by( router_id=router['id']) results = [binding.l3_agent_id for binding in results.all()] self.assertNotIn(agent.id, results) def test_add_ha_interface_to_l3_agent(self): agent = self.plugin.get_agents_db(self.adminContext)[0] router = self._create_ha_router() self.plugin.add_router_to_l3_agent(self.adminContext, agent.id, router['id']) # Verify agent has HA interface ha_ports = self.plugin.get_ha_router_port_bindings(self.adminContext, [router['id']]) self.assertIn(agent.id, [ha_port.l3_agent_id for ha_port in ha_ports]) class L3HAChanceSchedulerTestCase(L3HATestCaseMixin): def test_scheduler_with_ha_enabled(self): router = self._create_ha_router() agents = self.plugin.get_l3_agents_hosting_routers( self.adminContext, [router['id']], admin_state_up=True) self.assertEqual(2, len(agents)) for agent in agents: sync_data = self.plugin.get_ha_sync_data_for_host( self.adminContext, router_ids=[router['id']], host=agent.host, agent=agent) self.assertEqual(1, len(sync_data)) interface = sync_data[0][constants.HA_INTERFACE_KEY] self.assertIsNotNone(interface) def test_auto_schedule(self): # Mock scheduling so that the test can control it explicitly mock.patch.object(l3_hamode_db.L3_HA_NAT_db_mixin, '_notify_ha_interfaces_updated').start() router = self._create_ha_router() self.plugin.auto_schedule_routers( self.adminContext, self.agent1.host, None) self.plugin.auto_schedule_routers( self.adminContext, self.agent2.host, None) agents = self.plugin.get_l3_agents_hosting_routers( self.adminContext, [router['id']]) self.assertEqual(2, len(agents)) def test_auto_schedule_specific_router_when_agent_added(self): self._auto_schedule_when_agent_added(True) def test_auto_schedule_all_routers_when_agent_added(self): self._auto_schedule_when_agent_added(False) def test_auto_schedule_ha_router_when_incompatible_agent_exist(self): handle_internal_only_routers_agent = helpers.register_l3_agent( 'host_3', constants.L3_AGENT_MODE_LEGACY, internal_only=False) router = self._create_ha_router() self.plugin.auto_schedule_routers( self.adminContext, handle_internal_only_routers_agent.host, []) agents = self.plugin.get_l3_agents_hosting_routers( self.adminContext, [router['id']], admin_state_up=True) agent_ids = [agent['id'] for agent in agents] self.assertEqual(2, len(agents)) self.assertNotIn(handle_internal_only_routers_agent.id, agent_ids) def test_auto_schedule_ha_router_when_dvr_agent_exist(self): dvr_agent = helpers.register_l3_agent( HOST_DVR, constants.L3_AGENT_MODE_DVR) router = self._create_ha_router() self.plugin.auto_schedule_routers(self.adminContext, dvr_agent.host, []) agents = self.plugin.get_l3_agents_hosting_routers( self.adminContext, [router['id']], admin_state_up=True) agent_ids = [agent['id'] for agent in agents] self.assertEqual(2, len(agents)) self.assertNotIn(dvr_agent.id, agent_ids) def _auto_schedule_when_agent_added(self, specific_router): router = self._create_ha_router() agents = self.plugin.get_l3_agents_hosting_routers( self.adminContext, [router['id']], admin_state_up=True) self.assertEqual(2, len(agents)) agent_ids 
= [agent['id'] for agent in agents] self.assertIn(self.agent_id1, agent_ids) self.assertIn(self.agent_id2, agent_ids) agent = helpers.register_l3_agent(host='host_3') self.agent_id3 = agent.id routers_to_auto_schedule = [router['id']] if specific_router else [] self.plugin.auto_schedule_routers(self.adminContext, 'host_3', routers_to_auto_schedule) agents = self.plugin.get_l3_agents_hosting_routers( self.adminContext, [router['id']], admin_state_up=True) self.assertEqual(3, len(agents)) # Simulate agent restart to make sure we don't try to re-bind self.plugin.auto_schedule_routers(self.adminContext, 'host_3', routers_to_auto_schedule) def test_scheduler_with_ha_enabled_not_enough_agent(self): r1 = self._create_ha_router() agents = self.plugin.get_l3_agents_hosting_routers( self.adminContext, [r1['id']], admin_state_up=True) self.assertEqual(2, len(agents)) self._set_l3_agent_admin_state(self.adminContext, self.agent_id2, False) self.assertRaises( l3_ha.HANotEnoughAvailableAgents, self._create_ha_router) class L3HALeastRoutersSchedulerTestCase(L3HATestCaseMixin): def _register_l3_agents(self, plugin=None): super(L3HALeastRoutersSchedulerTestCase, self)._register_l3_agents(plugin=plugin) agent = helpers.register_l3_agent(host='host_3') self.agent_id3 = agent.id agent = helpers.register_l3_agent(host='host_4') self.agent_id4 = agent.id def setUp(self): super(L3HALeastRoutersSchedulerTestCase, self).setUp() self.plugin.router_scheduler = importutils.import_object( 'neutron.scheduler.l3_agent_scheduler.LeastRoutersScheduler' ) def test_scheduler(self): cfg.CONF.set_override('max_l3_agents_per_router', 2) # disable the third and fourth agents to be sure that the router # will be scheduled to one of the first two self._set_l3_agent_admin_state(self.adminContext, self.agent_id3, False) self._set_l3_agent_admin_state(self.adminContext, self.agent_id4, False) r1 = self._create_ha_router() agents = self.plugin.get_l3_agents_hosting_routers( self.adminContext, [r1['id']], admin_state_up=True) self.assertEqual(2, len(agents)) agent_ids = [agent['id'] for agent in agents] self.assertIn(self.agent_id1, agent_ids) self.assertIn(self.agent_id2, agent_ids) self._set_l3_agent_admin_state(self.adminContext, self.agent_id3, True) self._set_l3_agent_admin_state(self.adminContext, self.agent_id4, True) r2 = self._create_ha_router() agents = self.plugin.get_l3_agents_hosting_routers( self.adminContext, [r2['id']], admin_state_up=True) self.assertEqual(2, len(agents)) agent_ids = [agent['id'] for agent in agents] self.assertIn(self.agent_id3, agent_ids) self.assertIn(self.agent_id4, agent_ids) class TestGetL3AgentsWithAgentModeFilter(testlib_api.SqlTestCase, L3SchedulerBaseMixin): """Test cases for get_l3_agents. This class tests L3AgentSchedulerDbMixin.get_l3_agents() with various values of the 'agent_mode' filter.
5 l3 agents are registered in the order - legacy, dvr_snat, dvr, fake_mode and legacy """ scenarios = [ ('no filter', dict(agent_modes=[], expected_agent_modes=['legacy', 'dvr_snat', 'dvr', 'fake_mode', 'legacy'])), ('legacy', dict(agent_modes=['legacy'], expected_agent_modes=['legacy', 'legacy'])), ('dvr_snat', dict(agent_modes=['dvr_snat'], expected_agent_modes=['dvr_snat'])), ('dvr ', dict(agent_modes=['dvr'], expected_agent_modes=['dvr'])), ('legacy and dvr snat', dict(agent_modes=['legacy', 'dvr_snat', 'legacy'], expected_agent_modes=['legacy', 'dvr_snat', 'legacy'])), ('legacy and dvr', dict(agent_modes=['legacy', 'dvr'], expected_agent_modes=['legacy', 'dvr', 'legacy'])), ('dvr_snat and dvr', dict(agent_modes=['dvr_snat', 'dvr'], expected_agent_modes=['dvr_snat', 'dvr'])), ('legacy, dvr_snat and dvr', dict(agent_modes=['legacy', 'dvr_snat', 'dvr'], expected_agent_modes=['legacy', 'dvr_snat', 'dvr', 'legacy'])), ('invalid', dict(agent_modes=['invalid'], expected_agent_modes=[])), ] def setUp(self): super(TestGetL3AgentsWithAgentModeFilter, self).setUp() self.plugin = L3HAPlugin() self.setup_coreplugin('neutron.plugins.ml2.plugin.Ml2Plugin') self.adminContext = n_context.get_admin_context() hosts = ['host_1', 'host_2', 'host_3', 'host_4', 'host_5'] agent_modes = ['legacy', 'dvr_snat', 'dvr', 'fake_mode', 'legacy'] for host, agent_mode in zip(hosts, agent_modes): helpers.register_l3_agent(host, agent_mode) def _get_agent_mode(self, agent): agent_conf = self.plugin.get_configuration_dict(agent) return agent_conf.get('agent_mode', 'None') def test_get_l3_agents(self): l3_agents = self.plugin.get_l3_agents( self.adminContext, filters={'agent_modes': self.agent_modes}) self.assertEqual(len(self.expected_agent_modes), len(l3_agents)) returned_agent_modes = [self._get_agent_mode(agent) for agent in l3_agents] self.assertEqual(self.expected_agent_modes, returned_agent_modes) class L3AgentAZLeastRoutersSchedulerTestCase(L3HATestCaseMixin): def setUp(self): super(L3AgentAZLeastRoutersSchedulerTestCase, self).setUp() self.plugin.router_scheduler = importutils.import_object( 'neutron.scheduler.l3_agent_scheduler.AZLeastRoutersScheduler') # Mock scheduling so that the test can control it explicitly mock.patch.object(l3_hamode_db.L3_HA_NAT_db_mixin, '_notify_ha_interfaces_updated').start() def _register_l3_agents(self): self.agent1 = helpers.register_l3_agent(host='az1-host1', az='az1') self.agent2 = helpers.register_l3_agent(host='az1-host2', az='az1') self.agent3 = helpers.register_l3_agent(host='az2-host1', az='az2') self.agent4 = helpers.register_l3_agent(host='az2-host2', az='az2') self.agent5 = helpers.register_l3_agent(host='az3-host1', az='az3') self.agent6 = helpers.register_l3_agent(host='az3-host2', az='az3') def test_az_scheduler_auto_schedule(self): r1 = self._create_ha_router(ha=False, az_hints=['az1']) self.plugin.auto_schedule_routers(self.adminContext, 'az1-host2', None) agents = self.plugin.get_l3_agents_hosting_routers( self.adminContext, [r1['id']]) self.assertEqual(1, len(agents)) self.assertEqual('az1-host2', agents[0]['host']) def test_az_scheduler_auto_schedule_no_match(self): r1 = self._create_ha_router(ha=False, az_hints=['az1']) self.plugin.auto_schedule_routers(self.adminContext, 'az2-host1', None) agents = self.plugin.get_l3_agents_hosting_routers( self.adminContext, [r1['id']]) self.assertEqual(0, len(agents)) def test_az_scheduler_default_az(self): cfg.CONF.set_override('default_availability_zones', ['az2']) r1 = self._create_ha_router(ha=False) r2 = 
self._create_ha_router(ha=False) r3 = self._create_ha_router(ha=False) self.plugin.schedule_router(self.adminContext, r1['id']) self.plugin.schedule_router(self.adminContext, r2['id']) self.plugin.schedule_router(self.adminContext, r3['id']) agents = self.plugin.get_l3_agents_hosting_routers( self.adminContext, [r1['id'], r2['id'], r3['id']]) self.assertEqual(3, len(agents)) expected_hosts = set(['az2-host1', 'az2-host2']) hosts = set([a['host'] for a in agents]) self.assertEqual(expected_hosts, hosts) def test_az_scheduler_az_hints(self): r1 = self._create_ha_router(ha=False, az_hints=['az3']) r2 = self._create_ha_router(ha=False, az_hints=['az3']) r3 = self._create_ha_router(ha=False, az_hints=['az3']) self.plugin.schedule_router(self.adminContext, r1['id']) self.plugin.schedule_router(self.adminContext, r2['id']) self.plugin.schedule_router(self.adminContext, r3['id']) agents = self.plugin.get_l3_agents_hosting_routers( self.adminContext, [r1['id'], r2['id'], r3['id']]) self.assertEqual(3, len(agents)) expected_hosts = set(['az3-host1', 'az3-host2']) hosts = set([a['host'] for a in agents]) self.assertEqual(expected_hosts, hosts) def test_az_scheduler_least_routers(self): r1 = self._create_ha_router(ha=False, az_hints=['az1']) r2 = self._create_ha_router(ha=False, az_hints=['az1']) r3 = self._create_ha_router(ha=False, az_hints=['az1']) r4 = self._create_ha_router(ha=False, az_hints=['az1']) self.plugin.schedule_router(self.adminContext, r1['id']) self.plugin.schedule_router(self.adminContext, r2['id']) self.plugin.schedule_router(self.adminContext, r3['id']) self.plugin.schedule_router(self.adminContext, r4['id']) agents = self.plugin.get_l3_agents_hosting_routers( self.adminContext, [r1['id'], r2['id'], r3['id'], r4['id']]) host_num = collections.defaultdict(int) for agent in agents: host_num[agent['host']] += 1 self.assertEqual(2, host_num['az1-host1']) self.assertEqual(2, host_num['az1-host2']) def test_az_scheduler_ha_az_hints(self): cfg.CONF.set_override('max_l3_agents_per_router', 2) r1 = self._create_ha_router(az_hints=['az1', 'az3']) self.plugin.schedule_router(self.adminContext, r1['id']) agents = self.plugin.get_l3_agents_hosting_routers( self.adminContext, [r1['id']]) self.assertEqual(2, len(agents)) expected_azs = set(['az1', 'az3']) azs = set([a['availability_zone'] for a in agents]) self.assertEqual(expected_azs, azs) def test_az_scheduler_ha_auto_schedule(self): cfg.CONF.set_override('max_l3_agents_per_router', 3) self._set_l3_agent_admin_state(self.adminContext, self.agent2['id'], state=False) self._set_l3_agent_admin_state(self.adminContext, self.agent6['id'], state=False) r1 = self._create_ha_router(az_hints=['az1', 'az3']) agents = self.plugin.get_l3_agents_hosting_routers( self.adminContext, [r1['id']]) self.assertEqual(2, len(agents)) hosts = set([a['host'] for a in agents]) self.assertEqual(set(['az1-host1', 'az3-host1']), hosts) self._set_l3_agent_admin_state(self.adminContext, self.agent6['id'], state=True) self.plugin.auto_schedule_routers(self.adminContext, 'az3-host2', None) agents = self.plugin.get_l3_agents_hosting_routers( self.adminContext, [r1['id']]) self.assertEqual(3, len(agents)) expected_hosts = set(['az1-host1', 'az3-host1', 'az3-host2']) hosts = set([a['host'] for a in agents]) self.assertEqual(expected_hosts, hosts) def test__get_routers_can_schedule_with_no_target_routers(self): result = self.plugin.router_scheduler._get_routers_can_schedule( self.plugin, mock.ANY, [], mock.ANY) self.assertEqual([], result) class 
L3DVRHAPlugin(db_v2.NeutronDbPluginV2, l3_hamode_db.L3_HA_NAT_db_mixin, l3_dvr_ha_scheduler_db.L3_DVR_HA_scheduler_db_mixin): pass class L3DVRHATestCaseMixin(testlib_api.SqlTestCase, L3SchedulerBaseMixin): def setUp(self): super(L3DVRHATestCaseMixin, self).setUp() self.adminContext = n_context.get_admin_context() self.plugin = L3DVRHAPlugin() neutron-8.4.0/neutron/tests/unit/scheduler/test_dhcp_agent_scheduler.py0000664000567000056710000006472213044372760027733 0ustar jenkinsjenkins00000000000000# Copyright 2014 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import mock from oslo_config import cfg from oslo_utils import importutils import testscenarios from neutron.common import constants from neutron import context from neutron.db import agentschedulers_db as sched_db from neutron.db import common_db_mixin from neutron.db import models_v2 from neutron.extensions import dhcpagentscheduler from neutron.scheduler import dhcp_agent_scheduler from neutron.tests.common import helpers from neutron.tests.unit import testlib_api # Required to generate tests from scenarios. Not compatible with nose. load_tests = testscenarios.load_tests_apply_scenarios HOST_C = 'host-c' HOST_D = 'host-d' class TestDhcpSchedulerBaseTestCase(testlib_api.SqlTestCase): def setUp(self): super(TestDhcpSchedulerBaseTestCase, self).setUp() self.ctx = context.get_admin_context() self.network = {'id': 'foo_network_id'} self.network_id = 'foo_network_id' self._save_networks([self.network_id]) def _create_and_set_agents_down(self, hosts, down_agent_count=0, admin_state_up=True, az=helpers.DEFAULT_AZ): agents = [] for i, host in enumerate(hosts): is_alive = i >= down_agent_count agents.append(helpers.register_dhcp_agent( host, admin_state_up=admin_state_up, alive=is_alive, az=az)) return agents def _save_networks(self, networks): for network_id in networks: with self.ctx.session.begin(subtransactions=True): self.ctx.session.add(models_v2.Network(id=network_id)) def _test_schedule_bind_network(self, agents, network_id): scheduler = dhcp_agent_scheduler.ChanceScheduler() scheduler.resource_filter.bind(self.ctx, agents, network_id) results = self.ctx.session.query( sched_db.NetworkDhcpAgentBinding).filter_by( network_id=network_id).all() self.assertEqual(len(agents), len(results)) for result in results: self.assertEqual(network_id, result.network_id) class TestDhcpScheduler(TestDhcpSchedulerBaseTestCase): def test_schedule_bind_network_single_agent(self): agents = self._create_and_set_agents_down(['host-a']) self._test_schedule_bind_network(agents, self.network_id) def test_schedule_bind_network_multi_agents(self): agents = self._create_and_set_agents_down(['host-a', 'host-b']) self._test_schedule_bind_network(agents, self.network_id) def test_schedule_bind_network_multi_agent_fail_one(self): agents = self._create_and_set_agents_down(['host-a']) self._test_schedule_bind_network(agents, self.network_id) with mock.patch.object(dhcp_agent_scheduler.LOG, 'info') as fake_log: self._test_schedule_bind_network(agents, 
self.network_id) self.assertEqual(1, fake_log.call_count) def _test_get_agents_and_scheduler_for_dead_agent(self): agents = self._create_and_set_agents_down(['dead_host', 'alive_host'], 1) dead_agent = [agents[0]] alive_agent = [agents[1]] self._test_schedule_bind_network(dead_agent, self.network_id) scheduler = dhcp_agent_scheduler.ChanceScheduler() return dead_agent, alive_agent, scheduler def _test_reschedule_vs_network_on_dead_agent(self, active_hosts_only): dead_agent, alive_agent, scheduler = ( self._test_get_agents_and_scheduler_for_dead_agent()) network = {'id': self.network_id} plugin = mock.Mock() plugin.get_subnets.return_value = [{"network_id": self.network_id, "enable_dhcp": True}] plugin.get_agents_db.return_value = dead_agent + alive_agent if active_hosts_only: plugin.get_dhcp_agents_hosting_networks.return_value = [] self.assertTrue( scheduler.schedule( plugin, self.ctx, network)) else: plugin.get_dhcp_agents_hosting_networks.return_value = dead_agent self.assertFalse( scheduler.schedule( plugin, self.ctx, network)) def test_network_rescheduled_when_db_returns_active_hosts(self): self._test_reschedule_vs_network_on_dead_agent(True) def test_network_not_rescheduled_when_db_returns_all_hosts(self): self._test_reschedule_vs_network_on_dead_agent(False) def _get_agent_binding_from_db(self, agent): return self.ctx.session.query( sched_db.NetworkDhcpAgentBinding ).filter_by(dhcp_agent_id=agent[0].id).all() def _test_auto_reschedule_vs_network_on_dead_agent(self, active_hosts_only): dead_agent, alive_agent, scheduler = ( self._test_get_agents_and_scheduler_for_dead_agent()) plugin = mock.Mock() plugin.get_subnets.return_value = [{"network_id": self.network_id, "enable_dhcp": True}] plugin.get_network.return_value = self.network if active_hosts_only: plugin.get_dhcp_agents_hosting_networks.return_value = [] else: plugin.get_dhcp_agents_hosting_networks.return_value = dead_agent network_assigned_to_dead_agent = ( self._get_agent_binding_from_db(dead_agent)) self.assertEqual(1, len(network_assigned_to_dead_agent)) self.assertTrue( scheduler.auto_schedule_networks( plugin, self.ctx, "alive_host")) network_assigned_to_dead_agent = ( self._get_agent_binding_from_db(dead_agent)) network_assigned_to_alive_agent = ( self._get_agent_binding_from_db(alive_agent)) self.assertEqual(1, len(network_assigned_to_dead_agent)) if active_hosts_only: self.assertEqual(1, len(network_assigned_to_alive_agent)) else: self.assertEqual(0, len(network_assigned_to_alive_agent)) def test_network_auto_rescheduled_when_db_returns_active_hosts(self): self._test_auto_reschedule_vs_network_on_dead_agent(True) def test_network_not_auto_rescheduled_when_db_returns_all_hosts(self): self._test_auto_reschedule_vs_network_on_dead_agent(False) class TestAutoScheduleNetworks(TestDhcpSchedulerBaseTestCase): """Unit test scenarios for ChanceScheduler.auto_schedule_networks. network_present Network is present or not enable_dhcp DHCP is enabled or disabled in the subnet of the network scheduled_already Network is already scheduled to the agent or not agent_down DHCP agent is down or alive valid_host If True, a valid host is passed to schedule the network; otherwise an invalid host is passed. az_hints 'availability_zone_hints' of the network. Note that the default 'availability_zone' of an agent is 'nova'. 
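Each entry in the scenarios list below is applied by the testscenarios library as a set of instance attributes, so test_auto_schedule_network effectively runs once per entry. Roughly, as an illustrative sketch (not the library's actual implementation):

    for name, attrs in TestAutoScheduleNetworks.scenarios:
        case = TestAutoScheduleNetworks('test_auto_schedule_network')
        for key, value in attrs.items():
            setattr(case, key, value)  # e.g. case.enable_dhcp = True
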
""" scenarios = [ ('Network present', dict(network_present=True, enable_dhcp=True, scheduled_already=False, agent_down=False, valid_host=True, az_hints=[])), ('No network', dict(network_present=False, enable_dhcp=False, scheduled_already=False, agent_down=False, valid_host=True, az_hints=[])), ('Network already scheduled', dict(network_present=True, enable_dhcp=True, scheduled_already=True, agent_down=False, valid_host=True, az_hints=[])), ('Agent down', dict(network_present=True, enable_dhcp=True, scheduled_already=False, agent_down=False, valid_host=True, az_hints=[])), ('dhcp disabled', dict(network_present=True, enable_dhcp=False, scheduled_already=False, agent_down=False, valid_host=False, az_hints=[])), ('Invalid host', dict(network_present=True, enable_dhcp=True, scheduled_already=False, agent_down=False, valid_host=False, az_hints=[])), ('Match AZ', dict(network_present=True, enable_dhcp=True, scheduled_already=False, agent_down=False, valid_host=True, az_hints=['nova'])), ('Not match AZ', dict(network_present=True, enable_dhcp=True, scheduled_already=False, agent_down=False, valid_host=True, az_hints=['not-match'])), ] def test_auto_schedule_network(self): plugin = mock.MagicMock() plugin.get_subnets.return_value = ( [{"network_id": self.network_id, "enable_dhcp": self.enable_dhcp}] if self.network_present else []) plugin.get_network.return_value = {'availability_zone_hints': self.az_hints} scheduler = dhcp_agent_scheduler.ChanceScheduler() if self.network_present: down_agent_count = 1 if self.agent_down else 0 agents = self._create_and_set_agents_down( ['host-a'], down_agent_count=down_agent_count) if self.scheduled_already: self._test_schedule_bind_network(agents, self.network_id) expected_result = (self.network_present and self.enable_dhcp) expected_hosted_agents = (1 if expected_result and self.valid_host else 0) if (self.az_hints and agents[0]['availability_zone'] not in self.az_hints): expected_hosted_agents = 0 host = "host-a" if self.valid_host else "host-b" observed_ret_value = scheduler.auto_schedule_networks( plugin, self.ctx, host) self.assertEqual(expected_result, observed_ret_value) hosted_agents = self.ctx.session.query( sched_db.NetworkDhcpAgentBinding).all() self.assertEqual(expected_hosted_agents, len(hosted_agents)) class TestNetworksFailover(TestDhcpSchedulerBaseTestCase, sched_db.DhcpAgentSchedulerDbMixin, common_db_mixin.CommonDbMixin): def test_reschedule_network_from_down_agent(self): agents = self._create_and_set_agents_down(['host-a', 'host-b'], 1) self._test_schedule_bind_network([agents[0]], self.network_id) self._save_networks(["foo-network-2"]) self._test_schedule_bind_network([agents[1]], "foo-network-2") with mock.patch.object(self, 'remove_network_from_dhcp_agent') as rn,\ mock.patch.object(self, 'schedule_network', return_value=[agents[1]]) as sch,\ mock.patch.object(self, 'get_network', create=True, return_value={'id': self.network_id}): notifier = mock.MagicMock() self.agent_notifiers[constants.AGENT_TYPE_DHCP] = notifier self.remove_networks_from_down_agents() rn.assert_called_with(mock.ANY, agents[0].id, self.network_id, notify=False) sch.assert_called_with(mock.ANY, {'id': self.network_id}) notifier.network_added_to_agent.assert_called_with( mock.ANY, self.network_id, agents[1].host) def _test_failed_rescheduling(self, rn_side_effect=None): agents = self._create_and_set_agents_down(['host-a', 'host-b'], 1) self._test_schedule_bind_network([agents[0]], self.network_id) with mock.patch.object(self, 'remove_network_from_dhcp_agent', 
side_effect=rn_side_effect) as rn,\ mock.patch.object(self, 'schedule_network', return_value=None) as sch,\ mock.patch.object(self, 'get_network', create=True, return_value={'id': self.network_id}): notifier = mock.MagicMock() self.agent_notifiers[constants.AGENT_TYPE_DHCP] = notifier self.remove_networks_from_down_agents() rn.assert_called_with(mock.ANY, agents[0].id, self.network_id, notify=False) sch.assert_called_with(mock.ANY, {'id': self.network_id}) self.assertFalse(notifier.network_added_to_agent.called) def test_reschedule_network_from_down_agent_failed(self): self._test_failed_rescheduling() def test_reschedule_network_from_down_agent_concurrent_removal(self): self._test_failed_rescheduling( rn_side_effect=dhcpagentscheduler.NetworkNotHostedByDhcpAgent( network_id='foo', agent_id='bar')) def test_filter_bindings(self): bindings = [ sched_db.NetworkDhcpAgentBinding(network_id='foo1', dhcp_agent={'id': 'id1'}), sched_db.NetworkDhcpAgentBinding(network_id='foo2', dhcp_agent={'id': 'id1'}), sched_db.NetworkDhcpAgentBinding(network_id='foo3', dhcp_agent={'id': 'id2'}), sched_db.NetworkDhcpAgentBinding(network_id='foo4', dhcp_agent={'id': 'id2'})] with mock.patch.object(self, 'agent_starting_up', side_effect=[True, False]): res = [b for b in self._filter_bindings(None, bindings)] # once per each agent id1 and id2 self.assertEqual(2, len(res)) res_ids = [b.network_id for b in res] self.assertIn('foo3', res_ids) self.assertIn('foo4', res_ids) def test_reschedule_network_from_down_agent_failed_on_unexpected(self): agents = self._create_and_set_agents_down(['host-a'], 1) self._test_schedule_bind_network([agents[0]], self.network_id) with mock.patch.object( self, '_filter_bindings', side_effect=Exception()): # just make sure that no exception is raised self.remove_networks_from_down_agents() def test_reschedule_network_catches_exceptions_on_fetching_bindings(self): with mock.patch('neutron.context.get_admin_context') as get_ctx: mock_ctx = mock.Mock() get_ctx.return_value = mock_ctx mock_ctx.session.query.side_effect = Exception() # just make sure that no exception is raised self.remove_networks_from_down_agents() def test_reschedule_doesnt_occur_if_no_agents(self): agents = self._create_and_set_agents_down(['host-a', 'host-b'], 2) self._test_schedule_bind_network([agents[0]], self.network_id) with mock.patch.object( self, 'remove_network_from_dhcp_agent') as rn: self.remove_networks_from_down_agents() self.assertFalse(rn.called) class DHCPAgentWeightSchedulerTestCase(TestDhcpSchedulerBaseTestCase): """Unit test scenarios for WeightScheduler.schedule.""" def setUp(self): super(DHCPAgentWeightSchedulerTestCase, self).setUp() DB_PLUGIN_KLASS = 'neutron.plugins.ml2.plugin.Ml2Plugin' self.setup_coreplugin(DB_PLUGIN_KLASS) cfg.CONF.set_override("network_scheduler_driver", 'neutron.scheduler.dhcp_agent_scheduler.WeightScheduler') self.plugin = importutils.import_object('neutron.plugins.ml2.plugin.' 
'Ml2Plugin') self.assertEqual(1, self.patched_dhcp_periodic.call_count) self.plugin.network_scheduler = importutils.import_object( 'neutron.scheduler.dhcp_agent_scheduler.WeightScheduler' ) cfg.CONF.set_override('dhcp_agents_per_network', 1) cfg.CONF.set_override("dhcp_load_type", "networks") def test_scheduler_one_agents_per_network(self): self._save_networks(['1111']) helpers.register_dhcp_agent(HOST_C) self.plugin.network_scheduler.schedule(self.plugin, self.ctx, {'id': '1111'}) agents = self.plugin.get_dhcp_agents_hosting_networks(self.ctx, ['1111']) self.assertEqual(1, len(agents)) def test_scheduler_two_agents_per_network(self): cfg.CONF.set_override('dhcp_agents_per_network', 2) self._save_networks(['1111']) helpers.register_dhcp_agent(HOST_C) helpers.register_dhcp_agent(HOST_D) self.plugin.network_scheduler.schedule(self.plugin, self.ctx, {'id': '1111'}) agents = self.plugin.get_dhcp_agents_hosting_networks(self.ctx, ['1111']) self.assertEqual(2, len(agents)) def test_scheduler_no_active_agents(self): self._save_networks(['1111']) self.plugin.network_scheduler.schedule(self.plugin, self.ctx, {'id': '1111'}) agents = self.plugin.get_dhcp_agents_hosting_networks(self.ctx, ['1111']) self.assertEqual(0, len(agents)) def test_scheduler_equal_distribution(self): self._save_networks(['1111', '2222', '3333']) helpers.register_dhcp_agent(HOST_C) helpers.register_dhcp_agent(HOST_D, networks=1) self.plugin.network_scheduler.schedule( self.plugin, context.get_admin_context(), {'id': '1111'}) helpers.register_dhcp_agent(HOST_D, networks=2) self.plugin.network_scheduler.schedule( self.plugin, context.get_admin_context(), {'id': '2222'}) helpers.register_dhcp_agent(HOST_C, networks=4) self.plugin.network_scheduler.schedule( self.plugin, context.get_admin_context(), {'id': '3333'}) agent1 = self.plugin.get_dhcp_agents_hosting_networks( self.ctx, ['1111']) agent2 = self.plugin.get_dhcp_agents_hosting_networks( self.ctx, ['2222']) agent3 = self.plugin.get_dhcp_agents_hosting_networks( self.ctx, ['3333']) self.assertEqual('host-c', agent1[0]['host']) self.assertEqual('host-c', agent2[0]['host']) self.assertEqual('host-d', agent3[0]['host']) class TestDhcpSchedulerFilter(TestDhcpSchedulerBaseTestCase, sched_db.DhcpAgentSchedulerDbMixin): def _test_get_dhcp_agents_hosting_networks(self, expected, **kwargs): agents = self._create_and_set_agents_down(['host-a', 'host-b'], 1) agents += self._create_and_set_agents_down(['host-c', 'host-d'], 1, admin_state_up=False) self._test_schedule_bind_network(agents, self.network_id) agents = self.get_dhcp_agents_hosting_networks(self.ctx, [self.network_id], **kwargs) host_ids = set(a['host'] for a in agents) self.assertEqual(expected, host_ids) def test_get_dhcp_agents_hosting_networks_default(self): self._test_get_dhcp_agents_hosting_networks({'host-a', 'host-b', 'host-c', 'host-d'}) def test_get_dhcp_agents_hosting_networks_active(self): self._test_get_dhcp_agents_hosting_networks({'host-b', 'host-d'}, active=True) def test_get_dhcp_agents_hosting_networks_admin_up(self): self._test_get_dhcp_agents_hosting_networks({'host-a', 'host-b'}, admin_state_up=True) def test_get_dhcp_agents_hosting_networks_active_admin_up(self): self._test_get_dhcp_agents_hosting_networks({'host-b'}, active=True, admin_state_up=True) def test_get_dhcp_agents_hosting_networks_admin_down(self): self._test_get_dhcp_agents_hosting_networks({'host-c', 'host-d'}, admin_state_up=False) def test_get_dhcp_agents_hosting_networks_active_admin_down(self): 
self._test_get_dhcp_agents_hosting_networks({'host-d'}, active=True, admin_state_up=False) class DHCPAgentAZAwareWeightSchedulerTestCase(TestDhcpSchedulerBaseTestCase): def setUp(self): super(DHCPAgentAZAwareWeightSchedulerTestCase, self).setUp() DB_PLUGIN_KLASS = 'neutron.plugins.ml2.plugin.Ml2Plugin' self.setup_coreplugin(DB_PLUGIN_KLASS) cfg.CONF.set_override("network_scheduler_driver", 'neutron.scheduler.dhcp_agent_scheduler.AZAwareWeightScheduler') self.plugin = importutils.import_object('neutron.plugins.ml2.plugin.' 'Ml2Plugin') cfg.CONF.set_override('dhcp_agents_per_network', 1) cfg.CONF.set_override("dhcp_load_type", "networks") def test_az_scheduler_one_az_hints(self): self._save_networks(['1111']) helpers.register_dhcp_agent('az1-host1', networks=1, az='az1') helpers.register_dhcp_agent('az1-host2', networks=2, az='az1') helpers.register_dhcp_agent('az2-host1', networks=3, az='az2') helpers.register_dhcp_agent('az2-host2', networks=4, az='az2') self.plugin.network_scheduler.schedule(self.plugin, self.ctx, {'id': '1111', 'availability_zone_hints': ['az2']}) agents = self.plugin.get_dhcp_agents_hosting_networks(self.ctx, ['1111']) self.assertEqual(1, len(agents)) self.assertEqual('az2-host1', agents[0]['host']) def test_az_scheduler_default_az_hints(self): cfg.CONF.set_override('default_availability_zones', ['az1']) self._save_networks(['1111']) helpers.register_dhcp_agent('az1-host1', networks=1, az='az1') helpers.register_dhcp_agent('az1-host2', networks=2, az='az1') helpers.register_dhcp_agent('az2-host1', networks=3, az='az2') helpers.register_dhcp_agent('az2-host2', networks=4, az='az2') self.plugin.network_scheduler.schedule(self.plugin, self.ctx, {'id': '1111', 'availability_zone_hints': []}) agents = self.plugin.get_dhcp_agents_hosting_networks(self.ctx, ['1111']) self.assertEqual(1, len(agents)) self.assertEqual('az1-host1', agents[0]['host']) def test_az_scheduler_two_az_hints(self): cfg.CONF.set_override('dhcp_agents_per_network', 2) self._save_networks(['1111']) helpers.register_dhcp_agent('az1-host1', networks=1, az='az1') helpers.register_dhcp_agent('az1-host2', networks=2, az='az1') helpers.register_dhcp_agent('az2-host1', networks=3, az='az2') helpers.register_dhcp_agent('az2-host2', networks=4, az='az2') helpers.register_dhcp_agent('az3-host1', networks=5, az='az3') helpers.register_dhcp_agent('az3-host2', networks=6, az='az3') self.plugin.network_scheduler.schedule(self.plugin, self.ctx, {'id': '1111', 'availability_zone_hints': ['az1', 'az3']}) agents = self.plugin.get_dhcp_agents_hosting_networks(self.ctx, ['1111']) self.assertEqual(2, len(agents)) expected_hosts = set(['az1-host1', 'az3-host1']) hosts = set([a['host'] for a in agents]) self.assertEqual(expected_hosts, hosts) def test_az_scheduler_two_az_hints_one_available_az(self): cfg.CONF.set_override('dhcp_agents_per_network', 2) self._save_networks(['1111']) helpers.register_dhcp_agent('az1-host1', networks=1, az='az1') helpers.register_dhcp_agent('az1-host2', networks=2, az='az1') helpers.register_dhcp_agent('az2-host1', networks=3, alive=False, az='az2') helpers.register_dhcp_agent('az2-host2', networks=4, admin_state_up=False, az='az2') self.plugin.network_scheduler.schedule(self.plugin, self.ctx, {'id': '1111', 'availability_zone_hints': ['az1', 'az2']}) agents = self.plugin.get_dhcp_agents_hosting_networks(self.ctx, ['1111']) self.assertEqual(2, len(agents)) expected_hosts = set(['az1-host1', 'az1-host2']) hosts = set([a['host'] for a in agents]) self.assertEqual(expected_hosts, hosts) def 
test_az_scheduler_no_az_hints(self): cfg.CONF.set_override('dhcp_agents_per_network', 2) self._save_networks(['1111']) helpers.register_dhcp_agent('az1-host1', networks=2, az='az1') helpers.register_dhcp_agent('az1-host2', networks=3, az='az1') helpers.register_dhcp_agent('az2-host1', networks=2, az='az2') helpers.register_dhcp_agent('az2-host2', networks=1, az='az2') self.plugin.network_scheduler.schedule(self.plugin, self.ctx, {'id': '1111', 'availability_zone_hints': []}) agents = self.plugin.get_dhcp_agents_hosting_networks(self.ctx, ['1111']) self.assertEqual(2, len(agents)) expected_hosts = set(['az1-host1', 'az2-host2']) hosts = {a['host'] for a in agents} self.assertEqual(expected_hosts, hosts) neutron-8.4.0/neutron/tests/unit/hacking/0000775000567000056710000000000013044373210021572 5ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/tests/unit/hacking/test_checks.py0000664000567000056710000002600513044372760024457 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import testtools from neutron.hacking import checks from neutron.tests import base class HackingTestCase(base.BaseTestCase): def assertLinePasses(self, func, line): with testtools.ExpectedException(StopIteration): next(func(line)) def assertLineFails(self, func, line): self.assertIsInstance(next(func(line)), tuple) def test_log_translations(self): expected_marks = { 'error': '_LE', 'info': '_LI', 'warning': '_LW', 'critical': '_LC', 'exception': '_LE', } logs = expected_marks.keys() debug = "LOG.debug('OK')" self.assertEqual( 0, len(list(checks.validate_log_translations(debug, debug, 'f')))) for log in logs: bad = 'LOG.%s(_("Bad"))' % log self.assertEqual( 1, len(list(checks.validate_log_translations(bad, bad, 'f')))) bad = 'LOG.%s("Bad")' % log self.assertEqual( 1, len(list(checks.validate_log_translations(bad, bad, 'f')))) ok = "LOG.%s('OK') # noqa" % log self.assertEqual( 0, len(list(checks.validate_log_translations(ok, ok, 'f')))) ok = "LOG.%s(variable)" % log self.assertEqual( 0, len(list(checks.validate_log_translations(ok, ok, 'f')))) for mark in checks._all_hints: stmt = "LOG.%s(%s('test'))" % (log, mark) self.assertEqual( 0 if expected_marks[log] == mark else 1, len(list(checks.validate_log_translations(stmt, stmt, 'f')))) def test_no_translate_debug_logs(self): for hint in checks._all_hints: bad = "LOG.debug(%s('bad'))" % hint self.assertEqual( 1, len(list(checks.no_translate_debug_logs(bad, 'f')))) def test_use_jsonutils(self): def __get_msg(fun): msg = ("N321: jsonutils.%(fun)s must be used instead of " "json.%(fun)s" % {'fun': fun}) return [(0, msg)] for method in ('dump', 'dumps', 'load', 'loads'): self.assertEqual( __get_msg(method), list(checks.use_jsonutils("json.%s(" % method, "./neutron/common/rpc.py"))) self.assertEqual(0, len(list(checks.use_jsonutils("jsonx.%s(" % method, "./neutron/common/rpc.py")))) self.assertEqual(0, len(list(checks.use_jsonutils("json.%sx(" % method, "./neutron/common/rpc.py")))) self.assertEqual(0, len(list(checks.use_jsonutils( "json.%s" % method, 
"./neutron/plugins/ml2/drivers/openvswitch/agent/xenapi/" "etc/xapi.d/plugins/netwrap")))) def test_assert_called_once_with(self): fail_code1 = """ mock = Mock() mock.method(1, 2, 3, test='wow') mock.method.assert_called_once() """ fail_code2 = """ mock = Mock() mock.method(1, 2, 3, test='wow') mock.method.assertCalledOnceWith() """ fail_code3 = """ mock = Mock() mock.method(1, 2, 3, test='wow') mock.method.called_once_with() """ fail_code4 = """ mock = Mock() mock.method(1, 2, 3, test='wow') mock.method.assert_has_called() """ pass_code = """ mock = Mock() mock.method(1, 2, 3, test='wow') mock.method.assert_called_once_with() """ pass_code2 = """ mock = Mock() mock.method(1, 2, 3, test='wow') mock.method.assert_has_calls() """ self.assertEqual( 1, len(list(checks.check_assert_called_once_with(fail_code1, "neutron/tests/test_assert.py")))) self.assertEqual( 1, len(list(checks.check_assert_called_once_with(fail_code2, "neutron/tests/test_assert.py")))) self.assertEqual( 1, len(list(checks.check_assert_called_once_with(fail_code3, "neutron/tests/test_assert.py")))) self.assertEqual( 0, len(list(checks.check_assert_called_once_with(pass_code, "neutron/tests/test_assert.py")))) self.assertEqual( 1, len(list(checks.check_assert_called_once_with(fail_code4, "neutron/tests/test_assert.py")))) self.assertEqual( 0, len(list(checks.check_assert_called_once_with(pass_code2, "neutron/tests/test_assert.py")))) def test_check_python3_xrange(self): f = checks.check_python3_xrange self.assertLineFails(f, 'a = xrange(1000)') self.assertLineFails(f, 'b =xrange ( 42 )') self.assertLineFails(f, 'c = xrange(1, 10, 2)') self.assertLinePasses(f, 'd = range(1000)') self.assertLinePasses(f, 'e = six.moves.range(1337)') def test_no_basestring(self): self.assertEqual(1, len(list(checks.check_no_basestring("isinstance(x, basestring)")))) def test_check_python3_iteritems(self): f = checks.check_python3_no_iteritems self.assertLineFails(f, "d.iteritems()") self.assertLinePasses(f, "six.iteritems(d)") def test_asserttrue(self): fail_code1 = """ test_bool = True self.assertEqual(True, test_bool) """ fail_code2 = """ test_bool = True self.assertEqual(test_bool, True) """ pass_code = """ test_bool = True self.assertTrue(test_bool) """ self.assertEqual( 1, len(list(checks.check_asserttrue(fail_code1, "neutron/tests/test_assert.py")))) self.assertEqual( 1, len(list(checks.check_asserttrue(fail_code2, "neutron/tests/test_assert.py")))) self.assertEqual( 0, len(list(checks.check_asserttrue(pass_code, "neutron/tests/test_assert.py")))) def test_no_mutable_default_args(self): self.assertEqual(1, len(list(checks.no_mutable_default_args( " def fake_suds_context(calls={}):")))) self.assertEqual(1, len(list(checks.no_mutable_default_args( "def get_info_from_bdm(virt_type, bdm, mapping=[])")))) self.assertEqual(0, len(list(checks.no_mutable_default_args( "defined = []")))) self.assertEqual(0, len(list(checks.no_mutable_default_args( "defined, undefined = [], {}")))) def test_assertfalse(self): fail_code1 = """ test_bool = False self.assertEqual(False, test_bool) """ fail_code2 = """ test_bool = False self.assertEqual(test_bool, False) """ pass_code = """ test_bool = False self.assertFalse(test_bool) """ self.assertEqual( 1, len(list(checks.check_assertfalse(fail_code1, "neutron/tests/test_assert.py")))) self.assertEqual( 1, len(list(checks.check_assertfalse(fail_code2, "neutron/tests/test_assert.py")))) self.assertEqual( 0, len(list(checks.check_assertfalse(pass_code, "neutron/tests/test_assert.py")))) def test_assertempty(self): 
fail_code = """ test_empty = %s self.assertEqual(test_empty, %s) """ pass_code1 = """ test_empty = %s self.assertEqual(%s, test_empty) """ pass_code2 = """ self.assertEqual(123, foo(abc, %s)) """ empty_cases = ['{}', '[]', '""', "''", '()', 'set()'] for ec in empty_cases: self.assertEqual( 1, len(list(checks.check_assertempty(fail_code % (ec, ec), "neutron/tests/test_assert.py")))) self.assertEqual( 0, len(list(checks.check_assertfalse(pass_code1 % (ec, ec), "neutron/tests/test_assert.py")))) self.assertEqual( 0, len(list(checks.check_assertfalse(pass_code2 % ec, "neutron/tests/test_assert.py")))) def test_assertisinstance(self): fail_code = """ self.assertTrue(isinstance(observed, ANY_TYPE)) """ pass_code1 = """ self.assertEqual(ANY_TYPE, type(observed)) """ pass_code2 = """ self.assertIsInstance(observed, ANY_TYPE) """ self.assertEqual( 1, len(list(checks.check_assertisinstance(fail_code, "neutron/tests/test_assert.py")))) self.assertEqual( 0, len(list(checks.check_assertisinstance(pass_code1, "neutron/tests/test_assert.py")))) self.assertEqual( 0, len(list(checks.check_assertisinstance(pass_code2, "neutron/tests/test_assert.py")))) def test_assertequal_for_httpcode(self): fail_code = """ self.assertEqual(res.status_int, webob.exc.HTTPNoContent.code) """ pass_code = """ self.assertEqual(webob.exc.HTTPBadRequest.code, res.status_int) """ self.assertEqual( 1, len(list(checks.check_assertequal_for_httpcode(fail_code, "neutron/tests/test_assert.py")))) self.assertEqual( 0, len(list(checks.check_assertequal_for_httpcode(pass_code, "neutron/tests/test_assert.py")))) neutron-8.4.0/neutron/tests/unit/hacking/__init__.py0000664000567000056710000000000013044372736023705 0ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/tests/unit/services/0000775000567000056710000000000013044373210022011 5ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/tests/unit/services/__init__.py0000664000567000056710000000000013044372736024124 0ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/tests/unit/services/metering/0000775000567000056710000000000013044373210023623 5ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/tests/unit/services/metering/__init__.py0000664000567000056710000000000013044372736025736 0ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/tests/unit/services/metering/drivers/0000775000567000056710000000000013044373210025301 5ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/tests/unit/services/metering/drivers/__init__.py0000664000567000056710000000000013044372736027414 0ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/tests/unit/services/metering/drivers/test_iptables.py0000664000567000056710000005215613044372760030537 0ustar jenkinsjenkins00000000000000# Copyright (C) 2013 eNovance SAS # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import copy import mock from oslo_config import cfg from neutron.services.metering.drivers.iptables import iptables_driver from neutron.tests import base TEST_ROUTERS = [ {'_metering_labels': [ {'id': 'c5df2fe5-c600-4a2a-b2f4-c0fb6df73c83', 'rules': [{ 'direction': 'ingress', 'excluded': False, 'id': '7f1a261f-2489-4ed1-870c-a62754501379', 'metering_label_id': 'c5df2fe5-c600-4a2a-b2f4-c0fb6df73c83', 'remote_ip_prefix': '10.0.0.0/24'}]}], 'admin_state_up': True, 'gw_port_id': '6d411f48-ecc7-45e0-9ece-3b5bdb54fcee', 'id': '473ec392-1711-44e3-b008-3251ccfc5099', 'name': 'router1', 'status': 'ACTIVE', 'tenant_id': '6c5f5d2a1fa2441e88e35422926f48e8'}, {'_metering_labels': [ {'id': 'eeef45da-c600-4a2a-b2f4-c0fb6df73c83', 'rules': [{ 'direction': 'egress', 'excluded': False, 'id': 'fa2441e8-2489-4ed1-870c-a62754501379', 'metering_label_id': 'eeef45da-c600-4a2a-b2f4-c0fb6df73c83', 'remote_ip_prefix': '20.0.0.0/24'}]}], 'admin_state_up': True, 'gw_port_id': '7d411f48-ecc7-45e0-9ece-3b5bdb54fcee', 'id': '373ec392-1711-44e3-b008-3251ccfc5099', 'name': 'router2', 'status': 'ACTIVE', 'tenant_id': '6c5f5d2a1fa2441e88e35422926f48e8'}, ] TEST_ROUTERS_WITH_ONE_RULE = [ {'_metering_labels': [ {'id': 'c5df2fe5-c600-4a2a-b2f4-c0fb6df73c83', 'rule': { 'direction': 'ingress', 'excluded': False, 'id': '7f1a261f-2489-4ed1-870c-a62754501379', 'metering_label_id': 'c5df2fe5-c600-4a2a-b2f4-c0fb6df73c83', 'remote_ip_prefix': '30.0.0.0/24'}}], 'admin_state_up': True, 'gw_port_id': '6d411f48-ecc7-45e0-9ece-3b5bdb54fcee', 'id': '473ec392-1711-44e3-b008-3251ccfc5099', 'name': 'router1', 'status': 'ACTIVE', 'tenant_id': '6c5f5d2a1fa2441e88e35422926f48e8'}, {'_metering_labels': [ {'id': 'eeef45da-c600-4a2a-b2f4-c0fb6df73c83', 'rule': { 'direction': 'egress', 'excluded': False, 'id': 'fa2441e8-2489-4ed1-870c-a62754501379', 'metering_label_id': 'eeef45da-c600-4a2a-b2f4-c0fb6df73c83', 'remote_ip_prefix': '40.0.0.0/24'}}], 'admin_state_up': True, 'gw_port_id': '7d411f48-ecc7-45e0-9ece-3b5bdb54fcee', 'id': '373ec392-1711-44e3-b008-3251ccfc5099', 'name': 'router2', 'status': 'ACTIVE', 'tenant_id': '6c5f5d2a1fa2441e88e35422926f48e8'}, ] class IptablesDriverTestCase(base.BaseTestCase): def setUp(self): super(IptablesDriverTestCase, self).setUp() self.utils_exec_p = mock.patch( 'neutron.agent.linux.utils.execute') self.utils_exec = self.utils_exec_p.start() self.iptables_cls_p = mock.patch( 'neutron.agent.linux.iptables_manager.IptablesManager') self.iptables_cls = self.iptables_cls_p.start() self.iptables_inst = mock.Mock() self.v4filter_inst = mock.Mock() self.v6filter_inst = mock.Mock() self.v4filter_inst.chains = [] self.v6filter_inst.chains = [] self.iptables_inst.ipv4 = {'filter': self.v4filter_inst} self.iptables_inst.ipv6 = {'filter': self.v6filter_inst} self.iptables_cls.return_value = self.iptables_inst cfg.CONF.set_override('interface_driver', 'neutron.agent.linux.interface.NullDriver') self.metering = iptables_driver.IptablesMeteringDriver('metering', cfg.CONF) def test_create_stateless_iptables_manager(self): routers = TEST_ROUTERS[:1] self.metering.add_metering_label(None, routers) self.iptables_cls.assert_called_with( binary_name=mock.ANY, namespace=mock.ANY, state_less=True, use_ipv6=mock.ANY) def test_add_metering_label(self): routers = TEST_ROUTERS[:1] self.metering.add_metering_label(None, routers) calls = [mock.call.add_chain('neutron-meter-l-c5df2fe5-c60', wrap=False), mock.call.add_chain('neutron-meter-r-c5df2fe5-c60', wrap=False), mock.call.add_rule('neutron-meter-FORWARD', '-j ' 
'neutron-meter-r-c5df2fe5-c60', wrap=False), mock.call.add_rule('neutron-meter-l-c5df2fe5-c60', '', wrap=False)] self.v4filter_inst.assert_has_calls(calls) def test_process_metering_label_rules(self): self.metering.add_metering_label(None, TEST_ROUTERS) calls = [mock.call.add_chain('neutron-meter-l-c5df2fe5-c60', wrap=False), mock.call.add_chain('neutron-meter-r-c5df2fe5-c60', wrap=False), mock.call.add_rule('neutron-meter-FORWARD', '-j ' 'neutron-meter-r-c5df2fe5-c60', wrap=False), mock.call.add_rule('neutron-meter-l-c5df2fe5-c60', '', wrap=False), mock.call.add_rule('neutron-meter-r-c5df2fe5-c60', '-i qg-6d411f48-ec -s 10.0.0.0/24' ' -j neutron-meter-l-c5df2fe5-c60', wrap=False, top=False), mock.call.add_chain('neutron-meter-l-eeef45da-c60', wrap=False), mock.call.add_chain('neutron-meter-r-eeef45da-c60', wrap=False), mock.call.add_rule('neutron-meter-FORWARD', '-j ' 'neutron-meter-r-eeef45da-c60', wrap=False), mock.call.add_rule('neutron-meter-l-eeef45da-c60', '', wrap=False), mock.call.add_rule('neutron-meter-r-eeef45da-c60', '-o qg-7d411f48-ec -d 20.0.0.0/24' ' -j neutron-meter-l-eeef45da-c60', wrap=False, top=False)] self.v4filter_inst.assert_has_calls(calls) def test_add_metering_label_with_rules(self): routers = copy.deepcopy(TEST_ROUTERS) routers[1]['_metering_labels'][0]['rules'][0].update({ 'direction': 'ingress', 'excluded': True, }) self.metering.add_metering_label(None, routers) calls = [mock.call.add_chain('neutron-meter-l-c5df2fe5-c60', wrap=False), mock.call.add_chain('neutron-meter-r-c5df2fe5-c60', wrap=False), mock.call.add_rule('neutron-meter-FORWARD', '-j ' 'neutron-meter-r-c5df2fe5-c60', wrap=False), mock.call.add_rule('neutron-meter-l-c5df2fe5-c60', '', wrap=False), mock.call.add_rule('neutron-meter-r-c5df2fe5-c60', '-i qg-6d411f48-ec -s 10.0.0.0/24' ' -j neutron-meter-l-c5df2fe5-c60', wrap=False, top=False), mock.call.add_chain('neutron-meter-l-eeef45da-c60', wrap=False), mock.call.add_chain('neutron-meter-r-eeef45da-c60', wrap=False), mock.call.add_rule('neutron-meter-FORWARD', '-j ' 'neutron-meter-r-eeef45da-c60', wrap=False), mock.call.add_rule('neutron-meter-l-eeef45da-c60', '', wrap=False), mock.call.add_rule('neutron-meter-r-eeef45da-c60', '-i qg-7d411f48-ec -s 20.0.0.0/24' ' -j RETURN', wrap=False, top=True)] self.v4filter_inst.assert_has_calls(calls) def test_update_metering_label_rules(self): routers = TEST_ROUTERS[:1] self.metering.add_metering_label(None, routers) updates = copy.deepcopy(routers) updates[0]['_metering_labels'][0]['rules'] = [{ 'direction': 'egress', 'excluded': True, 'id': '7f1a261f-2489-4ed1-870c-a62754501379', 'metering_label_id': 'c5df2fe5-c600-4a2a-b2f4-c0fb6df73c83', 'remote_ip_prefix': '10.0.0.0/24'}, {'direction': 'ingress', 'excluded': False, 'id': '6f1a261f-2489-4ed1-870c-a62754501379', 'metering_label_id': 'c5df2fe5-c600-4a2a-b2f4-c0fb6df73c83', 'remote_ip_prefix': '20.0.0.0/24'}] self.metering.update_metering_label_rules(None, updates) calls = [mock.call.add_chain('neutron-meter-l-c5df2fe5-c60', wrap=False), mock.call.add_chain('neutron-meter-r-c5df2fe5-c60', wrap=False), mock.call.add_rule('neutron-meter-FORWARD', '-j ' 'neutron-meter-r-c5df2fe5-c60', wrap=False), mock.call.add_rule('neutron-meter-l-c5df2fe5-c60', '', wrap=False), mock.call.add_rule('neutron-meter-r-c5df2fe5-c60', '-i qg-6d411f48-ec -s 10.0.0.0/24' ' -j neutron-meter-l-c5df2fe5-c60', wrap=False, top=False), mock.call.empty_chain('neutron-meter-r-c5df2fe5-c60', wrap=False), mock.call.add_rule('neutron-meter-r-c5df2fe5-c60', '-o qg-6d411f48-ec -d 10.0.0.0/24' ' 
-j RETURN', wrap=False, top=True), mock.call.add_rule('neutron-meter-r-c5df2fe5-c60', '-i qg-6d411f48-ec -s 20.0.0.0/24 -j ' 'neutron-meter-l-c5df2fe5-c60', wrap=False, top=False)] self.v4filter_inst.assert_has_calls(calls) def test_remove_metering_label_rule_in_update(self): routers = copy.deepcopy(TEST_ROUTERS[:1]) routers[0]['_metering_labels'][0]['rules'].append({ 'direction': 'ingress', 'excluded': False, 'id': 'aaaa261f-2489-4ed1-870c-a62754501379', 'metering_label_id': 'c5df2fe5-c600-4a2a-b2f4-c0fb6df73c83', 'remote_ip_prefix': '20.0.0.0/24', }) self.metering.add_metering_label(None, routers) del routers[0]['_metering_labels'][0]['rules'][1] self.metering.update_metering_label_rules(None, routers) calls = [mock.call.add_chain('neutron-meter-l-c5df2fe5-c60', wrap=False), mock.call.add_chain('neutron-meter-r-c5df2fe5-c60', wrap=False), mock.call.add_rule('neutron-meter-FORWARD', '-j ' 'neutron-meter-r-c5df2fe5-c60', wrap=False), mock.call.add_rule('neutron-meter-l-c5df2fe5-c60', '', wrap=False), mock.call.add_rule('neutron-meter-r-c5df2fe5-c60', '-i qg-6d411f48-ec -s 10.0.0.0/24' ' -j neutron-meter-l-c5df2fe5-c60', wrap=False, top=False), mock.call.add_rule('neutron-meter-r-c5df2fe5-c60', '-i qg-6d411f48-ec -s 20.0.0.0/24' ' -j neutron-meter-l-c5df2fe5-c60', wrap=False, top=False), mock.call.empty_chain('neutron-meter-r-c5df2fe5-c60', wrap=False), mock.call.add_rule('neutron-meter-r-c5df2fe5-c60', '-i qg-6d411f48-ec -s 10.0.0.0/24' ' -j neutron-meter-l-c5df2fe5-c60', wrap=False, top=False)] self.v4filter_inst.assert_has_calls(calls) def test_add_metering_label_rule(self): new_routers_rules = TEST_ROUTERS_WITH_ONE_RULE self.metering.update_routers(None, TEST_ROUTERS) self.metering.add_metering_label_rule(None, new_routers_rules) calls = [ mock.call.add_rule('neutron-meter-r-c5df2fe5-c60', '-i qg-6d411f48-ec -s 30.0.0.0/24' ' -j neutron-meter-l-c5df2fe5-c60', wrap=False, top=False), mock.call.add_rule('neutron-meter-r-eeef45da-c60', '-o qg-7d411f48-ec -d 40.0.0.0/24' ' -j neutron-meter-l-eeef45da-c60', wrap=False, top=False), ] self.v4filter_inst.assert_has_calls(calls) def test_remove_metering_label_rule(self): new_routers_rules = TEST_ROUTERS_WITH_ONE_RULE self.metering.update_routers(None, TEST_ROUTERS) self.metering.add_metering_label_rule(None, new_routers_rules) self.metering.remove_metering_label_rule(None, new_routers_rules) calls = [ mock.call.remove_rule('neutron-meter-r-c5df2fe5-c60', '-i qg-6d411f48-ec -s 30.0.0.0/24' ' -j neutron-meter-l-c5df2fe5-c60', wrap=False, top=False), mock.call.remove_rule('neutron-meter-r-eeef45da-c60', '-o qg-7d411f48-ec -d 40.0.0.0/24' ' -j neutron-meter-l-eeef45da-c60', wrap=False, top=False) ] self.v4filter_inst.assert_has_calls(calls) def test_remove_metering_label(self): routers = TEST_ROUTERS[:1] self.metering.add_metering_label(None, routers) self.metering.remove_metering_label(None, routers) calls = [mock.call.add_chain('neutron-meter-l-c5df2fe5-c60', wrap=False), mock.call.add_chain('neutron-meter-r-c5df2fe5-c60', wrap=False), mock.call.add_rule('neutron-meter-FORWARD', '-j ' 'neutron-meter-r-c5df2fe5-c60', wrap=False), mock.call.add_rule('neutron-meter-l-c5df2fe5-c60', '', wrap=False), mock.call.add_rule('neutron-meter-r-c5df2fe5-c60', '-i qg-6d411f48-ec -s 10.0.0.0/24' ' -j neutron-meter-l-c5df2fe5-c60', wrap=False, top=False), mock.call.remove_chain('neutron-meter-l-c5df2fe5-c60', wrap=False), mock.call.remove_chain('neutron-meter-r-c5df2fe5-c60', wrap=False)] self.v4filter_inst.assert_has_calls(calls) def test_update_routers(self): 
routers = copy.deepcopy(TEST_ROUTERS) routers[1]['_metering_labels'][0]['rules'][0].update({ 'direction': 'ingress', 'excluded': True, }) self.metering.add_metering_label(None, routers) updates = copy.deepcopy(routers) updates[0]['gw_port_id'] = '587b63c1-22a3-40b3-9834-486d1fb215a5' self.metering.update_routers(None, updates) calls = [mock.call.add_chain('neutron-meter-l-c5df2fe5-c60', wrap=False), mock.call.add_chain('neutron-meter-r-c5df2fe5-c60', wrap=False), mock.call.add_rule('neutron-meter-FORWARD', '-j ' 'neutron-meter-r-c5df2fe5-c60', wrap=False), mock.call.add_rule('neutron-meter-l-c5df2fe5-c60', '', wrap=False), mock.call.add_rule('neutron-meter-r-c5df2fe5-c60', '-i qg-6d411f48-ec -s 10.0.0.0/24' ' -j neutron-meter-l-c5df2fe5-c60', wrap=False, top=False), mock.call.add_chain('neutron-meter-l-eeef45da-c60', wrap=False), mock.call.add_chain('neutron-meter-r-eeef45da-c60', wrap=False), mock.call.add_rule('neutron-meter-FORWARD', '-j ' 'neutron-meter-r-eeef45da-c60', wrap=False), mock.call.add_rule('neutron-meter-l-eeef45da-c60', '', wrap=False), mock.call.add_rule('neutron-meter-r-eeef45da-c60', '-i qg-7d411f48-ec -s 20.0.0.0/24' ' -j RETURN', wrap=False, top=True), mock.call.remove_chain('neutron-meter-l-c5df2fe5-c60', wrap=False), mock.call.remove_chain('neutron-meter-r-c5df2fe5-c60', wrap=False), mock.call.add_chain('neutron-meter-l-c5df2fe5-c60', wrap=False), mock.call.add_chain('neutron-meter-r-c5df2fe5-c60', wrap=False), mock.call.add_rule('neutron-meter-FORWARD', '-j ' 'neutron-meter-r-c5df2fe5-c60', wrap=False), mock.call.add_rule('neutron-meter-l-c5df2fe5-c60', '', wrap=False), mock.call.add_rule('neutron-meter-r-c5df2fe5-c60', '-i qg-587b63c1-22 -s 10.0.0.0/24' ' -j neutron-meter-l-c5df2fe5-c60', wrap=False, top=False)] self.v4filter_inst.assert_has_calls(calls) def test_update_routers_removal(self): routers = TEST_ROUTERS self.metering.add_metering_label(None, routers) # Remove router id '373ec392-1711-44e3-b008-3251ccfc5099' updates = TEST_ROUTERS[:1] self.metering.update_routers(None, updates) calls = [mock.call.remove_chain('neutron-meter-l-eeef45da-c60', wrap=False), mock.call.remove_chain('neutron-meter-r-eeef45da-c60', wrap=False)] self.v4filter_inst.assert_has_calls(calls) def test_get_traffic_counters_with_missing_chain(self): for r in TEST_ROUTERS: rm = iptables_driver.RouterWithMetering(self.metering.conf, r) rm.metering_labels = {r['_metering_labels'][0]['id']: 'fake'} self.metering.routers[r['id']] = rm mocked_method = self.iptables_cls.return_value.get_traffic_counters mocked_method.side_effect = [{'pkts': 1, 'bytes': 8}, RuntimeError('Failed to find the chain')] counters = self.metering.get_traffic_counters(None, TEST_ROUTERS) expected_label_id = TEST_ROUTERS[0]['_metering_labels'][0]['id'] self.assertIn(expected_label_id, counters) self.assertEqual(1, counters[expected_label_id]['pkts']) self.assertEqual(8, counters[expected_label_id]['bytes']) neutron-8.4.0/neutron/tests/unit/services/metering/agents/0000775000567000056710000000000013044373210025104 5ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/tests/unit/services/metering/agents/__init__.py0000664000567000056710000000000013044372736027217 0ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/tests/unit/services/metering/agents/test_metering_agent.py0000664000567000056710000002506613044372760031527 0ustar jenkinsjenkins00000000000000# Copyright (C) 2013 eNovance SAS # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the 
License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import mock from oslo_config import cfg from oslo_utils import fixture as utils_fixture from oslo_utils import timeutils from oslo_utils import uuidutils from neutron.services.metering.agents import metering_agent from neutron.tests import base from neutron.tests import fake_notifier _uuid = uuidutils.generate_uuid TENANT_ID = _uuid() LABEL_ID = _uuid() ROUTERS = [{'status': 'ACTIVE', 'name': 'router1', 'gw_port_id': None, 'admin_state_up': True, 'tenant_id': TENANT_ID, '_metering_labels': [{'rules': [], 'id': LABEL_ID}], 'id': _uuid()}] ROUTERS_WITH_RULE = [{'status': 'ACTIVE', 'name': 'router1', 'gw_port_id': None, 'admin_state_up': True, 'tenant_id': TENANT_ID, '_metering_labels': [{'rule': {}, 'id': LABEL_ID}], 'id': _uuid()}] class TestMeteringOperations(base.BaseTestCase): def setUp(self): super(TestMeteringOperations, self).setUp() cfg.CONF.register_opts(metering_agent.MeteringAgent.Opts) self.noop_driver = ('neutron.services.metering.drivers.noop.' 'noop_driver.NoopMeteringDriver') cfg.CONF.set_override('driver', self.noop_driver) cfg.CONF.set_override('measure_interval', 0) cfg.CONF.set_override('report_interval', 0) self.setup_notification_driver() metering_rpc = ('neutron.services.metering.agents.metering_agent.' 'MeteringPluginRpc._get_sync_data_metering') self.metering_rpc_patch = mock.patch(metering_rpc, return_value=[]) self.metering_rpc_patch.start() self.driver_patch = mock.patch(self.noop_driver, spec=True) self.driver_patch.start() loopingcall_patch = mock.patch( 'oslo_service.loopingcall.FixedIntervalLoopingCall') loopingcall_patch.start() self.agent = metering_agent.MeteringAgent('my agent', cfg.CONF) self.driver = self.agent.metering_driver def test_add_metering_label(self): self.agent.add_metering_label(None, ROUTERS) self.assertEqual(1, self.driver.add_metering_label.call_count) def test_remove_metering_label(self): self.agent.remove_metering_label(None, ROUTERS) self.assertEqual(1, self.driver.remove_metering_label.call_count) def test_update_metering_label_rule(self): self.agent.update_metering_label_rules(None, ROUTERS) self.assertEqual(1, self.driver.update_metering_label_rules.call_count) def test_add_metering_label_rule(self): self.agent.add_metering_label_rule(None, ROUTERS_WITH_RULE) self.assertEqual(1, self.driver.add_metering_label_rule.call_count) def test_remove_metering_label_rule(self): self.agent.remove_metering_label_rule(None, ROUTERS_WITH_RULE) self.assertEqual(1, self.driver.remove_metering_label_rule.call_count) def test_routers_updated(self): self.agent.routers_updated(None, ROUTERS) self.assertEqual(1, self.driver.update_routers.call_count) def test_get_traffic_counters(self): self.agent._get_traffic_counters(None, ROUTERS) self.assertEqual(1, self.driver.get_traffic_counters.call_count) def test_notification_report(self): self.agent.routers_updated(None, ROUTERS) self.driver.get_traffic_counters.return_value = {LABEL_ID: {'pkts': 88, 'bytes': 444}} self.agent._metering_loop() self.assertNotEqual(len(fake_notifier.NOTIFICATIONS), 0) for n in fake_notifier.NOTIFICATIONS: if n['event_type'] == 'l3.meter': break 
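# The for/break above leaves n bound to the first 'l3.meter' notification;
# the assertions that follow inspect that notification's payload.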
self.assertEqual('l3.meter', n['event_type']) payload = n['payload'] self.assertEqual(TENANT_ID, payload['tenant_id']) self.assertEqual(LABEL_ID, payload['label_id']) self.assertEqual(88, payload['pkts']) self.assertEqual(444, payload['bytes']) def test_notification_report_interval(self): measure_interval = 30 report_interval = 600 now = timeutils.utcnow() time_fixture = self.useFixture(utils_fixture.TimeFixture(now)) self.addCleanup(timeutils.clear_time_override) self.agent.routers_updated(None, ROUTERS) self.driver.get_traffic_counters.return_value = {LABEL_ID: {'pkts': 889, 'bytes': 4440}} cfg.CONF.set_override('measure_interval', measure_interval) cfg.CONF.set_override('report_interval', report_interval) for i in range(report_interval): self.agent._metering_loop() count = 0 if len(fake_notifier.NOTIFICATIONS) > 1: for n in fake_notifier.NOTIFICATIONS: if n['event_type'] == 'l3.meter': #skip the first notification because the time is 0 count += 1 if count > 1: break time_fixture.advance_time_seconds(measure_interval) self.assertEqual('l3.meter', n['event_type']) payload = n['payload'] self.assertEqual(TENANT_ID, payload['tenant_id']) self.assertEqual(LABEL_ID, payload['label_id']) self.assertTrue((payload['time'] - report_interval) < measure_interval, payload) interval = (payload['last_update'] - payload['first_update']) \ - report_interval self.assertTrue(interval < measure_interval, payload) def test_router_deleted(self): label_id = _uuid() self.driver.get_traffic_counters = mock.MagicMock() self.driver.get_traffic_counters.return_value = {label_id: {'pkts': 44, 'bytes': 222}} self.agent._add_metering_info = mock.MagicMock() self.agent.routers_updated(None, ROUTERS) self.agent.router_deleted(None, ROUTERS[0]['id']) self.assertEqual(1, self.agent._add_metering_info.call_count) self.assertEqual(1, self.driver.remove_router.call_count) self.agent._add_metering_info.assert_called_with(label_id, 44, 222) @mock.patch('time.time') def _test_purge_metering_info(self, current_timestamp, is_empty, mock_time): mock_time.return_value = current_timestamp self.agent.metering_infos = {'fake': {'last_update': 1}} self.config(report_interval=1) self.agent._purge_metering_info() self.assertEqual(0 if is_empty else 1, len(self.agent.metering_infos)) self.assertEqual(1, mock_time.call_count) def test_purge_metering_info(self): # 1 < 2 - 1 -> False self._test_purge_metering_info(2, False) def test_purge_metering_info_delete(self): # 1 < 3 - 1 -> False self._test_purge_metering_info(3, True) @mock.patch('time.time') def _test_add_metering_info(self, expected_info, current_timestamp, mock_time): mock_time.return_value = current_timestamp actual_info = self.agent._add_metering_info('fake_label_id', 1, 1) self.assertEqual(1, len(self.agent.metering_infos)) self.assertEqual(expected_info, actual_info) self.assertEqual(expected_info, self.agent.metering_infos['fake_label_id']) self.assertEqual(1, mock_time.call_count) def test_add_metering_info_create(self): expected_info = {'bytes': 1, 'pkts': 1, 'time': 0, 'first_update': 1, 'last_update': 1} self._test_add_metering_info(expected_info, 1) def test_add_metering_info_update(self): expected_info = {'bytes': 1, 'pkts': 1, 'time': 0, 'first_update': 1, 'last_update': 1} self.agent.metering_infos = {'fake_label_id': expected_info} expected_info.update({'bytes': 2, 'pkts': 2, 'time': 1, 'last_update': 2}) self._test_add_metering_info(expected_info, 2) class TestMeteringDriver(base.BaseTestCase): def setUp(self): super(TestMeteringDriver, self).setUp() 
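# This setUp configures the noop driver, then replaces the agent's
# metering_driver with a bare mock.Mock() so the tests can simulate drivers
# with missing or failing methods and exercise the agent's error handling
# around driver calls.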
cfg.CONF.register_opts(metering_agent.MeteringAgent.Opts) self.noop_driver = ('neutron.services.metering.drivers.noop.' 'noop_driver.NoopMeteringDriver') cfg.CONF.set_override('driver', self.noop_driver) self.agent = metering_agent.MeteringAgent('my agent', cfg.CONF) self.driver = mock.Mock() self.agent.metering_driver = self.driver def test_add_metering_label_with_bad_driver_impl(self): del self.driver.add_metering_label with mock.patch.object(metering_agent, 'LOG') as log: self.agent.add_metering_label(None, ROUTERS) log.exception.assert_called_with(mock.ANY, {'driver': self.noop_driver, 'func': 'add_metering_label'}) def test_add_metering_label_runtime_error(self): self.driver.add_metering_label.side_effect = RuntimeError with mock.patch.object(metering_agent, 'LOG') as log: self.agent.add_metering_label(None, ROUTERS) log.exception.assert_called_with(mock.ANY, {'driver': self.noop_driver, 'func': 'add_metering_label'}) def test_init_chain(self): with mock.patch('oslo_service.' 'periodic_task.PeriodicTasks.__init__') as init: metering_agent.MeteringAgent('my agent', cfg.CONF) init.assert_called_once_with(cfg.CONF) neutron-8.4.0/neutron/tests/unit/services/metering/test_metering_plugin.py0000664000567000056710000005056413044372760030447 0ustar jenkinsjenkins00000000000000# Copyright (C) 2013 eNovance SAS # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import mock from oslo_utils import uuidutils from neutron.api.v2 import attributes as attr from neutron import context from neutron.db import agents_db from neutron.db import l3_agentschedulers_db from neutron.db.metering import metering_rpc from neutron.extensions import l3 as ext_l3 from neutron.extensions import metering as ext_metering from neutron import manager from neutron.plugins.common import constants from neutron.tests.common import helpers from neutron.tests import tools from neutron.tests.unit.db.metering import test_metering_db from neutron.tests.unit.db import test_db_base_plugin_v2 from neutron.tests.unit.extensions import test_l3 _uuid = uuidutils.generate_uuid METERING_SERVICE_PLUGIN_KLASS = ( "neutron.services.metering." 
"metering_plugin.MeteringPlugin" ) class MeteringTestExtensionManager(object): def get_resources(self): attr.RESOURCE_ATTRIBUTE_MAP.update(ext_metering.RESOURCE_ATTRIBUTE_MAP) attr.RESOURCE_ATTRIBUTE_MAP.update(ext_l3.RESOURCE_ATTRIBUTE_MAP) l3_res = ext_l3.L3.get_resources() metering_res = ext_metering.Metering.get_resources() return l3_res + metering_res def get_actions(self): return [] def get_request_extensions(self): return [] class TestMeteringPlugin(test_db_base_plugin_v2.NeutronDbPluginV2TestCase, test_l3.L3NatTestCaseMixin, test_metering_db.MeteringPluginDbTestCaseMixin): resource_prefix_map = dict( (k.replace('_', '-'), "/metering") for k in ext_metering.RESOURCE_ATTRIBUTE_MAP.keys() ) def setUp(self): plugin = 'neutron.tests.unit.extensions.test_l3.TestL3NatIntPlugin' service_plugins = {'metering_plugin_name': METERING_SERVICE_PLUGIN_KLASS} ext_mgr = MeteringTestExtensionManager() super(TestMeteringPlugin, self).setUp(plugin=plugin, ext_mgr=ext_mgr, service_plugins=service_plugins) self.uuid = '654f6b9d-0f36-4ae5-bd1b-01616794ca60' uuid = 'oslo_utils.uuidutils.generate_uuid' self.uuid_patch = mock.patch(uuid, return_value=self.uuid) self.mock_uuid = self.uuid_patch.start() self.tenant_id = 'a7e61382-47b8-4d40-bae3-f95981b5637b' self.ctx = context.Context('', self.tenant_id, is_admin=True) self.context_patch = mock.patch('neutron.context.Context', return_value=self.ctx) self.mock_context = self.context_patch.start() self.topic = 'metering_agent' add = ('neutron.api.rpc.agentnotifiers.' + 'metering_rpc_agent_api.MeteringAgentNotifyAPI' + '.add_metering_label') self.add_patch = mock.patch(add) self.mock_add = self.add_patch.start() remove = ('neutron.api.rpc.agentnotifiers.' + 'metering_rpc_agent_api.MeteringAgentNotifyAPI' + '.remove_metering_label') self.remove_patch = mock.patch(remove) self.mock_remove = self.remove_patch.start() update = ('neutron.api.rpc.agentnotifiers.' + 'metering_rpc_agent_api.MeteringAgentNotifyAPI' + '.update_metering_label_rules') self.update_patch = mock.patch(update) self.mock_update = self.update_patch.start() add_rule = ('neutron.api.rpc.agentnotifiers.' + 'metering_rpc_agent_api.MeteringAgentNotifyAPI' + '.add_metering_label_rule') self.add_rule_patch = mock.patch(add_rule) self.mock_add_rule = self.add_rule_patch.start() remove_rule = ('neutron.api.rpc.agentnotifiers.' 
+ 'metering_rpc_agent_api.MeteringAgentNotifyAPI' + '.remove_metering_label_rule') self.remove_rule_patch = mock.patch(remove_rule) self.mock_remove_rule = self.remove_rule_patch.start() def test_add_metering_label_rpc_call(self): second_uuid = 'e27fe2df-376e-4ac7-ae13-92f050a21f84' expected = [{'status': 'ACTIVE', 'name': 'router1', 'gw_port_id': None, 'admin_state_up': True, 'tenant_id': self.tenant_id, '_metering_labels': [ {'rules': [], 'id': self.uuid}], 'id': self.uuid}] tenant_id_2 = '8a268a58-1610-4890-87e0-07abb8231206' self.mock_uuid.return_value = second_uuid with self.router(name='router2', tenant_id=tenant_id_2, set_context=True): self.mock_uuid.return_value = self.uuid with self.router(name='router1', tenant_id=self.tenant_id, set_context=True): with self.metering_label(tenant_id=self.tenant_id, set_context=True): self.mock_add.assert_called_with(self.ctx, expected) def test_add_metering_label_shared_rpc_call(self): second_uuid = 'e27fe2df-376e-4ac7-ae13-92f050a21f84' expected = [{'status': 'ACTIVE', 'name': 'router1', 'gw_port_id': None, 'admin_state_up': True, 'tenant_id': self.tenant_id, '_metering_labels': [ {'rules': [], 'id': self.uuid}, {'rules': [], 'id': second_uuid}], 'id': self.uuid}] tenant_id_2 = '8a268a58-1610-4890-87e0-07abb8231206' with self.router(name='router1', tenant_id=self.tenant_id, set_context=True): with self.metering_label(tenant_id=self.tenant_id, set_context=True): self.mock_uuid.return_value = second_uuid with self.metering_label(tenant_id=tenant_id_2, shared=True, set_context=True): self.mock_add.assert_called_with(self.ctx, expected) def test_remove_metering_label_rpc_call(self): expected = [{'status': 'ACTIVE', 'name': 'router1', 'gw_port_id': None, 'admin_state_up': True, 'tenant_id': self.tenant_id, '_metering_labels': [ {'rules': [], 'id': self.uuid}], 'id': self.uuid}] with self.router(tenant_id=self.tenant_id, set_context=True): with self.metering_label(tenant_id=self.tenant_id, set_context=True) as label: self.mock_add.assert_called_with(self.ctx, expected) self._delete('metering-labels', label['metering_label']['id']) self.mock_remove.assert_called_with(self.ctx, expected) def test_remove_one_metering_label_rpc_call(self): second_uuid = 'e27fe2df-376e-4ac7-ae13-92f050a21f84' expected_add = [{'status': 'ACTIVE', 'name': 'router1', 'gw_port_id': None, 'admin_state_up': True, 'tenant_id': self.tenant_id, '_metering_labels': [ {'rules': [], 'id': self.uuid}, {'rules': [], 'id': second_uuid}], 'id': self.uuid}] expected_remove = [{'status': 'ACTIVE', 'name': 'router1', 'gw_port_id': None, 'admin_state_up': True, 'tenant_id': self.tenant_id, '_metering_labels': [ {'rules': [], 'id': second_uuid}], 'id': self.uuid}] with self.router(tenant_id=self.tenant_id, set_context=True): with self.metering_label(tenant_id=self.tenant_id, set_context=True): self.mock_uuid.return_value = second_uuid with self.metering_label(tenant_id=self.tenant_id, set_context=True) as label: self.mock_add.assert_called_with(self.ctx, expected_add) self._delete('metering-labels', label['metering_label']['id']) self.mock_remove.assert_called_with(self.ctx, expected_remove) def test_add_and_remove_metering_label_rule_rpc_call(self): second_uuid = 'e27fe2df-376e-4ac7-ae13-92f050a21f84' expected_add = [{'status': 'ACTIVE', 'name': 'router1', 'gw_port_id': None, 'admin_state_up': True, 'tenant_id': self.tenant_id, '_metering_labels': [ {'rule': { 'remote_ip_prefix': '10.0.0.0/24', 'direction': 'ingress', 'metering_label_id': self.uuid, 'excluded': False, 'id': second_uuid}, 
'id': self.uuid}], 'id': self.uuid}] expected_del = [{'status': 'ACTIVE', 'name': 'router1', 'gw_port_id': None, 'admin_state_up': True, 'tenant_id': self.tenant_id, '_metering_labels': [ {'rule': { 'remote_ip_prefix': '10.0.0.0/24', 'direction': 'ingress', 'metering_label_id': self.uuid, 'excluded': False, 'id': second_uuid}, 'id': self.uuid}], 'id': self.uuid}] with self.router(tenant_id=self.tenant_id, set_context=True): with self.metering_label(tenant_id=self.tenant_id, set_context=True) as label: l = label['metering_label'] self.mock_uuid.return_value = second_uuid with self.metering_label_rule(l['id']): self.mock_add_rule.assert_called_with(self.ctx, expected_add) self._delete('metering-label-rules', second_uuid) self.mock_remove_rule.assert_called_with(self.ctx, expected_del) def test_delete_metering_label_does_not_clear_router_tenant_id(self): tenant_id = '654f6b9d-0f36-4ae5-bd1b-01616794ca60' with self.metering_label(tenant_id=tenant_id) as metering_label: with self.router(tenant_id=tenant_id, set_context=True) as r: router = self._show('routers', r['router']['id']) self.assertEqual(tenant_id, router['router']['tenant_id']) metering_label_id = metering_label['metering_label']['id'] self._delete('metering-labels', metering_label_id, 204) router = self._show('routers', r['router']['id']) self.assertEqual(tenant_id, router['router']['tenant_id']) class TestMeteringPluginL3AgentScheduler( l3_agentschedulers_db.L3AgentSchedulerDbMixin, test_db_base_plugin_v2.NeutronDbPluginV2TestCase, test_l3.L3NatTestCaseMixin, test_metering_db.MeteringPluginDbTestCaseMixin): resource_prefix_map = dict( (k.replace('_', '-'), "/metering") for k in ext_metering.RESOURCE_ATTRIBUTE_MAP.keys() ) def setUp(self, plugin_str=None, service_plugins=None, scheduler=None): if not plugin_str: plugin_str = ('neutron.tests.unit.extensions.test_l3.' 'TestL3NatIntAgentSchedulingPlugin') if not service_plugins: service_plugins = {'metering_plugin_name': METERING_SERVICE_PLUGIN_KLASS} if not scheduler: scheduler = plugin_str ext_mgr = MeteringTestExtensionManager() super(TestMeteringPluginL3AgentScheduler, self).setUp(plugin=plugin_str, ext_mgr=ext_mgr, service_plugins=service_plugins) self.uuid = '654f6b9d-0f36-4ae5-bd1b-01616794ca60' uuid = 'oslo_utils.uuidutils.generate_uuid' self.uuid_patch = mock.patch(uuid, return_value=self.uuid) self.mock_uuid = self.uuid_patch.start() self.tenant_id = 'a7e61382-47b8-4d40-bae3-f95981b5637b' self.ctx = context.Context('', self.tenant_id, is_admin=True) self.context_patch = mock.patch('neutron.context.Context', return_value=self.ctx) self.mock_context = self.context_patch.start() self.l3routers_patch = mock.patch(scheduler + '.get_l3_agents_hosting_routers') self.l3routers_mock = self.l3routers_patch.start() self.topic = 'metering_agent' add = ('neutron.api.rpc.agentnotifiers.' + 'metering_rpc_agent_api.MeteringAgentNotifyAPI' + '.add_metering_label') self.add_patch = mock.patch(add) self.mock_add = self.add_patch.start() remove = ('neutron.api.rpc.agentnotifiers.' 
+ 'metering_rpc_agent_api.MeteringAgentNotifyAPI' + '.remove_metering_label') self.remove_patch = mock.patch(remove) self.mock_remove = self.remove_patch.start() def test_add_metering_label_rpc_call(self): second_uuid = 'e27fe2df-376e-4ac7-ae13-92f050a21f84' expected = [{'status': 'ACTIVE', 'name': 'router1', 'gw_port_id': None, 'admin_state_up': True, 'tenant_id': self.tenant_id, '_metering_labels': [ {'rules': [], 'id': second_uuid}], 'id': self.uuid}, {'status': 'ACTIVE', 'name': 'router2', 'gw_port_id': None, 'admin_state_up': True, 'tenant_id': self.tenant_id, '_metering_labels': [ {'rules': [], 'id': second_uuid}], 'id': second_uuid}] # bind each router to a specific agent agent1 = agents_db.Agent(host='agent1') agent2 = agents_db.Agent(host='agent2') agents = {self.uuid: agent1, second_uuid: agent2} def side_effect(context, routers, admin_state_up, active): return [agents[routers[0]]] self.l3routers_mock.side_effect = side_effect with self.router(name='router1', tenant_id=self.tenant_id, set_context=True): self.mock_uuid.return_value = second_uuid with self.router(name='router2', tenant_id=self.tenant_id, set_context=True): with self.metering_label(tenant_id=self.tenant_id, set_context=True): self.mock_add.assert_called_with( self.ctx, tools.UnorderedList(expected)) class TestMeteringPluginL3AgentSchedulerServicePlugin( TestMeteringPluginL3AgentScheduler): """Unit tests for the case where a separate service plugin implements L3 routing. """ def setUp(self): l3_plugin = ('neutron.tests.unit.extensions.test_l3.' 'TestL3NatAgentSchedulingServicePlugin') service_plugins = {'metering_plugin_name': METERING_SERVICE_PLUGIN_KLASS, 'l3_plugin_name': l3_plugin} plugin_str = ('neutron.tests.unit.extensions.test_l3.' 'TestNoL3NatPlugin') super(TestMeteringPluginL3AgentSchedulerServicePlugin, self).setUp( plugin_str=plugin_str, service_plugins=service_plugins, scheduler=l3_plugin) class TestMeteringPluginRpcFromL3Agent( test_db_base_plugin_v2.NeutronDbPluginV2TestCase, test_l3.L3NatTestCaseMixin, test_metering_db.MeteringPluginDbTestCaseMixin): resource_prefix_map = dict( (k.replace('_', '-'), "/metering") for k in ext_metering.RESOURCE_ATTRIBUTE_MAP ) def setUp(self): service_plugins = {'metering_plugin_name': METERING_SERVICE_PLUGIN_KLASS} plugin = ('neutron.tests.unit.extensions.test_l3.'
'TestL3NatIntAgentSchedulingPlugin') ext_mgr = MeteringTestExtensionManager() super(TestMeteringPluginRpcFromL3Agent, self).setUp(plugin=plugin, service_plugins=service_plugins, ext_mgr=ext_mgr) self.meter_plugin = manager.NeutronManager.get_service_plugins().get( constants.METERING) self.tenant_id = 'admin_tenant_id' self.tenant_id_1 = 'tenant_id_1' self.tenant_id_2 = 'tenant_id_2' self.adminContext = context.get_admin_context() helpers.register_l3_agent(host='agent1') def test_get_sync_data_metering(self): with self.subnet() as subnet: s = subnet['subnet'] self._set_net_external(s['network_id']) with self.router(name='router1', subnet=subnet) as router: r = router['router'] self._add_external_gateway_to_router(r['id'], s['network_id']) with self.metering_label(tenant_id=r['tenant_id']): callbacks = metering_rpc.MeteringRpcCallbacks( self.meter_plugin) data = callbacks.get_sync_data_metering(self.adminContext, host='agent1') self.assertEqual('router1', data[0]['name']) helpers.register_l3_agent(host='agent2') data = callbacks.get_sync_data_metering(self.adminContext, host='agent2') self.assertFalse(data) self._remove_external_gateway_from_router( r['id'], s['network_id']) def test_get_sync_data_metering_shared(self): with self.router(name='router1', tenant_id=self.tenant_id_1): with self.router(name='router2', tenant_id=self.tenant_id_2): with self.metering_label(tenant_id=self.tenant_id, shared=True): callbacks = metering_rpc.MeteringRpcCallbacks( self.meter_plugin) data = callbacks.get_sync_data_metering(self.adminContext) routers = [router['name'] for router in data] self.assertIn('router1', routers) self.assertIn('router2', routers) def test_get_sync_data_metering_not_shared(self): with self.router(name='router1', tenant_id=self.tenant_id_1): with self.router(name='router2', tenant_id=self.tenant_id_2): with self.metering_label(tenant_id=self.tenant_id): callbacks = metering_rpc.MeteringRpcCallbacks( self.meter_plugin) data = callbacks.get_sync_data_metering(self.adminContext) routers = [router['name'] for router in data] self.assertEqual([], routers) neutron-8.4.0/neutron/tests/unit/services/bgp/0000775000567000056710000000000013044373210022561 5ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/tests/unit/services/bgp/scheduler/0000775000567000056710000000000013044373210024537 5ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/tests/unit/services/bgp/scheduler/__init__.py0000664000567000056710000000000013044372760026647 0ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/tests/unit/services/bgp/scheduler/test_bgp_dragent_scheduler.py0000664000567000056710000002160313044372760032475 0ustar jenkinsjenkins00000000000000# Copyright 2016 Huawei Technologies India Pvt. Ltd. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
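# The scheduler tests in this module rely on the ``testscenarios`` library:
# ``load_tests = testscenarios.load_tests_apply_scenarios`` expands each entry
# of a class-level ``scenarios`` list into a separate test case, with the
# entry's dict applied as instance attributes. A minimal sketch of the
# pattern (``TestAdd`` and its attributes are illustrative, not neutron code):
#
#     import testscenarios
#     import testtools
#
#     load_tests = testscenarios.load_tests_apply_scenarios
#
#     class TestAdd(testtools.TestCase):
#         scenarios = [
#             ('positive', dict(a=1, b=2, expected=3)),
#             ('negative', dict(a=-1, b=-2, expected=-3)),
#         ]
#
#         def test_add(self):
#             self.assertEqual(self.expected, self.a + self.b)
#
# The runner then reports one result per (method, scenario) pair, e.g.
# ``test_add(positive)`` and ``test_add(negative)``.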
import testscenarios from oslo_utils import importutils from neutron import context from neutron.db import bgp_db from neutron.db import bgp_dragentscheduler_db as bgp_dras_db from neutron.services.bgp.scheduler import bgp_dragent_scheduler as bgp_dras from neutron.tests.common import helpers from neutron.tests.unit import testlib_api # Required to generate tests from scenarios. Not compatible with nose. load_tests = testscenarios.load_tests_apply_scenarios class TestBgpDrAgentSchedulerBaseTestCase(testlib_api.SqlTestCase): def setUp(self): super(TestBgpDrAgentSchedulerBaseTestCase, self).setUp() self.ctx = context.get_admin_context() self.bgp_speaker = {'id': 'foo_bgp_speaker_id'} self.bgp_speaker_id = 'foo_bgp_speaker_id' self._save_bgp_speaker(self.bgp_speaker_id) def _create_and_set_agents_down(self, hosts, down_agent_count=0, admin_state_up=True): agents = [] for i, host in enumerate(hosts): is_alive = i >= down_agent_count agents.append(helpers.register_bgp_dragent( host, admin_state_up=admin_state_up, alive=is_alive)) return agents def _save_bgp_speaker(self, bgp_speaker_id): cls = bgp_db.BgpDbMixin() bgp_speaker_body = {'bgp_speaker': { 'name': 'fake_bgp_speaker', 'ip_version': '4', 'local_as': '123', 'advertise_floating_ip_host_routes': '0', 'advertise_tenant_networks': '0', 'peers': [], 'networks': []}} cls._save_bgp_speaker(self.ctx, bgp_speaker_body, uuid=bgp_speaker_id) def _test_schedule_bind_bgp_speaker(self, agents, bgp_speaker_id): scheduler = bgp_dras.ChanceScheduler() scheduler.resource_filter.bind(self.ctx, agents, bgp_speaker_id) results = self.ctx.session.query( bgp_dras_db.BgpSpeakerDrAgentBinding).filter_by( bgp_speaker_id=bgp_speaker_id).all() for result in results: self.assertEqual(bgp_speaker_id, result.bgp_speaker_id) class TestBgpDrAgentScheduler(TestBgpDrAgentSchedulerBaseTestCase, bgp_db.BgpDbMixin): def test_schedule_bind_bgp_speaker_single_agent(self): agents = self._create_and_set_agents_down(['host-a']) self._test_schedule_bind_bgp_speaker(agents, self.bgp_speaker_id) def test_schedule_bind_bgp_speaker_multi_agents(self): agents = self._create_and_set_agents_down(['host-a', 'host-b']) self._test_schedule_bind_bgp_speaker(agents, self.bgp_speaker_id) class TestBgpAgentFilter(TestBgpDrAgentSchedulerBaseTestCase, bgp_db.BgpDbMixin, bgp_dras_db.BgpDrAgentSchedulerDbMixin): def setUp(self): super(TestBgpAgentFilter, self).setUp() self.bgp_drscheduler = importutils.import_object( 'neutron.services.bgp.scheduler.' 
'bgp_dragent_scheduler.ChanceScheduler' ) self.plugin = self def _test_filter_agents_helper(self, bgp_speaker, expected_filtered_dragent_ids=None, expected_num_agents=1): filtered_agents = ( self.plugin.bgp_drscheduler.resource_filter.filter_agents( self.plugin, self.ctx, bgp_speaker)) self.assertEqual(expected_num_agents, filtered_agents['n_agents']) actual_filtered_dragent_ids = [ agent.id for agent in filtered_agents['hostable_agents']] if expected_filtered_dragent_ids is None: expected_filtered_dragent_ids = [] self.assertEqual(len(expected_filtered_dragent_ids), len(actual_filtered_dragent_ids)) for filtered_agent_id in actual_filtered_dragent_ids: self.assertIn(filtered_agent_id, expected_filtered_dragent_ids) def test_filter_agents_single_agent(self): agents = self._create_and_set_agents_down(['host-a']) expected_filtered_dragent_ids = [agents[0].id] self._test_filter_agents_helper( self.bgp_speaker, expected_filtered_dragent_ids=expected_filtered_dragent_ids) def test_filter_agents_no_agents(self): expected_filtered_dragent_ids = [] self._test_filter_agents_helper( self.bgp_speaker, expected_filtered_dragent_ids=expected_filtered_dragent_ids, expected_num_agents=0) def test_filter_agents_two_agents(self): agents = self._create_and_set_agents_down(['host-a', 'host-b']) expected_filtered_dragent_ids = [agent.id for agent in agents] self._test_filter_agents_helper( self.bgp_speaker, expected_filtered_dragent_ids=expected_filtered_dragent_ids) def test_filter_agents_agent_already_scheduled(self): agents = self._create_and_set_agents_down(['host-a', 'host-b']) self._test_schedule_bind_bgp_speaker([agents[0]], self.bgp_speaker_id) self._test_filter_agents_helper(self.bgp_speaker, expected_num_agents=0) def test_filter_agents_multiple_agents_bgp_speakers(self): agents = self._create_and_set_agents_down(['host-a', 'host-b']) self._test_schedule_bind_bgp_speaker([agents[0]], self.bgp_speaker_id) bgp_speaker = {'id': 'bar-speaker-id'} self._save_bgp_speaker(bgp_speaker['id']) expected_filtered_dragent_ids = [agents[1].id] self._test_filter_agents_helper( bgp_speaker, expected_filtered_dragent_ids=expected_filtered_dragent_ids) class TestAutoScheduleBgpSpeakers(TestBgpDrAgentSchedulerBaseTestCase): """Unit test scenarios for schedule_unscheduled_bgp_speakers. bgp_speaker_present BGP speaker is present or not scheduled_already BGP speaker is already scheduled to the agent or not agent_down BGP DRAgent is down or alive valid_host If true, then a valid host is passed to schedule the BGP speaker, else an invalid host is passed.
""" scenarios = [ ('BGP speaker present', dict(bgp_speaker_present=True, scheduled_already=False, agent_down=False, valid_host=True, expected_result=True)), ('No BGP speaker', dict(bgp_speaker_present=False, scheduled_already=False, agent_down=False, valid_host=True, expected_result=False)), ('BGP speaker already scheduled', dict(bgp_speaker_present=True, scheduled_already=True, agent_down=False, valid_host=True, expected_result=False)), ('BGP DR agent down', dict(bgp_speaker_present=True, scheduled_already=False, agent_down=True, valid_host=False, expected_result=False)), ('Invalid host', dict(bgp_speaker_present=True, scheduled_already=False, agent_down=False, valid_host=False, expected_result=False)), ] def test_auto_schedule_bgp_speaker(self): scheduler = bgp_dras.ChanceScheduler() if self.bgp_speaker_present: down_agent_count = 1 if self.agent_down else 0 agents = self._create_and_set_agents_down( ['host-a'], down_agent_count=down_agent_count) if self.scheduled_already: self._test_schedule_bind_bgp_speaker(agents, self.bgp_speaker_id) expected_hosted_agents = (1 if self.bgp_speaker_present and self.valid_host else 0) host = "host-a" if self.valid_host else "host-b" observed_ret_value = scheduler.schedule_unscheduled_bgp_speakers( self.ctx, host) self.assertEqual(self.expected_result, observed_ret_value) hosted_agents = self.ctx.session.query( bgp_dras_db.BgpSpeakerDrAgentBinding).all() self.assertEqual(expected_hosted_agents, len(hosted_agents)) neutron-8.4.0/neutron/tests/unit/services/bgp/__init__.py0000664000567000056710000000000013044372760024671 0ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/tests/unit/services/bgp/driver/0000775000567000056710000000000013044373210024054 5ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/tests/unit/services/bgp/driver/__init__.py0000664000567000056710000000000013044372760026164 0ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/tests/unit/services/bgp/driver/test_utils.py0000664000567000056710000000345613044372760026646 0ustar jenkinsjenkins00000000000000# Copyright 2016 Huawei Technologies India Pvt. Ltd. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
from neutron.services.bgp.driver import utils from neutron.tests import base FAKE_LOCAL_AS = 12345 FAKE_RYU_SPEAKER = {} class TestBgpMultiSpeakerCache(base.BaseTestCase): def setUp(self): super(TestBgpMultiSpeakerCache, self).setUp() self.expected_cache = {FAKE_LOCAL_AS: FAKE_RYU_SPEAKER} self.bs_cache = utils.BgpMultiSpeakerCache() def test_put_bgp_speaker(self): self.bs_cache.put_bgp_speaker(FAKE_LOCAL_AS, FAKE_RYU_SPEAKER) self.assertEqual(self.expected_cache, self.bs_cache.cache) def test_remove_bgp_speaker(self): self.bs_cache.put_bgp_speaker(FAKE_LOCAL_AS, FAKE_RYU_SPEAKER) self.assertEqual(1, len(self.bs_cache.cache)) self.bs_cache.remove_bgp_speaker(FAKE_LOCAL_AS) self.assertEqual(0, len(self.bs_cache.cache)) def test_get_bgp_speaker(self): self.bs_cache.put_bgp_speaker(FAKE_LOCAL_AS, FAKE_RYU_SPEAKER) self.assertEqual( FAKE_RYU_SPEAKER, self.bs_cache.get_bgp_speaker(FAKE_LOCAL_AS)) def test_get_hosted_bgp_speakers_count(self): self.bs_cache.put_bgp_speaker(FAKE_LOCAL_AS, FAKE_RYU_SPEAKER) self.assertEqual(1, self.bs_cache.get_hosted_bgp_speakers_count()) neutron-8.4.0/neutron/tests/unit/services/bgp/driver/ryu/0000775000567000056710000000000013044373210024673 5ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/tests/unit/services/bgp/driver/ryu/__init__.py0000664000567000056710000000000013044372760027003 0ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/tests/unit/services/bgp/driver/ryu/test_driver.py0000664000567000056710000003166013044372760027616 0ustar jenkinsjenkins00000000000000# Copyright 2016 Huawei Technologies India Pvt. Ltd. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
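# The driver tests below patch ryu's BGPSpeaker class with a mock before any
# speaker is created, so every call the driver makes (neighbor_add,
# prefix_add, ...) can be asserted without starting a real BGP session. A
# condensed sketch of the pattern used throughout this module (the standalone
# ``driver`` variable is illustrative; the tests use ``self.ryu_bgp_driver``):
#
#     patcher = mock.patch.object(bgpspeaker, 'BGPSpeaker')
#     patcher.start()                     # the class itself is now a MagicMock
#     driver = ryu_driver.RyuBgpDriver(cfg.CONF.BGP)
#     driver.add_bgp_speaker(12345)       # instantiates the mocked class
#     speaker = driver.cache.get_bgp_speaker(12345)
#     driver.add_bgp_peer(12345, '2.2.2.5', 45678)
#     speaker.neighbor_add.assert_called_once_with(
#         address='2.2.2.5', remote_as=45678, password=None,
#         connect_mode=CONNECT_MODE_ACTIVE)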
import six import mock from oslo_config import cfg from oslo_utils import encodeutils from ryu.services.protocols.bgp import bgpspeaker from ryu.services.protocols.bgp.rtconf.neighbors import CONNECT_MODE_ACTIVE from neutron.services.bgp.agent import config as bgp_config from neutron.services.bgp.driver import exceptions as bgp_driver_exc from neutron.services.bgp.driver.ryu import driver as ryu_driver from neutron.tests import base # Test variables for BGP Speaker FAKE_LOCAL_AS1 = 12345 FAKE_LOCAL_AS2 = 23456 FAKE_ROUTER_ID = '1.1.1.1' # Test variables for BGP Peer FAKE_PEER_AS = 45678 FAKE_PEER_IP = '2.2.2.5' FAKE_AUTH_TYPE = 'md5' FAKE_PEER_PASSWORD = 'awesome' # Test variables for Route FAKE_ROUTE = '2.2.2.0/24' FAKE_NEXTHOP = '5.5.5.5' class TestRyuBgpDriver(base.BaseTestCase): def setUp(self): super(TestRyuBgpDriver, self).setUp() cfg.CONF.register_opts(bgp_config.BGP_PROTO_CONFIG_OPTS, 'BGP') cfg.CONF.set_override('bgp_router_id', FAKE_ROUTER_ID, 'BGP') self.ryu_bgp_driver = ryu_driver.RyuBgpDriver(cfg.CONF.BGP) mock_ryu_speaker_p = mock.patch.object(bgpspeaker, 'BGPSpeaker') self.mock_ryu_speaker = mock_ryu_speaker_p.start() def test_add_new_bgp_speaker(self): self.ryu_bgp_driver.add_bgp_speaker(FAKE_LOCAL_AS1) self.assertEqual(1, self.ryu_bgp_driver.cache.get_hosted_bgp_speakers_count()) self.mock_ryu_speaker.assert_called_once_with( as_number=FAKE_LOCAL_AS1, router_id=FAKE_ROUTER_ID, bgp_server_port=0, best_path_change_handler=ryu_driver.best_path_change_cb, peer_down_handler=ryu_driver.bgp_peer_down_cb, peer_up_handler=ryu_driver.bgp_peer_up_cb) def test_remove_bgp_speaker(self): self.ryu_bgp_driver.add_bgp_speaker(FAKE_LOCAL_AS1) self.assertEqual(1, self.ryu_bgp_driver.cache.get_hosted_bgp_speakers_count()) speaker = self.ryu_bgp_driver.cache.get_bgp_speaker(FAKE_LOCAL_AS1) self.ryu_bgp_driver.delete_bgp_speaker(FAKE_LOCAL_AS1) self.assertEqual(0, self.ryu_bgp_driver.cache.get_hosted_bgp_speakers_count()) self.assertEqual(1, speaker.shutdown.call_count) def test_add_bgp_peer_without_password(self): self.ryu_bgp_driver.add_bgp_speaker(FAKE_LOCAL_AS1) self.assertEqual(1, self.ryu_bgp_driver.cache.get_hosted_bgp_speakers_count()) self.ryu_bgp_driver.add_bgp_peer(FAKE_LOCAL_AS1, FAKE_PEER_IP, FAKE_PEER_AS) speaker = self.ryu_bgp_driver.cache.get_bgp_speaker(FAKE_LOCAL_AS1) speaker.neighbor_add.assert_called_once_with( address=FAKE_PEER_IP, remote_as=FAKE_PEER_AS, password=None, connect_mode=CONNECT_MODE_ACTIVE) def test_add_bgp_peer_with_password(self): self.ryu_bgp_driver.add_bgp_speaker(FAKE_LOCAL_AS1) self.assertEqual(1, self.ryu_bgp_driver.cache.get_hosted_bgp_speakers_count()) self.ryu_bgp_driver.add_bgp_peer(FAKE_LOCAL_AS1, FAKE_PEER_IP, FAKE_PEER_AS, FAKE_AUTH_TYPE, FAKE_PEER_PASSWORD) speaker = self.ryu_bgp_driver.cache.get_bgp_speaker(FAKE_LOCAL_AS1) speaker.neighbor_add.assert_called_once_with( address=FAKE_PEER_IP, remote_as=FAKE_PEER_AS, password=encodeutils.to_utf8(FAKE_PEER_PASSWORD), connect_mode=CONNECT_MODE_ACTIVE) def test_add_bgp_peer_with_unicode_password(self): self.ryu_bgp_driver.add_bgp_speaker(FAKE_LOCAL_AS1) self.assertEqual(1, self.ryu_bgp_driver.cache.get_hosted_bgp_speakers_count()) # In Python3 a str is unicode if six.PY3: NEW_FAKE_PEER_PASSWORD = str(FAKE_PEER_PASSWORD) else: NEW_FAKE_PEER_PASSWORD = unicode(FAKE_PEER_PASSWORD) self.ryu_bgp_driver.add_bgp_peer( FAKE_LOCAL_AS1, FAKE_PEER_IP, FAKE_PEER_AS, FAKE_AUTH_TYPE, NEW_FAKE_PEER_PASSWORD) speaker = self.ryu_bgp_driver.cache.get_bgp_speaker(FAKE_LOCAL_AS1) 
speaker.neighbor_add.assert_called_once_with( address=FAKE_PEER_IP, remote_as=FAKE_PEER_AS, password=encodeutils.to_utf8(NEW_FAKE_PEER_PASSWORD), connect_mode=CONNECT_MODE_ACTIVE) def test_remove_bgp_peer(self): self.ryu_bgp_driver.add_bgp_speaker(FAKE_LOCAL_AS1) self.assertEqual(1, self.ryu_bgp_driver.cache.get_hosted_bgp_speakers_count()) self.ryu_bgp_driver.delete_bgp_peer(FAKE_LOCAL_AS1, FAKE_PEER_IP) speaker = self.ryu_bgp_driver.cache.get_bgp_speaker(FAKE_LOCAL_AS1) speaker.neighbor_del.assert_called_once_with(address=FAKE_PEER_IP) def test_advertise_route(self): self.ryu_bgp_driver.add_bgp_speaker(FAKE_LOCAL_AS1) self.assertEqual(1, self.ryu_bgp_driver.cache.get_hosted_bgp_speakers_count()) self.ryu_bgp_driver.advertise_route(FAKE_LOCAL_AS1, FAKE_ROUTE, FAKE_NEXTHOP) speaker = self.ryu_bgp_driver.cache.get_bgp_speaker(FAKE_LOCAL_AS1) speaker.prefix_add.assert_called_once_with(prefix=FAKE_ROUTE, next_hop=FAKE_NEXTHOP) def test_withdraw_route(self): self.ryu_bgp_driver.add_bgp_speaker(FAKE_LOCAL_AS1) self.assertEqual(1, self.ryu_bgp_driver.cache.get_hosted_bgp_speakers_count()) self.ryu_bgp_driver.withdraw_route(FAKE_LOCAL_AS1, FAKE_ROUTE) speaker = self.ryu_bgp_driver.cache.get_bgp_speaker(FAKE_LOCAL_AS1) speaker.prefix_del.assert_called_once_with(prefix=FAKE_ROUTE) def test_add_same_bgp_speakers_twice(self): self.ryu_bgp_driver.add_bgp_speaker(FAKE_LOCAL_AS1) self.assertRaises(bgp_driver_exc.BgpSpeakerAlreadyScheduled, self.ryu_bgp_driver.add_bgp_speaker, FAKE_LOCAL_AS1) def test_add_different_bgp_speakers_when_one_already_added(self): self.ryu_bgp_driver.add_bgp_speaker(FAKE_LOCAL_AS1) self.assertRaises(bgp_driver_exc.BgpSpeakerMaxScheduled, self.ryu_bgp_driver.add_bgp_speaker, FAKE_LOCAL_AS2) def test_add_bgp_speaker_with_invalid_asnum_paramtype(self): self.assertRaises(bgp_driver_exc.InvalidParamType, self.ryu_bgp_driver.add_bgp_speaker, '12345') def test_add_bgp_speaker_with_invalid_asnum_range(self): self.assertRaises(bgp_driver_exc.InvalidParamRange, self.ryu_bgp_driver.add_bgp_speaker, -1) self.assertRaises(bgp_driver_exc.InvalidParamRange, self.ryu_bgp_driver.add_bgp_speaker, 65536) def test_add_bgp_peer_with_invalid_paramtype(self): # Test with an invalid asnum data-type self.ryu_bgp_driver.add_bgp_speaker(FAKE_LOCAL_AS1) self.assertRaises(bgp_driver_exc.InvalidParamType, self.ryu_bgp_driver.add_bgp_peer, FAKE_LOCAL_AS1, FAKE_PEER_IP, '12345') # Test with an invalid auth-type and an invalid password self.assertRaises(bgp_driver_exc.InvalidParamType, self.ryu_bgp_driver.add_bgp_peer, FAKE_LOCAL_AS1, FAKE_PEER_IP, FAKE_PEER_AS, 'sha-1', 1234) # Test with an invalid auth-type and a valid password self.assertRaises(bgp_driver_exc.InvaildAuthType, self.ryu_bgp_driver.add_bgp_peer, FAKE_LOCAL_AS1, FAKE_PEER_IP, FAKE_PEER_AS, 'hmac-md5', FAKE_PEER_PASSWORD) # Test with none auth-type and a valid password self.assertRaises(bgp_driver_exc.InvaildAuthType, self.ryu_bgp_driver.add_bgp_peer, FAKE_LOCAL_AS1, FAKE_PEER_IP, FAKE_PEER_AS, 'none', FAKE_PEER_PASSWORD) # Test with none auth-type and an invalid password self.assertRaises(bgp_driver_exc.InvalidParamType, self.ryu_bgp_driver.add_bgp_peer, FAKE_LOCAL_AS1, FAKE_PEER_IP, FAKE_PEER_AS, 'none', 1234) # Test with a valid auth-type and no password self.assertRaises(bgp_driver_exc.PasswordNotSpecified, self.ryu_bgp_driver.add_bgp_peer, FAKE_LOCAL_AS1, FAKE_PEER_IP, FAKE_PEER_AS, FAKE_AUTH_TYPE, None) def test_add_bgp_peer_with_invalid_asnum_range(self): self.ryu_bgp_driver.add_bgp_speaker(FAKE_LOCAL_AS1) 
self.assertRaises(bgp_driver_exc.InvalidParamRange, self.ryu_bgp_driver.add_bgp_peer, FAKE_LOCAL_AS1, FAKE_PEER_IP, -1) self.assertRaises(bgp_driver_exc.InvalidParamRange, self.ryu_bgp_driver.add_bgp_peer, FAKE_LOCAL_AS1, FAKE_PEER_IP, 65536) def test_add_bgp_peer_without_adding_speaker(self): self.assertRaises(bgp_driver_exc.BgpSpeakerNotAdded, self.ryu_bgp_driver.add_bgp_peer, FAKE_LOCAL_AS1, FAKE_PEER_IP, FAKE_PEER_AS) def test_remove_bgp_peer_with_invalid_paramtype(self): self.ryu_bgp_driver.add_bgp_speaker(FAKE_LOCAL_AS1) self.assertRaises(bgp_driver_exc.InvalidParamType, self.ryu_bgp_driver.delete_bgp_peer, FAKE_LOCAL_AS1, 12345) def test_remove_bgp_peer_without_adding_speaker(self): self.assertRaises(bgp_driver_exc.BgpSpeakerNotAdded, self.ryu_bgp_driver.delete_bgp_peer, FAKE_LOCAL_AS1, FAKE_PEER_IP) def test_advertise_route_with_invalid_paramtype(self): self.ryu_bgp_driver.add_bgp_speaker(FAKE_LOCAL_AS1) self.assertRaises(bgp_driver_exc.InvalidParamType, self.ryu_bgp_driver.advertise_route, FAKE_LOCAL_AS1, 12345, FAKE_NEXTHOP) self.assertRaises(bgp_driver_exc.InvalidParamType, self.ryu_bgp_driver.advertise_route, FAKE_LOCAL_AS1, FAKE_ROUTE, 12345) def test_advertise_route_without_adding_speaker(self): self.assertRaises(bgp_driver_exc.BgpSpeakerNotAdded, self.ryu_bgp_driver.advertise_route, FAKE_LOCAL_AS1, FAKE_ROUTE, FAKE_NEXTHOP) def test_withdraw_route_with_invalid_paramtype(self): self.ryu_bgp_driver.add_bgp_speaker(FAKE_LOCAL_AS1) self.assertRaises(bgp_driver_exc.InvalidParamType, self.ryu_bgp_driver.withdraw_route, FAKE_LOCAL_AS1, 12345) self.assertRaises(bgp_driver_exc.InvalidParamType, self.ryu_bgp_driver.withdraw_route, FAKE_LOCAL_AS1, 12345) def test_withdraw_route_without_adding_speaker(self): self.assertRaises(bgp_driver_exc.BgpSpeakerNotAdded, self.ryu_bgp_driver.withdraw_route, FAKE_LOCAL_AS1, FAKE_ROUTE) def test_add_multiple_bgp_speakers(self): self.ryu_bgp_driver.add_bgp_speaker(FAKE_LOCAL_AS1) self.assertEqual(1, self.ryu_bgp_driver.cache.get_hosted_bgp_speakers_count()) self.assertRaises(bgp_driver_exc.BgpSpeakerMaxScheduled, self.ryu_bgp_driver.add_bgp_speaker, FAKE_LOCAL_AS2) self.assertRaises(bgp_driver_exc.BgpSpeakerNotAdded, self.ryu_bgp_driver.delete_bgp_speaker, FAKE_LOCAL_AS2) self.assertEqual(1, self.ryu_bgp_driver.cache.get_hosted_bgp_speakers_count()) self.ryu_bgp_driver.delete_bgp_speaker(FAKE_LOCAL_AS1) self.assertEqual(0, self.ryu_bgp_driver.cache.get_hosted_bgp_speakers_count()) neutron-8.4.0/neutron/tests/unit/services/bgp/agent/0000775000567000056710000000000013044373210023657 5ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/tests/unit/services/bgp/agent/__init__.py0000664000567000056710000000000013044372760025767 0ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/tests/unit/services/bgp/agent/test_bgp_dragent.py0000664000567000056710000010077613044372760027570 0ustar jenkinsjenkins00000000000000# Copyright 2016 Huawei Technologies India Pvt. Ltd. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
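# Much of this module verifies the agent's sync_state() reconciliation: the
# speakers reported by the plugin are diffed against the agent's local cache.
# In outline -- a sketch of the behaviour the helpers below assert, not the
# agent source itself:
#
#     reported = dict((s['id'], s)
#                     for s in plugin_rpc.get_bgp_speakers(context))
#     for speaker_id in list(cache):
#         if speaker_id not in reported:            # stale: tear it down
#             remove_bgp_speaker_from_dragent(speaker_id)
#     for speaker_id, speaker in reported.items():
#         if speaker_id in cache:                   # known: re-sync details
#             sync_bgp_speaker(speaker)
#         else:                                     # new: configure it
#             safe_configure_dragent_for_bgp_speaker(speaker)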
import copy import sys import uuid import eventlet import mock from oslo_config import cfg import testtools from neutron.common import config as common_config from neutron import context from neutron.services.bgp.agent import bgp_dragent from neutron.services.bgp.agent import config as bgp_config from neutron.services.bgp.agent import entry from neutron.tests import base HOSTNAME = 'hostname' rpc_api = bgp_dragent.BgpDrPluginApi BGP_PLUGIN = '%s.%s' % (rpc_api.__module__, rpc_api.__name__) FAKE_BGPSPEAKER_UUID = str(uuid.uuid4()) FAKE_BGPPEER_UUID = str(uuid.uuid4()) FAKE_BGP_SPEAKER = {'id': FAKE_BGPSPEAKER_UUID, 'local_as': 12345, 'peers': [{'remote_as': '2345', 'peer_ip': '1.1.1.1', 'auth_type': 'none', 'password': ''}], 'advertised_routes': []} FAKE_BGP_PEER = {'id': FAKE_BGPPEER_UUID, 'remote_as': '2345', 'peer_ip': '1.1.1.1', 'auth_type': 'none', 'password': ''} FAKE_ROUTE = {'id': FAKE_BGPSPEAKER_UUID, 'destination': '2.2.2.2/32', 'next_hop': '3.3.3.3'} FAKE_ROUTES = {'routes': {'id': FAKE_BGPSPEAKER_UUID, 'destination': '2.2.2.2/32', 'next_hop': '3.3.3.3'} } class TestBgpDrAgent(base.BaseTestCase): def setUp(self): super(TestBgpDrAgent, self).setUp() cfg.CONF.register_opts(bgp_config.BGP_DRIVER_OPTS, 'BGP') cfg.CONF.register_opts(bgp_config.BGP_PROTO_CONFIG_OPTS, 'BGP') mock_log_p = mock.patch.object(bgp_dragent, 'LOG') self.mock_log = mock_log_p.start() self.driver_cls_p = mock.patch( 'neutron.services.bgp.agent.bgp_dragent.importutils.import_class') self.driver_cls = self.driver_cls_p.start() self.context = context.get_admin_context() def test_bgp_dragent_manager(self): state_rpc_str = 'neutron.agent.rpc.PluginReportStateAPI' # sync_state is needed for this test with mock.patch.object(bgp_dragent.BgpDrAgentWithStateReport, 'sync_state', autospec=True) as mock_sync_state: with mock.patch(state_rpc_str) as state_rpc: with mock.patch.object(sys, 'argv') as sys_argv: sys_argv.return_value = [ 'bgp_dragent', '--config-file', base.etcdir('neutron.conf')] common_config.init(sys.argv[1:]) agent_mgr = bgp_dragent.BgpDrAgentWithStateReport( 'testhost') eventlet.greenthread.sleep(1) agent_mgr.after_start() self.assertIsNotNone(len(mock_sync_state.mock_calls)) state_rpc.assert_has_calls( [mock.call(mock.ANY), mock.call().report_state(mock.ANY, mock.ANY, mock.ANY)]) def test_bgp_dragent_main_agent_manager(self): logging_str = 'neutron.agent.common.config.setup_logging' launcher_str = 'oslo_service.service.ServiceLauncher' with mock.patch(logging_str): with mock.patch.object(sys, 'argv') as sys_argv: with mock.patch(launcher_str) as launcher: sys_argv.return_value = ['bgp_dragent', '--config-file', base.etcdir('neutron.conf')] entry.main() launcher.assert_has_calls( [mock.call(cfg.CONF), mock.call().launch_service(mock.ANY), mock.call().wait()]) def test_run_completes_single_pass(self): bgp_dr = bgp_dragent.BgpDrAgent(HOSTNAME) with mock.patch.object(bgp_dr, 'sync_state') as sync_state: bgp_dr.run() self.assertIsNotNone(len(sync_state.mock_calls)) def test_after_start(self): bgp_dr = bgp_dragent.BgpDrAgent(HOSTNAME) with mock.patch.object(bgp_dr, 'sync_state') as sync_state: bgp_dr.after_start() self.assertIsNotNone(len(sync_state.mock_calls)) def _test_sync_state_helper(self, bgp_speaker_list=None, cached_info=None, safe_configure_call_count=0, sync_bgp_speaker_call_count=0, remove_bgp_speaker_call_count=0, remove_bgp_speaker_ids=None, added_bgp_speakers=None, synced_bgp_speakers=None): bgp_dr = bgp_dragent.BgpDrAgent(HOSTNAME) attrs_to_mock = dict( [(a, mock.MagicMock()) for a in 
['plugin_rpc', 'sync_bgp_speaker', 'safe_configure_dragent_for_bgp_speaker', 'remove_bgp_speaker_from_dragent']]) with mock.patch.multiple(bgp_dr, **attrs_to_mock): if not cached_info: cached_info = {} if not added_bgp_speakers: added_bgp_speakers = [] if not remove_bgp_speaker_ids: remove_bgp_speaker_ids = [] if not synced_bgp_speakers: synced_bgp_speakers = [] bgp_dr.plugin_rpc.get_bgp_speakers.return_value = bgp_speaker_list bgp_dr.cache.cache = cached_info bgp_dr.cache.clear_cache = mock.Mock() bgp_dr.sync_state(mock.ANY) self.assertEqual( remove_bgp_speaker_call_count, bgp_dr.remove_bgp_speaker_from_dragent.call_count) if remove_bgp_speaker_call_count: expected_calls = [mock.call(bgp_speaker_id) for bgp_speaker_id in remove_bgp_speaker_ids] bgp_dr.remove_bgp_speaker_from_dragent.assert_has_calls( expected_calls) self.assertEqual( safe_configure_call_count, bgp_dr.safe_configure_dragent_for_bgp_speaker.call_count) if safe_configure_call_count: expected_calls = [mock.call(bgp_speaker) for bgp_speaker in added_bgp_speakers] bgp_dr.safe_configure_dragent_for_bgp_speaker.assert_has_calls( expected_calls) self.assertEqual(sync_bgp_speaker_call_count, bgp_dr.sync_bgp_speaker.call_count) if sync_bgp_speaker_call_count: expected_calls = [mock.call(bgp_speaker) for bgp_speaker in synced_bgp_speakers] bgp_dr.sync_bgp_speaker.assert_has_calls(expected_calls) def test_sync_state_bgp_speaker_added(self): bgp_speaker_list = [{'id': 'foo-id', 'local_as': 12345, 'peers': [], 'advertised_routes': []}] self._test_sync_state_helper(bgp_speaker_list=bgp_speaker_list, safe_configure_call_count=1, added_bgp_speakers=bgp_speaker_list) def test_sync_state_bgp_speaker_deleted(self): bgp_speaker_list = [] cached_bgp_speaker = {'id': 'foo-id', 'local_as': 12345, 'peers': ['peer-1'], 'advertised_routes': []} cached_info = {'foo-id': cached_bgp_speaker} self._test_sync_state_helper(bgp_speaker_list=bgp_speaker_list, cached_info=cached_info, remove_bgp_speaker_call_count=1, remove_bgp_speaker_ids=['foo-id']) def test_sync_state_added_and_deleted(self): bgp_speaker_list = [{'id': 'foo-id', 'local_as': 12345, 'peers': [], 'advertised_routes': []}] cached_bgp_speaker = {'bgp_speaker': {'local_as': 12345}, 'peers': ['peer-1'], 'advertised_routes': []} cached_info = {'bar-id': cached_bgp_speaker} self._test_sync_state_helper(bgp_speaker_list=bgp_speaker_list, cached_info=cached_info, remove_bgp_speaker_call_count=1, remove_bgp_speaker_ids=['bar-id'], safe_configure_call_count=1, added_bgp_speakers=bgp_speaker_list) def test_sync_state_added_and_synced(self): bgp_speaker_list = [{'id': 'foo-id', 'local_as': 12345, 'peers': [], 'advertised_routes': []}, {'id': 'bar-id', 'peers': ['peer-2'], 'advertised_routes': []}, {'id': 'temp-id', 'peers': ['temp-1'], 'advertised_routes': []}] cached_bgp_speaker = {'id': 'bar-id', 'bgp_speaker': {'id': 'bar-id'}, 'peers': ['peer-1'], 'advertised_routes': []} cached_bgp_speaker_2 = {'id': 'temp-id', 'bgp_speaker': {'id': 'temp-id'}, 'peers': ['temp-1'], 'advertised_routes': []} cached_info = {'bar-id': cached_bgp_speaker, 'temp-id': cached_bgp_speaker_2} self._test_sync_state_helper(bgp_speaker_list=bgp_speaker_list, cached_info=cached_info, safe_configure_call_count=1, added_bgp_speakers=[bgp_speaker_list[0]], sync_bgp_speaker_call_count=2, synced_bgp_speakers=[bgp_speaker_list[1], bgp_speaker_list[2]] ) def test_sync_state_added_synced_and_removed(self): bgp_speaker_list = [{'id': 'foo-id', 'local_as': 12345, 'peers': [], 'advertised_routes': []}, {'id': 'bar-id', 'peers': ['peer-2'], 
'advertised_routes': []}] cached_bgp_speaker = {'id': 'bar-id', 'bgp_speaker': {'id': 'bar-id'}, 'peers': ['peer-1'], 'advertised_routes': []} cached_bgp_speaker_2 = {'id': 'temp-id', 'bgp_speaker': {'id': 'temp-id'}, 'peers': ['temp-1'], 'advertised_routes': []} cached_info = {'bar-id': cached_bgp_speaker, 'temp-id': cached_bgp_speaker_2} self._test_sync_state_helper(bgp_speaker_list=bgp_speaker_list, cached_info=cached_info, remove_bgp_speaker_call_count=1, remove_bgp_speaker_ids=['temp-id'], safe_configure_call_count=1, added_bgp_speakers=[bgp_speaker_list[0]], sync_bgp_speaker_call_count=1, synced_bgp_speakers=[bgp_speaker_list[1]]) def _test_sync_bgp_speaker_helper(self, bgp_speaker, cached_info=None, remove_bgp_peer_call_count=0, removed_bgp_peer_ip_list=None, withdraw_route_call_count=0, withdraw_routes_list=None, add_bgp_peers_called=False, advertise_routes_called=False): if not cached_info: cached_info = {} if not removed_bgp_peer_ip_list: removed_bgp_peer_ip_list = [] if not withdraw_routes_list: withdraw_routes_list = [] bgp_dr = bgp_dragent.BgpDrAgent(HOSTNAME) attrs_to_mock = dict( [(a, mock.MagicMock()) for a in ['remove_bgp_peer_from_bgp_speaker', 'add_bgp_peers_to_bgp_speaker', 'advertise_routes_via_bgp_speaker', 'withdraw_route_via_bgp_speaker']]) with mock.patch.multiple(bgp_dr, **attrs_to_mock): bgp_dr.cache.cache = cached_info bgp_dr.sync_bgp_speaker(bgp_speaker) self.assertEqual( remove_bgp_peer_call_count, bgp_dr.remove_bgp_peer_from_bgp_speaker.call_count) if remove_bgp_peer_call_count: expected_calls = [mock.call(bgp_speaker['id'], peer_ip) for peer_ip in removed_bgp_peer_ip_list] bgp_dr.remove_bgp_peer_from_bgp_speaker.assert_has_calls( expected_calls) self.assertEqual(add_bgp_peers_called, bgp_dr.add_bgp_peers_to_bgp_speaker.called) if add_bgp_peers_called: bgp_dr.add_bgp_peers_to_bgp_speaker.assert_called_with( bgp_speaker) self.assertEqual( withdraw_route_call_count, bgp_dr.withdraw_route_via_bgp_speaker.call_count) if withdraw_route_call_count: expected_calls = [mock.call(bgp_speaker['id'], 12345, route) for route in withdraw_routes_list] bgp_dr.withdraw_route_via_bgp_speaker.assert_has_calls( expected_calls) self.assertEqual(advertise_routes_called, bgp_dr.advertise_routes_via_bgp_speaker.called) if advertise_routes_called: bgp_dr.advertise_routes_via_bgp_speaker.assert_called_with( bgp_speaker) def test_sync_bgp_speaker_bgp_peers_updated(self): peers = [{'id': 'peer-1', 'peer_ip': '1.1.1.1'}, {'id': 'peer-2', 'peer_ip': '2.2.2.2'}] bgp_speaker = {'id': 'foo-id', 'local_as': 12345, 'peers': peers, 'advertised_routes': []} cached_peers = {'1.1.1.1': {'id': 'peer-2', 'peer_ip': '1.1.1.1'}, '3.3.3.3': {'id': 'peer-3', 'peer_ip': '3.3.3.3'}} cached_bgp_speaker = {'foo-id': {'bgp_speaker': {'local_as': 12345}, 'peers': cached_peers, 'advertised_routes': []}} self._test_sync_bgp_speaker_helper( bgp_speaker, cached_info=cached_bgp_speaker, remove_bgp_peer_call_count=1, removed_bgp_peer_ip_list=['3.3.3.3'], add_bgp_peers_called=True, advertise_routes_called=False) def test_sync_bgp_speaker_routes_updated(self): adv_routes = [{'destination': '10.0.0.0/24', 'next_hop': '1.1.1.1'}, {'destination': '20.0.0.0/24', 'next_hop': '2.2.2.2'}] bgp_speaker = {'id': 'foo-id', 'local_as': 12345, 'peers': {}, 'advertised_routes': adv_routes} cached_adv_routes = [{'destination': '20.0.0.0/24', 'next_hop': '2.2.2.2'}, {'destination': '30.0.0.0/24', 'next_hop': '3.3.3.3'}] cached_bgp_speaker = { 'foo-id': {'bgp_speaker': {'local_as': 12345}, 'peers': {}, 'advertised_routes': 
cached_adv_routes}} self._test_sync_bgp_speaker_helper( bgp_speaker, cached_info=cached_bgp_speaker, withdraw_route_call_count=1, withdraw_routes_list=[cached_adv_routes[1]], add_bgp_peers_called=False, advertise_routes_called=True) def test_sync_bgp_speaker_peers_routes_added(self): peers = [{'id': 'peer-1', 'peer_ip': '1.1.1.1'}, {'id': 'peer-2', 'peer_ip': '2.2.2.2'}] adv_routes = [{'destination': '10.0.0.0/24', 'next_hop': '1.1.1.1'}, {'destination': '20.0.0.0/24', 'next_hop': '2.2.2.2'}] bgp_speaker = {'id': 'foo-id', 'local_as': 12345, 'peers': peers, 'advertised_routes': adv_routes} cached_bgp_speaker = { 'foo-id': {'bgp_speaker': {'local_as': 12345}, 'peers': {}, 'advertised_routes': []}} self._test_sync_bgp_speaker_helper( bgp_speaker, cached_info=cached_bgp_speaker, add_bgp_peers_called=True, advertise_routes_called=True) def test_sync_state_plugin_error(self): with mock.patch(BGP_PLUGIN) as plug: mock_plugin = mock.Mock() mock_plugin.get_bgp_speakers.side_effect = Exception plug.return_value = mock_plugin with mock.patch.object(bgp_dragent.LOG, 'error') as log: bgp_dr = bgp_dragent.BgpDrAgent(HOSTNAME) with mock.patch.object(bgp_dr, 'schedule_full_resync') as schedule_full_resync: bgp_dr.sync_state(mock.ANY) self.assertTrue(log.called) self.assertTrue(schedule_full_resync.called) def test_periodic_resync(self): bgp_dr = bgp_dragent.BgpDrAgent(HOSTNAME) with mock.patch.object(bgp_dr, '_periodic_resync_helper') as resync_helper: bgp_dr.periodic_resync(self.context) self.assertTrue(resync_helper.called) def test_periodic_resync_helper(self): bgp_dr = bgp_dragent.BgpDrAgent(HOSTNAME) bgp_dr.schedule_resync('foo reason', 'foo-id') with mock.patch.object(bgp_dr, 'sync_state') as sync_state: sync_state.side_effect = RuntimeError with testtools.ExpectedException(RuntimeError): bgp_dr._periodic_resync_helper(self.context) self.assertTrue(sync_state.called) self.assertEqual(0, len(bgp_dr.needs_resync_reasons)) def _test_add_bgp_peer_helper(self, bgp_speaker_id, bgp_peer, cached_bgp_speaker, put_bgp_peer_called=True): bgp_dr = bgp_dragent.BgpDrAgent(HOSTNAME) bgp_dr.cache.cache = cached_bgp_speaker with mock.patch.object( bgp_dr.cache, 'put_bgp_peer') as mock_put_bgp_peer: bgp_dr.add_bgp_peer_to_bgp_speaker('foo-id', 12345, bgp_peer) if put_bgp_peer_called: mock_put_bgp_peer.assert_called_once_with( bgp_speaker_id, bgp_peer) else: self.assertFalse(mock_put_bgp_peer.called) def test_add_bgp_peer_not_cached(self): bgp_peer = {'peer_ip': '1.1.1.1', 'remote_as': 34567, 'auth_type': 'md5', 'password': 'abc'} cached_bgp_speaker = {'foo-id': {'bgp_speaker': {'local_as': 12345}, 'peers': {}, 'advertised_routes': []}} self._test_add_bgp_peer_helper('foo-id', bgp_peer, cached_bgp_speaker) def test_add_bgp_peer_already_cached(self): bgp_peer = {'peer_ip': '1.1.1.1', 'remote_as': 34567, 'auth_type': 'md5', 'password': 'abc'} cached_peers = {'1.1.1.1': {'peer_ip': '1.1.1.1', 'remote_as': 34567}} cached_bgp_speaker = {'foo-id': {'bgp_speaker': {'local_as': 12345}, 'peers': cached_peers, 'advertised_routes': []}} self._test_add_bgp_peer_helper('foo-id', bgp_peer, cached_bgp_speaker, put_bgp_peer_called=False) def _test_advertise_route_helper(self, bgp_speaker_id, route, cached_bgp_speaker, put_adv_route_called=True): bgp_dr = bgp_dragent.BgpDrAgent(HOSTNAME) bgp_dr.cache.cache = cached_bgp_speaker with mock.patch.object( bgp_dr.cache, 'put_adv_route') as mock_put_adv_route: bgp_dr.advertise_route_via_bgp_speaker(bgp_speaker_id, 12345, route) if put_adv_route_called:
mock_put_adv_route.assert_called_once_with( bgp_speaker_id, route) else: self.assertFalse(mock_put_adv_route.called) def test_advertise_route_helper_not_cached(self): route = {'destination': '10.0.0.0/24', 'next_hop': '1.1.1.1'} cached_bgp_speaker = {'foo-id': {'bgp_speaker': {'local_as': 12345}, 'peers': {}, 'advertised_routes': []}} self._test_advertise_route_helper('foo-id', route, cached_bgp_speaker, put_adv_route_called=True) def test_advertise_route_helper_already_cached(self): route = {'destination': '10.0.0.0/24', 'next_hop': '1.1.1.1'} cached_bgp_speaker = {'foo-id': {'bgp_speaker': {'local_as': 12345}, 'peers': {}, 'advertised_routes': [route]}} self._test_advertise_route_helper('foo-id', route, cached_bgp_speaker, put_adv_route_called=False) class TestBgpDrAgentEventHandler(base.BaseTestCase): cache_cls = 'neutron.services.bgp.agent.bgp_dragent.BgpSpeakerCache' def setUp(self): super(TestBgpDrAgentEventHandler, self).setUp() cfg.CONF.register_opts(bgp_config.BGP_DRIVER_OPTS, 'BGP') cfg.CONF.register_opts(bgp_config.BGP_PROTO_CONFIG_OPTS, 'BGP') mock_log_p = mock.patch.object(bgp_dragent, 'LOG') self.mock_log = mock_log_p.start() self.plugin_p = mock.patch(BGP_PLUGIN) plugin_cls = self.plugin_p.start() self.plugin = mock.Mock() plugin_cls.return_value = self.plugin self.cache_p = mock.patch(self.cache_cls) cache_cls = self.cache_p.start() self.cache = mock.Mock() cache_cls.return_value = self.cache self.driver_cls_p = mock.patch( 'neutron.services.bgp.agent.bgp_dragent.importutils.import_class') self.driver_cls = self.driver_cls_p.start() self.bgp_dr = bgp_dragent.BgpDrAgent(HOSTNAME) self.schedule_full_resync_p = mock.patch.object( self.bgp_dr, 'schedule_full_resync') self.schedule_full_resync = self.schedule_full_resync_p.start() self.context = mock.Mock() def test_bgp_speaker_create_end(self): payload = {'bgp_speaker': {'id': FAKE_BGPSPEAKER_UUID}} with mock.patch.object(self.bgp_dr, 'add_bgp_speaker_helper') as enable: self.bgp_dr.bgp_speaker_create_end(None, payload) enable.assert_called_once_with(FAKE_BGP_SPEAKER['id']) def test_bgp_peer_association_end(self): payload = {'bgp_peer': {'speaker_id': FAKE_BGPSPEAKER_UUID, 'peer_id': FAKE_BGPPEER_UUID}} with mock.patch.object(self.bgp_dr, 'add_bgp_peer_helper') as enable: self.bgp_dr.bgp_peer_association_end(None, payload) enable.assert_called_once_with(FAKE_BGP_SPEAKER['id'], FAKE_BGP_PEER['id']) def test_route_advertisement_end(self): routes = [{'destination': '2.2.2.2/32', 'next_hop': '3.3.3.3'}, {'destination': '4.4.4.4/32', 'next_hop': '5.5.5.5'}] payload = {'advertise_routes': {'speaker_id': FAKE_BGPSPEAKER_UUID, 'routes': routes}} expected_calls = [mock.call(FAKE_BGP_SPEAKER['id'], routes)] with mock.patch.object(self.bgp_dr, 'add_routes_helper') as enable: self.bgp_dr.bgp_routes_advertisement_end(None, payload) enable.assert_has_calls(expected_calls) def test_add_bgp_speaker_helper(self): self.plugin.get_bgp_speaker_info.return_value = FAKE_BGP_SPEAKER add_bs_p = mock.patch.object(self.bgp_dr, 'add_bgp_speaker_on_dragent') add_bs = add_bs_p.start() self.bgp_dr.add_bgp_speaker_helper(FAKE_BGP_SPEAKER['id']) self.plugin.assert_has_calls([ mock.call.get_bgp_speaker_info(mock.ANY, FAKE_BGP_SPEAKER['id'])]) add_bs.assert_called_once_with(FAKE_BGP_SPEAKER) def test_add_bgp_peer_helper(self): self.plugin.get_bgp_peer_info.return_value = FAKE_BGP_PEER add_bp_p = mock.patch.object(self.bgp_dr, 'add_bgp_peer_to_bgp_speaker') add_bp = add_bp_p.start() self.bgp_dr.add_bgp_peer_helper(FAKE_BGP_SPEAKER['id'], FAKE_BGP_PEER['id']) 
self.plugin.assert_has_calls([ mock.call.get_bgp_peer_info(mock.ANY, FAKE_BGP_PEER['id'])]) self.assertEqual(1, add_bp.call_count) def test_add_routes_helper(self): add_rt_p = mock.patch.object(self.bgp_dr, 'advertise_route_via_bgp_speaker') add_bp = add_rt_p.start() self.bgp_dr.add_routes_helper(FAKE_BGP_SPEAKER['id'], FAKE_ROUTES) self.assertEqual(1, add_bp.call_count) def test_bgp_speaker_remove_end(self): payload = {'bgp_speaker': {'id': FAKE_BGPSPEAKER_UUID}} with mock.patch.object(self.bgp_dr, 'remove_bgp_speaker_from_dragent') as disable: self.bgp_dr.bgp_speaker_remove_end(None, payload) disable.assert_called_once_with(FAKE_BGP_SPEAKER['id']) def test_bgp_peer_disassociation_end(self): payload = {'bgp_peer': {'speaker_id': FAKE_BGPSPEAKER_UUID, 'peer_ip': '1.1.1.1'}} with mock.patch.object(self.bgp_dr, 'remove_bgp_peer_from_bgp_speaker') as disable: self.bgp_dr.bgp_peer_disassociation_end(None, payload) disable.assert_called_once_with(FAKE_BGPSPEAKER_UUID, FAKE_BGP_PEER['peer_ip']) def test_bgp_routes_withdrawal_end(self): withdraw_routes = [{'destination': '2.2.2.2/32'}, {'destination': '3.3.3.3/32'}] payload = {'withdraw_routes': {'speaker_id': FAKE_BGPSPEAKER_UUID, 'routes': withdraw_routes}} expected_calls = [mock.call(FAKE_BGP_SPEAKER['id'], withdraw_routes)] with mock.patch.object(self.bgp_dr, 'withdraw_routes_helper') as disable: self.bgp_dr.bgp_routes_withdrawal_end(None, payload) disable.assert_has_calls(expected_calls) class TestBGPSpeakerCache(base.BaseTestCase): def setUp(self): super(TestBGPSpeakerCache, self).setUp() self.expected_cache = {FAKE_BGP_SPEAKER['id']: {'bgp_speaker': FAKE_BGP_SPEAKER, 'peers': {}, 'advertised_routes': []}} self.bs_cache = bgp_dragent.BgpSpeakerCache() def test_put_bgp_speaker(self): self.bs_cache.put_bgp_speaker(FAKE_BGP_SPEAKER) self.assertEqual(self.expected_cache, self.bs_cache.cache) def test_put_bgp_speaker_existing(self): prev_bs_info = {'id': 'foo-id'} with mock.patch.object(self.bs_cache, 'remove_bgp_speaker_by_id') as remove: self.bs_cache.cache[FAKE_BGP_SPEAKER['id']] = prev_bs_info self.bs_cache.put_bgp_speaker(FAKE_BGP_SPEAKER) remove.assert_called_once_with(prev_bs_info) self.assertEqual(self.expected_cache, self.bs_cache.cache) def test_remove_bgp_speaker_by_id(self): self.bs_cache.put_bgp_speaker(FAKE_BGP_SPEAKER) self.assertEqual(1, len(self.bs_cache.cache)) self.bs_cache.remove_bgp_speaker_by_id(FAKE_BGP_SPEAKER['id']) self.assertEqual(0, len(self.bs_cache.cache)) def test_get_bgp_speaker_by_id(self): self.bs_cache.put_bgp_speaker(FAKE_BGP_SPEAKER) self.assertEqual( FAKE_BGP_SPEAKER, self.bs_cache.get_bgp_speaker_by_id(FAKE_BGP_SPEAKER['id'])) def test_get_bgp_speaker_ids(self): self.bs_cache.put_bgp_speaker(FAKE_BGP_SPEAKER) self.assertEqual([FAKE_BGP_SPEAKER['id']], list(self.bs_cache.get_bgp_speaker_ids())) def _test_bgp_peer_helper(self, remove=False): self.bs_cache.put_bgp_speaker(FAKE_BGP_SPEAKER) self.bs_cache.put_bgp_peer(FAKE_BGP_SPEAKER['id'], FAKE_BGP_PEER) expected_cache = copy.deepcopy(self.expected_cache) expected_cache[FAKE_BGP_SPEAKER['id']]['peers'] = { FAKE_BGP_PEER['peer_ip']: FAKE_BGP_PEER} self.assertEqual(expected_cache, self.bs_cache.cache) if remove: self.bs_cache.remove_bgp_peer_by_ip(FAKE_BGP_SPEAKER['id'], 'foo-ip') self.assertEqual(expected_cache, self.bs_cache.cache) self.bs_cache.remove_bgp_peer_by_ip(FAKE_BGP_SPEAKER['id'], FAKE_BGP_PEER['peer_ip']) self.assertEqual(self.expected_cache, self.bs_cache.cache) def test_put_bgp_peer(self): self._test_bgp_peer_helper() def test_remove_bgp_peer(self):
self._test_bgp_peer_helper(remove=True) def _test_bgp_speaker_adv_route_helper(self, remove=False): self.bs_cache.put_bgp_speaker(FAKE_BGP_SPEAKER) self.bs_cache.put_adv_route(FAKE_BGP_SPEAKER['id'], FAKE_ROUTE) expected_cache = copy.deepcopy(self.expected_cache) expected_cache[FAKE_BGP_SPEAKER['id']]['advertised_routes'].append( FAKE_ROUTE) self.assertEqual(expected_cache, self.bs_cache.cache) fake_route_2 = copy.deepcopy(FAKE_ROUTE) fake_route_2['destination'] = '4.4.4.4/32' self.bs_cache.put_adv_route(FAKE_BGP_SPEAKER['id'], fake_route_2) expected_cache[FAKE_BGP_SPEAKER['id']]['advertised_routes'].append( fake_route_2) self.assertEqual(expected_cache, self.bs_cache.cache) if remove: self.bs_cache.remove_adv_route(FAKE_BGP_SPEAKER['id'], fake_route_2) expected_cache[FAKE_BGP_SPEAKER['id']]['advertised_routes'] = ( [FAKE_ROUTE]) self.assertEqual(expected_cache, self.bs_cache.cache) self.bs_cache.remove_adv_route(FAKE_BGP_SPEAKER['id'], FAKE_ROUTE) self.assertEqual(self.expected_cache, self.bs_cache.cache) def test_put_bgp_speaker_adv_route(self): self._test_bgp_speaker_adv_route_helper() def test_remove_bgp_speaker_adv_route(self): self._test_bgp_speaker_adv_route_helper(remove=True) def test_is_bgp_speaker_adv_route_present(self): self._test_bgp_speaker_adv_route_helper() self.assertTrue(self.bs_cache.is_route_advertised( FAKE_BGP_SPEAKER['id'], FAKE_ROUTE)) self.assertFalse(self.bs_cache.is_route_advertised( FAKE_BGP_SPEAKER['id'], {'destination': 'foo-destination', 'next_hop': 'foo-next-hop'})) neutron-8.4.0/neutron/tests/unit/services/test_provider_configuration.py0000664000567000056710000002560513044372760030224 0ustar jenkinsjenkins00000000000000# Copyright 2013 VMware, Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
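# The tests below parse ``service_provider`` entries of the form
# ``service_type:name:driver`` with an optional trailing ``:default`` flag;
# anything else (an empty field, a fourth field other than 'default', a name
# longer than 255 characters) must be rejected. A self-contained sketch of
# such a parser (illustrative only -- the real logic lives in
# neutron.services.provider_configuration and raises n_exc.Invalid):
#
#     def parse_provider(entry):
#         fields = entry.split(':')
#         if len(fields) == 4 and fields[3] == 'default':
#             default = True
#             fields = fields[:3]
#         elif len(fields) == 3:
#             default = False
#         else:
#             raise ValueError('invalid provider: %s' % entry)
#         svc_type, name, driver = fields
#         if not (svc_type and name and driver) or len(name) > 255:
#             raise ValueError('invalid provider: %s' % entry)
#         return {'service_type': svc_type, 'name': name,
#                 'driver': driver, 'default': default}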
import os import shutil import mock from oslo_config import cfg from neutron.common import exceptions as n_exc from neutron import manager from neutron.plugins.common import constants from neutron.services import provider_configuration as provconf from neutron.tests import base class ParseServiceProviderConfigurationTestCase(base.BaseTestCase): def setUp(self): super(ParseServiceProviderConfigurationTestCase, self).setUp() self.service_providers = mock.patch.object( provconf.NeutronModule, 'service_providers').start() def _set_override(self, service_providers): self.service_providers.return_value = service_providers def test_default_service_provider_configuration(self): providers = cfg.CONF.service_providers.service_provider self.assertEqual([], providers) def test_parse_single_service_provider_opt(self): self._set_override([constants.LOADBALANCER + ':lbaas:driver_path']) expected = {'service_type': constants.LOADBALANCER, 'name': 'lbaas', 'driver': 'driver_path', 'default': False} res = provconf.parse_service_provider_opt() self.assertEqual(1, len(res)) self.assertEqual([expected], res) def test_parse_single_default_service_provider_opt(self): self._set_override([constants.LOADBALANCER + ':lbaas:driver_path:default']) expected = {'service_type': constants.LOADBALANCER, 'name': 'lbaas', 'driver': 'driver_path', 'default': True} res = provconf.parse_service_provider_opt() self.assertEqual(1, len(res)) self.assertEqual([expected], res) def test_parse_multi_service_provider_opt(self): self._set_override([constants.LOADBALANCER + ':lbaas:driver_path', constants.LOADBALANCER + ':name1:path1', constants.LOADBALANCER + ':name2:path2:default']) res = provconf.parse_service_provider_opt() # This parsing crosses repos if additional projects are installed, # so check that at least what we expect is there; there may be more. 
self.assertTrue(len(res) >= 3) def test_parse_service_provider_invalid_format(self): self._set_override([constants.LOADBALANCER + ':lbaas:driver_path', 'svc_type:name1:path1:def']) self.assertRaises(n_exc.Invalid, provconf.parse_service_provider_opt) self._set_override([constants.LOADBALANCER + ':', 'svc_type:name1:path1:def']) self.assertRaises(n_exc.Invalid, provconf.parse_service_provider_opt) def test_parse_service_provider_name_too_long(self): name = 'a' * 256 self._set_override([constants.LOADBALANCER + ':' + name + ':driver_path', 'svc_type:name1:path1:def']) self.assertRaises(n_exc.Invalid, provconf.parse_service_provider_opt) class ProviderConfigurationTestCase(base.BaseTestCase): def setUp(self): super(ProviderConfigurationTestCase, self).setUp() self.service_providers = mock.patch.object( provconf.NeutronModule, 'service_providers').start() def _set_override(self, service_providers): self.service_providers.return_value = service_providers def test_ensure_driver_unique(self): pconf = provconf.ProviderConfiguration() pconf.providers[('svctype', 'name')] = {'driver': 'driver', 'default': True} self.assertRaises(n_exc.Invalid, pconf._ensure_driver_unique, 'driver') self.assertIsNone(pconf._ensure_driver_unique('another_driver1')) def test_ensure_default_unique(self): pconf = provconf.ProviderConfiguration() pconf.providers[('svctype', 'name')] = {'driver': 'driver', 'default': True} self.assertRaises(n_exc.Invalid, pconf._ensure_default_unique, 'svctype', True) self.assertIsNone(pconf._ensure_default_unique('svctype', False)) self.assertIsNone(pconf._ensure_default_unique('svctype1', True)) self.assertIsNone(pconf._ensure_default_unique('svctype1', False)) def test_add_provider(self): pconf = provconf.ProviderConfiguration() prov = {'service_type': constants.LOADBALANCER, 'name': 'name', 'driver': 'path', 'default': False} pconf.add_provider(prov) self.assertEqual(1, len(pconf.providers)) self.assertEqual([(constants.LOADBALANCER, 'name')], list(pconf.providers.keys())) self.assertEqual([{'driver': 'path', 'default': False}], list(pconf.providers.values())) def test_add_duplicate_provider(self): pconf = provconf.ProviderConfiguration() prov = {'service_type': constants.LOADBALANCER, 'name': 'name', 'driver': 'path', 'default': False} pconf.add_provider(prov) self.assertRaises(n_exc.Invalid, pconf.add_provider, prov) self.assertEqual(1, len(pconf.providers)) def test_get_service_providers(self): self._set_override([constants.LOADBALANCER + ':name:path', constants.LOADBALANCER + ':name2:path2', 'st2:name:driver:default', 'st3:name2:driver2:default']) provs = [{'service_type': constants.LOADBALANCER, 'name': 'name', 'driver': 'path', 'default': False}, {'service_type': constants.LOADBALANCER, 'name': 'name2', 'driver': 'path2', 'default': False}, {'service_type': 'st2', 'name': 'name', 'driver': 'driver', 'default': True }, {'service_type': 'st3', 'name': 'name2', 'driver': 'driver2', 'default': True}] pconf = provconf.ProviderConfiguration() for prov in provs: p = pconf.get_service_providers( filters={'name': [prov['name']], 'service_type': prov['service_type']} ) self.assertEqual([prov], p) def test_get_service_providers_with_fields(self): self._set_override([constants.LOADBALANCER + ":name:path", constants.LOADBALANCER + ":name2:path2"]) provs = [{'service_type': constants.LOADBALANCER, 'name': 'name', 'driver': 'path', 'default': False}, {'service_type': constants.LOADBALANCER, 'name': 'name2', 'driver': 'path2', 'default': False}] pconf = provconf.ProviderConfiguration() for prov 
in provs: p = pconf.get_service_providers( filters={'name': [prov['name']], 'service_type': prov['service_type']}, fields=['name'] ) self.assertEqual([{'name': prov['name']}], p) class GetProviderDriverClassTestCase(base.BaseTestCase): def test_get_provider_driver_class_hit(self): driver = 'ml2' expected = 'neutron.plugins.ml2.plugin.Ml2Plugin' actual = provconf.get_provider_driver_class( driver, namespace=manager.CORE_PLUGINS_NAMESPACE) self.assertEqual(expected, actual) def test_get_provider_driver_class_miss(self): retval = provconf.get_provider_driver_class('foo') self.assertEqual('foo', retval) class NeutronModuleTestCase(base.BaseTestCase): def test_can_parse_multi_opt_service_provider_from_conf_file(self): mod = provconf.NeutronModule('neutron_test') mod.ini(base.ETCDIR) self.assertEqual(['foo', 'bar'], mod.service_providers(), 'Expected two providers, only one read') class NeutronModuleConfigDirTestCase(base.BaseTestCase): def setup_config(self): self.config_parse(args=['--config-dir', base.ETCDIR]) def test_can_parse_multi_opt_service_provider_from_conf_dir(self): mod = provconf.NeutronModule('neutron_test') mod.ini() self.assertEqual(['foo', 'bar'], mod.service_providers()) class NeutronModuleMultiConfigDirTestCase(base.BaseTestCase): def setUp(self): self.tmpdir = self.get_default_temp_dir().path shutil.copyfile( os.path.join(base.ETCDIR, 'neutron_test2.conf.example'), os.path.join(self.tmpdir, 'neutron_test.conf')) super(NeutronModuleMultiConfigDirTestCase, self).setUp() def setup_config(self): self.config_parse(args=[ # NOTE(ihrachys): we expect the second directory to be checked '--config-dir', self.tmpdir, '--config-dir', base.ETCDIR ]) def test_read_configuration_from_all_matching_files(self): mod = provconf.NeutronModule('neutron_test') mod.ini() self.assertEqual(['zzz', 'foo', 'bar'], mod.service_providers()) class NeutronModuleMultiConfigFileTestCase(base.BaseTestCase): def setUp(self): self.tmpdir = self.get_default_temp_dir().path self.filepath1 = os.path.join(self.tmpdir, 'neutron_test.conf') self.filepath2 = os.path.join(base.ETCDIR, 'neutron_test.conf') shutil.copyfile( os.path.join(base.ETCDIR, 'neutron_test2.conf.example'), self.filepath1) super(NeutronModuleMultiConfigFileTestCase, self).setUp() def setup_config(self): self.config_parse(args=[ # NOTE(ihrachys): we expect both directories to be checked '--config-file', self.filepath1, '--config-file', self.filepath2 ]) def test_read_configuration_from_all_matching_files(self): mod = provconf.NeutronModule('neutron_test') mod.ini() self.assertEqual(['zzz', 'foo', 'bar'], mod.service_providers()) neutron-8.4.0/neutron/tests/unit/services/qos/0000775000567000056710000000000013044373210022613 5ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/tests/unit/services/qos/__init__.py0000664000567000056710000000000013044372736024726 0ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/tests/unit/services/qos/notification_drivers/0000775000567000056710000000000013044373210027037 5ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/tests/unit/services/qos/notification_drivers/test_message_queue.py0000664000567000056710000000560113044372760033313 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import mock from oslo_utils import uuidutils from neutron.api.rpc.callbacks import events from neutron import context from neutron.objects.qos import policy as policy_object from neutron.objects.qos import rule as rule_object from neutron.services.qos.notification_drivers import message_queue from neutron.tests.unit.services.qos import base DB_PLUGIN_KLASS = 'neutron.db.db_base_plugin_v2.NeutronDbPluginV2' class TestQosRpcNotificationDriver(base.BaseQosTestCase): def setUp(self): super(TestQosRpcNotificationDriver, self).setUp() rpc_api_cls = mock.patch('neutron.api.rpc.handlers.resources_rpc' '.ResourcesPushRpcApi').start() self.rpc_api = rpc_api_cls.return_value self.driver = message_queue.RpcQosServiceNotificationDriver() policy_id = uuidutils.generate_uuid() self.policy_data = {'policy': { 'id': policy_id, 'tenant_id': uuidutils.generate_uuid(), 'name': 'test-policy', 'description': 'test policy description', 'shared': True}} self.rule_data = {'bandwidth_limit_rule': { 'id': policy_id, 'max_kbps': 100, 'max_burst_kbps': 150}} self.context = context.get_admin_context() self.policy = policy_object.QosPolicy(self.context, **self.policy_data['policy']) self.rule = rule_object.QosBandwidthLimitRule( self.context, **self.rule_data['bandwidth_limit_rule']) def _validate_push_params(self, event_type, policy): self.rpc_api.push.assert_called_once_with(self.context, policy, event_type) def test_create_policy(self): self.driver.create_policy(self.context, self.policy) self.assertFalse(self.rpc_api.push.called) def test_update_policy(self): self.driver.update_policy(self.context, self.policy) self._validate_push_params(events.UPDATED, self.policy) def test_delete_policy(self): self.driver.delete_policy(self.context, self.policy) self._validate_push_params(events.DELETED, self.policy) neutron-8.4.0/neutron/tests/unit/services/qos/notification_drivers/__init__.py0000664000567000056710000000000013044372736031152 0ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/tests/unit/services/qos/notification_drivers/test_manager.py0000664000567000056710000001064613044372760032102 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import mock from oslo_config import cfg from oslo_utils import uuidutils from neutron.api.rpc.callbacks import events from neutron import context from neutron.objects.qos import policy as policy_object from neutron.services.qos.notification_drivers import manager as driver_mgr from neutron.services.qos.notification_drivers import message_queue from neutron.tests.unit.services.qos import base DUMMY_DRIVER = ("neutron.tests.unit.services.qos.notification_drivers."
"dummy.DummyQosServiceNotificationDriver") def _load_multiple_drivers(): cfg.CONF.set_override( "notification_drivers", ["message_queue", DUMMY_DRIVER], "qos") class TestQosDriversManagerBase(base.BaseQosTestCase): def setUp(self): super(TestQosDriversManagerBase, self).setUp() self.config_parse() self.setup_coreplugin() config = cfg.ConfigOpts() config.register_opts(driver_mgr.QOS_PLUGIN_OPTS, "qos") self.policy_data = {'policy': { 'id': uuidutils.generate_uuid(), 'tenant_id': uuidutils.generate_uuid(), 'name': 'test-policy', 'description': 'test policy description', 'shared': True}} self.context = context.get_admin_context() self.policy = policy_object.QosPolicy(self.context, **self.policy_data['policy']) ctxt = None self.kwargs = {'context': ctxt} class TestQosDriversManager(TestQosDriversManagerBase): def setUp(self): super(TestQosDriversManager, self).setUp() #TODO(Qos): Fix this unittest to test manager and not message_queue # notification driver rpc_api_cls = mock.patch('neutron.api.rpc.handlers.resources_rpc' '.ResourcesPushRpcApi').start() self.rpc_api = rpc_api_cls.return_value self.driver_manager = driver_mgr.QosServiceNotificationDriverManager() def _validate_registry_params(self, event_type, policy): self.rpc_api.push.assert_called_with(self.context, policy, event_type) def test_create_policy_default_configuration(self): #RPC driver should be loaded by default self.driver_manager.create_policy(self.context, self.policy) self.assertFalse(self.rpc_api.push.called) def test_update_policy_default_configuration(self): #RPC driver should be loaded by default self.driver_manager.update_policy(self.context, self.policy) self._validate_registry_params(events.UPDATED, self.policy) def test_delete_policy_default_configuration(self): #RPC driver should be loaded by default self.driver_manager.delete_policy(self.context, self.policy) self._validate_registry_params(events.DELETED, self.policy) class TestQosDriversManagerMulti(TestQosDriversManagerBase): def _test_multi_drivers_configuration_op(self, op): _load_multiple_drivers() driver_manager = driver_mgr.QosServiceNotificationDriverManager() handler = '%s_policy' % op with mock.patch('.'.join([DUMMY_DRIVER, handler])) as dummy_mock: rpc_driver = message_queue.RpcQosServiceNotificationDriver with mock.patch.object(rpc_driver, handler) as rpc_mock: getattr(driver_manager, handler)(self.context, self.policy) for mock_ in (dummy_mock, rpc_mock): mock_.assert_called_with(self.context, self.policy) def test_multi_drivers_configuration_create(self): self._test_multi_drivers_configuration_op('create') def test_multi_drivers_configuration_update(self): self._test_multi_drivers_configuration_op('update') def test_multi_drivers_configuration_delete(self): self._test_multi_drivers_configuration_op('delete') neutron-8.4.0/neutron/tests/unit/services/qos/notification_drivers/dummy.py0000664000567000056710000000173413044372736030565 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from neutron.services.qos.notification_drivers import qos_base class DummyQosServiceNotificationDriver( qos_base.QosServiceNotificationDriverBase): """Dummy service notification driver for QoS.""" def get_description(self): return "Dummy" def create_policy(self, policy): pass def update_policy(self, policy): pass def delete_policy(self, policy): pass neutron-8.4.0/neutron/tests/unit/services/qos/base.py0000664000567000056710000000304313044372736024113 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import mock from neutron.api.rpc.callbacks.consumer import registry as cons_registry from neutron.api.rpc.callbacks.producer import registry as prod_registry from neutron.api.rpc.callbacks import resource_manager from neutron.tests.unit import testlib_api class BaseQosTestCase(testlib_api.SqlTestCase): def setUp(self): super(BaseQosTestCase, self).setUp() with mock.patch.object( resource_manager.ResourceCallbacksManager, '_singleton', new_callable=mock.PropertyMock(return_value=False)): self.cons_mgr = resource_manager.ConsumerResourceCallbacksManager() self.prod_mgr = resource_manager.ProducerResourceCallbacksManager() for mgr in (self.cons_mgr, self.prod_mgr): mgr.clear() mock.patch.object( cons_registry, '_get_manager', return_value=self.cons_mgr).start() mock.patch.object( prod_registry, '_get_manager', return_value=self.prod_mgr).start() neutron-8.4.0/neutron/tests/unit/services/qos/test_qos_plugin.py0000664000567000056710000002326513044372760026425 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
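# For orientation, a minimal sketch of the payload shapes these plugin tests
# build in setUp() (ids are generated with uuidutils at runtime; the literal
# values below are illustrative only):
#
#     policy_data = {'policy': {'id': '<uuid>', 'tenant_id': '<uuid>',
#                               'name': 'test-policy',
#                               'description': 'Test policy description',
#                               'shared': True}}
#     rule_data = {'bandwidth_limit_rule': {'id': '<uuid>', 'max_kbps': 100,
#                                           'max_burst_kbps': 150}}
#
# and of the call under test, e.g.:
#
#     qos_plugin.create_policy(ctxt, policy_data)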
import mock from oslo_config import cfg from oslo_utils import uuidutils from neutron.common import exceptions as n_exc from neutron import context from neutron import manager from neutron.objects import base as base_object from neutron.objects.qos import policy as policy_object from neutron.objects.qos import rule as rule_object from neutron.plugins.common import constants from neutron.tests.unit.services.qos import base DB_PLUGIN_KLASS = 'neutron.db.db_base_plugin_v2.NeutronDbPluginV2' class TestQosPlugin(base.BaseQosTestCase): def setUp(self): super(TestQosPlugin, self).setUp() self.setup_coreplugin() mock.patch('neutron.objects.db.api.create_object').start() mock.patch('neutron.objects.db.api.update_object').start() mock.patch('neutron.objects.db.api.delete_object').start() mock.patch('neutron.objects.db.api.get_object').start() mock.patch( 'neutron.objects.qos.policy.QosPolicy.obj_load_attr').start() cfg.CONF.set_override("core_plugin", DB_PLUGIN_KLASS) cfg.CONF.set_override("service_plugins", ["qos"]) mgr = manager.NeutronManager.get_instance() self.qos_plugin = mgr.get_service_plugins().get( constants.QOS) self.qos_plugin.notification_driver_manager = mock.Mock() self.ctxt = context.Context('fake_user', 'fake_tenant') policy_id = uuidutils.generate_uuid() self.policy_data = { 'policy': {'id': policy_id, 'tenant_id': uuidutils.generate_uuid(), 'name': 'test-policy', 'description': 'Test policy description', 'shared': True}} self.rule_data = { 'bandwidth_limit_rule': {'id': policy_id, 'max_kbps': 100, 'max_burst_kbps': 150}} self.policy = policy_object.QosPolicy( self.ctxt, **self.policy_data['policy']) self.rule = rule_object.QosBandwidthLimitRule( self.ctxt, **self.rule_data['bandwidth_limit_rule']) def _validate_notif_driver_params(self, method_name): method = getattr(self.qos_plugin.notification_driver_manager, method_name) self.assertTrue(method.called) self.assertIsInstance( method.call_args[0][1], policy_object.QosPolicy) @mock.patch( 'neutron.objects.rbac_db.RbacNeutronDbObjectMixin' '.create_rbac_policy') def test_add_policy(self, *mocks): self.qos_plugin.create_policy(self.ctxt, self.policy_data) self._validate_notif_driver_params('create_policy') @mock.patch( 'neutron.objects.rbac_db.RbacNeutronDbObjectMixin' '.create_rbac_policy') def test_update_policy(self, *mocks): fields = base_object.get_updatable_fields( policy_object.QosPolicy, self.policy_data['policy']) self.qos_plugin.update_policy( self.ctxt, self.policy.id, {'policy': fields}) self._validate_notif_driver_params('update_policy') @mock.patch('neutron.objects.db.api.get_object', return_value=None) def test_delete_policy(self, *mocks): self.qos_plugin.delete_policy(self.ctxt, self.policy.id) self._validate_notif_driver_params('delete_policy') def test_create_policy_rule(self): with mock.patch('neutron.objects.qos.policy.QosPolicy.get_object', return_value=self.policy): self.qos_plugin.create_policy_bandwidth_limit_rule( self.ctxt, self.policy.id, self.rule_data) self._validate_notif_driver_params('update_policy') def test_update_policy_rule(self): _policy = policy_object.QosPolicy( self.ctxt, **self.policy_data['policy']) with mock.patch('neutron.objects.qos.policy.QosPolicy.get_object', return_value=_policy): setattr(_policy, "rules", [self.rule]) self.qos_plugin.update_policy_bandwidth_limit_rule( self.ctxt, self.rule.id, self.policy.id, self.rule_data) self._validate_notif_driver_params('update_policy') def test_update_policy_rule_bad_policy(self): _policy = policy_object.QosPolicy( self.ctxt, 
**self.policy_data['policy']) with mock.patch('neutron.objects.qos.policy.QosPolicy.get_object', return_value=_policy): setattr(_policy, "rules", []) self.assertRaises( n_exc.QosRuleNotFound, self.qos_plugin.update_policy_bandwidth_limit_rule, self.ctxt, self.rule.id, self.policy.id, self.rule_data) def test_delete_policy_rule(self): _policy = policy_object.QosPolicy( self.ctxt, **self.policy_data['policy']) with mock.patch('neutron.objects.qos.policy.QosPolicy.get_object', return_value=_policy): setattr(_policy, "rules", [self.rule]) self.qos_plugin.delete_policy_bandwidth_limit_rule( self.ctxt, self.rule.id, _policy.id) self._validate_notif_driver_params('update_policy') def test_delete_policy_rule_bad_policy(self): _policy = policy_object.QosPolicy( self.ctxt, **self.policy_data['policy']) with mock.patch('neutron.objects.qos.policy.QosPolicy.get_object', return_value=_policy): setattr(_policy, "rules", []) self.assertRaises( n_exc.QosRuleNotFound, self.qos_plugin.delete_policy_bandwidth_limit_rule, self.ctxt, self.rule.id, _policy.id) def test_get_policy_bandwidth_limit_rules_for_policy(self): with mock.patch('neutron.objects.qos.policy.QosPolicy.get_object', return_value=self.policy): with mock.patch('neutron.objects.qos.rule.' 'QosBandwidthLimitRule.' 'get_objects') as get_object_mock: self.qos_plugin.get_policy_bandwidth_limit_rules( self.ctxt, self.policy.id) get_object_mock.assert_called_once_with( self.ctxt, qos_policy_id=self.policy.id) def test_get_policy_bandwidth_limit_rules_for_policy_with_filters(self): with mock.patch('neutron.objects.qos.policy.QosPolicy.get_object', return_value=self.policy): with mock.patch('neutron.objects.qos.rule.' 'QosBandwidthLimitRule.' 'get_objects') as get_object_mock: filters = {'filter': 'filter_id'} self.qos_plugin.get_policy_bandwidth_limit_rules( self.ctxt, self.policy.id, filters=filters) get_object_mock.assert_called_once_with( self.ctxt, qos_policy_id=self.policy.id, filter='filter_id') def test_get_policy_for_nonexistent_policy(self): with mock.patch('neutron.objects.qos.policy.QosPolicy.get_object', return_value=None): self.assertRaises( n_exc.QosPolicyNotFound, self.qos_plugin.get_policy, self.ctxt, self.policy.id) def test_get_policy_bandwidth_limit_rule_for_nonexistent_policy(self): with mock.patch('neutron.objects.qos.policy.QosPolicy.get_object', return_value=None): self.assertRaises( n_exc.QosPolicyNotFound, self.qos_plugin.get_policy_bandwidth_limit_rule, self.ctxt, self.rule.id, self.policy.id) def test_get_policy_bandwidth_limit_rules_for_nonexistent_policy(self): with mock.patch('neutron.objects.qos.policy.QosPolicy.get_object', return_value=None): self.assertRaises( n_exc.QosPolicyNotFound, self.qos_plugin.get_policy_bandwidth_limit_rules, self.ctxt, self.policy.id) def test_create_policy_rule_for_nonexistent_policy(self): with mock.patch('neutron.objects.qos.policy.QosPolicy.get_object', return_value=None): self.assertRaises( n_exc.QosPolicyNotFound, self.qos_plugin.create_policy_bandwidth_limit_rule, self.ctxt, self.policy.id, self.rule_data) def test_update_policy_rule_for_nonexistent_policy(self): with mock.patch('neutron.objects.qos.policy.QosPolicy.get_object', return_value=None): self.assertRaises( n_exc.QosPolicyNotFound, self.qos_plugin.update_policy_bandwidth_limit_rule, self.ctxt, self.rule.id, self.policy.id, self.rule_data) def test_delete_policy_rule_for_nonexistent_policy(self): with mock.patch('neutron.objects.qos.policy.QosPolicy.get_object', return_value=None): self.assertRaises( n_exc.QosPolicyNotFound, 
self.qos_plugin.delete_policy_bandwidth_limit_rule, self.ctxt, self.rule.id, self.policy.id) neutron-8.4.0/neutron/tests/unit/services/auto_allocate/0000775000567000056710000000000013044373210024625 5ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/tests/unit/services/auto_allocate/__init__.py0000664000567000056710000000000013044372736026740 0ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/tests/unit/services/auto_allocate/test_db.py0000664000567000056710000000704513044372760026642 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import mock from neutron.common import exceptions as n_exc from neutron import context from neutron.services.auto_allocate import db from neutron.services.auto_allocate import exceptions from neutron.tests.unit import testlib_api class AutoAllocateTestCase(testlib_api.SqlTestCaseLight): def setUp(self): super(AutoAllocateTestCase, self).setUp() self.ctx = context.get_admin_context() self.mixin = db.AutoAllocatedTopologyMixin() self.mixin._l3_plugin = mock.Mock() def test__provision_external_connectivity_expected_cleanup(self): """Test that the right resources are cleaned up.""" subnets = [ {'id': 'subnet_foo_1', 'network_id': 'network_foo'}, {'id': 'subnet_foo_2', 'network_id': 'network_foo'}, ] with mock.patch.object(self.mixin, '_cleanup') as mock_cleanup: self.mixin.l3_plugin.create_router.return_value = ( {'id': 'router_foo'}) self.mixin.l3_plugin.add_router_interface.side_effect = ( n_exc.BadRequest(resource='router', msg='doh!')) self.assertRaises(exceptions.AutoAllocationFailure, self.mixin._provision_external_connectivity, self.ctx, 'ext_net_foo', subnets, 'tenant_foo') # expect no subnets to be unplugged mock_cleanup.assert_called_once_with( self.ctx, network_id='network_foo', router_id='router_foo', subnets=[]) def test_get_auto_allocated_topology_dry_run_happy_path_for_kevin(self): with mock.patch.object(self.mixin, '_check_requirements') as f: self.mixin.get_auto_allocated_topology( self.ctx, mock.ANY, fields=['dry-run']) self.assertEqual(1, f.call_count) def test_get_auto_allocated_topology_dry_run_bad_input(self): self.assertRaises(n_exc.BadRequest, self.mixin.get_auto_allocated_topology, self.ctx, mock.ANY, fields=['foo']) def test__check_requirements_fail_on_missing_ext_net(self): self.assertRaises(exceptions.AutoAllocationFailure, self.mixin._check_requirements, self.ctx, 'foo_tenant') def test__check_requirements_fail_on_missing_pools(self): with mock.patch.object( self.mixin, '_get_default_external_network'),\ mock.patch.object( self.mixin, '_get_supported_subnetpools') as g: g.side_effect = n_exc.NotFound() self.assertRaises(exceptions.AutoAllocationFailure, self.mixin._check_requirements, self.ctx, 'foo_tenant') def test__check_requirements_happy_path_for_kevin(self): with mock.patch.object( self.mixin, '_get_default_external_network'),\ mock.patch.object( self.mixin, '_get_supported_subnetpools'): result = self.mixin._check_requirements(self.ctx, 'foo_tenant') expected = {'id': 'dry-run=pass', 'tenant_id': 
'foo_tenant'} self.assertEqual(expected, result) neutron-8.4.0/neutron/tests/unit/callbacks/0000775000567000056710000000000013044373210022105 5ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/tests/unit/callbacks/__init__.py0000664000567000056710000000000013044372736024220 0ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/tests/unit/callbacks/test_manager.py0000664000567000056710000001767413044372760025160 0ustar jenkinsjenkins00000000000000# Copyright 2015 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import mock from neutron.callbacks import events from neutron.callbacks import exceptions from neutron.callbacks import manager from neutron.callbacks import resources from neutron.tests import base def callback_1(*args, **kwargs): callback_1.counter += 1 callback_id_1 = manager._get_id(callback_1) def callback_2(*args, **kwargs): callback_2.counter += 1 callback_id_2 = manager._get_id(callback_2) def callback_raise(*args, **kwargs): raise Exception() class CallBacksManagerTestCase(base.BaseTestCase): def setUp(self): super(CallBacksManagerTestCase, self).setUp() self.manager = manager.CallbacksManager() callback_1.counter = 0 callback_2.counter = 0 def test_subscribe(self): self.manager.subscribe( callback_1, resources.PORT, events.BEFORE_CREATE) self.assertIsNotNone( self.manager._callbacks[resources.PORT][events.BEFORE_CREATE]) self.assertIn(callback_id_1, self.manager._index) def test_subscribe_unknown(self): self.manager.subscribe( callback_1, 'my_resource', 'my-event') self.assertIsNotNone( self.manager._callbacks['my_resource']['my-event']) self.assertIn(callback_id_1, self.manager._index) def test_subscribe_is_idempotent(self): self.manager.subscribe( callback_1, resources.PORT, events.BEFORE_CREATE) self.manager.subscribe( callback_1, resources.PORT, events.BEFORE_CREATE) self.assertEqual( 1, len(self.manager._callbacks[resources.PORT][events.BEFORE_CREATE])) callbacks = self.manager._index[callback_id_1][resources.PORT] self.assertEqual(1, len(callbacks)) def test_subscribe_multiple_callbacks(self): self.manager.subscribe( callback_1, resources.PORT, events.BEFORE_CREATE) self.manager.subscribe( callback_2, resources.PORT, events.BEFORE_CREATE) self.assertEqual(2, len(self.manager._index)) self.assertEqual( 2, len(self.manager._callbacks[resources.PORT][events.BEFORE_CREATE])) def test_unsubscribe(self): self.manager.subscribe( callback_1, resources.PORT, events.BEFORE_CREATE) self.manager.unsubscribe( callback_1, resources.PORT, events.BEFORE_CREATE) self.assertNotIn( callback_id_1, self.manager._callbacks[resources.PORT][events.BEFORE_CREATE]) self.assertNotIn(callback_id_1, self.manager._index) def test_unsubscribe_unknown_callback(self): self.manager.subscribe( callback_2, resources.PORT, events.BEFORE_CREATE) self.manager.unsubscribe(callback_1, mock.ANY, mock.ANY) self.assertEqual(1, len(self.manager._index)) def test_unsubscribe_is_idempotent(self): self.manager.subscribe( callback_1, resources.PORT, events.BEFORE_CREATE) self.manager.unsubscribe( 
callback_1, resources.PORT, events.BEFORE_CREATE) self.manager.unsubscribe( callback_1, resources.PORT, events.BEFORE_CREATE) self.assertNotIn(callback_id_1, self.manager._index) self.assertNotIn(callback_id_1, self.manager._callbacks[resources.PORT][events.BEFORE_CREATE]) def test_unsubscribe_by_resource(self): self.manager.subscribe( callback_1, resources.PORT, events.BEFORE_CREATE) self.manager.subscribe( callback_1, resources.PORT, events.BEFORE_DELETE) self.manager.subscribe( callback_2, resources.PORT, events.BEFORE_DELETE) self.manager.unsubscribe_by_resource(callback_1, resources.PORT) self.assertNotIn( callback_id_1, self.manager._callbacks[resources.PORT][events.BEFORE_CREATE]) self.assertIn( callback_id_2, self.manager._callbacks[resources.PORT][events.BEFORE_DELETE]) self.assertNotIn(callback_id_1, self.manager._index) def test_unsubscribe_all(self): self.manager.subscribe( callback_1, resources.PORT, events.BEFORE_CREATE) self.manager.subscribe( callback_1, resources.PORT, events.BEFORE_DELETE) self.manager.subscribe( callback_1, resources.ROUTER, events.BEFORE_CREATE) self.manager.unsubscribe_all(callback_1) self.assertNotIn( callback_id_1, self.manager._callbacks[resources.PORT][events.BEFORE_CREATE]) self.assertNotIn(callback_id_1, self.manager._index) def test_notify_none(self): self.manager.notify(resources.PORT, events.BEFORE_CREATE, mock.ANY) self.assertEqual(0, callback_1.counter) self.assertEqual(0, callback_2.counter) def test_feebly_referenced_callback(self): self.manager.subscribe(lambda *x, **y: None, resources.PORT, events.BEFORE_CREATE) self.manager.notify(resources.PORT, events.BEFORE_CREATE, mock.ANY) def test_notify_with_exception(self): with mock.patch.object(self.manager, '_notify_loop') as n: n.return_value = ['error'] self.assertRaises(exceptions.CallbackFailure, self.manager.notify, mock.ANY, events.BEFORE_CREATE, 'trigger', params={'a': 1}) expected_calls = [ mock.call(mock.ANY, 'before_create', 'trigger', params={'a': 1}), mock.call(mock.ANY, 'abort_create', 'trigger', params={'a': 1}) ] n.assert_has_calls(expected_calls) def test_notify_handle_exception(self): self.manager.subscribe( callback_raise, resources.PORT, events.BEFORE_CREATE) e = self.assertRaises(exceptions.CallbackFailure, self.manager.notify, resources.PORT, events.BEFORE_CREATE, self) self.assertIsInstance(e.errors[0], exceptions.NotificationError) def test_notify_called_once_with_no_failures(self): with mock.patch.object(self.manager, '_notify_loop') as n: n.return_value = False self.manager.notify(resources.PORT, events.BEFORE_CREATE, mock.ANY) n.assert_called_once_with( resources.PORT, events.BEFORE_CREATE, mock.ANY) def test__notify_loop_single_event(self): self.manager.subscribe( callback_1, resources.PORT, events.BEFORE_CREATE) self.manager.subscribe( callback_2, resources.PORT, events.BEFORE_CREATE) self.manager._notify_loop( resources.PORT, events.BEFORE_CREATE, mock.ANY) self.assertEqual(1, callback_1.counter) self.assertEqual(1, callback_2.counter) def test__notify_loop_multiple_events(self): self.manager.subscribe( callback_1, resources.PORT, events.BEFORE_CREATE) self.manager.subscribe( callback_1, resources.ROUTER, events.BEFORE_DELETE) self.manager.subscribe( callback_2, resources.PORT, events.BEFORE_CREATE) self.manager._notify_loop( resources.PORT, events.BEFORE_CREATE, mock.ANY) self.manager._notify_loop( resources.ROUTER, events.BEFORE_DELETE, mock.ANY) self.assertEqual(2, callback_1.counter) self.assertEqual(1, callback_2.counter) 
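# A minimal usage sketch of the callbacks manager exercised above, using only
# the API surface shown in these tests:
#
#     mgr = manager.CallbacksManager()
#     mgr.subscribe(callback_1, resources.PORT, events.BEFORE_CREATE)
#     # notify() invokes every callback registered for (resource, event) and
#     # raises exceptions.CallbackFailure if any callback errors out.
#     mgr.notify(resources.PORT, events.BEFORE_CREATE, trigger)
#     mgr.unsubscribe_all(callback_1)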
neutron-8.4.0/neutron/tests/unit/core_extensions/0000775000567000056710000000000013044373210023375 5ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/tests/unit/core_extensions/__init__.py0000664000567000056710000000000013044372736025510 0ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/tests/unit/core_extensions/test_qos.py0000664000567000056710000002065013044372760025624 0ustar jenkinsjenkins00000000000000# Copyright (c) 2015 Red Hat Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import mock from neutron import context from neutron.core_extensions import base as base_core from neutron.core_extensions import qos as qos_core from neutron.plugins.common import constants as plugin_constants from neutron.services.qos import qos_consts from neutron.tests import base def _get_test_dbdata(qos_policy_id): return {'id': None, 'qos_policy_binding': {'policy_id': qos_policy_id, 'network_id': 'fake_net_id'}} class QosCoreResourceExtensionTestCase(base.BaseTestCase): def setUp(self): super(QosCoreResourceExtensionTestCase, self).setUp() self.core_extension = qos_core.QosCoreResourceExtension() policy_p = mock.patch('neutron.objects.qos.policy.QosPolicy') self.policy_m = policy_p.start() self.context = context.get_admin_context() def test_process_fields_no_qos_policy_id(self): self.core_extension.process_fields( self.context, base_core.PORT, {}, None) self.assertFalse(self.policy_m.called) def _mock_plugin_loaded(self, plugin_loaded): plugins = {} if plugin_loaded: plugins[plugin_constants.QOS] = None return mock.patch('neutron.manager.NeutronManager.get_service_plugins', return_value=plugins) def test_process_fields_no_qos_plugin_loaded(self): with self._mock_plugin_loaded(False): self.core_extension.process_fields( self.context, base_core.PORT, {qos_consts.QOS_POLICY_ID: None}, None) self.assertFalse(self.policy_m.called) def test_process_fields_port_new_policy(self): with self._mock_plugin_loaded(True): qos_policy_id = mock.Mock() actual_port = {'id': mock.Mock(), qos_consts.QOS_POLICY_ID: qos_policy_id} qos_policy = mock.MagicMock() self.policy_m.get_object = mock.Mock(return_value=qos_policy) self.core_extension.process_fields( self.context, base_core.PORT, {qos_consts.QOS_POLICY_ID: qos_policy_id}, actual_port) qos_policy.attach_port.assert_called_once_with(actual_port['id']) def test_process_fields_port_updated_policy(self): with self._mock_plugin_loaded(True): qos_policy1_id = mock.Mock() qos_policy2_id = mock.Mock() port_id = mock.Mock() actual_port = {'id': port_id, qos_consts.QOS_POLICY_ID: qos_policy1_id} old_qos_policy = mock.MagicMock() self.policy_m.get_port_policy = mock.Mock( return_value=old_qos_policy) new_qos_policy = mock.MagicMock() self.policy_m.get_object = mock.Mock(return_value=new_qos_policy) self.core_extension.process_fields( self.context, base_core.PORT, {qos_consts.QOS_POLICY_ID: qos_policy2_id}, actual_port) old_qos_policy.detach_port.assert_called_once_with(port_id) new_qos_policy.attach_port.assert_called_once_with(port_id) 
self.assertEqual(qos_policy2_id, actual_port['qos_policy_id']) def test_process_resource_port_updated_no_policy(self): with self._mock_plugin_loaded(True): port_id = mock.Mock() qos_policy_id = mock.Mock() actual_port = {'id': port_id, qos_consts.QOS_POLICY_ID: qos_policy_id} old_qos_policy = mock.MagicMock() self.policy_m.get_port_policy = mock.Mock( return_value=old_qos_policy) new_qos_policy = mock.MagicMock() self.policy_m.get_object = mock.Mock(return_value=new_qos_policy) self.core_extension.process_fields( self.context, base_core.PORT, {qos_consts.QOS_POLICY_ID: None}, actual_port) old_qos_policy.detach_port.assert_called_once_with(port_id) self.assertIsNone(actual_port['qos_policy_id']) def test_process_resource_network_updated_no_policy(self): with self._mock_plugin_loaded(True): network_id = mock.Mock() qos_policy_id = mock.Mock() actual_network = {'id': network_id, qos_consts.QOS_POLICY_ID: qos_policy_id} old_qos_policy = mock.MagicMock() self.policy_m.get_network_policy = mock.Mock( return_value=old_qos_policy) new_qos_policy = mock.MagicMock() self.policy_m.get_object = mock.Mock(return_value=new_qos_policy) self.core_extension.process_fields( self.context, base_core.NETWORK, {qos_consts.QOS_POLICY_ID: None}, actual_network) old_qos_policy.detach_network.assert_called_once_with(network_id) self.assertIsNone(actual_network['qos_policy_id']) def test_process_fields_network_new_policy(self): with self._mock_plugin_loaded(True): qos_policy_id = mock.Mock() actual_network = {'id': mock.Mock(), qos_consts.QOS_POLICY_ID: qos_policy_id} qos_policy = mock.MagicMock() self.policy_m.get_object = mock.Mock(return_value=qos_policy) self.core_extension.process_fields( self.context, base_core.NETWORK, {qos_consts.QOS_POLICY_ID: qos_policy_id}, actual_network) qos_policy.attach_network.assert_called_once_with( actual_network['id']) def test_process_fields_network_updated_policy(self): with self._mock_plugin_loaded(True): qos_policy_id = mock.Mock() network_id = mock.Mock() actual_network = {'id': network_id, qos_consts.QOS_POLICY_ID: qos_policy_id} old_qos_policy = mock.MagicMock() self.policy_m.get_network_policy = mock.Mock( return_value=old_qos_policy) new_qos_policy = mock.MagicMock() self.policy_m.get_object = mock.Mock(return_value=new_qos_policy) self.core_extension.process_fields( self.context, base_core.NETWORK, {qos_consts.QOS_POLICY_ID: qos_policy_id}, actual_network) old_qos_policy.detach_network.assert_called_once_with(network_id) new_qos_policy.attach_network.assert_called_once_with(network_id) def test_extract_fields_plugin_not_loaded(self): with self._mock_plugin_loaded(False): fields = self.core_extension.extract_fields(None, None) self.assertEqual({}, fields) def _test_extract_fields_for_port(self, qos_policy_id): with self._mock_plugin_loaded(True): fields = self.core_extension.extract_fields( base_core.PORT, _get_test_dbdata(qos_policy_id)) self.assertEqual({qos_consts.QOS_POLICY_ID: qos_policy_id}, fields) def test_extract_fields_no_port_policy(self): self._test_extract_fields_for_port(None) def test_extract_fields_port_policy_exists(self): qos_policy_id = mock.Mock() self._test_extract_fields_for_port(qos_policy_id) def _test_extract_fields_for_network(self, qos_policy_id): with self._mock_plugin_loaded(True): fields = self.core_extension.extract_fields( base_core.NETWORK, _get_test_dbdata(qos_policy_id)) self.assertEqual({qos_consts.QOS_POLICY_ID: qos_policy_id}, fields) def test_extract_fields_no_network_policy(self): self._test_extract_fields_for_network(None) def 
test_extract_fields_network_policy_exists(self): qos_policy_id = mock.Mock() qos_policy = mock.Mock() qos_policy.id = qos_policy_id self._test_extract_fields_for_network(qos_policy_id) neutron-8.4.0/neutron/tests/unit/__init__.py0000664000567000056710000000126713044372760022316 0ustar jenkinsjenkins00000000000000# Copyright 2011 OpenStack Foundation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import cfg cfg.CONF.use_stderr = False neutron-8.4.0/neutron/tests/unit/test_manager.py0000664000567000056710000001714713044372760023234 0ustar jenkinsjenkins00000000000000# Copyright (c) 2012 OpenStack Foundation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import weakref import fixtures from oslo_config import cfg from neutron import manager from neutron.plugins.common import constants from neutron.tests import base from neutron.tests.unit import dummy_plugin from neutron.tests.unit import testlib_api DB_PLUGIN_KLASS = 'neutron.db.db_base_plugin_v2.NeutronDbPluginV2' class MultiServiceCorePlugin(object): supported_extension_aliases = ['lbaas', 'dummy'] class CorePluginWithAgentNotifiers(object): agent_notifiers = {'l3': 'l3_agent_notifier', 'dhcp': 'dhcp_agent_notifier'} class NeutronManagerTestCase(base.BaseTestCase): def setUp(self): super(NeutronManagerTestCase, self).setUp() self.config_parse() self.setup_coreplugin() self.useFixture( fixtures.MonkeyPatch('neutron.manager.NeutronManager._instance')) def test_service_plugin_is_loaded(self): cfg.CONF.set_override("core_plugin", DB_PLUGIN_KLASS) cfg.CONF.set_override("service_plugins", ["neutron.tests.unit.dummy_plugin." "DummyServicePlugin"]) mgr = manager.NeutronManager.get_instance() plugin = mgr.get_service_plugins()[constants.DUMMY] self.assertIsInstance( plugin, dummy_plugin.DummyServicePlugin, "loaded plugin should be of type neutronDummyPlugin") def test_service_plugin_by_name_is_loaded(self): cfg.CONF.set_override("core_plugin", DB_PLUGIN_KLASS) cfg.CONF.set_override("service_plugins", ["dummy"]) mgr = manager.NeutronManager.get_instance() plugin = mgr.get_service_plugins()[constants.DUMMY] self.assertIsInstance( plugin, dummy_plugin.DummyServicePlugin, "loaded plugin should be of type neutronDummyPlugin") def test_multiple_plugins_specified_for_service_type(self): cfg.CONF.set_override("service_plugins", ["neutron.tests.unit.dummy_plugin." "DummyServicePlugin", "neutron.tests.unit.dummy_plugin." 
"DummyServicePlugin"]) cfg.CONF.set_override("core_plugin", DB_PLUGIN_KLASS) e = self.assertRaises(ValueError, manager.NeutronManager.get_instance) self.assertIn(constants.DUMMY, str(e)) def test_multiple_plugins_by_name_specified_for_service_type(self): cfg.CONF.set_override("service_plugins", ["dummy", "dummy"]) cfg.CONF.set_override("core_plugin", DB_PLUGIN_KLASS) self.assertRaises(ValueError, manager.NeutronManager.get_instance) def test_multiple_plugins_mixed_specified_for_service_type(self): cfg.CONF.set_override("service_plugins", ["neutron.tests.unit.dummy_plugin." "DummyServicePlugin", "dummy"]) cfg.CONF.set_override("core_plugin", DB_PLUGIN_KLASS) self.assertRaises(ValueError, manager.NeutronManager.get_instance) def test_service_plugin_conflicts_with_core_plugin(self): cfg.CONF.set_override("service_plugins", ["neutron.tests.unit.dummy_plugin." "DummyServicePlugin"]) cfg.CONF.set_override("core_plugin", "neutron.tests.unit.test_manager." "MultiServiceCorePlugin") e = self.assertRaises(ValueError, manager.NeutronManager.get_instance) self.assertIn(constants.DUMMY, str(e)) def test_core_plugin_supports_services(self): cfg.CONF.set_override("core_plugin", "neutron.tests.unit.test_manager." "MultiServiceCorePlugin") mgr = manager.NeutronManager.get_instance() svc_plugins = mgr.get_service_plugins() self.assertEqual(3, len(svc_plugins)) self.assertIn(constants.CORE, svc_plugins.keys()) self.assertIn(constants.LOADBALANCER, svc_plugins.keys()) self.assertIn(constants.DUMMY, svc_plugins.keys()) def test_load_default_service_plugins(self): self.patched_default_svc_plugins.return_value = { 'neutron.tests.unit.dummy_plugin.DummyServicePlugin': 'DUMMY' } cfg.CONF.set_override("core_plugin", DB_PLUGIN_KLASS) mgr = manager.NeutronManager.get_instance() svc_plugins = mgr.get_service_plugins() self.assertIn('DUMMY', svc_plugins) def test_post_plugin_validation(self): cfg.CONF.import_opt('dhcp_agents_per_network', 'neutron.db.agentschedulers_db') self.assertIsNone(manager.validate_post_plugin_load()) cfg.CONF.set_override('dhcp_agents_per_network', 2) self.assertIsNone(manager.validate_post_plugin_load()) cfg.CONF.set_override('dhcp_agents_per_network', 0) self.assertIsNotNone(manager.validate_post_plugin_load()) cfg.CONF.set_override('dhcp_agents_per_network', -1) self.assertIsNotNone(manager.validate_post_plugin_load()) def test_pre_plugin_validation(self): self.assertIsNotNone(manager.validate_pre_plugin_load()) cfg.CONF.set_override('core_plugin', 'dummy.plugin') self.assertIsNone(manager.validate_pre_plugin_load()) def test_manager_gathers_agent_notifiers_from_service_plugins(self): cfg.CONF.set_override("service_plugins", ["neutron.tests.unit.dummy_plugin." "DummyServicePlugin"]) cfg.CONF.set_override("core_plugin", "neutron.tests.unit.test_manager." 
"CorePluginWithAgentNotifiers") expected = {'l3': 'l3_agent_notifier', 'dhcp': 'dhcp_agent_notifier', 'dummy': 'dummy_agent_notifier'} core_plugin = manager.NeutronManager.get_plugin() self.assertEqual(expected, core_plugin.agent_notifiers) def test_load_class_for_provider(self): manager.NeutronManager.load_class_for_provider( 'neutron.core_plugins', 'ml2') def test_load_class_for_provider_wrong_plugin(self): with testlib_api.ExpectedException(ImportError): manager.NeutronManager.load_class_for_provider( 'neutron.core_plugins', 'ml2XXXXXX') def test_get_service_plugin_by_path_prefix_3(self): cfg.CONF.set_override("core_plugin", DB_PLUGIN_KLASS) nm = manager.NeutronManager.get_instance() class pclass(object): def __init__(self, path_prefix): self.path_prefix = path_prefix x_plugin, y_plugin = pclass('xpa'), pclass('ypa') nm.service_plugins['x'], nm.service_plugins['y'] = x_plugin, y_plugin self.assertEqual(weakref.proxy(x_plugin), nm.get_service_plugin_by_path_prefix('xpa')) self.assertEqual(weakref.proxy(y_plugin), nm.get_service_plugin_by_path_prefix('ypa')) self.assertIsNone(nm.get_service_plugin_by_path_prefix('abc')) neutron-8.4.0/neutron/tests/unit/quota/0000775000567000056710000000000013044373210021317 5ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/tests/unit/quota/test_resource.py0000664000567000056710000002436213044372760024577 0ustar jenkinsjenkins00000000000000# Copyright (c) 2015 OpenStack Foundation. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import uuid import mock from oslo_config import cfg from neutron import context from neutron.db import api as db_api from neutron.db.quota import api as quota_api from neutron.quota import resource from neutron.tests import base from neutron.tests.unit import quota as test_quota from neutron.tests.unit import testlib_api meh_quota_flag = 'quota_meh' meh_quota_opts = [cfg.IntOpt(meh_quota_flag, default=99)] class TestResource(base.DietTestCase): """Unit tests for neutron.quota.resource.BaseResource""" def test_create_resource_without_plural_name(self): res = resource.BaseResource('foo', None) self.assertEqual('foos', res.plural_name) res = resource.BaseResource('foy', None) self.assertEqual('foies', res.plural_name) def test_create_resource_with_plural_name(self): res = resource.BaseResource('foo', None, plural_name='foopsies') self.assertEqual('foopsies', res.plural_name) def test_resource_default_value(self): res = resource.BaseResource('foo', 'foo_quota') with mock.patch('oslo_config.cfg.CONF') as mock_cfg: mock_cfg.QUOTAS.foo_quota = 99 self.assertEqual(99, res.default) def test_resource_negative_default_value(self): res = resource.BaseResource('foo', 'foo_quota') with mock.patch('oslo_config.cfg.CONF') as mock_cfg: mock_cfg.QUOTAS.foo_quota = -99 self.assertEqual(-1, res.default) class TestTrackedResource(testlib_api.SqlTestCaseLight): def _add_data(self, tenant_id=None): session = db_api.get_session() with session.begin(): tenant_id = tenant_id or self.tenant_id session.add(test_quota.MehModel(meh='meh_%s' % uuid.uuid4(), tenant_id=tenant_id)) session.add(test_quota.MehModel(meh='meh_%s' % uuid.uuid4(), tenant_id=tenant_id)) def _delete_data(self): session = db_api.get_session() with session.begin(): query = session.query(test_quota.MehModel).filter_by( tenant_id=self.tenant_id) for item in query: session.delete(item) def _update_data(self): session = db_api.get_session() with session.begin(): query = session.query(test_quota.MehModel).filter_by( tenant_id=self.tenant_id) for item in query: item['meh'] = 'meh-%s' % item['meh'] session.add(item) def setUp(self): base.BaseTestCase.config_parse() cfg.CONF.register_opts(meh_quota_opts, 'QUOTAS') self.addCleanup(cfg.CONF.reset) self.resource = 'meh' self.other_resource = 'othermeh' self.tenant_id = 'meh' self.context = context.Context( user_id='', tenant_id=self.tenant_id, is_admin=False) super(TestTrackedResource, self).setUp() def _register_events(self, res): res.register_events() self.addCleanup(res.unregister_events) def _create_resource(self): res = resource.TrackedResource( self.resource, test_quota.MehModel, meh_quota_flag) self._register_events(res) return res def _create_other_resource(self): res = resource.TrackedResource( self.other_resource, test_quota.OtherMehModel, meh_quota_flag) self._register_events(res) return res def test_count_first_call_with_dirty_false(self): quota_api.set_quota_usage( self.context, self.resource, self.tenant_id, in_use=1) res = self._create_resource() self._add_data() # explicitly set dirty flag to False quota_api.set_all_quota_usage_dirty( self.context, self.resource, dirty=False) # Expect correct count to be returned anyway since the first call to # count() always resyncs with the db self.assertEqual(2, res.count(self.context, None, self.tenant_id)) def _test_count(self): res = self._create_resource() quota_api.set_quota_usage( self.context, res.name, self.tenant_id, in_use=0) self._add_data() return res def test_count_with_dirty_false(self): res = self._test_count() res.count(self.context, None, 
self.tenant_id) # At this stage count has been invoked, and the dirty flag should be # false. Another invocation of count should not query the model class set_quota = 'neutron.db.quota.api.set_quota_usage' with mock.patch(set_quota) as mock_set_quota: self.assertEqual(0, mock_set_quota.call_count) self.assertEqual(2, res.count(self.context, None, self.tenant_id)) def test_count_with_dirty_true_resync(self): res = self._test_count() # Expect correct count to be returned, which also implies # set_quota_usage has been invoked with the correct parameters self.assertEqual(2, res.count(self.context, None, self.tenant_id, resync_usage=True)) def test_count_with_dirty_true_resync_calls_set_quota_usage(self): res = self._test_count() set_quota_usage = 'neutron.db.quota.api.set_quota_usage' with mock.patch(set_quota_usage) as mock_set_quota_usage: quota_api.set_quota_usage_dirty(self.context, self.resource, self.tenant_id) res.count(self.context, None, self.tenant_id, resync_usage=True) mock_set_quota_usage.assert_called_once_with( self.context, self.resource, self.tenant_id, in_use=2) def test_count_with_dirty_true_no_usage_info(self): res = self._create_resource() self._add_data() # Invoke count without having usage info in DB - Expect correct # count to be returned self.assertEqual(2, res.count(self.context, None, self.tenant_id)) def test_count_with_dirty_true_no_usage_info_calls_set_quota_usage(self): res = self._create_resource() self._add_data() set_quota_usage = 'neutron.db.quota.api.set_quota_usage' with mock.patch(set_quota_usage) as mock_set_quota_usage: quota_api.set_quota_usage_dirty(self.context, self.resource, self.tenant_id) res.count(self.context, None, self.tenant_id, resync_usage=True) mock_set_quota_usage.assert_called_once_with( self.context, self.resource, self.tenant_id, in_use=2) def test_add_delete_data_triggers_event(self): res = self._create_resource() other_res = self._create_other_resource() # Validate dirty tenants since mock does not work well with SQLAlchemy # event handlers. 
self._add_data() self._add_data('someone_else') self.assertEqual(2, len(res._dirty_tenants)) # Also, the dirty flag should not be set for other resources self.assertEqual(0, len(other_res._dirty_tenants)) self.assertIn(self.tenant_id, res._dirty_tenants) self.assertIn('someone_else', res._dirty_tenants) def test_delete_data_triggers_event(self): res = self._create_resource() self._add_data() self._add_data('someone_else') # Artificially clear _dirty_tenants res._dirty_tenants.clear() self._delete_data() # We did not delete "someone_else", so expect only a single dirty # tenant self.assertEqual(1, len(res._dirty_tenants)) self.assertIn(self.tenant_id, res._dirty_tenants) def test_update_does_not_trigger_event(self): res = self._create_resource() self._add_data() self._add_data('someone_else') # Artificially clear _dirty_tenants res._dirty_tenants.clear() self._update_data() self.assertEqual(0, len(res._dirty_tenants)) def test_mark_dirty(self): res = self._create_resource() self._add_data() self._add_data('someone_else') set_quota_usage = 'neutron.db.quota.api.set_quota_usage_dirty' with mock.patch(set_quota_usage) as mock_set_quota_usage: res.mark_dirty(self.context) self.assertEqual(2, mock_set_quota_usage.call_count) mock_set_quota_usage.assert_any_call( self.context, self.resource, self.tenant_id) mock_set_quota_usage.assert_any_call( self.context, self.resource, 'someone_else') def test_mark_dirty_no_dirty_tenant(self): res = self._create_resource() set_quota_usage = 'neutron.db.quota.api.set_quota_usage_dirty' with mock.patch(set_quota_usage) as mock_set_quota_usage: res.mark_dirty(self.context) self.assertFalse(mock_set_quota_usage.call_count) def test_resync(self): res = self._create_resource() self._add_data() res.mark_dirty(self.context) # self.tenant_id now is out of sync set_quota_usage = 'neutron.db.quota.api.set_quota_usage' with mock.patch(set_quota_usage) as mock_set_quota_usage: res.resync(self.context, self.tenant_id) # and now it should be in sync self.assertNotIn(self.tenant_id, res._out_of_sync_tenants) mock_set_quota_usage.assert_called_once_with( self.context, self.resource, self.tenant_id, in_use=2) neutron-8.4.0/neutron/tests/unit/quota/test_resource_registry.py0000664000567000056710000001704413044372760026526 0ustar jenkinsjenkins00000000000000# Copyright (c) 2015 OpenStack Foundation. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
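# A minimal sketch of the registry flow these tests exercise: a model class
# is mapped to a resource name, and registering that name then yields a
# TrackedResource, or a CountableResource when usage tracking is disabled
# via the QUOTAS.track_quota_usage option:
#
#     registry = resource_registry.ResourceRegistry.get_instance()
#     registry.set_tracked_resource('meh', test_quota.MehModel)
#     registry.register_resource_by_name('meh')
#     res = registry.get_resource('meh')  # a TrackedResource instance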
import mock
from oslo_config import cfg

from neutron import context
from neutron.quota import resource
from neutron.quota import resource_registry
from neutron.tests import base
from neutron.tests.unit import quota as test_quota


class TestResourceRegistry(base.DietTestCase):

    def setUp(self):
        super(TestResourceRegistry, self).setUp()
        self.registry = resource_registry.ResourceRegistry.get_instance()
        # clean up the registry at every test
        self.registry.unregister_resources()

    def test_set_tracked_resource_new_resource(self):
        self.registry.set_tracked_resource('meh', test_quota.MehModel)
        self.assertEqual(test_quota.MehModel,
                         self.registry._tracked_resource_mappings['meh'])

    def test_set_tracked_resource_existing_with_override(self):
        self.test_set_tracked_resource_new_resource()
        self.registry.set_tracked_resource('meh', test_quota.OtherMehModel,
                                           override=True)
        # Override is set to True, so the model class should change
        self.assertEqual(test_quota.OtherMehModel,
                         self.registry._tracked_resource_mappings['meh'])

    def test_set_tracked_resource_existing_no_override(self):
        self.test_set_tracked_resource_new_resource()
        self.registry.set_tracked_resource('meh', test_quota.OtherMehModel)
        # Override is set to False, so the model class should not change
        self.assertEqual(test_quota.MehModel,
                         self.registry._tracked_resource_mappings['meh'])

    def _test_register_resource_by_name(self, resource_name, expected_type):
        self.assertNotIn(resource_name, self.registry._resources)
        self.registry.register_resource_by_name(resource_name)
        self.assertIn(resource_name, self.registry._resources)
        self.assertIsInstance(self.registry.get_resource(resource_name),
                              expected_type)

    def test_register_resource_by_name_tracked(self):
        self.test_set_tracked_resource_new_resource()
        self._test_register_resource_by_name('meh',
                                             resource.TrackedResource)

    def test_register_resource_by_name_not_tracked(self):
        self._test_register_resource_by_name('meh',
                                             resource.CountableResource)

    def test_register_resource_by_name_with_tracking_disabled_by_config(self):
        cfg.CONF.set_override('track_quota_usage', False, group='QUOTAS')
        # DietTestCase does not automatically clean up configuration
        # overrides
        self.addCleanup(cfg.CONF.reset)
        self.registry.set_tracked_resource('meh', test_quota.MehModel)
        self.assertNotIn(
            'meh', self.registry._tracked_resource_mappings)
        self._test_register_resource_by_name('meh',
                                             resource.CountableResource)


class TestAuxiliaryFunctions(base.DietTestCase):

    def setUp(self):
        super(TestAuxiliaryFunctions, self).setUp()
        self.registry = resource_registry.ResourceRegistry.get_instance()
        # clean up the registry at every test
        self.registry.unregister_resources()

    def test_resync_tracking_disabled(self):
        cfg.CONF.set_override('track_quota_usage', False, group='QUOTAS')
        # DietTestCase does not automatically clean up configuration
        # overrides
        self.addCleanup(cfg.CONF.reset)
        with mock.patch('neutron.quota.resource.'
                        'TrackedResource.resync') as mock_resync:
            self.registry.set_tracked_resource('meh', test_quota.MehModel)
            self.registry.register_resource_by_name('meh')
            resource_registry.resync_resource(mock.ANY, 'meh', 'tenant_id')
            self.assertEqual(0, mock_resync.call_count)

    def test_resync_tracked_resource(self):
        with mock.patch('neutron.quota.resource.'
                        'TrackedResource.resync') as mock_resync:
            self.registry.set_tracked_resource('meh', test_quota.MehModel)
            self.registry.register_resource_by_name('meh')
            resource_registry.resync_resource(mock.ANY, 'meh', 'tenant_id')
            mock_resync.assert_called_once_with(mock.ANY, 'tenant_id')

    def test_resync_non_tracked_resource(self):
        with mock.patch('neutron.quota.resource.'
                        'TrackedResource.resync') as mock_resync:
            self.registry.register_resource_by_name('meh')
            resource_registry.resync_resource(mock.ANY, 'meh', 'tenant_id')
            self.assertEqual(0, mock_resync.call_count)

    def test_set_resources_dirty_invoked_with_tracking_disabled(self):
        cfg.CONF.set_override('track_quota_usage', False, group='QUOTAS')
        # DietTestCase does not automatically clean up configuration
        # overrides
        self.addCleanup(cfg.CONF.reset)
        with mock.patch('neutron.quota.resource.'
                        'TrackedResource.mark_dirty') as mock_mark_dirty:
            self.registry.set_tracked_resource('meh', test_quota.MehModel)
            self.registry.register_resource_by_name('meh')
            resource_registry.set_resources_dirty(mock.ANY)
            self.assertEqual(0, mock_mark_dirty.call_count)

    def test_set_resources_dirty_no_dirty_resource(self):
        ctx = context.Context('user_id', 'tenant_id',
                              is_admin=False, is_advsvc=False)
        with mock.patch('neutron.quota.resource.'
                        'TrackedResource.mark_dirty') as mock_mark_dirty:
            self.registry.set_tracked_resource('meh', test_quota.MehModel)
            self.registry.register_resource_by_name('meh')
            res = self.registry.get_resource('meh')
            # This ensures dirty is false
            res._dirty_tenants.clear()
            resource_registry.set_resources_dirty(ctx)
            self.assertEqual(0, mock_mark_dirty.call_count)

    def test_set_resources_dirty_no_tracked_resource(self):
        ctx = context.Context('user_id', 'tenant_id',
                              is_admin=False, is_advsvc=False)
        with mock.patch('neutron.quota.resource.'
                        'TrackedResource.mark_dirty') as mock_mark_dirty:
            self.registry.register_resource_by_name('meh')
            resource_registry.set_resources_dirty(ctx)
            self.assertEqual(0, mock_mark_dirty.call_count)

    def test_set_resources_dirty(self):
        ctx = context.Context('user_id', 'tenant_id',
                              is_admin=False, is_advsvc=False)
        with mock.patch('neutron.quota.resource.'
                        'TrackedResource.mark_dirty') as mock_mark_dirty:
            self.registry.set_tracked_resource('meh', test_quota.MehModel)
            self.registry.register_resource_by_name('meh')
            res = self.registry.get_resource('meh')
            # This ensures dirty is true
            res._dirty_tenants.add('tenant_id')
            resource_registry.set_resources_dirty(ctx)
            mock_mark_dirty.assert_called_once_with(ctx)
neutron-8.4.0/neutron/tests/unit/quota/__init__.py0000664000567000056710000000170713044372760023446 0ustar jenkinsjenkins00000000000000# Copyright (c) 2015 OpenStack Foundation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
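# The registry tests above fix a simple dispatch rule: a name passed to
# set_tracked_resource() is later registered as a TrackedResource, any
# other name becomes a CountableResource, and tracking can be disabled
# globally through the QUOTAS/track_quota_usage option, in which case
# set_tracked_resource() is a no-op. A toy sketch of that rule
# (illustrative only, not the neutron implementation):


class ToyResourceRegistry(object):
    def __init__(self, tracking_enabled=True):
        self.tracking_enabled = tracking_enabled
        self.tracked_models = {}  # resource name -> model class
        self.resources = {}       # resource name -> resource kind

    def set_tracked_resource(self, name, model, override=False):
        # Mirrors the override semantics asserted by the
        # set_tracked_resource_existing_* tests above.
        if not self.tracking_enabled:
            return
        if override or name not in self.tracked_models:
            self.tracked_models[name] = model

    def register_resource_by_name(self, name):
        kind = ('tracked' if name in self.tracked_models
                else 'countable')
        self.resources[name] = kind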
import sqlalchemy as sa from neutron.db import model_base # Model classes for test resources class MehModel(model_base.BASEV2, model_base.HasTenant): meh = sa.Column(sa.String(8), primary_key=True) class OtherMehModel(model_base.BASEV2, model_base.HasTenant): othermeh = sa.Column(sa.String(8), primary_key=True) neutron-8.4.0/neutron/tests/unit/test_policy.py0000664000567000056710000006763513044372760023130 0ustar jenkinsjenkins00000000000000# Copyright (c) 2012 OpenStack Foundation. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. """Test of Policy Engine For Neutron""" import mock from oslo_db import exception as db_exc from oslo_policy import fixture as op_fixture from oslo_policy import policy as oslo_policy from oslo_serialization import jsonutils from oslo_utils import importutils import neutron from neutron.api.v2 import attributes from neutron.common import constants as const from neutron.common import exceptions from neutron import context from neutron import manager from neutron import policy from neutron.tests import base class PolicyFileTestCase(base.BaseTestCase): def setUp(self): super(PolicyFileTestCase, self).setUp() self.context = context.Context('fake', 'fake', is_admin=False) self.target = {'tenant_id': 'fake'} def test_modified_policy_reloads(self): tmpfilename = self.get_temp_file_path('policy') action = "example:test" with open(tmpfilename, "w") as policyfile: policyfile.write("""{"example:test": ""}""") policy.refresh(policy_file=tmpfilename) policy.enforce(self.context, action, self.target) with open(tmpfilename, "w") as policyfile: policyfile.write("""{"example:test": "!"}""") policy.refresh(policy_file=tmpfilename) self.target = {'tenant_id': 'fake_tenant'} self.assertRaises(oslo_policy.PolicyNotAuthorized, policy.enforce, self.context, action, self.target) class PolicyTestCase(base.BaseTestCase): def setUp(self): super(PolicyTestCase, self).setUp() # NOTE(vish): preload rules to circumvent reloading from file rules = { "true": '@', "example:allowed": '@', "example:denied": '!', "example:get_http": "http:http://www.example.com", "example:my_file": "role:compute_admin or tenant_id:%(tenant_id)s", "example:early_and_fail": "! 
and @", "example:early_or_success": "@ or !", "example:lowercase_admin": "role:admin or role:sysadmin", "example:uppercase_admin": "role:ADMIN or role:sysadmin", } policy.refresh() # NOTE(vish): then overload underlying rules policy.set_rules(oslo_policy.Rules.from_dict(rules)) self.context = context.Context('fake', 'fake', roles=['member']) self.target = {} def test_enforce_nonexistent_action_throws(self): action = "example:noexist" self.assertRaises(oslo_policy.PolicyNotAuthorized, policy.enforce, self.context, action, self.target) def test_enforce_bad_action_throws(self): action = "example:denied" self.assertRaises(oslo_policy.PolicyNotAuthorized, policy.enforce, self.context, action, self.target) def test_check_bad_action_noraise(self): action = "example:denied" result = policy.check(self.context, action, self.target) self.assertFalse(result) def test_check_non_existent_action(self): action = "example:idonotexist" result_1 = policy.check(self.context, action, self.target) self.assertFalse(result_1) result_2 = policy.check(self.context, action, self.target, might_not_exist=True) self.assertTrue(result_2) def test_enforce_good_action(self): action = "example:allowed" result = policy.enforce(self.context, action, self.target) self.assertTrue(result) def test_enforce_http_true(self): self.useFixture(op_fixture.HttpCheckFixture()) action = "example:get_http" target = {} result = policy.enforce(self.context, action, target) self.assertTrue(result) def test_enforce_http_false(self): self.useFixture(op_fixture.HttpCheckFixture(False)) action = "example:get_http" target = {} self.assertRaises(oslo_policy.PolicyNotAuthorized, policy.enforce, self.context, action, target) def test_templatized_enforcement(self): target_mine = {'tenant_id': 'fake'} target_not_mine = {'tenant_id': 'another'} action = "example:my_file" policy.enforce(self.context, action, target_mine) self.assertRaises(oslo_policy.PolicyNotAuthorized, policy.enforce, self.context, action, target_not_mine) def test_early_AND_enforcement(self): action = "example:early_and_fail" self.assertRaises(oslo_policy.PolicyNotAuthorized, policy.enforce, self.context, action, self.target) def test_early_OR_enforcement(self): action = "example:early_or_success" policy.enforce(self.context, action, self.target) def test_ignore_case_role_check(self): lowercase_action = "example:lowercase_admin" uppercase_action = "example:uppercase_admin" # NOTE(dprince) we mix case in the Admin role here to ensure # case is ignored admin_context = context.Context('admin', 'fake', roles=['AdMiN']) policy.enforce(admin_context, lowercase_action, self.target) policy.enforce(admin_context, uppercase_action, self.target) class DefaultPolicyTestCase(base.BaseTestCase): def setUp(self): super(DefaultPolicyTestCase, self).setUp() tmpfilename = self.get_temp_file_path('policy.json') self.rules = { "default": '', "example:exist": '!', } with open(tmpfilename, "w") as policyfile: jsonutils.dump(self.rules, policyfile) policy.refresh(policy_file=tmpfilename) self.context = context.Context('fake', 'fake') def test_policy_called(self): self.assertRaises(oslo_policy.PolicyNotAuthorized, policy.enforce, self.context, "example:exist", {}) def test_not_found_policy_calls_default(self): policy.enforce(self.context, "example:noexist", {}) FAKE_RESOURCE_NAME = 'fake_resource' FAKE_SPECIAL_RESOURCE_NAME = 'fake_policy' FAKE_RESOURCES = {"%ss" % FAKE_RESOURCE_NAME: {'attr': {'allow_post': True, 'allow_put': True, 'is_visible': True, 'default': None, 'enforce_policy': True, 'validate': 
{'type:dict': {'sub_attr_1': {'type:string': None}, 'sub_attr_2': {'type:string': None}}} }}, # special plural name "%s" % FAKE_SPECIAL_RESOURCE_NAME.replace('y', 'ies'): {'attr': {'allow_post': True, 'allow_put': True, 'is_visible': True, 'default': None, 'enforce_policy': True, 'validate': {'type:dict': {'sub_attr_1': {'type:string': None}, 'sub_attr_2': {'type:string': None}}} }}} class NeutronPolicyTestCase(base.BaseTestCase): def fakepolicyinit(self, **kwargs): enf = policy._ENFORCER enf.set_rules(oslo_policy.Rules(self.rules)) def setUp(self): super(NeutronPolicyTestCase, self).setUp() policy.refresh() # Add Fake resources to RESOURCE_ATTRIBUTE_MAP attributes.RESOURCE_ATTRIBUTE_MAP.update(FAKE_RESOURCES) self._set_rules() def remove_fake_resource(): del attributes.RESOURCE_ATTRIBUTE_MAP["%ss" % FAKE_RESOURCE_NAME] self.patcher = mock.patch.object(neutron.policy, 'init', new=self.fakepolicyinit) self.patcher.start() self.addCleanup(remove_fake_resource) self.context = context.Context('fake', 'fake', roles=['user']) plugin_klass = importutils.import_class( "neutron.db.db_base_plugin_v2.NeutronDbPluginV2") self.manager_patcher = mock.patch('neutron.manager.NeutronManager') fake_manager = self.manager_patcher.start() fake_manager_instance = fake_manager.return_value fake_manager_instance.plugin = plugin_klass() def _set_rules(self, **kwargs): rules_dict = { "context_is_admin": "role:admin", "context_is_advsvc": "role:advsvc", "admin_or_network_owner": "rule:context_is_admin or " "tenant_id:%(network:tenant_id)s", "admin_or_owner": ("rule:context_is_admin or " "tenant_id:%(tenant_id)s"), "admin_only": "rule:context_is_admin", "regular_user": "role:user", "shared": "field:networks:shared=True", "external": "field:networks:router:external=True", "network_device": "field:port:device_owner=~^network:", "default": '@', "create_network": "rule:admin_or_owner", "create_network:shared": "rule:admin_only", "update_network": '@', "update_network:shared": "rule:admin_only", "get_network": "rule:admin_or_owner or rule:shared or " "rule:external or rule:context_is_advsvc", "create_subnet": "rule:admin_or_network_owner", "create_port:mac": "rule:admin_or_network_owner or " "rule:context_is_advsvc", "create_port:device_owner": "not rule:network_device", "update_port": "rule:admin_or_owner or rule:context_is_advsvc", "get_port": "rule:admin_or_owner or rule:context_is_advsvc", "delete_port": "rule:admin_or_owner or rule:context_is_advsvc", "create_fake_resource": "rule:admin_or_owner", "create_fake_resource:attr": "rule:admin_or_owner", "create_fake_resource:attr:sub_attr_1": "rule:admin_or_owner", "create_fake_resource:attr:sub_attr_2": "rule:admin_only", "create_fake_policy:": "rule:admin_or_owner", "get_firewall_policy": "rule:admin_or_owner or " "rule:shared", "get_firewall_rule": "rule:admin_or_owner or " "rule:shared", "insert_rule": "rule:admin_or_owner", "remove_rule": "rule:admin_or_owner", } rules_dict.update(**kwargs) self.rules = oslo_policy.Rules.from_dict(rules_dict) def test_firewall_policy_insert_rule_with_admin_context(self): action = "insert_rule" target = {} result = policy.check(context.get_admin_context(), action, target) self.assertTrue(result) def test_firewall_policy_insert_rule_with_owner(self): action = "insert_rule" target = {"tenant_id": "own_tenant"} user_context = context.Context('', "own_tenant", roles=['user']) result = policy.check(user_context, action, target) self.assertTrue(result) def test_firewall_policy_remove_rule_without_admin_or_owner(self): action = 
"remove_rule" target = {"firewall_rule_id": "rule_id", "tenant_id": "tenantA"} user_context = context.Context('', "another_tenant", roles=['user']) result = policy.check(user_context, action, target) self.assertFalse(result) def _test_action_on_attr(self, context, action, obj, attr, value, exception=None, **kwargs): action = "%s_%s" % (action, obj) target = {'tenant_id': 'the_owner', attr: value} if kwargs: target.update(kwargs) if exception: self.assertRaises(exception, policy.enforce, context, action, target) else: result = policy.enforce(context, action, target) self.assertTrue(result) def _test_nonadmin_action_on_attr(self, action, attr, value, exception=None, **kwargs): user_context = context.Context('', "user", roles=['user']) self._test_action_on_attr(user_context, action, "network", attr, value, exception, **kwargs) def _test_advsvc_action_on_attr(self, action, obj, attr, value, exception=None, **kwargs): user_context = context.Context('', "user", roles=['user', 'advsvc']) self._test_action_on_attr(user_context, action, obj, attr, value, exception, **kwargs) def test_nonadmin_write_on_private_fails(self): self._test_nonadmin_action_on_attr('create', 'shared', False, oslo_policy.PolicyNotAuthorized) def test_nonadmin_read_on_private_fails(self): self._test_nonadmin_action_on_attr('get', 'shared', False, oslo_policy.PolicyNotAuthorized) def test_nonadmin_write_on_shared_fails(self): self._test_nonadmin_action_on_attr('create', 'shared', True, oslo_policy.PolicyNotAuthorized) def test_create_port_device_owner_regex(self): blocked_values = (const.DEVICE_OWNER_NETWORK_PREFIX, 'network:abdef', const.DEVICE_OWNER_DHCP, const.DEVICE_OWNER_ROUTER_INTF) for val in blocked_values: self._test_advsvc_action_on_attr( 'create', 'port', 'device_owner', val, oslo_policy.PolicyNotAuthorized ) ok_values = ('network', 'networks', 'my_network:test', 'my_network:') for val in ok_values: self._test_advsvc_action_on_attr( 'create', 'port', 'device_owner', val ) def test_advsvc_get_network_works(self): self._test_advsvc_action_on_attr('get', 'network', 'shared', False) def test_advsvc_create_network_fails(self): self._test_advsvc_action_on_attr('create', 'network', 'shared', False, oslo_policy.PolicyNotAuthorized) def test_advsvc_create_port_works(self): self._test_advsvc_action_on_attr('create', 'port:mac', 'shared', False) def test_advsvc_get_port_works(self): self._test_advsvc_action_on_attr('get', 'port', 'shared', False) def test_advsvc_update_port_works(self): kwargs = {const.ATTRIBUTES_TO_UPDATE: ['shared']} self._test_advsvc_action_on_attr('update', 'port', 'shared', True, **kwargs) def test_advsvc_delete_port_works(self): self._test_advsvc_action_on_attr('delete', 'port', 'shared', False) def test_advsvc_create_subnet_fails(self): self._test_advsvc_action_on_attr('create', 'subnet', 'shared', False, oslo_policy.PolicyNotAuthorized) def test_nonadmin_read_on_shared_succeeds(self): self._test_nonadmin_action_on_attr('get', 'shared', True) def test_check_is_admin_with_admin_context_succeeds(self): admin_context = context.get_admin_context() # explicitly set roles as this test verifies user credentials # with the policy engine admin_context.roles = ['admin'] self.assertTrue(policy.check_is_admin(admin_context)) def test_check_is_admin_with_user_context_fails(self): self.assertFalse(policy.check_is_admin(self.context)) def test_check_is_admin_with_no_admin_policy_fails(self): del self.rules[policy.ADMIN_CTX_POLICY] admin_context = context.get_admin_context() 
self.assertFalse(policy.check_is_admin(admin_context)) def test_check_is_advsvc_with_admin_context_fails(self): admin_context = context.get_admin_context() self.assertFalse(policy.check_is_advsvc(admin_context)) def test_check_is_advsvc_with_svc_context_succeeds(self): svc_context = context.Context('', 'svc', roles=['advsvc']) self.assertTrue(policy.check_is_advsvc(svc_context)) def test_check_is_advsvc_with_no_advsvc_policy_fails(self): del self.rules[policy.ADVSVC_CTX_POLICY] svc_context = context.Context('', 'svc', roles=['advsvc']) self.assertFalse(policy.check_is_advsvc(svc_context)) def test_check_is_advsvc_with_user_context_fails(self): self.assertFalse(policy.check_is_advsvc(self.context)) def _test_enforce_adminonly_attribute(self, action, **kwargs): admin_context = context.get_admin_context() target = {'shared': True} if kwargs: target.update(kwargs) result = policy.enforce(admin_context, action, target) self.assertTrue(result) def test_enforce_adminonly_attribute_create(self): self._test_enforce_adminonly_attribute('create_network') def test_enforce_adminonly_attribute_update(self): kwargs = {const.ATTRIBUTES_TO_UPDATE: ['shared']} self._test_enforce_adminonly_attribute('update_network', **kwargs) def test_reset_adminonly_attr_to_default_fails(self): kwargs = {const.ATTRIBUTES_TO_UPDATE: ['shared']} self._test_nonadmin_action_on_attr('update', 'shared', False, oslo_policy.PolicyNotAuthorized, **kwargs) def test_enforce_adminonly_attribute_nonadminctx_returns_403(self): action = "create_network" target = {'shared': True, 'tenant_id': 'somebody_else'} self.assertRaises(oslo_policy.PolicyNotAuthorized, policy.enforce, self.context, action, target) def _test_build_subattribute_match_rule(self, validate_value): bk = FAKE_RESOURCES['%ss' % FAKE_RESOURCE_NAME]['attr']['validate'] FAKE_RESOURCES['%ss' % FAKE_RESOURCE_NAME]['attr']['validate'] = ( validate_value) action = "create_" + FAKE_RESOURCE_NAME target = {'tenant_id': 'fake', 'attr': {'sub_attr_1': 'x'}} self.assertFalse(policy._build_subattr_match_rule( 'attr', FAKE_RESOURCES['%ss' % FAKE_RESOURCE_NAME]['attr'], action, target)) FAKE_RESOURCES['%ss' % FAKE_RESOURCE_NAME]['attr']['validate'] = bk def test_build_subattribute_match_rule_empty_dict_validator(self): self._test_build_subattribute_match_rule({}) def test_build_subattribute_match_rule_wrong_validation_info(self): self._test_build_subattribute_match_rule( {'type:dict': 'wrong_stuff'}) def test_build_match_rule_special_pluralized(self): action = "create_" + FAKE_SPECIAL_RESOURCE_NAME pluralized = "create_fake_policies" target = {} result = policy._build_match_rule(action, target, pluralized) self.assertEqual("rule:" + action, str(result)) def test_build_match_rule_normal_pluralized_when_create(self): action = "create_" + FAKE_RESOURCE_NAME target = {} result = policy._build_match_rule(action, target, None) self.assertEqual("rule:" + action, str(result)) def test_enforce_subattribute(self): action = "create_" + FAKE_RESOURCE_NAME target = {'tenant_id': 'fake', 'attr': {'sub_attr_1': 'x'}} result = policy.enforce(self.context, action, target, None) self.assertTrue(result) def test_enforce_admin_only_subattribute(self): action = "create_" + FAKE_RESOURCE_NAME target = {'tenant_id': 'fake', 'attr': {'sub_attr_1': 'x', 'sub_attr_2': 'y'}} result = policy.enforce(context.get_admin_context(), action, target, None) self.assertTrue(result) def test_enforce_admin_only_subattribute_nonadminctx_returns_403(self): action = "create_" + FAKE_RESOURCE_NAME target = {'tenant_id': 'fake', 
                  'attr': {'sub_attr_1': 'x',
                           'sub_attr_2': 'y'}}
        self.assertRaises(oslo_policy.PolicyNotAuthorized,
                          policy.enforce,
                          self.context, action, target, None)

    def test_enforce_regularuser_on_read(self):
        action = "get_network"
        target = {'shared': True, 'tenant_id': 'somebody_else'}
        result = policy.enforce(self.context, action, target)
        self.assertTrue(result)

    def test_enforce_firewall_policy_shared(self):
        action = "get_firewall_policy"
        target = {'shared': True, 'tenant_id': 'somebody_else'}
        result = policy.enforce(self.context, action, target)
        self.assertTrue(result)

    def test_enforce_firewall_rule_shared(self):
        action = "get_firewall_rule"
        target = {'shared': True, 'tenant_id': 'somebody_else'}
        result = policy.enforce(self.context, action, target)
        self.assertTrue(result)

    def test_enforce_tenant_id_check(self):
        # Trigger a policy with rule admin_or_owner
        action = "create_network"
        target = {'tenant_id': 'fake'}
        result = policy.enforce(self.context, action, target)
        self.assertTrue(result)

    def test_enforce_tenant_id_check_parent_resource(self):

        def fakegetnetwork(*args, **kwargs):
            return {'tenant_id': 'fake'}

        action = "create_port:mac"
        with mock.patch.object(manager.NeutronManager.get_instance().plugin,
                               'get_network', new=fakegetnetwork):
            target = {'network_id': 'whatever'}
            result = policy.enforce(self.context, action, target)
            self.assertTrue(result)

    def test_enforce_plugin_failure(self):

        def fakegetnetwork(*args, **kwargs):
            raise NotImplementedError('Blast!')

        # The policy check and plugin method used in this test are
        # irrelevant, so long as we verify that, if *f* blows up, the
        # policy engine still propagates the exception.
        action = "create_port:mac"
        with mock.patch.object(manager.NeutronManager.get_instance().plugin,
                               'get_network', new=fakegetnetwork):
            target = {'network_id': 'whatever'}
            self.assertRaises(NotImplementedError,
                              policy.enforce,
                              self.context, action, target)

    def test_retryrequest_on_notfound(self):
        failure = exceptions.NetworkNotFound(net_id='whatever')
        action = "create_port:mac"
        with mock.patch.object(manager.NeutronManager.get_instance().plugin,
                               'get_network', side_effect=failure):
            target = {'network_id': 'whatever'}
            try:
                policy.enforce(self.context, action, target)
                self.fail("Did not raise RetryRequest")
            except db_exc.RetryRequest as e:
                self.assertEqual(failure, e.inner_exc)

    def test_enforce_tenant_id_check_parent_resource_bw_compatibility(self):

        def fakegetnetwork(*args, **kwargs):
            return {'tenant_id': 'fake'}

        self._set_rules(
            admin_or_network_owner="role:admin or "
                                   "tenant_id:%(network_tenant_id)s")
        action = "create_port:mac"
        with mock.patch.object(manager.NeutronManager.get_instance().plugin,
                               'get_network', new=fakegetnetwork):
            target = {'network_id': 'whatever'}
            result = policy.enforce(self.context, action, target)
            self.assertTrue(result)

    def test_tenant_id_check_no_target_field_raises(self):
        # Try and add a bad rule
        self.assertRaises(
            exceptions.PolicyInitError,
            oslo_policy.Rules.from_dict,
            {'test_policy': 'tenant_id:(wrong_stuff)'})

    def _test_enforce_tenant_id_raises(self, bad_rule):
        self._set_rules(admin_or_owner=bad_rule)
        # Trigger a policy with rule admin_or_owner
        action = "create_network"
        target = {'tenant_id': 'fake'}
        self.fakepolicyinit()
        self.assertRaises(exceptions.PolicyCheckError,
                          policy.enforce,
                          self.context, action, target)

    def test_enforce_tenant_id_check_malformed_target_field_raises(self):
        self._test_enforce_tenant_id_raises('tenant_id:%(malformed_field)s')

    def test_enforce_tenant_id_check_invalid_parent_resource_raises(self):
self._test_enforce_tenant_id_raises('tenant_id:%(foobaz_tenant_id)s') def test_process_rules(self): action = "create_" + FAKE_RESOURCE_NAME # Construct RuleChecks for an action, attribute and subattribute match_rule = oslo_policy.RuleCheck('rule', action) attr_rule = oslo_policy.RuleCheck( 'rule', '%s:%ss' % (action, FAKE_RESOURCE_NAME)) sub_attr_rules = [oslo_policy.RuleCheck( 'rule', '%s:%s:%s' % (action, 'attr', 'sub_attr_1'))] # Build an AndCheck from the given RuleChecks # Make the checks nested to better check the recursion sub_attr_rules = oslo_policy.AndCheck(sub_attr_rules) attr_rule = oslo_policy.AndCheck( [attr_rule, sub_attr_rules]) match_rule = oslo_policy.AndCheck([match_rule, attr_rule]) # Assert that the rules are correctly extracted from the match_rule rules = policy._process_rules_list([], match_rule) self.assertEqual(['create_fake_resource', 'create_fake_resource:fake_resources', 'create_fake_resource:attr:sub_attr_1'], rules) @mock.patch.object(policy.LOG, 'isEnabledFor', return_value=True) @mock.patch.object(policy.LOG, 'debug') def test_log_rule_list(self, mock_debug, mock_is_e): policy.log_rule_list(oslo_policy.RuleCheck('rule', 'create_')) self.assertTrue(mock_is_e.called) self.assertTrue(mock_debug.called) def test__is_attribute_explicitly_set(self): action = 'create' attr = 'attr' target = {attr: 'valueA', 'tgt-tenant': 'tenantA'} resource = {attr: {'allow_post': True, 'allow_put': True, 'is_visible': True, 'enforce_policy': True, 'validate': {'type:string': 10}}} result = policy._is_attribute_explicitly_set( attr, resource, target, action) self.assertTrue(result) target = {'tgt-tenant': 'tenantA'} result = policy._is_attribute_explicitly_set( attr, resource, target, action) self.assertFalse(result) resource = {attr: {'allow_post': True, 'allow_put': True, 'is_visible': True, 'default': 'DfltValue', 'enforce_policy': True, 'validate': {'type:string': 10}}} result = policy._is_attribute_explicitly_set( attr, resource, target, action) self.assertFalse(result) target = {attr: 'DfltValue', 'tgt-tenant': 'tenantA'} result = policy._is_attribute_explicitly_set( attr, resource, target, action) self.assertFalse(result) target = {attr: attributes.ATTR_NOT_SPECIFIED, 'tgt-tenant': 'tenantA'} result = policy._is_attribute_explicitly_set( attr, resource, target, action) self.assertFalse(result) neutron-8.4.0/neutron/tests/unit/testlib_api.py0000664000567000056710000000706513044372760023060 0ustar jenkinsjenkins00000000000000# Copyright 2012 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
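# The policy tests above revolve around oslo.policy rule templating:
# "role:admin" is resolved against the request credentials, while
# "tenant_id:%(tenant_id)s" is filled in from the *target* dict and then
# compared against the credentials. A small self-contained sketch of
# that flow through the public oslo.policy API (the rule name and tenant
# ids below are made up; this assumes stock oslo.policy behavior, not
# neutron's policy wrapper):

from oslo_config import cfg
from oslo_policy import policy as oslo_policy

enforcer = oslo_policy.Enforcer(cfg.CONF)
enforcer.set_rules(oslo_policy.Rules.from_dict(
    {'demo:update_thing': 'role:admin or tenant_id:%(tenant_id)s'}))

creds = {'roles': ['member'], 'tenant_id': 'tenant-a'}
# The owner matches the templated tenant_id check -> allowed.
assert enforcer.enforce('demo:update_thing',
                        {'tenant_id': 'tenant-a'}, creds)
# Someone else's resource and no admin role -> denied.
assert not enforcer.enforce('demo:update_thing',
                            {'tenant_id': 'tenant-b'}, creds)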
import fixtures import six import testtools from neutron.db import api as db_api # Import all data models from neutron.db.migration.models import head # noqa from neutron.db import model_base from neutron.tests import base from neutron import wsgi class ExpectedException(testtools.ExpectedException): def __enter__(self): return self def __exit__(self, exc_type, exc_value, traceback): if super(ExpectedException, self).__exit__(exc_type, exc_value, traceback): self.exception = exc_value return True return False def create_request(path, body, content_type, method='GET', query_string=None, context=None): if query_string: url = "%s?%s" % (path, query_string) else: url = path req = wsgi.Request.blank(url) req.method = method req.headers = {} req.headers['Accept'] = content_type if isinstance(body, six.text_type): req.body = body.encode() else: req.body = body if context: req.environ['neutron.context'] = context return req class SqlFixture(fixtures.Fixture): # flag to indicate that the models have been loaded _TABLES_ESTABLISHED = False def _setUp(self): # Register all data models engine = db_api.get_engine() if not SqlFixture._TABLES_ESTABLISHED: model_base.BASEV2.metadata.create_all(engine) SqlFixture._TABLES_ESTABLISHED = True def clear_tables(): with engine.begin() as conn: for table in reversed( model_base.BASEV2.metadata.sorted_tables): conn.execute(table.delete()) self.addCleanup(clear_tables) class SqlTestCaseLight(base.DietTestCase): """All SQL taste, zero plugin/rpc sugar""" def setUp(self): super(SqlTestCaseLight, self).setUp() self.useFixture(SqlFixture()) class SqlTestCase(base.BaseTestCase): def setUp(self): super(SqlTestCase, self).setUp() self.useFixture(SqlFixture()) class WebTestCase(SqlTestCase): fmt = 'json' def setUp(self): super(WebTestCase, self).setUp() json_deserializer = wsgi.JSONDeserializer() self._deserializers = { 'application/json': json_deserializer, } def deserialize(self, response): ctype = 'application/%s' % self.fmt data = self._deserializers[ctype].deserialize(response.body)['body'] return data def serialize(self, data): ctype = 'application/%s' % self.fmt result = wsgi.Serializer().serialize(data, ctype) return result class SubDictMatch(object): def __init__(self, sub_dict): self.sub_dict = sub_dict def __eq__(self, super_dict): return all(item in super_dict.items() for item in self.sub_dict.items()) neutron-8.4.0/neutron/tests/unit/debug/0000775000567000056710000000000013044373210021254 5ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/tests/unit/debug/__init__.py0000664000567000056710000000000013044372736023367 0ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/tests/unit/debug/test_commands.py0000664000567000056710000003726013044372760024507 0ustar jenkinsjenkins00000000000000# Copyright 2012, Nachi Ueno, NTT MCL, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
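# SubDictMatch, defined in testlib_api.py above, compares equal to any
# dict that contains all of its items, which makes it convenient for
# loose assertions on mock calls. A short usage sketch (the notifier
# mock and its arguments are hypothetical, for illustration only):

import mock

from neutron.tests.unit import testlib_api

notifier = mock.Mock()
notifier.record({'event': 'port_create', 'tenant_id': 't1',
                 'port_id': 'p1'})
# Passes because the recorded dict is a superset of the expected one;
# the extra keys are simply ignored by SubDictMatch.__eq__().
notifier.record.assert_called_once_with(
    testlib_api.SubDictMatch({'event': 'port_create'}))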
import socket import mock from oslo_config import cfg from neutron.agent.common import config from neutron.agent.linux import interface from neutron.common import config as common_config from neutron.debug import commands from neutron.debug import debug_agent from neutron.extensions import portbindings from neutron.tests import base class MyApp(object): def __init__(self, _stdout): self.stdout = _stdout class TestDebugCommands(base.BaseTestCase): def setUp(self): super(TestDebugCommands, self).setUp() cfg.CONF.register_opts(interface.OPTS) cfg.CONF.register_opts(config.EXT_NET_BRIDGE_OPTS) common_config.init([]) config.register_interface_driver_opts_helper(cfg.CONF) device_exists_p = mock.patch( 'neutron.agent.linux.ip_lib.device_exists', return_value=False) device_exists_p.start() namespace_p = mock.patch( 'neutron.agent.linux.ip_lib.IpNetnsCommand') namespace_p.start() ensure_namespace_p = mock.patch( 'neutron.agent.linux.ip_lib.IPWrapper.ensure_namespace') ensure_namespace_p.start() dvr_cls_p = mock.patch('neutron.agent.linux.interface.NullDriver') driver_cls = dvr_cls_p.start() mock_driver = mock.MagicMock() mock_driver.DEV_NAME_LEN = ( interface.LinuxInterfaceDriver.DEV_NAME_LEN) mock_driver.get_device_name.return_value = 'tap12345678-12' driver_cls.return_value = mock_driver self.driver = mock_driver client_cls_p = mock.patch('neutronclient.v2_0.client.Client') client_cls = client_cls_p.start() client_inst = mock.Mock() client_cls.return_value = client_inst fake_network = {'network': {'id': 'fake_net', 'tenant_id': 'fake_tenant', 'subnets': ['fake_subnet']}} fake_port = {'port': {'id': 'fake_port', 'device_owner': 'fake_device', 'mac_address': 'aa:bb:cc:dd:ee:ffa', 'network_id': 'fake_net', 'fixed_ips': [{'subnet_id': 'fake_subnet', 'ip_address': '10.0.0.3'}] }} fake_ports = {'ports': [fake_port['port']]} self.fake_ports = fake_ports allocation_pools = [{'start': '10.0.0.2', 'end': '10.0.0.254'}] fake_subnet_v4 = {'subnet': {'name': 'fake_subnet_v4', 'id': 'fake_subnet', 'network_id': 'fake_net', 'gateway_ip': '10.0.0.1', 'dns_nameservers': ['10.0.0.2'], 'host_routes': [], 'cidr': '10.0.0.0/24', 'allocation_pools': allocation_pools, 'enable_dhcp': True, 'ip_version': 4}} client_inst.list_ports.return_value = fake_ports client_inst.create_port.return_value = fake_port client_inst.show_port.return_value = fake_port client_inst.show_network.return_value = fake_network client_inst.show_subnet.return_value = fake_subnet_v4 self.client = client_inst mock_std = mock.Mock() self.app = MyApp(mock_std) self.app.debug_agent = debug_agent.NeutronDebugAgent(cfg.CONF, client_inst, mock_driver) def _test_create_probe(self, device_owner): cmd = commands.CreateProbe(self.app, None) cmd_parser = cmd.get_parser('create_probe') if device_owner == debug_agent.DEVICE_OWNER_COMPUTE_PROBE: args = ['fake_net', '--device-owner', 'compute'] else: args = ['fake_net'] parsed_args = cmd_parser.parse_args(args) cmd.run(parsed_args) fake_port = {'port': {'device_owner': device_owner, 'admin_state_up': True, 'network_id': 'fake_net', 'tenant_id': 'fake_tenant', portbindings.HOST_ID: cfg.CONF.host, 'fixed_ips': [{'subnet_id': 'fake_subnet'}], 'device_id': socket.gethostname()}} namespace = 'qprobe-fake_port' self.client.assert_has_calls([mock.call.show_network('fake_net'), mock.call.show_subnet('fake_subnet'), mock.call.create_port(fake_port), mock.call.show_subnet('fake_subnet')]) self.driver.assert_has_calls([mock.call.get_device_name(mock.ANY), mock.call.plug('fake_net', 'fake_port', 'tap12345678-12', 
'aa:bb:cc:dd:ee:ffa', bridge=None, namespace=namespace), mock.call.init_l3('tap12345678-12', ['10.0.0.3/24'], namespace=namespace )]) def test_create_network_probe(self): self._test_create_probe(debug_agent.DEVICE_OWNER_NETWORK_PROBE) def test_create_nova_probe(self): self._test_create_probe(debug_agent.DEVICE_OWNER_COMPUTE_PROBE) def _test_create_probe_external(self, device_owner): fake_network = {'network': {'id': 'fake_net', 'tenant_id': 'fake_tenant', 'router:external': True, 'subnets': ['fake_subnet']}} self.client.show_network.return_value = fake_network cmd = commands.CreateProbe(self.app, None) cmd_parser = cmd.get_parser('create_probe') if device_owner == debug_agent.DEVICE_OWNER_COMPUTE_PROBE: args = ['fake_net', '--device-owner', 'compute'] else: args = ['fake_net'] parsed_args = cmd_parser.parse_args(args) cmd.run(parsed_args) fake_port = {'port': {'device_owner': device_owner, 'admin_state_up': True, 'network_id': 'fake_net', 'tenant_id': 'fake_tenant', portbindings.HOST_ID: cfg.CONF.host, 'fixed_ips': [{'subnet_id': 'fake_subnet'}], 'device_id': socket.gethostname()}} namespace = 'qprobe-fake_port' self.client.assert_has_calls([mock.call.show_network('fake_net'), mock.call.show_subnet('fake_subnet'), mock.call.create_port(fake_port), mock.call.show_subnet('fake_subnet')]) self.driver.assert_has_calls([mock.call.get_device_name(mock.ANY), mock.call.plug('fake_net', 'fake_port', 'tap12345678-12', 'aa:bb:cc:dd:ee:ffa', bridge='br-ex', namespace=namespace), mock.call.init_l3('tap12345678-12', ['10.0.0.3/24'], namespace=namespace )]) def test_create_network_probe_external(self): self._test_create_probe_external( debug_agent.DEVICE_OWNER_NETWORK_PROBE) def test_create_nova_probe_external(self): self._test_create_probe_external( debug_agent.DEVICE_OWNER_COMPUTE_PROBE) def test_delete_probe(self): cmd = commands.DeleteProbe(self.app, None) cmd_parser = cmd.get_parser('delete_probe') args = ['fake_port'] parsed_args = cmd_parser.parse_args(args) cmd.run(parsed_args) namespace = 'qprobe-fake_port' self.client.assert_has_calls([mock.call.show_port('fake_port'), mock.call.show_network('fake_net'), mock.call.show_subnet('fake_subnet'), mock.call.delete_port('fake_port')]) self.driver.assert_has_calls([mock.call.get_device_name(mock.ANY), mock.call.unplug('tap12345678-12', namespace=namespace, bridge=None)]) def test_delete_probe_external(self): fake_network = {'network': {'id': 'fake_net', 'tenant_id': 'fake_tenant', 'router:external': True, 'subnets': ['fake_subnet']}} self.client.show_network.return_value = fake_network cmd = commands.DeleteProbe(self.app, None) cmd_parser = cmd.get_parser('delete_probe') args = ['fake_port'] parsed_args = cmd_parser.parse_args(args) cmd.run(parsed_args) namespace = 'qprobe-fake_port' self.client.assert_has_calls([mock.call.show_port('fake_port'), mock.call.show_network('fake_net'), mock.call.show_subnet('fake_subnet'), mock.call.delete_port('fake_port')]) self.driver.assert_has_calls([mock.call.get_device_name(mock.ANY), mock.call.unplug('tap12345678-12', namespace=namespace, bridge='br-ex')]) def test_list_probe(self): cmd = commands.ListProbe(self.app, None) cmd_parser = cmd.get_parser('list_probe') args = [] parsed_args = cmd_parser.parse_args(args) cmd.run(parsed_args) self.client.assert_has_calls( [mock.call.list_ports( device_owner=[debug_agent.DEVICE_OWNER_NETWORK_PROBE, debug_agent.DEVICE_OWNER_COMPUTE_PROBE])]) def test_exec_command(self): cmd = commands.ExecProbe(self.app, None) cmd_parser = cmd.get_parser('exec_command') args = ['fake_port', 
'fake_command'] parsed_args = cmd_parser.parse_args(args) with mock.patch('neutron.agent.linux.ip_lib.IpNetnsCommand') as ns: cmd.run(parsed_args) ns.assert_has_calls([mock.call.execute(mock.ANY)]) self.client.assert_has_calls([mock.call.show_port('fake_port')]) def test_clear_probe(self): cmd = commands.ClearProbe(self.app, None) cmd_parser = cmd.get_parser('clear_probe') args = [] parsed_args = cmd_parser.parse_args(args) cmd.run(parsed_args) namespace = 'qprobe-fake_port' self.client.assert_has_calls( [mock.call.list_ports( device_id=socket.gethostname(), device_owner=[debug_agent.DEVICE_OWNER_NETWORK_PROBE, debug_agent.DEVICE_OWNER_COMPUTE_PROBE]), mock.call.show_port('fake_port'), mock.call.show_network('fake_net'), mock.call.show_subnet('fake_subnet'), mock.call.delete_port('fake_port')]) self.driver.assert_has_calls([mock.call.get_device_name(mock.ANY), mock.call.unplug('tap12345678-12', namespace=namespace, bridge=None)]) def test_ping_all_with_ensure_port(self): fake_ports = self.fake_ports def fake_port_list(network_id=None, device_owner=None, device_id=None): if network_id: # In order to test ensure_port, return [] return {'ports': []} return fake_ports self.client.list_ports.side_effect = fake_port_list cmd = commands.PingAll(self.app, None) cmd_parser = cmd.get_parser('ping_all') args = [] parsed_args = cmd_parser.parse_args(args) namespace = 'qprobe-fake_port' with mock.patch('neutron.agent.linux.ip_lib.IpNetnsCommand') as ns: cmd.run(parsed_args) ns.assert_has_calls([mock.call.execute(mock.ANY)]) fake_port = {'port': {'device_owner': debug_agent.DEVICE_OWNER_NETWORK_PROBE, 'admin_state_up': True, 'network_id': 'fake_net', 'tenant_id': 'fake_tenant', portbindings.HOST_ID: cfg.CONF.host, 'fixed_ips': [{'subnet_id': 'fake_subnet'}], 'device_id': socket.gethostname()}} expected = [mock.call.show_network('fake_net'), mock.call.show_subnet('fake_subnet'), mock.call.create_port(fake_port), mock.call.show_subnet('fake_subnet')] self.client.assert_has_calls(expected) self.driver.assert_has_calls([mock.call.init_l3('tap12345678-12', ['10.0.0.3/24'], namespace=namespace )]) def test_ping_all(self): cmd = commands.PingAll(self.app, None) cmd_parser = cmd.get_parser('ping_all') args = [] parsed_args = cmd_parser.parse_args(args) with mock.patch('neutron.agent.linux.ip_lib.IpNetnsCommand') as ns: cmd.run(parsed_args) ns.assert_has_calls([mock.call.execute(mock.ANY)]) expected = [mock.call.list_ports(), mock.call.list_ports( network_id='fake_net', device_owner=debug_agent.DEVICE_OWNER_NETWORK_PROBE, device_id=socket.gethostname()), mock.call.show_subnet('fake_subnet'), mock.call.show_port('fake_port')] self.client.assert_has_calls(expected) def test_ping_all_v6(self): fake_subnet_v6 = {'subnet': {'name': 'fake_v6', 'ip_version': 6}} self.client.show_subnet.return_value = fake_subnet_v6 cmd = commands.PingAll(self.app, None) cmd_parser = cmd.get_parser('ping_all') args = [] parsed_args = cmd_parser.parse_args(args) with mock.patch('neutron.agent.linux.ip_lib.IpNetnsCommand') as ns: cmd.run(parsed_args) ns.assert_has_calls([mock.call.execute(mock.ANY)]) self.client.assert_has_calls([mock.call.list_ports()]) neutron-8.4.0/neutron/tests/unit/common/0000775000567000056710000000000013044373210021456 5ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/tests/unit/common/__init__.py0000664000567000056710000000000013044372736023571 0ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/tests/unit/common/test_ipv6_utils.py0000664000567000056710000001317613044372760025214 0ustar 
jenkinsjenkins00000000000000# Copyright 2013 IBM Corp. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import collections import mock from neutron.common import constants from neutron.common import ipv6_utils from neutron.tests import base from neutron.tests import tools class IPv6byEUI64TestCase(base.BaseTestCase): """Unit tests for generate IPv6 by EUI-64 operations.""" def test_generate_IPv6_by_EUI64(self): addr = ipv6_utils.get_ipv6_addr_by_EUI64('2001:db8::', '00:16:3e:33:44:55') self.assertEqual('2001:db8::216:3eff:fe33:4455', addr.format()) def test_generate_IPv6_with_IPv4_prefix(self): ipv4_prefix = '10.0.8' mac = '00:16:3e:33:44:55' self.assertRaises(TypeError, lambda: ipv6_utils.get_ipv6_addr_by_EUI64(ipv4_prefix, mac)) def test_generate_IPv6_with_bad_mac(self): bad_mac = '00:16:3e:33:44:5Z' prefix = '2001:db8::' self.assertRaises(TypeError, lambda: ipv6_utils.get_ipv6_addr_by_EUI64(prefix, bad_mac)) def test_generate_IPv6_with_bad_prefix(self): mac = '00:16:3e:33:44:55' bad_prefix = 'bb' self.assertRaises(TypeError, lambda: ipv6_utils.get_ipv6_addr_by_EUI64(bad_prefix, mac)) def test_generate_IPv6_with_error_prefix_type(self): mac = '00:16:3e:33:44:55' prefix = 123 self.assertRaises(TypeError, lambda: ipv6_utils.get_ipv6_addr_by_EUI64(prefix, mac)) class TestIsEnabled(base.BaseTestCase): def setUp(self): super(TestIsEnabled, self).setUp() def reset_detection_flag(): ipv6_utils._IS_IPV6_ENABLED = None reset_detection_flag() self.addCleanup(reset_detection_flag) self.mock_exists = mock.patch("os.path.exists", return_value=True).start() self.proc_path = '/proc/sys/net/ipv6/conf/default/disable_ipv6' def test_enabled(self): self.useFixture(tools.OpenFixture(self.proc_path, '0')) enabled = ipv6_utils.is_enabled() self.assertTrue(enabled) def test_disabled(self): self.useFixture(tools.OpenFixture(self.proc_path, '1')) enabled = ipv6_utils.is_enabled() self.assertFalse(enabled) def test_disabled_non_exists(self): mo = self.useFixture(tools.OpenFixture(self.proc_path, '1')).mock_open self.mock_exists.return_value = False enabled = ipv6_utils.is_enabled() self.assertFalse(enabled) self.assertFalse(mo.called) def test_memoize(self): mo = self.useFixture(tools.OpenFixture(self.proc_path, '0')).mock_open ipv6_utils.is_enabled() enabled = ipv6_utils.is_enabled() self.assertTrue(enabled) mo.assert_called_once_with(self.proc_path, 'r') class TestIsAutoAddressSubnet(base.BaseTestCase): def setUp(self): self.subnet = { 'cidr': '2001:200::/64', 'gateway_ip': '2001:200::1', 'ip_version': 6, 'ipv6_address_mode': None, 'ipv6_ra_mode': None } super(TestIsAutoAddressSubnet, self).setUp() def test_combinations(self): Mode = collections.namedtuple('Mode', "addr_mode ra_mode " "is_auto_address") subnets = [ Mode(None, None, False), Mode(constants.DHCPV6_STATEFUL, None, False), Mode(constants.DHCPV6_STATELESS, None, True), Mode(constants.IPV6_SLAAC, None, True), Mode(None, constants.DHCPV6_STATEFUL, False), Mode(None, constants.DHCPV6_STATELESS, True), Mode(None, constants.IPV6_SLAAC, True), 
Mode(constants.DHCPV6_STATEFUL, constants.DHCPV6_STATEFUL, False), Mode(constants.DHCPV6_STATELESS, constants.DHCPV6_STATELESS, True), Mode(constants.IPV6_SLAAC, constants.IPV6_SLAAC, True), ] for subnet in subnets: self.subnet['ipv6_address_mode'] = subnet.addr_mode self.subnet['ipv6_ra_mode'] = subnet.ra_mode self.assertEqual(subnet.is_auto_address, ipv6_utils.is_auto_address_subnet(self.subnet)) class TestIsEui64Address(base.BaseTestCase): def _test_eui_64(self, ips, expected): for ip in ips: self.assertEqual(expected, ipv6_utils.is_eui64_address(ip), "Error on %s" % ip) def test_valid_eui64_addresses(self): ips = ('fffe::0cad:12ff:fe44:5566', ipv6_utils.get_ipv6_addr_by_EUI64('2001:db8::', '00:16:3e:33:44:55')) self._test_eui_64(ips, True) def test_invalid_eui64_addresses(self): ips = ('192.168.1.1', '192.168.1.0', '255.255.255.255', '0.0.0.0', 'fffe::', 'ff80::1', 'fffe::0cad:12ff:ff44:5566', 'fffe::0cad:12fe:fe44:5566', 'ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff') self._test_eui_64(ips, False) neutron-8.4.0/neutron/tests/unit/common/test_utils.py0000664000567000056710000007346213044372760024254 0ustar jenkinsjenkins00000000000000# Copyright (c) 2012 OpenStack Foundation. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import errno import inspect import re import eventlet import mock import netaddr from oslo_log import log as logging import six import testscenarios import testtools from neutron.common import constants from neutron.common import exceptions as n_exc from neutron.common import utils from neutron.plugins.common import constants as p_const from neutron.plugins.common import utils as plugin_utils from neutron.tests import base from neutron.tests.common import helpers load_tests = testscenarios.load_tests_apply_scenarios class TestParseMappings(base.BaseTestCase): def parse(self, mapping_list, unique_values=True, unique_keys=True): return utils.parse_mappings(mapping_list, unique_values, unique_keys) def test_parse_mappings_fails_for_missing_separator(self): with testtools.ExpectedException(ValueError): self.parse(['key']) def test_parse_mappings_fails_for_missing_key(self): with testtools.ExpectedException(ValueError): self.parse([':val']) def test_parse_mappings_fails_for_missing_value(self): with testtools.ExpectedException(ValueError): self.parse(['key:']) def test_parse_mappings_fails_for_extra_separator(self): with testtools.ExpectedException(ValueError): self.parse(['key:val:junk']) def test_parse_mappings_fails_for_duplicate_key(self): with testtools.ExpectedException(ValueError): self.parse(['key:val1', 'key:val2']) def test_parse_mappings_fails_for_duplicate_value(self): with testtools.ExpectedException(ValueError): self.parse(['key1:val', 'key2:val']) def test_parse_mappings_succeeds_for_one_mapping(self): self.assertEqual({'key': 'val'}, self.parse(['key:val'])) def test_parse_mappings_succeeds_for_n_mappings(self): self.assertEqual({'key1': 'val1', 'key2': 'val2'}, self.parse(['key1:val1', 'key2:val2'])) def test_parse_mappings_succeeds_for_duplicate_value(self): 
self.assertEqual({'key1': 'val', 'key2': 'val'}, self.parse(['key1:val', 'key2:val'], False)) def test_parse_mappings_succeeds_for_no_mappings(self): self.assertEqual({}, self.parse([''])) def test_parse_mappings_succeeds_for_nonuniq_key(self): self.assertEqual({'key': ['val1', 'val2']}, self.parse(['key:val1', 'key:val2', 'key:val2'], unique_keys=False)) class TestParseTunnelRangesMixin(object): TUN_MIN = None TUN_MAX = None TYPE = None _err_prefix = "Invalid network tunnel range: '%d:%d' - " _err_suffix = "%s is not a valid %s identifier." _err_range = "End of tunnel range is less than start of tunnel range." def _build_invalid_tunnel_range_msg(self, t_range_tuple, n): bad_id = t_range_tuple[n - 1] return (self._err_prefix % t_range_tuple) + (self._err_suffix % (bad_id, self.TYPE)) def _build_range_reversed_msg(self, t_range_tuple): return (self._err_prefix % t_range_tuple) + self._err_range def _verify_range(self, tunnel_range): return plugin_utils.verify_tunnel_range(tunnel_range, self.TYPE) def _check_range_valid_ranges(self, tunnel_range): self.assertIsNone(self._verify_range(tunnel_range)) def _check_range_invalid_ranges(self, bad_range, which): expected_msg = self._build_invalid_tunnel_range_msg(bad_range, which) err = self.assertRaises(n_exc.NetworkTunnelRangeError, self._verify_range, bad_range) self.assertEqual(expected_msg, str(err)) def _check_range_reversed(self, bad_range): err = self.assertRaises(n_exc.NetworkTunnelRangeError, self._verify_range, bad_range) expected_msg = self._build_range_reversed_msg(bad_range) self.assertEqual(expected_msg, str(err)) def test_range_tunnel_id_valid(self): self._check_range_valid_ranges((self.TUN_MIN, self.TUN_MAX)) def test_range_tunnel_id_invalid(self): self._check_range_invalid_ranges((-1, self.TUN_MAX), 1) self._check_range_invalid_ranges((self.TUN_MIN, self.TUN_MAX + 1), 2) self._check_range_invalid_ranges((self.TUN_MIN - 1, self.TUN_MAX + 1), 1) def test_range_tunnel_id_reversed(self): self._check_range_reversed((self.TUN_MAX, self.TUN_MIN)) class TestGreTunnelRangeVerifyValid(TestParseTunnelRangesMixin, base.BaseTestCase): TUN_MIN = p_const.MIN_GRE_ID TUN_MAX = p_const.MAX_GRE_ID TYPE = p_const.TYPE_GRE class TestVxlanTunnelRangeVerifyValid(TestParseTunnelRangesMixin, base.BaseTestCase): TUN_MIN = p_const.MIN_VXLAN_VNI TUN_MAX = p_const.MAX_VXLAN_VNI TYPE = p_const.TYPE_VXLAN class UtilTestParseVlanRanges(base.BaseTestCase): _err_prefix = "Invalid network VLAN range: '" _err_too_few = "' - 'need more than 2 values to unpack'." _err_too_many_prefix = "' - 'too many values to unpack" _err_not_int = "' - 'invalid literal for int() with base 10: '%s''." _err_bad_vlan = "' - '%s is not a valid VLAN tag'." _err_range = "' - 'End of VLAN range is less than start of VLAN range'." 
def _range_too_few_err(self, nv_range): return self._err_prefix + nv_range + self._err_too_few def _range_too_many_err_prefix(self, nv_range): return self._err_prefix + nv_range + self._err_too_many_prefix def _vlan_not_int_err(self, nv_range, vlan): return self._err_prefix + nv_range + (self._err_not_int % vlan) def _nrange_invalid_vlan(self, nv_range, n): vlan = nv_range.split(':')[n] v_range = ':'.join(nv_range.split(':')[1:]) return self._err_prefix + v_range + (self._err_bad_vlan % vlan) def _vrange_invalid_vlan(self, v_range_tuple, n): vlan = v_range_tuple[n - 1] v_range_str = '%d:%d' % v_range_tuple return self._err_prefix + v_range_str + (self._err_bad_vlan % vlan) def _vrange_invalid(self, v_range_tuple): v_range_str = '%d:%d' % v_range_tuple return self._err_prefix + v_range_str + self._err_range class TestVlanNetworkNameValid(base.BaseTestCase): def parse_vlan_ranges(self, vlan_range): return plugin_utils.parse_network_vlan_ranges(vlan_range) def test_validate_provider_phynet_name_mixed(self): self.assertRaises(n_exc.PhysicalNetworkNameError, self.parse_vlan_ranges, ['', ':23:30', 'physnet1', 'tenant_net:100:200']) def test_validate_provider_phynet_name_bad(self): self.assertRaises(n_exc.PhysicalNetworkNameError, self.parse_vlan_ranges, [':1:34']) class TestVlanRangeVerifyValid(UtilTestParseVlanRanges): def verify_range(self, vlan_range): return plugin_utils.verify_vlan_range(vlan_range) def test_range_valid_ranges(self): self.assertIsNone(self.verify_range((1, 2))) self.assertIsNone(self.verify_range((1, 1999))) self.assertIsNone(self.verify_range((100, 100))) self.assertIsNone(self.verify_range((100, 200))) self.assertIsNone(self.verify_range((4001, 4094))) self.assertIsNone(self.verify_range((1, 4094))) def check_one_vlan_invalid(self, bad_range, which): expected_msg = self._vrange_invalid_vlan(bad_range, which) err = self.assertRaises(n_exc.NetworkVlanRangeError, self.verify_range, bad_range) self.assertEqual(str(err), expected_msg) def test_range_first_vlan_invalid_negative(self): self.check_one_vlan_invalid((-1, 199), 1) def test_range_first_vlan_invalid_zero(self): self.check_one_vlan_invalid((0, 199), 1) def test_range_first_vlan_invalid_limit_plus_one(self): self.check_one_vlan_invalid((4095, 199), 1) def test_range_first_vlan_invalid_too_big(self): self.check_one_vlan_invalid((9999, 199), 1) def test_range_second_vlan_invalid_negative(self): self.check_one_vlan_invalid((299, -1), 2) def test_range_second_vlan_invalid_zero(self): self.check_one_vlan_invalid((299, 0), 2) def test_range_second_vlan_invalid_limit_plus_one(self): self.check_one_vlan_invalid((299, 4095), 2) def test_range_second_vlan_invalid_too_big(self): self.check_one_vlan_invalid((299, 9999), 2) def test_range_both_vlans_invalid_01(self): self.check_one_vlan_invalid((-1, 0), 1) def test_range_both_vlans_invalid_02(self): self.check_one_vlan_invalid((0, 4095), 1) def test_range_both_vlans_invalid_03(self): self.check_one_vlan_invalid((4095, 9999), 1) def test_range_both_vlans_invalid_04(self): self.check_one_vlan_invalid((9999, -1), 1) def test_range_reversed(self): bad_range = (95, 10) expected_msg = self._vrange_invalid(bad_range) err = self.assertRaises(n_exc.NetworkVlanRangeError, self.verify_range, bad_range) self.assertEqual(str(err), expected_msg) class TestParseOneVlanRange(UtilTestParseVlanRanges): def parse_one(self, cfg_entry): return plugin_utils.parse_network_vlan_range(cfg_entry) def test_parse_one_net_no_vlan_range(self): config_str = "net1" expected_networks = ("net1", None) 
        self.assertEqual(expected_networks, self.parse_one(config_str))

    def test_parse_one_net_and_vlan_range(self):
        config_str = "net1:100:199"
        expected_networks = ("net1", (100, 199))
        self.assertEqual(expected_networks, self.parse_one(config_str))

    def test_parse_one_net_incomplete_range(self):
        config_str = "net1:100"
        expected_msg = self._range_too_few_err(config_str)
        err = self.assertRaises(n_exc.NetworkVlanRangeError,
                                self.parse_one, config_str)
        self.assertEqual(expected_msg, str(err))

    def test_parse_one_net_range_too_many(self):
        config_str = "net1:100:150:200"
        expected_msg_prefix = self._range_too_many_err_prefix(config_str)
        err = self.assertRaises(n_exc.NetworkVlanRangeError,
                                self.parse_one, config_str)
        # The error message is not the same in Python 2 and Python 3. In
        # Python 3 it depends on the number of values used when unpacking,
        # so it cannot be predicted as a fixed string.
        self.assertTrue(str(err).startswith(expected_msg_prefix))

    def test_parse_one_net_vlan1_not_int(self):
        config_str = "net1:foo:199"
        expected_msg = self._vlan_not_int_err(config_str, 'foo')
        err = self.assertRaises(n_exc.NetworkVlanRangeError,
                                self.parse_one, config_str)
        self.assertEqual(expected_msg, str(err))

    def test_parse_one_net_vlan2_not_int(self):
        config_str = "net1:100:bar"
        expected_msg = self._vlan_not_int_err(config_str, 'bar')
        err = self.assertRaises(n_exc.NetworkVlanRangeError,
                                self.parse_one, config_str)
        self.assertEqual(expected_msg, str(err))

    def test_parse_one_net_and_max_range(self):
        config_str = "net1:1:4094"
        expected_networks = ("net1", (1, 4094))
        self.assertEqual(expected_networks, self.parse_one(config_str))

    def test_parse_one_net_range_bad_vlan1(self):
        config_str = "net1:9000:150"
        expected_msg = self._nrange_invalid_vlan(config_str, 1)
        err = self.assertRaises(n_exc.NetworkVlanRangeError,
                                self.parse_one, config_str)
        self.assertEqual(expected_msg, str(err))

    def test_parse_one_net_range_bad_vlan2(self):
        config_str = "net1:4000:4999"
        expected_msg = self._nrange_invalid_vlan(config_str, 2)
        err = self.assertRaises(n_exc.NetworkVlanRangeError,
                                self.parse_one, config_str)
        self.assertEqual(expected_msg, str(err))


class TestParseVlanRangeList(UtilTestParseVlanRanges):
    def parse_list(self, cfg_entries):
        return plugin_utils.parse_network_vlan_ranges(cfg_entries)

    def test_parse_list_one_net_no_vlan_range(self):
        config_list = ["net1"]
        expected_networks = {"net1": []}
        self.assertEqual(expected_networks, self.parse_list(config_list))

    def test_parse_list_one_net_vlan_range(self):
        config_list = ["net1:100:199"]
        expected_networks = {"net1": [(100, 199)]}
        self.assertEqual(expected_networks, self.parse_list(config_list))

    def test_parse_two_nets_no_vlan_range(self):
        config_list = ["net1", "net2"]
        expected_networks = {"net1": [], "net2": []}
        self.assertEqual(expected_networks, self.parse_list(config_list))

    def test_parse_two_nets_range_and_no_range(self):
        config_list = ["net1:100:199", "net2"]
        expected_networks = {"net1": [(100, 199)], "net2": []}
        self.assertEqual(expected_networks, self.parse_list(config_list))

    def test_parse_two_nets_no_range_and_range(self):
        config_list = ["net1", "net2:200:299"]
        expected_networks = {"net1": [], "net2": [(200, 299)]}
        self.assertEqual(expected_networks, self.parse_list(config_list))

    def test_parse_two_nets_bad_vlan_range1(self):
        config_list = ["net1:100", "net2:200:299"]
        expected_msg = self._range_too_few_err(config_list[0])
        err = self.assertRaises(n_exc.NetworkVlanRangeError,
                                self.parse_list, config_list)
        self.assertEqual(expected_msg, str(err))

    def test_parse_two_nets_vlan_not_int2(self):
config_list = ["net1:100:199", "net2:200:0x200"] expected_msg = self._vlan_not_int_err(config_list[1], '0x200') err = self.assertRaises(n_exc.NetworkVlanRangeError, self.parse_list, config_list) self.assertEqual(str(err), expected_msg) def test_parse_two_nets_and_append_1_2(self): config_list = ["net1:100:199", "net1:1000:1099", "net2:200:299"] expected_networks = {"net1": [(100, 199), (1000, 1099)], "net2": [(200, 299)]} self.assertEqual(expected_networks, self.parse_list(config_list)) def test_parse_two_nets_and_append_1_3(self): config_list = ["net1:100:199", "net2:200:299", "net1:1000:1099"] expected_networks = {"net1": [(100, 199), (1000, 1099)], "net2": [(200, 299)]} self.assertEqual(expected_networks, self.parse_list(config_list)) class TestDictUtils(base.BaseTestCase): def test_dict2str(self): dic = {"key1": "value1", "key2": "value2", "key3": "value3"} expected = "key1=value1,key2=value2,key3=value3" self.assertEqual(expected, utils.dict2str(dic)) def test_str2dict(self): string = "key1=value1,key2=value2,key3=value3" expected = {"key1": "value1", "key2": "value2", "key3": "value3"} self.assertEqual(expected, utils.str2dict(string)) def test_dict_str_conversion(self): dic = {"key1": "value1", "key2": "value2"} self.assertEqual(dic, utils.str2dict(utils.dict2str(dic))) def test_diff_list_of_dict(self): old_list = [{"key1": "value1"}, {"key2": "value2"}, {"key3": "value3"}] new_list = [{"key1": "value1"}, {"key2": "value2"}, {"key4": "value4"}] added, removed = utils.diff_list_of_dict(old_list, new_list) self.assertEqual(added, [dict(key4="value4")]) self.assertEqual(removed, [dict(key3="value3")]) class _CachingDecorator(object): def __init__(self): self.func_retval = 'bar' self._cache = mock.Mock() @utils.cache_method_results def func(self, *args, **kwargs): return self.func_retval class TestCachingDecorator(base.BaseTestCase): def setUp(self): super(TestCachingDecorator, self).setUp() self.decor = _CachingDecorator() self.func_name = '%(module)s._CachingDecorator.func' % { 'module': self.__module__ } self.not_cached = self.decor.func.func.__self__._not_cached def test_cache_miss(self): expected_key = (self.func_name, 1, 2, ('foo', 'bar')) args = (1, 2) kwargs = {'foo': 'bar'} self.decor._cache.get.return_value = self.not_cached retval = self.decor.func(*args, **kwargs) self.decor._cache.set.assert_called_once_with( expected_key, self.decor.func_retval, None) self.assertEqual(self.decor.func_retval, retval) def test_cache_hit(self): expected_key = (self.func_name, 1, 2, ('foo', 'bar')) args = (1, 2) kwargs = {'foo': 'bar'} retval = self.decor.func(*args, **kwargs) self.assertFalse(self.decor._cache.set.called) self.assertEqual(self.decor._cache.get.return_value, retval) self.decor._cache.get.assert_called_once_with(expected_key, self.not_cached) def test_get_unhashable(self): expected_key = (self.func_name, [1], 2) self.decor._cache.get.side_effect = TypeError retval = self.decor.func([1], 2) self.assertFalse(self.decor._cache.set.called) self.assertEqual(self.decor.func_retval, retval) self.decor._cache.get.assert_called_once_with(expected_key, self.not_cached) def test_missing_cache(self): delattr(self.decor, '_cache') self.assertRaises(NotImplementedError, self.decor.func, (1, 2)) def test_no_cache(self): self.decor._cache = False retval = self.decor.func((1, 2)) self.assertEqual(self.decor.func_retval, retval) class TestDict2Tuples(base.BaseTestCase): def test_dict(self): input_dict = {'foo': 'bar', '42': 'baz', 'aaa': 'zzz'} expected = (('42', 'baz'), ('aaa', 'zzz'), ('foo', 
'bar')) output_tuple = utils.dict2tuple(input_dict) self.assertEqual(expected, output_tuple) class TestExceptionLogger(base.BaseTestCase): def test_normal_call(self): result = "Result" @utils.exception_logger() def func(): return result self.assertEqual(result, func()) def test_raise(self): result = "Result" @utils.exception_logger() def func(): raise RuntimeError(result) self.assertRaises(RuntimeError, func) def test_spawn_normal(self): result = "Result" logger = mock.Mock() @utils.exception_logger(logger=logger) def func(): return result gt = eventlet.spawn(func) self.assertEqual(result, gt.wait()) self.assertFalse(logger.called) def test_spawn_raise(self): result = "Result" logger = mock.Mock() @utils.exception_logger(logger=logger) def func(): raise RuntimeError(result) gt = eventlet.spawn(func) self.assertRaises(RuntimeError, gt.wait) self.assertTrue(logger.called) def test_pool_spawn_normal(self): logger = mock.Mock() calls = mock.Mock() @utils.exception_logger(logger=logger) def func(i): calls(i) pool = eventlet.GreenPool(4) for i in range(0, 4): pool.spawn(func, i) pool.waitall() calls.assert_has_calls([mock.call(0), mock.call(1), mock.call(2), mock.call(3)], any_order=True) self.assertFalse(logger.called) def test_pool_spawn_raise(self): logger = mock.Mock() calls = mock.Mock() @utils.exception_logger(logger=logger) def func(i): if i == 2: raise RuntimeError(2) else: calls(i) pool = eventlet.GreenPool(4) for i in range(0, 4): pool.spawn(func, i) pool.waitall() calls.assert_has_calls([mock.call(0), mock.call(1), mock.call(3)], any_order=True) self.assertTrue(logger.called) class TestDvrServices(base.BaseTestCase): def _test_is_dvr_serviced(self, device_owner, expected): self.assertEqual(expected, utils.is_dvr_serviced(device_owner)) def test_is_dvr_serviced_with_lb_port(self): self._test_is_dvr_serviced(constants.DEVICE_OWNER_LOADBALANCER, True) def test_is_dvr_serviced_with_lbv2_port(self): self._test_is_dvr_serviced(constants.DEVICE_OWNER_LOADBALANCERV2, True) def test_is_dvr_serviced_with_dhcp_port(self): self._test_is_dvr_serviced(constants.DEVICE_OWNER_DHCP, True) def test_is_dvr_serviced_with_vm_port(self): self._test_is_dvr_serviced(constants.DEVICE_OWNER_COMPUTE_PREFIX, True) class TestIpToCidr(base.BaseTestCase): def test_ip_to_cidr_ipv4_default(self): self.assertEqual('15.1.2.3/32', utils.ip_to_cidr('15.1.2.3')) def test_ip_to_cidr_ipv4_prefix(self): self.assertEqual('15.1.2.3/24', utils.ip_to_cidr('15.1.2.3', 24)) def test_ip_to_cidr_ipv4_netaddr(self): ip_address = netaddr.IPAddress('15.1.2.3') self.assertEqual('15.1.2.3/32', utils.ip_to_cidr(ip_address)) def test_ip_to_cidr_ipv4_bad_prefix(self): self.assertRaises(netaddr.core.AddrFormatError, utils.ip_to_cidr, '15.1.2.3', 33) def test_ip_to_cidr_ipv6_default(self): self.assertEqual('::1/128', utils.ip_to_cidr('::1')) def test_ip_to_cidr_ipv6_prefix(self): self.assertEqual('::1/64', utils.ip_to_cidr('::1', 64)) def test_ip_to_cidr_ipv6_bad_prefix(self): self.assertRaises(netaddr.core.AddrFormatError, utils.ip_to_cidr, '2000::1', 129) class TestCidrIsHost(base.BaseTestCase): def test_is_cidr_host_ipv4(self): self.assertTrue(utils.is_cidr_host('15.1.2.3/32')) def test_is_cidr_host_ipv4_not_cidr(self): self.assertRaises(ValueError, utils.is_cidr_host, '15.1.2.3') def test_is_cidr_host_ipv6(self): self.assertTrue(utils.is_cidr_host('2000::1/128')) def test_is_cidr_host_ipv6_netaddr(self): net = netaddr.IPNetwork("2000::1") self.assertTrue(utils.is_cidr_host(net)) def test_is_cidr_host_ipv6_32(self): 
self.assertFalse(utils.is_cidr_host('2000::1/32')) def test_is_cidr_host_ipv6_not_cidr(self): self.assertRaises(ValueError, utils.is_cidr_host, '2000::1') def test_is_cidr_host_ipv6_not_cidr_netaddr(self): ip_address = netaddr.IPAddress("2000::3") self.assertRaises(ValueError, utils.is_cidr_host, ip_address) class TestIpVersionFromInt(base.BaseTestCase): def test_ip_version_from_int_ipv4(self): self.assertEqual(constants.IPv4, utils.ip_version_from_int(4)) def test_ip_version_from_int_ipv6(self): self.assertEqual(constants.IPv6, utils.ip_version_from_int(6)) def test_ip_version_from_int_illegal_int(self): self.assertRaises(ValueError, utils.ip_version_from_int, 8) class TestDelayedStringRenderer(base.BaseTestCase): def test_call_deferred_until_str(self): my_func = mock.MagicMock(return_value='Brie cheese!') delayed = utils.DelayedStringRenderer(my_func, 1, 2, key_arg=44) self.assertFalse(my_func.called) string = "Type: %s" % delayed my_func.assert_called_once_with(1, 2, key_arg=44) self.assertEqual("Type: Brie cheese!", string) def test_not_called_with_low_log_level(self): LOG = logging.getLogger(__name__) # make sure we return logging to previous level current_log_level = LOG.logger.getEffectiveLevel() self.addCleanup(LOG.logger.setLevel, current_log_level) my_func = mock.MagicMock() delayed = utils.DelayedStringRenderer(my_func) # set to warning so we shouldn't be logging debug messages LOG.logger.setLevel(logging.logging.WARNING) LOG.debug("Hello %s", delayed) self.assertFalse(my_func.called) # but it should be called with the debug level LOG.logger.setLevel(logging.logging.DEBUG) LOG.debug("Hello %s", delayed) self.assertTrue(my_func.called) class TestEnsureDir(base.BaseTestCase): @mock.patch('os.makedirs') def test_ensure_dir_no_fail_if_exists(self, makedirs): error = OSError() error.errno = errno.EEXIST makedirs.side_effect = error utils.ensure_dir("/etc/create/concurrently") @mock.patch('os.makedirs') def test_ensure_dir_calls_makedirs(self, makedirs): utils.ensure_dir("/etc/create/directory") makedirs.assert_called_once_with("/etc/create/directory", 0o755) class TestCamelize(base.BaseTestCase): def test_camelize(self): data = {'bandwidth_limit': 'BandwidthLimit', 'test': 'Test', 'some__more__dashes': 'SomeMoreDashes', 'a_penguin_walks_into_a_bar': 'APenguinWalksIntoABar'} for s, expected in data.items(): self.assertEqual(expected, utils.camelize(s)) class TestRoundVal(base.BaseTestCase): def test_round_val_ok(self): for expected, value in ((0, 0), (0, 0.1), (1, 0.5), (1, 1.49), (2, 1.5)): self.assertEqual(expected, utils.round_val(value)) class TestGetRandomString(base.BaseTestCase): def test_get_random_string(self): length = 127 random_string = utils.get_random_string(length) self.assertEqual(length, len(random_string)) regex = re.compile('^[0-9a-fA-F]+$') self.assertIsNotNone(regex.match(random_string)) class TestSafeDecodeUtf8(base.BaseTestCase): @helpers.requires_py2 def test_py2_does_nothing(self): s = 'test-py2' self.assertIs(s, utils.safe_decode_utf8(s)) @helpers.requires_py3 def test_py3_decoded_valid_bytes(self): s = bytes('test-py2', 'utf-8') decoded_str = utils.safe_decode_utf8(s) self.assertIsInstance(decoded_str, six.text_type) self.assertEqual(s, decoded_str.encode('utf-8')) @helpers.requires_py3 def test_py3_decoded_invalid_bytes(self): s = bytes('test-py2', 'utf_16') decoded_str = utils.safe_decode_utf8(s) self.assertIsInstance(decoded_str, six.text_type) class TestPortRuleMasking(base.BaseTestCase): scenarios = [ ('Test 1 (networking-ovs-dpdk)', {'port_min': 5, 
'port_max': 12, 'expected': ['0x0005', '0x0006/0xfffe', '0x0008/0xfffc', '0x000c']} ), ('Test 2 (networking-ovs-dpdk)', {'port_min': 20, 'port_max': 130, 'expected': ['0x0014/0xfffc', '0x0018/0xfff8', '0x0020/0xffe0', '0x0040/0xffc0', '0x0080/0xfffe', '0x0082']}), ('Test 3 (networking-ovs-dpdk)', {'port_min': 4501, 'port_max': 33057, 'expected': ['0x1195', '0x1196/0xfffe', '0x1198/0xfff8', '0x11a0/0xffe0', '0x11c0/0xffc0', '0x1200/0xfe00', '0x1400/0xfc00', '0x1800/0xf800', '0x2000/0xe000', '0x4000/0xc000', '0x8000/0xff00', '0x8100/0xffe0', '0x8120/0xfffe']}), ('Test port_max == 2^k-1', {'port_min': 101, 'port_max': 127, 'expected': ['0x0065', '0x0066/0xfffe', '0x0068/0xfff8', '0x0070/0xfff0']}), ('Test single even port', {'port_min': 22, 'port_max': 22, 'expected': ['0x0016']}), ('Test single odd port', {'port_min': 5001, 'port_max': 5001, 'expected': ['0x1389']}), ('Test full interval', {'port_min': 0, 'port_max': 7, 'expected': ['0x0000/0xfff8']}), ('Test 2^k interval', {'port_min': 8, 'port_max': 15, 'expected': ['0x0008/0xfff8']}), ('Test full port range', {'port_min': 0, 'port_max': 65535, 'expected': ['0x0000/0x0000']}), ('Test bad values', {'port_min': 12, 'port_max': 5, 'expected': ValueError}), ] def test_port_rule_masking(self): if (inspect.isclass(self.expected) and issubclass(self.expected, Exception)): with testtools.ExpectedException(self.expected): utils.port_rule_masking(self.port_min, self.port_max) else: rules = utils.port_rule_masking(self.port_min, self.port_max) self.assertItemsEqual(self.expected, rules) neutron-8.4.0/neutron/tests/unit/common/test_rpc.py0000664000567000056710000004415313044372760023673 0ustar jenkinsjenkins00000000000000# Copyright 2015 OpenStack Foundation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
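# The cases below exercise neutron.common.rpc, which keeps its transport and
# notifier state in module-level globals. A minimal sketch of the setup
# pattern used by the test cases further down (the 'fake' oslo.messaging
# driver keeps everything in memory, so no real broker is needed):
#
#     self.messaging_conf = messaging_conffixture.ConfFixture(CONF)
#     self.messaging_conf.transport_driver = 'fake'
#     self.useFixture(self.messaging_conf)
#     self.addCleanup(rpc.cleanup)
#     rpc.init(CONF)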
import copy import fixtures import mock from oslo_config import cfg import oslo_messaging as messaging from oslo_messaging import conffixture as messaging_conffixture import testtools from neutron.common import rpc from neutron import context from neutron.tests import base CONF = cfg.CONF CONF.import_opt('state_path', 'neutron.common.config') class RPCFixture(fixtures.Fixture): def _setUp(self): self.trans = copy.copy(rpc.TRANSPORT) self.noti_trans = copy.copy(rpc.NOTIFICATION_TRANSPORT) self.noti = copy.copy(rpc.NOTIFIER) self.all_mods = copy.copy(rpc.ALLOWED_EXMODS) self.ext_mods = copy.copy(rpc.EXTRA_EXMODS) self.addCleanup(self._reset_everything) def _reset_everything(self): rpc.TRANSPORT = self.trans rpc.NOTIFICATION_TRANSPORT = self.noti_trans rpc.NOTIFIER = self.noti rpc.ALLOWED_EXMODS = self.all_mods rpc.EXTRA_EXMODS = self.ext_mods class TestRPC(base.DietTestCase): def setUp(self): super(TestRPC, self).setUp() self.useFixture(RPCFixture()) @mock.patch.object(rpc, 'get_allowed_exmods') @mock.patch.object(rpc, 'RequestContextSerializer') @mock.patch.object(messaging, 'get_transport') @mock.patch.object(messaging, 'get_notification_transport') @mock.patch.object(messaging, 'Notifier') def test_init(self, mock_not, mock_noti_trans, mock_trans, mock_ser, mock_exmods): notifier = mock.Mock() transport = mock.Mock() noti_transport = mock.Mock() serializer = mock.Mock() conf = mock.Mock() mock_exmods.return_value = ['foo'] mock_trans.return_value = transport mock_noti_trans.return_value = noti_transport mock_ser.return_value = serializer mock_not.return_value = notifier rpc.init(conf) mock_exmods.assert_called_once_with() mock_trans.assert_called_once_with(conf, allowed_remote_exmods=['foo'], aliases=rpc.TRANSPORT_ALIASES) mock_noti_trans.assert_called_once_with(conf, allowed_remote_exmods=['foo'], aliases=rpc.TRANSPORT_ALIASES) mock_not.assert_called_once_with(noti_transport, serializer=serializer) self.assertIsNotNone(rpc.TRANSPORT) self.assertIsNotNone(rpc.NOTIFICATION_TRANSPORT) self.assertIsNotNone(rpc.NOTIFIER) def test_cleanup_transport_null(self): rpc.NOTIFIER = mock.Mock() rpc.NOTIFICATION_TRANSPORT = mock.Mock() self.assertRaises(AssertionError, rpc.cleanup) def test_cleanup_notification_transport_null(self): rpc.TRANSPORT = mock.Mock() rpc.NOTIFIER = mock.Mock() self.assertRaises(AssertionError, rpc.cleanup) def test_cleanup_notifier_null(self): rpc.TRANSPORT = mock.Mock() rpc.NOTIFICATION_TRANSPORT = mock.Mock() self.assertRaises(AssertionError, rpc.cleanup) def test_cleanup(self): rpc.NOTIFIER = mock.Mock() rpc.NOTIFICATION_TRANSPORT = mock.Mock() rpc.TRANSPORT = mock.Mock() trans_cleanup = mock.Mock() not_trans_cleanup = mock.Mock() rpc.TRANSPORT.cleanup = trans_cleanup rpc.NOTIFICATION_TRANSPORT.cleanup = not_trans_cleanup rpc.cleanup() trans_cleanup.assert_called_once_with() not_trans_cleanup.assert_called_once_with() self.assertIsNone(rpc.TRANSPORT) self.assertIsNone(rpc.NOTIFICATION_TRANSPORT) self.assertIsNone(rpc.NOTIFIER) def test_add_extra_exmods(self): rpc.EXTRA_EXMODS = [] rpc.add_extra_exmods('foo', 'bar') self.assertEqual(['foo', 'bar'], rpc.EXTRA_EXMODS) def test_clear_extra_exmods(self): rpc.EXTRA_EXMODS = ['foo', 'bar'] rpc.clear_extra_exmods() self.assertEqual(0, len(rpc.EXTRA_EXMODS)) def test_get_allowed_exmods(self): rpc.ALLOWED_EXMODS = ['foo'] rpc.EXTRA_EXMODS = ['bar'] exmods = rpc.get_allowed_exmods() self.assertEqual(['foo', 'bar'], exmods) @mock.patch.object(rpc, 'RequestContextSerializer') @mock.patch.object(rpc, 'BackingOffClient') def 
test_get_client(self, mock_client, mock_ser): rpc.TRANSPORT = mock.Mock() tgt = mock.Mock() ser = mock.Mock() mock_client.return_value = 'client' mock_ser.return_value = ser client = rpc.get_client(tgt, version_cap='1.0', serializer='foo') mock_ser.assert_called_once_with('foo') mock_client.assert_called_once_with(rpc.TRANSPORT, tgt, version_cap='1.0', serializer=ser) self.assertEqual('client', client) @mock.patch.object(rpc, 'RequestContextSerializer') @mock.patch.object(messaging, 'get_rpc_server') def test_get_server(self, mock_get, mock_ser): rpc.TRANSPORT = mock.Mock() ser = mock.Mock() tgt = mock.Mock() ends = mock.Mock() mock_ser.return_value = ser mock_get.return_value = 'server' server = rpc.get_server(tgt, ends, serializer='foo') mock_ser.assert_called_once_with('foo') mock_get.assert_called_once_with(rpc.TRANSPORT, tgt, ends, 'eventlet', ser) self.assertEqual('server', server) def test_get_notifier(self): rpc.NOTIFIER = mock.Mock() mock_prep = mock.Mock() mock_prep.return_value = 'notifier' rpc.NOTIFIER.prepare = mock_prep notifier = rpc.get_notifier('service', publisher_id='foo') mock_prep.assert_called_once_with(publisher_id='foo') self.assertEqual('notifier', notifier) def test_get_notifier_null_publisher(self): rpc.NOTIFIER = mock.Mock() mock_prep = mock.Mock() mock_prep.return_value = 'notifier' rpc.NOTIFIER.prepare = mock_prep notifier = rpc.get_notifier('service', host='bar') mock_prep.assert_called_once_with(publisher_id='service.bar') self.assertEqual('notifier', notifier) class TestRequestContextSerializer(base.DietTestCase): def setUp(self): super(TestRequestContextSerializer, self).setUp() self.mock_base = mock.Mock() self.ser = rpc.RequestContextSerializer(self.mock_base) self.ser_null = rpc.RequestContextSerializer(None) def test_serialize_entity(self): self.mock_base.serialize_entity.return_value = 'foo' ser_ent = self.ser.serialize_entity('context', 'entity') self.mock_base.serialize_entity.assert_called_once_with('context', 'entity') self.assertEqual('foo', ser_ent) def test_deserialize_entity(self): self.mock_base.deserialize_entity.return_value = 'foo' deser_ent = self.ser.deserialize_entity('context', 'entity') self.mock_base.deserialize_entity.assert_called_once_with('context', 'entity') self.assertEqual('foo', deser_ent) def test_deserialize_entity_null_base(self): deser_ent = self.ser_null.deserialize_entity('context', 'entity') self.assertEqual('entity', deser_ent) def test_serialize_context(self): context = mock.Mock() self.ser.serialize_context(context) context.to_dict.assert_called_once_with() @mock.patch.object(context, 'Context') def test_deserialize_context(self, mock_con): context = mock.Mock() context.copy.return_value = {'foo': 'bar', 'user_id': 1, 'tenant_id': 1} self.ser.deserialize_context(context) mock_con.assert_called_once_with(1, 1, foo='bar') @mock.patch.object(context, 'Context') def test_deserialize_context_no_user_id(self, mock_con): context = mock.Mock() context.copy.return_value = {'foo': 'bar', 'user': 1, 'tenant_id': 1} self.ser.deserialize_context(context) mock_con.assert_called_once_with(1, 1, foo='bar') @mock.patch.object(context, 'Context') def test_deserialize_context_no_tenant_id(self, mock_con): context = mock.Mock() context.copy.return_value = {'foo': 'bar', 'user_id': 1, 'project_id': 1} self.ser.deserialize_context(context) mock_con.assert_called_once_with(1, 1, foo='bar') @mock.patch.object(context, 'Context') def test_deserialize_context_no_ids(self, mock_con): context = mock.Mock() context.copy.return_value = {'foo': 
'bar'} self.ser.deserialize_context(context) mock_con.assert_called_once_with(None, None, foo='bar') class ServiceTestCase(base.DietTestCase): # the class cannot be based on BaseTestCase since it mocks rpc.Connection def setUp(self): super(ServiceTestCase, self).setUp() self.host = 'foo' self.topic = 'neutron-agent' self.target_mock = mock.patch('oslo_messaging.Target') self.target_mock.start() self.messaging_conf = messaging_conffixture.ConfFixture(CONF) self.messaging_conf.transport_driver = 'fake' self.messaging_conf.response_timeout = 0 self.useFixture(self.messaging_conf) self.addCleanup(rpc.cleanup) rpc.init(CONF) def test_operations(self): with mock.patch('oslo_messaging.get_rpc_server') as get_rpc_server: rpc_server = get_rpc_server.return_value service = rpc.Service(self.host, self.topic) service.start() rpc_server.start.assert_called_once_with() service.stop() rpc_server.stop.assert_called_once_with() rpc_server.wait.assert_called_once_with() class TimeoutTestCase(base.DietTestCase): def setUp(self): super(TimeoutTestCase, self).setUp() self.messaging_conf = messaging_conffixture.ConfFixture(CONF) self.messaging_conf.transport_driver = 'fake' self.messaging_conf.response_timeout = 0 self.useFixture(self.messaging_conf) self.addCleanup(rpc.cleanup) rpc.init(CONF) rpc.TRANSPORT = mock.MagicMock() rpc.TRANSPORT._send.side_effect = messaging.MessagingTimeout target = messaging.Target(version='1.0', topic='testing') self.client = rpc.get_client(target) self.call_context = mock.Mock() self.sleep = mock.patch('time.sleep').start() rpc.TRANSPORT.conf.rpc_response_timeout = 10 def test_timeout_unaffected_when_explicitly_set(self): rpc.TRANSPORT.conf.rpc_response_timeout = 5 ctx = self.client.prepare(topic='sandwiches', timeout=77) with testtools.ExpectedException(messaging.MessagingTimeout): ctx.call(self.call_context, 'create_pb_and_j') # ensure that the timeout was not increased and the back-off sleep # wasn't called self.assertEqual( 5, rpc._ContextWrapper._METHOD_TIMEOUTS['create_pb_and_j']) self.assertFalse(self.sleep.called) def test_timeout_store_defaults(self): # any method should default to the configured timeout self.assertEqual(rpc.TRANSPORT.conf.rpc_response_timeout, rpc._ContextWrapper._METHOD_TIMEOUTS['method_1']) self.assertEqual(rpc.TRANSPORT.conf.rpc_response_timeout, rpc._ContextWrapper._METHOD_TIMEOUTS['method_2']) # a change to an existing should not affect new or existing ones rpc._ContextWrapper._METHOD_TIMEOUTS['method_2'] = 7000 self.assertEqual(rpc.TRANSPORT.conf.rpc_response_timeout, rpc._ContextWrapper._METHOD_TIMEOUTS['method_1']) self.assertEqual(rpc.TRANSPORT.conf.rpc_response_timeout, rpc._ContextWrapper._METHOD_TIMEOUTS['method_3']) def test_method_timeout_sleep(self): rpc.TRANSPORT.conf.rpc_response_timeout = 2 for i in range(100): with testtools.ExpectedException(messaging.MessagingTimeout): self.client.call(self.call_context, 'method_1') # sleep value should always be between 0 and configured timeout self.assertGreaterEqual(self.sleep.call_args_list[0][0][0], 0) self.assertLessEqual(self.sleep.call_args_list[0][0][0], 2) self.sleep.reset_mock() def test_method_timeout_increases_on_timeout_exception(self): rpc._ContextWrapper._METHOD_TIMEOUTS['method_1'] = 1 for i in range(5): with testtools.ExpectedException(messaging.MessagingTimeout): self.client.call(self.call_context, 'method_1') # we only care to check the timeouts sent to the transport timeouts = [call[1]['timeout'] for call in rpc.TRANSPORT._send.call_args_list] self.assertEqual([1, 2, 4, 8, 16], 
timeouts) def test_method_timeout_10x_config_ceiling(self): rpc.TRANSPORT.conf.rpc_response_timeout = 10 # 5 doublings should max out at the 10xdefault ceiling for i in range(5): with testtools.ExpectedException(messaging.MessagingTimeout): self.client.call(self.call_context, 'method_1') self.assertEqual(10 * rpc.TRANSPORT.conf.rpc_response_timeout, rpc._ContextWrapper._METHOD_TIMEOUTS['method_1']) with testtools.ExpectedException(messaging.MessagingTimeout): self.client.call(self.call_context, 'method_1') self.assertEqual(10 * rpc.TRANSPORT.conf.rpc_response_timeout, rpc._ContextWrapper._METHOD_TIMEOUTS['method_1']) def test_timeout_unchanged_on_other_exception(self): rpc._ContextWrapper._METHOD_TIMEOUTS['method_1'] = 1 rpc.TRANSPORT._send.side_effect = ValueError with testtools.ExpectedException(ValueError): self.client.call(self.call_context, 'method_1') rpc.TRANSPORT._send.side_effect = messaging.MessagingTimeout with testtools.ExpectedException(messaging.MessagingTimeout): self.client.call(self.call_context, 'method_1') timeouts = [call[1]['timeout'] for call in rpc.TRANSPORT._send.call_args_list] self.assertEqual([1, 1], timeouts) def test_timeouts_for_methods_tracked_independently(self): rpc._ContextWrapper._METHOD_TIMEOUTS['method_1'] = 1 rpc._ContextWrapper._METHOD_TIMEOUTS['method_2'] = 1 for method in ('method_1', 'method_1', 'method_2', 'method_1', 'method_2'): with testtools.ExpectedException(messaging.MessagingTimeout): self.client.call(self.call_context, method) timeouts = [call[1]['timeout'] for call in rpc.TRANSPORT._send.call_args_list] self.assertEqual([1, 2, 1, 4, 2], timeouts) def test_timeouts_for_namespaces_tracked_independently(self): rpc._ContextWrapper._METHOD_TIMEOUTS['ns1.method'] = 1 rpc._ContextWrapper._METHOD_TIMEOUTS['ns2.method'] = 1 for ns in ('ns1', 'ns2'): self.client.target.namespace = ns for i in range(4): with testtools.ExpectedException(messaging.MessagingTimeout): self.client.call(self.call_context, 'method') timeouts = [call[1]['timeout'] for call in rpc.TRANSPORT._send.call_args_list] self.assertEqual([1, 2, 4, 8, 1, 2, 4, 8], timeouts) def test_method_timeout_increases_with_prepare(self): rpc._ContextWrapper._METHOD_TIMEOUTS['method_1'] = 1 ctx = self.client.prepare(version='1.4') with testtools.ExpectedException(messaging.MessagingTimeout): ctx.call(self.call_context, 'method_1') with testtools.ExpectedException(messaging.MessagingTimeout): ctx.call(self.call_context, 'method_1') # we only care to check the timeouts sent to the transport timeouts = [call[1]['timeout'] for call in rpc.TRANSPORT._send.call_args_list] self.assertEqual([1, 2], timeouts) class TestConnection(base.DietTestCase): def setUp(self): super(TestConnection, self).setUp() self.conn = rpc.Connection() @mock.patch.object(messaging, 'Target') @mock.patch.object(cfg, 'CONF') @mock.patch.object(rpc, 'get_server') def test_create_consumer(self, mock_get, mock_cfg, mock_tgt): mock_cfg.host = 'foo' server = mock.Mock() target = mock.Mock() mock_get.return_value = server mock_tgt.return_value = target self.conn.create_consumer('topic', 'endpoints', fanout=True) mock_tgt.assert_called_once_with(topic='topic', server='foo', fanout=True) mock_get.assert_called_once_with(target, 'endpoints') self.assertEqual([server], self.conn.servers) def test_consume_in_threads(self): self.conn.servers = [mock.Mock(), mock.Mock()] servs = self.conn.consume_in_threads() for serv in self.conn.servers: serv.start.assert_called_once_with() self.assertEqual(servs, self.conn.servers) def test_close(self): 
self.conn.servers = [mock.Mock(), mock.Mock()] self.conn.close() for serv in self.conn.servers: serv.stop.assert_called_once_with() serv.wait.assert_called_once_with() neutron-8.4.0/neutron/tests/unit/_test_extension_portbindings.py0000664000567000056710000004117513044372760026555 0ustar jenkinsjenkins00000000000000# Copyright 2013 NEC Corporation # All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import cfg from six.moves import http_client as httplib from webob import exc from neutron import context from neutron.extensions import portbindings from neutron import manager from neutron.tests.unit.db import test_db_base_plugin_v2 class PortBindingsTestCase(test_db_base_plugin_v2.NeutronDbPluginV2TestCase): # VIF_TYPE must be overridden according to plugin vif_type VIF_TYPE = portbindings.VIF_TYPE_OTHER # VIF_DETAILS must be overridden according to plugin vif_details VIF_DETAILS = None def _check_response_portbindings(self, port): self.assertEqual(port[portbindings.VIF_TYPE], self.VIF_TYPE) # REVISIT(rkukura): Consider reworking tests to enable ML2 to bind if self.VIF_TYPE not in [portbindings.VIF_TYPE_UNBOUND, portbindings.VIF_TYPE_BINDING_FAILED]: # NOTE(r-mibu): The following six lines are just for backward # compatibility. In this class, HAS_PORT_FILTER has been replaced # by VIF_DETAILS which can be set expected vif_details to check, # but all replacement of HAS_PORT_FILTER in successor has not been # completed. 
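        # In short: when a subclass leaves VIF_DETAILS unset, fall back to
        # the legacy HAS_PORT_FILTER flag and compare only CAP_PORT_FILTER;
        # otherwise compare the whole vif_details dict.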
        if self.VIF_DETAILS is None:
            expected = getattr(self, 'HAS_PORT_FILTER', False)
            vif_details = port[portbindings.VIF_DETAILS]
            port_filter = vif_details[portbindings.CAP_PORT_FILTER]
            self.assertEqual(expected, port_filter)
            return
        self.assertEqual(self.VIF_DETAILS, port[portbindings.VIF_DETAILS])

    def _check_response_no_portbindings(self, port):
        self.assertIn('status', port)
        self.assertNotIn(portbindings.VIF_TYPE, port)
        self.assertNotIn(portbindings.VIF_DETAILS, port)

    def _get_non_admin_context(self):
        return context.Context(user_id=None,
                               tenant_id=self._tenant_id,
                               is_admin=False)

    def test_port_vif_details(self):
        with self.port(name='name') as port:
            port_id = port['port']['id']
            # Check the response of create_port
            self._check_response_portbindings(port['port'])
            # Check the response of get_port
            ctx = context.get_admin_context()
            port = self._show('ports', port_id, neutron_context=ctx)['port']
            self._check_response_portbindings(port)
            # By default the user is admin - now test a non-admin user
            ctx = self._get_non_admin_context()
            non_admin_port = self._show(
                'ports', port_id, neutron_context=ctx)['port']
            self._check_response_no_portbindings(non_admin_port)

    def test_ports_vif_details(self):
        plugin = manager.NeutronManager.get_plugin()
        cfg.CONF.set_default('allow_overlapping_ips', True)
        with self.port(), self.port():
            ctx = context.get_admin_context()
            ports = plugin.get_ports(ctx)
            self.assertEqual(2, len(ports))
            for port in ports:
                self._check_response_portbindings(port)
            # By default the user is admin - now test a non-admin user
            ctx = self._get_non_admin_context()
            ports = self._list('ports', neutron_context=ctx)['ports']
            self.assertEqual(2, len(ports))
            for non_admin_port in ports:
                self._check_response_no_portbindings(non_admin_port)

    def _check_port_binding_profile(self, port, profile=None):
        # For plugins that do not use the binding:profile attribute we just
        # check that the operation on the port succeeded.
self.assertIn('id', port) def _test_create_port_binding_profile(self, profile): profile_arg = {portbindings.PROFILE: profile} with self.port(arg_list=(portbindings.PROFILE,), **profile_arg) as port: port_id = port['port']['id'] self._check_port_binding_profile(port['port'], profile) port = self._show('ports', port_id) self._check_port_binding_profile(port['port'], profile) def test_create_port_binding_profile_none(self): self._test_create_port_binding_profile(None) def test_create_port_binding_profile_with_empty_dict(self): self._test_create_port_binding_profile({}) def _test_update_port_binding_profile(self, profile): profile_arg = {portbindings.PROFILE: profile} with self.port() as port: self._check_port_binding_profile(port['port']) port_id = port['port']['id'] ctx = context.get_admin_context() port = self._update('ports', port_id, {'port': profile_arg}, neutron_context=ctx)['port'] self._check_port_binding_profile(port, profile) port = self._show('ports', port_id)['port'] self._check_port_binding_profile(port, profile) def test_update_port_binding_profile_none(self): self._test_update_port_binding_profile(None) def test_update_port_binding_profile_with_empty_dict(self): self._test_update_port_binding_profile({}) def test_port_create_portinfo_non_admin(self): profile_arg = {portbindings.PROFILE: {'dummy': 'dummy'}} with self.network(set_context=True, tenant_id='test') as net1: with self.subnet(network=net1) as subnet1: # succeed without binding:profile with self.port(subnet=subnet1, set_context=True, tenant_id='test'): pass # fail with binding:profile try: with self.port(subnet=subnet1, expected_res_status=403, arg_list=(portbindings.PROFILE,), set_context=True, tenant_id='test', **profile_arg): pass except exc.HTTPClientError: pass def test_port_update_portinfo_non_admin(self): profile_arg = {portbindings.PROFILE: {'dummy': 'dummy'}} with self.network() as net1: with self.subnet(network=net1) as subnet1: with self.port(subnet=subnet1) as port: # By default user is admin - now test non admin user port_id = port['port']['id'] ctx = self._get_non_admin_context() port = self._update('ports', port_id, {'port': profile_arg}, expected_code=exc.HTTPForbidden.code, neutron_context=ctx) class PortBindingsHostTestCaseMixin(object): fmt = 'json' hostname = 'testhost' def _check_response_portbindings_host(self, port): self.assertEqual(port[portbindings.HOST_ID], self.hostname) def _check_response_no_portbindings_host(self, port): self.assertIn('status', port) self.assertNotIn(portbindings.HOST_ID, port) def test_port_vif_non_admin(self): with self.network(set_context=True, tenant_id='test') as net1: with self.subnet(network=net1) as subnet1: host_arg = {portbindings.HOST_ID: self.hostname} try: with self.port(subnet=subnet1, expected_res_status=403, arg_list=(portbindings.HOST_ID,), set_context=True, tenant_id='test', **host_arg): pass except exc.HTTPClientError: pass def test_port_vif_host(self): host_arg = {portbindings.HOST_ID: self.hostname} with self.port(name='name', arg_list=(portbindings.HOST_ID,), **host_arg) as port: port_id = port['port']['id'] # Check a response of create_port self._check_response_portbindings_host(port['port']) # Check a response of get_port ctx = context.get_admin_context() port = self._show('ports', port_id, neutron_context=ctx)['port'] self._check_response_portbindings_host(port) # By default user is admin - now test non admin user ctx = context.Context(user_id=None, tenant_id=self._tenant_id, is_admin=False) non_admin_port = self._show( 'ports', port_id, 
neutron_context=ctx)['port'] self._check_response_no_portbindings_host(non_admin_port) def test_ports_vif_host(self): cfg.CONF.set_default('allow_overlapping_ips', True) host_arg = {portbindings.HOST_ID: self.hostname} with self.port(name='name1', arg_list=(portbindings.HOST_ID,), **host_arg), self.port(name='name2'): ctx = context.get_admin_context() ports = self._list('ports', neutron_context=ctx)['ports'] self.assertEqual(2, len(ports)) for port in ports: if port['name'] == 'name1': self._check_response_portbindings_host(port) else: self.assertFalse(port[portbindings.HOST_ID]) # By default user is admin - now test non admin user ctx = context.Context(user_id=None, tenant_id=self._tenant_id, is_admin=False) ports = self._list('ports', neutron_context=ctx)['ports'] self.assertEqual(2, len(ports)) for non_admin_port in ports: self._check_response_no_portbindings_host(non_admin_port) def test_ports_vif_host_update(self): cfg.CONF.set_default('allow_overlapping_ips', True) host_arg = {portbindings.HOST_ID: self.hostname} with self.port(name='name1', arg_list=(portbindings.HOST_ID,), **host_arg) as port1, self.port(name='name2') as port2: data = {'port': {portbindings.HOST_ID: 'testhosttemp'}} req = self.new_update_request('ports', data, port1['port']['id']) req.get_response(self.api) req = self.new_update_request('ports', data, port2['port']['id']) ctx = context.get_admin_context() req.get_response(self.api) ports = self._list('ports', neutron_context=ctx)['ports'] self.assertEqual(2, len(ports)) for port in ports: self.assertEqual('testhosttemp', port[portbindings.HOST_ID]) def test_ports_vif_non_host_update(self): host_arg = {portbindings.HOST_ID: self.hostname} with self.port(name='name', arg_list=(portbindings.HOST_ID,), **host_arg) as port: data = {'port': {'admin_state_up': False}} req = self.new_update_request('ports', data, port['port']['id']) res = self.deserialize(self.fmt, req.get_response(self.api)) self.assertEqual(port['port'][portbindings.HOST_ID], res['port'][portbindings.HOST_ID]) def test_ports_vif_non_host_update_when_host_null(self): with self.port() as port: data = {'port': {'admin_state_up': False}} req = self.new_update_request('ports', data, port['port']['id']) res = self.deserialize(self.fmt, req.get_response(self.api)) self.assertEqual(port['port'][portbindings.HOST_ID], res['port'][portbindings.HOST_ID]) def test_ports_vif_host_list(self): cfg.CONF.set_default('allow_overlapping_ips', True) host_arg = {portbindings.HOST_ID: self.hostname} with self.port(name='name1', arg_list=(portbindings.HOST_ID,), **host_arg) as port1,\ self.port(name='name2'),\ self.port(name='name3', arg_list=(portbindings.HOST_ID,), **host_arg) as port3: self._test_list_resources( 'port', (port1, port3), query_params='%s=%s' % (portbindings.HOST_ID, self.hostname)) class PortBindingsVnicTestCaseMixin(object): fmt = 'json' vnic_type = portbindings.VNIC_NORMAL def _check_response_portbindings_vnic_type(self, port): self.assertIn('status', port) self.assertEqual(port[portbindings.VNIC_TYPE], self.vnic_type) def test_port_vnic_type_non_admin(self): with self.network(set_context=True, tenant_id='test') as net1: with self.subnet(network=net1) as subnet1: vnic_arg = {portbindings.VNIC_TYPE: self.vnic_type} with self.port(subnet=subnet1, expected_res_status=httplib.CREATED, arg_list=(portbindings.VNIC_TYPE,), set_context=True, tenant_id='test', **vnic_arg) as port: # Check a response of create_port self._check_response_portbindings_vnic_type(port['port']) def test_port_vnic_type(self): vnic_arg = 
{portbindings.VNIC_TYPE: self.vnic_type} with self.port(name='name', arg_list=(portbindings.VNIC_TYPE,), **vnic_arg) as port: port_id = port['port']['id'] # Check a response of create_port self._check_response_portbindings_vnic_type(port['port']) # Check a response of get_port ctx = context.get_admin_context() port = self._show('ports', port_id, neutron_context=ctx)['port'] self._check_response_portbindings_vnic_type(port) # By default user is admin - now test non admin user ctx = context.Context(user_id=None, tenant_id=self._tenant_id, is_admin=False) non_admin_port = self._show( 'ports', port_id, neutron_context=ctx)['port'] self._check_response_portbindings_vnic_type(non_admin_port) def test_ports_vnic_type(self): cfg.CONF.set_default('allow_overlapping_ips', True) vnic_arg = {portbindings.VNIC_TYPE: self.vnic_type} with self.port(name='name1', arg_list=(portbindings.VNIC_TYPE,), **vnic_arg), self.port(name='name2'): ctx = context.get_admin_context() ports = self._list('ports', neutron_context=ctx)['ports'] self.assertEqual(2, len(ports)) for port in ports: if port['name'] == 'name1': self._check_response_portbindings_vnic_type(port) else: self.assertEqual(portbindings.VNIC_NORMAL, port[portbindings.VNIC_TYPE]) # By default user is admin - now test non admin user ctx = context.Context(user_id=None, tenant_id=self._tenant_id, is_admin=False) ports = self._list('ports', neutron_context=ctx)['ports'] self.assertEqual(2, len(ports)) for non_admin_port in ports: self._check_response_portbindings_vnic_type(non_admin_port) def test_ports_vnic_type_list(self): cfg.CONF.set_default('allow_overlapping_ips', True) vnic_arg = {portbindings.VNIC_TYPE: self.vnic_type} with self.port(name='name1', arg_list=(portbindings.VNIC_TYPE,), **vnic_arg) as port1,\ self.port(name='name2') as port2,\ self.port(name='name3', arg_list=(portbindings.VNIC_TYPE,), **vnic_arg) as port3: self._test_list_resources( 'port', (port1, port2, port3), query_params='%s=%s' % (portbindings.VNIC_TYPE, self.vnic_type)) neutron-8.4.0/neutron/tests/unit/test_auth.py0000664000567000056710000001163013044372736022555 0ustar jenkinsjenkins00000000000000# Copyright 2012 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
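# NeutronKeystoneContext is WSGI middleware that turns Keystone auth headers
# into a neutron context stored in the request environ. A minimal usage
# sketch (header names taken from the tests below; 'app' stands in for any
# downstream WSGI application):
#
#     req = webob.Request.blank('/')
#     req.headers['X_AUTH_TOKEN'] = 'testauthtoken'
#     req.headers['X_USER_ID'] = 'testuserid'
#     req.headers['X_PROJECT_ID'] = 'testtenantid'
#     req.get_response(auth.NeutronKeystoneContext(app))
#     ctx = req.environ['neutron.context']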
from oslo_middleware import request_id import webob from neutron import auth from neutron.tests import base class NeutronKeystoneContextTestCase(base.BaseTestCase): def setUp(self): super(NeutronKeystoneContextTestCase, self).setUp() @webob.dec.wsgify def fake_app(req): self.context = req.environ['neutron.context'] return webob.Response() self.context = None self.middleware = auth.NeutronKeystoneContext(fake_app) self.request = webob.Request.blank('/') self.request.headers['X_AUTH_TOKEN'] = 'testauthtoken' def test_no_user_id(self): self.request.headers['X_PROJECT_ID'] = 'testtenantid' response = self.request.get_response(self.middleware) self.assertEqual('401 Unauthorized', response.status) def test_with_user_id(self): self.request.headers['X_PROJECT_ID'] = 'testtenantid' self.request.headers['X_USER_ID'] = 'testuserid' response = self.request.get_response(self.middleware) self.assertEqual('200 OK', response.status) self.assertEqual('testuserid', self.context.user_id) self.assertEqual('testuserid', self.context.user) def test_with_tenant_id(self): self.request.headers['X_PROJECT_ID'] = 'testtenantid' self.request.headers['X_USER_ID'] = 'test_user_id' response = self.request.get_response(self.middleware) self.assertEqual('200 OK', response.status) self.assertEqual('testtenantid', self.context.tenant_id) self.assertEqual('testtenantid', self.context.tenant) def test_roles_no_admin(self): self.request.headers['X_PROJECT_ID'] = 'testtenantid' self.request.headers['X_USER_ID'] = 'testuserid' self.request.headers['X_ROLES'] = 'role1, role2 , role3,role4,role5' response = self.request.get_response(self.middleware) self.assertEqual('200 OK', response.status) self.assertEqual(['role1', 'role2', 'role3', 'role4', 'role5'], self.context.roles) self.assertFalse(self.context.is_admin) def test_roles_with_admin(self): self.request.headers['X_PROJECT_ID'] = 'testtenantid' self.request.headers['X_USER_ID'] = 'testuserid' self.request.headers['X_ROLES'] = ('role1, role2 , role3,role4,role5,' 'AdMiN') response = self.request.get_response(self.middleware) self.assertEqual('200 OK', response.status) self.assertEqual(['role1', 'role2', 'role3', 'role4', 'role5', 'AdMiN'], self.context.roles) self.assertTrue(self.context.is_admin) def test_with_user_tenant_name(self): self.request.headers['X_PROJECT_ID'] = 'testtenantid' self.request.headers['X_USER_ID'] = 'testuserid' self.request.headers['X_PROJECT_NAME'] = 'testtenantname' self.request.headers['X_USER_NAME'] = 'testusername' response = self.request.get_response(self.middleware) self.assertEqual('200 OK', response.status) self.assertEqual('testuserid', self.context.user_id) self.assertEqual('testusername', self.context.user_name) self.assertEqual('testtenantid', self.context.tenant_id) self.assertEqual('testtenantname', self.context.tenant_name) def test_request_id_extracted_from_env(self): req_id = 'dummy-request-id' self.request.headers['X_PROJECT_ID'] = 'testtenantid' self.request.headers['X_USER_ID'] = 'testuserid' self.request.environ[request_id.ENV_REQUEST_ID] = req_id self.request.get_response(self.middleware) self.assertEqual(req_id, self.context.request_id) def test_with_auth_token(self): self.request.headers['X_PROJECT_ID'] = 'testtenantid' self.request.headers['X_USER_ID'] = 'testuserid' response = self.request.get_response(self.middleware) self.assertEqual('200 OK', response.status) self.assertEqual('testauthtoken', self.context.auth_token) def test_without_auth_token(self): self.request.headers['X_PROJECT_ID'] = 'testtenantid' 
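        # The X_AUTH_TOKEN header set in setUp() is removed below to check
        # that a context is still created, only with auth_token left unset.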
self.request.headers['X_USER_ID'] = 'testuserid' del self.request.headers['X_AUTH_TOKEN'] self.request.get_response(self.middleware) self.assertIsNone(self.context.auth_token) neutron-8.4.0/neutron/tests/unit/extensions/0000775000567000056710000000000013044373210022365 5ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/tests/unit/extensions/test_tag.py0000664000567000056710000002301213044372760024560 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron.api import extensions from neutron.common import config import neutron.extensions from neutron.services.tag import tag_plugin from neutron.tests.unit.db import test_db_base_plugin_v2 extensions_path = ':'.join(neutron.extensions.__path__) class TestTagApiBase(test_db_base_plugin_v2.NeutronDbPluginV2TestCase): def setUp(self): service_plugins = {'TAG': "neutron.services.tag.tag_plugin.TagPlugin"} super(TestTagApiBase, self).setUp(service_plugins=service_plugins) plugin = tag_plugin.TagPlugin() ext_mgr = extensions.PluginAwareExtensionManager( extensions_path, {'TAG': plugin} ) app = config.load_paste_app('extensions_test_app') self.ext_api = extensions.ExtensionMiddleware(app, ext_mgr=ext_mgr) def _get_resource_tags(self, resource_id): res = self._show(self.resource, resource_id) return res[self.member]['tags'] def _put_tag(self, resource_id, tag): req = self._req('PUT', self.resource, id=resource_id, subresource='tags', sub_id=tag) return req.get_response(self.ext_api) def _put_tags(self, resource_id, tags): body = {'tags': tags} req = self._req('PUT', self.resource, data=body, id=resource_id, subresource='tags') return req.get_response(self.ext_api) def _get_tag(self, resource_id, tag): req = self._req('GET', self.resource, id=resource_id, subresource='tags', sub_id=tag) return req.get_response(self.ext_api) def _delete_tag(self, resource_id, tag): req = self._req('DELETE', self.resource, id=resource_id, subresource='tags', sub_id=tag) return req.get_response(self.ext_api) def _delete_tags(self, resource_id): req = self._req('DELETE', self.resource, id=resource_id, subresource='tags') return req.get_response(self.ext_api) def _assertEqualTags(self, expected, actual): self.assertEqual(set(expected), set(actual)) def _make_query_string(self, tags, tags_any, not_tags, not_tags_any): filter_strings = [] if tags: filter_strings.append("tags=" + ','.join(tags)) if tags_any: filter_strings.append("tags-any=" + ','.join(tags_any)) if not_tags: filter_strings.append("not-tags=" + ','.join(not_tags)) if not_tags_any: filter_strings.append("not-tags-any=" + ','.join(not_tags_any)) return '&'.join(filter_strings) def _get_tags_filter_resources(self, tags=None, tags_any=None, not_tags=None, not_tags_any=None): params = self._make_query_string(tags, tags_any, not_tags, not_tags_any) req = self._req('GET', self.resource, params=params) res = req.get_response(self.api) res = self.deserialize(self.fmt, res) return res[self.resource] class TestNetworkTagApi(TestTagApiBase): resource = 'networks' member = 'network' def 
test_put_tag(self): with self.network() as net: net_id = net['network']['id'] res = self._put_tag(net_id, 'red') self.assertEqual(201, res.status_int) tags = self._get_resource_tags(net_id) self._assertEqualTags(['red'], tags) res = self._put_tag(net_id, 'blue') self.assertEqual(201, res.status_int) tags = self._get_resource_tags(net_id) self._assertEqualTags(['red', 'blue'], tags) def test_put_tag_exists(self): with self.network() as net: net_id = net['network']['id'] res = self._put_tag(net_id, 'blue') self.assertEqual(201, res.status_int) res = self._put_tag(net_id, 'blue') self.assertEqual(201, res.status_int) def test_put_tags(self): with self.network() as net: net_id = net['network']['id'] res = self._put_tags(net_id, ['red', 'green']) self.assertEqual(200, res.status_int) tags = self._get_resource_tags(net_id) self._assertEqualTags(['red', 'green'], tags) def test_put_tags_replace(self): with self.network() as net: net_id = net['network']['id'] res = self._put_tags(net_id, ['red', 'green']) self.assertEqual(200, res.status_int) tags = self._get_resource_tags(net_id) self._assertEqualTags(['red', 'green'], tags) res = self._put_tags(net_id, ['blue', 'red']) self.assertEqual(200, res.status_int) tags = self._get_resource_tags(net_id) self._assertEqualTags(['blue', 'red'], tags) def test_get_tag(self): with self.network() as net: net_id = net['network']['id'] res = self._put_tag(net_id, 'red') self.assertEqual(201, res.status_int) res = self._get_tag(net_id, 'red') self.assertEqual(204, res.status_int) def test_get_tag_notfound(self): with self.network() as net: net_id = net['network']['id'] res = self._put_tag(net_id, 'red') self.assertEqual(201, res.status_int) res = self._get_tag(net_id, 'green') self.assertEqual(404, res.status_int) def test_delete_tag(self): with self.network() as net: net_id = net['network']['id'] res = self._put_tags(net_id, ['red', 'green']) self.assertEqual(200, res.status_int) res = self._delete_tag(net_id, 'red') self.assertEqual(204, res.status_int) tags = self._get_resource_tags(net_id) self._assertEqualTags(['green'], tags) def test_delete_tag_notfound(self): with self.network() as net: net_id = net['network']['id'] res = self._put_tags(net_id, ['red', 'green']) self.assertEqual(200, res.status_int) res = self._delete_tag(net_id, 'blue') self.assertEqual(404, res.status_int) def test_delete_tags(self): with self.network() as net: net_id = net['network']['id'] res = self._put_tags(net_id, ['red', 'green']) self.assertEqual(200, res.status_int) res = self._delete_tags(net_id) self.assertEqual(204, res.status_int) tags = self._get_resource_tags(net_id) self._assertEqualTags([], tags) class TestNetworkTagFilter(TestTagApiBase): resource = 'networks' member = 'network' def setUp(self): super(TestNetworkTagFilter, self).setUp() self._prepare_network_tags() def _prepare_network_tags(self): res = self._make_network(self.fmt, 'net1', True) net1_id = res['network']['id'] res = self._make_network(self.fmt, 'net2', True) net2_id = res['network']['id'] res = self._make_network(self.fmt, 'net3', True) net3_id = res['network']['id'] res = self._make_network(self.fmt, 'net4', True) net4_id = res['network']['id'] res = self._make_network(self.fmt, 'net5', True) net5_id = res['network']['id'] self._put_tags(net1_id, ['red']) self._put_tags(net2_id, ['red', 'blue']) self._put_tags(net3_id, ['red', 'blue', 'green']) self._put_tags(net4_id, ['green']) # net5: no tags tags = self._get_resource_tags(net5_id) self._assertEqualTags([], tags) def _assertEqualResources(self, 
expected, res): actual = [n['name'] for n in res] self.assertEqual(set(expected), set(actual)) def test_filter_tags_single(self): res = self._get_tags_filter_resources(tags=['red']) self._assertEqualResources(['net1', 'net2', 'net3'], res) def test_filter_tags_multi(self): res = self._get_tags_filter_resources(tags=['red', 'blue']) self._assertEqualResources(['net2', 'net3'], res) def test_filter_tags_any_single(self): res = self._get_tags_filter_resources(tags_any=['blue']) self._assertEqualResources(['net2', 'net3'], res) def test_filter_tags_any_multi(self): res = self._get_tags_filter_resources(tags_any=['red', 'blue']) self._assertEqualResources(['net1', 'net2', 'net3'], res) def test_filter_not_tags_single(self): res = self._get_tags_filter_resources(not_tags=['red']) self._assertEqualResources(['net4', 'net5'], res) def test_filter_not_tags_multi(self): res = self._get_tags_filter_resources(not_tags=['red', 'blue']) self._assertEqualResources(['net1', 'net4', 'net5'], res) def test_filter_not_tags_any_single(self): res = self._get_tags_filter_resources(not_tags_any=['blue']) self._assertEqualResources(['net1', 'net4', 'net5'], res) def test_filter_not_tags_any_multi(self): res = self._get_tags_filter_resources(not_tags_any=['red', 'blue']) self._assertEqualResources(['net4', 'net5'], res) neutron-8.4.0/neutron/tests/unit/extensions/test_l3.py0000664000567000056710000050536013044372760024336 0ustar jenkinsjenkins00000000000000# Copyright 2012 VMware, Inc. # All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
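# The L3 extension tests below drive the router and floating IP API against
# a mocked plugin. The canonical router-create payload, as exercised in
# L3NatExtensionTestCase.test_router_create further down, looks like:
#
#     {'router': {'name': 'router1',
#                 'admin_state_up': True,
#                 'tenant_id': <uuid>,
#                 'external_gateway_info': None}}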
# import contextlib import copy import mock import netaddr from oslo_config import cfg from oslo_utils import importutils from oslo_utils import uuidutils from sqlalchemy import orm from webob import exc from neutron._i18n import _ from neutron.api.rpc.agentnotifiers import l3_rpc_agent_api from neutron.api.rpc.handlers import l3_rpc from neutron.api.v2 import attributes from neutron.callbacks import events from neutron.callbacks import exceptions from neutron.callbacks import registry from neutron.callbacks import resources from neutron.common import constants as l3_constants from neutron.common import exceptions as n_exc from neutron import context from neutron.db import common_db_mixin from neutron.db import db_base_plugin_v2 from neutron.db import dns_db from neutron.db import external_net_db from neutron.db import l3_agentschedulers_db from neutron.db import l3_attrs_db from neutron.db import l3_db from neutron.db import l3_dvr_db from neutron.db import l3_dvrscheduler_db from neutron.db import models_v2 from neutron.extensions import external_net from neutron.extensions import l3 from neutron.extensions import portbindings from neutron import manager from neutron.plugins.common import constants as service_constants from neutron.tests import base from neutron.tests.common import helpers from neutron.tests import fake_notifier from neutron.tests.unit.api import test_extensions from neutron.tests.unit.api.v2 import test_base from neutron.tests.unit.db import test_db_base_plugin_v2 from neutron.tests.unit.extensions import base as test_extensions_base from neutron.tests.unit.extensions import test_agent from neutron.tests.unit.plugins.ml2 import base as ml2_base _uuid = uuidutils.generate_uuid _get_path = test_base._get_path DEVICE_OWNER_COMPUTE = l3_constants.DEVICE_OWNER_COMPUTE_PREFIX + 'fake' class L3TestExtensionManager(object): def get_resources(self): # Add the resources to the global attribute map # This is done here as the setup process won't # initialize the main API router which extends # the global attribute map attributes.RESOURCE_ATTRIBUTE_MAP.update( l3.RESOURCE_ATTRIBUTE_MAP) return l3.L3.get_resources() def get_actions(self): return [] def get_request_extensions(self): return [] class L3NatExtensionTestCase(test_extensions_base.ExtensionTestCase): fmt = 'json' def setUp(self): super(L3NatExtensionTestCase, self).setUp() self._setUpExtension( 'neutron.extensions.l3.RouterPluginBase', None, l3.RESOURCE_ATTRIBUTE_MAP, l3.L3, '', allow_pagination=True, allow_sorting=True, supported_extension_aliases=['router'], use_quota=True) def test_router_create(self): router_id = _uuid() data = {'router': {'name': 'router1', 'admin_state_up': True, 'tenant_id': _uuid(), 'external_gateway_info': None}} return_value = copy.deepcopy(data['router']) return_value.update({'status': "ACTIVE", 'id': router_id}) instance = self.plugin.return_value instance.create_router.return_value = return_value instance.get_routers_count.return_value = 0 res = self.api.post(_get_path('routers', fmt=self.fmt), self.serialize(data), content_type='application/%s' % self.fmt) instance.create_router.assert_called_with(mock.ANY, router=data) self.assertEqual(exc.HTTPCreated.code, res.status_int) res = self.deserialize(res) self.assertIn('router', res) router = res['router'] self.assertEqual(router_id, router['id']) self.assertEqual("ACTIVE", router['status']) self.assertTrue(router['admin_state_up']) def test_router_list(self): router_id = _uuid() return_value = [{'name': 'router1', 'admin_state_up': True, 
'tenant_id': _uuid(), 'id': router_id}] instance = self.plugin.return_value instance.get_routers.return_value = return_value res = self.api.get(_get_path('routers', fmt=self.fmt)) instance.get_routers.assert_called_with(mock.ANY, fields=mock.ANY, filters=mock.ANY, sorts=mock.ANY, limit=mock.ANY, marker=mock.ANY, page_reverse=mock.ANY) self.assertEqual(exc.HTTPOk.code, res.status_int) res = self.deserialize(res) self.assertIn('routers', res) self.assertEqual(1, len(res['routers'])) self.assertEqual(router_id, res['routers'][0]['id']) def test_router_update(self): router_id = _uuid() update_data = {'router': {'admin_state_up': False}} return_value = {'name': 'router1', 'admin_state_up': False, 'tenant_id': _uuid(), 'status': "ACTIVE", 'id': router_id} instance = self.plugin.return_value instance.update_router.return_value = return_value res = self.api.put(_get_path('routers', id=router_id, fmt=self.fmt), self.serialize(update_data)) instance.update_router.assert_called_with(mock.ANY, router_id, router=update_data) self.assertEqual(exc.HTTPOk.code, res.status_int) res = self.deserialize(res) self.assertIn('router', res) router = res['router'] self.assertEqual(router_id, router['id']) self.assertEqual("ACTIVE", router['status']) self.assertFalse(router['admin_state_up']) def test_router_get(self): router_id = _uuid() return_value = {'name': 'router1', 'admin_state_up': False, 'tenant_id': _uuid(), 'status': "ACTIVE", 'id': router_id} instance = self.plugin.return_value instance.get_router.return_value = return_value res = self.api.get(_get_path('routers', id=router_id, fmt=self.fmt)) instance.get_router.assert_called_with(mock.ANY, router_id, fields=mock.ANY) self.assertEqual(exc.HTTPOk.code, res.status_int) res = self.deserialize(res) self.assertIn('router', res) router = res['router'] self.assertEqual(router_id, router['id']) self.assertEqual("ACTIVE", router['status']) self.assertFalse(router['admin_state_up']) def test_router_delete(self): router_id = _uuid() res = self.api.delete(_get_path('routers', id=router_id)) instance = self.plugin.return_value instance.delete_router.assert_called_with(mock.ANY, router_id) self.assertEqual(exc.HTTPNoContent.code, res.status_int) def test_router_add_interface(self): router_id = _uuid() subnet_id = _uuid() port_id = _uuid() interface_data = {'subnet_id': subnet_id} return_value = copy.deepcopy(interface_data) return_value['port_id'] = port_id instance = self.plugin.return_value instance.add_router_interface.return_value = return_value path = _get_path('routers', id=router_id, action="add_router_interface", fmt=self.fmt) res = self.api.put(path, self.serialize(interface_data)) instance.add_router_interface.assert_called_with(mock.ANY, router_id, interface_data) self.assertEqual(exc.HTTPOk.code, res.status_int) res = self.deserialize(res) self.assertIn('port_id', res) self.assertEqual(port_id, res['port_id']) self.assertEqual(subnet_id, res['subnet_id']) # This base plugin class is for tests. 
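# It pairs the core DB plugin with the external-net mixin so the L3 tests
# below run against a realistic core plugin.  Note how create_network,
# update_network and delete_network each wrap the parent call plus the
# _process_l3_* extension hook in a single DB subtransaction, and how
# delete_port defers to the L3 service plugin (when one is configured) to
# block deletion of router ports and to disassociate floating IPs first.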
class TestL3NatBasePlugin(db_base_plugin_v2.NeutronDbPluginV2, external_net_db.External_net_db_mixin): __native_pagination_support = True __native_sorting_support = True def create_network(self, context, network): session = context.session with session.begin(subtransactions=True): net = super(TestL3NatBasePlugin, self).create_network(context, network) self._process_l3_create(context, net, network['network']) return net def update_network(self, context, id, network): session = context.session with session.begin(subtransactions=True): net = super(TestL3NatBasePlugin, self).update_network(context, id, network) self._process_l3_update(context, net, network['network']) return net def delete_network(self, context, id): with context.session.begin(subtransactions=True): self._process_l3_delete(context, id) super(TestL3NatBasePlugin, self).delete_network(context, id) def delete_port(self, context, id, l3_port_check=True): plugin = manager.NeutronManager.get_service_plugins().get( service_constants.L3_ROUTER_NAT) if plugin: if l3_port_check: plugin.prevent_l3_port_deletion(context, id) plugin.disassociate_floatingips(context, id) return super(TestL3NatBasePlugin, self).delete_port(context, id) # This plugin class is for tests with plugin that integrates L3. class TestL3NatIntPlugin(TestL3NatBasePlugin, l3_db.L3_NAT_db_mixin, dns_db.DNSDbMixin): supported_extension_aliases = ["external-net", "router", "dns-integration"] # This plugin class is for tests with plugin that integrates L3 and L3 agent # scheduling. class TestL3NatIntAgentSchedulingPlugin(TestL3NatIntPlugin, l3_agentschedulers_db. L3AgentSchedulerDbMixin): supported_extension_aliases = ["external-net", "router", "l3_agent_scheduler"] router_scheduler = importutils.import_object( cfg.CONF.router_scheduler_driver) # This plugin class is for tests with plugin not supporting L3. class TestNoL3NatPlugin(TestL3NatBasePlugin): __native_pagination_support = True __native_sorting_support = True supported_extension_aliases = ["external-net"] # A L3 routing service plugin class for tests with plugins that # delegate away L3 routing functionality class TestL3NatServicePlugin(common_db_mixin.CommonDbMixin, l3_dvr_db.L3_NAT_with_dvr_db_mixin, l3_db.L3_NAT_db_mixin, dns_db.DNSDbMixin): supported_extension_aliases = ["router", "dns-integration"] def get_plugin_type(self): return service_constants.L3_ROUTER_NAT def get_plugin_description(self): return "L3 Routing Service Plugin for testing" # A L3 routing with L3 agent scheduling service plugin class for tests with # plugins that delegate away L3 routing functionality class TestL3NatAgentSchedulingServicePlugin(TestL3NatServicePlugin, l3_dvrscheduler_db. 
L3_DVRsch_db_mixin): supported_extension_aliases = ["router", "l3_agent_scheduler"] def __init__(self): super(TestL3NatAgentSchedulingServicePlugin, self).__init__() self.router_scheduler = importutils.import_object( cfg.CONF.router_scheduler_driver) self.agent_notifiers.update( {l3_constants.AGENT_TYPE_L3: l3_rpc_agent_api.L3AgentNotifyAPI()}) class L3NatTestCaseMixin(object): def _create_router(self, fmt, tenant_id, name=None, admin_state_up=None, set_context=False, arg_list=None, **kwargs): tenant_id = tenant_id or _uuid() data = {'router': {'tenant_id': tenant_id}} if name: data['router']['name'] = name if admin_state_up: data['router']['admin_state_up'] = admin_state_up for arg in (('admin_state_up', 'tenant_id', 'availability_zone_hints') + (arg_list or ())): # Arg must be present and not empty if arg in kwargs: data['router'][arg] = kwargs[arg] router_req = self.new_create_request('routers', data, fmt) if set_context and tenant_id: # create a specific auth context for this request router_req.environ['neutron.context'] = context.Context( '', tenant_id) return router_req.get_response(self.ext_api) def _make_router(self, fmt, tenant_id, name=None, admin_state_up=None, external_gateway_info=None, set_context=False, arg_list=None, **kwargs): if external_gateway_info: arg_list = ('external_gateway_info', ) + (arg_list or ()) res = self._create_router(fmt, tenant_id, name, admin_state_up, set_context, arg_list=arg_list, external_gateway_info=external_gateway_info, **kwargs) return self.deserialize(fmt, res) def _add_external_gateway_to_router(self, router_id, network_id, expected_code=exc.HTTPOk.code, neutron_context=None, ext_ips=None): ext_ips = ext_ips or [] body = {'router': {'external_gateway_info': {'network_id': network_id}}} if ext_ips: body['router']['external_gateway_info'][ 'external_fixed_ips'] = ext_ips return self._update('routers', router_id, body, expected_code=expected_code, neutron_context=neutron_context) def _remove_external_gateway_from_router(self, router_id, network_id, expected_code=exc.HTTPOk.code, external_gw_info=None): return self._update('routers', router_id, {'router': {'external_gateway_info': external_gw_info}}, expected_code=expected_code) def _router_interface_action(self, action, router_id, subnet_id, port_id, expected_code=exc.HTTPOk.code, expected_body=None, tenant_id=None, msg=None): interface_data = {} if subnet_id is not None: interface_data.update({'subnet_id': subnet_id}) if port_id is not None: interface_data.update({'port_id': port_id}) req = self.new_action_request('routers', interface_data, router_id, "%s_router_interface" % action) # if tenant_id was specified, create a tenant context for this request if tenant_id: req.environ['neutron.context'] = context.Context( '', tenant_id) res = req.get_response(self.ext_api) self.assertEqual(expected_code, res.status_int, msg) response = self.deserialize(self.fmt, res) if expected_body: self.assertEqual(expected_body, response, msg) return response @contextlib.contextmanager def router(self, name='router1', admin_state_up=True, fmt=None, tenant_id=None, external_gateway_info=None, set_context=False, **kwargs): router = self._make_router(fmt or self.fmt, tenant_id, name, admin_state_up, external_gateway_info, set_context, **kwargs) yield router def _set_net_external(self, net_id): self._update('networks', net_id, {'network': {external_net.EXTERNAL: True}}) def _create_floatingip(self, fmt, network_id, port_id=None, fixed_ip=None, set_context=False, floating_ip=None, subnet_id=False, tenant_id=None): 
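        # Helper that issues POST /floatingips.  Only the keys the caller
        # supplies (port_id, fixed_ip, floating_ip, subnet_id) go into the
        # request body; with set_context, a tenant-scoped neutron.context is
        # attached so the request is authorized as that tenant.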
tenant_id = tenant_id or self._tenant_id data = {'floatingip': {'floating_network_id': network_id, 'tenant_id': tenant_id}} if port_id: data['floatingip']['port_id'] = port_id if fixed_ip: data['floatingip']['fixed_ip_address'] = fixed_ip if floating_ip: data['floatingip']['floating_ip_address'] = floating_ip if subnet_id: data['floatingip']['subnet_id'] = subnet_id floatingip_req = self.new_create_request('floatingips', data, fmt) if set_context and tenant_id: # create a specific auth context for this request floatingip_req.environ['neutron.context'] = context.Context( '', tenant_id) return floatingip_req.get_response(self.ext_api) def _make_floatingip(self, fmt, network_id, port_id=None, fixed_ip=None, set_context=False, tenant_id=None, floating_ip=None, http_status=exc.HTTPCreated.code): res = self._create_floatingip(fmt, network_id, port_id, fixed_ip, set_context, floating_ip, tenant_id=tenant_id) self.assertEqual(http_status, res.status_int) return self.deserialize(fmt, res) def _validate_floating_ip(self, fip): body = self._list('floatingips') self.assertEqual(1, len(body['floatingips'])) self.assertEqual(body['floatingips'][0]['id'], fip['floatingip']['id']) body = self._show('floatingips', fip['floatingip']['id']) self.assertEqual(body['floatingip']['id'], fip['floatingip']['id']) @contextlib.contextmanager def floatingip_with_assoc(self, port_id=None, fmt=None, fixed_ip=None, public_cidr='11.0.0.0/24', set_context=False, tenant_id=None): with self.subnet(cidr=public_cidr, set_context=set_context, tenant_id=tenant_id) as public_sub: self._set_net_external(public_sub['subnet']['network_id']) private_port = None if port_id: private_port = self._show('ports', port_id) with test_db_base_plugin_v2.optional_ctx( private_port, self.port, set_context=set_context, tenant_id=tenant_id) as private_port: with self.router(set_context=set_context, tenant_id=tenant_id) as r: sid = private_port['port']['fixed_ips'][0]['subnet_id'] private_sub = {'subnet': {'id': sid}} floatingip = None self._add_external_gateway_to_router( r['router']['id'], public_sub['subnet']['network_id']) self._router_interface_action( 'add', r['router']['id'], private_sub['subnet']['id'], None) floatingip = self._make_floatingip( fmt or self.fmt, public_sub['subnet']['network_id'], port_id=private_port['port']['id'], fixed_ip=fixed_ip, tenant_id=tenant_id, set_context=set_context) yield floatingip if floatingip: self._delete('floatingips', floatingip['floatingip']['id']) @contextlib.contextmanager def floatingip_no_assoc_with_public_sub( self, private_sub, fmt=None, set_context=False, public_sub=None): self._set_net_external(public_sub['subnet']['network_id']) with self.router() as r: floatingip = None self._add_external_gateway_to_router( r['router']['id'], public_sub['subnet']['network_id']) self._router_interface_action('add', r['router']['id'], private_sub['subnet']['id'], None) floatingip = self._make_floatingip( fmt or self.fmt, public_sub['subnet']['network_id'], set_context=set_context) yield floatingip, r if floatingip: self._delete('floatingips', floatingip['floatingip']['id']) @contextlib.contextmanager def floatingip_no_assoc(self, private_sub, fmt=None, set_context=False): with self.subnet(cidr='12.0.0.0/24') as public_sub: with self.floatingip_no_assoc_with_public_sub( private_sub, fmt, set_context, public_sub) as (f, r): # Yield only the floating ip object yield f class ExtraAttributesMixinTestCase(base.BaseTestCase): def setUp(self): super(ExtraAttributesMixinTestCase, self).setUp() self.mixin = 
l3_attrs_db.ExtraAttributesMixin() def _test__extend_extra_router_dict( self, extra_attributes, attributes, expected_attributes): self.mixin._extend_extra_router_dict( attributes, {'extra_attributes': extra_attributes}) self.assertEqual(expected_attributes, attributes) def test__extend_extra_router_dict_string_default(self): self.mixin.extra_attributes = [{ 'name': "foo_key", 'default': 'foo_default' }] extension_attributes = {'foo_key': 'my_fancy_value'} self._test__extend_extra_router_dict( extension_attributes, {}, extension_attributes) def test__extend_extra_router_dict_booleans_false_default(self): self.mixin.extra_attributes = [{ 'name': "foo_key", 'default': False }] extension_attributes = {'foo_key': True} self._test__extend_extra_router_dict( extension_attributes, {}, extension_attributes) def test__extend_extra_router_dict_booleans_true_default(self): self.mixin.extra_attributes = [{ 'name': "foo_key", 'default': True }] # Test that the default is overridden extension_attributes = {'foo_key': False} self._test__extend_extra_router_dict( extension_attributes, {}, extension_attributes) def test__extend_extra_router_dict_no_extension_attributes(self): self.mixin.extra_attributes = [{ 'name': "foo_key", 'default': 'foo_value' }] self._test__extend_extra_router_dict({}, {}, {'foo_key': 'foo_value'}) def test__extend_extra_router_dict_none_extension_attributes(self): self._test__extend_extra_router_dict(None, {}, {}) class L3NatTestCaseBase(L3NatTestCaseMixin): def test_router_create(self): name = 'router1' tenant_id = _uuid() expected_value = [('name', name), ('tenant_id', tenant_id), ('admin_state_up', True), ('status', 'ACTIVE'), ('external_gateway_info', None)] with self.router(name='router1', admin_state_up=True, tenant_id=tenant_id) as router: for k, v in expected_value: self.assertEqual(router['router'][k], v) def test_router_create_call_extensions(self): self.extension_called = False def _extend_router_dict_test_attr(*args, **kwargs): self.extension_called = True db_base_plugin_v2.NeutronDbPluginV2.register_dict_extend_funcs( l3.ROUTERS, [_extend_router_dict_test_attr]) self.assertFalse(self.extension_called) with self.router(): self.assertTrue(self.extension_called) def test_router_create_with_gwinfo(self): with self.subnet() as s: self._set_net_external(s['subnet']['network_id']) data = {'router': {'tenant_id': _uuid()}} data['router']['name'] = 'router1' data['router']['external_gateway_info'] = { 'network_id': s['subnet']['network_id']} router_req = self.new_create_request('routers', data, self.fmt) res = router_req.get_response(self.ext_api) router = self.deserialize(self.fmt, res) self.assertEqual( s['subnet']['network_id'], router['router']['external_gateway_info']['network_id']) def test_router_create_with_gwinfo_ext_ip(self): with self.subnet() as s: self._set_net_external(s['subnet']['network_id']) ext_info = { 'network_id': s['subnet']['network_id'], 'external_fixed_ips': [{'ip_address': '10.0.0.99'}] } res = self._create_router( self.fmt, _uuid(), arg_list=('external_gateway_info',), external_gateway_info=ext_info ) router = self.deserialize(self.fmt, res) self.assertEqual( [{'ip_address': '10.0.0.99', 'subnet_id': s['subnet']['id']}], router['router']['external_gateway_info'][ 'external_fixed_ips']) def test_router_create_with_gwinfo_ext_ip_subnet(self): with self.network() as n: with self.subnet(network=n) as v1,\ self.subnet(network=n, cidr='1.0.0.0/24') as v2,\ self.subnet(network=n, cidr='2.0.0.0/24') as v3: subnets = (v1, v2, v3) 
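                # Request each subnet in turn via external_fixed_ips and
                # check that the gateway port gets an address allocated
                # from exactly the subnet that was asked for.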
self._set_net_external(n['network']['id']) for s in subnets: ext_info = { 'network_id': n['network']['id'], 'external_fixed_ips': [ {'subnet_id': s['subnet']['id']}] } res = self._create_router( self.fmt, _uuid(), arg_list=('external_gateway_info',), external_gateway_info=ext_info ) router = self.deserialize(self.fmt, res) ext_ips = router['router']['external_gateway_info'][ 'external_fixed_ips'] self.assertEqual( [{'subnet_id': s['subnet']['id'], 'ip_address': mock.ANY}], ext_ips) def test_router_create_with_gwinfo_ext_ip_non_admin(self): with self.subnet() as s: self._set_net_external(s['subnet']['network_id']) ext_info = { 'network_id': s['subnet']['network_id'], 'external_fixed_ips': [{'ip_address': '10.0.0.99'}] } res = self._create_router( self.fmt, _uuid(), arg_list=('external_gateway_info',), set_context=True, external_gateway_info=ext_info ) self.assertEqual(exc.HTTPForbidden.code, res.status_int) def test_router_list(self): with self.router() as v1, self.router() as v2, self.router() as v3: routers = (v1, v2, v3) self._test_list_resources('router', routers) def test_router_list_with_parameters(self): with self.router(name='router1') as router1,\ self.router(name='router2') as router2: query_params = 'name=router1' self._test_list_resources('router', [router1], query_params=query_params) query_params = 'name=router2' self._test_list_resources('router', [router2], query_params=query_params) query_params = 'name=router3' self._test_list_resources('router', [], query_params=query_params) def test_router_list_with_sort(self): with self.router(name='router1') as router1,\ self.router(name='router2') as router2,\ self.router(name='router3') as router3: self._test_list_with_sort('router', (router3, router2, router1), [('name', 'desc')]) def test_router_list_with_pagination(self): with self.router(name='router1') as router1,\ self.router(name='router2') as router2,\ self.router(name='router3') as router3: self._test_list_with_pagination('router', (router1, router2, router3), ('name', 'asc'), 2, 2) def test_router_list_with_pagination_reverse(self): with self.router(name='router1') as router1,\ self.router(name='router2') as router2,\ self.router(name='router3') as router3: self._test_list_with_pagination_reverse('router', (router1, router2, router3), ('name', 'asc'), 2, 2) def test_router_update(self): rname1 = "yourrouter" rname2 = "nachorouter" with self.router(name=rname1) as r: body = self._show('routers', r['router']['id']) self.assertEqual(body['router']['name'], rname1) body = self._update('routers', r['router']['id'], {'router': {'name': rname2}}) body = self._show('routers', r['router']['id']) self.assertEqual(body['router']['name'], rname2) def test_router_update_gateway(self): with self.router() as r: with self.subnet() as s1: with self.subnet() as s2: self._set_net_external(s1['subnet']['network_id']) self._add_external_gateway_to_router( r['router']['id'], s1['subnet']['network_id']) body = self._show('routers', r['router']['id']) net_id = (body['router'] ['external_gateway_info']['network_id']) self.assertEqual(net_id, s1['subnet']['network_id']) self._set_net_external(s2['subnet']['network_id']) self._add_external_gateway_to_router( r['router']['id'], s2['subnet']['network_id']) body = self._show('routers', r['router']['id']) net_id = (body['router'] ['external_gateway_info']['network_id']) self.assertEqual(net_id, s2['subnet']['network_id']) # Validate that we can clear the gateway with # an empty dict, in any other case, we fall back # on None as default value 
self._remove_external_gateway_from_router( r['router']['id'], s2['subnet']['network_id'], external_gw_info={}) def test_router_update_gateway_with_external_ip_used_by_gw(self): with self.router() as r: with self.subnet() as s: self._set_net_external(s['subnet']['network_id']) self._add_external_gateway_to_router( r['router']['id'], s['subnet']['network_id'], ext_ips=[{'ip_address': s['subnet']['gateway_ip']}], expected_code=exc.HTTPBadRequest.code) def test_router_update_gateway_with_invalid_external_ip(self): with self.router() as r: with self.subnet() as s: self._set_net_external(s['subnet']['network_id']) self._add_external_gateway_to_router( r['router']['id'], s['subnet']['network_id'], ext_ips=[{'ip_address': '99.99.99.99'}], expected_code=exc.HTTPBadRequest.code) def test_router_update_gateway_with_invalid_external_subnet(self): with self.subnet() as s1,\ self.subnet(cidr='1.0.0.0/24') as s2,\ self.router() as r: self._set_net_external(s1['subnet']['network_id']) self._add_external_gateway_to_router( r['router']['id'], s1['subnet']['network_id'], # this subnet is not on the same network so this should fail ext_ips=[{'subnet_id': s2['subnet']['id']}], expected_code=exc.HTTPBadRequest.code) def test_router_update_gateway_with_different_external_subnet(self): with self.network() as n: with self.subnet(network=n) as s1,\ self.subnet(network=n, cidr='1.0.0.0/24') as s2,\ self.router() as r: self._set_net_external(n['network']['id']) res1 = self._add_external_gateway_to_router( r['router']['id'], n['network']['id'], ext_ips=[{'subnet_id': s1['subnet']['id']}]) res2 = self._add_external_gateway_to_router( r['router']['id'], n['network']['id'], ext_ips=[{'subnet_id': s2['subnet']['id']}]) fip1 = res1['router']['external_gateway_info']['external_fixed_ips'][0] fip2 = res2['router']['external_gateway_info']['external_fixed_ips'][0] self.assertEqual(s1['subnet']['id'], fip1['subnet_id']) self.assertEqual(s2['subnet']['id'], fip2['subnet_id']) self.assertNotEqual(fip1['subnet_id'], fip2['subnet_id']) self.assertNotEqual(fip1['ip_address'], fip2['ip_address']) def test_router_update_gateway_with_existed_floatingip(self): with self.subnet() as subnet: self._set_net_external(subnet['subnet']['network_id']) with self.floatingip_with_assoc() as fip: self._add_external_gateway_to_router( fip['floatingip']['router_id'], subnet['subnet']['network_id'], expected_code=exc.HTTPConflict.code) def test_router_update_gateway_to_empty_with_existed_floatingip(self): with self.floatingip_with_assoc() as fip: self._remove_external_gateway_from_router( fip['floatingip']['router_id'], None, expected_code=exc.HTTPConflict.code) def test_router_update_gateway_add_multiple_prefixes_ipv6(self): with self.network() as n: with self.subnet(network=n) as s1, \ self.subnet(network=n, ip_version=6, cidr='2001:db8::/32') \ as s2, (self.router()) as r: self._set_net_external(n['network']['id']) res1 = self._add_external_gateway_to_router( r['router']['id'], n['network']['id'], ext_ips=[{'subnet_id': s1['subnet']['id']}]) fip1 = (res1['router']['external_gateway_info'] ['external_fixed_ips'][0]) self.assertEqual(s1['subnet']['id'], fip1['subnet_id']) res2 = self._add_external_gateway_to_router( r['router']['id'], n['network']['id'], ext_ips=[{'ip_address': fip1['ip_address'], 'subnet_id': s1['subnet']['id']}, {'subnet_id': s2['subnet']['id']}]) self.assertEqual(fip1, res2['router']['external_gateway_info'] ['external_fixed_ips'][0]) fip2 = (res2['router']['external_gateway_info'] ['external_fixed_ips'][1]) 
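                # The original IPv4 fixed IP must survive the gateway
                # update; the IPv6 prefix is appended as a second fixed IP
                # on the same gateway port.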
                    self.assertEqual(s2['subnet']['id'], fip2['subnet_id'])
                    self.assertNotEqual(fip1['subnet_id'],
                                        fip2['subnet_id'])
                    self.assertNotEqual(fip1['ip_address'],
                                        fip2['ip_address'])

    def test_router_update_gateway_upon_subnet_create_ipv6(self):
        with self.network() as n:
            with self.subnet(network=n) as s1, self.router() as r:
                self._set_net_external(n['network']['id'])
                res1 = self._add_external_gateway_to_router(
                    r['router']['id'],
                    n['network']['id'],
                    ext_ips=[{'subnet_id': s1['subnet']['id']}])
                fip1 = (res1['router']['external_gateway_info']
                        ['external_fixed_ips'][0])
                sres = self._create_subnet(self.fmt,
                                           net_id=n['network']['id'],
                                           ip_version=6, cidr='2001:db8::/32',
                                           expected_res_status=(
                                               exc.HTTPCreated.code))
                s2 = self.deserialize(self.fmt, sres)
                res2 = self._show('routers', r['router']['id'])
                self.assertEqual(fip1, res2['router']['external_gateway_info']
                                 ['external_fixed_ips'][0])
                fip2 = (res2['router']['external_gateway_info']
                        ['external_fixed_ips'][1])
                self.assertEqual(s2['subnet']['id'], fip2['subnet_id'])
                self.assertNotEqual(fip1['subnet_id'], fip2['subnet_id'])
                self.assertNotEqual(fip1['ip_address'], fip2['ip_address'])

    def test_router_update_gateway_upon_subnet_create_max_ips_ipv6(self):
        """Create subnet should not cause excess fixed IPs on router gw

        If a router gateway port already has the maximum of one IPv4 and
        one IPv6 fixed IP, creating a subnet should not add any more IP
        addresses to the port (unless the new subnet is a
        SLAAC/DHCPv6-stateless subnet, in which case the addresses are
        added automatically).
        """
        with self.router() as r, self.network() as n:
            with self.subnet(cidr='10.0.0.0/24', network=n) as s1, (
                    self.subnet(ip_version=6, cidr='2001:db8::/64',
                                network=n)) as s2:
                self._set_net_external(n['network']['id'])
                self._add_external_gateway_to_router(
                    r['router']['id'],
                    n['network']['id'],
                    ext_ips=[{'subnet_id': s1['subnet']['id']},
                             {'subnet_id': s2['subnet']['id']}],
                    expected_code=exc.HTTPOk.code)
                res1 = self._show('routers', r['router']['id'])
                original_fips = (res1['router']['external_gateway_info']
                                 ['external_fixed_ips'])
                # Add another IPv4 subnet - a fip SHOULD NOT be added
                # to the external gateway port as it already has a v4 address
                self._create_subnet(self.fmt, net_id=n['network']['id'],
                                    cidr='10.0.1.0/24')
                res2 = self._show('routers', r['router']['id'])
                self.assertEqual(original_fips,
                                 res2['router']['external_gateway_info']
                                 ['external_fixed_ips'])
                # Add a SLAAC subnet - a fip from this subnet SHOULD be added
                # to the external gateway port
                s3 = self.deserialize(self.fmt,
                    self._create_subnet(self.fmt,
                                        net_id=n['network']['id'],
                                        ip_version=6, cidr='2001:db8:1::/64',
                                        ipv6_ra_mode=l3_constants.IPV6_SLAAC,
                                        ipv6_address_mode=l3_constants.
                                        IPV6_SLAAC))
                res3 = self._show('routers', r['router']['id'])
                fips = (res3['router']['external_gateway_info']
                        ['external_fixed_ips'])
                fip_subnet_ids = [fip['subnet_id'] for fip in fips]
                self.assertIn(s1['subnet']['id'], fip_subnet_ids)
                self.assertIn(s2['subnet']['id'], fip_subnet_ids)
                self.assertIn(s3['subnet']['id'], fip_subnet_ids)
                self._remove_external_gateway_from_router(
                    r['router']['id'],
                    n['network']['id'])

    def _test_router_add_interface_subnet(self, router, subnet, msg=None):
        exp_notifications = ['router.create.start',
                             'router.create.end',
                             'network.create.start',
                             'network.create.end',
                             'subnet.create.start',
                             'subnet.create.end',
                             'router.interface.create',
                             'router.interface.delete']
        body = self._router_interface_action('add',
                                             router['router']['id'],
                                             subnet['subnet']['id'],
                                             None)
        self.assertIn('port_id', body, msg)

        # fetch port and confirm device_id
        r_port_id
= body['port_id'] port = self._show('ports', r_port_id) self.assertEqual(port['port']['device_id'], router['router']['id'], msg) self._router_interface_action('remove', router['router']['id'], subnet['subnet']['id'], None) self._show('ports', r_port_id, expected_code=exc.HTTPNotFound.code) self.assertEqual( set(exp_notifications), set(n['event_type'] for n in fake_notifier.NOTIFICATIONS), msg) for n in fake_notifier.NOTIFICATIONS: if n['event_type'].startswith('router.interface.'): payload = n['payload']['router_interface'] self.assertIn('id', payload) self.assertEqual(payload['id'], router['router']['id']) self.assertIn('tenant_id', payload) stid = subnet['subnet']['tenant_id'] # tolerate subnet tenant deliberately set to '' in the # nsx metadata access case self.assertIn(payload['tenant_id'], [stid, ''], msg) def test_router_add_interface_bad_values(self): with self.router() as r: exp_code = exc.HTTPBadRequest.code self._router_interface_action('add', r['router']['id'], False, None, expected_code=exp_code) self._router_interface_action('add', r['router']['id'], None, False, expected_code=exp_code) def test_router_add_interface_subnet(self): fake_notifier.reset() with self.router() as r: with self.network() as n: with self.subnet(network=n) as s: self._test_router_add_interface_subnet(r, s) def test_router_add_interface_ipv6_subnet(self): """Test router-interface-add for valid ipv6 subnets. Verify the valid use-cases of an IPv6 subnet where we are allowed to associate to the Neutron Router are successful. """ slaac = l3_constants.IPV6_SLAAC stateful = l3_constants.DHCPV6_STATEFUL stateless = l3_constants.DHCPV6_STATELESS use_cases = [{'msg': 'IPv6 Subnet Modes (slaac, none)', 'ra_mode': slaac, 'address_mode': None}, {'msg': 'IPv6 Subnet Modes (none, none)', 'ra_mode': None, 'address_mode': None}, {'msg': 'IPv6 Subnet Modes (dhcpv6-stateful, none)', 'ra_mode': stateful, 'address_mode': None}, {'msg': 'IPv6 Subnet Modes (dhcpv6-stateless, none)', 'ra_mode': stateless, 'address_mode': None}, {'msg': 'IPv6 Subnet Modes (slaac, slaac)', 'ra_mode': slaac, 'address_mode': slaac}, {'msg': 'IPv6 Subnet Modes (dhcpv6-stateful,' 'dhcpv6-stateful)', 'ra_mode': stateful, 'address_mode': stateful}, {'msg': 'IPv6 Subnet Modes (dhcpv6-stateless,' 'dhcpv6-stateless)', 'ra_mode': stateless, 'address_mode': stateless}] for uc in use_cases: fake_notifier.reset() with self.router() as r, self.network() as n: with self.subnet(network=n, cidr='fd00::1/64', gateway_ip='fd00::1', ip_version=6, ipv6_ra_mode=uc['ra_mode'], ipv6_address_mode=uc['address_mode']) as s: self._test_router_add_interface_subnet(r, s, uc['msg']) def test_router_add_interface_multiple_ipv4_subnets(self): """Test router-interface-add for multiple ipv4 subnets. Verify that adding multiple ipv4 subnets from the same network to a router places them all on different router interfaces. 
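        A router port may carry at most one IPv4 subnet (see
        test_router_add_interface_multiple_ipv4_subnet_port_returns_400
        below), so each subnet has to land on a port of its own.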
""" with self.router() as r, self.network() as n: with self.subnet(network=n, cidr='10.0.0.0/24') as s1, ( self.subnet(network=n, cidr='10.0.1.0/24')) as s2: body = self._router_interface_action('add', r['router']['id'], s1['subnet']['id'], None) pid1 = body['port_id'] body = self._router_interface_action('add', r['router']['id'], s2['subnet']['id'], None) pid2 = body['port_id'] self.assertNotEqual(pid1, pid2) self._router_interface_action('remove', r['router']['id'], s1['subnet']['id'], None) self._router_interface_action('remove', r['router']['id'], s2['subnet']['id'], None) def test_router_add_interface_multiple_ipv6_subnets_same_net(self): """Test router-interface-add for multiple ipv6 subnets on a network. Verify that adding multiple ipv6 subnets from the same network to a router places them all on the same router interface. """ with self.router() as r, self.network() as n: with (self.subnet(network=n, cidr='fd00::1/64', ip_version=6) ) as s1, self.subnet(network=n, cidr='fd01::1/64', ip_version=6) as s2: body = self._router_interface_action('add', r['router']['id'], s1['subnet']['id'], None) pid1 = body['port_id'] body = self._router_interface_action('add', r['router']['id'], s2['subnet']['id'], None) pid2 = body['port_id'] self.assertEqual(pid1, pid2) port = self._show('ports', pid1) self.assertEqual(2, len(port['port']['fixed_ips'])) port_subnet_ids = [fip['subnet_id'] for fip in port['port']['fixed_ips']] self.assertIn(s1['subnet']['id'], port_subnet_ids) self.assertIn(s2['subnet']['id'], port_subnet_ids) self._router_interface_action('remove', r['router']['id'], s1['subnet']['id'], None) self._router_interface_action('remove', r['router']['id'], s2['subnet']['id'], None) def test_router_add_interface_multiple_ipv6_subnets_different_net(self): """Test router-interface-add for ipv6 subnets on different networks. Verify that adding multiple ipv6 subnets from different networks to a router places them on different router interfaces. """ with self.router() as r, self.network() as n1, self.network() as n2: with (self.subnet(network=n1, cidr='fd00::1/64', ip_version=6) ) as s1, self.subnet(network=n2, cidr='fd01::1/64', ip_version=6) as s2: body = self._router_interface_action('add', r['router']['id'], s1['subnet']['id'], None) pid1 = body['port_id'] body = self._router_interface_action('add', r['router']['id'], s2['subnet']['id'], None) pid2 = body['port_id'] self.assertNotEqual(pid1, pid2) self._router_interface_action('remove', r['router']['id'], s1['subnet']['id'], None) self._router_interface_action('remove', r['router']['id'], s2['subnet']['id'], None) def test_router_add_iface_ipv6_ext_ra_subnet_returns_400(self): """Test router-interface-add for in-valid ipv6 subnets. Verify that an appropriate error message is displayed when an IPv6 subnet configured to use an external_router for Router Advertisements (i.e., ipv6_ra_mode is None and ipv6_address_mode is not None) is attempted to associate with a Neutron Router. 
""" use_cases = [{'msg': 'IPv6 Subnet Modes (none, slaac)', 'ra_mode': None, 'address_mode': l3_constants.IPV6_SLAAC}, {'msg': 'IPv6 Subnet Modes (none, dhcpv6-stateful)', 'ra_mode': None, 'address_mode': l3_constants.DHCPV6_STATEFUL}, {'msg': 'IPv6 Subnet Modes (none, dhcpv6-stateless)', 'ra_mode': None, 'address_mode': l3_constants.DHCPV6_STATELESS}] for uc in use_cases: with self.router() as r, self.network() as n: with self.subnet(network=n, cidr='fd00::1/64', gateway_ip='fd00::1', ip_version=6, ipv6_ra_mode=uc['ra_mode'], ipv6_address_mode=uc['address_mode']) as s: exp_code = exc.HTTPBadRequest.code self._router_interface_action('add', r['router']['id'], s['subnet']['id'], None, expected_code=exp_code, msg=uc['msg']) def test_router_add_interface_ipv6_subnet_without_gateway_ip(self): with self.router() as r: with self.subnet(ip_version=6, cidr='fe80::/64', gateway_ip=None) as s: error_code = exc.HTTPBadRequest.code self._router_interface_action('add', r['router']['id'], s['subnet']['id'], None, expected_code=error_code) def test_router_add_interface_subnet_with_bad_tenant_returns_404(self): tenant_id = _uuid() with self.router(tenant_id=tenant_id, set_context=True) as r: with self.network(tenant_id=tenant_id, set_context=True) as n: with self.subnet(network=n, set_context=True) as s: err_code = exc.HTTPNotFound.code self._router_interface_action('add', r['router']['id'], s['subnet']['id'], None, expected_code=err_code, tenant_id='bad_tenant') body = self._router_interface_action('add', r['router']['id'], s['subnet']['id'], None) self.assertIn('port_id', body) self._router_interface_action('remove', r['router']['id'], s['subnet']['id'], None, expected_code=err_code, tenant_id='bad_tenant') def test_router_add_interface_subnet_with_port_from_other_tenant(self): tenant_id = _uuid() other_tenant_id = _uuid() with self.router(tenant_id=tenant_id) as r,\ self.network(tenant_id=tenant_id) as n1,\ self.network(tenant_id=other_tenant_id) as n2: with self.subnet(network=n1, cidr='10.0.0.0/24') as s1,\ self.subnet(network=n2, cidr='10.1.0.0/24') as s2: body = self._router_interface_action( 'add', r['router']['id'], s2['subnet']['id'], None) self.assertIn('port_id', body) self._router_interface_action( 'add', r['router']['id'], s1['subnet']['id'], None, tenant_id=tenant_id) self.assertIn('port_id', body) def test_router_add_interface_port(self): orig_update_port = self.plugin.update_port with self.router() as r, ( self.port()) as p, ( mock.patch.object(self.plugin, 'update_port')) as update_port: update_port.side_effect = orig_update_port body = self._router_interface_action('add', r['router']['id'], None, p['port']['id']) self.assertIn('port_id', body) self.assertEqual(p['port']['id'], body['port_id']) expected_port_update = { 'device_owner': l3_constants.DEVICE_OWNER_ROUTER_INTF, 'device_id': r['router']['id']} update_port.assert_called_with( mock.ANY, p['port']['id'], {'port': expected_port_update}) # fetch port and confirm device_id body = self._show('ports', p['port']['id']) self.assertEqual(r['router']['id'], body['port']['device_id']) # clean-up self._router_interface_action('remove', r['router']['id'], None, p['port']['id']) def _assert_body_port_id_and_update_port(self, body, mock_update_port, port_id, device_id): self.assertNotIn('port_id', body) expected_port_update_before_update = { 'device_owner': l3_constants.DEVICE_OWNER_ROUTER_INTF, 'device_id': device_id} expected_port_update_after_fail = { 'device_owner': '', 'device_id': ''} mock_update_port.assert_has_calls( [mock.call( 
mock.ANY, port_id, {'port': expected_port_update_before_update}), mock.call( mock.ANY, port_id, {'port': expected_port_update_after_fail})], any_order=False) # fetch port and confirm device_id and device_owner body = self._show('ports', port_id) self.assertEqual('', body['port']['device_owner']) self.assertEqual('', body['port']['device_id']) def test_router_add_interface_multiple_ipv4_subnet_port_returns_400(self): """Test adding router port with multiple IPv4 subnets fails. Multiple IPv4 subnets are not allowed on a single router port. Ensure that adding a port with multiple IPv4 subnets to a router fails. """ with self.network() as n, self.router() as r: with self.subnet(network=n, cidr='10.0.0.0/24') as s1, ( self.subnet(network=n, cidr='10.0.1.0/24')) as s2: fixed_ips = [{'subnet_id': s1['subnet']['id']}, {'subnet_id': s2['subnet']['id']}] orig_update_port = self.plugin.update_port with self.port(subnet=s1, fixed_ips=fixed_ips) as p, ( mock.patch.object(self.plugin, 'update_port')) as update_port: update_port.side_effect = orig_update_port exp_code = exc.HTTPBadRequest.code body = self._router_interface_action( 'add', r['router']['id'], None, p['port']['id'], expected_code=exp_code) self._assert_body_port_id_and_update_port( body, update_port, p['port']['id'], r['router']['id']) def test_router_add_interface_ipv6_port_existing_network_returns_400(self): """Ensure unique IPv6 router ports per network id. Adding a router port containing one or more IPv6 subnets with the same network id as an existing router port should fail. This is so there is no ambiguity regarding on which port to add an IPv6 subnet when executing router-interface-add with a subnet and no port. """ with self.network() as n, self.router() as r: with self.subnet(network=n, cidr='fd00::/64', ip_version=6) as s1, ( self.subnet(network=n, cidr='fd01::/64', ip_version=6)) as s2: orig_update_port = self.plugin.update_port with self.port(subnet=s1) as p, ( mock.patch.object(self.plugin, 'update_port')) as update_port: update_port.side_effect = orig_update_port self._router_interface_action('add', r['router']['id'], s2['subnet']['id'], None) exp_code = exc.HTTPBadRequest.code body = self._router_interface_action( 'add', r['router']['id'], None, p['port']['id'], expected_code=exp_code) self._assert_body_port_id_and_update_port( body, update_port, p['port']['id'], r['router']['id']) self._router_interface_action('remove', r['router']['id'], s2['subnet']['id'], None) def test_router_add_interface_multiple_ipv6_subnet_port(self): """A port with multiple IPv6 subnets can be added to a router Create a port with multiple associated IPv6 subnets and attach it to a router. The action should succeed. """ with self.network() as n, self.router() as r: with self.subnet(network=n, cidr='fd00::/64', ip_version=6) as s1, ( self.subnet(network=n, cidr='fd01::/64', ip_version=6)) as s2: fixed_ips = [{'subnet_id': s1['subnet']['id']}, {'subnet_id': s2['subnet']['id']}] with self.port(subnet=s1, fixed_ips=fixed_ips) as p: self._router_interface_action('add', r['router']['id'], None, p['port']['id']) self._router_interface_action('remove', r['router']['id'], None, p['port']['id']) def test_router_add_interface_empty_port_and_subnet_ids(self): with self.router() as r: self._router_interface_action('add', r['router']['id'], None, None, expected_code=exc. 
HTTPBadRequest.code) def test_router_add_interface_port_bad_tenant_returns_404(self): tenant_id = _uuid() with self.router(tenant_id=tenant_id, set_context=True) as r: with self.network(tenant_id=tenant_id, set_context=True) as n: with self.subnet(tenant_id=tenant_id, network=n, set_context=True) as s: with self.port(tenant_id=tenant_id, subnet=s, set_context=True) as p: err_code = exc.HTTPNotFound.code self._router_interface_action('add', r['router']['id'], None, p['port']['id'], expected_code=err_code, tenant_id='bad_tenant') self._router_interface_action('add', r['router']['id'], None, p['port']['id'], tenant_id=tenant_id) # clean-up should fail as well self._router_interface_action('remove', r['router']['id'], None, p['port']['id'], expected_code=err_code, tenant_id='bad_tenant') def test_router_add_interface_port_without_ips(self): with self.network() as network, self.router() as r: # Create a router port without ips p = self._make_port(self.fmt, network['network']['id'], device_owner=l3_constants.DEVICE_OWNER_ROUTER_INTF) err_code = exc.HTTPBadRequest.code self._router_interface_action('add', r['router']['id'], None, p['port']['id'], expected_code=err_code) def test_router_add_interface_dup_subnet1_returns_400(self): with self.router() as r: with self.subnet() as s: self._router_interface_action('add', r['router']['id'], s['subnet']['id'], None) self._router_interface_action('add', r['router']['id'], s['subnet']['id'], None, expected_code=exc. HTTPBadRequest.code) def test_router_add_interface_dup_subnet2_returns_400(self): with self.router() as r: with self.subnet() as s1, self.subnet(cidr='1.0.0.0/24') as s2: with self.port(subnet=s1) as p1, self.port(subnet=s2) as p2: orig_update_port = self.plugin.update_port with self.port(subnet=s1) as p3, ( mock.patch.object(self.plugin, 'update_port')) as update_port: update_port.side_effect = orig_update_port for p in [p1, p2]: self._router_interface_action('add', r['router']['id'], None, p['port']['id']) body = self._router_interface_action( 'add', r['router']['id'], None, p3['port']['id'], expected_code=exc.HTTPBadRequest.code) self._assert_body_port_id_and_update_port( body, update_port, p3['port']['id'], r['router']['id']) def test_router_add_interface_overlapped_cidr_returns_400(self): with self.router() as r: with self.subnet(cidr='10.0.1.0/24') as s1, self.subnet( cidr='10.0.2.0/24') as s2: self._router_interface_action('add', r['router']['id'], s1['subnet']['id'], None) self._router_interface_action('add', r['router']['id'], s2['subnet']['id'], None) def try_overlapped_cidr(cidr): with self.subnet(cidr=cidr) as s3: self._router_interface_action('add', r['router']['id'], s3['subnet']['id'], None, expected_code=exc. HTTPBadRequest.code) # another subnet with same cidr try_overlapped_cidr('10.0.1.0/24') try_overlapped_cidr('10.0.2.0/24') # another subnet with overlapped cidr including s1 try_overlapped_cidr('10.0.0.0/16') # another subnet with overlapped cidr including s2 try_overlapped_cidr('10.0.2.128/28') def test_router_add_interface_no_data_returns_400(self): with self.router() as r: self._router_interface_action('add', r['router']['id'], None, None, expected_code=exc. HTTPBadRequest.code) def test_router_add_interface_with_both_ids_returns_400(self): with self.router() as r: with self.subnet() as s: with self.port(subnet=s) as p: self._router_interface_action('add', r['router']['id'], s['subnet']['id'], p['port']['id'], expected_code=exc. 
HTTPBadRequest.code) def test_router_add_gateway_dup_subnet1_returns_400(self): with self.router() as r: with self.subnet() as s: self._router_interface_action('add', r['router']['id'], s['subnet']['id'], None) self._set_net_external(s['subnet']['network_id']) self._add_external_gateway_to_router( r['router']['id'], s['subnet']['network_id'], expected_code=exc.HTTPBadRequest.code) def test_router_add_gateway_dup_subnet2_returns_400(self): with self.router() as r: with self.subnet() as s: self._set_net_external(s['subnet']['network_id']) self._add_external_gateway_to_router( r['router']['id'], s['subnet']['network_id']) self._router_interface_action('add', r['router']['id'], s['subnet']['id'], None, expected_code=exc. HTTPBadRequest.code) def test_router_add_gateway_multiple_subnets_ipv6(self): """Ensure external gateway set doesn't add excess IPs on router gw Setting the gateway of a router to an external network with more than one IPv4 and one IPv6 subnet should only add an address from the first IPv4 subnet, an address from the first IPv6-stateful subnet, and an address from each IPv6-stateless (SLAAC and DHCPv6-stateless) subnet """ with self.router() as r, self.network() as n: with self.subnet( cidr='10.0.0.0/24', network=n) as s1, ( self.subnet( cidr='10.0.1.0/24', network=n)) as s2, ( self.subnet( cidr='2001:db8::/64', network=n, ip_version=6, ipv6_ra_mode=l3_constants.IPV6_SLAAC, ipv6_address_mode=l3_constants.IPV6_SLAAC)) as s3, ( self.subnet( cidr='2001:db8:1::/64', network=n, ip_version=6, ipv6_ra_mode=l3_constants.DHCPV6_STATEFUL, ipv6_address_mode=l3_constants.DHCPV6_STATEFUL)) as s4, ( self.subnet( cidr='2001:db8:2::/64', network=n, ip_version=6, ipv6_ra_mode=l3_constants.DHCPV6_STATELESS, ipv6_address_mode=l3_constants.DHCPV6_STATELESS)) as s5: self._set_net_external(n['network']['id']) self._add_external_gateway_to_router( r['router']['id'], n['network']['id']) res = self._show('routers', r['router']['id']) fips = (res['router']['external_gateway_info'] ['external_fixed_ips']) fip_subnet_ids = {fip['subnet_id'] for fip in fips} # one of s1 or s2 should be in the list. 
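                # Which IPv4 subnet is picked depends on the order the
                # subnets are returned by the plugin, so accept either
                # outcome as long as exactly one v4 subnet shows up
                # alongside all three v6 subnets.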
                if s1['subnet']['id'] in fip_subnet_ids:
                    self.assertEqual({s1['subnet']['id'],
                                      s3['subnet']['id'],
                                      s4['subnet']['id'],
                                      s5['subnet']['id']},
                                     fip_subnet_ids)
                else:
                    self.assertEqual({s2['subnet']['id'],
                                      s3['subnet']['id'],
                                      s4['subnet']['id'],
                                      s5['subnet']['id']},
                                     fip_subnet_ids)
                self._remove_external_gateway_from_router(
                    r['router']['id'], n['network']['id'])

    def test_router_add_and_remove_gateway(self):
        with self.router() as r:
            with self.subnet() as s:
                self._set_net_external(s['subnet']['network_id'])
                self._add_external_gateway_to_router(
                    r['router']['id'],
                    s['subnet']['network_id'])
                body = self._show('routers', r['router']['id'])
                net_id = body['router']['external_gateway_info']['network_id']
                self.assertEqual(net_id, s['subnet']['network_id'])
                self._remove_external_gateway_from_router(
                    r['router']['id'],
                    s['subnet']['network_id'])
                body = self._show('routers', r['router']['id'])
                gw_info = body['router']['external_gateway_info']
                self.assertIsNone(gw_info)

    def test_router_add_and_remove_gateway_tenant_ctx(self):
        with self.router(tenant_id='noadmin',
                         set_context=True) as r:
            with self.subnet() as s:
                self._set_net_external(s['subnet']['network_id'])
                ctx = context.Context('', 'noadmin')
                self._add_external_gateway_to_router(
                    r['router']['id'],
                    s['subnet']['network_id'],
                    neutron_context=ctx)
                body = self._show('routers', r['router']['id'])
                net_id = body['router']['external_gateway_info']['network_id']
                self.assertEqual(net_id, s['subnet']['network_id'])
                self._remove_external_gateway_from_router(
                    r['router']['id'],
                    s['subnet']['network_id'])
                body = self._show('routers', r['router']['id'])
                gw_info = body['router']['external_gateway_info']
                self.assertIsNone(gw_info)

    def test_create_router_port_with_device_id_of_other_tenants_router(self):
        with self.router() as admin_router:
            with self.network(tenant_id='tenant_a',
                              set_context=True) as n:
                with self.subnet(network=n):
                    for device_owner in l3_constants.ROUTER_INTERFACE_OWNERS:
                        self._create_port(
                            self.fmt, n['network']['id'],
                            tenant_id='tenant_a',
                            device_id=admin_router['router']['id'],
                            device_owner=device_owner,
                            set_context=True,
                            expected_res_status=exc.HTTPConflict.code)

    def test_create_non_router_port_device_id_of_other_tenants_router_update(
            self):
        # This tests that HTTPConflict is raised if we create a non-router
        # port that matches the device_id of another tenant's router and then
        # we change the device_owner to be network:router_interface.
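        # Allowing that update would let a tenant graft an arbitrary port
        # onto another tenant's router, which is why it must be rejected.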
with self.router() as admin_router: with self.network(tenant_id='tenant_a', set_context=True) as n: with self.subnet(network=n): for device_owner in l3_constants.ROUTER_INTERFACE_OWNERS: port_res = self._create_port( self.fmt, n['network']['id'], tenant_id='tenant_a', device_id=admin_router['router']['id'], set_context=True) port = self.deserialize(self.fmt, port_res) neutron_context = context.Context('', 'tenant_a') data = {'port': {'device_owner': device_owner}} self._update('ports', port['port']['id'], data, neutron_context=neutron_context, expected_code=exc.HTTPConflict.code) def test_update_port_device_id_to_different_tenants_router(self): with self.router() as admin_router: with self.router(tenant_id='tenant_a', set_context=True) as tenant_router: with self.network(tenant_id='tenant_a', set_context=True) as n: with self.subnet(network=n) as s: port = self._router_interface_action( 'add', tenant_router['router']['id'], s['subnet']['id'], None, tenant_id='tenant_a') neutron_context = context.Context('', 'tenant_a') data = {'port': {'device_id': admin_router['router']['id']}} self._update('ports', port['port_id'], data, neutron_context=neutron_context, expected_code=exc.HTTPConflict.code) def test_router_add_gateway_invalid_network_returns_400(self): with self.router() as r: self._add_external_gateway_to_router( r['router']['id'], "foobar", expected_code=exc.HTTPBadRequest.code) def test_router_add_gateway_non_existent_network_returns_404(self): with self.router() as r: self._add_external_gateway_to_router( r['router']['id'], _uuid(), expected_code=exc.HTTPNotFound.code) def test_router_add_gateway_net_not_external_returns_400(self): with self.router() as r: with self.subnet() as s: # intentionally do not set net as external self._add_external_gateway_to_router( r['router']['id'], s['subnet']['network_id'], expected_code=exc.HTTPBadRequest.code) def test_router_add_gateway_no_subnet(self): with self.router() as r: with self.network() as n: self._set_net_external(n['network']['id']) self._add_external_gateway_to_router( r['router']['id'], n['network']['id']) body = self._show('routers', r['router']['id']) net_id = body['router']['external_gateway_info']['network_id'] self.assertEqual(net_id, n['network']['id']) self._remove_external_gateway_from_router( r['router']['id'], n['network']['id']) body = self._show('routers', r['router']['id']) gw_info = body['router']['external_gateway_info'] self.assertIsNone(gw_info) def test_router_add_gateway_no_subnet_forbidden(self): with self.router() as r: with self.network() as n: self._set_net_external(n['network']['id']) with mock.patch.object(registry, 'notify') as notify: errors = [ exceptions.NotificationError( 'foo_callback_id', n_exc.InvalidInput(error_message='forbidden')), ] notify.side_effect = exceptions.CallbackFailure( errors=errors) self._add_external_gateway_to_router( r['router']['id'], n['network']['id'], expected_code=exc.HTTPBadRequest.code) notify.assert_called_once_with( resources.ROUTER_GATEWAY, events.BEFORE_CREATE, mock.ANY, context=mock.ANY, router_id=r['router']['id'], network_id=n['network']['id'], subnets=[]) def test_router_remove_interface_inuse_returns_409(self): with self.router() as r: with self.subnet() as s: self._router_interface_action('add', r['router']['id'], s['subnet']['id'], None) self._delete('routers', r['router']['id'], expected_code=exc.HTTPConflict.code) def test_router_remove_interface_callback_failure_returns_409(self): with self.router() as r,\ self.subnet() as s,\ mock.patch.object(registry, 
'notify') as notify: errors = [ exceptions.NotificationError( 'foo_callback_id', n_exc.InUse()), ] self._router_interface_action('add', r['router']['id'], s['subnet']['id'], None) # we fail the first time, but not the second, when # the clean-up takes place notify.side_effect = [ exceptions.CallbackFailure(errors=errors), None ] self._router_interface_action( 'remove', r['router']['id'], s['subnet']['id'], None, exc.HTTPConflict.code) def test_router_clear_gateway_callback_failure_returns_409(self): with self.router() as r,\ self.subnet() as s,\ mock.patch.object(registry, 'notify') as notify: errors = [ exceptions.NotificationError( 'foo_callback_id', n_exc.InUse()), ] self._set_net_external(s['subnet']['network_id']) self._add_external_gateway_to_router( r['router']['id'], s['subnet']['network_id']) notify.side_effect = exceptions.CallbackFailure(errors=errors) self._remove_external_gateway_from_router( r['router']['id'], s['subnet']['network_id'], external_gw_info={}, expected_code=exc.HTTPConflict.code) def test_router_remove_interface_wrong_subnet_returns_400(self): with self.router() as r: with self.subnet() as s: with self.port() as p: self._router_interface_action('add', r['router']['id'], None, p['port']['id']) self._router_interface_action('remove', r['router']['id'], s['subnet']['id'], p['port']['id'], exc.HTTPBadRequest.code) def test_router_remove_interface_nothing_returns_400(self): with self.router() as r: with self.subnet() as s: with self.port(subnet=s) as p: self._router_interface_action('add', r['router']['id'], None, p['port']['id']) self._router_interface_action('remove', r['router']['id'], None, None, exc.HTTPBadRequest.code) #remove properly to clean-up self._router_interface_action('remove', r['router']['id'], None, p['port']['id']) def test_router_remove_interface_returns_200(self): with self.router() as r: with self.port() as p: body = self._router_interface_action('add', r['router']['id'], None, p['port']['id']) self._router_interface_action('remove', r['router']['id'], None, p['port']['id'], expected_body=body) def test_router_remove_interface_with_both_ids_returns_200(self): with self.router() as r: with self.subnet() as s: with self.port(subnet=s) as p: self._router_interface_action('add', r['router']['id'], None, p['port']['id']) self._router_interface_action('remove', r['router']['id'], s['subnet']['id'], p['port']['id']) def test_router_remove_interface_wrong_port_returns_404(self): with self.router() as r: with self.subnet(): with self.port() as p: self._router_interface_action('add', r['router']['id'], None, p['port']['id']) # create another port for testing failure case res = self._create_port(self.fmt, p['port']['network_id']) p2 = self.deserialize(self.fmt, res) self._router_interface_action('remove', r['router']['id'], None, p2['port']['id'], exc.HTTPNotFound.code) def test_router_remove_ipv6_subnet_from_interface(self): """Delete a subnet from a router interface Verify that deleting a subnet with router-interface-delete removes that subnet when there are multiple subnets on the interface and removes the interface when it is the last subnet on the interface. 
""" with self.router() as r, self.network() as n: with (self.subnet(network=n, cidr='fd00::1/64', ip_version=6) ) as s1, self.subnet(network=n, cidr='fd01::1/64', ip_version=6) as s2: body = self._router_interface_action('add', r['router']['id'], s1['subnet']['id'], None) self._router_interface_action('add', r['router']['id'], s2['subnet']['id'], None) port = self._show('ports', body['port_id']) self.assertEqual(2, len(port['port']['fixed_ips'])) self._router_interface_action('remove', r['router']['id'], s1['subnet']['id'], None) port = self._show('ports', body['port_id']) self.assertEqual(1, len(port['port']['fixed_ips'])) self._router_interface_action('remove', r['router']['id'], s2['subnet']['id'], None) exp_code = exc.HTTPNotFound.code port = self._show('ports', body['port_id'], expected_code=exp_code) def test_router_delete(self): with self.router() as router: router_id = router['router']['id'] req = self.new_show_request('router', router_id) res = req.get_response(self._api_for_resource('router')) self.assertEqual(404, res.status_int) def test_router_delete_with_port_existed_returns_409(self): with self.subnet() as subnet: res = self._create_router(self.fmt, _uuid()) router = self.deserialize(self.fmt, res) self._router_interface_action('add', router['router']['id'], subnet['subnet']['id'], None) self._delete('routers', router['router']['id'], exc.HTTPConflict.code) def test_router_delete_with_floatingip_existed_returns_409(self): with self.port() as p: private_sub = {'subnet': {'id': p['port']['fixed_ips'][0]['subnet_id']}} with self.subnet(cidr='12.0.0.0/24') as public_sub: self._set_net_external(public_sub['subnet']['network_id']) res = self._create_router(self.fmt, _uuid()) r = self.deserialize(self.fmt, res) self._add_external_gateway_to_router( r['router']['id'], public_sub['subnet']['network_id']) self._router_interface_action('add', r['router']['id'], private_sub['subnet']['id'], None) res = self._create_floatingip( self.fmt, public_sub['subnet']['network_id'], port_id=p['port']['id']) self.assertEqual(exc.HTTPCreated.code, res.status_int) self._delete('routers', r['router']['id'], expected_code=exc.HTTPConflict.code) def test_router_show(self): name = 'router1' tenant_id = _uuid() expected_value = [('name', name), ('tenant_id', tenant_id), ('admin_state_up', True), ('status', 'ACTIVE'), ('external_gateway_info', None)] with self.router(name='router1', admin_state_up=True, tenant_id=tenant_id) as router: res = self._show('routers', router['router']['id']) for k, v in expected_value: self.assertEqual(res['router'][k], v) def test_network_update_external_failure(self): with self.router() as r: with self.subnet() as s1: self._set_net_external(s1['subnet']['network_id']) self._add_external_gateway_to_router( r['router']['id'], s1['subnet']['network_id']) self._update('networks', s1['subnet']['network_id'], {'network': {external_net.EXTERNAL: False}}, expected_code=exc.HTTPConflict.code) def test_network_update_external(self): with self.router() as r: with self.network('test_net') as testnet: self._set_net_external(testnet['network']['id']) with self.subnet() as s1: self._set_net_external(s1['subnet']['network_id']) self._add_external_gateway_to_router( r['router']['id'], s1['subnet']['network_id']) self._update('networks', testnet['network']['id'], {'network': {external_net.EXTERNAL: False}}) def test_floatingip_crd_ops(self): with self.floatingip_with_assoc() as fip: self._validate_floating_ip(fip) # post-delete, check that it is really gone body = self._list('floatingips') 
        self.assertEqual(0, len(body['floatingips']))
        self._show('floatingips', fip['floatingip']['id'],
                   expected_code=exc.HTTPNotFound.code)

    def _test_floatingip_with_assoc_fails(self, plugin_method):
        with self.subnet(cidr='200.0.0.0/24') as public_sub:
            self._set_net_external(public_sub['subnet']['network_id'])
            with self.port() as private_port:
                with self.router() as r:
                    sid = private_port['port']['fixed_ips'][0]['subnet_id']
                    private_sub = {'subnet': {'id': sid}}
                    self._add_external_gateway_to_router(
                        r['router']['id'],
                        public_sub['subnet']['network_id'])
                    self._router_interface_action(
                        'add', r['router']['id'],
                        private_sub['subnet']['id'], None)
                    with mock.patch(plugin_method) as pl:
                        pl.side_effect = n_exc.BadRequest(
                            resource='floatingip',
                            msg='fake_error')
                        res = self._create_floatingip(
                            self.fmt,
                            public_sub['subnet']['network_id'],
                            port_id=private_port['port']['id'])
                        self.assertEqual(400, res.status_int)
                    for p in self._list('ports')['ports']:
                        if (p['device_owner'] ==
                                l3_constants.DEVICE_OWNER_FLOATINGIP):
                            self.fail('garbage port is not deleted')

    def test_floatingip_with_assoc_fails(self):
        self._test_floatingip_with_assoc_fails(
            'neutron.db.l3_db.L3_NAT_db_mixin._check_and_get_fip_assoc')

    def test_create_floatingip_with_assoc(
            self, expected_status=l3_constants.FLOATINGIP_STATUS_ACTIVE):
        with self.floatingip_with_assoc() as fip:
            body = self._show('floatingips', fip['floatingip']['id'])
            self.assertEqual(body['floatingip']['id'],
                             fip['floatingip']['id'])
            self.assertEqual(body['floatingip']['port_id'],
                             fip['floatingip']['port_id'])
            self.assertEqual(expected_status, body['floatingip']['status'])
            self.assertIsNotNone(body['floatingip']['fixed_ip_address'])
            self.assertIsNotNone(body['floatingip']['router_id'])

    def test_create_floatingip_non_admin_context_agent_notification(self):
        plugin = manager.NeutronManager.get_service_plugins()[
            service_constants.L3_ROUTER_NAT]
        if not hasattr(plugin, 'l3_rpc_notifier'):
            self.skipTest("Plugin does not support l3_rpc_notifier")

        with self.subnet(cidr='11.0.0.0/24') as public_sub,\
                self.port() as private_port,\
                self.router() as r:
            self._set_net_external(public_sub['subnet']['network_id'])
            subnet_id = private_port['port']['fixed_ips'][0]['subnet_id']
            private_sub = {'subnet': {'id': subnet_id}}

            self._add_external_gateway_to_router(
                r['router']['id'],
                public_sub['subnet']['network_id'])
            self._router_interface_action(
                'add', r['router']['id'],
                private_sub['subnet']['id'], None)

            with mock.patch.object(plugin.l3_rpc_notifier,
                                   'routers_updated') as agent_notification:
                self._make_floatingip(
                    self.fmt,
                    public_sub['subnet']['network_id'],
                    port_id=private_port['port']['id'],
                    set_context=False)
                self.assertTrue(agent_notification.called)

    def test_floating_port_status_not_applicable(self):
        with self.floatingip_with_assoc():
            port_body = self._list(
                'ports',
                query_params='device_owner=network:floatingip')['ports'][0]
            self.assertEqual(l3_constants.PORT_STATUS_NOTAPPLICABLE,
                             port_body['status'])

    def test_floatingip_update(
            self, expected_status=l3_constants.FLOATINGIP_STATUS_ACTIVE):
        with self.port() as p:
            private_sub = {'subnet': {'id':
                                      p['port']['fixed_ips'][0]['subnet_id']}}
            with self.floatingip_no_assoc(private_sub) as fip:
                body = self._show('floatingips', fip['floatingip']['id'])
                self.assertIsNone(body['floatingip']['port_id'])
                self.assertIsNone(body['floatingip']['fixed_ip_address'])
                self.assertEqual(expected_status,
                                 body['floatingip']['status'])

                port_id = p['port']['id']
                ip_address = p['port']['fixed_ips'][0]['ip_address']
                body = self._update('floatingips', fip['floatingip']['id'],
                                    {'floatingip': {'port_id': port_id}})
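                # Associating is done purely through an update of port_id;
                # the fixed_ip_address is then derived server-side from the
                # port's first fixed IP, which the assertions below verify.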
                self.assertEqual(port_id, body['floatingip']['port_id'])
                self.assertEqual(ip_address,
                                 body['floatingip']['fixed_ip_address'])

    def test_floatingip_create_different_fixed_ip_same_port(self):
        """This tests that it is possible to delete a port that has
        multiple floating ip addresses associated with it (each floating
        address associated with a unique fixed address).
        """
        with self.router() as r:
            with self.subnet(cidr='11.0.0.0/24') as public_sub:
                self._set_net_external(public_sub['subnet']['network_id'])
                self._add_external_gateway_to_router(
                    r['router']['id'],
                    public_sub['subnet']['network_id'])
                with self.subnet() as private_sub:
                    ip_range = list(netaddr.IPNetwork(
                        private_sub['subnet']['cidr']))
                    fixed_ips = [{'ip_address': str(ip_range[-3])},
                                 {'ip_address': str(ip_range[-2])}]
                    self._router_interface_action(
                        'add', r['router']['id'],
                        private_sub['subnet']['id'], None)
                    with self.port(subnet=private_sub,
                                   fixed_ips=fixed_ips) as p:
                        fip1 = self._make_floatingip(
                            self.fmt,
                            public_sub['subnet']['network_id'],
                            p['port']['id'],
                            fixed_ip=str(ip_range[-2]))
                        fip2 = self._make_floatingip(
                            self.fmt,
                            public_sub['subnet']['network_id'],
                            p['port']['id'],
                            fixed_ip=str(ip_range[-3]))

                        # Test that floating ips are assigned successfully.
                        body = self._show('floatingips',
                                          fip1['floatingip']['id'])
                        self.assertEqual(
                            body['floatingip']['port_id'],
                            fip1['floatingip']['port_id'])

                        body = self._show('floatingips',
                                          fip2['floatingip']['id'])
                        self.assertEqual(
                            body['floatingip']['port_id'],
                            fip2['floatingip']['port_id'])
                        self._delete('ports', p['port']['id'])
                    # Test that port has been successfully deleted.
                    body = self._show('ports', p['port']['id'],
                                      expected_code=exc.HTTPNotFound.code)

    def test_floatingip_update_different_fixed_ip_same_port(self):
        with self.subnet() as s:
            ip_range = list(netaddr.IPNetwork(s['subnet']['cidr']))
            fixed_ips = [{'ip_address': str(ip_range[-3])},
                         {'ip_address': str(ip_range[-2])}]
            with self.port(subnet=s, fixed_ips=fixed_ips) as p:
                with self.floatingip_with_assoc(
                        port_id=p['port']['id'],
                        fixed_ip=str(ip_range[-3])) as fip:
                    body = self._show('floatingips', fip['floatingip']['id'])
                    self.assertEqual(fip['floatingip']['id'],
                                     body['floatingip']['id'])
                    self.assertEqual(fip['floatingip']['port_id'],
                                     body['floatingip']['port_id'])
                    self.assertEqual(str(ip_range[-3]),
                                     body['floatingip']['fixed_ip_address'])
                    self.assertIsNotNone(body['floatingip']['router_id'])
                    body_2 = self._update(
                        'floatingips', fip['floatingip']['id'],
                        {'floatingip': {'port_id': p['port']['id'],
                                        'fixed_ip_address':
                                            str(ip_range[-2])}})
                    self.assertEqual(fip['floatingip']['port_id'],
                                     body_2['floatingip']['port_id'])
                    self.assertEqual(str(ip_range[-2]),
                                     body_2['floatingip']['fixed_ip_address'])

    def test_first_floatingip_associate_notification(self):
        with self.port() as p:
            private_sub = {'subnet': {'id':
                                      p['port']['fixed_ips'][0]['subnet_id']}}
            with self.floatingip_no_assoc(private_sub) as fip:
                port_id = p['port']['id']
                ip_address = p['port']['fixed_ips'][0]['ip_address']
                with mock.patch.object(registry, 'notify') as notify:
                    body = self._update('floatingips',
                                        fip['floatingip']['id'],
                                        {'floatingip': {'port_id': port_id}})
                    fip_addr = fip['floatingip']['floating_ip_address']
                    fip_network_id = fip['floatingip']['floating_network_id']
                    router_id = body['floatingip']['router_id']
                    body = self._show('routers', router_id)
                    ext_gw_info = body['router']['external_gateway_info']
                    ext_fixed_ip = ext_gw_info['external_fixed_ips'][0]
                    notify.assert_called_once_with(
                        resources.FLOATING_IP, events.AFTER_UPDATE, mock.ANY,
                        context=mock.ANY,
                        fixed_ip_address=ip_address,
                        fixed_port_id=port_id,
                        floating_ip_address=fip_addr,
                        floating_network_id=fip_network_id,
                        last_known_router_id=None,
                        router_id=router_id,
                        next_hop=ext_fixed_ip['ip_address'])
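
    # The association test above and the disassociation test below together
    # pin down the AFTER_UPDATE payload contract: on associate, router_id and
    # next_hop (the gateway port's fixed IP) are populated; on disassociate
    # they are reset to None while last_known_router_id records the router
    # the floating IP was previously bound to.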
    def test_floatingip_disassociate_notification(self):
        with self.port() as p:
            private_sub = {'subnet': {'id':
                                      p['port']['fixed_ips'][0]['subnet_id']}}
            with self.floatingip_no_assoc(private_sub) as fip:
                port_id = p['port']['id']
                body = self._update('floatingips',
                                    fip['floatingip']['id'],
                                    {'floatingip': {'port_id': port_id}})
                with mock.patch.object(registry, 'notify') as notify:
                    fip_addr = fip['floatingip']['floating_ip_address']
                    fip_network_id = fip['floatingip']['floating_network_id']
                    router_id = body['floatingip']['router_id']
                    self._update('floatingips',
                                 fip['floatingip']['id'],
                                 {'floatingip': {'port_id': None}})
                    notify.assert_called_once_with(
                        resources.FLOATING_IP, events.AFTER_UPDATE, mock.ANY,
                        context=mock.ANY,
                        fixed_ip_address=None,
                        fixed_port_id=None,
                        floating_ip_address=fip_addr,
                        floating_network_id=fip_network_id,
                        last_known_router_id=router_id,
                        router_id=None,
                        next_hop=None)

    def test_floatingip_association_on_unowned_router(self):
        # create a router owned by one tenant and associate the FIP with a
        # different tenant, assert that the FIP association succeeds
        with self.subnet(cidr='11.0.0.0/24') as public_sub:
            self._set_net_external(public_sub['subnet']['network_id'])
            with self.port() as private_port:
                with self.router(tenant_id='router-owner',
                                 set_context=True) as r:
                    sid = private_port['port']['fixed_ips'][0]['subnet_id']
                    private_sub = {'subnet': {'id': sid}}
                    self._add_external_gateway_to_router(
                        r['router']['id'],
                        public_sub['subnet']['network_id'])
                    self._router_interface_action(
                        'add', r['router']['id'],
                        private_sub['subnet']['id'], None)

                    self._make_floatingip(self.fmt,
                                          public_sub['subnet']['network_id'],
                                          port_id=private_port['port']['id'],
                                          fixed_ip=None,
                                          set_context=True)

    def test_floatingip_update_different_router(self):
        # Create subnet with different CIDRs to account for plugins which
        # do not support overlapping IPs
        with self.subnet(cidr='10.0.0.0/24') as s1,\
                self.subnet(cidr='10.0.1.0/24') as s2:
            with self.port(subnet=s1) as p1, self.port(subnet=s2) as p2:
                private_sub1 = {'subnet':
                                {'id':
                                 p1['port']['fixed_ips'][0]['subnet_id']}}
                private_sub2 = {'subnet':
                                {'id':
                                 p2['port']['fixed_ips'][0]['subnet_id']}}
                with self.subnet(cidr='12.0.0.0/24') as public_sub:
                    with self.floatingip_no_assoc_with_public_sub(
                        private_sub1, public_sub=public_sub) as (fip1, r1),\
                            self.floatingip_no_assoc_with_public_sub(
                                private_sub2,
                                public_sub=public_sub) as (fip2, r2):

                        def assert_no_assoc(fip):
                            body = self._show('floatingips',
                                              fip['floatingip']['id'])
                            self.assertIsNone(body['floatingip']['port_id'])
                            self.assertIsNone(
                                body['floatingip']['fixed_ip_address'])

                        assert_no_assoc(fip1)
                        assert_no_assoc(fip2)

                        def associate_and_assert(fip, port):
                            port_id = port['port']['id']
                            ip_address = (port['port']['fixed_ips']
                                          [0]['ip_address'])
                            body = self._update(
                                'floatingips', fip['floatingip']['id'],
                                {'floatingip': {'port_id': port_id}})
                            self.assertEqual(port_id,
                                             body['floatingip']['port_id'])
                            self.assertEqual(
                                ip_address,
                                body['floatingip']['fixed_ip_address'])
                            return body['floatingip']['router_id']

                        fip1_r1_res = associate_and_assert(fip1, p1)
                        self.assertEqual(fip1_r1_res, r1['router']['id'])
                        # The following operation will associate the floating
                        # ip to a different router
                        fip1_r2_res = associate_and_assert(fip1, p2)
                        self.assertEqual(fip1_r2_res, r2['router']['id'])
                        fip2_r1_res = associate_and_assert(fip2, p1)
                        self.assertEqual(fip2_r1_res, r1['router']['id'])
                        # disassociate fip1
                        self._update(
                            'floatingips', fip1['floatingip']['id'],
                            {'floatingip': {'port_id': None}})
                        fip2_r2_res = associate_and_assert(fip2, p2)
                        self.assertEqual(fip2_r2_res, r2['router']['id'])

    def test_floatingip_port_delete(self):
        with self.subnet() as private_sub:
            with self.floatingip_no_assoc(private_sub) as fip:
                with self.port(subnet=private_sub) as p:
                    body = self._update('floatingips',
                                        fip['floatingip']['id'],
                                        {'floatingip':
                                         {'port_id': p['port']['id']}})
                # deleting the associated port should disassociate the
                # floating IP; confirm that its port-related fields are
                # set back to None
                self._delete('ports', p['port']['id'])
                body = self._show('floatingips', fip['floatingip']['id'])
                self.assertEqual(body['floatingip']['id'],
                                 fip['floatingip']['id'])
                self.assertIsNone(body['floatingip']['port_id'])
                self.assertIsNone(body['floatingip']['fixed_ip_address'])
                self.assertIsNone(body['floatingip']['router_id'])

    def test_two_fips_one_port_invalid_return_409(self):
        with self.floatingip_with_assoc() as fip1:
            res = self._create_floatingip(
                self.fmt,
                fip1['floatingip']['floating_network_id'],
                fip1['floatingip']['port_id'])
            self.assertEqual(exc.HTTPConflict.code, res.status_int)

    def test_floating_ip_direct_port_delete_returns_409(self):
        found = False
        with self.floatingip_with_assoc():
            for p in self._list('ports')['ports']:
                if p['device_owner'] == l3_constants.DEVICE_OWNER_FLOATINGIP:
                    self._delete('ports', p['id'],
                                 expected_code=exc.HTTPConflict.code)
                    found = True
        self.assertTrue(found)

    def _test_floatingip_with_invalid_create_port(self, plugin_class):
        with self.port() as p:
            private_sub = {'subnet': {'id':
                                      p['port']['fixed_ips'][0]['subnet_id']}}
            with self.subnet(cidr='12.0.0.0/24') as public_sub:
                self._set_net_external(public_sub['subnet']['network_id'])
                res = self._create_router(self.fmt, _uuid())
                r = self.deserialize(self.fmt, res)
                self._add_external_gateway_to_router(
                    r['router']['id'],
                    public_sub['subnet']['network_id'])
                self._router_interface_action(
                    'add', r['router']['id'],
                    private_sub['subnet']['id'],
                    None)

                with mock.patch(plugin_class + '.create_port') as createport:
                    createport.return_value = {'fixed_ips': []}
                    res = self._create_floatingip(
                        self.fmt, public_sub['subnet']['network_id'],
                        port_id=p['port']['id'])
                    self.assertEqual(exc.HTTPBadRequest.code, res.status_int)

    def test_floatingip_with_invalid_create_port(self):
        self._test_floatingip_with_invalid_create_port(
            'neutron.db.db_base_plugin_v2.NeutronDbPluginV2')

    def test_create_floatingip_with_subnet_id_non_admin(self):
        with self.subnet() as public_sub:
            self._set_net_external(public_sub['subnet']['network_id'])
            with self.router():
                res = self._create_floatingip(
                    self.fmt,
                    public_sub['subnet']['network_id'],
                    subnet_id=public_sub['subnet']['id'],
                    set_context=True)
        self.assertEqual(exc.HTTPCreated.code, res.status_int)

    def test_create_floatingip_with_multisubnet_id(self):
        with self.network() as network:
            self._set_net_external(network['network']['id'])
            with self.subnet(network, cidr='10.0.12.0/24') as subnet1:
                with self.subnet(network, cidr='10.0.13.0/24') as subnet2:
                    with self.router():
                        res = self._create_floatingip(
                            self.fmt,
                            subnet1['subnet']['network_id'],
                            subnet_id=subnet1['subnet']['id'])
                        fip1 = self.deserialize(self.fmt, res)
                        res = self._create_floatingip(
                            self.fmt,
                            subnet1['subnet']['network_id'],
                            subnet_id=subnet2['subnet']['id'])
                        fip2 = self.deserialize(self.fmt, res)
        self.assertTrue(
            fip1['floatingip']['floating_ip_address'].startswith('10.0.12'))
        self.assertTrue(
            fip2['floatingip']['floating_ip_address'].startswith('10.0.13'))

    def test_create_floatingip_with_wrong_subnet_id(self):
        with self.network() as network1:
            self._set_net_external(network1['network']['id'])
            with self.subnet(network1, cidr='10.0.12.0/24') as subnet1:
                with self.network() as network2:
                    self._set_net_external(network2['network']['id'])
                    with self.subnet(network2,
                                     cidr='10.0.13.0/24') as subnet2:
                        with self.router():
                            res = self._create_floatingip(
                                self.fmt,
                                subnet1['subnet']['network_id'],
                                subnet_id=subnet2['subnet']['id'])
        self.assertEqual(exc.HTTPBadRequest.code, res.status_int)

    def test_create_floatingip_no_ext_gateway_return_404(self):
        with self.subnet() as public_sub:
            self._set_net_external(public_sub['subnet']['network_id'])
            with self.port() as private_port:
                with self.router():
                    res = self._create_floatingip(
                        self.fmt,
                        public_sub['subnet']['network_id'],
                        port_id=private_port['port']['id'])
                    # the router has no gateway on the external network,
                    # so the floating IP create fails with a 404
                    self.assertEqual(exc.HTTPNotFound.code, res.status_int)

    def test_create_floating_non_ext_network_returns_400(self):
        with self.subnet() as public_sub:
            # normally we would set the network of public_sub to be
            # external, but the point of this test is to handle when
            # that is not the case
            with self.router():
                res = self._create_floatingip(
                    self.fmt,
                    public_sub['subnet']['network_id'])
                self.assertEqual(exc.HTTPBadRequest.code, res.status_int)

    def test_create_floatingip_no_public_subnet_returns_400(self):
        with self.network() as public_network:
            with self.port() as private_port:
                with self.router() as r:
                    sid = private_port['port']['fixed_ips'][0]['subnet_id']
                    private_sub = {'subnet': {'id': sid}}
                    self._router_interface_action(
                        'add', r['router']['id'],
                        private_sub['subnet']['id'], None)

                    res = self._create_floatingip(
                        self.fmt,
                        public_network['network']['id'],
                        port_id=private_port['port']['id'])
                    self.assertEqual(exc.HTTPBadRequest.code, res.status_int)

    def test_create_floatingip_invalid_floating_network_id_returns_400(self):
        # API-level test - no need to create all objects for l3 plugin
        res = self._create_floatingip(self.fmt, 'iamnotanuuid',
                                      uuidutils.generate_uuid(),
                                      '192.168.0.1')
        self.assertEqual(400, res.status_int)

    def test_create_floatingip_invalid_floating_port_id_returns_400(self):
        # API-level test - no need to create all objects for l3 plugin
        res = self._create_floatingip(self.fmt, uuidutils.generate_uuid(),
                                      'iamnotanuuid', '192.168.0.1')
        self.assertEqual(400, res.status_int)

    def test_create_floatingip_invalid_fixed_ip_address_returns_400(self):
        # API-level test - no need to create all objects for l3 plugin
        res = self._create_floatingip(self.fmt, uuidutils.generate_uuid(),
                                      uuidutils.generate_uuid(),
                                      'iamnotnanip')
        self.assertEqual(400, res.status_int)

    def test_floatingip_list_with_sort(self):
        with self.subnet(cidr="10.0.0.0/24") as s1,\
                self.subnet(cidr="11.0.0.0/24") as s2,\
                self.subnet(cidr="12.0.0.0/24") as s3:
            network_id1 = s1['subnet']['network_id']
            network_id2 = s2['subnet']['network_id']
            network_id3 = s3['subnet']['network_id']
            self._set_net_external(network_id1)
            self._set_net_external(network_id2)
            self._set_net_external(network_id3)
            fp1 = self._make_floatingip(self.fmt, network_id1)
            fp2 = self._make_floatingip(self.fmt, network_id2)
            fp3 = self._make_floatingip(self.fmt, network_id3)
            self._test_list_with_sort('floatingip', (fp3, fp2, fp1),
                                      [('floating_ip_address', 'desc')])

    def test_floatingip_list_with_port_id(self):
        with self.floatingip_with_assoc() as fip:
            port_id = fip['floatingip']['port_id']
            res = self._list('floatingips',
                             query_params="port_id=%s" % port_id)
            self.assertEqual(1, len(res['floatingips']))
            res = self._list('floatingips', query_params="port_id=aaa")
            self.assertEqual(0, len(res['floatingips']))

    def test_floatingip_list_with_pagination(self):
        with self.subnet(cidr="10.0.0.0/24") as s1,\
                self.subnet(cidr="11.0.0.0/24") as s2,\
                self.subnet(cidr="12.0.0.0/24") as s3:
            network_id1 = s1['subnet']['network_id']
            network_id2 = s2['subnet']['network_id']
            network_id3 = s3['subnet']['network_id']
            self._set_net_external(network_id1)
            self._set_net_external(network_id2)
            self._set_net_external(network_id3)
            fp1 = self._make_floatingip(self.fmt, network_id1)
            fp2 = self._make_floatingip(self.fmt, network_id2)
            fp3 = self._make_floatingip(self.fmt, network_id3)
            self._test_list_with_pagination(
                'floatingip', (fp1, fp2, fp3),
                ('floating_ip_address', 'asc'), 2, 2)

    def test_floatingip_list_with_pagination_reverse(self):
        with self.subnet(cidr="10.0.0.0/24") as s1,\
                self.subnet(cidr="11.0.0.0/24") as s2,\
                self.subnet(cidr="12.0.0.0/24") as s3:
            network_id1 = s1['subnet']['network_id']
            network_id2 = s2['subnet']['network_id']
            network_id3 = s3['subnet']['network_id']
            self._set_net_external(network_id1)
            self._set_net_external(network_id2)
            self._set_net_external(network_id3)
            fp1 = self._make_floatingip(self.fmt, network_id1)
            fp2 = self._make_floatingip(self.fmt, network_id2)
            fp3 = self._make_floatingip(self.fmt, network_id3)
            self._test_list_with_pagination_reverse(
                'floatingip', (fp1, fp2, fp3),
                ('floating_ip_address', 'asc'), 2, 2)

    def test_floatingip_multi_external_one_internal(self):
        with self.subnet(cidr="10.0.0.0/24") as exs1,\
                self.subnet(cidr="11.0.0.0/24") as exs2,\
                self.subnet(cidr="12.0.0.0/24") as ins1:
            network_ex_id1 = exs1['subnet']['network_id']
            network_ex_id2 = exs2['subnet']['network_id']
            self._set_net_external(network_ex_id1)
            self._set_net_external(network_ex_id2)

            r2i_fixed_ips = [{'ip_address': '12.0.0.2'}]
            with self.router() as r1,\
                    self.router() as r2,\
                    self.port(subnet=ins1,
                              fixed_ips=r2i_fixed_ips) as r2i_port:
                self._add_external_gateway_to_router(
                    r1['router']['id'],
                    network_ex_id1)
                self._router_interface_action('add', r1['router']['id'],
                                              ins1['subnet']['id'],
                                              None)
                self._add_external_gateway_to_router(
                    r2['router']['id'],
                    network_ex_id2)
                self._router_interface_action('add', r2['router']['id'],
                                              None,
                                              r2i_port['port']['id'])
                with self.port(subnet=ins1,
                               fixed_ips=[{'ip_address': '12.0.0.3'}]
                               ) as private_port:
                    fp1 = self._make_floatingip(self.fmt, network_ex_id1,
                                                private_port['port']['id'],
                                                floating_ip='10.0.0.3')
                    fp2 = self._make_floatingip(self.fmt, network_ex_id2,
                                                private_port['port']['id'],
                                                floating_ip='11.0.0.3')
                    self.assertEqual(fp1['floatingip']['router_id'],
                                     r1['router']['id'])
                    self.assertEqual(fp2['floatingip']['router_id'],
                                     r2['router']['id'])

    def test_floatingip_same_external_and_internal(self):
        # Select router with subnet's gateway_ip for floatingip when
        # routers connected to same subnet and external network.
with self.subnet(cidr="10.0.0.0/24") as exs,\ self.subnet(cidr="12.0.0.0/24", gateway_ip="12.0.0.50") as ins: network_ex_id = exs['subnet']['network_id'] self._set_net_external(network_ex_id) r2i_fixed_ips = [{'ip_address': '12.0.0.2'}] with self.router() as r1,\ self.router() as r2,\ self.port(subnet=ins, fixed_ips=r2i_fixed_ips) as r2i_port: self._add_external_gateway_to_router( r1['router']['id'], network_ex_id) self._router_interface_action('add', r2['router']['id'], None, r2i_port['port']['id']) self._router_interface_action('add', r1['router']['id'], ins['subnet']['id'], None) self._add_external_gateway_to_router( r2['router']['id'], network_ex_id) with self.port(subnet=ins, fixed_ips=[{'ip_address': '12.0.0.8'}] ) as private_port: fp = self._make_floatingip(self.fmt, network_ex_id, private_port['port']['id'], floating_ip='10.0.0.8') self.assertEqual(r1['router']['id'], fp['floatingip']['router_id']) def _test_floatingip_via_router_interface(self, http_status): # NOTE(yamamoto): "exs" subnet is just to provide a gateway port # for the router. Otherwise the test would fail earlier without # reaching the code we want to test. (bug 1556884) with self.subnet(cidr="10.0.0.0/24") as exs, \ self.subnet(cidr="10.0.1.0/24") as ins1, \ self.subnet(cidr="10.0.2.0/24") as ins2: network_ex_id = exs['subnet']['network_id'] self._set_net_external(network_ex_id) network_in2_id = ins2['subnet']['network_id'] self._set_net_external(network_in2_id) with self.router() as r1, self.port(subnet=ins1) as private_port: self._add_external_gateway_to_router(r1['router']['id'], network_ex_id) self._router_interface_action('add', r1['router']['id'], ins1['subnet']['id'], None) self._router_interface_action('add', r1['router']['id'], ins2['subnet']['id'], None) self._make_floatingip(self.fmt, network_id=network_in2_id, port_id=private_port['port']['id'], http_status=http_status) def _get_router_for_floatingip_without_device_owner_check( self, context, internal_port, internal_subnet, external_network_id): gw_port = orm.aliased(models_v2.Port, name="gw_port") routerport_qry = context.session.query( l3_db.RouterPort.router_id, models_v2.IPAllocation.ip_address ).join( models_v2.Port, models_v2.IPAllocation ).filter( models_v2.Port.network_id == internal_port['network_id'], l3_db.RouterPort.port_type.in_( l3_constants.ROUTER_INTERFACE_OWNERS ), models_v2.IPAllocation.subnet_id == internal_subnet['id'] ).join( gw_port, gw_port.device_id == l3_db.RouterPort.router_id ).filter( gw_port.network_id == external_network_id, ).distinct() first_router_id = None for router_id, interface_ip in routerport_qry: if interface_ip == internal_subnet['gateway_ip']: return router_id if not first_router_id: first_router_id = router_id if first_router_id: return first_router_id raise l3.ExternalGatewayForFloatingIPNotFound( subnet_id=internal_subnet['id'], external_network_id=external_network_id, port_id=internal_port['id']) def test_floatingip_via_router_interface_returns_404(self): self._test_floatingip_via_router_interface(exc.HTTPNotFound.code) def test_floatingip_via_router_interface_returns_201(self): # Override get_router_for_floatingip, as # networking-midonet's L3 service plugin would do. 
        plugin = manager.NeutronManager.get_service_plugins()[
            service_constants.L3_ROUTER_NAT]
        with mock.patch.object(
                plugin, "get_router_for_floatingip",
                self._get_router_for_floatingip_without_device_owner_check):
            self._test_floatingip_via_router_interface(exc.HTTPCreated.code)

    def test_floatingip_delete_router_intf_with_subnet_id_returns_409(self):
        found = False
        with self.floatingip_with_assoc():
            for p in self._list('ports')['ports']:
                if p['device_owner'] == l3_constants.DEVICE_OWNER_ROUTER_INTF:
                    subnet_id = p['fixed_ips'][0]['subnet_id']
                    router_id = p['device_id']
                    self._router_interface_action(
                        'remove', router_id, subnet_id, None,
                        expected_code=exc.HTTPConflict.code)
                    found = True
                    break
        self.assertTrue(found)

    def test_floatingip_delete_router_intf_with_port_id_returns_409(self):
        found = False
        with self.floatingip_with_assoc():
            for p in self._list('ports')['ports']:
                if p['device_owner'] == l3_constants.DEVICE_OWNER_ROUTER_INTF:
                    router_id = p['device_id']
                    self._router_interface_action(
                        'remove', router_id, None, p['id'],
                        expected_code=exc.HTTPConflict.code)
                    found = True
                    break
        self.assertTrue(found)

    def _test_router_delete_subnet_inuse_returns_409(self, router, subnet):
        r, s = router, subnet
        self._router_interface_action('add',
                                      r['router']['id'],
                                      s['subnet']['id'],
                                      None)
        # subnet cannot be deleted as it's attached to a router
        self._delete('subnets', s['subnet']['id'],
                     expected_code=exc.HTTPConflict.code)

    def _ipv6_subnet(self, mode):
        return self.subnet(cidr='fd00::1/64',
                           gateway_ip='fd00::1',
                           ip_version=6,
                           ipv6_ra_mode=mode,
                           ipv6_address_mode=mode)

    def test_router_delete_subnet_inuse_returns_409(self):
        with self.router() as r:
            with self.subnet() as s:
                self._test_router_delete_subnet_inuse_returns_409(r, s)

    def test_router_delete_ipv6_slaac_subnet_inuse_returns_409(self):
        with self.router() as r:
            with self._ipv6_subnet(l3_constants.IPV6_SLAAC) as s:
                self._test_router_delete_subnet_inuse_returns_409(r, s)

    def test_router_delete_dhcpv6_stateless_subnet_inuse_returns_409(self):
        with self.router() as r:
            with self._ipv6_subnet(l3_constants.DHCPV6_STATELESS) as s:
                self._test_router_delete_subnet_inuse_returns_409(r, s)

    def test_delete_ext_net_with_disassociated_floating_ips(self):
        with self.network() as net:
            net_id = net['network']['id']
            self._set_net_external(net_id)
            with self.subnet(network=net):
                self._make_floatingip(self.fmt, net_id)

    def test_create_floatingip_with_specific_ip(self):
        with self.subnet(cidr='10.0.0.0/24') as s:
            network_id = s['subnet']['network_id']
            self._set_net_external(network_id)
            fp = self._make_floatingip(self.fmt, network_id,
                                       floating_ip='10.0.0.10')
            self.assertEqual('10.0.0.10',
                             fp['floatingip']['floating_ip_address'])

    def test_create_floatingip_with_specific_ip_out_of_allocation(self):
        with self.subnet(cidr='10.0.0.0/24',
                         allocation_pools=[
                             {'start': '10.0.0.10', 'end': '10.0.0.20'}]
                         ) as s:
            network_id = s['subnet']['network_id']
            self._set_net_external(network_id)
            fp = self._make_floatingip(self.fmt, network_id,
                                       floating_ip='10.0.0.30')
            self.assertEqual('10.0.0.30',
                             fp['floatingip']['floating_ip_address'])

    def test_create_floatingip_with_specific_ip_non_admin(self):
        ctx = context.Context('user_id', 'tenant_id')

        with self.subnet(cidr='10.0.0.0/24') as s:
            network_id = s['subnet']['network_id']
            self._set_net_external(network_id)
            self._make_floatingip(self.fmt, network_id,
                                  set_context=ctx,
                                  floating_ip='10.0.0.10',
                                  http_status=exc.HTTPForbidden.code)

    def test_create_floatingip_with_specific_ip_out_of_subnet(self):
        with self.subnet(cidr='10.0.0.0/24') as s:
            network_id = s['subnet']['network_id']
            self._set_net_external(network_id)
            self._make_floatingip(self.fmt, network_id,
                                  floating_ip='10.0.1.10',
                                  http_status=exc.HTTPBadRequest.code)

    def test_create_floatingip_with_duplicated_specific_ip(self):
        with self.subnet(cidr='10.0.0.0/24') as s:
            network_id = s['subnet']['network_id']
            self._set_net_external(network_id)
            self._make_floatingip(self.fmt, network_id,
                                  floating_ip='10.0.0.10')
            self._make_floatingip(self.fmt, network_id,
                                  floating_ip='10.0.0.10',
                                  http_status=exc.HTTPConflict.code)

    def test_router_specify_id_backend(self):
        plugin = manager.NeutronManager.get_service_plugins()[
            service_constants.L3_ROUTER_NAT]
        router_req = {'router': {'id': _uuid(), 'name': 'router',
                                 'tenant_id': 'foo',
                                 'admin_state_up': True}}
        result = plugin.create_router(context.Context('', 'foo'), router_req)
        self.assertEqual(router_req['router']['id'], result['id'])

    def test_create_floatingip_ipv6_only_network_returns_400(self):
        with self.subnet(cidr="2001:db8::/48", ip_version=6) as public_sub:
            self._set_net_external(public_sub['subnet']['network_id'])
            res = self._create_floatingip(
                self.fmt,
                public_sub['subnet']['network_id'])
            self.assertEqual(exc.HTTPBadRequest.code, res.status_int)

    def test_create_floatingip_ipv6_and_ipv4_network_creates_ipv4(self):
        with self.network() as n,\
                self.subnet(cidr="2001:db8::/48", ip_version=6, network=n),\
                self.subnet(cidr="192.168.1.0/24", ip_version=4, network=n):
            self._set_net_external(n['network']['id'])
            fip = self._make_floatingip(self.fmt, n['network']['id'])
            self.assertEqual('192.168.1.2',
                             fip['floatingip']['floating_ip_address'])

    def test_create_floatingip_with_assoc_to_ipv6_subnet(self):
        with self.subnet() as public_sub:
            self._set_net_external(public_sub['subnet']['network_id'])
            with self.subnet(cidr="2001:db8::/48",
                             ip_version=6) as private_sub:
                with self.port(subnet=private_sub) as private_port:
                    res = self._create_floatingip(
                        self.fmt,
                        public_sub['subnet']['network_id'],
                        port_id=private_port['port']['id'])
                    self.assertEqual(exc.HTTPBadRequest.code, res.status_int)

    def test_create_floatingip_with_assoc_to_ipv4_and_ipv6_port(self):
        with self.network() as n,\
                self.subnet(cidr='10.0.0.0/24', network=n) as s4,\
                self.subnet(cidr='2001:db8::/64', ip_version=6, network=n),\
                self.port(subnet=s4) as p:
            self.assertEqual(2, len(p['port']['fixed_ips']))
            ipv4_address = next(i['ip_address']
                                for i in p['port']['fixed_ips']
                                if netaddr.IPAddress(i['ip_address']).version
                                == 4)
            with self.floatingip_with_assoc(port_id=p['port']['id']) as fip:
                self.assertEqual(fip['floatingip']['fixed_ip_address'],
                                 ipv4_address)
                floating_ip = netaddr.IPAddress(
                    fip['floatingip']['floating_ip_address'])
                self.assertEqual(4, floating_ip.version)

    def test_router_add_interface_by_port_fails_nested(self):
        # Force _validate_router_port_info failure
        plugin = manager.NeutronManager.get_service_plugins()[
            service_constants.L3_ROUTER_NAT]
        if not isinstance(plugin, l3_db.L3_NAT_dbonly_mixin):
            self.skipTest("Plugin is not L3_NAT_dbonly_mixin")
        orig_update_port = self.plugin.update_port

        def mock_fail__validate_router_port_info(ctx, router, port_id):
            # Fail with raising BadRequest exception
            msg = _("Failure mocking...")
            raise n_exc.BadRequest(resource='router', msg=msg)

        def mock_update_port_with_transaction(ctx, id, port):
            # Update port within a sub-transaction
            with ctx.session.begin(subtransactions=True):
                orig_update_port(ctx, id, port)

        def add_router_interface_with_transaction(ctx, router_id,
                                                  interface_info):
            # Call add_router_interface() within a sub-transaction
            with ctx.session.begin():
                plugin.add_router_interface(ctx, router_id, interface_info)
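
        # Wrapping add_router_interface in an outer transaction and forcing
        # _validate_router_port_info to fail lets the test check that the
        # nested port update is rolled back: device_id and device_owner on
        # the port must be empty again afterwards.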
        tenant_id = _uuid()
        ctx = context.Context('', tenant_id)
        with self.network(tenant_id=tenant_id) as network, (
                self.router(name='router1', admin_state_up=True,
                            tenant_id=tenant_id)) as router:
            with self.subnet(network=network, cidr='10.0.0.0/24',
                             tenant_id=tenant_id) as subnet:
                fixed_ips = [{'subnet_id': subnet['subnet']['id']}]
                with self.port(subnet=subnet, fixed_ips=fixed_ips,
                               tenant_id=tenant_id) as port:
                    mock.patch.object(
                        self.plugin, 'update_port',
                        side_effect=(
                            mock_update_port_with_transaction)).start()
                    mock.patch.object(
                        plugin, '_validate_router_port_info',
                        side_effect=(
                            mock_fail__validate_router_port_info)).start()
                    self.assertRaises(
                        n_exc.BadRequest,
                        add_router_interface_with_transaction,
                        ctx, router['router']['id'],
                        {'port_id': port['port']['id']})

                    # fetch port and confirm device_id and device_owner
                    body = self._show('ports', port['port']['id'])
                    self.assertEqual('', body['port']['device_owner'])
                    self.assertEqual('', body['port']['device_id'])

    def test_update_subnet_gateway_for_external_net(self):
        """Test to make sure notification to routers occurs when the gateway
        ip address of a subnet of the external network is changed.
        """
        plugin = manager.NeutronManager.get_service_plugins()[
            service_constants.L3_ROUTER_NAT]
        if not hasattr(plugin, 'l3_rpc_notifier'):
            self.skipTest("Plugin does not support l3_rpc_notifier")
        # make sure the callback is registered.
        registry.subscribe(
            l3_db._notify_subnet_gateway_ip_update,
            resources.SUBNET_GATEWAY,
            events.AFTER_UPDATE)
        with mock.patch.object(plugin.l3_rpc_notifier,
                               'routers_updated') as chk_method:
            with self.network() as network:
                allocation_pools = [{'start': '120.0.0.3',
                                     'end': '120.0.0.254'}]
                with self.subnet(network=network,
                                 gateway_ip='120.0.0.1',
                                 allocation_pools=allocation_pools,
                                 cidr='120.0.0.0/24') as subnet:
                    kwargs = {
                        'device_owner': l3_constants.DEVICE_OWNER_ROUTER_GW,
                        'device_id': 'fake_device'}
                    with self.port(subnet=subnet, **kwargs):
                        data = {'subnet': {'gateway_ip': '120.0.0.2'}}
                        req = self.new_update_request('subnets', data,
                                                      subnet['subnet']['id'])
                        res = self.deserialize(self.fmt,
                                               req.get_response(self.api))
                        self.assertEqual(data['subnet']['gateway_ip'],
                                         res['subnet']['gateway_ip'])
                        chk_method.assert_called_with(mock.ANY,
                                                      ['fake_device'], None)

    def test__notify_subnetpool_address_scope_update(self):
        plugin = manager.NeutronManager.get_service_plugins()[
            service_constants.L3_ROUTER_NAT]

        tenant_id = _uuid()
        with mock.patch.object(
                plugin, 'notify_routers_updated') as chk_method, \
                self.subnetpool(prefixes=['10.0.0.0/24'],
                                admin=True, name='sp',
                                tenant_id=tenant_id) as subnetpool, \
                self.router(tenant_id=tenant_id) as router, \
                self.network(tenant_id=tenant_id) as network:
            subnetpool_id = subnetpool['subnetpool']['id']
            data = {'subnet': {
                    'network_id': network['network']['id'],
                    'subnetpool_id': subnetpool_id,
                    'prefixlen': 24,
                    'ip_version': 4,
                    'tenant_id': tenant_id}}
            req = self.new_create_request('subnets', data)
            subnet = self.deserialize(self.fmt, req.get_response(self.api))

            admin_ctx = context.get_admin_context()
            plugin.add_router_interface(
                admin_ctx,
                router['router']['id'],
                {'subnet_id': subnet['subnet']['id']})
            l3_db._notify_subnetpool_address_scope_update(
                mock.ANY, mock.ANY, mock.ANY,
                context=admin_ctx, subnetpool_id=subnetpool_id)
            chk_method.assert_called_with(admin_ctx,
                                          [router['router']['id']])


class L3AgentDbTestCaseBase(L3NatTestCaseMixin):

    """Unit tests for methods called by the L3 agent."""

    def test_l3_agent_routers_query_interfaces(self):
        with self.router() as r:
            with self.port() as p:
                self._router_interface_action('add',
                                              r['router']['id'],
                                              None,
                                              p['port']['id'])

                routers = self.plugin.get_sync_data(
                    context.get_admin_context(), None)
                self.assertEqual(1, len(routers))
                interfaces = routers[0][l3_constants.INTERFACE_KEY]
                self.assertEqual(1, len(interfaces))
                subnets = interfaces[0]['subnets']
                self.assertEqual(1, len(subnets))
                subnet_id = subnets[0]['id']
                wanted_subnetid = p['port']['fixed_ips'][0]['subnet_id']
                self.assertEqual(wanted_subnetid, subnet_id)

    def test_l3_agent_sync_interfaces(self):
        """Test that the L3 interfaces query returns valid results."""
        with self.router() as router1, self.router() as router2:
            with self.port() as port1, self.port() as port2:
                self._router_interface_action('add',
                                              router1['router']['id'],
                                              None,
                                              port1['port']['id'])
                self._router_interface_action('add',
                                              router2['router']['id'],
                                              None,
                                              port2['port']['id'])
                admin_ctx = context.get_admin_context()
                router1_id = router1['router']['id']
                router2_id = router2['router']['id']

                # Verify that passing in router1 returns only
                # router1's interface
                ifaces = self.plugin._get_sync_interfaces(admin_ctx,
                                                          [router1_id])
                self.assertEqual(1, len(ifaces))
                self.assertEqual(router1_id, ifaces[0]['device_id'])

                # Verify that passing in router1 and router2 returns
                # both interfaces
                ifaces = self.plugin._get_sync_interfaces(admin_ctx,
                                                          [router1_id,
                                                           router2_id])
                self.assertEqual(2, len(ifaces))
                device_list = [i['device_id'] for i in ifaces]
                self.assertIn(router1_id, device_list)
                self.assertIn(router2_id, device_list)

                # Verify that passing in no routers returns an empty list
                ifaces = self.plugin._get_sync_interfaces(admin_ctx, None)
                self.assertEqual(0, len(ifaces))

    def test_l3_agent_routers_query_ignore_interfaces_with_moreThanOneIp(
            self):
        with self.router() as r:
            with self.subnet(cidr='9.0.1.0/24') as subnet:
                with self.port(subnet=subnet,
                               fixed_ips=[{'ip_address':
                                           '9.0.1.3'}]) as p:
                    self._router_interface_action('add',
                                                  r['router']['id'],
                                                  None,
                                                  p['port']['id'])
                    port = {'port': {'fixed_ips':
                                     [{'ip_address': '9.0.1.4',
                                       'subnet_id': subnet['subnet']['id']},
                                      {'ip_address': '9.0.1.5',
                                       'subnet_id': subnet['subnet']['id']}]}}
                    ctx = context.get_admin_context()
                    self.core_plugin.update_port(ctx, p['port']['id'], port)
                    routers = self.plugin.get_sync_data(ctx, None)
                    self.assertEqual(1, len(routers))
                    interfaces = routers[0].get(l3_constants.INTERFACE_KEY,
                                                [])
                    self.assertEqual(1, len(interfaces))

    def test_l3_agent_routers_query_gateway(self):
        with self.router() as r:
            with self.subnet() as s:
                self._set_net_external(s['subnet']['network_id'])
                self._add_external_gateway_to_router(
                    r['router']['id'],
                    s['subnet']['network_id'])
                routers = self.plugin.get_sync_data(
                    context.get_admin_context(), [r['router']['id']])
                self.assertEqual(1, len(routers))
                gw_port = routers[0]['gw_port']
                subnets = gw_port.get('subnets')
                self.assertEqual(1, len(subnets))
                self.assertEqual(s['subnet']['id'], subnets[0]['id'])
                self._remove_external_gateway_from_router(
                    r['router']['id'],
                    s['subnet']['network_id'])

    def test_l3_agent_routers_query_floatingips(self):
        with self.floatingip_with_assoc() as fip:
            routers = self.plugin.get_sync_data(
                context.get_admin_context(), [fip['floatingip']['router_id']])
            self.assertEqual(1, len(routers))
            floatingips = routers[0][l3_constants.FLOATINGIP_KEY]
            self.assertEqual(1, len(floatingips))
            self.assertEqual(floatingips[0]['id'],
                             fip['floatingip']['id'])
            self.assertEqual(floatingips[0]['port_id'],
                             fip['floatingip']['port_id'])
            self.assertIsNotNone(floatingips[0]['fixed_ip_address'])
            self.assertIsNotNone(floatingips[0]['router_id'])
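
    # _test_notify_op_agent below patches the L3AgentNotifyAPI class and then
    # hands the plugin's l3_rpc_notifier (now backed by a mock) to the target
    # test function, so callers can assert on routers_updated.call_count
    # without touching RPC.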
    def _test_notify_op_agent(self, target_func, *args):
        l3_rpc_agent_api_str = (
            'neutron.api.rpc.agentnotifiers.l3_rpc_agent_api.L3AgentNotifyAPI')
        with mock.patch(l3_rpc_agent_api_str):
            plugin = manager.NeutronManager.get_service_plugins()[
                service_constants.L3_ROUTER_NAT]
            notifyApi = plugin.l3_rpc_notifier
            kargs = [item for item in args]
            kargs.append(notifyApi)
            target_func(*kargs)

    def _test_router_gateway_op_agent(self, notifyApi):
        with self.router() as r:
            with self.subnet() as s:
                self._set_net_external(s['subnet']['network_id'])
                self._add_external_gateway_to_router(
                    r['router']['id'],
                    s['subnet']['network_id'])
                self._remove_external_gateway_from_router(
                    r['router']['id'],
                    s['subnet']['network_id'])
                self.assertEqual(
                    2, notifyApi.routers_updated.call_count)

    def test_router_gateway_op_agent(self):
        self._test_notify_op_agent(self._test_router_gateway_op_agent)

    def _test_interfaces_op_agent(self, r, notifyApi):
        with self.port() as p:
            self._router_interface_action('add',
                                          r['router']['id'],
                                          None,
                                          p['port']['id'])
            # clean-up
            self._router_interface_action('remove',
                                          r['router']['id'],
                                          None,
                                          p['port']['id'])
        self.assertEqual(2, notifyApi.routers_updated.call_count)

    def test_interfaces_op_agent(self):
        with self.router() as r:
            self._test_notify_op_agent(
                self._test_interfaces_op_agent, r)

    def _test_floatingips_op_agent(self, notifyApi):
        with self.floatingip_with_assoc():
            pass
        # add gateway, add interface, associate, deletion of floatingip
        self.assertEqual(4, notifyApi.routers_updated.call_count)

    def test_floatingips_op_agent(self):
        self._test_notify_op_agent(self._test_floatingips_op_agent)


class L3BaseForIntTests(test_db_base_plugin_v2.NeutronDbPluginV2TestCase):

    mock_rescheduling = True

    def setUp(self, plugin=None, ext_mgr=None, service_plugins=None):
        if not plugin:
            plugin = 'neutron.tests.unit.extensions.test_l3.TestL3NatIntPlugin'
        # for these tests we need to enable overlapping ips
        cfg.CONF.set_default('allow_overlapping_ips', True)
        ext_mgr = ext_mgr or L3TestExtensionManager()

        if self.mock_rescheduling:
            mock.patch('%s._check_router_needs_rescheduling' % plugin,
                       new=lambda *a: False).start()

        super(L3BaseForIntTests, self).setUp(plugin=plugin, ext_mgr=ext_mgr,
                                             service_plugins=service_plugins)

        self.setup_notification_driver()


class L3BaseForSepTests(test_db_base_plugin_v2.NeutronDbPluginV2TestCase):

    def setUp(self, plugin=None, ext_mgr=None):
        # the plugin without L3 support
        if not plugin:
            plugin = 'neutron.tests.unit.extensions.test_l3.TestNoL3NatPlugin'
        # the L3 service plugin
        l3_plugin = ('neutron.tests.unit.extensions.test_l3.'
                     'TestL3NatServicePlugin')
        service_plugins = {'l3_plugin_name': l3_plugin}

        # for these tests we need to enable overlapping ips
        cfg.CONF.set_default('allow_overlapping_ips', True)
        if not ext_mgr:
            ext_mgr = L3TestExtensionManager()
        super(L3BaseForSepTests, self).setUp(plugin=plugin, ext_mgr=ext_mgr,
                                             service_plugins=service_plugins)

        self.setup_notification_driver()


class L3NatDBIntAgentSchedulingTestCase(L3BaseForIntTests,
                                        L3NatTestCaseMixin,
                                        test_agent.AgentDBTestMixIn):

    """Unit tests for core plugin with L3 routing and scheduling integrated."""
    def setUp(self, plugin='neutron.tests.unit.extensions.test_l3.'
                           'TestL3NatIntAgentSchedulingPlugin',
              ext_mgr=None,
              service_plugins=None):
        self.mock_rescheduling = False
        super(L3NatDBIntAgentSchedulingTestCase, self).setUp(
            plugin, ext_mgr, service_plugins)
        self.adminContext = context.get_admin_context()

    def _assert_router_on_agent(self, router_id, agent_host):
        plugin = manager.NeutronManager.get_service_plugins().get(
            service_constants.L3_ROUTER_NAT)
        agents = plugin.list_l3_agents_hosting_router(
            self.adminContext, router_id)['agents']
        self.assertEqual(1, len(agents))
        self.assertEqual(agents[0]['host'], agent_host)

    def test_update_gateway_agent_exists_supporting_network(self):
        with self.router() as r, self.subnet() as s1, self.subnet() as s2:
            self._set_net_external(s1['subnet']['network_id'])
            l3_rpc_cb = l3_rpc.L3RpcCallback()
            helpers.register_l3_agent(
                host='host1',
                ext_net_id=s1['subnet']['network_id'])
            helpers.register_l3_agent(
                host='host2', internal_only=False,
                ext_net_id=s2['subnet']['network_id'])
            l3_rpc_cb.get_router_ids(self.adminContext,
                                     host='host1')
            self._assert_router_on_agent(r['router']['id'], 'host1')

            self._add_external_gateway_to_router(
                r['router']['id'],
                s1['subnet']['network_id'])
            self._assert_router_on_agent(r['router']['id'], 'host1')

            self._set_net_external(s2['subnet']['network_id'])
            self._add_external_gateway_to_router(
                r['router']['id'],
                s2['subnet']['network_id'])
            self._assert_router_on_agent(r['router']['id'], 'host2')

    def test_update_gateway_agent_exists_supporting_multiple_network(self):
        with self.router() as r, self.subnet() as s1, self.subnet() as s2:
            self._set_net_external(s1['subnet']['network_id'])
            l3_rpc_cb = l3_rpc.L3RpcCallback()
            helpers.register_l3_agent(
                host='host1',
                ext_net_id=s1['subnet']['network_id'])
            helpers.register_l3_agent(
                host='host2', internal_only=False,
                ext_net_id='', ext_bridge='')
            l3_rpc_cb.get_router_ids(self.adminContext,
                                     host='host1')
            self._assert_router_on_agent(r['router']['id'], 'host1')

            self._add_external_gateway_to_router(
                r['router']['id'],
                s1['subnet']['network_id'])
            self._assert_router_on_agent(r['router']['id'], 'host1')

            self._set_net_external(s2['subnet']['network_id'])
            self._add_external_gateway_to_router(
                r['router']['id'],
                s2['subnet']['network_id'])
            self._assert_router_on_agent(r['router']['id'], 'host2')

    def test_router_update_gateway_no_eligible_l3_agent(self):
        with self.router() as r:
            with self.subnet() as s1:
                with self.subnet() as s2:
                    self._set_net_external(s1['subnet']['network_id'])
                    self._set_net_external(s2['subnet']['network_id'])
                    self._add_external_gateway_to_router(
                        r['router']['id'],
                        s1['subnet']['network_id'],
                        expected_code=exc.HTTPBadRequest.code)


class L3RpcCallbackTestCase(base.BaseTestCase):

    def setUp(self):
        super(L3RpcCallbackTestCase, self).setUp()
        self.mock_plugin = mock.patch.object(
            l3_rpc.L3RpcCallback,
            'plugin', new_callable=mock.PropertyMock).start()
        self.mock_l3plugin = mock.patch.object(
            l3_rpc.L3RpcCallback,
            'l3plugin', new_callable=mock.PropertyMock).start()
        self.l3_rpc_cb = l3_rpc.L3RpcCallback()

    def test__ensure_host_set_on_port_host_id_none(self):
        port = {'id': 'id', portbindings.HOST_ID: 'somehost'}
        self.l3_rpc_cb._ensure_host_set_on_port(None, None, port)
        self.assertFalse(self.l3_rpc_cb.plugin.update_port.called)

    def test__ensure_host_set_on_port_bad_bindings(self):
        for b in (portbindings.VIF_TYPE_BINDING_FAILED,
                  portbindings.VIF_TYPE_UNBOUND):
            port = {'id': 'id', portbindings.HOST_ID: 'somehost',
                    portbindings.VIF_TYPE: b}
            self.l3_rpc_cb._ensure_host_set_on_port(None, 'somehost', port)
            self.assertTrue(self.l3_rpc_cb.plugin.update_port.called)
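
    # The next test covers a race: if the port disappears while the callback
    # tries to bind it to a host, the resulting PortNotFound is expected to
    # be logged at debug level rather than propagated.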
    def test__ensure_host_set_on_port_update_on_concurrent_delete(self):
        port_id = 'foo_port_id'
        port = {
            'id': port_id,
            'device_owner': DEVICE_OWNER_COMPUTE,
            portbindings.HOST_ID: '',
            portbindings.VIF_TYPE: portbindings.VIF_TYPE_BINDING_FAILED
        }
        router_id = 'foo_router_id'
        self.l3_rpc_cb.plugin.update_port.side_effect = n_exc.PortNotFound(
            port_id=port_id)
        with mock.patch.object(l3_rpc.LOG, 'debug') as mock_log:
            self.l3_rpc_cb._ensure_host_set_on_port(
                mock.ANY, mock.ANY, port, router_id)
        self.l3_rpc_cb.plugin.update_port.assert_called_once_with(
            mock.ANY, port_id, {'port': {portbindings.HOST_ID: mock.ANY}})
        self.assertTrue(mock_log.call_count)
        expected_message = ('Port foo_port_id not found while updating '
                            'agent binding for router foo_router_id.')
        actual_message = mock_log.call_args[0][0] % mock_log.call_args[0][1]
        self.assertEqual(expected_message, actual_message)


class L3AgentDbIntTestCase(L3BaseForIntTests, L3AgentDbTestCaseBase):

    """Unit tests for methods called by the L3 agent for
    the case where core plugin implements L3 routing.
    """

    def setUp(self):
        super(L3AgentDbIntTestCase, self).setUp()
        self.core_plugin = TestL3NatIntPlugin()
        self.plugin = self.core_plugin


class L3AgentDbSepTestCase(L3BaseForSepTests, L3AgentDbTestCaseBase):

    """Unit tests for methods called by the L3 agent for the
    case where separate service plugin implements L3 routing.
    """

    def setUp(self):
        super(L3AgentDbSepTestCase, self).setUp()
        self.core_plugin = TestNoL3NatPlugin()
        self.plugin = TestL3NatServicePlugin()


class TestL3DbOperationBounds(test_db_base_plugin_v2.DbOperationBoundMixin,
                              L3NatTestCaseMixin,
                              ml2_base.ML2TestFramework):
    def setUp(self):
        super(TestL3DbOperationBounds, self).setUp()
        ext_mgr = L3TestExtensionManager()
        self.ext_api = test_extensions.setup_extensions_middleware(ext_mgr)
        self.kwargs = self.get_api_kwargs()

    def test_router_list_queries_constant(self):
        with self.subnet(**self.kwargs) as s:
            self._set_net_external(s['subnet']['network_id'])

            def router_maker():
                ext_info = {'network_id': s['subnet']['network_id']}
                self._create_router(
                    self.fmt,
                    arg_list=('external_gateway_info',),
                    external_gateway_info=ext_info,
                    **self.kwargs)

            self._assert_object_list_queries_constant(router_maker,
                                                      'routers')

    def test_floatingip_list_queries_constant(self):
        with self.floatingip_with_assoc(**self.kwargs) as flip:
            internal_port = self._show('ports', flip['floatingip']['port_id'])
            internal_net_id = internal_port['port']['network_id']

            def float_maker():
                port = self._make_port(
                    self.fmt, internal_net_id,
                    **self.kwargs)
                self._make_floatingip(
                    self.fmt, flip['floatingip']['floating_network_id'],
                    port_id=port['port']['id'],
                    **self.kwargs)

            self._assert_object_list_queries_constant(float_maker,
                                                      'floatingips')


class TestL3DbOperationBoundsTenant(TestL3DbOperationBounds):
    admin = False


class L3NatDBTestCaseMixin(object):
    """L3_NAT_dbonly_mixin specific test cases."""

    def setUp(self):
        super(L3NatDBTestCaseMixin, self).setUp()
        plugin = manager.NeutronManager.get_service_plugins()[
            service_constants.L3_ROUTER_NAT]
        if not isinstance(plugin, l3_db.L3_NAT_dbonly_mixin):
            self.skipTest("Plugin is not L3_NAT_dbonly_mixin")

    def test_create_router_gateway_fails(self):
        """Force _update_router_gw_info failure and see
        the exception is propagated.
        """
""" plugin = manager.NeutronManager.get_service_plugins()[ service_constants.L3_ROUTER_NAT] ctx = context.Context('', 'foo') class MyException(Exception): pass mock.patch.object(plugin, '_update_router_gw_info', side_effect=MyException).start() with self.network() as n: data = {'router': { 'name': 'router1', 'admin_state_up': True, 'tenant_id': ctx.tenant_id, 'external_gateway_info': {'network_id': n['network']['id']}}} self.assertRaises(MyException, plugin.create_router, ctx, data) # Verify router doesn't persist on failure routers = plugin.get_routers(ctx) self.assertEqual(0, len(routers)) class L3NatDBIntTestCase(L3BaseForIntTests, L3NatTestCaseBase, L3NatDBTestCaseMixin): """Unit tests for core plugin with L3 routing integrated.""" pass class L3NatDBSepTestCase(L3BaseForSepTests, L3NatTestCaseBase, L3NatDBTestCaseMixin): """Unit tests for a separate L3 routing service plugin.""" def test_port_deletion_prevention_handles_missing_port(self): pl = manager.NeutronManager.get_service_plugins().get( service_constants.L3_ROUTER_NAT) self.assertIsNone( pl.prevent_l3_port_deletion(context.get_admin_context(), 'fakeid') ) neutron-8.4.0/neutron/tests/unit/extensions/test_dns.py0000664000567000056710000005651413044372760024606 0ustar jenkinsjenkins00000000000000# Copyright 2015 Rackspace # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import math import netaddr from oslo_config import cfg from neutron.common import constants from neutron.common import utils from neutron import context from neutron.db import db_base_plugin_v2 from neutron.extensions import dns from neutron.tests.unit.db import test_db_base_plugin_v2 class DnsExtensionManager(object): def get_resources(self): return [] def get_actions(self): return [] def get_request_extensions(self): return [] def get_extended_resources(self, version): return dns.get_extended_resources(version) class DnsExtensionTestPlugin(db_base_plugin_v2.NeutronDbPluginV2): """Test plugin to mixin the DNS Integration extensions. """ supported_extension_aliases = ["dns-integration", "router"] class DnsExtensionTestCase(test_db_base_plugin_v2.TestNetworksV2): """Test API extension dns attributes. """ def setUp(self): plugin = ('neutron.tests.unit.extensions.test_dns.' 
        ext_mgr = DnsExtensionManager()
        super(DnsExtensionTestCase, self).setUp(plugin=plugin,
                                                ext_mgr=ext_mgr)

    def _create_network(self, fmt, name, admin_state_up,
                        arg_list=None, set_context=False, tenant_id=None,
                        **kwargs):
        new_arg_list = ('dns_domain',)
        if arg_list is not None:
            new_arg_list = arg_list + new_arg_list
        return super(DnsExtensionTestCase,
                     self)._create_network(fmt, name, admin_state_up,
                                           arg_list=new_arg_list,
                                           set_context=set_context,
                                           tenant_id=tenant_id,
                                           **kwargs)

    def _create_port(self, fmt, net_id, expected_res_status=None,
                     arg_list=None, set_context=False, tenant_id=None,
                     **kwargs):
        tenant_id = tenant_id or self._tenant_id
        data = {'port': {'network_id': net_id,
                         'tenant_id': tenant_id}}

        for arg in (('admin_state_up', 'device_id',
                     'mac_address', 'name', 'fixed_ips',
                     'tenant_id', 'device_owner', 'security_groups',
                     'dns_name') + (arg_list or ())):
            # Arg must be present
            if arg in kwargs:
                data['port'][arg] = kwargs[arg]
        # create a dhcp port device id if one hasn't been supplied
        if ('device_owner' in kwargs and
                kwargs['device_owner'] == constants.DEVICE_OWNER_DHCP and
                'host' in kwargs and
                'device_id' not in kwargs):
            device_id = utils.get_dhcp_agent_device_id(net_id, kwargs['host'])
            data['port']['device_id'] = device_id
        port_req = self.new_create_request('ports', data, fmt)
        if set_context and tenant_id:
            # create a specific auth context for this request
            port_req.environ['neutron.context'] = context.Context(
                '', tenant_id)

        port_res = port_req.get_response(self.api)
        if expected_res_status:
            self.assertEqual(expected_res_status, port_res.status_int)
        return port_res

    def _test_list_resources(self, resource, items, neutron_context=None,
                             query_params=None):
        res = self._list('%ss' % resource,
                         neutron_context=neutron_context,
                         query_params=query_params)
        resource = resource.replace('-', '_')
        self.assertItemsEqual([i['id'] for i in res['%ss' % resource]],
                              [i[resource]['id'] for i in items])
        return res

    def test_create_port_json(self):
        keys = [('admin_state_up', True),
                ('status', self.port_create_status)]
        with self.port(name='myname') as port:
            for k, v in keys:
                self.assertEqual(port['port'][k], v)
            self.assertIn('mac_address', port['port'])
            ips = port['port']['fixed_ips']
            self.assertEqual(1, len(ips))
            self.assertEqual('10.0.0.2', ips[0]['ip_address'])
            self.assertEqual('myname', port['port']['name'])
            self._verify_dns_assigment(port['port'],
                                       ips_list=['10.0.0.2'])

    def test_list_ports(self):
        # for this test we need to enable overlapping ips
        cfg.CONF.set_default('allow_overlapping_ips', True)
        with self.port() as v1, self.port() as v2, self.port() as v3:
            ports = (v1, v2, v3)
            res = self._test_list_resources('port', ports)
            for port in res['ports']:
                self._verify_dns_assigment(
                    port, ips_list=[port['fixed_ips'][0]['ip_address']])

    def test_show_port(self):
        with self.port() as port:
            req = self.new_show_request('ports', port['port']['id'],
                                        self.fmt)
            sport = self.deserialize(self.fmt, req.get_response(self.api))
            self.assertEqual(port['port']['id'], sport['port']['id'])
            self._verify_dns_assigment(
                sport['port'],
                ips_list=[sport['port']['fixed_ips'][0]['ip_address']])

    def test_update_port_non_default_dns_domain_with_dns_name(self):
        with self.port() as port:
            cfg.CONF.set_override('dns_domain', 'example.com')
            data = {'port': {'admin_state_up': False, 'dns_name': 'vm1'}}
            req = self.new_update_request('ports', data, port['port']['id'])
            res = self.deserialize(self.fmt, req.get_response(self.api))
            self.assertEqual(data['port']['admin_state_up'],
                             res['port']['admin_state_up'])
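            # With a non-default dns_domain configured, the dns_name set on
            # the port should be reflected in the DNS assignment (hostname
            # 'vm1', fqdn 'vm1.example.com.'), which the helper verifies.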
self._verify_dns_assigment(res['port'], ips_list=['10.0.0.2'], dns_name='vm1') def test_update_port_default_dns_domain_with_dns_name(self): with self.port() as port: data = {'port': {'admin_state_up': False, 'dns_name': 'vm1'}} req = self.new_update_request('ports', data, port['port']['id']) res = self.deserialize(self.fmt, req.get_response(self.api)) self.assertEqual(data['port']['admin_state_up'], res['port']['admin_state_up']) self._verify_dns_assigment(res['port'], ips_list=['10.0.0.2']) def _verify_dns_assigment(self, port, ips_list=None, exp_ips_ipv4=0, exp_ips_ipv6=0, ipv4_cidrs=None, ipv6_cidrs=None, dns_name=''): ips_list = ips_list or [] ipv4_cidrs = ipv4_cidrs or [] ipv6_cidrs = ipv6_cidrs or [] self.assertEqual(dns_name, port['dns_name']) dns_assignment = port['dns_assignment'] if ips_list: self.assertEqual(len(dns_assignment), len(ips_list)) ips_set = set(ips_list) else: self.assertEqual(len(dns_assignment), exp_ips_ipv4 + exp_ips_ipv6) ipv4_count = 0 ipv6_count = 0 subnets_v4 = [netaddr.IPNetwork(cidr) for cidr in ipv4_cidrs] subnets_v6 = [netaddr.IPNetwork(cidr) for cidr in ipv6_cidrs] request_dns_name, request_fqdn = self._get_request_hostname_and_fqdn( dns_name) for assignment in dns_assignment: if ips_list: self.assertIn(assignment['ip_address'], ips_set) ips_set.remove(assignment['ip_address']) else: ip = netaddr.IPAddress(assignment['ip_address']) if ip.version == 4: self.assertTrue(self._verify_ip_in_subnet(ip, subnets_v4)) ipv4_count += 1 else: self.assertTrue(self._verify_ip_in_subnet(ip, subnets_v6)) ipv6_count += 1 hostname, fqdn = self._get_hostname_and_fqdn(request_dns_name, request_fqdn, assignment) self.assertEqual(assignment['hostname'], hostname) self.assertEqual(assignment['fqdn'], fqdn) if ips_list: self.assertFalse(ips_set) else: self.assertEqual(ipv4_count, exp_ips_ipv4) self.assertEqual(ipv6_count, exp_ips_ipv6) def _get_dns_domain(self): if not cfg.CONF.dns_domain: return '' if cfg.CONF.dns_domain.endswith('.'): return cfg.CONF.dns_domain return '%s.' % cfg.CONF.dns_domain def _get_request_hostname_and_fqdn(self, dns_name): request_dns_name = '' request_fqdn = '' dns_domain = self._get_dns_domain() if dns_name and dns_domain and dns_domain != 'openstacklocal.': request_dns_name = dns_name request_fqdn = request_dns_name if not request_dns_name.endswith('.'): request_fqdn = '%s.%s' % (dns_name, dns_domain) return request_dns_name, request_fqdn def _get_hostname_and_fqdn(self, request_dns_name, request_fqdn, assignment): dns_domain = self._get_dns_domain() if request_dns_name: hostname = request_dns_name fqdn = request_fqdn else: hostname = 'host-%s' % assignment['ip_address'].replace( '.', '-').replace(':', '-') fqdn = hostname if dns_domain: fqdn = '%s.%s' % (hostname, dns_domain) return hostname, fqdn def _verify_ip_in_subnet(self, ip, subnets_list): for subnet in subnets_list: if ip in subnet: return True return False def test_update_port_update_ip(self): """Test update of port IP. Check that a configured IP 10.0.0.2 is replaced by 10.0.0.10. 
""" with self.subnet() as subnet: with self.port(subnet=subnet) as port: ips = port['port']['fixed_ips'] self.assertEqual(1, len(ips)) self.assertEqual('10.0.0.2', ips[0]['ip_address']) self.assertEqual(ips[0]['subnet_id'], subnet['subnet']['id']) data = {'port': {'fixed_ips': [{'subnet_id': subnet['subnet']['id'], 'ip_address': "10.0.0.10"}]}} req = self.new_update_request('ports', data, port['port']['id']) res = self.deserialize(self.fmt, req.get_response(self.api)) ips = res['port']['fixed_ips'] self.assertEqual(1, len(ips)) self.assertEqual(ips[0]['ip_address'], '10.0.0.10') self.assertEqual(ips[0]['subnet_id'], subnet['subnet']['id']) self._verify_dns_assigment(res['port'], ips_list=['10.0.0.10']) def test_update_port_update_ip_address_only(self): with self.subnet() as subnet: with self.port(subnet=subnet) as port: ips = port['port']['fixed_ips'] self.assertEqual(1, len(ips)) self.assertEqual('10.0.0.2', ips[0]['ip_address']) self.assertEqual(ips[0]['subnet_id'], subnet['subnet']['id']) data = {'port': {'fixed_ips': [{'subnet_id': subnet['subnet']['id'], 'ip_address': "10.0.0.10"}, {'ip_address': "10.0.0.2"}]}} req = self.new_update_request('ports', data, port['port']['id']) res = self.deserialize(self.fmt, req.get_response(self.api)) ips = res['port']['fixed_ips'] self.assertEqual(2, len(ips)) self.assertIn({'ip_address': '10.0.0.2', 'subnet_id': subnet['subnet']['id']}, ips) self.assertIn({'ip_address': '10.0.0.10', 'subnet_id': subnet['subnet']['id']}, ips) self._verify_dns_assigment(res['port'], ips_list=['10.0.0.10', '10.0.0.2']) def test_create_port_with_multiple_ipv4_and_ipv6_subnets(self): res = self._test_create_port_with_multiple_ipv4_and_ipv6_subnets() self.assertEqual(201, res.status_code) def test_create_port_multiple_v4_v6_subnets_pqdn_and_dns_domain_no_period( self): cfg.CONF.set_override('dns_domain', 'example.com') res = self._test_create_port_with_multiple_ipv4_and_ipv6_subnets( dns_name='vm1') self.assertEqual(201, res.status_code) def test_create_port_multiple_v4_v6_subnets_pqdn_and_dns_domain_period( self): cfg.CONF.set_override('dns_domain', 'example.com.') res = self._test_create_port_with_multiple_ipv4_and_ipv6_subnets( dns_name='vm1') self.assertEqual(201, res.status_code) def test_create_port_multiple_v4_v6_subnets_pqdn_and_no_dns_domain( self): cfg.CONF.set_override('dns_domain', '') res = self._test_create_port_with_multiple_ipv4_and_ipv6_subnets() self.assertEqual(201, res.status_code) def test_create_port_multiple_v4_v6_subnets_fqdn_and_dns_domain_no_period( self): cfg.CONF.set_override('dns_domain', 'example.com') res = self._test_create_port_with_multiple_ipv4_and_ipv6_subnets( dns_name='vm1.example.com.') self.assertEqual(201, res.status_code) def test_create_port_multiple_v4_v6_subnets_fqdn_and_dns_domain_period( self): cfg.CONF.set_override('dns_domain', 'example.com.') res = self._test_create_port_with_multiple_ipv4_and_ipv6_subnets( dns_name='vm1.example.com.') self.assertEqual(201, res.status_code) def test_create_port_multiple_v4_v6_subnets_fqdn_default_domain_period( self): cfg.CONF.set_override('dns_domain', 'openstacklocal.') res = self._test_create_port_with_multiple_ipv4_and_ipv6_subnets() self.assertEqual(201, res.status_code) def test_create_port_multiple_v4_v6_subnets_bad_fqdn_and_dns_domain( self): cfg.CONF.set_override('dns_domain', 'example.com') res = self._test_create_port_with_multiple_ipv4_and_ipv6_subnets( dns_name='vm1.bad-domain.com.') self.assertEqual(400, res.status_code) expected_error = ('The dns_name passed is a FQDN. 
Its higher level ' 'labels must be equal to the dns_domain option in ' 'neutron.conf') self.assertIn(expected_error, res.text) def test_create_port_multiple_v4_v6_subnets_bad_pqdn_and_dns_domain( self): cfg.CONF.set_override('dns_domain', 'example.com') num_labels = int( math.floor(dns.FQDN_MAX_LEN / dns.DNS_LABEL_MAX_LEN)) filler_len = int( math.floor(dns.FQDN_MAX_LEN % dns.DNS_LABEL_MAX_LEN)) dns_name = (('a' * (dns.DNS_LABEL_MAX_LEN - 1) + '.') * num_labels + 'a' * filler_len) res = self._test_create_port_with_multiple_ipv4_and_ipv6_subnets( dns_name=dns_name) self.assertEqual(400, res.status_code) expected_error = ("When the two are concatenated to form a FQDN " "(with a '.' at the end), the resulting length " "exceeds the maximum size") self.assertIn(expected_error, res.text) def _test_create_port_with_multiple_ipv4_and_ipv6_subnets(self, dns_name=''): """Test port create with multiple IPv4, IPv6 DHCP/SLAAC subnets.""" res = self._create_network(fmt=self.fmt, name='net', admin_state_up=True) network = self.deserialize(self.fmt, res) sub_dicts = [ {'gateway': '10.0.0.1', 'cidr': '10.0.0.0/24', 'ip_version': 4, 'ra_addr_mode': None}, {'gateway': '10.0.1.1', 'cidr': '10.0.1.0/24', 'ip_version': 4, 'ra_addr_mode': None}, {'gateway': 'fe80::1', 'cidr': 'fe80::/64', 'ip_version': 6, 'ra_addr_mode': constants.IPV6_SLAAC}, {'gateway': 'fe81::1', 'cidr': 'fe81::/64', 'ip_version': 6, 'ra_addr_mode': constants.IPV6_SLAAC}, {'gateway': 'fe82::1', 'cidr': 'fe82::/64', 'ip_version': 6, 'ra_addr_mode': constants.DHCPV6_STATEFUL}, {'gateway': 'fe83::1', 'cidr': 'fe83::/64', 'ip_version': 6, 'ra_addr_mode': constants.DHCPV6_STATEFUL}] subnets = {} for sub_dict in sub_dicts: subnet = self._make_subnet( self.fmt, network, gateway=sub_dict['gateway'], cidr=sub_dict['cidr'], ip_version=sub_dict['ip_version'], ipv6_ra_mode=sub_dict['ra_addr_mode'], ipv6_address_mode=sub_dict['ra_addr_mode']) subnets[subnet['subnet']['id']] = sub_dict res = self._create_port(self.fmt, net_id=network['network']['id'], dns_name=dns_name) if res.status_code != 201: return res port = self.deserialize(self.fmt, res) # Since the create port request was made without a list of fixed IPs, # the port should be associated with addresses for one of the # IPv4 subnets, one of the DHCPv6 subnets, and both of the IPv6 # SLAAC subnets. 
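        # (Both SLAAC subnets are included because SLAAC addresses are
        # derived from the port's MAC address via EUI-64 and are thus
        # allocated on every SLAAC-enabled subnet of the network.)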
self.assertEqual(4, len(port['port']['fixed_ips'])) addr_mode_count = {None: 0, constants.DHCPV6_STATEFUL: 0, constants.IPV6_SLAAC: 0} for fixed_ip in port['port']['fixed_ips']: subnet_id = fixed_ip['subnet_id'] if subnet_id in subnets: addr_mode_count[subnets[subnet_id]['ra_addr_mode']] += 1 self.assertEqual(1, addr_mode_count[None]) self.assertEqual(1, addr_mode_count[constants.DHCPV6_STATEFUL]) self.assertEqual(2, addr_mode_count[constants.IPV6_SLAAC]) self._verify_dns_assigment(port['port'], exp_ips_ipv4=1, exp_ips_ipv6=3, ipv4_cidrs=[sub_dicts[0]['cidr'], sub_dicts[1]['cidr']], ipv6_cidrs=[sub_dicts[2]['cidr'], sub_dicts[3]['cidr'], sub_dicts[4]['cidr'], sub_dicts[5]['cidr']], dns_name=dns_name) return res def test_api_extension_validation_with_bad_dns_names(self): num_labels = int( math.floor(dns.FQDN_MAX_LEN / dns.DNS_LABEL_MAX_LEN)) filler_len = int( math.floor(dns.FQDN_MAX_LEN % dns.DNS_LABEL_MAX_LEN)) dns_names = [555, '\f\n\r', '.', '-vm01', '_vm01', 'vm01-', '-vm01.test1', 'vm01.-test1', 'vm01._test1', 'vm01.test1-', 'vm01.te$t1', 'vm0#1.test1.', 'vm01.123.', '-' + 'a' * dns.DNS_LABEL_MAX_LEN, 'a' * (dns.DNS_LABEL_MAX_LEN + 1), ('a' * (dns.DNS_LABEL_MAX_LEN - 1) + '.') * num_labels + 'a' * (filler_len + 1)] res = self._create_network(fmt=self.fmt, name='net', admin_state_up=True) network = self.deserialize(self.fmt, res) sub_dict = {'gateway': '10.0.0.1', 'cidr': '10.0.0.0/24', 'ip_version': 4, 'ra_addr_mode': None} self._make_subnet(self.fmt, network, gateway=sub_dict['gateway'], cidr=sub_dict['cidr'], ip_version=sub_dict['ip_version'], ipv6_ra_mode=sub_dict['ra_addr_mode'], ipv6_address_mode=sub_dict['ra_addr_mode']) for dns_name in dns_names: res = self._create_port(self.fmt, net_id=network['network']['id'], dns_name=dns_name) self.assertEqual(400, res.status_code) error_message = res.json['NeutronError']['message'] is_expected_message = ( 'cannot be converted to lowercase string' in error_message or 'not a valid PQDN or FQDN. 
Reason:' in error_message) self.assertTrue(is_expected_message) def test_api_extension_validation_with_good_dns_names(self): cfg.CONF.set_override('dns_domain', 'example.com') higher_labels_len = len('example.com.') num_labels = int( math.floor((dns.FQDN_MAX_LEN - higher_labels_len) / dns.DNS_LABEL_MAX_LEN)) filler_len = int( math.floor((dns.FQDN_MAX_LEN - higher_labels_len) % dns.DNS_LABEL_MAX_LEN)) dns_names = ['', 'www.1000.com', 'vM01', 'vm01.example.com.', '8vm01', 'vm-01.example.com.', 'vm01.test', 'vm01.test.example.com.', 'vm01.test-100', 'vm01.test-100.example.com.', 'a' * dns.DNS_LABEL_MAX_LEN, ('a' * dns.DNS_LABEL_MAX_LEN) + '.example.com.', ('a' * (dns.DNS_LABEL_MAX_LEN - 1) + '.') * num_labels + 'a' * (filler_len - 1)] res = self._create_network(fmt=self.fmt, name='net', admin_state_up=True) network = self.deserialize(self.fmt, res) sub_dict = {'gateway': '10.0.0.1', 'cidr': '10.0.0.0/24', 'ip_version': 4, 'ra_addr_mode': None} self._make_subnet(self.fmt, network, gateway=sub_dict['gateway'], cidr=sub_dict['cidr'], ip_version=sub_dict['ip_version'], ipv6_ra_mode=sub_dict['ra_addr_mode'], ipv6_address_mode=sub_dict['ra_addr_mode']) for dns_name in dns_names: res = self._create_port(self.fmt, net_id=network['network']['id'], dns_name=dns_name) self.assertEqual(201, res.status_code) def test_update_network_dns_domain(self): with self.network() as network: data = {'network': {'dns_domain': 'my-domain.org.'}} req = self.new_update_request('networks', data, network['network']['id']) res = req.get_response(self.api) self.assertEqual(200, res.status_code) self.assertNotIn('dns_domain', self.deserialize(self.fmt, res)['network']) neutron-8.4.0/neutron/tests/unit/extensions/test_agent.py0000664000567000056710000001572413044372760025116 0ustar jenkinsjenkins00000000000000# Copyright (c) 2013 OpenStack Foundation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
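# These tests exercise the 'agent' API extension: agents register by
# reporting state through agents_db.AgentExtRpcCallback.report_state()
# (see _register_agent_states() below) and are then exposed as standard
# REST 'agents' resources for list/show/update.  A heartbeat payload has
# roughly this shape (a sketch; the field values are illustrative):
#
#     callback.report_state(
#         admin_ctx,
#         agent_state={'agent_state': {
#             'binary': 'neutron-l3-agent',
#             'host': 'hosta',
#             'topic': 'l3_agent',
#             'agent_type': constants.AGENT_TYPE_L3,
#             'configurations': {}}},
#         time=datetime.utcnow().strftime(constants.ISO8601_TIME_FORMAT))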
import copy from datetime import datetime import time from oslo_config import cfg from oslo_utils import uuidutils from webob import exc from neutron.api.v2 import attributes from neutron.common import constants from neutron import context from neutron.db import agents_db from neutron.db import db_base_plugin_v2 from neutron.extensions import agent from neutron.tests.common import helpers from neutron.tests import tools from neutron.tests.unit.api.v2 import test_base from neutron.tests.unit.db import test_db_base_plugin_v2 _uuid = uuidutils.generate_uuid _get_path = test_base._get_path L3_HOSTA = 'hosta' DHCP_HOSTA = 'hosta' L3_HOSTB = 'hostb' DHCP_HOSTC = 'hostc' LBAAS_HOSTA = 'hosta' LBAAS_HOSTB = 'hostb' class AgentTestExtensionManager(object): def get_resources(self): # Add the resources to the global attribute map # This is done here as the setup process won't # initialize the main API router which extends # the global attribute map attributes.RESOURCE_ATTRIBUTE_MAP.update( agent.RESOURCE_ATTRIBUTE_MAP) return agent.Agent.get_resources() def get_actions(self): return [] def get_request_extensions(self): return [] # This plugin class is just for testing class TestAgentPlugin(db_base_plugin_v2.NeutronDbPluginV2, agents_db.AgentDbMixin): supported_extension_aliases = ["agent"] class AgentDBTestMixIn(object): def _list_agents(self, expected_res_status=None, neutron_context=None, query_string=None): agent_res = self._list('agents', neutron_context=neutron_context, query_params=query_string) if expected_res_status: self.assertEqual(expected_res_status, agent_res.status_int) return agent_res def _register_agent_states(self, lbaas_agents=False): """Register two L3 agents and two DHCP agents.""" l3_hosta = helpers._get_l3_agent_dict( L3_HOSTA, constants.L3_AGENT_MODE_LEGACY) l3_hostb = helpers._get_l3_agent_dict( L3_HOSTB, constants.L3_AGENT_MODE_LEGACY) dhcp_hosta = helpers._get_dhcp_agent_dict(DHCP_HOSTA) dhcp_hostc = helpers._get_dhcp_agent_dict(DHCP_HOSTC) helpers.register_l3_agent(host=L3_HOSTA) helpers.register_l3_agent(host=L3_HOSTB) helpers.register_dhcp_agent(host=DHCP_HOSTA) helpers.register_dhcp_agent(host=DHCP_HOSTC) res = [l3_hosta, l3_hostb, dhcp_hosta, dhcp_hostc] if lbaas_agents: lbaas_hosta = { 'binary': 'neutron-loadbalancer-agent', 'host': LBAAS_HOSTA, 'topic': 'LOADBALANCER_AGENT', 'configurations': {'device_drivers': ['haproxy_ns']}, 'agent_type': constants.AGENT_TYPE_LOADBALANCER} lbaas_hostb = copy.deepcopy(lbaas_hosta) lbaas_hostb['host'] = LBAAS_HOSTB callback = agents_db.AgentExtRpcCallback() callback.report_state( self.adminContext, agent_state={'agent_state': lbaas_hosta}, time=datetime.utcnow().strftime(constants.ISO8601_TIME_FORMAT)) callback.report_state( self.adminContext, agent_state={'agent_state': lbaas_hostb}, time=datetime.utcnow().strftime(constants.ISO8601_TIME_FORMAT)) res += [lbaas_hosta, lbaas_hostb] return res def _register_dvr_agents(self): dvr_snat_agent = helpers.register_l3_agent( host=L3_HOSTA, agent_mode=constants.L3_AGENT_MODE_DVR_SNAT) dvr_agent = helpers.register_l3_agent( host=L3_HOSTB, agent_mode=constants.L3_AGENT_MODE_DVR) return [dvr_snat_agent, dvr_agent] def _register_l3_agent(self, host): helpers.register_l3_agent(host) def _register_bgp_dragent(self, host): helpers.register_bgp_dragent(host) class AgentDBTestCase(AgentDBTestMixIn, test_db_base_plugin_v2.NeutronDbPluginV2TestCase): fmt = 'json' def setUp(self): plugin = 'neutron.tests.unit.extensions.test_agent.TestAgentPlugin' # for these tests we need to enable overlapping ips 
cfg.CONF.set_default('allow_overlapping_ips', True) self.useFixture(tools.AttributeMapMemento()) ext_mgr = AgentTestExtensionManager() super(AgentDBTestCase, self).setUp(plugin=plugin, ext_mgr=ext_mgr) self.adminContext = context.get_admin_context() def test_create_agent(self): data = {'agent': {}} _req = self.new_create_request('agents', data, self.fmt) _req.environ['neutron.context'] = context.Context( '', 'tenant_id') res = _req.get_response(self.ext_api) self.assertEqual(exc.HTTPBadRequest.code, res.status_int) def test_list_agent(self): agents = self._register_agent_states() res = self._list('agents') self.assertEqual(len(agents), len(res['agents'])) def test_show_agent(self): self._register_agent_states() agents = self._list_agents( query_string='binary=neutron-l3-agent') self.assertEqual(2, len(agents['agents'])) agent = self._show('agents', agents['agents'][0]['id']) self.assertEqual('neutron-l3-agent', agent['agent']['binary']) def test_update_agent(self): self._register_agent_states() agents = self._list_agents( query_string='binary=neutron-l3-agent&host=' + L3_HOSTB) self.assertEqual(1, len(agents['agents'])) com_id = agents['agents'][0]['id'] agent = self._show('agents', com_id) new_agent = {} new_agent['agent'] = {} new_agent['agent']['admin_state_up'] = False new_agent['agent']['description'] = 'description' self._update('agents', com_id, new_agent) agent = self._show('agents', com_id) self.assertFalse(agent['agent']['admin_state_up']) self.assertEqual('description', agent['agent']['description']) def test_dead_agent(self): cfg.CONF.set_override('agent_down_time', 1) self._register_agent_states() time.sleep(1.5) agents = self._list_agents( query_string='binary=neutron-l3-agent&host=' + L3_HOSTB) self.assertFalse(agents['agents'][0]['alive']) neutron-8.4.0/neutron/tests/unit/extensions/test_netmtu.py0000664000567000056710000000541013044372760025323 0ustar jenkinsjenkins00000000000000# Copyright 2015 OpenStack Foundation. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron.common import constants from neutron.db import db_base_plugin_v2 from neutron.db import netmtu_db from neutron.extensions import netmtu from neutron.tests.unit.db import test_db_base_plugin_v2 class NetmtuExtensionManager(object): def get_resources(self): return [] def get_actions(self): return [] def get_request_extensions(self): return [] def get_extended_resources(self, version): return netmtu.get_extended_resources(version) class NetmtuExtensionTestPlugin(db_base_plugin_v2.NeutronDbPluginV2, netmtu_db.Netmtu_db_mixin): """Test plugin to mixin the network MTU extensions. """ supported_extension_aliases = ["net-mtu"] class NetmtuExtensionTestCase(test_db_base_plugin_v2.TestNetworksV2): """Test API extension net-mtu attributes. """ def setUp(self): plugin = ('neutron.tests.unit.extensions.test_netmtu.' 
+ 'NetmtuExtensionTestPlugin') ext_mgr = NetmtuExtensionManager() super(NetmtuExtensionTestCase, self).setUp(plugin=plugin, ext_mgr=ext_mgr) def test_list_networks_with_fields_mtu(self): with self.network(name='net1') as net1: req = self.new_list_request('networks', params='fields=name&fields=mtu') res = self.deserialize(self.fmt, req.get_response(self.api)) self.assertEqual(1, len(res['networks'])) self.assertEqual(res['networks'][0]['name'], net1['network']['name']) self.assertEqual(constants.DEFAULT_NETWORK_MTU, res['networks'][0].get('mtu')) def test_show_network_mtu(self): with self.network(name='net1') as net: req = self.new_show_request('networks', net['network']['id']) res = self.deserialize(self.fmt, req.get_response(self.api)) self.assertEqual(res['network']['name'], net['network']['name']) self.assertEqual(constants.DEFAULT_NETWORK_MTU, res['network']['mtu']) neutron-8.4.0/neutron/tests/unit/extensions/test_providernet.py0000664000567000056710000001564513044372760026363 0ustar jenkinsjenkins00000000000000# Copyright 2013 VMware # All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import mock from oslo_config import cfg from oslo_utils import uuidutils from webob import exc as web_exc import webtest from neutron.api import extensions from neutron.api.v2 import router from neutron import context from neutron.extensions import providernet as pnet from neutron import manager from neutron import quota from neutron.tests import tools from neutron.tests.unit.api import test_extensions from neutron.tests.unit.api.v2 import test_base from neutron.tests.unit import testlib_api class ProviderExtensionManager(object): def get_resources(self): return [] def get_actions(self): return [] def get_request_extensions(self): return [] def get_extended_resources(self, version): return pnet.get_extended_resources(version) class ProvidernetExtensionTestCase(testlib_api.WebTestCase): fmt = 'json' def setUp(self): super(ProvidernetExtensionTestCase, self).setUp() plugin = 'neutron.neutron_plugin_base_v2.NeutronPluginBaseV2' # Ensure existing ExtensionManager is not used extensions.PluginAwareExtensionManager._instance = None self.useFixture(tools.AttributeMapMemento()) # Update the plugin and extensions path self.setup_coreplugin(plugin) cfg.CONF.set_override('allow_pagination', True) cfg.CONF.set_override('allow_sorting', True) self._plugin_patcher = mock.patch(plugin, autospec=True) self.plugin = self._plugin_patcher.start() # Ensure Quota checks never fail because of mock instance = self.plugin.return_value instance.get_networks_count.return_value = 1 # Instantiate mock plugin and enable the 'provider' extension manager.NeutronManager.get_plugin().supported_extension_aliases = ( ["provider"]) ext_mgr = ProviderExtensionManager() self.ext_mdw = test_extensions.setup_extensions_middleware(ext_mgr) self.addCleanup(self._plugin_patcher.stop) self.api = webtest.TestApp(router.APIRouter()) quota.QUOTAS._driver = None cfg.CONF.set_override('quota_driver', 'neutron.quota.ConfDriver', group='QUOTAS') def 
_prepare_net_data(self): return {'name': 'net1', pnet.NETWORK_TYPE: 'sometype', pnet.PHYSICAL_NETWORK: 'physnet', pnet.SEGMENTATION_ID: 666} def _put_network_with_provider_attrs(self, ctx, expect_errors=False): data = self._prepare_net_data() env = {'neutron.context': ctx} instance = self.plugin.return_value instance.get_network.return_value = {'tenant_id': ctx.tenant_id, 'shared': False} net_id = uuidutils.generate_uuid() res = self.api.put(test_base._get_path('networks', id=net_id, fmt=self.fmt), self.serialize({'network': data}), extra_environ=env, expect_errors=expect_errors) return res, data, net_id def _post_network_with_provider_attrs(self, ctx, expect_errors=False): data = self._prepare_net_data() env = {'neutron.context': ctx} res = self.api.post(test_base._get_path('networks', fmt=self.fmt), self.serialize({'network': data}), content_type='application/' + self.fmt, extra_environ=env, expect_errors=expect_errors) return res, data def _post_network_with_bad_provider_attrs(self, ctx, bad_data, expect_errors=False): data = self._prepare_net_data() data.update(bad_data) env = {'neutron.context': ctx} res = self.api.post(test_base._get_path('networks', fmt=self.fmt), self.serialize({'network': data}), content_type='application/' + self.fmt, extra_environ=env, expect_errors=expect_errors) return res, data def test_network_create_with_provider_attrs(self): ctx = context.get_admin_context() ctx.tenant_id = 'an_admin' res, data = self._post_network_with_provider_attrs(ctx) instance = self.plugin.return_value exp_input = {'network': data} exp_input['network'].update({'admin_state_up': True, 'tenant_id': 'an_admin', 'shared': False}) instance.create_network.assert_called_with(mock.ANY, network=exp_input) self.assertEqual(web_exc.HTTPCreated.code, res.status_int) def test_network_create_with_bad_provider_attrs_400(self): ctx = context.get_admin_context() ctx.tenant_id = 'an_admin' bad_data = {pnet.SEGMENTATION_ID: "abc"} res, _1 = self._post_network_with_bad_provider_attrs(ctx, bad_data, True) self.assertEqual(web_exc.HTTPBadRequest.code, res.status_int) def test_network_update_with_provider_attrs(self): ctx = context.get_admin_context() ctx.tenant_id = 'an_admin' res, data, net_id = self._put_network_with_provider_attrs(ctx) instance = self.plugin.return_value exp_input = {'network': data} instance.update_network.assert_called_with(mock.ANY, net_id, network=exp_input) self.assertEqual(web_exc.HTTPOk.code, res.status_int) def test_network_create_with_provider_attrs_noadmin_returns_403(self): tenant_id = 'no_admin' ctx = context.Context('', tenant_id, is_admin=False) res, _1 = self._post_network_with_provider_attrs(ctx, True) self.assertEqual(web_exc.HTTPForbidden.code, res.status_int) def test_network_update_with_provider_attrs_noadmin_returns_403(self): tenant_id = 'no_admin' ctx = context.Context('', tenant_id, is_admin=False) res, _1, _2 = self._put_network_with_provider_attrs(ctx, True) self.assertEqual(web_exc.HTTPForbidden.code, res.status_int) neutron-8.4.0/neutron/tests/unit/extensions/v2attributes.py0000664000567000056710000000274413044372760025415 0ustar jenkinsjenkins00000000000000# Copyright (c) 2012 OpenStack Foundation. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. from neutron.api import extensions EXTENDED_ATTRIBUTES_2_0 = { 'networks': { 'v2attrs:something': {'allow_post': False, 'allow_put': False, 'is_visible': True}, 'v2attrs:something_else': {'allow_post': True, 'allow_put': False, 'is_visible': False}, } } class V2attributes(extensions.ExtensionDescriptor): def get_name(self): return "V2 Extended Attributes Example" def get_alias(self): return "v2attrs" def get_description(self): return "Demonstrates extended attributes on V2 core resources" def get_updated(self): return "2012-07-18T10:00:00-00:00" def get_extended_resources(self, version): if version == "2.0": return EXTENDED_ATTRIBUTES_2_0 else: return {} neutron-8.4.0/neutron/tests/unit/extensions/__init__.py0000664000567000056710000000000013044372736024500 0ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/tests/unit/extensions/test_router_availability_zone.py0000664000567000056710000001113013044372760031110 0ustar jenkinsjenkins00000000000000# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import six from neutron.db.availability_zone import router as router_az_db from neutron.db import common_db_mixin from neutron.db import l3_agentschedulers_db from neutron.db import l3_db from neutron.extensions import l3 from neutron.extensions import router_availability_zone as router_az from neutron.plugins.common import constants as service_constants from neutron.tests.unit.extensions import test_availability_zone as test_az from neutron.tests.unit.extensions import test_l3 class AZL3ExtensionManager(test_az.AZExtensionManager): def get_resources(self): return (super(AZL3ExtensionManager, self).get_resources() + l3.L3.get_resources()) class AZRouterTestPlugin(common_db_mixin.CommonDbMixin, l3_db.L3_NAT_db_mixin, router_az_db.RouterAvailabilityZoneMixin, l3_agentschedulers_db.AZL3AgentSchedulerDbMixin): supported_extension_aliases = ["router", "l3_agent_scheduler", "router_availability_zone"] def get_plugin_type(self): return service_constants.L3_ROUTER_NAT def get_plugin_description(self): return "L3 Routing Service Plugin for testing" def _create_router_db(self, context, router, tenant_id): # l3-plugin using routerextraattributes must call # _process_extra_attr_router_create. with context.session.begin(subtransactions=True): router_db = super(AZRouterTestPlugin, self)._create_router_db( context, router, tenant_id) self._process_extra_attr_router_create(context, router_db, router) return router_db class TestAZRouterCase(test_az.AZTestCommon, test_l3.L3NatTestCaseMixin): def setUp(self): plugin = ('neutron.tests.unit.extensions.' 
'test_availability_zone.AZTestPlugin') l3_plugin = ('neutron.tests.unit.extensions.' 'test_router_availability_zone.AZRouterTestPlugin') service_plugins = {'l3_plugin_name': l3_plugin} self._backup() l3.RESOURCE_ATTRIBUTE_MAP['routers'].update( router_az.EXTENDED_ATTRIBUTES_2_0['routers']) ext_mgr = AZL3ExtensionManager() super(TestAZRouterCase, self).setUp(plugin=plugin, ext_mgr=ext_mgr, service_plugins=service_plugins) def _backup(self): self.contents_backup = {} for res, attrs in six.iteritems(l3.RESOURCE_ATTRIBUTE_MAP): self.contents_backup[res] = attrs.copy() self.addCleanup(self._restore) def _restore(self): l3.RESOURCE_ATTRIBUTE_MAP = self.contents_backup def test_create_router_with_az(self): self._register_azs() az_hints = ['nova2'] with self.router(availability_zone_hints=az_hints) as router: res = self._show('routers', router['router']['id']) self.assertItemsEqual(az_hints, res['router']['availability_zone_hints']) def test_create_router_with_azs(self): self._register_azs() az_hints = ['nova2', 'nova3'] with self.router(availability_zone_hints=az_hints) as router: res = self._show('routers', router['router']['id']) self.assertItemsEqual(az_hints, res['router']['availability_zone_hints']) def test_create_router_without_az(self): with self.router() as router: res = self._show('routers', router['router']['id']) self.assertEqual([], res['router']['availability_zone_hints']) def test_create_router_with_empty_az(self): with self.router(availability_zone_hints=[]) as router: res = self._show('routers', router['router']['id']) self.assertEqual([], res['router']['availability_zone_hints']) def test_create_router_with_none_existing_az(self): res = self._create_router(self.fmt, 'tenant_id', availability_zone_hints=['nova4']) self.assertEqual(404, res.status_int) neutron-8.4.0/neutron/tests/unit/extensions/test_extraroute.py0000664000567000056710000006003213044372760026212 0ustar jenkinsjenkins00000000000000# Copyright 2013, Nachi Ueno, NTT MCL, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import cfg from oslo_utils import uuidutils from webob import exc from neutron.common import constants from neutron.common import utils from neutron import context from neutron.db import extraroute_db from neutron.extensions import extraroute from neutron.extensions import l3 from neutron.tests.unit.api.v2 import test_base from neutron.tests.unit.extensions import test_l3 as test_l3 _uuid = uuidutils.generate_uuid _get_path = test_base._get_path class ExtraRouteTestExtensionManager(object): def get_resources(self): l3.RESOURCE_ATTRIBUTE_MAP['routers'].update( extraroute.EXTENDED_ATTRIBUTES_2_0['routers']) return l3.L3.get_resources() def get_actions(self): return [] def get_request_extensions(self): return [] # This plugin class is for tests with plugin that integrates L3. 
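# (Contrast with TestExtraRouteL3NatServicePlugin below, which provides
# the same capability for deployments that delegate L3 routing to a
# separate service plugin.)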
class TestExtraRouteIntPlugin(test_l3.TestL3NatIntPlugin, extraroute_db.ExtraRoute_db_mixin): supported_extension_aliases = ["external-net", "router", "extraroute"] # A fake l3 service plugin class with extra route capability for # plugins that delegate away L3 routing functionality class TestExtraRouteL3NatServicePlugin(test_l3.TestL3NatServicePlugin, extraroute_db.ExtraRoute_db_mixin): supported_extension_aliases = ["router", "extraroute"] class ExtraRouteDBTestCaseBase(object): def _routes_update_prepare( self, router_id, subnet_id, port_id, routes, skip_add=False, tenant_id=None): if not skip_add: self._router_interface_action( 'add', router_id, subnet_id, port_id, tenant_id=None) ctxt = context.Context('', tenant_id) if tenant_id else None self._update('routers', router_id, {'router': {'routes': routes}}, neutron_context=ctxt) return self._show('routers', router_id) def _routes_update_cleanup(self, port_id, subnet_id, router_id, routes): self._update('routers', router_id, {'router': {'routes': routes}}) self._router_interface_action('remove', router_id, subnet_id, port_id) def test_route_update_with_one_route(self): routes = [{'destination': '135.207.0.0/16', 'nexthop': '10.0.1.3'}] with self.router() as r: with self.subnet(cidr='10.0.1.0/24') as s: with self.port(subnet=s) as p: body = self._routes_update_prepare(r['router']['id'], None, p['port']['id'], routes) self.assertEqual(routes, body['router']['routes']) self._routes_update_cleanup(p['port']['id'], None, r['router']['id'], []) def test_route_update_with_external_route(self): my_tenant = 'tenant1' routes = [{'destination': '135.207.0.0/16', 'nexthop': '10.0.1.3'}] with self.subnet(cidr='10.0.1.0/24', tenant_id='notme') as ext_subnet: self._set_net_external(ext_subnet['subnet']['network_id']) ext_info = {'network_id': ext_subnet['subnet']['network_id']} with self.router( external_gateway_info=ext_info, tenant_id=my_tenant) as r: body = self._routes_update_prepare( r['router']['id'], None, None, routes, skip_add=True, tenant_id=my_tenant) self.assertEqual(routes, body['router']['routes']) def test_route_update_with_route_via_another_tenant_subnet(self): my_tenant = 'tenant1' routes = [{'destination': '135.207.0.0/16', 'nexthop': '10.0.1.3'}] with self.subnet(cidr='10.0.1.0/24', tenant_id='notme') as subnet: with self.router(tenant_id=my_tenant) as r: body = self._routes_update_prepare( r['router']['id'], subnet['subnet']['id'], None, routes, tenant_id=my_tenant) self.assertEqual(routes, body['router']['routes']) def test_route_clear_routes_with_None(self): routes = [{'destination': '135.207.0.0/16', 'nexthop': '10.0.1.3'}, {'destination': '12.0.0.0/8', 'nexthop': '10.0.1.4'}, {'destination': '141.212.0.0/16', 'nexthop': '10.0.1.5'}] with self.router() as r: with self.subnet(cidr='10.0.1.0/24') as s: with self.port(subnet=s) as p: self._routes_update_prepare(r['router']['id'], None, p['port']['id'], routes) body = self._update('routers', r['router']['id'], {'router': {'routes': None}}) self.assertEqual([], body['router']['routes']) self._routes_update_cleanup(p['port']['id'], None, r['router']['id'], []) def test_router_interface_in_use_by_route(self): routes = [{'destination': '135.207.0.0/16', 'nexthop': '10.0.1.3'}] with self.router() as r: with self.subnet(cidr='10.0.1.0/24') as s: with self.port(subnet=s) as p: body = self._routes_update_prepare(r['router']['id'], None, p['port']['id'], routes) self.assertEqual(routes, body['router']['routes']) self._router_interface_action( 'remove', r['router']['id'], None, 
p['port']['id'], expected_code=exc.HTTPConflict.code) self._routes_update_cleanup(p['port']['id'], None, r['router']['id'], []) def test_route_update_with_multi_routes(self): routes = [{'destination': '135.207.0.0/16', 'nexthop': '10.0.1.3'}, {'destination': '12.0.0.0/8', 'nexthop': '10.0.1.4'}, {'destination': '141.212.0.0/16', 'nexthop': '10.0.1.5'}] with self.router() as r: with self.subnet(cidr='10.0.1.0/24') as s: with self.port(subnet=s) as p: body = self._routes_update_prepare(r['router']['id'], None, p['port']['id'], routes) self.assertEqual( sorted(body['router']['routes'], key=utils.safe_sort_key), sorted(routes, key=utils.safe_sort_key)) self._routes_update_cleanup(p['port']['id'], None, r['router']['id'], []) def test_routes_update_for_multiple_routers(self): routes1 = [{'destination': '135.207.0.0/16', 'nexthop': '10.0.0.3'}] routes2 = [{'destination': '12.0.0.0/8', 'nexthop': '10.0.0.4'}] with self.router() as r1,\ self.router() as r2,\ self.subnet(cidr='10.0.0.0/24') as s: with self.port(subnet=s) as p1, self.port(subnet=s) as p2: body = self._routes_update_prepare(r1['router']['id'], None, p1['port']['id'], routes1) self.assertEqual(routes1, body['router']['routes']) body = self._routes_update_prepare(r2['router']['id'], None, p2['port']['id'], routes2) self.assertEqual(routes2, body['router']['routes']) self._routes_update_cleanup(p1['port']['id'], None, r1['router']['id'], []) self._routes_update_cleanup(p2['port']['id'], None, r2['router']['id'], []) def test_router_update_delete_routes(self): routes_orig = [{'destination': '135.207.0.0/16', 'nexthop': '10.0.1.3'}, {'destination': '12.0.0.0/8', 'nexthop': '10.0.1.4'}, {'destination': '141.212.0.0/16', 'nexthop': '10.0.1.5'}] routes_left = [{'destination': '135.207.0.0/16', 'nexthop': '10.0.1.3'}, {'destination': '141.212.0.0/16', 'nexthop': '10.0.1.5'}] with self.router() as r: with self.subnet(cidr='10.0.1.0/24') as s: with self.port(subnet=s) as p: body = self._routes_update_prepare(r['router']['id'], None, p['port']['id'], routes_orig) self.assertEqual( sorted(body['router']['routes'], key=utils.safe_sort_key), sorted(routes_orig, key=utils.safe_sort_key)) body = self._routes_update_prepare(r['router']['id'], None, p['port']['id'], routes_left, skip_add=True) self.assertEqual( sorted(body['router']['routes'], key=utils.safe_sort_key), sorted(routes_left, key=utils.safe_sort_key)) self._routes_update_cleanup(p['port']['id'], None, r['router']['id'], []) def _test_malformed_route(self, routes): with self.router() as r: with self.subnet(cidr='10.0.1.0/24') as s: with self.port(subnet=s) as p: self._router_interface_action('add', r['router']['id'], None, p['port']['id']) self._update('routers', r['router']['id'], {'router': {'routes': routes}}, expected_code=exc.HTTPBadRequest.code) # clean-up self._router_interface_action('remove', r['router']['id'], None, p['port']['id']) def test_no_destination_route(self): self._test_malformed_route([{'nexthop': '10.0.1.6'}]) def test_no_nexthop_route(self): self._test_malformed_route({'destination': '135.207.0.0/16'}) def test_none_destination(self): self._test_malformed_route([{'destination': None, 'nexthop': '10.0.1.3'}]) def test_none_nexthop(self): self._test_malformed_route([{'destination': '135.207.0.0/16', 'nexthop': None}]) def test_nexthop_is_port_ip(self): with self.router() as r: with self.subnet(cidr='10.0.1.0/24') as s: with self.port(subnet=s) as p: self._router_interface_action('add', r['router']['id'], None, p['port']['id']) port_ip = 
p['port']['fixed_ips'][0]['ip_address'] routes = [{'destination': '135.207.0.0/16', 'nexthop': port_ip}] self._update('routers', r['router']['id'], {'router': {'routes': routes}}, expected_code=exc.HTTPBadRequest.code) # clean-up self._router_interface_action('remove', r['router']['id'], None, p['port']['id']) def test_router_update_with_too_many_routes(self): with self.router() as r: with self.subnet(cidr='10.0.1.0/24') as s: with self.port(subnet=s) as p: self._router_interface_action('add', r['router']['id'], None, p['port']['id']) routes = [{'destination': '135.207.0.0/16', 'nexthop': '10.0.1.3'}, {'destination': '12.0.0.0/8', 'nexthop': '10.0.1.4'}, {'destination': '141.212.0.0/16', 'nexthop': '10.0.1.5'}, {'destination': '192.168.0.0/16', 'nexthop': '10.0.1.6'}] self._update('routers', r['router']['id'], {'router': {'routes': routes}}, expected_code=exc.HTTPBadRequest.code) # clean-up self._router_interface_action('remove', r['router']['id'], None, p['port']['id']) def test_router_update_with_dup_address(self): with self.router() as r: with self.subnet(cidr='10.0.1.0/24') as s: with self.port(subnet=s) as p: self._router_interface_action('add', r['router']['id'], None, p['port']['id']) routes = [{'destination': '135.207.0.0/16', 'nexthop': '10.0.1.3'}, {'destination': '135.207.0.0/16', 'nexthop': '10.0.1.3'}] self._update('routers', r['router']['id'], {'router': {'routes': routes}}, expected_code=exc.HTTPBadRequest.code) # clean-up self._router_interface_action('remove', r['router']['id'], None, p['port']['id']) def test_router_update_with_invalid_ip_address(self): with self.router() as r: with self.subnet(cidr='10.0.1.0/24') as s: with self.port(subnet=s) as p: self._router_interface_action('add', r['router']['id'], None, p['port']['id']) routes = [{'destination': '512.207.0.0/16', 'nexthop': '10.0.1.3'}] self._update('routers', r['router']['id'], {'router': {'routes': routes}}, expected_code=exc.HTTPBadRequest.code) routes = [{'destination': '127.207.0.0/48', 'nexthop': '10.0.1.3'}] self._update('routers', r['router']['id'], {'router': {'routes': routes}}, expected_code=exc.HTTPBadRequest.code) routes = [{'destination': 'invalid_ip_address', 'nexthop': '10.0.1.3'}] self._update('routers', r['router']['id'], {'router': {'routes': routes}}, expected_code=exc.HTTPBadRequest.code) # clean-up self._router_interface_action('remove', r['router']['id'], None, p['port']['id']) def test_router_update_with_invalid_nexthop_ip(self): with self.router() as r: with self.subnet(cidr='10.0.1.0/24') as s: with self.port(subnet=s) as p: self._router_interface_action('add', r['router']['id'], None, p['port']['id']) routes = [{'destination': '127.207.0.0/16', 'nexthop': ' 300.10.10.4'}] self._update('routers', r['router']['id'], {'router': {'routes': routes}}, expected_code=exc.HTTPBadRequest.code) # clean-up self._router_interface_action('remove', r['router']['id'], None, p['port']['id']) def test_router_update_with_nexthop_is_outside_port_subnet(self): with self.router() as r: with self.subnet(cidr='10.0.1.0/24') as s: with self.port(subnet=s) as p: self._router_interface_action('add', r['router']['id'], None, p['port']['id']) routes = [{'destination': '127.207.0.0/16', 'nexthop': ' 20.10.10.4'}] self._update('routers', r['router']['id'], {'router': {'routes': routes}}, expected_code=exc.HTTPBadRequest.code) # clean-up self._router_interface_action('remove', r['router']['id'], None, p['port']['id']) def test_router_update_on_external_port(self): with self.router() as r: with 
self.subnet(cidr='10.0.1.0/24') as s: self._set_net_external(s['subnet']['network_id']) self._add_external_gateway_to_router( r['router']['id'], s['subnet']['network_id']) body = self._show('routers', r['router']['id']) net_id = body['router']['external_gateway_info']['network_id'] self.assertEqual(net_id, s['subnet']['network_id']) port_res = self._list_ports( 'json', 200, s['subnet']['network_id'], tenant_id=r['router']['tenant_id'], device_owner=constants.DEVICE_OWNER_ROUTER_GW) port_list = self.deserialize('json', port_res) self.assertEqual(1, len(port_list['ports'])) routes = [{'destination': '135.207.0.0/16', 'nexthop': '10.0.1.3'}] body = self._update('routers', r['router']['id'], {'router': {'routes': routes}}) body = self._show('routers', r['router']['id']) self.assertEqual(routes, body['router']['routes']) self._remove_external_gateway_from_router( r['router']['id'], s['subnet']['network_id']) body = self._show('routers', r['router']['id']) gw_info = body['router']['external_gateway_info'] self.assertIsNone(gw_info) def test_router_list_with_sort(self): with self.router(name='router1') as router1,\ self.router(name='router2') as router2,\ self.router(name='router3') as router3: self._test_list_with_sort('router', (router3, router2, router1), [('name', 'desc')]) def test_router_list_with_pagination(self): with self.router(name='router1') as router1,\ self.router(name='router2') as router2,\ self.router(name='router3') as router3: self._test_list_with_pagination('router', (router1, router2, router3), ('name', 'asc'), 2, 2) def test_router_list_with_pagination_reverse(self): with self.router(name='router1') as router1,\ self.router(name='router2') as router2,\ self.router(name='router3') as router3: self._test_list_with_pagination_reverse('router', (router1, router2, router3), ('name', 'asc'), 2, 2) class ExtraRouteDBIntTestCase(test_l3.L3NatDBIntTestCase, ExtraRouteDBTestCaseBase): def setUp(self, plugin=None, ext_mgr=None): if not plugin: plugin = ('neutron.tests.unit.extensions.test_extraroute.' 'TestExtraRouteIntPlugin') # for these tests we need to enable overlapping ips cfg.CONF.set_default('allow_overlapping_ips', True) cfg.CONF.set_default('max_routes', 3) ext_mgr = ExtraRouteTestExtensionManager() super(test_l3.L3BaseForIntTests, self).setUp(plugin=plugin, ext_mgr=ext_mgr) self.setup_notification_driver() class ExtraRouteDBSepTestCase(test_l3.L3NatDBSepTestCase, ExtraRouteDBTestCaseBase): def setUp(self): # the plugin without L3 support plugin = 'neutron.tests.unit.extensions.test_l3.TestNoL3NatPlugin' # the L3 service plugin l3_plugin = ('neutron.tests.unit.extensions.test_extraroute.' 'TestExtraRouteL3NatServicePlugin') service_plugins = {'l3_plugin_name': l3_plugin} # for these tests we need to enable overlapping ips cfg.CONF.set_default('allow_overlapping_ips', True) cfg.CONF.set_default('max_routes', 3) ext_mgr = ExtraRouteTestExtensionManager() super(test_l3.L3BaseForSepTests, self).setUp( plugin=plugin, ext_mgr=ext_mgr, service_plugins=service_plugins) self.setup_notification_driver() neutron-8.4.0/neutron/tests/unit/extensions/extensionattribute.py0000664000567000056710000000615313044372760026715 0ustar jenkinsjenkins00000000000000# Copyright 2013 VMware, Inc. # All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import abc from neutron.api import extensions from neutron.api.v2 import base from neutron import manager from neutron.quota import resource_registry # Attribute Map RESOURCE_ATTRIBUTE_MAP = { 'ext_test_resources': { 'id': {'allow_post': False, 'allow_put': False, 'validate': {'type:uuid': None}, 'is_visible': True}, 'name': {'allow_post': True, 'allow_put': True, 'validate': {'type:string': None}, 'is_visible': True, 'default': ''}, 'tenant_id': {'allow_post': True, 'allow_put': False, 'required_by_policy': True, 'validate': {'type:string': None}, 'is_visible': True}, } } class Extensionattribute(extensions.ExtensionDescriptor): @classmethod def get_name(cls): return "Extension Test Resource" @classmethod def get_alias(cls): return "ext-obj-test" @classmethod def get_description(cls): return "Extension Test Resource" @classmethod def get_updated(cls): return "2013-02-05T10:00:00-00:00" def update_attributes_map(self, attributes): super(Extensionattribute, self).update_attributes_map( attributes, extension_attrs_map=RESOURCE_ATTRIBUTE_MAP) @classmethod def get_resources(cls): """Returns Ext Resources.""" exts = [] plugin = manager.NeutronManager.get_plugin() resource_name = 'ext_test_resource' collection_name = resource_name + "s" params = RESOURCE_ATTRIBUTE_MAP.get(collection_name, dict()) resource_registry.register_resource_by_name(resource_name) controller = base.create_resource(collection_name, resource_name, plugin, params, member_actions={}) ex = extensions.ResourceExtension(collection_name, controller, member_actions={}) exts.append(ex) return exts def get_extended_resources(self, version): if version == "2.0": return RESOURCE_ATTRIBUTE_MAP else: return {} class ExtensionObjectTestPluginBase(object): @abc.abstractmethod def create_ext_test_resource(self, context, router): pass @abc.abstractmethod def get_ext_test_resource(self, context, id, fields=None): pass neutron-8.4.0/neutron/tests/unit/extensions/test_servicetype.py0000664000567000056710000002114513044372760026354 0ustar jenkinsjenkins00000000000000# Copyright 2012 OpenStack Foundation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
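# The provider entries consumed by these tests follow the format used by
# the service_provider option in neutron.conf's [service_providers]
# section:
#
#     <service_type>:<name>:<driver_path>[:default]
#
# e.g. (with a hypothetical driver path):
#
#     LOADBALANCER:lbaas1:some.package.LBDriver:default
#
# At most one provider per service type may carry the ':default' flag;
# _set_override() below rebuilds the ServiceTypeManager singleton from a
# list of such strings.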
import mock from oslo_config import cfg import webob.exc as webexc import webtest from neutron.api import extensions from neutron.common import exceptions as n_exc from neutron import context from neutron.db import servicetype_db as st_db from neutron.extensions import servicetype from neutron.plugins.common import constants from neutron.services import provider_configuration as provconf from neutron.tests.unit.api import test_extensions from neutron.tests.unit.api.v2 import test_base from neutron.tests.unit.db import test_db_base_plugin_v2 from neutron.tests.unit import dummy_plugin as dp from neutron.tests.unit import testlib_api _uuid = test_base._uuid _get_path = test_base._get_path class ServiceTypeManagerTestCase(testlib_api.SqlTestCase): def setUp(self): self.service_providers = mock.patch.object( provconf.NeutronModule, 'service_providers').start() super(ServiceTypeManagerTestCase, self).setUp() self.ctx = context.get_admin_context() def _set_override(self, service_providers): self.service_providers.return_value = service_providers st_db.ServiceTypeManager._instance = None self.manager = st_db.ServiceTypeManager.get_instance() for provider in service_providers: self.manager.add_provider_configuration( provider.split(':')[0], provconf.ProviderConfiguration()) def test_service_provider_driver_not_unique(self): self._set_override([constants.LOADBALANCER + ':lbaas:driver']) prov = {'service_type': constants.LOADBALANCER, 'name': 'name2', 'driver': 'driver', 'default': False} self.assertRaises( n_exc.Invalid, self.manager.config['LOADBALANCER'].add_provider, prov) def test_get_service_providers(self): """Test that get_service_providers filters correctly.""" self._set_override( [constants.LOADBALANCER + ':lbaas:driver_path1', constants.FIREWALL + ':fwaas:driver_path2']) ctx = context.get_admin_context() res = self.manager.get_service_providers( ctx, filters=dict(service_type=[constants.LOADBALANCER]) ) self.assertEqual(1, len(res)) res = self.manager.get_service_providers( ctx, filters=dict(service_type=[constants.FIREWALL]) ) self.assertEqual(1, len(res)) def test_multiple_default_providers_specified_for_service(self): self.assertRaises( n_exc.Invalid, self._set_override, [constants.LOADBALANCER + ':lbaas1:driver_path:default', constants.LOADBALANCER + ':lbaas2:driver_path:default']) def test_get_default_provider(self): self._set_override([constants.LOADBALANCER + ':lbaas1:driver_path:default', constants.DUMMY + ':lbaas2:driver_path2']) # can pass None as a context p = self.manager.get_default_service_provider(None, constants.LOADBALANCER) self.assertEqual({'service_type': constants.LOADBALANCER, 'name': 'lbaas1', 'driver': 'driver_path', 'default': True}, p) self.assertRaises( provconf.DefaultServiceProviderNotFound, self.manager.get_default_service_provider, None, constants.DUMMY ) def test_add_resource_association(self): self._set_override([constants.LOADBALANCER + ':lbaas1:driver_path:default', constants.DUMMY + ':lbaas2:driver_path2']) ctx = context.get_admin_context() self.manager.add_resource_association(ctx, constants.LOADBALANCER, 'lbaas1', '123-123') self.assertEqual(ctx.session. 
query(st_db.ProviderResourceAssociation).count(), 1) assoc = ctx.session.query(st_db.ProviderResourceAssociation).one() ctx.session.delete(assoc) def test_invalid_resource_association(self): self._set_override([constants.LOADBALANCER + ':lbaas1:driver_path:default', constants.DUMMY + ':lbaas2:driver_path2']) ctx = context.get_admin_context() self.assertRaises(provconf.ServiceProviderNotFound, self.manager.add_resource_association, ctx, 'BLABLA_svc', 'name', '123-123') class TestServiceTypeExtensionManager(object): """Mock extensions manager.""" def get_resources(self): return (servicetype.Servicetype.get_resources() + dp.Dummy.get_resources()) def get_actions(self): return [] def get_request_extensions(self): return [] class ServiceTypeExtensionTestCaseBase(testlib_api.WebTestCase): fmt = 'json' def setUp(self): # This is needed because otherwise a failure will occur due to # nonexisting core_plugin self.setup_coreplugin(test_db_base_plugin_v2.DB_PLUGIN_KLASS) cfg.CONF.set_override('service_plugins', ["%s.%s" % (dp.__name__, dp.DummyServicePlugin.__name__)]) # Ensure existing ExtensionManager is not used extensions.PluginAwareExtensionManager._instance = None ext_mgr = TestServiceTypeExtensionManager() self.ext_mdw = test_extensions.setup_extensions_middleware(ext_mgr) self.api = webtest.TestApp(self.ext_mdw) self.resource_name = servicetype.RESOURCE_NAME.replace('-', '_') super(ServiceTypeExtensionTestCaseBase, self).setUp() class ServiceTypeExtensionTestCase(ServiceTypeExtensionTestCaseBase): def setUp(self): self._patcher = mock.patch( "neutron.db.servicetype_db.ServiceTypeManager", autospec=True) self.mock_mgr = self._patcher.start() self.mock_mgr.get_instance.return_value = self.mock_mgr.return_value super(ServiceTypeExtensionTestCase, self).setUp() def test_service_provider_list(self): instance = self.mock_mgr.return_value res = self.api.get(_get_path('service-providers', fmt=self.fmt)) instance.get_service_providers.assert_called_with(mock.ANY, filters={}, fields=[]) self.assertEqual(webexc.HTTPOk.code, res.status_int) class ServiceTypeManagerExtTestCase(ServiceTypeExtensionTestCaseBase): """Tests ServiceTypemanager as a public API.""" def setUp(self): self.service_providers = mock.patch.object( provconf.NeutronModule, 'service_providers').start() service_providers = [ constants.LOADBALANCER + ':lbaas:driver_path', constants.DUMMY + ':dummy:dummy_dr' ] self.service_providers.return_value = service_providers # Blank out service type manager instance st_db.ServiceTypeManager._instance = None self.manager = st_db.ServiceTypeManager.get_instance() for provider in service_providers: self.manager.add_provider_configuration( provider.split(':')[0], provconf.ProviderConfiguration()) super(ServiceTypeManagerExtTestCase, self).setUp() def _list_service_providers(self): return self.api.get(_get_path('service-providers', fmt=self.fmt)) def test_list_service_providers(self): res = self._list_service_providers() self.assertEqual(webexc.HTTPOk.code, res.status_int) data = self.deserialize(res) self.assertIn('service_providers', data) self.assertGreaterEqual(len(data['service_providers']), 2) neutron-8.4.0/neutron/tests/unit/extensions/test_external_net.py0000664000567000056710000002130613044372760026501 0ustar jenkinsjenkins00000000000000# Copyright (c) 2013 OpenStack Foundation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
neutron-8.4.0/neutron/tests/unit/extensions/test_external_net.py

# Copyright (c) 2013 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import mock
from oslo_utils import uuidutils
import testtools
from webob import exc

from neutron.common import constants
from neutron import context
from neutron.db import models_v2
from neutron.extensions import external_net as external_net
from neutron import manager
from neutron.tests.unit.api.v2 import test_base
from neutron.tests.unit.db import test_db_base_plugin_v2

_uuid = uuidutils.generate_uuid
_get_path = test_base._get_path


class ExtNetTestExtensionManager(object):

    def get_resources(self):
        return []

    def get_actions(self):
        return []

    def get_request_extensions(self):
        return []


class ExtNetDBTestCase(test_db_base_plugin_v2.NeutronDbPluginV2TestCase):

    def _create_network(self, fmt, name, admin_state_up, **kwargs):
        """Override the routine to allow the router:external attribute."""
        # attributes containing a colon should be passed with
        # a double underscore (see the sketch after this class)
        new_args = dict(zip(map(lambda x: x.replace('__', ':'), kwargs),
                            kwargs.values()))
        arg_list = new_args.pop('arg_list', ()) + (external_net.EXTERNAL,)
        return super(ExtNetDBTestCase, self)._create_network(
            fmt, name, admin_state_up, arg_list=arg_list, **new_args)

    def setUp(self):
        plugin = 'neutron.tests.unit.extensions.test_l3.TestNoL3NatPlugin'
        ext_mgr = ExtNetTestExtensionManager()
        super(ExtNetDBTestCase, self).setUp(plugin=plugin, ext_mgr=ext_mgr)

    def _set_net_external(self, net_id):
        self._update('networks', net_id,
                     {'network': {external_net.EXTERNAL: True}})

    def test_list_nets_external(self):
        with self.network() as n1:
            self._set_net_external(n1['network']['id'])
            with self.network():
                body = self._list('networks')
                self.assertEqual(2, len(body['networks']))

                body = self._list('networks',
                                  query_params="%s=True" %
                                               external_net.EXTERNAL)
                self.assertEqual(1, len(body['networks']))

                body = self._list('networks',
                                  query_params="%s=False" %
                                               external_net.EXTERNAL)
                self.assertEqual(1, len(body['networks']))

    def test_list_nets_external_pagination(self):
        if self._skip_native_pagination:
            self.skipTest("Skip test for not implemented pagination feature")
        with self.network(name='net1') as n1, self.network(name='net3') as n3:
            self._set_net_external(n1['network']['id'])
            self._set_net_external(n3['network']['id'])
            with self.network(name='net2') as n2:
                self._test_list_with_pagination(
                    'network', (n1, n3), ('name', 'asc'), 1, 3,
                    query_params='router:external=True')
                self._test_list_with_pagination(
                    'network', (n2, ), ('name', 'asc'), 1, 2,
                    query_params='router:external=False')

    def test_get_network_succeeds_without_filter(self):
        plugin = manager.NeutronManager.get_plugin()
        ctx = context.Context(None, None, is_admin=True)
        result = plugin.get_networks(ctx, filters=None)
        self.assertEqual([], result)

    def test_update_network_set_external_non_admin_fails(self):
        # Assert that a non-admin user cannot update the
        # router:external attribute
        with self.network(tenant_id='noadmin') as network:
            data = {'network': {'router:external': True}}
            req = self.new_update_request('networks',
                                          data,
                                          network['network']['id'])
            req.environ['neutron.context'] = context.Context('', 'noadmin')
            res = req.get_response(self.api)
            self.assertEqual(exc.HTTPForbidden.code, res.status_int)

    def test_update_network_external_net_with_ports_set_not_shared(self):
        with self.network(router__external=True, shared=True) as ext_net,\
                self.subnet(network=ext_net) as ext_subnet, \
                self.port(subnet=ext_subnet,
                          tenant_id='',
                          device_owner=constants.DEVICE_OWNER_ROUTER_SNAT):
            data = {'network': {'shared': False}}
            req = self.new_update_request('networks',
                                          data,
                                          ext_net['network']['id'])
            res = req.get_response(self.api)
            self.assertEqual(exc.HTTPOk.code, res.status_int)
            ctx = context.Context(None, None, is_admin=True)
            plugin = manager.NeutronManager.get_plugin()
            result = plugin.get_networks(ctx)
            self.assertFalse(result[0]['shared'])

    def test_network_filter_hook_admin_context(self):
        plugin = manager.NeutronManager.get_plugin()
        ctx = context.Context(None, None, is_admin=True)
        model = models_v2.Network
        conditions = plugin._network_filter_hook(ctx, model, [])
        self.assertEqual([], conditions)

    def test_network_filter_hook_nonadmin_context(self):
        plugin = manager.NeutronManager.get_plugin()
        ctx = context.Context('edinson', 'cavani')
        model = models_v2.Network
        txt = ("networkrbacs.action = :action_1 AND "
               "networkrbacs.target_tenant = :target_tenant_1 OR "
               "networkrbacs.target_tenant = :target_tenant_2")
        conditions = plugin._network_filter_hook(ctx, model, [])
        self.assertEqual(conditions.__str__(), txt)
        # Try to concatenate conditions
        txt2 = (txt.replace('tenant_1', 'tenant_3').
                replace('tenant_2', 'tenant_4').
                replace('action_1', 'action_2'))
        conditions = plugin._network_filter_hook(ctx, model, conditions)
        self.assertEqual(conditions.__str__(), "%s OR %s" % (txt, txt2))

    def test_create_port_external_network_non_admin_fails(self):
        with self.network(router__external=True) as ext_net:
            with self.subnet(network=ext_net) as ext_subnet:
                with testtools.ExpectedException(
                        exc.HTTPClientError) as ctx_manager:
                    with self.port(subnet=ext_subnet,
                                   set_context='True',
                                   tenant_id='noadmin'):
                        pass
                    self.assertEqual(403, ctx_manager.exception.code)

    def test_create_port_external_network_admin_succeeds(self):
        with self.network(router__external=True) as ext_net:
            with self.subnet(network=ext_net) as ext_subnet:
                with self.port(subnet=ext_subnet) as port:
                    self.assertEqual(port['port']['network_id'],
                                     ext_net['network']['id'])

    def test_create_external_network_non_admin_fails(self):
        with testtools.ExpectedException(exc.HTTPClientError) as ctx_manager:
            with self.network(router__external=True,
                              set_context='True',
                              tenant_id='noadmin'):
                pass
            self.assertEqual(403, ctx_manager.exception.code)

    def test_create_external_network_admin_succeeds(self):
        with self.network(router__external=True) as ext_net:
            self.assertTrue(ext_net['network'][external_net.EXTERNAL])

    def test_delete_network_check_disassociated_floatingips(self):
        with mock.patch.object(manager.NeutronManager,
                               'get_service_plugins') as srv_plugins:
            l3_mock = mock.Mock()
            srv_plugins.return_value = {'L3_ROUTER_NAT': l3_mock}
            with self.network() as net:
                req = self.new_delete_request('networks',
                                              net['network']['id'])
                res = req.get_response(self.api)
                self.assertEqual(exc.HTTPNoContent.code, res.status_int)
                (l3_mock.delete_disassociated_floatingips
                 .assert_called_once_with(mock.ANY, net['network']['id']))
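# Illustrative sketch (not part of the tree): _create_network() above maps
# double underscores in keyword names back to colons, because Python
# identifiers cannot contain ':'. Assuming nothing beyond that convention,
# the mapping is equivalent to:

kwargs = {'router__external': True, 'shared': True}
new_args = {k.replace('__', ':'): v for k, v in kwargs.items()}
assert new_args == {'router:external': True, 'shared': True}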
neutron-8.4.0/neutron/tests/unit/extensions/test_timestamp_core.py

# Copyright 2015 HuaWei Technologies.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import datetime

import six

import mock

from neutron import context
from neutron.db import db_base_plugin_v2
from neutron.db import models_v2
from neutron.db import tag_db as tag_module
from neutron.extensions import timestamp_core as timestamp
from neutron import manager
from neutron.tests.unit.db import test_db_base_plugin_v2


class TimeStampExtensionManager(object):

    def get_resources(self):
        return []

    def get_actions(self):
        return []

    def get_request_extensions(self):
        return []

    def get_extended_resources(self, version):
        return timestamp.Timestamp_core().get_extended_resources(version)


class TimeStampTestPlugin(db_base_plugin_v2.NeutronDbPluginV2):
    """Just for test with TimeStampPlugin"""


class TimeStampChangedsinceTestCase(test_db_base_plugin_v2.
                                    NeutronDbPluginV2TestCase):
    plugin = ('neutron.tests.unit.extensions.test_timestamp_core.' +
              'TimeStampTestPlugin')

    def setUp(self):
        ext_mgr = TimeStampExtensionManager()
        super(TimeStampChangedsinceTestCase, self).setUp(plugin=self.plugin,
                                                         ext_mgr=ext_mgr)
        self.addCleanup(manager.NeutronManager.
                        get_service_plugins()['timestamp_core'].
                        unregister_db_events)
        self.addCleanup(manager.NeutronManager.clear_instance)

    def setup_coreplugin(self, core_plugin=None):
        super(TimeStampChangedsinceTestCase, self).setup_coreplugin(
            self.plugin)
        self.patched_default_svc_plugins.return_value = ['timestamp_core']

    def _get_resp_with_changed_since(self, resource_type, changed_since):
        query_params = 'changed_since=%s' % changed_since
        req = self.new_list_request('%ss' % resource_type, self.fmt,
                                    query_params)
        resources = self.deserialize(self.fmt, req.get_response(self.api))
        return resources

    def _return_by_timedelay(self, resource, timedelay):
        resource_type = six.next(six.iterkeys(resource))
        try:
            time_create = datetime.datetime.strptime(
                resource[resource_type]['updated_at'],
                '%Y-%m-%dT%H:%M:%S')
        except Exception:
            time_create = datetime.datetime.strptime(
                resource[resource_type]['updated_at'],
                '%Y-%m-%d %H:%M:%S.%f')
        time_before = datetime.timedelta(seconds=timedelay)
        addedtime_string = (datetime.datetime.
                            strftime(time_create + time_before,
                                     '%Y-%m-%dT%H:%M:%S'))
        return self._get_resp_with_changed_since(resource_type,
                                                 addedtime_string)

    def _update_test_resource_by_name(self, resource):
        resource_type = six.next(six.iterkeys(resource))
        name = resource[resource_type]['name']
        data = {resource_type: {'name': '%s_new' % name}}
        req = self.new_update_request('%ss' % resource_type,
                                      data,
                                      resource[resource_type]['id'])
        res = self.deserialize(self.fmt, req.get_response(self.api))
        return res

    def _set_timestamp_by_show(self, resource, type):
        req = self.new_show_request('%ss' % type,
                                    resource[type]['id'])
        res = self.deserialize(self.fmt, req.get_response(self.api))
        resource[type]['created_at'] = res[type]['created_at']
        resource[type]['updated_at'] = res[type]['updated_at']

    def _list_resources_with_changed_since(self, resource):
        # assert the list results contain the resource when changed_since
        # equals the resource's updated time.
        resource_type = six.next(six.iterkeys(resource))
        if resource_type in ['network', 'port']:
            self._set_timestamp_by_show(resource, resource_type)
        resources = self._get_resp_with_changed_since(
            resource_type, resource[resource_type]['updated_at'])
        self.assertEqual(resource[resource_type]['id'],
                         resources[resource_type + 's'][0]['id'])

        # assert the list results contain the resource when changed_since
        # is earlier than the resource's updated time.
        resources = self._return_by_timedelay(resource, -1)
        self.assertEqual(resource[resource_type]['id'],
                         resources[resource_type + 's'][0]['id'])

        # assert the list result is empty when changed_since is later
        # than the resource's updated time.
        resources = self._return_by_timedelay(resource, 1)
        self.assertEqual([], resources[resource_type + 's'])

    def _test_list_mutiple_resources_with_changed_since(self, first, second):
        resource_type = six.next(six.iterkeys(first))
        if resource_type in ['network', 'port']:
            self._set_timestamp_by_show(first, resource_type)
            self._set_timestamp_by_show(second, resource_type)

        # update the name of second; ordered by updated_at the queue
        # is now first < new_second
        new_second = self._update_test_resource_by_name(second)

        # test changed_since < first's updated_at
        resources = self._return_by_timedelay(first, -1)
        for resource in [first[resource_type]['id'],
                         new_second[resource_type]['id']]:
            self.assertIn(resource,
                          [n['id'] for n in resources[resource_type + 's']])

        # test changed_since = first's updated_at
        resources = self._return_by_timedelay(first, 0)
        for resource in [first[resource_type]['id'],
                         new_second[resource_type]['id']]:
            self.assertIn(resource,
                          [n['id'] for n in resources[resource_type + 's']])

        # test first < changed_since < second
        resources = self._return_by_timedelay(new_second, -1)
        self.assertIn(new_second[resource_type]['id'],
                      [n['id'] for n in resources[resource_type + 's']])

        # test first < changed_since = second
        resources = self._return_by_timedelay(new_second, 0)
        self.assertIn(new_second[resource_type]['id'],
                      [n['id'] for n in resources[resource_type + 's']])

        # test first < second < changed_since
        resources = self._return_by_timedelay(new_second, 3)
        self.assertEqual({resource_type + 's': []}, resources)

    def test_list_networks_with_changed_since(self):
        with self.network('net1') as net:
            self._list_resources_with_changed_since(net)

    def test_list_subnets_with_changed_since(self):
        with self.network('net2') as net:
            with self.subnet(network=net) as subnet:
                self._list_resources_with_changed_since(subnet)

    def test_list_ports_with_changed_since(self):
        with self.network('net3') as net:
            with self.subnet(network=net) as subnet:
                with self.port(subnet=subnet) as port:
                    self._list_resources_with_changed_since(port)

    def test_list_subnetpools_with_changed_since(self):
        prefixs = ['3.3.3.3/24', '4.4.4.4/24']
        with self.subnetpool(prefixs, tenant_id='tenant_one',
                             name='sp_test02') as subnetpool:
            self._list_resources_with_changed_since(subnetpool)

    def test_list_mutiple_networks_with_changed_since(self):
        with self.network('net1') as net1, self.network('net2') as net2:
            self._test_list_mutiple_resources_with_changed_since(net1, net2)

    def test_list_mutiple_subnets_with_changed_since(self):
        with self.network('net1') as net1, self.network('net2') as net2:
            with self.subnet(network=net1) as subnet1, self.subnet(
                    network=net2) as subnet2:
                self._test_list_mutiple_resources_with_changed_since(
                    subnet1, subnet2)

    def test_list_mutiple_subnetpools_with_changed_since(self):
        prefixs1 = ['3.3.3.3/24', '4.4.4.4/24']
        prefixs2 = ['5.5.5.5/24', '6.6.6.6/24']
        with self.subnetpool(prefixs1,
                             tenant_id='tenant_one',
                             name='sp01') as sp1:
            with self.subnetpool(prefixs2,
                                 tenant_id='tenant_one',
                                 name='sp02') as sp2:
                self._test_list_mutiple_resources_with_changed_since(sp1,
                                                                     sp2)

    def test_list_mutiple_ports_with_changed_since(self):
        with self.network('net') as net:
            with self.subnet(network=net) as subnet:
                with self.port(subnet=subnet) as p1, self.port(
                        subnet=subnet) as p2:
                    self._test_list_mutiple_resources_with_changed_since(
                        p1, p2)

    def test_list_resources_with_invalid_changed_since(self):
        # check that when --changed-since is passed with no argument the
        # filter is stored as 'True', and also check other invalid inputs
        changed_sinces = ['123', 'True', 'AAAA-BB-CCTDD-EE-FFZ',
                          '9a9b-11-00T99-1a-r3Z',
                          '0000-00-00T00-00-00Z']
        for resource in ['network', 'subnet', 'port', 'subnetpool']:
            for changed_since in changed_sinces:
                req = self.new_list_request('%ss' % resource, self.fmt,
                                            'changed_since=%s' %
                                            changed_since)
                res = self.deserialize(self.fmt, req.get_response(self.api))
                self.assertEqual(list(res.values())[0]['type'],
                                 'InvalidInput')


class TimeStampDBMixinTestCase(TimeStampChangedsinceTestCase):
    """Test timestamp_db.TimeStamp_db_mixin()"""

    def setUp(self):
        super(TimeStampDBMixinTestCase, self).setUp()

    def _save_network(self, network_id):
        ctx = context.get_admin_context()
        with ctx.session.begin(subtransactions=True):
            ctx.session.add(models_v2.Network(id=network_id))
        network = ctx.session.query(models_v2.Network).one()
        return network.standard_attr_id

    # Use tag as a non-StandardAttribute object
    def _save_tag(self, tags, standard_attr_id):
        ctx = context.get_admin_context()
        for tag in tags:
            with ctx.session.begin(subtransactions=True):
                tag_db = tag_module.Tag(standard_attr_id=standard_attr_id,
                                        tag=tag)
                ctx.session.add(tag_db)

    def test_update_timpestamp(self):
        network_id = "foo_network_id"
        tags = ["red", "blue"]
        with mock.patch('oslo_utils.timeutils.utcnow') as timenow:
            timenow.return_value = datetime.datetime(2016, 3, 11, 0, 0)

            # Test that a StandardAttribute object is updated
            standard_attr_id = self._save_network(network_id)
            self.assertEqual(1, timenow.call_count)

            # Test that a non-StandardAttribute object is not updated
            self._save_tag(tags, standard_attr_id)
            self.assertEqual(1, timenow.call_count)
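# Illustrative sketch (not from the tree): the changed_since filter
# exercised above accepts ISO 8601 timestamps of the form
# YYYY-MM-DDTHH:MM:SS, e.g. a request such as
#     GET /v2.0/networks?changed_since=2016-03-11T00:00:00
# The comparison the tests rely on is, in effect:

import datetime

updated_at = datetime.datetime(2016, 3, 11, 0, 0, 5)
changed_since = datetime.datetime.strptime('2016-03-11T00:00:00',
                                           '%Y-%m-%dT%H:%M:%S')
include_in_results = updated_at >= changed_since
assert include_in_results  # updated 5 seconds after the cutoff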
neutron-8.4.0/neutron/tests/unit/extensions/extendedattribute.py

# Copyright 2013 VMware, Inc.  All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from neutron.api import extensions

EXTENDED_ATTRIBUTE = 'extended_attribute'

EXTENDED_ATTRIBUTES_2_0 = {
    'ext_test_resources': {
        EXTENDED_ATTRIBUTE: {'allow_post': True,
                             'allow_put': False,
                             'validate': {'type:uuid_or_none': None},
                             'default': None,
                             'is_visible': True},
    }
}


class Extendedattribute(extensions.ExtensionDescriptor):
    """Extension class supporting extended attribute for router."""

    @classmethod
    def get_name(cls):
        return "Extended Extension Attributes"

    @classmethod
    def get_alias(cls):
        return "extended-ext-attr"

    @classmethod
    def get_description(cls):
        return "Provides extended_attr attribute to router"

    @classmethod
    def get_updated(cls):
        return "2013-02-05T00:00:00-00:00"

    def get_extended_resources(self, version):
        if version == "2.0":
            return EXTENDED_ATTRIBUTES_2_0
        else:
            return {}

neutron-8.4.0/neutron/tests/unit/extensions/test_portsecurity.py

# Copyright (c) 2012 OpenStack Foundation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from webob import exc

from neutron.api.v2 import attributes as attr
from neutron import context
from neutron.db import db_base_plugin_v2
from neutron.db import portsecurity_db
from neutron.db import securitygroups_db
from neutron.extensions import portsecurity as psec
from neutron.extensions import securitygroup as ext_sg
from neutron import manager
from neutron.tests.unit.db import test_db_base_plugin_v2
from neutron.tests.unit.extensions import test_securitygroup

DB_PLUGIN_KLASS = ('neutron.tests.unit.extensions.test_portsecurity.'
                   'PortSecurityTestPlugin')


class PortSecurityTestCase(
        test_securitygroup.SecurityGroupsTestCase,
        test_db_base_plugin_v2.NeutronDbPluginV2TestCase):

    def setUp(self, plugin=None):
        ext_mgr = (
            test_securitygroup.SecurityGroupTestExtensionManager())
        super(PortSecurityTestCase, self).setUp(plugin=plugin,
                                                ext_mgr=ext_mgr)

        # Check if the plugin supports security groups
        plugin_obj = manager.NeutronManager.get_plugin()
        self._skip_security_group = ('security-group' not in
                                     plugin_obj.supported_extension_aliases)

    def tearDown(self):
        super(PortSecurityTestCase, self).tearDown()
        self._skip_security_group = None
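# Illustrative sketch (not part of the module): the plugin below enforces
# the invariant that a port may carry security groups only while it has
# port security enabled and at least one fixed IP. Expressed on its own,
# assuming plain booleans:

def security_groups_allowed(port_security_enabled, has_ip):
    """Security groups require both port security and an IP address."""
    return port_security_enabled and has_ip

# security_groups_allowed(True, True)  -> True: groups may be bound
# security_groups_allowed(True, False) -> False: the real plugin raises
#     PortSecurityAndIPRequiredForSecurityGroups in this case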
""" supported_extension_aliases = ["security-group", "port-security"] def create_network(self, context, network): tenant_id = network['network'].get('tenant_id') self._ensure_default_security_group(context, tenant_id) with context.session.begin(subtransactions=True): neutron_db = super(PortSecurityTestPlugin, self).create_network( context, network) neutron_db.update(network['network']) self._process_network_port_security_create( context, network['network'], neutron_db) return neutron_db def update_network(self, context, id, network): with context.session.begin(subtransactions=True): neutron_db = super(PortSecurityTestPlugin, self).update_network( context, id, network) if psec.PORTSECURITY in network['network']: self._process_network_port_security_update( context, network['network'], neutron_db) return neutron_db def get_network(self, context, id, fields=None): with context.session.begin(subtransactions=True): net = super(PortSecurityTestPlugin, self).get_network( context, id) return self._fields(net, fields) def create_port(self, context, port): p = port['port'] with context.session.begin(subtransactions=True): p[ext_sg.SECURITYGROUPS] = self._get_security_groups_on_port( context, port) neutron_db = super(PortSecurityTestPlugin, self).create_port( context, port) p.update(neutron_db) (port_security, has_ip) = self._determine_port_security_and_has_ip( context, p) p[psec.PORTSECURITY] = port_security self._process_port_port_security_create(context, p, neutron_db) if (attr.is_attr_set(p.get(ext_sg.SECURITYGROUPS)) and not (port_security and has_ip)): raise psec.PortSecurityAndIPRequiredForSecurityGroups() # Port requires ip and port_security enabled for security group if has_ip and port_security: self._ensure_default_security_group_on_port(context, port) if (p.get(ext_sg.SECURITYGROUPS) and p[psec.PORTSECURITY]): self._process_port_create_security_group( context, p, p[ext_sg.SECURITYGROUPS]) return port['port'] def update_port(self, context, id, port): delete_security_groups = self._check_update_deletes_security_groups( port) has_security_groups = self._check_update_has_security_groups(port) with context.session.begin(subtransactions=True): ret_port = super(PortSecurityTestPlugin, self).update_port( context, id, port) # copy values over - but not fixed_ips port['port'].pop('fixed_ips', None) ret_port.update(port['port']) # populate port_security setting if psec.PORTSECURITY not in ret_port: ret_port[psec.PORTSECURITY] = self._get_port_security_binding( context, id) has_ip = self._ip_on_port(ret_port) # checks if security groups were updated adding/modifying # security groups, port security is set and port has ip if (has_security_groups and (not ret_port[psec.PORTSECURITY] or not has_ip)): raise psec.PortSecurityAndIPRequiredForSecurityGroups() # Port security/IP was updated off. Need to check that no security # groups are on port. if ret_port[psec.PORTSECURITY] is not True or not has_ip: if has_security_groups: raise psec.PortSecurityAndIPRequiredForSecurityGroups() # get security groups on port filters = {'port_id': [id]} security_groups = (super(PortSecurityTestPlugin, self). _get_port_security_group_bindings( context, filters)) if security_groups and not delete_security_groups: raise psec.PortSecurityPortHasSecurityGroup() if (delete_security_groups or has_security_groups): # delete the port binding and read it with the new rules. 
self._delete_port_security_group_bindings(context, id) sgids = self._get_security_groups_on_port(context, port) # process port create sec groups needs port id port['id'] = id self._process_port_create_security_group(context, ret_port, sgids) if psec.PORTSECURITY in port['port']: self._process_port_port_security_update( context, port['port'], ret_port) return ret_port class PortSecurityDBTestCase(PortSecurityTestCase): def setUp(self, plugin=None, service_plugins=None): plugin = plugin or DB_PLUGIN_KLASS super(PortSecurityDBTestCase, self).setUp(plugin) class TestPortSecurity(PortSecurityDBTestCase): def test_create_network_with_portsecurity_mac(self): res = self._create_network('json', 'net1', True) net = self.deserialize('json', res) self.assertTrue(net['network'][psec.PORTSECURITY]) def test_create_network_with_portsecurity_false(self): res = self._create_network('json', 'net1', True, arg_list=('port_security_enabled',), port_security_enabled=False) net = self.deserialize('json', res) self.assertFalse(net['network'][psec.PORTSECURITY]) def test_updating_network_port_security(self): res = self._create_network('json', 'net1', True, port_security_enabled='True') net = self.deserialize('json', res) self.assertTrue(net['network'][psec.PORTSECURITY]) update_net = {'network': {psec.PORTSECURITY: False}} req = self.new_update_request('networks', update_net, net['network']['id']) net = self.deserialize('json', req.get_response(self.api)) self.assertFalse(net['network'][psec.PORTSECURITY]) req = self.new_show_request('networks', net['network']['id']) net = self.deserialize('json', req.get_response(self.api)) self.assertFalse(net['network'][psec.PORTSECURITY]) def test_create_port_default_true(self): with self.network() as net: res = self._create_port('json', net['network']['id']) port = self.deserialize('json', res) self.assertTrue(port['port'][psec.PORTSECURITY]) self._delete('ports', port['port']['id']) def test_create_port_passing_true(self): res = self._create_network('json', 'net1', True, arg_list=('port_security_enabled',), port_security_enabled=True) net = self.deserialize('json', res) res = self._create_port('json', net['network']['id']) port = self.deserialize('json', res) self.assertTrue(port['port'][psec.PORTSECURITY]) self._delete('ports', port['port']['id']) def test_create_port_on_port_security_false_network(self): res = self._create_network('json', 'net1', True, arg_list=('port_security_enabled',), port_security_enabled=False) net = self.deserialize('json', res) res = self._create_port('json', net['network']['id']) port = self.deserialize('json', res) self.assertFalse(port['port'][psec.PORTSECURITY]) self._delete('ports', port['port']['id']) def test_create_port_security_overrides_network_value(self): res = self._create_network('json', 'net1', True, arg_list=('port_security_enabled',), port_security_enabled=False) net = self.deserialize('json', res) res = self._create_port('json', net['network']['id'], arg_list=('port_security_enabled',), port_security_enabled=True) port = self.deserialize('json', res) self.assertTrue(port['port'][psec.PORTSECURITY]) self._delete('ports', port['port']['id']) def test_create_port_fails_with_secgroup_and_port_security_false(self): if self._skip_security_group: self.skipTest("Plugin does not support security groups") with self.network() as net: with self.subnet(network=net): security_group = self.deserialize( 'json', self._create_security_group(self.fmt, 'asdf', 'asdf')) security_group_id = security_group['security_group']['id'] res = 
self._create_port('json', net['network']['id'], arg_list=('security_groups', 'port_security_enabled'), security_groups=[security_group_id], port_security_enabled=False) self.assertEqual(400, res.status_int) def test_create_port_with_default_security_group(self): if self._skip_security_group: self.skipTest("Plugin does not support security groups") with self.network() as net: with self.subnet(network=net): res = self._create_port('json', net['network']['id']) port = self.deserialize('json', res) self.assertTrue(port['port'][psec.PORTSECURITY]) self.assertEqual(1, len(port['port'][ext_sg.SECURITYGROUPS])) self._delete('ports', port['port']['id']) def test_create_port_with_security_group_and_net_sec_false(self): # This tests that port_security_enabled is true when creating # a port on a network that is marked as port_security_enabled=False # that has a subnet and security_groups are passed it. if self._skip_security_group: self.skipTest("Plugin does not support security groups") res = self._create_network('json', 'net1', True, arg_list=('port_security_enabled',), port_security_enabled=False) net = self.deserialize('json', res) self._create_subnet('json', net['network']['id'], '10.0.0.0/24') security_group = self.deserialize( 'json', self._create_security_group(self.fmt, 'asdf', 'asdf')) security_group_id = security_group['security_group']['id'] res = self._create_port('json', net['network']['id'], arg_list=('security_groups', 'port_security_enabled'), port_security_enabled=True, security_groups=[security_group_id]) port = self.deserialize('json', res) self.assertTrue(port['port'][psec.PORTSECURITY]) self.assertEqual(port['port']['security_groups'], [security_group_id]) self._delete('ports', port['port']['id']) def test_create_port_without_security_group_and_net_sec_false(self): res = self._create_network('json', 'net1', True, arg_list=('port_security_enabled',), port_security_enabled=False) net = self.deserialize('json', res) self._create_subnet('json', net['network']['id'], '10.0.0.0/24') res = self._create_port('json', net['network']['id']) port = self.deserialize('json', res) self.assertFalse(port['port'][psec.PORTSECURITY]) self._delete('ports', port['port']['id']) def test_update_port_security_off_with_security_group(self): if self._skip_security_group: self.skipTest("Plugin does not support security groups") with self.network() as net: with self.subnet(network=net): res = self._create_port('json', net['network']['id']) port = self.deserialize('json', res) self.assertTrue(port['port'][psec.PORTSECURITY]) update_port = {'port': {psec.PORTSECURITY: False}} req = self.new_update_request('ports', update_port, port['port']['id']) res = req.get_response(self.api) self.assertEqual(409, res.status_int) # remove security group on port update_port = {'port': {ext_sg.SECURITYGROUPS: None}} req = self.new_update_request('ports', update_port, port['port']['id']) self.deserialize('json', req.get_response(self.api)) self._delete('ports', port['port']['id']) def test_update_port_remove_port_security_security_group(self): if self._skip_security_group: self.skipTest("Plugin does not support security groups") with self.network() as net: with self.subnet(network=net): res = self._create_port('json', net['network']['id'], arg_list=('port_security_enabled',), port_security_enabled=True) port = self.deserialize('json', res) self.assertTrue(port['port'][psec.PORTSECURITY]) # remove security group on port update_port = {'port': {ext_sg.SECURITYGROUPS: None, psec.PORTSECURITY: False}} req = 
self.new_update_request('ports', update_port, port['port']['id']) port = self.deserialize('json', req.get_response(self.api)) self.assertFalse(port['port'][psec.PORTSECURITY]) self.assertEqual(0, len(port['port'][ext_sg.SECURITYGROUPS])) self._delete('ports', port['port']['id']) def test_update_port_remove_port_security_security_group_read(self): if self._skip_security_group: self.skipTest("Plugin does not support security groups") with self.network() as net: with self.subnet(network=net): res = self._create_port('json', net['network']['id'], arg_list=('port_security_enabled',), port_security_enabled=True) port = self.deserialize('json', res) self.assertTrue(port['port'][psec.PORTSECURITY]) # remove security group on port update_port = {'port': {ext_sg.SECURITYGROUPS: None, psec.PORTSECURITY: False}} req = self.new_update_request('ports', update_port, port['port']['id']) self.deserialize('json', req.get_response(self.api)) sg_id = port['port'][ext_sg.SECURITYGROUPS] update_port = {'port': {ext_sg.SECURITYGROUPS: [sg_id[0]], psec.PORTSECURITY: True}} req = self.new_update_request('ports', update_port, port['port']['id']) port = self.deserialize('json', req.get_response(self.api)) self.assertTrue(port['port'][psec.PORTSECURITY]) self.assertEqual(1, len(port['port'][ext_sg.SECURITYGROUPS])) self._delete('ports', port['port']['id']) def test_create_port_security_off_shared_network(self): with self.network(shared=True) as net: with self.subnet(network=net): res = self._create_port('json', net['network']['id'], arg_list=('port_security_enabled',), port_security_enabled=False, tenant_id='not_network_owner', set_context=True) self.deserialize('json', res) self.assertEqual(403, res.status_int) def test_update_port_security_off_shared_network(self): with self.network(shared=True) as net: with self.subnet(network=net): res = self._create_port('json', net['network']['id'], tenant_id='not_network_owner', set_context=True) port = self.deserialize('json', res) # remove security group on port update_port = {'port': {ext_sg.SECURITYGROUPS: None, psec.PORTSECURITY: False}} req = self.new_update_request('ports', update_port, port['port']['id']) req.environ['neutron.context'] = context.Context( '', 'not_network_owner') res = req.get_response(self.api) self.assertEqual(exc.HTTPForbidden.code, res.status_int) neutron-8.4.0/neutron/tests/unit/extensions/test_bgp_dragentscheduler.py0000664000567000056710000002153313044372760030166 0ustar jenkinsjenkins00000000000000# Copyright 2016 Huawei Technologies India Pvt. Ltd. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import testscenarios from oslo_utils import importutils from neutron import context from neutron.db import bgp_db from neutron.db import bgp_dragentscheduler_db as bgp_dras_db from neutron.services.bgp.scheduler import bgp_dragent_scheduler as bgp_dras from neutron.tests.common import helpers from neutron.tests.unit import testlib_api # Required to generate tests from scenarios. Not compatible with nose. 
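# Illustrative sketch (not from the tree): the 409 asserted in
# test_update_port_security_off_with_security_group reflects the ordering
# constraint that security groups must be cleared before, or together
# with, disabling port security. A client would therefore send both keys
# in a single update body:

update_body = {'port': {'security_groups': None,
                        'port_security_enabled': False}}
# PUT to the port with this body succeeds, whereas sending only
# {'port_security_enabled': False} while groups are still bound conflicts.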
neutron-8.4.0/neutron/tests/unit/extensions/test_bgp_dragentscheduler.py

# Copyright 2016 Huawei Technologies India Pvt. Ltd.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import testscenarios

from oslo_utils import importutils

from neutron import context
from neutron.db import bgp_db
from neutron.db import bgp_dragentscheduler_db as bgp_dras_db
from neutron.services.bgp.scheduler import bgp_dragent_scheduler as bgp_dras
from neutron.tests.common import helpers
from neutron.tests.unit import testlib_api

# Required to generate tests from scenarios. Not compatible with nose.
load_tests = testscenarios.load_tests_apply_scenarios


class TestBgpDrAgentSchedulerBaseTestCase(testlib_api.SqlTestCase):

    def setUp(self):
        super(TestBgpDrAgentSchedulerBaseTestCase, self).setUp()
        self.ctx = context.get_admin_context()
        self.bgp_speaker = {'id': 'foo_bgp_speaker_id'}
        self.bgp_speaker_id = 'foo_bgp_speaker_id'
        self._save_bgp_speaker(self.bgp_speaker_id)

    def _create_and_set_agents_down(self, hosts, down_agent_count=0,
                                    admin_state_up=True):
        agents = []
        for i, host in enumerate(hosts):
            is_alive = i >= down_agent_count
            agents.append(helpers.register_bgp_dragent(
                host,
                admin_state_up=admin_state_up,
                alive=is_alive))
        return agents

    def _save_bgp_speaker(self, bgp_speaker_id):
        cls = bgp_db.BgpDbMixin()
        bgp_speaker_body = {
            'bgp_speaker': {'ip_version': '4',
                            'name': 'test-speaker',
                            'local_as': '123',
                            'advertise_floating_ip_host_routes': '0',
                            'advertise_tenant_networks': '0',
                            'peers': [],
                            'networks': []}}
        cls._save_bgp_speaker(self.ctx, bgp_speaker_body,
                              uuid=bgp_speaker_id)

    def _test_schedule_bind_bgp_speaker(self, agents, bgp_speaker_id):
        scheduler = bgp_dras.ChanceScheduler()
        scheduler.resource_filter.bind(self.ctx, agents, bgp_speaker_id)
        results = self.ctx.session.query(
            bgp_dras_db.BgpSpeakerDrAgentBinding).filter_by(
                bgp_speaker_id=bgp_speaker_id).all()

        for result in results:
            self.assertEqual(bgp_speaker_id, result.bgp_speaker_id)


class TestBgpDrAgentScheduler(TestBgpDrAgentSchedulerBaseTestCase,
                              bgp_db.BgpDbMixin):

    def test_schedule_bind_bgp_speaker_single_agent(self):
        agents = self._create_and_set_agents_down(['host-a'])
        self._test_schedule_bind_bgp_speaker(agents, self.bgp_speaker_id)

    def test_schedule_bind_bgp_speaker_multi_agents(self):
        agents = self._create_and_set_agents_down(['host-a', 'host-b'])
        self._test_schedule_bind_bgp_speaker(agents, self.bgp_speaker_id)


class TestBgpAgentFilter(TestBgpDrAgentSchedulerBaseTestCase,
                         bgp_db.BgpDbMixin,
                         bgp_dras_db.BgpDrAgentSchedulerDbMixin):

    def setUp(self):
        super(TestBgpAgentFilter, self).setUp()
        self.bgp_drscheduler = importutils.import_object(
            'neutron.services.bgp.scheduler'
            '.bgp_dragent_scheduler.ChanceScheduler'
        )
        self.plugin = self

    def _test_filter_agents_helper(self, bgp_speaker,
                                   expected_filtered_dragent_ids=None,
                                   expected_num_agents=1):
        if not expected_filtered_dragent_ids:
            expected_filtered_dragent_ids = []

        filtered_agents = (
            self.plugin.bgp_drscheduler.resource_filter.filter_agents(
                self.plugin, self.ctx, bgp_speaker))
        self.assertEqual(expected_num_agents,
                         filtered_agents['n_agents'])
        actual_filtered_dragent_ids = [
            agent.id for agent in filtered_agents['hostable_agents']]
        self.assertEqual(len(expected_filtered_dragent_ids),
                         len(actual_filtered_dragent_ids))
        for filtered_agent_id in actual_filtered_dragent_ids:
            self.assertIn(filtered_agent_id, expected_filtered_dragent_ids)

    def test_filter_agents_single_agent(self):
        agents = self._create_and_set_agents_down(['host-a'])
        expected_filtered_dragent_ids = [agents[0].id]
        self._test_filter_agents_helper(
            self.bgp_speaker,
            expected_filtered_dragent_ids=expected_filtered_dragent_ids)

    def test_filter_agents_no_agents(self):
        expected_filtered_dragent_ids = []
        self._test_filter_agents_helper(
            self.bgp_speaker,
            expected_filtered_dragent_ids=expected_filtered_dragent_ids,
            expected_num_agents=0)

    def test_filter_agents_two_agents(self):
        agents = self._create_and_set_agents_down(['host-a', 'host-b'])
        expected_filtered_dragent_ids = [agent.id for agent in agents]
        self._test_filter_agents_helper(
            self.bgp_speaker,
            expected_filtered_dragent_ids=expected_filtered_dragent_ids)

    def test_filter_agents_agent_already_scheduled(self):
        agents = self._create_and_set_agents_down(['host-a', 'host-b'])
        self._test_schedule_bind_bgp_speaker([agents[0]],
                                             self.bgp_speaker_id)
        self._test_filter_agents_helper(self.bgp_speaker,
                                        expected_num_agents=0)

    def test_filter_agents_multiple_agents_bgp_speakers(self):
        agents = self._create_and_set_agents_down(['host-a', 'host-b'])
        self._test_schedule_bind_bgp_speaker([agents[0]],
                                             self.bgp_speaker_id)
        bgp_speaker = {'id': 'bar-speaker-id'}
        self._save_bgp_speaker(bgp_speaker['id'])
        expected_filtered_dragent_ids = [agents[1].id]
        self._test_filter_agents_helper(
            bgp_speaker,
            expected_filtered_dragent_ids=expected_filtered_dragent_ids)


class TestAutoScheduleBgpSpeakers(TestBgpDrAgentSchedulerBaseTestCase):
    """Unit test scenarios for schedule_unscheduled_bgp_speakers.

    bgp_speaker_present
        BGP speaker is present or not

    scheduled_already
        BGP speaker is already scheduled to the agent or not

    agent_down
        BGP DRAgent is down or alive

    valid_host
        If true, a valid host is passed to schedule the BGP speaker;
        otherwise an invalid host is passed.
    """
    scenarios = [
        ('BGP speaker present',
         dict(bgp_speaker_present=True,
              scheduled_already=False,
              agent_down=False,
              valid_host=True,
              expected_result=True)),

        ('No BGP speaker',
         dict(bgp_speaker_present=False,
              scheduled_already=False,
              agent_down=False,
              valid_host=True,
              expected_result=False)),

        ('BGP speaker already scheduled',
         dict(bgp_speaker_present=True,
              scheduled_already=True,
              agent_down=False,
              valid_host=True,
              expected_result=False)),

        ('BGP DR agent down',
         dict(bgp_speaker_present=True,
              scheduled_already=False,
              agent_down=True,
              valid_host=False,
              expected_result=False)),

        ('Invalid host',
         dict(bgp_speaker_present=True,
              scheduled_already=False,
              agent_down=False,
              valid_host=False,
              expected_result=False)),
    ]

    def test_auto_schedule_bgp_speaker(self):
        scheduler = bgp_dras.ChanceScheduler()
        if self.bgp_speaker_present:
            down_agent_count = 1 if self.agent_down else 0
            agents = self._create_and_set_agents_down(
                ['host-a'], down_agent_count=down_agent_count)
            if self.scheduled_already:
                self._test_schedule_bind_bgp_speaker(agents,
                                                     self.bgp_speaker_id)

        expected_hosted_agents = (1 if self.bgp_speaker_present and
                                  self.valid_host else 0)
        host = "host-a" if self.valid_host else "host-b"
        observed_ret_value = scheduler.schedule_unscheduled_bgp_speakers(
            self.ctx, host)
        self.assertEqual(self.expected_result, observed_ret_value)
        hosted_agents = self.ctx.session.query(
            bgp_dras_db.BgpSpeakerDrAgentBinding).all()
        self.assertEqual(expected_hosted_agents, len(hosted_agents))
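# Illustrative sketch (not from the tree): testscenarios, as used above,
# multiplies each test method by every entry in the class-level
# `scenarios` list, injecting each dict's values as instance attributes.
# A self-contained example, assuming only testscenarios and unittest:

import unittest

import testscenarios


class TestParity(testscenarios.TestWithScenarios):
    scenarios = [
        ('even', dict(value=4, expected=True)),
        ('odd', dict(value=5, expected=False)),
    ]

    def test_is_even(self):
        # Runs once per scenario: test_is_even(even) and test_is_even(odd)
        self.assertEqual(self.expected, self.value % 2 == 0)


if __name__ == '__main__':
    unittest.main()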
neutron-8.4.0/neutron/tests/unit/extensions/test_address_scope.py

# Copyright (c) 2015 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import contextlib

import mock
import netaddr
import webob.exc

from neutron.api.v2 import attributes as attr
from neutron.callbacks import events
from neutron.callbacks import registry
from neutron.callbacks import resources
from neutron.common import constants
from neutron import context
from neutron.db import address_scope_db
from neutron.db import db_base_plugin_v2
from neutron.extensions import address_scope as ext_address_scope
from neutron.tests.unit.db import test_db_base_plugin_v2

DB_PLUGIN_KLASS = ('neutron.tests.unit.extensions.test_address_scope.'
                   'AddressScopeTestPlugin')


class AddressScopeTestExtensionManager(object):

    def get_resources(self):
        # Add the resources to the global attribute map.
        # This is done here as the setup process won't
        # initialize the main API router which extends
        # the global attribute map
        attr.RESOURCE_ATTRIBUTE_MAP.update(
            ext_address_scope.RESOURCE_ATTRIBUTE_MAP)
        return ext_address_scope.Address_scope.get_resources()

    def get_actions(self):
        return []

    def get_request_extensions(self):
        return []


class AddressScopeTestCase(test_db_base_plugin_v2.NeutronDbPluginV2TestCase):

    def _create_address_scope(self, fmt, ip_version=constants.IP_VERSION_4,
                              expected_res_status=None, admin=False,
                              **kwargs):
        address_scope = {'address_scope': {}}
        address_scope['address_scope']['ip_version'] = ip_version
        for k, v in kwargs.items():
            address_scope['address_scope'][k] = str(v)

        address_scope_req = self.new_create_request('address-scopes',
                                                    address_scope, fmt)

        if not admin:
            neutron_context = context.Context(
                '', kwargs.get('tenant_id', self._tenant_id))
            address_scope_req.environ['neutron.context'] = neutron_context

        address_scope_res = address_scope_req.get_response(self.ext_api)
        if expected_res_status:
            self.assertEqual(expected_res_status,
                             address_scope_res.status_int)
        return address_scope_res

    def _make_address_scope(self, fmt, ip_version, admin=False, **kwargs):
        res = self._create_address_scope(fmt, ip_version,
                                         admin=admin, **kwargs)
        if res.status_int >= webob.exc.HTTPClientError.code:
            raise webob.exc.HTTPClientError(code=res.status_int)
        return self.deserialize(fmt, res)

    @contextlib.contextmanager
    def address_scope(self, ip_version=constants.IP_VERSION_4,
                      admin=False, **kwargs):
        addr_scope = self._make_address_scope(self.fmt, ip_version,
                                              admin, **kwargs)
        yield addr_scope

    def _test_create_address_scope(self, ip_version=constants.IP_VERSION_4,
                                   admin=False, expected=None, **kwargs):
        keys = kwargs.copy()
        keys.setdefault('tenant_id', self._tenant_id)
        with self.address_scope(ip_version,
                                admin=admin, **keys) as addr_scope:
            keys['ip_version'] = ip_version
            self._validate_resource(addr_scope, keys, 'address_scope')
            if expected:
                self._compare_resource(addr_scope, expected, 'address_scope')
        return addr_scope

    def _test_update_address_scope(self, addr_scope_id, data, admin=False,
                                   expected=None, tenant_id=None):
        update_req = self.new_update_request(
            'address-scopes', data, addr_scope_id)
        if not admin:
            neutron_context = context.Context('', tenant_id or
                                              self._tenant_id)
            update_req.environ['neutron.context'] = neutron_context

        update_res = update_req.get_response(self.ext_api)
        if expected:
            addr_scope = self.deserialize(self.fmt, update_res)
            self._compare_resource(addr_scope, expected, 'address_scope')
            return addr_scope
        return update_res


class AddressScopeTestPlugin(db_base_plugin_v2.NeutronDbPluginV2,
                             address_scope_db.AddressScopeDbMixin):
    __native_pagination_support = True
    __native_sorting_support = True

    supported_extension_aliases = ["address-scope"]
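# Illustrative sketch (not part of the module): the request body that
# _create_address_scope() above builds has the following shape; the names
# and tenant ID here are made up for the example.

example_body = {
    'address_scope': {
        'name': 'foo-address-scope',
        'ip_version': 4,        # or 6
        'shared': False,        # only admins may set this to True
        'tenant_id': 'test-tenant',
    }
}
# POSTing example_body to /v2.0/address-scopes creates the scope; the
# tests below assert the admin/shared permission matrix around it.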
class TestAddressScope(AddressScopeTestCase):

    def setUp(self):
        plugin = DB_PLUGIN_KLASS
        ext_mgr = AddressScopeTestExtensionManager()
        super(TestAddressScope, self).setUp(plugin=plugin, ext_mgr=ext_mgr)

    def test_create_address_scope_ipv4(self):
        expected_addr_scope = {'name': 'foo-address-scope',
                               'tenant_id': self._tenant_id,
                               'shared': False,
                               'ip_version': constants.IP_VERSION_4}
        self._test_create_address_scope(name='foo-address-scope',
                                        expected=expected_addr_scope)

    def test_create_address_scope_ipv6(self):
        expected_addr_scope = {'name': 'foo-address-scope',
                               'tenant_id': self._tenant_id,
                               'shared': False,
                               'ip_version': constants.IP_VERSION_6}
        self._test_create_address_scope(constants.IP_VERSION_6,
                                        name='foo-address-scope',
                                        expected=expected_addr_scope)

    def test_create_address_scope_empty_name(self):
        expected_addr_scope = {'name': '',
                               'tenant_id': self._tenant_id,
                               'shared': False}
        self._test_create_address_scope(name='',
                                        expected=expected_addr_scope)

        # no name specified
        self._test_create_address_scope(expected=expected_addr_scope)

    def test_create_address_scope_shared_admin(self):
        expected_addr_scope = {'name': 'foo-address-scope', 'shared': True}
        self._test_create_address_scope(name='foo-address-scope',
                                        admin=True,
                                        shared=True,
                                        expected=expected_addr_scope)

    def test_created_address_scope_shared_non_admin(self):
        res = self._create_address_scope(self.fmt,
                                         name='foo-address-scope',
                                         tenant_id=self._tenant_id,
                                         admin=False,
                                         shared=True)
        self.assertEqual(webob.exc.HTTPForbidden.code, res.status_int)

    def test_created_address_scope_specify_id(self):
        res = self._create_address_scope(self.fmt,
                                         name='foo-address-scope',
                                         id='foo-id')
        self.assertEqual(webob.exc.HTTPClientError.code, res.status_int)

    def test_delete_address_scope(self):
        with self.address_scope(name='foo-address-scope') as addr_scope:
            self._delete('address-scopes',
                         addr_scope['address_scope']['id'])
            self._show('address-scopes', addr_scope['address_scope']['id'],
                       expected_code=webob.exc.HTTPNotFound.code)

    def test_update_address_scope(self):
        addr_scope = self._test_create_address_scope(
            name='foo-address-scope')
        data = {'address_scope': {'name': 'bar-address-scope'}}
        self._test_update_address_scope(addr_scope['address_scope']['id'],
                                        data,
                                        expected=data['address_scope'])

    def test_update_address_scope_shared_true_admin(self):
        addr_scope = self._test_create_address_scope(
            name='foo-address-scope')
        data = {'address_scope': {'shared': True}}
        self._test_update_address_scope(addr_scope['address_scope']['id'],
                                        data, admin=True,
                                        expected=data['address_scope'])

    def test_update_address_scope_shared_true_non_admin(self):
        addr_scope = self._test_create_address_scope(
            name='foo-address-scope')
        data = {'address_scope': {'shared': True}}
        res = self._test_update_address_scope(
            addr_scope['address_scope']['id'], data, admin=False)
        self.assertEqual(webob.exc.HTTPForbidden.code, res.status_int)

    def test_update_address_scope_shared_false_admin(self):
        addr_scope = self._test_create_address_scope(
            name='foo-address-scope', admin=True, shared=True)
        data = {'address_scope': {'shared': False}}
        res = self._test_update_address_scope(
            addr_scope['address_scope']['id'], data, admin=True)
        self.assertEqual(webob.exc.HTTPClientError.code, res.status_int)

    def test_get_address_scope(self):
        addr_scope = self._test_create_address_scope(
            name='foo-address-scope')
        req = self.new_show_request('address-scopes',
                                    addr_scope['address_scope']['id'])
        res = self.deserialize(self.fmt, req.get_response(self.ext_api))
        self.assertEqual(addr_scope['address_scope']['id'],
                         res['address_scope']['id'])

    def test_get_address_scope_different_tenants_not_shared(self):
        addr_scope = self._test_create_address_scope(
            name='foo-address-scope')
        req = self.new_show_request('address-scopes',
                                    addr_scope['address_scope']['id'])
        neutron_context = context.Context('', 'not-the-owner')
        req.environ['neutron.context'] = neutron_context
        res = req.get_response(self.ext_api)
        self.assertEqual(webob.exc.HTTPNotFound.code, res.status_int)

    def test_get_address_scope_different_tenants_shared(self):
        addr_scope = self._test_create_address_scope(
            name='foo-address-scope', shared=True, admin=True)
        req = self.new_show_request('address-scopes',
                                    addr_scope['address_scope']['id'])
        neutron_context = context.Context('', 'test-tenant-2')
        req.environ['neutron.context'] = neutron_context
        res = self.deserialize(self.fmt, req.get_response(self.ext_api))
        self.assertEqual(addr_scope['address_scope']['id'],
                         res['address_scope']['id'])

    def test_list_address_scopes(self):
        self._test_create_address_scope(name='foo-address-scope')
        self._test_create_address_scope(constants.IP_VERSION_6,
                                        name='bar-address-scope')
        res = self._list('address-scopes')
        self.assertEqual(2, len(res['address_scopes']))

    def test_list_address_scopes_different_tenants_shared(self):
        self._test_create_address_scope(name='foo-address-scope',
                                        shared=True, admin=True)
        admin_res = self._list('address-scopes')
        mortal_res = self._list(
            'address-scopes',
            neutron_context=context.Context('', 'not-the-owner'))
        self.assertEqual(1, len(admin_res['address_scopes']))
        self.assertEqual(1, len(mortal_res['address_scopes']))

    def test_list_address_scopes_different_tenants_not_shared(self):
        self._test_create_address_scope(constants.IP_VERSION_6,
                                        name='foo-address-scope')
        admin_res = self._list('address-scopes')
        mortal_res = self._list(
            'address-scopes',
            neutron_context=context.Context('', 'not-the-owner'))
        self.assertEqual(1, len(admin_res['address_scopes']))
        self.assertEqual(0, len(mortal_res['address_scopes']))
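# Illustrative sketch (not from the tree): the visibility rules asserted
# above reduce to "owners always see a scope; other tenants see it only
# when it is shared". As a plain predicate, assuming string tenant IDs:

def scope_visible_to(scope_tenant_id, shared, requester_tenant_id):
    """Mirror of the list/show filtering the tests exercise."""
    return shared or scope_tenant_id == requester_tenant_id

assert scope_visible_to('owner', False, 'owner')
assert not scope_visible_to('owner', False, 'not-the-owner')
assert scope_visible_to('owner', True, 'not-the-owner')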
address_scope_id=address_scope_id) overlap_subnet = netaddr.IPNetwork('10.10.10.10/24') self.assertRaises( webob.exc.HTTPClientError, self._test_create_subnetpool, [overlap_subnet.cidr], min_prefixlen='21', address_scope_id=address_scope_id) def test_update_subnetpool_associate_address_scope(self): subnet = netaddr.IPNetwork('10.10.10.0/24') initial_subnetpool = self._test_create_subnetpool([subnet.cidr], name='foo-sp', min_prefixlen='21') with self.address_scope(name='foo-address-scope') as addr_scope: address_scope_id = addr_scope['address_scope']['id'] data = {'subnetpool': {'address_scope_id': address_scope_id}} req = self.new_update_request( 'subnetpools', data, initial_subnetpool['subnetpool']['id']) api = self._api_for_resource('subnetpools') res = self.deserialize(self.fmt, req.get_response(api)) self._compare_resource(res, data['subnetpool'], 'subnetpool') def test_update_subnetpool_associate_invalid_address_scope(self): subnet = netaddr.IPNetwork('10.10.10.0/24') initial_subnetpool = self._test_create_subnetpool([subnet.cidr], name='foo-sp', min_prefixlen='21') data = {'subnetpool': {'address_scope_id': 'foo-addr-scope-id'}} req = self.new_update_request( 'subnetpools', data, initial_subnetpool['subnetpool']['id']) api = self._api_for_resource('subnetpools') res = req.get_response(api) self.assertEqual(webob.exc.HTTPClientError.code, res.status_int) def test_update_subnetpool_disassociate_address_scope(self): with self.address_scope(name='foo-address-scope') as addr_scope: address_scope_id = addr_scope['address_scope']['id'] subnet = netaddr.IPNetwork('10.10.10.0/24') expected = {'address_scope_id': address_scope_id} initial_subnetpool = self._test_create_subnetpool( [subnet.cidr], expected=expected, name='foo-sp', min_prefixlen='21', address_scope_id=address_scope_id) data = {'subnetpool': {'address_scope_id': None}} req = self.new_update_request( 'subnetpools', data, initial_subnetpool['subnetpool']['id']) api = self._api_for_resource('subnetpools') res = self.deserialize(self.fmt, req.get_response(api)) self._compare_resource(res, data['subnetpool'], 'subnetpool') def test_update_subnetpool_associate_another_address_scope(self): with self.address_scope(name='foo-address-scope') as addr_scope: address_scope_id = addr_scope['address_scope']['id'] subnet = netaddr.IPNetwork('10.10.10.0/24') expected = {'address_scope_id': address_scope_id} initial_subnetpool = self._test_create_subnetpool( [subnet.cidr], expected=expected, name='foo-sp', min_prefixlen='21', address_scope_id=address_scope_id) with self.address_scope(name='foo-address-scope') as other_a_s: other_a_s_id = other_a_s['address_scope']['id'] update_data = {'subnetpool': {'address_scope_id': other_a_s_id}} req = self.new_update_request( 'subnetpools', update_data, initial_subnetpool['subnetpool']['id']) api = self._api_for_resource('subnetpools') res = self.deserialize(self.fmt, req.get_response(api)) self._compare_resource(res, update_data['subnetpool'], 'subnetpool') def _test_update_subnetpool_address_scope_notify(self, as_change=True): with self.address_scope(name='foo-address-scope') as addr_scope: foo_as_id = addr_scope['address_scope']['id'] subnet = netaddr.IPNetwork('10.10.10.0/24') initial_subnetpool = self._test_create_subnetpool( [subnet.cidr], name='foo-sp', min_prefixlen='21', address_scope_id=foo_as_id) subnetpool_id = initial_subnetpool['subnetpool']['id'] with self.address_scope(name='bar-address-scope') as other_as, \ self.network() as network: data = {'subnet': { 'network_id': network['network']['id'], 
'subnetpool_id': subnetpool_id, 'prefixlen': 24, 'ip_version': 4, 'tenant_id': network['network']['tenant_id']}} req = self.new_create_request('subnets', data) subnet = self.deserialize(self.fmt, req.get_response(self.api)) with mock.patch.object(registry, 'notify') as notify: plugin = db_base_plugin_v2.NeutronDbPluginV2() plugin.is_address_scope_owned_by_tenant = mock.Mock( return_value=True) plugin._validate_address_scope_id = mock.Mock() ctx = context.get_admin_context() bar_as_id = other_as['address_scope']['id'] data = {'subnetpool': { 'name': 'bar-sp'}} if as_change: data['subnetpool']['address_scope_id'] = bar_as_id updated_sp = plugin.update_subnetpool( ctx, subnetpool_id, data) self.assertEqual('bar-sp', updated_sp['name']) if as_change: self.assertEqual(bar_as_id, updated_sp['address_scope_id']) notify.assert_called_once_with( resources.SUBNETPOOL_ADDRESS_SCOPE, events.AFTER_UPDATE, plugin.update_subnetpool, context=ctx, subnetpool_id=subnetpool_id) else: self.assertEqual(foo_as_id, updated_sp['address_scope_id']) self.assertFalse(notify.called) def test_update_subnetpool_address_scope_notify(self): self._test_update_subnetpool_address_scope_notify() def test_not_update_subnetpool_address_scope_not_notify(self): self._test_update_subnetpool_address_scope_notify(False) def test_network_create_contain_address_scope_attr(self): with self.network() as network: result = self._show('networks', network['network']['id']) keys = [ext_address_scope.IPV4_ADDRESS_SCOPE, ext_address_scope.IPV6_ADDRESS_SCOPE] for k in keys: # Correlated address scopes should initially be None self.assertIsNone(result['network'][k]) def test_correlate_network_with_address_scope(self): with self.address_scope(name='v4-as') as v4_addr_scope, \ self.address_scope( name='v6-as', ip_version=constants.IP_VERSION_6) as v6_addr_scope, \ self.network() as network: v4_as_id = v4_addr_scope['address_scope']['id'] subnet = netaddr.IPNetwork('10.10.10.0/24') v4_subnetpool = self._test_create_subnetpool( [subnet.cidr], name='v4-sp', min_prefixlen='24', address_scope_id=v4_as_id) v4_subnetpool_id = v4_subnetpool['subnetpool']['id'] v6_as_id = v6_addr_scope['address_scope']['id'] subnet = netaddr.IPNetwork('fd5c:6ee1:c7ae::/64') v6_subnetpool = self._test_create_subnetpool( [subnet.cidr], name='v6-sp', min_prefixlen='64', address_scope_id=v6_as_id) v6_subnetpool_id = v6_subnetpool['subnetpool']['id'] data = {'subnet': { 'network_id': network['network']['id'], 'subnetpool_id': v4_subnetpool_id, 'ip_version': 4, 'tenant_id': network['network']['tenant_id']}} req = self.new_create_request('subnets', data) self.deserialize(self.fmt, req.get_response(self.api)) data['subnet']['subnetpool_id'] = v6_subnetpool_id data['subnet']['ip_version'] = 6 req = self.new_create_request('subnets', data) self.deserialize(self.fmt, req.get_response(self.api)) result = self._show('networks', network['network']['id']) self.assertEqual( v4_as_id, result['network'][ext_address_scope.IPV4_ADDRESS_SCOPE]) self.assertEqual( v6_as_id, result['network'][ext_address_scope.IPV6_ADDRESS_SCOPE]) def test_delete_address_scope_in_use(self): with self.address_scope(name='foo-address-scope') as addr_scope: address_scope_id = addr_scope['address_scope']['id'] subnet = netaddr.IPNetwork('10.10.10.0/24') expected = {'address_scope_id': address_scope_id} self._test_create_subnetpool([subnet.cidr], expected=expected, name='foo-subnetpool', min_prefixlen='21', address_scope_id=address_scope_id) self._delete('address-scopes', address_scope_id, 
    def test_delete_address_scope_in_use(self):
        with self.address_scope(name='foo-address-scope') as addr_scope:
            address_scope_id = addr_scope['address_scope']['id']
            subnet = netaddr.IPNetwork('10.10.10.0/24')
            expected = {'address_scope_id': address_scope_id}
            self._test_create_subnetpool([subnet.cidr], expected=expected,
                                         name='foo-subnetpool',
                                         min_prefixlen='21',
                                         address_scope_id=address_scope_id)
            self._delete('address-scopes', address_scope_id,
                         expected_code=webob.exc.HTTPConflict.code)

    def test_add_subnetpool_address_scope_wrong_address_family(self):
        with self.address_scope(constants.IP_VERSION_6,
                                name='foo-address-scope') as addr_scope:
            address_scope_id = addr_scope['address_scope']['id']
            subnet = netaddr.IPNetwork('10.10.10.0/24')
            self.assertRaises(webob.exc.HTTPClientError,
                              self._test_create_subnetpool,
                              [subnet.cidr], name='foo-subnetpool',
                              min_prefixlen='21',
                              address_scope_id=address_scope_id)

    def test_update_subnetpool_associate_address_scope_wrong_family(self):
        with self.address_scope(constants.IP_VERSION_6,
                                name='foo-address-scope') as addr_scope:
            address_scope_id = addr_scope['address_scope']['id']
            subnet = netaddr.IPNetwork('2001:db8::/64')
            expected = {'address_scope_id': address_scope_id}
            initial_subnetpool = self._test_create_subnetpool(
                [subnet.cidr], expected=expected, name='foo-sp',
                min_prefixlen='64', address_scope_id=address_scope_id)

            with self.address_scope(name='foo-address-scope') as other_a_s:
                other_a_s_id = other_a_s['address_scope']['id']
                update_data = {'subnetpool': {'address_scope_id':
                                              other_a_s_id}}
                req = self.new_update_request(
                    'subnetpools', update_data,
                    initial_subnetpool['subnetpool']['id'])
                api = self._api_for_resource('subnetpools')
                res = req.get_response(api)
                self.assertEqual(webob.exc.HTTPBadRequest.code,
                                 res.status_int)
neutron-8.4.0/neutron/tests/unit/extensions/test_securitygroup.py
# Copyright (c) 2012 OpenStack Foundation.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import contextlib

import mock
import oslo_db.exception as exc
import six
import testtools
import webob.exc

from neutron.api.v2 import attributes as attr
from neutron.common import constants as const
from neutron.common import exceptions as n_exc
from neutron import context
from neutron.db import db_base_plugin_v2
from neutron.db import securitygroups_db
from neutron.extensions import securitygroup as ext_sg
from neutron import manager
from neutron.tests import base
from neutron.tests.unit.db import test_db_base_plugin_v2
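# Illustrative constant (not in the original module): the suite below builds
# its rule payloads by hand via _build_security_group_rule(); a minimal
# ingress-TCP body, with a placeholder group id, has this shape.
_EXAMPLE_RULE_PAYLOAD = {
    'security_group_rule': {
        'security_group_id': '<security-group-uuid>',
        'direction': 'ingress',
        'protocol': 'tcp',
        'port_range_min': '22',
        'port_range_max': '22',
        'ethertype': 'IPv4',
        'tenant_id': 'test-tenant',
    },
}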
DB_PLUGIN_KLASS = ('neutron.tests.unit.extensions.test_securitygroup.'
                   'SecurityGroupTestPlugin')


class SecurityGroupTestExtensionManager(object):

    def get_resources(self):
        # Add the resources to the global attribute map
        # This is done here as the setup process won't
        # initialize the main API router which extends
        # the global attribute map
        attr.RESOURCE_ATTRIBUTE_MAP.update(
            ext_sg.RESOURCE_ATTRIBUTE_MAP)
        return ext_sg.Securitygroup.get_resources()

    def get_actions(self):
        return []

    def get_request_extensions(self):
        return []


class SecurityGroupsTestCase(test_db_base_plugin_v2.NeutronDbPluginV2TestCase):

    def _create_security_group(self, fmt, name, description, **kwargs):
        data = {'security_group': {'name': name,
                                   'tenant_id': kwargs.get('tenant_id',
                                                           'test-tenant'),
                                   'description': description}}
        security_group_req = self.new_create_request('security-groups', data,
                                                     fmt)
        if (kwargs.get('set_context') and 'tenant_id' in kwargs):
            # create a specific auth context for this request
            security_group_req.environ['neutron.context'] = (
                context.Context('', kwargs['tenant_id']))
        return security_group_req.get_response(self.ext_api)

    def _build_security_group_rule(self, security_group_id, direction, proto,
                                   port_range_min=None, port_range_max=None,
                                   remote_ip_prefix=None,
                                   remote_group_id=None,
                                   tenant_id='test-tenant',
                                   ethertype=const.IPv4):
        data = {'security_group_rule': {
            'security_group_id': security_group_id,
            'direction': direction,
            'protocol': proto,
            'ethertype': ethertype,
            'tenant_id': tenant_id}}
        if port_range_min:
            data['security_group_rule']['port_range_min'] = port_range_min
        if port_range_max:
            data['security_group_rule']['port_range_max'] = port_range_max
        if remote_ip_prefix:
            data['security_group_rule']['remote_ip_prefix'] = remote_ip_prefix
        if remote_group_id:
            data['security_group_rule']['remote_group_id'] = remote_group_id
        return data

    def _create_security_group_rule(self, fmt, rules, **kwargs):
        security_group_rule_req = self.new_create_request(
            'security-group-rules', rules, fmt)
        if (kwargs.get('set_context') and 'tenant_id' in kwargs):
            # create a specific auth context for this request
            security_group_rule_req.environ['neutron.context'] = (
                context.Context('', kwargs['tenant_id']))
        return security_group_rule_req.get_response(self.ext_api)

    def _make_security_group(self, fmt, name, description, **kwargs):
        res = self._create_security_group(fmt, name, description, **kwargs)
        if res.status_int >= webob.exc.HTTPBadRequest.code:
            raise webob.exc.HTTPClientError(code=res.status_int)
        return self.deserialize(fmt, res)

    def _make_security_group_rule(self, fmt, rules, **kwargs):
        # Note: the original passed self.fmt and dropped kwargs here; use the
        # arguments so callers actually get the format and context they ask
        # for.
        res = self._create_security_group_rule(fmt, rules, **kwargs)
        if res.status_int >= webob.exc.HTTPBadRequest.code:
            raise webob.exc.HTTPClientError(code=res.status_int)
        return self.deserialize(fmt, res)

    @contextlib.contextmanager
    def security_group(self, name='webservers', description='webservers',
                       fmt=None):
        if not fmt:
            fmt = self.fmt
        security_group = self._make_security_group(fmt, name, description)
        yield security_group

    @contextlib.contextmanager
    def security_group_rule(self, security_group_id='4cd70774-cc67-4a87-9b39-7'
                                                    'd1db38eb087',
                            direction='ingress',
                            protocol=const.PROTO_NAME_TCP,
                            port_range_min='22', port_range_max='22',
                            remote_ip_prefix=None, remote_group_id=None,
                            fmt=None, ethertype=const.IPv4):
        if not fmt:
            fmt = self.fmt
        rule = self._build_security_group_rule(security_group_id,
                                               direction,
                                               protocol, port_range_min,
                                               port_range_max,
                                               remote_ip_prefix,
                                               remote_group_id,
                                               ethertype=ethertype)
        security_group_rule = self._make_security_group_rule(fmt, rule)
        yield security_group_rule
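    def _example_contextmanager_usage(self):
        # Hedged usage sketch (illustrative only; not exercised by the
        # suite): the two context managers above are typically nested, and
        # security_group_rule() defaults to an ingress TCP/22 rule.
        with self.security_group() as sg:
            sg_id = sg['security_group']['id']
            with self.security_group_rule(sg_id) as rule:
                self.assertEqual('ingress',
                                 rule['security_group_rule']['direction'])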
    def _delete_default_security_group_egress_rules(self, security_group_id):
        """Deletes default egress rules given a security group ID."""
        res = self._list(
            'security-group-rules',
            query_params='security_group_id=%s' % security_group_id)

        for r in res['security_group_rules']:
            if (r['direction'] == 'egress' and not r['port_range_max'] and
                    not r['port_range_min'] and not r['protocol'] and
                    not r['remote_ip_prefix']):
                self._delete('security-group-rules', r['id'])

    def _assert_sg_rule_has_kvs(self, security_group_rule, expected_kvs):
        """Asserts that the sg rule has expected key/value pairs passed
        in as expected_kvs dictionary
        """
        for k, v in six.iteritems(expected_kvs):
            self.assertEqual(security_group_rule[k], v)


class SecurityGroupTestPlugin(db_base_plugin_v2.NeutronDbPluginV2,
                              securitygroups_db.SecurityGroupDbMixin):
    """Test plugin that implements necessary calls on create/delete port for
    associating ports with security groups.
    """

    __native_pagination_support = True
    __native_sorting_support = True

    supported_extension_aliases = ["security-group"]

    def create_port(self, context, port):
        tenant_id = port['port']['tenant_id']
        default_sg = self._ensure_default_security_group(context, tenant_id)
        if not attr.is_attr_set(port['port'].get(ext_sg.SECURITYGROUPS)):
            port['port'][ext_sg.SECURITYGROUPS] = [default_sg]
        session = context.session
        with session.begin(subtransactions=True):
            sgids = self._get_security_groups_on_port(context, port)
            port = super(SecurityGroupTestPlugin, self).create_port(context,
                                                                    port)
            self._process_port_create_security_group(context, port,
                                                     sgids)
        return port

    def update_port(self, context, id, port):
        session = context.session
        with session.begin(subtransactions=True):
            if ext_sg.SECURITYGROUPS in port['port']:
                port['port'][ext_sg.SECURITYGROUPS] = (
                    self._get_security_groups_on_port(context, port))
                # delete the port binding and read it with the new rules
                self._delete_port_security_group_bindings(context, id)
                port['port']['id'] = id
                self._process_port_create_security_group(
                    context, port['port'],
                    port['port'].get(ext_sg.SECURITYGROUPS))
            port = super(SecurityGroupTestPlugin, self).update_port(
                context, id, port)
        return port

    def create_network(self, context, network):
        self._ensure_default_security_group(context,
                                            network['network']['tenant_id'])
        return super(SecurityGroupTestPlugin, self).create_network(context,
                                                                   network)

    def get_ports(self, context, filters=None, fields=None,
                  sorts=None, limit=None, marker=None,
                  page_reverse=False):
        sorts = sorts or []
        neutron_lports = super(SecurityGroupTestPlugin, self).get_ports(
            context, filters, sorts=sorts, limit=limit, marker=marker,
            page_reverse=page_reverse)
        return neutron_lports


class SecurityGroupDBTestCase(SecurityGroupsTestCase):
    def setUp(self, plugin=None, ext_mgr=None):
        plugin = plugin or DB_PLUGIN_KLASS
        ext_mgr = ext_mgr or SecurityGroupTestExtensionManager()
        super(SecurityGroupDBTestCase,
              self).setUp(plugin=plugin, ext_mgr=ext_mgr)
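# Hedged extension sketch (comment only; nothing below depends on it): a
# plugin-specific suite would reuse the whole test class that follows by
# overriding setUp with its own plugin path, roughly:
#
#     class MyPluginSecurityGroupsTest(SecurityGroupDBTestCase):
#         def setUp(self):
#             super(MyPluginSecurityGroupsTest, self).setUp(
#                 plugin='path.to.MyPlugin')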
class TestSecurityGroups(SecurityGroupDBTestCase):
    def test_create_security_group(self):
        name = 'webservers'
        description = 'my webservers'
        keys = [('name', name,), ('description', description)]
        with self.security_group(name, description) as security_group:
            for k, v, in keys:
                self.assertEqual(security_group['security_group'][k], v)

            # Verify that default egress rules have been created
            sg_rules = security_group['security_group']['security_group_rules']
            self.assertEqual(2, len(sg_rules))

            v4_rules = [r for r in sg_rules if r['ethertype'] == const.IPv4]
            self.assertEqual(1, len(v4_rules))
            v4_rule = v4_rules[0]
            expected = {'direction': 'egress',
                        'ethertype': const.IPv4,
                        'remote_group_id': None,
                        'remote_ip_prefix': None,
                        'protocol': None,
                        'port_range_max': None,
                        'port_range_min': None}
            self._assert_sg_rule_has_kvs(v4_rule, expected)

            v6_rules = [r for r in sg_rules if r['ethertype'] == const.IPv6]
            self.assertEqual(1, len(v6_rules))
            v6_rule = v6_rules[0]
            expected = {'direction': 'egress',
                        'ethertype': const.IPv6,
                        'remote_group_id': None,
                        'remote_ip_prefix': None,
                        'protocol': None,
                        'port_range_max': None,
                        'port_range_min': None}
            self._assert_sg_rule_has_kvs(v6_rule, expected)

    def test_skip_duplicate_default_sg_error(self):
        num_called = [0]
        original_func = self.plugin.create_security_group

        def side_effect(context, security_group, default_sg):
            # can't always raise, or create_security_group will hang
            self.assertTrue(default_sg)
            self.assertTrue(num_called[0] < 2)
            num_called[0] += 1
            ret = original_func(context, security_group, default_sg)
            if num_called[0] == 1:
                return ret
            # make another call to cause an exception.
            # NOTE(yamamoto): raising the exception by ourselves
            # doesn't update the session state appropriately.
            self.assertRaises(exc.DBDuplicateEntry,
                              original_func, context, security_group,
                              default_sg)

        with mock.patch.object(SecurityGroupTestPlugin,
                               'create_security_group',
                               side_effect=side_effect):
            self.plugin.create_network(
                context.get_admin_context(),
                {'network': {'name': 'foo',
                             'admin_state_up': True,
                             'shared': False,
                             'tenant_id': 'bar'}})

    def test_update_security_group(self):
        with self.security_group() as sg:
            data = {'security_group': {'name': 'new_name',
                                       'description': 'new_desc'}}
            req = self.new_update_request('security-groups',
                                          data,
                                          sg['security_group']['id'])
            res = self.deserialize(self.fmt, req.get_response(self.ext_api))
            self.assertEqual(data['security_group']['name'],
                             res['security_group']['name'])
            self.assertEqual(data['security_group']['description'],
                             res['security_group']['description'])

    def test_update_security_group_name_to_default_fail(self):
        with self.security_group() as sg:
            data = {'security_group': {'name': 'default',
                                       'description': 'new_desc'}}
            req = self.new_update_request('security-groups',
                                          data,
                                          sg['security_group']['id'])
            req.environ['neutron.context'] = context.Context('', 'somebody')
            res = req.get_response(self.ext_api)
            self.assertEqual(webob.exc.HTTPConflict.code, res.status_int)

    def test_update_default_security_group_name_fail(self):
        with self.network():
            res = self.new_list_request('security-groups')
            sg = self.deserialize(self.fmt, res.get_response(self.ext_api))
            data = {'security_group': {'name': 'new_name',
                                       'description': 'new_desc'}}
            req = self.new_update_request('security-groups',
                                          data,
                                          sg['security_groups'][0]['id'])
            req.environ['neutron.context'] = context.Context('', 'somebody')
            res = req.get_response(self.ext_api)
            self.assertEqual(webob.exc.HTTPNotFound.code, res.status_int)

    def test_update_default_security_group_with_description(self):
        with self.network():
            res = self.new_list_request('security-groups')
            sg = self.deserialize(self.fmt, res.get_response(self.ext_api))
            data = {'security_group': {'description': 'new_desc'}}
            req = self.new_update_request('security-groups',
                                          data,
                                          sg['security_groups'][0]['id'])
            res = self.deserialize(self.fmt, req.get_response(self.ext_api))
            self.assertEqual(data['security_group']['description'],
                             res['security_group']['description'])
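    # Hedged aside: the default-group tests below lean on the behaviour that
    # creating a network implicitly creates one 'default' group per tenant;
    # listing 'security-groups' right afterwards is expected to return a
    # single entry shaped roughly like
    #     {'security_groups': [{'name': 'default',
    #                           'description': 'Default security group',
    #                           ...}]}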
    def test_check_default_security_group_description(self):
        with self.network():
            res = self.new_list_request('security-groups')
            sg = self.deserialize(self.fmt, res.get_response(self.ext_api))
            self.assertEqual('Default security group',
                             sg['security_groups'][0]['description'])

    def test_default_security_group(self):
        with self.network():
            res = self.new_list_request('security-groups')
            groups = self.deserialize(self.fmt,
                                      res.get_response(self.ext_api))
            self.assertEqual(1, len(groups['security_groups']))

    def test_create_default_security_group_fail(self):
        name = 'default'
        description = 'my webservers'
        res = self._create_security_group(self.fmt, name, description)
        self.deserialize(self.fmt, res)
        self.assertEqual(webob.exc.HTTPConflict.code, res.status_int)

    def test_create_default_security_group_check_case_insensitive(self):
        name = 'DEFAULT'
        description = 'my webservers'
        res = self._create_security_group(self.fmt, name, description)
        self.deserialize(self.fmt, res)
        self.assertEqual(webob.exc.HTTPConflict.code, res.status_int)

    def test_list_security_groups(self):
        with self.security_group(name='sg1', description='sg') as v1,\
                self.security_group(name='sg2', description='sg') as v2,\
                self.security_group(name='sg3', description='sg') as v3:
            security_groups = (v1, v2, v3)
            self._test_list_resources('security-group',
                                      security_groups,
                                      query_params='description=sg')

    def test_list_security_groups_with_sort(self):
        with self.security_group(name='sg1', description='sg') as sg1,\
                self.security_group(name='sg2', description='sg') as sg2,\
                self.security_group(name='sg3', description='sg') as sg3:
            self._test_list_with_sort('security-group',
                                      (sg3, sg2, sg1),
                                      [('name', 'desc')],
                                      query_params='description=sg')

    def test_list_security_groups_with_pagination(self):
        with self.security_group(name='sg1', description='sg') as sg1,\
                self.security_group(name='sg2', description='sg') as sg2,\
                self.security_group(name='sg3', description='sg') as sg3:
            self._test_list_with_pagination('security-group',
                                            (sg1, sg2, sg3),
                                            ('name', 'asc'), 2, 2,
                                            query_params='description=sg')

    def test_list_security_groups_with_pagination_reverse(self):
        with self.security_group(name='sg1', description='sg') as sg1,\
                self.security_group(name='sg2', description='sg') as sg2,\
                self.security_group(name='sg3', description='sg') as sg3:
            self._test_list_with_pagination_reverse(
                'security-group', (sg1, sg2, sg3), ('name', 'asc'), 2, 2,
                query_params='description=sg')

    def test_create_security_group_rule_ethertype_invalid_as_number(self):
        name = 'webservers'
        description = 'my webservers'
        with self.security_group(name, description) as sg:
            security_group_id = sg['security_group']['id']
            ethertype = 2
            rule = self._build_security_group_rule(
                security_group_id, 'ingress', const.PROTO_NAME_TCP, '22',
                '22', None, None, ethertype=ethertype)
            res = self._create_security_group_rule(self.fmt, rule)
            self.deserialize(self.fmt, res)
            self.assertEqual(webob.exc.HTTPBadRequest.code, res.status_int)

    def test_create_security_group_rule_ethertype_invalid_for_protocol(self):
        name = 'webservers'
        description = 'my webservers'
        with self.security_group(name, description) as sg:
            security_group_id = sg['security_group']['id']
            rule = self._build_security_group_rule(
                security_group_id, 'ingress', const.PROTO_NAME_IPV6_ICMP)
            res = self._create_security_group_rule(self.fmt, rule)
            self.deserialize(self.fmt, res)
            self.assertEqual(webob.exc.HTTPBadRequest.code, res.status_int)
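    def _example_bad_remote_prefixes(self):
        # Illustrative only (not called by the tests): the malformed
        # remote_ip_prefix values the next test expects the API to reject
        # with 400 -- a non-address string, a bare integer, an IPv6 mask
        # beyond /128, and a truncated IPv4 CIDR.
        return ['bad_ip', 256, '2001:db8:a::123/129', '172.30./24']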
    def test_create_security_group_rule_invalid_ip_prefix(self):
        name = 'webservers'
        description = 'my webservers'
        for bad_prefix in ['bad_ip', 256, "2001:db8:a::123/129",
                           '172.30./24']:
            with self.security_group(name, description) as sg:
                sg_id = sg['security_group']['id']
                remote_ip_prefix = bad_prefix
                rule = self._build_security_group_rule(
                    sg_id, 'ingress', const.PROTO_NAME_TCP, '22', '22',
                    remote_ip_prefix)
                res = self._create_security_group_rule(self.fmt, rule)
                self.assertEqual(webob.exc.HTTPBadRequest.code,
                                 res.status_int)

    def test_create_security_group_rule_invalid_ethertype_for_prefix(self):
        name = 'webservers'
        description = 'my webservers'
        test_addr = {'192.168.1.1/24': 'IPv6',
                     '2001:db8:1234::/48': 'IPv4',
                     '192.168.2.1/24': 'BadEthertype'}
        for remote_ip_prefix, ethertype in six.iteritems(test_addr):
            with self.security_group(name, description) as sg:
                sg_id = sg['security_group']['id']
                rule = self._build_security_group_rule(
                    sg_id, 'ingress', const.PROTO_NAME_TCP, '22', '22',
                    remote_ip_prefix, None, ethertype=ethertype)
                res = self._create_security_group_rule(self.fmt, rule)
                self.assertEqual(webob.exc.HTTPBadRequest.code,
                                 res.status_int)

    def test_create_security_group_rule_with_unmasked_prefix(self):
        name = 'webservers'
        description = 'my webservers'
        addr = {'10.1.2.3': {'mask': '32', 'ethertype': 'IPv4'},
                'fe80::2677:3ff:fe7d:4c': {'mask': '128',
                                           'ethertype': 'IPv6'}}
        for ip in addr:
            with self.security_group(name, description) as sg:
                sg_id = sg['security_group']['id']
                ethertype = addr[ip]['ethertype']
                remote_ip_prefix = ip
                rule = self._build_security_group_rule(
                    sg_id, 'ingress', const.PROTO_NAME_TCP, '22', '22',
                    remote_ip_prefix, None, ethertype=ethertype)
                res = self._create_security_group_rule(self.fmt, rule)
                self.assertEqual(res.status_int, 201)
                res_sg = self.deserialize(self.fmt, res)
                prefix = res_sg['security_group_rule']['remote_ip_prefix']
                self.assertEqual(prefix, '%s/%s' % (ip, addr[ip]['mask']))

    def test_create_security_group_rule_tcp_protocol_as_number(self):
        name = 'webservers'
        description = 'my webservers'
        with self.security_group(name, description) as sg:
            security_group_id = sg['security_group']['id']
            protocol = const.PROTO_NUM_TCP  # TCP
            rule = self._build_security_group_rule(
                security_group_id, 'ingress', protocol, '22', '22')
            res = self._create_security_group_rule(self.fmt, rule)
            self.deserialize(self.fmt, res)
            self.assertEqual(webob.exc.HTTPCreated.code, res.status_int)

    def test_create_security_group_rule_protocol_as_number(self):
        name = 'webservers'
        description = 'my webservers'
        with self.security_group(name, description) as sg:
            security_group_id = sg['security_group']['id']
            protocol = 2
            rule = self._build_security_group_rule(
                security_group_id, 'ingress', protocol)
            res = self._create_security_group_rule(self.fmt, rule)
            self.deserialize(self.fmt, res)
            self.assertEqual(webob.exc.HTTPCreated.code, res.status_int)

    def test_create_security_group_rule_case_insensitive(self):
        name = 'webservers'
        description = 'my webservers'
        with self.security_group(name, description) as sg:
            security_group_id = sg['security_group']['id']
            direction = "ingress"
            remote_ip_prefix = "10.0.0.0/24"
            protocol = 'TCP'
            port_range_min = 22
            port_range_max = 22
            ethertype = 'ipV4'
            with self.security_group_rule(security_group_id, direction,
                                          protocol, port_range_min,
                                          port_range_max,
                                          remote_ip_prefix,
                                          ethertype=ethertype) as rule:

                # the lower-case value will be returned
                self.assertEqual(rule['security_group_rule']['protocol'],
                                 protocol.lower())
                self.assertEqual(rule['security_group_rule']['ethertype'],
                                 const.IPv4)

    def test_get_security_group(self):
        name = 'webservers'
        description = 'my webservers'
        with self.security_group(name, description) as sg:
            remote_group_id = sg['security_group']['id']
            res = self.new_show_request('security-groups',
                                        remote_group_id)
            security_group_id = sg['security_group']['id']
            direction = "ingress"
            remote_ip_prefix =
"10.0.0.0/24" protocol = const.PROTO_NAME_TCP port_range_min = 22 port_range_max = 22 keys = [('remote_ip_prefix', remote_ip_prefix), ('security_group_id', security_group_id), ('direction', direction), ('protocol', protocol), ('port_range_min', port_range_min), ('port_range_max', port_range_max)] with self.security_group_rule(security_group_id, direction, protocol, port_range_min, port_range_max, remote_ip_prefix): group = self.deserialize( self.fmt, res.get_response(self.ext_api)) sg_rule = group['security_group']['security_group_rules'] self.assertEqual(group['security_group']['id'], remote_group_id) self.assertEqual(3, len(sg_rule)) sg_rule = [r for r in sg_rule if r['direction'] == 'ingress'] for k, v, in keys: self.assertEqual(sg_rule[0][k], v) def test_get_security_group_on_port_from_wrong_tenant(self): plugin = manager.NeutronManager.get_plugin() if not hasattr(plugin, '_get_security_groups_on_port'): self.skipTest("plugin doesn't use the mixin with this method") neutron_context = context.get_admin_context() res = self._create_security_group(self.fmt, 'webservers', 'webservers', tenant_id='bad_tenant') sg1 = self.deserialize(self.fmt, res) with testtools.ExpectedException(ext_sg.SecurityGroupNotFound): plugin._get_security_groups_on_port( neutron_context, {'port': {'security_groups': [sg1['security_group']['id']], 'tenant_id': 'tenant'}} ) def test_delete_security_group(self): name = 'webservers' description = 'my webservers' with self.security_group(name, description) as sg: remote_group_id = sg['security_group']['id'] self._delete('security-groups', remote_group_id, webob.exc.HTTPNoContent.code) def test_delete_default_security_group_admin(self): with self.network(): res = self.new_list_request('security-groups') sg = self.deserialize(self.fmt, res.get_response(self.ext_api)) self._delete('security-groups', sg['security_groups'][0]['id'], webob.exc.HTTPNoContent.code) def test_delete_default_security_group_nonadmin(self): with self.network(): res = self.new_list_request('security-groups') sg = self.deserialize(self.fmt, res.get_response(self.ext_api)) neutron_context = context.Context('', 'test-tenant') self._delete('security-groups', sg['security_groups'][0]['id'], webob.exc.HTTPConflict.code, neutron_context=neutron_context) def test_security_group_list_creates_default_security_group(self): neutron_context = context.Context('', 'test-tenant') sg = self._list('security-groups', neutron_context=neutron_context).get('security_groups') self.assertEqual(1, len(sg)) def test_security_group_port_create_creates_default_security_group(self): res = self._create_network(self.fmt, 'net1', True, tenant_id='not_admin', set_context=True) net1 = self.deserialize(self.fmt, res) res = self._create_port(self.fmt, net1['network']['id'], tenant_id='not_admin', set_context=True) sg = self._list('security-groups').get('security_groups') self.assertEqual(1, len(sg)) def test_default_security_group_rules(self): with self.network(): res = self.new_list_request('security-groups') groups = self.deserialize(self.fmt, res.get_response(self.ext_api)) self.assertEqual(len(groups['security_groups']), 1) security_group_id = groups['security_groups'][0]['id'] res = self.new_list_request('security-group-rules') rules = self.deserialize(self.fmt, res.get_response(self.ext_api)) self.assertEqual(len(rules['security_group_rules']), 4) # Verify default rule for v4 egress sg_rules = rules['security_group_rules'] rules = [ r for r in sg_rules if r['direction'] == 'egress' and r['ethertype'] == const.IPv4 ] 
self.assertEqual(1, len(rules)) v4_egress = rules[0] expected = {'direction': 'egress', 'ethertype': const.IPv4, 'remote_group_id': None, 'remote_ip_prefix': None, 'protocol': None, 'port_range_max': None, 'port_range_min': None} self._assert_sg_rule_has_kvs(v4_egress, expected) # Verify default rule for v6 egress rules = [ r for r in sg_rules if r['direction'] == 'egress' and r['ethertype'] == const.IPv6 ] self.assertEqual(1, len(rules)) v6_egress = rules[0] expected = {'direction': 'egress', 'ethertype': const.IPv6, 'remote_group_id': None, 'remote_ip_prefix': None, 'protocol': None, 'port_range_max': None, 'port_range_min': None} self._assert_sg_rule_has_kvs(v6_egress, expected) # Verify default rule for v4 ingress rules = [ r for r in sg_rules if r['direction'] == 'ingress' and r['ethertype'] == const.IPv4 ] self.assertEqual(1, len(rules)) v4_ingress = rules[0] expected = {'direction': 'ingress', 'ethertype': const.IPv4, 'remote_group_id': security_group_id, 'remote_ip_prefix': None, 'protocol': None, 'port_range_max': None, 'port_range_min': None} self._assert_sg_rule_has_kvs(v4_ingress, expected) # Verify default rule for v6 ingress rules = [ r for r in sg_rules if r['direction'] == 'ingress' and r['ethertype'] == const.IPv6 ] self.assertEqual(1, len(rules)) v6_ingress = rules[0] expected = {'direction': 'ingress', 'ethertype': const.IPv6, 'remote_group_id': security_group_id, 'remote_ip_prefix': None, 'protocol': None, 'port_range_max': None, 'port_range_min': None} self._assert_sg_rule_has_kvs(v6_ingress, expected) def test_create_security_group_rule_remote_ip_prefix(self): name = 'webservers' description = 'my webservers' with self.security_group(name, description) as sg: security_group_id = sg['security_group']['id'] direction = "ingress" remote_ip_prefix = "10.0.0.0/24" protocol = const.PROTO_NAME_TCP port_range_min = 22 port_range_max = 22 keys = [('remote_ip_prefix', remote_ip_prefix), ('security_group_id', security_group_id), ('direction', direction), ('protocol', protocol), ('port_range_min', port_range_min), ('port_range_max', port_range_max)] with self.security_group_rule(security_group_id, direction, protocol, port_range_min, port_range_max, remote_ip_prefix) as rule: for k, v, in keys: self.assertEqual(rule['security_group_rule'][k], v) def test_create_security_group_rule_group_id(self): name = 'webservers' description = 'my webservers' with self.security_group(name, description) as sg: with self.security_group(name, description) as sg2: security_group_id = sg['security_group']['id'] direction = "ingress" remote_group_id = sg2['security_group']['id'] protocol = const.PROTO_NAME_TCP port_range_min = 22 port_range_max = 22 keys = [('remote_group_id', remote_group_id), ('security_group_id', security_group_id), ('direction', direction), ('protocol', protocol), ('port_range_min', port_range_min), ('port_range_max', port_range_max)] with self.security_group_rule(security_group_id, direction, protocol, port_range_min, port_range_max, remote_group_id=remote_group_id ) as rule: for k, v, in keys: self.assertEqual(rule['security_group_rule'][k], v) def test_create_security_group_rule_icmp_with_type_and_code(self): name = 'webservers' description = 'my webservers' with self.security_group(name, description) as sg: security_group_id = sg['security_group']['id'] direction = "ingress" remote_ip_prefix = "10.0.0.0/24" protocol = const.PROTO_NAME_ICMP # port_range_min (ICMP type) is greater than port_range_max # (ICMP code) in order to confirm min <= max port check is # not called for 
ICMP. port_range_min = 8 port_range_max = 5 keys = [('remote_ip_prefix', remote_ip_prefix), ('security_group_id', security_group_id), ('direction', direction), ('protocol', protocol), ('port_range_min', port_range_min), ('port_range_max', port_range_max)] with self.security_group_rule(security_group_id, direction, protocol, port_range_min, port_range_max, remote_ip_prefix) as rule: for k, v, in keys: self.assertEqual(rule['security_group_rule'][k], v) def test_create_security_group_rule_icmp_with_type_only(self): name = 'webservers' description = 'my webservers' with self.security_group(name, description) as sg: security_group_id = sg['security_group']['id'] direction = "ingress" remote_ip_prefix = "10.0.0.0/24" protocol = const.PROTO_NAME_ICMP # ICMP type port_range_min = 8 # ICMP code port_range_max = None keys = [('remote_ip_prefix', remote_ip_prefix), ('security_group_id', security_group_id), ('direction', direction), ('protocol', protocol), ('port_range_min', port_range_min), ('port_range_max', port_range_max)] with self.security_group_rule(security_group_id, direction, protocol, port_range_min, port_range_max, remote_ip_prefix) as rule: for k, v, in keys: self.assertEqual(rule['security_group_rule'][k], v) def test_create_security_group_rule_icmpv6_with_type_only(self): name = 'webservers' description = 'my webservers' with self.security_group(name, description) as sg: security_group_id = sg['security_group']['id'] direction = "ingress" ethertype = const.IPv6 remote_ip_prefix = "2001::f401:56ff:fefe:d3dc/128" protocol = const.PROTO_NAME_IPV6_ICMP # ICMPV6 type port_range_min = const.ICMPV6_TYPE_RA # ICMPV6 code port_range_max = None keys = [('remote_ip_prefix', remote_ip_prefix), ('security_group_id', security_group_id), ('direction', direction), ('ethertype', ethertype), ('protocol', protocol), ('port_range_min', port_range_min), ('port_range_max', port_range_max)] with self.security_group_rule(security_group_id, direction, protocol, port_range_min, port_range_max, remote_ip_prefix, None, None, ethertype) as rule: for k, v, in keys: self.assertEqual(rule['security_group_rule'][k], v) def test_create_security_group_rule_icmpv6_legacy_protocol_name(self): name = 'webservers' description = 'my webservers' with self.security_group(name, description) as sg: security_group_id = sg['security_group']['id'] direction = "ingress" ethertype = const.IPv6 remote_ip_prefix = "2001::f401:56ff:fefe:d3dc/128" protocol = const.PROTO_NAME_IPV6_ICMP_LEGACY keys = [('remote_ip_prefix', remote_ip_prefix), ('security_group_id', security_group_id), ('direction', direction), ('ethertype', ethertype), ('protocol', protocol)] with self.security_group_rule(security_group_id, direction, protocol, None, None, remote_ip_prefix, None, None, ethertype) as rule: for k, v, in keys: self.assertEqual(rule['security_group_rule'][k], v) def test_create_security_group_source_group_ip_and_ip_prefix(self): security_group_id = "4cd70774-cc67-4a87-9b39-7d1db38eb087" direction = "ingress" remote_ip_prefix = "10.0.0.0/24" protocol = const.PROTO_NAME_TCP port_range_min = 22 port_range_max = 22 remote_group_id = "9cd70774-cc67-4a87-9b39-7d1db38eb087" rule = self._build_security_group_rule(security_group_id, direction, protocol, port_range_min, port_range_max, remote_ip_prefix, remote_group_id) res = self._create_security_group_rule(self.fmt, rule) self.deserialize(self.fmt, res) self.assertEqual(webob.exc.HTTPBadRequest.code, res.status_int) def test_create_security_group_rule_bad_security_group_id(self): security_group_id = 
"4cd70774-cc67-4a87-9b39-7d1db38eb087" direction = "ingress" remote_ip_prefix = "10.0.0.0/24" protocol = const.PROTO_NAME_TCP port_range_min = 22 port_range_max = 22 rule = self._build_security_group_rule(security_group_id, direction, protocol, port_range_min, port_range_max, remote_ip_prefix) res = self._create_security_group_rule(self.fmt, rule) self.deserialize(self.fmt, res) self.assertEqual(webob.exc.HTTPNotFound.code, res.status_int) def test_create_security_group_rule_bad_tenant(self): with self.security_group() as sg: rule = {'security_group_rule': {'security_group_id': sg['security_group']['id'], 'direction': 'ingress', 'protocol': const.PROTO_NAME_TCP, 'port_range_min': '22', 'port_range_max': '22', 'tenant_id': "bad_tenant"}} res = self._create_security_group_rule(self.fmt, rule, tenant_id='bad_tenant', set_context=True) self.deserialize(self.fmt, res) self.assertEqual(webob.exc.HTTPNotFound.code, res.status_int) def test_create_security_group_rule_bad_tenant_remote_group_id(self): with self.security_group() as sg: res = self._create_security_group(self.fmt, 'webservers', 'webservers', tenant_id='bad_tenant') sg2 = self.deserialize(self.fmt, res) rule = {'security_group_rule': {'security_group_id': sg2['security_group']['id'], 'direction': 'ingress', 'protocol': const.PROTO_NAME_TCP, 'port_range_min': '22', 'port_range_max': '22', 'tenant_id': 'bad_tenant', 'remote_group_id': sg['security_group']['id']}} res = self._create_security_group_rule(self.fmt, rule, tenant_id='bad_tenant', set_context=True) self.deserialize(self.fmt, res) self.assertEqual(webob.exc.HTTPNotFound.code, res.status_int) def test_create_security_group_rule_bad_tenant_security_group_rule(self): with self.security_group() as sg: res = self._create_security_group(self.fmt, 'webservers', 'webservers', tenant_id='bad_tenant') self.deserialize(self.fmt, res) rule = {'security_group_rule': {'security_group_id': sg['security_group']['id'], 'direction': 'ingress', 'protocol': const.PROTO_NAME_TCP, 'port_range_min': '22', 'port_range_max': '22', 'tenant_id': 'bad_tenant'}} res = self._create_security_group_rule(self.fmt, rule, tenant_id='bad_tenant', set_context=True) self.deserialize(self.fmt, res) self.assertEqual(webob.exc.HTTPNotFound.code, res.status_int) def test_create_security_group_rule_bad_remote_group_id(self): name = 'webservers' description = 'my webservers' with self.security_group(name, description) as sg: security_group_id = sg['security_group']['id'] remote_group_id = "4cd70774-cc67-4a87-9b39-7d1db38eb087" direction = "ingress" protocol = const.PROTO_NAME_TCP port_range_min = 22 port_range_max = 22 rule = self._build_security_group_rule(security_group_id, direction, protocol, port_range_min, port_range_max, remote_group_id=remote_group_id) res = self._create_security_group_rule(self.fmt, rule) self.deserialize(self.fmt, res) self.assertEqual(webob.exc.HTTPNotFound.code, res.status_int) def test_create_security_group_rule_duplicate_rules(self): name = 'webservers' description = 'my webservers' with self.security_group(name, description) as sg: security_group_id = sg['security_group']['id'] with self.security_group_rule(security_group_id) as sgr: rule = self._build_security_group_rule( sg['security_group']['id'], 'ingress', const.PROTO_NAME_TCP, '22', '22') res = self._create_security_group_rule(self.fmt, rule) self.deserialize(self.fmt, res) self.assertEqual(webob.exc.HTTPConflict.code, res.status_int) self.assertIn(sgr['security_group_rule']['id'], res.json['NeutronError']['message']) def 
test_create_security_group_rule_duplicate_rules_proto_name_num(self): name = 'webservers' description = 'my webservers' with self.security_group(name, description) as sg: security_group_id = sg['security_group']['id'] with self.security_group_rule(security_group_id): rule = self._build_security_group_rule( sg['security_group']['id'], 'ingress', const.PROTO_NAME_TCP, '22', '22') self._create_security_group_rule(self.fmt, rule) rule = self._build_security_group_rule( sg['security_group']['id'], 'ingress', const.PROTO_NUM_TCP, '22', '22') res = self._create_security_group_rule(self.fmt, rule) self.deserialize(self.fmt, res) self.assertEqual(webob.exc.HTTPConflict.code, res.status_int) def test_create_security_group_rule_duplicate_rules_proto_num_name(self): name = 'webservers' description = 'my webservers' with self.security_group(name, description) as sg: security_group_id = sg['security_group']['id'] with self.security_group_rule(security_group_id): rule = self._build_security_group_rule( sg['security_group']['id'], 'ingress', const.PROTO_NUM_UDP, '50', '100') self._create_security_group_rule(self.fmt, rule) rule = self._build_security_group_rule( sg['security_group']['id'], 'ingress', const.PROTO_NAME_UDP, '50', '100') res = self._create_security_group_rule(self.fmt, rule) self.deserialize(self.fmt, res) self.assertEqual(webob.exc.HTTPConflict.code, res.status_int) def test_create_security_group_rule_min_port_greater_max(self): name = 'webservers' description = 'my webservers' with self.security_group(name, description) as sg: security_group_id = sg['security_group']['id'] with self.security_group_rule(security_group_id): for protocol in [const.PROTO_NAME_TCP, const.PROTO_NAME_UDP, const.PROTO_NUM_TCP, const.PROTO_NUM_UDP]: rule = self._build_security_group_rule( sg['security_group']['id'], 'ingress', protocol, '50', '22') res = self._create_security_group_rule(self.fmt, rule) self.deserialize(self.fmt, res) self.assertEqual(webob.exc.HTTPBadRequest.code, res.status_int) def test_create_security_group_rule_ports_but_no_protocol(self): name = 'webservers' description = 'my webservers' with self.security_group(name, description) as sg: security_group_id = sg['security_group']['id'] with self.security_group_rule(security_group_id): rule = self._build_security_group_rule( sg['security_group']['id'], 'ingress', None, '22', '22') res = self._create_security_group_rule(self.fmt, rule) self.deserialize(self.fmt, res) self.assertEqual(webob.exc.HTTPBadRequest.code, res.status_int) def test_create_security_group_rule_port_range_min_only(self): name = 'webservers' description = 'my webservers' with self.security_group(name, description) as sg: security_group_id = sg['security_group']['id'] with self.security_group_rule(security_group_id): rule = self._build_security_group_rule( sg['security_group']['id'], 'ingress', const.PROTO_NAME_TCP, '22', None) res = self._create_security_group_rule(self.fmt, rule) self.deserialize(self.fmt, res) self.assertEqual(webob.exc.HTTPBadRequest.code, res.status_int) def test_create_security_group_rule_port_range_max_only(self): name = 'webservers' description = 'my webservers' with self.security_group(name, description) as sg: security_group_id = sg['security_group']['id'] with self.security_group_rule(security_group_id): rule = self._build_security_group_rule( sg['security_group']['id'], 'ingress', const.PROTO_NAME_TCP, None, '22') res = self._create_security_group_rule(self.fmt, rule) self.deserialize(self.fmt, res) self.assertEqual(webob.exc.HTTPBadRequest.code, 
res.status_int) def test_create_security_group_rule_icmp_type_too_big(self): name = 'webservers' description = 'my webservers' with self.security_group(name, description) as sg: security_group_id = sg['security_group']['id'] with self.security_group_rule(security_group_id): rule = self._build_security_group_rule( sg['security_group']['id'], 'ingress', const.PROTO_NAME_ICMP, '256', None) res = self._create_security_group_rule(self.fmt, rule) self.deserialize(self.fmt, res) self.assertEqual(webob.exc.HTTPBadRequest.code, res.status_int) def test_create_security_group_rule_icmp_code_too_big(self): name = 'webservers' description = 'my webservers' with self.security_group(name, description) as sg: security_group_id = sg['security_group']['id'] with self.security_group_rule(security_group_id): rule = self._build_security_group_rule( sg['security_group']['id'], 'ingress', const.PROTO_NAME_ICMP, '8', '256') res = self._create_security_group_rule(self.fmt, rule) self.deserialize(self.fmt, res) self.assertEqual(webob.exc.HTTPBadRequest.code, res.status_int) def test_create_security_group_rule_icmp_with_code_only(self): name = 'webservers' description = 'my webservers' with self.security_group(name, description) as sg: security_group_id = sg['security_group']['id'] with self.security_group_rule(security_group_id): for code in ['2', '0']: rule = self._build_security_group_rule( sg['security_group']['id'], 'ingress', const.PROTO_NAME_ICMP, None, code) res = self._create_security_group_rule(self.fmt, rule) self.deserialize(self.fmt, res) self.assertEqual(webob.exc.HTTPBadRequest.code, res.status_int) def test_list_ports_security_group(self): with self.network() as n: with self.subnet(n): self._create_port(self.fmt, n['network']['id']) req = self.new_list_request('ports') res = req.get_response(self.api) ports = self.deserialize(self.fmt, res) port = ports['ports'][0] self.assertEqual(len(port[ext_sg.SECURITYGROUPS]), 1) self._delete('ports', port['id']) def test_list_security_group_rules(self): with self.security_group(name='sg') as sg: security_group_id = sg['security_group']['id'] with self.security_group_rule(security_group_id, direction='egress', port_range_min=22, port_range_max=22) as sgr1,\ self.security_group_rule(security_group_id, direction='egress', port_range_min=23, port_range_max=23) as sgr2,\ self.security_group_rule(security_group_id, direction='egress', port_range_min=24, port_range_max=24) as sgr3: # Delete default rules as they would fail the following # assertion at the end. self._delete_default_security_group_egress_rules( security_group_id) q = 'direction=egress&security_group_id=' + security_group_id self._test_list_resources('security-group-rule', [sgr1, sgr2, sgr3], query_params=q) def test_list_security_group_rules_with_sort(self): with self.security_group(name='sg') as sg: security_group_id = sg['security_group']['id'] with self.security_group_rule(security_group_id, direction='egress', port_range_min=22, port_range_max=22) as sgr1,\ self.security_group_rule(security_group_id, direction='egress', port_range_min=23, port_range_max=23) as sgr2,\ self.security_group_rule(security_group_id, direction='egress', port_range_min=24, port_range_max=24) as sgr3: # Delete default rules as they would fail the following # assertion at the end. 
self._delete_default_security_group_egress_rules( security_group_id) q = 'direction=egress&security_group_id=' + security_group_id self._test_list_with_sort('security-group-rule', (sgr3, sgr2, sgr1), [('port_range_max', 'desc')], query_params=q) def test_list_security_group_rules_with_pagination(self): with self.security_group(name='sg') as sg: security_group_id = sg['security_group']['id'] with self.security_group_rule(security_group_id, direction='egress', port_range_min=22, port_range_max=22) as sgr1,\ self.security_group_rule(security_group_id, direction='egress', port_range_min=23, port_range_max=23) as sgr2,\ self.security_group_rule(security_group_id, direction='egress', port_range_min=24, port_range_max=24) as sgr3: # Delete default rules as they would fail the following # assertion at the end. self._delete_default_security_group_egress_rules( security_group_id) q = 'direction=egress&security_group_id=' + security_group_id self._test_list_with_pagination( 'security-group-rule', (sgr3, sgr2, sgr1), ('port_range_max', 'desc'), 2, 2, query_params=q) def test_list_security_group_rules_with_pagination_reverse(self): with self.security_group(name='sg') as sg: security_group_id = sg['security_group']['id'] with self.security_group_rule(security_group_id, direction='egress', port_range_min=22, port_range_max=22) as sgr1,\ self.security_group_rule(security_group_id, direction='egress', port_range_min=23, port_range_max=23) as sgr2,\ self.security_group_rule(security_group_id, direction='egress', port_range_min=24, port_range_max=24) as sgr3: self._test_list_with_pagination_reverse( 'security-group-rule', (sgr3, sgr2, sgr1), ('port_range_max', 'desc'), 2, 2, query_params='direction=egress') def test_create_port_with_multiple_security_groups(self): with self.network() as n: with self.subnet(n): with self.security_group() as sg1: with self.security_group() as sg2: res = self._create_port( self.fmt, n['network']['id'], security_groups=[sg1['security_group']['id'], sg2['security_group']['id']]) port = self.deserialize(self.fmt, res) self.assertEqual(2, len( port['port'][ext_sg.SECURITYGROUPS])) self._delete('ports', port['port']['id']) def test_create_port_with_no_security_groups(self): with self.network() as n: with self.subnet(n): res = self._create_port(self.fmt, n['network']['id'], security_groups=[]) port = self.deserialize(self.fmt, res) self.assertEqual([], port['port'][ext_sg.SECURITYGROUPS]) def test_update_port_with_security_group(self): with self.network() as n: with self.subnet(n): with self.security_group() as sg: res = self._create_port(self.fmt, n['network']['id']) port = self.deserialize(self.fmt, res) data = {'port': {'fixed_ips': port['port']['fixed_ips'], 'name': port['port']['name'], ext_sg.SECURITYGROUPS: [sg['security_group']['id']]}} req = self.new_update_request('ports', data, port['port']['id']) res = self.deserialize(self.fmt, req.get_response(self.api)) self.assertEqual(res['port'][ext_sg.SECURITYGROUPS][0], sg['security_group']['id']) # Test update port without security group data = {'port': {'fixed_ips': port['port']['fixed_ips'], 'name': port['port']['name']}} req = self.new_update_request('ports', data, port['port']['id']) res = self.deserialize(self.fmt, req.get_response(self.api)) self.assertEqual(res['port'][ext_sg.SECURITYGROUPS][0], sg['security_group']['id']) self._delete('ports', port['port']['id']) def test_update_port_with_multiple_security_groups(self): with self.network() as n: with self.subnet(n) as s: with self.port(s) as port: with 
self.security_group() as sg1: with self.security_group() as sg2: data = {'port': {ext_sg.SECURITYGROUPS: [sg1['security_group']['id'], sg2['security_group']['id']]}} req = self.new_update_request( 'ports', data, port['port']['id']) port = self.deserialize( self.fmt, req.get_response(self.api)) self.assertEqual( 2, len(port['port'][ext_sg.SECURITYGROUPS])) def test_update_port_remove_security_group_empty_list(self): with self.network() as n: with self.subnet(n): with self.security_group() as sg: res = self._create_port(self.fmt, n['network']['id'], security_groups=( [sg['security_group']['id']])) port = self.deserialize(self.fmt, res) data = {'port': {'fixed_ips': port['port']['fixed_ips'], 'name': port['port']['name'], 'security_groups': []}} req = self.new_update_request('ports', data, port['port']['id']) res = self.deserialize(self.fmt, req.get_response(self.api)) self.assertEqual([], res['port'].get(ext_sg.SECURITYGROUPS)) self._delete('ports', port['port']['id']) def test_update_port_remove_security_group_none(self): with self.network() as n: with self.subnet(n): with self.security_group() as sg: res = self._create_port(self.fmt, n['network']['id'], security_groups=( [sg['security_group']['id']])) port = self.deserialize(self.fmt, res) data = {'port': {'fixed_ips': port['port']['fixed_ips'], 'name': port['port']['name'], 'security_groups': None}} req = self.new_update_request('ports', data, port['port']['id']) res = self.deserialize(self.fmt, req.get_response(self.api)) self.assertEqual([], res['port'].get(ext_sg.SECURITYGROUPS)) self._delete('ports', port['port']['id']) def test_create_port_with_bad_security_group(self): with self.network() as n: with self.subnet(n): res = self._create_port(self.fmt, n['network']['id'], security_groups=['bad_id']) self.deserialize(self.fmt, res) self.assertEqual(webob.exc.HTTPBadRequest.code, res.status_int) def test_create_delete_security_group_port_in_use(self): with self.network() as n: with self.subnet(n): with self.security_group() as sg: res = self._create_port(self.fmt, n['network']['id'], security_groups=( [sg['security_group']['id']])) port = self.deserialize(self.fmt, res) self.assertEqual(port['port'][ext_sg.SECURITYGROUPS][0], sg['security_group']['id']) # try to delete security group that's in use res = self._delete('security-groups', sg['security_group']['id'], webob.exc.HTTPConflict.code) # delete the blocking port self._delete('ports', port['port']['id']) def test_create_security_group_rule_bulk_native(self): if self._skip_native_bulk: self.skipTest("Plugin does not support native bulk " "security_group_rule create") with self.security_group() as sg: rule1 = self._build_security_group_rule(sg['security_group']['id'], 'ingress', const.PROTO_NAME_TCP, '22', '22', '10.0.0.1/24') rule2 = self._build_security_group_rule(sg['security_group']['id'], 'ingress', const.PROTO_NAME_TCP, '23', '23', '10.0.0.1/24') rules = {'security_group_rules': [rule1['security_group_rule'], rule2['security_group_rule']]} res = self._create_security_group_rule(self.fmt, rules) ret = self.deserialize(self.fmt, res) self.assertEqual(webob.exc.HTTPCreated.code, res.status_int) self.assertEqual(2, len(ret['security_group_rules'])) def test_create_security_group_rule_bulk_emulated(self): real_has_attr = hasattr #ensures the API choose the emulation code path def fakehasattr(item, attr): if attr.endswith('__native_bulk_support'): return False return real_has_attr(item, attr) with mock.patch('six.moves.builtins.hasattr', new=fakehasattr): with self.security_group() as 
sg: rule1 = self._build_security_group_rule( sg['security_group']['id'], 'ingress', const.PROTO_NAME_TCP, '22', '22', '10.0.0.1/24') rule2 = self._build_security_group_rule( sg['security_group']['id'], 'ingress', const.PROTO_NAME_TCP, '23', '23', '10.0.0.1/24') rules = {'security_group_rules': [rule1['security_group_rule'], rule2['security_group_rule']] } res = self._create_security_group_rule(self.fmt, rules) self.deserialize(self.fmt, res) self.assertEqual(webob.exc.HTTPCreated.code, res.status_int) def test_create_security_group_rule_allow_all_ipv4(self): with self.security_group() as sg: rule = {'security_group_id': sg['security_group']['id'], 'direction': 'ingress', 'ethertype': const.IPv4, 'tenant_id': 'test-tenant'} res = self._create_security_group_rule( self.fmt, {'security_group_rule': rule}) rule = self.deserialize(self.fmt, res) self.assertEqual(webob.exc.HTTPCreated.code, res.status_int) def test_create_security_group_rule_allow_all_ipv4_v6_bulk(self): if self._skip_native_bulk: self.skipTest("Plugin does not support native bulk " "security_group_rule create") with self.security_group() as sg: rule_v4 = {'security_group_id': sg['security_group']['id'], 'direction': 'ingress', 'ethertype': const.IPv4, 'tenant_id': 'test-tenant'} rule_v6 = {'security_group_id': sg['security_group']['id'], 'direction': 'ingress', 'ethertype': const.IPv6, 'tenant_id': 'test-tenant'} rules = {'security_group_rules': [rule_v4, rule_v6]} res = self._create_security_group_rule(self.fmt, rules) self.deserialize(self.fmt, res) self.assertEqual(webob.exc.HTTPCreated.code, res.status_int) def test_create_security_group_rule_duplicate_rule_in_post(self): if self._skip_native_bulk: self.skipTest("Plugin does not support native bulk " "security_group_rule create") with self.security_group() as sg: rule = self._build_security_group_rule(sg['security_group']['id'], 'ingress', const.PROTO_NAME_TCP, '22', '22', '10.0.0.1/24') rules = {'security_group_rules': [rule['security_group_rule'], rule['security_group_rule']]} res = self._create_security_group_rule(self.fmt, rules) rule = self.deserialize(self.fmt, res) self.assertEqual(webob.exc.HTTPConflict.code, res.status_int) def test_create_security_group_rule_duplicate_rule_in_post_emulated(self): real_has_attr = hasattr #ensures the API choose the emulation code path def fakehasattr(item, attr): if attr.endswith('__native_bulk_support'): return False return real_has_attr(item, attr) with mock.patch('six.moves.builtins.hasattr', new=fakehasattr): with self.security_group() as sg: rule = self._build_security_group_rule( sg['security_group']['id'], 'ingress', const.PROTO_NAME_TCP, '22', '22', '10.0.0.1/24') rules = {'security_group_rules': [rule['security_group_rule'], rule['security_group_rule']]} res = self._create_security_group_rule(self.fmt, rules) rule = self.deserialize(self.fmt, res) self.assertEqual(webob.exc.HTTPConflict.code, res.status_int) def test_create_security_group_rule_duplicate_rule_db(self): if self._skip_native_bulk: self.skipTest("Plugin does not support native bulk " "security_group_rule create") with self.security_group() as sg: rule = self._build_security_group_rule(sg['security_group']['id'], 'ingress', const.PROTO_NAME_TCP, '22', '22', '10.0.0.1/24') rules = {'security_group_rules': [rule]} self._create_security_group_rule(self.fmt, rules) res = self._create_security_group_rule(self.fmt, rules) rule = self.deserialize(self.fmt, res) self.assertEqual(webob.exc.HTTPConflict.code, res.status_int) def 
test_create_security_group_rule_duplicate_rule_db_emulated(self):
        real_has_attr = hasattr

        # ensures the API chooses the emulation code path
        def fakehasattr(item, attr):
            if attr.endswith('__native_bulk_support'):
                return False
            return real_has_attr(item, attr)

        with mock.patch('six.moves.builtins.hasattr',
                        new=fakehasattr):
            with self.security_group() as sg:
                rule = self._build_security_group_rule(
                    sg['security_group']['id'], 'ingress',
                    const.PROTO_NAME_TCP, '22', '22', '10.0.0.1/24')
                rules = {'security_group_rules': [rule]}
                self._create_security_group_rule(self.fmt, rules)
                res = self._create_security_group_rule(self.fmt, rule)
                self.deserialize(self.fmt, res)
                self.assertEqual(webob.exc.HTTPConflict.code,
                                 res.status_int)

    def test_create_security_group_rule_different_security_group_ids(self):
        if self._skip_native_bulk:
            self.skipTest("Plugin does not support native bulk "
                          "security_group_rule create")
        with self.security_group() as sg1:
            with self.security_group() as sg2:
                rule1 = self._build_security_group_rule(
                    sg1['security_group']['id'], 'ingress',
                    const.PROTO_NAME_TCP, '22', '22', '10.0.0.1/24')
                rule2 = self._build_security_group_rule(
                    sg2['security_group']['id'], 'ingress',
                    const.PROTO_NAME_TCP, '23', '23', '10.0.0.1/24')

                rules = {'security_group_rules':
                         [rule1['security_group_rule'],
                          rule2['security_group_rule']]}
                res = self._create_security_group_rule(self.fmt, rules)
                self.deserialize(self.fmt, res)
                self.assertEqual(webob.exc.HTTPBadRequest.code,
                                 res.status_int)

    def test_create_security_group_rule_with_invalid_ethertype(self):
        security_group_id = "4cd70774-cc67-4a87-9b39-7d1db38eb087"
        direction = "ingress"
        remote_ip_prefix = "10.0.0.0/24"
        protocol = const.PROTO_NAME_TCP
        port_range_min = 22
        port_range_max = 22
        remote_group_id = "9cd70774-cc67-4a87-9b39-7d1db38eb087"
        rule = self._build_security_group_rule(security_group_id, direction,
                                               protocol, port_range_min,
                                               port_range_max,
                                               remote_ip_prefix,
                                               remote_group_id,
                                               ethertype='IPv5')
        res = self._create_security_group_rule(self.fmt, rule)
        self.deserialize(self.fmt, res)
        self.assertEqual(webob.exc.HTTPBadRequest.code, res.status_int)

    def test_create_security_group_rule_with_invalid_protocol(self):
        security_group_id = "4cd70774-cc67-4a87-9b39-7d1db38eb087"
        direction = "ingress"
        remote_ip_prefix = "10.0.0.0/24"
        protocol = 'tcp/ip'
        port_range_min = 22
        port_range_max = 22
        remote_group_id = "9cd70774-cc67-4a87-9b39-7d1db38eb087"
        rule = self._build_security_group_rule(security_group_id, direction,
                                               protocol, port_range_min,
                                               port_range_max,
                                               remote_ip_prefix,
                                               remote_group_id)
        res = self._create_security_group_rule(self.fmt, rule)
        self.deserialize(self.fmt, res)
        self.assertEqual(webob.exc.HTTPBadRequest.code, res.status_int)

    def test_create_security_group_rule_with_invalid_tcp_or_udp_protocol(
            self):
        security_group_id = "4cd70774-cc67-4a87-9b39-7d1db38eb087"
        direction = "ingress"
        remote_ip_prefix = "10.0.0.0/24"
        protocol = 'tcp'
        port_range_min = 0
        port_range_max = 80
        remote_group_id = "9cd70774-cc67-4a87-9b39-7d1db38eb087"
        rule = self._build_security_group_rule(security_group_id, direction,
                                               protocol, port_range_min,
                                               port_range_max,
                                               remote_ip_prefix,
                                               remote_group_id)
        res = self._create_security_group_rule(self.fmt, rule)
        self.deserialize(self.fmt, res)
        self.assertEqual(webob.exc.HTTPBadRequest.code, res.status_int)

    def test_create_port_with_non_uuid(self):
        with self.network() as n:
            with self.subnet(n):
                res = self._create_port(self.fmt, n['network']['id'],
                                        security_groups=['not_valid'])
                self.deserialize(self.fmt, res)
                self.assertEqual(webob.exc.HTTPBadRequest.code,
                                 res.status_int)
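    # Hedged note on the next test: it deliberately bypasses the REST layer
    # and calls the plugin directly, which is what lets the caller pick the
    # rule UUID. A minimal sketch of that direct call is
    #
    #     rule['security_group_rule']['id'] = specified_id
    #     result = self.plugin.create_security_group_rule(ctx, rule)
    #     assert result['id'] == specified_id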
    def test_create_security_group_rule_with_specific_id(self):
        neutron_context = context.Context('', 'test-tenant')
        specified_id = "4cd70774-cc67-4a87-9b39-7d1db38eb087"
        with self.security_group() as sg:
            rule = self._build_security_group_rule(
                sg['security_group']['id'], 'ingress', const.PROTO_NUM_TCP)
            rule['security_group_rule'].update({'id': specified_id,
                                                'port_range_min': None,
                                                'port_range_max': None,
                                                'remote_ip_prefix': None,
                                                'remote_group_id': None})
            result = self.plugin.create_security_group_rule(
                neutron_context, rule)
            self.assertEqual(specified_id, result['id'])


class TestConvertIPPrefixToCIDR(base.BaseTestCase):

    def test_convert_bad_ip_prefix_to_cidr(self):
        for val in ['bad_ip', 256, "2001:db8:a::123/129"]:
            self.assertRaises(n_exc.InvalidCIDR,
                              ext_sg.convert_ip_prefix_to_cidr, val)
        self.assertIsNone(ext_sg.convert_ip_prefix_to_cidr(None))

    def test_convert_ip_prefix_no_netmask_to_cidr(self):
        addr = {'10.1.2.3': '32', 'fe80::2677:3ff:fe7d:4c': '128'}
        for k, v in six.iteritems(addr):
            self.assertEqual(ext_sg.convert_ip_prefix_to_cidr(k),
                             '%s/%s' % (k, v))

    def test_convert_ip_prefix_with_netmask_to_cidr(self):
        addresses = ['10.1.0.0/16', '10.1.2.3/32', '2001:db8:1234::/48']
        for addr in addresses:
            self.assertEqual(addr, ext_sg.convert_ip_prefix_to_cidr(addr))


class TestConvertProtocol(base.BaseTestCase):
    def test_convert_numeric_protocol(self):
        self.assertIsInstance(ext_sg.convert_protocol('2'), str)

    def test_convert_bad_protocol(self):
        for val in ['bad', '256', '-1']:
            self.assertRaises(ext_sg.SecurityGroupRuleInvalidProtocol,
                              ext_sg.convert_protocol, val)

    def test_convert_numeric_protocol_to_string(self):
        self.assertIsInstance(ext_sg.convert_protocol(2), str)


class TestConvertEtherType(base.BaseTestCase):
    def test_convert_unsupported_ethertype(self):
        for val in ['ip', 'ip4', 'ip6', '']:
            self.assertRaises(ext_sg.SecurityGroupRuleInvalidEtherType,
                              ext_sg.convert_ethertype_to_case_insensitive,
                              val)
neutron-8.4.0/neutron/tests/unit/extensions/test_flavors.py
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#

import copy

import fixtures
import mock
from oslo_config import cfg
from oslo_utils import uuidutils
from webob import exc

from neutron.api.v2 import attributes as attr
from neutron import context
from neutron.db import api as dbapi
from neutron.db import flavors_db
from neutron.db import servicetype_db
from neutron.extensions import flavors
from neutron.plugins.common import constants
from neutron.services.flavors import flavors_plugin
from neutron.services import provider_configuration as provconf
from neutron.tests import base
from neutron.tests.unit.api.v2 import test_base
from neutron.tests.unit.db import test_db_base_plugin_v2
from neutron.tests.unit.extensions import base as extension

_uuid = uuidutils.generate_uuid
_get_path = test_base._get_path
_driver = ('neutron.tests.unit.extensions.test_flavors.'
'DummyServiceDriver') _provider = 'dummy' _long_name = 'x' * (attr.NAME_MAX_LEN + 1) _long_description = 'x' * (attr.LONG_DESCRIPTION_MAX_LEN + 1) class FlavorExtensionTestCase(extension.ExtensionTestCase): def setUp(self): super(FlavorExtensionTestCase, self).setUp() self._setUpExtension( 'neutron.services.flavors.flavors_plugin.FlavorsPlugin', constants.FLAVORS, flavors.RESOURCE_ATTRIBUTE_MAP, flavors.Flavors, '', supported_extension_aliases='flavors') def test_create_flavor(self): tenant_id = uuidutils.generate_uuid() # Use service_type FLAVORS since plugin must be loaded to validate data = {'flavor': {'name': 'GOLD', 'service_type': constants.FLAVORS, 'description': 'the best flavor', 'tenant_id': tenant_id, 'enabled': True}} expected = copy.deepcopy(data) expected['flavor']['service_profiles'] = [] instance = self.plugin.return_value instance.create_flavor.return_value = expected['flavor'] res = self.api.post(_get_path('flavors', fmt=self.fmt), self.serialize(data), content_type='application/%s' % self.fmt) instance.create_flavor.assert_called_with(mock.ANY, flavor=expected) res = self.deserialize(res) self.assertIn('flavor', res) self.assertEqual(expected, res) def test_create_flavor_invalid_service_type(self): tenant_id = uuidutils.generate_uuid() data = {'flavor': {'name': 'GOLD', 'service_type': 'BROKEN', 'description': 'the best flavor', 'tenant_id': tenant_id, 'enabled': True}} self.api.post(_get_path('flavors', fmt=self.fmt), self.serialize(data), content_type='application/%s' % self.fmt, status=exc.HTTPBadRequest.code) def test_create_flavor_too_long_name(self): tenant_id = uuidutils.generate_uuid() data = {'flavor': {'name': _long_name, 'service_type': constants.FLAVORS, 'description': 'the best flavor', 'tenant_id': tenant_id, 'enabled': True}} self.api.post(_get_path('flavors', fmt=self.fmt), self.serialize(data), content_type='application/%s' % self.fmt, status=exc.HTTPBadRequest.code) def test_create_flavor_too_long_description(self): tenant_id = uuidutils.generate_uuid() data = {'flavor': {'name': 'GOLD', 'service_type': constants.FLAVORS, 'description': _long_description, 'tenant_id': tenant_id, 'enabled': True}} self.api.post(_get_path('flavors', fmt=self.fmt), self.serialize(data), content_type='application/%s' % self.fmt, status=exc.HTTPBadRequest.code) def test_create_flavor_invalid_enabled(self): tenant_id = uuidutils.generate_uuid() data = {'flavor': {'name': 'GOLD', 'service_type': constants.FLAVORS, 'description': 'the best flavor', 'tenant_id': tenant_id, 'enabled': 'BROKEN'}} self.api.post(_get_path('flavors', fmt=self.fmt), self.serialize(data), content_type='application/%s' % self.fmt, status=exc.HTTPBadRequest.code) def test_update_flavor(self): flavor_id = 'fake_id' data = {'flavor': {'name': 'GOLD', 'description': 'the best flavor', 'enabled': True}} expected = copy.copy(data) expected['flavor']['service_profiles'] = [] instance = self.plugin.return_value instance.update_flavor.return_value = expected['flavor'] res = self.api.put(_get_path('flavors', id=flavor_id, fmt=self.fmt), self.serialize(data), content_type='application/%s' % self.fmt) instance.update_flavor.assert_called_with(mock.ANY, flavor_id, flavor=expected) res = self.deserialize(res) self.assertIn('flavor', res) self.assertEqual(expected, res) def test_update_flavor_too_long_name(self): flavor_id = 'fake_id' data = {'flavor': {'name': _long_name, 'description': 'the best flavor', 'enabled': True}} self.api.put(_get_path('flavors', id=flavor_id, fmt=self.fmt), self.serialize(data),
content_type='application/%s' % self.fmt, status=exc.HTTPBadRequest.code) def test_update_flavor_too_long_description(self): flavor_id = 'fake_id' data = {'flavor': {'name': 'GOLD', 'description': _long_description, 'enabled': True}} self.api.put(_get_path('flavors', id=flavor_id, fmt=self.fmt), self.serialize(data), content_type='application/%s' % self.fmt, status=exc.HTTPBadRequest.code) def test_update_flavor_invalid_enabled(self): flavor_id = 'fake_id' data = {'flavor': {'name': 'GOLD', 'description': 'the best flavor', 'enabled': 'BROKEN'}} self.api.put(_get_path('flavors', id=flavor_id, fmt=self.fmt), self.serialize(data), content_type='application/%s' % self.fmt, status=exc.HTTPBadRequest.code) def test_delete_flavor(self): flavor_id = 'fake_id' instance = self.plugin.return_value self.api.delete(_get_path('flavors', id=flavor_id, fmt=self.fmt), content_type='application/%s' % self.fmt) instance.delete_flavor.assert_called_with(mock.ANY, flavor_id) def test_show_flavor(self): flavor_id = 'fake_id' expected = {'flavor': {'id': flavor_id, 'name': 'GOLD', 'description': 'the best flavor', 'enabled': True, 'service_profiles': ['profile-1']}} instance = self.plugin.return_value instance.get_flavor.return_value = expected['flavor'] res = self.api.get(_get_path('flavors', id=flavor_id, fmt=self.fmt)) instance.get_flavor.assert_called_with(mock.ANY, flavor_id, fields=mock.ANY) res = self.deserialize(res) self.assertEqual(expected, res) def test_get_flavors(self): data = {'flavors': [{'id': 'id1', 'name': 'GOLD', 'description': 'the best flavor', 'enabled': True, 'service_profiles': ['profile-1']}, {'id': 'id2', 'name': 'GOLD', 'description': 'the best flavor', 'enabled': True, 'service_profiles': ['profile-2', 'profile-1']}]} instance = self.plugin.return_value instance.get_flavors.return_value = data['flavors'] res = self.api.get(_get_path('flavors', fmt=self.fmt)) instance.get_flavors.assert_called_with(mock.ANY, fields=mock.ANY, filters=mock.ANY) res = self.deserialize(res) self.assertEqual(data, res) def test_create_service_profile(self): tenant_id = uuidutils.generate_uuid() expected = {'service_profile': {'description': 'the best sp', 'driver': '', 'tenant_id': tenant_id, 'enabled': True, 'metainfo': '{"data": "value"}'}} instance = self.plugin.return_value instance.create_service_profile.return_value = ( expected['service_profile']) res = self.api.post(_get_path('service_profiles', fmt=self.fmt), self.serialize(expected), content_type='application/%s' % self.fmt) instance.create_service_profile.assert_called_with( mock.ANY, service_profile=expected) res = self.deserialize(res) self.assertIn('service_profile', res) self.assertEqual(expected, res) def test_create_service_profile_too_long_description(self): tenant_id = uuidutils.generate_uuid() expected = {'service_profile': {'description': _long_description, 'driver': '', 'tenant_id': tenant_id, 'enabled': True, 'metainfo': '{"data": "value"}'}} self.api.post(_get_path('service_profiles', fmt=self.fmt), self.serialize(expected), content_type='application/%s' % self.fmt, status=exc.HTTPBadRequest.code) def test_create_service_profile_too_long_driver(self): tenant_id = uuidutils.generate_uuid() expected = {'service_profile': {'description': 'the best sp', 'driver': _long_description, 'tenant_id': tenant_id, 'enabled': True, 'metainfo': '{"data": "value"}'}} self.api.post(_get_path('service_profiles', fmt=self.fmt), self.serialize(expected), content_type='application/%s' % self.fmt, status=exc.HTTPBadRequest.code) def
test_create_service_profile_invalid_enabled(self): tenant_id = uuidutils.generate_uuid() expected = {'service_profile': {'description': 'the best sp', 'driver': '', 'tenant_id': tenant_id, 'enabled': 'BROKEN', 'metainfo': '{"data": "value"}'}} self.api.post(_get_path('service_profiles', fmt=self.fmt), self.serialize(expected), content_type='application/%s' % self.fmt, status=exc.HTTPBadRequest.code) def test_update_service_profile(self): sp_id = "fake_id" expected = {'service_profile': {'description': 'the best sp', 'enabled': False, 'metainfo': '{"data1": "value3"}'}} instance = self.plugin.return_value instance.update_service_profile.return_value = ( expected['service_profile']) res = self.api.put(_get_path('service_profiles', id=sp_id, fmt=self.fmt), self.serialize(expected), content_type='application/%s' % self.fmt) instance.update_service_profile.assert_called_with( mock.ANY, sp_id, service_profile=expected) res = self.deserialize(res) self.assertIn('service_profile', res) self.assertEqual(expected, res) def test_update_service_profile_too_long_description(self): sp_id = "fake_id" expected = {'service_profile': {'description': _long_description, 'enabled': True, 'metainfo': '{"data1": "value3"}'}} self.api.put(_get_path('service_profiles', id=sp_id, fmt=self.fmt), self.serialize(expected), content_type='application/%s' % self.fmt, status=exc.HTTPBadRequest.code) def test_update_service_profile_invalid_enabled(self): sp_id = "fake_id" expected = {'service_profile': {'description': 'the best sp', 'enabled': 'BROKEN', 'metainfo': '{"data1": "value3"}'}} self.api.put(_get_path('service_profiles', id=sp_id, fmt=self.fmt), self.serialize(expected), content_type='application/%s' % self.fmt, status=exc.HTTPBadRequest.code) def test_delete_service_profile(self): sp_id = 'fake_id' instance = self.plugin.return_value self.api.delete(_get_path('service_profiles', id=sp_id, fmt=self.fmt), content_type='application/%s' % self.fmt) instance.delete_service_profile.assert_called_with(mock.ANY, sp_id) def test_show_service_profile(self): sp_id = 'fake_id' expected = {'service_profile': {'id': 'id1', 'driver': _driver, 'description': 'desc', 'metainfo': '{}', 'enabled': True}} instance = self.plugin.return_value instance.get_service_profile.return_value = ( expected['service_profile']) res = self.api.get(_get_path('service_profiles', id=sp_id, fmt=self.fmt)) instance.get_service_profile.assert_called_with(mock.ANY, sp_id, fields=mock.ANY) res = self.deserialize(res) self.assertEqual(expected, res) def test_get_service_profiles(self): expected = {'service_profiles': [{'id': 'id1', 'driver': _driver, 'description': 'desc', 'metainfo': '{}', 'enabled': True}, {'id': 'id2', 'driver': _driver, 'description': 'desc', 'metainfo': '{}', 'enabled': True}]} instance = self.plugin.return_value instance.get_service_profiles.return_value = ( expected['service_profiles']) res = self.api.get(_get_path('service_profiles', fmt=self.fmt)) instance.get_service_profiles.assert_called_with(mock.ANY, fields=mock.ANY, filters=mock.ANY) res = self.deserialize(res) self.assertEqual(expected, res) def test_associate_service_profile_with_flavor(self): tenant_id = uuidutils.generate_uuid() expected = {'service_profile': {'id': _uuid(), 'tenant_id': tenant_id}} instance = self.plugin.return_value instance.create_flavor_service_profile.return_value = ( expected['service_profile']) res = self.api.post('/flavors/fl_id/service_profiles', self.serialize(expected), content_type='application/%s' % self.fmt)
instance.create_flavor_service_profile.assert_called_with( mock.ANY, service_profile=expected, flavor_id='fl_id') res = self.deserialize(res) self.assertEqual(expected, res) def test_disassociate_service_profile_with_flavor(self): instance = self.plugin.return_value instance.delete_flavor_service_profile.return_value = None self.api.delete('/flavors/fl_id/service_profiles/%s' % 'fake_spid', content_type='application/%s' % self.fmt) instance.delete_flavor_service_profile.assert_called_with( mock.ANY, 'fake_spid', flavor_id='fl_id') def test_update_association_error(self): """Confirm that update is not permitted with user error.""" new_id = uuidutils.generate_uuid() data = {'service_profile': {'id': new_id}} self.api.put('/flavors/fl_id/service_profiles/%s' % 'fake_spid', self.serialize(data), content_type='application/%s' % self.fmt, status=exc.HTTPBadRequest.code) class DummyCorePlugin(object): pass class DummyServicePlugin(object): def driver_loaded(self, driver, service_profile): pass def get_plugin_type(self): return constants.DUMMY def get_plugin_description(self): return "Dummy service plugin, aware of flavors" class DummyServiceDriver(object): @staticmethod def get_service_type(): return constants.DUMMY def __init__(self, plugin): pass class FlavorPluginTestCase(test_db_base_plugin_v2.NeutronDbPluginV2TestCase, base.PluginFixture): def setUp(self): super(FlavorPluginTestCase, self).setUp() self.config_parse() cfg.CONF.set_override( 'core_plugin', 'neutron.tests.unit.extensions.test_flavors.DummyCorePlugin') cfg.CONF.set_override( 'service_plugins', ['neutron.tests.unit.extensions.test_flavors.DummyServicePlugin']) self.useFixture( fixtures.MonkeyPatch('neutron.manager.NeutronManager._instance', None)) self.plugin = flavors_plugin.FlavorsPlugin() self.ctx = context.get_admin_context() providers = [DummyServiceDriver.get_service_type() + ":" + _provider + ":" + _driver] self.service_manager = servicetype_db.ServiceTypeManager.get_instance() self.service_providers = mock.patch.object( provconf.NeutronModule, 'service_providers').start() self.service_providers.return_value = providers for provider in providers: self.service_manager.add_provider_configuration( provider.split(':')[0], provconf.ProviderConfiguration()) dbapi.get_engine() def _create_flavor(self, description=None): flavor = {'flavor': {'name': 'GOLD', 'service_type': constants.DUMMY, 'description': description or 'the best flavor', 'enabled': True}} return self.plugin.create_flavor(self.ctx, flavor), flavor def test_create_flavor(self): self._create_flavor() res = self.ctx.session.query(flavors_db.Flavor).all() self.assertEqual(1, len(res)) self.assertEqual('GOLD', res[0]['name']) self.assertEqual(constants.DUMMY, res[0]['service_type']) def test_update_flavor(self): fl, flavor = self._create_flavor() flavor = {'flavor': {'name': 'Silver', 'enabled': False}} self.plugin.update_flavor(self.ctx, fl['id'], flavor) res = (self.ctx.session.query(flavors_db.Flavor).
filter_by(id=fl['id']).one()) self.assertEqual('Silver', res['name']) self.assertFalse(res['enabled']) def test_delete_flavor(self): fl, data = self._create_flavor() self.plugin.delete_flavor(self.ctx, fl['id']) res = (self.ctx.session.query(flavors_db.Flavor).all()) self.assertFalse(res) def test_show_flavor(self): fl, data = self._create_flavor() show_fl = self.plugin.get_flavor(self.ctx, fl['id']) self.assertEqual(fl, show_fl) def test_get_flavors(self): fl, flavor = self._create_flavor() flavor['flavor']['name'] = 'SILVER' self.plugin.create_flavor(self.ctx, flavor) show_fl = self.plugin.get_flavors(self.ctx) self.assertEqual(2, len(show_fl)) def _create_service_profile(self, description=None): data = {'service_profile': {'description': description or 'the best sp', 'driver': _driver, 'enabled': True, 'metainfo': '{"data": "value"}'}} sp = self.plugin.create_service_profile(self.ctx, data) return sp, data def test_create_service_profile(self): sp, data = self._create_service_profile() res = (self.ctx.session.query(flavors_db.ServiceProfile). filter_by(id=sp['id']).one()) self.assertEqual(data['service_profile']['driver'], res['driver']) self.assertEqual(data['service_profile']['metainfo'], res['metainfo']) def test_create_service_profile_empty_driver(self): data = {'service_profile': {'description': 'the best sp', 'driver': '', 'enabled': True, 'metainfo': '{"data": "value"}'}} sp = self.plugin.create_service_profile(self.ctx, data) res = (self.ctx.session.query(flavors_db.ServiceProfile). filter_by(id=sp['id']).one()) self.assertEqual(data['service_profile']['driver'], res['driver']) self.assertEqual(data['service_profile']['metainfo'], res['metainfo']) def test_create_service_profile_invalid_driver(self): data = {'service_profile': {'description': 'the best sp', 'driver': "Broken", 'enabled': True, 'metainfo': '{"data": "value"}'}} self.assertRaises(flavors.ServiceProfileDriverNotFound, self.plugin.create_service_profile, self.ctx, data) def test_create_service_profile_invalid_empty(self): data = {'service_profile': {'description': '', 'driver': '', 'enabled': True, 'metainfo': ''}} self.assertRaises(flavors.ServiceProfileEmpty, self.plugin.create_service_profile, self.ctx, data) def test_update_service_profile(self): sp, data = self._create_service_profile() data['service_profile']['metainfo'] = '{"data": "value1"}' sp = self.plugin.update_service_profile(self.ctx, sp['id'], data) res = (self.ctx.session.query(flavors_db.ServiceProfile). filter_by(id=sp['id']).one()) self.assertEqual(data['service_profile']['metainfo'], res['metainfo']) def test_delete_service_profile(self): sp, data = self._create_service_profile() self.plugin.delete_service_profile(self.ctx, sp['id']) res = self.ctx.session.query(flavors_db.ServiceProfile).all() self.assertFalse(res) def test_show_service_profile(self): sp, data = self._create_service_profile() sp_show = self.plugin.get_service_profile(self.ctx, sp['id']) self.assertEqual(sp, sp_show) def test_get_service_profiles(self): self._create_service_profile() self._create_service_profile(description='another sp') self.assertEqual(2, len(self.plugin.get_service_profiles(self.ctx))) def test_associate_service_profile_with_flavor(self): sp, data = self._create_service_profile() fl, data = self._create_flavor() self.plugin.create_flavor_service_profile( self.ctx, {'service_profile': {'id': sp['id']}}, fl['id']) binding = ( self.ctx.session.query(flavors_db.FlavorServiceProfileBinding). 
first()) self.assertEqual(fl['id'], binding['flavor_id']) self.assertEqual(sp['id'], binding['service_profile_id']) res = self.plugin.get_flavor(self.ctx, fl['id']) self.assertEqual(1, len(res['service_profiles'])) self.assertEqual(sp['id'], res['service_profiles'][0]) res = self.plugin.get_service_profile(self.ctx, sp['id']) self.assertEqual(1, len(res['flavors'])) self.assertEqual(fl['id'], res['flavors'][0]) def test_autodelete_flavor_associations(self): sp, data = self._create_service_profile() fl, data = self._create_flavor() self.plugin.create_flavor_service_profile( self.ctx, {'service_profile': {'id': sp['id']}}, fl['id']) self.plugin.delete_flavor(self.ctx, fl['id']) binding = ( self.ctx.session.query(flavors_db.FlavorServiceProfileBinding). first()) self.assertIsNone(binding) def test_associate_service_profile_with_flavor_exists(self): sp, data = self._create_service_profile() fl, data = self._create_flavor() self.plugin.create_flavor_service_profile( self.ctx, {'service_profile': {'id': sp['id']}}, fl['id']) self.assertRaises(flavors.FlavorServiceProfileBindingExists, self.plugin.create_flavor_service_profile, self.ctx, {'service_profile': {'id': sp['id']}}, fl['id']) def test_disassociate_service_profile_with_flavor(self): sp, data = self._create_service_profile() fl, data = self._create_flavor() self.plugin.create_flavor_service_profile( self.ctx, {'service_profile': {'id': sp['id']}}, fl['id']) self.plugin.delete_flavor_service_profile( self.ctx, sp['id'], fl['id']) binding = ( self.ctx.session.query(flavors_db.FlavorServiceProfileBinding). first()) self.assertIsNone(binding) self.assertRaises( flavors.FlavorServiceProfileBindingNotFound, self.plugin.delete_flavor_service_profile, self.ctx, sp['id'], fl['id']) def test_delete_service_profile_in_use(self): sp, data = self._create_service_profile() fl, data = self._create_flavor() self.plugin.create_flavor_service_profile( self.ctx, {'service_profile': {'id': sp['id']}}, fl['id']) self.assertRaises( flavors.ServiceProfileInUse, self.plugin.delete_service_profile, self.ctx, sp['id']) def test_get_flavor_next_provider_no_binding(self): fl, data = self._create_flavor() self.assertRaises( flavors.FlavorServiceProfileBindingNotFound, self.plugin.get_flavor_next_provider, self.ctx, fl['id']) def test_get_flavor_next_provider_disabled(self): data = {'service_profile': {'description': 'the best sp', 'driver': _driver, 'enabled': False, 'metainfo': '{"data": "value"}'}} sp = self.plugin.create_service_profile(self.ctx, data) fl, data = self._create_flavor() self.plugin.create_flavor_service_profile( self.ctx, {'service_profile': {'id': sp['id']}}, fl['id']) self.assertRaises( flavors.ServiceProfileDisabled, self.plugin.get_flavor_next_provider, self.ctx, fl['id']) def test_get_flavor_next_provider_no_driver(self): data = {'service_profile': {'description': 'the best sp', 'driver': '', 'enabled': True, 'metainfo': '{"data": "value"}'}} sp = self.plugin.create_service_profile(self.ctx, data) fl, data = self._create_flavor() self.plugin.create_flavor_service_profile( self.ctx, {'service_profile': {'id': sp['id']}}, fl['id']) self.assertRaises( flavors.ServiceProfileDriverNotFound, self.plugin.get_flavor_next_provider, self.ctx, fl['id']) def test_get_flavor_next_provider(self): sp, data = self._create_service_profile() fl, data = self._create_flavor() self.plugin.create_flavor_service_profile( self.ctx, {'service_profile': {'id': sp['id']}}, fl['id']) providers = self.plugin.get_flavor_next_provider( self.ctx, fl['id']) 
self.assertEqual(_provider, providers[0].get('provider', None)) neutron-8.4.0/neutron/tests/unit/extensions/test_default_subnetpools.py0000664000567000056710000002152413044372760030074 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import netaddr from oslo_config import cfg import webob.exc from neutron.common import constants from neutron.db import db_base_plugin_v2 from neutron.extensions import default_subnetpools from neutron.tests.unit.db import test_db_base_plugin_v2 class DefaultSubnetpoolsExtensionManager(object): def get_resources(self): return [] def get_actions(self): return [] def get_request_extensions(self): return [] def get_extended_resources(self, version): return default_subnetpools.get_extended_resources(version) class DefaultSubnetpoolsExtensionTestPlugin( db_base_plugin_v2.NeutronDbPluginV2): """Test plugin to mixin the default subnet pools extension. """ supported_extension_aliases = ["default-subnetpools", "subnet_allocation"] class DefaultSubnetpoolsExtensionTestCase( test_db_base_plugin_v2.NeutronDbPluginV2TestCase): """Test API extension default_subnetpools attributes. """ def setUp(self): plugin = ('neutron.tests.unit.extensions.test_default_subnetpools.' + 'DefaultSubnetpoolsExtensionTestPlugin') ext_mgr = DefaultSubnetpoolsExtensionManager() super(DefaultSubnetpoolsExtensionTestCase, self).setUp(plugin=plugin, ext_mgr=ext_mgr) def test_create_subnet_only_ip_version_v4(self): with self.network() as network: tenant_id = network['network']['tenant_id'] subnetpool_prefix = '10.0.0.0/8' with self.subnetpool(prefixes=[subnetpool_prefix], admin=True, name="My subnet pool", tenant_id=tenant_id, min_prefixlen='25', is_default=True) as subnetpool: subnetpool_id = subnetpool['subnetpool']['id'] data = {'subnet': {'network_id': network['network']['id'], 'ip_version': '4', 'prefixlen': '27', 'tenant_id': tenant_id, 'use_default_subnetpool': True}} subnet_req = self.new_create_request('subnets', data) res = subnet_req.get_response(self.api) subnet = self.deserialize(self.fmt, res)['subnet'] ip_net = netaddr.IPNetwork(subnet['cidr']) self.assertIn(ip_net, netaddr.IPNetwork(subnetpool_prefix)) self.assertEqual(27, ip_net.prefixlen) self.assertEqual(subnetpool_id, subnet['subnetpool_id']) def test_create_subnet_only_ip_version_v4_old(self): # TODO(john-davidge): Remove after Mitaka release. 
with self.network() as network: tenant_id = network['network']['tenant_id'] subnetpool_prefix = '10.0.0.0/8' with self.subnetpool(prefixes=[subnetpool_prefix], admin=False, name="My subnet pool", tenant_id=tenant_id, min_prefixlen='25') as subnetpool: subnetpool_id = subnetpool['subnetpool']['id'] cfg.CONF.set_override('default_ipv4_subnet_pool', subnetpool_id) data = {'subnet': {'network_id': network['network']['id'], 'ip_version': '4', 'prefixlen': '27', 'tenant_id': tenant_id, 'use_default_subnetpool': True}} subnet_req = self.new_create_request('subnets', data) res = subnet_req.get_response(self.api) subnet = self.deserialize(self.fmt, res)['subnet'] ip_net = netaddr.IPNetwork(subnet['cidr']) self.assertIn(ip_net, netaddr.IPNetwork(subnetpool_prefix)) self.assertEqual(27, ip_net.prefixlen) self.assertEqual(subnetpool_id, subnet['subnetpool_id']) def test_create_subnet_only_ip_version_v6(self): # this test mirrors its v4 counterpart with self.network() as network: tenant_id = network['network']['tenant_id'] subnetpool_prefix = '2000::/56' with self.subnetpool(prefixes=[subnetpool_prefix], admin=True, name="My ipv6 subnet pool", tenant_id=tenant_id, min_prefixlen='64', is_default=True) as subnetpool: subnetpool_id = subnetpool['subnetpool']['id'] cfg.CONF.set_override('ipv6_pd_enabled', False) data = {'subnet': {'network_id': network['network']['id'], 'ip_version': '6', 'tenant_id': tenant_id, 'use_default_subnetpool': True}} subnet_req = self.new_create_request('subnets', data) res = subnet_req.get_response(self.api) subnet = self.deserialize(self.fmt, res)['subnet'] self.assertEqual(subnetpool_id, subnet['subnetpool_id']) ip_net = netaddr.IPNetwork(subnet['cidr']) self.assertIn(ip_net, netaddr.IPNetwork(subnetpool_prefix)) self.assertEqual(64, ip_net.prefixlen) def test_create_subnet_only_ip_version_v6_old(self): # TODO(john-davidge): Remove after Mitaka release. 
with self.network() as network: tenant_id = network['network']['tenant_id'] subnetpool_prefix = '2000::/56' with self.subnetpool(prefixes=[subnetpool_prefix], admin=False, name="My ipv6 subnet pool", tenant_id=tenant_id, min_prefixlen='64') as subnetpool: subnetpool_id = subnetpool['subnetpool']['id'] cfg.CONF.set_override('default_ipv6_subnet_pool', subnetpool_id) cfg.CONF.set_override('ipv6_pd_enabled', False) data = {'subnet': {'network_id': network['network']['id'], 'ip_version': '6', 'tenant_id': tenant_id, 'use_default_subnetpool': True}} subnet_req = self.new_create_request('subnets', data) res = subnet_req.get_response(self.api) subnet = self.deserialize(self.fmt, res)['subnet'] self.assertEqual(subnetpool_id, subnet['subnetpool_id']) ip_net = netaddr.IPNetwork(subnet['cidr']) self.assertIn(ip_net, netaddr.IPNetwork(subnetpool_prefix)) self.assertEqual(64, ip_net.prefixlen) def _test_create_subnet_V6_pd_modes(self, ra_addr_mode, expect_fail=False): cfg.CONF.set_override('ipv6_pd_enabled', True) with self.network() as network: data = {'subnet': {'network_id': network['network']['id'], 'ip_version': '6', 'tenant_id': network['network']['tenant_id'], 'use_default_subnetpool': True}} if ra_addr_mode: data['subnet']['ipv6_ra_mode'] = ra_addr_mode data['subnet']['ipv6_address_mode'] = ra_addr_mode subnet_req = self.new_create_request('subnets', data) res = subnet_req.get_response(self.api) if expect_fail: self.assertEqual(webob.exc.HTTPClientError.code, res.status_int) else: subnet = self.deserialize(self.fmt, res)['subnet'] self.assertEqual(constants.IPV6_PD_POOL_ID, subnet['subnetpool_id']) def test_create_subnet_V6_pd_slaac(self): self._test_create_subnet_V6_pd_modes('slaac') def test_create_subnet_V6_pd_stateless(self): self._test_create_subnet_V6_pd_modes('dhcpv6-stateless') def test_create_subnet_V6_pd_stateful(self): self._test_create_subnet_V6_pd_modes('dhcpv6-stateful', expect_fail=True) def test_create_subnet_V6_pd_no_mode(self): self._test_create_subnet_V6_pd_modes(None, expect_fail=True) neutron-8.4.0/neutron/tests/unit/extensions/base.py0000664000567000056710000001167113044372760023670 0ustar jenkinsjenkins00000000000000# Copyright 2014 Intel Corporation. # Copyright 2014 Isaku Yamahata # # All Rights Reserved. # # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License.
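# Usage sketch (illustrative only): concrete extension test cases drive the
# ExtensionTestCase._setUpExtension() helper defined below from their own
# setUp(). The call shape mirrors the in-tree FlavorExtensionTestCase in
# test_flavors.py; 'MyExtensionTestCase' is a hypothetical name used purely
# for illustration.
#
#     class MyExtensionTestCase(ExtensionTestCase):
#         def setUp(self):
#             super(MyExtensionTestCase, self).setUp()
#             self._setUpExtension(
#                 'neutron.services.flavors.flavors_plugin.FlavorsPlugin',
#                 constants.FLAVORS,
#                 flavors.RESOURCE_ATTRIBUTE_MAP,
#                 flavors.Flavors,
#                 '',  # resource_prefix
#                 supported_extension_aliases='flavors')
#
# After setup, self.api is a webtest.TestApp wrapping the extension
# middleware, and self.plugin is the autospec'd plugin mock that individual
# tests program with return values and assert calls against.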
import uuid import mock from oslo_config import cfg from webob import exc import webtest from neutron.api import extensions from neutron.api.v2 import attributes from neutron import quota from neutron.tests import tools from neutron.tests.unit.api import test_extensions from neutron.tests.unit.api.v2 import test_base from neutron.tests.unit import testlib_api class ExtensionTestCase(testlib_api.WebTestCase): def _setUpExtension(self, plugin, service_type, resource_attribute_map, extension_class, resource_prefix, plural_mappings=None, translate_resource_name=False, allow_pagination=False, allow_sorting=False, supported_extension_aliases=None, use_quota=False, ): self._resource_prefix = resource_prefix self._plural_mappings = plural_mappings or {} self._translate_resource_name = translate_resource_name # Ensure existing ExtensionManager is not used extensions.PluginAwareExtensionManager._instance = None self.useFixture(tools.AttributeMapMemento()) # Create the default configurations self.config_parse() #just stubbing core plugin with plugin self.setup_coreplugin(plugin) cfg.CONF.set_override('core_plugin', plugin) if service_type: cfg.CONF.set_override('service_plugins', [plugin]) self._plugin_patcher = mock.patch(plugin, autospec=True) self.plugin = self._plugin_patcher.start() instance = self.plugin.return_value if service_type: instance.get_plugin_type.return_value = service_type if supported_extension_aliases is not None: instance.supported_extension_aliases = supported_extension_aliases if allow_pagination: cfg.CONF.set_override('allow_pagination', True) # instance.__native_pagination_support = True native_pagination_attr_name = ("_%s__native_pagination_support" % instance.__class__.__name__) setattr(instance, native_pagination_attr_name, True) if allow_sorting: cfg.CONF.set_override('allow_sorting', True) # instance.__native_sorting_support = True native_sorting_attr_name = ("_%s__native_sorting_support" % instance.__class__.__name__) setattr(instance, native_sorting_attr_name, True) if use_quota: quota.QUOTAS._driver = None cfg.CONF.set_override('quota_driver', 'neutron.quota.ConfDriver', group='QUOTAS') setattr(instance, 'path_prefix', resource_prefix) class ExtensionTestExtensionManager(object): def get_resources(self): # Add the resources to the global attribute map # This is done here as the setup process won't # initialize the main API router which extends # the global attribute map attributes.RESOURCE_ATTRIBUTE_MAP.update( resource_attribute_map) return extension_class.get_resources() def get_actions(self): return [] def get_request_extensions(self): return [] ext_mgr = ExtensionTestExtensionManager() self.ext_mdw = test_extensions.setup_extensions_middleware(ext_mgr) self.api = webtest.TestApp(self.ext_mdw) def _test_entity_delete(self, entity): """Does the entity deletion based on naming convention.""" entity_id = str(uuid.uuid4()) path = self._resource_prefix + '/' if self._resource_prefix else '' path += self._plural_mappings.get(entity, entity + 's') if self._translate_resource_name: path = path.replace('_', '-') res = self.api.delete( test_base._get_path(path, id=entity_id, fmt=self.fmt)) delete_entity = getattr(self.plugin.return_value, "delete_" + entity) delete_entity.assert_called_with(mock.ANY, entity_id) self.assertEqual(exc.HTTPNoContent.code, res.status_int) neutron-8.4.0/neutron/tests/unit/extensions/test_network_ip_availability.py0000664000567000056710000004317713044372760030736 0ustar jenkinsjenkins00000000000000# Copyright 2016 GoDaddy. 
# # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import neutron.api.extensions as api_ext import neutron.common.config as config import neutron.common.constants as constants import neutron.extensions import neutron.services.network_ip_availability.plugin as plugin_module import neutron.tests.unit.db.test_db_base_plugin_v2 as test_db_base_plugin_v2 API_RESOURCE = 'network-ip-availabilities' IP_AVAIL_KEY = 'network_ip_availability' IP_AVAILS_KEY = 'network_ip_availabilities' EXTENSIONS_PATH = ':'.join(neutron.extensions.__path__) PLUGIN_NAME = '%s.%s' % (plugin_module.NetworkIPAvailabilityPlugin.__module__, plugin_module.NetworkIPAvailabilityPlugin.__name__) class TestNetworkIPAvailabilityAPI( test_db_base_plugin_v2.NeutronDbPluginV2TestCase): def setUp(self): svc_plugins = {'plugin_name': PLUGIN_NAME} super(TestNetworkIPAvailabilityAPI, self).setUp( service_plugins=svc_plugins) self.plugin = plugin_module.NetworkIPAvailabilityPlugin() ext_mgr = api_ext.PluginAwareExtensionManager( EXTENSIONS_PATH, {"network-ip-availability": self.plugin} ) app = config.load_paste_app('extensions_test_app') self.ext_api = api_ext.ExtensionMiddleware(app, ext_mgr=ext_mgr) def _validate_availability(self, network, availability, expected_used_ips, expected_total_ips=253): self.assertEqual(network['name'], availability['network_name']) self.assertEqual(network['id'], availability['network_id']) self.assertEqual(expected_used_ips, availability['used_ips']) self.assertEqual(expected_total_ips, availability['total_ips']) def _validate_from_availabilities(self, availabilities, wrapped_network, expected_used_ips, expected_total_ips=253): network = wrapped_network['network'] availability = self._find_availability(availabilities, network['id']) self.assertIsNotNone(availability) self._validate_availability(network, availability, expected_used_ips=expected_used_ips, expected_total_ips=expected_total_ips) @staticmethod def _find_availability(availabilities, net_id): for ip_availability in availabilities: if net_id == ip_availability['network_id']: return ip_availability def test_basic(self): with self.network() as net: with self.subnet(network=net): network = net['network'] # Get ALL request = self.new_list_request(API_RESOURCE, self.fmt) response = self.deserialize(self.fmt, request.get_response(self.ext_api)) self.assertIn(IP_AVAILS_KEY, response) self.assertEqual(1, len(response[IP_AVAILS_KEY])) self._validate_from_availabilities(response[IP_AVAILS_KEY], net, 0) # Get single via id request = self.new_show_request(API_RESOURCE, network['id']) response = self.deserialize( self.fmt, request.get_response(self.ext_api)) self.assertIn(IP_AVAIL_KEY, response) usage = response[IP_AVAIL_KEY] self._validate_availability(network, usage, 0) def test_usages_multi_nets_subnets(self): with self.network(name='net1') as n1,\ self.network(name='net2') as n2,\ self.network(name='net3') as n3: # n1 should have 2 subnets, n2 should have none, n3 has 1 with self.subnet(network=n1) as subnet1_1, \ self.subnet(cidr='40.0.0.0/24', network=n3) as subnet3_1: 
# Consume 3 ports n1, none n2, 2 ports on n3 with self.port(subnet=subnet1_1),\ self.port(subnet=subnet1_1),\ self.port(subnet=subnet1_1),\ self.port(subnet=subnet3_1),\ self.port(subnet=subnet3_1): # Test get ALL request = self.new_list_request(API_RESOURCE) response = self.deserialize( self.fmt, request.get_response(self.ext_api)) self.assertIn(IP_AVAILS_KEY, response) self.assertEqual(3, len(response[IP_AVAILS_KEY])) data = response[IP_AVAILS_KEY] self._validate_from_availabilities(data, n1, 3, 253) self._validate_from_availabilities(data, n2, 0, 0) self._validate_from_availabilities(data, n3, 2, 253) # Test get single via network id network = n1['network'] request = self.new_show_request(API_RESOURCE, network['id']) response = self.deserialize( self.fmt, request.get_response(self.ext_api)) self.assertIn(IP_AVAIL_KEY, response) self._validate_availability(network, response[IP_AVAIL_KEY], 3, 253) def test_usages_multi_nets_subnets_sums(self): with self.network(name='net1') as n1: # n1 has 2 subnets with self.subnet(network=n1) as subnet1_1, \ self.subnet(cidr='40.0.0.0/24', network=n1) as subnet1_2: # Consume 3 ports n1: 1 on subnet 1 and 2 on subnet 2 with self.port(subnet=subnet1_1),\ self.port(subnet=subnet1_2),\ self.port(subnet=subnet1_2): # Get ALL request = self.new_list_request(API_RESOURCE) response = self.deserialize( self.fmt, request.get_response(self.ext_api)) self.assertIn(IP_AVAILS_KEY, response) self.assertEqual(1, len(response[IP_AVAILS_KEY])) self._validate_from_availabilities(response[IP_AVAILS_KEY], n1, 3, 506) # Get single via network id network = n1['network'] request = self.new_show_request(API_RESOURCE, network['id']) response = self.deserialize( self.fmt, request.get_response(self.ext_api)) self.assertIn(IP_AVAIL_KEY, response) self._validate_availability(network, response[IP_AVAIL_KEY], 3, 506) def test_usages_port_consumed_v4(self): with self.network() as net: with self.subnet(network=net) as subnet: request = self.new_list_request(API_RESOURCE) # Consume 2 ports with self.port(subnet=subnet), self.port(subnet=subnet): response = self.deserialize(self.fmt, request.get_response( self.ext_api)) self._validate_from_availabilities(response[IP_AVAILS_KEY], net, 2) def test_usages_query_ip_version_v4(self): with self.network() as net: with self.subnet(network=net): # Get IPv4 params = 'ip_version=4' request = self.new_list_request(API_RESOURCE, params=params) response = self.deserialize(self.fmt, request.get_response(self.ext_api)) self.assertIn(IP_AVAILS_KEY, response) self.assertEqual(1, len(response[IP_AVAILS_KEY])) self._validate_from_availabilities(response[IP_AVAILS_KEY], net, 0) # Get IPv6 should return empty array params = 'ip_version=6' request = self.new_list_request(API_RESOURCE, params=params) response = self.deserialize(self.fmt, request.get_response(self.ext_api)) self.assertEqual(0, len(response[IP_AVAILS_KEY])) def test_usages_query_ip_version_v6(self): with self.network() as net: with self.subnet( network=net, cidr='2607:f0d0:1002:51::/64', ip_version=6, ipv6_address_mode=constants.DHCPV6_STATELESS): # Get IPv6 params = 'ip_version=6' request = self.new_list_request(API_RESOURCE, params=params) response = self.deserialize(self.fmt, request.get_response(self.ext_api)) self.assertEqual(1, len(response[IP_AVAILS_KEY])) self._validate_from_availabilities( response[IP_AVAILS_KEY], net, 0, 18446744073709551614) # Get IPv4 should return empty array params = 'ip_version=4' request = self.new_list_request(API_RESOURCE, params=params) response = 
self.deserialize(self.fmt, request.get_response(self.ext_api)) self.assertEqual(0, len(response[IP_AVAILS_KEY])) def test_usages_ports_consumed_v6(self): with self.network() as net: with self.subnet( network=net, cidr='2607:f0d0:1002:51::/64', ip_version=6, ipv6_address_mode=constants.DHCPV6_STATELESS) as subnet: request = self.new_list_request(API_RESOURCE) # Consume 3 ports with self.port(subnet=subnet),\ self.port(subnet=subnet), \ self.port(subnet=subnet): response = self.deserialize( self.fmt, request.get_response(self.ext_api)) self._validate_from_availabilities(response[IP_AVAILS_KEY], net, 3, 18446744073709551614) def test_usages_query_network_id(self): with self.network() as net: with self.subnet(network=net): network = net['network'] test_id = network['id'] # Get by query param: network_id params = 'network_id=%s' % test_id request = self.new_list_request(API_RESOURCE, params=params) response = self.deserialize(self.fmt, request.get_response(self.ext_api)) self.assertIn(IP_AVAILS_KEY, response) self.assertEqual(1, len(response[IP_AVAILS_KEY])) self._validate_from_availabilities(response[IP_AVAILS_KEY], net, 0) # Get by NON-matching query param: network_id params = 'network_id=clearlywontmatch' request = self.new_list_request(API_RESOURCE, params=params) response = self.deserialize(self.fmt, request.get_response(self.ext_api)) self.assertEqual(0, len(response[IP_AVAILS_KEY])) def test_usages_query_network_name(self): test_name = 'net_name_1' with self.network(name=test_name) as net: with self.subnet(network=net): # Get by query param: network_name params = 'network_name=%s' % test_name request = self.new_list_request(API_RESOURCE, params=params) response = self.deserialize(self.fmt, request.get_response(self.ext_api)) self.assertIn(IP_AVAILS_KEY, response) self.assertEqual(1, len(response[IP_AVAILS_KEY])) self._validate_from_availabilities(response[IP_AVAILS_KEY], net, 0) # Get by NON-matching query param: network_name params = 'network_name=clearly-wont-match' request = self.new_list_request(API_RESOURCE, params=params) response = self.deserialize(self.fmt, request.get_response(self.ext_api)) self.assertEqual(0, len(response[IP_AVAILS_KEY])) def test_usages_query_tenant_id(self): test_tenant_id = 'a-unique-test-id' with self.network(tenant_id=test_tenant_id) as net: with self.subnet(network=net): # Get by query param: tenant_id params = 'tenant_id=%s' % test_tenant_id request = self.new_list_request(API_RESOURCE, params=params) response = self.deserialize(self.fmt, request.get_response(self.ext_api)) self.assertIn(IP_AVAILS_KEY, response) self.assertEqual(1, len(response[IP_AVAILS_KEY])) self._validate_from_availabilities(response[IP_AVAILS_KEY], net, 0) for net_avail in response[IP_AVAILS_KEY]: self.assertEqual(test_tenant_id, net_avail['tenant_id']) # Get by NON-matching query param: tenant_id params = 'tenant_id=clearly-wont-match' request = self.new_list_request(API_RESOURCE, params=params) response = self.deserialize(self.fmt, request.get_response(self.ext_api)) self.assertEqual(0, len(response[IP_AVAILS_KEY])) def test_usages_multi_net_multi_subnet_46(self): # Setup mixed v4/v6 networks with IPs consumed on each with self.network(name='net-v6-1') as net_v6_1, \ self.network(name='net-v6-2') as net_v6_2, \ self.network(name='net-v4-1') as net_v4_1, \ self.network(name='net-v4-2') as net_v4_2: with self.subnet(network=net_v6_1, cidr='2607:f0d0:1002:51::/64', ip_version=6) as s61, \ self.subnet(network=net_v6_2, cidr='2607:f0d0:1003:52::/64', ip_version=6) as s62, \
self.subnet(network=net_v4_1, cidr='10.0.0.0/24') as s41, \ self.subnet(network=net_v4_2, cidr='10.0.1.0/24') as s42: with self.port(subnet=s61),\ self.port(subnet=s62), self.port(subnet=s62), \ self.port(subnet=s41), \ self.port(subnet=s42), self.port(subnet=s42): # Verify consumption across all request = self.new_list_request(API_RESOURCE) response = self.deserialize( self.fmt, request.get_response(self.ext_api)) avails_list = response[IP_AVAILS_KEY] self._validate_from_availabilities( avails_list, net_v6_1, 1, 18446744073709551614) self._validate_from_availabilities( avails_list, net_v6_2, 2, 18446744073709551614) self._validate_from_availabilities( avails_list, net_v4_1, 1, 253) self._validate_from_availabilities( avails_list, net_v4_2, 2, 253) # Query by IP versions. Ensure subnet versions match for ip_ver in [4, 6]: params = 'ip_version=%i' % ip_ver request = self.new_list_request(API_RESOURCE, params=params) response = self.deserialize( self.fmt, request.get_response(self.ext_api)) for net_avail in response[IP_AVAILS_KEY]: for sub in net_avail['subnet_ip_availability']: self.assertEqual(ip_ver, sub['ip_version']) # Verify consumption querying 2 network ids (IN clause) request = self.new_list_request( API_RESOURCE, params='network_id=%s&network_id=%s' % (net_v4_2['network']['id'], net_v6_2['network']['id'])) response = self.deserialize( self.fmt, request.get_response(self.ext_api)) avails_list = response[IP_AVAILS_KEY] self._validate_from_availabilities( avails_list, net_v6_2, 2, 18446744073709551614) self._validate_from_availabilities( avails_list, net_v4_2, 2, 253) neutron-8.4.0/neutron/tests/unit/extensions/test_availability_zone.py0000664000567000056710000001317613044372760027524 0ustar jenkinsjenkins00000000000000# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
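# Overview sketch (illustrative): the test cases below build their
# availability-zone fixtures purely from agent registrations made through
# neutron.tests.common.helpers, for example:
#
#     helpers.register_dhcp_agent(host='host1', az='nova1')  # -> (nova1, network)
#     helpers.register_l3_agent(host='host2', az='nova2')    # -> (nova2, router)
#
# Each distinct (az, resource) pair then appears once in the
# 'availability_zones' listing: DHCP agents contribute 'network' entries and
# L3 agents 'router' entries, and a pair's 'state' is 'available' as long as
# at least one admin-enabled agent backs it. test_list_availability_zones
# exercises exactly this derivation: disabling the only L3 agent in nova3
# flips nova3/router to 'unavailable', while nova2/router stays 'available'
# because a second enabled agent remains.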
from neutron import context from neutron.db import agents_db from neutron.db import db_base_plugin_v2 from neutron.extensions import agent from neutron.extensions import availability_zone as az_ext from neutron.tests.common import helpers from neutron.tests.unit.db import test_db_base_plugin_v2 class AZExtensionManager(object): def get_resources(self): agent.RESOURCE_ATTRIBUTE_MAP['agents'].update( az_ext.EXTENDED_ATTRIBUTES_2_0['agents']) return (az_ext.Availability_zone.get_resources() + agent.Agent.get_resources()) def get_actions(self): return [] def get_request_extensions(self): return [] class AZTestPlugin(db_base_plugin_v2.NeutronDbPluginV2, agents_db.AgentDbMixin): supported_extension_aliases = ["agent", "availability_zone"] class AZTestCommon(test_db_base_plugin_v2.NeutronDbPluginV2TestCase): def _register_azs(self): self.agent1 = helpers.register_dhcp_agent(host='host1', az='nova1') self.agent2 = helpers.register_dhcp_agent(host='host2', az='nova2') self.agent3 = helpers.register_l3_agent(host='host2', az='nova2') self.agent4 = helpers.register_l3_agent(host='host3', az='nova3') self.agent5 = helpers.register_l3_agent(host='host4', az='nova2') class TestAZAgentCase(AZTestCommon): def setUp(self): plugin = ('neutron.tests.unit.extensions.' 'test_availability_zone.AZTestPlugin') ext_mgr = AZExtensionManager() super(TestAZAgentCase, self).setUp(plugin=plugin, ext_mgr=ext_mgr) def test_list_availability_zones(self): self._register_azs() helpers.set_agent_admin_state(self.agent3['id'], admin_state_up=False) helpers.set_agent_admin_state(self.agent4['id'], admin_state_up=False) expected = [ {'name': 'nova1', 'resource': 'network', 'state': 'available'}, {'name': 'nova2', 'resource': 'network', 'state': 'available'}, {'name': 'nova2', 'resource': 'router', 'state': 'available'}, {'name': 'nova3', 'resource': 'router', 'state': 'unavailable'}] res = self._list('availability_zones') azs = res['availability_zones'] self.assertItemsEqual(expected, azs) # not admin case ctx = context.Context('', 'noadmin') res = self._list('availability_zones', neutron_context=ctx) azs = res['availability_zones'] self.assertItemsEqual(expected, azs) def test_list_agent_with_az(self): helpers.register_dhcp_agent(host='host1', az='nova1') res = self._list('agents') self.assertEqual('nova1', res['agents'][0]['availability_zone']) def test_validate_availability_zones(self): self._register_azs() ctx = context.Context('', 'tenant_id') self.plugin.validate_availability_zones(ctx, 'network', ['nova1', 'nova2']) self.plugin.validate_availability_zones(ctx, 'router', ['nova2', 'nova3']) self.assertRaises(az_ext.AvailabilityZoneNotFound, self.plugin.validate_availability_zones, ctx, 'router', ['nova1']) class TestAZNetworkCase(AZTestCommon): def setUp(self): plugin = 'neutron.plugins.ml2.plugin.Ml2Plugin' ext_mgr = AZExtensionManager() super(TestAZNetworkCase, self).setUp(plugin=plugin, ext_mgr=ext_mgr) def test_availability_zones_in_create_response(self): with self.network() as net: self.assertIn('availability_zone_hints', net['network']) self.assertIn('availability_zones', net['network']) def test_create_network_with_az(self): self._register_azs() az_hints = ['nova1'] with self.network(availability_zone_hints=az_hints) as net: res = self._show('networks', net['network']['id']) self.assertItemsEqual(az_hints, res['network']['availability_zone_hints']) def test_create_network_with_azs(self): self._register_azs() az_hints = ['nova1', 'nova2'] with self.network(availability_zone_hints=az_hints) as net: res = 
self._show('networks', net['network']['id']) self.assertItemsEqual(az_hints, res['network']['availability_zone_hints']) def test_create_network_without_az(self): with self.network() as net: res = self._show('networks', net['network']['id']) self.assertEqual([], res['network']['availability_zone_hints']) def test_create_network_with_empty_az(self): with self.network(availability_zone_hints=[]) as net: res = self._show('networks', net['network']['id']) self.assertEqual([], res['network']['availability_zone_hints']) def test_create_network_with_not_exist_az(self): res = self._create_network(self.fmt, 'net', True, availability_zone_hints=['nova3']) self.assertEqual(404, res.status_int) neutron-8.4.0/neutron/tests/unit/extensions/test_l3_ext_gw_mode.py0000664000567000056710000005424313044372760026716 0ustar jenkinsjenkins00000000000000# Copyright 2013 VMware, Inc. # All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # import mock from oslo_config import cfg from oslo_db import exception as db_exc from oslo_serialization import jsonutils from oslo_utils import uuidutils import testscenarios from webob import exc from neutron.common import constants from neutron.db import api as db_api from neutron.db import external_net_db from neutron.db import l3_db from neutron.db import l3_gwmode_db from neutron.db import models_v2 from neutron.extensions import l3 from neutron.extensions import l3_ext_gw_mode from neutron.tests import base from neutron.tests.unit.db import test_db_base_plugin_v2 from neutron.tests.unit.extensions import test_l3 from neutron.tests.unit import testlib_api _uuid = uuidutils.generate_uuid FAKE_GW_PORT_ID = _uuid() FAKE_GW_PORT_MAC = 'aa:bb:cc:dd:ee:ff' FAKE_FIP_EXT_PORT_ID = _uuid() FAKE_FIP_EXT_PORT_MAC = '11:22:33:44:55:66' FAKE_FIP_INT_PORT_ID = _uuid() FAKE_FIP_INT_PORT_MAC = 'aa:aa:aa:aa:aa:aa' FAKE_ROUTER_PORT_ID = _uuid() FAKE_ROUTER_PORT_MAC = 'bb:bb:bb:bb:bb:bb' class TestExtensionManager(object): def get_resources(self): # Simulate extension of L3 attribute map for key in l3.RESOURCE_ATTRIBUTE_MAP.keys(): l3.RESOURCE_ATTRIBUTE_MAP[key].update( l3_ext_gw_mode.EXTENDED_ATTRIBUTES_2_0.get(key, {})) return l3.L3.get_resources() def get_actions(self): return [] def get_request_extensions(self): return [] # A simple class for making a concrete class out of the mixin # for the case of a plugin that integrates l3 routing. 
class TestDbIntPlugin(test_l3.TestL3NatIntPlugin, l3_gwmode_db.L3_NAT_db_mixin): supported_extension_aliases = ["external-net", "router", "ext-gw-mode"] # A simple class for making a concrete class out of the mixin # for the case of a l3 router service plugin class TestDbSepPlugin(test_l3.TestL3NatServicePlugin, l3_gwmode_db.L3_NAT_db_mixin): supported_extension_aliases = ["router", "ext-gw-mode"] class TestGetEnableSnat(testscenarios.WithScenarios, base.BaseTestCase): scenarios = [ ('enabled', {'enable_snat_by_default': True}), ('disabled', {'enable_snat_by_default': False})] def setUp(self): super(TestGetEnableSnat, self).setUp() self.config(enable_snat_by_default=self.enable_snat_by_default) def _test_get_enable_snat(self, expected, info): observed = l3_gwmode_db.L3_NAT_dbonly_mixin._get_enable_snat(info) self.assertEqual(expected, observed) def test_get_enable_snat_without_gw_info(self): self._test_get_enable_snat(self.enable_snat_by_default, {}) def test_get_enable_snat_without_enable_snat(self): info = {'network_id': _uuid()} self._test_get_enable_snat(self.enable_snat_by_default, info) def test_get_enable_snat_with_snat_enabled(self): self._test_get_enable_snat(True, {'enable_snat': True}) def test_get_enable_snat_with_snat_disabled(self): self._test_get_enable_snat(False, {'enable_snat': False}) class TestL3GwModeMixin(testlib_api.SqlTestCase): def setUp(self): super(TestL3GwModeMixin, self).setUp() plugin = __name__ + '.' + TestDbIntPlugin.__name__ self.setup_coreplugin(plugin) self.target_object = TestDbIntPlugin() # Patch the context ctx_patcher = mock.patch('neutron.context', autospec=True) mock_context = ctx_patcher.start() self.context = mock_context.get_admin_context() # This ensure also calls to elevated work in unit tests self.context.elevated.return_value = self.context self.context.session = db_api.get_session() # Create sample data for tests self.ext_net_id = _uuid() self.int_net_id = _uuid() self.int_sub_id = _uuid() self.tenant_id = 'the_tenant' self.network = models_v2.Network( id=self.ext_net_id, tenant_id=self.tenant_id, admin_state_up=True, status=constants.NET_STATUS_ACTIVE) self.net_ext = external_net_db.ExternalNetwork( network_id=self.ext_net_id) self.context.session.add(self.network) # The following is to avoid complaints from SQLite on # foreign key violations self.context.session.flush() self.context.session.add(self.net_ext) self.router = l3_db.Router( id=_uuid(), name=None, tenant_id=self.tenant_id, admin_state_up=True, status=constants.NET_STATUS_ACTIVE, enable_snat=True, gw_port_id=None) self.context.session.add(self.router) self.context.session.flush() self.router_gw_port = models_v2.Port( id=FAKE_GW_PORT_ID, tenant_id=self.tenant_id, device_id=self.router.id, device_owner=l3_db.DEVICE_OWNER_ROUTER_GW, admin_state_up=True, status=constants.PORT_STATUS_ACTIVE, mac_address=FAKE_GW_PORT_MAC, network_id=self.ext_net_id) self.router.gw_port_id = self.router_gw_port.id self.context.session.add(self.router) self.context.session.add(self.router_gw_port) self.context.session.flush() self.fip_ext_port = models_v2.Port( id=FAKE_FIP_EXT_PORT_ID, tenant_id=self.tenant_id, admin_state_up=True, device_id=self.router.id, device_owner=l3_db.DEVICE_OWNER_FLOATINGIP, status=constants.PORT_STATUS_ACTIVE, mac_address=FAKE_FIP_EXT_PORT_MAC, network_id=self.ext_net_id) self.context.session.add(self.fip_ext_port) self.context.session.flush() self.int_net = models_v2.Network( id=self.int_net_id, tenant_id=self.tenant_id, admin_state_up=True, status=constants.NET_STATUS_ACTIVE) 
self.int_sub = models_v2.Subnet( id=self.int_sub_id, tenant_id=self.tenant_id, ip_version=4, cidr='3.3.3.0/24', gateway_ip='3.3.3.1', network_id=self.int_net_id) self.router_port = models_v2.Port( id=FAKE_ROUTER_PORT_ID, tenant_id=self.tenant_id, admin_state_up=True, device_id=self.router.id, device_owner=l3_db.DEVICE_OWNER_ROUTER_INTF, status=constants.PORT_STATUS_ACTIVE, mac_address=FAKE_ROUTER_PORT_MAC, network_id=self.int_net_id) self.router_port_ip_info = models_v2.IPAllocation( port_id=self.router_port.id, network_id=self.int_net.id, subnet_id=self.int_sub_id, ip_address='3.3.3.1') self.context.session.add(self.int_net) self.context.session.add(self.int_sub) self.context.session.add(self.router_port) self.context.session.add(self.router_port_ip_info) self.context.session.flush() self.fip_int_port = models_v2.Port( id=FAKE_FIP_INT_PORT_ID, tenant_id=self.tenant_id, admin_state_up=True, device_id='something', device_owner=constants.DEVICE_OWNER_COMPUTE_PREFIX + 'nova', status=constants.PORT_STATUS_ACTIVE, mac_address=FAKE_FIP_INT_PORT_MAC, network_id=self.int_net_id) self.fip_int_ip_info = models_v2.IPAllocation( port_id=self.fip_int_port.id, network_id=self.int_net.id, subnet_id=self.int_sub_id, ip_address='3.3.3.3') self.fip = l3_db.FloatingIP( id=_uuid(), floating_ip_address='1.1.1.2', floating_network_id=self.ext_net_id, floating_port_id=FAKE_FIP_EXT_PORT_ID, fixed_port_id=None, fixed_ip_address=None, router_id=None) self.context.session.add(self.fip_int_port) self.context.session.add(self.fip_int_ip_info) self.context.session.add(self.fip) self.context.session.flush() self.fip_request = {'port_id': FAKE_FIP_INT_PORT_ID, 'tenant_id': self.tenant_id} def _get_gwports_dict(self, gw_ports): return dict((gw_port['id'], gw_port) for gw_port in gw_ports) def _reset_ext_gw(self): # Reset external gateway self.router.gw_port_id = None self.context.session.add(self.router) self.context.session.flush() def _test_update_router_gw(self, current_enable_snat, gw_info=None, expected_enable_snat=True): if not current_enable_snat: previous_gw_info = {'network_id': self.ext_net_id, 'enable_snat': current_enable_snat} self.target_object._update_router_gw_info( self.context, self.router.id, previous_gw_info) self.target_object._update_router_gw_info( self.context, self.router.id, gw_info) router = self.target_object._get_router( self.context, self.router.id) try: self.assertEqual(FAKE_GW_PORT_ID, router.gw_port.id) self.assertEqual(FAKE_GW_PORT_MAC, router.gw_port.mac_address) except AttributeError: self.assertIsNone(router.gw_port) self.assertEqual(expected_enable_snat, router.enable_snat) def test_update_router_gw_with_gw_info_none(self): self._test_update_router_gw(current_enable_snat=True) def test_update_router_gw_without_info_and_snat_disabled_previously(self): self._test_update_router_gw(current_enable_snat=False) def test_update_router_gw_with_network_only(self): info = {'network_id': self.ext_net_id} self._test_update_router_gw(current_enable_snat=True, gw_info=info) def test_update_router_gw_with_network_and_snat_disabled_previously(self): info = {'network_id': self.ext_net_id} self._test_update_router_gw(current_enable_snat=False, gw_info=info) def test_update_router_gw_with_snat_disabled(self): info = {'network_id': self.ext_net_id, 'enable_snat': False} self._test_update_router_gw( current_enable_snat=True, gw_info=info, expected_enable_snat=False) def test_update_router_gw_with_snat_enabled(self): info = {'network_id': self.ext_net_id, 'enable_snat': True} 
self._test_update_router_gw(current_enable_snat=False, gw_info=info) def test_make_router_dict_no_ext_gw(self): self._reset_ext_gw() router_dict = self.target_object._make_router_dict(self.router) self.assertIsNone(router_dict[l3.EXTERNAL_GW_INFO]) def test_make_router_dict_with_ext_gw(self): router_dict = self.target_object._make_router_dict(self.router) self.assertEqual({'network_id': self.ext_net_id, 'enable_snat': True, 'external_fixed_ips': []}, router_dict[l3.EXTERNAL_GW_INFO]) def test_make_router_dict_with_ext_gw_snat_disabled(self): self.router.enable_snat = False router_dict = self.target_object._make_router_dict(self.router) self.assertEqual({'network_id': self.ext_net_id, 'enable_snat': False, 'external_fixed_ips': []}, router_dict[l3.EXTERNAL_GW_INFO]) def test_build_routers_list_no_ext_gw(self): self._reset_ext_gw() router_dict = self.target_object._make_router_dict(self.router) routers = self.target_object._build_routers_list(self.context, [router_dict], []) self.assertEqual(1, len(routers)) router = routers[0] self.assertIsNone(router.get('gw_port')) self.assertIsNone(router.get('enable_snat')) def test_build_routers_list_with_ext_gw(self): router_dict = self.target_object._make_router_dict(self.router) routers = self.target_object._build_routers_list( self.context, [router_dict], self._get_gwports_dict([self.router.gw_port])) self.assertEqual(1, len(routers)) router = routers[0] self.assertIsNotNone(router.get('gw_port')) self.assertEqual(FAKE_GW_PORT_ID, router['gw_port']['id']) self.assertTrue(router.get('enable_snat')) def test_build_routers_list_with_ext_gw_snat_disabled(self): self.router.enable_snat = False router_dict = self.target_object._make_router_dict(self.router) routers = self.target_object._build_routers_list( self.context, [router_dict], self._get_gwports_dict([self.router.gw_port])) self.assertEqual(1, len(routers)) router = routers[0] self.assertIsNotNone(router.get('gw_port')) self.assertEqual(FAKE_GW_PORT_ID, router['gw_port']['id']) self.assertFalse(router.get('enable_snat')) def test_build_routers_list_with_gw_port_mismatch(self): router_dict = self.target_object._make_router_dict(self.router) routers = self.target_object._build_routers_list( self.context, [router_dict], {}) self.assertEqual(1, len(routers)) router = routers[0] self.assertIsNone(router.get('gw_port')) self.assertIsNone(router.get('enable_snat')) class ExtGwModeIntTestCase(test_db_base_plugin_v2.NeutronDbPluginV2TestCase, test_l3.L3NatTestCaseMixin): def setUp(self, plugin=None, svc_plugins=None, ext_mgr=None): # Store l3 resource attribute map as it will be updated self._l3_attribute_map_bk = {} for item in l3.RESOURCE_ATTRIBUTE_MAP: self._l3_attribute_map_bk[item] = ( l3.RESOURCE_ATTRIBUTE_MAP[item].copy()) plugin = plugin or ( 'neutron.tests.unit.extensions.test_l3_ext_gw_mode.' 
'TestDbIntPlugin') # for these tests we need to enable overlapping ips cfg.CONF.set_default('allow_overlapping_ips', True) ext_mgr = ext_mgr or TestExtensionManager() super(ExtGwModeIntTestCase, self).setUp(plugin=plugin, ext_mgr=ext_mgr, service_plugins=svc_plugins) self.addCleanup(self.restore_l3_attribute_map) def restore_l3_attribute_map(self): l3.RESOURCE_ATTRIBUTE_MAP = self._l3_attribute_map_bk def tearDown(self): super(ExtGwModeIntTestCase, self).tearDown() def _set_router_external_gateway(self, router_id, network_id, snat_enabled=None, expected_code=exc.HTTPOk.code, neutron_context=None): ext_gw_info = {'network_id': network_id} # Need to set enable_snat also if snat_enabled == False if snat_enabled is not None: ext_gw_info['enable_snat'] = snat_enabled return self._update('routers', router_id, {'router': {'external_gateway_info': ext_gw_info}}, expected_code=expected_code, neutron_context=neutron_context) def test_router_gateway_set_retry(self): with self.router() as r, self.subnet() as s: ext_net_id = s['subnet']['network_id'] self._set_net_external(ext_net_id) with mock.patch.object( l3_db.L3_NAT_dbonly_mixin, '_validate_gw_info', side_effect=[db_exc.RetryRequest(None), ext_net_id]): self._set_router_external_gateway(r['router']['id'], ext_net_id) res = self._show('routers', r['router']['id'])['router'] self.assertEqual(ext_net_id, res['external_gateway_info']['network_id']) def test_router_create_with_gwinfo_invalid_ext_ip(self): with self.subnet() as s: self._set_net_external(s['subnet']['network_id']) ext_info = { 'network_id': s['subnet']['network_id'], 'external_fixed_ips': [{'ip_address': '10.0.0.'}] } error_code = exc.HTTPBadRequest.code res = self._create_router( self.fmt, _uuid(), arg_list=('external_gateway_info',), external_gateway_info=ext_info, expected_code=error_code ) msg = ("Invalid input for external_gateway_info. " "Reason: '10.0.0.' 
is not a valid IP address.") body = jsonutils.loads(res.body) self.assertEqual(msg, body['NeutronError']['message']) def test_router_create_show_no_ext_gwinfo(self): name = 'router1' tenant_id = _uuid() expected_value = [('name', name), ('tenant_id', tenant_id), ('admin_state_up', True), ('status', 'ACTIVE'), ('external_gateway_info', None)] with self.router(name=name, admin_state_up=True, tenant_id=tenant_id) as router: res = self._show('routers', router['router']['id']) for k, v in expected_value: self.assertEqual(res['router'][k], v) def _test_router_create_show_ext_gwinfo(self, snat_input_value, snat_expected_value): name = 'router1' tenant_id = _uuid() with self.subnet() as s: ext_net_id = s['subnet']['network_id'] self._set_net_external(ext_net_id) input_value = {'network_id': ext_net_id} if snat_input_value in (True, False): input_value['enable_snat'] = snat_input_value expected_value = [('name', name), ('tenant_id', tenant_id), ('admin_state_up', True), ('status', 'ACTIVE'), ('external_gateway_info', {'network_id': ext_net_id, 'enable_snat': snat_expected_value, 'external_fixed_ips': [{ 'ip_address': mock.ANY, 'subnet_id': s['subnet']['id']}]})] with self.router( name=name, admin_state_up=True, tenant_id=tenant_id, external_gateway_info=input_value) as router: res = self._show('routers', router['router']['id']) for k, v in expected_value: self.assertEqual(res['router'][k], v) def test_router_create_show_ext_gwinfo_default(self): self._test_router_create_show_ext_gwinfo(None, True) def test_router_create_show_ext_gwinfo_with_snat_enabled(self): self._test_router_create_show_ext_gwinfo(True, True) def test_router_create_show_ext_gwinfo_with_snat_disabled(self): self._test_router_create_show_ext_gwinfo(False, False) def _test_router_update_ext_gwinfo(self, snat_input_value, snat_expected_value=False, expected_http_code=exc.HTTPOk.code): with self.router() as r: with self.subnet() as s: try: ext_net_id = s['subnet']['network_id'] self._set_net_external(ext_net_id) self._set_router_external_gateway( r['router']['id'], ext_net_id, snat_enabled=snat_input_value, expected_code=expected_http_code) if expected_http_code != exc.HTTPOk.code: return body = self._show('routers', r['router']['id']) res_gw_info = body['router']['external_gateway_info'] self.assertEqual(ext_net_id, res_gw_info['network_id']) self.assertEqual(snat_expected_value, res_gw_info['enable_snat']) finally: self._remove_external_gateway_from_router( r['router']['id'], ext_net_id) def test_router_update_ext_gwinfo_default(self): self._test_router_update_ext_gwinfo(None, True) def test_router_update_ext_gwinfo_with_snat_enabled(self): self._test_router_update_ext_gwinfo(True, True) def test_router_update_ext_gwinfo_with_snat_disabled(self): self._test_router_update_ext_gwinfo(False, False) def test_router_update_ext_gwinfo_with_invalid_snat_setting(self): self._test_router_update_ext_gwinfo( 'xxx', None, expected_http_code=exc.HTTPBadRequest.code) class ExtGwModeSepTestCase(ExtGwModeIntTestCase): def setUp(self, plugin=None): # Store l3 resource attribute map as it will be updated self._l3_attribute_map_bk = {} for item in l3.RESOURCE_ATTRIBUTE_MAP: self._l3_attribute_map_bk[item] = ( l3.RESOURCE_ATTRIBUTE_MAP[item].copy()) plugin = plugin or ( 'neutron.tests.unit.extensions.test_l3.TestNoL3NatPlugin') # the L3 service plugin l3_plugin = ('neutron.tests.unit.extensions.test_l3_ext_gw_mode.' 
'TestDbSepPlugin') svc_plugins = {'l3_plugin_name': l3_plugin} # for these tests we need to enable overlapping ips cfg.CONF.set_default('allow_overlapping_ips', True) super(ExtGwModeSepTestCase, self).setUp(plugin=plugin, svc_plugins=svc_plugins) self.addCleanup(self.restore_l3_attribute_map) neutron-8.4.0/neutron/tests/unit/extensions/test_extra_dhcp_opt.py0000664000567000056710000003461413044372736027025 0ustar jenkinsjenkins00000000000000# Copyright (c) 2013 OpenStack Foundation. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import copy import webob.exc from neutron.db import db_base_plugin_v2 from neutron.db import extradhcpopt_db as edo_db from neutron.extensions import extra_dhcp_opt as edo_ext from neutron.tests.unit.db import test_db_base_plugin_v2 DB_PLUGIN_KLASS = ( 'neutron.tests.unit.extensions.test_extra_dhcp_opt.ExtraDhcpOptTestPlugin') class ExtraDhcpOptTestPlugin(db_base_plugin_v2.NeutronDbPluginV2, edo_db.ExtraDhcpOptMixin): """Test plugin that implements necessary calls on create/delete port for associating ports with extra dhcp options. """ supported_extension_aliases = ["extra_dhcp_opt"] def create_port(self, context, port): with context.session.begin(subtransactions=True): edos = port['port'].get(edo_ext.EXTRADHCPOPTS, []) new_port = super(ExtraDhcpOptTestPlugin, self).create_port( context, port) self._process_port_create_extra_dhcp_opts(context, new_port, edos) return new_port def update_port(self, context, id, port): with context.session.begin(subtransactions=True): rtn_port = super(ExtraDhcpOptTestPlugin, self).update_port( context, id, port) self._update_extra_dhcp_opts_on_port(context, id, port, rtn_port) return rtn_port class ExtraDhcpOptDBTestCase(test_db_base_plugin_v2.NeutronDbPluginV2TestCase): def setUp(self, plugin=DB_PLUGIN_KLASS): super(ExtraDhcpOptDBTestCase, self).setUp(plugin=plugin) class TestExtraDhcpOpt(ExtraDhcpOptDBTestCase): def _check_opts(self, expected, returned): self.assertEqual(len(expected), len(returned)) for opt in returned: name = opt['opt_name'] for exp in expected: if (name == exp['opt_name'] and opt['ip_version'] == exp.get( 'ip_version', 4)): val = exp['opt_value'] break self.assertEqual(val, opt['opt_value']) def test_create_port_with_extradhcpopts(self): opt_list = [{'opt_name': 'bootfile-name', 'opt_value': 'pxelinux.0'}, {'opt_name': 'server-ip-address', 'opt_value': '123.123.123.456'}, {'opt_name': 'tftp-server', 'opt_value': '123.123.123.123'}] params = {edo_ext.EXTRADHCPOPTS: opt_list, 'arg_list': (edo_ext.EXTRADHCPOPTS,)} with self.port(**params) as port: self._check_opts(opt_list, port['port'][edo_ext.EXTRADHCPOPTS]) def test_create_port_with_none_extradhcpopts(self): opt_list = [{'opt_name': 'bootfile-name', 'opt_value': None}, {'opt_name': 'server-ip-address', 'opt_value': '123.123.123.456'}, {'opt_name': 'tftp-server', 'opt_value': '123.123.123.123'}] expected = [{'opt_name': 'server-ip-address', 'opt_value': '123.123.123.456'}, {'opt_name': 'tftp-server', 'opt_value': '123.123.123.123'}] params = {edo_ext.EXTRADHCPOPTS: opt_list, 
'arg_list': (edo_ext.EXTRADHCPOPTS,)} with self.port(**params) as port: self._check_opts(expected, port['port'][edo_ext.EXTRADHCPOPTS]) def test_create_port_with_empty_router_extradhcpopts(self): opt_list = [{'opt_name': 'router', 'opt_value': ''}, {'opt_name': 'server-ip-address', 'opt_value': '123.123.123.456'}, {'opt_name': 'tftp-server', 'opt_value': '123.123.123.123'}] params = {edo_ext.EXTRADHCPOPTS: opt_list, 'arg_list': (edo_ext.EXTRADHCPOPTS,)} with self.port(**params) as port: self._check_opts(opt_list, port['port'][edo_ext.EXTRADHCPOPTS]) def test_create_port_with_extradhcpopts_ipv4_opt_version(self): opt_list = [{'opt_name': 'bootfile-name', 'opt_value': 'pxelinux.0', 'ip_version': 4}, {'opt_name': 'server-ip-address', 'opt_value': '123.123.123.456', 'ip_version': 4}, {'opt_name': 'tftp-server', 'opt_value': '123.123.123.123', 'ip_version': 4}] params = {edo_ext.EXTRADHCPOPTS: opt_list, 'arg_list': (edo_ext.EXTRADHCPOPTS,)} with self.port(**params) as port: self._check_opts(opt_list, port['port'][edo_ext.EXTRADHCPOPTS]) def test_create_port_with_extradhcpopts_ipv6_opt_version(self): opt_list = [{'opt_name': 'bootfile-name', 'opt_value': 'pxelinux.0', 'ip_version': 6}, {'opt_name': 'tftp-server', 'opt_value': '2001:192:168::1', 'ip_version': 6}] params = {edo_ext.EXTRADHCPOPTS: opt_list, 'arg_list': (edo_ext.EXTRADHCPOPTS,)} with self.port(**params) as port: self._check_opts(opt_list, port['port'][edo_ext.EXTRADHCPOPTS]) def _test_update_port_with_extradhcpopts(self, opt_list, upd_opts, expected_opts): params = {edo_ext.EXTRADHCPOPTS: opt_list, 'arg_list': (edo_ext.EXTRADHCPOPTS,)} with self.port(**params) as port: update_port = {'port': {edo_ext.EXTRADHCPOPTS: upd_opts}} req = self.new_update_request('ports', update_port, port['port']['id']) res = req.get_response(self.api) self.assertEqual(webob.exc.HTTPOk.code, res.status_int) port = self.deserialize('json', res) self._check_opts(expected_opts, port['port'][edo_ext.EXTRADHCPOPTS]) def test_update_port_with_extradhcpopts_with_same(self): opt_list = [{'opt_name': 'bootfile-name', 'opt_value': 'pxelinux.0'}, {'opt_name': 'tftp-server', 'opt_value': '123.123.123.123'}, {'opt_name': 'server-ip-address', 'opt_value': '123.123.123.456'}] upd_opts = [{'opt_name': 'bootfile-name', 'opt_value': 'changeme.0'}] expected_opts = opt_list[:] for i in expected_opts: if i['opt_name'] == upd_opts[0]['opt_name']: i['opt_value'] = upd_opts[0]['opt_value'] break self._test_update_port_with_extradhcpopts(opt_list, upd_opts, expected_opts) def test_update_port_with_additional_extradhcpopt(self): opt_list = [{'opt_name': 'tftp-server', 'opt_value': '123.123.123.123'}, {'opt_name': 'server-ip-address', 'opt_value': '123.123.123.456'}] upd_opts = [{'opt_name': 'bootfile-name', 'opt_value': 'changeme.0'}] expected_opts = copy.deepcopy(opt_list) expected_opts.append(upd_opts[0]) self._test_update_port_with_extradhcpopts(opt_list, upd_opts, expected_opts) def test_update_port_with_extradhcpopts(self): opt_list = [{'opt_name': 'bootfile-name', 'opt_value': 'pxelinux.0'}, {'opt_name': 'tftp-server', 'opt_value': '123.123.123.123'}, {'opt_name': 'server-ip-address', 'opt_value': '123.123.123.456'}] upd_opts = [{'opt_name': 'bootfile-name', 'opt_value': 'changeme.0'}] expected_opts = copy.deepcopy(opt_list) for i in expected_opts: if i['opt_name'] == upd_opts[0]['opt_name']: i['opt_value'] = upd_opts[0]['opt_value'] break self._test_update_port_with_extradhcpopts(opt_list, upd_opts, expected_opts) def test_update_port_with_extradhcpopt_delete(self): opt_list 
= [{'opt_name': 'bootfile-name', 'opt_value': 'pxelinux.0'}, {'opt_name': 'tftp-server', 'opt_value': '123.123.123.123'}, {'opt_name': 'server-ip-address', 'opt_value': '123.123.123.456'}] upd_opts = [{'opt_name': 'bootfile-name', 'opt_value': None}] expected_opts = [] expected_opts = [opt for opt in opt_list if opt['opt_name'] != 'bootfile-name'] self._test_update_port_with_extradhcpopts(opt_list, upd_opts, expected_opts) def test_update_port_without_extradhcpopt_delete(self): opt_list = [] upd_opts = [{'opt_name': 'bootfile-name', 'opt_value': None}] expected_opts = [] self._test_update_port_with_extradhcpopts(opt_list, upd_opts, expected_opts) def test_update_port_adding_extradhcpopts(self): opt_list = [] upd_opts = [{'opt_name': 'bootfile-name', 'opt_value': 'pxelinux.0'}, {'opt_name': 'tftp-server', 'opt_value': '123.123.123.123'}, {'opt_name': 'server-ip-address', 'opt_value': '123.123.123.456'}] expected_opts = copy.deepcopy(upd_opts) self._test_update_port_with_extradhcpopts(opt_list, upd_opts, expected_opts) def test_update_port_with_blank_string_extradhcpopt(self): opt_list = [{'opt_name': 'bootfile-name', 'opt_value': 'pxelinux.0'}, {'opt_name': 'tftp-server', 'opt_value': '123.123.123.123'}, {'opt_name': 'server-ip-address', 'opt_value': '123.123.123.456'}] upd_opts = [{'opt_name': 'bootfile-name', 'opt_value': ' '}] params = {edo_ext.EXTRADHCPOPTS: opt_list, 'arg_list': (edo_ext.EXTRADHCPOPTS,)} with self.port(**params) as port: update_port = {'port': {edo_ext.EXTRADHCPOPTS: upd_opts}} req = self.new_update_request('ports', update_port, port['port']['id']) res = req.get_response(self.api) self.assertEqual(webob.exc.HTTPBadRequest.code, res.status_int) def test_update_port_with_blank_name_extradhcpopt(self): opt_list = [{'opt_name': 'bootfile-name', 'opt_value': 'pxelinux.0'}, {'opt_name': 'tftp-server', 'opt_value': '123.123.123.123'}, {'opt_name': 'server-ip-address', 'opt_value': '123.123.123.456'}] upd_opts = [{'opt_name': ' ', 'opt_value': 'pxelinux.0'}] params = {edo_ext.EXTRADHCPOPTS: opt_list, 'arg_list': (edo_ext.EXTRADHCPOPTS,)} with self.port(**params) as port: update_port = {'port': {edo_ext.EXTRADHCPOPTS: upd_opts}} req = self.new_update_request('ports', update_port, port['port']['id']) res = req.get_response(self.api) self.assertEqual(webob.exc.HTTPBadRequest.code, res.status_int) def test_update_port_with_blank_router_extradhcpopt(self): opt_list = [{'opt_name': 'bootfile-name', 'opt_value': 'pxelinux.0', 'ip_version': 4}, {'opt_name': 'tftp-server', 'opt_value': '123.123.123.123', 'ip_version': 4}, {'opt_name': 'router', 'opt_value': '123.123.123.1', 'ip_version': 4}] upd_opts = [{'opt_name': 'router', 'opt_value': '', 'ip_version': 4}] expected_opts = copy.deepcopy(opt_list) for i in expected_opts: if i['opt_name'] == upd_opts[0]['opt_name']: i['opt_value'] = upd_opts[0]['opt_value'] break self._test_update_port_with_extradhcpopts(opt_list, upd_opts, expected_opts) def test_update_port_with_extradhcpopts_ipv6_change_value(self): opt_list = [{'opt_name': 'bootfile-name', 'opt_value': 'pxelinux.0', 'ip_version': 6}, {'opt_name': 'tftp-server', 'opt_value': '2001:192:168::1', 'ip_version': 6}] upd_opts = [{'opt_name': 'tftp-server', 'opt_value': '2001:192:168::2', 'ip_version': 6}] expected_opts = copy.deepcopy(opt_list) for i in expected_opts: if i['opt_name'] == upd_opts[0]['opt_name']: i['opt_value'] = upd_opts[0]['opt_value'] break self._test_update_port_with_extradhcpopts(opt_list, upd_opts, expected_opts) def 
test_update_port_with_extradhcpopts_add_another_ver_opt(self): opt_list = [{'opt_name': 'bootfile-name', 'opt_value': 'pxelinux.0', 'ip_version': 6}, {'opt_name': 'tftp-server', 'opt_value': '2001:192:168::1', 'ip_version': 6}] upd_opts = [{'opt_name': 'tftp-server', 'opt_value': '123.123.123.123', 'ip_version': 4}] expected_opts = copy.deepcopy(opt_list) expected_opts.extend(upd_opts) self._test_update_port_with_extradhcpopts(opt_list, upd_opts, expected_opts) neutron-8.4.0/neutron/tests/unit/extensions/test_quotasv2.py0000664000567000056710000005047413044372760025605 0ustar jenkinsjenkins00000000000000# Copyright 2012 OpenStack Foundation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import sys import mock from oslo_config import cfg import testtools from webob import exc import webtest from neutron.api import extensions from neutron.api.v2 import router from neutron.common import config from neutron.common import constants from neutron.common import exceptions from neutron import context from neutron.db.quota import driver from neutron import quota from neutron.quota import resource_registry from neutron.tests import base from neutron.tests import tools from neutron.tests.unit.api.v2 import test_base from neutron.tests.unit import testlib_api TARGET_PLUGIN = 'neutron.plugins.ml2.plugin.Ml2Plugin' _get_path = test_base._get_path class QuotaExtensionTestCase(testlib_api.WebTestCase): def setUp(self): super(QuotaExtensionTestCase, self).setUp() # Ensure existing ExtensionManager is not used extensions.PluginAwareExtensionManager._instance = None self.useFixture(tools.AttributeMapMemento()) # Create the default configurations self.config_parse() # Update the plugin and extensions path self.setup_coreplugin(TARGET_PLUGIN) cfg.CONF.set_override( 'quota_items', ['network', 'subnet', 'port', 'extra1'], group='QUOTAS') quota.QUOTAS = quota.QuotaEngine() quota.register_resources_from_config() self._plugin_patcher = mock.patch(TARGET_PLUGIN, autospec=True) self.plugin = self._plugin_patcher.start() self.plugin.return_value.supported_extension_aliases = ['quotas'] # QUOTAS will register the items in conf when starting # extra1 here is added later, so have to do it manually resource_registry.register_resource_by_name('extra1') ext_mgr = extensions.PluginAwareExtensionManager.get_instance() app = config.load_paste_app('extensions_test_app') ext_middleware = extensions.ExtensionMiddleware(app, ext_mgr=ext_mgr) self.api = webtest.TestApp(ext_middleware) # Initialize the router for the core API in order to ensure core quota # resources are registered router.APIRouter() def tearDown(self): self.api = None self.plugin = None super(QuotaExtensionTestCase, self).tearDown() def _test_quota_default_values(self, expected_values): tenant_id = 'tenant_id1' env = {'neutron.context': context.Context('', tenant_id)} res = self.api.get(_get_path('quotas', id=tenant_id, fmt=self.fmt), extra_environ=env) quota = self.deserialize(res) for resource, expected_value in expected_values.items(): 
self.assertEqual(expected_value, quota['quota'][resource]) class QuotaExtensionDbTestCase(QuotaExtensionTestCase): fmt = 'json' def setUp(self): cfg.CONF.set_override( 'quota_driver', 'neutron.db.quota.driver.DbQuotaDriver', group='QUOTAS') super(QuotaExtensionDbTestCase, self).setUp() def test_quotas_loaded_right(self): res = self.api.get(_get_path('quotas', fmt=self.fmt)) quota = self.deserialize(res) self.assertEqual([], quota['quotas']) self.assertEqual(200, res.status_int) def test_quotas_default_values(self): self._test_quota_default_values( {'network': 10, 'subnet': 10, 'port': 50, 'extra1': -1}) def test_quotas_negative_default_value(self): cfg.CONF.set_override( 'quota_port', -666, group='QUOTAS') cfg.CONF.set_override( 'quota_network', -10, group='QUOTAS') cfg.CONF.set_override( 'quota_subnet', -50, group='QUOTAS') self._test_quota_default_values( {'network': -1, 'subnet': -1, 'port': -1, 'extra1': -1}) def test_show_quotas_with_admin(self): tenant_id = 'tenant_id1' env = {'neutron.context': context.Context('', tenant_id + '2', is_admin=True)} res = self.api.get(_get_path('quotas', id=tenant_id, fmt=self.fmt), extra_environ=env) self.assertEqual(200, res.status_int) quota = self.deserialize(res) self.assertEqual(10, quota['quota']['network']) self.assertEqual(10, quota['quota']['subnet']) self.assertEqual(50, quota['quota']['port']) def test_show_quotas_without_admin_forbidden_returns_403(self): tenant_id = 'tenant_id1' env = {'neutron.context': context.Context('', tenant_id + '2', is_admin=False)} res = self.api.get(_get_path('quotas', id=tenant_id, fmt=self.fmt), extra_environ=env, expect_errors=True) self.assertEqual(403, res.status_int) def test_show_quotas_with_owner_tenant(self): tenant_id = 'tenant_id1' env = {'neutron.context': context.Context('', tenant_id, is_admin=False)} res = self.api.get(_get_path('quotas', id=tenant_id, fmt=self.fmt), extra_environ=env) self.assertEqual(200, res.status_int) quota = self.deserialize(res) self.assertEqual(10, quota['quota']['network']) self.assertEqual(10, quota['quota']['subnet']) self.assertEqual(50, quota['quota']['port']) def test_list_quotas_with_admin(self): tenant_id = 'tenant_id1' env = {'neutron.context': context.Context('', tenant_id, is_admin=True)} res = self.api.get(_get_path('quotas', fmt=self.fmt), extra_environ=env) self.assertEqual(200, res.status_int) quota = self.deserialize(res) self.assertEqual([], quota['quotas']) def test_list_quotas_without_admin_forbidden_returns_403(self): tenant_id = 'tenant_id1' env = {'neutron.context': context.Context('', tenant_id, is_admin=False)} res = self.api.get(_get_path('quotas', fmt=self.fmt), extra_environ=env, expect_errors=True) self.assertEqual(403, res.status_int) def test_update_quotas_without_admin_forbidden_returns_403(self): tenant_id = 'tenant_id1' env = {'neutron.context': context.Context('', tenant_id, is_admin=False)} quotas = {'quota': {'network': 100}} res = self.api.put(_get_path('quotas', id=tenant_id, fmt=self.fmt), self.serialize(quotas), extra_environ=env, expect_errors=True) self.assertEqual(403, res.status_int) def test_update_quotas_with_non_integer_returns_400(self): tenant_id = 'tenant_id1' env = {'neutron.context': context.Context('', tenant_id, is_admin=True)} quotas = {'quota': {'network': 'abc'}} res = self.api.put(_get_path('quotas', id=tenant_id, fmt=self.fmt), self.serialize(quotas), extra_environ=env, expect_errors=True) self.assertEqual(400, res.status_int) def test_update_quotas_with_negative_integer_returns_400(self): tenant_id = 'tenant_id1' 
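        # NOTE: annotation added for clarity; judging from the surrounding
        # tests, -1 is the only negative quota value the API accepts
        # (meaning "unlimited", see test_update_quotas_to_unlimited
        # below); any other negative integer, such as the -2 used here,
        # is rejected with HTTP 400.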
env = {'neutron.context': context.Context('', tenant_id, is_admin=True)} quotas = {'quota': {'network': -2}} res = self.api.put(_get_path('quotas', id=tenant_id, fmt=self.fmt), self.serialize(quotas), extra_environ=env, expect_errors=True) self.assertEqual(400, res.status_int) def test_update_quotas_with_out_of_range_integer_returns_400(self): tenant_id = 'tenant_id1' env = {'neutron.context': context.Context('', tenant_id, is_admin=True)} quotas = {'quota': {'network': constants.DB_INTEGER_MAX_VALUE + 1}} res = self.api.put(_get_path('quotas', id=tenant_id, fmt=self.fmt), self.serialize(quotas), extra_environ=env, expect_errors=True) self.assertEqual(exc.HTTPBadRequest.code, res.status_int) def test_update_quotas_to_unlimited(self): tenant_id = 'tenant_id1' env = {'neutron.context': context.Context('', tenant_id, is_admin=True)} quotas = {'quota': {'network': -1}} res = self.api.put(_get_path('quotas', id=tenant_id, fmt=self.fmt), self.serialize(quotas), extra_environ=env, expect_errors=False) self.assertEqual(200, res.status_int) def test_update_quotas_exceeding_current_limit(self): tenant_id = 'tenant_id1' env = {'neutron.context': context.Context('', tenant_id, is_admin=True)} quotas = {'quota': {'network': 120}} res = self.api.put(_get_path('quotas', id=tenant_id, fmt=self.fmt), self.serialize(quotas), extra_environ=env, expect_errors=False) self.assertEqual(200, res.status_int) def test_update_quotas_with_non_support_resource_returns_400(self): tenant_id = 'tenant_id1' env = {'neutron.context': context.Context('', tenant_id, is_admin=True)} quotas = {'quota': {'abc': 100}} res = self.api.put(_get_path('quotas', id=tenant_id, fmt=self.fmt), self.serialize(quotas), extra_environ=env, expect_errors=True) self.assertEqual(400, res.status_int) def test_update_quotas_with_admin(self): tenant_id = 'tenant_id1' env = {'neutron.context': context.Context('', tenant_id + '2', is_admin=True)} quotas = {'quota': {'network': 100}} res = self.api.put(_get_path('quotas', id=tenant_id, fmt=self.fmt), self.serialize(quotas), extra_environ=env) self.assertEqual(200, res.status_int) env2 = {'neutron.context': context.Context('', tenant_id)} res = self.api.get(_get_path('quotas', id=tenant_id, fmt=self.fmt), extra_environ=env2) quota = self.deserialize(res) self.assertEqual(100, quota['quota']['network']) self.assertEqual(10, quota['quota']['subnet']) self.assertEqual(50, quota['quota']['port']) def test_update_attributes(self): tenant_id = 'tenant_id1' env = {'neutron.context': context.Context('', tenant_id + '2', is_admin=True)} quotas = {'quota': {'extra1': 100}} res = self.api.put(_get_path('quotas', id=tenant_id, fmt=self.fmt), self.serialize(quotas), extra_environ=env) self.assertEqual(200, res.status_int) env2 = {'neutron.context': context.Context('', tenant_id)} res = self.api.get(_get_path('quotas', id=tenant_id, fmt=self.fmt), extra_environ=env2) quota = self.deserialize(res) self.assertEqual(100, quota['quota']['extra1']) def test_delete_quotas_with_admin(self): tenant_id = 'tenant_id1' env = {'neutron.context': context.Context('', tenant_id + '2', is_admin=True)} res = self.api.delete(_get_path('quotas', id=tenant_id, fmt=self.fmt), extra_environ=env) self.assertEqual(204, res.status_int) def test_delete_quotas_without_admin_forbidden_returns_403(self): tenant_id = 'tenant_id1' env = {'neutron.context': context.Context('', tenant_id, is_admin=False)} res = self.api.delete(_get_path('quotas', id=tenant_id, fmt=self.fmt), extra_environ=env, expect_errors=True) self.assertEqual(403, 
res.status_int) def test_quotas_loaded_bad_returns_404(self): try: res = self.api.get(_get_path('quotas'), expect_errors=True) self.assertEqual(404, res.status_int) except Exception: pass def test_quotas_limit_check(self): tenant_id = 'tenant_id1' env = {'neutron.context': context.Context('', tenant_id, is_admin=True)} quotas = {'quota': {'network': 5}} res = self.api.put(_get_path('quotas', id=tenant_id, fmt=self.fmt), self.serialize(quotas), extra_environ=env) self.assertEqual(200, res.status_int) quota.QUOTAS.limit_check(context.Context('', tenant_id), tenant_id, network=4) def test_quotas_limit_check_with_invalid_quota_value(self): tenant_id = 'tenant_id1' with testtools.ExpectedException(exceptions.InvalidQuotaValue): quota.QUOTAS.limit_check(context.Context('', tenant_id), tenant_id, network=-2) def test_quotas_limit_check_with_not_registered_resource_fails(self): tenant_id = 'tenant_id1' self.assertRaises(exceptions.QuotaResourceUnknown, quota.QUOTAS.limit_check, context.get_admin_context(), tenant_id, foobar=1) def test_quotas_get_tenant_from_request_context(self): tenant_id = 'tenant_id1' env = {'neutron.context': context.Context('', tenant_id, is_admin=True)} res = self.api.get(_get_path('quotas/tenant', fmt=self.fmt), extra_environ=env) self.assertEqual(200, res.status_int) quota = self.deserialize(res) self.assertEqual(quota['tenant']['tenant_id'], tenant_id) def test_quotas_get_tenant_from_empty_request_context_returns_400(self): env = {'neutron.context': context.Context('', '', is_admin=True)} res = self.api.get(_get_path('quotas/tenant', fmt=self.fmt), extra_environ=env, expect_errors=True) self.assertEqual(400, res.status_int) def test_make_reservation_resource_unknown_raises(self): tenant_id = 'tenant_id1' self.assertRaises(exceptions.QuotaResourceUnknown, quota.QUOTAS.make_reservation, context.get_admin_context(), tenant_id, {'foobar': 1}, plugin=None) def test_make_reservation_negative_delta_raises(self): tenant_id = 'tenant_id1' self.assertRaises(exceptions.InvalidQuotaValue, quota.QUOTAS.make_reservation, context.get_admin_context(), tenant_id, {'network': -1}, plugin=None) class QuotaExtensionCfgTestCase(QuotaExtensionTestCase): fmt = 'json' def setUp(self): cfg.CONF.set_override( 'quota_driver', 'neutron.quota.ConfDriver', group='QUOTAS') super(QuotaExtensionCfgTestCase, self).setUp() def test_quotas_default_values(self): self._test_quota_default_values( {'network': 10, 'subnet': 10, 'port': 50, 'extra1': -1}) def test_quotas_negative_default_value(self): cfg.CONF.set_override( 'quota_port', -666, group='QUOTAS') self._test_quota_default_values( {'network': 10, 'subnet': 10, 'port': -1, 'extra1': -1}) def test_show_quotas_with_admin(self): tenant_id = 'tenant_id1' env = {'neutron.context': context.Context('', tenant_id + '2', is_admin=True)} res = self.api.get(_get_path('quotas', id=tenant_id, fmt=self.fmt), extra_environ=env) self.assertEqual(200, res.status_int) def test_show_quotas_without_admin_forbidden(self): tenant_id = 'tenant_id1' env = {'neutron.context': context.Context('', tenant_id + '2', is_admin=False)} res = self.api.get(_get_path('quotas', id=tenant_id, fmt=self.fmt), extra_environ=env, expect_errors=True) self.assertEqual(403, res.status_int) def test_update_quotas_forbidden(self): tenant_id = 'tenant_id1' quotas = {'quota': {'network': 100}} res = self.api.put(_get_path('quotas', id=tenant_id, fmt=self.fmt), self.serialize(quotas), expect_errors=True) self.assertEqual(403, res.status_int) def test_delete_quotas_forbidden(self): tenant_id = 
'tenant_id1' env = {'neutron.context': context.Context('', tenant_id, is_admin=False)} res = self.api.delete(_get_path('quotas', id=tenant_id, fmt=self.fmt), extra_environ=env, expect_errors=True) self.assertEqual(403, res.status_int) class TestDbQuotaDriver(base.BaseTestCase): """Test for neutron.db.quota.driver.DbQuotaDriver.""" def test_get_tenant_quotas_arg(self): """Call neutron.db.quota.driver.DbQuotaDriver._get_quotas.""" quota_driver = driver.DbQuotaDriver() ctx = context.Context('', 'bar') foo_quotas = {'network': 5} default_quotas = {'network': 10} target_tenant = 'foo' with mock.patch.object(driver.DbQuotaDriver, 'get_tenant_quotas', return_value=foo_quotas) as get_tenant_quotas: quotas = quota_driver._get_quotas(ctx, target_tenant, default_quotas) self.assertEqual(quotas, foo_quotas) get_tenant_quotas.assert_called_once_with(ctx, default_quotas, target_tenant) class TestQuotaDriverLoad(base.BaseTestCase): def setUp(self): super(TestQuotaDriverLoad, self).setUp() # Make sure QuotaEngine is reinitialized in each test. quota.QUOTAS._driver = None def _test_quota_driver(self, cfg_driver, loaded_driver, with_quota_db_module=True): cfg.CONF.set_override('quota_driver', cfg_driver, group='QUOTAS') with mock.patch.dict(sys.modules, {}): if (not with_quota_db_module and 'neutron.db.quota.driver' in sys.modules): del sys.modules['neutron.db.quota.driver'] driver = quota.QUOTAS.get_driver() self.assertEqual(loaded_driver, driver.__class__.__name__) def test_quota_db_driver_with_quotas_table(self): self._test_quota_driver('neutron.db.quota.driver.DbQuotaDriver', 'DbQuotaDriver', True) def test_quota_db_driver_fallback_conf_driver(self): self._test_quota_driver('neutron.db.quota.driver.DbQuotaDriver', 'ConfDriver', False) def test_quota_conf_driver(self): self._test_quota_driver('neutron.quota.ConfDriver', 'ConfDriver', True) neutron-8.4.0/neutron/tests/unit/extensions/foxinsocks.py0000664000567000056710000000701013044372760025134 0ustar jenkinsjenkins00000000000000# Copyright 2011 OpenStack Foundation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import abc from oslo_serialization import jsonutils from neutron.api import extensions from neutron import wsgi class FoxInSocksController(wsgi.Controller): def index(self, request): return "Try to say this Mr. Knox, sir..." 
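# NOTE: illustrative summary, not part of the original module. Foxinsocks
# below is the toy ExtensionDescriptor that the extension framework tests
# load; the pieces it wires together are roughly:
#
#     ext = Foxinsocks()
#     ext.get_alias()               # "FOXNSOX", the alias used in API calls
#     ext.get_resources()           # ResourceExtension for /foxnsocks
#     ext.get_actions()             # POST actions on dummy_resources
#     ext.get_request_extensions()  # handlers decorating GET responses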
class FoxInSocksPluginInterface(extensions.PluginInterface):

    @abc.abstractmethod
    def method_to_support_foxnsox_extension(self):
        pass


class Foxinsocks(extensions.ExtensionDescriptor):

    def __init__(self):
        pass

    def get_plugin_interface(self):
        return FoxInSocksPluginInterface

    def get_name(self):
        return "Fox In Socks"

    def get_alias(self):
        return "FOXNSOX"

    def get_description(self):
        return "The Fox In Socks Extension"

    def get_updated(self):
        return "2011-01-22T13:25:27-06:00"

    def get_resources(self):
        resources = []
        resource = extensions.ResourceExtension('foxnsocks',
                                                FoxInSocksController())
        resources.append(resource)
        return resources

    def get_actions(self):
        return [extensions.ActionExtension('dummy_resources',
                                           'FOXNSOX:add_tweedle',
                                           self._add_tweedle_handler),
                extensions.ActionExtension('dummy_resources',
                                           'FOXNSOX:delete_tweedle',
                                           self._delete_tweedle_handler)]

    def get_request_extensions(self):
        request_exts = []

        def _goose_handler(req, res):
            #NOTE: This only handles JSON responses.
            # You can use content type header to test for XML.
            data = jsonutils.loads(res.body)
            data['FOXNSOX:googoose'] = req.GET.get('chewing')
            res.body = jsonutils.dumps(data).encode('utf-8')
            return res

        req_ext1 = extensions.RequestExtension('GET',
                                               '/dummy_resources/:(id)',
                                               _goose_handler)
        request_exts.append(req_ext1)

        def _bands_handler(req, res):
            #NOTE: This only handles JSON responses.
            # You can use content type header to test for XML.
            data = jsonutils.loads(res.body)
            data['FOXNSOX:big_bands'] = 'Pig Bands!'
            res.body = jsonutils.dumps(data).encode('utf-8')
            return res

        req_ext2 = extensions.RequestExtension('GET',
                                               '/dummy_resources/:(id)',
                                               _bands_handler)
        request_exts.append(req_ext2)
        return request_exts

    def _add_tweedle_handler(self, input_dict, req, id):
        return "Tweedle {0} Added.".format(
            input_dict['FOXNSOX:add_tweedle']['name'])

    def _delete_tweedle_handler(self, input_dict, req, id):
        return "Tweedle {0} Deleted.".format(
            input_dict['FOXNSOX:delete_tweedle']['name'])
neutron-8.4.0/neutron/tests/unit/extensions/test_vlantransparent.py0000664000567000056710000001135613044372760027237 0ustar jenkinsjenkins00000000000000# Copyright (c) 2015 Cisco Systems Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
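# NOTE: illustrative summary, not part of the original module. The test
# case below drives the 'vlan-transparent' extension end to end: a network
# is created with the attribute and the test asserts that the value
# round-trips through the API, along the lines of:
#
#     with self.network(name='net1', vlan_transparent=True) as net:
#         req = self.new_show_request('networks', net['network']['id'])
#         res = self.deserialize(self.fmt, req.get_response(self.api))
#         self.assertTrue(res['network'][vlt.VLANTRANSPARENT])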
from oslo_config import cfg
import six
from webob import exc as web_exc

from neutron.api.v2 import attributes
from neutron.db import db_base_plugin_v2
from neutron.db import vlantransparent_db as vlt_db
from neutron.extensions import vlantransparent as vlt
from neutron import quota
from neutron.tests.unit.db import test_db_base_plugin_v2
from neutron.tests.unit import testlib_api


class VlanTransparentExtensionManager(object):

    def get_resources(self):
        return []

    def get_actions(self):
        return []

    def get_request_extensions(self):
        return []

    def get_extended_resources(self, version):
        return vlt.get_extended_resources(version)


class VlanTransparentExtensionTestPlugin(db_base_plugin_v2.NeutronDbPluginV2,
                                         vlt_db.Vlantransparent_db_mixin):
    """Test plugin to mixin the VLAN transparent extensions."""

    supported_extension_aliases = ["vlan-transparent"]

    def create_network(self, context, network):
        with context.session.begin(subtransactions=True):
            new_net = super(VlanTransparentExtensionTestPlugin,
                            self).create_network(context, network)
            # Update the vlan_transparent in the database
            n = network['network']
            vlan_transparent = vlt.get_vlan_transparent(n)
            network = self._get_network(context, new_net['id'])
            n['vlan_transparent'] = vlan_transparent
            network.update(n)
        return new_net


class VlanTransparentExtensionTestCase(test_db_base_plugin_v2.TestNetworksV2):
    fmt = 'json'

    def setUp(self):
        plugin = ('neutron.tests.unit.extensions.test_vlantransparent.'
                  'VlanTransparentExtensionTestPlugin')
        # Save the global RESOURCE_ATTRIBUTE_MAP
        self.saved_attr_map = {}
        for res, attrs in six.iteritems(attributes.RESOURCE_ATTRIBUTE_MAP):
            self.saved_attr_map[res] = attrs.copy()
        # Enable pagination and sorting for the requests made by the tests
        cfg.CONF.set_override('allow_pagination', True)
        cfg.CONF.set_override('allow_sorting', True)
        ext_mgr = VlanTransparentExtensionManager()
        self.addCleanup(self._restore_attribute_map)
        super(VlanTransparentExtensionTestCase, self).setUp(plugin=plugin,
                                                            ext_mgr=ext_mgr)
        quota.QUOTAS._driver = None
        cfg.CONF.set_override('quota_driver', 'neutron.quota.ConfDriver',
                              group='QUOTAS')

    def _restore_attribute_map(self):
        # Restore the global RESOURCE_ATTRIBUTE_MAP
        attributes.RESOURCE_ATTRIBUTE_MAP = self.saved_attr_map

    def test_network_create_with_vlan_transparent_attr(self):
        vlantrans = {'vlan_transparent': True}
        with self.network(name='net1', **vlantrans) as net:
            req = self.new_show_request('networks', net['network']['id'])
            res = self.deserialize(self.fmt, req.get_response(self.api))
            self.assertEqual(net['network']['name'], res['network']['name'])
            self.assertTrue(res['network'][vlt.VLANTRANSPARENT])

    def test_network_create_with_bad_vlan_transparent_attr(self):
        vlantrans = {'vlan_transparent': "abc"}
        with testlib_api.ExpectedException(
                web_exc.HTTPClientError) as ctx_manager:
            with self.network(name='net1', **vlantrans):
                pass
        self.assertEqual(web_exc.HTTPClientError.code,
                         ctx_manager.exception.code)

    def test_network_update_with_vlan_transparent_exception(self):
        with self.network(name='net1') as net:
            self._update('networks', net['network']['id'],
                         {'network': {vlt.VLANTRANSPARENT: False}},
                         web_exc.HTTPBadRequest.code)
            req = self.new_show_request('networks', net['network']['id'])
            res = self.deserialize(self.fmt, req.get_response(self.api))
            self.assertEqual(net['network']['name'], res['network']['name'])
            self.assertFalse(res['network'][vlt.VLANTRANSPARENT])
neutron-8.4.0/neutron/tests/unit/dummy_plugin.py0000664000567000056710000001053613044372760023267 0ustar jenkinsjenkins00000000000000# Copyright 2012 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from oslo_utils import uuidutils

from neutron.api import extensions
from neutron.api.v2 import base
from neutron.common import exceptions
from neutron.db import servicetype_db
from neutron.extensions import servicetype
from neutron import manager
from neutron.plugins.common import constants
from neutron.services import service_base


RESOURCE_NAME = "dummy"
COLLECTION_NAME = "%ss" % RESOURCE_NAME

# Attribute Map for dummy resource
RESOURCE_ATTRIBUTE_MAP = {
    COLLECTION_NAME: {
        'id': {'allow_post': False, 'allow_put': False,
               'validate': {'type:uuid': None},
               'is_visible': True},
        'name': {'allow_post': True, 'allow_put': True,
                 'validate': {'type:string': None},
                 'is_visible': True, 'default': ''},
        'tenant_id': {'allow_post': True, 'allow_put': False,
                      'required_by_policy': True,
                      'is_visible': True},
        'service_type': {'allow_post': True,
                         'allow_put': False,
                         'validate': {'type:servicetype_ref': None},
                         'is_visible': True,
                         'default': None}
    }
}


class Dummy(object):

    @classmethod
    def get_name(cls):
        return "dummy"

    @classmethod
    def get_alias(cls):
        return "dummy"

    @classmethod
    def get_description(cls):
        return "Dummy stuff"

    @classmethod
    def get_updated(cls):
        return "2012-11-20T10:00:00-00:00"

    @classmethod
    def get_resources(cls):
        """Returns Extended Resource for dummy management."""
        n_mgr = manager.NeutronManager.get_instance()
        dummy_inst = n_mgr.get_service_plugins()['DUMMY']
        controller = base.create_resource(
            COLLECTION_NAME, RESOURCE_NAME, dummy_inst,
            RESOURCE_ATTRIBUTE_MAP[COLLECTION_NAME])
        return [extensions.ResourceExtension(COLLECTION_NAME,
                                             controller)]


class DummyServicePlugin(service_base.ServicePluginBase):
    """This is a simple plugin for managing instances of a fictional
    'dummy' service. This plugin is provided as a proof-of-concept of
    how an advanced service might leverage the service type extension.
    Ideally, instances of real advanced services, such as load balancing
    or VPN, will adopt a similar solution.
""" supported_extension_aliases = ['dummy', servicetype.EXT_ALIAS] path_prefix = "/dummy_svc" agent_notifiers = {'dummy': 'dummy_agent_notifier'} def __init__(self): self.svctype_mgr = servicetype_db.ServiceTypeManager.get_instance() self.dummys = {} def get_plugin_type(self): return constants.DUMMY def get_plugin_description(self): return "Neutron Dummy Service Plugin" def get_dummys(self, context, filters, fields): return self.dummys.values() def get_dummy(self, context, id, fields): try: return self.dummys[id] except KeyError: raise exceptions.NotFound() def create_dummy(self, context, dummy): d = dummy['dummy'] d['id'] = uuidutils.generate_uuid() self.dummys[d['id']] = d self.svctype_mgr.increase_service_type_refcount(context, d['service_type']) return d def update_dummy(self, context, id, dummy): pass def delete_dummy(self, context, id): try: svc_type_id = self.dummys[id]['service_type'] del self.dummys[id] self.svctype_mgr.decrease_service_type_refcount(context, svc_type_id) except KeyError: raise exceptions.NotFound() neutron-8.4.0/neutron/tests/unit/agent/0000775000567000056710000000000013044373210021264 5ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/tests/unit/agent/ovsdb/0000775000567000056710000000000013044373210022401 5ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/tests/unit/agent/ovsdb/test_impl_idl.py0000664000567000056710000000345313044372760025621 0ustar jenkinsjenkins00000000000000# Copyright (c) 2016 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import mock from six.moves import queue import testtools import unittest2 try: from ovs.db import idl # noqa except ImportError: raise unittest2.SkipTest( "Skip test since ovs requirement for PY3 doesn't support yet.") from neutron.agent.ovsdb import api from neutron.agent.ovsdb import impl_idl from neutron.tests import base class TransactionTestCase(base.BaseTestCase): def test_commit_raises_exception_on_timeout(self): with mock.patch.object(queue, 'Queue') as mock_queue: transaction = impl_idl.NeutronOVSDBTransaction(mock.sentinel, mock.Mock(), 0) mock_queue.return_value.get.side_effect = queue.Empty with testtools.ExpectedException(api.TimeoutException): transaction.commit() def test_post_commit_does_not_raise_exception(self): with mock.patch.object(impl_idl.NeutronOVSDBTransaction, "do_post_commit", side_effect=Exception): transaction = impl_idl.NeutronOVSDBTransaction(mock.sentinel, mock.Mock(), 0) transaction.post_commit(mock.Mock()) neutron-8.4.0/neutron/tests/unit/agent/ovsdb/native/0000775000567000056710000000000013044373210023667 5ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/tests/unit/agent/ovsdb/native/__init__.py0000664000567000056710000000000013044372736026002 0ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/tests/unit/agent/ovsdb/native/test_helpers.py0000664000567000056710000000312413044372760026753 0ustar jenkinsjenkins00000000000000# Copyright 2015, Red Hat, Inc. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import mock from neutron.agent.ovsdb.native import helpers from neutron.tests import base CONNECTION_TO_MANAGER_URI_MAP = ( ('unix:/path/to/file', 'punix:/path/to/file'), ('tcp:127.0.0.1:6640', 'ptcp:6640:127.0.0.1'), ('ssl:192.168.1.1:8080', 'pssl:8080:192.168.1.1')) class TestOVSNativeHelpers(base.BaseTestCase): def setUp(self): super(TestOVSNativeHelpers, self).setUp() self.execute = mock.patch('neutron.agent.common.utils.execute').start() def test__connection_to_manager_uri(self): for conn_uri, expected in CONNECTION_TO_MANAGER_URI_MAP: self.assertEqual(expected, helpers._connection_to_manager_uri(conn_uri)) def test_enable_connection_uri(self): for conn_uri, manager_uri in CONNECTION_TO_MANAGER_URI_MAP: helpers.enable_connection_uri(conn_uri) self.execute.assert_called_with( ['ovs-vsctl', 'set-manager', manager_uri], run_as_root=True) neutron-8.4.0/neutron/tests/unit/agent/ovsdb/__init__.py0000664000567000056710000000000013044372736024514 0ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/tests/unit/agent/l3/0000775000567000056710000000000013044373210021602 5ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/tests/unit/agent/l3/test_router_processing_queue.py0000664000567000056710000000771513044372736030221 0ustar jenkinsjenkins00000000000000# Copyright 2014 Hewlett-Packard Development Company, L.P. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
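# NOTE: illustrative summary, not part of the original module; 'router_id'
# and 'process' are stand-in names. Based on the tests below, only the
# first ExclusiveRouterProcessor created for a given router id becomes the
# "master"; later instances for the same id hand their queued updates over
# to that master, so a typical consumer looks roughly like:
#
#     with l3_queue.ExclusiveRouterProcessor(router_id) as master:
#         for update in master.updates():
#             process(update)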
# import datetime from oslo_utils import uuidutils from neutron.agent.l3 import router_processing_queue as l3_queue from neutron.tests import base _uuid = uuidutils.generate_uuid FAKE_ID = _uuid() FAKE_ID_2 = _uuid() class TestExclusiveRouterProcessor(base.BaseTestCase): def setUp(self): super(TestExclusiveRouterProcessor, self).setUp() def test_i_am_master(self): master = l3_queue.ExclusiveRouterProcessor(FAKE_ID) not_master = l3_queue.ExclusiveRouterProcessor(FAKE_ID) master_2 = l3_queue.ExclusiveRouterProcessor(FAKE_ID_2) not_master_2 = l3_queue.ExclusiveRouterProcessor(FAKE_ID_2) self.assertTrue(master._i_am_master()) self.assertFalse(not_master._i_am_master()) self.assertTrue(master_2._i_am_master()) self.assertFalse(not_master_2._i_am_master()) master.__exit__(None, None, None) master_2.__exit__(None, None, None) def test_master(self): master = l3_queue.ExclusiveRouterProcessor(FAKE_ID) not_master = l3_queue.ExclusiveRouterProcessor(FAKE_ID) master_2 = l3_queue.ExclusiveRouterProcessor(FAKE_ID_2) not_master_2 = l3_queue.ExclusiveRouterProcessor(FAKE_ID_2) self.assertEqual(master, master._master) self.assertEqual(master, not_master._master) self.assertEqual(master_2, master_2._master) self.assertEqual(master_2, not_master_2._master) master.__exit__(None, None, None) master_2.__exit__(None, None, None) def test__enter__(self): self.assertNotIn(FAKE_ID, l3_queue.ExclusiveRouterProcessor._masters) master = l3_queue.ExclusiveRouterProcessor(FAKE_ID) master.__enter__() self.assertIn(FAKE_ID, l3_queue.ExclusiveRouterProcessor._masters) master.__exit__(None, None, None) def test__exit__(self): master = l3_queue.ExclusiveRouterProcessor(FAKE_ID) not_master = l3_queue.ExclusiveRouterProcessor(FAKE_ID) master.__enter__() self.assertIn(FAKE_ID, l3_queue.ExclusiveRouterProcessor._masters) not_master.__enter__() not_master.__exit__(None, None, None) self.assertIn(FAKE_ID, l3_queue.ExclusiveRouterProcessor._masters) master.__exit__(None, None, None) self.assertNotIn(FAKE_ID, l3_queue.ExclusiveRouterProcessor._masters) def test_data_fetched_since(self): master = l3_queue.ExclusiveRouterProcessor(FAKE_ID) self.assertEqual(datetime.datetime.min, master._get_router_data_timestamp()) ts1 = datetime.datetime.utcnow() - datetime.timedelta(seconds=10) ts2 = datetime.datetime.utcnow() master.fetched_and_processed(ts2) self.assertEqual(ts2, master._get_router_data_timestamp()) master.fetched_and_processed(ts1) self.assertEqual(ts2, master._get_router_data_timestamp()) master.__exit__(None, None, None) def test_updates(self): master = l3_queue.ExclusiveRouterProcessor(FAKE_ID) not_master = l3_queue.ExclusiveRouterProcessor(FAKE_ID) master.queue_update(l3_queue.RouterUpdate(FAKE_ID, 0)) not_master.queue_update(l3_queue.RouterUpdate(FAKE_ID, 0)) for update in not_master.updates(): raise Exception("Only the master should process a router") self.assertEqual(2, len([i for i in master.updates()])) neutron-8.4.0/neutron/tests/unit/agent/l3/test_agent.py0000664000567000056710000037677613044372760024354 0ustar jenkinsjenkins00000000000000# Copyright 2012 VMware, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import copy from itertools import chain as iter_chain from itertools import combinations as iter_combinations import eventlet import mock import netaddr from oslo_log import log import oslo_messaging from oslo_utils import timeutils from oslo_utils import uuidutils import six from testtools import matchers from neutron.agent.common import config as agent_config from neutron.agent.l3 import agent as l3_agent from neutron.agent.l3 import config as l3_config from neutron.agent.l3 import dvr_edge_router as dvr_router from neutron.agent.l3 import dvr_snat_ns from neutron.agent.l3 import ha from neutron.agent.l3 import legacy_router from neutron.agent.l3 import link_local_allocator as lla from neutron.agent.l3 import namespaces from neutron.agent.l3 import router_info as l3router from neutron.agent.l3 import router_processing_queue from neutron.agent.linux import dibbler from neutron.agent.linux import external_process from neutron.agent.linux import interface from neutron.agent.linux import iptables_manager from neutron.agent.linux import pd from neutron.agent.linux import ra from neutron.agent.metadata import driver as metadata_driver from neutron.agent import rpc as agent_rpc from neutron.common import config as base_config from neutron.common import constants as l3_constants from neutron.common import exceptions as n_exc from neutron.extensions import portbindings from neutron.plugins.common import constants as p_const from neutron.tests import base from neutron.tests.common import l3_test_common _uuid = uuidutils.generate_uuid HOSTNAME = 'myhost' FAKE_ID = _uuid() FAKE_ID_2 = _uuid() FIP_PRI = 32768 class BasicRouterOperationsFramework(base.BaseTestCase): def setUp(self): super(BasicRouterOperationsFramework, self).setUp() mock.patch('eventlet.spawn').start() self.conf = agent_config.setup_conf() self.conf.register_opts(base_config.core_opts) log.register_options(self.conf) self.conf.register_opts(agent_config.AGENT_STATE_OPTS, 'AGENT') self.conf.register_opts(l3_config.OPTS) self.conf.register_opts(ha.OPTS) agent_config.register_interface_driver_opts_helper(self.conf) agent_config.register_process_monitor_opts(self.conf) agent_config.register_availability_zone_opts_helper(self.conf) self.conf.register_opts(interface.OPTS) self.conf.register_opts(external_process.OPTS) self.conf.register_opts(pd.OPTS) self.conf.register_opts(ra.OPTS) self.conf.set_override('interface_driver', 'neutron.agent.linux.interface.NullDriver') self.conf.set_override('send_arp_for_ha', 1) self.conf.set_override('state_path', '/tmp') self.conf.set_override('ra_confs', '/tmp') self.conf.set_override('pd_dhcp_driver', '') self.device_exists_p = mock.patch( 'neutron.agent.linux.ip_lib.device_exists') self.device_exists = self.device_exists_p.start() self.ensure_dir = mock.patch('neutron.common.utils.ensure_dir').start() mock.patch('neutron.agent.linux.keepalived.KeepalivedManager' '.get_full_config_file_path').start() self.utils_exec_p = mock.patch( 'neutron.agent.linux.utils.execute') self.utils_exec = self.utils_exec_p.start() self.utils_replace_file_p = mock.patch( 'neutron.common.utils.replace_file') 
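        # NOTE: annotation, not in the original test: the patchers started
        # throughout this setUp are never individually stopped; the
        # neutron base test class is assumed to stop all active mock
        # patchers during cleanup, so no matching addCleanup(...stop)
        # calls appear here.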
self.utils_replace_file = self.utils_replace_file_p.start() self.external_process_p = mock.patch( 'neutron.agent.linux.external_process.ProcessManager') self.external_process = self.external_process_p.start() self.process_monitor = mock.patch( 'neutron.agent.linux.external_process.ProcessMonitor').start() self.send_adv_notif_p = mock.patch( 'neutron.agent.linux.ip_lib.send_ip_addr_adv_notif') self.send_adv_notif = self.send_adv_notif_p.start() self.dvr_cls_p = mock.patch('neutron.agent.linux.interface.NullDriver') driver_cls = self.dvr_cls_p.start() self.mock_driver = mock.MagicMock() self.mock_driver.DEV_NAME_LEN = ( interface.LinuxInterfaceDriver.DEV_NAME_LEN) driver_cls.return_value = self.mock_driver self.ip_cls_p = mock.patch('neutron.agent.linux.ip_lib.IPWrapper') ip_cls = self.ip_cls_p.start() self.mock_ip = mock.MagicMock() ip_cls.return_value = self.mock_ip ip_rule = mock.patch('neutron.agent.linux.ip_lib.IPRule').start() self.mock_rule = mock.MagicMock() ip_rule.return_value = self.mock_rule ip_dev = mock.patch('neutron.agent.linux.ip_lib.IPDevice').start() self.mock_ip_dev = mock.MagicMock() ip_dev.return_value = self.mock_ip_dev self.l3pluginApi_cls_p = mock.patch( 'neutron.agent.l3.agent.L3PluginApi') l3pluginApi_cls = self.l3pluginApi_cls_p.start() self.plugin_api = mock.MagicMock() l3pluginApi_cls.return_value = self.plugin_api self.looping_call_p = mock.patch( 'oslo_service.loopingcall.FixedIntervalLoopingCall') self.looping_call_p.start() subnet_id_1 = _uuid() subnet_id_2 = _uuid() self.snat_ports = [{'subnets': [{'cidr': '152.2.0.0/16', 'gateway_ip': '152.2.0.1', 'id': subnet_id_1}], 'network_id': _uuid(), 'device_owner': l3_constants.DEVICE_OWNER_ROUTER_SNAT, 'mac_address': 'fa:16:3e:80:8d:80', 'fixed_ips': [{'subnet_id': subnet_id_1, 'ip_address': '152.2.0.13', 'prefixlen': 16}], 'id': _uuid(), 'device_id': _uuid()}, {'subnets': [{'cidr': '152.10.0.0/16', 'gateway_ip': '152.10.0.1', 'id': subnet_id_2}], 'network_id': _uuid(), 'device_owner': l3_constants.DEVICE_OWNER_ROUTER_SNAT, 'mac_address': 'fa:16:3e:80:8d:80', 'fixed_ips': [{'subnet_id': subnet_id_2, 'ip_address': '152.10.0.13', 'prefixlen': 16}], 'id': _uuid(), 'device_id': _uuid()}] self.ri_kwargs = {'agent_conf': self.conf, 'interface_driver': self.mock_driver} def _process_router_instance_for_agent(self, agent, ri, router): ri.router = router if not ri.radvd: ri.radvd = ra.DaemonMonitor(router['id'], ri.ns_name, agent.process_monitor, ri.get_internal_device_name, self.conf) ri.process(agent) class TestBasicRouterOperations(BasicRouterOperationsFramework): def test_init_ha_conf(self): with mock.patch('os.path.dirname', return_value='/etc/ha/'): l3_agent.L3NATAgent(HOSTNAME, self.conf) self.ensure_dir.assert_called_once_with('/etc/ha/') def test_enqueue_state_change_router_not_found(self): agent = l3_agent.L3NATAgent(HOSTNAME, self.conf) non_existent_router = 42 # Make sure the exceptional code path has coverage agent.enqueue_state_change(non_existent_router, 'master') def test_enqueue_state_change_metadata_disable(self): self.conf.set_override('enable_metadata_proxy', False) agent = l3_agent.L3NATAgent(HOSTNAME, self.conf) router = mock.Mock() router_info = mock.MagicMock() agent.router_info[router.id] = router_info agent._update_metadata_proxy = mock.Mock() agent.enqueue_state_change(router.id, 'master') self.assertFalse(agent._update_metadata_proxy.call_count) def test_periodic_sync_routers_task_raise_exception(self): agent = l3_agent.L3NATAgent(HOSTNAME, self.conf) self.plugin_api.get_router_ids.return_value 
= ['fake_id'] self.plugin_api.get_routers.side_effect = ValueError self.assertRaises(ValueError, agent.periodic_sync_routers_task, agent.context) self.assertTrue(agent.fullsync) def test_l3_initial_report_state_done(self): with mock.patch.object(l3_agent.L3NATAgentWithStateReport, 'periodic_sync_routers_task'),\ mock.patch.object(agent_rpc.PluginReportStateAPI, 'report_state') as report_state,\ mock.patch.object(eventlet, 'spawn_n'): agent = l3_agent.L3NATAgentWithStateReport(host=HOSTNAME, conf=self.conf) self.assertTrue(agent.agent_state['start_flag']) agent.after_start() report_state.assert_called_once_with(agent.context, agent.agent_state, True) self.assertIsNone(agent.agent_state.get('start_flag')) def test_report_state_revival_logic(self): with mock.patch.object(agent_rpc.PluginReportStateAPI, 'report_state') as report_state: agent = l3_agent.L3NATAgentWithStateReport(host=HOSTNAME, conf=self.conf) report_state.return_value = l3_constants.AGENT_REVIVED agent._report_state() self.assertTrue(agent.fullsync) agent.fullsync = False report_state.return_value = l3_constants.AGENT_ALIVE agent._report_state() self.assertFalse(agent.fullsync) def test_periodic_sync_routers_task_call_clean_stale_namespaces(self): agent = l3_agent.L3NATAgent(HOSTNAME, self.conf) self.plugin_api.get_routers.return_value = [] agent.periodic_sync_routers_task(agent.context) self.assertFalse(agent.namespaces_manager._clean_stale) def test_periodic_sync_routers_task_call_clean_stale_meta_proxies(self): agent = l3_agent.L3NATAgent(HOSTNAME, self.conf) stale_router_ids = [_uuid(), _uuid()] active_routers = [{'id': _uuid()}, {'id': _uuid()}] self.plugin_api.get_router_ids.return_value = [r['id'] for r in active_routers] self.plugin_api.get_routers.return_value = active_routers namespace_list = [namespaces.NS_PREFIX + r_id for r_id in stale_router_ids] namespace_list += [namespaces.NS_PREFIX + r['id'] for r in active_routers] self.mock_ip.get_namespaces.return_value = namespace_list driver = metadata_driver.MetadataDriver with mock.patch.object( driver, 'destroy_monitored_metadata_proxy') as destroy_proxy: agent.periodic_sync_routers_task(agent.context) expected_calls = [mock.call(mock.ANY, r_id, agent.conf) for r_id in stale_router_ids] self.assertEqual(len(stale_router_ids), destroy_proxy.call_count) destroy_proxy.assert_has_calls(expected_calls, any_order=True) def test_router_info_create(self): id = _uuid() ri = l3router.RouterInfo(id, {}, **self.ri_kwargs) self.assertTrue(ri.ns_name.endswith(id)) def test_router_info_create_with_router(self): ns_id = _uuid() subnet_id = _uuid() ex_gw_port = {'id': _uuid(), 'network_id': _uuid(), 'fixed_ips': [{'ip_address': '19.4.4.4', 'prefixlen': 24, 'subnet_id': subnet_id}], 'subnets': [{'id': subnet_id, 'cidr': '19.4.4.0/24', 'gateway_ip': '19.4.4.1'}]} router = { 'id': _uuid(), 'enable_snat': True, 'routes': [], 'gw_port': ex_gw_port} ri = l3router.RouterInfo(ns_id, router, **self.ri_kwargs) self.assertTrue(ri.ns_name.endswith(ns_id)) self.assertEqual(router, ri.router) def test_agent_create(self): l3_agent.L3NATAgent(HOSTNAME, self.conf) def _test_internal_network_action(self, action): router = l3_test_common.prepare_router_data(num_internal_ports=2) router_id = router['id'] ri = l3router.RouterInfo(router_id, router, **self.ri_kwargs) port = {'network_id': _uuid(), 'id': _uuid(), 'mac_address': 'ca:fe:de:ad:be:ef', 'fixed_ips': [{'subnet_id': _uuid(), 'ip_address': '99.0.1.9', 'prefixlen': 24}]} interface_name = ri.get_internal_device_name(port['id']) if action == 'add': 
self.device_exists.return_value = False ri.internal_network_added(port) self.assertEqual(1, self.mock_driver.plug.call_count) self.assertEqual(1, self.mock_driver.init_router_port.call_count) self.send_adv_notif.assert_called_once_with(ri.ns_name, interface_name, '99.0.1.9', mock.ANY) elif action == 'remove': self.device_exists.return_value = True ri.internal_network_removed(port) self.assertEqual(1, self.mock_driver.unplug.call_count) else: raise Exception("Invalid action %s" % action) @staticmethod def _fixed_ip_cidr(fixed_ip): return '%s/%s' % (fixed_ip['ip_address'], fixed_ip['prefixlen']) def _test_internal_network_action_dist(self, action): router = l3_test_common.prepare_router_data(num_internal_ports=2) router_id = router['id'] agent = l3_agent.L3NATAgent(HOSTNAME, self.conf) ri = dvr_router.DvrEdgeRouter( agent, HOSTNAME, router_id, router, **self.ri_kwargs) subnet_id = _uuid() port = {'network_id': _uuid(), 'id': _uuid(), 'mac_address': 'ca:fe:de:ad:be:ef', 'fixed_ips': [{'subnet_id': subnet_id, 'ip_address': '99.0.1.9', 'prefixlen': 24}], 'subnets': [{'id': subnet_id}]} ri.router['gw_port_host'] = HOSTNAME agent.host = HOSTNAME agent.conf.agent_mode = 'dvr_snat' sn_port = {'fixed_ips': [{'ip_address': '20.0.0.31', 'subnet_id': _uuid()}], 'subnets': [{'gateway_ip': '20.0.0.1'}], 'extra_subnets': [{'cidr': '172.16.0.0/24'}], 'id': _uuid(), 'network_id': _uuid(), 'mac_address': 'ca:fe:de:ad:be:ef'} ex_gw_port = {'fixed_ips': [{'ip_address': '20.0.0.30', 'prefixlen': 24, 'subnet_id': _uuid()}], 'subnets': [{'gateway_ip': '20.0.0.1'}], 'extra_subnets': [{'cidr': '172.16.0.0/24'}], 'id': _uuid(), portbindings.HOST_ID: HOSTNAME, 'network_id': _uuid(), 'mac_address': 'ca:fe:de:ad:be:ef'} ri.snat_ports = sn_port ri.ex_gw_port = ex_gw_port ri.snat_namespace = mock.Mock() if action == 'add': self.device_exists.return_value = False ri.get_snat_port_for_internal_port = mock.Mock( return_value=sn_port) ri._snat_redirect_add = mock.Mock() ri._set_subnet_arp_info = mock.Mock() ri._internal_network_added = mock.Mock() ri._set_subnet_arp_info = mock.Mock() ri.internal_network_added(port) self.assertEqual(1, ri._snat_redirect_add.call_count) self.assertEqual(2, ri._internal_network_added.call_count) ri._set_subnet_arp_info.assert_called_once_with(subnet_id) ri._internal_network_added.assert_called_with( dvr_snat_ns.SnatNamespace.get_snat_ns_name(ri.router['id']), sn_port['network_id'], sn_port['id'], sn_port['fixed_ips'], sn_port['mac_address'], ri._get_snat_int_device_name(sn_port['id']), dvr_snat_ns.SNAT_INT_DEV_PREFIX, mtu=None) elif action == 'remove': self.device_exists.return_value = False ri.get_snat_port_for_internal_port = mock.Mock( return_value=sn_port) ri._delete_arp_cache_for_internal_port = mock.Mock() ri._snat_redirect_modify = mock.Mock() ri.internal_network_removed(port) self.assertEqual( 1, ri._delete_arp_cache_for_internal_port.call_count) ri._snat_redirect_modify.assert_called_with( sn_port, port, ri.get_internal_device_name(port['id']), is_add=False) def test_agent_add_internal_network(self): self._test_internal_network_action('add') def test_agent_add_internal_network_dist(self): self._test_internal_network_action_dist('add') def test_agent_remove_internal_network(self): self._test_internal_network_action('remove') def test_agent_remove_internal_network_dist(self): self._test_internal_network_action_dist('remove') def _add_external_gateway(self, ri, router, ex_gw_port, interface_name, use_fake_fip=False, no_subnet=False, no_sub_gw=None, dual_stack=False): 
self.device_exists.return_value = False if no_sub_gw is None: no_sub_gw = [] if use_fake_fip: fake_fip = {'floatingips': [{'id': _uuid(), 'floating_ip_address': '192.168.1.34', 'fixed_ip_address': '192.168.0.1', 'port_id': _uuid()}]} router[l3_constants.FLOATINGIP_KEY] = fake_fip['floatingips'] ri.external_gateway_added(ex_gw_port, interface_name) if not router.get('distributed'): self.assertEqual(1, self.mock_driver.plug.call_count) self.assertEqual(1, self.mock_driver.init_router_port.call_count) if no_subnet and not dual_stack: self.assertEqual(0, self.send_adv_notif.call_count) ip_cidrs = [] kwargs = {'preserve_ips': [], 'namespace': 'qrouter-' + router['id'], 'extra_subnets': [], 'clean_connections': True} else: exp_arp_calls = [mock.call(ri.ns_name, interface_name, '20.0.0.30', mock.ANY)] if dual_stack and not no_sub_gw: exp_arp_calls += [mock.call(ri.ns_name, interface_name, '2001:192:168:100::2', mock.ANY)] self.send_adv_notif.assert_has_calls(exp_arp_calls) ip_cidrs = ['20.0.0.30/24'] if dual_stack: if not no_sub_gw: ip_cidrs.append('2001:192:168:100::2/64') kwargs = {'preserve_ips': ['192.168.1.34/32'], 'namespace': 'qrouter-' + router['id'], 'extra_subnets': [{'cidr': '172.16.0.0/24'}], 'clean_connections': True} self.mock_driver.init_router_port.assert_called_with( interface_name, ip_cidrs, **kwargs) else: ri._create_dvr_gateway.assert_called_once_with( ex_gw_port, interface_name) def _test_external_gateway_action(self, action, router, dual_stack=False): agent = l3_agent.L3NATAgent(HOSTNAME, self.conf) ex_net_id = _uuid() sn_port = self.snat_ports[1] # Special setup for dvr routers if router.get('distributed'): agent.conf.agent_mode = 'dvr_snat' agent.host = HOSTNAME ri = dvr_router.DvrEdgeRouter(agent, HOSTNAME, router['id'], router, **self.ri_kwargs) ri._create_dvr_gateway = mock.Mock() ri.get_snat_interfaces = mock.Mock(return_value=self.snat_ports) ri.snat_ports = self.snat_ports ri._create_snat_namespace() ri.fip_ns = agent.get_fip_ns(ex_net_id) ri.internal_ports = self.snat_ports else: ri = l3router.RouterInfo( router['id'], router, **self.ri_kwargs) ri.use_ipv6 = False subnet_id = _uuid() fixed_ips = [{'subnet_id': subnet_id, 'ip_address': '20.0.0.30', 'prefixlen': 24}] subnets = [{'id': subnet_id, 'cidr': '20.0.0.0/24', 'gateway_ip': '20.0.0.1'}] if dual_stack: ri.use_ipv6 = True subnet_id_v6 = _uuid() fixed_ips.append({'subnet_id': subnet_id_v6, 'ip_address': '2001:192:168:100::2', 'prefixlen': 64}) subnets.append({'id': subnet_id_v6, 'cidr': '2001:192:168:100::/64', 'gateway_ip': '2001:192:168:100::1'}) ex_gw_port = {'fixed_ips': fixed_ips, 'subnets': subnets, 'extra_subnets': [{'cidr': '172.16.0.0/24'}], 'id': _uuid(), 'network_id': ex_net_id, 'mac_address': 'ca:fe:de:ad:be:ef'} ex_gw_port_no_sub = {'fixed_ips': [], 'id': _uuid(), 'network_id': ex_net_id, 'mac_address': 'ca:fe:de:ad:be:ef'} interface_name = ri.get_external_device_name(ex_gw_port['id']) if action == 'add': self._add_external_gateway(ri, router, ex_gw_port, interface_name, use_fake_fip=True, dual_stack=dual_stack) elif action == 'add_no_sub': ri.use_ipv6 = True self._add_external_gateway(ri, router, ex_gw_port_no_sub, interface_name, no_subnet=True) elif action == 'add_no_sub_v6_gw': ri.use_ipv6 = True self.conf.set_override('ipv6_gateway', 'fe80::f816:3eff:fe2e:1') if dual_stack: use_fake_fip = True # Remove v6 entries del ex_gw_port['fixed_ips'][-1] del ex_gw_port['subnets'][-1] else: use_fake_fip = False ex_gw_port = ex_gw_port_no_sub self._add_external_gateway(ri, router, ex_gw_port, 
interface_name, no_subnet=True, no_sub_gw='fe80::f816:3eff:fe2e:1', use_fake_fip=use_fake_fip, dual_stack=dual_stack) elif action == 'remove': self.device_exists.return_value = True ri.get_snat_port_for_internal_port = mock.Mock( return_value=sn_port) ri._snat_redirect_remove = mock.Mock() ri.router['gw_port'] = "" ri.external_gateway_removed(ex_gw_port, interface_name) if not router.get('distributed'): self.mock_driver.unplug.assert_called_once_with( interface_name, bridge=agent.conf.external_network_bridge, namespace=mock.ANY, prefix=mock.ANY) else: ri._snat_redirect_remove.assert_called_with( sn_port, sn_port, ri.get_internal_device_name(sn_port['id'])) ri.get_snat_port_for_internal_port.assert_called_with( mock.ANY, ri.snat_ports) else: raise Exception("Invalid action %s" % action) def _test_external_gateway_updated(self, dual_stack=False): router = l3_test_common.prepare_router_data(num_internal_ports=2) ri = l3router.RouterInfo(router['id'], router, **self.ri_kwargs) ri.use_ipv6 = False interface_name, ex_gw_port = l3_test_common.prepare_ext_gw_test( self, ri, dual_stack=dual_stack) fake_fip = {'floatingips': [{'id': _uuid(), 'floating_ip_address': '192.168.1.34', 'fixed_ip_address': '192.168.0.1', 'port_id': _uuid()}]} router[l3_constants.FLOATINGIP_KEY] = fake_fip['floatingips'] ri.external_gateway_updated(ex_gw_port, interface_name) self.assertEqual(1, self.mock_driver.plug.call_count) self.assertEqual(1, self.mock_driver.init_router_port.call_count) exp_arp_calls = [mock.call(ri.ns_name, interface_name, '20.0.0.30', mock.ANY)] if dual_stack: ri.use_ipv6 = True exp_arp_calls += [mock.call(ri.ns_name, interface_name, '2001:192:168:100::2', mock.ANY)] self.send_adv_notif.assert_has_calls(exp_arp_calls) ip_cidrs = ['20.0.0.30/24'] gateway_ips = ['20.0.0.1'] if dual_stack: ip_cidrs.append('2001:192:168:100::2/64') gateway_ips.append('2001:192:168:100::1') kwargs = {'preserve_ips': ['192.168.1.34/32'], 'namespace': 'qrouter-' + router['id'], 'extra_subnets': [{'cidr': '172.16.0.0/24'}], 'clean_connections': True} self.mock_driver.init_router_port.assert_called_with(interface_name, ip_cidrs, **kwargs) def test_external_gateway_updated(self): self._test_external_gateway_updated() def test_external_gateway_updated_dual_stack(self): self._test_external_gateway_updated(dual_stack=True) def test_dvr_edge_router_init_for_snat_namespace_object(self): router = {'id': _uuid()} ri = dvr_router.DvrEdgeRouter(mock.Mock(), HOSTNAME, router['id'], router, **self.ri_kwargs) # Make sure that ri.snat_namespace object is created when the # router is initialized self.assertIsNotNone(ri.snat_namespace) def test_ext_gw_updated_calling_snat_ns_delete_if_gw_port_host_none( self): """Test to check the impact of snat_namespace object. This function specifically checks the impact of the snat namespace object value on external_gateway_removed for deleting snat_namespace when the gw_port_host mismatches or none. 
""" router = l3_test_common.prepare_router_data(num_internal_ports=2) ri = dvr_router.DvrEdgeRouter(mock.Mock(), HOSTNAME, router['id'], router, **self.ri_kwargs) with mock.patch.object(dvr_snat_ns.SnatNamespace, 'delete') as snat_ns_delete: interface_name, ex_gw_port = l3_test_common.prepare_ext_gw_test( self, ri) router['gw_port_host'] = '' ri._snat_redirect_remove = mock.Mock() ri.external_gateway_updated(ex_gw_port, interface_name) if router['gw_port_host'] != ri.host: self.assertFalse(ri._snat_redirect_remove.called) self.assertEqual(1, snat_ns_delete.call_count) @mock.patch.object(namespaces.Namespace, 'delete') def test_snat_ns_delete_not_called_when_snat_namespace_does_not_exist( self, mock_ns_del): """Test to check the impact of snat_namespace object. This function specifically checks the impact of the snat namespace object initialization without the actual creation of snat_namespace. When deletes are issued to the snat namespace based on the snat namespace object existence, it should be checking for the valid namespace existence before it tries to delete. """ router = l3_test_common.prepare_router_data(num_internal_ports=2) ri = dvr_router.DvrEdgeRouter(mock.Mock(), HOSTNAME, router['id'], router, **self.ri_kwargs) # Make sure we set a return value to emulate the non existence # of the namespace. self.mock_ip.netns.exists.return_value = False self.assertIsNotNone(ri.snat_namespace) interface_name, ex_gw_port = l3_test_common.prepare_ext_gw_test(self, ri) ri._external_gateway_removed = mock.Mock() ri.external_gateway_removed(ex_gw_port, interface_name) self.assertFalse(mock_ns_del.called) def _test_ext_gw_updated_dvr_edge_router(self, host_match, snat_hosted_before=True): """ Helper to test external gw update for edge router on dvr_snat agent :param host_match: True if new gw host should be the same as agent host :param snat_hosted_before: True if agent has already been hosting snat for the router """ router = l3_test_common.prepare_router_data(num_internal_ports=2) ri = dvr_router.DvrEdgeRouter(mock.Mock(), HOSTNAME, router['id'], router, **self.ri_kwargs) if snat_hosted_before: ri._create_snat_namespace() snat_ns_name = ri.snat_namespace.name interface_name, ex_gw_port = l3_test_common.prepare_ext_gw_test(self, ri) ri._external_gateway_added = mock.Mock() router['gw_port_host'] = ri.host if host_match else (ri.host + 'foo') ri.external_gateway_updated(ex_gw_port, interface_name) if not host_match: self.assertFalse(ri._external_gateway_added.called) if snat_hosted_before: # host mismatch means that snat was rescheduled to another # agent, hence need to verify that gw port was unplugged and # snat namespace was deleted self.mock_driver.unplug.assert_called_with( interface_name, bridge=self.conf.external_network_bridge, namespace=snat_ns_name, prefix=l3_agent.EXTERNAL_DEV_PREFIX) else: if not snat_hosted_before: self.assertIsNotNone(ri.snat_namespace) self.assertTrue(ri._external_gateway_added.called) def test_ext_gw_updated_dvr_edge_router(self): self._test_ext_gw_updated_dvr_edge_router(host_match=True) def test_ext_gw_updated_dvr_edge_router_host_mismatch(self): self._test_ext_gw_updated_dvr_edge_router(host_match=False) def test_ext_gw_updated_dvr_dvr_edge_router_snat_rescheduled(self): self._test_ext_gw_updated_dvr_edge_router(host_match=True, snat_hosted_before=False) def test_agent_add_external_gateway(self): router = l3_test_common.prepare_router_data(num_internal_ports=2) self._test_external_gateway_action('add', router) def 
test_agent_add_external_gateway_dual_stack(self): router = l3_test_common.prepare_router_data(num_internal_ports=2) self._test_external_gateway_action('add', router, dual_stack=True) def test_agent_add_external_gateway_dist(self): router = l3_test_common.prepare_router_data(num_internal_ports=2) router['distributed'] = True router['gw_port_host'] = HOSTNAME self._test_external_gateway_action('add', router) def test_agent_add_external_gateway_dist_dual_stack(self): router = l3_test_common.prepare_router_data(num_internal_ports=2) router['distributed'] = True router['gw_port_host'] = HOSTNAME self._test_external_gateway_action('add', router, dual_stack=True) def test_agent_add_external_gateway_no_subnet(self): router = l3_test_common.prepare_router_data(num_internal_ports=2, v6_ext_gw_with_sub=False) self._test_external_gateway_action('add_no_sub', router) def test_agent_add_external_gateway_no_subnet_with_ipv6_gw(self): router = l3_test_common.prepare_router_data(num_internal_ports=2, v6_ext_gw_with_sub=False) self._test_external_gateway_action('add_no_sub_v6_gw', router) def test_agent_add_external_gateway_dual_stack_no_subnet_w_ipv6_gw(self): router = l3_test_common.prepare_router_data(num_internal_ports=2, v6_ext_gw_with_sub=False) self._test_external_gateway_action('add_no_sub_v6_gw', router, dual_stack=True) def test_agent_remove_external_gateway(self): router = l3_test_common.prepare_router_data(num_internal_ports=2) self._test_external_gateway_action('remove', router) def test_agent_remove_external_gateway_dual_stack(self): router = l3_test_common.prepare_router_data(num_internal_ports=2) self._test_external_gateway_action('remove', router, dual_stack=True) def test_agent_remove_external_gateway_dist(self): router = l3_test_common.prepare_router_data(num_internal_ports=2) router['distributed'] = True router['gw_port_host'] = HOSTNAME self._test_external_gateway_action('remove', router) def test_agent_remove_external_gateway_dist_dual_stack(self): router = l3_test_common.prepare_router_data(num_internal_ports=2) router['distributed'] = True router['gw_port_host'] = HOSTNAME self._test_external_gateway_action('remove', router, dual_stack=True) def _verify_snat_mangle_rules(self, nat_rules, mangle_rules, router, negate=False): interfaces = router[l3_constants.INTERFACE_KEY] source_cidrs = [] for iface in interfaces: for subnet in iface['subnets']: prefix = subnet['cidr'].split('/')[1] source_cidr = "%s/%s" % (iface['fixed_ips'][0]['ip_address'], prefix) source_cidrs.append(source_cidr) source_nat_ip = router['gw_port']['fixed_ips'][0]['ip_address'] interface_name = ('qg-%s' % router['gw_port']['id'])[:14] expected_rules = [ '! -i %s ! -o %s -m conntrack ! --ctstate DNAT -j ACCEPT' % (interface_name, interface_name), '-o %s -j SNAT --to-source %s' % (interface_name, source_nat_ip), '-m mark ! 
--mark 0x2/%s -m conntrack --ctstate DNAT ' '-j SNAT --to-source %s' % (l3_constants.ROUTER_MARK_MASK, source_nat_ip)] for r in nat_rules: if negate: self.assertNotIn(r.rule, expected_rules) else: self.assertIn(r.rule, expected_rules) expected_rules = [ '-i %s -j MARK --set-xmark 0x2/%s' % (interface_name, l3_constants.ROUTER_MARK_MASK), '-o %s -m connmark --mark 0x0/%s -j CONNMARK ' '--save-mark --nfmask %s --ctmask %s' % (interface_name, l3router.ADDRESS_SCOPE_MARK_MASK, l3router.ADDRESS_SCOPE_MARK_MASK, l3router.ADDRESS_SCOPE_MARK_MASK)] for r in mangle_rules: if negate: self.assertNotIn(r.rule, expected_rules) else: self.assertIn(r.rule, expected_rules) def test_get_snat_port_for_internal_port(self): router = l3_test_common.prepare_router_data(num_internal_ports=4) ri = dvr_router.DvrEdgeRouter(mock.sentinel.agent, HOSTNAME, router['id'], router, **self.ri_kwargs) test_port = { 'mac_address': '00:12:23:34:45:56', 'fixed_ips': [{'subnet_id': l3_test_common.get_subnet_id( router[l3_constants.INTERFACE_KEY][0]), 'ip_address': '101.12.13.14'}]} internal_ports = ri.router.get(l3_constants.INTERFACE_KEY, []) # test valid case with mock.patch.object(ri, 'get_snat_interfaces') as get_interfaces: get_interfaces.return_value = [test_port] res_port = ri.get_snat_port_for_internal_port(internal_ports[0]) self.assertEqual(test_port, res_port) # test invalid case test_port['fixed_ips'][0]['subnet_id'] = 1234 res_ip = ri.get_snat_port_for_internal_port(internal_ports[0]) self.assertNotEqual(test_port, res_ip) self.assertIsNone(res_ip) def test_process_cent_router(self): router = l3_test_common.prepare_router_data() agent = l3_agent.L3NATAgent(HOSTNAME, self.conf) ri = l3router.RouterInfo(router['id'], router, **self.ri_kwargs) self._test_process_router(ri, agent) def test_process_dist_router(self): router = l3_test_common.prepare_router_data() agent = l3_agent.L3NATAgent(HOSTNAME, self.conf) ri = dvr_router.DvrEdgeRouter(agent, HOSTNAME, router['id'], router, **self.ri_kwargs) subnet_id = l3_test_common.get_subnet_id( router[l3_constants.INTERFACE_KEY][0]) ri.router['distributed'] = True ri.router['_snat_router_interfaces'] = [{ 'fixed_ips': [{'subnet_id': subnet_id, 'ip_address': '1.2.3.4'}]}] ri.router['gw_port_host'] = None self._test_process_router(ri, agent) def _test_process_router(self, ri, agent): router = ri.router agent.host = HOSTNAME fake_fip_id = 'fake_fip_id' ri.create_dvr_fip_interfaces = mock.Mock() ri.process_floating_ip_addresses = mock.Mock() ri.process_floating_ip_nat_rules = mock.Mock() ri.process_floating_ip_addresses.return_value = { fake_fip_id: 'ACTIVE'} ri.external_gateway_added = mock.Mock() ri.external_gateway_updated = mock.Mock() ri.process_address_scope = mock.Mock() fake_floatingips1 = {'floatingips': [ {'id': fake_fip_id, 'floating_ip_address': '8.8.8.8', 'fixed_ip_address': '7.7.7.7', 'port_id': _uuid(), 'host': HOSTNAME}]} ri.process(agent) ri.process_floating_ip_addresses.assert_called_with(mock.ANY) ri.process_floating_ip_addresses.reset_mock() ri.process_floating_ip_nat_rules.assert_called_with() ri.process_floating_ip_nat_rules.reset_mock() ri.external_gateway_added.reset_mock() # remap floating IP to a new fixed ip fake_floatingips2 = copy.deepcopy(fake_floatingips1) fake_floatingips2['floatingips'][0]['fixed_ip_address'] = '7.7.7.8' router[l3_constants.FLOATINGIP_KEY] = fake_floatingips2['floatingips'] ri.process(agent) ri.process_floating_ip_addresses.assert_called_with(mock.ANY) ri.process_floating_ip_addresses.reset_mock() 
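# --- Illustrative aside (standalone sketch, assumed names) ---
# _test_process_router drives ri.process() through several phases and uses
# the assert-then-reset idiom seen above: verify the calls made in one
# phase, then reset_mock() so the next phase is checked from a clean count.
import mock

processor = mock.Mock()

processor.handle('router-1')
processor.handle('router-1')
assert processor.handle.call_count == 2   # phase 1 expectations
processor.reset_mock()                    # start phase 2 from zero

processor.handle('router-2')
processor.handle.assert_called_once_with('router-2')
# --- end of aside ---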
ri.process_floating_ip_nat_rules.assert_called_with() ri.process_floating_ip_nat_rules.reset_mock() self.assertEqual(0, ri.external_gateway_added.call_count) self.assertEqual(0, ri.external_gateway_updated.call_count) ri.external_gateway_added.reset_mock() ri.external_gateway_updated.reset_mock() # change the ex_gw_port a bit to test gateway update new_gw_port = copy.deepcopy(ri.router['gw_port']) ri.router['gw_port'] = new_gw_port old_ip = (netaddr.IPAddress(ri.router['gw_port'] ['fixed_ips'][0]['ip_address'])) ri.router['gw_port']['fixed_ips'][0]['ip_address'] = str(old_ip + 1) ri.process(agent) ri.process_floating_ip_addresses.reset_mock() ri.process_floating_ip_nat_rules.reset_mock() self.assertEqual(0, ri.external_gateway_added.call_count) self.assertEqual(1, ri.external_gateway_updated.call_count) # remove just the floating ips del router[l3_constants.FLOATINGIP_KEY] ri.process(agent) ri.process_floating_ip_addresses.assert_called_with(mock.ANY) ri.process_floating_ip_addresses.reset_mock() ri.process_floating_ip_nat_rules.assert_called_with() ri.process_floating_ip_nat_rules.reset_mock() # now no ports so state is torn down del router[l3_constants.INTERFACE_KEY] del router['gw_port'] ri.process(agent) self.assertEqual(1, self.send_adv_notif.call_count) distributed = ri.router.get('distributed', False) self.assertEqual(distributed, ri.process_floating_ip_addresses.called) self.assertEqual(distributed, ri.process_floating_ip_nat_rules.called) @mock.patch('neutron.agent.linux.ip_lib.IPDevice') def _test_process_floating_ip_addresses_add(self, ri, agent, IPDevice): floating_ips = ri.get_floating_ips() fip_id = floating_ips[0]['id'] IPDevice.return_value = device = mock.Mock() device.addr.list.return_value = [] ri.iptables_manager.ipv4['nat'] = mock.MagicMock() ex_gw_port = {'id': _uuid(), 'network_id': mock.sentinel.ext_net_id} ri.add_floating_ip = mock.Mock( return_value=l3_constants.FLOATINGIP_STATUS_ACTIVE) with mock.patch.object(lla.LinkLocalAllocator, '_write'): if ri.router['distributed']: ri.fip_ns = agent.get_fip_ns(ex_gw_port['network_id']) ri.create_dvr_fip_interfaces(ex_gw_port) fip_statuses = ri.process_floating_ip_addresses( mock.sentinel.interface_name) self.assertEqual({fip_id: l3_constants.FLOATINGIP_STATUS_ACTIVE}, fip_statuses) ri.add_floating_ip.assert_called_once_with( floating_ips[0], mock.sentinel.interface_name, device) @mock.patch.object(lla.LinkLocalAllocator, '_write') def test_create_dvr_fip_interfaces_if_fipnamespace_exist(self, lla_write): fake_network_id = _uuid() subnet_id = _uuid() fake_floatingips = {'floatingips': [ {'id': _uuid(), 'floating_ip_address': '20.0.0.3', 'fixed_ip_address': '192.168.0.1', 'floating_network_id': _uuid(), 'port_id': _uuid(), 'host': HOSTNAME}]} agent_gateway_port = ( [{'fixed_ips': [ {'ip_address': '20.0.0.30', 'prefixlen': 24, 'subnet_id': subnet_id}], 'subnets': [ {'id': subnet_id, 'cidr': '20.0.0.0/24', 'gateway_ip': '20.0.0.1'}], 'id': _uuid(), 'network_id': fake_network_id, 'mac_address': 'ca:fe:de:ad:be:ef'}] ) router = l3_test_common.prepare_router_data(enable_snat=True) router[l3_constants.FLOATINGIP_KEY] = fake_floatingips['floatingips'] router[l3_constants.FLOATINGIP_AGENT_INTF_KEY] = agent_gateway_port router['distributed'] = True agent = l3_agent.L3NATAgent(HOSTNAME, self.conf) ri = dvr_router.DvrEdgeRouter( agent, HOSTNAME, router['id'], router, **self.ri_kwargs) ext_gw_port = ri.router.get('gw_port') ri.fip_ns = agent.get_fip_ns(ext_gw_port['network_id']) ri.dist_fip_count = 0 agent.process_router_add = 
mock.Mock() ri.fip_ns.create_rtr_2_fip_link = mock.Mock() with mock.patch.object(ri, 'get_floating_ips') as fips, \ mock.patch.object(ri.fip_ns, 'create') as create_fip, \ mock.patch.object(ri, 'get_floating_agent_gw_interface' ) as fip_gw_port: fips.return_value = fake_floatingips fip_gw_port.return_value = agent_gateway_port[0] ri.create_dvr_fip_interfaces(ext_gw_port) self.assertTrue(fip_gw_port.called) self.assertTrue(fips.called) self.assertTrue(create_fip.called) self.assertEqual(agent_gateway_port[0], ri.fip_ns.agent_gateway_port) # Now let us associate the fip to the router ri.floating_ip_added_dist(fips, "192.168.0.1/32") self.assertEqual(1, ri.dist_fip_count) # Now let us disassociate the fip from the router ri.floating_ip_removed_dist("192.168.0.1/32") self.assertEqual(0, ri.dist_fip_count) # Calling create_dvr_fip_interfaces again to make sure # that the fip namespace create is not called again. # If the create is not called again, that would contain # the duplicate rules configuration in the fip namespace. ri.create_dvr_fip_interfaces(ext_gw_port) self.assertTrue(fip_gw_port.called) self.assertTrue(fips.called) create_fip.assert_called_once_with() self.assertEqual(2, agent.process_router_add.call_count) self.assertEqual(2, ri.fip_ns.create_rtr_2_fip_link.call_count) @mock.patch.object(lla.LinkLocalAllocator, '_write') def test_create_dvr_fip_interfaces_for_late_binding(self, lla_write): fake_network_id = _uuid() fake_subnet_id = _uuid() fake_floatingips = {'floatingips': [ {'id': _uuid(), 'floating_ip_address': '20.0.0.3', 'fixed_ip_address': '192.168.0.1', 'floating_network_id': _uuid(), 'port_id': _uuid(), 'host': HOSTNAME}]} agent_gateway_port = ( {'fixed_ips': [ {'ip_address': '20.0.0.30', 'prefixlen': 24, 'subnet_id': fake_subnet_id}], 'subnets': [ {'id': fake_subnet_id, 'cidr': '20.0.0.0/24', 'gateway_ip': '20.0.0.1'}], 'id': _uuid(), 'network_id': fake_network_id, 'mac_address': 'ca:fe:de:ad:be:ef'} ) router = l3_test_common.prepare_router_data(enable_snat=True) router[l3_constants.FLOATINGIP_KEY] = fake_floatingips['floatingips'] router[l3_constants.FLOATINGIP_AGENT_INTF_KEY] = [] router['distributed'] = True agent = l3_agent.L3NATAgent(HOSTNAME, self.conf) ri = dvr_router.DvrEdgeRouter( agent, HOSTNAME, router['id'], router, **self.ri_kwargs) ext_gw_port = ri.router.get('gw_port') ri.fip_ns = agent.get_fip_ns(ext_gw_port['network_id']) ri.dist_fip_count = 0 ri.fip_ns.subscribe = mock.Mock() with mock.patch.object(agent.plugin_rpc, 'get_agent_gateway_port') as fip_gw_port: fip_gw_port.return_value = agent_gateway_port ri.create_dvr_fip_interfaces(ext_gw_port) self.assertTrue(fip_gw_port.called) self.assertEqual(agent_gateway_port, ri.fip_ns.agent_gateway_port) @mock.patch.object(lla.LinkLocalAllocator, '_write') def test_create_dvr_fip_interfaces(self, lla_write): fake_network_id = _uuid() subnet_id = _uuid() fake_floatingips = {'floatingips': [ {'id': _uuid(), 'floating_ip_address': '20.0.0.3', 'fixed_ip_address': '192.168.0.1', 'floating_network_id': _uuid(), 'port_id': _uuid(), 'host': HOSTNAME}]} agent_gateway_port = ( [{'fixed_ips': [ {'ip_address': '20.0.0.30', 'prefixlen': 24, 'subnet_id': subnet_id}], 'subnets': [ {'id': subnet_id, 'cidr': '20.0.0.0/24', 'gateway_ip': '20.0.0.1'}], 'id': _uuid(), 'network_id': fake_network_id, 'mac_address': 'ca:fe:de:ad:be:ef'}] ) router = l3_test_common.prepare_router_data(enable_snat=True) router[l3_constants.FLOATINGIP_KEY] = fake_floatingips['floatingips'] router[l3_constants.FLOATINGIP_AGENT_INTF_KEY] = agent_gateway_port 
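# --- Illustrative aside (hypothetical helper, not in l3_test_common) ---
# The DVR fip-interface tests here repeat the same agent gateway port
# fixture. One way such a fixture could be built, using the same field
# names these tests use:
from oslo_utils import uuidutils


def make_agent_gateway_port(ip_address='20.0.0.30', cidr='20.0.0.0/24',
                            gateway_ip='20.0.0.1'):
    # Derive the prefix length from the CIDR so the two stay consistent.
    prefixlen = int(cidr.split('/')[1])
    subnet_id = uuidutils.generate_uuid()
    return {'fixed_ips': [{'ip_address': ip_address,
                           'prefixlen': prefixlen,
                           'subnet_id': subnet_id}],
            'subnets': [{'id': subnet_id,
                         'cidr': cidr,
                         'gateway_ip': gateway_ip}],
            'id': uuidutils.generate_uuid(),
            'network_id': uuidutils.generate_uuid(),
            'mac_address': 'ca:fe:de:ad:be:ef'}
# --- end of aside ---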
router['distributed'] = True agent = l3_agent.L3NATAgent(HOSTNAME, self.conf) ri = dvr_router.DvrEdgeRouter( agent, HOSTNAME, router['id'], router, **self.ri_kwargs) ext_gw_port = ri.router.get('gw_port') ri.fip_ns = agent.get_fip_ns(ext_gw_port['network_id']) ri.dist_fip_count = 0 ri.fip_ns.subscribe = mock.Mock() ri.fip_ns.agent_router_gateway = mock.Mock() agent.process_router_add = mock.Mock() with mock.patch.object(ri, 'get_floating_ips') as fips, \ mock.patch.object(ri, 'get_floating_agent_gw_interface' ) as fip_gw_port: fips.return_value = fake_floatingips fip_gw_port.return_value = agent_gateway_port[0] ri.create_dvr_fip_interfaces(ext_gw_port) self.assertTrue(fip_gw_port.called) self.assertTrue(fips.called) self.assertEqual(agent_gateway_port[0], ri.fip_ns.agent_gateway_port) self.assertTrue(ri.rtr_fip_subnet) self.assertEqual(1, agent.process_router_add.call_count) @mock.patch.object(lla.LinkLocalAllocator, '_write') def test_create_dvr_fip_interfaces_for_restart_l3agent_case(self, lla_write): fake_floatingips = {'floatingips': [ {'id': _uuid(), 'floating_ip_address': '20.0.0.3', 'fixed_ip_address': '192.168.0.1', 'floating_network_id': _uuid(), 'port_id': _uuid(), 'host': HOSTNAME}]} agent_gateway_port = ( [{'fixed_ips': [ {'ip_address': '20.0.0.30', 'prefixlen': 24, 'subnet_id': 'subnet_id'}], 'subnets': [ {'id': 'subnet_id', 'cidr': '20.0.0.0/24', 'gateway_ip': '20.0.0.1'}], 'id': _uuid(), 'network_id': 'fake_network_id', 'mac_address': 'ca:fe:de:ad:be:ef'}] ) router = l3_test_common.prepare_router_data(enable_snat=True) router[l3_constants.FLOATINGIP_KEY] = fake_floatingips['floatingips'] router[l3_constants.FLOATINGIP_AGENT_INTF_KEY] = agent_gateway_port router['distributed'] = True agent = l3_agent.L3NATAgent(HOSTNAME, self.conf) ri = dvr_router.DvrEdgeRouter( agent, HOSTNAME, router['id'], router, **self.ri_kwargs) ext_gw_port = ri.router.get('gw_port') ri.fip_ns = agent.get_fip_ns(ext_gw_port['network_id']) ri.fip_ns.subscribe = mock.Mock(return_value=True) ri.fip_ns.agent_router_gateway = mock.Mock() ri.rtr_fip_subnet = None ri.dist_fip_count = 0 with mock.patch.object(ri, 'get_floating_ips') as fips,\ mock.patch.object(ri, 'get_floating_agent_gw_interface' ) as fip_gw_port: fips.return_value = fake_floatingips fip_gw_port.return_value = agent_gateway_port[0] ri.create_dvr_fip_interfaces(ext_gw_port) self.assertTrue(fip_gw_port.called) self.assertTrue(fips.called) self.assertEqual(agent_gateway_port[0], ri.fip_ns.agent_gateway_port) self.assertTrue(ri.rtr_fip_subnet) def test_process_router_cent_floating_ip_add(self): fake_floatingips = {'floatingips': [ {'id': _uuid(), 'floating_ip_address': '15.1.2.3', 'fixed_ip_address': '192.168.0.1', 'status': 'DOWN', 'floating_network_id': _uuid(), 'port_id': _uuid(), 'host': HOSTNAME}]} router = l3_test_common.prepare_router_data(enable_snat=True) router[l3_constants.FLOATINGIP_KEY] = fake_floatingips['floatingips'] agent = l3_agent.L3NATAgent(HOSTNAME, self.conf) ri = l3router.RouterInfo(router['id'], router, **self.ri_kwargs) ri.iptables_manager.ipv4['nat'] = mock.MagicMock() ri.get_external_device_name = mock.Mock(return_value='exgw') self._test_process_floating_ip_addresses_add(ri, agent) def test_process_router_snat_disabled(self): agent = l3_agent.L3NATAgent(HOSTNAME, self.conf) router = l3_test_common.prepare_router_data(enable_snat=True) ri = l3router.RouterInfo(router['id'], router, **self.ri_kwargs) ri.external_gateway_added = mock.Mock() # Process with NAT ri.process(agent) orig_nat_rules = 
ri.iptables_manager.ipv4['nat'].rules[:] orig_mangle_rules = ri.iptables_manager.ipv4['mangle'].rules[:] # Reprocess without NAT router['enable_snat'] = False # Reassign the router object to RouterInfo ri.router = router ri.process(agent) # For some reason set logic does not work well with # IpTablesRule instances nat_rules_delta = [r for r in orig_nat_rules if r not in ri.iptables_manager.ipv4['nat'].rules] self.assertEqual(1, len(nat_rules_delta)) mangle_rules_delta = [ r for r in orig_mangle_rules if r not in ri.iptables_manager.ipv4['mangle'].rules] self.assertEqual(1, len(mangle_rules_delta)) self._verify_snat_mangle_rules(nat_rules_delta, mangle_rules_delta, router) self.assertEqual(1, self.send_adv_notif.call_count) def test_process_router_snat_enabled(self): agent = l3_agent.L3NATAgent(HOSTNAME, self.conf) router = l3_test_common.prepare_router_data(enable_snat=False) ri = l3router.RouterInfo(router['id'], router, **self.ri_kwargs) ri.external_gateway_added = mock.Mock() # Process without NAT ri.process(agent) orig_nat_rules = ri.iptables_manager.ipv4['nat'].rules[:] orig_mangle_rules = ri.iptables_manager.ipv4['mangle'].rules[:] # Reprocess with NAT router['enable_snat'] = True # Reassign the router object to RouterInfo ri.router = router ri.process(agent) # For some reason set logic does not work well with # IpTablesRule instances nat_rules_delta = [r for r in ri.iptables_manager.ipv4['nat'].rules if r not in orig_nat_rules] self.assertEqual(1, len(nat_rules_delta)) mangle_rules_delta = [ r for r in ri.iptables_manager.ipv4['mangle'].rules if r not in orig_mangle_rules] self.assertEqual(1, len(mangle_rules_delta)) self._verify_snat_mangle_rules(nat_rules_delta, mangle_rules_delta, router) self.assertEqual(1, self.send_adv_notif.call_count) def _test_update_routing_table(self, is_snat_host=True): router = l3_test_common.prepare_router_data() uuid = router['id'] s_netns = 'snat-' + uuid q_netns = 'qrouter-' + uuid fake_route1 = {'destination': '135.207.0.0/16', 'nexthop': '19.4.4.200'} calls = [mock.call('replace', fake_route1, q_netns)] agent = l3_agent.L3NATAgent(HOSTNAME, self.conf) ri = dvr_router.DvrEdgeRouter( agent, HOSTNAME, uuid, router, **self.ri_kwargs) ri._update_routing_table = mock.Mock() with mock.patch.object(ri, '_is_this_snat_host') as snat_host: snat_host.return_value = is_snat_host ri.update_routing_table('replace', fake_route1) if is_snat_host: ri._update_routing_table('replace', fake_route1, s_netns) calls += [mock.call('replace', fake_route1, s_netns)] ri._update_routing_table.assert_has_calls(calls, any_order=True) def test_process_update_snat_routing_table(self): self._test_update_routing_table() def test_process_not_update_snat_routing_table(self): self._test_update_routing_table(is_snat_host=False) def test_process_router_interface_added(self): agent = l3_agent.L3NATAgent(HOSTNAME, self.conf) router = l3_test_common.prepare_router_data() ri = l3router.RouterInfo(router['id'], router, **self.ri_kwargs) ri.external_gateway_added = mock.Mock() # Process with NAT ri.process(agent) # Add an interface and reprocess l3_test_common.router_append_interface(router) # Reassign the router object to RouterInfo ri.router = router ri.process(agent) # send_ip_addr_adv_notif is called both times process is called self.assertEqual(2, self.send_adv_notif.call_count) def _test_process_ipv6_only_or_dual_stack_gw(self, dual_stack=False): agent = l3_agent.L3NATAgent(HOSTNAME, self.conf) router = l3_test_common.prepare_router_data(ip_version=6, dual_stack=dual_stack) # Get NAT 
rules without the gw_port gw_port = router['gw_port'] router['gw_port'] = None ri = l3router.RouterInfo(router['id'], router, **self.ri_kwargs) ri.external_gateway_added = mock.Mock() self._process_router_instance_for_agent(agent, ri, router) orig_nat_rules = ri.iptables_manager.ipv4['nat'].rules[:] # Get NAT rules with the gw_port router['gw_port'] = gw_port ri = l3router.RouterInfo(router['id'], router, **self.ri_kwargs) p = ri.external_gateway_nat_fip_rules s = ri.external_gateway_nat_snat_rules attrs_to_mock = dict( [(a, mock.DEFAULT) for a in ['external_gateway_nat_fip_rules', 'external_gateway_nat_snat_rules']] ) with mock.patch.multiple(ri, **attrs_to_mock) as mocks: mocks['external_gateway_nat_fip_rules'].side_effect = p mocks['external_gateway_nat_snat_rules'].side_effect = s self._process_router_instance_for_agent(agent, ri, router) new_nat_rules = ri.iptables_manager.ipv4['nat'].rules[:] # NAT rules should only change for dual_stack operation if dual_stack: self.assertTrue( mocks['external_gateway_nat_fip_rules'].called) self.assertTrue( mocks['external_gateway_nat_snat_rules'].called) self.assertNotEqual(orig_nat_rules, new_nat_rules) else: self.assertFalse( mocks['external_gateway_nat_fip_rules'].called) self.assertFalse( mocks['external_gateway_nat_snat_rules'].called) self.assertEqual(orig_nat_rules, new_nat_rules) def test_process_ipv6_only_gw(self): self._test_process_ipv6_only_or_dual_stack_gw() def test_process_dual_stack_gw(self): self._test_process_ipv6_only_or_dual_stack_gw(dual_stack=True) def _process_router_ipv6_interface_added( self, router, ra_mode=None, addr_mode=None): agent = l3_agent.L3NATAgent(HOSTNAME, self.conf) ri = l3router.RouterInfo(router['id'], router, **self.ri_kwargs) ri.external_gateway_added = mock.Mock() # Process with NAT ri.process(agent) orig_nat_rules = ri.iptables_manager.ipv4['nat'].rules[:] # Add an IPv6 interface and reprocess l3_test_common.router_append_interface(router, count=1, ip_version=6, ra_mode=ra_mode, addr_mode=addr_mode) # Reassign the router object to RouterInfo self._process_router_instance_for_agent(agent, ri, router) # IPv4 NAT rules should not be changed by adding an IPv6 interface nat_rules_delta = [r for r in ri.iptables_manager.ipv4['nat'].rules if r not in orig_nat_rules] self.assertFalse(nat_rules_delta) return ri def _radvd_expected_call_external_process(self, ri, enable=True): expected_calls = [mock.call(uuid=ri.router['id'], service='radvd', default_cmd_callback=mock.ANY, namespace=ri.ns_name, conf=mock.ANY, run_as_root=True)] if enable: expected_calls.append(mock.call().enable(reload_cfg=True)) else: expected_calls.append(mock.call().disable()) return expected_calls def _process_router_ipv6_subnet_added(self, router, ipv6_subnet_modes=None, dns_nameservers=None, network_mtu=0): agent = l3_agent.L3NATAgent(HOSTNAME, self.conf) ri = l3router.RouterInfo(router['id'], router, **self.ri_kwargs) agent.external_gateway_added = mock.Mock() self._process_router_instance_for_agent(agent, ri, router) # Add an IPv6 interface with len(ipv6_subnet_modes) subnets # and reprocess l3_test_common.router_append_subnet( router, count=len(ipv6_subnet_modes), ip_version=6, ipv6_subnet_modes=ipv6_subnet_modes, dns_nameservers=dns_nameservers, network_mtu=network_mtu) # Reassign the router object to RouterInfo self._process_router_instance_for_agent(agent, ri, router) return ri def _assert_ri_process_enabled(self, ri): """Verify that process was enabled for a router instance.""" expected_calls = 
self._radvd_expected_call_external_process(ri) self.assertEqual(expected_calls, self.external_process.mock_calls) def _assert_ri_process_disabled(self, ri): """Verify that process was disabled for a router instance.""" expected_calls = self._radvd_expected_call_external_process(ri, False) self.assertEqual(expected_calls, self.external_process.mock_calls) def test_process_router_ipv6_interface_added(self): router = l3_test_common.prepare_router_data() ri = self._process_router_ipv6_interface_added(router) self._assert_ri_process_enabled(ri) # Expect radvd configured without prefix self.assertNotIn('prefix', self.utils_replace_file.call_args[0][1]) def test_process_router_ipv6_slaac_interface_added(self): router = l3_test_common.prepare_router_data() ri = self._process_router_ipv6_interface_added( router, ra_mode=l3_constants.IPV6_SLAAC) self._assert_ri_process_enabled(ri) # Expect radvd configured with prefix radvd_config_str = self.utils_replace_file.call_args[0][1] self.assertIn('prefix', radvd_config_str) self.assertIn('AdvAutonomous on', radvd_config_str) def test_process_router_ipv6_dhcpv6_stateful_interface_added(self): router = l3_test_common.prepare_router_data() ri = self._process_router_ipv6_interface_added( router, ra_mode=l3_constants.DHCPV6_STATEFUL) self._assert_ri_process_enabled(ri) # Expect radvd configured with prefix radvd_config_str = self.utils_replace_file.call_args[0][1] self.assertIn('prefix', radvd_config_str) self.assertIn('AdvAutonomous off', radvd_config_str) def test_process_router_ipv6_subnets_added(self): router = l3_test_common.prepare_router_data() ri = self._process_router_ipv6_subnet_added(router, ipv6_subnet_modes=[ {'ra_mode': l3_constants.IPV6_SLAAC, 'address_mode': l3_constants.IPV6_SLAAC}, {'ra_mode': l3_constants.DHCPV6_STATELESS, 'address_mode': l3_constants.DHCPV6_STATELESS}, {'ra_mode': l3_constants.DHCPV6_STATEFUL, 'address_mode': l3_constants.DHCPV6_STATEFUL}]) self._assert_ri_process_enabled(ri) radvd_config_str = self.utils_replace_file.call_args[0][1] # Assert we have a prefix from IPV6_SLAAC and a prefix from # DHCPV6_STATELESS on one interface self.assertEqual(3, radvd_config_str.count("prefix")) self.assertEqual(1, radvd_config_str.count("interface")) self.assertEqual(2, radvd_config_str.count("AdvAutonomous on")) self.assertEqual(1, radvd_config_str.count("AdvAutonomous off")) def test_process_router_ipv6_subnets_added_to_existing_port(self): agent = l3_agent.L3NATAgent(HOSTNAME, self.conf) router = l3_test_common.prepare_router_data() ri = l3router.RouterInfo(router['id'], router, **self.ri_kwargs) agent.external_gateway_added = mock.Mock() self._process_router_instance_for_agent(agent, ri, router) # Add the first subnet on a new interface l3_test_common.router_append_subnet( router, count=1, ip_version=6, ipv6_subnet_modes=[ {'ra_mode': l3_constants.IPV6_SLAAC, 'address_mode': l3_constants.IPV6_SLAAC}]) self._process_router_instance_for_agent(agent, ri, router) self._assert_ri_process_enabled(ri) radvd_config = self.utils_replace_file.call_args[0][1].split() self.assertEqual(1, len(ri.internal_ports[1]['subnets'])) self.assertEqual(1, len(ri.internal_ports[1]['fixed_ips'])) self.assertEqual(1, radvd_config.count("prefix")) self.assertEqual(1, radvd_config.count("interface")) # Reset mocks to verify radvd enabled and configured correctly # after second subnet added to interface self.external_process.reset_mock() self.utils_replace_file.reset_mock() # Add the second subnet on the same interface interface_id = 
router[l3_constants.INTERFACE_KEY][1]['id'] l3_test_common.router_append_subnet( router, count=1, ip_version=6, ipv6_subnet_modes=[ {'ra_mode': l3_constants.IPV6_SLAAC, 'address_mode': l3_constants.IPV6_SLAAC}], interface_id=interface_id) self._process_router_instance_for_agent(agent, ri, router) # radvd should have been enabled again and the interface # should have two prefixes self._assert_ri_process_enabled(ri) radvd_config = self.utils_replace_file.call_args[0][1].split() self.assertEqual(2, len(ri.internal_ports[1]['subnets'])) self.assertEqual(2, len(ri.internal_ports[1]['fixed_ips'])) self.assertEqual(2, radvd_config.count("prefix")) self.assertEqual(1, radvd_config.count("interface")) def test_process_router_ipv6v4_interface_added(self): agent = l3_agent.L3NATAgent(HOSTNAME, self.conf) router = l3_test_common.prepare_router_data() ri = l3router.RouterInfo(router['id'], router, **self.ri_kwargs) ri.external_gateway_added = mock.Mock() # Process with NAT ri.process(agent) # Add an IPv4 and IPv6 interface and reprocess l3_test_common.router_append_interface(router, count=1, ip_version=4) l3_test_common.router_append_interface(router, count=1, ip_version=6) # Reassign the router object to RouterInfo self._process_router_instance_for_agent(agent, ri, router) self._assert_ri_process_enabled(ri) def test_process_router_interface_removed(self): agent = l3_agent.L3NATAgent(HOSTNAME, self.conf) router = l3_test_common.prepare_router_data(num_internal_ports=2) ri = l3router.RouterInfo(router['id'], router, **self.ri_kwargs) ri.external_gateway_added = mock.Mock() # Process with NAT ri.process(agent) # Add an interface and reprocess del router[l3_constants.INTERFACE_KEY][1] # Reassign the router object to RouterInfo ri.router = router ri.process(agent) # send_ip_addr_adv_notif is called both times process is called self.assertEqual(2, self.send_adv_notif.call_count) def test_process_router_ipv6_interface_removed(self): agent = l3_agent.L3NATAgent(HOSTNAME, self.conf) router = l3_test_common.prepare_router_data() ri = l3router.RouterInfo(router['id'], router, **self.ri_kwargs) ri.external_gateway_added = mock.Mock() self._process_router_instance_for_agent(agent, ri, router) # Add an IPv6 interface and reprocess l3_test_common.router_append_interface(router, count=1, ip_version=6) self._process_router_instance_for_agent(agent, ri, router) self._assert_ri_process_enabled(ri) # Reset the calls so we can check for disable radvd self.external_process.reset_mock() self.process_monitor.reset_mock() # Remove the IPv6 interface and reprocess del router[l3_constants.INTERFACE_KEY][1] self._process_router_instance_for_agent(agent, ri, router) self._assert_ri_process_disabled(ri) def test_process_router_ipv6_subnet_removed(self): agent = l3_agent.L3NATAgent(HOSTNAME, self.conf) router = l3_test_common.prepare_router_data() ri = l3router.RouterInfo(router['id'], router, **self.ri_kwargs) agent.external_gateway_added = mock.Mock() self._process_router_instance_for_agent(agent, ri, router) # Add an IPv6 interface with two subnets and reprocess l3_test_common.router_append_subnet( router, count=2, ip_version=6, ipv6_subnet_modes=([{'ra_mode': l3_constants.IPV6_SLAAC, 'address_mode': l3_constants.IPV6_SLAAC}] * 2)) self._process_router_instance_for_agent(agent, ri, router) self._assert_ri_process_enabled(ri) # Reset mocks to check for modified radvd config self.utils_replace_file.reset_mock() self.external_process.reset_mock() # Remove one subnet from the interface and reprocess interfaces = 
copy.deepcopy(router[l3_constants.INTERFACE_KEY]) del interfaces[1]['subnets'][0] del interfaces[1]['fixed_ips'][0] router[l3_constants.INTERFACE_KEY] = interfaces self._process_router_instance_for_agent(agent, ri, router) # Assert radvd was enabled again and that we only have one # prefix on the interface self._assert_ri_process_enabled(ri) radvd_config = self.utils_replace_file.call_args[0][1].split() self.assertEqual(1, len(ri.internal_ports[1]['subnets'])) self.assertEqual(1, len(ri.internal_ports[1]['fixed_ips'])) self.assertEqual(1, radvd_config.count("interface")) self.assertEqual(1, radvd_config.count("prefix")) def test_process_router_internal_network_added_unexpected_error(self): agent = l3_agent.L3NATAgent(HOSTNAME, self.conf) router = l3_test_common.prepare_router_data() ri = l3router.RouterInfo(router['id'], router, **self.ri_kwargs) ri.external_gateway_added = mock.Mock() with mock.patch.object( ri, 'internal_network_added') as internal_network_added: # raise RuntimeError to simulate that an unexpected exception # occurs internal_network_added.side_effect = RuntimeError self.assertRaises(RuntimeError, ri.process, agent) self.assertNotIn( router[l3_constants.INTERFACE_KEY][0], ri.internal_ports) # The unexpected exception has been fixed manually internal_network_added.side_effect = None # periodic_sync_routers_task finds out that _rpc_loop failed to # process the router last time, it will retry in the next run. ri.process(agent) # We were able to add the port to ri.internal_ports self.assertIn( router[l3_constants.INTERFACE_KEY][0], ri.internal_ports) def test_process_router_internal_network_removed_unexpected_error(self): agent = l3_agent.L3NATAgent(HOSTNAME, self.conf) router = l3_test_common.prepare_router_data() ri = l3router.RouterInfo(router['id'], router, **self.ri_kwargs) ri.external_gateway_added = mock.Mock() # add an internal port ri.process(agent) with mock.patch.object( ri, 'internal_network_removed') as internal_net_removed: # raise RuntimeError to simulate that an unexpected exception # occurs internal_net_removed.side_effect = RuntimeError ri.internal_ports[0]['admin_state_up'] = False # The above port is set to down state, remove it. self.assertRaises(RuntimeError, ri.process, agent) self.assertIn( router[l3_constants.INTERFACE_KEY][0], ri.internal_ports) # The unexpected exception has been fixed manually internal_net_removed.side_effect = None # periodic_sync_routers_task finds out that _rpc_loop failed to # process the router last time, it will retry in the next run. 
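# --- Illustrative aside (standalone sketch, assumed names) ---
# The "unexpected_error" tests here exercise the agent's fail-then-retry
# contract: the first process() attempt raises, state is left unchanged,
# and the next periodic run (the ri.process(agent) call that follows)
# succeeds. mock's side_effect accepts an iterable, which models exactly
# that one-off transient failure:
import mock

plug_port = mock.Mock(side_effect=[RuntimeError('transient'), None])

try:
    plug_port()            # first run: fails, router stays queued for resync
except RuntimeError:
    pass
plug_port()                # next periodic run: succeeds
assert plug_port.call_count == 2
# --- end of aside ---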
ri.process(agent) # We were able to remove the port from ri.internal_ports self.assertNotIn( router[l3_constants.INTERFACE_KEY][0], ri.internal_ports) def test_process_router_floatingip_nochange(self): agent = l3_agent.L3NATAgent(HOSTNAME, self.conf) router = l3_test_common.prepare_router_data(num_internal_ports=1) fip1 = {'id': _uuid(), 'floating_ip_address': '8.8.8.8', 'fixed_ip_address': '7.7.7.7', 'status': 'ACTIVE', 'port_id': router[l3_constants.INTERFACE_KEY][0]['id']} fip2 = copy.copy(fip1) fip2.update({'id': _uuid(), 'status': 'DOWN', 'floating_ip_address': '9.9.9.9'}) router[l3_constants.FLOATINGIP_KEY] = [fip1, fip2] ri = legacy_router.LegacyRouter(router['id'], router, **self.ri_kwargs) ri.external_gateway_added = mock.Mock() with mock.patch.object( agent.plugin_rpc, 'update_floatingip_statuses' ) as mock_update_fip_status,\ mock.patch.object(ri, 'get_router_cidrs') as mock_get_cidrs: mock_get_cidrs.return_value = set( [fip1['floating_ip_address'] + '/32']) ri.process(agent) # make sure only the one that wasn't in existing cidrs was sent mock_update_fip_status.assert_called_once_with( mock.ANY, ri.router_id, {fip2['id']: 'ACTIVE'}) def test_process_router_floatingip_status_update_if_processed(self): agent = l3_agent.L3NATAgent(HOSTNAME, self.conf) router = l3_test_common.prepare_router_data(num_internal_ports=1) fip1 = {'id': _uuid(), 'floating_ip_address': '8.8.8.8', 'fixed_ip_address': '7.7.7.7', 'status': 'ACTIVE', 'port_id': router[l3_constants.INTERFACE_KEY][0]['id']} fip2 = copy.copy(fip1) fip2.update({'id': _uuid(), 'status': 'DOWN', }) router[l3_constants.FLOATINGIP_KEY] = [fip1, fip2] ri = legacy_router.LegacyRouter(router['id'], router, **self.ri_kwargs) ri.external_gateway_added = mock.Mock() with mock.patch.object( agent.plugin_rpc, 'update_floatingip_statuses' ) as mock_update_fip_status,\ mock.patch.object(ri, 'get_router_cidrs') as mock_get_cidrs: mock_get_cidrs.return_value = set() ri.process(agent) # make sure both was sent since not existed in existing cidrs mock_update_fip_status.assert_called_once_with( mock.ANY, ri.router_id, {fip1['id']: 'ACTIVE', fip2['id']: 'ACTIVE'}) def test_process_router_floatingip_disabled(self): agent = l3_agent.L3NATAgent(HOSTNAME, self.conf) with mock.patch.object( agent.plugin_rpc, 'update_floatingip_statuses') as mock_update_fip_status: fip_id = _uuid() router = l3_test_common.prepare_router_data(num_internal_ports=1) router[l3_constants.FLOATINGIP_KEY] = [ {'id': fip_id, 'floating_ip_address': '8.8.8.8', 'fixed_ip_address': '7.7.7.7', 'status': 'DOWN', 'port_id': router[l3_constants.INTERFACE_KEY][0]['id']}] ri = legacy_router.LegacyRouter(router['id'], router, **self.ri_kwargs) ri.external_gateway_added = mock.Mock() ri.process(agent) # Assess the call for putting the floating IP up was performed mock_update_fip_status.assert_called_once_with( mock.ANY, ri.router_id, {fip_id: l3_constants.FLOATINGIP_STATUS_ACTIVE}) mock_update_fip_status.reset_mock() # Process the router again, this time without floating IPs router[l3_constants.FLOATINGIP_KEY] = [] ri.router = router ri.process(agent) # Assess the call for putting the floating IP up was performed mock_update_fip_status.assert_called_once_with( mock.ANY, ri.router_id, {fip_id: l3_constants.FLOATINGIP_STATUS_DOWN}) def test_process_router_floatingip_exception(self): agent = l3_agent.L3NATAgent(HOSTNAME, self.conf) with mock.patch.object( agent.plugin_rpc, 'update_floatingip_statuses') as mock_update_fip_status: fip_id = _uuid() router = 
l3_test_common.prepare_router_data(num_internal_ports=1) router[l3_constants.FLOATINGIP_KEY] = [ {'id': fip_id, 'floating_ip_address': '8.8.8.8', 'fixed_ip_address': '7.7.7.7', 'port_id': router[l3_constants.INTERFACE_KEY][0]['id']}] ri = l3router.RouterInfo(router['id'], router, **self.ri_kwargs) ri.process_floating_ip_addresses = mock.Mock( side_effect=RuntimeError) ri.external_gateway_added = mock.Mock() ri.process(agent) # Assert that the call for putting the floating IP into Error # was performed mock_update_fip_status.assert_called_once_with( mock.ANY, ri.router_id, {fip_id: l3_constants.FLOATINGIP_STATUS_ERROR}) def test_process_external_iptables_exception(self): agent = l3_agent.L3NATAgent(HOSTNAME, self.conf) with mock.patch.object( agent.plugin_rpc, 'update_floatingip_statuses') as mock_update_fip_status: fip_id = _uuid() router = l3_test_common.prepare_router_data(num_internal_ports=1) router[l3_constants.FLOATINGIP_KEY] = [ {'id': fip_id, 'floating_ip_address': '8.8.8.8', 'fixed_ip_address': '7.7.7.7', 'port_id': router[l3_constants.INTERFACE_KEY][0]['id']}] ri = l3router.RouterInfo(router['id'], router, **self.ri_kwargs) ri.iptables_manager._apply = mock.Mock(side_effect=Exception) ri.process_external(agent) # Assert that the call for putting the floating IP into Error # was performed mock_update_fip_status.assert_called_once_with( mock.ANY, ri.router_id, {fip_id: l3_constants.FLOATINGIP_STATUS_ERROR}) self.assertEqual(1, ri.iptables_manager._apply.call_count) def test_handle_router_snat_rules_distributed_without_snat_manager(self): agent = l3_agent.L3NATAgent(HOSTNAME, self.conf) ri = dvr_router.DvrEdgeRouter( agent, HOSTNAME, 'foo_router_id', {}, **self.ri_kwargs) ri.iptables_manager = mock.MagicMock() ri._is_this_snat_host = mock.Mock(return_value=True) ri.get_ex_gw_port = mock.Mock(return_value=None) ri._handle_router_snat_rules(None, mock.ANY) self.assertIsNone(ri.snat_iptables_manager) self.assertFalse(ri.iptables_manager.called) def test_handle_router_snat_rules_add_back_jump(self): ri = l3router.RouterInfo(_uuid(), {}, **self.ri_kwargs) ri.iptables_manager = mock.MagicMock() port = {'fixed_ips': [{'ip_address': '192.168.1.4'}]} ri._handle_router_snat_rules(port, "iface") nat = ri.iptables_manager.ipv4['nat'] nat.empty_chain.assert_any_call('snat') nat.add_rule.assert_any_call('snat', '-j $float-snat') for call in nat.mock_calls: name, args, kwargs = call if name == 'add_rule': self.assertEqual(('snat', '-j $float-snat'), args) self.assertEqual({}, kwargs) break def test_handle_router_snat_rules_add_rules(self): ri = l3router.RouterInfo(_uuid(), {}, **self.ri_kwargs) ex_gw_port = {'fixed_ips': [{'ip_address': '192.168.1.4'}]} ri.router = {'distributed': False} ri._handle_router_snat_rules(ex_gw_port, "iface") nat_rules = list(map(str, ri.iptables_manager.ipv4['nat'].rules)) wrap_name = ri.iptables_manager.wrap_name jump_float_rule = "-A %s-snat -j %s-float-snat" % (wrap_name, wrap_name) snat_rule1 = ("-A %s-snat -o iface -j SNAT --to-source %s") % ( wrap_name, ex_gw_port['fixed_ips'][0]['ip_address']) snat_rule2 = ("-A %s-snat -m mark ! 
--mark 0x2/%s " "-m conntrack --ctstate DNAT " "-j SNAT --to-source %s") % ( wrap_name, l3_constants.ROUTER_MARK_MASK, ex_gw_port['fixed_ips'][0]['ip_address']) self.assertIn(jump_float_rule, nat_rules) self.assertIn(snat_rule1, nat_rules) self.assertIn(snat_rule2, nat_rules) self.assertThat(nat_rules.index(jump_float_rule), matchers.LessThan(nat_rules.index(snat_rule1))) mangle_rules = list(map(str, ri.iptables_manager.ipv4['mangle'].rules)) mangle_rule = ("-A %s-mark -i iface " "-j MARK --set-xmark 0x2/%s" % (wrap_name, l3_constants.ROUTER_MARK_MASK)) self.assertIn(mangle_rule, mangle_rules) def test_process_router_delete_stale_internal_devices(self): agent = l3_agent.L3NATAgent(HOSTNAME, self.conf) stale_devlist = [l3_test_common.FakeDev('qr-a1b2c3d4-e5'), l3_test_common.FakeDev('qr-b2c3d4e5-f6')] stale_devnames = [dev.name for dev in stale_devlist] get_devices_return = [] get_devices_return.extend(stale_devlist) self.mock_ip.get_devices.return_value = get_devices_return router = l3_test_common.prepare_router_data(enable_snat=True, num_internal_ports=1) ri = l3router.RouterInfo(router['id'], router, **self.ri_kwargs) internal_ports = ri.router.get(l3_constants.INTERFACE_KEY, []) self.assertEqual(1, len(internal_ports)) internal_port = internal_ports[0] with mock.patch.object(ri, 'internal_network_removed' ) as internal_network_removed,\ mock.patch.object(ri, 'internal_network_added' ) as internal_network_added,\ mock.patch.object(ri, 'external_gateway_removed' ) as external_gateway_removed,\ mock.patch.object(ri, 'external_gateway_added' ) as external_gateway_added: ri.process(agent) self.assertEqual(1, external_gateway_added.call_count) self.assertFalse(external_gateway_removed.called) self.assertFalse(internal_network_removed.called) internal_network_added.assert_called_once_with(internal_port) self.assertEqual(len(stale_devnames), self.mock_driver.unplug.call_count) calls = [mock.call(stale_devname, namespace=ri.ns_name, prefix=l3_agent.INTERNAL_DEV_PREFIX) for stale_devname in stale_devnames] self.mock_driver.unplug.assert_has_calls(calls, any_order=True) def test_process_router_delete_stale_external_devices(self): agent = l3_agent.L3NATAgent(HOSTNAME, self.conf) stale_devlist = [l3_test_common.FakeDev('qg-a1b2c3d4-e5')] stale_devnames = [dev.name for dev in stale_devlist] router = l3_test_common.prepare_router_data(enable_snat=True, num_internal_ports=1) del router['gw_port'] ri = l3router.RouterInfo(router['id'], router, **self.ri_kwargs) self.mock_ip.get_devices.return_value = stale_devlist ri.process(agent) self.mock_driver.unplug.assert_called_with( stale_devnames[0], bridge="br-ex", namespace=ri.ns_name, prefix=l3_agent.EXTERNAL_DEV_PREFIX) def test_router_deleted(self): agent = l3_agent.L3NATAgent(HOSTNAME, self.conf) agent._queue = mock.Mock() agent.router_deleted(None, FAKE_ID) self.assertEqual(1, agent._queue.add.call_count) def test_routers_updated(self): agent = l3_agent.L3NATAgent(HOSTNAME, self.conf) agent._queue = mock.Mock() agent.routers_updated(None, [FAKE_ID]) self.assertEqual(1, agent._queue.add.call_count) def test_removed_from_agent(self): agent = l3_agent.L3NATAgent(HOSTNAME, self.conf) agent._queue = mock.Mock() agent.router_removed_from_agent(None, {'router_id': FAKE_ID}) self.assertEqual(1, agent._queue.add.call_count) def test_added_to_agent(self): agent = l3_agent.L3NATAgent(HOSTNAME, self.conf) agent._queue = mock.Mock() agent.router_added_to_agent(None, [FAKE_ID]) self.assertEqual(1, agent._queue.add.call_count) def test_destroy_namespace(self): 
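# (Illustrative note, not from the original test: router namespace names are
# derived by prefixing the router id, which is what the namespace and device
# names asserted below rely on, e.g.:
#
#     ns_name = namespaces.NS_PREFIX + 'bar'  # -> 'qrouter-bar'
# )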
namespace = 'qrouter-bar' self.mock_ip.get_namespaces.return_value = [namespace] self.mock_ip.get_devices.return_value = [ l3_test_common.FakeDev('qr-aaaa'), l3_test_common.FakeDev('rfp-aaaa')] agent = l3_agent.L3NATAgent(HOSTNAME, self.conf) ns = namespaces.RouterNamespace( 'bar', self.conf, agent.driver, agent.use_ipv6) ns.create() ns.delete() self.mock_driver.unplug.assert_called_once_with('qr-aaaa', prefix='qr-', namespace='qrouter' '-bar') self.mock_ip.del_veth.assert_called_once_with('rfp-aaaa') def test_destroy_router_namespace(self): agent = l3_agent.L3NATAgent(HOSTNAME, self.conf) ns = namespaces.Namespace( 'qrouter-bar', self.conf, agent.driver, agent.use_ipv6) ns.create() ns.delete() self.mock_ip.netns.delete.assert_called_once_with("qrouter-bar") def test_destroy_snat_namespace(self): namespace = 'snat-bar' self.mock_ip.get_namespaces.return_value = [namespace] self.mock_ip.get_devices.return_value = [ l3_test_common.FakeDev('qg-aaaa'), l3_test_common.FakeDev('sg-aaaa')] agent = l3_agent.L3NATAgent(HOSTNAME, self.conf) ns = dvr_snat_ns.SnatNamespace( 'bar', self.conf, agent.driver, agent.use_ipv6) ns.create() ns.delete() calls = [mock.call('qg-aaaa', bridge=agent.conf.external_network_bridge, namespace=namespace, prefix=l3_agent.EXTERNAL_DEV_PREFIX), mock.call('sg-aaaa', namespace=namespace, prefix=dvr_snat_ns.SNAT_INT_DEV_PREFIX)] self.mock_driver.unplug.assert_has_calls(calls, any_order=True) def _configure_metadata_proxy(self, enableflag=True): if not enableflag: self.conf.set_override('enable_metadata_proxy', False) agent = l3_agent.L3NATAgent(HOSTNAME, self.conf) router_id = _uuid() router = {'id': router_id, 'external_gateway_info': {}, 'routes': [], 'distributed': False} driver = metadata_driver.MetadataDriver with mock.patch.object( driver, 'destroy_monitored_metadata_proxy') as destroy_proxy: with mock.patch.object( driver, 'spawn_monitored_metadata_proxy') as spawn_proxy: agent._process_added_router(router) if enableflag: spawn_proxy.assert_called_with( mock.ANY, mock.ANY, self.conf.metadata_port, mock.ANY, router_id=router_id ) else: self.assertFalse(spawn_proxy.call_count) agent._router_removed(router_id) if enableflag: destroy_proxy.assert_called_with(mock.ANY, router_id, mock.ANY) else: self.assertFalse(destroy_proxy.call_count) def test_enable_metadata_proxy(self): self._configure_metadata_proxy() def test_disable_metadata_proxy_spawn(self): self._configure_metadata_proxy(enableflag=False) def test_router_id_specified_in_conf(self): self.conf.set_override('router_id', '1234') self._configure_metadata_proxy() def _test_process_routers_update_rpc_timeout(self, ext_net_call=False, ext_net_call_failed=False): agent = l3_agent.L3NATAgent(HOSTNAME, self.conf) agent.fullsync = False agent._process_router_if_compatible = mock.Mock() if ext_net_call_failed: agent._process_router_if_compatible.side_effect = ( oslo_messaging.MessagingTimeout) agent._queue = mock.Mock() agent._resync_router = mock.Mock() update = mock.Mock() update.router = None agent._queue.each_update_to_next_router.side_effect = [ [(None, update)]] agent._process_router_update() self.assertFalse(agent.fullsync) self.assertEqual(ext_net_call, agent._process_router_if_compatible.called) agent._resync_router.assert_called_with(update) def test_process_routers_update_rpc_timeout_on_get_routers(self): self.plugin_api.get_routers.side_effect = ( oslo_messaging.MessagingTimeout) self._test_process_routers_update_rpc_timeout() def test_process_routers_update_resyncs_failed_router(self): agent = 
l3_agent.L3NATAgent(HOSTNAME, self.conf) # Attempting to configure the router will fail agent._process_router_if_compatible = mock.MagicMock() agent._process_router_if_compatible.side_effect = RuntimeError() # Queue an update from a full sync update = router_processing_queue.RouterUpdate( 42, router_processing_queue.PRIORITY_SYNC_ROUTERS_TASK, router=mock.Mock(), timestamp=timeutils.utcnow()) agent._queue.add(update) agent._process_router_update() # The update contained the router object, so get_routers won't be called self.assertFalse(agent.plugin_rpc.get_routers.called) # The update failed, so assert that get_routers was called agent._process_router_update() self.assertTrue(agent.plugin_rpc.get_routers.called) def test_process_routers_update_rpc_timeout_on_get_ext_net(self): self._test_process_routers_update_rpc_timeout(ext_net_call=True, ext_net_call_failed=True) def _test_process_routers_update_router_deleted(self, error=False): agent = l3_agent.L3NATAgent(HOSTNAME, self.conf) agent._queue = mock.Mock() update = mock.Mock() update.router = None update.action = 1 # ROUTER_DELETED router_info = mock.MagicMock() agent.router_info[update.id] = router_info router_processor = mock.Mock() agent._queue.each_update_to_next_router.side_effect = [ [(router_processor, update)]] agent._resync_router = mock.Mock() if error: agent._safe_router_removed = mock.Mock() agent._safe_router_removed.return_value = False agent._process_router_update() if error: self.assertFalse(router_processor.fetched_and_processed.called) agent._resync_router.assert_called_with(update) else: router_info.delete.assert_called_once_with(agent) self.assertFalse(agent.router_info) self.assertFalse(agent._resync_router.called) router_processor.fetched_and_processed.assert_called_once_with( update.timestamp) def test_process_routers_update_router_deleted_success(self): self._test_process_routers_update_router_deleted() def test_process_routers_update_router_deleted_error(self): self._test_process_routers_update_router_deleted(True) def test_process_router_if_compatible_with_no_ext_net_in_conf(self): agent = l3_agent.L3NATAgent(HOSTNAME, self.conf) self.plugin_api.get_external_network_id.return_value = 'aaa' router = {'id': _uuid(), 'routes': [], 'admin_state_up': True, 'external_gateway_info': {'network_id': 'aaa'}} agent._process_router_if_compatible(router) self.assertIn(router['id'], agent.router_info) self.plugin_api.get_external_network_id.assert_called_with( agent.context) def test_process_router_if_compatible_with_cached_ext_net(self): agent = l3_agent.L3NATAgent(HOSTNAME, self.conf) self.plugin_api.get_external_network_id.return_value = 'aaa' agent.target_ex_net_id = 'aaa' router = {'id': _uuid(), 'routes': [], 'admin_state_up': True, 'external_gateway_info': {'network_id': 'aaa'}} agent._process_router_if_compatible(router) self.assertIn(router['id'], agent.router_info) self.assertFalse(self.plugin_api.get_external_network_id.called) def test_process_router_if_compatible_with_stale_cached_ext_net(self): agent = l3_agent.L3NATAgent(HOSTNAME, self.conf) self.plugin_api.get_external_network_id.return_value = 'aaa' agent.target_ex_net_id = 'bbb' router = {'id': _uuid(), 'routes': [], 'admin_state_up': True, 'external_gateway_info': {'network_id': 'aaa'}} agent._process_router_if_compatible(router) self.assertIn(router['id'], agent.router_info) self.plugin_api.get_external_network_id.assert_called_with( agent.context) def test_process_router_if_compatible_w_no_ext_net_and_2_net_plugin(self): agent = l3_agent.L3NATAgent(HOSTNAME, 
self.conf) router = {'id': _uuid(), 'routes': [], 'admin_state_up': True, 'external_gateway_info': {'network_id': 'aaa'}} agent.router_info = {} self.plugin_api.get_external_network_id.side_effect = ( n_exc.TooManyExternalNetworks()) self.assertRaises(n_exc.TooManyExternalNetworks, agent._process_router_if_compatible, router) self.assertNotIn(router['id'], agent.router_info) def test_process_router_if_compatible_with_ext_net_in_conf(self): agent = l3_agent.L3NATAgent(HOSTNAME, self.conf) self.plugin_api.get_external_network_id.return_value = 'aaa' router = {'id': _uuid(), 'routes': [], 'admin_state_up': True, 'external_gateway_info': {'network_id': 'bbb'}} agent.router_info = {} self.conf.set_override('gateway_external_network_id', 'aaa') self.assertRaises(n_exc.RouterNotCompatibleWithAgent, agent._process_router_if_compatible, router) self.assertNotIn(router['id'], agent.router_info) def test_process_router_if_compatible_with_no_bridge_no_ext_net(self): agent = l3_agent.L3NATAgent(HOSTNAME, self.conf) self.plugin_api.get_external_network_id.return_value = 'aaa' router = {'id': _uuid(), 'routes': [], 'admin_state_up': True, 'external_gateway_info': {'network_id': 'aaa'}} agent.router_info = {} self.conf.set_override('external_network_bridge', '') agent._process_router_if_compatible(router) self.assertIn(router['id'], agent.router_info) def test_nonexistent_interface_driver(self): self.conf.set_override('interface_driver', None) self.assertRaises(SystemExit, l3_agent.L3NATAgent, HOSTNAME, self.conf) self.conf.set_override('interface_driver', 'wrong.driver') self.assertRaises(SystemExit, l3_agent.L3NATAgent, HOSTNAME, self.conf) @mock.patch.object(namespaces.RouterNamespace, 'delete') @mock.patch.object(dvr_snat_ns.SnatNamespace, 'delete') def _cleanup_namespace_test(self, stale_namespace_list, router_list, other_namespaces, mock_snat_ns, mock_router_ns): good_namespace_list = [namespaces.NS_PREFIX + r['id'] for r in router_list] good_namespace_list += [dvr_snat_ns.SNAT_NS_PREFIX + r['id'] for r in router_list] self.mock_ip.get_namespaces.return_value = (stale_namespace_list + good_namespace_list + other_namespaces) agent = l3_agent.L3NATAgent(HOSTNAME, self.conf) self.assertTrue(agent.namespaces_manager._clean_stale) pm = self.external_process.return_value pm.reset_mock() with agent.namespaces_manager as ns_manager: for r in router_list: ns_manager.keep_router(r['id']) qrouters = [n for n in stale_namespace_list if n.startswith(namespaces.NS_PREFIX)] self.assertEqual(len(qrouters), mock_router_ns.call_count) self.assertEqual( len(stale_namespace_list) - len(qrouters), mock_snat_ns.call_count) self.assertFalse(agent.namespaces_manager._clean_stale) def test_cleanup_namespace(self): self.conf.set_override('router_id', None) stale_namespaces = [namespaces.NS_PREFIX + 'foo', namespaces.NS_PREFIX + 'bar', dvr_snat_ns.SNAT_NS_PREFIX + 'foo'] other_namespaces = ['unknown'] self._cleanup_namespace_test(stale_namespaces, [], other_namespaces) def test_cleanup_namespace_with_registered_router_ids(self): self.conf.set_override('router_id', None) stale_namespaces = [namespaces.NS_PREFIX + 'cccc', namespaces.NS_PREFIX + 'eeeee', dvr_snat_ns.SNAT_NS_PREFIX + 'fffff'] router_list = [{'id': 'foo', 'distributed': False}, {'id': 'aaaa', 'distributed': False}] other_namespaces = ['qdhcp-aabbcc', 'unknown'] self._cleanup_namespace_test(stale_namespaces, router_list, other_namespaces) def test_cleanup_namespace_with_conf_router_id(self): self.conf.set_override('router_id', 'bbbbb') stale_namespaces = 
[namespaces.NS_PREFIX + 'cccc', namespaces.NS_PREFIX + 'eeeee', namespaces.NS_PREFIX + self.conf.router_id] router_list = [{'id': 'foo', 'distributed': False}, {'id': 'aaaa', 'distributed': False}] other_namespaces = ['qdhcp-aabbcc', 'unknown'] self._cleanup_namespace_test(stale_namespaces, router_list, other_namespaces) def test_create_dvr_gateway(self): agent = l3_agent.L3NATAgent(HOSTNAME, self.conf) router = l3_test_common.prepare_router_data() ri = dvr_router.DvrEdgeRouter(agent, HOSTNAME, router['id'], router, **self.ri_kwargs) port_id = _uuid() subnet_id = _uuid() dvr_gw_port = {'fixed_ips': [{'ip_address': '20.0.0.30', 'prefixlen': 24, 'subnet_id': subnet_id}], 'subnets': [{'id': subnet_id, 'cidr': '20.0.0.0/24', 'gateway_ip': '20.0.0.1'}], 'id': port_id, 'network_id': _uuid(), 'mac_address': 'ca:fe:de:ad:be:ef'} interface_name = ri._get_snat_int_device_name(port_id) self.device_exists.return_value = False with mock.patch.object(ri, 'get_snat_interfaces') as get_interfaces: get_interfaces.return_value = self.snat_ports ri._create_dvr_gateway(dvr_gw_port, interface_name) # check 2 internal ports are plugged # check 1 ext-gw-port is plugged self.assertEqual(3, self.mock_driver.plug.call_count) self.assertEqual(3, self.mock_driver.init_router_port.call_count) def test_process_address_scope(self): router = l3_test_common.prepare_router_data() router['distributed'] = True router['gw_port_host'] = HOSTNAME agent = l3_agent.L3NATAgent(HOSTNAME, self.conf) ri = dvr_router.DvrEdgeRouter(agent, HOSTNAME, router['id'], router, **self.ri_kwargs) ri.get_ex_gw_port = mock.Mock(return_value=None) # Make sure the code doesn't crash if ri.snat_iptables_manager is None. ri.process_address_scope() with mock.patch.object(ri, '_add_address_scope_mark') as mocked_func: ri.snat_iptables_manager = iptables_manager.IptablesManager( namespace=mock.ANY, use_ipv6=False) ri.snat_iptables_manager.defer_apply_off = mock.Mock() ri.process_address_scope() self.assertEqual(2, mocked_func.call_count) def test_get_service_plugin_list(self): service_plugins = [p_const.L3_ROUTER_NAT] self.plugin_api.get_service_plugin_list.return_value = service_plugins agent = l3_agent.L3NATAgent(HOSTNAME, self.conf) self.assertEqual(service_plugins, agent.neutron_service_plugins) self.assertTrue(self.plugin_api.get_service_plugin_list.called) def test_get_service_plugin_list_failed(self): raise_rpc = oslo_messaging.RemoteError() self.plugin_api.get_service_plugin_list.side_effect = raise_rpc agent = l3_agent.L3NATAgent(HOSTNAME, self.conf) self.assertIsNone(agent.neutron_service_plugins) self.assertTrue(self.plugin_api.get_service_plugin_list.called) def test_get_service_plugin_list_retried(self): raise_timeout = oslo_messaging.MessagingTimeout() # Raise a timeout the first 2 times it calls # get_service_plugin_list, then return an empty tuple self.plugin_api.get_service_plugin_list.side_effect = ( raise_timeout, tuple() ) agent = l3_agent.L3NATAgent(HOSTNAME, self.conf) self.assertEqual(tuple(), agent.neutron_service_plugins) def test_external_gateway_removed_ext_gw_port_no_fip_ns(self): self.conf.set_override('state_path', '/tmp') agent = l3_agent.L3NATAgent(HOSTNAME, self.conf) agent.conf.agent_mode = 'dvr_snat' router = l3_test_common.prepare_router_data(num_internal_ports=2) router['gw_port_host'] = HOSTNAME self.mock_driver.unplug.reset_mock() external_net_id = router['gw_port']['network_id'] ri = dvr_router.DvrEdgeRouter( agent, HOSTNAME, router['id'], router, **self.ri_kwargs) ri.remove_floating_ip = mock.Mock() 
agent._fetch_external_net_id = mock.Mock(return_value=external_net_id) ri.ex_gw_port = ri.router['gw_port'] del ri.router['gw_port'] ri.fip_ns = None nat = ri.iptables_manager.ipv4['nat'] nat.clear_rules_by_tag = mock.Mock() nat.add_rule = mock.Mock() ri.snat_namespace = mock.Mock() ri.external_gateway_removed( ri.ex_gw_port, ri.get_external_device_name(ri.ex_gw_port['id'])) self.assertFalse(ri.remove_floating_ip.called) def test_spawn_radvd(self): router = l3_test_common.prepare_router_data(ip_version=6) conffile = '/fake/radvd.conf' pidfile = '/fake/radvd.pid' agent = l3_agent.L3NATAgent(HOSTNAME, self.conf) # we don't want the whole process manager to be mocked to be # able to catch execute() calls self.external_process_p.stop() self.ip_cls_p.stop() get_conf_file_name = 'neutron.agent.linux.utils.get_conf_file_name' get_pid_file_name = ('neutron.agent.linux.external_process.' 'ProcessManager.get_pid_file_name') utils_execute = 'neutron.agent.common.utils.execute' mock.patch(get_conf_file_name).start().return_value = conffile mock.patch(get_pid_file_name).start().return_value = pidfile execute = mock.patch(utils_execute).start() radvd = ra.DaemonMonitor( router['id'], namespaces.RouterNamespace._get_ns_name(router['id']), agent.process_monitor, l3_test_common.FakeDev, self.conf) radvd.enable(router['_interfaces']) cmd = execute.call_args[0][0] self.assertIn('radvd', cmd) _join = lambda *args: ' '.join(args) cmd = _join(*cmd) self.assertIn(_join('-C', conffile), cmd) self.assertIn(_join('-p', pidfile), cmd) self.assertIn(_join('-m', 'syslog'), cmd) def test_generate_radvd_mtu_conf(self): router = l3_test_common.prepare_router_data() ipv6_subnet_modes = [{'ra_mode': l3_constants.IPV6_SLAAC, 'address_mode': l3_constants.IPV6_SLAAC}] network_mtu = '1446' ri = self._process_router_ipv6_subnet_added(router, ipv6_subnet_modes, None, network_mtu) expected = "AdvLinkMTU 1446" ri.agent_conf.set_override('advertise_mtu', False) ri.radvd._generate_radvd_conf(router[l3_constants.INTERFACE_KEY]) self.assertNotIn(expected, self.utils_replace_file.call_args[0][1]) # Verify that MTU is advertised when advertise_mtu is True ri.agent_conf.set_override('advertise_mtu', True) ri.radvd._generate_radvd_conf(router[l3_constants.INTERFACE_KEY]) self.assertIn(expected, self.utils_replace_file.call_args[0][1]) def test_generate_radvd_conf_other_and_managed_flag(self): # expected = {ra_mode: (AdvOtherConfigFlag, AdvManagedFlag), ...} expected = {l3_constants.IPV6_SLAAC: (False, False), l3_constants.DHCPV6_STATELESS: (True, False), l3_constants.DHCPV6_STATEFUL: (False, True)} modes = [l3_constants.IPV6_SLAAC, l3_constants.DHCPV6_STATELESS, l3_constants.DHCPV6_STATEFUL] mode_combos = list(iter_chain(*[[list(combo) for combo in iter_combinations(modes, i)] for i in range(1, len(modes) + 1)])) for mode_list in mode_combos: ipv6_subnet_modes = [{'ra_mode': mode, 'address_mode': mode} for mode in mode_list] router = l3_test_common.prepare_router_data() ri = self._process_router_ipv6_subnet_added(router, ipv6_subnet_modes) ri.radvd._generate_radvd_conf(router[l3_constants.INTERFACE_KEY]) def assertFlag(flag): return (self.assertIn if flag else self.assertNotIn) other_flag, managed_flag = ( any(expected[mode][0] for mode in mode_list), any(expected[mode][1] for mode in mode_list)) assertFlag(other_flag)('AdvOtherConfigFlag on;', self.utils_replace_file.call_args[0][1]) assertFlag(managed_flag)('AdvManagedFlag on;', self.utils_replace_file.call_args[0][1]) def test_generate_radvd_intervals(self): 
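# (A hedged sketch, not from the suite, of the sort of radvd.conf fragment
# these interval assertions expect to be rendered; the surrounding
# directives and the prefix value are made up for illustration:
#
#     interface qr-xxxx
#     {
#         AdvSendAdvert on;
#         MinRtrAdvInterval 22;
#         MaxRtrAdvInterval 66;
#         prefix 2001:db8::/64 { AdvOnLink on; AdvAutonomous on; };
#     };
# )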
self.conf.set_override('min_rtr_adv_interval', 22) self.conf.set_override('max_rtr_adv_interval', 66) router = l3_test_common.prepare_router_data() ipv6_subnet_modes = [{'ra_mode': l3_constants.IPV6_SLAAC, 'address_mode': l3_constants.IPV6_SLAAC}] ri = self._process_router_ipv6_subnet_added(router, ipv6_subnet_modes) ri.radvd._generate_radvd_conf(router[l3_constants.INTERFACE_KEY]) self.assertIn("MinRtrAdvInterval 22", self.utils_replace_file.call_args[0][1]) self.assertIn("MaxRtrAdvInterval 66", self.utils_replace_file.call_args[0][1]) def test_generate_radvd_rdnss_conf(self): router = l3_test_common.prepare_router_data() ipv6_subnet_modes = [{'ra_mode': l3_constants.IPV6_SLAAC, 'address_mode': l3_constants.IPV6_SLAAC}] dns_list = ['fd01:1::100', 'fd01:1::200', 'fd01::300', 'fd01::400'] ri = self._process_router_ipv6_subnet_added(router, ipv6_subnet_modes, dns_nameservers=dns_list) ri.radvd._generate_radvd_conf(router[l3_constants.INTERFACE_KEY]) # Verify that radvd configuration file includes RDNSS entries expected = "RDNSS " for dns in dns_list[0:ra.MAX_RDNSS_ENTRIES]: expected += "%s " % dns self.assertIn(expected, self.utils_replace_file.call_args[0][1]) def _pd_expected_call_external_process(self, requestor, ri, enable=True): expected_calls = [] if enable: expected_calls.append(mock.call(uuid=requestor, service='dibbler', default_cmd_callback=mock.ANY, namespace=ri.ns_name, conf=mock.ANY, pid_file=mock.ANY)) expected_calls.append(mock.call().enable(reload_cfg=False)) else: expected_calls.append(mock.call(uuid=requestor, service='dibbler', namespace=ri.ns_name, conf=mock.ANY, pid_file=mock.ANY)) expected_calls.append(mock.call().disable( get_stop_command=mock.ANY)) return expected_calls def _pd_setup_agent_router(self): router = l3_test_common.prepare_router_data() ri = l3router.RouterInfo(router['id'], router, **self.ri_kwargs) agent = l3_agent.L3NATAgent(HOSTNAME, self.conf) agent.external_gateway_added = mock.Mock() ri.process(agent) agent._router_added(router['id'], router) # Make sure radvd monitor is created if not ri.radvd: ri.radvd = ra.DaemonMonitor(router['id'], ri.ns_name, agent.process_monitor, ri.get_internal_device_name, self.conf) return agent, router, ri def _pd_remove_gw_interface(self, intfs, agent, router, ri): expected_pd_update = {} expected_calls = [] for intf in intfs: requestor_id = self._pd_get_requestor_id(intf, router, ri) expected_calls += (self._pd_expected_call_external_process( requestor_id, ri, False)) for subnet in intf['subnets']: expected_pd_update[subnet['id']] = ( l3_constants.PROVISIONAL_IPV6_PD_PREFIX) # Implement the prefix update notifier # Keep track of the updated prefix self.pd_update = {} def pd_notifier(context, prefix_update): self.pd_update = prefix_update for subnet_id, prefix in six.iteritems(prefix_update): for intf in intfs: for subnet in intf['subnets']: if subnet['id'] == subnet_id: # Update the prefix subnet['cidr'] = prefix break # Remove the gateway interface agent.pd.notifier = pd_notifier agent.pd.remove_gw_interface(router['id']) self._pd_assert_dibbler_calls(expected_calls, self.external_process.mock_calls[-len(expected_calls):]) self.assertEqual(expected_pd_update, self.pd_update) def _pd_remove_interfaces(self, intfs, agent, router, ri): expected_pd_update = [] expected_calls = [] for intf in intfs: # Remove the router interface router[l3_constants.INTERFACE_KEY].remove(intf) requestor_id = self._pd_get_requestor_id(intf, router, ri) expected_calls += (self._pd_expected_call_external_process( requestor_id, ri, False)) for 
subnet in intf['subnets']: expected_pd_update += [{subnet['id']: l3_constants.PROVISIONAL_IPV6_PD_PREFIX}] # Implement the prefix update notifier # Keep track of the updated prefix self.pd_update = [] def pd_notifier(context, prefix_update): self.pd_update.append(prefix_update) for intf in intfs: for subnet in intf['subnets']: if subnet['id'] in prefix_update: # Update the prefix subnet['cidr'] = prefix_update[subnet['id']] # Process the router for removed interfaces agent.pd.notifier = pd_notifier ri.process(agent) # The number of external process calls takes radvd into account. # This is because there is no ipv6 interface any more after removing # the interfaces, and radvd will be killed because of that self._pd_assert_dibbler_calls(expected_calls, self.external_process.mock_calls[-len(expected_calls) - 2:]) self._pd_assert_radvd_calls(ri, False) self.assertEqual(expected_pd_update, self.pd_update) def _pd_get_requestor_id(self, intf, router, ri): ifname = ri.get_internal_device_name(intf['id']) for subnet in intf['subnets']: return dibbler.PDDibbler(router['id'], subnet['id'], ifname).requestor_id def _pd_assert_dibbler_calls(self, expected, actual): '''Check that the external process calls for dibbler are as expected. In the case of multiple pd-enabled router ports, the exact sequence of these calls is not deterministic. It's known, though, that each external_process call is followed by either an enable() or a disable() ''' num_ext_calls = len(expected) // 2 expected_ext_calls = [] actual_ext_calls = [] expected_action_calls = [] actual_action_calls = [] for c in range(num_ext_calls): expected_ext_calls.append(expected[c * 2]) actual_ext_calls.append(actual[c * 2]) expected_action_calls.append(expected[c * 2 + 1]) actual_action_calls.append(actual[c * 2 + 1]) self.assertEqual(expected_action_calls, actual_action_calls) for exp in expected_ext_calls: for act in actual_ext_calls: if exp == act: break else: msg = "Unexpected dibbler external process call." 
self.fail(msg) def _pd_assert_radvd_calls(self, ri, enable=True): exp_calls = self._radvd_expected_call_external_process(ri, enable) self.assertEqual(exp_calls, self.external_process.mock_calls[-len(exp_calls):]) def _pd_get_prefixes(self, agent, router, ri, existing_intfs, new_intfs, mock_get_prefix): # First generate the prefixes that will be used for each interface prefixes = {} expected_pd_update = {} expected_calls = [] for ifno, intf in enumerate(existing_intfs + new_intfs): requestor_id = self._pd_get_requestor_id(intf, router, ri) prefixes[requestor_id] = "2001:cafe:cafe:%d::/64" % ifno if intf in new_intfs: subnet_id = (intf['subnets'][0]['id'] if intf['subnets'] else None) expected_pd_update[subnet_id] = prefixes[requestor_id] expected_calls += ( self._pd_expected_call_external_process(requestor_id, ri)) # Implement the prefix update notifier # Keep track of the updated prefix self.pd_update = {} def pd_notifier(context, prefix_update): self.pd_update = prefix_update for subnet_id, prefix in six.iteritems(prefix_update): for intf in new_intfs: for subnet in intf['subnets']: if subnet['id'] == subnet_id: # Update the prefix subnet['cidr'] = prefix break # Start the dibbler client agent.pd.notifier = pd_notifier agent.pd.process_prefix_update() # Get the prefix and check that the neutron server is notified def get_prefix(pdo): key = '%s:%s:%s' % (pdo.router_id, pdo.subnet_id, pdo.ri_ifname) return prefixes[key] mock_get_prefix.side_effect = get_prefix agent.pd.process_prefix_update() # Make sure that the updated prefixes are expected self._pd_assert_dibbler_calls(expected_calls, self.external_process.mock_calls[-len(expected_calls):]) self.assertEqual(expected_pd_update, self.pd_update) def _pd_add_gw_interface(self, agent, router, ri): gw_ifname = ri.get_external_device_name(router['gw_port']['id']) agent.pd.add_gw_interface(router['id'], gw_ifname) @mock.patch.object(dibbler.PDDibbler, 'get_prefix', autospec=True) @mock.patch.object(dibbler.os, 'getpid', return_value=1234) @mock.patch.object(pd.PrefixDelegation, '_is_lla_active', return_value=True) @mock.patch.object(dibbler.os, 'chmod') @mock.patch.object(dibbler.shutil, 'rmtree') @mock.patch.object(pd.PrefixDelegation, '_get_sync_data') def test_pd_add_remove_subnet(self, mock1, mock2, mock3, mock4, mock_getpid, mock_get_prefix): '''Add and remove one pd-enabled subnet Remove the interface by deleting it from the router ''' # Initial setup agent, router, ri = self._pd_setup_agent_router() # Create one pd-enabled subnet and add router interface intfs = l3_test_common.router_append_pd_enabled_subnet(router) ri.process(agent) # No client should be started since there is no gateway port self.assertFalse(self.external_process.call_count) self.assertFalse(mock_get_prefix.call_count) # Add the gateway interface self._pd_add_gw_interface(agent, router, ri) # Get one prefix self._pd_get_prefixes(agent, router, ri, [], intfs, mock_get_prefix) # Update the router with the new prefix ri.process(agent) # Check that radvd is started and the router port is configured # with the new prefix self._pd_assert_radvd_calls(ri) # Now remove the interface self._pd_remove_interfaces(intfs, agent, router, ri) @mock.patch.object(dibbler.PDDibbler, 'get_prefix', autospec=True) @mock.patch.object(dibbler.os, 'getpid', return_value=1234) @mock.patch.object(pd.PrefixDelegation, '_is_lla_active', return_value=True) @mock.patch.object(dibbler.os, 'chmod') @mock.patch.object(dibbler.shutil, 'rmtree') @mock.patch.object(pd.PrefixDelegation, '_get_sync_data') def 
test_pd_remove_gateway(self, mock1, mock2, mock3, mock4, mock_getpid, mock_get_prefix): '''Add one pd-enabled subnet and remove the gateway port Remove the gateway port and check the prefix is removed ''' # Initial setup agent, router, ri = self._pd_setup_agent_router() # Create one pd-enabled subnet and add router interface intfs = l3_test_common.router_append_pd_enabled_subnet(router) ri.process(agent) # Add the gateway interface self._pd_add_gw_interface(agent, router, ri) # Get one prefix self._pd_get_prefixes(agent, router, ri, [], intfs, mock_get_prefix) # Update the router with the new prefix ri.process(agent) # Check that radvd is started self._pd_assert_radvd_calls(ri) # Now remove the gw interface self._pd_remove_gw_interface(intfs, agent, router, ri) # There will be a router update ri.process(agent) @mock.patch.object(dibbler.PDDibbler, 'get_prefix', autospec=True) @mock.patch.object(dibbler.os, 'getpid', return_value=1234) @mock.patch.object(pd.PrefixDelegation, '_is_lla_active', return_value=True) @mock.patch.object(dibbler.os, 'chmod') @mock.patch.object(dibbler.shutil, 'rmtree') @mock.patch.object(pd.PrefixDelegation, '_get_sync_data') def test_pd_add_remove_2_subnets(self, mock1, mock2, mock3, mock4, mock_getpid, mock_get_prefix): '''Add and remove two pd-enabled subnets Remove the interfaces by deleting them from the router ''' # Initial setup agent, router, ri = self._pd_setup_agent_router() # Create 2 pd-enabled subnets and add router interfaces intfs = l3_test_common.router_append_pd_enabled_subnet(router, count=2) ri.process(agent) # No client should be started self.assertFalse(self.external_process.call_count) self.assertFalse(mock_get_prefix.call_count) # Add the gateway interface self._pd_add_gw_interface(agent, router, ri) # Get prefixes self._pd_get_prefixes(agent, router, ri, [], intfs, mock_get_prefix) # Update the router with the new prefix ri.process(agent) # Check that radvd is started and the router port is configured # with the new prefix self._pd_assert_radvd_calls(ri) # Now remove the interface self._pd_remove_interfaces(intfs, agent, router, ri) @mock.patch.object(dibbler.PDDibbler, 'get_prefix', autospec=True) @mock.patch.object(dibbler.os, 'getpid', return_value=1234) @mock.patch.object(pd.PrefixDelegation, '_is_lla_active', return_value=True) @mock.patch.object(dibbler.os, 'chmod') @mock.patch.object(dibbler.shutil, 'rmtree') @mock.patch.object(pd.PrefixDelegation, '_get_sync_data') def test_pd_remove_gateway_2_subnets(self, mock1, mock2, mock3, mock4, mock_getpid, mock_get_prefix): '''Add one pd-enabled subnet, followed by adding another one Remove the gateway port and check the prefix is removed ''' # Initial setup agent, router, ri = self._pd_setup_agent_router() # Add the gateway interface self._pd_add_gw_interface(agent, router, ri) # Create 1 pd-enabled subnet and add router interface intfs = l3_test_common.router_append_pd_enabled_subnet(router, count=1) ri.process(agent) # Get prefixes self._pd_get_prefixes(agent, router, ri, [], intfs, mock_get_prefix) # Update the router with the new prefix ri.process(agent) # Check that radvd is started self._pd_assert_radvd_calls(ri) # Now add another interface # Create one pd-enabled subnet and add router interface intfs1 = l3_test_common.router_append_pd_enabled_subnet(router, count=1) ri.process(agent) # Get prefixes self._pd_get_prefixes(agent, router, ri, intfs, intfs1, mock_get_prefix) # Update the router with the new prefix ri.process(agent) # Check that radvd is notified for the new prefix 
self._pd_assert_radvd_calls(ri) # Now remove the gw interface self._pd_remove_gw_interface(intfs + intfs1, agent, router, ri) ri.process(agent) neutron-8.4.0/neutron/tests/unit/agent/l3/test_namespace_manager.py0000664000567000056710000001120413044372736026653 0ustar jenkinsjenkins00000000000000# Copyright (c) 2015 Rackspace # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import mock from oslo_utils import uuidutils from neutron.agent.l3 import dvr_fip_ns from neutron.agent.l3 import dvr_snat_ns from neutron.agent.l3 import namespace_manager from neutron.agent.l3 import namespaces from neutron.agent.linux import ip_lib from neutron.tests import base _uuid = uuidutils.generate_uuid class NamespaceManagerTestCaseFramework(base.BaseTestCase): def _create_namespace_manager(self): self.agent_conf = mock.Mock() self.driver = mock.Mock() return namespace_manager.NamespaceManager(self.agent_conf, self.driver) class TestNamespaceManager(NamespaceManagerTestCaseFramework): def setUp(self): super(TestNamespaceManager, self).setUp() self.ns_manager = self._create_namespace_manager() def test_get_prefix_and_id(self): router_id = _uuid() ns_prefix, ns_id = self.ns_manager.get_prefix_and_id( namespaces.NS_PREFIX + router_id) self.assertEqual(namespaces.NS_PREFIX, ns_prefix) self.assertEqual(router_id, ns_id) ns_prefix, ns_id = self.ns_manager.get_prefix_and_id( dvr_snat_ns.SNAT_NS_PREFIX + router_id) self.assertEqual(dvr_snat_ns.SNAT_NS_PREFIX, ns_prefix) self.assertEqual(router_id, ns_id) ns_name = 'dhcp-' + router_id self.assertIsNone(self.ns_manager.get_prefix_and_id(ns_name)) def test_is_managed(self): router_id = _uuid() router_ns_name = namespaces.NS_PREFIX + router_id self.assertTrue(self.ns_manager.is_managed(router_ns_name)) router_ns_name = dvr_snat_ns.SNAT_NS_PREFIX + router_id self.assertTrue(self.ns_manager.is_managed(router_ns_name)) ext_net_id = _uuid() router_ns_name = dvr_fip_ns.FIP_NS_PREFIX + ext_net_id self.assertTrue(self.ns_manager.is_managed(router_ns_name)) self.assertFalse(self.ns_manager.is_managed('dhcp-' + router_id)) def test_list_all(self): ns_names = [namespaces.NS_PREFIX + _uuid(), dvr_snat_ns.SNAT_NS_PREFIX + _uuid(), dvr_fip_ns.FIP_NS_PREFIX + _uuid(), 'dhcp-' + _uuid(), ] # Test the normal path with mock.patch.object(ip_lib.IPWrapper, 'get_namespaces', return_value=ns_names): retrieved_ns_names = self.ns_manager.list_all() self.assertEqual(len(ns_names) - 1, len(retrieved_ns_names)) for i in range(len(retrieved_ns_names)): self.assertIn(ns_names[i], retrieved_ns_names) self.assertNotIn(ns_names[-1], retrieved_ns_names) # Test path where IPWrapper raises exception with mock.patch.object(ip_lib.IPWrapper, 'get_namespaces', side_effect=RuntimeError): retrieved_ns_names = self.ns_manager.list_all() self.assertFalse(retrieved_ns_names) def test_ensure_snat_cleanup(self): router_id = _uuid() with mock.patch.object(self.ns_manager, '_cleanup') as mock_cleanup: self.ns_manager.ensure_snat_cleanup(router_id) mock_cleanup.assert_called_once_with(dvr_snat_ns.SNAT_NS_PREFIX, 
router_id) def test_ensure_router_cleanup(self): router_id = _uuid() ns_names = [namespaces.NS_PREFIX + _uuid() for _ in range(5)] ns_names += [dvr_snat_ns.SNAT_NS_PREFIX + _uuid() for _ in range(5)] ns_names += [namespaces.NS_PREFIX + router_id, dvr_snat_ns.SNAT_NS_PREFIX + router_id] with mock.patch.object(ip_lib.IPWrapper, 'get_namespaces', return_value=ns_names), \ mock.patch.object(self.ns_manager, '_cleanup') as mock_cleanup: self.ns_manager.ensure_router_cleanup(router_id) expected = [mock.call(namespaces.NS_PREFIX, router_id), mock.call(dvr_snat_ns.SNAT_NS_PREFIX, router_id)] mock_cleanup.assert_has_calls(expected, any_order=True) self.assertEqual(2, mock_cleanup.call_count) neutron-8.4.0/neutron/tests/unit/agent/l3/test_ha_router.py0000664000567000056710000000573213044372760025223 0ustar jenkinsjenkins00000000000000# Copyright (c) 2015 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import mock from oslo_utils import uuidutils from neutron.agent.l3 import ha_router from neutron.tests import base _uuid = uuidutils.generate_uuid class TestBasicRouterOperations(base.BaseTestCase): def setUp(self): super(TestBasicRouterOperations, self).setUp() def _create_router(self, router=None, **kwargs): if not router: router = mock.MagicMock() self.agent_conf = mock.Mock() self.router_id = _uuid() return ha_router.HaRouter(mock.sentinel.enqueue_state, self.router_id, router, self.agent_conf, mock.sentinel.driver, **kwargs) def test_get_router_cidrs_returns_ha_cidrs(self): ri = self._create_router() device = mock.MagicMock() device.name.return_value = 'eth2' addresses = ['15.1.2.2/24', '15.1.2.3/32'] ri._get_cidrs_from_keepalived = mock.MagicMock(return_value=addresses) self.assertEqual(set(addresses), ri.get_router_cidrs(device)) def test__add_default_gw_virtual_route(self): ri = self._create_router() mock_instance = mock.Mock() mock_instance.virtual_routes.gateway_routes = [] ri._get_keepalived_instance = mock.Mock(return_value=mock_instance) subnets = [{'id': _uuid(), 'cidr': '20.0.0.0/24', 'gateway_ip': None}] ex_gw_port = {'fixed_ips': [], 'subnets': subnets, 'extra_subnets': [], 'id': _uuid(), 'network_id': _uuid(), 'mac_address': 'ca:fe:de:ad:be:ef'} # Make sure no exception is raised ri._add_default_gw_virtual_route(ex_gw_port, 'qg-abc') self.assertEqual(0, len(mock_instance.virtual_routes.gateway_routes)) subnets.append({'id': _uuid(), 'cidr': '30.0.0.0/24', 'gateway_ip': '30.0.0.1'}) ri._add_default_gw_virtual_route(ex_gw_port, 'qg-abc') self.assertEqual(1, len(mock_instance.virtual_routes.gateway_routes)) subnets[1]['gateway_ip'] = None ri._add_default_gw_virtual_route(ex_gw_port, 'qg-abc') self.assertEqual(0, len(mock_instance.virtual_routes.gateway_routes)) neutron-8.4.0/neutron/tests/unit/agent/l3/test_legacy_router.py0000664000567000056710000000632113044372760026072 0ustar jenkinsjenkins00000000000000# Copyright (c) 2015 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import mock from oslo_utils import uuidutils from neutron.agent.l3 import legacy_router from neutron.agent.linux import ip_lib from neutron.common import constants as l3_constants from neutron.tests import base _uuid = uuidutils.generate_uuid class BasicRouterTestCaseFramework(base.BaseTestCase): def _create_router(self, router=None, **kwargs): if not router: router = mock.MagicMock() self.agent_conf = mock.Mock() self.driver = mock.Mock() self.router_id = _uuid() return legacy_router.LegacyRouter(self.router_id, router, self.agent_conf, self.driver, **kwargs) class TestBasicRouterOperations(BasicRouterTestCaseFramework): def test_remove_floating_ip(self): ri = self._create_router(mock.MagicMock()) device = mock.Mock() cidr = '15.1.2.3/32' ri.remove_floating_ip(device, cidr) device.delete_addr_and_conntrack_state.assert_called_once_with(cidr) def test_remove_external_gateway_ip(self): ri = self._create_router(mock.MagicMock()) device = mock.Mock() cidr = '172.16.0.0/24' ri.remove_external_gateway_ip(device, cidr) device.delete_addr_and_conntrack_state.assert_called_once_with(cidr) @mock.patch.object(ip_lib, 'send_ip_addr_adv_notif') class TestAddFloatingIpWithMockGarp(BasicRouterTestCaseFramework): def test_add_floating_ip(self, send_ip_addr_adv_notif): ri = self._create_router() ri._add_fip_addr_to_device = mock.Mock(return_value=True) ip = '15.1.2.3' result = ri.add_floating_ip({'floating_ip_address': ip}, mock.sentinel.interface_name, mock.sentinel.device) ip_lib.send_ip_addr_adv_notif.assert_called_once_with( ri.ns_name, mock.sentinel.interface_name, ip, self.agent_conf) self.assertEqual(l3_constants.FLOATINGIP_STATUS_ACTIVE, result) def test_add_floating_ip_error(self, send_ip_addr_adv_notif): ri = self._create_router() ri._add_fip_addr_to_device = mock.Mock(return_value=False) result = ri.add_floating_ip({'floating_ip_address': '15.1.2.3'}, mock.sentinel.interface_name, mock.sentinel.device) self.assertFalse(ip_lib.send_ip_addr_adv_notif.called) self.assertEqual(l3_constants.FLOATINGIP_STATUS_ERROR, result) neutron-8.4.0/neutron/tests/unit/agent/l3/__init__.py0000664000567000056710000000000013044372736023715 0ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/tests/unit/agent/l3/test_item_allocator.py0000664000567000056710000001134113044372760026222 0ustar jenkinsjenkins00000000000000# Copyright 2014 Hewlett-Packard Development Company, L.P. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import mock from neutron.agent.l3 import item_allocator as ia from neutron.tests import base class TestObject(object): def __init__(self, value): super(TestObject, self).__init__() self._value = value def __str__(self): return str(self._value) class TestItemAllocator(base.BaseTestCase): def setUp(self): super(TestItemAllocator, self).setUp() def test__init__(self): test_pool = set(TestObject(s) for s in range(32768, 40000)) with mock.patch.object(ia.ItemAllocator, '_write') as write: a = ia.ItemAllocator('/file', TestObject, test_pool) test_object = a.allocate('test') self.assertIn('test', a.allocations) self.assertIn(test_object, a.allocations.values()) self.assertNotIn(test_object, a.pool) self.assertTrue(write.called) def test__init__readfile(self): test_pool = set(TestObject(s) for s in range(32768, 40000)) with mock.patch.object(ia.ItemAllocator, '_read') as read: read.return_value = ["da873ca2,10\n"] a = ia.ItemAllocator('/file', TestObject, test_pool) self.assertIn('da873ca2', a.remembered) self.assertEqual({}, a.allocations) def test_allocate(self): test_pool = set([TestObject(33000), TestObject(33001)]) a = ia.ItemAllocator('/file', TestObject, test_pool) with mock.patch.object(ia.ItemAllocator, '_write') as write: test_object = a.allocate('test') self.assertIn('test', a.allocations) self.assertIn(test_object, a.allocations.values()) self.assertNotIn(test_object, a.pool) self.assertTrue(write.called) def test_allocate_repeated_call_with_same_key(self): test_pool = set([TestObject(33000), TestObject(33001), TestObject(33002), TestObject(33003), TestObject(33004), TestObject(33005)]) a = ia.ItemAllocator('/file', TestObject, test_pool) with mock.patch.object(ia.ItemAllocator, '_write'): test_object = a.allocate('test') test_object1 = a.allocate('test') test_object2 = a.allocate('test') test_object3 = a.allocate('test1') # same value for same key on repeated calls self.assertEqual(test_object, test_object1) self.assertEqual(test_object1, test_object2) # values for different keys should be different self.assertNotEqual(test_object, test_object3) def test_allocate_from_file(self): test_pool = set([TestObject(33000), TestObject(33001)]) with mock.patch.object(ia.ItemAllocator, '_read') as read: read.return_value = ["deadbeef,33000\n"] a = ia.ItemAllocator('/file', TestObject, test_pool) with mock.patch.object(ia.ItemAllocator, '_write') as write: t_obj = a.allocate('deadbeef') self.assertEqual('33000', t_obj._value) self.assertIn('deadbeef', a.allocations) self.assertIn(t_obj, a.allocations.values()) self.assertNotIn(33000, a.pool) self.assertFalse(write.called) def test_allocate_exhausted_pool(self): test_pool = set([TestObject(33000)]) with mock.patch.object(ia.ItemAllocator, '_read') as read: read.return_value = ["deadbeef,33000\n"] a = ia.ItemAllocator('/file', TestObject, test_pool) with mock.patch.object(ia.ItemAllocator, '_write') as write: allocation = a.allocate('abcdef12') self.assertNotIn('deadbeef', a.allocations) self.assertNotIn(allocation, a.pool) self.assertTrue(write.called) def test_release(self): test_pool = set([TestObject(33000), TestObject(33001)]) with mock.patch.object(ia.ItemAllocator, '_write') as write: a = ia.ItemAllocator('/file', TestObject, test_pool) allocation = a.allocate('deadbeef') write.reset_mock() a.release('deadbeef') self.assertNotIn('deadbeef', a.allocations) self.assertIn(allocation, a.pool) self.assertEqual({}, a.allocations) write.assert_called_once_with([]) 
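# A minimal usage sketch of the allocator exercised above (illustrative only;
# the state-file path and keys are hypothetical, and _write is left unmocked
# here, so a real state file would be touched):
#
#     pool = set(TestObject(i) for i in range(33000, 33003))
#     alloc = ia.ItemAllocator('/tmp/ia_state', TestObject, pool)
#     item = alloc.allocate('router-a')   # draws an item from the pool
#     again = alloc.allocate('router-a')  # same key -> same item
#     assert item == again
#     alloc.release('router-a')           # returns the item to the pool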
neutron-8.4.0/neutron/tests/unit/agent/l3/test_fip_rule_priority_allocator.py0000664000567000056710000000445313044372736031043 0ustar jenkinsjenkins00000000000000# Copyright 2014 Hewlett-Packard Development Company, L.P. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron.agent.l3 import fip_rule_priority_allocator as frpa from neutron.tests import base class TestFipPriority(base.BaseTestCase): def setUp(self): super(TestFipPriority, self).setUp() def test__init__(self): test_pr = frpa.FipPriority(10) self.assertEqual(10, test_pr.index) def test__repr__(self): test_pr = frpa.FipPriority(20) self.assertEqual("20", str(test_pr)) def test__eq__(self): left_pr = frpa.FipPriority(10) right_pr = frpa.FipPriority(10) other_pr = frpa.FipPriority(20) self.assertEqual(left_pr, right_pr) self.assertNotEqual(left_pr, other_pr) self.assertNotEqual(right_pr, other_pr) def test__hash__(self): left_pr = frpa.FipPriority(10) right_pr = frpa.FipPriority(10) other_pr = frpa.FipPriority(20) self.assertEqual(hash(left_pr), hash(right_pr)) self.assertNotEqual(hash(left_pr), hash(other_pr)) self.assertNotEqual(hash(other_pr), hash(right_pr)) class TestFipRulePriorityAllocator(base.BaseTestCase): def setUp(self): super(TestFipRulePriorityAllocator, self).setUp() self.priority_rule_start = 100 self.priority_rule_end = 200 self.data_store_path = '/data_store_path_test' def test__init__(self): _frpa = frpa.FipRulePriorityAllocator(self.data_store_path, self.priority_rule_start, self.priority_rule_end) self.assertEqual(self.data_store_path, _frpa.state_file) self.assertEqual(frpa.FipPriority, _frpa.ItemClass) self.assertEqual(100, len(_frpa.pool)) neutron-8.4.0/neutron/tests/unit/agent/l3/test_router_info.py0000664000567000056710000004545713044372760025576 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import mock from oslo_utils import uuidutils from neutron.agent.common import config as agent_config from neutron.agent.l3 import router_info from neutron.agent.linux import ip_lib from neutron.common import constants as l3_constants from neutron.common import exceptions as n_exc from neutron.tests import base _uuid = uuidutils.generate_uuid class TestRouterInfo(base.BaseTestCase): def setUp(self): super(TestRouterInfo, self).setUp() conf = agent_config.setup_conf() self.ip_cls_p = mock.patch('neutron.agent.linux.ip_lib.IPWrapper') ip_cls = self.ip_cls_p.start() self.mock_ip = mock.MagicMock() ip_cls.return_value = self.mock_ip self.ri_kwargs = {'agent_conf': conf, 'interface_driver': mock.sentinel.interface_driver} def _check_agent_method_called(self, calls): self.mock_ip.netns.execute.assert_has_calls( [mock.call(call, check_exit_code=False) for call in calls], any_order=True) def test_routing_table_update(self): ri = router_info.RouterInfo(_uuid(), {}, **self.ri_kwargs) ri.router = {} fake_route1 = {'destination': '135.207.0.0/16', 'nexthop': '1.2.3.4'} fake_route2 = {'destination': '135.207.111.111/32', 'nexthop': '1.2.3.4'} ri.update_routing_table('replace', fake_route1) expected = [['ip', 'route', 'replace', 'to', '135.207.0.0/16', 'via', '1.2.3.4']] self._check_agent_method_called(expected) ri.update_routing_table('delete', fake_route1) expected = [['ip', 'route', 'delete', 'to', '135.207.0.0/16', 'via', '1.2.3.4']] self._check_agent_method_called(expected) ri.update_routing_table('replace', fake_route2) expected = [['ip', 'route', 'replace', 'to', '135.207.111.111/32', 'via', '1.2.3.4']] self._check_agent_method_called(expected) ri.update_routing_table('delete', fake_route2) expected = [['ip', 'route', 'delete', 'to', '135.207.111.111/32', 'via', '1.2.3.4']] self._check_agent_method_called(expected) def test_update_routing_table(self): # Just verify the correct namespace was used in the call uuid = _uuid() netns = 'qrouter-' + uuid fake_route1 = {'destination': '135.207.0.0/16', 'nexthop': '1.2.3.4'} ri = router_info.RouterInfo(uuid, {'id': uuid}, **self.ri_kwargs) ri._update_routing_table = mock.Mock() ri.update_routing_table('replace', fake_route1) ri._update_routing_table.assert_called_once_with('replace', fake_route1, netns) def test_routes_updated(self): ri = router_info.RouterInfo(_uuid(), {}, **self.ri_kwargs) ri.router = {} fake_old_routes = [] fake_new_routes = [{'destination': "110.100.31.0/24", 'nexthop': "10.100.10.30"}, {'destination': "110.100.30.0/24", 'nexthop': "10.100.10.30"}] ri.routes = fake_old_routes ri.router['routes'] = fake_new_routes ri.routes_updated(fake_old_routes, fake_new_routes) expected = [['ip', 'route', 'replace', 'to', '110.100.30.0/24', 'via', '10.100.10.30'], ['ip', 'route', 'replace', 'to', '110.100.31.0/24', 'via', '10.100.10.30']] self._check_agent_method_called(expected) ri.routes = fake_new_routes fake_new_routes = [{'destination': "110.100.30.0/24", 'nexthop': "10.100.10.30"}] ri.router['routes'] = fake_new_routes ri.routes_updated(ri.routes, fake_new_routes) expected = [['ip', 'route', 'delete', 'to', '110.100.31.0/24', 'via', '10.100.10.30']] self._check_agent_method_called(expected) fake_new_routes = [] ri.router['routes'] = fake_new_routes ri.routes_updated(ri.routes, fake_new_routes) expected = [['ip', 'route', 'delete', 'to', '110.100.30.0/24', 'via', '10.100.10.30']] self._check_agent_method_called(expected) def test_add_ports_address_scope_iptables(self): ri = router_info.RouterInfo(_uuid(), {}, **self.ri_kwargs) port = { 'id': 
_uuid(), 'fixed_ips': [{'ip_address': '172.9.9.9'}], 'address_scopes': {l3_constants.IP_VERSION_4: '1234'} } ipv4_mangle = ri.iptables_manager.ipv4['mangle'] = mock.MagicMock() ri.get_address_scope_mark_mask = mock.Mock(return_value='fake_mark') ri.get_internal_device_name = mock.Mock(return_value='fake_device') ri.rt_tables_manager = mock.MagicMock() ri.process_external_port_address_scope_routing = mock.Mock() ri.process_floating_ip_address_scope_rules = mock.Mock() ri.iptables_manager._apply = mock.Mock() ri.router[l3_constants.INTERFACE_KEY] = [port] ri.process_address_scope() ipv4_mangle.add_rule.assert_called_once_with( 'scope', ri.address_scope_mangle_rule('fake_device', 'fake_mark')) def test_address_scope_mark_ids_handling(self): mark_ids = set(range(router_info.ADDRESS_SCOPE_MARK_ID_MIN, router_info.ADDRESS_SCOPE_MARK_ID_MAX)) ri = router_info.RouterInfo(_uuid(), {}, **self.ri_kwargs) # first mark id is used for the default address scope scope_to_mark_id = {router_info.DEFAULT_ADDRESS_SCOPE: mark_ids.pop()} self.assertEqual(scope_to_mark_id, ri._address_scope_to_mark_id) self.assertEqual(mark_ids, ri.available_mark_ids) # new id should be used for new address scope ri.get_address_scope_mark_mask('new_scope') scope_to_mark_id['new_scope'] = mark_ids.pop() self.assertEqual(scope_to_mark_id, ri._address_scope_to_mark_id) self.assertEqual(mark_ids, ri.available_mark_ids) # new router should have it's own mark ids set new_mark_ids = set(range(router_info.ADDRESS_SCOPE_MARK_ID_MIN, router_info.ADDRESS_SCOPE_MARK_ID_MAX)) new_ri = router_info.RouterInfo(_uuid(), {}, **self.ri_kwargs) new_mark_ids.pop() self.assertEqual(new_mark_ids, new_ri.available_mark_ids) self.assertTrue(ri.available_mark_ids != new_ri.available_mark_ids) def test_process_delete(self): ri = router_info.RouterInfo(_uuid(), {}, **self.ri_kwargs) ri.router = {'id': _uuid()} with mock.patch.object(ri, '_process_internal_ports') as p_i_p,\ mock.patch.object(ri, '_process_external_on_delete') as p_e_o_d: self.mock_ip.netns.exists.return_value = False ri.process_delete(mock.Mock()) self.assertFalse(p_i_p.called) self.assertFalse(p_e_o_d.called) p_i_p.reset_mock() p_e_o_d.reset_mock() self.mock_ip.netns.exists.return_value = True ri.process_delete(mock.Mock()) p_i_p.assert_called_once_with(mock.ANY) p_e_o_d.assert_called_once_with(mock.ANY) class BasicRouterTestCaseFramework(base.BaseTestCase): def _create_router(self, router=None, **kwargs): if not router: router = mock.MagicMock() self.agent_conf = mock.Mock() self.router_id = _uuid() return router_info.RouterInfo(self.router_id, router, self.agent_conf, mock.sentinel.interface_driver, **kwargs) class TestBasicRouterOperations(BasicRouterTestCaseFramework): def test_get_floating_ips(self): router = mock.MagicMock() router.get.return_value = [mock.sentinel.floating_ip] ri = self._create_router(router) fips = ri.get_floating_ips() self.assertEqual([mock.sentinel.floating_ip], fips) def test_process_floating_ip_nat_rules(self): ri = self._create_router() fips = [{'fixed_ip_address': mock.sentinel.ip, 'floating_ip_address': mock.sentinel.fip}] ri.get_floating_ips = mock.Mock(return_value=fips) ri.iptables_manager = mock.MagicMock() ipv4_nat = ri.iptables_manager.ipv4['nat'] ri.floating_forward_rules = mock.Mock( return_value=[(mock.sentinel.chain, mock.sentinel.rule)]) ri.process_floating_ip_nat_rules() # Be sure that the rules are cleared first and apply is called last self.assertEqual(mock.call.clear_rules_by_tag('floating_ip'), ipv4_nat.mock_calls[0]) 
        self.assertEqual(mock.call.apply(),
                         ri.iptables_manager.mock_calls[-1])
        # Be sure that add_rule is called somewhere in the middle
        ipv4_nat.add_rule.assert_called_once_with(mock.sentinel.chain,
                                                  mock.sentinel.rule,
                                                  tag='floating_ip')

    def test_process_floating_ip_nat_rules_removed(self):
        ri = self._create_router()
        ri.get_floating_ips = mock.Mock(return_value=[])
        ri.iptables_manager = mock.MagicMock()
        ipv4_nat = ri.iptables_manager.ipv4['nat']
        ri.process_floating_ip_nat_rules()
        # Be sure that the rules are cleared first and apply is called last
        self.assertEqual(mock.call.clear_rules_by_tag('floating_ip'),
                         ipv4_nat.mock_calls[0])
        self.assertEqual(mock.call.apply(),
                         ri.iptables_manager.mock_calls[-1])
        # Be sure that add_rule is not called in the middle
        self.assertFalse(ipv4_nat.add_rule.called)

    def test_process_floating_ip_address_scope_rules_diff_scopes(self):
        ri = self._create_router()
        fips = [{'fixed_ip_address': mock.sentinel.ip,
                 'floating_ip_address': mock.sentinel.fip,
                 'fixed_ip_address_scope': 'scope1'}]
        ri.get_floating_ips = mock.Mock(return_value=fips)
        ri._get_external_address_scope = mock.Mock(return_value='scope2')
        ipv4_mangle = ri.iptables_manager.ipv4['mangle'] = mock.MagicMock()
        ri.floating_mangle_rules = mock.Mock(
            return_value=[(mock.sentinel.chain1, mock.sentinel.rule1)])
        ri.get_external_device_name = mock.Mock()
        ri.process_floating_ip_address_scope_rules()
        # Be sure that the rules are cleared first
        self.assertEqual(mock.call.clear_rules_by_tag('floating_ip'),
                         ipv4_mangle.mock_calls[0])
        # Be sure that add_rule is called somewhere in the middle
        self.assertEqual(1, ipv4_mangle.add_rule.call_count)
        self.assertEqual(mock.call.add_rule(mock.sentinel.chain1,
                                            mock.sentinel.rule1,
                                            tag='floating_ip'),
                         ipv4_mangle.mock_calls[1])

    def test_process_floating_ip_address_scope_rules_same_scopes(self):
        ri = self._create_router()
        fips = [{'fixed_ip_address': mock.sentinel.ip,
                 'floating_ip_address': mock.sentinel.fip,
                 'fixed_ip_address_scope': 'scope1'}]
        ri.get_floating_ips = mock.Mock(return_value=fips)
        ri._get_external_address_scope = mock.Mock(return_value='scope1')
        ipv4_mangle = ri.iptables_manager.ipv4['mangle'] = mock.MagicMock()
        ri.process_floating_ip_address_scope_rules()
        # Be sure that the rules are cleared first
        self.assertEqual(mock.call.clear_rules_by_tag('floating_ip'),
                         ipv4_mangle.mock_calls[0])
        # Be sure that add_rule is not called somewhere in the middle
        self.assertFalse(ipv4_mangle.add_rule.called)

    def test_process_floating_ip_mangle_rules_removed(self):
        ri = self._create_router()
        ri.get_floating_ips = mock.Mock(return_value=[])
        ipv4_mangle = ri.iptables_manager.ipv4['mangle'] = mock.MagicMock()
        ri.process_floating_ip_address_scope_rules()
        # Be sure that the rules are cleared first
        self.assertEqual(mock.call.clear_rules_by_tag('floating_ip'),
                         ipv4_mangle.mock_calls[0])
        # Be sure that add_rule is not called somewhere in the middle
        self.assertFalse(ipv4_mangle.add_rule.called)

    def _test_add_fip_addr_to_device_error(self, device):
        ri = self._create_router()
        ip = '15.1.2.3'
        result = ri._add_fip_addr_to_device(
            {'id': mock.sentinel.id, 'floating_ip_address': ip}, device)
        device.addr.add.assert_called_with(ip + '/32')
        return result

    def test__add_fip_addr_to_device(self):
        result = self._test_add_fip_addr_to_device_error(mock.Mock())
        self.assertTrue(result)

    def test__add_fip_addr_to_device_error(self):
        device = mock.Mock()
        device.addr.add.side_effect = RuntimeError
        result = self._test_add_fip_addr_to_device_error(device)
        self.assertFalse(result)

    def test_process_snat_dnat_for_fip(self):
        ri = self._create_router()
        ri.process_floating_ip_nat_rules = mock.Mock(side_effect=Exception)
        self.assertRaises(n_exc.FloatingIpSetupException,
                          ri.process_snat_dnat_for_fip)
        ri.process_floating_ip_nat_rules.assert_called_once_with()

    def test_put_fips_in_error_state(self):
        ri = self._create_router()
        ri.router = mock.Mock()
        ri.router.get.return_value = [{'id': mock.sentinel.id1},
                                      {'id': mock.sentinel.id2}]
        statuses = ri.put_fips_in_error_state()
        # put_fips_in_error_state() returns a mapping of fip id to status;
        # the original assertion compared it against a list-wrapped dict
        # with assertNotEqual, which passed vacuously.
        expected = {mock.sentinel.id1: l3_constants.FLOATINGIP_STATUS_ERROR,
                    mock.sentinel.id2: l3_constants.FLOATINGIP_STATUS_ERROR}
        self.assertEqual(expected, statuses)

    def test_configure_fip_addresses(self):
        ri = self._create_router()
        ri.process_floating_ip_addresses = mock.Mock(
            side_effect=Exception)
        self.assertRaises(n_exc.FloatingIpSetupException,
                          ri.configure_fip_addresses,
                          mock.sentinel.interface_name)
        ri.process_floating_ip_addresses.assert_called_once_with(
            mock.sentinel.interface_name)

    def test_get_router_cidrs_returns_cidrs(self):
        ri = self._create_router()
        addresses = ['15.1.2.2/24', '15.1.2.3/32']
        device = mock.MagicMock()
        device.addr.list.return_value = [{'cidr': addresses[0]},
                                         {'cidr': addresses[1]}]
        self.assertEqual(set(addresses), ri.get_router_cidrs(device))


@mock.patch.object(ip_lib, 'IPDevice')
class TestFloatingIpWithMockDevice(BasicRouterTestCaseFramework):

    def test_process_floating_ip_addresses_remap(self, IPDevice):
        fip_id = _uuid()
        fip = {
            'id': fip_id, 'port_id': _uuid(),
            'floating_ip_address': '15.1.2.3',
            'fixed_ip_address': '192.168.0.2',
            'status': l3_constants.FLOATINGIP_STATUS_DOWN
        }
        IPDevice.return_value = device = mock.Mock()
        device.addr.list.return_value = [{'cidr': '15.1.2.3/32'}]
        ri = self._create_router()
        ri.get_floating_ips = mock.Mock(return_value=[fip])
        fip_statuses = ri.process_floating_ip_addresses(
            mock.sentinel.interface_name)
        self.assertEqual({fip_id: l3_constants.FLOATINGIP_STATUS_ACTIVE},
                         fip_statuses)
        self.assertFalse(device.addr.add.called)
        self.assertFalse(device.addr.delete.called)

    def test_process_router_with_disabled_floating_ip(self, IPDevice):
        fip_id = _uuid()
        fip = {
            'id': fip_id, 'port_id': _uuid(),
            'floating_ip_address': '15.1.2.3',
            'fixed_ip_address': '192.168.0.2'
        }
        ri = self._create_router()
        ri.floating_ips = [fip]
        ri.get_floating_ips = mock.Mock(return_value=[])
        fip_statuses = ri.process_floating_ip_addresses(
            mock.sentinel.interface_name)
        self.assertIsNone(fip_statuses.get(fip_id))

    def test_process_router_floating_ip_with_device_add_error(self, IPDevice):
        IPDevice.return_value = device = mock.Mock(side_effect=RuntimeError)
        device.addr.list.return_value = []
        fip_id = _uuid()
        fip = {
            'id': fip_id, 'port_id': _uuid(),
            'floating_ip_address': '15.1.2.3',
            'fixed_ip_address': '192.168.0.2',
            'status': 'DOWN'
        }
        ri = self._create_router()
        ri.add_floating_ip = mock.Mock(
            return_value=l3_constants.FLOATINGIP_STATUS_ERROR)
        ri.get_floating_ips = mock.Mock(return_value=[fip])
        fip_statuses = ri.process_floating_ip_addresses(
            mock.sentinel.interface_name)
        self.assertEqual({fip_id: l3_constants.FLOATINGIP_STATUS_ERROR},
                         fip_statuses)

    # TODO(mrsmith): refactor for DVR cases
    def test_process_floating_ip_addresses_remove(self, IPDevice):
        IPDevice.return_value = device = mock.Mock()
        device.addr.list.return_value = [{'cidr': '15.1.2.3/32'}]
        ri = self._create_router()
        ri.remove_floating_ip = mock.Mock()
        ri.router.get = mock.Mock(return_value=[])
        fip_statuses = ri.process_floating_ip_addresses(
            mock.sentinel.interface_name)
        self.assertEqual({}, fip_statuses)
ri.remove_floating_ip.assert_called_once_with(device, '15.1.2.3/32') def test_process_floating_ip_reassignment(self, IPDevice): IPDevice.return_value = device = mock.Mock() device.addr.list.return_value = [{'cidr': '15.1.2.3/32'}] fip_id = _uuid() fip = { 'id': fip_id, 'port_id': _uuid(), 'floating_ip_address': '15.1.2.3', 'fixed_ip_address': '192.168.0.3', 'status': 'DOWN' } ri = self._create_router() ri.get_floating_ips = mock.Mock(return_value=[fip]) ri.move_floating_ip = mock.Mock() ri.fip_map = {'15.1.2.3': '192.168.0.2'} ri.process_floating_ip_addresses(mock.sentinel.interface_name) ri.move_floating_ip.assert_called_once_with(fip) neutron-8.4.0/neutron/tests/unit/agent/l3/test_dvr_local_router.py0000664000567000056710000007054113044372760026600 0ustar jenkinsjenkins00000000000000# Copyright (c) 2015 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import mock import netaddr from oslo_log import log from oslo_utils import uuidutils from neutron.agent.common import config as agent_config from neutron.agent.l3 import agent as l3_agent from neutron.agent.l3 import config as l3_config from neutron.agent.l3 import dvr_local_router as dvr_router from neutron.agent.l3 import ha from neutron.agent.l3 import link_local_allocator as lla from neutron.agent.l3 import router_info from neutron.agent.linux import external_process from neutron.agent.linux import interface from neutron.agent.linux import ip_lib from neutron.common import config as base_config from neutron.common import constants as l3_constants from neutron.common import utils as common_utils from neutron.extensions import portbindings from neutron.tests import base from neutron.tests.common import l3_test_common _uuid = uuidutils.generate_uuid FIP_PRI = 32768 HOSTNAME = 'myhost' class TestDvrRouterOperations(base.BaseTestCase): def setUp(self): super(TestDvrRouterOperations, self).setUp() mock.patch('eventlet.spawn').start() self.conf = agent_config.setup_conf() self.conf.register_opts(base_config.core_opts) log.register_options(self.conf) self.conf.register_opts(agent_config.AGENT_STATE_OPTS, 'AGENT') self.conf.register_opts(l3_config.OPTS) self.conf.register_opts(ha.OPTS) agent_config.register_interface_driver_opts_helper(self.conf) agent_config.register_process_monitor_opts(self.conf) self.conf.register_opts(interface.OPTS) self.conf.register_opts(external_process.OPTS) self.conf.set_override('router_id', 'fake_id') self.conf.set_override('interface_driver', 'neutron.agent.linux.interface.NullDriver') self.conf.set_override('send_arp_for_ha', 1) self.conf.set_override('state_path', '') self.device_exists_p = mock.patch( 'neutron.agent.linux.ip_lib.device_exists') self.device_exists = self.device_exists_p.start() self.ensure_dir = mock.patch('neutron.common.utils.ensure_dir').start() mock.patch('neutron.agent.linux.keepalived.KeepalivedManager' '.get_full_config_file_path').start() self.utils_exec_p = mock.patch( 'neutron.agent.linux.utils.execute') self.utils_exec = self.utils_exec_p.start() self.utils_replace_file_p = 
mock.patch( 'neutron.common.utils.replace_file') self.utils_replace_file = self.utils_replace_file_p.start() self.external_process_p = mock.patch( 'neutron.agent.linux.external_process.ProcessManager') self.external_process = self.external_process_p.start() self.process_monitor = mock.patch( 'neutron.agent.linux.external_process.ProcessMonitor').start() self.send_adv_notif_p = mock.patch( 'neutron.agent.linux.ip_lib.send_ip_addr_adv_notif') self.send_adv_notif = self.send_adv_notif_p.start() self.dvr_cls_p = mock.patch('neutron.agent.linux.interface.NullDriver') driver_cls = self.dvr_cls_p.start() self.mock_driver = mock.MagicMock() self.mock_driver.DEV_NAME_LEN = ( interface.LinuxInterfaceDriver.DEV_NAME_LEN) driver_cls.return_value = self.mock_driver self.ip_cls_p = mock.patch('neutron.agent.linux.ip_lib.IPWrapper') ip_cls = self.ip_cls_p.start() self.mock_ip = mock.MagicMock() ip_cls.return_value = self.mock_ip ip_rule = mock.patch('neutron.agent.linux.ip_lib.IPRule').start() self.mock_rule = mock.MagicMock() ip_rule.return_value = self.mock_rule ip_dev = mock.patch('neutron.agent.linux.ip_lib.IPDevice').start() self.mock_ip_dev = mock.MagicMock() ip_dev.return_value = self.mock_ip_dev self.l3pluginApi_cls_p = mock.patch( 'neutron.agent.l3.agent.L3PluginApi') l3pluginApi_cls = self.l3pluginApi_cls_p.start() self.plugin_api = mock.MagicMock() l3pluginApi_cls.return_value = self.plugin_api self.looping_call_p = mock.patch( 'oslo_service.loopingcall.FixedIntervalLoopingCall') self.looping_call_p.start() subnet_id_1 = _uuid() subnet_id_2 = _uuid() self.snat_ports = [{'subnets': [{'cidr': '152.2.0.0/16', 'gateway_ip': '152.2.0.1', 'id': subnet_id_1}], 'network_id': _uuid(), 'device_owner': l3_constants.DEVICE_OWNER_ROUTER_SNAT, 'mac_address': 'fa:16:3e:80:8d:80', 'fixed_ips': [{'subnet_id': subnet_id_1, 'ip_address': '152.2.0.13', 'prefixlen': 16}], 'id': _uuid(), 'device_id': _uuid()}, {'subnets': [{'cidr': '152.10.0.0/16', 'gateway_ip': '152.10.0.1', 'id': subnet_id_2}], 'network_id': _uuid(), 'device_owner': l3_constants.DEVICE_OWNER_ROUTER_SNAT, 'mac_address': 'fa:16:3e:80:8d:80', 'fixed_ips': [{'subnet_id': subnet_id_2, 'ip_address': '152.10.0.13', 'prefixlen': 16}], 'id': _uuid(), 'device_id': _uuid()}] self.ri_kwargs = {'agent_conf': self.conf, 'interface_driver': self.mock_driver} def _create_router(self, router=None, **kwargs): agent = l3_agent.L3NATAgent(HOSTNAME, self.conf) self.router_id = _uuid() if not router: router = mock.MagicMock() return dvr_router.DvrLocalRouter(agent, HOSTNAME, self.router_id, router, self.conf, mock.Mock(), **kwargs) def test_create_dvr_fip_interfaces_update(self): ri = self._create_router() fip_agent_port = {'subnets': []} ri.get_floating_agent_gw_interface = mock.Mock( return_value=fip_agent_port) ri.get_floating_ips = mock.Mock(return_value=True) ri.fip_ns = mock.Mock() ri.fip_ns.subscribe.return_value = False ex_gw_port = {'network_id': 'fake_net_id'} ri.create_dvr_fip_interfaces(ex_gw_port) ri.fip_ns.create_or_update_gateway_port.assert_called_once_with( fip_agent_port) def test_get_floating_ips_dvr(self): router = mock.MagicMock() router.get.return_value = [{'host': HOSTNAME}, {'host': mock.sentinel.otherhost}] ri = self._create_router(router) fips = ri.get_floating_ips() self.assertEqual([{'host': HOSTNAME}], fips) @mock.patch.object(ip_lib, 'send_ip_addr_adv_notif') @mock.patch.object(ip_lib, 'IPDevice') @mock.patch.object(ip_lib, 'IPRule') def test_floating_ip_added_dist(self, mIPRule, mIPDevice, mock_adv_notif): router = mock.MagicMock() ri = 
self._create_router(router) ext_net_id = _uuid() subnet_id = _uuid() agent_gw_port = {'fixed_ips': [{'ip_address': '20.0.0.30', 'prefixlen': 24, 'subnet_id': subnet_id}], 'subnets': [{'id': subnet_id, 'cidr': '20.0.0.0/24', 'gateway_ip': '20.0.0.1'}], 'id': _uuid(), 'network_id': ext_net_id, 'mac_address': 'ca:fe:de:ad:be:ef'} fip = {'id': _uuid(), 'host': HOSTNAME, 'floating_ip_address': '15.1.2.3', 'fixed_ip_address': '192.168.0.1', 'floating_network_id': ext_net_id, 'port_id': _uuid()} ri.fip_ns = mock.Mock() ri.fip_ns.agent_gateway_port = agent_gw_port ri.fip_ns.allocate_rule_priority.return_value = FIP_PRI subnet = lla.LinkLocalAddressPair('169.254.30.42/31') ri.rtr_fip_subnet = subnet ri.fip_ns.local_subnets = mock.Mock() ri.fip_ns.local_subnets.allocate.return_value = subnet ri.dist_fip_count = 0 ip_cidr = common_utils.ip_to_cidr(fip['floating_ip_address']) ri.floating_ip_added_dist(fip, ip_cidr) mIPRule().rule.add.assert_called_with(ip='192.168.0.1', table=16, priority=FIP_PRI) ri.fip_ns.local_subnets.allocate.assert_not_called() self.assertEqual(1, ri.dist_fip_count) # Validate that fip_ns.local_subnets is called when # rtr_fip_subnet is None ri.rtr_fip_subnet = None ri.floating_ip_added_dist(fip, ip_cidr) mIPRule().rule.add.assert_called_with(ip='192.168.0.1', table=16, priority=FIP_PRI) ri.fip_ns.local_subnets.allocate.assert_called_once_with(ri.router_id) # TODO(mrsmith): add more asserts @mock.patch.object(ip_lib, 'IPWrapper') @mock.patch.object(ip_lib, 'IPDevice') @mock.patch.object(ip_lib, 'IPRule') def test_floating_ip_removed_dist(self, mIPRule, mIPDevice, mIPWrapper): router = mock.MagicMock() ri = self._create_router(router) subnet_id = _uuid() agent_gw_port = {'fixed_ips': [{'ip_address': '20.0.0.30', 'prefixlen': 24, 'subnet_id': subnet_id}], 'subnets': [{'id': subnet_id, 'cidr': '20.0.0.0/24', 'gateway_ip': '20.0.0.1'}], 'id': _uuid(), 'network_id': _uuid(), 'mac_address': 'ca:fe:de:ad:be:ef'} fip_cidr = '11.22.33.44/24' ri.dist_fip_count = 2 ri.fip_ns = mock.Mock() ri.fip_ns.get_name.return_value = 'fip_ns_name' ri.floating_ips_dict['11.22.33.44'] = FIP_PRI ri.fip_2_rtr = '11.22.33.42' ri.rtr_2_fip = '11.22.33.40' ri.fip_ns.agent_gateway_port = agent_gw_port s = lla.LinkLocalAddressPair('169.254.30.42/31') ri.rtr_fip_subnet = s ri.fip_ns.local_subnets = mock.Mock() ri.floating_ip_removed_dist(fip_cidr) mIPRule().rule.delete.assert_called_with( ip=str(netaddr.IPNetwork(fip_cidr).ip), table=16, priority=FIP_PRI) mIPDevice().route.delete_route.assert_called_with(fip_cidr, str(s.ip)) ri.fip_ns.local_subnets.allocate.assert_not_called() ri.dist_fip_count = 1 s1 = lla.LinkLocalAddressPair('15.1.2.3/32') ri.rtr_fip_subnet = None ri.fip_ns.local_subnets.allocate.return_value = s1 _, fip_to_rtr = s1.get_pair() fip_ns = ri.fip_ns ri.floating_ip_removed_dist(fip_cidr) self.assertTrue(fip_ns.destroyed) mIPWrapper().del_veth.assert_called_once_with( fip_ns.get_int_device_name(router['id'])) mIPDevice().route.delete_gateway.assert_called_once_with( str(fip_to_rtr.ip), table=16) self.assertFalse(ri.fip_ns.unsubscribe.called) ri.fip_ns.local_subnets.allocate.assert_called_once_with(ri.router_id) @mock.patch.object(ip_lib, 'IPRule') def test_floating_ip_moved_dist(self, mIPRule): router = mock.MagicMock() ri = self._create_router(router) floating_ip_address = '15.1.2.3' fip = {'floating_ip_address': floating_ip_address, 'fixed_ip_address': '192.168.0.1'} ri.floating_ips_dict['15.1.2.3'] = FIP_PRI ri.fip_ns = mock.Mock() ri.fip_ns.allocate_rule_priority.return_value = FIP_PRI 
ri.floating_ip_moved_dist(fip) mIPRule().rule.delete.assert_called_once_with( ip=floating_ip_address, table=16, priority=FIP_PRI) ri.fip_ns.deallocate_rule_priority.assert_called_once_with( floating_ip_address) ri.fip_ns.allocate_rule_priority.assert_called_once_with( floating_ip_address) mIPRule().rule.add.assert_called_with(ip='192.168.0.1', table=16, priority=FIP_PRI) def _test_add_floating_ip(self, ri, fip, is_failure): ri.floating_ip_added_dist = mock.Mock() result = ri.add_floating_ip(fip, mock.sentinel.interface_name, mock.sentinel.device) ri.floating_ip_added_dist.assert_called_once_with( fip, mock.ANY) return result def test_add_floating_ip(self): ri = self._create_router(mock.MagicMock()) ip = '15.1.2.3' fip = {'floating_ip_address': ip} result = self._test_add_floating_ip(ri, fip, True) ri.floating_ip_added_dist.assert_called_once_with(fip, ip + '/32') self.assertEqual(l3_constants.FLOATINGIP_STATUS_ACTIVE, result) @mock.patch.object(router_info.RouterInfo, 'remove_floating_ip') def test_remove_floating_ip(self, super_remove_floating_ip): ri = self._create_router(mock.MagicMock()) ri.floating_ip_removed_dist = mock.Mock() ri.remove_floating_ip(mock.sentinel.device, mock.sentinel.ip_cidr) self.assertFalse(super_remove_floating_ip.called) ri.floating_ip_removed_dist.assert_called_once_with( mock.sentinel.ip_cidr) def test__get_internal_port(self): ri = self._create_router() port = {'fixed_ips': [{'subnet_id': mock.sentinel.subnet_id}]} router_ports = [port] ri.router.get.return_value = router_ports self.assertEqual(port, ri._get_internal_port(mock.sentinel.subnet_id)) def test__get_internal_port_not_found(self): ri = self._create_router() port = {'fixed_ips': [{'subnet_id': mock.sentinel.subnet_id}]} router_ports = [port] ri.router.get.return_value = router_ports self.assertIsNone(ri._get_internal_port(mock.sentinel.subnet_id2)) def test__get_snat_idx_ipv4(self): ip_cidr = '101.12.13.00/24' ri = self._create_router(mock.MagicMock()) snat_idx = ri._get_snat_idx(ip_cidr) # 0x650C0D00 is numerical value of 101.12.13.00 self.assertEqual(0x650C0D00, snat_idx) def test__get_snat_idx_ipv6(self): ip_cidr = '2620:0:a03:e100::/64' ri = self._create_router(mock.MagicMock()) snat_idx = ri._get_snat_idx(ip_cidr) # 0x3D345705 is 30 bit xor folded crc32 of the ip_cidr self.assertEqual(0x3D345705, snat_idx) def test__get_snat_idx_ipv6_below_32768(self): ip_cidr = 'd488::/30' # crc32 of this ip_cidr is 0x1BD7 ri = self._create_router(mock.MagicMock()) snat_idx = ri._get_snat_idx(ip_cidr) # 0x1BD7 + 0x3FFFFFFF = 0x40001BD6 self.assertEqual(0x40001BD6, snat_idx) def test__set_subnet_arp_info(self): agent = l3_agent.L3NATAgent(HOSTNAME, self.conf) router = l3_test_common.prepare_router_data(num_internal_ports=2) router['distributed'] = True ri = dvr_router.DvrLocalRouter( agent, HOSTNAME, router['id'], router, **self.ri_kwargs) ports = ri.router.get(l3_constants.INTERFACE_KEY, []) subnet_id = l3_test_common.get_subnet_id(ports[0]) test_ports = [{'mac_address': '00:11:22:33:44:55', 'device_owner': l3_constants.DEVICE_OWNER_DHCP, 'fixed_ips': [{'ip_address': '1.2.3.4', 'prefixlen': 24, 'subnet_id': subnet_id}]}] self.plugin_api.get_ports_by_subnet.return_value = test_ports # Test basic case ports[0]['subnets'] = [{'id': subnet_id, 'cidr': '1.2.3.0/24'}] with mock.patch.object(ri, '_process_arp_cache_for_internal_port') as parp: ri._set_subnet_arp_info(subnet_id) self.assertEqual(1, parp.call_count) self.mock_ip_dev.neigh.add.assert_called_once_with( '1.2.3.4', '00:11:22:33:44:55') # Test negative case 
        router['distributed'] = False
        ri._set_subnet_arp_info(subnet_id)
        # never_called() is a silent no-op on a mock; assert explicitly
        # that no additional ARP entry was added for the non-distributed
        # router (the single call above came from the basic case)
        self.assertEqual(1, self.mock_ip_dev.neigh.add.call_count)

    def test_add_arp_entry(self):
        agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
        router = l3_test_common.prepare_router_data(num_internal_ports=2)
        router['distributed'] = True
        subnet_id = l3_test_common.get_subnet_id(
            router[l3_constants.INTERFACE_KEY][0])
        arp_table = {'ip_address': '1.7.23.11',
                     'mac_address': '00:11:22:33:44:55',
                     'subnet_id': subnet_id}
        payload = {'arp_table': arp_table, 'router_id': router['id']}
        agent._router_added(router['id'], router)
        agent.add_arp_entry(None, payload)
        agent.router_deleted(None, router['id'])
        self.mock_ip_dev.neigh.add.assert_called_once_with(
            '1.7.23.11', '00:11:22:33:44:55')

    def test_add_arp_entry_no_routerinfo(self):
        agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
        router = l3_test_common.prepare_router_data(num_internal_ports=2)
        subnet_id = l3_test_common.get_subnet_id(
            router[l3_constants.INTERFACE_KEY][0])
        arp_table = {'ip_address': '1.7.23.11',
                     'mac_address': '00:11:22:33:44:55',
                     'subnet_id': subnet_id}
        payload = {'arp_table': arp_table, 'router_id': router['id']}
        agent.add_arp_entry(None, payload)

    def test__update_arp_entry_with_no_subnet(self):
        ri = dvr_router.DvrLocalRouter(
            mock.sentinel.agent,
            HOSTNAME,
            'foo_router_id',
            {'distributed': True, 'gw_port_host': HOSTNAME},
            **self.ri_kwargs)
        with mock.patch.object(l3_agent.ip_lib, 'IPDevice') as f:
            ri._update_arp_entry(mock.ANY, mock.ANY, 'foo_subnet_id', 'add')
        self.assertFalse(f.call_count)

    def _setup_test_for_arp_entry_cache(self):
        agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
        router = l3_test_common.prepare_router_data(num_internal_ports=2)
        router['distributed'] = True
        ri = dvr_router.DvrLocalRouter(
            agent, HOSTNAME, router['id'], router, **self.ri_kwargs)
        subnet_id = l3_test_common.get_subnet_id(
            ri.router[l3_constants.INTERFACE_KEY][0])
        return ri, subnet_id

    def test__update_arp_entry_calls_arp_cache_with_no_device(self):
        ri, subnet_id = self._setup_test_for_arp_entry_cache()
        state = True
        with mock.patch.object(l3_agent.ip_lib, 'IPDevice') as rtrdev,\
                mock.patch.object(ri, '_cache_arp_entry') as arp_cache:
            rtrdev.return_value.exists.return_value = False
            state = ri._update_arp_entry(
                mock.ANY, mock.ANY, subnet_id, 'add')
        self.assertFalse(state)
        self.assertTrue(arp_cache.called)
        arp_cache.assert_called_once_with(mock.ANY, mock.ANY,
                                          subnet_id, 'add')
        self.assertFalse(rtrdev.neigh.add.called)

    def test__process_arp_cache_for_internal_port(self):
        ri, subnet_id = self._setup_test_for_arp_entry_cache()
        ri._cache_arp_entry('1.7.23.11', '00:11:22:33:44:55',
                            subnet_id, 'add')
        self.assertEqual(1, len(ri._pending_arp_set))
        with mock.patch.object(ri, '_update_arp_entry') as update_arp:
            update_arp.return_value = True
            ri._process_arp_cache_for_internal_port(subnet_id)
        self.assertEqual(0, len(ri._pending_arp_set))

    def test__delete_arp_cache_for_internal_port(self):
        ri, subnet_id = self._setup_test_for_arp_entry_cache()
        ri._cache_arp_entry('1.7.23.11', '00:11:22:33:44:55',
                            subnet_id, 'add')
        self.assertEqual(1, len(ri._pending_arp_set))
        ri._delete_arp_cache_for_internal_port(subnet_id)
        self.assertEqual(0, len(ri._pending_arp_set))

    def test_del_arp_entry(self):
        agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
        router = l3_test_common.prepare_router_data(num_internal_ports=2)
        router['distributed'] = True
        subnet_id = l3_test_common.get_subnet_id(
            router[l3_constants.INTERFACE_KEY][0])
        arp_table = {'ip_address': '1.5.25.15',
                     'mac_address': '00:44:33:22:11:55',
                     'subnet_id': subnet_id}
        payload = {'arp_table':
arp_table, 'router_id': router['id']} agent._router_added(router['id'], router) # first add the entry agent.add_arp_entry(None, payload) # now delete it agent.del_arp_entry(None, payload) self.mock_ip_dev.neigh.delete.assert_called_once_with( '1.5.25.15', '00:44:33:22:11:55') agent.router_deleted(None, router['id']) def test_get_floating_agent_gw_interfaces(self): fake_network_id = _uuid() subnet_id = _uuid() agent_gateway_port = ( [{'fixed_ips': [{'ip_address': '20.0.0.30', 'prefixlen': 24, 'subnet_id': subnet_id}], 'subnets': [{'id': subnet_id, 'cidr': '20.0.0.0/24', 'gateway_ip': '20.0.0.1'}], 'id': _uuid(), portbindings.HOST_ID: 'myhost', 'device_owner': l3_constants.DEVICE_OWNER_AGENT_GW, 'network_id': fake_network_id, 'mac_address': 'ca:fe:de:ad:be:ef'}] ) router = l3_test_common.prepare_router_data(enable_snat=True) router[l3_constants.FLOATINGIP_AGENT_INTF_KEY] = agent_gateway_port router['distributed'] = True agent = l3_agent.L3NATAgent(HOSTNAME, self.conf) ri = dvr_router.DvrLocalRouter( agent, HOSTNAME, router['id'], router, **self.ri_kwargs) self.assertEqual( agent_gateway_port[0], ri.get_floating_agent_gw_interface(fake_network_id)) def test_process_router_dist_floating_ip_add(self): fake_floatingips = {'floatingips': [ {'id': _uuid(), 'host': HOSTNAME, 'floating_ip_address': '15.1.2.3', 'fixed_ip_address': '192.168.0.1', 'floating_network_id': mock.sentinel.ext_net_id, 'port_id': _uuid()}, {'id': _uuid(), 'host': 'some-other-host', 'floating_ip_address': '15.1.2.4', 'fixed_ip_address': '192.168.0.10', 'floating_network_id': mock.sentinel.ext_net_id, 'port_id': _uuid()}]} router = l3_test_common.prepare_router_data(enable_snat=True) router[l3_constants.FLOATINGIP_KEY] = fake_floatingips['floatingips'] router['distributed'] = True agent = l3_agent.L3NATAgent(HOSTNAME, self.conf) ri = dvr_router.DvrLocalRouter(agent, HOSTNAME, router['id'], router, **self.ri_kwargs) ri.iptables_manager.ipv4['nat'] = mock.MagicMock() ri.dist_fip_count = 0 fip_ns = agent.get_fip_ns(mock.sentinel.ext_net_id) subnet_id = _uuid() fip_ns.agent_gateway_port = ( {'fixed_ips': [{'ip_address': '20.0.0.30', 'subnet_id': subnet_id}], 'subnets': [{'id': subnet_id, 'cidr': '20.0.0.0/24', 'gateway_ip': '20.0.0.1'}], 'id': _uuid(), 'network_id': _uuid(), 'mac_address': 'ca:fe:de:ad:be:ef'} ) def _test_ext_gw_updated_dvr_agent_mode(self, host, agent_mode, expected_call_count): router = l3_test_common.prepare_router_data(num_internal_ports=2) agent = l3_agent.L3NATAgent(HOSTNAME, self.conf) ri = dvr_router.DvrLocalRouter(agent, HOSTNAME, router['id'], router, **self.ri_kwargs) interface_name, ex_gw_port = l3_test_common.prepare_ext_gw_test(self, ri) ri._external_gateway_added = mock.Mock() # test agent mode = dvr (compute node) router['gw_port_host'] = host agent.conf.agent_mode = agent_mode ri.external_gateway_updated(ex_gw_port, interface_name) # no gateway should be added on dvr node self.assertEqual(expected_call_count, ri._external_gateway_added.call_count) def test_ext_gw_updated_dvr_agent_mode(self): # no gateway should be added on dvr node self._test_ext_gw_updated_dvr_agent_mode('any-foo', 'dvr', 0) def test_ext_gw_updated_dvr_agent_mode_host(self): # no gateway should be added on dvr node self._test_ext_gw_updated_dvr_agent_mode(HOSTNAME, 'dvr', 0) def test_external_gateway_removed_ext_gw_port_and_fip(self): self.conf.set_override('state_path', '/tmp') agent = l3_agent.L3NATAgent(HOSTNAME, self.conf) agent.conf.agent_mode = 'dvr' router = l3_test_common.prepare_router_data(num_internal_ports=2) 
router['gw_port_host'] = HOSTNAME self.mock_driver.unplug.reset_mock() external_net_id = router['gw_port']['network_id'] ri = dvr_router.DvrLocalRouter( agent, HOSTNAME, router['id'], router, **self.ri_kwargs) ri.remove_floating_ip = mock.Mock() agent._fetch_external_net_id = mock.Mock(return_value=external_net_id) ri.ex_gw_port = ri.router['gw_port'] del ri.router['gw_port'] ri.fip_ns = None nat = ri.iptables_manager.ipv4['nat'] nat.clear_rules_by_tag = mock.Mock() nat.add_rule = mock.Mock() ri.fip_ns = agent.get_fip_ns(external_net_id) subnet_id = _uuid() ri.fip_ns.agent_gateway_port = { 'fixed_ips': [{ 'ip_address': '20.0.0.30', 'prefixlen': 24, 'subnet_id': subnet_id }], 'subnets': [{'id': subnet_id, 'cidr': '20.0.0.0/24', 'gateway_ip': '20.0.0.1'}], 'id': _uuid(), 'network_id': external_net_id, 'mac_address': 'ca:fe:de:ad:be:ef'} vm_floating_ip = '19.4.4.2' ri.floating_ips_dict[vm_floating_ip] = FIP_PRI ri.dist_fip_count = 1 ri.rtr_fip_subnet = ri.fip_ns.local_subnets.allocate(ri.router_id) _, fip_to_rtr = ri.rtr_fip_subnet.get_pair() self.mock_ip.get_devices.return_value = [ l3_test_common.FakeDev(ri.fip_ns.get_ext_device_name(_uuid()))] ri.get_router_cidrs = mock.Mock( return_value={vm_floating_ip + '/32', '19.4.4.1/24'}) self.device_exists.return_value = True ri.external_gateway_removed( ri.ex_gw_port, ri.get_external_device_name(ri.ex_gw_port['id'])) ri.remove_floating_ip.assert_called_once_with(self.mock_ip_dev, '19.4.4.2/32') neutron-8.4.0/neutron/tests/unit/agent/l3/test_link_local_allocator.py0000664000567000056710000000213013044372736027372 0ustar jenkinsjenkins00000000000000# Copyright 2014 Hewlett-Packard Development Company, L.P. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import netaddr from neutron.agent.l3 import link_local_allocator as lla from neutron.tests import base class TestLinkLocalAddrAllocator(base.BaseTestCase): def setUp(self): super(TestLinkLocalAddrAllocator, self).setUp() self.subnet = netaddr.IPNetwork('169.254.31.0/24') def test__init__(self): a = lla.LinkLocalAllocator('/file', self.subnet.cidr) self.assertEqual('/file', a.state_file) self.assertEqual({}, a.allocations) neutron-8.4.0/neutron/tests/unit/agent/l3/test_dvr_fip_ns.py0000664000567000056710000003710413044372760025362 0ustar jenkinsjenkins00000000000000# Copyright (c) 2015 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
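# [Illustrative sketch added in review; not part of the original module.]
# The rule-priority tests below exercise a simple pool round-trip; using
# only names that appear in this module, the asserted behaviour is:
#
#     pr = fip_ns.allocate_rule_priority('20.0.0.30')
#     # '20.0.0.30' is now a key in fip_ns._rule_priorities.allocations,
#     # and pr has been removed from fip_ns._rule_priorities.pool
#     fip_ns.deallocate_rule_priority('20.0.0.30')
#     # the mapping is dropped and pr is returned to the pool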
import copy import mock from oslo_utils import uuidutils from neutron.agent.common import utils from neutron.agent.l3 import dvr_fip_ns from neutron.agent.l3 import link_local_allocator as lla from neutron.agent.linux import ip_lib from neutron.agent.linux import iptables_manager from neutron.common import exceptions as n_exc from neutron.tests import base _uuid = uuidutils.generate_uuid class TestDvrFipNs(base.BaseTestCase): def setUp(self): super(TestDvrFipNs, self).setUp() self.conf = mock.Mock() self.conf.state_path = '/tmp' self.driver = mock.Mock() self.driver.DEV_NAME_LEN = 14 self.net_id = _uuid() self.fip_ns = dvr_fip_ns.FipNamespace(self.net_id, self.conf, self.driver, use_ipv6=True) def test_subscribe(self): is_first = self.fip_ns.subscribe(mock.sentinel.external_net_id) self.assertTrue(is_first) def test_subscribe_not_first(self): self.fip_ns.subscribe(mock.sentinel.external_net_id) is_first = self.fip_ns.subscribe(mock.sentinel.external_net_id2) self.assertFalse(is_first) def test_unsubscribe(self): self.fip_ns.subscribe(mock.sentinel.external_net_id) is_last = self.fip_ns.unsubscribe(mock.sentinel.external_net_id) self.assertTrue(is_last) def test_unsubscribe_not_last(self): self.fip_ns.subscribe(mock.sentinel.external_net_id) self.fip_ns.subscribe(mock.sentinel.external_net_id2) is_last = self.fip_ns.unsubscribe(mock.sentinel.external_net_id2) self.assertFalse(is_last) def test_allocate_rule_priority(self): pr = self.fip_ns.allocate_rule_priority('20.0.0.30') self.assertIn('20.0.0.30', self.fip_ns._rule_priorities.allocations) self.assertNotIn(pr, self.fip_ns._rule_priorities.pool) def test_deallocate_rule_priority(self): pr = self.fip_ns.allocate_rule_priority('20.0.0.30') self.fip_ns.deallocate_rule_priority('20.0.0.30') self.assertNotIn('20.0.0.30', self.fip_ns._rule_priorities.allocations) self.assertIn(pr, self.fip_ns._rule_priorities.pool) def _get_agent_gw_port(self): v4_subnet_id = _uuid() v6_subnet_id = _uuid() agent_gw_port = {'fixed_ips': [{'ip_address': '20.0.0.30', 'prefixlen': 24, 'subnet_id': v4_subnet_id}, {'ip_address': 'cafe:dead:beef::3', 'prefixlen': 64, 'subnet_id': v6_subnet_id}], 'subnets': [{'id': v4_subnet_id, 'cidr': '20.0.0.0/24', 'gateway_ip': '20.0.0.1'}, {'id': v6_subnet_id, 'cidr': 'cafe:dead:beef::/64', 'gateway_ip': 'cafe:dead:beef::1'}], 'id': _uuid(), 'network_id': self.net_id, 'mac_address': 'ca:fe:de:ad:be:ef'} return agent_gw_port @mock.patch.object(ip_lib, 'IPWrapper') @mock.patch.object(ip_lib, 'device_exists') @mock.patch.object(dvr_fip_ns.FipNamespace, 'create') def test_create_gateway_port(self, fip_create, device_exists, ip_wrapper): agent_gw_port = self._get_agent_gw_port() interface_name = self.fip_ns.get_ext_device_name(agent_gw_port['id']) device_exists.return_value = False self.fip_ns._update_gateway_port = mock.Mock() self.fip_ns.create_or_update_gateway_port(agent_gw_port) self.assertTrue(fip_create.called) self.assertEqual(1, self.driver.plug.call_count) self.assertEqual(1, self.driver.init_l3.call_count) self.fip_ns._update_gateway_port.assert_called_once_with( agent_gw_port, interface_name) @mock.patch.object(ip_lib, 'IPWrapper') @mock.patch.object(ip_lib, 'device_exists') @mock.patch.object(dvr_fip_ns.FipNamespace, 'create') @mock.patch.object(dvr_fip_ns.FipNamespace, 'delete') @mock.patch.object(dvr_fip_ns.FipNamespace, 'unsubscribe') def test_create_gateway_port_raises_exception( self, fip_desub, fip_delete, fip_create, device_exists, ip_wrapper): agent_gw_port = self._get_agent_gw_port() interface_name = 
self.fip_ns.get_ext_device_name(agent_gw_port['id']) device_exists.return_value = False msg = 'L3 agent failed to setup fip gateway in the namespace' self.fip_ns._update_gateway_port = mock.Mock( side_effect=n_exc.FloatingIpSetupException(msg)) self.assertRaises(n_exc.FloatingIpSetupException, self.fip_ns.create_or_update_gateway_port, agent_gw_port) self.assertTrue(fip_create.called) self.assertEqual(1, self.driver.plug.call_count) self.assertEqual(1, self.driver.init_l3.call_count) self.fip_ns._update_gateway_port.assert_called_once_with( agent_gw_port, interface_name) self.assertTrue(fip_desub.called) self.assertTrue(fip_delete.called) self.assertIsNone(self.fip_ns.agent_gateway_port) @mock.patch.object(ip_lib, 'IPDevice') @mock.patch.object(ip_lib, 'send_ip_addr_adv_notif') @mock.patch.object(dvr_fip_ns.FipNamespace, 'subscribe') def test_update_gateway_port(self, fip_sub, send_adv_notif, IPDevice): fip_sub.return_value = False self.fip_ns._check_for_gateway_ip_change = mock.Mock(return_value=True) self.fip_ns.agent_gateway_port = None agent_gw_port = self._get_agent_gw_port() self.fip_ns.create_or_update_gateway_port(agent_gw_port) expected = [ mock.call(self.fip_ns.get_name(), self.fip_ns.get_ext_device_name(agent_gw_port['id']), agent_gw_port['fixed_ips'][0]['ip_address'], mock.ANY), mock.call(self.fip_ns.get_name(), self.fip_ns.get_ext_device_name(agent_gw_port['id']), agent_gw_port['fixed_ips'][1]['ip_address'], mock.ANY)] send_adv_notif.assert_has_calls(expected) gw_ipv4 = agent_gw_port['subnets'][0]['gateway_ip'] gw_ipv6 = agent_gw_port['subnets'][1]['gateway_ip'] expected = [mock.call(gw_ipv4), mock.call(gw_ipv6)] IPDevice().route.add_gateway.assert_has_calls(expected) @mock.patch.object(ip_lib.IPDevice, 'exists') @mock.patch.object(dvr_fip_ns.FipNamespace, 'subscribe') def test_update_gateway_port_raises_exception(self, fip_sub, exists): fip_sub.return_value = False exists.return_value = False self.fip_ns._check_for_gateway_ip_change = mock.Mock(return_value=True) self.fip_ns.agent_gateway_port = None agent_gw_port = self._get_agent_gw_port() self.assertRaises(n_exc.FloatingIpSetupException, self.fip_ns.create_or_update_gateway_port, agent_gw_port) @mock.patch.object(ip_lib, 'IPDevice') @mock.patch.object(ip_lib, 'send_ip_addr_adv_notif') @mock.patch.object(dvr_fip_ns.FipNamespace, 'subscribe') def test_update_gateway_port_gateway_outside_subnet_added( self, fip_sub, send_adv_notif, IPDevice): fip_sub.return_value = False self.fip_ns.agent_gateway_port = None agent_gw_port = self._get_agent_gw_port() agent_gw_port['subnets'][0]['gateway_ip'] = '20.0.1.1' self.fip_ns.create_or_update_gateway_port(agent_gw_port) IPDevice().route.add_route.assert_called_once_with('20.0.1.1', scope='link') def test_check_gateway_ip_changed_no_change(self): agent_gw_port = self._get_agent_gw_port() self.fip_ns.agent_gateway_port = copy.deepcopy(agent_gw_port) agent_gw_port['mac_address'] = 'aa:bb:cc:dd:ee:ff' self.assertFalse(self.fip_ns._check_for_gateway_ip_change( agent_gw_port)) def test_check_gateway_ip_changed_v4(self): agent_gw_port = self._get_agent_gw_port() self.fip_ns.agent_gateway_port = copy.deepcopy(agent_gw_port) agent_gw_port['subnets'][0]['gateway_ip'] = '20.0.0.2' self.assertTrue(self.fip_ns._check_for_gateway_ip_change( agent_gw_port)) def test_check_gateway_ip_changed_v6(self): agent_gw_port = self._get_agent_gw_port() self.fip_ns.agent_gateway_port = copy.deepcopy(agent_gw_port) agent_gw_port['subnets'][1]['gateway_ip'] = 'cafe:dead:beef::2' 
self.assertTrue(self.fip_ns._check_for_gateway_ip_change( agent_gw_port)) @mock.patch.object(iptables_manager, 'IptablesManager') @mock.patch.object(utils, 'execute') @mock.patch.object(ip_lib.IpNetnsCommand, 'exists') def _test_create(self, old_kernel, exists, execute, IPTables): exists.return_value = True # There are up to four sysctl calls - two for ip_nonlocal_bind, # and two to enable forwarding execute.side_effect = [RuntimeError if old_kernel else None, None, None, None] self.fip_ns._iptables_manager = IPTables() self.fip_ns.create() ns_name = self.fip_ns.get_name() netns_cmd = ['ip', 'netns', 'exec', ns_name] bind_cmd = ['sysctl', '-w', 'net.ipv4.ip_nonlocal_bind=1'] expected = [mock.call(netns_cmd + bind_cmd, check_exit_code=True, extra_ok_codes=None, log_fail_as_error=False, run_as_root=True)] if old_kernel: expected.append(mock.call(bind_cmd, check_exit_code=True, extra_ok_codes=None, log_fail_as_error=True, run_as_root=True)) execute.assert_has_calls(expected) def test_create_old_kernel(self): self._test_create(True) def test_create_new_kernel(self): self._test_create(False) @mock.patch.object(ip_lib, 'IPWrapper') def test_destroy(self, IPWrapper): ip_wrapper = IPWrapper() dev1 = mock.Mock() dev1.name = 'fpr-aaaa' dev2 = mock.Mock() dev2.name = 'fg-aaaa' ip_wrapper.get_devices.return_value = [dev1, dev2] with mock.patch.object(self.fip_ns.ip_wrapper_root.netns, 'delete') as delete,\ mock.patch.object(self.fip_ns.ip_wrapper_root.netns, 'exists', return_value=True) as exists: self.fip_ns.delete() exists.assert_called_once_with(self.fip_ns.name) delete.assert_called_once_with(self.fip_ns.name) ext_net_bridge = self.conf.external_network_bridge ns_name = self.fip_ns.get_name() self.driver.unplug.assert_called_once_with('fg-aaaa', bridge=ext_net_bridge, prefix='fg-', namespace=ns_name) ip_wrapper.del_veth.assert_called_once_with('fpr-aaaa') def test_destroy_no_namespace(self): with mock.patch.object(self.fip_ns.ip_wrapper_root.netns, 'delete') as delete,\ mock.patch.object(self.fip_ns.ip_wrapper_root.netns, 'exists', return_value=False) as exists: self.fip_ns.delete() exists.assert_called_once_with(self.fip_ns.name) self.assertFalse(delete.called) @mock.patch.object(ip_lib, 'IPWrapper') @mock.patch.object(ip_lib, 'IPDevice') def _test_create_rtr_2_fip_link(self, dev_exists, addr_exists, IPDevice, IPWrapper): ri = mock.Mock() ri.router_id = _uuid() ri.rtr_fip_subnet = None ri.ns_name = mock.sentinel.router_ns rtr_2_fip_name = self.fip_ns.get_rtr_ext_device_name(ri.router_id) fip_2_rtr_name = self.fip_ns.get_int_device_name(ri.router_id) fip_ns_name = self.fip_ns.get_name() self.fip_ns.local_subnets = allocator = mock.Mock() pair = lla.LinkLocalAddressPair('169.254.31.28/31') allocator.allocate.return_value = pair addr_pair = pair.get_pair() ip_wrapper = IPWrapper() self.conf.network_device_mtu = 2000 ip_wrapper.add_veth.return_value = (IPDevice(), IPDevice()) device = IPDevice() device.exists.return_value = dev_exists device.addr.list.return_value = addr_exists self.fip_ns.create_rtr_2_fip_link(ri) if not dev_exists: ip_wrapper.add_veth.assert_called_with(rtr_2_fip_name, fip_2_rtr_name, fip_ns_name) device.link.set_mtu.assert_called_with(2000) self.assertEqual(2, device.link.set_mtu.call_count) self.assertEqual(2, device.link.set_up.call_count) if not addr_exists: expected = [mock.call(str(addr_pair[0]), add_broadcast=False), mock.call(str(addr_pair[1]), add_broadcast=False)] device.addr.add.assert_has_calls(expected) self.assertEqual(2, device.addr.add.call_count) 
device.route.add_gateway.assert_called_once_with( '169.254.31.29', table=16) def test_create_rtr_2_fip_link(self): self._test_create_rtr_2_fip_link(False, False) def test_create_rtr_2_fip_link_already_exists(self): self._test_create_rtr_2_fip_link(True, False) def test_create_rtr_2_fip_link_and_addr_already_exist(self): self._test_create_rtr_2_fip_link(True, True) @mock.patch.object(ip_lib, 'IPDevice') def _test_scan_fip_ports(self, ri, ip_list, IPDevice): IPDevice.return_value = device = mock.Mock() device.exists.return_value = True ri.get_router_cidrs.return_value = ip_list self.fip_ns.get_rtr_ext_device_name = mock.Mock( return_value=mock.sentinel.rtr_ext_device_name) self.fip_ns.scan_fip_ports(ri) def test_scan_fip_ports_restart_fips(self): ri = mock.Mock() ri.dist_fip_count = None ri.floating_ips_dict = {} ip_list = [{'cidr': '111.2.3.4'}, {'cidr': '111.2.3.5'}] self._test_scan_fip_ports(ri, ip_list) self.assertEqual(2, ri.dist_fip_count) def test_scan_fip_ports_restart_none(self): ri = mock.Mock() ri.dist_fip_count = None ri.floating_ips_dict = {} self._test_scan_fip_ports(ri, []) self.assertEqual(0, ri.dist_fip_count) def test_scan_fip_ports_restart_zero(self): ri = mock.Mock() ri.dist_fip_count = 0 self._test_scan_fip_ports(ri, None) self.assertEqual(0, ri.dist_fip_count) neutron-8.4.0/neutron/tests/unit/agent/__init__.py0000664000567000056710000000000013044372736023377 0ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/tests/unit/agent/dhcp/0000775000567000056710000000000013044373210022202 5ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/tests/unit/agent/dhcp/test_agent.py0000664000567000056710000022453213044372760024732 0ustar jenkinsjenkins00000000000000# Copyright 2012 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
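# [Illustrative sketch added in review; not part of the original module.]
# The fixtures below rely on dhcp.DictModel exposing dict keys as
# attributes (e.g. fake_subnet1.id, fake_port1.fixed_ips). A minimal
# stand-in with that behaviour (an assumption; the real class lives in
# neutron.agent.linux.dhcp and does more) would be:
#
#     class DictModel(dict):
#         def __getattr__(self, name):
#             try:
#                 return self[name]
#             except KeyError:
#                 raise AttributeError(name)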
import collections import copy import sys import uuid import eventlet import mock from oslo_config import cfg import oslo_messaging import testtools from neutron.agent.common import config from neutron.agent.dhcp import agent as dhcp_agent from neutron.agent.dhcp import config as dhcp_config from neutron.agent import dhcp_agent as entry from neutron.agent.linux import dhcp from neutron.agent.linux import interface from neutron.common import config as common_config from neutron.common import constants as const from neutron.common import exceptions from neutron.common import utils from neutron import context from neutron.tests import base HOSTNAME = 'hostname' dev_man = dhcp.DeviceManager rpc_api = dhcp_agent.DhcpPluginApi DEVICE_MANAGER = '%s.%s' % (dev_man.__module__, dev_man.__name__) DHCP_PLUGIN = '%s.%s' % (rpc_api.__module__, rpc_api.__name__) fake_tenant_id = 'aaaaaaaa-aaaa-aaaa-aaaaaaaaaaaa' fake_subnet1_allocation_pools = dhcp.DictModel(dict(id='', start='172.9.9.2', end='172.9.9.254')) fake_subnet1 = dhcp.DictModel(dict(id='bbbbbbbb-bbbb-bbbb-bbbbbbbbbbbb', network_id='12345678-1234-5678-1234567890ab', cidr='172.9.9.0/24', enable_dhcp=True, name='', tenant_id=fake_tenant_id, gateway_ip='172.9.9.1', host_routes=[], dns_nameservers=[], ip_version=4, ipv6_ra_mode=None, ipv6_address_mode=None, allocation_pools=fake_subnet1_allocation_pools)) fake_subnet2_allocation_pools = dhcp.DictModel(dict(id='', start='172.9.8.2', end='172.9.8.254')) fake_subnet2 = dhcp.DictModel(dict(id='dddddddd-dddd-dddd-dddddddddddd', network_id='12345678-1234-5678-1234567890ab', cidr='172.9.8.0/24', enable_dhcp=False, name='', tenant_id=fake_tenant_id, gateway_ip='172.9.8.1', host_routes=[], dns_nameservers=[], ip_version=4, allocation_pools=fake_subnet2_allocation_pools)) fake_subnet3 = dhcp.DictModel(dict(id='bbbbbbbb-1111-2222-bbbbbbbbbbbb', network_id='12345678-1234-5678-1234567890ab', cidr='192.168.1.1/24', enable_dhcp=True)) fake_ipv6_subnet = dhcp.DictModel(dict(id='bbbbbbbb-1111-2222-bbbbbbbbbbbb', network_id='12345678-1234-5678-1234567890ab', cidr='2001:0db8::0/64', enable_dhcp=True, tenant_id=fake_tenant_id, gateway_ip='2001:0db8::1', ip_version=6, ipv6_ra_mode='slaac', ipv6_address_mode=None)) fake_meta_subnet = dhcp.DictModel(dict(id='bbbbbbbb-1111-2222-bbbbbbbbbbbb', network_id='12345678-1234-5678-1234567890ab', cidr='169.254.169.252/30', gateway_ip='169.254.169.253', enable_dhcp=True)) fake_fixed_ip1 = dhcp.DictModel(dict(id='', subnet_id=fake_subnet1.id, ip_address='172.9.9.9')) fake_fixed_ip2 = dhcp.DictModel(dict(id='', subnet_id=fake_subnet1.id, ip_address='172.9.9.10')) fake_fixed_ipv6 = dhcp.DictModel(dict(id='', subnet_id=fake_ipv6_subnet.id, ip_address='2001:db8::a8bb:ccff:fedd:ee99')) fake_meta_fixed_ip = dhcp.DictModel(dict(id='', subnet=fake_meta_subnet, ip_address='169.254.169.254')) fake_allocation_pool_subnet1 = dhcp.DictModel(dict(id='', start='172.9.9.2', end='172.9.9.254')) fake_port1 = dhcp.DictModel(dict(id='12345678-1234-aaaa-1234567890ab', device_id='dhcp-12345678-1234-aaaa-1234567890ab', device_owner='', allocation_pools=fake_subnet1_allocation_pools, mac_address='aa:bb:cc:dd:ee:ff', network_id='12345678-1234-5678-1234567890ab', fixed_ips=[fake_fixed_ip1])) fake_dhcp_port = dhcp.DictModel(dict(id='12345678-1234-aaaa-123456789022', device_id='dhcp-12345678-1234-aaaa-123456789022', device_owner=const.DEVICE_OWNER_DHCP, allocation_pools=fake_subnet1_allocation_pools, mac_address='aa:bb:cc:dd:ee:22', network_id='12345678-1234-5678-1234567890ab', fixed_ips=[fake_fixed_ip2])) 
fake_port2 = dhcp.DictModel(dict(id='12345678-1234-aaaa-123456789000', device_id='dhcp-12345678-1234-aaaa-123456789000', device_owner='', mac_address='aa:bb:cc:dd:ee:99', network_id='12345678-1234-5678-1234567890ab', revision_number=77, fixed_ips=[fake_fixed_ip2])) fake_ipv6_port = dhcp.DictModel(dict(id='12345678-1234-aaaa-123456789000', device_owner='', mac_address='aa:bb:cc:dd:ee:99', network_id='12345678-1234-5678-1234567890ab', fixed_ips=[fake_fixed_ipv6])) fake_meta_port = dhcp.DictModel(dict(id='12345678-1234-aaaa-1234567890ab', mac_address='aa:bb:cc:dd:ee:ff', network_id='12345678-1234-5678-1234567890ab', device_owner=const.DEVICE_OWNER_ROUTER_INTF, device_id='forzanapoli', fixed_ips=[fake_meta_fixed_ip])) fake_meta_dvr_port = dhcp.DictModel(fake_meta_port.copy()) fake_meta_dvr_port.device_owner = const.DEVICE_OWNER_DVR_INTERFACE fake_dist_port = dhcp.DictModel(dict(id='12345678-1234-aaaa-1234567890ab', mac_address='aa:bb:cc:dd:ee:ff', network_id='12345678-1234-5678-1234567890ab', device_owner=const.DEVICE_OWNER_DVR_INTERFACE, device_id='forzanapoli', fixed_ips=[fake_meta_fixed_ip])) FAKE_NETWORK_UUID = '12345678-1234-5678-1234567890ab' FAKE_NETWORK_DHCP_NS = "qdhcp-%s" % FAKE_NETWORK_UUID fake_network = dhcp.NetModel(dict(id=FAKE_NETWORK_UUID, tenant_id='aaaaaaaa-aaaa-aaaa-aaaaaaaaaaaa', admin_state_up=True, subnets=[fake_subnet1, fake_subnet2], ports=[fake_port1])) fake_network_ipv6 = dhcp.NetModel(dict( id='12345678-1234-5678-1234567890ab', tenant_id='aaaaaaaa-aaaa-aaaa-aaaaaaaaaaaa', admin_state_up=True, subnets=[fake_ipv6_subnet], ports=[fake_ipv6_port])) fake_network_ipv6_ipv4 = dhcp.NetModel(dict( id='12345678-1234-5678-1234567890ab', tenant_id='aaaaaaaa-aaaa-aaaa-aaaaaaaaaaaa', admin_state_up=True, subnets=[fake_ipv6_subnet, fake_subnet1], ports=[fake_port1])) isolated_network = dhcp.NetModel( dict( id='12345678-1234-5678-1234567890ab', tenant_id='aaaaaaaa-aaaa-aaaa-aaaaaaaaaaaa', admin_state_up=True, subnets=[fake_subnet1], ports=[fake_port1])) nonisolated_dist_network = dhcp.NetModel( dict( id='12345678-1234-5678-1234567890ab', tenant_id='aaaaaaaa-aaaa-aaaa-aaaaaaaaaaaa', admin_state_up=True, subnets=[fake_subnet1], ports=[fake_port1, fake_port2])) empty_network = dhcp.NetModel( dict( id='12345678-1234-5678-1234567890ab', tenant_id='aaaaaaaa-aaaa-aaaa-aaaaaaaaaaaa', admin_state_up=True, subnets=[fake_subnet1], ports=[])) fake_meta_network = dhcp.NetModel( dict(id='12345678-1234-5678-1234567890ab', tenant_id='aaaaaaaa-aaaa-aaaa-aaaaaaaaaaaa', admin_state_up=True, subnets=[fake_meta_subnet], ports=[fake_meta_port])) fake_meta_dvr_network = dhcp.NetModel(fake_meta_network.copy()) fake_meta_dvr_network.ports = [fake_meta_dvr_port] fake_dist_network = dhcp.NetModel( dict(id='12345678-1234-5678-1234567890ab', tenant_id='aaaaaaaa-aaaa-aaaa-aaaaaaaaaaaa', admin_state_up=True, subnets=[fake_meta_subnet], ports=[fake_meta_port, fake_dist_port])) fake_down_network = dhcp.NetModel( dict(id='12345678-dddd-dddd-1234567890ab', tenant_id='aaaaaaaa-aaaa-aaaa-aaaaaaaaaaaa', admin_state_up=False, subnets=[], ports=[])) class TestDhcpAgent(base.BaseTestCase): def setUp(self): super(TestDhcpAgent, self).setUp() entry.register_options(cfg.CONF) cfg.CONF.set_override('interface_driver', 'neutron.agent.linux.interface.NullDriver') # disable setting up periodic state reporting cfg.CONF.set_override('report_interval', 0, 'AGENT') self.driver_cls_p = mock.patch( 'neutron.agent.dhcp.agent.importutils.import_class') self.driver = mock.Mock(name='driver') 
self.driver.existing_dhcp_networks.return_value = [] self.driver_cls = self.driver_cls_p.start() self.driver_cls.return_value = self.driver self.mock_makedirs_p = mock.patch("os.makedirs") self.mock_makedirs = self.mock_makedirs_p.start() self.mock_ip_wrapper_p = mock.patch("neutron.agent.linux.ip_lib." "IPWrapper") self.mock_ip_wrapper = self.mock_ip_wrapper_p.start() def test_init_host(self): dhcp = dhcp_agent.DhcpAgent(HOSTNAME) with mock.patch.object(dhcp, 'sync_state') as sync_state: dhcp.init_host() sync_state.assert_called_once_with() def test_dhcp_agent_manager(self): state_rpc_str = 'neutron.agent.rpc.PluginReportStateAPI' # sync_state is needed for this test cfg.CONF.set_override('report_interval', 1, 'AGENT') with mock.patch.object(dhcp_agent.DhcpAgentWithStateReport, 'sync_state', autospec=True) as mock_sync_state: with mock.patch.object(dhcp_agent.DhcpAgentWithStateReport, 'periodic_resync', autospec=True) as mock_periodic_resync: with mock.patch(state_rpc_str) as state_rpc: with mock.patch.object(sys, 'argv') as sys_argv: sys_argv.return_value = [ 'dhcp', '--config-file', base.etcdir('neutron.conf')] cfg.CONF.register_opts(dhcp_config.DHCP_AGENT_OPTS) config.register_interface_driver_opts_helper(cfg.CONF) config.register_agent_state_opts_helper(cfg.CONF) cfg.CONF.register_opts(interface.OPTS) common_config.init(sys.argv[1:]) agent_mgr = dhcp_agent.DhcpAgentWithStateReport( 'testhost') eventlet.greenthread.sleep(1) agent_mgr.after_start() mock_sync_state.assert_called_once_with(agent_mgr) mock_periodic_resync.assert_called_once_with(agent_mgr) state_rpc.assert_has_calls( [mock.call(mock.ANY), mock.call().report_state(mock.ANY, mock.ANY, mock.ANY)]) def test_run_completes_single_pass(self): with mock.patch(DEVICE_MANAGER): dhcp = dhcp_agent.DhcpAgent(HOSTNAME) attrs_to_mock = dict( [(a, mock.DEFAULT) for a in ['sync_state', 'periodic_resync']]) with mock.patch.multiple(dhcp, **attrs_to_mock) as mocks: dhcp.run() mocks['sync_state'].assert_called_once_with() mocks['periodic_resync'].assert_called_once_with() def test_call_driver(self): network = mock.Mock() network.id = '1' dhcp = dhcp_agent.DhcpAgent(cfg.CONF) self.assertTrue(dhcp.call_driver('foo', network)) self.driver.assert_called_once_with(cfg.CONF, mock.ANY, mock.ANY, mock.ANY, mock.ANY) def _test_call_driver_failure(self, exc=None, trace_level='exception', expected_sync=True): network = mock.Mock() network.id = '1' self.driver.return_value.foo.side_effect = exc or Exception dhcp = dhcp_agent.DhcpAgent(HOSTNAME) with mock.patch.object(dhcp, 'schedule_resync') as schedule_resync: self.assertIsNone(dhcp.call_driver('foo', network)) self.driver.assert_called_once_with(cfg.CONF, mock.ANY, mock.ANY, mock.ANY, mock.ANY) self.assertEqual(expected_sync, schedule_resync.called) def test_call_driver_ip_address_generation_failure(self): error = oslo_messaging.RemoteError( exc_type='IpAddressGenerationFailure') self._test_call_driver_failure(exc=error, expected_sync=False) def test_call_driver_failure(self): self._test_call_driver_failure() def test_call_driver_remote_error_net_not_found(self): self._test_call_driver_failure( exc=oslo_messaging.RemoteError(exc_type='NetworkNotFound'), trace_level='warning') def test_call_driver_network_not_found(self): self._test_call_driver_failure( exc=exceptions.NetworkNotFound(net_id='1'), trace_level='warning') def test_call_driver_conflict(self): self._test_call_driver_failure( exc=exceptions.Conflict(), trace_level='warning', expected_sync=False) def _test_sync_state_helper(self, 
known_net_ids, active_net_ids): active_networks = set(mock.Mock(id=netid) for netid in active_net_ids) with mock.patch(DHCP_PLUGIN) as plug: mock_plugin = mock.Mock() mock_plugin.get_active_networks_info.return_value = active_networks plug.return_value = mock_plugin dhcp = dhcp_agent.DhcpAgent(HOSTNAME) attrs_to_mock = dict([(a, mock.DEFAULT) for a in ['disable_dhcp_helper', 'cache', 'safe_configure_dhcp_for_network']]) with mock.patch.multiple(dhcp, **attrs_to_mock) as mocks: mocks['cache'].get_network_ids.return_value = known_net_ids dhcp.sync_state() diff = set(known_net_ids) - set(active_net_ids) exp_disable = [mock.call(net_id) for net_id in diff] mocks['cache'].assert_has_calls([mock.call.get_network_ids()]) mocks['disable_dhcp_helper'].assert_has_calls(exp_disable) def test_sync_state_initial(self): self._test_sync_state_helper([], ['a']) def test_sync_state_same(self): self._test_sync_state_helper(['a'], ['a']) def test_sync_state_disabled_net(self): self._test_sync_state_helper(['b'], ['a']) def test_sync_state_waitall(self): with mock.patch.object(dhcp_agent.eventlet.GreenPool, 'waitall') as w: active_net_ids = ['1', '2', '3', '4', '5'] known_net_ids = ['1', '2', '3', '4', '5'] self._test_sync_state_helper(known_net_ids, active_net_ids) w.assert_called_once_with() def test_sync_state_for_all_networks_plugin_error(self): with mock.patch(DHCP_PLUGIN) as plug: mock_plugin = mock.Mock() mock_plugin.get_active_networks_info.side_effect = Exception plug.return_value = mock_plugin with mock.patch.object(dhcp_agent.LOG, 'exception') as log: dhcp = dhcp_agent.DhcpAgent(HOSTNAME) with mock.patch.object(dhcp, 'schedule_resync') as schedule_resync: dhcp.sync_state() self.assertTrue(log.called) self.assertTrue(schedule_resync.called) def test_sync_state_for_one_network_plugin_error(self): with mock.patch(DHCP_PLUGIN) as plug: mock_plugin = mock.Mock() exc = Exception() mock_plugin.get_active_networks_info.side_effect = exc plug.return_value = mock_plugin with mock.patch.object(dhcp_agent.LOG, 'exception') as log: dhcp = dhcp_agent.DhcpAgent(HOSTNAME) with mock.patch.object(dhcp, 'schedule_resync') as schedule_resync: dhcp.sync_state(['foo_network']) self.assertTrue(log.called) schedule_resync.assert_called_with(exc, 'foo_network') def test_periodic_resync(self): dhcp = dhcp_agent.DhcpAgent(HOSTNAME) with mock.patch.object(dhcp_agent.eventlet, 'spawn') as spawn: dhcp.periodic_resync() spawn.assert_called_once_with(dhcp._periodic_resync_helper) def test_report_state_revival_logic(self): dhcp = dhcp_agent.DhcpAgentWithStateReport(HOSTNAME) with mock.patch.object(dhcp.state_rpc, 'report_state') as report_state,\ mock.patch.object(dhcp, "run"): report_state.return_value = const.AGENT_ALIVE dhcp._report_state() self.assertEqual({}, dhcp.needs_resync_reasons) report_state.return_value = const.AGENT_REVIVED dhcp._report_state() self.assertEqual(dhcp.needs_resync_reasons[None], ['Agent has just been revived']) def test_periodic_resync_helper(self): with mock.patch.object(dhcp_agent.eventlet, 'sleep') as sleep: dhcp = dhcp_agent.DhcpAgent(HOSTNAME) resync_reasons = collections.OrderedDict( (('a', 'reason1'), ('b', 'reason2'))) dhcp.needs_resync_reasons = resync_reasons with mock.patch.object(dhcp, 'sync_state') as sync_state: sync_state.side_effect = RuntimeError with testtools.ExpectedException(RuntimeError): dhcp._periodic_resync_helper() sync_state.assert_called_once_with(resync_reasons.keys()) sleep.assert_called_once_with(dhcp.conf.resync_interval) self.assertEqual(len(dhcp.needs_resync_reasons), 
                         0)

    def test_populate_cache_on_start_without_active_networks_support(self):
        # emulate a dhcp driver that does not support retrieval of active
        # networks
        self.driver.existing_dhcp_networks.side_effect = NotImplementedError

        with mock.patch.object(dhcp_agent.LOG, 'debug') as log:
            dhcp = dhcp_agent.DhcpAgent(HOSTNAME)

            self.driver.existing_dhcp_networks.assert_called_once_with(
                dhcp.conf,
            )

            self.assertFalse(dhcp.cache.get_network_ids())
            self.assertTrue(log.called)

    def test_populate_cache_on_start(self):
        networks = ['aaa', 'bbb']
        self.driver.existing_dhcp_networks.return_value = networks

        dhcp = dhcp_agent.DhcpAgent(HOSTNAME)

        self.driver.existing_dhcp_networks.assert_called_once_with(
            dhcp.conf,
        )

        self.assertEqual(set(networks), set(dhcp.cache.get_network_ids()))

    def test_none_interface_driver(self):
        cfg.CONF.set_override('interface_driver', None)
        self.assertRaises(SystemExit, dhcp.DeviceManager,
                          cfg.CONF, None)

    def test_nonexistent_interface_driver(self):
        # Temporarily turn off the mock so that the real import_class can
        # be used to import interface_driver.
        self.driver_cls_p.stop()
        self.addCleanup(self.driver_cls_p.start)
        cfg.CONF.set_override('interface_driver', 'foo.bar')
        self.assertRaises(SystemExit, dhcp.DeviceManager,
                          cfg.CONF, None)


class TestLogArgs(base.BaseTestCase):

    def test_log_args_without_log_dir_and_file(self):
        conf_dict = {'debug': True,
                     'verbose': False,
                     'log_dir': None,
                     'log_file': None,
                     'use_syslog': True,
                     'syslog_log_facility': 'LOG_USER'}
        conf = dhcp.DictModel(conf_dict)
        expected_args = ['--debug',
                         '--use-syslog',
                         '--syslog-log-facility=LOG_USER']
        args = config.get_log_args(conf, 'log_file_name')
        self.assertEqual(expected_args, args)

    def test_log_args_without_log_file(self):
        conf_dict = {'debug': True,
                     'verbose': True,
                     'log_dir': '/etc/tests',
                     'log_file': None,
                     'use_syslog': False,
                     'syslog_log_facility': 'LOG_USER'}
        conf = dhcp.DictModel(conf_dict)
        expected_args = ['--debug',
                         '--verbose',
                         '--log-file=log_file_name',
                         '--log-dir=/etc/tests']
        args = config.get_log_args(conf, 'log_file_name')
        self.assertEqual(expected_args, args)

    def test_log_args_with_log_dir_and_file(self):
        conf_dict = {'debug': True,
                     'verbose': False,
                     'log_dir': '/etc/tests',
                     'log_file': 'tests/filelog',
                     'use_syslog': False,
                     'syslog_log_facility': 'LOG_USER'}
        conf = dhcp.DictModel(conf_dict)
        expected_args = ['--debug',
                         '--log-file=log_file_name',
                         '--log-dir=/etc/tests/tests']
        args = config.get_log_args(conf, 'log_file_name')
        self.assertEqual(expected_args, args)

    def test_log_args_without_log_dir(self):
        conf_dict = {'debug': True,
                     'verbose': False,
                     'log_file': 'tests/filelog',
                     'log_dir': None,
                     'use_syslog': False,
                     'syslog_log_facility': 'LOG_USER'}
        conf = dhcp.DictModel(conf_dict)
        expected_args = ['--debug',
                         '--log-file=log_file_name',
                         '--log-dir=tests']
        args = config.get_log_args(conf, 'log_file_name')
        self.assertEqual(expected_args, args)

    def test_log_args_with_filelog_and_syslog(self):
        conf_dict = {'debug': True,
                     'verbose': True,
                     'log_file': 'tests/filelog',
                     'log_dir': '/etc/tests',
                     'use_syslog': True,
                     'syslog_log_facility': 'LOG_USER'}
        conf = dhcp.DictModel(conf_dict)
        expected_args = ['--debug',
                         '--verbose',
                         '--log-file=log_file_name',
                         '--log-dir=/etc/tests/tests']
        args = config.get_log_args(conf, 'log_file_name')
        self.assertEqual(expected_args, args)


class TestDhcpAgentEventHandler(base.BaseTestCase):

    def setUp(self):
        super(TestDhcpAgentEventHandler, self).setUp()
        config.register_interface_driver_opts_helper(cfg.CONF)
        cfg.CONF.set_override('interface_driver',
                              'neutron.agent.linux.interface.NullDriver')
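        # NullDriver satisfies the interface driver contract without
        # touching any real network devices, so these event-handler tests
        # can run on any host.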
entry.register_options(cfg.CONF) # register all dhcp cfg options self.plugin_p = mock.patch(DHCP_PLUGIN) plugin_cls = self.plugin_p.start() self.plugin = mock.Mock() plugin_cls.return_value = self.plugin self.cache_p = mock.patch('neutron.agent.dhcp.agent.NetworkCache') cache_cls = self.cache_p.start() self.cache = mock.Mock() self.cache.is_port_message_stale.return_value = False cache_cls.return_value = self.cache self.mock_makedirs_p = mock.patch("os.makedirs") self.mock_makedirs = self.mock_makedirs_p.start() self.mock_init_p = mock.patch('neutron.agent.dhcp.agent.' 'DhcpAgent._populate_networks_cache') self.mock_init = self.mock_init_p.start() self.dhcp = dhcp_agent.DhcpAgent(HOSTNAME) self.call_driver_p = mock.patch.object(self.dhcp, 'call_driver') self.call_driver = self.call_driver_p.start() self.schedule_resync_p = mock.patch.object(self.dhcp, 'schedule_resync') self.schedule_resync = self.schedule_resync_p.start() self.external_process_p = mock.patch( 'neutron.agent.linux.external_process.ProcessManager' ) self.external_process = self.external_process_p.start() def _process_manager_constructor_call(self, ns=FAKE_NETWORK_DHCP_NS): return mock.call(conf=cfg.CONF, uuid=FAKE_NETWORK_UUID, namespace=ns, default_cmd_callback=mock.ANY) def _enable_dhcp_helper(self, network, enable_isolated_metadata=False, is_isolated_network=False): self.dhcp._process_monitor = mock.Mock() if enable_isolated_metadata: cfg.CONF.set_override('enable_isolated_metadata', True) self.plugin.get_network_info.return_value = network self.dhcp.enable_dhcp_helper(network.id) self.plugin.assert_has_calls([ mock.call.get_network_info(network.id)]) self.call_driver.assert_called_once_with('enable', network) self.cache.assert_has_calls([mock.call.put(network)]) if is_isolated_network and enable_isolated_metadata: self.external_process.assert_has_calls([ self._process_manager_constructor_call(), mock.call().enable()]) else: self.external_process.assert_has_calls([ self._process_manager_constructor_call(ns=None), mock.call().disable()]) def test_enable_dhcp_helper_enable_metadata_isolated_network(self): self._enable_dhcp_helper(isolated_network, enable_isolated_metadata=True, is_isolated_network=True) def test_enable_dhcp_helper_enable_metadata_no_gateway(self): isolated_network_no_gateway = copy.deepcopy(isolated_network) isolated_network_no_gateway.subnets[0].gateway_ip = None self._enable_dhcp_helper(isolated_network_no_gateway, enable_isolated_metadata=True, is_isolated_network=True) def test_enable_dhcp_helper_enable_metadata_nonisolated_network(self): nonisolated_network = copy.deepcopy(isolated_network) nonisolated_network.ports[0].device_owner = ( const.DEVICE_OWNER_ROUTER_INTF) nonisolated_network.ports[0].fixed_ips[0].ip_address = '172.9.9.1' self._enable_dhcp_helper(nonisolated_network, enable_isolated_metadata=True, is_isolated_network=False) def test_enable_dhcp_helper_enable_metadata_nonisolated_dist_network(self): nonisolated_dist_network.ports[0].device_owner = ( const.DEVICE_OWNER_ROUTER_INTF) nonisolated_dist_network.ports[0].fixed_ips[0].ip_address = '172.9.9.1' nonisolated_dist_network.ports[1].device_owner = ( const.DEVICE_OWNER_DVR_INTERFACE) nonisolated_dist_network.ports[1].fixed_ips[0].ip_address = '172.9.9.1' self._enable_dhcp_helper(nonisolated_dist_network, enable_isolated_metadata=True, is_isolated_network=False) def test_enable_dhcp_helper_enable_metadata_empty_network(self): self._enable_dhcp_helper(empty_network, enable_isolated_metadata=True, is_isolated_network=True) def 
test_enable_dhcp_helper_enable_metadata_ipv6_ipv4_network(self): self._enable_dhcp_helper(fake_network_ipv6_ipv4, enable_isolated_metadata=True, is_isolated_network=True) def test_enable_dhcp_helper_driver_failure_ipv6_ipv4_network(self): self.plugin.get_network_info.return_value = fake_network_ipv6_ipv4 self.call_driver.return_value = False cfg.CONF.set_override('enable_isolated_metadata', True) with mock.patch.object( self.dhcp, 'enable_isolated_metadata_proxy') as enable_metadata: self.dhcp.enable_dhcp_helper(fake_network_ipv6_ipv4.id) self.plugin.assert_has_calls( [mock.call.get_network_info(fake_network_ipv6_ipv4.id)]) self.call_driver.assert_called_once_with('enable', fake_network_ipv6_ipv4) self.assertFalse(self.cache.called) self.assertFalse(enable_metadata.called) self.assertFalse(self.external_process.called) def test_enable_dhcp_helper(self): self._enable_dhcp_helper(fake_network) def test_enable_dhcp_helper_ipv6_network(self): self._enable_dhcp_helper(fake_network_ipv6) def test_enable_dhcp_helper_down_network(self): self.plugin.get_network_info.return_value = fake_down_network self.dhcp.enable_dhcp_helper(fake_down_network.id) self.plugin.assert_has_calls( [mock.call.get_network_info(fake_down_network.id)]) self.assertFalse(self.call_driver.called) self.assertFalse(self.cache.called) self.assertFalse(self.external_process.called) def test_enable_dhcp_helper_network_none(self): self.plugin.get_network_info.return_value = None self.dhcp.enable_dhcp_helper('fake_id') self.plugin.assert_has_calls( [mock.call.get_network_info('fake_id')]) self.assertFalse(self.call_driver.called) self.assertFalse(self.dhcp.schedule_resync.called) def test_enable_dhcp_helper_exception_during_rpc(self): self.plugin.get_network_info.side_effect = Exception with mock.patch.object(dhcp_agent.LOG, 'exception') as log: self.dhcp.enable_dhcp_helper(fake_network.id) self.plugin.assert_has_calls( [mock.call.get_network_info(fake_network.id)]) self.assertFalse(self.call_driver.called) self.assertTrue(log.called) self.assertTrue(self.schedule_resync.called) self.assertFalse(self.cache.called) self.assertFalse(self.external_process.called) def test_enable_dhcp_helper_driver_failure(self): self.plugin.get_network_info.return_value = fake_network self.call_driver.return_value = False cfg.CONF.set_override('enable_isolated_metadata', True) self.dhcp.enable_dhcp_helper(fake_network.id) self.plugin.assert_has_calls( [mock.call.get_network_info(fake_network.id)]) self.call_driver.assert_called_once_with('enable', fake_network) self.assertFalse(self.cache.called) self.assertFalse(self.external_process.called) def _disable_dhcp_helper_known_network(self, isolated_metadata=False): if isolated_metadata: cfg.CONF.set_override('enable_isolated_metadata', True) self.cache.get_network_by_id.return_value = fake_network self.dhcp.disable_dhcp_helper(fake_network.id) self.cache.assert_has_calls( [mock.call.get_network_by_id(fake_network.id)]) self.call_driver.assert_called_once_with('disable', fake_network) if isolated_metadata: self.external_process.assert_has_calls([ self._process_manager_constructor_call(ns=None), mock.call().disable()]) else: self.assertFalse(self.external_process.call_count) def test_disable_dhcp_helper_known_network_isolated_metadata(self): self._disable_dhcp_helper_known_network(isolated_metadata=True) def test_disable_dhcp_helper_known_network(self): self._disable_dhcp_helper_known_network() def test_disable_dhcp_helper_unknown_network(self): self.cache.get_network_by_id.return_value = None 
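        # disabling an unknown network must be a no-op: only the cache is
        # consulted, and neither the driver nor the metadata proxy is
        # touched, as asserted below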
self.dhcp.disable_dhcp_helper('abcdef') self.cache.assert_has_calls( [mock.call.get_network_by_id('abcdef')]) self.assertEqual(0, self.call_driver.call_count) self.assertFalse(self.external_process.called) def _disable_dhcp_helper_driver_failure(self, isolated_metadata=False): if isolated_metadata: cfg.CONF.set_override('enable_isolated_metadata', True) self.cache.get_network_by_id.return_value = fake_network self.call_driver.return_value = False self.dhcp.disable_dhcp_helper(fake_network.id) self.cache.assert_has_calls( [mock.call.get_network_by_id(fake_network.id)]) self.call_driver.assert_called_once_with('disable', fake_network) self.cache.assert_has_calls( [mock.call.get_network_by_id(fake_network.id)]) if isolated_metadata: self.external_process.assert_has_calls([ self._process_manager_constructor_call(ns=None), mock.call().disable() ]) else: self.assertFalse(self.external_process.call_count) def test_disable_dhcp_helper_driver_failure_isolated_metadata(self): self._disable_dhcp_helper_driver_failure(isolated_metadata=True) def test_disable_dhcp_helper_driver_failure(self): self._disable_dhcp_helper_driver_failure() def test_enable_isolated_metadata_proxy(self): self.dhcp._process_monitor = mock.Mock() self.dhcp.enable_isolated_metadata_proxy(fake_network) self.external_process.assert_has_calls([ self._process_manager_constructor_call(), mock.call().enable() ]) def test_disable_isolated_metadata_proxy(self): method_path = ('neutron.agent.metadata.driver.MetadataDriver' '.destroy_monitored_metadata_proxy') with mock.patch(method_path) as destroy: self.dhcp.disable_isolated_metadata_proxy(fake_network) destroy.assert_called_once_with(self.dhcp._process_monitor, fake_network.id, cfg.CONF) def _test_enable_isolated_metadata_proxy(self, network): cfg.CONF.set_override('enable_metadata_network', True) cfg.CONF.set_override('debug', True) cfg.CONF.set_override('verbose', False) cfg.CONF.set_override('log_file', 'test.log') method_path = ('neutron.agent.metadata.driver.MetadataDriver' '.spawn_monitored_metadata_proxy') with mock.patch(method_path) as spawn: self.dhcp.enable_isolated_metadata_proxy(network) spawn.assert_called_once_with(self.dhcp._process_monitor, network.namespace, dhcp.METADATA_PORT, cfg.CONF, router_id='forzanapoli') def test_enable_isolated_metadata_proxy_with_metadata_network(self): self._test_enable_isolated_metadata_proxy(fake_meta_network) def test_enable_isolated_metadata_proxy_with_metadata_network_dvr(self): self._test_enable_isolated_metadata_proxy(fake_meta_dvr_network) def test_enable_isolated_metadata_proxy_with_dist_network(self): self._test_enable_isolated_metadata_proxy(fake_dist_network) def _test_disable_isolated_metadata_proxy(self, network): cfg.CONF.set_override('enable_metadata_network', True) method_path = ('neutron.agent.metadata.driver.MetadataDriver' '.destroy_monitored_metadata_proxy') with mock.patch(method_path) as destroy: self.dhcp.enable_isolated_metadata_proxy(network) self.dhcp.disable_isolated_metadata_proxy(network) destroy.assert_called_once_with(self.dhcp._process_monitor, 'forzanapoli', cfg.CONF) def test_disable_isolated_metadata_proxy_with_metadata_network(self): self._test_disable_isolated_metadata_proxy(fake_meta_network) def test_disable_isolated_metadata_proxy_with_metadata_network_dvr(self): self._test_disable_isolated_metadata_proxy(fake_meta_dvr_network) def test_disable_isolated_metadata_proxy_with_dist_network(self): self._test_disable_isolated_metadata_proxy(fake_dist_network) def test_network_create_end(self): payload = 
dict(network=dict(id=fake_network.id)) with mock.patch.object(self.dhcp, 'enable_dhcp_helper') as enable: self.dhcp.network_create_end(None, payload) enable.assert_called_once_with(fake_network.id) def test_network_update_end_admin_state_up(self): payload = dict(network=dict(id=fake_network.id, admin_state_up=True)) with mock.patch.object(self.dhcp, 'enable_dhcp_helper') as enable: self.dhcp.network_update_end(None, payload) enable.assert_called_once_with(fake_network.id) def test_network_update_end_admin_state_down(self): payload = dict(network=dict(id=fake_network.id, admin_state_up=False)) with mock.patch.object(self.dhcp, 'disable_dhcp_helper') as disable: self.dhcp.network_update_end(None, payload) disable.assert_called_once_with(fake_network.id) def test_network_delete_end(self): payload = dict(network_id=fake_network.id) with mock.patch.object(self.dhcp, 'disable_dhcp_helper') as disable: self.dhcp.network_delete_end(None, payload) disable.assert_called_once_with(fake_network.id) def test_refresh_dhcp_helper_no_dhcp_enabled_networks(self): network = dhcp.NetModel(dict(id='net-id', tenant_id='aaaaaaaa-aaaa-aaaa-aaaaaaaaaaaa', admin_state_up=True, subnets=[], ports=[])) self.cache.get_network_by_id.return_value = network self.plugin.get_network_info.return_value = network with mock.patch.object(self.dhcp, 'disable_dhcp_helper') as disable: self.dhcp.refresh_dhcp_helper(network.id) disable.assert_called_once_with(network.id) self.assertFalse(self.cache.called) self.assertFalse(self.call_driver.called) self.cache.assert_has_calls( [mock.call.get_network_by_id('net-id')]) def test_refresh_dhcp_helper_exception_during_rpc(self): network = dhcp.NetModel(dict(id='net-id', tenant_id='aaaaaaaa-aaaa-aaaa-aaaaaaaaaaaa', admin_state_up=True, subnets=[], ports=[])) self.cache.get_network_by_id.return_value = network self.plugin.get_network_info.side_effect = Exception with mock.patch.object(dhcp_agent.LOG, 'exception') as log: self.dhcp.refresh_dhcp_helper(network.id) self.assertFalse(self.call_driver.called) self.cache.assert_has_calls( [mock.call.get_network_by_id('net-id')]) self.assertTrue(log.called) self.assertTrue(self.dhcp.schedule_resync.called) def test_subnet_create_restarts_with_dhcp_disabled(self): payload = dict(subnet=dhcp.DictModel( dict(network_id=fake_network.id, enable_dhcp=False, cidr='99.99.99.0/24'))) self.cache.get_network_by_id.return_value = fake_network new_net = copy.deepcopy(fake_network) new_net.subnets.append(payload['subnet']) self.plugin.get_network_info.return_value = new_net self.dhcp.subnet_create_end(None, payload) self.cache.assert_has_calls([mock.call.put(new_net)]) self.call_driver.assert_called_once_with('restart', new_net) def test_subnet_update_end(self): payload = dict(subnet=dict(network_id=fake_network.id)) self.cache.get_network_by_id.return_value = fake_network self.plugin.get_network_info.return_value = fake_network self.dhcp.subnet_update_end(None, payload) self.cache.assert_has_calls([mock.call.put(fake_network)]) self.call_driver.assert_called_once_with('reload_allocations', fake_network) def test_subnet_update_end_restart(self): new_state = dhcp.NetModel(dict(id=fake_network.id, tenant_id=fake_network.tenant_id, admin_state_up=True, subnets=[fake_subnet1, fake_subnet3], ports=[fake_port1])) payload = dict(subnet=dict(network_id=fake_network.id)) self.cache.get_network_by_id.return_value = fake_network self.plugin.get_network_info.return_value = new_state self.dhcp.subnet_update_end(None, payload) 
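        # the set of subnets changed (fake_subnet3 replaced fake_subnet2),
        # so a full dnsmasq restart is expected instead of a plain
        # reload_allocations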
        self.cache.assert_has_calls([mock.call.put(new_state)])
        self.call_driver.assert_called_once_with('restart', new_state)

    def test_subnet_update_end_delete_payload(self):
        prev_state = dhcp.NetModel(dict(id=fake_network.id,
                                        tenant_id=fake_network.tenant_id,
                                        admin_state_up=True,
                                        subnets=[fake_subnet1, fake_subnet3],
                                        ports=[fake_port1]))

        payload = dict(subnet_id=fake_subnet1.id)
        self.cache.get_network_by_subnet_id.return_value = prev_state
        self.cache.get_network_by_id.return_value = prev_state
        self.plugin.get_network_info.return_value = fake_network

        self.dhcp.subnet_delete_end(None, payload)

        self.cache.assert_has_calls([
            mock.call.get_network_by_subnet_id(
                'bbbbbbbb-bbbb-bbbb-bbbbbbbbbbbb'),
            mock.call.get_network_by_id('12345678-1234-5678-1234567890ab'),
            mock.call.put(fake_network)])
        self.call_driver.assert_called_once_with('restart', fake_network)

    def test_port_update_end(self):
        payload = dict(port=fake_port2)
        self.cache.get_network_by_id.return_value = fake_network
        self.cache.get_port_by_id.return_value = fake_port2
        self.dhcp.port_update_end(None, payload)
        self.cache.assert_has_calls(
            [mock.call.get_network_by_id(fake_port2.network_id),
             mock.call.put_port(mock.ANY)])
        self.call_driver.assert_called_once_with('reload_allocations',
                                                 fake_network)

    def test_port_update_change_ip_on_port(self):
        payload = dict(port=fake_port1)
        self.cache.get_network_by_id.return_value = fake_network
        updated_fake_port1 = copy.deepcopy(fake_port1)
        updated_fake_port1.fixed_ips[0].ip_address = '172.9.9.99'
        self.cache.get_port_by_id.return_value = updated_fake_port1
        self.dhcp.port_update_end(None, payload)
        self.cache.assert_has_calls(
            [mock.call.get_network_by_id(fake_port1.network_id),
             mock.call.put_port(mock.ANY)])
        # assert on the recorded call itself; mock.call.call_driver would
        # describe a child attribute of the mock that is never invoked
        self.call_driver.assert_has_calls(
            [mock.call('reload_allocations', fake_network)])

    def test_port_update_change_ip_on_dhcp_agents_port(self):
        self.cache.get_network_by_id.return_value = fake_network
        self.cache.get_port_by_id.return_value = fake_port1
        payload = dict(port=copy.deepcopy(fake_port1))
        device_id = utils.get_dhcp_agent_device_id(
            payload['port']['network_id'], self.dhcp.conf.host)
        payload['port']['fixed_ips'][0]['ip_address'] = '172.9.9.99'
        payload['port']['device_id'] = device_id
        self.dhcp.port_update_end(None, payload)
        self.call_driver.assert_has_calls(
            [mock.call('restart', fake_network)])

    def test_port_update_on_dhcp_agents_port_no_ip_change(self):
        self.cache.get_network_by_id.return_value = fake_network
        self.cache.get_port_by_id.return_value = fake_port1
        payload = dict(port=fake_port1)
        device_id = utils.get_dhcp_agent_device_id(
            payload['port']['network_id'], self.dhcp.conf.host)
        payload['port']['device_id'] = device_id
        self.dhcp.port_update_end(None, payload)
        self.call_driver.assert_has_calls(
            [mock.call('reload_allocations', fake_network)])

    def test_port_delete_end(self):
        payload = dict(port_id=fake_port2.id)
        self.cache.get_network_by_id.return_value = fake_network
        self.cache.get_port_by_id.return_value = fake_port2
        self.dhcp.port_delete_end(None, payload)
        self.cache.assert_has_calls(
            [mock.call.get_port_by_id(fake_port2.id),
             mock.call.deleted_ports.add(fake_port2.id),
             mock.call.get_network_by_id(fake_network.id),
             mock.call.remove_port(fake_port2)])
        self.call_driver.assert_has_calls(
            [mock.call('reload_allocations', fake_network)])

    def test_port_delete_end_unknown_port(self):
        payload = dict(port_id='unknown')
        self.cache.get_port_by_id.return_value = None
        self.dhcp.port_delete_end(None, payload)
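        # for a port the agent has no record of, only the cache lookup
        # should happen; the driver must not be called at all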
self.cache.assert_has_calls([mock.call.get_port_by_id('unknown')]) self.assertEqual(self.call_driver.call_count, 0) class TestDhcpPluginApiProxy(base.BaseTestCase): def _test_dhcp_api(self, method, **kwargs): ctxt = context.get_admin_context() proxy = dhcp_agent.DhcpPluginApi('foo', ctxt, host='foo') with mock.patch.object(proxy.client, 'call') as rpc_mock,\ mock.patch.object(proxy.client, 'prepare') as prepare_mock: prepare_mock.return_value = proxy.client rpc_mock.return_value = kwargs.pop('return_value', []) prepare_args = {} if 'version' in kwargs: prepare_args['version'] = kwargs.pop('version') retval = getattr(proxy, method)(**kwargs) self.assertEqual(retval, rpc_mock.return_value) prepare_mock.assert_called_once_with(**prepare_args) kwargs['host'] = proxy.host rpc_mock.assert_called_once_with(ctxt, method, **kwargs) def test_get_active_networks_info(self): self._test_dhcp_api('get_active_networks_info', version='1.1') def test_get_network_info(self): self._test_dhcp_api('get_network_info', network_id='fake_id', return_value=None) def test_create_dhcp_port(self): self._test_dhcp_api('create_dhcp_port', port='fake_port', return_value=None, version='1.1') def test_update_dhcp_port(self): self._test_dhcp_api('update_dhcp_port', port_id='fake_id', port='fake_port', return_value=None, version='1.1') def test_release_dhcp_port(self): self._test_dhcp_api('release_dhcp_port', network_id='fake_id', device_id='fake_id_2') class TestNetworkCache(base.BaseTestCase): def test_update_of_deleted_port_ignored(self): nc = dhcp_agent.NetworkCache() nc.put(fake_network) nc.deleted_ports.add(fake_port2['id']) self.assertTrue(nc.is_port_message_stale(fake_port2)) def test_stale_update_ignored(self): nc = dhcp_agent.NetworkCache() nc.put(fake_network) nc.put_port(fake_port2) stale = copy.copy(fake_port2) stale['revision_number'] = 2 self.assertTrue(nc.is_port_message_stale(stale)) def test_put_network(self): nc = dhcp_agent.NetworkCache() nc.put(fake_network) self.assertEqual(nc.cache, {fake_network.id: fake_network}) self.assertEqual(nc.subnet_lookup, {fake_subnet1.id: fake_network.id, fake_subnet2.id: fake_network.id}) self.assertEqual(nc.port_lookup, {fake_port1.id: fake_network.id}) def test_put_network_existing(self): prev_network_info = mock.Mock() nc = dhcp_agent.NetworkCache() with mock.patch.object(nc, 'remove') as remove: nc.cache[fake_network.id] = prev_network_info nc.put(fake_network) remove.assert_called_once_with(prev_network_info) self.assertEqual(nc.cache, {fake_network.id: fake_network}) self.assertEqual(nc.subnet_lookup, {fake_subnet1.id: fake_network.id, fake_subnet2.id: fake_network.id}) self.assertEqual(nc.port_lookup, {fake_port1.id: fake_network.id}) def test_remove_network(self): nc = dhcp_agent.NetworkCache() nc.cache = {fake_network.id: fake_network} nc.subnet_lookup = {fake_subnet1.id: fake_network.id, fake_subnet2.id: fake_network.id} nc.port_lookup = {fake_port1.id: fake_network.id} nc.remove(fake_network) self.assertEqual(len(nc.cache), 0) self.assertEqual(len(nc.subnet_lookup), 0) self.assertEqual(len(nc.port_lookup), 0) def test_get_network_by_id(self): nc = dhcp_agent.NetworkCache() nc.put(fake_network) self.assertEqual(nc.get_network_by_id(fake_network.id), fake_network) def test_get_network_ids(self): nc = dhcp_agent.NetworkCache() nc.put(fake_network) self.assertEqual(list(nc.get_network_ids()), [fake_network.id]) def test_get_network_by_subnet_id(self): nc = dhcp_agent.NetworkCache() nc.put(fake_network) self.assertEqual(nc.get_network_by_subnet_id(fake_subnet1.id), 
fake_network) def test_get_network_by_port_id(self): nc = dhcp_agent.NetworkCache() nc.put(fake_network) self.assertEqual(nc.get_network_by_port_id(fake_port1.id), fake_network) def test_put_port(self): fake_net = dhcp.NetModel( dict(id='12345678-1234-5678-1234567890ab', tenant_id='aaaaaaaa-aaaa-aaaa-aaaaaaaaaaaa', subnets=[fake_subnet1], ports=[fake_port1])) nc = dhcp_agent.NetworkCache() nc.put(fake_net) nc.put_port(fake_port2) self.assertEqual(len(nc.port_lookup), 2) self.assertIn(fake_port2, fake_net.ports) def test_put_port_existing(self): fake_net = dhcp.NetModel( dict(id='12345678-1234-5678-1234567890ab', tenant_id='aaaaaaaa-aaaa-aaaa-aaaaaaaaaaaa', subnets=[fake_subnet1], ports=[fake_port1, fake_port2])) nc = dhcp_agent.NetworkCache() nc.put(fake_net) nc.put_port(fake_port2) self.assertEqual(len(nc.port_lookup), 2) self.assertIn(fake_port2, fake_net.ports) def test_remove_port_existing(self): fake_net = dhcp.NetModel( dict(id='12345678-1234-5678-1234567890ab', tenant_id='aaaaaaaa-aaaa-aaaa-aaaaaaaaaaaa', subnets=[fake_subnet1], ports=[fake_port1, fake_port2])) nc = dhcp_agent.NetworkCache() nc.put(fake_net) nc.remove_port(fake_port2) self.assertEqual(len(nc.port_lookup), 1) self.assertNotIn(fake_port2, fake_net.ports) def test_get_port_by_id(self): nc = dhcp_agent.NetworkCache() nc.put(fake_network) self.assertEqual(nc.get_port_by_id(fake_port1.id), fake_port1) class FakePort1(object): id = 'eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee' class FakeV4Subnet(object): id = 'dddddddd-dddd-dddd-dddd-dddddddddddd' ip_version = 4 cidr = '192.168.0.0/24' gateway_ip = '192.168.0.1' enable_dhcp = True class FakeV4SubnetOutsideGateway(FakeV4Subnet): gateway_ip = '192.168.1.1' class FakeV4SubnetNoGateway(object): id = 'eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee' ip_version = 4 cidr = '192.168.1.0/24' gateway_ip = None enable_dhcp = True class FakeV4Network(object): id = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa' subnets = [FakeV4Subnet()] ports = [FakePort1()] namespace = 'qdhcp-aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa' class FakeV4NetworkOutsideGateway(FakeV4Network): subnets = [FakeV4SubnetOutsideGateway()] class FakeV4NetworkNoSubnet(object): id = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa' subnets = [] ports = [] class FakeV4NetworkNoGateway(object): id = 'cccccccc-cccc-cccc-cccc-cccccccccccc' subnets = [FakeV4SubnetNoGateway()] ports = [FakePort1()] class TestDeviceManager(base.BaseTestCase): def setUp(self): super(TestDeviceManager, self).setUp() config.register_interface_driver_opts_helper(cfg.CONF) cfg.CONF.register_opts(dhcp_config.DHCP_AGENT_OPTS) cfg.CONF.set_override('interface_driver', 'neutron.agent.linux.interface.NullDriver') cfg.CONF.set_override('enable_isolated_metadata', True) self.ensure_device_is_ready_p = mock.patch( 'neutron.agent.linux.ip_lib.ensure_device_is_ready') self.ensure_device_is_ready = (self.ensure_device_is_ready_p.start()) self.dvr_cls_p = mock.patch('neutron.agent.linux.interface.NullDriver') self.iproute_cls_p = mock.patch('neutron.agent.linux.' 
'ip_lib.IpRouteCommand') driver_cls = self.dvr_cls_p.start() iproute_cls = self.iproute_cls_p.start() self.mock_driver = mock.MagicMock() self.mock_driver.DEV_NAME_LEN = ( interface.LinuxInterfaceDriver.DEV_NAME_LEN) self.mock_driver.use_gateway_ips = False self.mock_iproute = mock.MagicMock() driver_cls.return_value = self.mock_driver iproute_cls.return_value = self.mock_iproute iptables_cls_p = mock.patch( 'neutron.agent.linux.iptables_manager.IptablesManager') iptables_cls = iptables_cls_p.start() self.iptables_inst = mock.Mock() iptables_cls.return_value = self.iptables_inst self.mangle_inst = mock.Mock() self.iptables_inst.ipv4 = {'mangle': self.mangle_inst} self.mock_ip_wrapper_p = mock.patch("neutron.agent.linux.ip_lib." "IPWrapper") self.mock_ip_wrapper = self.mock_ip_wrapper_p.start() def _test_setup_helper(self, device_is_ready, net=None, port=None): net = net or fake_network port = port or fake_port1 plugin = mock.Mock() plugin.create_dhcp_port.return_value = port or fake_port1 self.ensure_device_is_ready.return_value = device_is_ready self.mock_driver.get_device_name.return_value = 'tap12345678-12' dh = dhcp.DeviceManager(cfg.CONF, plugin) dh._set_default_route = mock.Mock() dh._cleanup_stale_devices = mock.Mock() interface_name = dh.setup(net) self.assertEqual(interface_name, 'tap12345678-12') plugin.assert_has_calls([ mock.call.create_dhcp_port( {'port': {'name': '', 'admin_state_up': True, 'network_id': net.id, 'tenant_id': net.tenant_id, 'fixed_ips': [{'subnet_id': port.fixed_ips[0].subnet_id}], 'device_id': mock.ANY}})]) if port == fake_ipv6_port: expected_ips = ['169.254.169.254/16'] else: expected_ips = ['172.9.9.9/24', '169.254.169.254/16'] expected = [ mock.call.get_device_name(port), mock.call.init_l3( 'tap12345678-12', expected_ips, namespace=net.namespace)] if not device_is_ready: expected.insert(1, mock.call.plug(net.id, port.id, 'tap12345678-12', 'aa:bb:cc:dd:ee:ff', namespace=net.namespace, mtu=None)) self.mock_driver.assert_has_calls(expected) dh._set_default_route.assert_called_once_with(net, 'tap12345678-12') def test_setup(self): cfg.CONF.set_override('enable_metadata_network', False) self._test_setup_helper(False) cfg.CONF.set_override('enable_metadata_network', True) self._test_setup_helper(False) def test_setup_calls_fill_dhcp_udp_checksums(self): self._test_setup_helper(False) rule = ('-p udp -m udp --dport %d -j CHECKSUM --checksum-fill' % const.DHCP_RESPONSE_PORT) expected = [mock.call.add_rule('POSTROUTING', rule)] self.mangle_inst.assert_has_calls(expected) def test_setup_create_dhcp_port(self): with mock.patch.object(dhcp.ip_lib, 'IPDevice') as mock_IPDevice: plugin = mock.Mock() device = mock.Mock() mock_IPDevice.return_value = device device.route.get_gateway.return_value = None net = copy.deepcopy(fake_network) plugin.create_dhcp_port.return_value = fake_dhcp_port dh = dhcp.DeviceManager(cfg.CONF, plugin) dh.setup(net) plugin.assert_has_calls([ mock.call.create_dhcp_port( {'port': {'name': '', 'admin_state_up': True, 'network_id': net.id, 'tenant_id': net.tenant_id, 'fixed_ips': [{'subnet_id': fake_dhcp_port.fixed_ips[0].subnet_id}], 'device_id': mock.ANY}})]) self.assertIn(fake_dhcp_port, net.ports) def test_setup_plug_exception(self): plugin = mock.Mock() plugin.create_dhcp_port.return_value = fake_dhcp_port self.ensure_device_is_ready.return_value = False self.mock_driver.get_device_name.return_value = 'tap12345678-12' dh = dhcp.DeviceManager(cfg.CONF, plugin) dh._set_default_route = mock.Mock() dh._cleanup_stale_devices = mock.Mock() dh.driver 
= mock.Mock() dh.driver.plug.side_effect = OSError() net = copy.deepcopy(fake_network) self.assertRaises(OSError, dh.setup, net) plugin.release_dhcp_port.assert_called_once_with( net.id, mock.ANY) def test_setup_ipv6(self): self._test_setup_helper(True, net=fake_network_ipv6, port=fake_ipv6_port) def test_setup_device_is_ready(self): self._test_setup_helper(True) def test_create_dhcp_port_raise_conflict(self): plugin = mock.Mock() dh = dhcp.DeviceManager(cfg.CONF, plugin) plugin.create_dhcp_port.return_value = None self.assertRaises(exceptions.Conflict, dh.setup_dhcp_port, fake_network) def test_create_dhcp_port_create_new(self): plugin = mock.Mock() dh = dhcp.DeviceManager(cfg.CONF, plugin) plugin.create_dhcp_port.return_value = fake_network.ports[0] dh.setup_dhcp_port(fake_network) plugin.assert_has_calls([ mock.call.create_dhcp_port( {'port': {'name': '', 'admin_state_up': True, 'network_id': fake_network.id, 'tenant_id': fake_network.tenant_id, 'fixed_ips': [{'subnet_id': fake_fixed_ip1.subnet_id}], 'device_id': mock.ANY}})]) def test_create_dhcp_port_update_add_subnet(self): plugin = mock.Mock() dh = dhcp.DeviceManager(cfg.CONF, plugin) fake_network_copy = copy.deepcopy(fake_network) fake_network_copy.ports[0].device_id = dh.get_device_id(fake_network) fake_network_copy.subnets[1].enable_dhcp = True plugin.update_dhcp_port.return_value = fake_network.ports[0] dh.setup_dhcp_port(fake_network_copy) port_body = {'port': { 'network_id': fake_network.id, 'fixed_ips': [{'subnet_id': fake_fixed_ip1.subnet_id, 'ip_address': fake_fixed_ip1.ip_address}, {'subnet_id': fake_subnet2.id}]}} plugin.assert_has_calls([ mock.call.update_dhcp_port(fake_network_copy.ports[0].id, port_body)]) def test_update_dhcp_port_raises_conflict(self): plugin = mock.Mock() dh = dhcp.DeviceManager(cfg.CONF, plugin) fake_network_copy = copy.deepcopy(fake_network) fake_network_copy.ports[0].device_id = dh.get_device_id(fake_network) fake_network_copy.subnets[1].enable_dhcp = True plugin.update_dhcp_port.return_value = None self.assertRaises(exceptions.Conflict, dh.setup_dhcp_port, fake_network_copy) def test_create_dhcp_port_no_update_or_create(self): plugin = mock.Mock() dh = dhcp.DeviceManager(cfg.CONF, plugin) fake_network_copy = copy.deepcopy(fake_network) fake_network_copy.ports[0].device_id = dh.get_device_id(fake_network) dh.setup_dhcp_port(fake_network_copy) self.assertFalse(plugin.setup_dhcp_port.called) self.assertFalse(plugin.update_dhcp_port.called) def test_setup_dhcp_port_with_non_enable_dhcp_subnet(self): plugin = mock.Mock() dh = dhcp.DeviceManager(cfg.CONF, plugin) fake_network_copy = copy.deepcopy(fake_network) fake_network_copy.ports[0].device_id = dh.get_device_id(fake_network) plugin.update_dhcp_port.return_value = fake_port1 self.assertEqual(fake_subnet1.id, dh.setup_dhcp_port(fake_network_copy).fixed_ips[0].subnet_id) def test_destroy(self): fake_net = dhcp.NetModel( dict(id=FAKE_NETWORK_UUID, tenant_id='aaaaaaaa-aaaa-aaaa-aaaaaaaaaaaa')) with mock.patch('neutron.agent.linux.interface.NullDriver') as dvr_cls: mock_driver = mock.MagicMock() mock_driver.get_device_name.return_value = 'tap12345678-12' dvr_cls.return_value = mock_driver plugin = mock.Mock() dh = dhcp.DeviceManager(cfg.CONF, plugin) dh.destroy(fake_net, 'tap12345678-12') dvr_cls.assert_called_once_with(cfg.CONF) mock_driver.assert_has_calls( [mock.call.unplug('tap12345678-12', namespace='qdhcp-' + fake_net.id)]) plugin.assert_has_calls( [mock.call.release_dhcp_port(fake_net.id, mock.ANY)]) def test_destroy_with_none(self): fake_net = 
dhcp.NetModel( dict(id=FAKE_NETWORK_UUID, tenant_id='aaaaaaaa-aaaa-aaaa-aaaaaaaaaaaa')) with mock.patch('neutron.agent.linux.interface.NullDriver') as dvr_cls: mock_driver = mock.MagicMock() mock_driver.get_device_name.return_value = 'tap12345678-12' dvr_cls.return_value = mock_driver plugin = mock.Mock() dh = dhcp.DeviceManager(cfg.CONF, plugin) dh.destroy(fake_net, None) dvr_cls.assert_called_once_with(cfg.CONF) plugin.assert_has_calls( [mock.call.release_dhcp_port(fake_net.id, mock.ANY)]) self.assertFalse(mock_driver.called) def test_get_interface_name(self): fake_net = dhcp.NetModel( dict(id='12345678-1234-5678-1234567890ab', tenant_id='aaaaaaaa-aaaa-aaaa-aaaaaaaaaaaa')) fake_port = dhcp.DictModel( dict(id='12345678-1234-aaaa-1234567890ab', mac_address='aa:bb:cc:dd:ee:ff')) with mock.patch('neutron.agent.linux.interface.NullDriver') as dvr_cls: mock_driver = mock.MagicMock() mock_driver.get_device_name.return_value = 'tap12345678-12' dvr_cls.return_value = mock_driver plugin = mock.Mock() dh = dhcp.DeviceManager(cfg.CONF, plugin) dh.get_interface_name(fake_net, fake_port) dvr_cls.assert_called_once_with(cfg.CONF) mock_driver.assert_has_calls( [mock.call.get_device_name(fake_port)]) self.assertEqual(len(plugin.mock_calls), 0) def test_get_device_id(self): fake_net = dhcp.NetModel( dict(id='12345678-1234-5678-1234567890ab', tenant_id='aaaaaaaa-aaaa-aaaa-aaaaaaaaaaaa')) expected = ('dhcp1ae5f96c-c527-5079-82ea-371a01645457-12345678-1234-' '5678-1234567890ab') # the DHCP port name only contains the hostname and not the domain name local_hostname = cfg.CONF.host.split('.')[0] with mock.patch('uuid.uuid5') as uuid5: uuid5.return_value = '1ae5f96c-c527-5079-82ea-371a01645457' dh = dhcp.DeviceManager(cfg.CONF, None) self.assertEqual(dh.get_device_id(fake_net), expected) uuid5.assert_called_once_with(uuid.NAMESPACE_DNS, local_hostname) def test_update(self): # Try with namespaces and no metadata network cfg.CONF.set_override('enable_metadata_network', False) dh = dhcp.DeviceManager(cfg.CONF, None) dh._set_default_route = mock.Mock() network = mock.Mock() dh.update(network, 'ns-12345678-12') dh._set_default_route.assert_called_once_with(network, 'ns-12345678-12') # Meta data network enabled, don't interfere with its gateway. cfg.CONF.set_override('enable_metadata_network', True) dh = dhcp.DeviceManager(cfg.CONF, None) dh._set_default_route = mock.Mock() dh.update(FakeV4Network(), 'ns-12345678-12') self.assertTrue(dh._set_default_route.called) def test_set_default_route(self): dh = dhcp.DeviceManager(cfg.CONF, None) with mock.patch.object(dhcp.ip_lib, 'IPDevice') as mock_IPDevice: device = mock.Mock() mock_IPDevice.return_value = device device.route.get_gateway.return_value = None # Basic one subnet with gateway. network = FakeV4Network() dh._set_default_route(network, 'tap-name') self.assertEqual(device.route.get_gateway.call_count, 1) self.assertFalse(device.route.delete_gateway.called) device.route.add_gateway.assert_called_once_with('192.168.0.1') def test_set_default_route_outside_subnet(self): dh = dhcp.DeviceManager(cfg.CONF, None) with mock.patch.object(dhcp.ip_lib, 'IPDevice') as mock_IPDevice: device = mock.Mock() mock_IPDevice.return_value = device device.route.get_gateway.return_value = None # Basic one subnet with gateway outside the subnet. 
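            # (192.168.1.1 lies outside 192.168.0.0/24, so an on-link route
            # to the gateway has to be installed before the default route)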
network = FakeV4NetworkOutsideGateway() dh._set_default_route(network, 'tap-name') self.assertEqual(device.route.get_gateway.call_count, 1) self.assertFalse(device.route.delete_gateway.called) device.route.add_route.assert_called_once_with('192.168.1.1', scope='link') device.route.add_gateway.assert_called_once_with('192.168.1.1') def test_set_default_route_no_subnet(self): dh = dhcp.DeviceManager(cfg.CONF, None) with mock.patch.object(dhcp.ip_lib, 'IPDevice') as mock_IPDevice: device = mock.Mock() mock_IPDevice.return_value = device device.route.get_gateway.return_value = None network = FakeV4NetworkNoSubnet() network.namespace = 'qdhcp-1234' dh._set_default_route(network, 'tap-name') self.assertEqual(device.route.get_gateway.call_count, 1) self.assertFalse(device.route.delete_gateway.called) self.assertFalse(device.route.add_gateway.called) def test_set_default_route_no_subnet_delete_gateway(self): dh = dhcp.DeviceManager(cfg.CONF, None) with mock.patch.object(dhcp.ip_lib, 'IPDevice') as mock_IPDevice: device = mock.Mock() mock_IPDevice.return_value = device device.route.get_gateway.return_value = dict(gateway='192.168.0.1') network = FakeV4NetworkNoSubnet() network.namespace = 'qdhcp-1234' dh._set_default_route(network, 'tap-name') self.assertEqual(device.route.get_gateway.call_count, 1) device.route.delete_gateway.assert_called_once_with('192.168.0.1') self.assertFalse(device.route.add_gateway.called) def test_set_default_route_no_gateway(self): dh = dhcp.DeviceManager(cfg.CONF, None) with mock.patch.object(dhcp.ip_lib, 'IPDevice') as mock_IPDevice: device = mock.Mock() mock_IPDevice.return_value = device device.route.get_gateway.return_value = dict(gateway='192.168.0.1') network = FakeV4NetworkNoGateway() network.namespace = 'qdhcp-1234' dh._set_default_route(network, 'tap-name') self.assertEqual(device.route.get_gateway.call_count, 1) device.route.delete_gateway.assert_called_once_with('192.168.0.1') self.assertFalse(device.route.add_gateway.called) def test_set_default_route_do_nothing(self): dh = dhcp.DeviceManager(cfg.CONF, None) with mock.patch.object(dhcp.ip_lib, 'IPDevice') as mock_IPDevice: device = mock.Mock() mock_IPDevice.return_value = device device.route.get_gateway.return_value = dict(gateway='192.168.0.1') network = FakeV4Network() dh._set_default_route(network, 'tap-name') self.assertEqual(device.route.get_gateway.call_count, 1) self.assertFalse(device.route.delete_gateway.called) self.assertFalse(device.route.add_gateway.called) def test_set_default_route_change_gateway(self): dh = dhcp.DeviceManager(cfg.CONF, None) with mock.patch.object(dhcp.ip_lib, 'IPDevice') as mock_IPDevice: device = mock.Mock() mock_IPDevice.return_value = device device.route.get_gateway.return_value = dict(gateway='192.168.0.2') network = FakeV4Network() dh._set_default_route(network, 'tap-name') self.assertEqual(device.route.get_gateway.call_count, 1) self.assertFalse(device.route.delete_gateway.called) device.route.add_gateway.assert_called_once_with('192.168.0.1') def test_set_default_route_change_gateway_outside_subnet(self): dh = dhcp.DeviceManager(cfg.CONF, None) with mock.patch.object(dhcp.ip_lib, 'IPDevice') as mock_IPDevice: device = mock.Mock() mock_IPDevice.return_value = device device.route.list_onlink_routes.return_value = ( [{'cidr': '192.168.2.1'}]) device.route.get_gateway.return_value = dict(gateway='192.168.2.1') network = FakeV4NetworkOutsideGateway() dh._set_default_route(network, 'tap-name') self.assertEqual(device.route.get_gateway.call_count, 1) 
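            # moving to a gateway in a different subnet must replace the
            # stale on-link route with one for the new gateway before the
            # default route is updated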
self.assertEqual(device.route.list_onlink_routes.call_count, 2) self.assertFalse(device.route.delete_gateway.called) device.route.delete_route.assert_called_once_with('192.168.2.1', scope='link') device.route.add_route.assert_called_once_with('192.168.1.1', scope='link') device.route.add_gateway.assert_called_once_with('192.168.1.1') def test_set_default_route_two_subnets(self): # Try two subnets. Should set gateway from the first. dh = dhcp.DeviceManager(cfg.CONF, None) with mock.patch.object(dhcp.ip_lib, 'IPDevice') as mock_IPDevice: device = mock.Mock() mock_IPDevice.return_value = device device.route.get_gateway.return_value = None network = FakeV4Network() subnet2 = FakeV4Subnet() subnet2.gateway_ip = '192.168.1.1' network.subnets = [subnet2, FakeV4Subnet()] dh._set_default_route(network, 'tap-name') self.assertEqual(device.route.get_gateway.call_count, 1) self.assertFalse(device.route.delete_gateway.called) device.route.add_gateway.assert_called_once_with('192.168.1.1') class TestDictModel(base.BaseTestCase): def test_basic_dict(self): d = dict(a=1, b=2) m = dhcp.DictModel(d) self.assertEqual(m.a, 1) self.assertEqual(m.b, 2) def test_dict_has_sub_dict(self): d = dict(a=dict(b=2)) m = dhcp.DictModel(d) self.assertEqual(m.a.b, 2) def test_dict_contains_list(self): d = dict(a=[1, 2]) m = dhcp.DictModel(d) self.assertEqual(m.a, [1, 2]) def test_dict_contains_list_of_dicts(self): d = dict(a=[dict(b=2), dict(c=3)]) m = dhcp.DictModel(d) self.assertEqual(m.a[0].b, 2) self.assertEqual(m.a[1].c, 3) neutron-8.4.0/neutron/tests/unit/agent/dhcp/__init__.py0000664000567000056710000000000013044372736024315 0ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/tests/unit/agent/test_securitygroups_rpc.py0000664000567000056710000044555313044372760026701 0ustar jenkinsjenkins00000000000000# Copyright 2012, Nachi Ueno, NTT MCL, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
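
# The tests below exercise the server side of the security group RPC API:
# building per-device rule lists via security_group_rules_for_devices, and
# the more compact security_group_info_for_devices variant that returns
# unexpanded rules together with per-group member IP sets.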
import collections
import contextlib

import mock
from oslo_config import cfg
import oslo_messaging
from testtools import matchers
import webob.exc

from neutron.agent import firewall as firewall_base
from neutron.agent.linux import iptables_manager
from neutron.agent import securitygroups_rpc as sg_rpc
from neutron.api.rpc.handlers import securitygroups_rpc
from neutron.common import constants as const
from neutron.common import ipv6_utils as ipv6
from neutron.common import rpc as n_rpc
from neutron import context
from neutron.db import securitygroups_rpc_base as sg_db_rpc
from neutron.extensions import allowedaddresspairs as addr_pair
from neutron.extensions import securitygroup as ext_sg
from neutron import manager
from neutron.plugins.ml2.drivers.openvswitch.agent import ovs_neutron_agent
from neutron.tests import base
from neutron.tests import tools
from neutron.tests.unit.extensions import test_securitygroup as test_sg


FAKE_PREFIX = {const.IPv4: '10.0.0.0/24',
               const.IPv6: '2001:db8::/64'}
FAKE_IP = {const.IPv4: '10.0.0.1',
           const.IPv6: 'fe80::1',
           'IPv6_GLOBAL': '2001:db8::1',
           'IPv6_LLA': 'fe80::123',
           'IPv6_DHCP': '2001:db8::3'}

TEST_PLUGIN_CLASS = ('neutron.tests.unit.agent.test_securitygroups_rpc.'
                     'SecurityGroupRpcTestPlugin')


FIREWALL_BASE_PACKAGE = 'neutron.agent.linux.iptables_firewall.'
FIREWALL_IPTABLES_DRIVER = FIREWALL_BASE_PACKAGE + 'IptablesFirewallDriver'
FIREWALL_HYBRID_DRIVER = (FIREWALL_BASE_PACKAGE +
                          'OVSHybridIptablesFirewallDriver')
FIREWALL_NOOP_DRIVER = 'neutron.agent.firewall.NoopFirewallDriver'


def set_enable_security_groups(enabled):
    cfg.CONF.set_override('enable_security_group', enabled,
                          group='SECURITYGROUP')


def set_firewall_driver(firewall_driver):
    cfg.CONF.set_override('firewall_driver', firewall_driver,
                          group='SECURITYGROUP')


class FakeFirewallDriver(firewall_base.FirewallDriver):
    """Fake FirewallDriver

    FirewallDriver is the base class for the other driver types. To be able
    to use it in tests, all of its abstract methods have to be overridden.
""" def prepare_port_filter(self, port): raise NotImplementedError() def update_port_filter(self, port): raise NotImplementedError() class SecurityGroupRpcTestPlugin(test_sg.SecurityGroupTestPlugin, sg_db_rpc.SecurityGroupServerRpcMixin): def __init__(self): super(SecurityGroupRpcTestPlugin, self).__init__() self.notifier = mock.Mock() self.devices = {} def create_port(self, context, port): result = super(SecurityGroupRpcTestPlugin, self).create_port(context, port) self.devices[result['id']] = result self.notify_security_groups_member_updated(context, result) return result def update_port(self, context, id, port): original_port = self.get_port(context, id) updated_port = super(SecurityGroupRpcTestPlugin, self).update_port(context, id, port) self.devices[id] = updated_port self.update_security_group_on_port( context, id, port, original_port, updated_port) def delete_port(self, context, id): port = self.get_port(context, id) super(SecurityGroupRpcTestPlugin, self).delete_port(context, id) self.notify_security_groups_member_updated(context, port) del self.devices[id] def get_port_from_device(self, context, device): device = self.devices.get(device) if device: device['security_group_rules'] = [] device['security_group_source_groups'] = [] device['fixed_ips'] = [ip['ip_address'] for ip in device['fixed_ips']] return device class SGServerRpcCallBackTestCase(test_sg.SecurityGroupDBTestCase): def setUp(self, plugin=None): plugin = plugin or TEST_PLUGIN_CLASS set_firewall_driver(FIREWALL_NOOP_DRIVER) super(SGServerRpcCallBackTestCase, self).setUp(plugin) self.notifier = manager.NeutronManager.get_plugin().notifier self.rpc = securitygroups_rpc.SecurityGroupServerRpcCallback() def _test_security_group_port(self, device_owner, gw_ip, cidr, ip_version, ip_address): with self.network() as net: with self.subnet(net, gateway_ip=gw_ip, cidr=cidr, ip_version=ip_version) as subnet: kwargs = { 'fixed_ips': [{'subnet_id': subnet['subnet']['id'], 'ip_address': ip_address}]} if device_owner: kwargs['device_owner'] = device_owner res = self._create_port( self.fmt, net['network']['id'], **kwargs) res = self.deserialize(self.fmt, res) port_id = res['port']['id'] if device_owner in const.ROUTER_INTERFACE_OWNERS: data = {'port': {'fixed_ips': []}} req = self.new_update_request('ports', data, port_id) res = self.deserialize(self.fmt, req.get_response(self.api)) self._delete('ports', port_id) def test_notify_security_group_ipv6_gateway_port_added(self): self._test_security_group_port( const.DEVICE_OWNER_ROUTER_INTF, '2001:0db8::1', '2001:0db8::/64', 6, '2001:0db8::1') self.assertTrue(self.notifier.security_groups_provider_updated.called) def test_notify_security_group_dvr_ipv6_gateway_port_added(self): self._test_security_group_port( const.DEVICE_OWNER_DVR_INTERFACE, '2001:0db8::1', '2001:0db8::/64', 6, '2001:0db8::2') self.assertTrue(self.notifier.security_groups_provider_updated.called) def test_notify_security_group_ipv6_normal_port_added(self): self._test_security_group_port( None, '2001:0db8::1', '2001:0db8::/64', 6, '2001:0db8::3') self.assertFalse(self.notifier.security_groups_provider_updated.called) def test_notify_security_group_ipv4_dhcp_port_added(self): self._test_security_group_port( const.DEVICE_OWNER_DHCP, '192.168.1.1', '192.168.1.0/24', 4, '192.168.1.2') self.assertTrue(self.notifier.security_groups_provider_updated.called) def test_notify_security_group_ipv4_gateway_port_added(self): self._test_security_group_port( const.DEVICE_OWNER_ROUTER_INTF, '192.168.1.1', '192.168.1.0/24', 4, '192.168.1.1') 
self.assertFalse(self.notifier.security_groups_provider_updated.called) def test_notify_security_group_ipv4_normal_port_added(self): self._test_security_group_port( None, '192.168.1.1', '192.168.1.0/24', 4, '192.168.1.3') self.assertFalse(self.notifier.security_groups_provider_updated.called) def _test_sg_rules_for_devices_ipv4_ingress_port_range( self, min_port, max_port): fake_prefix = FAKE_PREFIX[const.IPv4] with self.network() as n,\ self.subnet(n),\ self.security_group() as sg1: sg1_id = sg1['security_group']['id'] rule1 = self._build_security_group_rule( sg1_id, 'ingress', const.PROTO_NAME_TCP, str(min_port), str(max_port)) rule2 = self._build_security_group_rule( sg1_id, 'ingress', const.PROTO_NAME_TCP, '23', '23', fake_prefix) rules = { 'security_group_rules': [rule1['security_group_rule'], rule2['security_group_rule']]} res = self._create_security_group_rule(self.fmt, rules) self.deserialize(self.fmt, res) self.assertEqual(webob.exc.HTTPCreated.code, res.status_int) res1 = self._create_port( self.fmt, n['network']['id'], security_groups=[sg1_id]) ports_rest1 = self.deserialize(self.fmt, res1) port_id1 = ports_rest1['port']['id'] self.rpc.devices = {port_id1: ports_rest1['port']} devices = [port_id1, 'no_exist_device'] ctx = context.get_admin_context() ports_rpc = self.rpc.security_group_rules_for_devices( ctx, devices=devices) port_rpc = ports_rpc[port_id1] expected = [{'direction': 'egress', 'ethertype': const.IPv4, 'security_group_id': sg1_id}, {'direction': 'egress', 'ethertype': const.IPv6, 'security_group_id': sg1_id}, {'direction': 'ingress', 'protocol': const.PROTO_NAME_TCP, 'ethertype': const.IPv4, 'port_range_max': max_port, 'security_group_id': sg1_id, 'port_range_min': min_port}, {'direction': 'ingress', 'protocol': const.PROTO_NAME_TCP, 'ethertype': const.IPv4, 'port_range_max': 23, 'security_group_id': sg1_id, 'port_range_min': 23, 'source_ip_prefix': fake_prefix}, ] self.assertEqual(port_rpc['security_group_rules'], expected) self._delete('ports', port_id1) def test_sg_rules_for_devices_ipv4_ingress_port_range_min_port_1(self): self._test_sg_rules_for_devices_ipv4_ingress_port_range(1, 10) def test_security_group_info_for_ports_with_no_rules(self): with self.network() as n,\ self.subnet(n),\ self.security_group() as sg: sg_id = sg['security_group']['id'] self._delete_default_security_group_egress_rules(sg_id) res = self._create_port( self.fmt, n['network']['id'], security_groups=[sg_id]) ports_rest = self.deserialize(self.fmt, res) port_id = ports_rest['port']['id'] self.rpc.devices = {port_id: ports_rest['port']} devices = [port_id] ctx = context.get_admin_context() sg_info = self.rpc.security_group_info_for_devices( ctx, devices=devices) expected = {sg_id: []} self.assertEqual(expected, sg_info['security_groups']) self._delete('ports', port_id) @contextlib.contextmanager def _port_with_addr_pairs_and_security_group(self): plugin_obj = manager.NeutronManager.get_plugin() if ('allowed-address-pairs' not in plugin_obj.supported_extension_aliases): self.skipTest("Test depends on allowed-address-pairs extension") fake_prefix = FAKE_PREFIX['IPv4'] with self.network() as n,\ self.subnet(n),\ self.security_group() as sg1: sg1_id = sg1['security_group']['id'] rule1 = self._build_security_group_rule( sg1_id, 'ingress', 'tcp', '22', '22', remote_group_id=sg1_id) rule2 = self._build_security_group_rule( sg1_id, 'ingress', 'tcp', '23', '23', fake_prefix) rules = { 'security_group_rules': [rule1['security_group_rule'], rule2['security_group_rule']]} res = 
self._create_security_group_rule(self.fmt, rules) self.deserialize(self.fmt, res) self.assertEqual(res.status_int, 201) address_pairs = [{'mac_address': '00:00:00:00:00:01', 'ip_address': '10.0.1.0/24'}, {'mac_address': '00:00:00:00:00:01', 'ip_address': '11.0.0.1'}] res1 = self._create_port( self.fmt, n['network']['id'], security_groups=[sg1_id], arg_list=(addr_pair.ADDRESS_PAIRS,), allowed_address_pairs=address_pairs) yield self.deserialize(self.fmt, res1) def test_security_group_info_for_devices_ipv4_addr_pair(self): with self._port_with_addr_pairs_and_security_group() as port: port_id = port['port']['id'] sg_id = port['port']['security_groups'][0] devices = [port_id, 'no_exist_device'] ctx = context.get_admin_context() # verify that address pairs are included in remote SG IPs sg_member_ips = self.rpc.security_group_info_for_devices( ctx, devices=devices)['sg_member_ips'] expected_member_ips = [ '10.0.1.0/24', '11.0.0.1', port['port']['fixed_ips'][0]['ip_address']] self.assertEqual(sorted(expected_member_ips), sorted(sg_member_ips[sg_id]['IPv4'])) self._delete('ports', port_id) def test_security_group_rules_for_devices_ipv4_ingress_addr_pair(self): fake_prefix = FAKE_PREFIX[const.IPv4] with self._port_with_addr_pairs_and_security_group() as port: port_id = port['port']['id'] sg_id = port['port']['security_groups'][0] devices = [port_id, 'no_exist_device'] ctx = context.get_admin_context() ports_rpc = self.rpc.security_group_rules_for_devices( ctx, devices=devices) port_rpc = ports_rpc[port_id] expected = [{'direction': 'egress', 'ethertype': 'IPv4', 'security_group_id': sg_id}, {'direction': 'egress', 'ethertype': 'IPv6', 'security_group_id': sg_id}, {'direction': 'ingress', 'protocol': 'tcp', 'ethertype': 'IPv4', 'port_range_max': 22, 'remote_group_id': sg_id, 'security_group_id': sg_id, 'source_ip_prefix': '11.0.0.1/32', 'port_range_min': 22}, {'direction': 'ingress', 'protocol': 'tcp', 'ethertype': 'IPv4', 'port_range_max': 22, 'remote_group_id': sg_id, 'security_group_id': sg_id, 'source_ip_prefix': '10.0.1.0/24', 'port_range_min': 22}, {'direction': 'ingress', 'protocol': 'tcp', 'ethertype': 'IPv4', 'port_range_max': 23, 'security_group_id': sg_id, 'port_range_min': 23, 'source_ip_prefix': fake_prefix}, ] expected = tools.UnorderedList(expected) self.assertEqual(expected, port_rpc['security_group_rules']) self.assertEqual(port['port']['allowed_address_pairs'], port_rpc['allowed_address_pairs']) self._delete('ports', port_id) def test_security_group_rules_for_devices_ipv4_egress(self): fake_prefix = FAKE_PREFIX[const.IPv4] with self.network() as n,\ self.subnet(n),\ self.security_group() as sg1: sg1_id = sg1['security_group']['id'] rule1 = self._build_security_group_rule( sg1_id, 'egress', const.PROTO_NAME_TCP, '22', '22') rule2 = self._build_security_group_rule( sg1_id, 'egress', const.PROTO_NAME_UDP, '23', '23', fake_prefix) rules = { 'security_group_rules': [rule1['security_group_rule'], rule2['security_group_rule']]} res = self._create_security_group_rule(self.fmt, rules) self.deserialize(self.fmt, res) self.assertEqual(webob.exc.HTTPCreated.code, res.status_int) res1 = self._create_port( self.fmt, n['network']['id'], security_groups=[sg1_id]) ports_rest1 = self.deserialize(self.fmt, res1) port_id1 = ports_rest1['port']['id'] self.rpc.devices = {port_id1: ports_rest1['port']} devices = [port_id1, 'no_exist_device'] ctx = context.get_admin_context() ports_rpc = self.rpc.security_group_rules_for_devices( ctx, devices=devices) port_rpc = ports_rpc[port_id1] expected = 
[{'direction': 'egress', 'ethertype': const.IPv4, 'security_group_id': sg1_id}, {'direction': 'egress', 'ethertype': const.IPv6, 'security_group_id': sg1_id}, {'direction': 'egress', 'protocol': const.PROTO_NAME_TCP, 'ethertype': const.IPv4, 'port_range_max': 22, 'security_group_id': sg1_id, 'port_range_min': 22}, {'direction': 'egress', 'protocol': const.PROTO_NAME_UDP, 'ethertype': const.IPv4, 'port_range_max': 23, 'security_group_id': sg1_id, 'port_range_min': 23, 'dest_ip_prefix': fake_prefix}, ] self.assertEqual(port_rpc['security_group_rules'], expected) self._delete('ports', port_id1) def test_security_group_rules_for_devices_ipv4_source_group(self): with self.network() as n,\ self.subnet(n),\ self.security_group() as sg1,\ self.security_group() as sg2: sg1_id = sg1['security_group']['id'] sg2_id = sg2['security_group']['id'] rule1 = self._build_security_group_rule( sg1_id, 'ingress', const.PROTO_NAME_TCP, '24', '25', remote_group_id=sg2['security_group']['id']) rules = { 'security_group_rules': [rule1['security_group_rule']]} res = self._create_security_group_rule(self.fmt, rules) self.deserialize(self.fmt, res) self.assertEqual(webob.exc.HTTPCreated.code, res.status_int) res1 = self._create_port( self.fmt, n['network']['id'], security_groups=[sg1_id, sg2_id]) ports_rest1 = self.deserialize(self.fmt, res1) port_id1 = ports_rest1['port']['id'] self.rpc.devices = {port_id1: ports_rest1['port']} devices = [port_id1, 'no_exist_device'] res2 = self._create_port( self.fmt, n['network']['id'], security_groups=[sg2_id]) ports_rest2 = self.deserialize(self.fmt, res2) port_id2 = ports_rest2['port']['id'] ctx = context.get_admin_context() ports_rpc = self.rpc.security_group_rules_for_devices( ctx, devices=devices) port_rpc = ports_rpc[port_id1] expected = [{'direction': 'egress', 'ethertype': const.IPv4, 'security_group_id': sg1_id}, {'direction': 'egress', 'ethertype': const.IPv6, 'security_group_id': sg1_id}, {'direction': 'egress', 'ethertype': const.IPv4, 'security_group_id': sg2_id}, {'direction': 'egress', 'ethertype': const.IPv6, 'security_group_id': sg2_id}, {'direction': u'ingress', 'source_ip_prefix': u'10.0.0.3/32', 'protocol': const.PROTO_NAME_TCP, 'ethertype': const.IPv4, 'port_range_max': 25, 'port_range_min': 24, 'remote_group_id': sg2_id, 'security_group_id': sg1_id}, ] self.assertEqual(port_rpc['security_group_rules'], expected) self._delete('ports', port_id1) self._delete('ports', port_id2) def test_security_group_info_for_devices_ipv4_source_group(self): with self.network() as n,\ self.subnet(n),\ self.security_group() as sg1,\ self.security_group() as sg2: sg1_id = sg1['security_group']['id'] sg2_id = sg2['security_group']['id'] rule1 = self._build_security_group_rule( sg1_id, 'ingress', const.PROTO_NAME_TCP, '24', '25', remote_group_id=sg2['security_group']['id']) rules = { 'security_group_rules': [rule1['security_group_rule']]} res = self._create_security_group_rule(self.fmt, rules) self.deserialize(self.fmt, res) self.assertEqual(webob.exc.HTTPCreated.code, res.status_int) res1 = self._create_port( self.fmt, n['network']['id'], security_groups=[sg1_id]) ports_rest1 = self.deserialize(self.fmt, res1) port_id1 = ports_rest1['port']['id'] self.rpc.devices = {port_id1: ports_rest1['port']} devices = [port_id1, 'no_exist_device'] res2 = self._create_port( self.fmt, n['network']['id'], security_groups=[sg2_id]) ports_rest2 = self.deserialize(self.fmt, res2) port_id2 = ports_rest2['port']['id'] ctx = context.get_admin_context() ports_rpc = 
self.rpc.security_group_info_for_devices( ctx, devices=devices) expected = { 'security_groups': {sg1_id: [ {'direction': 'egress', 'ethertype': const.IPv4}, {'direction': 'egress', 'ethertype': const.IPv6}, {'direction': u'ingress', 'protocol': const.PROTO_NAME_TCP, 'ethertype': const.IPv4, 'port_range_max': 25, 'port_range_min': 24, 'remote_group_id': sg2_id} ]}, 'sg_member_ips': {sg2_id: { 'IPv4': set([u'10.0.0.3']), 'IPv6': set(), }} } self.assertEqual(expected['security_groups'], ports_rpc['security_groups']) self.assertEqual(expected['sg_member_ips'][sg2_id]['IPv4'], ports_rpc['sg_member_ips'][sg2_id]['IPv4']) self._delete('ports', port_id1) self._delete('ports', port_id2) def test_security_group_rules_for_devices_ipv6_ingress(self): fake_prefix = FAKE_PREFIX[const.IPv6] fake_gateway = FAKE_IP[const.IPv6] with self.network() as n,\ self.subnet(n, gateway_ip=fake_gateway, cidr=fake_prefix, ip_version=6 ) as subnet_v6,\ self.security_group() as sg1: sg1_id = sg1['security_group']['id'] rule1 = self._build_security_group_rule( sg1_id, 'ingress', const.PROTO_NAME_TCP, '22', '22', ethertype=const.IPv6) rule2 = self._build_security_group_rule( sg1_id, 'ingress', const.PROTO_NAME_UDP, '23', '23', fake_prefix, ethertype=const.IPv6) rules = { 'security_group_rules': [rule1['security_group_rule'], rule2['security_group_rule']]} res = self._create_security_group_rule(self.fmt, rules) self.deserialize(self.fmt, res) self.assertEqual(webob.exc.HTTPCreated.code, res.status_int) dhcp_port = self._create_port( self.fmt, n['network']['id'], fixed_ips=[{'subnet_id': subnet_v6['subnet']['id'], 'ip_address': FAKE_IP['IPv6_DHCP']}], device_owner=const.DEVICE_OWNER_DHCP, security_groups=[sg1_id]) dhcp_rest = self.deserialize(self.fmt, dhcp_port) dhcp_mac = dhcp_rest['port']['mac_address'] dhcp_lla_ip = str(ipv6.get_ipv6_addr_by_EUI64( const.IPV6_LLA_PREFIX, dhcp_mac)) res1 = self._create_port( self.fmt, n['network']['id'], fixed_ips=[{'subnet_id': subnet_v6['subnet']['id']}], security_groups=[sg1_id]) ports_rest1 = self.deserialize(self.fmt, res1) port_id1 = ports_rest1['port']['id'] self.rpc.devices = {port_id1: ports_rest1['port']} devices = [port_id1, 'no_exist_device'] ctx = context.get_admin_context() ports_rpc = self.rpc.security_group_rules_for_devices( ctx, devices=devices) port_rpc = ports_rpc[port_id1] source_port, dest_port, ethertype = sg_db_rpc.DHCP_RULE_PORT[6] expected = [{'direction': 'egress', 'ethertype': const.IPv4, 'security_group_id': sg1_id}, {'direction': 'egress', 'ethertype': const.IPv6, 'security_group_id': sg1_id}, {'direction': 'ingress', 'protocol': const.PROTO_NAME_TCP, 'ethertype': const.IPv6, 'port_range_max': 22, 'security_group_id': sg1_id, 'port_range_min': 22}, {'direction': 'ingress', 'protocol': const.PROTO_NAME_UDP, 'ethertype': const.IPv6, 'port_range_max': 23, 'security_group_id': sg1_id, 'port_range_min': 23, 'source_ip_prefix': fake_prefix}, {'direction': 'ingress', 'protocol': const.PROTO_NAME_IPV6_ICMP, 'ethertype': const.IPv6, 'source_ip_prefix': fake_gateway, 'source_port_range_min': const.ICMPV6_TYPE_RA}, {'direction': 'ingress', 'ethertype': ethertype, 'port_range_max': dest_port, 'port_range_min': dest_port, 'protocol': const.PROTO_NAME_UDP, 'source_ip_prefix': dhcp_lla_ip, 'source_port_range_max': source_port, 'source_port_range_min': source_port} ] self.assertEqual(port_rpc['security_group_rules'], expected) self._delete('ports', port_id1) def test_security_group_info_for_devices_only_ipv6_rule(self): with self.network() as n,\ self.subnet(n),\ 
self.security_group() as sg1: sg1_id = sg1['security_group']['id'] rule1 = self._build_security_group_rule( sg1_id, 'ingress', const.PROTO_NAME_TCP, '22', '22', remote_group_id=sg1_id, ethertype=const.IPv6) rules = { 'security_group_rules': [rule1['security_group_rule']]} self._make_security_group_rule(self.fmt, rules) res1 = self._create_port( self.fmt, n['network']['id'], security_groups=[sg1_id]) ports_rest1 = self.deserialize(self.fmt, res1) port_id1 = ports_rest1['port']['id'] self.rpc.devices = {port_id1: ports_rest1['port']} devices = [port_id1, 'no_exist_device'] ctx = context.get_admin_context() ports_rpc = self.rpc.security_group_info_for_devices( ctx, devices=devices) expected = { 'security_groups': {sg1_id: [ {'direction': 'egress', 'ethertype': const.IPv4}, {'direction': 'egress', 'ethertype': const.IPv6}, {'direction': u'ingress', 'protocol': const.PROTO_NAME_TCP, 'ethertype': const.IPv6, 'port_range_max': 22, 'port_range_min': 22, 'remote_group_id': sg1_id} ]}, 'sg_member_ips': {sg1_id: { 'IPv6': set(), }} } self.assertEqual(expected['security_groups'], ports_rpc['security_groups']) self.assertEqual(expected['sg_member_ips'][sg1_id]['IPv6'], ports_rpc['sg_member_ips'][sg1_id]['IPv6']) self._delete('ports', port_id1) def test_security_group_ra_rules_for_devices_ipv6_gateway_global(self): fake_prefix = FAKE_PREFIX[const.IPv6] fake_gateway = FAKE_IP['IPv6_GLOBAL'] with self.network() as n,\ self.subnet(n, gateway_ip=fake_gateway, cidr=fake_prefix, ip_version=6, ipv6_ra_mode=const.IPV6_SLAAC ) as subnet_v6,\ self.security_group() as sg1: sg1_id = sg1['security_group']['id'] rule1 = self._build_security_group_rule( sg1_id, 'ingress', const.PROTO_NAME_TCP, '22', '22', ethertype=const.IPv6) rules = { 'security_group_rules': [rule1['security_group_rule']]} self._make_security_group_rule(self.fmt, rules) # Create gateway port gateway_res = self._make_port( self.fmt, n['network']['id'], fixed_ips=[{'subnet_id': subnet_v6['subnet']['id'], 'ip_address': fake_gateway}], device_owner=const.DEVICE_OWNER_ROUTER_INTF) gateway_mac = gateway_res['port']['mac_address'] gateway_port_id = gateway_res['port']['id'] gateway_lla_ip = str(ipv6.get_ipv6_addr_by_EUI64( const.IPV6_LLA_PREFIX, gateway_mac)) ports_rest1 = self._make_port( self.fmt, n['network']['id'], fixed_ips=[{'subnet_id': subnet_v6['subnet']['id']}], security_groups=[sg1_id]) port_id1 = ports_rest1['port']['id'] self.rpc.devices = {port_id1: ports_rest1['port']} devices = [port_id1, 'no_exist_device'] ctx = context.get_admin_context() ports_rpc = self.rpc.security_group_rules_for_devices( ctx, devices=devices) port_rpc = ports_rpc[port_id1] expected = [{'direction': 'egress', 'ethertype': const.IPv4, 'security_group_id': sg1_id}, {'direction': 'egress', 'ethertype': const.IPv6, 'security_group_id': sg1_id}, {'direction': 'ingress', 'protocol': const.PROTO_NAME_TCP, 'ethertype': const.IPv6, 'port_range_max': 22, 'security_group_id': sg1_id, 'port_range_min': 22}, {'direction': 'ingress', 'protocol': const.PROTO_NAME_IPV6_ICMP, 'ethertype': const.IPv6, 'source_ip_prefix': gateway_lla_ip, 'source_port_range_min': const.ICMPV6_TYPE_RA}, ] self.assertEqual(port_rpc['security_group_rules'], expected) self._delete('ports', port_id1) # Note(xuhanp): remove gateway port's fixed_ips or gateway port # deletion will be prevented. 
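# gateway_lla_ip above comes from ipv6.get_ipv6_addr_by_EUI64() (Neutron's
# ipv6_utils helper): the expected RA rule must match the router's
# link-local source address, which is derived from its MAC. A minimal
# standalone sketch of that EUI-64 derivation (illustrative only; the
# real helper is netaddr-based and returns a canonically compressed
# address, which this simple formatter does not attempt):
def mac_to_lla(mac):
    octets = [int(part, 16) for part in mac.split(':')]
    octets[0] ^= 0x02  # flip the universal/local bit
    eui64 = octets[:3] + [0xff, 0xfe] + octets[3:]  # splice ff:fe in
    groups = ['%02x%02x' % (eui64[i], eui64[i + 1])
              for i in range(0, 8, 2)]
    return 'fe80::' + ':'.join(groups)

# e.g. mac_to_lla('12:34:56:78:9a:bc') == 'fe80::1034:56ff:fe78:9abc'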
data = {'port': {'fixed_ips': []}} req = self.new_update_request('ports', data, gateway_port_id) self.deserialize(self.fmt, req.get_response(self.api)) self._delete('ports', gateway_port_id) def test_security_group_rule_for_device_ipv6_multi_router_interfaces(self): fake_prefix = FAKE_PREFIX[const.IPv6] fake_gateway = FAKE_IP['IPv6_GLOBAL'] with self.network() as n,\ self.subnet(n, gateway_ip=fake_gateway, cidr=fake_prefix, ip_version=6, ipv6_ra_mode=const.IPV6_SLAAC ) as subnet_v6,\ self.security_group() as sg1: sg1_id = sg1['security_group']['id'] rule1 = self._build_security_group_rule( sg1_id, 'ingress', const.PROTO_NAME_TCP, '22', '22', ethertype=const.IPv6) rules = { 'security_group_rules': [rule1['security_group_rule']]} self._make_security_group_rule(self.fmt, rules) # Create gateway port gateway_res = self._make_port( self.fmt, n['network']['id'], fixed_ips=[{'subnet_id': subnet_v6['subnet']['id'], 'ip_address': fake_gateway}], device_owner=const.DEVICE_OWNER_ROUTER_INTF) gateway_mac = gateway_res['port']['mac_address'] gateway_port_id = gateway_res['port']['id'] gateway_lla_ip = str(ipv6.get_ipv6_addr_by_EUI64( const.IPV6_LLA_PREFIX, gateway_mac)) # Create another router interface port interface_res = self._make_port( self.fmt, n['network']['id'], fixed_ips=[{'subnet_id': subnet_v6['subnet']['id']}], device_owner=const.DEVICE_OWNER_ROUTER_INTF) interface_port_id = interface_res['port']['id'] ports_rest1 = self._make_port( self.fmt, n['network']['id'], fixed_ips=[{'subnet_id': subnet_v6['subnet']['id']}], security_groups=[sg1_id]) port_id1 = ports_rest1['port']['id'] self.rpc.devices = {port_id1: ports_rest1['port']} devices = [port_id1, 'no_exist_device'] ctx = context.get_admin_context() ports_rpc = self.rpc.security_group_rules_for_devices( ctx, devices=devices) port_rpc = ports_rpc[port_id1] expected = [{'direction': 'egress', 'ethertype': const.IPv4, 'security_group_id': sg1_id}, {'direction': 'egress', 'ethertype': const.IPv6, 'security_group_id': sg1_id}, {'direction': 'ingress', 'protocol': const.PROTO_NAME_TCP, 'ethertype': const.IPv6, 'port_range_max': 22, 'security_group_id': sg1_id, 'port_range_min': 22}, {'direction': 'ingress', 'protocol': const.PROTO_NAME_IPV6_ICMP, 'ethertype': const.IPv6, 'source_ip_prefix': gateway_lla_ip, 'source_port_range_min': const.ICMPV6_TYPE_RA}, ] self.assertEqual(port_rpc['security_group_rules'], expected) self._delete('ports', port_id1) data = {'port': {'fixed_ips': []}} req = self.new_update_request('ports', data, gateway_port_id) self.deserialize(self.fmt, req.get_response(self.api)) req = self.new_update_request('ports', data, interface_port_id) self.deserialize(self.fmt, req.get_response(self.api)) self._delete('ports', gateway_port_id) self._delete('ports', interface_port_id) def test_security_group_ra_rules_for_devices_ipv6_dvr(self): fake_prefix = FAKE_PREFIX[const.IPv6] fake_gateway = FAKE_IP['IPv6_GLOBAL'] with self.network() as n,\ self.subnet(n, gateway_ip=fake_gateway, cidr=fake_prefix, ip_version=6, ipv6_ra_mode=const.IPV6_SLAAC ) as subnet_v6,\ self.security_group() as sg1: sg1_id = sg1['security_group']['id'] rule1 = self._build_security_group_rule( sg1_id, 'ingress', const.PROTO_NAME_TCP, '22', '22', ethertype=const.IPv6) rules = { 'security_group_rules': [rule1['security_group_rule']]} self._make_security_group_rule(self.fmt, rules) # Create DVR router interface port gateway_res = self._make_port( self.fmt, n['network']['id'], fixed_ips=[{'subnet_id': subnet_v6['subnet']['id'], 'ip_address': fake_gateway}], 
device_owner=const.DEVICE_OWNER_DVR_INTERFACE) gateway_mac = gateway_res['port']['mac_address'] gateway_port_id = gateway_res['port']['id'] gateway_lla_ip = str(ipv6.get_ipv6_addr_by_EUI64( const.IPV6_LLA_PREFIX, gateway_mac)) ports_rest1 = self._make_port( self.fmt, n['network']['id'], fixed_ips=[{'subnet_id': subnet_v6['subnet']['id']}], security_groups=[sg1_id]) port_id1 = ports_rest1['port']['id'] self.rpc.devices = {port_id1: ports_rest1['port']} devices = [port_id1, 'no_exist_device'] ctx = context.get_admin_context() ports_rpc = self.rpc.security_group_rules_for_devices( ctx, devices=devices) port_rpc = ports_rpc[port_id1] expected = [{'direction': 'egress', 'ethertype': const.IPv4, 'security_group_id': sg1_id}, {'direction': 'egress', 'ethertype': const.IPv6, 'security_group_id': sg1_id}, {'direction': 'ingress', 'protocol': const.PROTO_NAME_TCP, 'ethertype': const.IPv6, 'port_range_max': 22, 'security_group_id': sg1_id, 'port_range_min': 22}, {'direction': 'ingress', 'protocol': const.PROTO_NAME_IPV6_ICMP, 'ethertype': const.IPv6, 'source_ip_prefix': gateway_lla_ip, 'source_port_range_min': const.ICMPV6_TYPE_RA}, ] self.assertEqual(port_rpc['security_group_rules'], expected) self._delete('ports', port_id1) # Note(xuhanp): remove gateway port's fixed_ips or gateway port # deletion will be prevented. data = {'port': {'fixed_ips': []}} req = self.new_update_request('ports', data, gateway_port_id) self.deserialize(self.fmt, req.get_response(self.api)) self._delete('ports', gateway_port_id) def test_security_group_ra_rules_for_devices_ipv6_gateway_lla(self): fake_prefix = FAKE_PREFIX[const.IPv6] fake_gateway = FAKE_IP['IPv6_LLA'] with self.network() as n,\ self.subnet(n, gateway_ip=fake_gateway, cidr=fake_prefix, ip_version=6, ipv6_ra_mode=const.IPV6_SLAAC ) as subnet_v6,\ self.security_group() as sg1: sg1_id = sg1['security_group']['id'] rule1 = self._build_security_group_rule( sg1_id, 'ingress', const.PROTO_NAME_TCP, '22', '22', ethertype=const.IPv6) rules = { 'security_group_rules': [rule1['security_group_rule']]} self._make_security_group_rule(self.fmt, rules) ports_rest1 = self._make_port( self.fmt, n['network']['id'], fixed_ips=[{'subnet_id': subnet_v6['subnet']['id']}], security_groups=[sg1_id]) port_id1 = ports_rest1['port']['id'] self.rpc.devices = {port_id1: ports_rest1['port']} devices = [port_id1, 'no_exist_device'] ctx = context.get_admin_context() ports_rpc = self.rpc.security_group_rules_for_devices( ctx, devices=devices) port_rpc = ports_rpc[port_id1] expected = [{'direction': 'egress', 'ethertype': const.IPv4, 'security_group_id': sg1_id}, {'direction': 'egress', 'ethertype': const.IPv6, 'security_group_id': sg1_id}, {'direction': 'ingress', 'protocol': const.PROTO_NAME_TCP, 'ethertype': const.IPv6, 'port_range_max': 22, 'security_group_id': sg1_id, 'port_range_min': 22}, {'direction': 'ingress', 'protocol': const.PROTO_NAME_IPV6_ICMP, 'ethertype': const.IPv6, 'source_ip_prefix': fake_gateway, 'source_port_range_min': const.ICMPV6_TYPE_RA}, ] self.assertEqual(port_rpc['security_group_rules'], expected) self._delete('ports', port_id1) def test_security_group_ra_rules_for_devices_ipv6_no_gateway_port(self): fake_prefix = FAKE_PREFIX[const.IPv6] with self.network() as n,\ self.subnet(n, gateway_ip=None, cidr=fake_prefix, ip_version=6, ipv6_ra_mode=const.IPV6_SLAAC ) as subnet_v6,\ self.security_group() as sg1: sg1_id = sg1['security_group']['id'] rule1 = self._build_security_group_rule( sg1_id, 'ingress', const.PROTO_NAME_TCP, '22', '22', ethertype=const.IPv6) rules = { 
'security_group_rules': [rule1['security_group_rule']]} self._make_security_group_rule(self.fmt, rules) ports_rest1 = self._make_port( self.fmt, n['network']['id'], fixed_ips=[{'subnet_id': subnet_v6['subnet']['id']}], security_groups=[sg1_id]) port_id1 = ports_rest1['port']['id'] self.rpc.devices = {port_id1: ports_rest1['port']} devices = [port_id1, 'no_exist_device'] ctx = context.get_admin_context() ports_rpc = self.rpc.security_group_rules_for_devices( ctx, devices=devices) port_rpc = ports_rpc[port_id1] expected = [{'direction': 'egress', 'ethertype': const.IPv4, 'security_group_id': sg1_id}, {'direction': 'egress', 'ethertype': const.IPv6, 'security_group_id': sg1_id}, {'direction': 'ingress', 'protocol': const.PROTO_NAME_TCP, 'ethertype': const.IPv6, 'port_range_max': 22, 'security_group_id': sg1_id, 'port_range_min': 22}, ] self.assertEqual(port_rpc['security_group_rules'], expected) self._delete('ports', port_id1) def test_security_group_rules_for_devices_ipv6_egress(self): fake_prefix = FAKE_PREFIX[const.IPv6] fake_gateway = FAKE_IP[const.IPv6] with self.network() as n,\ self.subnet(n, gateway_ip=fake_gateway, cidr=fake_prefix, ip_version=6 ) as subnet_v6,\ self.security_group() as sg1: sg1_id = sg1['security_group']['id'] rule1 = self._build_security_group_rule( sg1_id, 'egress', const.PROTO_NAME_TCP, '22', '22', ethertype=const.IPv6) rule2 = self._build_security_group_rule( sg1_id, 'egress', const.PROTO_NAME_UDP, '23', '23', fake_prefix, ethertype=const.IPv6) rules = { 'security_group_rules': [rule1['security_group_rule'], rule2['security_group_rule']]} self._make_security_group_rule(self.fmt, rules) ports_rest1 = self._make_port( self.fmt, n['network']['id'], fixed_ips=[{'subnet_id': subnet_v6['subnet']['id']}], security_groups=[sg1_id]) port_id1 = ports_rest1['port']['id'] self.rpc.devices = {port_id1: ports_rest1['port']} devices = [port_id1, 'no_exist_device'] ctx = context.get_admin_context() ports_rpc = self.rpc.security_group_rules_for_devices( ctx, devices=devices) port_rpc = ports_rpc[port_id1] expected = [{'direction': 'egress', 'ethertype': const.IPv4, 'security_group_id': sg1_id}, {'direction': 'egress', 'ethertype': const.IPv6, 'security_group_id': sg1_id}, {'direction': 'egress', 'protocol': const.PROTO_NAME_TCP, 'ethertype': const.IPv6, 'port_range_max': 22, 'security_group_id': sg1_id, 'port_range_min': 22}, {'direction': 'egress', 'protocol': const.PROTO_NAME_UDP, 'ethertype': const.IPv6, 'port_range_max': 23, 'security_group_id': sg1_id, 'port_range_min': 23, 'dest_ip_prefix': fake_prefix}, {'direction': 'ingress', 'protocol': const.PROTO_NAME_IPV6_ICMP, 'ethertype': const.IPv6, 'source_ip_prefix': fake_gateway, 'source_port_range_min': const.ICMPV6_TYPE_RA}, ] self.assertEqual(port_rpc['security_group_rules'], expected) self._delete('ports', port_id1) def test_security_group_rules_for_devices_ipv6_source_group(self): fake_prefix = FAKE_PREFIX[const.IPv6] fake_gateway = FAKE_IP[const.IPv6] with self.network() as n,\ self.subnet(n, gateway_ip=fake_gateway, cidr=fake_prefix, ip_version=6 ) as subnet_v6,\ self.security_group() as sg1,\ self.security_group() as sg2: sg1_id = sg1['security_group']['id'] sg2_id = sg2['security_group']['id'] rule1 = self._build_security_group_rule( sg1_id, 'ingress', const.PROTO_NAME_TCP, '24', '25', ethertype=const.IPv6, remote_group_id=sg2['security_group']['id']) rules = { 'security_group_rules': [rule1['security_group_rule']]} self._make_security_group_rule(self.fmt, rules) ports_rest1 = self._make_port( self.fmt, 
n['network']['id'], fixed_ips=[{'subnet_id': subnet_v6['subnet']['id']}], security_groups=[sg1_id, sg2_id]) port_id1 = ports_rest1['port']['id'] self.rpc.devices = {port_id1: ports_rest1['port']} devices = [port_id1, 'no_exist_device'] ports_rest2 = self._make_port( self.fmt, n['network']['id'], fixed_ips=[{'subnet_id': subnet_v6['subnet']['id']}], security_groups=[sg2_id]) port_id2 = ports_rest2['port']['id'] ctx = context.get_admin_context() ports_rpc = self.rpc.security_group_rules_for_devices( ctx, devices=devices) port_rpc = ports_rpc[port_id1] expected = [{'direction': 'egress', 'ethertype': const.IPv4, 'security_group_id': sg1_id}, {'direction': 'egress', 'ethertype': const.IPv6, 'security_group_id': sg1_id}, {'direction': 'egress', 'ethertype': const.IPv4, 'security_group_id': sg2_id}, {'direction': 'egress', 'ethertype': const.IPv6, 'security_group_id': sg2_id}, {'direction': 'ingress', 'source_ip_prefix': '2001:db8::2/128', 'protocol': const.PROTO_NAME_TCP, 'ethertype': const.IPv6, 'port_range_max': 25, 'port_range_min': 24, 'remote_group_id': sg2_id, 'security_group_id': sg1_id}, {'direction': 'ingress', 'protocol': const.PROTO_NAME_IPV6_ICMP, 'ethertype': const.IPv6, 'source_ip_prefix': fake_gateway, 'source_port_range_min': const.ICMPV6_TYPE_RA}, ] self.assertEqual(port_rpc['security_group_rules'], expected) self._delete('ports', port_id1) self._delete('ports', port_id2) class SecurityGroupAgentRpcTestCaseForNoneDriver(base.BaseTestCase): def test_init_firewall_with_none_driver(self): set_enable_security_groups(False) agent = sg_rpc.SecurityGroupAgentRpc( context=None, plugin_rpc=mock.Mock()) self.assertEqual(agent.firewall.__class__.__name__, 'NoopFirewallDriver') class BaseSecurityGroupAgentRpcTestCase(base.BaseTestCase): def setUp(self, defer_refresh_firewall=False): super(BaseSecurityGroupAgentRpcTestCase, self).setUp() set_firewall_driver(FIREWALL_NOOP_DRIVER) self.agent = sg_rpc.SecurityGroupAgentRpc( context=None, plugin_rpc=mock.Mock(), defer_refresh_firewall=defer_refresh_firewall) mock.patch('neutron.agent.linux.iptables_manager').start() self.default_firewall = self.agent.firewall self.firewall = mock.Mock() firewall_object = FakeFirewallDriver() self.firewall.defer_apply.side_effect = firewall_object.defer_apply self.agent.firewall = self.firewall self.fake_device = {'device': 'fake_device', 'network_id': 'fake_net', 'security_groups': ['fake_sgid1', 'fake_sgid2'], 'security_group_source_groups': ['fake_sgid2'], 'security_group_rules': [{'security_group_id': 'fake_sgid1', 'remote_group_id': 'fake_sgid2'}]} self.firewall.ports = {'fake_device': self.fake_device} self.firewall.security_group_updated = mock.Mock() class SecurityGroupAgentRpcTestCase(BaseSecurityGroupAgentRpcTestCase): def setUp(self, defer_refresh_firewall=False): super(SecurityGroupAgentRpcTestCase, self).setUp( defer_refresh_firewall) rpc = self.agent.plugin_rpc rpc.security_group_info_for_devices.side_effect = ( oslo_messaging.UnsupportedVersion('1.2')) rpc.security_group_rules_for_devices.return_value = ( self.firewall.ports) def test_prepare_and_remove_devices_filter(self): self.agent.prepare_devices_filter(['fake_device']) self.agent.remove_devices_filter(['fake_device']) # ignore device which is not filtered self.firewall.assert_has_calls([mock.call.defer_apply(), mock.call.prepare_port_filter( self.fake_device), mock.call.defer_apply(), mock.call.remove_port_filter( self.fake_device), ]) def test_prepare_devices_filter_with_noopfirewall(self): self.agent.firewall = self.default_firewall 
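# setUp() above forces security_group_info_for_devices to raise
# UnsupportedVersion('1.2') so these tests exercise the agent's
# fallback from the consolidated RPC call to the older per-device one.
# A minimal sketch of that degrade-gracefully pattern (illustrative;
# the real handling lives in sg_rpc.SecurityGroupAgentRpc):
import oslo_messaging

def get_device_filters(plugin_rpc, context, devices):
    try:
        # newer, batched call: rules plus sg_member_ips in one payload
        return plugin_rpc.security_group_info_for_devices(
            context, devices=devices)
    except oslo_messaging.UnsupportedVersion:
        # server predates the enhanced RPC: fall back per device
        return plugin_rpc.security_group_rules_for_devices(
            context, devices=devices)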
self.agent.plugin_rpc.security_group_info_for_devices = mock.Mock() self.agent.plugin_rpc.security_group_rules_for_devices = mock.Mock() self.agent.prepare_devices_filter(['fake_device']) self.assertFalse(self.agent.plugin_rpc. security_group_info_for_devices.called) self.assertFalse(self.agent.plugin_rpc. security_group_rules_for_devices.called) def test_prepare_devices_filter_with_firewall_disabled(self): cfg.CONF.set_override('enable_security_group', False, 'SECURITYGROUP') self.agent.plugin_rpc.security_group_info_for_devices = mock.Mock() self.agent.plugin_rpc.security_group_rules_for_devices = mock.Mock() self.agent.prepare_devices_filter(['fake_device']) self.assertFalse(self.agent.plugin_rpc. security_group_info_for_devices.called) self.assertFalse(self.agent.plugin_rpc. security_group_rules_for_devices.called) def test_security_groups_rule_updated(self): self.agent.refresh_firewall = mock.Mock() self.agent.prepare_devices_filter(['fake_port_id']) self.agent.security_groups_rule_updated(['fake_sgid1', 'fake_sgid3']) self.agent.refresh_firewall.assert_has_calls( [mock.call.refresh_firewall([self.fake_device['device']])]) self.assertFalse(self.firewall.security_group_updated.called) def test_security_groups_rule_not_updated(self): self.agent.refresh_firewall = mock.Mock() self.agent.prepare_devices_filter(['fake_port_id']) self.agent.security_groups_rule_updated(['fake_sgid3', 'fake_sgid4']) self.assertFalse(self.agent.refresh_firewall.called) self.assertFalse(self.firewall.security_group_updated.called) def test_security_groups_member_updated(self): self.agent.refresh_firewall = mock.Mock() self.agent.prepare_devices_filter(['fake_port_id']) self.agent.security_groups_member_updated(['fake_sgid2', 'fake_sgid3']) self.agent.refresh_firewall.assert_has_calls( [mock.call.refresh_firewall([self.fake_device['device']])]) self.assertFalse(self.firewall.security_group_updated.called) def test_security_groups_member_not_updated(self): self.agent.refresh_firewall = mock.Mock() self.agent.prepare_devices_filter(['fake_port_id']) self.agent.security_groups_member_updated(['fake_sgid3', 'fake_sgid4']) self.assertFalse(self.agent.refresh_firewall.called) self.assertFalse(self.firewall.security_group_updated.called) def test_security_groups_provider_updated(self): self.agent.refresh_firewall = mock.Mock() self.agent.security_groups_provider_updated(None) self.agent.refresh_firewall.assert_has_calls( [mock.call.refresh_firewall(None)]) def test_refresh_firewall(self): self.agent.prepare_devices_filter(['fake_port_id']) self.agent.refresh_firewall() calls = [mock.call.defer_apply(), mock.call.prepare_port_filter(self.fake_device), mock.call.defer_apply(), mock.call.update_port_filter(self.fake_device)] self.firewall.assert_has_calls(calls) def test_refresh_firewall_devices(self): self.agent.prepare_devices_filter(['fake_port_id']) self.agent.refresh_firewall([self.fake_device]) calls = [mock.call.defer_apply(), mock.call.prepare_port_filter(self.fake_device), mock.call.defer_apply(), mock.call.update_port_filter(self.fake_device)] self.firewall.assert_has_calls(calls) def test_refresh_firewall_none(self): self.agent.refresh_firewall([]) self.assertFalse(self.firewall.called) def test_refresh_firewall_with_firewall_disabled(self): cfg.CONF.set_override('enable_security_group', False, 'SECURITYGROUP') self.agent.plugin_rpc.security_group_info_for_devices = mock.Mock() self.agent.plugin_rpc.security_group_rules_for_devices = mock.Mock() self.agent.firewall.defer_apply = mock.Mock() 
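# enable_security_group is flipped via oslo.config in the tests above
# and below. A standalone sketch of the same override; the option is
# registered by Neutron's securitygroups_rpc module in the real tree
# and is registered here only so the snippet runs on its own:
from oslo_config import cfg

cfg.CONF.register_opts(
    [cfg.BoolOpt('enable_security_group', default=True)],
    'SECURITYGROUP')
cfg.CONF.set_override('enable_security_group', False, 'SECURITYGROUP')
assert not cfg.CONF.SECURITYGROUP.enable_security_group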
self.agent.refresh_firewall([self.fake_device]) self.assertFalse(self.agent.plugin_rpc. security_group_info_for_devices.called) self.assertFalse(self.agent.plugin_rpc. security_group_rules_for_devices.called) self.assertFalse(self.agent.firewall.defer_apply.called) def test_refresh_firewall_with_noopfirewall(self): self.agent.firewall = self.default_firewall self.agent.plugin_rpc.security_group_info_for_devices = mock.Mock() self.agent.plugin_rpc.security_group_rules_for_devices = mock.Mock() self.agent.firewall.defer_apply = mock.Mock() self.agent.refresh_firewall([self.fake_device]) self.assertFalse(self.agent.plugin_rpc. security_group_info_for_devices.called) self.assertFalse(self.agent.plugin_rpc. security_group_rules_for_devices.called) self.assertFalse(self.agent.firewall.defer_apply.called) class SecurityGroupAgentEnhancedRpcTestCase( BaseSecurityGroupAgentRpcTestCase): def setUp(self, defer_refresh_firewall=False): super(SecurityGroupAgentEnhancedRpcTestCase, self).setUp( defer_refresh_firewall=defer_refresh_firewall) fake_sg_info = { 'security_groups': collections.OrderedDict([ ('fake_sgid2', []), ('fake_sgid1', [{'remote_group_id': 'fake_sgid2'}])]), 'sg_member_ips': {'fake_sgid2': {'IPv4': [], 'IPv6': []}}, 'devices': self.firewall.ports} self.agent.plugin_rpc.security_group_info_for_devices.return_value = ( fake_sg_info) def test_prepare_and_remove_devices_filter_enhanced_rpc(self): self.agent.prepare_devices_filter(['fake_device']) self.agent.remove_devices_filter(['fake_device']) # these two mocks are too long, just use tmp_mock to replace them tmp_mock1 = mock.call.update_security_group_rules( 'fake_sgid1', [{'remote_group_id': 'fake_sgid2'}]) tmp_mock2 = mock.call.update_security_group_members( 'fake_sgid2', {'IPv4': [], 'IPv6': []}) # ignore device which is not filtered self.firewall.assert_has_calls([mock.call.defer_apply(), mock.call.update_security_group_rules( 'fake_sgid2', []), tmp_mock1, tmp_mock2, mock.call.prepare_port_filter( self.fake_device), mock.call.defer_apply(), mock.call.remove_port_filter( self.fake_device), ]) def test_security_groups_rule_updated_enhanced_rpc(self): sg_list = ['fake_sgid1', 'fake_sgid3'] self.agent.refresh_firewall = mock.Mock() self.agent.prepare_devices_filter(['fake_port_id']) self.agent.security_groups_rule_updated(sg_list) self.agent.refresh_firewall.assert_called_once_with( [self.fake_device['device']]) self.firewall.security_group_updated.assert_called_once_with( 'sg_rule', set(sg_list)) def test_security_groups_rule_not_updated_enhanced_rpc(self): self.agent.refresh_firewall = mock.Mock() self.agent.prepare_devices_filter(['fake_port_id']) self.agent.security_groups_rule_updated(['fake_sgid3', 'fake_sgid4']) self.assertFalse(self.agent.refresh_firewall.called) self.assertFalse(self.firewall.security_group_updated.called) def test_security_groups_member_updated_enhanced_rpc(self): sg_list = ['fake_sgid2', 'fake_sgid3'] self.agent.refresh_firewall = mock.Mock() self.agent.prepare_devices_filter(['fake_port_id']) self.agent.security_groups_member_updated(sg_list) self.agent.refresh_firewall.assert_called_once_with( [self.fake_device['device']]) self.firewall.security_group_updated.assert_called_once_with( 'sg_member', set(sg_list)) def test_security_groups_member_not_updated_enhanced_rpc(self): self.agent.refresh_firewall = mock.Mock() self.agent.prepare_devices_filter(['fake_port_id']) self.agent.security_groups_member_updated( ['fake_sgid3', 'fake_sgid4']) self.assertFalse(self.agent.refresh_firewall.called) 
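# For reference, the enhanced-RPC payload shape the mocked plugin
# returns in the setUp() above; the OrderedDict fixes the order of the
# update_security_group_rules() calls that the assert_has_calls()
# sequences in this class depend on:
import collections

fake_sg_info = {
    'security_groups': collections.OrderedDict([
        ('fake_sgid2', []),                                   # no rules
        ('fake_sgid1', [{'remote_group_id': 'fake_sgid2'}]),  # one rule
    ]),
    'sg_member_ips': {'fake_sgid2': {'IPv4': [], 'IPv6': []}},
    'devices': {},  # device_id -> port details in a real response
}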
self.assertFalse(self.firewall.security_group_updated.called) def test_security_groups_provider_updated_enhanced_rpc(self): self.agent.refresh_firewall = mock.Mock() self.agent.security_groups_provider_updated(None) self.agent.refresh_firewall.assert_has_calls( [mock.call.refresh_firewall(None)]) def test_refresh_firewall_enhanced_rpc(self): self.agent.prepare_devices_filter(['fake_port_id']) self.agent.refresh_firewall() calls = [mock.call.defer_apply(), mock.call.update_security_group_rules('fake_sgid2', []), mock.call.update_security_group_rules( 'fake_sgid1', [{'remote_group_id': 'fake_sgid2'}]), mock.call.update_security_group_members( 'fake_sgid2', {'IPv4': [], 'IPv6': []}), mock.call.prepare_port_filter(self.fake_device), mock.call.defer_apply(), mock.call.update_security_group_rules('fake_sgid2', []), mock.call.update_security_group_rules( 'fake_sgid1', [{'remote_group_id': 'fake_sgid2'}]), mock.call.update_security_group_members( 'fake_sgid2', {'IPv4': [], 'IPv6': []}), mock.call.update_port_filter(self.fake_device)] self.firewall.assert_has_calls(calls) def test_refresh_firewall_devices_enhanced_rpc(self): self.agent.prepare_devices_filter(['fake_device']) self.agent.refresh_firewall([self.fake_device]) calls = [mock.call.defer_apply(), mock.call.update_security_group_rules('fake_sgid2', []), mock.call.update_security_group_rules('fake_sgid1', [ {'remote_group_id': 'fake_sgid2'}]), mock.call.update_security_group_members('fake_sgid2', { 'IPv4': [], 'IPv6': [] }), mock.call.prepare_port_filter(self.fake_device), mock.call.defer_apply(), mock.call.update_security_group_rules('fake_sgid2', []), mock.call.update_security_group_rules('fake_sgid1', [ {'remote_group_id': 'fake_sgid2'}]), mock.call.update_security_group_members('fake_sgid2', { 'IPv4': [], 'IPv6': []}), mock.call.update_port_filter(self.fake_device) ] self.firewall.assert_has_calls(calls) def test_refresh_firewall_none_enhanced_rpc(self): self.agent.refresh_firewall([]) self.assertFalse(self.firewall.called) class SecurityGroupAgentRpcWithDeferredRefreshTestCase( SecurityGroupAgentRpcTestCase): def setUp(self): super(SecurityGroupAgentRpcWithDeferredRefreshTestCase, self).setUp( defer_refresh_firewall=True) @contextlib.contextmanager def add_fake_device(self, device, sec_groups, source_sec_groups=None): fake_device = {'device': device, 'security_groups': sec_groups, 'security_group_source_groups': source_sec_groups or [], 'security_group_rules': [{'security_group_id': 'fake_sgid1', 'remote_group_id': 'fake_sgid2'}]} self.firewall.ports[device] = fake_device yield del self.firewall.ports[device] def test_security_groups_rule_updated(self): self.agent.security_groups_rule_updated(['fake_sgid1', 'fake_sgid3']) self.assertIn('fake_device', self.agent.devices_to_refilter) self.assertFalse(self.firewall.security_group_updated.called) def test_multiple_security_groups_rule_updated_same_port(self): with self.add_fake_device(device='fake_device_2', sec_groups=['fake_sgidX']): self.agent.refresh_firewall = mock.Mock() self.agent.security_groups_rule_updated(['fake_sgid1']) self.agent.security_groups_rule_updated(['fake_sgid2']) self.assertIn('fake_device', self.agent.devices_to_refilter) self.assertNotIn('fake_device_2', self.agent.devices_to_refilter) self.assertFalse(self.firewall.security_group_updated.called) def test_security_groups_rule_updated_multiple_ports(self): with self.add_fake_device(device='fake_device_2', sec_groups=['fake_sgid2']): self.agent.refresh_firewall = mock.Mock() 
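# add_fake_device() above is the standard @contextlib.contextmanager
# register/yield/clean-up idiom. A generic standalone version; note
# the try/finally form below also cleans up when the managed block
# raises, which the simpler yield-then-del form does not:
import contextlib

@contextlib.contextmanager
def registered(registry, key, value):
    registry[key] = value
    try:
        yield value
    finally:
        del registry[key]

ports = {}
with registered(ports, 'fake_device_2', {'security_groups': ['sg']}):
    assert 'fake_device_2' in ports
assert not ports  # cleaned up on exit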
self.agent.security_groups_rule_updated(['fake_sgid1', 'fake_sgid2']) self.assertIn('fake_device', self.agent.devices_to_refilter) self.assertIn('fake_device_2', self.agent.devices_to_refilter) self.assertFalse(self.firewall.security_group_updated.called) def test_multiple_security_groups_rule_updated_multiple_ports(self): with self.add_fake_device(device='fake_device_2', sec_groups=['fake_sgid2']): self.agent.refresh_firewall = mock.Mock() self.agent.security_groups_rule_updated(['fake_sgid1']) self.agent.security_groups_rule_updated(['fake_sgid2']) self.assertIn('fake_device', self.agent.devices_to_refilter) self.assertIn('fake_device_2', self.agent.devices_to_refilter) self.assertFalse(self.firewall.security_group_updated.called) def test_security_groups_member_updated(self): self.agent.security_groups_member_updated(['fake_sgid2', 'fake_sgid3']) self.assertIn('fake_device', self.agent.devices_to_refilter) self.assertFalse(self.firewall.security_group_updated.called) def test_multiple_security_groups_member_updated_same_port(self): with self.add_fake_device(device='fake_device_2', sec_groups=['fake_sgid1', 'fake_sgid1B'], source_sec_groups=['fake_sgidX']): self.agent.refresh_firewall = mock.Mock() self.agent.security_groups_member_updated(['fake_sgid1', 'fake_sgid3']) self.agent.security_groups_member_updated(['fake_sgid2', 'fake_sgid3']) self.assertIn('fake_device', self.agent.devices_to_refilter) self.assertNotIn('fake_device_2', self.agent.devices_to_refilter) self.assertFalse(self.firewall.security_group_updated.called) def test_security_groups_member_updated_multiple_ports(self): with self.add_fake_device(device='fake_device_2', sec_groups=['fake_sgid1', 'fake_sgid1B'], source_sec_groups=['fake_sgid2']): self.agent.security_groups_member_updated(['fake_sgid2']) self.assertIn('fake_device', self.agent.devices_to_refilter) self.assertIn('fake_device_2', self.agent.devices_to_refilter) self.assertFalse(self.firewall.security_group_updated.called) def test_multiple_security_groups_member_updated_multiple_ports(self): with self.add_fake_device(device='fake_device_2', sec_groups=['fake_sgid1', 'fake_sgid1B'], source_sec_groups=['fake_sgid1B']): self.agent.security_groups_member_updated(['fake_sgid1B']) self.agent.security_groups_member_updated(['fake_sgid2']) self.assertIn('fake_device', self.agent.devices_to_refilter) self.assertIn('fake_device_2', self.agent.devices_to_refilter) self.assertFalse(self.firewall.security_group_updated.called) def test_security_groups_provider_updated(self): self.agent.security_groups_provider_updated(None) self.assertTrue(self.agent.global_refresh_firewall) def test_security_groups_provider_updated_devices_specified(self): self.agent.security_groups_provider_updated( ['fake_device_1', 'fake_device_2']) self.assertFalse(self.agent.global_refresh_firewall) self.assertIn('fake_device_1', self.agent.devices_to_refilter) self.assertIn('fake_device_2', self.agent.devices_to_refilter) def test_setup_port_filters_new_ports_only(self): self.agent.prepare_devices_filter = mock.Mock() self.agent.refresh_firewall = mock.Mock() self.agent.devices_to_refilter = set() self.agent.global_refresh_firewall = False self.agent.setup_port_filters(set(['fake_new_device']), set()) self.assertFalse(self.agent.devices_to_refilter) self.assertFalse(self.agent.global_refresh_firewall) self.agent.prepare_devices_filter.assert_called_once_with( set(['fake_new_device'])) self.assertFalse(self.agent.refresh_firewall.called) self.assertFalse(self.firewall.security_group_updated.called) 
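# A condensed sketch of the bookkeeping the setup_port_filters() tests
# in this class pin down. This is derived from the assertions here,
# not copied from the agent; the real method is
# sg_rpc.SecurityGroupAgentRpc.setup_port_filters():
def setup_port_filters_sketch(agent, new_devices, updated_devices):
    # Snapshot and clear first, so an RPC that lands while
    # prepare_devices_filter() runs is kept for the next pass.
    to_refresh = (agent.devices_to_refilter | updated_devices) - new_devices
    do_global = agent.global_refresh_firewall
    agent.devices_to_refilter = set()
    agent.global_refresh_firewall = False
    if new_devices:
        agent.prepare_devices_filter(new_devices)
    if do_global:
        agent.refresh_firewall()       # full refresh, no device list
    elif to_refresh:
        agent.refresh_firewall(to_refresh)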
def test_setup_port_filters_updated_ports_only(self): self.agent.prepare_devices_filter = mock.Mock() self.agent.refresh_firewall = mock.Mock() self.agent.devices_to_refilter = set() self.agent.global_refresh_firewall = False self.agent.setup_port_filters(set(), set(['fake_updated_device'])) self.assertFalse(self.agent.devices_to_refilter) self.assertFalse(self.agent.global_refresh_firewall) self.agent.refresh_firewall.assert_called_once_with( set(['fake_updated_device'])) self.assertFalse(self.agent.prepare_devices_filter.called) self.assertFalse(self.firewall.security_group_updated.called) def test_setup_port_filter_new_and_updated_ports(self): self.agent.prepare_devices_filter = mock.Mock() self.agent.refresh_firewall = mock.Mock() self.agent.devices_to_refilter = set() self.agent.global_refresh_firewall = False self.agent.setup_port_filters(set(['fake_new_device']), set(['fake_updated_device'])) self.assertFalse(self.agent.devices_to_refilter) self.assertFalse(self.agent.global_refresh_firewall) self.agent.prepare_devices_filter.assert_called_once_with( set(['fake_new_device'])) self.agent.refresh_firewall.assert_called_once_with( set(['fake_updated_device'])) self.assertFalse(self.firewall.security_group_updated.called) def test_setup_port_filters_sg_updates_only(self): self.agent.prepare_devices_filter = mock.Mock() self.agent.refresh_firewall = mock.Mock() self.agent.devices_to_refilter = set(['fake_device']) self.agent.global_refresh_firewall = False self.agent.setup_port_filters(set(), set()) self.assertFalse(self.agent.devices_to_refilter) self.assertFalse(self.agent.global_refresh_firewall) self.agent.refresh_firewall.assert_called_once_with( set(['fake_device'])) self.assertFalse(self.agent.prepare_devices_filter.called) self.assertFalse(self.firewall.security_group_updated.called) def test_setup_port_filters_sg_updates_and_new_ports(self): self.agent.prepare_devices_filter = mock.Mock() self.agent.refresh_firewall = mock.Mock() self.agent.devices_to_refilter = set(['fake_device']) self.agent.global_refresh_firewall = False self.agent.setup_port_filters(set(['fake_new_device']), set()) self.assertFalse(self.agent.devices_to_refilter) self.assertFalse(self.agent.global_refresh_firewall) self.agent.prepare_devices_filter.assert_called_once_with( set(['fake_new_device'])) self.agent.refresh_firewall.assert_called_once_with( set(['fake_device'])) self.assertFalse(self.firewall.security_group_updated.called) def _test_prepare_devices_filter(self, devices): # simulate an RPC arriving and calling _security_group_updated() self.agent.devices_to_refilter |= set(['fake_new_device']) def test_setup_port_filters_new_port_and_rpc(self): # Make sure that if an RPC arrives and adds a device to # devices_to_refilter while we are in setup_port_filters() # that it is not cleared, and will be processed later. 
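# The test below swaps prepare_devices_filter for a stub that mutates
# agent state mid-call. The same race can be simulated with a mock
# side_effect; a generic sketch (the names here are illustrative):
import mock

state = {'devices_to_refilter': set()}

def rpc_arrives_during_prepare(devices):
    # pretend a security_groups_*_updated RPC landed while the
    # filter was being prepared
    state['devices_to_refilter'] |= {'fake_new_device'}

prepare = mock.Mock(side_effect=rpc_arrives_during_prepare)
prepare({'new_device'})
assert state['devices_to_refilter'] == {'fake_new_device'}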
self.agent.prepare_devices_filter = self._test_prepare_devices_filter self.agent.refresh_firewall = mock.Mock() self.agent.devices_to_refilter = set(['new_device', 'fake_device']) self.agent.global_refresh_firewall = False self.agent.setup_port_filters(set(['new_device']), set()) self.assertEqual(self.agent.devices_to_refilter, set(['fake_new_device'])) self.assertFalse(self.agent.global_refresh_firewall) self.agent.refresh_firewall.assert_called_once_with( set(['fake_device'])) self.assertFalse(self.firewall.security_group_updated.called) def test_setup_port_filters_sg_updates_and_updated_ports(self): self.agent.prepare_devices_filter = mock.Mock() self.agent.refresh_firewall = mock.Mock() self.agent.devices_to_refilter = set(['fake_device', 'fake_device_2']) self.agent.global_refresh_firewall = False self.agent.setup_port_filters( set(), set(['fake_device', 'fake_updated_device'])) self.assertFalse(self.agent.devices_to_refilter) self.assertFalse(self.agent.global_refresh_firewall) self.agent.refresh_firewall.assert_called_once_with( set(['fake_device', 'fake_device_2', 'fake_updated_device'])) self.assertFalse(self.agent.prepare_devices_filter.called) self.assertFalse(self.firewall.security_group_updated.called) def test_setup_port_filters_all_updates(self): self.agent.prepare_devices_filter = mock.Mock() self.agent.refresh_firewall = mock.Mock() self.agent.devices_to_refilter = set(['fake_device', 'fake_device_2']) self.agent.global_refresh_firewall = False self.agent.setup_port_filters( set(['fake_new_device']), set(['fake_device', 'fake_updated_device'])) self.assertFalse(self.agent.devices_to_refilter) self.assertFalse(self.agent.global_refresh_firewall) self.agent.prepare_devices_filter.assert_called_once_with( set(['fake_new_device'])) self.agent.refresh_firewall.assert_called_once_with( set(['fake_device', 'fake_device_2', 'fake_updated_device'])) self.assertFalse(self.firewall.security_group_updated.called) def test_setup_port_filters_no_update(self): self.agent.prepare_devices_filter = mock.Mock() self.agent.refresh_firewall = mock.Mock() self.agent.devices_to_refilter = set() self.agent.global_refresh_firewall = False self.agent.setup_port_filters(set(), set()) self.assertFalse(self.agent.devices_to_refilter) self.assertFalse(self.agent.global_refresh_firewall) self.assertFalse(self.agent.refresh_firewall.called) self.assertFalse(self.agent.prepare_devices_filter.called) self.assertFalse(self.firewall.security_group_updated.called) def test_setup_port_filters_with_global_refresh(self): self.agent.prepare_devices_filter = mock.Mock() self.agent.refresh_firewall = mock.Mock() self.agent.devices_to_refilter = set() self.agent.global_refresh_firewall = True self.agent.setup_port_filters(set(), set()) self.assertFalse(self.agent.devices_to_refilter) self.assertFalse(self.agent.global_refresh_firewall) self.agent.refresh_firewall.assert_called_once_with() self.assertFalse(self.agent.prepare_devices_filter.called) self.assertFalse(self.firewall.security_group_updated.called) class FakeSGNotifierAPI(sg_rpc.SecurityGroupAgentRpcApiMixin): def __init__(self): self.topic = 'fake' target = oslo_messaging.Target(topic=self.topic, version='1.0') self.client = n_rpc.get_client(target) class SecurityGroupAgentRpcApiTestCase(base.BaseTestCase): def setUp(self): super(SecurityGroupAgentRpcApiTestCase, self).setUp() self.notifier = FakeSGNotifierAPI() self.mock_prepare = mock.patch.object(self.notifier.client, 'prepare', return_value=self.notifier.client).start() self.mock_cast = 
mock.patch.object(self.notifier.client, 'cast').start() def test_security_groups_provider_updated(self): self.notifier.security_groups_provider_updated(None) self.mock_cast.assert_has_calls( [mock.call(None, 'security_groups_provider_updated', devices_to_update=None)]) def test_security_groups_rule_updated(self): self.notifier.security_groups_rule_updated( None, security_groups=['fake_sgid']) self.mock_cast.assert_has_calls( [mock.call(None, 'security_groups_rule_updated', security_groups=['fake_sgid'])]) def test_security_groups_member_updated(self): self.notifier.security_groups_member_updated( None, security_groups=['fake_sgid']) self.mock_cast.assert_has_calls( [mock.call(None, 'security_groups_member_updated', security_groups=['fake_sgid'])]) def test_security_groups_rule_not_updated(self): self.notifier.security_groups_rule_updated( None, security_groups=[]) self.assertFalse(self.mock_cast.called) def test_security_groups_member_not_updated(self): self.notifier.security_groups_member_updated( None, security_groups=[]) self.assertFalse(self.mock_cast.called) #Note(nati) bn -> binary_name # id -> device_id PHYSDEV_MOD = '-m physdev' PHYSDEV_IS_BRIDGED = '--physdev-is-bridged' IPTABLES_ARG = {'bn': iptables_manager.binary_name, 'physdev_mod': PHYSDEV_MOD, 'physdev_is_bridged': PHYSDEV_IS_BRIDGED} CHAINS_MANGLE = ('FORWARD|INPUT|OUTPUT|POSTROUTING|PREROUTING|mark|scope' '|float-snat|floatingip') IPTABLES_ARG['chains'] = CHAINS_MANGLE IPTABLES_MANGLE = """# Generated by iptables_manager *mangle :FORWARD - [0:0] :INPUT - [0:0] :OUTPUT - [0:0] :POSTROUTING - [0:0] :PREROUTING - [0:0] :%(bn)s-(%(chains)s) - [0:0] :%(bn)s-(%(chains)s) - [0:0] :%(bn)s-(%(chains)s) - [0:0] :%(bn)s-(%(chains)s) - [0:0] :%(bn)s-(%(chains)s) - [0:0] :%(bn)s-(%(chains)s) - [0:0] :%(bn)s-(%(chains)s) - [0:0] :%(bn)s-(%(chains)s) - [0:0] :%(bn)s-(%(chains)s) - [0:0] -I FORWARD 1 -j %(bn)s-FORWARD -I INPUT 1 -j %(bn)s-INPUT -I OUTPUT 1 -j %(bn)s-OUTPUT -I POSTROUTING 1 -j %(bn)s-POSTROUTING -I PREROUTING 1 -j %(bn)s-PREROUTING -I %(bn)s-PREROUTING 1 -j %(bn)s-mark -I %(bn)s-PREROUTING 2 -j %(bn)s-scope -I %(bn)s-PREROUTING 3 -m connmark ! --mark 0x0/0xffff0000 -j CONNMARK \ --restore-mark --nfmask 0xffff0000 --ctmask 0xffff0000 -I %(bn)s-PREROUTING 4 -j %(bn)s-floatingip -I %(bn)s-float-snat 1 -m connmark --mark 0x0/0xffff0000 \ -j CONNMARK --save-mark --nfmask 0xffff0000 --ctmask 0xffff0000 COMMIT # Completed by iptables_manager """ % IPTABLES_ARG CHAINS_MANGLE_V6 = 'FORWARD|INPUT|OUTPUT|POSTROUTING|PREROUTING|scope' IPTABLES_ARG['chains'] = CHAINS_MANGLE_V6 IPTABLES_MANGLE_V6 = """# Generated by iptables_manager *mangle :FORWARD - [0:0] :INPUT - [0:0] :OUTPUT - [0:0] :POSTROUTING - [0:0] :PREROUTING - [0:0] :%(bn)s-(%(chains)s) - [0:0] :%(bn)s-(%(chains)s) - [0:0] :%(bn)s-(%(chains)s) - [0:0] :%(bn)s-(%(chains)s) - [0:0] :%(bn)s-(%(chains)s) - [0:0] :%(bn)s-(%(chains)s) - [0:0] -I FORWARD 1 -j %(bn)s-FORWARD -I INPUT 1 -j %(bn)s-INPUT -I OUTPUT 1 -j %(bn)s-OUTPUT -I POSTROUTING 1 -j %(bn)s-POSTROUTING -I PREROUTING 1 -j %(bn)s-PREROUTING -I %(bn)s-PREROUTING 1 -j %(bn)s-scope -I %(bn)s-PREROUTING 2 -m connmark ! 
--mark 0x0/0xffff0000 -j CONNMARK \ --restore-mark --nfmask 0xffff0000 --ctmask 0xffff0000 COMMIT # Completed by iptables_manager """ % IPTABLES_ARG CHAINS_NAT = 'OUTPUT|POSTROUTING|PREROUTING|float-snat|snat' IPTABLES_ARG['port1'] = 'port1' IPTABLES_ARG['port2'] = 'port2' IPTABLES_ARG['mac1'] = '12:34:56:78:9A:BC' IPTABLES_ARG['mac2'] = '12:34:56:78:9A:BD' IPTABLES_ARG['ip1'] = '10.0.0.3/32' IPTABLES_ARG['ip2'] = '10.0.0.4/32' IPTABLES_ARG['chains'] = CHAINS_NAT IPTABLES_RAW_DEFAULT = """# Generated by iptables_manager *raw :OUTPUT - [0:0] :PREROUTING - [0:0] :%(bn)s-OUTPUT - [0:0] :%(bn)s-PREROUTING - [0:0] -I OUTPUT 1 -j %(bn)s-OUTPUT -I PREROUTING 1 -j %(bn)s-PREROUTING COMMIT # Completed by iptables_manager """ % IPTABLES_ARG IPTABLES_RAW_DEVICE_1 = """# Generated by iptables_manager *raw :OUTPUT - [0:0] :PREROUTING - [0:0] :%(bn)s-OUTPUT - [0:0] :%(bn)s-PREROUTING - [0:0] -I OUTPUT 1 -j %(bn)s-OUTPUT -I PREROUTING 1 -j %(bn)s-PREROUTING -I %(bn)s-PREROUTING 1 -m physdev --physdev-in qvbtap_port1 -j CT --zone 1 -I %(bn)s-PREROUTING 2 -m physdev --physdev-in tap_port1 -j CT --zone 1 COMMIT # Completed by iptables_manager """ % IPTABLES_ARG IPTABLES_RAW_DEVICE_2 = """# Generated by iptables_manager *raw :OUTPUT - [0:0] :PREROUTING - [0:0] :%(bn)s-OUTPUT - [0:0] :%(bn)s-PREROUTING - [0:0] -I OUTPUT 1 -j %(bn)s-OUTPUT -I PREROUTING 1 -j %(bn)s-PREROUTING -I %(bn)s-PREROUTING 1 -m physdev --physdev-in qvbtap_%(port1)s \ -j CT --zone 1 -I %(bn)s-PREROUTING 2 -m physdev --physdev-in tap_%(port1)s -j CT --zone 1 -I %(bn)s-PREROUTING 3 -m physdev --physdev-in qvbtap_%(port2)s \ -j CT --zone 2 -I %(bn)s-PREROUTING 4 -m physdev --physdev-in tap_%(port2)s -j CT --zone 2 COMMIT # Completed by iptables_manager """ % IPTABLES_ARG IPTABLES_NAT = """# Generated by iptables_manager *nat :OUTPUT - [0:0] :POSTROUTING - [0:0] :PREROUTING - [0:0] :neutron-postrouting-bottom - [0:0] :%(bn)s-(%(chains)s) - [0:0] :%(bn)s-(%(chains)s) - [0:0] :%(bn)s-(%(chains)s) - [0:0] :%(bn)s-(%(chains)s) - [0:0] :%(bn)s-(%(chains)s) - [0:0] -I OUTPUT 1 -j %(bn)s-OUTPUT -I POSTROUTING 1 -j %(bn)s-POSTROUTING -I POSTROUTING 2 -j neutron-postrouting-bottom -I PREROUTING 1 -j %(bn)s-PREROUTING -I neutron-postrouting-bottom 1 -j %(bn)s-snat -I %(bn)s-snat 1 -j %(bn)s-float-snat COMMIT # Completed by iptables_manager """ % IPTABLES_ARG CHAINS_RAW = 'OUTPUT|PREROUTING' IPTABLES_ARG['chains'] = CHAINS_RAW IPTABLES_RAW = """# Generated by iptables_manager *raw :OUTPUT - [0:0] :PREROUTING - [0:0] :%(bn)s-(%(chains)s) - [0:0] :%(bn)s-(%(chains)s) - [0:0] -I OUTPUT 1 -j %(bn)s-OUTPUT -I PREROUTING 1 -j %(bn)s-PREROUTING COMMIT # Completed by iptables_manager """ % IPTABLES_ARG CHAINS_EMPTY = ('FORWARD|INPUT|OUTPUT|local|scope|sg-chain|sg-fallback') CHAINS_1 = CHAINS_EMPTY + '|i_port1|o_port1|s_port1' CHAINS_2 = CHAINS_1 + '|i_port2|o_port2|s_port2' IPTABLES_ARG['chains'] = CHAINS_1 IPSET_FILTER_1 = """# Generated by iptables_manager *filter :FORWARD - [0:0] :INPUT - [0:0] :OUTPUT - [0:0] :neutron-filter-top - [0:0] :%(bn)s-(%(chains)s) - [0:0] :%(bn)s-(%(chains)s) - [0:0] :%(bn)s-(%(chains)s) - [0:0] :%(bn)s-(%(chains)s) - [0:0] :%(bn)s-(%(chains)s) - [0:0] :%(bn)s-(%(chains)s) - [0:0] :%(bn)s-(%(chains)s) - [0:0] :%(bn)s-(%(chains)s) - [0:0] :%(bn)s-(%(chains)s) - [0:0] :%(bn)s-(%(chains)s) - [0:0] -I FORWARD 1 -j neutron-filter-top -I FORWARD 2 -j %(bn)s-FORWARD -I INPUT 1 -j %(bn)s-INPUT -I OUTPUT 1 -j neutron-filter-top -I OUTPUT 2 -j %(bn)s-OUTPUT -I neutron-filter-top 1 -j %(bn)s-local -I %(bn)s-FORWARD 1 -j %(bn)s-scope -I 
%(bn)s-FORWARD 2 %(physdev_mod)s --physdev-INGRESS tap_port1 \ %(physdev_is_bridged)s -j %(bn)s-sg-chain -I %(bn)s-FORWARD 3 %(physdev_mod)s --physdev-EGRESS tap_port1 \ %(physdev_is_bridged)s -j %(bn)s-sg-chain -I %(bn)s-INPUT 1 %(physdev_mod)s --physdev-EGRESS tap_port1 \ %(physdev_is_bridged)s -j %(bn)s-o_port1 -I %(bn)s-i_port1 1 -m state --state RELATED,ESTABLISHED -j RETURN -I %(bn)s-i_port1 2 -s 10.0.0.2/32 -p udp -m udp --sport 67 -m udp \ --dport 68 -j RETURN -I %(bn)s-i_port1 3 -p tcp -m tcp --dport 22 -j RETURN -I %(bn)s-i_port1 4 -m set --match-set NIPv4security_group1 src -j \ RETURN -I %(bn)s-i_port1 5 -m state --state INVALID -j DROP -I %(bn)s-i_port1 6 -j %(bn)s-sg-fallback -I %(bn)s-o_port1 1 -s 0.0.0.0/32 -d 255.255.255.255/32 -p udp -m udp \ --sport 68 --dport 67 -j RETURN -I %(bn)s-o_port1 2 -j %(bn)s-s_port1 -I %(bn)s-o_port1 3 -p udp -m udp --sport 68 --dport 67 -j RETURN -I %(bn)s-o_port1 4 -p udp -m udp --sport 67 -m udp --dport 68 -j DROP -I %(bn)s-o_port1 5 -m state --state RELATED,ESTABLISHED -j RETURN -I %(bn)s-o_port1 6 -j RETURN -I %(bn)s-o_port1 7 -m state --state INVALID -j DROP -I %(bn)s-o_port1 8 -j %(bn)s-sg-fallback -I %(bn)s-s_port1 1 -s 10.0.0.3/32 -m mac --mac-source 12:34:56:78:9A:BC \ -j RETURN -I %(bn)s-s_port1 2 -j DROP -I %(bn)s-sg-chain 1 %(physdev_mod)s --physdev-INGRESS tap_port1 \ %(physdev_is_bridged)s -j %(bn)s-i_port1 -I %(bn)s-sg-chain 2 %(physdev_mod)s --physdev-EGRESS tap_port1 \ %(physdev_is_bridged)s -j %(bn)s-o_port1 -I %(bn)s-sg-chain 3 -j ACCEPT -I %(bn)s-sg-fallback 1 -j DROP COMMIT # Completed by iptables_manager """ % IPTABLES_ARG IPTABLES_FILTER_1 = """# Generated by iptables_manager *filter :FORWARD - [0:0] :INPUT - [0:0] :OUTPUT - [0:0] :neutron-filter-top - [0:0] :%(bn)s-(%(chains)s) - [0:0] :%(bn)s-(%(chains)s) - [0:0] :%(bn)s-(%(chains)s) - [0:0] :%(bn)s-(%(chains)s) - [0:0] :%(bn)s-(%(chains)s) - [0:0] :%(bn)s-(%(chains)s) - [0:0] :%(bn)s-(%(chains)s) - [0:0] :%(bn)s-(%(chains)s) - [0:0] :%(bn)s-(%(chains)s) - [0:0] :%(bn)s-(%(chains)s) - [0:0] -I FORWARD 1 -j neutron-filter-top -I FORWARD 2 -j %(bn)s-FORWARD -I INPUT 1 -j %(bn)s-INPUT -I OUTPUT 1 -j neutron-filter-top -I OUTPUT 2 -j %(bn)s-OUTPUT -I neutron-filter-top 1 -j %(bn)s-local -I %(bn)s-FORWARD 1 -j %(bn)s-scope -I %(bn)s-FORWARD 2 %(physdev_mod)s --physdev-INGRESS tap_port1 \ %(physdev_is_bridged)s -j %(bn)s-sg-chain -I %(bn)s-FORWARD 3 %(physdev_mod)s --physdev-EGRESS tap_port1 \ %(physdev_is_bridged)s -j %(bn)s-sg-chain -I %(bn)s-INPUT 1 %(physdev_mod)s --physdev-EGRESS tap_port1 \ %(physdev_is_bridged)s -j %(bn)s-o_port1 -I %(bn)s-i_port1 1 -m state --state RELATED,ESTABLISHED -j RETURN -I %(bn)s-i_port1 2 -s 10.0.0.2/32 -p udp -m udp --sport 67 -m udp \ --dport 68 -j RETURN -I %(bn)s-i_port1 3 -p tcp -m tcp --dport 22 -j RETURN -I %(bn)s-i_port1 4 -m state --state INVALID -j DROP -I %(bn)s-i_port1 5 -j %(bn)s-sg-fallback -I %(bn)s-o_port1 1 -s 0.0.0.0/32 -d 255.255.255.255/32 -p udp -m udp \ --sport 68 --dport 67 -j RETURN -I %(bn)s-o_port1 2 -j %(bn)s-s_port1 -I %(bn)s-o_port1 3 -p udp -m udp --sport 68 --dport 67 -j RETURN -I %(bn)s-o_port1 4 -p udp -m udp --sport 67 -m udp --dport 68 -j DROP -I %(bn)s-o_port1 5 -m state --state RELATED,ESTABLISHED -j RETURN -I %(bn)s-o_port1 6 -j RETURN -I %(bn)s-o_port1 7 -m state --state INVALID -j DROP -I %(bn)s-o_port1 8 -j %(bn)s-sg-fallback -I %(bn)s-s_port1 1 -s 10.0.0.3/32 -m mac --mac-source 12:34:56:78:9A:BC \ -j RETURN -I %(bn)s-s_port1 2 -j DROP -I %(bn)s-sg-chain 1 %(physdev_mod)s --physdev-INGRESS tap_port1 
\ %(physdev_is_bridged)s -j %(bn)s-i_port1 -I %(bn)s-sg-chain 2 %(physdev_mod)s --physdev-EGRESS tap_port1 \ %(physdev_is_bridged)s -j %(bn)s-o_port1 -I %(bn)s-sg-chain 3 -j ACCEPT -I %(bn)s-sg-fallback 1 -j DROP COMMIT # Completed by iptables_manager """ % IPTABLES_ARG IPTABLES_FILTER_1_2 = """# Generated by iptables_manager *filter :FORWARD - [0:0] :INPUT - [0:0] :OUTPUT - [0:0] :neutron-filter-top - [0:0] :%(bn)s-(%(chains)s) - [0:0] :%(bn)s-(%(chains)s) - [0:0] :%(bn)s-(%(chains)s) - [0:0] :%(bn)s-(%(chains)s) - [0:0] :%(bn)s-(%(chains)s) - [0:0] :%(bn)s-(%(chains)s) - [0:0] :%(bn)s-(%(chains)s) - [0:0] :%(bn)s-(%(chains)s) - [0:0] :%(bn)s-(%(chains)s) - [0:0] :%(bn)s-(%(chains)s) - [0:0] -I FORWARD 1 -j neutron-filter-top -I FORWARD 2 -j %(bn)s-FORWARD -I INPUT 1 -j %(bn)s-INPUT -I OUTPUT 1 -j neutron-filter-top -I OUTPUT 2 -j %(bn)s-OUTPUT -I neutron-filter-top 1 -j %(bn)s-local -I %(bn)s-FORWARD 1 -j %(bn)s-scope -I %(bn)s-FORWARD 2 %(physdev_mod)s --physdev-INGRESS tap_port1 \ %(physdev_is_bridged)s -j %(bn)s-sg-chain -I %(bn)s-FORWARD 3 %(physdev_mod)s --physdev-EGRESS tap_port1 \ %(physdev_is_bridged)s -j %(bn)s-sg-chain -I %(bn)s-INPUT 1 %(physdev_mod)s --physdev-EGRESS tap_port1 \ %(physdev_is_bridged)s -j %(bn)s-o_port1 -I %(bn)s-i_port1 1 -m state --state RELATED,ESTABLISHED -j RETURN -I %(bn)s-i_port1 2 -s 10.0.0.2/32 -p udp -m udp --sport 67 -m udp \ --dport 68 -j RETURN -I %(bn)s-i_port1 3 -p tcp -m tcp --dport 22 -j RETURN -I %(bn)s-i_port1 4 -s 10.0.0.4/32 -j RETURN -I %(bn)s-i_port1 5 -m state --state INVALID -j DROP -I %(bn)s-i_port1 6 -j %(bn)s-sg-fallback -I %(bn)s-o_port1 1 -s 0.0.0.0/32 -d 255.255.255.255/32 -p udp -m udp \ --sport 68 --dport 67 -j RETURN -I %(bn)s-o_port1 2 -j %(bn)s-s_port1 -I %(bn)s-o_port1 3 -p udp -m udp --sport 68 --dport 67 -j RETURN -I %(bn)s-o_port1 4 -p udp -m udp --sport 67 -m udp --dport 68 -j DROP -I %(bn)s-o_port1 5 -m state --state RELATED,ESTABLISHED -j RETURN -I %(bn)s-o_port1 6 -j RETURN -I %(bn)s-o_port1 7 -m state --state INVALID -j DROP -I %(bn)s-o_port1 8 -j %(bn)s-sg-fallback -I %(bn)s-s_port1 1 -s 10.0.0.3/32 -m mac --mac-source 12:34:56:78:9A:BC \ -j RETURN -I %(bn)s-s_port1 2 -j DROP -I %(bn)s-sg-chain 1 %(physdev_mod)s --physdev-INGRESS tap_port1 \ %(physdev_is_bridged)s -j %(bn)s-i_port1 -I %(bn)s-sg-chain 2 %(physdev_mod)s --physdev-EGRESS tap_port1 \ %(physdev_is_bridged)s -j %(bn)s-o_port1 -I %(bn)s-sg-chain 3 -j ACCEPT -I %(bn)s-sg-fallback 1 -j DROP COMMIT # Completed by iptables_manager """ % IPTABLES_ARG IPTABLES_ARG['chains'] = CHAINS_2 IPSET_FILTER_2 = """# Generated by iptables_manager *filter :FORWARD - [0:0] :INPUT - [0:0] :OUTPUT - [0:0] :neutron-filter-top - [0:0] :%(bn)s-(%(chains)s) - [0:0] :%(bn)s-(%(chains)s) - [0:0] :%(bn)s-(%(chains)s) - [0:0] :%(bn)s-(%(chains)s) - [0:0] :%(bn)s-(%(chains)s) - [0:0] :%(bn)s-(%(chains)s) - [0:0] :%(bn)s-(%(chains)s) - [0:0] :%(bn)s-(%(chains)s) - [0:0] :%(bn)s-(%(chains)s) - [0:0] :%(bn)s-(%(chains)s) - [0:0] :%(bn)s-(%(chains)s) - [0:0] :%(bn)s-(%(chains)s) - [0:0] :%(bn)s-(%(chains)s) - [0:0] -I FORWARD 1 -j neutron-filter-top -I FORWARD 2 -j %(bn)s-FORWARD -I INPUT 1 -j %(bn)s-INPUT -I OUTPUT 1 -j neutron-filter-top -I OUTPUT 2 -j %(bn)s-OUTPUT -I neutron-filter-top 1 -j %(bn)s-local -I %(bn)s-FORWARD 1 -j %(bn)s-scope -I %(bn)s-FORWARD 2 %(physdev_mod)s --physdev-INGRESS tap_%(port1)s \ %(physdev_is_bridged)s -j %(bn)s-sg-chain -I %(bn)s-FORWARD 3 %(physdev_mod)s --physdev-EGRESS tap_%(port1)s \ %(physdev_is_bridged)s -j %(bn)s-sg-chain -I %(bn)s-FORWARD 4 
%(physdev_mod)s --physdev-INGRESS tap_%(port2)s \ %(physdev_is_bridged)s -j %(bn)s-sg-chain -I %(bn)s-FORWARD 5 %(physdev_mod)s --physdev-EGRESS tap_%(port2)s \ %(physdev_is_bridged)s -j %(bn)s-sg-chain -I %(bn)s-INPUT 1 %(physdev_mod)s --physdev-EGRESS tap_%(port1)s \ %(physdev_is_bridged)s -j %(bn)s-o_%(port1)s -I %(bn)s-INPUT 2 %(physdev_mod)s --physdev-EGRESS tap_%(port2)s \ %(physdev_is_bridged)s -j %(bn)s-o_%(port2)s -I %(bn)s-i_%(port1)s 1 -m state --state RELATED,ESTABLISHED -j RETURN -I %(bn)s-i_%(port1)s 2 -s 10.0.0.2/32 -p udp -m udp --sport 67 \ -m udp --dport 68 -j RETURN -I %(bn)s-i_%(port1)s 3 -p tcp -m tcp --dport 22 -j RETURN -I %(bn)s-i_%(port1)s 4 -m set --match-set NIPv4security_group1 src -j RETURN -I %(bn)s-i_%(port1)s 5 -m state --state INVALID -j DROP -I %(bn)s-i_%(port1)s 6 -j %(bn)s-sg-fallback -I %(bn)s-i_%(port2)s 1 -m state --state RELATED,ESTABLISHED -j RETURN -I %(bn)s-i_%(port2)s 2 -s 10.0.0.2/32 -p udp -m udp --sport 67 \ -m udp --dport 68 -j RETURN -I %(bn)s-i_%(port2)s 3 -p tcp -m tcp --dport 22 -j RETURN -I %(bn)s-i_%(port2)s 4 -m set --match-set NIPv4security_group1 src -j RETURN -I %(bn)s-i_%(port2)s 5 -m state --state INVALID -j DROP -I %(bn)s-i_%(port2)s 6 -j %(bn)s-sg-fallback -I %(bn)s-o_%(port1)s 1 -s 0.0.0.0/32 -d 255.255.255.255/32 -p udp -m udp \ --sport 68 --dport 67 -j RETURN -I %(bn)s-o_%(port1)s 2 -j %(bn)s-s_%(port1)s -I %(bn)s-o_%(port1)s 3 -p udp -m udp --sport 68 --dport 67 -j RETURN -I %(bn)s-o_%(port1)s 4 -p udp -m udp --sport 67 -m udp --dport 68 -j DROP -I %(bn)s-o_%(port1)s 5 -m state --state RELATED,ESTABLISHED -j RETURN -I %(bn)s-o_%(port1)s 6 -j RETURN -I %(bn)s-o_%(port1)s 7 -m state --state INVALID -j DROP -I %(bn)s-o_%(port1)s 8 -j %(bn)s-sg-fallback -I %(bn)s-o_%(port2)s 1 -s 0.0.0.0/32 -d 255.255.255.255/32 -p udp -m udp \ --sport 68 --dport 67 -j RETURN -I %(bn)s-o_%(port2)s 2 -j %(bn)s-s_%(port2)s -I %(bn)s-o_%(port2)s 3 -p udp -m udp --sport 68 --dport 67 -j RETURN -I %(bn)s-o_%(port2)s 4 -p udp -m udp --sport 67 -m udp --dport 68 -j DROP -I %(bn)s-o_%(port2)s 5 -m state --state RELATED,ESTABLISHED -j RETURN -I %(bn)s-o_%(port2)s 6 -j RETURN -I %(bn)s-o_%(port2)s 7 -m state --state INVALID -j DROP -I %(bn)s-o_%(port2)s 8 -j %(bn)s-sg-fallback -I %(bn)s-s_%(port1)s 1 -s %(ip1)s -m mac --mac-source %(mac1)s -j RETURN -I %(bn)s-s_%(port1)s 2 -j DROP -I %(bn)s-s_%(port2)s 1 -s %(ip2)s -m mac --mac-source %(mac2)s -j RETURN -I %(bn)s-s_%(port2)s 2 -j DROP -I %(bn)s-sg-chain 1 %(physdev_mod)s --physdev-INGRESS tap_%(port1)s \ %(physdev_is_bridged)s -j %(bn)s-i_%(port1)s -I %(bn)s-sg-chain 2 %(physdev_mod)s --physdev-EGRESS tap_%(port1)s \ %(physdev_is_bridged)s -j %(bn)s-o_%(port1)s -I %(bn)s-sg-chain 3 %(physdev_mod)s --physdev-INGRESS tap_%(port2)s \ %(physdev_is_bridged)s -j %(bn)s-i_%(port2)s -I %(bn)s-sg-chain 4 %(physdev_mod)s --physdev-EGRESS tap_%(port2)s \ %(physdev_is_bridged)s -j %(bn)s-o_%(port2)s -I %(bn)s-sg-chain 5 -j ACCEPT -I %(bn)s-sg-fallback 1 -j DROP COMMIT # Completed by iptables_manager """ % IPTABLES_ARG IPSET_FILTER_2_3 = """# Generated by iptables_manager *filter :FORWARD - [0:0] :INPUT - [0:0] :OUTPUT - [0:0] :neutron-filter-top - [0:0] :%(bn)s-(%(chains)s) - [0:0] :%(bn)s-(%(chains)s) - [0:0] :%(bn)s-(%(chains)s) - [0:0] :%(bn)s-(%(chains)s) - [0:0] :%(bn)s-(%(chains)s) - [0:0] :%(bn)s-(%(chains)s) - [0:0] :%(bn)s-(%(chains)s) - [0:0] :%(bn)s-(%(chains)s) - [0:0] :%(bn)s-(%(chains)s) - [0:0] :%(bn)s-(%(chains)s) - [0:0] :%(bn)s-(%(chains)s) - [0:0] :%(bn)s-(%(chains)s) - [0:0] :%(bn)s-(%(chains)s) - 
[0:0] -I FORWARD 1 -j neutron-filter-top -I FORWARD 2 -j %(bn)s-FORWARD -I INPUT 1 -j %(bn)s-INPUT -I OUTPUT 1 -j neutron-filter-top -I OUTPUT 2 -j %(bn)s-OUTPUT -I neutron-filter-top 1 -j %(bn)s-local -I %(bn)s-FORWARD 1 -j %(bn)s-scope -I %(bn)s-FORWARD 2 %(physdev_mod)s --physdev-INGRESS tap_%(port1)s \ %(physdev_is_bridged)s -j %(bn)s-sg-chain -I %(bn)s-FORWARD 3 %(physdev_mod)s --physdev-EGRESS tap_%(port1)s \ %(physdev_is_bridged)s -j %(bn)s-sg-chain -I %(bn)s-FORWARD 4 %(physdev_mod)s --physdev-INGRESS tap_%(port2)s \ %(physdev_is_bridged)s -j %(bn)s-sg-chain -I %(bn)s-FORWARD 5 %(physdev_mod)s --physdev-EGRESS tap_%(port2)s \ %(physdev_is_bridged)s -j %(bn)s-sg-chain -I %(bn)s-INPUT 1 %(physdev_mod)s --physdev-EGRESS tap_%(port1)s \ %(physdev_is_bridged)s -j %(bn)s-o_%(port1)s -I %(bn)s-INPUT 2 %(physdev_mod)s --physdev-EGRESS tap_%(port2)s \ %(physdev_is_bridged)s -j %(bn)s-o_%(port2)s -I %(bn)s-i_%(port1)s 1 -m state --state RELATED,ESTABLISHED -j RETURN -I %(bn)s-i_%(port1)s 2 -s 10.0.0.2/32 -p udp -m udp --sport 67 \ -m udp --dport 68 -j RETURN -I %(bn)s-i_%(port1)s 3 -p tcp -m tcp --dport 22 -j RETURN -I %(bn)s-i_%(port1)s 4 -m set --match-set NIPv4security_group1 src -j RETURN -I %(bn)s-i_%(port1)s 5 -p icmp -j RETURN -I %(bn)s-i_%(port1)s 6 -m state --state INVALID -j DROP -I %(bn)s-i_%(port1)s 7 -j %(bn)s-sg-fallback -I %(bn)s-i_%(port2)s 1 -m state --state RELATED,ESTABLISHED -j RETURN -I %(bn)s-i_%(port2)s 2 -s 10.0.0.2/32 -p udp -m udp --sport 67 \ -m udp --dport 68 -j RETURN -I %(bn)s-i_%(port2)s 3 -p tcp -m tcp --dport 22 -j RETURN -I %(bn)s-i_%(port2)s 4 -m set --match-set NIPv4security_group1 src -j RETURN -I %(bn)s-i_%(port2)s 5 -p icmp -j RETURN -I %(bn)s-i_%(port2)s 6 -m state --state INVALID -j DROP -I %(bn)s-i_%(port2)s 7 -j %(bn)s-sg-fallback -I %(bn)s-o_%(port1)s 1 -s 0.0.0.0/32 -d 255.255.255.255/32 -p udp -m udp \ --sport 68 --dport 67 -j RETURN -I %(bn)s-o_%(port1)s 2 -j %(bn)s-s_%(port1)s -I %(bn)s-o_%(port1)s 3 -p udp -m udp --sport 68 --dport 67 -j RETURN -I %(bn)s-o_%(port1)s 4 -p udp -m udp --sport 67 -m udp --dport 68 -j DROP -I %(bn)s-o_%(port1)s 5 -m state --state RELATED,ESTABLISHED -j RETURN -I %(bn)s-o_%(port1)s 6 -j RETURN -I %(bn)s-o_%(port1)s 7 -m state --state INVALID -j DROP -I %(bn)s-o_%(port1)s 8 -j %(bn)s-sg-fallback -I %(bn)s-o_%(port2)s 1 -s 0.0.0.0/32 -d 255.255.255.255/32 -p udp -m udp \ --sport 68 --dport 67 -j RETURN -I %(bn)s-o_%(port2)s 2 -j %(bn)s-s_%(port2)s -I %(bn)s-o_%(port2)s 3 -p udp -m udp --sport 68 --dport 67 -j RETURN -I %(bn)s-o_%(port2)s 4 -p udp -m udp --sport 67 -m udp --dport 68 -j DROP -I %(bn)s-o_%(port2)s 5 -m state --state RELATED,ESTABLISHED -j RETURN -I %(bn)s-o_%(port2)s 6 -j RETURN -I %(bn)s-o_%(port2)s 7 -m state --state INVALID -j DROP -I %(bn)s-o_%(port2)s 8 -j %(bn)s-sg-fallback -I %(bn)s-s_%(port1)s 1 -s %(ip1)s -m mac --mac-source %(mac1)s -j RETURN -I %(bn)s-s_%(port1)s 2 -j DROP -I %(bn)s-s_%(port2)s 1 -s %(ip2)s -m mac --mac-source %(mac2)s -j RETURN -I %(bn)s-s_%(port2)s 2 -j DROP -I %(bn)s-sg-chain 1 %(physdev_mod)s --physdev-INGRESS tap_%(port1)s \ %(physdev_is_bridged)s -j %(bn)s-i_%(port1)s -I %(bn)s-sg-chain 2 %(physdev_mod)s --physdev-EGRESS tap_%(port1)s \ %(physdev_is_bridged)s -j %(bn)s-o_%(port1)s -I %(bn)s-sg-chain 3 %(physdev_mod)s --physdev-INGRESS tap_%(port2)s \ %(physdev_is_bridged)s -j %(bn)s-i_%(port2)s -I %(bn)s-sg-chain 4 %(physdev_mod)s --physdev-EGRESS tap_%(port2)s \ %(physdev_is_bridged)s -j %(bn)s-o_%(port2)s -I %(bn)s-sg-chain 5 -j ACCEPT -I %(bn)s-sg-fallback 1 -j DROP 
COMMIT # Completed by iptables_manager """ % IPTABLES_ARG IPTABLES_FILTER_2 = """# Generated by iptables_manager *filter :FORWARD - [0:0] :INPUT - [0:0] :OUTPUT - [0:0] :neutron-filter-top - [0:0] :%(bn)s-(%(chains)s) - [0:0] :%(bn)s-(%(chains)s) - [0:0] :%(bn)s-(%(chains)s) - [0:0] :%(bn)s-(%(chains)s) - [0:0] :%(bn)s-(%(chains)s) - [0:0] :%(bn)s-(%(chains)s) - [0:0] :%(bn)s-(%(chains)s) - [0:0] :%(bn)s-(%(chains)s) - [0:0] :%(bn)s-(%(chains)s) - [0:0] :%(bn)s-(%(chains)s) - [0:0] :%(bn)s-(%(chains)s) - [0:0] :%(bn)s-(%(chains)s) - [0:0] :%(bn)s-(%(chains)s) - [0:0] -I FORWARD 1 -j neutron-filter-top -I FORWARD 2 -j %(bn)s-FORWARD -I INPUT 1 -j %(bn)s-INPUT -I OUTPUT 1 -j neutron-filter-top -I OUTPUT 2 -j %(bn)s-OUTPUT -I neutron-filter-top 1 -j %(bn)s-local -I %(bn)s-FORWARD 1 -j %(bn)s-scope -I %(bn)s-FORWARD 2 %(physdev_mod)s --physdev-INGRESS tap_%(port1)s \ %(physdev_is_bridged)s -j %(bn)s-sg-chain -I %(bn)s-FORWARD 3 %(physdev_mod)s --physdev-EGRESS tap_%(port1)s \ %(physdev_is_bridged)s -j %(bn)s-sg-chain -I %(bn)s-FORWARD 4 %(physdev_mod)s --physdev-INGRESS tap_%(port2)s \ %(physdev_is_bridged)s -j %(bn)s-sg-chain -I %(bn)s-FORWARD 5 %(physdev_mod)s --physdev-EGRESS tap_%(port2)s \ %(physdev_is_bridged)s -j %(bn)s-sg-chain -I %(bn)s-INPUT 1 %(physdev_mod)s --physdev-EGRESS tap_%(port1)s \ %(physdev_is_bridged)s -j %(bn)s-o_%(port1)s -I %(bn)s-INPUT 2 %(physdev_mod)s --physdev-EGRESS tap_%(port2)s \ %(physdev_is_bridged)s -j %(bn)s-o_%(port2)s -I %(bn)s-i_%(port1)s 1 -m state --state RELATED,ESTABLISHED -j RETURN -I %(bn)s-i_%(port1)s 2 -s 10.0.0.2/32 -p udp -m udp --sport 67 \ -m udp --dport 68 -j RETURN -I %(bn)s-i_%(port1)s 3 -p tcp -m tcp --dport 22 -j RETURN -I %(bn)s-i_%(port1)s 4 -s %(ip2)s -j RETURN -I %(bn)s-i_%(port1)s 5 -m state --state INVALID -j DROP -I %(bn)s-i_%(port1)s 6 -j %(bn)s-sg-fallback -I %(bn)s-i_%(port2)s 1 -m state --state RELATED,ESTABLISHED -j RETURN -I %(bn)s-i_%(port2)s 2 -s 10.0.0.2/32 -p udp -m udp --sport 67 \ -m udp --dport 68 -j RETURN -I %(bn)s-i_%(port2)s 3 -p tcp -m tcp --dport 22 -j RETURN -I %(bn)s-i_%(port2)s 4 -s %(ip1)s -j RETURN -I %(bn)s-i_%(port2)s 5 -m state --state INVALID -j DROP -I %(bn)s-i_%(port2)s 6 -j %(bn)s-sg-fallback -I %(bn)s-o_%(port1)s 1 -s 0.0.0.0/32 -d 255.255.255.255/32 -p udp -m udp \ --sport 68 --dport 67 -j RETURN -I %(bn)s-o_%(port1)s 2 -j %(bn)s-s_%(port1)s -I %(bn)s-o_%(port1)s 3 -p udp -m udp --sport 68 --dport 67 -j RETURN -I %(bn)s-o_%(port1)s 4 -p udp -m udp --sport 67 -m udp --dport 68 -j DROP -I %(bn)s-o_%(port1)s 5 -m state --state RELATED,ESTABLISHED -j RETURN -I %(bn)s-o_%(port1)s 6 -j RETURN -I %(bn)s-o_%(port1)s 7 -m state --state INVALID -j DROP -I %(bn)s-o_%(port1)s 8 -j %(bn)s-sg-fallback -I %(bn)s-o_%(port2)s 1 -s 0.0.0.0/32 -d 255.255.255.255/32 -p udp -m udp \ --sport 68 --dport 67 -j RETURN -I %(bn)s-o_%(port2)s 2 -j %(bn)s-s_%(port2)s -I %(bn)s-o_%(port2)s 3 -p udp -m udp --sport 68 --dport 67 -j RETURN -I %(bn)s-o_%(port2)s 4 -p udp -m udp --sport 67 -m udp --dport 68 -j DROP -I %(bn)s-o_%(port2)s 5 -m state --state RELATED,ESTABLISHED -j RETURN -I %(bn)s-o_%(port2)s 6 -j RETURN -I %(bn)s-o_%(port2)s 7 -m state --state INVALID -j DROP -I %(bn)s-o_%(port2)s 8 -j %(bn)s-sg-fallback -I %(bn)s-s_%(port1)s 1 -s %(ip1)s -m mac --mac-source %(mac1)s -j RETURN -I %(bn)s-s_%(port1)s 2 -j DROP -I %(bn)s-s_%(port2)s 1 -s %(ip2)s -m mac --mac-source %(mac2)s -j RETURN -I %(bn)s-s_%(port2)s 2 -j DROP -I %(bn)s-sg-chain 1 %(physdev_mod)s --physdev-INGRESS tap_%(port1)s \ %(physdev_is_bridged)s -j 
%(bn)s-i_%(port1)s -I %(bn)s-sg-chain 2 %(physdev_mod)s --physdev-EGRESS tap_%(port1)s \ %(physdev_is_bridged)s -j %(bn)s-o_%(port1)s -I %(bn)s-sg-chain 3 %(physdev_mod)s --physdev-INGRESS tap_%(port2)s \ %(physdev_is_bridged)s -j %(bn)s-i_%(port2)s -I %(bn)s-sg-chain 4 %(physdev_mod)s --physdev-EGRESS tap_%(port2)s \ %(physdev_is_bridged)s -j %(bn)s-o_%(port2)s -I %(bn)s-sg-chain 5 -j ACCEPT -I %(bn)s-sg-fallback 1 -j DROP COMMIT # Completed by iptables_manager """ % IPTABLES_ARG IPTABLES_FILTER_2_2 = """# Generated by iptables_manager *filter :FORWARD - [0:0] :INPUT - [0:0] :OUTPUT - [0:0] :neutron-filter-top - [0:0] :%(bn)s-(%(chains)s) - [0:0] :%(bn)s-(%(chains)s) - [0:0] :%(bn)s-(%(chains)s) - [0:0] :%(bn)s-(%(chains)s) - [0:0] :%(bn)s-(%(chains)s) - [0:0] :%(bn)s-(%(chains)s) - [0:0] :%(bn)s-(%(chains)s) - [0:0] :%(bn)s-(%(chains)s) - [0:0] :%(bn)s-(%(chains)s) - [0:0] :%(bn)s-(%(chains)s) - [0:0] :%(bn)s-(%(chains)s) - [0:0] :%(bn)s-(%(chains)s) - [0:0] :%(bn)s-(%(chains)s) - [0:0] -I FORWARD 1 -j neutron-filter-top -I FORWARD 2 -j %(bn)s-FORWARD -I INPUT 1 -j %(bn)s-INPUT -I OUTPUT 1 -j neutron-filter-top -I OUTPUT 2 -j %(bn)s-OUTPUT -I neutron-filter-top 1 -j %(bn)s-local -I %(bn)s-FORWARD 1 -j %(bn)s-scope -I %(bn)s-FORWARD 2 %(physdev_mod)s --physdev-INGRESS tap_%(port1)s \ %(physdev_is_bridged)s -j %(bn)s-sg-chain -I %(bn)s-FORWARD 3 %(physdev_mod)s --physdev-EGRESS tap_%(port1)s \ %(physdev_is_bridged)s -j %(bn)s-sg-chain -I %(bn)s-FORWARD 4 %(physdev_mod)s --physdev-INGRESS tap_%(port2)s \ %(physdev_is_bridged)s -j %(bn)s-sg-chain -I %(bn)s-FORWARD 5 %(physdev_mod)s --physdev-EGRESS tap_%(port2)s \ %(physdev_is_bridged)s -j %(bn)s-sg-chain -I %(bn)s-INPUT 1 %(physdev_mod)s --physdev-EGRESS tap_%(port1)s \ %(physdev_is_bridged)s -j %(bn)s-o_%(port1)s -I %(bn)s-INPUT 2 %(physdev_mod)s --physdev-EGRESS tap_%(port2)s \ %(physdev_is_bridged)s -j %(bn)s-o_%(port2)s -I %(bn)s-i_%(port1)s 1 -m state --state RELATED,ESTABLISHED -j RETURN -I %(bn)s-i_%(port1)s 2 -s 10.0.0.2/32 -p udp -m udp --sport 67 \ -m udp --dport 68 -j RETURN -I %(bn)s-i_%(port1)s 3 -p tcp -m tcp --dport 22 -j RETURN -I %(bn)s-i_%(port1)s 4 -m state --state INVALID -j DROP -I %(bn)s-i_%(port1)s 5 -j %(bn)s-sg-fallback -I %(bn)s-i_%(port2)s 1 -m state --state RELATED,ESTABLISHED -j RETURN -I %(bn)s-i_%(port2)s 2 -s 10.0.0.2/32 -p udp -m udp --sport 67 \ -m udp --dport 68 -j RETURN -I %(bn)s-i_%(port2)s 3 -p tcp -m tcp --dport 22 -j RETURN -I %(bn)s-i_%(port2)s 4 -s %(ip1)s -j RETURN -I %(bn)s-i_%(port2)s 5 -m state --state INVALID -j DROP -I %(bn)s-i_%(port2)s 6 -j %(bn)s-sg-fallback -I %(bn)s-o_%(port1)s 1 -s 0.0.0.0/32 -d 255.255.255.255/32 -p udp -m udp \ --sport 68 --dport 67 -j RETURN -I %(bn)s-o_%(port1)s 2 -j %(bn)s-s_%(port1)s -I %(bn)s-o_%(port1)s 3 -p udp -m udp --sport 68 --dport 67 -j RETURN -I %(bn)s-o_%(port1)s 4 -p udp -m udp --sport 67 -m udp --dport 68 -j DROP -I %(bn)s-o_%(port1)s 5 -m state --state RELATED,ESTABLISHED -j RETURN -I %(bn)s-o_%(port1)s 6 -j RETURN -I %(bn)s-o_%(port1)s 7 -m state --state INVALID -j DROP -I %(bn)s-o_%(port1)s 8 -j %(bn)s-sg-fallback -I %(bn)s-o_%(port2)s 1 -s 0.0.0.0/32 -d 255.255.255.255/32 -p udp -m udp \ --sport 68 --dport 67 -j RETURN -I %(bn)s-o_%(port2)s 2 -j %(bn)s-s_%(port2)s -I %(bn)s-o_%(port2)s 3 -p udp -m udp --sport 68 --dport 67 -j RETURN -I %(bn)s-o_%(port2)s 4 -p udp -m udp --sport 67 -m udp --dport 68 -j DROP -I %(bn)s-o_%(port2)s 5 -m state --state RELATED,ESTABLISHED -j RETURN -I %(bn)s-o_%(port2)s 6 -j RETURN -I %(bn)s-o_%(port2)s 7 -m state 
--state INVALID -j DROP -I %(bn)s-o_%(port2)s 8 -j %(bn)s-sg-fallback -I %(bn)s-s_%(port1)s 1 -s %(ip1)s -m mac --mac-source %(mac1)s -j RETURN -I %(bn)s-s_%(port1)s 2 -j DROP -I %(bn)s-s_%(port2)s 1 -s %(ip2)s -m mac --mac-source %(mac2)s -j RETURN -I %(bn)s-s_%(port2)s 2 -j DROP -I %(bn)s-sg-chain 1 %(physdev_mod)s --physdev-INGRESS tap_%(port1)s \ %(physdev_is_bridged)s -j %(bn)s-i_%(port1)s -I %(bn)s-sg-chain 2 %(physdev_mod)s --physdev-EGRESS tap_%(port1)s \ %(physdev_is_bridged)s -j %(bn)s-o_%(port1)s -I %(bn)s-sg-chain 3 %(physdev_mod)s --physdev-INGRESS tap_%(port2)s \ %(physdev_is_bridged)s -j %(bn)s-i_%(port2)s -I %(bn)s-sg-chain 4 %(physdev_mod)s --physdev-EGRESS tap_%(port2)s \ %(physdev_is_bridged)s -j %(bn)s-o_%(port2)s -I %(bn)s-sg-chain 5 -j ACCEPT -I %(bn)s-sg-fallback 1 -j DROP COMMIT # Completed by iptables_manager """ % IPTABLES_ARG IPTABLES_FILTER_2_3 = """# Generated by iptables_manager *filter :FORWARD - [0:0] :INPUT - [0:0] :OUTPUT - [0:0] :neutron-filter-top - [0:0] :%(bn)s-(%(chains)s) - [0:0] :%(bn)s-(%(chains)s) - [0:0] :%(bn)s-(%(chains)s) - [0:0] :%(bn)s-(%(chains)s) - [0:0] :%(bn)s-(%(chains)s) - [0:0] :%(bn)s-(%(chains)s) - [0:0] :%(bn)s-(%(chains)s) - [0:0] :%(bn)s-(%(chains)s) - [0:0] :%(bn)s-(%(chains)s) - [0:0] :%(bn)s-(%(chains)s) - [0:0] :%(bn)s-(%(chains)s) - [0:0] :%(bn)s-(%(chains)s) - [0:0] :%(bn)s-(%(chains)s) - [0:0] -I FORWARD 1 -j neutron-filter-top -I FORWARD 2 -j %(bn)s-FORWARD -I INPUT 1 -j %(bn)s-INPUT -I OUTPUT 1 -j neutron-filter-top -I OUTPUT 2 -j %(bn)s-OUTPUT -I neutron-filter-top 1 -j %(bn)s-local -I %(bn)s-FORWARD 1 -j %(bn)s-scope -I %(bn)s-FORWARD 2 %(physdev_mod)s --physdev-INGRESS tap_%(port1)s \ %(physdev_is_bridged)s -j %(bn)s-sg-chain -I %(bn)s-FORWARD 3 %(physdev_mod)s --physdev-EGRESS tap_%(port1)s \ %(physdev_is_bridged)s -j %(bn)s-sg-chain -I %(bn)s-FORWARD 4 %(physdev_mod)s --physdev-INGRESS tap_%(port2)s \ %(physdev_is_bridged)s -j %(bn)s-sg-chain -I %(bn)s-FORWARD 5 %(physdev_mod)s --physdev-EGRESS tap_%(port2)s \ %(physdev_is_bridged)s -j %(bn)s-sg-chain -I %(bn)s-INPUT 1 %(physdev_mod)s --physdev-EGRESS tap_%(port1)s \ %(physdev_is_bridged)s -j %(bn)s-o_%(port1)s -I %(bn)s-INPUT 2 %(physdev_mod)s --physdev-EGRESS tap_%(port2)s \ %(physdev_is_bridged)s -j %(bn)s-o_%(port2)s -I %(bn)s-i_%(port1)s 1 -m state --state RELATED,ESTABLISHED -j RETURN -I %(bn)s-i_%(port1)s 2 -s 10.0.0.2/32 -p udp -m udp --sport 67 \ -m udp --dport 68 -j RETURN -I %(bn)s-i_%(port1)s 3 -p tcp -m tcp --dport 22 -j RETURN -I %(bn)s-i_%(port1)s 4 -s %(ip2)s -j RETURN -I %(bn)s-i_%(port1)s 5 -p icmp -j RETURN -I %(bn)s-i_%(port1)s 6 -m state --state INVALID -j DROP -I %(bn)s-i_%(port1)s 7 -j %(bn)s-sg-fallback -I %(bn)s-i_%(port2)s 1 -m state --state RELATED,ESTABLISHED -j RETURN -I %(bn)s-i_%(port2)s 2 -s 10.0.0.2/32 -p udp -m udp --sport 67 \ -m udp --dport 68 -j RETURN -I %(bn)s-i_%(port2)s 3 -p tcp -m tcp --dport 22 -j RETURN -I %(bn)s-i_%(port2)s 4 -s %(ip1)s -j RETURN -I %(bn)s-i_%(port2)s 5 -p icmp -j RETURN -I %(bn)s-i_%(port2)s 6 -m state --state INVALID -j DROP -I %(bn)s-i_%(port2)s 7 -j %(bn)s-sg-fallback -I %(bn)s-o_%(port1)s 1 -s 0.0.0.0/32 -d 255.255.255.255/32 -p udp -m udp \ --sport 68 --dport 67 -j RETURN -I %(bn)s-o_%(port1)s 2 -j %(bn)s-s_%(port1)s -I %(bn)s-o_%(port1)s 3 -p udp -m udp --sport 68 --dport 67 -j RETURN -I %(bn)s-o_%(port1)s 4 -p udp -m udp --sport 67 -m udp --dport 68 -j DROP -I %(bn)s-o_%(port1)s 5 -m state --state RELATED,ESTABLISHED -j RETURN -I %(bn)s-o_%(port1)s 6 -j RETURN -I %(bn)s-o_%(port1)s 7 -m state 
--state INVALID -j DROP -I %(bn)s-o_%(port1)s 8 -j %(bn)s-sg-fallback -I %(bn)s-o_%(port2)s 1 -s 0.0.0.0/32 -d 255.255.255.255/32 -p udp -m udp \ --sport 68 --dport 67 -j RETURN -I %(bn)s-o_%(port2)s 2 -j %(bn)s-s_%(port2)s -I %(bn)s-o_%(port2)s 3 -p udp -m udp --sport 68 --dport 67 -j RETURN -I %(bn)s-o_%(port2)s 4 -p udp -m udp --sport 67 -m udp --dport 68 -j DROP -I %(bn)s-o_%(port2)s 5 -m state --state RELATED,ESTABLISHED -j RETURN -I %(bn)s-o_%(port2)s 6 -j RETURN -I %(bn)s-o_%(port2)s 7 -m state --state INVALID -j DROP -I %(bn)s-o_%(port2)s 8 -j %(bn)s-sg-fallback -I %(bn)s-s_%(port1)s 1 -s %(ip1)s -m mac --mac-source %(mac1)s -j RETURN -I %(bn)s-s_%(port1)s 2 -j DROP -I %(bn)s-s_%(port2)s 1 -s %(ip2)s -m mac --mac-source %(mac2)s -j RETURN -I %(bn)s-s_%(port2)s 2 -j DROP -I %(bn)s-sg-chain 1 %(physdev_mod)s --physdev-INGRESS tap_%(port1)s \ %(physdev_is_bridged)s -j %(bn)s-i_%(port1)s -I %(bn)s-sg-chain 2 %(physdev_mod)s --physdev-EGRESS tap_%(port1)s \ %(physdev_is_bridged)s -j %(bn)s-o_%(port1)s -I %(bn)s-sg-chain 3 %(physdev_mod)s --physdev-INGRESS tap_%(port2)s \ %(physdev_is_bridged)s -j %(bn)s-i_%(port2)s -I %(bn)s-sg-chain 4 %(physdev_mod)s --physdev-EGRESS tap_%(port2)s \ %(physdev_is_bridged)s -j %(bn)s-o_%(port2)s -I %(bn)s-sg-chain 5 -j ACCEPT -I %(bn)s-sg-fallback 1 -j DROP COMMIT # Completed by iptables_manager """ % IPTABLES_ARG IPTABLES_ARG['chains'] = CHAINS_EMPTY IPTABLES_FILTER_EMPTY = """# Generated by iptables_manager *filter :FORWARD - [0:0] :INPUT - [0:0] :OUTPUT - [0:0] :neutron-filter-top - [0:0] :%(bn)s-(%(chains)s) - [0:0] :%(bn)s-(%(chains)s) - [0:0] :%(bn)s-(%(chains)s) - [0:0] :%(bn)s-(%(chains)s) - [0:0] :%(bn)s-(%(chains)s) - [0:0] :%(bn)s-(%(chains)s) - [0:0] :%(bn)s-(%(chains)s) - [0:0] -I FORWARD 1 -j neutron-filter-top -I FORWARD 2 -j %(bn)s-FORWARD -I INPUT 1 -j %(bn)s-INPUT -I OUTPUT 1 -j neutron-filter-top -I OUTPUT 2 -j %(bn)s-OUTPUT -I neutron-filter-top 1 -j %(bn)s-local -I %(bn)s-FORWARD 1 -j %(bn)s-scope -I %(bn)s-sg-chain 1 -j ACCEPT -I %(bn)s-sg-fallback 1 -j DROP COMMIT # Completed by iptables_manager """ % IPTABLES_ARG IPTABLES_ARG['chains'] = CHAINS_1 IPTABLES_FILTER_V6_1 = """# Generated by iptables_manager *filter :FORWARD - [0:0] :INPUT - [0:0] :OUTPUT - [0:0] :neutron-filter-top - [0:0] :%(bn)s-(%(chains)s) - [0:0] :%(bn)s-(%(chains)s) - [0:0] :%(bn)s-(%(chains)s) - [0:0] :%(bn)s-(%(chains)s) - [0:0] :%(bn)s-(%(chains)s) - [0:0] :%(bn)s-(%(chains)s) - [0:0] :%(bn)s-(%(chains)s) - [0:0] :%(bn)s-(%(chains)s) - [0:0] :%(bn)s-(%(chains)s) - [0:0] -I FORWARD 1 -j neutron-filter-top -I FORWARD 2 -j %(bn)s-FORWARD -I INPUT 1 -j %(bn)s-INPUT -I OUTPUT 1 -j neutron-filter-top -I OUTPUT 2 -j %(bn)s-OUTPUT -I neutron-filter-top 1 -j %(bn)s-local -I %(bn)s-FORWARD 1 -j %(bn)s-scope -I %(bn)s-FORWARD 2 %(physdev_mod)s --physdev-INGRESS tap_port1 \ %(physdev_is_bridged)s -j %(bn)s-sg-chain -I %(bn)s-FORWARD 3 %(physdev_mod)s --physdev-EGRESS tap_port1 \ %(physdev_is_bridged)s -j %(bn)s-sg-chain -I %(bn)s-INPUT 1 %(physdev_mod)s --physdev-EGRESS tap_port1 \ %(physdev_is_bridged)s -j %(bn)s-o_port1 -I %(bn)s-i_port1 1 -p ipv6-icmp -m icmp6 --icmpv6-type 130 -j RETURN -I %(bn)s-i_port1 2 -p ipv6-icmp -m icmp6 --icmpv6-type 131 -j RETURN -I %(bn)s-i_port1 3 -p ipv6-icmp -m icmp6 --icmpv6-type 132 -j RETURN -I %(bn)s-i_port1 4 -p ipv6-icmp -m icmp6 --icmpv6-type 135 -j RETURN -I %(bn)s-i_port1 5 -p ipv6-icmp -m icmp6 --icmpv6-type 136 -j RETURN -I %(bn)s-i_port1 6 -m state --state RELATED,ESTABLISHED -j RETURN -I %(bn)s-i_port1 7 -m state --state 
INVALID -j DROP -I %(bn)s-i_port1 8 -j %(bn)s-sg-fallback -I %(bn)s-o_port1 1 -s ::/128 -d ff02::/16 -p ipv6-icmp -m icmp6 \ --icmpv6-type 131 -j RETURN -I %(bn)s-o_port1 2 -s ::/128 -d ff02::/16 -p ipv6-icmp -m icmp6 \ --icmpv6-type 135 -j RETURN -I %(bn)s-o_port1 3 -s ::/128 -d ff02::/16 -p ipv6-icmp -m icmp6 \ --icmpv6-type 143 -j RETURN -I %(bn)s-o_port1 4 -p ipv6-icmp -m icmp6 --icmpv6-type 134 -j DROP -I %(bn)s-o_port1 5 -p ipv6-icmp -j RETURN -I %(bn)s-o_port1 6 -p udp -m udp --sport 546 -m udp --dport 547 -j RETURN -I %(bn)s-o_port1 7 -p udp -m udp --sport 547 -m udp --dport 546 -j DROP -I %(bn)s-o_port1 8 -m state --state RELATED,ESTABLISHED -j RETURN -I %(bn)s-o_port1 9 -m state --state INVALID -j DROP -I %(bn)s-o_port1 10 -j %(bn)s-sg-fallback -I %(bn)s-sg-chain 1 %(physdev_mod)s --physdev-INGRESS tap_port1 \ %(physdev_is_bridged)s -j %(bn)s-i_port1 -I %(bn)s-sg-chain 2 %(physdev_mod)s --physdev-EGRESS tap_port1 \ %(physdev_is_bridged)s -j %(bn)s-o_port1 -I %(bn)s-sg-chain 3 -j ACCEPT -I %(bn)s-sg-fallback 1 -j DROP COMMIT # Completed by iptables_manager """ % IPTABLES_ARG IPTABLES_ARG['chains'] = CHAINS_2 IPTABLES_FILTER_V6_2 = """# Generated by iptables_manager *filter :FORWARD - [0:0] :INPUT - [0:0] :OUTPUT - [0:0] :neutron-filter-top - [0:0] :%(bn)s-(%(chains)s) - [0:0] :%(bn)s-(%(chains)s) - [0:0] :%(bn)s-(%(chains)s) - [0:0] :%(bn)s-(%(chains)s) - [0:0] :%(bn)s-(%(chains)s) - [0:0] :%(bn)s-(%(chains)s) - [0:0] :%(bn)s-(%(chains)s) - [0:0] :%(bn)s-(%(chains)s) - [0:0] :%(bn)s-(%(chains)s) - [0:0] :%(bn)s-(%(chains)s) - [0:0] :%(bn)s-(%(chains)s) - [0:0] -I FORWARD 1 -j neutron-filter-top -I FORWARD 2 -j %(bn)s-FORWARD -I INPUT 1 -j %(bn)s-INPUT -I OUTPUT 1 -j neutron-filter-top -I OUTPUT 2 -j %(bn)s-OUTPUT -I neutron-filter-top 1 -j %(bn)s-local -I %(bn)s-FORWARD 1 -j %(bn)s-scope -I %(bn)s-FORWARD 2 %(physdev_mod)s --physdev-INGRESS tap_%(port1)s \ %(physdev_is_bridged)s -j %(bn)s-sg-chain -I %(bn)s-FORWARD 3 %(physdev_mod)s --physdev-EGRESS tap_%(port1)s \ %(physdev_is_bridged)s -j %(bn)s-sg-chain -I %(bn)s-FORWARD 4 %(physdev_mod)s --physdev-INGRESS tap_%(port2)s \ %(physdev_is_bridged)s -j %(bn)s-sg-chain -I %(bn)s-FORWARD 5 %(physdev_mod)s --physdev-EGRESS tap_%(port2)s \ %(physdev_is_bridged)s -j %(bn)s-sg-chain -I %(bn)s-INPUT 1 %(physdev_mod)s --physdev-EGRESS tap_%(port1)s \ %(physdev_is_bridged)s -j %(bn)s-o_%(port1)s -I %(bn)s-INPUT 2 %(physdev_mod)s --physdev-EGRESS tap_%(port2)s \ %(physdev_is_bridged)s -j %(bn)s-o_%(port2)s -I %(bn)s-i_%(port1)s 1 -p ipv6-icmp -m icmp6 --icmpv6-type 130 -j RETURN -I %(bn)s-i_%(port1)s 2 -p ipv6-icmp -m icmp6 --icmpv6-type 131 -j RETURN -I %(bn)s-i_%(port1)s 3 -p ipv6-icmp -m icmp6 --icmpv6-type 132 -j RETURN -I %(bn)s-i_%(port1)s 4 -p ipv6-icmp -m icmp6 --icmpv6-type 135 -j RETURN -I %(bn)s-i_%(port1)s 5 -p ipv6-icmp -m icmp6 --icmpv6-type 136 -j RETURN -I %(bn)s-i_%(port1)s 6 -m state --state RELATED,ESTABLISHED -j RETURN -I %(bn)s-i_%(port1)s 7 -m state --state INVALID -j DROP -I %(bn)s-i_%(port1)s 8 -j %(bn)s-sg-fallback -I %(bn)s-i_%(port2)s 1 -p ipv6-icmp -m icmp6 --icmpv6-type 130 -j RETURN -I %(bn)s-i_%(port2)s 2 -p ipv6-icmp -m icmp6 --icmpv6-type 131 -j RETURN -I %(bn)s-i_%(port2)s 3 -p ipv6-icmp -m icmp6 --icmpv6-type 132 -j RETURN -I %(bn)s-i_%(port2)s 4 -p ipv6-icmp -m icmp6 --icmpv6-type 135 -j RETURN -I %(bn)s-i_%(port2)s 5 -p ipv6-icmp -m icmp6 --icmpv6-type 136 -j RETURN -I %(bn)s-i_%(port2)s 6 -m state --state RELATED,ESTABLISHED -j RETURN -I %(bn)s-i_%(port2)s 7 -m state --state INVALID -j DROP -I 
%(bn)s-i_%(port2)s 8 -j %(bn)s-sg-fallback -I %(bn)s-o_%(port1)s 1 -s ::/128 -d ff02::/16 -p ipv6-icmp -m icmp6 \ --icmpv6-type 131 -j RETURN -I %(bn)s-o_%(port1)s 2 -s ::/128 -d ff02::/16 -p ipv6-icmp -m icmp6 \ --icmpv6-type 135 -j RETURN -I %(bn)s-o_%(port1)s 3 -s ::/128 -d ff02::/16 -p ipv6-icmp -m icmp6 \ --icmpv6-type 143 -j RETURN -I %(bn)s-o_%(port1)s 4 -p ipv6-icmp -m icmp6 --icmpv6-type 134 -j DROP -I %(bn)s-o_%(port1)s 5 -p ipv6-icmp -j RETURN -I %(bn)s-o_%(port1)s 6 -p udp -m udp --sport 546 -m udp --dport 547 -j RETURN -I %(bn)s-o_%(port1)s 7 -p udp -m udp --sport 547 -m udp --dport 546 -j DROP -I %(bn)s-o_%(port1)s 8 -m state --state RELATED,ESTABLISHED -j RETURN -I %(bn)s-o_%(port1)s 9 -m state --state INVALID -j DROP -I %(bn)s-o_%(port1)s 10 -j %(bn)s-sg-fallback -I %(bn)s-o_%(port2)s 1 -s ::/128 -d ff02::/16 -p ipv6-icmp -m icmp6 \ --icmpv6-type 131 -j RETURN -I %(bn)s-o_%(port2)s 2 -s ::/128 -d ff02::/16 -p ipv6-icmp -m icmp6 \ --icmpv6-type 135 -j RETURN -I %(bn)s-o_%(port2)s 3 -s ::/128 -d ff02::/16 -p ipv6-icmp -m icmp6 \ --icmpv6-type 143 -j RETURN -I %(bn)s-o_%(port2)s 4 -p ipv6-icmp -m icmp6 --icmpv6-type 134 -j DROP -I %(bn)s-o_%(port2)s 5 -p ipv6-icmp -j RETURN -I %(bn)s-o_%(port2)s 6 -p udp -m udp --sport 546 -m udp --dport 547 -j RETURN -I %(bn)s-o_%(port2)s 7 -p udp -m udp --sport 547 -m udp --dport 546 -j DROP -I %(bn)s-o_%(port2)s 8 -m state --state RELATED,ESTABLISHED -j RETURN -I %(bn)s-o_%(port2)s 9 -m state --state INVALID -j DROP -I %(bn)s-o_%(port2)s 10 -j %(bn)s-sg-fallback -I %(bn)s-sg-chain 1 %(physdev_mod)s --physdev-INGRESS tap_%(port1)s \ %(physdev_is_bridged)s -j %(bn)s-i_%(port1)s -I %(bn)s-sg-chain 2 %(physdev_mod)s --physdev-EGRESS tap_%(port1)s \ %(physdev_is_bridged)s -j %(bn)s-o_%(port1)s -I %(bn)s-sg-chain 3 %(physdev_mod)s --physdev-INGRESS tap_%(port2)s \ %(physdev_is_bridged)s -j %(bn)s-i_%(port2)s -I %(bn)s-sg-chain 4 %(physdev_mod)s --physdev-EGRESS tap_%(port2)s \ %(physdev_is_bridged)s -j %(bn)s-o_%(port2)s -I %(bn)s-sg-chain 5 -j ACCEPT -I %(bn)s-sg-fallback 1 -j DROP COMMIT # Completed by iptables_manager """ % IPTABLES_ARG IPTABLES_ARG['chains'] = CHAINS_EMPTY IPTABLES_FILTER_V6_EMPTY = """# Generated by iptables_manager *filter :FORWARD - [0:0] :INPUT - [0:0] :OUTPUT - [0:0] :neutron-filter-top - [0:0] :%(bn)s-(%(chains)s) - [0:0] :%(bn)s-(%(chains)s) - [0:0] :%(bn)s-(%(chains)s) - [0:0] :%(bn)s-(%(chains)s) - [0:0] :%(bn)s-(%(chains)s) - [0:0] :%(bn)s-(%(chains)s) - [0:0] :%(bn)s-(%(chains)s) - [0:0] -I FORWARD 1 -j neutron-filter-top -I FORWARD 2 -j %(bn)s-FORWARD -I INPUT 1 -j %(bn)s-INPUT -I OUTPUT 1 -j neutron-filter-top -I OUTPUT 2 -j %(bn)s-OUTPUT -I neutron-filter-top 1 -j %(bn)s-local -I %(bn)s-FORWARD 1 -j %(bn)s-scope -I %(bn)s-sg-chain 1 -j ACCEPT -I %(bn)s-sg-fallback 1 -j DROP COMMIT # Completed by iptables_manager """ % IPTABLES_ARG class TestSecurityGroupAgentWithIptables(base.BaseTestCase): FIREWALL_DRIVER = FIREWALL_IPTABLES_DRIVER PHYSDEV_INGRESS = 'physdev-out' PHYSDEV_EGRESS = 'physdev-in' def setUp(self, defer_refresh_firewall=False, test_rpc_v1_1=True): super(TestSecurityGroupAgentWithIptables, self).setUp() set_firewall_driver(self.FIREWALL_DRIVER) cfg.CONF.set_override('enable_ipset', False, group='SECURITYGROUP') cfg.CONF.set_override('comment_iptables_rules', False, group='AGENT') self.utils_exec = mock.patch( 'neutron.agent.linux.utils.execute').start() self.rpc = mock.Mock() self._init_agent(defer_refresh_firewall) if test_rpc_v1_1: self.rpc.security_group_info_for_devices.side_effect = ( 
oslo_messaging.UnsupportedVersion('1.2')) self.iptables = self.agent.firewall.iptables # TODO(jlibosva) Get rid of mocking iptables execute and mock out # firewall instead self.iptables.use_ipv6 = True self.iptables_execute = mock.patch.object(self.iptables, "execute").start() self.iptables_execute_return_values = [] self.expected_call_count = 0 self.expected_calls = [] self.expected_process_inputs = [] self.iptables_execute.side_effect = self.iptables_execute_return_values rule1 = [{'direction': 'ingress', 'protocol': const.PROTO_NAME_UDP, 'ethertype': const.IPv4, 'source_ip_prefix': '10.0.0.2/32', 'source_port_range_min': 67, 'source_port_range_max': 67, 'port_range_min': 68, 'port_range_max': 68}, {'direction': 'ingress', 'protocol': const.PROTO_NAME_TCP, 'ethertype': const.IPv4, 'port_range_min': 22, 'port_range_max': 22}, {'direction': 'egress', 'ethertype': const.IPv4}] rule2 = rule1[:] rule2 += [{'direction': 'ingress', 'source_ip_prefix': '10.0.0.4/32', 'ethertype': const.IPv4}] rule3 = rule2[:] rule3 += [{'direction': 'ingress', 'protocol': const.PROTO_NAME_ICMP, 'ethertype': const.IPv4}] rule4 = rule1[:] rule4 += [{'direction': 'ingress', 'source_ip_prefix': '10.0.0.3/32', 'ethertype': const.IPv4}] rule5 = rule4[:] rule5 += [{'direction': 'ingress', 'protocol': const.PROTO_NAME_ICMP, 'ethertype': const.IPv4}] self.devices1 = {'tap_port1': self._device('tap_port1', '10.0.0.3/32', '12:34:56:78:9a:bc', rule1)} self.devices2 = collections.OrderedDict([ ('tap_port1', self._device('tap_port1', '10.0.0.3/32', '12:34:56:78:9a:bc', rule2)), ('tap_port2', self._device('tap_port2', '10.0.0.4/32', '12:34:56:78:9a:bd', rule4)) ]) self.devices3 = collections.OrderedDict([ ('tap_port1', self._device('tap_port1', '10.0.0.3/32', '12:34:56:78:9a:bc', rule3)), ('tap_port2', self._device('tap_port2', '10.0.0.4/32', '12:34:56:78:9a:bd', rule5)) ]) self.agent.firewall.security_group_updated = mock.Mock() @staticmethod def _enforce_order_in_firewall(firewall): # for the sake of the test, eliminate any order randomness: # it helps to match iptables output against regexps consistently for attr in ('filtered_ports', 'unfiltered_ports'): setattr(firewall, attr, collections.OrderedDict()) def _init_agent(self, defer_refresh_firewall): self.agent = sg_rpc.SecurityGroupAgentRpc( context=None, plugin_rpc=self.rpc, defer_refresh_firewall=defer_refresh_firewall) self._enforce_order_in_firewall(self.agent.firewall) def _device(self, device, ip, mac_address, rule): return {'device': device, 'network_id': 'fakenet', 'fixed_ips': [ip], 'mac_address': mac_address, 'security_groups': ['security_group1'], 'security_group_rules': rule, 'security_group_source_groups': [ 'security_group1']} def _regex(self, value): value = value.replace('physdev-INGRESS', self.PHYSDEV_INGRESS) value = value.replace('physdev-EGRESS', self.PHYSDEV_EGRESS) value = value.replace('\n', '\\n') value = value.replace('[', r'\[') value = value.replace(']', r'\]') value = value.replace('*', r'\*') return value def _register_mock_call(self, *args, **kwargs): return_value = kwargs.pop('return_value', None) self.iptables_execute_return_values.append(return_value) has_process_input = 'process_input' in kwargs process_input = kwargs.get('process_input') self.expected_process_inputs.append((has_process_input, process_input)) if has_process_input: kwargs['process_input'] = mock.ANY self.expected_calls.append(mock.call(*args, **kwargs)) self.expected_call_count += 1 def _verify_mock_calls(self, exp_fw_sg_updated_call=False): 
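        # Check the exact execute() call count first, then the ordered
        # command list recorded by _register_mock_call, and finally
        # regex-match every captured process_input payload; the bridge-nf
        # sysctl knobs and the security_group_updated flag are verified last.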
        self.assertEqual(self.expected_call_count,
                         self.iptables_execute.call_count)
        self.iptables_execute.assert_has_calls(self.expected_calls)
        for i, expected in enumerate(self.expected_process_inputs):
            check, expected_regex = expected
            if not check:
                continue
            # Every argument after the first to self.iptables.execute is a
            # keyword argument, so the kwargs dict is extracted via [1]
            kwargs = self.iptables_execute.call_args_list[i][1]
            self.assertThat(kwargs['process_input'],
                            matchers.MatchesRegex(expected_regex))
        expected = ['net.bridge.bridge-nf-call-arptables=1',
                    'net.bridge.bridge-nf-call-ip6tables=1',
                    'net.bridge.bridge-nf-call-iptables=1']
        for e in expected:
            self.utils_exec.assert_any_call(['sysctl', '-w', e],
                                            run_as_root=True)
        self.assertEqual(exp_fw_sg_updated_call,
                         self.agent.firewall.security_group_updated.called)

    def _replay_iptables(self, v4_filter, v6_filter, raw):
        self._register_mock_call(
            ['iptables-save'],
            run_as_root=True,
            return_value='')
        self._register_mock_call(
            ['iptables-restore', '-n'],
            process_input=self._regex(v4_filter + IPTABLES_MANGLE +
                                      IPTABLES_NAT + raw),
            run_as_root=True,
            return_value='')
        self._register_mock_call(
            ['ip6tables-save'],
            run_as_root=True,
            return_value='')
        self._register_mock_call(
            ['ip6tables-restore', '-n'],
            process_input=self._regex(v6_filter + IPTABLES_MANGLE_V6 + raw),
            run_as_root=True,
            return_value='')

    def test_prepare_remove_port(self):
        self.rpc.security_group_rules_for_devices.return_value = self.devices1
        self._replay_iptables(IPTABLES_FILTER_1, IPTABLES_FILTER_V6_1,
                              IPTABLES_RAW_DEFAULT)
        self._replay_iptables(IPTABLES_FILTER_EMPTY, IPTABLES_FILTER_V6_EMPTY,
                              IPTABLES_RAW_DEFAULT)
        self.agent.prepare_devices_filter(['tap_port1'])
        self.agent.remove_devices_filter(['tap_port1'])
        self._verify_mock_calls()

    def test_security_group_member_updated(self):
        self.rpc.security_group_rules_for_devices.return_value = self.devices1
        self._replay_iptables(IPTABLES_FILTER_1, IPTABLES_FILTER_V6_1,
                              IPTABLES_RAW_DEFAULT)
        self._replay_iptables(IPTABLES_FILTER_1_2, IPTABLES_FILTER_V6_1,
                              IPTABLES_RAW_DEFAULT)
        self._replay_iptables(IPTABLES_FILTER_2, IPTABLES_FILTER_V6_2,
                              IPTABLES_RAW_DEFAULT)
        self._replay_iptables(IPTABLES_FILTER_2_2, IPTABLES_FILTER_V6_2,
                              IPTABLES_RAW_DEFAULT)
        self._replay_iptables(IPTABLES_FILTER_1, IPTABLES_FILTER_V6_1,
                              IPTABLES_RAW_DEFAULT)
        self._replay_iptables(IPTABLES_FILTER_EMPTY, IPTABLES_FILTER_V6_EMPTY,
                              IPTABLES_RAW_DEFAULT)
        self.agent.prepare_devices_filter(['tap_port1'])
        self.rpc.security_group_rules_for_devices.return_value = self.devices2
        self.agent.security_groups_member_updated(['security_group1'])
        self.agent.prepare_devices_filter(['tap_port2'])
        self.rpc.security_group_rules_for_devices.return_value = self.devices1
        self.agent.security_groups_member_updated(['security_group1'])
        self.agent.remove_devices_filter(['tap_port2'])
        self.agent.remove_devices_filter(['tap_port1'])
        self._verify_mock_calls()

    def test_security_group_rule_updated(self):
        self.rpc.security_group_rules_for_devices.return_value = self.devices2
        self._replay_iptables(IPTABLES_FILTER_2, IPTABLES_FILTER_V6_2,
                              IPTABLES_RAW_DEFAULT)
        self._replay_iptables(IPTABLES_FILTER_2_3, IPTABLES_FILTER_V6_2,
                              IPTABLES_RAW_DEFAULT)
        self.agent.prepare_devices_filter(['tap_port1', 'tap_port3'])
        self.rpc.security_group_rules_for_devices.return_value = self.devices3
        self.agent.security_groups_rule_updated(['security_group1'])
        self._verify_mock_calls()


class TestSecurityGroupAgentEnhancedRpcWithIptables(
    TestSecurityGroupAgentWithIptables):
    def setUp(self, defer_refresh_firewall=False):
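        # Passing test_rpc_v1_1=False leaves security_group_info_for_devices
        # usable, so this class exercises the enhanced RPC path: rules arrive
        # once per security group together with sg_member_ips, instead of
        # being expanded per device.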
super(TestSecurityGroupAgentEnhancedRpcWithIptables, self).setUp( defer_refresh_firewall=defer_refresh_firewall, test_rpc_v1_1=False) self.sg_info = self.rpc.security_group_info_for_devices rule1 = [{'direction': 'ingress', 'protocol': const.PROTO_NAME_UDP, 'ethertype': const.IPv4, 'source_ip_prefix': '10.0.0.2/32', 'source_port_range_min': 67, 'source_port_range_max': 67, 'port_range_min': 68, 'port_range_max': 68}, {'direction': 'ingress', 'protocol': const.PROTO_NAME_TCP, 'ethertype': const.IPv4, 'port_range_min': 22, 'port_range_max': 22}, {'direction': 'egress', 'ethertype': const.IPv4}, {'direction': 'ingress', 'remote_group_id': 'security_group1', 'ethertype': const.IPv4}] rule2 = rule1[:] rule2 += [{'direction': 'ingress', 'protocol': const.PROTO_NAME_ICMP, 'ethertype': const.IPv4}] devices_info1 = {'tap_port1': self._device('tap_port1', '10.0.0.3/32', '12:34:56:78:9a:bc', [])} self.devices_info1 = {'security_groups': {'security_group1': rule1}, 'sg_member_ips': { 'security_group1': { 'IPv4': ['10.0.0.3/32'], 'IPv6': []}}, 'devices': devices_info1} devices_info2 = collections.OrderedDict([ ('tap_port1', self._device('tap_port1', '10.0.0.3/32', '12:34:56:78:9a:bc', [])), ('tap_port2', self._device('tap_port2', '10.0.0.4/32', '12:34:56:78:9a:bd', [])) ]) self.devices_info2 = {'security_groups': {'security_group1': rule1}, 'sg_member_ips': { 'security_group1': { 'IPv4': ['10.0.0.3/32', '10.0.0.4/32'], 'IPv6': []}}, 'devices': devices_info2} self.devices_info3 = {'security_groups': {'security_group1': rule2}, 'sg_member_ips': { 'security_group1': { 'IPv4': ['10.0.0.3/32', '10.0.0.4/32'], 'IPv6': []}}, 'devices': devices_info2} def test_prepare_remove_port(self): self.sg_info.return_value = self.devices_info1 self._replay_iptables(IPTABLES_FILTER_1, IPTABLES_FILTER_V6_1, IPTABLES_RAW_DEFAULT) self._replay_iptables(IPTABLES_FILTER_EMPTY, IPTABLES_FILTER_V6_EMPTY, IPTABLES_RAW_DEFAULT) self.agent.prepare_devices_filter(['tap_port1']) self.agent.remove_devices_filter(['tap_port1']) self._verify_mock_calls() def test_security_group_member_updated(self): self.sg_info.return_value = self.devices_info1 self._replay_iptables(IPTABLES_FILTER_1, IPTABLES_FILTER_V6_1, IPTABLES_RAW_DEFAULT) self._replay_iptables(IPTABLES_FILTER_1_2, IPTABLES_FILTER_V6_1, IPTABLES_RAW_DEFAULT) self._replay_iptables(IPTABLES_FILTER_2, IPTABLES_FILTER_V6_2, IPTABLES_RAW_DEFAULT) self._replay_iptables(IPTABLES_FILTER_2_2, IPTABLES_FILTER_V6_2, IPTABLES_RAW_DEFAULT) self._replay_iptables(IPTABLES_FILTER_1, IPTABLES_FILTER_V6_1, IPTABLES_RAW_DEFAULT) self._replay_iptables(IPTABLES_FILTER_EMPTY, IPTABLES_FILTER_V6_EMPTY, IPTABLES_RAW_DEFAULT) self.agent.prepare_devices_filter(['tap_port1']) self.sg_info.return_value = self.devices_info2 self.agent.security_groups_member_updated(['security_group1']) self.agent.prepare_devices_filter(['tap_port2']) self.sg_info.return_value = self.devices_info1 self.agent.security_groups_member_updated(['security_group1']) self.agent.remove_devices_filter(['tap_port2']) self.agent.remove_devices_filter(['tap_port1']) self._verify_mock_calls(True) self.assertEqual( 2, self.agent.firewall.security_group_updated.call_count) def test_security_group_rule_updated(self): self.sg_info.return_value = self.devices_info2 self._replay_iptables(IPTABLES_FILTER_2, IPTABLES_FILTER_V6_2, IPTABLES_RAW_DEFAULT) self._replay_iptables(IPTABLES_FILTER_2_3, IPTABLES_FILTER_V6_2, IPTABLES_RAW_DEFAULT) self.agent.prepare_devices_filter(['tap_port1', 'tap_port3']) self.sg_info.return_value = self.devices_info3 
self.agent.security_groups_rule_updated(['security_group1']) self._verify_mock_calls(True) self.agent.firewall.security_group_updated.assert_called_with( 'sg_rule', set(['security_group1'])) class TestSecurityGroupAgentEnhancedIpsetWithIptables( TestSecurityGroupAgentEnhancedRpcWithIptables): def setUp(self, defer_refresh_firewall=False): super(TestSecurityGroupAgentEnhancedIpsetWithIptables, self).setUp( defer_refresh_firewall) self.agent.firewall.enable_ipset = True self.ipset = self.agent.firewall.ipset self.ipset_execute = mock.patch.object(self.ipset, "execute").start() def test_prepare_remove_port(self): self.sg_info.return_value = self.devices_info1 self._replay_iptables(IPSET_FILTER_1, IPTABLES_FILTER_V6_1, IPTABLES_RAW_DEFAULT) self._replay_iptables(IPTABLES_FILTER_EMPTY, IPTABLES_FILTER_V6_EMPTY, IPTABLES_RAW_DEFAULT) self.agent.prepare_devices_filter(['tap_port1']) self.agent.remove_devices_filter(['tap_port1']) self._verify_mock_calls() def test_security_group_member_updated(self): self.sg_info.return_value = self.devices_info1 self.ipset._get_new_set_ips = mock.Mock(return_value=['10.0.0.3']) self.ipset._get_deleted_set_ips = mock.Mock(return_value=[]) self._replay_iptables(IPSET_FILTER_1, IPTABLES_FILTER_V6_1, IPTABLES_RAW_DEFAULT) self._replay_iptables(IPSET_FILTER_1, IPTABLES_FILTER_V6_1, IPTABLES_RAW_DEFAULT) self._replay_iptables(IPSET_FILTER_2, IPTABLES_FILTER_V6_2, IPTABLES_RAW_DEFAULT) self._replay_iptables(IPSET_FILTER_2, IPTABLES_FILTER_V6_2, IPTABLES_RAW_DEFAULT) self._replay_iptables(IPSET_FILTER_1, IPTABLES_FILTER_V6_1, IPTABLES_RAW_DEFAULT) self._replay_iptables(IPTABLES_FILTER_EMPTY, IPTABLES_FILTER_V6_EMPTY, IPTABLES_RAW_DEFAULT) self.agent.prepare_devices_filter(['tap_port1']) self.sg_info.return_value = self.devices_info2 self.agent.security_groups_member_updated(['security_group1']) self.agent.prepare_devices_filter(['tap_port2']) self.sg_info.return_value = self.devices_info1 self.agent.security_groups_member_updated(['security_group1']) self.agent.remove_devices_filter(['tap_port2']) self.agent.remove_devices_filter(['tap_port1']) self._verify_mock_calls(True) self.assertEqual( 2, self.agent.firewall.security_group_updated.call_count) def test_security_group_rule_updated(self): self.ipset._get_new_set_ips = mock.Mock(return_value=['10.0.0.3']) self.ipset._get_deleted_set_ips = mock.Mock(return_value=[]) self.sg_info.return_value = self.devices_info2 self._replay_iptables(IPSET_FILTER_2, IPTABLES_FILTER_V6_2, IPTABLES_RAW_DEFAULT) self._replay_iptables(IPSET_FILTER_2_3, IPTABLES_FILTER_V6_2, IPTABLES_RAW_DEFAULT) self.agent.prepare_devices_filter(['tap_port1', 'tap_port3']) self.sg_info.return_value = self.devices_info3 self.agent.security_groups_rule_updated(['security_group1']) self._verify_mock_calls(True) self.agent.firewall.security_group_updated.assert_called_with( 'sg_rule', set(['security_group1'])) class SGNotificationTestMixin(object): def test_security_group_rule_updated(self): name = 'webservers' description = 'my webservers' with self.security_group(name, description) as sg: with self.security_group(name, description): security_group_id = sg['security_group']['id'] rule = self._build_security_group_rule( security_group_id, direction='ingress', proto=const.PROTO_NAME_TCP) security_group_rule = self._make_security_group_rule(self.fmt, rule) self._delete('security-group-rules', security_group_rule['security_group_rule']['id']) self.notifier.assert_has_calls( [mock.call.security_groups_rule_updated(mock.ANY, [security_group_id]), 
mock.call.security_groups_rule_updated(mock.ANY, [security_group_id])]) def test_security_group_member_updated(self): with self.network() as n: with self.subnet(n): with self.security_group() as sg: security_group_id = sg['security_group']['id'] res = self._create_port(self.fmt, n['network']['id']) port = self.deserialize(self.fmt, res) data = {'port': {'fixed_ips': port['port']['fixed_ips'], 'name': port['port']['name'], ext_sg.SECURITYGROUPS: [security_group_id]}} req = self.new_update_request('ports', data, port['port']['id']) res = self.deserialize(self.fmt, req.get_response(self.api)) self.assertEqual(res['port'][ext_sg.SECURITYGROUPS][0], security_group_id) self._delete('ports', port['port']['id']) self.notifier.assert_has_calls( [mock.call.security_groups_member_updated( mock.ANY, [mock.ANY])]) class TestSecurityGroupAgentWithOVSIptables( TestSecurityGroupAgentWithIptables): FIREWALL_DRIVER = FIREWALL_HYBRID_DRIVER def setUp(self, defer_refresh_firewall=False, test_rpc_v1_1=True): super(TestSecurityGroupAgentWithOVSIptables, self).setUp( defer_refresh_firewall, test_rpc_v1_1) def _init_agent(self, defer_refresh_firewall): fake_map = ovs_neutron_agent.LocalVLANMapping(1, 'network_type', 'physical_network', 1) local_vlan_map = {'fakenet': fake_map} self.agent = sg_rpc.SecurityGroupAgentRpc( context=None, plugin_rpc=self.rpc, local_vlan_map=local_vlan_map, defer_refresh_firewall=defer_refresh_firewall) self._enforce_order_in_firewall(self.agent.firewall) def test_prepare_remove_port(self): self.rpc.security_group_rules_for_devices.return_value = self.devices1 self._replay_iptables(IPTABLES_FILTER_1, IPTABLES_FILTER_V6_1, IPTABLES_RAW_DEVICE_1) self._replay_iptables(IPTABLES_FILTER_EMPTY, IPTABLES_FILTER_V6_EMPTY, IPTABLES_RAW_DEFAULT) self.agent.prepare_devices_filter(['tap_port1']) self.agent.remove_devices_filter(['tap_port1']) self._verify_mock_calls() def test_security_group_member_updated(self): self.rpc.security_group_rules_for_devices.return_value = self.devices1 self._replay_iptables(IPTABLES_FILTER_1, IPTABLES_FILTER_V6_1, IPTABLES_RAW_DEVICE_1) self._replay_iptables(IPTABLES_FILTER_1_2, IPTABLES_FILTER_V6_1, IPTABLES_RAW_DEVICE_1) self._replay_iptables(IPTABLES_FILTER_2, IPTABLES_FILTER_V6_2, IPTABLES_RAW_DEVICE_2) self._replay_iptables(IPTABLES_FILTER_2_2, IPTABLES_FILTER_V6_2, IPTABLES_RAW_DEVICE_2) self._replay_iptables(IPTABLES_FILTER_1, IPTABLES_FILTER_V6_1, IPTABLES_RAW_DEVICE_1) self._replay_iptables(IPTABLES_FILTER_EMPTY, IPTABLES_FILTER_V6_EMPTY, IPTABLES_RAW_DEFAULT) self.agent.prepare_devices_filter(['tap_port1']) self.rpc.security_group_rules_for_devices.return_value = self.devices2 self.agent.security_groups_member_updated(['security_group1']) self.agent.prepare_devices_filter(['tap_port2']) self.rpc.security_group_rules_for_devices.return_value = self.devices1 self.agent.security_groups_member_updated(['security_group1']) self.agent.remove_devices_filter(['tap_port2']) self.agent.remove_devices_filter(['tap_port1']) self._verify_mock_calls() def test_security_group_rule_updated(self): self.rpc.security_group_rules_for_devices.return_value = self.devices2 self._replay_iptables(IPTABLES_FILTER_2, IPTABLES_FILTER_V6_2, IPTABLES_RAW_DEVICE_2) self._replay_iptables(IPTABLES_FILTER_2_3, IPTABLES_FILTER_V6_2, IPTABLES_RAW_DEVICE_2) self.agent.prepare_devices_filter(['tap_port1', 'tap_port3']) self.rpc.security_group_rules_for_devices.return_value = self.devices3 self.agent.security_groups_rule_updated(['security_group1']) self._verify_mock_calls() def _regex(self, 
            value):
        # NOTE(nati): 'tap' is prefixed to the device name
        # by the OVSHybridIptablesFirewallDriver
        value = value.replace('tap_port', 'taptap_port')
        value = value.replace('qvbtaptap_port', 'qvbtap_port')
        value = value.replace('o_port', 'otap_port')
        value = value.replace('i_port', 'itap_port')
        value = value.replace('s_port', 'stap_port')
        return super(
            TestSecurityGroupAgentWithOVSIptables, self)._regex(value)


class TestSecurityGroupExtensionControl(base.BaseTestCase):
    def test_disable_security_group_extension_by_config(self):
        set_enable_security_groups(False)
        exp_aliases = ['dummy1', 'dummy2']
        ext_aliases = ['dummy1', 'security-group', 'dummy2']
        sg_rpc.disable_security_group_extension_by_config(ext_aliases)
        self.assertEqual(ext_aliases, exp_aliases)

    def test_enable_security_group_extension_by_config(self):
        set_enable_security_groups(True)
        exp_aliases = ['dummy1', 'security-group', 'dummy2']
        ext_aliases = ['dummy1', 'security-group', 'dummy2']
        sg_rpc.disable_security_group_extension_by_config(ext_aliases)
        self.assertEqual(ext_aliases, exp_aliases)

    def test_is_invalid_driver_combination_sg_enabled(self):
        set_enable_security_groups(True)
        set_firewall_driver(FIREWALL_NOOP_DRIVER)
        self.assertFalse(sg_rpc._is_valid_driver_combination())

    def test_is_invalid_driver_combination_sg_enabled_with_none(self):
        set_enable_security_groups(True)
        set_firewall_driver(None)
        self.assertFalse(sg_rpc._is_valid_driver_combination())

    def test_is_invalid_driver_combination_sg_disabled(self):
        set_enable_security_groups(False)
        set_firewall_driver('NonNoopDriver')
        self.assertFalse(sg_rpc._is_valid_driver_combination())

    def test_is_valid_driver_combination_sg_enabled(self):
        set_enable_security_groups(True)
        set_firewall_driver('NonNoopDriver')
        self.assertTrue(sg_rpc._is_valid_driver_combination())

    def test_is_valid_driver_combination_sg_disabled(self):
        set_enable_security_groups(False)
        set_firewall_driver(FIREWALL_NOOP_DRIVER)
        self.assertTrue(sg_rpc._is_valid_driver_combination())

    def test_is_valid_driver_combination_sg_disabled_with_none(self):
        set_enable_security_groups(False)
        set_firewall_driver(None)
        self.assertTrue(sg_rpc._is_valid_driver_combination())
neutron-8.4.0/neutron/tests/unit/agent/common/0000775000567000056710000000000013044373210022554 5ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/tests/unit/agent/common/__init__.py0000664000567000056710000000000013044372736024667 0ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/tests/unit/agent/common/test_config.py0000664000567000056710000000305113044372736025445 0ustar jenkinsjenkins00000000000000# Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
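# A minimal usage sketch (not executed anywhere; shown only for context) of
# the root-helper lookup that TestRootHelper below exercises. The 'sudo'
# default is the one asserted in test_root_default:
#
#     conf = config.setup_conf()
#     config.register_root_helper(conf)
#     assert config.get_root_helper(conf) == 'sudo'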
from neutron.agent.common import config
from neutron.tests import base


def test_setup_conf():
    conf = config.setup_conf()
    assert conf.state_path.endswith('/var/lib/neutron')


class TestRootHelper(base.BaseTestCase):
    def test_agent_root_helper(self):
        conf = config.setup_conf()
        config.register_root_helper(conf)
        conf.set_override('root_helper', 'my_root_helper', 'AGENT')
        self.assertEqual(config.get_root_helper(conf), 'my_root_helper')

    def test_root_default(self):
        conf = config.setup_conf()
        config.register_root_helper(conf)
        self.assertEqual(config.get_root_helper(conf), 'sudo')

    def test_agent_root_helper_daemon(self):
        conf = config.setup_conf()
        config.register_root_helper(conf)
        rhd = 'my_root_helper_daemon'
        conf.set_override('root_helper_daemon', rhd, 'AGENT')
        self.assertEqual(rhd, conf.AGENT.root_helper_daemon)
neutron-8.4.0/neutron/tests/unit/agent/common/test_polling.py0000664000567000056710000000520613044372736025650 0ustar jenkinsjenkins00000000000000# Copyright 2013 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import mock

from neutron.agent.common import base_polling as polling
from neutron.tests import base


class TestBasePollingManager(base.BaseTestCase):
    def setUp(self):
        super(TestBasePollingManager, self).setUp()
        self.pm = polling.BasePollingManager()

    def test__is_polling_required_should_not_be_implemented(self):
        self.assertRaises(NotImplementedError, self.pm._is_polling_required)

    def test_force_polling_sets_interval_attribute(self):
        self.assertFalse(self.pm._force_polling)
        self.pm.force_polling()
        self.assertTrue(self.pm._force_polling)

    def test_polling_completed_sets_interval_attribute(self):
        self.pm._polling_completed = False
        self.pm.polling_completed()
        self.assertTrue(self.pm._polling_completed)

    def mock_is_polling_required(self, return_value):
        return mock.patch.object(self.pm, '_is_polling_required',
                                 return_value=return_value)

    def test_is_polling_required_returns_true_when_forced(self):
        with self.mock_is_polling_required(False):
            self.pm.force_polling()
            self.assertTrue(self.pm.is_polling_required)
            self.assertFalse(self.pm._force_polling)

    def test_is_polling_required_returns_true_when_polling_not_completed(self):
        with self.mock_is_polling_required(False):
            self.pm._polling_completed = False
            self.assertTrue(self.pm.is_polling_required)

    def test_is_polling_required_returns_true_when_updates_are_present(self):
        with self.mock_is_polling_required(True):
            self.assertTrue(self.pm.is_polling_required)
            self.assertFalse(self.pm._polling_completed)

    def test_is_polling_required_returns_false_for_no_updates(self):
        with self.mock_is_polling_required(False):
            self.assertFalse(self.pm.is_polling_required)


class TestAlwaysPoll(base.BaseTestCase):
    def test_is_polling_required_always_returns_true(self):
        pm = polling.AlwaysPoll()
        self.assertTrue(pm.is_polling_required)
neutron-8.4.0/neutron/tests/unit/agent/common/test_utils.py0000664000567000056710000000703213044372736025343 0ustar jenkinsjenkins00000000000000# Copyright 2015 Red Hat, Inc.
# All Rights Reserved.
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import mock from neutron.agent.common import config from neutron.agent.common import utils from neutron.agent.linux import interface from neutron.tests import base from neutron.tests.unit import testlib_api class TestLoadInterfaceDriver(base.BaseTestCase): def setUp(self): super(TestLoadInterfaceDriver, self).setUp() self.conf = config.setup_conf() self.conf.register_opts(interface.OPTS) config.register_interface_driver_opts_helper(self.conf) def test_load_interface_driver_not_set(self): with testlib_api.ExpectedException(SystemExit): utils.load_interface_driver(self.conf) def test_load_interface_driver_wrong_driver(self): self.conf.set_override('interface_driver', 'neutron.NonExistentDriver') with testlib_api.ExpectedException(SystemExit): utils.load_interface_driver(self.conf) def test_load_interface_driver_does_not_consume_irrelevant_errors(self): self.conf.set_override('interface_driver', 'neutron.agent.linux.interface.NullDriver') with mock.patch('oslo_utils.importutils.import_class', side_effect=RuntimeError()): with testlib_api.ExpectedException(RuntimeError): utils.load_interface_driver(self.conf) def test_load_interface_driver_success(self): self.conf.set_override('interface_driver', 'neutron.agent.linux.interface.NullDriver') self.assertIsInstance(utils.load_interface_driver(self.conf), interface.NullDriver) def test_load_null_interface_driver_success(self): self.conf.set_override('interface_driver', 'null') self.assertIsInstance(utils.load_interface_driver(self.conf), interface.NullDriver) def test_load_ivs_interface_driver_success(self): self.conf.set_override('interface_driver', 'ivs') self.assertIsInstance(utils.load_interface_driver(self.conf), interface.IVSInterfaceDriver) def test_load_linuxbridge_interface_driver_success(self): self.conf.set_override('interface_driver', 'linuxbridge') self.assertIsInstance(utils.load_interface_driver(self.conf), interface.BridgeInterfaceDriver) def test_load_ovs_interface_driver_success(self): self.conf.set_override('interface_driver', 'openvswitch') self.assertIsInstance(utils.load_interface_driver(self.conf), interface.OVSInterfaceDriver) def test_load_interface_driver_as_alias_wrong_driver(self): self.conf.set_override('interface_driver', 'openvswitchXX') with testlib_api.ExpectedException(SystemExit): utils.load_interface_driver(self.conf) neutron-8.4.0/neutron/tests/unit/agent/common/test_ovs_lib.py0000664000567000056710000011432013044372760025634 0ustar jenkinsjenkins00000000000000# Copyright 2012, VMware, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. import collections import mock from oslo_serialization import jsonutils from oslo_utils import uuidutils import testtools from neutron.agent.common import ovs_lib from neutron.agent.common import utils from neutron.common import exceptions from neutron.plugins.common import constants from neutron.plugins.ml2.drivers.openvswitch.agent.common \ import constants as p_const from neutron.tests import base from neutron.tests import tools OVS_LINUX_KERN_VERS_WITHOUT_VXLAN = "3.12.0" # some test data for get_vif_port_to_ofport_map that exhibited bug 1444269 OVSLIST_WITH_UNSET_PORT = ( '{"data":[["patch-tun",["map",[]],1],["tap2ab72a72-44",["map",[["attached-' 'mac","fa:16:3e:b0:f8:38"],["iface-id","2ab72a72-4407-4ef3-806a-b2172f3e4d' 'c7"],["iface-status","active"]]],2],["tap6b108774-15",["map",[["attached-' 'mac","fa:16:3e:02:f5:91"],["iface-id","6b108774-1559-45e9-a7c3-b714f11722' 'cf"],["iface-status","active"]]],["set",[]]]],"headings":["name","externa' 'l_ids","ofport"]}') class OFCTLParamListMatcher(object): def _parse(self, params): actions_pos = params.find('actions') return set(params[:actions_pos].split(',')), params[actions_pos:] def __init__(self, params): self.expected = self._parse(params) def __eq__(self, other): return self.expected == self._parse(other) def __str__(self): return 'ovs-ofctl parameters: %s, "%s"' % self.expected __repr__ = __str__ class OVS_Lib_Test(base.BaseTestCase): """A test suite to exercise the OVS libraries shared by Neutron agents. Note: these tests do not actually execute ovs-* utilities, and thus can run on any system. That does, however, limit their scope. """ def setUp(self): super(OVS_Lib_Test, self).setUp() self.BR_NAME = "br-int" self.br = ovs_lib.OVSBridge(self.BR_NAME) self.execute = mock.patch.object( utils, "execute", spec=utils.execute).start() @property def TO(self): return "--timeout=%s" % self.br.vsctl_timeout def _vsctl_args(self, *args): cmd = ['ovs-vsctl', self.TO, '--oneline', '--format=json', '--'] cmd += args return cmd def _vsctl_mock(self, *args): cmd = self._vsctl_args(*args) return mock.call(cmd, run_as_root=True, log_fail_as_error=False) def _verify_vsctl_mock(self, *args): cmd = self._vsctl_args(*args) self.execute.assert_called_once_with(cmd, run_as_root=True, log_fail_as_error=False) def test_vifport(self): """Create and stringify vif port, confirm no exceptions.""" pname = "vif1.0" ofport = 5 vif_id = uuidutils.generate_uuid() mac = "ca:fe:de:ad:be:ef" # test __init__ port = ovs_lib.VifPort(pname, ofport, vif_id, mac, self.br) self.assertEqual(port.port_name, pname) self.assertEqual(port.ofport, ofport) self.assertEqual(port.vif_id, vif_id) self.assertEqual(port.vif_mac, mac) self.assertEqual(port.switch.br_name, self.BR_NAME) # test __str__ str(port) def _build_timeout_opt(self, exp_timeout): return "--timeout=%d" % exp_timeout if exp_timeout else self.TO def test_add_flow(self): ofport = "99" vid = 4000 lsw_id = 18 cidr = '192.168.1.0/24' flow_dict_1 = collections.OrderedDict([ ('cookie', 1234), ('priority', 2), ('dl_src', 'ca:fe:de:ad:be:ef'), ('actions', 'strip_vlan,output:0')]) flow_dict_2 = collections.OrderedDict([ ('cookie', 1254), ('priority', 1), ('actions', 'normal')]) flow_dict_3 = collections.OrderedDict([ ('cookie', 1257), ('priority', 2), ('actions', 'drop')]) flow_dict_4 = collections.OrderedDict([ ('cookie', 1274), ('priority', 2), ('in_port', ofport), ('actions', 'drop')]) flow_dict_5 = 
collections.OrderedDict([ ('cookie', 1284), ('priority', 4), ('in_port', ofport), ('dl_vlan', vid), ('actions', "strip_vlan,set_tunnel:%s,normal" % (lsw_id))]) flow_dict_6 = collections.OrderedDict([ ('cookie', 1754), ('priority', 3), ('tun_id', lsw_id), ('actions', "mod_vlan_vid:%s,output:%s" % (vid, ofport))]) flow_dict_7 = collections.OrderedDict([ ('cookie', 1256), ('priority', 4), ('nw_src', cidr), ('proto', 'arp'), ('actions', 'drop')]) self.br.add_flow(**flow_dict_1) self.br.add_flow(**flow_dict_2) self.br.add_flow(**flow_dict_3) self.br.add_flow(**flow_dict_4) self.br.add_flow(**flow_dict_5) self.br.add_flow(**flow_dict_6) self.br.add_flow(**flow_dict_7) expected_calls = [ self._ofctl_mock("add-flows", self.BR_NAME, '-', process_input=OFCTLParamListMatcher( "hard_timeout=0,idle_timeout=0,cookie=1234," "priority=2,dl_src=ca:fe:de:ad:be:ef," "actions=strip_vlan,output:0")), self._ofctl_mock("add-flows", self.BR_NAME, '-', process_input=OFCTLParamListMatcher( "hard_timeout=0,idle_timeout=0,cookie=1254," "priority=1,actions=normal")), self._ofctl_mock("add-flows", self.BR_NAME, '-', process_input=OFCTLParamListMatcher( "hard_timeout=0,idle_timeout=0,cookie=1257," "priority=2,actions=drop")), self._ofctl_mock("add-flows", self.BR_NAME, '-', process_input=OFCTLParamListMatcher( "hard_timeout=0,idle_timeout=0,cookie=1274," "priority=2,in_port=%s,actions=drop" % ofport )), self._ofctl_mock("add-flows", self.BR_NAME, '-', process_input=OFCTLParamListMatcher( "hard_timeout=0,idle_timeout=0,cookie=1284," "priority=4,dl_vlan=%s,in_port=%s," "actions=strip_vlan,set_tunnel:%s,normal" % (vid, ofport, lsw_id))), self._ofctl_mock("add-flows", self.BR_NAME, '-', process_input=OFCTLParamListMatcher( "hard_timeout=0,idle_timeout=0,cookie=1754," "priority=3," "tun_id=%s,actions=mod_vlan_vid:%s,output:%s" % (lsw_id, vid, ofport))), self._ofctl_mock("add-flows", self.BR_NAME, '-', process_input=OFCTLParamListMatcher( "hard_timeout=0,idle_timeout=0,cookie=1256," "priority=4,nw_src=%s,arp,actions=drop" % cidr)), ] self.execute.assert_has_calls(expected_calls) def _ofctl_args(self, cmd, *args): cmd = ['ovs-ofctl', cmd] cmd += args return cmd def _ofctl_mock(self, cmd, *args, **kwargs): cmd = self._ofctl_args(cmd, *args) return mock.call(cmd, run_as_root=True, **kwargs) def _verify_ofctl_mock(self, cmd, *args, **kwargs): cmd = self._ofctl_args(cmd, *args) return self.execute.assert_called_once_with(cmd, run_as_root=True, **kwargs) def test_add_flow_timeout_set(self): flow_dict = collections.OrderedDict([ ('cookie', 1234), ('priority', 1), ('hard_timeout', 1000), ('idle_timeout', 2000), ('actions', 'normal')]) self.br.add_flow(**flow_dict) self._verify_ofctl_mock( "add-flows", self.BR_NAME, '-', process_input="hard_timeout=1000,idle_timeout=2000," "priority=1,cookie=1234,actions=normal") def test_add_flow_default_priority(self): flow_dict = collections.OrderedDict([('actions', 'normal'), ('cookie', 1234)]) self.br.add_flow(**flow_dict) self._verify_ofctl_mock( "add-flows", self.BR_NAME, '-', process_input="hard_timeout=0,idle_timeout=0,priority=1," "cookie=1234,actions=normal") def _test_get_port_ofport(self, ofport, expected_result): pname = "tap99" self.br.vsctl_timeout = 0 # Don't waste precious time retrying self.execute.return_value = self._encode_ovs_json( ['ofport'], [[ofport]]) self.assertEqual(self.br.get_port_ofport(pname), expected_result) self._verify_vsctl_mock("--columns=ofport", "list", "Interface", pname) def test_get_port_ofport_succeeds_for_valid_ofport(self): self._test_get_port_ofport(6, 
6) def test_get_port_ofport_returns_invalid_ofport_for_non_int(self): self._test_get_port_ofport([], ovs_lib.INVALID_OFPORT) def test_get_port_ofport_returns_invalid_for_invalid(self): self._test_get_port_ofport(ovs_lib.INVALID_OFPORT, ovs_lib.INVALID_OFPORT) def test_default_datapath(self): # verify kernel datapath is default expected = p_const.OVS_DATAPATH_SYSTEM self.assertEqual(expected, self.br.datapath_type) def test_non_default_datapath(self): expected = p_const.OVS_DATAPATH_NETDEV self.br = ovs_lib.OVSBridge(self.BR_NAME, datapath_type=expected) self.assertEqual(expected, self.br.datapath_type) def test_count_flows(self): self.execute.return_value = 'ignore\nflow-1\n' # counts the number of flows as total lines of output - 2 self.assertEqual(self.br.count_flows(), 1) self._verify_ofctl_mock("dump-flows", self.BR_NAME, process_input=None) def test_delete_flow(self): ofport = "5" lsw_id = 40 vid = 39 self.br.delete_flows(in_port=ofport) self.br.delete_flows(tun_id=lsw_id) self.br.delete_flows(dl_vlan=vid) expected_calls = [ self._ofctl_mock("del-flows", self.BR_NAME, '-', process_input="in_port=" + ofport), self._ofctl_mock("del-flows", self.BR_NAME, '-', process_input="tun_id=%s" % lsw_id), self._ofctl_mock("del-flows", self.BR_NAME, '-', process_input="dl_vlan=%s" % vid), ] self.execute.assert_has_calls(expected_calls) def test_delete_flow_with_priority_set(self): params = {'in_port': '1', 'priority': '1'} self.assertRaises(exceptions.InvalidInput, self.br.delete_flows, **params) def test_dump_flows(self): table = 23 nxst_flow = "NXST_FLOW reply (xid=0x4):" flows = "\n".join([" cookie=0x0, duration=18042.514s, table=0, " "n_packets=6, n_bytes=468, " "priority=2,in_port=1 actions=drop", " cookie=0x0, duration=18027.562s, table=0, " "n_packets=0, n_bytes=0, " "priority=3,in_port=1,dl_vlan=100 " "actions=mod_vlan_vid:1,NORMAL", " cookie=0x0, duration=18044.351s, table=0, " "n_packets=9, n_bytes=594, priority=1 " "actions=NORMAL", " cookie=0x0, " "duration=18044.211s, table=23, n_packets=0, " "n_bytes=0, priority=0 actions=drop"]) flow_args = '\n'.join([nxst_flow, flows]) run_ofctl = mock.patch.object(self.br, 'run_ofctl').start() run_ofctl.side_effect = [flow_args] retflows = self.br.dump_flows_for_table(table) self.assertEqual(flows, retflows) def test_dump_flows_ovs_dead(self): table = 23 run_ofctl = mock.patch.object(self.br, 'run_ofctl').start() run_ofctl.side_effect = [''] retflows = self.br.dump_flows_for_table(table) self.assertIsNone(retflows) def test_mod_flow_with_priority_set(self): params = {'in_port': '1', 'priority': '1'} self.assertRaises(exceptions.InvalidInput, self.br.mod_flow, **params) def test_mod_flow_no_actions_set(self): params = {'in_port': '1'} self.assertRaises(exceptions.InvalidInput, self.br.mod_flow, **params) def test_run_ofctl_retry_on_socket_error(self): err = RuntimeError('failed to connect to socket') self.execute.side_effect = [err] * 5 with mock.patch('time.sleep') as sleep: self.br.run_ofctl('add-flows', []) self.assertEqual(5, sleep.call_count) self.assertEqual(6, self.execute.call_count) # a regular exception fails right away self.execute.side_effect = RuntimeError('garbage') self.execute.reset_mock() with mock.patch('time.sleep') as sleep: self.br.run_ofctl('add-flows', []) self.assertEqual(0, sleep.call_count) self.assertEqual(1, self.execute.call_count) def test_add_tunnel_port(self): pname = "tap99" local_ip = "1.1.1.1" remote_ip = "9.9.9.9" ofport = 6 command = ["--may-exist", "add-port", self.BR_NAME, pname] command.extend(["--", "set", 
"Interface", pname]) command.extend(["type=gre", "options:df_default=true", "options:remote_ip=" + remote_ip, "options:local_ip=" + local_ip, "options:in_key=flow", "options:out_key=flow"]) # Each element is a tuple of (expected mock call, return_value) expected_calls_and_values = [ (self._vsctl_mock(*command), None), (self._vsctl_mock("--columns=ofport", "list", "Interface", pname), self._encode_ovs_json(['ofport'], [[ofport]])), ] tools.setup_mock_calls(self.execute, expected_calls_and_values) self.assertEqual( self.br.add_tunnel_port(pname, remote_ip, local_ip), ofport) tools.verify_mock_calls(self.execute, expected_calls_and_values) def test_add_vxlan_fragmented_tunnel_port(self): pname = "tap99" local_ip = "1.1.1.1" remote_ip = "9.9.9.9" ofport = 6 vxlan_udp_port = "9999" dont_fragment = False command = ["--may-exist", "add-port", self.BR_NAME, pname] command.extend(["--", "set", "Interface", pname]) command.extend(["type=" + constants.TYPE_VXLAN, "options:dst_port=" + vxlan_udp_port, "options:df_default=false", "options:remote_ip=" + remote_ip, "options:local_ip=" + local_ip, "options:in_key=flow", "options:out_key=flow"]) # Each element is a tuple of (expected mock call, return_value) expected_calls_and_values = [ (self._vsctl_mock(*command), None), (self._vsctl_mock("--columns=ofport", "list", "Interface", pname), self._encode_ovs_json(['ofport'], [[ofport]])), ] tools.setup_mock_calls(self.execute, expected_calls_and_values) self.assertEqual( self.br.add_tunnel_port(pname, remote_ip, local_ip, constants.TYPE_VXLAN, vxlan_udp_port, dont_fragment), ofport) tools.verify_mock_calls(self.execute, expected_calls_and_values) def test_add_vxlan_csum_tunnel_port(self): pname = "tap99" local_ip = "1.1.1.1" remote_ip = "9.9.9.9" ofport = 6 vxlan_udp_port = "9999" dont_fragment = True tunnel_csum = True command = ["--may-exist", "add-port", self.BR_NAME, pname] command.extend(["--", "set", "Interface", pname]) command.extend(["type=" + constants.TYPE_VXLAN, "options:dst_port=" + vxlan_udp_port, "options:df_default=true", "options:remote_ip=" + remote_ip, "options:local_ip=" + local_ip, "options:in_key=flow", "options:out_key=flow", "options:csum=true"]) # Each element is a tuple of (expected mock call, return_value) expected_calls_and_values = [ (self._vsctl_mock(*command), None), (self._vsctl_mock("--columns=ofport", "list", "Interface", pname), self._encode_ovs_json(['ofport'], [[ofport]])), ] tools.setup_mock_calls(self.execute, expected_calls_and_values) self.assertEqual( self.br.add_tunnel_port(pname, remote_ip, local_ip, constants.TYPE_VXLAN, vxlan_udp_port, dont_fragment, tunnel_csum), ofport) tools.verify_mock_calls(self.execute, expected_calls_and_values) def _test_get_vif_ports(self, is_xen=False): pname = "tap99" ofport = 6 vif_id = uuidutils.generate_uuid() mac = "ca:fe:de:ad:be:ef" id_field = 'xs-vif-uuid' if is_xen else 'iface-id' external_ids = {"attached-mac": mac, id_field: vif_id} self.br.get_ports_attributes = mock.Mock(return_value=[{ 'name': pname, 'ofport': ofport, 'external_ids': external_ids}]) self.br.get_xapi_iface_id = mock.Mock(return_value=vif_id) ports = self.br.get_vif_ports() self.assertEqual(1, len(ports)) self.assertEqual(ports[0].port_name, pname) self.assertEqual(ports[0].ofport, ofport) self.assertEqual(ports[0].vif_id, vif_id) self.assertEqual(ports[0].vif_mac, mac) self.assertEqual(ports[0].switch.br_name, self.BR_NAME) self.br.get_ports_attributes.assert_called_once_with( 'Interface', columns=['name', 'external_ids', 'ofport'], if_exists=True) def 
_encode_ovs_json(self, headings, data): # See man ovs-vsctl(8) for the encoding details. r = {"data": [], "headings": headings} for row in data: ovs_row = [] r["data"].append(ovs_row) for cell in row: if isinstance(cell, (str, int, list)): ovs_row.append(cell) elif isinstance(cell, dict): ovs_row.append(["map", cell.items()]) elif isinstance(cell, set): ovs_row.append(["set", cell]) else: raise TypeError('%r not int, str, list, set or dict' % type(cell)) return jsonutils.dumps(r) def _test_get_vif_port_set(self, is_xen): if is_xen: id_key = 'xs-vif-uuid' else: id_key = 'iface-id' headings = ['name', 'external_ids', 'ofport'] data = [ # A vif port on this bridge: ['tap99', {id_key: 'tap99id', 'attached-mac': 'tap99mac'}, 1], # A vif port on this bridge not yet configured ['tap98', {id_key: 'tap98id', 'attached-mac': 'tap98mac'}, []], # Another vif port on this bridge not yet configured ['tap97', {id_key: 'tap97id', 'attached-mac': 'tap97mac'}, ['set', []]], # Non-vif port on this bridge: ['bogus', {}, 2], ] # Each element is a tuple of (expected mock call, return_value) expected_calls_and_values = [ (self._vsctl_mock("list-ports", self.BR_NAME), 'tap99\\ntun22'), (self._vsctl_mock("--if-exists", "--columns=name,external_ids,ofport", "list", "Interface", 'tap99', 'tun22'), self._encode_ovs_json(headings, data)), ] tools.setup_mock_calls(self.execute, expected_calls_and_values) if is_xen: get_xapi_iface_id = mock.patch.object(self.br, 'get_xapi_iface_id').start() get_xapi_iface_id.return_value = 'tap99id' port_set = self.br.get_vif_port_set() self.assertEqual(set(['tap99id']), port_set) tools.verify_mock_calls(self.execute, expected_calls_and_values) if is_xen: get_xapi_iface_id.assert_called_once_with('tap99id') def test_get_vif_port_to_ofport_map(self): self.execute.return_value = OVSLIST_WITH_UNSET_PORT results = self.br.get_vif_port_to_ofport_map() expected = {'2ab72a72-4407-4ef3-806a-b2172f3e4dc7': 2, 'patch-tun': 1} self.assertEqual(expected, results) def test_get_vif_ports_nonxen(self): self._test_get_vif_ports(is_xen=False) def test_get_vif_ports_xen(self): self._test_get_vif_ports(is_xen=True) def test_get_vif_port_set_nonxen(self): self._test_get_vif_port_set(False) def test_get_vif_port_set_xen(self): self._test_get_vif_port_set(True) def test_get_vif_ports_list_ports_error(self): expected_calls_and_values = [ (self._vsctl_mock("list-ports", self.BR_NAME), RuntimeError()), ] tools.setup_mock_calls(self.execute, expected_calls_and_values) self.assertRaises(RuntimeError, self.br.get_vif_ports) tools.verify_mock_calls(self.execute, expected_calls_and_values) def test_get_vif_port_set_list_ports_error(self): expected_calls_and_values = [ (self._vsctl_mock("list-ports", self.BR_NAME), RuntimeError()), ] tools.setup_mock_calls(self.execute, expected_calls_and_values) self.assertRaises(RuntimeError, self.br.get_vif_port_set) tools.verify_mock_calls(self.execute, expected_calls_and_values) def test_get_vif_port_set_list_interface_error(self): expected_calls_and_values = [ (self._vsctl_mock("list-ports", self.BR_NAME), 'tap99\n'), (self._vsctl_mock("--if-exists", "--columns=name,external_ids,ofport", "list", "Interface", "tap99"), RuntimeError()), ] tools.setup_mock_calls(self.execute, expected_calls_and_values) self.assertRaises(RuntimeError, self.br.get_vif_port_set) tools.verify_mock_calls(self.execute, expected_calls_and_values) def test_get_port_tag_dict(self): headings = ['name', 'tag'] data = [ ['int-br-eth2', set()], ['patch-tun', set()], ['qr-76d9e6b6-21', 1], ['tapce5318ff-78', 
1], ['tape1400310-e6', 1], ] # Each element is a tuple of (expected mock call, return_value) expected_calls_and_values = [ (self._vsctl_mock("list-ports", self.BR_NAME), '\\n'.join((iface for iface, tag in data))), (self._vsctl_mock("--columns=name,tag", "list", "Port"), self._encode_ovs_json(headings, data)), ] tools.setup_mock_calls(self.execute, expected_calls_and_values) port_tags = self.br.get_port_tag_dict() self.assertEqual( port_tags, {u'int-br-eth2': [], u'patch-tun': [], u'qr-76d9e6b6-21': 1, u'tapce5318ff-78': 1, u'tape1400310-e6': 1} ) def test_clear_db_attribute(self): pname = "tap77" self.br.clear_db_attribute("Port", pname, "tag") self._verify_vsctl_mock("clear", "Port", pname, "tag") def _test_iface_to_br(self, exp_timeout=None): iface = 'tap0' br = 'br-int' if exp_timeout: self.br.vsctl_timeout = exp_timeout self.execute.return_value = 'br-int' self.assertEqual(self.br.get_bridge_for_iface(iface), br) self._verify_vsctl_mock("iface-to-br", iface) def test_iface_to_br(self): self._test_iface_to_br() def test_iface_to_br_non_default_timeout(self): new_timeout = 5 self._test_iface_to_br(new_timeout) def test_iface_to_br_handles_ovs_vsctl_exception(self): iface = 'tap0' self.execute.side_effect = Exception self.assertIsNone(self.br.get_bridge_for_iface(iface)) self._verify_vsctl_mock("iface-to-br", iface) def test_delete_all_ports(self): with mock.patch.object(self.br, 'get_port_name_list', return_value=['port1']) as get_port: with mock.patch.object(self.br, 'delete_port') as delete_port: self.br.delete_ports(all_ports=True) get_port.assert_called_once_with() delete_port.assert_called_once_with('port1') def test_delete_neutron_ports(self): port1 = ovs_lib.VifPort('tap1234', 1, uuidutils.generate_uuid(), 'ca:fe:de:ad:be:ef', 'br') port2 = ovs_lib.VifPort('tap5678', 2, uuidutils.generate_uuid(), 'ca:ee:de:ad:be:ef', 'br') with mock.patch.object(self.br, 'get_vif_ports', return_value=[port1, port2]) as get_ports: with mock.patch.object(self.br, 'delete_port') as delete_port: self.br.delete_ports(all_ports=False) get_ports.assert_called_once_with() delete_port.assert_has_calls([ mock.call('tap1234'), mock.call('tap5678') ]) def test_delete_neutron_ports_list_error(self): expected_calls_and_values = [ (self._vsctl_mock("list-ports", self.BR_NAME), RuntimeError()), ] tools.setup_mock_calls(self.execute, expected_calls_and_values) self.assertRaises(RuntimeError, self.br.delete_ports, all_ports=False) tools.verify_mock_calls(self.execute, expected_calls_and_values) def test_get_bridges_not_default_timeout(self): bridges = ['br-int', 'br-ex'] self.br.vsctl_timeout = 5 self.execute.return_value = 'br-int\\nbr-ex\n' self.assertEqual(self.br.get_bridges(), bridges) self._verify_vsctl_mock("list-br") def test_get_local_port_mac_succeeds(self): with mock.patch('neutron.agent.linux.ip_lib.IpLinkCommand', return_value=mock.Mock(address='foo')): self.assertEqual('foo', self.br.get_local_port_mac()) def test_get_local_port_mac_raises_exception_for_missing_mac(self): with mock.patch('neutron.agent.linux.ip_lib.IpLinkCommand', return_value=mock.Mock(address=None)): with testtools.ExpectedException(Exception): self.br.get_local_port_mac() def test_get_vifs_by_ids(self): db_list_res = [ {'name': 'qvo1', 'ofport': 1, 'external_ids': {'iface-id': 'pid1', 'attached-mac': '11'}}, {'name': 'qvo2', 'ofport': 2, 'external_ids': {'iface-id': 'pid2', 'attached-mac': '22'}}, {'name': 'qvo4', 'ofport': -1, 'external_ids': {'iface-id': 'pid4', 'attached-mac': '44'}}, ] self.br.get_ports_attributes = 
mock.Mock(return_value=db_list_res) self.br.ovsdb = mock.Mock() self.br.ovsdb.list_ports.return_value.execute.return_value = [ 'qvo1', 'qvo2', 'qvo4'] by_id = self.br.get_vifs_by_ids(['pid1', 'pid2', 'pid3', 'pid4']) # pid3 isn't on bridge and pid4 doesn't have a valid ofport self.assertIsNone(by_id['pid3']) self.assertIsNone(by_id['pid4']) self.assertEqual('pid1', by_id['pid1'].vif_id) self.assertEqual('qvo1', by_id['pid1'].port_name) self.assertEqual(1, by_id['pid1'].ofport) self.assertEqual('pid2', by_id['pid2'].vif_id) self.assertEqual('qvo2', by_id['pid2'].port_name) self.assertEqual(2, by_id['pid2'].ofport) self.br.get_ports_attributes.assert_has_calls( [mock.call('Interface', columns=['name', 'external_ids', 'ofport'], if_exists=True)]) def _test_get_vif_port_by_id(self, iface_id, data, br_name=None, extra_calls_and_values=None): headings = ['external_ids', 'name', 'ofport'] # Each element is a tuple of (expected mock call, return_value) expected_calls_and_values = [ (self._vsctl_mock("--columns=external_ids,name,ofport", "find", "Interface", 'external_ids:iface-id=%s' % iface_id, 'external_ids:attached-mac!=""'), self._encode_ovs_json(headings, data))] if data: if not br_name: br_name = self.BR_NAME # Only the last information list in 'data' is used, so if more # than one vif is described in data, the rest must be declared # in the argument 'expected_calls_and_values'. if extra_calls_and_values: expected_calls_and_values.extend(extra_calls_and_values) expected_calls_and_values.append( (self._vsctl_mock("iface-to-br", data[-1][headings.index('name')]), br_name)) tools.setup_mock_calls(self.execute, expected_calls_and_values) vif_port = self.br.get_vif_port_by_id(iface_id) tools.verify_mock_calls(self.execute, expected_calls_and_values) return vif_port def _assert_vif_port(self, vif_port, ofport=None, mac=None): if not ofport or ofport == -1 or not mac: self.assertIsNone(vif_port, "Got %s" % vif_port) return self.assertEqual('tap99id', vif_port.vif_id) self.assertEqual(mac, vif_port.vif_mac) self.assertEqual('tap99', vif_port.port_name) self.assertEqual(ofport, vif_port.ofport) def _test_get_vif_port_by_id_with_data(self, ofport=None, mac=None): external_ids = [["iface-id", "tap99id"], ["iface-status", "active"], ["attached-mac", mac]] data = [[["map", external_ids], "tap99", ofport if ofport else ["set", []]]] vif_port = self._test_get_vif_port_by_id('tap99id', data) self._assert_vif_port(vif_port, ofport, mac) def test_get_vif_by_port_id_with_ofport(self): self._test_get_vif_port_by_id_with_data( ofport=1, mac="aa:bb:cc:dd:ee:ff") def test_get_vif_by_port_id_without_ofport(self): self._test_get_vif_port_by_id_with_data(mac="aa:bb:cc:dd:ee:ff") def test_get_vif_by_port_id_with_invalid_ofport(self): self._test_get_vif_port_by_id_with_data( ofport=-1, mac="aa:bb:cc:dd:ee:ff") def test_get_vif_by_port_id_with_no_data(self): self.assertIsNone(self._test_get_vif_port_by_id('whatever', [])) def test_get_vif_by_port_id_different_bridge(self): external_ids = [["iface-id", "tap99id"], ["iface-status", "active"]] data = [[["map", external_ids], "tap99", 1]] self.assertIsNone(self._test_get_vif_port_by_id('tap99id', data, "br-ext")) def test_get_vif_by_port_id_multiple_vifs(self): external_ids = [["iface-id", "tap99id"], ["iface-status", "active"], ["attached-mac", "de:ad:be:ef:13:37"]] data = [[["map", external_ids], "dummytap", 1], [["map", external_ids], "tap99", 1337]] extra_calls_and_values = [ (self._vsctl_mock("iface-to-br", "dummytap"), "br-ext")] vif_port = 
self._test_get_vif_port_by_id( 'tap99id', data, extra_calls_and_values=extra_calls_and_values) self._assert_vif_port(vif_port, ofport=1337, mac="de:ad:be:ef:13:37") class TestDeferredOVSBridge(base.BaseTestCase): def setUp(self): super(TestDeferredOVSBridge, self).setUp() self.br = mock.Mock() self.mocked_do_action_flows = mock.patch.object( self.br, 'do_action_flows').start() self.add_flow_dict1 = dict(in_port=11, actions='drop') self.add_flow_dict2 = dict(in_port=12, actions='drop') self.mod_flow_dict1 = dict(in_port=21, actions='drop') self.mod_flow_dict2 = dict(in_port=22, actions='drop') self.del_flow_dict1 = dict(in_port=31) self.del_flow_dict2 = dict(in_port=32) def test_right_allowed_passthroughs(self): expected_passthroughs = ('add_port', 'add_tunnel_port', 'delete_port') self.assertEqual(expected_passthroughs, ovs_lib.DeferredOVSBridge.ALLOWED_PASSTHROUGHS) def _verify_mock_call(self, expected_calls): self.mocked_do_action_flows.assert_has_calls(expected_calls) self.assertEqual(len(expected_calls), len(self.mocked_do_action_flows.mock_calls)) def test_apply_on_exit(self): expected_calls = [ mock.call('add', [self.add_flow_dict1]), mock.call('mod', [self.mod_flow_dict1]), mock.call('del', [self.del_flow_dict1]), ] with ovs_lib.DeferredOVSBridge(self.br) as deferred_br: deferred_br.add_flow(**self.add_flow_dict1) deferred_br.mod_flow(**self.mod_flow_dict1) deferred_br.delete_flows(**self.del_flow_dict1) self._verify_mock_call([]) self._verify_mock_call(expected_calls) def test_apply_on_exit_with_errors(self): try: with ovs_lib.DeferredOVSBridge(self.br) as deferred_br: deferred_br.add_flow(**self.add_flow_dict1) deferred_br.mod_flow(**self.mod_flow_dict1) deferred_br.delete_flows(**self.del_flow_dict1) raise Exception() except Exception: self._verify_mock_call([]) else: self.fail('Exception would be reraised') def test_apply(self): expected_calls = [ mock.call('add', [self.add_flow_dict1]), mock.call('mod', [self.mod_flow_dict1]), mock.call('del', [self.del_flow_dict1]), ] with ovs_lib.DeferredOVSBridge(self.br) as deferred_br: deferred_br.add_flow(**self.add_flow_dict1) deferred_br.mod_flow(**self.mod_flow_dict1) deferred_br.delete_flows(**self.del_flow_dict1) self._verify_mock_call([]) deferred_br.apply_flows() self._verify_mock_call(expected_calls) self._verify_mock_call(expected_calls) def test_apply_order(self): expected_calls = [ mock.call('del', [self.del_flow_dict1, self.del_flow_dict2]), mock.call('mod', [self.mod_flow_dict1, self.mod_flow_dict2]), mock.call('add', [self.add_flow_dict1, self.add_flow_dict2]), ] order = 'del', 'mod', 'add' with ovs_lib.DeferredOVSBridge(self.br, order=order) as deferred_br: deferred_br.add_flow(**self.add_flow_dict1) deferred_br.mod_flow(**self.mod_flow_dict1) deferred_br.delete_flows(**self.del_flow_dict1) deferred_br.delete_flows(**self.del_flow_dict2) deferred_br.add_flow(**self.add_flow_dict2) deferred_br.mod_flow(**self.mod_flow_dict2) self._verify_mock_call(expected_calls) def test_apply_full_ordered(self): expected_calls = [ mock.call('add', [self.add_flow_dict1]), mock.call('mod', [self.mod_flow_dict1]), mock.call('del', [self.del_flow_dict1, self.del_flow_dict2]), mock.call('add', [self.add_flow_dict2]), mock.call('mod', [self.mod_flow_dict2]), ] with ovs_lib.DeferredOVSBridge(self.br, full_ordered=True) as deferred_br: deferred_br.add_flow(**self.add_flow_dict1) deferred_br.mod_flow(**self.mod_flow_dict1) deferred_br.delete_flows(**self.del_flow_dict1) deferred_br.delete_flows(**self.del_flow_dict2) 
                deferred_br.add_flow(**self.add_flow_dict2)
                deferred_br.mod_flow(**self.mod_flow_dict2)
        self._verify_mock_call(expected_calls)

    def test_getattr_unallowed_attr(self):
        with ovs_lib.DeferredOVSBridge(self.br) as deferred_br:
            self.assertEqual(self.br.add_port, deferred_br.add_port)

    def test_getattr_unallowed_attr_failure(self):
        with ovs_lib.DeferredOVSBridge(self.br) as deferred_br:
            self.assertRaises(AttributeError, getattr, deferred_br,
                              'failure')

    def test_default_cookie(self):
        self.br = ovs_lib.OVSBridge("br-tun")
        uuid_stamp1 = self.br.default_cookie
        self.assertEqual(uuid_stamp1, self.br.default_cookie)

    def test_cookie_passed_to_addmod(self):
        self.br = ovs_lib.OVSBridge("br-tun")
        stamp = str(self.br.default_cookie)
        expected_calls = [
            mock.call('add-flows', ['-'],
                      'hard_timeout=0,idle_timeout=0,priority=1,'
                      'cookie=' + stamp + ',actions=drop'),
            mock.call('mod-flows', ['-'],
                      'cookie=' + stamp + ',actions=drop')
        ]
        with mock.patch.object(self.br, 'run_ofctl') as f:
            with ovs_lib.DeferredOVSBridge(self.br) as deferred_br:
                deferred_br.add_flow(actions='drop')
                deferred_br.mod_flow(actions='drop')
            f.assert_has_calls(expected_calls)

neutron-8.4.0/neutron/tests/unit/agent/linux/test_tc_lib.py

# Copyright 2016 OVH SAS
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
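# A minimal sketch of the deferred-flow batching pattern that
# TestDeferredOVSBridge above exercises; the bridge name and the flow
# arguments below are illustrative assumptions, not values taken from the
# tests, and the helper is defined but never called.
def _deferred_flow_batching_sketch():
    from neutron.agent.common import ovs_lib

    br = ovs_lib.OVSBridge('br-int')
    # Flow calls made through the context manager are queued rather than
    # executed immediately.
    with ovs_lib.DeferredOVSBridge(br) as deferred_br:
        deferred_br.add_flow(priority=2, in_port=1, actions='drop')
        deferred_br.mod_flow(in_port=2, actions='normal')
        deferred_br.delete_flows(in_port=3)
    # On a clean exit the queued flows are flushed through
    # do_action_flows(), one batched ovs-ofctl invocation per action kind
    # ('add', 'mod', 'del'), which is what the mocked do_action_flows
    # assertions in the tests above verify.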
import mock from neutron.agent.linux import tc_lib from neutron.services.qos import qos_consts from neutron.tests import base DEVICE_NAME = "tap_device" KERNEL_HZ_VALUE = 1000 BW_LIMIT = 2000 # [kbps] BURST = 100 # [kbit] LATENCY = 50 # [ms] TC_QDISC_OUTPUT = ( 'qdisc tbf 8011: root refcnt 2 rate %(bw)skbit burst %(burst)skbit ' 'lat 50.0ms \n') % {'bw': BW_LIMIT, 'burst': BURST} TC_FILTERS_OUTPUT = ( 'filter protocol all pref 49152 u32 \nfilter protocol all pref ' '49152 u32 fh 800: ht divisor 1 \nfilter protocol all pref 49152 u32 fh ' '800::800 order 2048 key ht 800 \n match 00000000/00000000 at 0\n ' 'police 0x1e rate %(bw)skbit burst %(burst)skbit mtu 2Kb action \n' 'drop overhead 0b \n ref 1 bind 1' ) % {'bw': BW_LIMIT, 'burst': BURST} class BaseUnitConversionTest(object): def test_convert_to_kilobits_bare_value(self): value = "1000" expected_value = 8 # kbit self.assertEqual( expected_value, tc_lib.convert_to_kilobits(value, self.base_unit) ) def test_convert_to_kilobits_bytes_value(self): value = "1000b" expected_value = 8 # kbit self.assertEqual( expected_value, tc_lib.convert_to_kilobits(value, self.base_unit) ) def test_convert_to_kilobits_bits_value(self): value = "1000bit" expected_value = tc_lib.bits_to_kilobits(1000, self.base_unit) self.assertEqual( expected_value, tc_lib.convert_to_kilobits(value, self.base_unit) ) def test_convert_to_kilobits_megabytes_value(self): value = "1m" expected_value = tc_lib.bits_to_kilobits( self.base_unit ** 2 * 8, self.base_unit) self.assertEqual( expected_value, tc_lib.convert_to_kilobits(value, self.base_unit) ) def test_convert_to_kilobits_megabits_value(self): value = "1mbit" expected_value = tc_lib.bits_to_kilobits( self.base_unit ** 2, self.base_unit) self.assertEqual( expected_value, tc_lib.convert_to_kilobits(value, self.base_unit) ) def test_convert_to_bytes_wrong_unit(self): value = "1Zbit" self.assertRaises( tc_lib.InvalidUnit, tc_lib.convert_to_kilobits, value, self.base_unit ) def test_bytes_to_bits(self): test_values = [ (0, 0), # 0 bytes should be 0 bits (1, 8) # 1 byte should be 8 bits ] for input_bytes, expected_bits in test_values: self.assertEqual( expected_bits, tc_lib.bytes_to_bits(input_bytes) ) class TestSIUnitConversions(BaseUnitConversionTest, base.BaseTestCase): base_unit = tc_lib.SI_BASE def test_bits_to_kilobits(self): test_values = [ (0, 0), # 0 bites should be 0 kilobites (1, 1), # 1 bit should be 1 kilobit (999, 1), # 999 bits should be 1 kilobit (1000, 1), # 1000 bits should be 1 kilobit (1001, 2) # 1001 bits should be 2 kilobits ] for input_bits, expected_kilobits in test_values: self.assertEqual( expected_kilobits, tc_lib.bits_to_kilobits(input_bits, self.base_unit) ) class TestIECUnitConversions(BaseUnitConversionTest, base.BaseTestCase): base_unit = tc_lib.IEC_BASE def test_bits_to_kilobits(self): test_values = [ (0, 0), # 0 bites should be 0 kilobites (1, 1), # 1 bit should be 1 kilobit (1023, 1), # 1023 bits should be 1 kilobit (1024, 1), # 1024 bits should be 1 kilobit (1025, 2) # 1025 bits should be 2 kilobits ] for input_bits, expected_kilobits in test_values: self.assertEqual( expected_kilobits, tc_lib.bits_to_kilobits(input_bits, self.base_unit) ) class TestTcCommand(base.BaseTestCase): def setUp(self): super(TestTcCommand, self).setUp() self.tc = tc_lib.TcCommand(DEVICE_NAME, KERNEL_HZ_VALUE) self.bw_limit = "%s%s" % (BW_LIMIT, tc_lib.BW_LIMIT_UNIT) self.burst = "%s%s" % (BURST, tc_lib.BURST_UNIT) self.latency = "%s%s" % (LATENCY, tc_lib.LATENCY_UNIT) self.execute = 
mock.patch('neutron.agent.common.utils.execute').start() def test_check_kernel_hz_lower_then_zero(self): self.assertRaises( tc_lib.InvalidKernelHzValue, tc_lib.TcCommand, DEVICE_NAME, 0 ) self.assertRaises( tc_lib.InvalidKernelHzValue, tc_lib.TcCommand, DEVICE_NAME, -100 ) def test_get_filters_bw_limits(self): self.execute.return_value = TC_FILTERS_OUTPUT bw_limit, burst_limit = self.tc.get_filters_bw_limits() self.assertEqual(BW_LIMIT, bw_limit) self.assertEqual(BURST, burst_limit) def test_get_filters_bw_limits_when_output_not_match(self): output = ( "Some different " "output from command:" "tc filters show dev XXX parent ffff:" ) self.execute.return_value = output bw_limit, burst_limit = self.tc.get_filters_bw_limits() self.assertIsNone(bw_limit) self.assertIsNone(burst_limit) def test_get_filters_bw_limits_when_wrong_units(self): output = TC_FILTERS_OUTPUT.replace("kbit", "Xbit") self.execute.return_value = output self.assertRaises(tc_lib.InvalidUnit, self.tc.get_filters_bw_limits) def test_get_tbf_bw_limits(self): self.execute.return_value = TC_QDISC_OUTPUT bw_limit, burst_limit = self.tc.get_tbf_bw_limits() self.assertEqual(BW_LIMIT, bw_limit) self.assertEqual(BURST, burst_limit) def test_get_tbf_bw_limits_when_wrong_qdisc(self): output = TC_QDISC_OUTPUT.replace("tbf", "different_qdisc") self.execute.return_value = output bw_limit, burst_limit = self.tc.get_tbf_bw_limits() self.assertIsNone(bw_limit) self.assertIsNone(burst_limit) def test_get_tbf_bw_limits_when_wrong_units(self): output = TC_QDISC_OUTPUT.replace("kbit", "Xbit") self.execute.return_value = output self.assertRaises(tc_lib.InvalidUnit, self.tc.get_tbf_bw_limits) def test_set_tbf_bw_limit(self): self.tc.set_tbf_bw_limit(BW_LIMIT, BURST, LATENCY) self.execute.assert_called_once_with( ["tc", "qdisc", "replace", "dev", DEVICE_NAME, "root", "tbf", "rate", self.bw_limit, "latency", self.latency, "burst", self.burst], run_as_root=True, check_exit_code=True, log_fail_as_error=True, extra_ok_codes=None ) def test_update_filters_bw_limit(self): self.tc.update_filters_bw_limit(BW_LIMIT, BURST) self.execute.assert_has_calls([ mock.call( ["tc", "qdisc", "del", "dev", DEVICE_NAME, "ingress"], run_as_root=True, check_exit_code=True, log_fail_as_error=True, extra_ok_codes=[2] ), mock.call( ['tc', 'qdisc', 'add', 'dev', DEVICE_NAME, "ingress", "handle", tc_lib.INGRESS_QDISC_ID], run_as_root=True, check_exit_code=True, log_fail_as_error=True, extra_ok_codes=None ), mock.call( ['tc', 'filter', 'add', 'dev', DEVICE_NAME, 'parent', tc_lib.INGRESS_QDISC_ID, 'protocol', 'all', 'prio', '49', 'basic', 'police', 'rate', self.bw_limit, 'burst', self.burst, 'mtu', tc_lib.MAX_MTU_VALUE, 'drop'], run_as_root=True, check_exit_code=True, log_fail_as_error=True, extra_ok_codes=None )] ) def test_update_tbf_bw_limit(self): self.tc.update_tbf_bw_limit(BW_LIMIT, BURST, LATENCY) self.execute.assert_called_once_with( ["tc", "qdisc", "replace", "dev", DEVICE_NAME, "root", "tbf", "rate", self.bw_limit, "latency", self.latency, "burst", self.burst], run_as_root=True, check_exit_code=True, log_fail_as_error=True, extra_ok_codes=None ) def test_delete_filters_bw_limit(self): self.tc.delete_filters_bw_limit() self.execute.assert_called_once_with( ["tc", "qdisc", "del", "dev", DEVICE_NAME, "ingress"], run_as_root=True, check_exit_code=True, log_fail_as_error=True, extra_ok_codes=[2] ) def test_delete_tbf_bw_limit(self): self.tc.delete_tbf_bw_limit() self.execute.assert_called_once_with( ["tc", "qdisc", "del", "dev", DEVICE_NAME, "root"], run_as_root=True, 
            check_exit_code=True, log_fail_as_error=True,
            extra_ok_codes=[2]
        )

    def test_get_ingress_qdisc_burst_value_burst_not_none(self):
        self.assertEqual(
            BURST,
            self.tc.get_ingress_qdisc_burst_value(BW_LIMIT, BURST)
        )

    def test_get_ingress_qdisc_burst_no_burst_value_given(self):
        expected_burst = BW_LIMIT * qos_consts.DEFAULT_BURST_RATE
        self.assertEqual(
            expected_burst,
            self.tc.get_ingress_qdisc_burst_value(BW_LIMIT, None)
        )

    def test_get_ingress_qdisc_burst_burst_value_zero(self):
        expected_burst = BW_LIMIT * qos_consts.DEFAULT_BURST_RATE
        self.assertEqual(
            expected_burst,
            self.tc.get_ingress_qdisc_burst_value(BW_LIMIT, 0)
        )

    def test__get_tbf_burst_value_when_burst_bigger_then_minimal(self):
        result = self.tc._get_tbf_burst_value(BW_LIMIT, BURST)
        self.assertEqual(BURST, result)

    def test__get_tbf_burst_value_when_burst_smaller_then_minimal(self):
        result = self.tc._get_tbf_burst_value(BW_LIMIT, 0)
        self.assertEqual(2, result)

neutron-8.4.0/neutron/tests/unit/agent/linux/test_dhcp.py

# Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import os

import mock
import netaddr
from oslo_config import cfg

from neutron.agent.common import config
from neutron.agent.dhcp import config as dhcp_config
from neutron.agent.linux import dhcp
from neutron.agent.linux import external_process
from neutron.common import config as base_config
from neutron.common import constants
from neutron.common import utils
from neutron.extensions import extra_dhcp_opt as edo_ext
from neutron.tests import base
from neutron.tests import tools


class FakeIPAllocation(object):
    def __init__(self, address, subnet_id=None):
        self.ip_address = address
        self.subnet_id = subnet_id


class FakeDNSAssignment(object):
    def __init__(self, ip_address, dns_name='', domain='openstacklocal'):
        if dns_name:
            self.hostname = dns_name
        else:
            self.hostname = 'host-%s' % ip_address.replace(
                '.', '-').replace(':', '-')
        self.ip_address = ip_address
        self.fqdn = self.hostname
        if domain:
            self.fqdn = '%s.%s.' % (self.hostname, domain)


class DhcpOpt(object):
    def __init__(self, **kwargs):
        self.__dict__.update(ip_version=4)
        self.__dict__.update(kwargs)

    def __str__(self):
        return str(self.__dict__)


# A base class where class attributes can also be accessed by treating
# an instance as a dict.
class Dictable(object): def __getitem__(self, k): return self.__class__.__dict__.get(k) class FakeDhcpPort(object): def __init__(self): self.id = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaa' self.admin_state_up = True self.device_owner = constants.DEVICE_OWNER_DHCP self.fixed_ips = [ FakeIPAllocation('192.168.0.1', 'dddddddd-dddd-dddd-dddd-dddddddddddd')] self.mac_address = '00:00:80:aa:bb:ee' self.device_id = 'fake_dhcp_port' self.extra_dhcp_opts = [] class FakeReservedPort(object): def __init__(self, id='reserved-aaaa-aaaa-aaaa-aaaaaaaaaaa'): self.admin_state_up = True self.device_owner = constants.DEVICE_OWNER_DHCP self.fixed_ips = [ FakeIPAllocation('192.168.0.6', 'dddddddd-dddd-dddd-dddd-dddddddddddd')] self.mac_address = '00:00:80:aa:bb:ee' self.device_id = constants.DEVICE_ID_RESERVED_DHCP_PORT self.extra_dhcp_opts = [] self.id = id class FakePort1(object): def __init__(self, domain='openstacklocal'): self.id = 'eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee' self.admin_state_up = True self.device_owner = 'foo1' self.fixed_ips = [ FakeIPAllocation('192.168.0.2', 'dddddddd-dddd-dddd-dddd-dddddddddddd')] self.mac_address = '00:00:80:aa:bb:cc' self.device_id = 'fake_port1' self.extra_dhcp_opts = [] self.dns_assignment = [FakeDNSAssignment('192.168.0.2', domain=domain)] class FakePort2(object): def __init__(self): self.id = 'ffffffff-ffff-ffff-ffff-ffffffffffff' self.admin_state_up = False self.device_owner = 'foo2' self.fixed_ips = [ FakeIPAllocation('192.168.0.3', 'dddddddd-dddd-dddd-dddd-dddddddddddd')] self.mac_address = '00:00:f3:aa:bb:cc' self.device_id = 'fake_port2' self.dns_assignment = [FakeDNSAssignment('192.168.0.3')] self.extra_dhcp_opts = [] class FakePort3(object): def __init__(self): self.id = '44444444-4444-4444-4444-444444444444' self.admin_state_up = True self.device_owner = 'foo3' self.fixed_ips = [ FakeIPAllocation('192.168.0.4', 'dddddddd-dddd-dddd-dddd-dddddddddddd'), FakeIPAllocation('192.168.1.2', 'eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee')] self.dns_assignment = [FakeDNSAssignment('192.168.0.4'), FakeDNSAssignment('192.168.1.2')] self.mac_address = '00:00:0f:aa:bb:cc' self.device_id = 'fake_port3' self.extra_dhcp_opts = [] class FakePort4(object): def __init__(self): self.id = 'gggggggg-gggg-gggg-gggg-gggggggggggg' self.admin_state_up = False self.device_owner = 'foo3' self.fixed_ips = [ FakeIPAllocation('192.168.0.4', 'dddddddd-dddd-dddd-dddd-dddddddddddd'), FakeIPAllocation('ffda:3ba5:a17a:4ba3:0216:3eff:fec2:771d', 'eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee')] self.dns_assignment = [ FakeDNSAssignment('192.168.0.4'), FakeDNSAssignment('ffda:3ba5:a17a:4ba3:0216:3eff:fec2:771d')] self.mac_address = '00:16:3E:C2:77:1D' self.device_id = 'fake_port4' self.extra_dhcp_opts = [] class FakePort5(object): def __init__(self): self.id = 'eeeeeeee-eeee-eeee-eeee-eeeeeeeeeee' self.admin_state_up = True self.device_owner = 'foo5' self.fixed_ips = [ FakeIPAllocation('192.168.0.5', 'dddddddd-dddd-dddd-dddd-dddddddddddd')] self.dns_assignment = [FakeDNSAssignment('192.168.0.5')] self.mac_address = '00:00:0f:aa:bb:55' self.device_id = 'fake_port5' self.extra_dhcp_opts = [ DhcpOpt(opt_name=edo_ext.CLIENT_ID, opt_value='test5')] class FakePort6(object): def __init__(self): self.id = 'ccccccccc-cccc-cccc-cccc-ccccccccc' self.admin_state_up = True self.device_owner = 'foo6' self.fixed_ips = [ FakeIPAllocation('192.168.0.6', 'dddddddd-dddd-dddd-dddd-dddddddddddd')] self.dns_assignment = [FakeDNSAssignment('192.168.0.6')] self.mac_address = '00:00:0f:aa:bb:66' self.device_id = 'fake_port6' 
self.extra_dhcp_opts = [ DhcpOpt(opt_name=edo_ext.CLIENT_ID, opt_value='test6', ip_version=4), DhcpOpt(opt_name='dns-server', opt_value='123.123.123.45', ip_version=4)] class FakeV6Port(object): def __init__(self, domain='openstacklocal'): self.id = 'hhhhhhhh-hhhh-hhhh-hhhh-hhhhhhhhhhhh' self.admin_state_up = True self.device_owner = 'foo3' self.fixed_ips = [ FakeIPAllocation('fdca:3ba5:a17a:4ba3::2', 'ffffffff-ffff-ffff-ffff-ffffffffffff')] self.mac_address = '00:00:f3:aa:bb:cc' self.device_id = 'fake_port6' self.extra_dhcp_opts = [] self.dns_assignment = [FakeDNSAssignment('fdca:3ba5:a17a:4ba3::2', domain=domain)] class FakeV6PortExtraOpt(object): def __init__(self): self.id = 'hhhhhhhh-hhhh-hhhh-hhhh-hhhhhhhhhhhh' self.admin_state_up = True self.device_owner = 'foo3' self.fixed_ips = [ FakeIPAllocation('ffea:3ba5:a17a:4ba3:0216:3eff:fec2:771d', 'eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee')] self.dns_assignment = [ FakeDNSAssignment('ffea:3ba5:a17a:4ba3:0216:3eff:fec2:771d')] self.mac_address = '00:16:3e:c2:77:1d' self.device_id = 'fake_port6' self.extra_dhcp_opts = [ DhcpOpt(opt_name='dns-server', opt_value='ffea:3ba5:a17a:4ba3::100', ip_version=6)] class FakeDualPortWithV6ExtraOpt(object): def __init__(self): self.id = 'hhhhhhhh-hhhh-hhhh-hhhh-hhhhhhhhhhhh' self.admin_state_up = True self.device_owner = 'foo3' self.fixed_ips = [ FakeIPAllocation('192.168.0.3', 'dddddddd-dddd-dddd-dddd-dddddddddddd'), FakeIPAllocation('ffea:3ba5:a17a:4ba3:0216:3eff:fec2:771d', 'eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee')] self.dns_assignment = [ FakeDNSAssignment('192.168.0.3'), FakeDNSAssignment('ffea:3ba5:a17a:4ba3:0216:3eff:fec2:771d')] self.mac_address = '00:16:3e:c2:77:1d' self.device_id = 'fake_port6' self.extra_dhcp_opts = [ DhcpOpt(opt_name='dns-server', opt_value='ffea:3ba5:a17a:4ba3::100', ip_version=6)] class FakeDualPort(object): def __init__(self, domain='openstacklocal'): self.id = 'hhhhhhhh-hhhh-hhhh-hhhh-hhhhhhhhhhhh' self.admin_state_up = True self.device_owner = 'foo3' self.fixed_ips = [ FakeIPAllocation('192.168.0.3', 'dddddddd-dddd-dddd-dddd-dddddddddddd'), FakeIPAllocation('fdca:3ba5:a17a:4ba3::3', 'ffffffff-ffff-ffff-ffff-ffffffffffff')] self.mac_address = '00:00:0f:aa:bb:cc' self.device_id = 'fake_dual_port' self.extra_dhcp_opts = [] self.dns_assignment = [FakeDNSAssignment('192.168.0.3', domain=domain), FakeDNSAssignment('fdca:3ba5:a17a:4ba3::3', domain=domain)] class FakeRouterPort(object): def __init__(self, dev_owner=constants.DEVICE_OWNER_ROUTER_INTF, ip_address='192.168.0.1', domain='openstacklocal'): self.id = 'rrrrrrrr-rrrr-rrrr-rrrr-rrrrrrrrrrrr' self.admin_state_up = True self.device_owner = constants.DEVICE_OWNER_ROUTER_INTF self.mac_address = '00:00:0f:rr:rr:rr' self.device_id = 'fake_router_port' self.dns_assignment = [] self.extra_dhcp_opts = [] self.device_owner = dev_owner self.fixed_ips = [FakeIPAllocation( ip_address, 'dddddddd-dddd-dddd-dddd-dddddddddddd')] self.dns_assignment = [FakeDNSAssignment(ip.ip_address, domain=domain) for ip in self.fixed_ips] class FakeRouterPort2(object): def __init__(self): self.id = 'rrrrrrrr-rrrr-rrrr-rrrr-rrrrrrrrrrrr' self.admin_state_up = True self.device_owner = constants.DEVICE_OWNER_ROUTER_INTF self.fixed_ips = [ FakeIPAllocation('192.168.1.1', 'dddddddd-dddd-dddd-dddd-dddddddddddd')] self.dns_assignment = [FakeDNSAssignment('192.168.1.1')] self.mac_address = '00:00:0f:rr:rr:r2' self.device_id = 'fake_router_port2' self.extra_dhcp_opts = [] class FakePortMultipleAgents1(object): def __init__(self): self.id = 
'rrrrrrrr-rrrr-rrrr-rrrr-rrrrrrrrrrrr' self.admin_state_up = True self.device_owner = constants.DEVICE_OWNER_DHCP self.fixed_ips = [ FakeIPAllocation('192.168.0.5', 'dddddddd-dddd-dddd-dddd-dddddddddddd')] self.dns_assignment = [FakeDNSAssignment('192.168.0.5')] self.mac_address = '00:00:0f:dd:dd:dd' self.device_id = 'fake_multiple_agents_port' self.extra_dhcp_opts = [] class FakePortMultipleAgents2(object): def __init__(self): self.id = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa' self.admin_state_up = True self.device_owner = constants.DEVICE_OWNER_DHCP self.fixed_ips = [ FakeIPAllocation('192.168.0.6', 'dddddddd-dddd-dddd-dddd-dddddddddddd')] self.dns_assignment = [FakeDNSAssignment('192.168.0.6')] self.mac_address = '00:00:0f:ee:ee:ee' self.device_id = 'fake_multiple_agents_port2' self.extra_dhcp_opts = [] class FakeV4HostRoute(object): def __init__(self): self.destination = '20.0.0.1/24' self.nexthop = '20.0.0.1' class FakeV4HostRouteGateway(object): def __init__(self): self.destination = constants.IPv4_ANY self.nexthop = '10.0.0.1' class FakeV6HostRoute(object): def __init__(self): self.destination = '2001:0200:feed:7ac0::/64' self.nexthop = '2001:0200:feed:7ac0::1' class FakeV4Subnet(Dictable): def __init__(self): self.id = 'dddddddd-dddd-dddd-dddd-dddddddddddd' self.ip_version = 4 self.cidr = '192.168.0.0/24' self.gateway_ip = '192.168.0.1' self.enable_dhcp = True self.host_routes = [FakeV4HostRoute()] self.dns_nameservers = ['8.8.8.8'] class FakeV4Subnet2(FakeV4Subnet): def __init__(self): super(FakeV4Subnet2, self).__init__() self.cidr = '192.168.1.0/24' self.gateway_ip = '192.168.1.1' self.host_routes = [] class FakeV4MetadataSubnet(FakeV4Subnet): def __init__(self): super(FakeV4MetadataSubnet, self).__init__() self.cidr = '169.254.169.254/30' self.gateway_ip = '169.254.169.253' self.host_routes = [] self.dns_nameservers = [] class FakeV4SubnetGatewayRoute(FakeV4Subnet): def __init__(self): super(FakeV4SubnetGatewayRoute, self).__init__() self.host_routes = [FakeV4HostRouteGateway()] class FakeV4SubnetMultipleAgentsWithoutDnsProvided(FakeV4Subnet): def __init__(self): super(FakeV4SubnetMultipleAgentsWithoutDnsProvided, self).__init__() self.dns_nameservers = [] self.host_routes = [] class FakeV4SubnetAgentWithManyDnsProvided(FakeV4Subnet): def __init__(self): super(FakeV4SubnetAgentWithManyDnsProvided, self).__init__() self.dns_nameservers = ['2.2.2.2', '9.9.9.9', '1.1.1.1', '3.3.3.3'] self.host_routes = [] class FakeV4MultipleAgentsWithoutDnsProvided(object): def __init__(self): self.id = 'ffffffff-ffff-ffff-ffff-ffffffffffff' self.subnets = [FakeV4SubnetMultipleAgentsWithoutDnsProvided()] self.ports = [FakePort1(), FakePort2(), FakePort3(), FakeRouterPort(), FakePortMultipleAgents1(), FakePortMultipleAgents2()] self.namespace = 'qdhcp-ns' class FakeV4AgentWithoutDnsProvided(object): def __init__(self): self.id = 'ffffffff-ffff-ffff-ffff-ffffffffffff' self.subnets = [FakeV4SubnetMultipleAgentsWithoutDnsProvided()] self.ports = [FakePort1(), FakePort2(), FakePort3(), FakeRouterPort(), FakePortMultipleAgents1()] self.namespace = 'qdhcp-ns' class FakeV4AgentWithManyDnsProvided(object): def __init__(self): self.id = 'ffffffff-ffff-ffff-ffff-ffffffffffff' self.subnets = [FakeV4SubnetAgentWithManyDnsProvided()] self.ports = [FakePort1(), FakePort2(), FakePort3(), FakeRouterPort(), FakePortMultipleAgents1()] self.namespace = 'qdhcp-ns' class FakeV4SubnetMultipleAgentsWithDnsProvided(FakeV4Subnet): def __init__(self): super(FakeV4SubnetMultipleAgentsWithDnsProvided, self).__init__() 
self.host_routes = [] class FakeV4MultipleAgentsWithDnsProvided(object): def __init__(self): self.id = 'ffffffff-ffff-ffff-ffff-ffffffffffff' self.subnets = [FakeV4SubnetMultipleAgentsWithDnsProvided()] self.ports = [FakePort1(), FakePort2(), FakePort3(), FakeRouterPort(), FakePortMultipleAgents1(), FakePortMultipleAgents2()] self.namespace = 'qdhcp-ns' class FakeV6Subnet(object): def __init__(self): self.id = 'ffffffff-ffff-ffff-ffff-ffffffffffff' self.ip_version = 6 self.cidr = 'fdca:3ba5:a17a:4ba3::/64' self.gateway_ip = 'fdca:3ba5:a17a:4ba3::1' self.enable_dhcp = True self.host_routes = [FakeV6HostRoute()] self.dns_nameservers = ['2001:0200:feed:7ac0::1'] self.ipv6_ra_mode = None self.ipv6_address_mode = None class FakeV4SubnetNoDHCP(object): def __init__(self): self.id = 'eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee' self.ip_version = 4 self.cidr = '192.168.1.0/24' self.gateway_ip = '192.168.1.1' self.enable_dhcp = False self.host_routes = [] self.dns_nameservers = [] class FakeV6SubnetDHCPStateful(Dictable): def __init__(self): self.id = 'ffffffff-ffff-ffff-ffff-ffffffffffff' self.ip_version = 6 self.cidr = 'fdca:3ba5:a17a:4ba3::/64' self.gateway_ip = 'fdca:3ba5:a17a:4ba3::1' self.enable_dhcp = True self.host_routes = [FakeV6HostRoute()] self.dns_nameservers = ['2001:0200:feed:7ac0::1'] self.ipv6_ra_mode = None self.ipv6_address_mode = constants.DHCPV6_STATEFUL class FakeV6SubnetSlaac(object): def __init__(self): self.id = 'eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee' self.ip_version = 6 self.cidr = 'ffda:3ba5:a17a:4ba3::/64' self.gateway_ip = 'ffda:3ba5:a17a:4ba3::1' self.enable_dhcp = True self.host_routes = [FakeV6HostRoute()] self.ipv6_address_mode = constants.IPV6_SLAAC self.ipv6_ra_mode = None class FakeV6SubnetStateless(object): def __init__(self): self.id = 'eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee' self.ip_version = 6 self.cidr = 'ffea:3ba5:a17a:4ba3::/64' self.gateway_ip = 'ffea:3ba5:a17a:4ba3::1' self.enable_dhcp = True self.dns_nameservers = [] self.host_routes = [] self.ipv6_address_mode = constants.DHCPV6_STATELESS self.ipv6_ra_mode = None class FakeV6SubnetStatelessBadPrefixLength(object): def __init__(self): self.id = 'eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee' self.ip_version = 6 self.cidr = 'ffeb:3ba5:a17a:4ba3::/56' self.gateway_ip = 'ffeb:3ba5:a17a:4ba3::1' self.enable_dhcp = True self.dns_nameservers = [] self.host_routes = [] self.ipv6_address_mode = constants.DHCPV6_STATELESS self.ipv6_ra_mode = None class FakeV4SubnetNoGateway(FakeV4Subnet): def __init__(self): super(FakeV4SubnetNoGateway, self).__init__() self.id = 'eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee' self.cidr = '192.168.1.0/24' self.gateway_ip = None self.enable_dhcp = True self.host_routes = [] self.dns_nameservers = [] class FakeV4SubnetNoRouter(FakeV4Subnet): def __init__(self): super(FakeV4SubnetNoRouter, self).__init__() self.id = 'eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee' self.cidr = '192.168.1.0/24' self.gateway_ip = '192.168.1.1' self.host_routes = [] self.dns_nameservers = [] class FakeV4Network(object): def __init__(self): self.id = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa' self.subnets = [FakeV4Subnet()] self.ports = [FakePort1()] self.namespace = 'qdhcp-ns' class FakeV4NetworkClientId(object): def __init__(self): self.id = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa' self.subnets = [FakeV4Subnet()] self.ports = [FakePort1(), FakePort5(), FakePort6()] self.namespace = 'qdhcp-ns' class FakeV6Network(object): def __init__(self): self.id = 'bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb' self.subnets = [FakeV6Subnet()] self.ports = 
[FakePort2()] self.namespace = 'qdhcp-ns' class FakeDualNetwork(object): def __init__(self, domain='openstacklocal'): self.id = 'cccccccc-cccc-cccc-cccc-cccccccccccc' self.subnets = [FakeV4Subnet(), FakeV6SubnetDHCPStateful()] self.namespace = 'qdhcp-ns' self.ports = [FakePort1(domain=domain), FakeV6Port(domain=domain), FakeDualPort(domain=domain), FakeRouterPort(domain=domain)] class FakeDeviceManagerNetwork(object): def __init__(self): self.id = 'cccccccc-cccc-cccc-cccc-cccccccccccc' self.subnets = [FakeV4Subnet(), FakeV6SubnetDHCPStateful()] self.ports = [FakePort1(), FakeV6Port(), FakeDualPort(), FakeRouterPort()] self.namespace = 'qdhcp-ns' class FakeDualNetworkReserved(object): def __init__(self): self.id = 'cccccccc-cccc-cccc-cccc-cccccccccccc' self.subnets = [FakeV4Subnet(), FakeV6SubnetDHCPStateful()] self.ports = [FakePort1(), FakeV6Port(), FakeDualPort(), FakeRouterPort(), FakeReservedPort()] self.namespace = 'qdhcp-ns' class FakeDualNetworkReserved2(object): def __init__(self): self.id = 'cccccccc-cccc-cccc-cccc-cccccccccccc' self.subnets = [FakeV4Subnet(), FakeV6SubnetDHCPStateful()] self.ports = [FakePort1(), FakeV6Port(), FakeDualPort(), FakeRouterPort(), FakeReservedPort(), FakeReservedPort(id='reserved-2')] self.namespace = 'qdhcp-ns' class FakeNetworkDhcpPort(object): def __init__(self): self.id = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa' self.subnets = [FakeV4Subnet()] self.ports = [FakePort1(), FakeDhcpPort()] self.namespace = 'qdhcp-ns' class FakeDualNetworkGatewayRoute(object): def __init__(self): self.id = 'cccccccc-cccc-cccc-cccc-cccccccccccc' self.subnets = [FakeV4SubnetGatewayRoute(), FakeV6SubnetDHCPStateful()] self.ports = [FakePort1(), FakePort2(), FakePort3(), FakeRouterPort()] self.namespace = 'qdhcp-ns' class FakeDualNetworkSingleDHCP(object): def __init__(self): self.id = 'cccccccc-cccc-cccc-cccc-cccccccccccc' self.subnets = [FakeV4Subnet(), FakeV4SubnetNoDHCP()] self.ports = [FakePort1(), FakePort2(), FakePort3(), FakeRouterPort()] self.namespace = 'qdhcp-ns' class FakeDualNetworkDualDHCP(object): def __init__(self): self.id = 'cccccccc-cccc-cccc-cccc-cccccccccccc' self.subnets = [FakeV4Subnet(), FakeV4Subnet2()] self.ports = [FakePort1(), FakeRouterPort(), FakeRouterPort2()] self.namespace = 'qdhcp-ns' class FakeV4NoGatewayNetwork(object): def __init__(self): self.id = 'cccccccc-cccc-cccc-cccc-cccccccccccc' self.subnets = [FakeV4SubnetNoGateway()] self.ports = [FakePort1()] class FakeV4NetworkNoRouter(object): def __init__(self): self.id = 'cccccccc-cccc-cccc-cccc-cccccccccccc' self.subnets = [FakeV4SubnetNoRouter()] self.ports = [FakePort1()] class FakeV4MetadataNetwork(object): def __init__(self): self.id = 'cccccccc-cccc-cccc-cccc-cccccccccccc' self.subnets = [FakeV4MetadataSubnet()] self.ports = [FakeRouterPort(ip_address='169.254.169.253')] class FakeV4NetworkDistRouter(object): def __init__(self): self.id = 'cccccccc-cccc-cccc-cccc-cccccccccccc' self.subnets = [FakeV4Subnet()] self.ports = [FakePort1(), FakeRouterPort( dev_owner=constants.DEVICE_OWNER_DVR_INTERFACE)] class FakeDualV4Pxe3Ports(object): def __init__(self, port_detail="portsSame"): self.id = 'cccccccc-cccc-cccc-cccc-cccccccccccc' self.subnets = [FakeV4Subnet(), FakeV4SubnetNoDHCP()] self.ports = [FakePort1(), FakePort2(), FakePort3(), FakeRouterPort()] self.namespace = 'qdhcp-ns' if port_detail == "portsSame": self.ports[0].extra_dhcp_opts = [ DhcpOpt(opt_name='tftp-server', opt_value='192.168.0.3'), DhcpOpt(opt_name='server-ip-address', opt_value='192.168.0.2'), 
DhcpOpt(opt_name='bootfile-name', opt_value='pxelinux.0')] self.ports[1].extra_dhcp_opts = [ DhcpOpt(opt_name='tftp-server', opt_value='192.168.1.3'), DhcpOpt(opt_name='server-ip-address', opt_value='192.168.1.2'), DhcpOpt(opt_name='bootfile-name', opt_value='pxelinux2.0')] self.ports[2].extra_dhcp_opts = [ DhcpOpt(opt_name='tftp-server', opt_value='192.168.1.3'), DhcpOpt(opt_name='server-ip-address', opt_value='192.168.1.2'), DhcpOpt(opt_name='bootfile-name', opt_value='pxelinux3.0')] else: self.ports[0].extra_dhcp_opts = [ DhcpOpt(opt_name='tftp-server', opt_value='192.168.0.2'), DhcpOpt(opt_name='server-ip-address', opt_value='192.168.0.2'), DhcpOpt(opt_name='bootfile-name', opt_value='pxelinux.0')] self.ports[1].extra_dhcp_opts = [ DhcpOpt(opt_name='tftp-server', opt_value='192.168.0.5'), DhcpOpt(opt_name='server-ip-address', opt_value='192.168.0.5'), DhcpOpt(opt_name='bootfile-name', opt_value='pxelinux2.0')] self.ports[2].extra_dhcp_opts = [ DhcpOpt(opt_name='tftp-server', opt_value='192.168.0.7'), DhcpOpt(opt_name='server-ip-address', opt_value='192.168.0.7'), DhcpOpt(opt_name='bootfile-name', opt_value='pxelinux3.0')] class FakeV4NetworkPxe2Ports(object): def __init__(self, port_detail="portsSame"): self.id = 'dddddddd-dddd-dddd-dddd-dddddddddddd' self.subnets = [FakeV4Subnet()] self.ports = [FakePort1(), FakePort2(), FakeRouterPort()] self.namespace = 'qdhcp-ns' if port_detail == "portsSame": self.ports[0].extra_dhcp_opts = [ DhcpOpt(opt_name='tftp-server', opt_value='192.168.0.3'), DhcpOpt(opt_name='server-ip-address', opt_value='192.168.0.2'), DhcpOpt(opt_name='bootfile-name', opt_value='pxelinux.0')] self.ports[1].extra_dhcp_opts = [ DhcpOpt(opt_name='tftp-server', opt_value='192.168.0.3'), DhcpOpt(opt_name='server-ip-address', opt_value='192.168.0.2'), DhcpOpt(opt_name='bootfile-name', opt_value='pxelinux.0')] else: self.ports[0].extra_dhcp_opts = [ DhcpOpt(opt_name='tftp-server', opt_value='192.168.0.3'), DhcpOpt(opt_name='server-ip-address', opt_value='192.168.0.2'), DhcpOpt(opt_name='bootfile-name', opt_value='pxelinux.0')] self.ports[1].extra_dhcp_opts = [ DhcpOpt(opt_name='tftp-server', opt_value='192.168.0.5'), DhcpOpt(opt_name='server-ip-address', opt_value='192.168.0.5'), DhcpOpt(opt_name='bootfile-name', opt_value='pxelinux.0')] class FakeV4NetworkPxe3Ports(object): def __init__(self, port_detail="portsSame"): self.id = 'dddddddd-dddd-dddd-dddd-dddddddddddd' self.subnets = [FakeV4Subnet()] self.ports = [FakePort1(), FakePort2(), FakePort3(), FakeRouterPort()] self.namespace = 'qdhcp-ns' if port_detail == "portsSame": self.ports[0].extra_dhcp_opts = [ DhcpOpt(opt_name='tftp-server', opt_value='192.168.0.3'), DhcpOpt(opt_name='server-ip-address', opt_value='192.168.0.2'), DhcpOpt(opt_name='bootfile-name', opt_value='pxelinux.0')] self.ports[1].extra_dhcp_opts = [ DhcpOpt(opt_name='tftp-server', opt_value='192.168.1.3'), DhcpOpt(opt_name='server-ip-address', opt_value='192.168.1.2'), DhcpOpt(opt_name='bootfile-name', opt_value='pxelinux.0')] self.ports[2].extra_dhcp_opts = [ DhcpOpt(opt_name='tftp-server', opt_value='192.168.1.3'), DhcpOpt(opt_name='server-ip-address', opt_value='192.168.1.2'), DhcpOpt(opt_name='bootfile-name', opt_value='pxelinux.0')] else: self.ports[0].extra_dhcp_opts = [ DhcpOpt(opt_name='tftp-server', opt_value='192.168.0.3'), DhcpOpt(opt_name='server-ip-address', opt_value='192.168.0.2'), DhcpOpt(opt_name='bootfile-name', opt_value='pxelinux.0')] self.ports[1].extra_dhcp_opts = [ DhcpOpt(opt_name='tftp-server', opt_value='192.168.0.5'), 
DhcpOpt(opt_name='server-ip-address', opt_value='192.168.0.5'), DhcpOpt(opt_name='bootfile-name', opt_value='pxelinux2.0')] self.ports[2].extra_dhcp_opts = [ DhcpOpt(opt_name='tftp-server', opt_value='192.168.0.7'), DhcpOpt(opt_name='server-ip-address', opt_value='192.168.0.7'), DhcpOpt(opt_name='bootfile-name', opt_value='pxelinux3.0')] class FakeV6NetworkPxePort(object): def __init__(self): self.id = 'dddddddd-dddd-dddd-dddd-dddddddddddd' self.subnets = [FakeV6SubnetDHCPStateful()] self.ports = [FakeV6Port()] self.namespace = 'qdhcp-ns' self.ports[0].extra_dhcp_opts = [ DhcpOpt(opt_name='tftp-server', opt_value='2001:192:168::1', ip_version=6), DhcpOpt(opt_name='bootfile-name', opt_value='pxelinux.0', ip_version=6)] class FakeV6NetworkPxePortWrongOptVersion(object): def __init__(self): self.id = 'dddddddd-dddd-dddd-dddd-dddddddddddd' self.subnets = [FakeV6SubnetDHCPStateful()] self.ports = [FakeV6Port()] self.namespace = 'qdhcp-ns' self.ports[0].extra_dhcp_opts = [ DhcpOpt(opt_name='tftp-server', opt_value='192.168.0.7', ip_version=4), DhcpOpt(opt_name='bootfile-name', opt_value='pxelinux.0', ip_version=6)] class FakeDualStackNetworkSingleDHCP(object): def __init__(self): self.id = 'eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee' self.subnets = [FakeV4Subnet(), FakeV6SubnetSlaac()] self.ports = [FakePort1(), FakePort4(), FakeRouterPort()] class FakeDualStackNetworkingSingleDHCPTags(object): def __init__(self): self.id = 'eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee' self.subnets = [FakeV4Subnet(), FakeV6SubnetSlaac()] self.ports = [FakePort1(), FakePort4(), FakeRouterPort()] for port in self.ports: port.extra_dhcp_opts = [ DhcpOpt(opt_name='tag:ipxe,bootfile-name', opt_value='pxelinux.0')] class FakeV4NetworkMultipleTags(object): def __init__(self): self.id = 'dddddddd-dddd-dddd-dddd-dddddddddddd' self.subnets = [FakeV4Subnet()] self.ports = [FakePort1(), FakeRouterPort()] self.namespace = 'qdhcp-ns' self.ports[0].extra_dhcp_opts = [ DhcpOpt(opt_name='tag:ipxe,bootfile-name', opt_value='pxelinux.0')] class FakeV6NetworkStatelessDHCP(object): def __init__(self): self.id = 'bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb' self.subnets = [FakeV6SubnetStateless()] self.ports = [FakeV6PortExtraOpt()] self.namespace = 'qdhcp-ns' class FakeV6NetworkStatelessDHCPBadPrefixLength(object): def __init__(self): self.id = 'bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb' self.subnets = [FakeV6SubnetStatelessBadPrefixLength()] self.ports = [FakeV6PortExtraOpt()] self.namespace = 'qdhcp-ns' class FakeNetworkWithV6SatelessAndV4DHCPSubnets(object): def __init__(self): self.id = 'bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb' self.subnets = [FakeV6SubnetStateless(), FakeV4Subnet()] self.ports = [FakeDualPortWithV6ExtraOpt(), FakeRouterPort()] self.namespace = 'qdhcp-ns' class LocalChild(dhcp.DhcpLocalProcess): PORTS = {4: [4], 6: [6]} def __init__(self, *args, **kwargs): self.process_monitor = mock.Mock() kwargs['process_monitor'] = self.process_monitor super(LocalChild, self).__init__(*args, **kwargs) self.called = [] def reload_allocations(self): self.called.append('reload') def restart(self): self.called.append('restart') def spawn_process(self): self.called.append('spawn') class TestConfBase(base.BaseTestCase): def setUp(self): super(TestConfBase, self).setUp() self.conf = config.setup_conf() self.conf.register_opts(base_config.core_opts) self.conf.register_opts(dhcp_config.DHCP_OPTS) self.conf.register_opts(dhcp_config.DNSMASQ_OPTS) self.conf.register_opts(external_process.OPTS) config.register_interface_driver_opts_helper(self.conf) class 
TestBase(TestConfBase):
    def setUp(self):
        super(TestBase, self).setUp()
        instance = mock.patch("neutron.agent.linux.dhcp.DeviceManager")
        self.mock_mgr = instance.start()
        self.conf.register_opt(cfg.BoolOpt('enable_isolated_metadata',
                                           default=True))
        self.conf.register_opt(cfg.BoolOpt("force_metadata",
                                           default=False))
        self.conf.register_opt(cfg.BoolOpt('enable_metadata_network',
                                           default=False))
        self.config_parse(self.conf)
        self.conf.set_override('state_path', '')

        self.replace_p = mock.patch('neutron.common.utils.replace_file')
        self.execute_p = mock.patch('neutron.agent.common.utils.execute')
        self.safe = self.replace_p.start()
        self.execute = self.execute_p.start()

        self.makedirs = mock.patch('os.makedirs').start()
        self.rmtree = mock.patch('shutil.rmtree').start()

        self.external_process = mock.patch(
            'neutron.agent.linux.external_process.ProcessManager').start()

        self.mock_mgr.return_value.driver.bridged = True


class TestDhcpBase(TestBase):
    def test_existing_dhcp_networks_abstract_error(self):
        self.assertRaises(NotImplementedError,
                          dhcp.DhcpBase.existing_dhcp_networks,
                          None)

    def test_check_version_abstract_error(self):
        self.assertRaises(NotImplementedError,
                          dhcp.DhcpBase.check_version)

    def test_base_abc_error(self):
        self.assertRaises(TypeError, dhcp.DhcpBase, None)

    def test_restart(self):
        class SubClass(dhcp.DhcpBase):
            def __init__(self):
                dhcp.DhcpBase.__init__(self, cfg.CONF, FakeV4Network(),
                                       mock.Mock(), None)
                self.called = []

            def enable(self):
                self.called.append('enable')

            def disable(self, retain_port=False):
                self.called.append('disable %s' % retain_port)

            def reload_allocations(self):
                pass

            @property
            def active(self):
                return True

        c = SubClass()
        c.restart()
        self.assertEqual(c.called, ['disable True', 'enable'])


class TestDhcpLocalProcess(TestBase):
    def test_get_conf_file_name(self):
        tpl = '/dhcp/aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa/dev'
        lp = LocalChild(self.conf, FakeV4Network())
        self.assertEqual(lp.get_conf_file_name('dev'), tpl)

    @mock.patch.object(utils, 'ensure_dir')
    def test_ensure_dir_called(self, ensure_dir):
        LocalChild(self.conf, FakeV4Network())
        ensure_dir.assert_called_once_with(
            '/dhcp/aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa')

    def test_enable_already_active(self):
        with mock.patch.object(LocalChild, 'active') as patched:
            patched.__get__ = mock.Mock(return_value=True)
            lp = LocalChild(self.conf, FakeV4Network())
            lp.enable()
            self.assertEqual(lp.called, ['restart'])
            self.assertFalse(self.mock_mgr.return_value.setup.called)

    @mock.patch.object(utils, 'ensure_dir')
    def test_enable(self, ensure_dir):
        attrs_to_mock = dict(
            [(a, mock.DEFAULT) for a in
             ['active', 'interface_name']]
        )

        with mock.patch.multiple(LocalChild, **attrs_to_mock) as mocks:
            mocks['active'].__get__ = mock.Mock(return_value=False)
            mocks['interface_name'].__set__ = mock.Mock()
            lp = LocalChild(self.conf, FakeDualNetwork())
            lp.enable()

            self.mock_mgr.assert_has_calls(
                [mock.call(self.conf, None),
                 mock.call().setup(mock.ANY)])
            self.assertEqual(lp.called, ['spawn'])
            self.assertTrue(mocks['interface_name'].__set__.called)
            ensure_dir.assert_called_with(
                '/dhcp/cccccccc-cccc-cccc-cccc-cccccccccccc')

    def _assert_disabled(self, lp):
        self.assertTrue(lp.process_monitor.unregister.called)
        self.assertTrue(self.external_process().disable.called)

    def test_disable_not_active(self):
        attrs_to_mock = dict([(a, mock.DEFAULT) for a in
                              ['active', 'interface_name']])
        with mock.patch.multiple(LocalChild, **attrs_to_mock) as mocks:
            mocks['active'].__get__ = mock.Mock(return_value=False)
            mocks['interface_name'].__get__ = mock.Mock(return_value='tap0')
            network
= FakeDualNetwork() lp = LocalChild(self.conf, network) lp.device_manager = mock.Mock() lp.disable() lp.device_manager.destroy.assert_called_once_with( network, 'tap0') self._assert_disabled(lp) def test_disable_retain_port(self): attrs_to_mock = dict([(a, mock.DEFAULT) for a in ['active', 'interface_name']]) network = FakeDualNetwork() with mock.patch.multiple(LocalChild, **attrs_to_mock) as mocks: mocks['active'].__get__ = mock.Mock(return_value=True) mocks['interface_name'].__get__ = mock.Mock(return_value='tap0') lp = LocalChild(self.conf, network) lp.disable(retain_port=True) self._assert_disabled(lp) def test_disable(self): attrs_to_mock = {'active': mock.DEFAULT} with mock.patch.multiple(LocalChild, **attrs_to_mock) as mocks: mocks['active'].__get__ = mock.Mock(return_value=False) lp = LocalChild(self.conf, FakeDualNetwork()) with mock.patch('neutron.agent.linux.ip_lib.IPWrapper') as ip: lp.disable() self._assert_disabled(lp) ip.return_value.netns.delete.assert_called_with('qdhcp-ns') def test_disable_config_dir_removed_after_destroy(self): parent = mock.MagicMock() parent.attach_mock(self.rmtree, 'rmtree') parent.attach_mock(self.mock_mgr, 'DeviceManager') lp = LocalChild(self.conf, FakeDualNetwork()) lp.disable(retain_port=False) expected = [mock.call.DeviceManager().destroy(mock.ANY, mock.ANY), mock.call.rmtree(mock.ANY, ignore_errors=True)] parent.assert_has_calls(expected) def test_get_interface_name(self): net = FakeDualNetwork() path = '/dhcp/%s/interface' % net.id self.useFixture(tools.OpenFixture(path, 'tap0')) lp = LocalChild(self.conf, net) self.assertEqual(lp.interface_name, 'tap0') def test_set_interface_name(self): with mock.patch('neutron.common.utils.replace_file') as replace: lp = LocalChild(self.conf, FakeDualNetwork()) with mock.patch.object(lp, 'get_conf_file_name') as conf_file: conf_file.return_value = '/interface' lp.interface_name = 'tap0' conf_file.assert_called_once_with('interface') replace.assert_called_once_with(mock.ANY, 'tap0') class TestDnsmasq(TestBase): def _get_dnsmasq(self, network, process_monitor=None): process_monitor = process_monitor or mock.Mock() return dhcp.Dnsmasq(self.conf, network, process_monitor=process_monitor) def _test_spawn(self, extra_options, network=FakeDualNetwork(), max_leases=16777216, lease_duration=86400, has_static=True, no_resolv='--no-resolv', has_stateless=True): def mock_get_conf_file_name(kind): return '/dhcp/%s/%s' % (network.id, kind) # if you need to change this path here, think twice, # that means pid files will move around, breaking upgrades # or backwards-compatibility expected_pid_file = '/dhcp/%s/pid' % network.id expected = [ 'dnsmasq', '--no-hosts', no_resolv, '--strict-order', '--except-interface=lo', '--pid-file=%s' % expected_pid_file, '--dhcp-hostsfile=/dhcp/%s/host' % network.id, '--addn-hosts=/dhcp/%s/addn_hosts' % network.id, '--dhcp-optsfile=/dhcp/%s/opts' % network.id, '--dhcp-leasefile=/dhcp/%s/leases' % network.id, '--dhcp-match=set:ipxe,175', '--bind-interfaces', '--interface=tap0', ] seconds = '' if lease_duration == -1: lease_duration = 'infinite' else: seconds = 's' if has_static: prefix = '--dhcp-range=set:tag%d,%s,static,%s%s' prefix6 = '--dhcp-range=set:tag%d,%s,static,%s,%s%s' elif has_stateless: prefix = '--dhcp-range=set:tag%d,%s,%s%s' prefix6 = '--dhcp-range=set:tag%d,%s,%s,%s%s' possible_leases = 0 for i, s in enumerate(network.subnets): if (s.ip_version != 6 or s.ipv6_address_mode == constants.DHCPV6_STATEFUL): if s.ip_version == 4: expected.extend([prefix % ( i, 
s.cidr.split('/')[0], lease_duration, seconds)]) else: expected.extend([prefix6 % ( i, s.cidr.split('/')[0], s.cidr.split('/')[1], lease_duration, seconds)]) possible_leases += netaddr.IPNetwork(s.cidr).size if cfg.CONF.advertise_mtu: if hasattr(network, 'mtu'): expected.append( '--dhcp-option-force=option:mtu,%s' % network.mtu) expected.append('--dhcp-lease-max=%d' % min( possible_leases, max_leases)) expected.extend(extra_options) self.execute.return_value = ('', '') attrs_to_mock = dict( [(a, mock.DEFAULT) for a in ['_output_opts_file', 'get_conf_file_name', 'interface_name']] ) test_pm = mock.Mock() with mock.patch.multiple(dhcp.Dnsmasq, **attrs_to_mock) as mocks: mocks['get_conf_file_name'].side_effect = mock_get_conf_file_name mocks['_output_opts_file'].return_value = ( '/dhcp/%s/opts' % network.id ) mocks['interface_name'].__get__ = mock.Mock(return_value='tap0') dm = self._get_dnsmasq(network, test_pm) dm.spawn_process() self.assertTrue(mocks['_output_opts_file'].called) self.assertTrue(test_pm.register.called) self.external_process().enable.assert_called_once_with( reload_cfg=False) call_kwargs = self.external_process.mock_calls[0][2] cmd_callback = call_kwargs['default_cmd_callback'] result_cmd = cmd_callback(expected_pid_file) self.assertEqual(expected, result_cmd) def test_spawn(self): self._test_spawn(['--conf-file=', '--domain=openstacklocal']) def test_spawn_infinite_lease_duration(self): self.conf.set_override('dhcp_lease_duration', -1) self._test_spawn(['--conf-file=', '--domain=openstacklocal'], FakeDualNetwork(), 16777216, -1) def test_spawn_cfg_config_file(self): self.conf.set_override('dnsmasq_config_file', '/foo') self._test_spawn(['--conf-file=/foo', '--domain=openstacklocal']) def test_spawn_no_dhcp_domain(self): (exp_host_name, exp_host_data, exp_addn_name, exp_addn_data) = self._test_no_dhcp_domain_alloc_data self.conf.set_override('dhcp_domain', '') network = FakeDualNetwork(domain=self.conf.dhcp_domain) self._test_spawn(['--conf-file='], network=network) self.safe.assert_has_calls([mock.call(exp_host_name, exp_host_data), mock.call(exp_addn_name, exp_addn_data)]) def test_spawn_no_dhcp_range(self): network = FakeV6Network() subnet = FakeV6SubnetSlaac() network.subnets = [subnet] self._test_spawn(['--conf-file=', '--domain=openstacklocal'], network, has_static=False) def test_spawn_no_dhcp_range_bad_prefix_length(self): network = FakeV6NetworkStatelessDHCPBadPrefixLength() subnet = FakeV6SubnetStatelessBadPrefixLength() network.subnets = [subnet] self._test_spawn(['--conf-file=', '--domain=openstacklocal'], network, has_static=False, has_stateless=False) def test_spawn_cfg_dns_server(self): self.conf.set_override('dnsmasq_dns_servers', ['8.8.8.8']) self._test_spawn(['--conf-file=', '--server=8.8.8.8', '--domain=openstacklocal']) def test_spawn_cfg_multiple_dns_server(self): self.conf.set_override('dnsmasq_dns_servers', ['8.8.8.8', '9.9.9.9']) self._test_spawn(['--conf-file=', '--server=8.8.8.8', '--server=9.9.9.9', '--domain=openstacklocal']) def test_spawn_cfg_enable_dnsmasq_log(self): self.conf.set_override('dnsmasq_base_log_dir', '/tmp') network = FakeV4Network() dhcp_dns_log = \ '/tmp/aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa/dhcp_dns_log' self._test_spawn(['--conf-file=', '--domain=openstacklocal', '--log-queries', '--log-dhcp', ('--log-facility=%s' % dhcp_dns_log)], network) self.makedirs.assert_called_with(os.path.join('/tmp', network.id)) def test_spawn_cfg_with_local_resolv(self): self.conf.set_override('dnsmasq_local_resolv', True) 
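        # With dnsmasq_local_resolv enabled and no dnsmasq_dns_servers
        # configured, the agent is expected to drop dnsmasq's --no-resolv
        # flag so that queries fall through to the resolvers in the host's
        # /etc/resolv.conf; that is why no_resolv='' is passed below.
        # Illustrative, abbreviated command line (not an exact expectation):
        #   dnsmasq --no-hosts --strict-order ... --domain=openstacklocal
        # instead of:
        #   dnsmasq --no-hosts --no-resolv --strict-order ...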
self._test_spawn(['--conf-file=', '--domain=openstacklocal'], no_resolv='') def test_spawn_cfg_with_local_resolv_overriden(self): self.conf.set_override('dnsmasq_local_resolv', True) self.conf.set_override('dnsmasq_dns_servers', ['8.8.8.8']) self._test_spawn(['--conf-file=', '--server=8.8.8.8', '--domain=openstacklocal']) def test_spawn_max_leases_is_smaller_than_cap(self): self._test_spawn( ['--conf-file=', '--domain=openstacklocal'], network=FakeV4Network(), max_leases=256) def test_spawn_cfg_broadcast(self): self.conf.set_override('dhcp_broadcast_reply', True) self._test_spawn(['--conf-file=', '--domain=openstacklocal', '--dhcp-broadcast']) def test_spawn_cfg_advertise_mtu(self): cfg.CONF.set_override('advertise_mtu', True) network = FakeV4Network() network.mtu = 1500 self._test_spawn(['--conf-file=', '--domain=openstacklocal'], network) def test_spawn_cfg_advertise_mtu_plugin_doesnt_pass_mtu_value(self): cfg.CONF.set_override('advertise_mtu', True) network = FakeV4Network() self._test_spawn(['--conf-file=', '--domain=openstacklocal'], network) def _test_output_init_lease_file(self, timestamp): expected = [ '00:00:80:aa:bb:cc 192.168.0.2 * *', '00:00:f3:aa:bb:cc [fdca:3ba5:a17a:4ba3::2] * *', '00:00:0f:aa:bb:cc 192.168.0.3 * *', '00:00:0f:aa:bb:cc [fdca:3ba5:a17a:4ba3::3] * *', '00:00:0f:rr:rr:rr 192.168.0.1 * *\n'] expected = "\n".join(['%s %s' % (timestamp, l) for l in expected]) with mock.patch.object(dhcp.Dnsmasq, 'get_conf_file_name') as conf_fn: conf_fn.return_value = '/foo/leases' dm = self._get_dnsmasq(FakeDualNetwork()) dm._output_init_lease_file() self.safe.assert_called_once_with('/foo/leases', expected) @mock.patch('time.time') def test_output_init_lease_file(self, tmock): self.conf.set_override('dhcp_lease_duration', 500) tmock.return_value = 1000000 # lease duration should be added to current time timestamp = 1000000 + 500 self._test_output_init_lease_file(timestamp) def test_output_init_lease_file_infinite_duration(self): self.conf.set_override('dhcp_lease_duration', -1) # when duration is infinite, lease db timestamp should be 0 timestamp = 0 self._test_output_init_lease_file(timestamp) def _test_output_opts_file(self, expected, network, ipm_retval=None): with mock.patch.object(dhcp.Dnsmasq, 'get_conf_file_name') as conf_fn: conf_fn.return_value = '/foo/opts' dm = self._get_dnsmasq(network) if ipm_retval: with mock.patch.object( dm, '_make_subnet_interface_ip_map') as ipm: ipm.return_value = ipm_retval dm._output_opts_file() self.assertTrue(ipm.called) else: dm._output_opts_file() self.safe.assert_called_once_with('/foo/opts', expected) def test_output_opts_file(self): fake_v6 = '2001:0200:feed:7ac0::1' expected = ( 'tag:tag0,option:dns-server,8.8.8.8\n' 'tag:tag0,option:classless-static-route,20.0.0.1/24,20.0.0.1,' '169.254.169.254/32,192.168.0.1,0.0.0.0/0,192.168.0.1\n' 'tag:tag0,249,20.0.0.1/24,20.0.0.1,' '169.254.169.254/32,192.168.0.1,0.0.0.0/0,192.168.0.1\n' 'tag:tag0,option:router,192.168.0.1\n' 'tag:tag1,option6:dns-server,%s\n' 'tag:tag1,option6:domain-search,openstacklocal').lstrip() % ( '[' + fake_v6 + ']') self._test_output_opts_file(expected, FakeDualNetwork()) def test_output_opts_file_gateway_route(self): fake_v6 = '2001:0200:feed:7ac0::1' expected = ('tag:tag0,option:dns-server,8.8.8.8\n' 'tag:tag0,option:classless-static-route,' '169.254.169.254/32,192.168.0.1,0.0.0.0/0,' '192.168.0.1\ntag:tag0,249,169.254.169.254/32,' '192.168.0.1,0.0.0.0/0,192.168.0.1\n' 'tag:tag0,option:router,192.168.0.1\n' 'tag:tag1,option6:dns-server,%s\n' 
'tag:tag1,option6:domain-search,' 'openstacklocal').lstrip() % ('[' + fake_v6 + ']') self._test_output_opts_file(expected, FakeDualNetworkGatewayRoute()) def test_output_opts_file_multiple_agents_without_dns_provided(self): expected = ('tag:tag0,option:classless-static-route,' '169.254.169.254/32,192.168.0.1,0.0.0.0/0,192.168.0.1\n' 'tag:tag0,249,169.254.169.254/32,192.168.0.1,0.0.0.0/0,' '192.168.0.1\ntag:tag0,option:router,192.168.0.1\n' 'tag:tag0,option:dns-server,192.168.0.5,' '192.168.0.6').lstrip() self._test_output_opts_file(expected, FakeV4MultipleAgentsWithoutDnsProvided()) def test_output_opts_file_agent_dns_provided(self): expected = ('tag:tag0,option:classless-static-route,' '169.254.169.254/32,192.168.0.1,0.0.0.0/0,192.168.0.1\n' 'tag:tag0,249,169.254.169.254/32,192.168.0.1,0.0.0.0/0,' '192.168.0.1\ntag:tag0,option:router,192.168.0.1' ).lstrip() self._test_output_opts_file(expected, FakeV4AgentWithoutDnsProvided()) def test_output_opts_file_agent_with_many_dns_provided(self): expected = ('tag:tag0,' 'option:dns-server,2.2.2.2,9.9.9.9,1.1.1.1,3.3.3.3\n' 'tag:tag0,option:classless-static-route,' '169.254.169.254/32,192.168.0.1,0.0.0.0/0,192.168.0.1\n' 'tag:tag0,249,169.254.169.254/32,192.168.0.1,0.0.0.0/0,' '192.168.0.1\n' 'tag:tag0,option:router,192.168.0.1').lstrip() self._test_output_opts_file(expected, FakeV4AgentWithManyDnsProvided()) def test_output_opts_file_multiple_agents_with_dns_provided(self): expected = ('tag:tag0,option:dns-server,8.8.8.8\n' 'tag:tag0,option:classless-static-route,' '169.254.169.254/32,192.168.0.1,0.0.0.0/0,192.168.0.1\n' 'tag:tag0,249,169.254.169.254/32,192.168.0.1,0.0.0.0/0,' '192.168.0.1\n' 'tag:tag0,option:router,192.168.0.1').lstrip() self._test_output_opts_file(expected, FakeV4MultipleAgentsWithDnsProvided()) def test_output_opts_file_single_dhcp(self): expected = ( 'tag:tag0,option:dns-server,8.8.8.8\n' 'tag:tag0,option:classless-static-route,20.0.0.1/24,20.0.0.1,' '169.254.169.254/32,192.168.0.1,' '192.168.1.0/24,0.0.0.0,0.0.0.0/0,192.168.0.1\n' 'tag:tag0,249,20.0.0.1/24,20.0.0.1,' '169.254.169.254/32,192.168.0.1,192.168.1.0/24,0.0.0.0,' '0.0.0.0/0,192.168.0.1\n' 'tag:tag0,option:router,192.168.0.1').lstrip() self._test_output_opts_file(expected, FakeDualNetworkSingleDHCP()) def test_output_opts_file_dual_dhcp_rfc3442(self): expected = ( 'tag:tag0,option:dns-server,8.8.8.8\n' 'tag:tag0,option:classless-static-route,20.0.0.1/24,20.0.0.1,' '169.254.169.254/32,192.168.0.1,' '192.168.1.0/24,0.0.0.0,0.0.0.0/0,192.168.0.1\n' 'tag:tag0,249,20.0.0.1/24,20.0.0.1,' '169.254.169.254/32,192.168.0.1,192.168.1.0/24,0.0.0.0,' '0.0.0.0/0,192.168.0.1\n' 'tag:tag0,option:router,192.168.0.1\n' 'tag:tag1,option:dns-server,8.8.8.8\n' 'tag:tag1,option:classless-static-route,' '169.254.169.254/32,192.168.1.1,' '192.168.0.0/24,0.0.0.0,0.0.0.0/0,192.168.1.1\n' 'tag:tag1,249,169.254.169.254/32,192.168.1.1,' '192.168.0.0/24,0.0.0.0,0.0.0.0/0,192.168.1.1\n' 'tag:tag1,option:router,192.168.1.1').lstrip() self._test_output_opts_file(expected, FakeDualNetworkDualDHCP()) def test_output_opts_file_no_gateway(self): expected = ( 'tag:tag0,option:classless-static-route,' '169.254.169.254/32,192.168.1.1\n' 'tag:tag0,249,169.254.169.254/32,192.168.1.1\n' 'tag:tag0,option:router').lstrip() ipm_retval = {FakeV4SubnetNoGateway().id: '192.168.1.1'} self._test_output_opts_file(expected, FakeV4NoGatewayNetwork(), ipm_retval=ipm_retval) def test_output_opts_file_no_neutron_router_on_subnet(self): expected = ( 'tag:tag0,option:classless-static-route,' 
'169.254.169.254/32,192.168.1.2,0.0.0.0/0,192.168.1.1\n' 'tag:tag0,249,169.254.169.254/32,192.168.1.2,' '0.0.0.0/0,192.168.1.1\n' 'tag:tag0,option:router,192.168.1.1').lstrip() ipm_retval = {FakeV4SubnetNoRouter().id: '192.168.1.2'} self._test_output_opts_file(expected, FakeV4NetworkNoRouter(), ipm_retval=ipm_retval) def test_output_opts_file_dist_neutron_router_on_subnet(self): expected = ( 'tag:tag0,option:dns-server,8.8.8.8\n' 'tag:tag0,option:classless-static-route,20.0.0.1/24,20.0.0.1,' '169.254.169.254/32,192.168.0.1,0.0.0.0/0,192.168.0.1\n' 'tag:tag0,249,20.0.0.1/24,20.0.0.1,' '169.254.169.254/32,192.168.0.1,0.0.0.0/0,192.168.0.1\n' 'tag:tag0,option:router,192.168.0.1').lstrip() ipm_retval = {FakeV4Subnet().id: '192.168.0.1'} self._test_output_opts_file(expected, FakeV4NetworkDistRouter(), ipm_retval=ipm_retval) def test_output_opts_file_pxe_2port_1net(self): expected = ( 'tag:tag0,option:dns-server,8.8.8.8\n' 'tag:tag0,option:classless-static-route,20.0.0.1/24,20.0.0.1,' '169.254.169.254/32,192.168.0.1,0.0.0.0/0,192.168.0.1\n' 'tag:tag0,249,20.0.0.1/24,20.0.0.1,' '169.254.169.254/32,192.168.0.1,0.0.0.0/0,192.168.0.1\n' 'tag:tag0,option:router,192.168.0.1\n' 'tag:eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee,' 'option:tftp-server,192.168.0.3\n' 'tag:eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee,' 'option:server-ip-address,192.168.0.2\n' 'tag:eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee,' 'option:bootfile-name,pxelinux.0\n' 'tag:ffffffff-ffff-ffff-ffff-ffffffffffff,' 'option:tftp-server,192.168.0.3\n' 'tag:ffffffff-ffff-ffff-ffff-ffffffffffff,' 'option:server-ip-address,192.168.0.2\n' 'tag:ffffffff-ffff-ffff-ffff-ffffffffffff,' 'option:bootfile-name,pxelinux.0').lstrip() self._test_output_opts_file(expected, FakeV4NetworkPxe2Ports()) def test_output_opts_file_pxe_2port_1net_diff_details(self): expected = ( 'tag:tag0,option:dns-server,8.8.8.8\n' 'tag:tag0,option:classless-static-route,20.0.0.1/24,20.0.0.1,' '169.254.169.254/32,192.168.0.1,0.0.0.0/0,192.168.0.1\n' 'tag:tag0,249,20.0.0.1/24,20.0.0.1,' '169.254.169.254/32,192.168.0.1,0.0.0.0/0,192.168.0.1\n' 'tag:tag0,option:router,192.168.0.1\n' 'tag:eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee,' 'option:tftp-server,192.168.0.3\n' 'tag:eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee,' 'option:server-ip-address,192.168.0.2\n' 'tag:eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee,' 'option:bootfile-name,pxelinux.0\n' 'tag:ffffffff-ffff-ffff-ffff-ffffffffffff,' 'option:tftp-server,192.168.0.5\n' 'tag:ffffffff-ffff-ffff-ffff-ffffffffffff,' 'option:server-ip-address,192.168.0.5\n' 'tag:ffffffff-ffff-ffff-ffff-ffffffffffff,' 'option:bootfile-name,pxelinux.0').lstrip() self._test_output_opts_file(expected, FakeV4NetworkPxe2Ports("portsDiff")) def test_output_opts_file_pxe_3port_2net(self): expected = ( 'tag:tag0,option:dns-server,8.8.8.8\n' 'tag:tag0,option:classless-static-route,20.0.0.1/24,20.0.0.1,' '169.254.169.254/32,192.168.0.1,' '192.168.1.0/24,0.0.0.0,0.0.0.0/0,192.168.0.1\n' 'tag:tag0,249,20.0.0.1/24,20.0.0.1,' '169.254.169.254/32,192.168.0.1,192.168.1.0/24,0.0.0.0,' '0.0.0.0/0,192.168.0.1\n' 'tag:tag0,option:router,192.168.0.1\n' 'tag:eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee,' 'option:tftp-server,192.168.0.3\n' 'tag:eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee,' 'option:server-ip-address,192.168.0.2\n' 'tag:eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee,' 'option:bootfile-name,pxelinux.0\n' 'tag:ffffffff-ffff-ffff-ffff-ffffffffffff,' 'option:tftp-server,192.168.1.3\n' 'tag:ffffffff-ffff-ffff-ffff-ffffffffffff,' 'option:server-ip-address,192.168.1.2\n' 'tag:ffffffff-ffff-ffff-ffff-ffffffffffff,' 
'option:bootfile-name,pxelinux2.0\n' 'tag:44444444-4444-4444-4444-444444444444,' 'option:tftp-server,192.168.1.3\n' 'tag:44444444-4444-4444-4444-444444444444,' 'option:server-ip-address,192.168.1.2\n' 'tag:44444444-4444-4444-4444-444444444444,' 'option:bootfile-name,pxelinux3.0').lstrip() self._test_output_opts_file(expected, FakeDualV4Pxe3Ports()) def test_output_opts_file_multiple_tags(self): expected = ( 'tag:tag0,option:dns-server,8.8.8.8\n' 'tag:tag0,option:classless-static-route,20.0.0.1/24,20.0.0.1,' '169.254.169.254/32,192.168.0.1,0.0.0.0/0,192.168.0.1\n' 'tag:tag0,249,20.0.0.1/24,20.0.0.1,' '169.254.169.254/32,192.168.0.1,0.0.0.0/0,192.168.0.1\n' 'tag:tag0,option:router,192.168.0.1\n' 'tag:eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee,' 'tag:ipxe,option:bootfile-name,pxelinux.0') expected = expected.lstrip() with mock.patch.object(dhcp.Dnsmasq, 'get_conf_file_name') as conf_fn: conf_fn.return_value = '/foo/opts' dm = self._get_dnsmasq(FakeV4NetworkMultipleTags()) dm._output_opts_file() self.safe.assert_called_once_with('/foo/opts', expected) @mock.patch('neutron.agent.linux.dhcp.Dnsmasq.get_conf_file_name', return_value='/foo/opts') def test_output_opts_file_pxe_ipv6_port_with_ipv6_opt(self, mock_get_conf_fn): expected = ( 'tag:tag0,option6:dns-server,[2001:0200:feed:7ac0::1]\n' 'tag:tag0,option6:domain-search,openstacklocal\n' 'tag:hhhhhhhh-hhhh-hhhh-hhhh-hhhhhhhhhhhh,' 'option6:tftp-server,2001:192:168::1\n' 'tag:hhhhhhhh-hhhh-hhhh-hhhh-hhhhhhhhhhhh,' 'option6:bootfile-name,pxelinux.0') expected = expected.lstrip() dm = self._get_dnsmasq(FakeV6NetworkPxePort()) dm._output_opts_file() self.safe.assert_called_once_with('/foo/opts', expected) @mock.patch('neutron.agent.linux.dhcp.Dnsmasq.get_conf_file_name', return_value='/foo/opts') def test_output_opts_file_pxe_ipv6_port_with_ipv4_opt(self, mock_get_conf_fn): expected = ( 'tag:tag0,option6:dns-server,[2001:0200:feed:7ac0::1]\n' 'tag:tag0,option6:domain-search,openstacklocal\n' 'tag:hhhhhhhh-hhhh-hhhh-hhhh-hhhhhhhhhhhh,' 'option6:bootfile-name,pxelinux.0') expected = expected.lstrip() dm = self._get_dnsmasq(FakeV6NetworkPxePortWrongOptVersion()) dm._output_opts_file() self.safe.assert_called_once_with('/foo/opts', expected) def test_output_opts_file_ipv6_address_mode_unset(self): fake_v6 = '2001:0200:feed:7ac0::1' expected = ( 'tag:tag0,option6:dns-server,%s\n' 'tag:tag0,option6:domain-search,openstacklocal').lstrip() % ( '[' + fake_v6 + ']') self._test_output_opts_file(expected, FakeV6Network()) @property def _test_no_dhcp_domain_alloc_data(self): exp_host_name = '/dhcp/cccccccc-cccc-cccc-cccc-cccccccccccc/host' exp_host_data = ('00:00:80:aa:bb:cc,host-192-168-0-2,' '192.168.0.2\n' '00:00:f3:aa:bb:cc,host-fdca-3ba5-a17a-4ba3--2,' '[fdca:3ba5:a17a:4ba3::2]\n' '00:00:0f:aa:bb:cc,host-192-168-0-3,' '192.168.0.3\n' '00:00:0f:aa:bb:cc,host-fdca-3ba5-a17a-4ba3--3,' '[fdca:3ba5:a17a:4ba3::3]\n' '00:00:0f:rr:rr:rr,host-192-168-0-1,' '192.168.0.1\n').lstrip() exp_addn_name = '/dhcp/cccccccc-cccc-cccc-cccc-cccccccccccc/addn_hosts' exp_addn_data = ( '192.168.0.2\t' 'host-192-168-0-2 host-192-168-0-2\n' 'fdca:3ba5:a17a:4ba3::2\t' 'host-fdca-3ba5-a17a-4ba3--2 ' 'host-fdca-3ba5-a17a-4ba3--2\n' '192.168.0.3\thost-192-168-0-3 ' 'host-192-168-0-3\n' 'fdca:3ba5:a17a:4ba3::3\t' 'host-fdca-3ba5-a17a-4ba3--3 ' 'host-fdca-3ba5-a17a-4ba3--3\n' '192.168.0.1\t' 'host-192-168-0-1 ' 'host-192-168-0-1\n' ).lstrip() return (exp_host_name, exp_host_data, exp_addn_name, exp_addn_data) @property def _test_reload_allocation_data(self): exp_host_name = 
'/dhcp/cccccccc-cccc-cccc-cccc-cccccccccccc/host' exp_host_data = ('00:00:80:aa:bb:cc,host-192-168-0-2.openstacklocal.,' '192.168.0.2\n' '00:00:f3:aa:bb:cc,host-fdca-3ba5-a17a-4ba3--2.' 'openstacklocal.,[fdca:3ba5:a17a:4ba3::2]\n' '00:00:0f:aa:bb:cc,host-192-168-0-3.openstacklocal.,' '192.168.0.3\n' '00:00:0f:aa:bb:cc,host-fdca-3ba5-a17a-4ba3--3.' 'openstacklocal.,[fdca:3ba5:a17a:4ba3::3]\n' '00:00:0f:rr:rr:rr,host-192-168-0-1.openstacklocal.,' '192.168.0.1\n').lstrip() exp_addn_name = '/dhcp/cccccccc-cccc-cccc-cccc-cccccccccccc/addn_hosts' exp_addn_data = ( '192.168.0.2\t' 'host-192-168-0-2.openstacklocal. host-192-168-0-2\n' 'fdca:3ba5:a17a:4ba3::2\t' 'host-fdca-3ba5-a17a-4ba3--2.openstacklocal. ' 'host-fdca-3ba5-a17a-4ba3--2\n' '192.168.0.3\thost-192-168-0-3.openstacklocal. ' 'host-192-168-0-3\n' 'fdca:3ba5:a17a:4ba3::3\t' 'host-fdca-3ba5-a17a-4ba3--3.openstacklocal. ' 'host-fdca-3ba5-a17a-4ba3--3\n' '192.168.0.1\t' 'host-192-168-0-1.openstacklocal. ' 'host-192-168-0-1\n' ).lstrip() exp_opt_name = '/dhcp/cccccccc-cccc-cccc-cccc-cccccccccccc/opts' fake_v6 = '2001:0200:feed:7ac0::1' exp_opt_data = ( 'tag:tag0,option:dns-server,8.8.8.8\n' 'tag:tag0,option:classless-static-route,20.0.0.1/24,20.0.0.1,' '169.254.169.254/32,192.168.0.1,0.0.0.0/0,192.168.0.1\n' 'tag:tag0,249,20.0.0.1/24,20.0.0.1,' '169.254.169.254/32,192.168.0.1,0.0.0.0/0,192.168.0.1\n' 'tag:tag0,option:router,192.168.0.1\n' 'tag:tag1,option6:dns-server,%s\n' 'tag:tag1,option6:domain-search,openstacklocal').lstrip() % ( '[' + fake_v6 + ']') return (exp_host_name, exp_host_data, exp_addn_name, exp_addn_data, exp_opt_name, exp_opt_data,) def test_reload_allocations(self): (exp_host_name, exp_host_data, exp_addn_name, exp_addn_data, exp_opt_name, exp_opt_data,) = self._test_reload_allocation_data net = FakeDualNetwork() hpath = '/dhcp/%s/host' % net.id ipath = '/dhcp/%s/interface' % net.id self.useFixture(tools.OpenFixture(hpath)) self.useFixture(tools.OpenFixture(ipath)) test_pm = mock.Mock() dm = self._get_dnsmasq(net, test_pm) dm.reload_allocations() self.assertTrue(test_pm.register.called) self.external_process().enable.assert_called_once_with( reload_cfg=True) self.safe.assert_has_calls([ mock.call(exp_host_name, exp_host_data), mock.call(exp_addn_name, exp_addn_data), mock.call(exp_opt_name, exp_opt_data), ]) def test_release_unused_leases(self): dnsmasq = self._get_dnsmasq(FakeDualNetwork()) ip1 = '192.168.1.2' mac1 = '00:00:80:aa:bb:cc' ip2 = '192.168.1.3' mac2 = '00:00:80:cc:bb:aa' old_leases = set([(ip1, mac1, None), (ip2, mac2, None)]) dnsmasq._read_hosts_file_leases = mock.Mock(return_value=old_leases) dnsmasq._output_hosts_file = mock.Mock() dnsmasq._release_lease = mock.Mock() dnsmasq.network.ports = [] dnsmasq.device_manager.driver.unplug = mock.Mock() dnsmasq._release_unused_leases() dnsmasq._release_lease.assert_has_calls([mock.call(mac1, ip1, None), mock.call(mac2, ip2, None)], any_order=True) dnsmasq.device_manager.driver.unplug.assert_has_calls( [mock.call(dnsmasq.interface_name, namespace=dnsmasq.network.namespace)]) def test_release_for_ipv6_lease(self): dnsmasq = self._get_dnsmasq(FakeDualNetwork()) ip1 = 'fdca:3ba5:a17a::1' mac1 = '00:00:80:aa:bb:cc' ip2 = '192.168.1.3' mac2 = '00:00:80:cc:bb:aa' old_leases = set([(ip1, mac1, None), (ip2, mac2, None)]) dnsmasq._read_hosts_file_leases = mock.Mock(return_value=old_leases) ipw = mock.patch( 'neutron.agent.linux.ip_lib.IpNetnsCommand.execute').start() dnsmasq._release_unused_leases() # Verify that dhcp_release is called only for ipv4 addresses. 
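            # (dhcp_release, shipped with dnsmasq-utils, only supports
            # DHCPv4, so the IPv6 lease above has to be skipped. Illustrative
            # shape of the single expected call, run in the network's
            # namespace:
            #   dhcp_release <interface> 192.168.1.3 00:00:80:cc:bb:aa )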
self.assertEqual(1, ipw.call_count) ipw.assert_has_calls([mock.call(['dhcp_release', None, ip2, mac2], run_as_root=True)]) def test_release_unused_leases_with_dhcp_port(self): dnsmasq = self._get_dnsmasq(FakeNetworkDhcpPort()) ip1 = '192.168.1.2' mac1 = '00:00:80:aa:bb:cc' ip2 = '192.168.1.3' mac2 = '00:00:80:cc:bb:aa' old_leases = set([(ip1, mac1, None), (ip2, mac2, None)]) dnsmasq._read_hosts_file_leases = mock.Mock(return_value=old_leases) dnsmasq._output_hosts_file = mock.Mock() dnsmasq._release_lease = mock.Mock() dnsmasq.device_manager.get_device_id = mock.Mock( return_value='fake_dhcp_port') dnsmasq._release_unused_leases() self.assertFalse( dnsmasq.device_manager.driver.unplug.called) def test_release_unused_leases_with_client_id(self): dnsmasq = self._get_dnsmasq(FakeDualNetwork()) ip1 = '192.168.1.2' mac1 = '00:00:80:aa:bb:cc' client_id1 = 'client1' ip2 = '192.168.1.3' mac2 = '00:00:80:cc:bb:aa' client_id2 = 'client2' old_leases = set([(ip1, mac1, client_id1), (ip2, mac2, client_id2)]) dnsmasq._read_hosts_file_leases = mock.Mock(return_value=old_leases) dnsmasq._output_hosts_file = mock.Mock() dnsmasq._release_lease = mock.Mock() dnsmasq.network.ports = [] dnsmasq._release_unused_leases() dnsmasq._release_lease.assert_has_calls( [mock.call(mac1, ip1, client_id1), mock.call(mac2, ip2, client_id2)], any_order=True) def test_release_unused_leases_one_lease(self): dnsmasq = self._get_dnsmasq(FakeDualNetwork()) ip1 = '192.168.0.2' mac1 = '00:00:80:aa:bb:cc' ip2 = '192.168.0.3' mac2 = '00:00:80:cc:bb:aa' old_leases = set([(ip1, mac1, None), (ip2, mac2, None)]) dnsmasq._read_hosts_file_leases = mock.Mock(return_value=old_leases) dnsmasq._output_hosts_file = mock.Mock() dnsmasq._release_lease = mock.Mock() dnsmasq.network.ports = [FakePort1()] dnsmasq._release_unused_leases() dnsmasq._release_lease.assert_called_once_with( mac2, ip2, None) def test_release_unused_leases_one_lease_with_client_id(self): dnsmasq = self._get_dnsmasq(FakeDualNetwork()) ip1 = '192.168.0.2' mac1 = '00:00:80:aa:bb:cc' client_id1 = 'client1' ip2 = '192.168.0.5' mac2 = '00:00:0f:aa:bb:55' client_id2 = 'test5' old_leases = set([(ip1, mac1, client_id1), (ip2, mac2, client_id2)]) dnsmasq._read_hosts_file_leases = mock.Mock(return_value=old_leases) dnsmasq._output_hosts_file = mock.Mock() dnsmasq._release_lease = mock.Mock() dnsmasq.network.ports = [FakePort5()] dnsmasq._release_unused_leases() dnsmasq._release_lease.assert_called_once_with( mac1, ip1, client_id1) def test_read_hosts_file_leases(self): filename = '/path/to/file' lines = ["00:00:80:aa:bb:cc,inst-name,192.168.0.1", "00:00:80:aa:bb:cc,inst-name,[fdca:3ba5:a17a::1]"] mock_open = self.useFixture( tools.OpenFixture(filename, '\n'.join(lines))).mock_open dnsmasq = self._get_dnsmasq(FakeDualNetwork()) leases = dnsmasq._read_hosts_file_leases(filename) self.assertEqual(set([("192.168.0.1", "00:00:80:aa:bb:cc", None), ("fdca:3ba5:a17a::1", "00:00:80:aa:bb:cc", None)]), leases) mock_open.assert_called_once_with(filename) def test_read_hosts_file_leases_with_client_id(self): filename = '/path/to/file' lines = ["00:00:80:aa:bb:cc,id:client1,inst-name,192.168.0.1", "00:00:80:aa:bb:cc,id:client2,inst-name," "[fdca:3ba5:a17a::1]"] mock_open = self.useFixture( tools.OpenFixture(filename, '\n'.join(lines))).mock_open dnsmasq = self._get_dnsmasq(FakeDualNetwork()) leases = dnsmasq._read_hosts_file_leases(filename) self.assertEqual(set([("192.168.0.1", "00:00:80:aa:bb:cc", 'client1'), ("fdca:3ba5:a17a::1", "00:00:80:aa:bb:cc", 'client2')]), leases) 
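        # For reference: host-file entries carrying a client id use the form
        #   <mac>,id:<client id>,<hostname>,<ip>
        # with IPv6 addresses wrapped in brackets, and are parsed into
        # (ip, mac, client_id) tuples by _read_hosts_file_leases.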
mock_open.assert_called_once_with(filename)

    def test_read_hosts_file_leases_with_stateless_IPv6_tag(self):
        filename = self.get_temp_file_path('leases')
        with open(filename, "w") as leasesfile:
            lines = [
                "00:00:80:aa:bb:cc,id:client1,inst-name,192.168.0.1\n",
                "00:00:80:aa:bb:cc,set:ccccccccc-cccc-cccc-cccc-cccccccc\n",
                "00:00:80:aa:bb:cc,id:client2,inst-name,[fdca:3ba5:a17a::1]\n"]
            for line in lines:
                leasesfile.write(line)

        dnsmasq = self._get_dnsmasq(FakeDualNetwork())
        leases = dnsmasq._read_hosts_file_leases(filename)

        self.assertEqual(set([("192.168.0.1", "00:00:80:aa:bb:cc",
                               'client1'),
                              ("fdca:3ba5:a17a::1", "00:00:80:aa:bb:cc",
                               'client2')]), leases)

    def test_make_subnet_interface_ip_map(self):
        with mock.patch('neutron.agent.linux.ip_lib.IPDevice') as ip_dev:
            ip_dev.return_value.addr.list.return_value = [
                {'cidr': '192.168.0.1/24'}
            ]

            dm = self._get_dnsmasq(FakeDualNetwork())

            self.assertEqual(
                dm._make_subnet_interface_ip_map(),
                {FakeV4Subnet().id: '192.168.0.1'}
            )

    def test_remove_config_files(self):
        net = FakeV4Network()
        path = '/opt/data/neutron/dhcp'
        self.conf.dhcp_confs = path
        lp = LocalChild(self.conf, net)
        lp._remove_config_files()
        self.rmtree.assert_called_once_with(os.path.join(path, net.id),
                                            ignore_errors=True)

    def test_existing_dhcp_networks(self):
        path = '/opt/data/neutron/dhcp'
        self.conf.dhcp_confs = path

        cases = {
            # network_uuid --> is_dhcp_alive?
            'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa': True,
            'bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb': False,
            'not_uuid_like_name': True
        }

        def active_fake(self, instance, cls):
            return cases[instance.network.id]

        with mock.patch('os.listdir') as mock_listdir:
            with mock.patch.object(dhcp.Dnsmasq, 'active') as mock_active:
                mock_active.__get__ = active_fake
                mock_listdir.return_value = cases.keys()

                result = dhcp.Dnsmasq.existing_dhcp_networks(self.conf)

                mock_listdir.assert_called_once_with(path)
                self.assertEqual(['aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa',
                                  'bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb'],
                                 sorted(result))

    def test__output_hosts_file_log_only_twice(self):
        dm = self._get_dnsmasq(FakeDualStackNetworkSingleDHCP())
        with mock.patch.object(dhcp, 'LOG') as logger:
            logger.process.return_value = ('fake_message', {})
            dm._output_hosts_file()
        # The method logs twice: once at the start and once at the end.
        # There should be no other logs, no matter how many hosts there
        # are to dump in the file.
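        # (The two expected records are the begin/end debug messages that
        # bracket the host-file dump; no per-host line may be logged.)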
self.assertEqual(2, len(logger.method_calls)) def test_only_populates_dhcp_enabled_subnets(self): exp_host_name = '/dhcp/eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee/host' exp_host_data = ('00:00:80:aa:bb:cc,host-192-168-0-2.openstacklocal.,' '192.168.0.2\n' '00:16:3E:C2:77:1D,host-192-168-0-4.openstacklocal.,' '192.168.0.4\n' '00:00:0f:rr:rr:rr,host-192-168-0-1.openstacklocal.,' '192.168.0.1\n').lstrip() dm = self._get_dnsmasq(FakeDualStackNetworkSingleDHCP()) dm._output_hosts_file() self.safe.assert_has_calls([mock.call(exp_host_name, exp_host_data)]) def test_only_populates_dhcp_client_id(self): exp_host_name = '/dhcp/aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa/host' exp_host_data = ('00:00:80:aa:bb:cc,host-192-168-0-2.openstacklocal.,' '192.168.0.2\n' '00:00:0f:aa:bb:55,id:test5,' 'host-192-168-0-5.openstacklocal.,' '192.168.0.5\n' '00:00:0f:aa:bb:66,id:test6,' 'host-192-168-0-6.openstacklocal.,192.168.0.6,' 'set:ccccccccc-cccc-cccc-cccc-ccccccccc\n').lstrip() dm = self._get_dnsmasq(FakeV4NetworkClientId()) dm._output_hosts_file() self.safe.assert_has_calls([mock.call(exp_host_name, exp_host_data)]) def test_only_populates_dhcp_enabled_subnet_on_a_network(self): exp_host_name = '/dhcp/cccccccc-cccc-cccc-cccc-cccccccccccc/host' exp_host_data = ('00:00:80:aa:bb:cc,host-192-168-0-2.openstacklocal.,' '192.168.0.2\n' '00:00:f3:aa:bb:cc,host-192-168-0-3.openstacklocal.,' '192.168.0.3\n' '00:00:0f:aa:bb:cc,host-192-168-0-4.openstacklocal.,' '192.168.0.4\n' '00:00:0f:rr:rr:rr,host-192-168-0-1.openstacklocal.,' '192.168.0.1\n').lstrip() dm = self._get_dnsmasq(FakeDualNetworkSingleDHCP()) dm._output_hosts_file() self.safe.assert_has_calls([mock.call(exp_host_name, exp_host_data)]) def test_host_and_opts_file_on_stateless_dhcpv6_network(self): exp_host_name = '/dhcp/bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb/host' exp_host_data = ('00:16:3e:c2:77:1d,' 'set:hhhhhhhh-hhhh-hhhh-hhhh-hhhhhhhhhhhh\n').lstrip() exp_opt_name = '/dhcp/bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb/opts' exp_opt_data = ('tag:tag0,option6:domain-search,openstacklocal\n' 'tag:hhhhhhhh-hhhh-hhhh-hhhh-hhhhhhhhhhhh,' 'option6:dns-server,ffea:3ba5:a17a:4ba3::100').lstrip() dm = self._get_dnsmasq(FakeV6NetworkStatelessDHCP()) dm._output_hosts_file() dm._output_opts_file() self.safe.assert_has_calls([mock.call(exp_host_name, exp_host_data), mock.call(exp_opt_name, exp_opt_data)]) def test_host_file_on_net_with_v6_slaac_and_v4(self): exp_host_name = '/dhcp/eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee/host' exp_host_data = ( '00:00:80:aa:bb:cc,host-192-168-0-2.openstacklocal.,192.168.0.2,' 'set:eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee\n' '00:16:3E:C2:77:1D,host-192-168-0-4.openstacklocal.,192.168.0.4,' 'set:gggggggg-gggg-gggg-gggg-gggggggggggg\n00:00:0f:rr:rr:rr,' 'host-192-168-0-1.openstacklocal.,192.168.0.1,' 'set:rrrrrrrr-rrrr-rrrr-rrrr-rrrrrrrrrrrr\n').lstrip() dm = self._get_dnsmasq(FakeDualStackNetworkingSingleDHCPTags()) dm._output_hosts_file() self.safe.assert_has_calls([mock.call(exp_host_name, exp_host_data)]) def test_host_and_opts_file_on_net_with_V6_stateless_and_V4_subnets( self): exp_host_name = '/dhcp/bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb/host' exp_host_data = ( '00:16:3e:c2:77:1d,set:hhhhhhhh-hhhh-hhhh-hhhh-hhhhhhhhhhhh\n' '00:16:3e:c2:77:1d,host-192-168-0-3.openstacklocal.,' '192.168.0.3,set:hhhhhhhh-hhhh-hhhh-hhhh-hhhhhhhhhhhh\n' '00:00:0f:rr:rr:rr,' 'host-192-168-0-1.openstacklocal.,192.168.0.1\n').lstrip() exp_opt_name = '/dhcp/bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb/opts' exp_opt_data = ( 'tag:tag0,option6:domain-search,openstacklocal\n' 
'tag:tag1,option:dns-server,8.8.8.8\n' 'tag:tag1,option:classless-static-route,20.0.0.1/24,20.0.0.1,' '169.254.169.254/32,192.168.0.1,0.0.0.0/0,192.168.0.1\n' 'tag:tag1,249,20.0.0.1/24,20.0.0.1,169.254.169.254/32,' '192.168.0.1,0.0.0.0/0,192.168.0.1\n' 'tag:tag1,option:router,192.168.0.1\n' 'tag:hhhhhhhh-hhhh-hhhh-hhhh-hhhhhhhhhhhh,' 'option6:dns-server,ffea:3ba5:a17a:4ba3::100').lstrip() dm = self._get_dnsmasq(FakeNetworkWithV6SatelessAndV4DHCPSubnets()) dm._output_hosts_file() dm._output_opts_file() self.safe.assert_has_calls([mock.call(exp_host_name, exp_host_data), mock.call(exp_opt_name, exp_opt_data)]) def test_should_enable_metadata_isolated_network_returns_true(self): self.assertTrue(dhcp.Dnsmasq.should_enable_metadata( self.conf, FakeV4NetworkNoRouter())) def test_should_enable_metadata_non_isolated_network_returns_false(self): self.assertFalse(dhcp.Dnsmasq.should_enable_metadata( self.conf, FakeV4NetworkDistRouter())) def test_should_enable_metadata_isolated_meta_disabled_returns_false(self): self.conf.set_override('enable_isolated_metadata', False) self.assertFalse(dhcp.Dnsmasq.should_enable_metadata(self.conf, mock.ANY)) def test_should_enable_metadata_with_metadata_network_returns_true(self): self.conf.set_override('enable_metadata_network', True) self.assertTrue(dhcp.Dnsmasq.should_enable_metadata( self.conf, FakeV4MetadataNetwork())) def test_should_force_metadata_returns_true(self): self.conf.set_override("force_metadata", True) self.assertTrue(dhcp.Dnsmasq.should_enable_metadata( self.conf, FakeDualNetworkDualDHCP())) def _test__generate_opts_per_subnet_helper(self, config_opts, expected_mdt_ip): for key, value in config_opts.items(): self.conf.set_override(key, value) dm = self._get_dnsmasq(FakeNetworkDhcpPort()) with mock.patch('neutron.agent.linux.ip_lib.IPDevice') as ipdev_mock: list_addr = ipdev_mock.return_value.addr.list list_addr.return_value = [{'cidr': alloc.ip_address + '/24'} for alloc in FakeDhcpPort().fixed_ips] options, idx_map = dm._generate_opts_per_subnet() contains_metadata_ip = any(['%s/32' % dhcp.METADATA_DEFAULT_IP in line for line in options]) self.assertEqual(expected_mdt_ip, contains_metadata_ip) def test__generate_opts_per_subnet_no_metadata(self): config = {'enable_isolated_metadata': False, 'force_metadata': False} self._test__generate_opts_per_subnet_helper(config, False) def test__generate_opts_per_subnet_isolated_metadata_with_router(self): config = {'enable_isolated_metadata': True, 'force_metadata': False} self._test__generate_opts_per_subnet_helper(config, True) def test__generate_opts_per_subnet_forced_metadata(self): config = {'enable_isolated_metadata': False, 'force_metadata': True} self._test__generate_opts_per_subnet_helper(config, True) class TestDeviceManager(TestConfBase): def setUp(self): super(TestDeviceManager, self).setUp() ip_lib_patcher = mock.patch('neutron.agent.linux.dhcp.ip_lib') load_interface_driver_patcher = mock.patch( 'neutron.agent.linux.dhcp.agent_common_utils.' 'load_interface_driver') self.mock_ip_lib = ip_lib_patcher.start() self.mock_load_interface_driver = load_interface_driver_patcher.start() def _test_setup(self, load_interface_driver, ip_lib, use_gateway_ips): with mock.patch.object(dhcp.ip_lib, 'IPDevice') as mock_IPDevice: # Create DeviceManager. 
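            # Register the metadata-related options that DeviceManager
            # consults, and stub the plugin and IPDevice so that setup()
            # never touches the real host.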
self.conf.register_opt(cfg.BoolOpt('enable_isolated_metadata', default=False)) self.conf.register_opt(cfg.BoolOpt('force_metadata', default=False)) plugin = mock.Mock() device = mock.Mock() mock_IPDevice.return_value = device device.route.get_gateway.return_value = None mgr = dhcp.DeviceManager(self.conf, plugin) load_interface_driver.assert_called_with(self.conf) # Setup with no existing DHCP port - expect a new DHCP port to # be created. network = FakeDeviceManagerNetwork() network.tenant_id = 'Tenant A' def mock_create(dict): port = dhcp.DictModel(dict['port']) port.id = 'abcd-123456789' port.mac_address = '00-12-34-56-78-90' port.fixed_ips = [ dhcp.DictModel({'subnet_id': ip['subnet_id'], 'ip_address': 'unique-IP-address'}) for ip in port.fixed_ips ] return port plugin.create_dhcp_port.side_effect = mock_create mgr.driver.get_device_name.return_value = 'ns-XXX' mgr.driver.use_gateway_ips = use_gateway_ips ip_lib.ensure_device_is_ready.return_value = True mgr.setup(network) plugin.create_dhcp_port.assert_called_with(mock.ANY) mgr.driver.init_l3.assert_called_with('ns-XXX', mock.ANY, namespace='qdhcp-ns') cidrs = set(mgr.driver.init_l3.call_args[0][1]) if use_gateway_ips: self.assertEqual(cidrs, set(['%s/%s' % (s.gateway_ip, s.cidr.split('/')[1]) for s in network.subnets])) else: self.assertEqual(cidrs, set(['unique-IP-address/24', 'unique-IP-address/64'])) # Now call setup again. This time we go through the existing # port code path, and the driver's init_l3 method is called # again. plugin.create_dhcp_port.reset_mock() mgr.driver.init_l3.reset_mock() mgr.setup(network) mgr.driver.init_l3.assert_called_with('ns-XXX', mock.ANY, namespace='qdhcp-ns') cidrs = set(mgr.driver.init_l3.call_args[0][1]) if use_gateway_ips: self.assertEqual(cidrs, set(['%s/%s' % (s.gateway_ip, s.cidr.split('/')[1]) for s in network.subnets])) else: self.assertEqual(cidrs, set(['unique-IP-address/24', 'unique-IP-address/64'])) self.assertFalse(plugin.create_dhcp_port.called) def test_setup_device_manager_dhcp_port_without_gateway_ips(self): self._test_setup(self.mock_load_interface_driver, self.mock_ip_lib, use_gateway_ips=False) def test_setup_device_manager_dhcp_port_with_gateway_ips(self): self._test_setup(self.mock_load_interface_driver, self.mock_ip_lib, use_gateway_ips=True) def _test_setup_reserved(self, enable_isolated_metadata=False, force_metadata=False): with mock.patch.object(dhcp.ip_lib, 'IPDevice') as mock_IPDevice: # Create DeviceManager. self.conf.register_opt( cfg.BoolOpt('enable_isolated_metadata', default=enable_isolated_metadata)) self.conf.register_opt( cfg.BoolOpt('force_metadata', default=force_metadata)) plugin = mock.Mock() device = mock.Mock() mock_IPDevice.return_value = device device.route.get_gateway.return_value = None mgr = dhcp.DeviceManager(self.conf, plugin) self.mock_load_interface_driver.assert_called_with(self.conf) # Setup with a reserved DHCP port. 
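            # A reserved port (presumably modelled by FakeReservedPort with
            # device_id set to constants.DEVICE_ID_RESERVED_DHCP_PORT)
            # should be claimed via plugin.update_dhcp_port() instead of
            # creating a brand new DHCP port.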
network = FakeDualNetworkReserved()
            network.tenant_id = 'Tenant A'
            reserved_port = network.ports[-1]

            def mock_update(port_id, dict):
                port = reserved_port
                port.network_id = dict['port']['network_id']
                port.device_id = dict['port']['device_id']
                return port

            plugin.update_dhcp_port.side_effect = mock_update
            mgr.driver.get_device_name.return_value = 'ns-XXX'
            mgr.driver.use_gateway_ips = False
            self.mock_ip_lib.ensure_device_is_ready.return_value = True
            mgr.setup(network)
            plugin.update_dhcp_port.assert_called_with(reserved_port.id,
                                                       mock.ANY)

            except_ips = ['192.168.0.6/24']
            if enable_isolated_metadata or force_metadata:
                except_ips.append(dhcp.METADATA_DEFAULT_CIDR)
            mgr.driver.init_l3.assert_called_with('ns-XXX', except_ips,
                                                  namespace='qdhcp-ns')

    def test_setup_reserved_and_disable_metadata(self):
        """Test the reserved port case of DeviceManager's DHCP port setup
        logic with metadata disabled.
        """
        self._test_setup_reserved()

    def test_setup_reserved_with_isolated_metadata_enable(self):
        """Test the reserved port case of DeviceManager's DHCP port setup
        logic with isolated_metadata enabled.
        """
        self._test_setup_reserved(enable_isolated_metadata=True)

    def test_setup_reserved_with_force_metadata_enable(self):
        """Test the reserved port case of DeviceManager's DHCP port setup
        logic with force_metadata enabled.
        """
        self._test_setup_reserved(force_metadata=True)

    def test_setup_reserved_and_enable_metadata(self):
        """Test the reserved port case of DeviceManager's DHCP port setup
        logic with both isolated_metadata and force_metadata enabled.
        """
        self._test_setup_reserved(enable_isolated_metadata=True,
                                  force_metadata=True)

    def test_setup_reserved_2(self):
        """Test the scenario where a network has two reserved ports, and
        update_dhcp_port fails for the first of them.
        """
        with mock.patch.object(dhcp.ip_lib, 'IPDevice') as mock_IPDevice:
            # Create DeviceManager.
            self.conf.register_opt(
                cfg.BoolOpt('enable_isolated_metadata', default=False))
            self.conf.register_opt(
                cfg.BoolOpt('force_metadata', default=False))
            plugin = mock.Mock()
            device = mock.Mock()
            mock_IPDevice.return_value = device
            device.route.get_gateway.return_value = None
            mgr = dhcp.DeviceManager(self.conf, plugin)
            self.mock_load_interface_driver.assert_called_with(self.conf)

            # Setup with a reserved DHCP port.
            network = FakeDualNetworkReserved2()
            network.tenant_id = 'Tenant A'
            reserved_port_1 = network.ports[-2]
            reserved_port_2 = network.ports[-1]

            def mock_update(port_id, dict):
                if port_id == reserved_port_1.id:
                    return None

                port = reserved_port_2
                port.network_id = dict['port']['network_id']
                port.device_id = dict['port']['device_id']
                return port

            plugin.update_dhcp_port.side_effect = mock_update
            mgr.driver.get_device_name.return_value = 'ns-XXX'
            mgr.driver.use_gateway_ips = False
            self.mock_ip_lib.ensure_device_is_ready.return_value = True
            mgr.setup(network)
            plugin.update_dhcp_port.assert_called_with(reserved_port_2.id,
                                                       mock.ANY)

            mgr.driver.init_l3.assert_called_with('ns-XXX',
                                                  ['192.168.0.6/24'],
                                                  namespace='qdhcp-ns')


class TestDictModel(base.BaseTestCase):
    def test_string_representation_port(self):
        port = dhcp.DictModel({'id': 'id', 'network_id': 'net_id'})
        self.assertEqual('id=id, network_id=net_id', str(port))

    def test_string_representation_network(self):
        net = dhcp.DictModel({'id': 'id', 'name': 'myname'})
        self.assertEqual('id=id, name=myname', str(net))
neutron-8.4.0/neutron/tests/unit/agent/linux/test_iptables_manager.py0000664000567000056710000013257413044372760027356 0ustar  jenkinsjenkins00000000000000# Copyright 2012 Locaweb.
# All Rights Reserved.
# # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os import sys import mock from oslo_config import cfg import testtools from neutron._i18n import _ from neutron.agent.linux import iptables_comments as ic from neutron.agent.linux import iptables_manager from neutron.common import constants from neutron.common import exceptions as n_exc from neutron.tests import base from neutron.tests import tools IPTABLES_ARG = {'bn': iptables_manager.binary_name, 'snat_out_comment': ic.SNAT_OUT, 'filter_rules': '', 'mark': constants.ROUTER_MARK_MASK} NAT_TEMPLATE = ('# Generated by iptables_manager\n' '*nat\n' ':OUTPUT - [0:0]\n' ':POSTROUTING - [0:0]\n' ':PREROUTING - [0:0]\n' ':neutron-postrouting-bottom - [0:0]\n' ':%(bn)s-OUTPUT - [0:0]\n' ':%(bn)s-POSTROUTING - [0:0]\n' ':%(bn)s-PREROUTING - [0:0]\n' ':%(bn)s-float-snat - [0:0]\n' ':%(bn)s-snat - [0:0]\n' '-I OUTPUT 1 -j %(bn)s-OUTPUT\n' '-I POSTROUTING 1 -j %(bn)s-POSTROUTING\n' '-I POSTROUTING 2 -j neutron-postrouting-bottom\n' '-I PREROUTING 1 -j %(bn)s-PREROUTING\n' '-I neutron-postrouting-bottom 1 -j %(bn)s-snat\n' '-I %(bn)s-snat 1 -j ' '%(bn)s-float-snat\n' 'COMMIT\n' '# Completed by iptables_manager\n') NAT_DUMP = NAT_TEMPLATE % IPTABLES_ARG FILTER_TEMPLATE = ('# Generated by iptables_manager\n' '*filter\n' ':FORWARD - [0:0]\n' ':INPUT - [0:0]\n' ':OUTPUT - [0:0]\n' ':neutron-filter-top - [0:0]\n' ':%(bn)s-FORWARD - [0:0]\n' ':%(bn)s-INPUT - [0:0]\n' ':%(bn)s-OUTPUT - [0:0]\n' ':%(bn)s-local - [0:0]\n' ':%(bn)s-scope - [0:0]\n' '-I FORWARD 1 -j neutron-filter-top\n' '-I FORWARD 2 -j %(bn)s-FORWARD\n' '-I INPUT 1 -j %(bn)s-INPUT\n' '-I OUTPUT 1 -j neutron-filter-top\n' '-I OUTPUT 2 -j %(bn)s-OUTPUT\n' '-I neutron-filter-top 1 -j %(bn)s-local\n' '-I %(bn)s-FORWARD 1 -j %(bn)s-scope\n' 'COMMIT\n' '# Completed by iptables_manager\n') FILTER_DUMP = FILTER_TEMPLATE % IPTABLES_ARG FILTER_WITH_RULES_TEMPLATE = ( '# Generated by iptables_manager\n' '*filter\n' ':FORWARD - [0:0]\n' ':INPUT - [0:0]\n' ':OUTPUT - [0:0]\n' ':neutron-filter-top - [0:0]\n' ':%(bn)s-FORWARD - [0:0]\n' ':%(bn)s-INPUT - [0:0]\n' ':%(bn)s-OUTPUT - [0:0]\n' ':%(bn)s-filter - [0:0]\n' ':%(bn)s-local - [0:0]\n' ':%(bn)s-scope - [0:0]\n' '-I FORWARD 1 -j neutron-filter-top\n' '-I FORWARD 2 -j %(bn)s-FORWARD\n' '-I INPUT 1 -j %(bn)s-INPUT\n' '-I OUTPUT 1 -j neutron-filter-top\n' '-I OUTPUT 2 -j %(bn)s-OUTPUT\n' '-I neutron-filter-top 1 -j %(bn)s-local\n' '-I %(bn)s-FORWARD 1 -j %(bn)s-scope\n' '%(filter_rules)s' 'COMMIT\n' '# Completed by iptables_manager\n') COMMENTED_NAT_DUMP = ( '# Generated by iptables_manager\n' '*nat\n' ':OUTPUT - [0:0]\n' ':POSTROUTING - [0:0]\n' ':PREROUTING - [0:0]\n' ':neutron-postrouting-bottom - [0:0]\n' ':%(bn)s-OUTPUT - [0:0]\n' ':%(bn)s-POSTROUTING - [0:0]\n' ':%(bn)s-PREROUTING - [0:0]\n' ':%(bn)s-float-snat - [0:0]\n' ':%(bn)s-snat - [0:0]\n' '-I OUTPUT 1 -j %(bn)s-OUTPUT\n' '-I POSTROUTING 1 -j %(bn)s-POSTROUTING\n' '-I POSTROUTING 2 -j neutron-postrouting-bottom\n' '-I PREROUTING 1 -j %(bn)s-PREROUTING\n' '-I neutron-postrouting-bottom 1 ' '-m comment 
--comment "%(snat_out_comment)s" -j %(bn)s-snat\n' '-I %(bn)s-snat 1 -j ' '%(bn)s-float-snat\n' 'COMMIT\n' '# Completed by iptables_manager\n' % IPTABLES_ARG) TRAFFIC_COUNTERS_DUMP = ( 'Chain OUTPUT (policy ACCEPT 400 packets, 65901 bytes)\n' ' pkts bytes target prot opt in out source' ' destination \n' ' 400 65901 chain1 all -- * * 0.0.0.0/0' ' 0.0.0.0/0 \n' ' 400 65901 chain2 all -- * * 0.0.0.0/0' ' 0.0.0.0/0 \n') class IptablesTestCase(base.BaseTestCase): def test_get_binary_name_in_unittest(self): # Corresponds to sys.argv content when running python -m unittest class with mock.patch('sys.argv', ['python -m unittest', 'class']): binary_name = iptables_manager.get_binary_name() self.assertEqual('python_-m_unitte', binary_name) class IptablesCommentsTestCase(base.BaseTestCase): def setUp(self): super(IptablesCommentsTestCase, self).setUp() cfg.CONF.set_override('comment_iptables_rules', True, 'AGENT') self.iptables = iptables_manager.IptablesManager() self.execute = mock.patch.object(self.iptables, "execute").start() def test_comments_short_enough(self): for attr in dir(ic): if not attr.startswith('__') and len(getattr(ic, attr)) > 255: self.fail("Iptables comment %s is longer than 255 characters." % attr) def test_reordering_of_jump_rule_comments(self): # jump at the start self.assertEqual( '-m comment --comment "aloha" -j sg-chain', iptables_manager.comment_rule('-j sg-chain', 'aloha')) # jump in the middle self.assertEqual( '-s source -m comment --comment "aloha" -j sg-chain', iptables_manager.comment_rule('-s source -j sg-chain', 'aloha')) # no jump rule self.assertEqual( '-s source -m comment --comment "aloha"', iptables_manager.comment_rule('-s source', 'aloha')) def test_add_filter_rule(self): iptables_args = {} iptables_args.update(IPTABLES_ARG) filter_rules = ('-I %(bn)s-INPUT 1 -s 0/0 -d 192.168.0.2 -j ' '%(bn)s-filter\n-I %(bn)s-filter 1 -j DROP\n' % iptables_args) iptables_args['filter_rules'] = filter_rules filter_dump_mod = FILTER_WITH_RULES_TEMPLATE % iptables_args raw_dump = _generate_raw_dump(IPTABLES_ARG) mangle_dump = _generate_mangle_dump(IPTABLES_ARG) expected_calls_and_values = [ (mock.call(['iptables-save'], run_as_root=True), ''), (mock.call(['iptables-restore', '-n'], process_input=(filter_dump_mod + mangle_dump + COMMENTED_NAT_DUMP + raw_dump), run_as_root=True), None), (mock.call(['iptables-save'], run_as_root=True), ''), (mock.call(['iptables-restore', '-n'], process_input=(FILTER_DUMP + mangle_dump + COMMENTED_NAT_DUMP + raw_dump), run_as_root=True ), None), ] tools.setup_mock_calls(self.execute, expected_calls_and_values) self.iptables.ipv4['filter'].add_chain('filter') self.iptables.ipv4['filter'].add_rule('filter', '-j DROP') self.iptables.ipv4['filter'].add_rule('INPUT', '-s 0/0 -d 192.168.0.2 -j' ' %(bn)s-filter' % IPTABLES_ARG) self.iptables.apply() self.iptables.ipv4['filter'].remove_rule('filter', '-j DROP') self.iptables.ipv4['filter'].remove_rule('INPUT', '-s 0/0 -d 192.168.0.2 -j' ' %(bn)s-filter' % IPTABLES_ARG) self.iptables.ipv4['filter'].remove_chain('filter') self.iptables.apply() tools.verify_mock_calls(self.execute, expected_calls_and_values) def _generate_mangle_dump(iptables_args): return ('# Generated by iptables_manager\n' '*mangle\n' ':FORWARD - [0:0]\n' ':INPUT - [0:0]\n' ':OUTPUT - [0:0]\n' ':POSTROUTING - [0:0]\n' ':PREROUTING - [0:0]\n' ':%(bn)s-FORWARD - [0:0]\n' ':%(bn)s-INPUT - [0:0]\n' ':%(bn)s-OUTPUT - [0:0]\n' ':%(bn)s-POSTROUTING - [0:0]\n' ':%(bn)s-PREROUTING - [0:0]\n' ':%(bn)s-float-snat - [0:0]\n' ':%(bn)s-floatingip - 
[0:0]\n' ':%(bn)s-mark - [0:0]\n' ':%(bn)s-scope - [0:0]\n' '-I FORWARD 1 -j %(bn)s-FORWARD\n' '-I INPUT 1 -j %(bn)s-INPUT\n' '-I OUTPUT 1 -j %(bn)s-OUTPUT\n' '-I POSTROUTING 1 -j %(bn)s-POSTROUTING\n' '-I PREROUTING 1 -j %(bn)s-PREROUTING\n' '-I %(bn)s-PREROUTING 1 -j %(bn)s-mark\n' '-I %(bn)s-PREROUTING 2 -j %(bn)s-scope\n' '-I %(bn)s-PREROUTING 3 -m connmark ! --mark 0x0/0xffff0000 ' '-j CONNMARK --restore-mark ' '--nfmask 0xffff0000 --ctmask 0xffff0000\n' '-I %(bn)s-PREROUTING 4 -j %(bn)s-floatingip\n' '-I %(bn)s-float-snat 1 -m connmark --mark 0x0/0xffff0000 ' '-j CONNMARK --save-mark ' '--nfmask 0xffff0000 --ctmask 0xffff0000\n' 'COMMIT\n' '# Completed by iptables_manager\n' % iptables_args) def _generate_mangle_dump_v6(iptables_args): return ('# Generated by iptables_manager\n' '*mangle\n' ':FORWARD - [0:0]\n' ':INPUT - [0:0]\n' ':OUTPUT - [0:0]\n' ':POSTROUTING - [0:0]\n' ':PREROUTING - [0:0]\n' ':%(bn)s-FORWARD - [0:0]\n' ':%(bn)s-INPUT - [0:0]\n' ':%(bn)s-OUTPUT - [0:0]\n' ':%(bn)s-POSTROUTING - [0:0]\n' ':%(bn)s-PREROUTING - [0:0]\n' ':%(bn)s-scope - [0:0]\n' '-I FORWARD 1 -j %(bn)s-FORWARD\n' '-I INPUT 1 -j %(bn)s-INPUT\n' '-I OUTPUT 1 -j %(bn)s-OUTPUT\n' '-I POSTROUTING 1 -j %(bn)s-POSTROUTING\n' '-I PREROUTING 1 -j %(bn)s-PREROUTING\n' '-I %(bn)s-PREROUTING 1 -j %(bn)s-scope\n' '-I %(bn)s-PREROUTING 2 -m connmark ! --mark 0x0/0xffff0000 ' '-j CONNMARK --restore-mark ' '--nfmask 0xffff0000 --ctmask 0xffff0000\n' 'COMMIT\n' '# Completed by iptables_manager\n' % iptables_args) def _generate_raw_dump(iptables_args): return ('# Generated by iptables_manager\n' '*raw\n' ':OUTPUT - [0:0]\n' ':PREROUTING - [0:0]\n' ':%(bn)s-OUTPUT - [0:0]\n' ':%(bn)s-PREROUTING - [0:0]\n' '-I OUTPUT 1 -j %(bn)s-OUTPUT\n' '-I PREROUTING 1 -j %(bn)s-PREROUTING\n' 'COMMIT\n' '# Completed by iptables_manager\n' % iptables_args) MANGLE_DUMP = _generate_mangle_dump(IPTABLES_ARG) MANGLE_DUMP_V6 = _generate_mangle_dump_v6(IPTABLES_ARG) RAW_DUMP = _generate_raw_dump(IPTABLES_ARG) class IptablesManagerStateFulTestCase(base.BaseTestCase): def setUp(self): super(IptablesManagerStateFulTestCase, self).setUp() cfg.CONF.set_override('comment_iptables_rules', False, 'AGENT') self.iptables = iptables_manager.IptablesManager() self.execute = mock.patch.object(self.iptables, "execute").start() def test_binary_name(self): expected = os.path.basename(sys.argv[0])[:16] self.assertEqual(expected, iptables_manager.binary_name) def test_get_chain_name(self): name = '0123456789' * 5 # 28 chars is the maximum length of iptables chain name. self.assertEqual(iptables_manager.get_chain_name(name, wrap=False), name[:28]) # 11 chars is the maximum length of chain name of iptable_manager # if binary_name is prepended. 
self.assertEqual(iptables_manager.get_chain_name(name, wrap=True), name[:11]) def test_defer_apply_with_exception(self): self.iptables._apply = mock.Mock(side_effect=Exception) with testtools.ExpectedException(n_exc.IpTablesApplyException): with self.iptables.defer_apply(): pass def _extend_with_ip6tables_filter(self, expected_calls, filter_dump): expected_calls.insert(2, ( mock.call(['ip6tables-save'], run_as_root=True), '')) expected_calls.insert(3, ( mock.call(['ip6tables-restore', '-n'], process_input=filter_dump, run_as_root=True), None)) expected_calls.extend([ (mock.call(['ip6tables-save'], run_as_root=True), ''), (mock.call(['ip6tables-restore', '-n'], process_input=filter_dump, run_as_root=True), None)]) def _test_add_and_remove_chain_custom_binary_name_helper(self, use_ipv6): bn = ("xbcdef" * 5) self.iptables = iptables_manager.IptablesManager( binary_name=bn, use_ipv6=use_ipv6) self.execute = mock.patch.object(self.iptables, "execute").start() iptables_args = {'bn': bn[:16], 'filter_rules': ''} filter_dump = FILTER_WITH_RULES_TEMPLATE % iptables_args filter_dump_ipv6 = FILTER_TEMPLATE % iptables_args filter_dump_mod = filter_dump nat_dump = NAT_TEMPLATE % iptables_args raw_dump = _generate_raw_dump(iptables_args) mangle_dump = _generate_mangle_dump(iptables_args) expected_calls_and_values = [ (mock.call(['iptables-save'], run_as_root=True), ''), (mock.call(['iptables-restore', '-n'], process_input=(filter_dump_mod + mangle_dump + nat_dump + raw_dump), run_as_root=True), None), (mock.call(['iptables-save'], run_as_root=True), ''), (mock.call(['iptables-restore', '-n'], process_input=(filter_dump + mangle_dump + nat_dump + raw_dump), run_as_root=True), None), ] if use_ipv6: mangle_dump_v6 = _generate_mangle_dump_v6(iptables_args) self._extend_with_ip6tables_filter( expected_calls_and_values, filter_dump_ipv6 + mangle_dump_v6 + raw_dump) tools.setup_mock_calls(self.execute, expected_calls_and_values) self.iptables.ipv4['filter'].add_chain('filter') self.iptables.apply() self.iptables.ipv4['filter'].empty_chain('filter') self.iptables.apply() tools.verify_mock_calls(self.execute, expected_calls_and_values) def test_add_and_remove_chain_custom_binary_name(self): self._test_add_and_remove_chain_custom_binary_name_helper(False) def test_add_and_remove_chain_custom_binary_name_with_ipv6(self): self._test_add_and_remove_chain_custom_binary_name_helper(True) def _test_empty_chain_custom_binary_name_helper(self, use_ipv6): bn = ("xbcdef" * 5)[:16] self.iptables = iptables_manager.IptablesManager( binary_name=bn, use_ipv6=use_ipv6) self.execute = mock.patch.object(self.iptables, "execute").start() iptables_args = {'bn': bn} filter_dump = FILTER_TEMPLATE % iptables_args filter_rules = ('-I %(bn)s-filter 1 -s 0/0 -d 192.168.0.2\n' % iptables_args) iptables_args['filter_rules'] = filter_rules filter_dump_mod = FILTER_WITH_RULES_TEMPLATE % iptables_args nat_dump = NAT_TEMPLATE % iptables_args raw_dump = _generate_raw_dump(iptables_args) mangle_dump = _generate_mangle_dump(iptables_args) expected_calls_and_values = [ (mock.call(['iptables-save'], run_as_root=True), ''), (mock.call(['iptables-restore', '-n'], process_input=(filter_dump_mod + mangle_dump + nat_dump + raw_dump), run_as_root=True), None), (mock.call(['iptables-save'], run_as_root=True), ''), (mock.call(['iptables-restore', '-n'], process_input=(filter_dump + mangle_dump + nat_dump + raw_dump), run_as_root=True), None), ] if use_ipv6: mangle_dump_v6 = _generate_mangle_dump_v6(iptables_args) self._extend_with_ip6tables_filter( 
expected_calls_and_values, filter_dump + mangle_dump_v6 + raw_dump) tools.setup_mock_calls(self.execute, expected_calls_and_values) self.iptables.ipv4['filter'].add_chain('filter') self.iptables.ipv4['filter'].add_rule('filter', '-s 0/0 -d 192.168.0.2') self.iptables.apply() self.iptables.ipv4['filter'].remove_chain('filter') self.iptables.apply() tools.verify_mock_calls(self.execute, expected_calls_and_values) def test_empty_chain_custom_binary_name(self): self._test_empty_chain_custom_binary_name_helper(False) def test_empty_chain_custom_binary_name_with_ipv6(self): self._test_empty_chain_custom_binary_name_helper(True) def _test_add_and_remove_chain_helper(self, use_ipv6): self.iptables = iptables_manager.IptablesManager( use_ipv6=use_ipv6) self.execute = mock.patch.object(self.iptables, "execute").start() filter_dump_mod = FILTER_WITH_RULES_TEMPLATE % IPTABLES_ARG expected_calls_and_values = [ (mock.call(['iptables-save'], run_as_root=True), ''), (mock.call(['iptables-restore', '-n'], process_input=(filter_dump_mod + MANGLE_DUMP + NAT_DUMP + RAW_DUMP), run_as_root=True), None), (mock.call(['iptables-save'], run_as_root=True), ''), (mock.call(['iptables-restore', '-n'], process_input=(FILTER_DUMP + MANGLE_DUMP + NAT_DUMP + RAW_DUMP), run_as_root=True), None), ] if use_ipv6: self._extend_with_ip6tables_filter( expected_calls_and_values, FILTER_DUMP + MANGLE_DUMP_V6 + RAW_DUMP) tools.setup_mock_calls(self.execute, expected_calls_and_values) self.iptables.ipv4['filter'].add_chain('filter') self.iptables.apply() self.iptables.ipv4['filter'].remove_chain('filter') self.iptables.apply() tools.verify_mock_calls(self.execute, expected_calls_and_values) def test_add_and_remove_chain(self): self._test_add_and_remove_chain_helper(False) def test_add_and_remove_chain_with_ipv6(self): self._test_add_and_remove_chain_helper(True) def _test_add_filter_rule_helper(self, use_ipv6): self.iptables = iptables_manager.IptablesManager( use_ipv6=use_ipv6) self.execute = mock.patch.object(self.iptables, "execute").start() iptables_args = {} iptables_args.update(IPTABLES_ARG) filter_rules = ('-I %(bn)s-INPUT 1 -s 0/0 -d 192.168.0.2 -j ' '%(bn)s-filter\n-I %(bn)s-filter 1 -j DROP\n' % iptables_args) iptables_args['filter_rules'] = filter_rules filter_dump_mod = FILTER_WITH_RULES_TEMPLATE % iptables_args raw_dump = RAW_DUMP % IPTABLES_ARG expected_calls_and_values = [ (mock.call(['iptables-save'], run_as_root=True), ''), (mock.call(['iptables-restore', '-n'], process_input=(filter_dump_mod + MANGLE_DUMP + NAT_DUMP + RAW_DUMP), run_as_root=True), None), (mock.call(['iptables-save'], run_as_root=True), ''), (mock.call(['iptables-restore', '-n'], process_input=(FILTER_DUMP + MANGLE_DUMP + NAT_DUMP + RAW_DUMP), run_as_root=True ), None), ] if use_ipv6: self._extend_with_ip6tables_filter( expected_calls_and_values, FILTER_DUMP + MANGLE_DUMP_V6 + raw_dump) tools.setup_mock_calls(self.execute, expected_calls_and_values) self.iptables.ipv4['filter'].add_chain('filter') self.iptables.ipv4['filter'].add_rule('filter', '-j DROP') self.iptables.ipv4['filter'].add_rule('INPUT', '-s 0/0 -d 192.168.0.2 -j' ' %(bn)s-filter' % IPTABLES_ARG) self.iptables.apply() self.iptables.ipv4['filter'].remove_rule('filter', '-j DROP') self.iptables.ipv4['filter'].remove_rule('INPUT', '-s 0/0 -d 192.168.0.2 -j' ' %(bn)s-filter' % IPTABLES_ARG) self.iptables.ipv4['filter'].remove_chain('filter') self.iptables.apply() tools.verify_mock_calls(self.execute, expected_calls_and_values) def test_add_filter_rule(self): 
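        # IPv4-only run of the add/remove round trip; the *_with_ipv6
        # variant below repeats the same scenario with the parallel
        # ip6tables-save/ip6tables-restore calls appended to the expected
        # call list by _extend_with_ip6tables_filter().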
self._test_add_filter_rule_helper(False) def test_add_filter_rule_with_ipv6(self): self._test_add_filter_rule_helper(True) def _test_rule_with_wrap_target_helper(self, use_ipv6): self.iptables = iptables_manager.IptablesManager( use_ipv6=use_ipv6) self.execute = mock.patch.object(self.iptables, "execute").start() name = '0123456789' * 5 wrap = "%s-%s" % (iptables_manager.binary_name, iptables_manager.get_chain_name(name)) iptables_args = {'bn': iptables_manager.binary_name, 'wrap': wrap} filter_dump_mod = ('# Generated by iptables_manager\n' '*filter\n' ':FORWARD - [0:0]\n' ':INPUT - [0:0]\n' ':OUTPUT - [0:0]\n' ':neutron-filter-top - [0:0]\n' ':%(wrap)s - [0:0]\n' ':%(bn)s-FORWARD - [0:0]\n' ':%(bn)s-INPUT - [0:0]\n' ':%(bn)s-OUTPUT - [0:0]\n' ':%(bn)s-local - [0:0]\n' ':%(bn)s-scope - [0:0]\n' '-I FORWARD 1 -j neutron-filter-top\n' '-I FORWARD 2 -j %(bn)s-FORWARD\n' '-I INPUT 1 -j %(bn)s-INPUT\n' '-I OUTPUT 1 -j neutron-filter-top\n' '-I OUTPUT 2 -j %(bn)s-OUTPUT\n' '-I neutron-filter-top 1 -j %(bn)s-local\n' '-I %(bn)s-FORWARD 1 -j %(bn)s-scope\n' '-I %(bn)s-INPUT 1 -s 0/0 -d 192.168.0.2 -j ' '%(wrap)s\n' 'COMMIT\n' '# Completed by iptables_manager\n' % iptables_args) raw_dump = RAW_DUMP % IPTABLES_ARG expected_calls_and_values = [ (mock.call(['iptables-save'], run_as_root=True), ''), (mock.call(['iptables-restore', '-n'], process_input=(filter_dump_mod + MANGLE_DUMP + NAT_DUMP + RAW_DUMP), run_as_root=True), None), (mock.call(['iptables-save'], run_as_root=True), ''), (mock.call(['iptables-restore', '-n'], process_input=(FILTER_DUMP + MANGLE_DUMP + NAT_DUMP + RAW_DUMP), run_as_root=True), None), ] if use_ipv6: self._extend_with_ip6tables_filter( expected_calls_and_values, FILTER_DUMP + MANGLE_DUMP_V6 + raw_dump) tools.setup_mock_calls(self.execute, expected_calls_and_values) self.iptables.ipv4['filter'].add_chain(name) self.iptables.ipv4['filter'].add_rule('INPUT', '-s 0/0 -d 192.168.0.2 -j' ' $%s' % name) self.iptables.apply() self.iptables.ipv4['filter'].remove_rule('INPUT', '-s 0/0 -d 192.168.0.2 -j' ' $%s' % name) self.iptables.ipv4['filter'].remove_chain(name) self.iptables.apply() tools.verify_mock_calls(self.execute, expected_calls_and_values) def test_rule_with_wrap_target(self): self._test_rule_with_wrap_target_helper(False) def test_rule_with_wrap_target_with_ipv6(self): self._test_rule_with_wrap_target_helper(True) def _test_add_mangle_rule_helper(self, use_ipv6): self.iptables = iptables_manager.IptablesManager( use_ipv6=use_ipv6) self.execute = mock.patch.object(self.iptables, "execute").start() mangle_dump_mod = ( '# Generated by iptables_manager\n' '*mangle\n' ':FORWARD - [0:0]\n' ':INPUT - [0:0]\n' ':OUTPUT - [0:0]\n' ':POSTROUTING - [0:0]\n' ':PREROUTING - [0:0]\n' ':%(bn)s-FORWARD - [0:0]\n' ':%(bn)s-INPUT - [0:0]\n' ':%(bn)s-OUTPUT - [0:0]\n' ':%(bn)s-POSTROUTING - [0:0]\n' ':%(bn)s-PREROUTING - [0:0]\n' ':%(bn)s-float-snat - [0:0]\n' ':%(bn)s-floatingip - [0:0]\n' ':%(bn)s-mangle - [0:0]\n' ':%(bn)s-mark - [0:0]\n' ':%(bn)s-scope - [0:0]\n' '-I FORWARD 1 -j %(bn)s-FORWARD\n' '-I INPUT 1 -j %(bn)s-INPUT\n' '-I OUTPUT 1 -j %(bn)s-OUTPUT\n' '-I POSTROUTING 1 -j %(bn)s-POSTROUTING\n' '-I PREROUTING 1 -j %(bn)s-PREROUTING\n' '-I %(bn)s-PREROUTING 1 -j %(bn)s-mark\n' '-I %(bn)s-PREROUTING 2 -j %(bn)s-scope\n' '-I %(bn)s-PREROUTING 3 -m connmark ! 
--mark 0x0/0xffff0000 ' '-j CONNMARK --restore-mark ' '--nfmask 0xffff0000 --ctmask 0xffff0000\n' '-I %(bn)s-PREROUTING 4 -j %(bn)s-floatingip\n' '-I %(bn)s-PREROUTING 5 -j MARK --set-xmark 0x1/%(mark)s\n' '-I %(bn)s-float-snat 1 -m connmark --mark 0x0/0xffff0000 ' '-j CONNMARK --save-mark ' '--nfmask 0xffff0000 --ctmask 0xffff0000\n' 'COMMIT\n' '# Completed by iptables_manager\n' % IPTABLES_ARG) expected_calls_and_values = [ (mock.call(['iptables-save'], run_as_root=True), ''), (mock.call(['iptables-restore', '-n'], process_input=(FILTER_DUMP + mangle_dump_mod + NAT_DUMP + RAW_DUMP), run_as_root=True), None), (mock.call(['iptables-save'], run_as_root=True), ''), (mock.call(['iptables-restore', '-n'], process_input=(FILTER_DUMP + MANGLE_DUMP + NAT_DUMP + RAW_DUMP), run_as_root=True), None), ] if use_ipv6: self._extend_with_ip6tables_filter( expected_calls_and_values, FILTER_DUMP + MANGLE_DUMP_V6 + RAW_DUMP) tools.setup_mock_calls(self.execute, expected_calls_and_values) self.iptables.ipv4['mangle'].add_chain('mangle') self.iptables.ipv4['mangle'].add_rule( 'PREROUTING', '-j MARK --set-xmark 0x1/%s' % constants.ROUTER_MARK_MASK) self.iptables.apply() self.iptables.ipv4['mangle'].remove_rule( 'PREROUTING', '-j MARK --set-xmark 0x1/%s' % constants.ROUTER_MARK_MASK) self.iptables.ipv4['mangle'].remove_chain('mangle') self.iptables.apply() tools.verify_mock_calls(self.execute, expected_calls_and_values) def test_add_mangle_rule(self): self._test_add_mangle_rule_helper(False) def test_add_mangle_rule_with_ipv6(self): self._test_add_mangle_rule_helper(True) def _test_add_nat_rule_helper(self, use_ipv6): self.iptables = iptables_manager.IptablesManager( use_ipv6=use_ipv6) self.execute = mock.patch.object(self.iptables, "execute").start() nat_dump = NAT_TEMPLATE % IPTABLES_ARG nat_dump_mod = ('# Generated by iptables_manager\n' '*nat\n' ':OUTPUT - [0:0]\n' ':POSTROUTING - [0:0]\n' ':PREROUTING - [0:0]\n' ':neutron-postrouting-bottom - [0:0]\n' ':%(bn)s-OUTPUT - [0:0]\n' ':%(bn)s-POSTROUTING - [0:0]\n' ':%(bn)s-PREROUTING - [0:0]\n' ':%(bn)s-float-snat - [0:0]\n' ':%(bn)s-nat - [0:0]\n' ':%(bn)s-snat - [0:0]\n' '-I OUTPUT 1 -j %(bn)s-OUTPUT\n' '-I POSTROUTING 1 -j %(bn)s-POSTROUTING\n' '-I POSTROUTING 2 -j neutron-postrouting-bottom\n' '-I PREROUTING 1 -j %(bn)s-PREROUTING\n' '-I neutron-postrouting-bottom 1 -j %(bn)s-snat\n' '-I %(bn)s-PREROUTING 1 -d 192.168.0.3 -j ' '%(bn)s-nat\n' '-I %(bn)s-nat 1 -p tcp --dport 8080 -j ' 'REDIRECT --to-port 80\n' '-I %(bn)s-snat 1 -j %(bn)s-float-snat\n' 'COMMIT\n' '# Completed by iptables_manager\n' % IPTABLES_ARG) raw_dump = RAW_DUMP % IPTABLES_ARG expected_calls_and_values = [ (mock.call(['iptables-save'], run_as_root=True), ''), (mock.call(['iptables-restore', '-n'], process_input=(FILTER_DUMP + MANGLE_DUMP + nat_dump_mod + RAW_DUMP), run_as_root=True), None), (mock.call(['iptables-save'], run_as_root=True), ''), (mock.call(['iptables-restore', '-n'], process_input=(FILTER_DUMP + MANGLE_DUMP + nat_dump + RAW_DUMP), run_as_root=True), None), ] if use_ipv6: self._extend_with_ip6tables_filter( expected_calls_and_values, FILTER_DUMP + MANGLE_DUMP_V6 + raw_dump) tools.setup_mock_calls(self.execute, expected_calls_and_values) self.iptables.ipv4['nat'].add_chain('nat') self.iptables.ipv4['nat'].add_rule('PREROUTING', '-d 192.168.0.3 -j ' '%(bn)s-nat' % IPTABLES_ARG) self.iptables.ipv4['nat'].add_rule('nat', '-p tcp --dport 8080' + ' -j REDIRECT --to-port 80') self.iptables.apply() self.iptables.ipv4['nat'].remove_rule('nat', '-p tcp --dport 8080 -j' ' REDIRECT 
--to-port 80') self.iptables.ipv4['nat'].remove_rule('PREROUTING', '-d 192.168.0.3 -j ' '%(bn)s-nat' % IPTABLES_ARG) self.iptables.ipv4['nat'].remove_chain('nat') self.iptables.apply() tools.verify_mock_calls(self.execute, expected_calls_and_values) def test_add_nat_rule(self): self._test_add_nat_rule_helper(False) def test_add_nat_rule_with_ipv6(self): self._test_add_nat_rule_helper(True) def _test_add_raw_rule_helper(self, use_ipv6): self.iptables = iptables_manager.IptablesManager( use_ipv6=use_ipv6) self.execute = mock.patch.object(self.iptables, "execute").start() raw_dump_mod = ('# Generated by iptables_manager\n' '*raw\n' ':OUTPUT - [0:0]\n' ':PREROUTING - [0:0]\n' ':%(bn)s-OUTPUT - [0:0]\n' ':%(bn)s-PREROUTING - [0:0]\n' ':%(bn)s-raw - [0:0]\n' '-I OUTPUT 1 -j %(bn)s-OUTPUT\n' '-I PREROUTING 1 -j %(bn)s-PREROUTING\n' '-I %(bn)s-PREROUTING 1 -j CT --notrack\n' 'COMMIT\n' '# Completed by iptables_manager\n' % IPTABLES_ARG) expected_calls_and_values = [ (mock.call(['iptables-save'], run_as_root=True), ''), (mock.call(['iptables-restore', '-n'], process_input=(FILTER_DUMP + MANGLE_DUMP + NAT_DUMP + raw_dump_mod), run_as_root=True), None), (mock.call(['iptables-save'], run_as_root=True), ''), (mock.call(['iptables-restore', '-n'], process_input=(FILTER_DUMP + MANGLE_DUMP + NAT_DUMP + RAW_DUMP), run_as_root=True), None), ] if use_ipv6: self._extend_with_ip6tables_filter( expected_calls_and_values, FILTER_DUMP + MANGLE_DUMP_V6 + RAW_DUMP) tools.setup_mock_calls(self.execute, expected_calls_and_values) self.iptables.ipv4['raw'].add_chain('raw') self.iptables.ipv4['raw'].add_rule('PREROUTING', '-j CT --notrack') self.iptables.apply() self.iptables.ipv4['raw'].remove_rule('PREROUTING', '-j CT --notrack') self.iptables.ipv4['raw'].remove_chain('raw') self.iptables.apply() tools.verify_mock_calls(self.execute, expected_calls_and_values) def test_add_raw_rule(self): self._test_add_raw_rule_helper(False) def test_add_raw_rule_with_ipv6(self): self._test_add_raw_rule_helper(True) def test_add_rule_to_a_nonexistent_chain(self): self.assertRaises(LookupError, self.iptables.ipv4['filter'].add_rule, 'nonexistent', '-j DROP') def test_remove_nonexistent_chain(self): with mock.patch.object(iptables_manager, "LOG") as log: self.iptables.ipv4['filter'].remove_chain('nonexistent') log.debug.assert_called_once_with( 'Attempted to remove chain %s which does not exist', 'nonexistent') def test_remove_nonexistent_rule(self): with mock.patch.object(iptables_manager, "LOG") as log: self.iptables.ipv4['filter'].remove_rule('nonexistent', '-j DROP') log.warning.assert_called_once_with( 'Tried to remove rule that was not there: ' '%(chain)r %(rule)r %(wrap)r %(top)r', {'wrap': True, 'top': False, 'rule': '-j DROP', 'chain': 'nonexistent'}) def test_iptables_failure_with_no_failing_line_number(self): with mock.patch.object(iptables_manager, "LOG") as log: # generate Runtime errors on iptables-restore calls def iptables_restore_failer(*args, **kwargs): if 'iptables-restore' in args[0]: self.input_lines = kwargs['process_input'].split('\n') # don't provide a specific failure message so all lines # are logged raise RuntimeError() return FILTER_DUMP self.execute.side_effect = iptables_restore_failer # _apply_synchronized calls iptables-restore so it should raise # a RuntimeError self.assertRaises(RuntimeError, self.iptables._apply_synchronized) # The RuntimeError should have triggered a log of the input to the # process that it failed to execute. 
Verify by comparing the log # call to the 'process_input' arg given to the failed iptables-restore # call. # Failure without a specific line number in the error should cause # all lines to be logged with numbers. logged = ['%7d. %s' % (n, l) for n, l in enumerate(self.input_lines, 1)] log.error.assert_called_once_with(_( 'IPTablesManager.apply failed to apply the ' 'following set of iptables rules:\n%s'), '\n'.join(logged) ) def test_iptables_failure_on_specific_line(self): with mock.patch.object(iptables_manager, "LOG") as log: # generate Runtime errors on iptables-restore calls def iptables_restore_failer(*args, **kwargs): if 'iptables-restore' in args[0]: self.input_lines = kwargs['process_input'].split('\n') # pretend line 11 failed msg = ("Exit code: 1\nStdout: ''\n" "Stderr: 'iptables-restore: line 11 failed\n'") raise RuntimeError(msg) return FILTER_DUMP self.execute.side_effect = iptables_restore_failer # _apply_synchronized calls iptables-restore so it should raise # a RuntimeError self.assertRaises(RuntimeError, self.iptables._apply_synchronized) # The RuntimeError should have triggered a log of the input to the # process that it failed to execute. Verify by comparing the log # call to the 'process_input' arg given to the failed iptables-restore # call. # Line 11 of the input was marked as failing so lines (11 - context) # to (11 + context) should be logged ctx = iptables_manager.IPTABLES_ERROR_LINES_OF_CONTEXT log_start = max(0, 11 - ctx) log_end = 11 + ctx logged = ['%7d. %s' % (n, l) for n, l in enumerate(self.input_lines[log_start:log_end], log_start + 1)] log.error.assert_called_once_with(_( 'IPTablesManager.apply failed to apply the ' 'following set of iptables rules:\n%s'), '\n'.join(logged) ) def test_get_traffic_counters_chain_notexists(self): with mock.patch.object(iptables_manager, "LOG") as log: acc = self.iptables.get_traffic_counters('chain1') self.assertIsNone(acc) self.assertEqual(0, self.execute.call_count) log.warning.assert_called_once_with( 'Attempted to get traffic counters of chain %s which ' 'does not exist', 'chain1') def _test_get_traffic_counters_helper(self, use_ipv6): self.iptables = iptables_manager.IptablesManager( use_ipv6=use_ipv6) self.execute = mock.patch.object(self.iptables, "execute").start() exp_packets = 800 exp_bytes = 131802 expected_calls_and_values = [ (mock.call(['iptables', '-t', 'filter', '-L', 'OUTPUT', '-n', '-v', '-x'], run_as_root=True), TRAFFIC_COUNTERS_DUMP), (mock.call(['iptables', '-t', 'raw', '-L', 'OUTPUT', '-n', '-v', '-x'], run_as_root=True), ''), (mock.call(['iptables', '-t', 'mangle', '-L', 'OUTPUT', '-n', '-v', '-x'], run_as_root=True), ''), (mock.call(['iptables', '-t', 'nat', '-L', 'OUTPUT', '-n', '-v', '-x'], run_as_root=True), ''), ] if use_ipv6: expected_calls_and_values.append( (mock.call(['ip6tables', '-t', 'raw', '-L', 'OUTPUT', '-n', '-v', '-x'], run_as_root=True), '')) expected_calls_and_values.append( (mock.call(['ip6tables', '-t', 'filter', '-L', 'OUTPUT', '-n', '-v', '-x'], run_as_root=True), TRAFFIC_COUNTERS_DUMP)) expected_calls_and_values.append( (mock.call(['ip6tables', '-t', 'mangle', '-L', 'OUTPUT', '-n', '-v', '-x'], run_as_root=True), '')) exp_packets *= 2 exp_bytes *= 2 tools.setup_mock_calls(self.execute, expected_calls_and_values) acc = self.iptables.get_traffic_counters('OUTPUT') self.assertEqual(acc['pkts'], exp_packets) self.assertEqual(acc['bytes'], exp_bytes) tools.verify_mock_calls(self.execute, expected_calls_and_values, any_order=True) def test_get_traffic_counters(self): 
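        # get_traffic_counters() sums pkts/bytes across the filter, raw,
        # mangle and nat tables -- one `iptables -t <table> -L OUTPUT -n -v
        # -x` call each -- and the helper doubles the expected totals when
        # use_ipv6 is set, since the mocked ip6tables chains report the
        # same counter dump.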
self._test_get_traffic_counters_helper(False) def test_get_traffic_counters_with_ipv6(self): self._test_get_traffic_counters_helper(True) def _test_get_traffic_counters_with_zero_helper(self, use_ipv6): self.iptables = iptables_manager.IptablesManager( use_ipv6=use_ipv6) self.execute = mock.patch.object(self.iptables, "execute").start() exp_packets = 800 exp_bytes = 131802 expected_calls_and_values = [ (mock.call(['iptables', '-t', 'filter', '-L', 'OUTPUT', '-n', '-v', '-x', '-Z'], run_as_root=True), TRAFFIC_COUNTERS_DUMP), (mock.call(['iptables', '-t', 'raw', '-L', 'OUTPUT', '-n', '-v', '-x', '-Z'], run_as_root=True), ''), (mock.call(['iptables', '-t', 'mangle', '-L', 'OUTPUT', '-n', '-v', '-x', '-Z'], run_as_root=True), ''), (mock.call(['iptables', '-t', 'nat', '-L', 'OUTPUT', '-n', '-v', '-x', '-Z'], run_as_root=True), '') ] if use_ipv6: expected_calls_and_values.append( (mock.call(['ip6tables', '-t', 'raw', '-L', 'OUTPUT', '-n', '-v', '-x', '-Z'], run_as_root=True), '')) expected_calls_and_values.append( (mock.call(['ip6tables', '-t', 'filter', '-L', 'OUTPUT', '-n', '-v', '-x', '-Z'], run_as_root=True), TRAFFIC_COUNTERS_DUMP)) expected_calls_and_values.append( (mock.call(['ip6tables', '-t', 'mangle', '-L', 'OUTPUT', '-n', '-v', '-x', '-Z'], run_as_root=True), '')) exp_packets *= 2 exp_bytes *= 2 tools.setup_mock_calls(self.execute, expected_calls_and_values) acc = self.iptables.get_traffic_counters('OUTPUT', zero=True) self.assertEqual(acc['pkts'], exp_packets) self.assertEqual(acc['bytes'], exp_bytes) tools.verify_mock_calls(self.execute, expected_calls_and_values, any_order=True) def test_get_traffic_counters_with_zero(self): self._test_get_traffic_counters_with_zero_helper(False) def test_get_traffic_counters_with_zero_with_ipv6(self): self._test_get_traffic_counters_with_zero_helper(True) class IptablesManagerStateLessTestCase(base.BaseTestCase): def setUp(self): super(IptablesManagerStateLessTestCase, self).setUp() cfg.CONF.set_override('comment_iptables_rules', False, 'AGENT') self.iptables = (iptables_manager.IptablesManager(state_less=True)) def test_nat_not_found(self): self.assertNotIn('nat', self.iptables.ipv4) def test_mangle_not_found(self): self.assertNotIn('mangle', self.iptables.ipv4) neutron-8.4.0/neutron/tests/unit/agent/linux/__init__.py0000664000567000056710000000000013044372736024536 0ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/tests/unit/agent/linux/test_ipset_manager.py0000664000567000056710000001447713044372736026703 0ustar jenkinsjenkins00000000000000# # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
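# The cases below pin down IpsetManager's two update strategies: small
# membership changes are expected as incremental `ipset add`/`ipset del`
# calls, while initial population and larger changes go through an atomic
# create/restore/swap/destroy cycle.  The helper below is a minimal sketch
# of the `ipset restore` payload that expect_set() reconstructs; it is
# illustrative only and not part of ipset_manager itself.


def _example_restore_payload(set_name, addresses):
    # Mirrors the payload format asserted by expect_set(): one create line
    # for the temporary swap set, then one add line per member.
    lines = ['create %s hash:net family inet' % set_name]
    lines.extend('add %s %s' % (set_name, ip) for ip in addresses)
    return '\n'.join(lines)

# For a single member, the payload for the temporary set (the live set
# name plus ipset_manager.SWAP_SUFFIX) would read:
#     create <swap-set-name> hash:net family inet
#     add <swap-set-name> 10.0.0.1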
import mock

from neutron.agent.linux import ipset_manager
from neutron.tests import base

TEST_SET_ID = 'fake_sgid'
ETHERTYPE = 'IPv4'
TEST_SET_NAME = ipset_manager.IpsetManager.get_name(TEST_SET_ID, ETHERTYPE)
TEST_SET_NAME_NEW = TEST_SET_NAME + ipset_manager.SWAP_SUFFIX

FAKE_IPS = ['10.0.0.1', '10.0.0.2', '10.0.0.3', '10.0.0.4',
            '10.0.0.5', '10.0.0.6']


class BaseIpsetManagerTest(base.BaseTestCase):
    def setUp(self):
        super(BaseIpsetManagerTest, self).setUp()
        self.ipset = ipset_manager.IpsetManager()
        self.execute = mock.patch.object(self.ipset, "execute").start()
        self.expected_calls = []
        self.expect_create()
        self.force_sorted_get_set_ips()

    def force_sorted_get_set_ips(self):
        """Force sorted responses from self.ipset._get_new/deleted_set_ips.

        _get_new/deleted_set_ips use sets internally and therefore return
        their results in arbitrary order.  Forcing sorted responses here
        guarantees a deterministic call order in self.ipset.set_members.
        """
        original_get_new_set_ips = self.ipset._get_new_set_ips
        original_get_deleted_set_ips = self.ipset._get_deleted_set_ips

        def sorted_get_new_set_ips(set_name, expected_ips):
            unsorted = original_get_new_set_ips(set_name, expected_ips)
            return sorted(unsorted)

        def sorted_get_deleted_set_ips(set_name, expected_ips):
            unsorted = original_get_deleted_set_ips(set_name, expected_ips)
            return sorted(unsorted)

        mock.patch.object(self.ipset, '_get_new_set_ips',
                          side_effect=sorted_get_new_set_ips).start()
        mock.patch.object(self.ipset, '_get_deleted_set_ips',
                          side_effect=sorted_get_deleted_set_ips).start()

    def verify_mock_calls(self):
        self.execute.assert_has_calls(self.expected_calls, any_order=False)

    def expect_set(self, addresses):
        # Full rewrite path: restore into a temporary set, swap it in,
        # then destroy the old copy.
        temp_input = ['create %s hash:net family inet' % TEST_SET_NAME_NEW]
        temp_input.extend('add %s %s' % (TEST_SET_NAME_NEW, ip)
                          for ip in self.ipset._sanitize_addresses(addresses))
        input = '\n'.join(temp_input)
        self.expected_calls.extend([
            mock.call(['ipset', 'restore', '-exist'],
                      process_input=input,
                      run_as_root=True,
                      check_exit_code=True),
            mock.call(['ipset', 'swap', TEST_SET_NAME_NEW, TEST_SET_NAME],
                      process_input=None,
                      run_as_root=True,
                      check_exit_code=True),
            mock.call(['ipset', 'destroy', TEST_SET_NAME_NEW],
                      process_input=None,
                      run_as_root=True,
                      check_exit_code=False)])

    def expect_add(self, addresses):
        self.expected_calls.extend(
            mock.call(['ipset', 'add', '-exist', TEST_SET_NAME, ip],
                      process_input=None,
                      run_as_root=True,
                      check_exit_code=True)
            for ip in self.ipset._sanitize_addresses(addresses))

    def expect_del(self, addresses):
        self.expected_calls.extend(
            mock.call(['ipset', 'del', TEST_SET_NAME, ip],
                      process_input=None,
                      run_as_root=True,
                      check_exit_code=False)
            for ip in self.ipset._sanitize_addresses(addresses))

    def expect_create(self):
        self.expected_calls.append(
            mock.call(['ipset', 'create', '-exist', TEST_SET_NAME,
                       'hash:net', 'family', 'inet'],
                      process_input=None,
                      run_as_root=True,
                      check_exit_code=True))

    def expect_destroy(self):
        self.expected_calls.append(
            mock.call(['ipset', 'destroy', TEST_SET_NAME],
                      process_input=None,
                      run_as_root=True,
                      check_exit_code=False))

    def add_first_ip(self):
        self.expect_set([FAKE_IPS[0]])
        self.ipset.set_members(TEST_SET_ID, ETHERTYPE, [FAKE_IPS[0]])

    def add_all_ips(self):
        self.expect_set(FAKE_IPS)
        self.ipset.set_members(TEST_SET_ID, ETHERTYPE, FAKE_IPS)


class IpsetManagerTestCase(BaseIpsetManagerTest):
    def test_set_name_exists(self):
        self.add_first_ip()
        self.assertTrue(self.ipset.set_name_exists('N' + ETHERTYPE +
                                                   TEST_SET_ID))

    def test_set_members_with_first_add_member(self):
        self.add_first_ip()
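        # Even a single first member goes through the full
        # restore/swap/destroy cycle recorded by expect_set(), not an
        # incremental "ipset add".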
self.verify_mock_calls() def test_set_members_adding_less_than_5(self): self.add_first_ip() self.expect_add(FAKE_IPS[1:5]) self.ipset.set_members(TEST_SET_ID, ETHERTYPE, FAKE_IPS[0:5]) self.verify_mock_calls() def test_set_members_deleting_less_than_5(self): self.add_all_ips() self.expect_del(FAKE_IPS[3:]) self.ipset.set_members(TEST_SET_ID, ETHERTYPE, FAKE_IPS[0:3]) self.verify_mock_calls() def test_set_members_adding_more_than_5(self): self.add_first_ip() self.expect_set(FAKE_IPS) self.ipset.set_members(TEST_SET_ID, ETHERTYPE, FAKE_IPS) self.verify_mock_calls() def test_set_members_adding_all_zero_ipv4(self): self.expect_set(['0.0.0.0/0']) self.ipset.set_members(TEST_SET_ID, ETHERTYPE, ['0.0.0.0/0']) self.verify_mock_calls() def test_set_members_adding_all_zero_ipv6(self): self.expect_set(['::/0']) self.ipset.set_members(TEST_SET_ID, ETHERTYPE, ['::/0']) self.verify_mock_calls() def test_destroy(self): self.add_first_ip() self.expect_destroy() self.ipset.destroy(TEST_SET_ID, ETHERTYPE) self.verify_mock_calls() neutron-8.4.0/neutron/tests/unit/agent/linux/test_async_process.py0000664000567000056710000002750413044372760026730 0ustar jenkinsjenkins00000000000000# Copyright 2013 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import signal import eventlet.event import eventlet.queue import eventlet.timeout import mock import testtools from neutron.agent.linux import async_process from neutron.agent.linux import utils from neutron.tests import base from neutron.tests.unit.agent.linux import failing_process class TestAsyncProcess(base.BaseTestCase): def setUp(self): super(TestAsyncProcess, self).setUp() self.proc = async_process.AsyncProcess(['fake']) def test_construtor_raises_exception_for_negative_respawn_interval(self): with testtools.ExpectedException(ValueError): async_process.AsyncProcess(['fake'], respawn_interval=-1) def test__spawn(self): expected_process = 'Foo' proc = self.proc with mock.patch.object(utils, 'create_process') as mock_create_process: mock_create_process.return_value = [expected_process, None] with mock.patch('eventlet.spawn') as mock_spawn: proc._spawn() self.assertTrue(self.proc._is_running) self.assertIsInstance(proc._kill_event, eventlet.event.Event) self.assertEqual(proc._process, expected_process) mock_spawn.assert_has_calls([ mock.call(proc._watch_process, proc._read_stdout, proc._kill_event), mock.call(proc._watch_process, proc._read_stderr, proc._kill_event), ]) self.assertEqual(len(proc._watchers), 2) def test__handle_process_error_kills_with_respawn(self): with mock.patch.object(self.proc, '_kill') as kill: self.proc._handle_process_error() kill.assert_has_calls([mock.call(signal.SIGKILL)]) def test__handle_process_error_kills_without_respawn(self): self.proc.respawn_interval = 1 with mock.patch.object(self.proc, '_kill') as kill: with mock.patch.object(self.proc, '_spawn') as spawn: with mock.patch('eventlet.sleep') as sleep: self.proc._handle_process_error() kill.assert_has_calls([mock.call(signal.SIGKILL)]) 
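        # With a positive respawn_interval, the error handler must also
        # back off for that interval and then respawn the child process,
        # which is what the sleep/spawn assertions below verify.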
sleep.assert_has_calls([mock.call(self.proc.respawn_interval)]) spawn.assert_called_once_with() def test__handle_process_error_no_crash_if_started(self): self.proc._is_running = True with mock.patch.object(self.proc, '_kill'): with mock.patch.object(self.proc, '_spawn') as mock_spawn: self.proc._handle_process_error() mock_spawn.assert_not_called() def _watch_process_exception(self): raise Exception('Error!') def _test__watch_process(self, callback, kill_event): self.proc._is_running = True self.proc._kill_event = kill_event # Ensure the test times out eventually if the watcher loops endlessly with eventlet.timeout.Timeout(5): with mock.patch.object(self.proc, '_handle_process_error') as func: self.proc._watch_process(callback, kill_event) if not kill_event.ready(): func.assert_called_once_with() def test__watch_process_exits_on_callback_failure(self): self._test__watch_process(lambda: None, eventlet.event.Event()) def test__watch_process_exits_on_exception(self): self._test__watch_process(self._watch_process_exception, eventlet.event.Event()) with mock.patch.object(self.proc, '_handle_process_error') as func: self.proc._watch_process(self._watch_process_exception, self.proc._kill_event) func.assert_not_called() def test__watch_process_exits_on_sent_kill_event(self): kill_event = eventlet.event.Event() kill_event.send() self._test__watch_process(None, kill_event) def _test_read_output_queues_and_returns_result(self, output): queue = eventlet.queue.LightQueue() mock_stream = mock.Mock() with mock.patch.object(mock_stream, 'readline') as mock_readline: mock_readline.return_value = output result = self.proc._read(mock_stream, queue) if output: self.assertEqual(output, result) self.assertEqual(output, queue.get_nowait()) else: self.assertFalse(result) self.assertTrue(queue.empty()) def test__read_queues_and_returns_output(self): self._test_read_output_queues_and_returns_result('foo') def test__read_returns_none_for_missing_output(self): self._test_read_output_queues_and_returns_result('') def test_start_raises_exception_if_process_already_started(self): self.proc._is_running = True with testtools.ExpectedException(async_process.AsyncProcessException): self.proc.start() def test_start_invokes__spawn(self): with mock.patch.object(self.proc, '_spawn') as mock_start: self.proc.start() mock_start.assert_called_once_with() def test__iter_queue_returns_empty_list_for_empty_queue(self): result = list(self.proc._iter_queue(eventlet.queue.LightQueue(), False)) self.assertEqual([], result) def test__iter_queue_returns_queued_data(self): queue = eventlet.queue.LightQueue() queue.put('foo') result = list(self.proc._iter_queue(queue, False)) self.assertEqual(result, ['foo']) def _test_iter_output_calls_iter_queue_on_output_queue(self, output_type): expected_value = 'foo' with mock.patch.object(self.proc, '_iter_queue') as mock_iter_queue: mock_iter_queue.return_value = expected_value target_func = getattr(self.proc, 'iter_%s' % output_type, None) value = target_func() self.assertEqual(value, expected_value) queue = getattr(self.proc, '_%s_lines' % output_type, None) mock_iter_queue.assert_called_with(queue, False) def test_iter_stdout(self): self._test_iter_output_calls_iter_queue_on_output_queue('stdout') def test_iter_stderr(self): self._test_iter_output_calls_iter_queue_on_output_queue('stderr') def test__kill_targets_process_for_pid(self): pid = 1 with mock.patch.object(self.proc, '_kill_event' ) as mock_kill_event,\ mock.patch.object(utils, 'get_root_helper_child_pid', return_value=pid),\ 
mock.patch.object(self.proc, '_kill_process' ) as mock_kill_process,\ mock.patch.object(self.proc, '_process'): self.proc._kill(signal.SIGKILL) self.assertIsNone(self.proc._kill_event) self.assertFalse(self.proc._is_running) mock_kill_event.send.assert_called_once_with() if pid: mock_kill_process.assert_called_once_with(pid, signal.SIGKILL) def _test__kill_process(self, pid, expected, exception_message=None, kill_signal=signal.SIGKILL): self.proc.run_as_root = True if exception_message: exc = RuntimeError(exception_message) else: exc = None with mock.patch.object(utils, 'execute', side_effect=exc) as mock_execute: actual = self.proc._kill_process(pid, kill_signal) self.assertEqual(expected, actual) mock_execute.assert_called_with(['kill', '-%d' % kill_signal, pid], run_as_root=self.proc.run_as_root) def test__kill_process_returns_true_for_valid_pid(self): self._test__kill_process('1', True) def test__kill_process_returns_true_for_stale_pid(self): self._test__kill_process('1', True, 'No such process') def test__kill_process_returns_false_for_execute_exception(self): self._test__kill_process('1', False, 'Invalid') def test_kill_process_with_different_signal(self): self._test__kill_process('1', True, kill_signal=signal.SIGTERM) def test_stop_calls_kill_with_provided_signal_number(self): self.proc._is_running = True with mock.patch.object(self.proc, '_kill') as mock_kill: self.proc.stop(kill_signal=signal.SIGTERM) mock_kill.assert_called_once_with(signal.SIGTERM) def test_stop_raises_exception_if_already_started(self): with testtools.ExpectedException(async_process.AsyncProcessException): self.proc.stop() def test_cmd(self): for expected, cmd in (('ls -l file', ['ls', '-l', 'file']), ('fake', ['fake'])): proc = async_process.AsyncProcess(cmd) self.assertEqual(expected, proc.cmd) class TestAsyncProcessLogging(base.BaseTestCase): def setUp(self): super(TestAsyncProcessLogging, self).setUp() self.log_mock = mock.patch.object(async_process, 'LOG').start() def _test__read_stdout_logging(self, enable): proc = async_process.AsyncProcess(['fakecmd'], log_output=enable) with mock.patch.object(proc, '_read', return_value='fakedata'),\ mock.patch.object(proc, '_process'): proc._read_stdout() self.assertEqual(enable, self.log_mock.debug.called) def _test__read_stderr_logging(self, enable): proc = async_process.AsyncProcess(['fake'], log_output=enable) with mock.patch.object(proc, '_read', return_value='fakedata'),\ mock.patch.object(proc, '_process'): proc._read_stderr() self.assertEqual(enable, self.log_mock.error.called) def test__read_stdout_logging_enabled(self): self._test__read_stdout_logging(enable=True) def test__read_stdout_logging_disabled(self): self._test__read_stdout_logging(enable=False) def test__read_stderr_logging_enabled(self): self._test__read_stderr_logging(enable=True) def test__read_stderr_logging_disabled(self): self._test__read_stderr_logging(enable=False) class TestAsyncProcessDieOnError(base.BaseTestCase): def test__read_stderr_returns_none_on_error(self): proc = async_process.AsyncProcess(['fakecmd'], die_on_error=True) with mock.patch.object(proc, '_read', return_value='fakedata'),\ mock.patch.object(proc, '_process'): self.assertIsNone(proc._read_stderr()) class TestFailingAsyncProcess(base.BaseTestCase): def setUp(self): super(TestFailingAsyncProcess, self).setUp() path = self.get_temp_file_path('async.tmp', self.get_new_temp_dir()) self.process = async_process.AsyncProcess(['python', failing_process.__file__, path], respawn_interval=0) def 
test_failing_async_process_handle_error_once(self): with mock.patch.object(self.process, '_handle_process_error')\ as handle_error_mock: self.process.start() self.process._process.wait() # Wait for the monitor process to complete for thread in self.process._watchers: thread.wait() self.assertEqual(1, handle_error_mock.call_count) neutron-8.4.0/neutron/tests/unit/agent/linux/test_polling.py0000664000567000056710000000477013044372760025521 0ustar jenkinsjenkins00000000000000# Copyright 2013 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import mock from neutron.agent.common import base_polling from neutron.agent.linux import polling from neutron.tests import base class TestGetPollingManager(base.BaseTestCase): def test_return_always_poll_by_default(self): with polling.get_polling_manager() as pm: self.assertEqual(pm.__class__, base_polling.AlwaysPoll) def test_manage_polling_minimizer(self): mock_target = 'neutron.agent.linux.polling.InterfacePollingMinimizer' with mock.patch('%s.start' % mock_target) as mock_start: with mock.patch('%s.stop' % mock_target) as mock_stop: with polling.get_polling_manager(minimize_polling=True) as pm: self.assertEqual(pm.__class__, polling.InterfacePollingMinimizer) mock_stop.assert_has_calls([mock.call()]) mock_start.assert_has_calls([mock.call()]) class TestInterfacePollingMinimizer(base.BaseTestCase): def setUp(self): super(TestInterfacePollingMinimizer, self).setUp() self.pm = polling.InterfacePollingMinimizer() def test_start_calls_monitor_start(self): with mock.patch.object(self.pm._monitor, 'start') as mock_start: self.pm.start() mock_start.assert_called_with() def test_stop_calls_monitor_stop(self): with mock.patch.object(self.pm._monitor, 'stop') as mock_stop: self.pm.stop() mock_stop.assert_called_with() def mock_has_updates(self, return_value): target = ('neutron.agent.linux.ovsdb_monitor.SimpleInterfaceMonitor' '.has_updates') return mock.patch( target, new_callable=mock.PropertyMock(return_value=return_value), ) def test__is_polling_required_returns_when_updates_are_present(self): with self.mock_has_updates(True): self.assertTrue(self.pm._is_polling_required()) neutron-8.4.0/neutron/tests/unit/agent/linux/test_interface.py0000664000567000056710000006466313044372760026024 0ustar jenkinsjenkins00000000000000# Copyright 2012 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
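# The driver tests below all follow one pattern: TestBase.setUp() replaces
# ip_lib.IPDevice, ip_lib.IPWrapper and ip_lib.device_exists with mocks, a
# concrete LinuxInterfaceDriver is exercised, and the recorded mock calls
# are compared with the exact ip-level operations the driver should issue.
# As a minimal sketch of the driver contract the fakes rely on (a
# hypothetical no-op class, shown in comments for illustration only --
# BaseChild below is what the tests actually use; the plug_new signature
# here assumes the Mitaka-era mtu keyword noted in FakeInterfaceDriverNoMtu):
#
#     class NoopDriver(interface.LinuxInterfaceDriver):
#         def plug_new(self, network_id, port_id, device_name, mac_address,
#                      bridge=None, namespace=None, prefix=None, mtu=None):
#             pass  # a real driver creates and wires the device here
#
#         def unplug(self, device_name, bridge=None, namespace=None,
#                    prefix=None):
#             pass  # a real driver tears the device down here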
import mock import testtools from neutron.agent.common import config from neutron.agent.common import ovs_lib from neutron.agent.linux import interface from neutron.agent.linux import ip_lib from neutron.agent.linux import utils from neutron.common import constants from neutron.tests import base class BaseChild(interface.LinuxInterfaceDriver): def plug_new(*args): pass def unplug(*args): pass class FakeNetwork(object): id = '12345678-1234-5678-90ab-ba0987654321' class FakeSubnet(object): cidr = '192.168.1.1/24' class FakeAllocation(object): subnet = FakeSubnet() ip_address = '192.168.1.2' ip_version = 4 class FakePort(object): id = 'abcdef01-1234-5678-90ab-ba0987654321' fixed_ips = [FakeAllocation] device_id = 'cccccccc-cccc-cccc-cccc-cccccccccccc' network = FakeNetwork() network_id = network.id class FakeInterfaceDriverNoMtu(interface.LinuxInterfaceDriver): # NOTE(ihrachys) this method intentially omit mtu= parameter, since that # was the method signature before Mitaka. We should make sure the old # signature still works. def __init__(self, *args, **kwargs): super(FakeInterfaceDriverNoMtu, self).__init__(*args, **kwargs) self.plug_called = False def plug_new(self, network_id, port_id, device_name, mac_address, bridge=None, namespace=None, prefix=None): self.plug_called = True def unplug(self, device_name, bridge=None, namespace=None, prefix=None): pass class TestBase(base.BaseTestCase): def setUp(self): super(TestBase, self).setUp() self.conf = config.setup_conf() self.conf.register_opts(interface.OPTS) self.ip_dev_p = mock.patch.object(ip_lib, 'IPDevice') self.ip_dev = self.ip_dev_p.start() self.ip_p = mock.patch.object(ip_lib, 'IPWrapper') self.ip = self.ip_p.start() self.device_exists_p = mock.patch.object(ip_lib, 'device_exists') self.device_exists = self.device_exists_p.start() class TestABCDriverNoMtu(TestBase): def test_plug_with_no_mtu_works(self): driver = FakeInterfaceDriverNoMtu(self.conf) self.device_exists.return_value = False driver.plug( mock.Mock(), mock.Mock(), mock.Mock(), mock.Mock(), mtu=9000) self.assertTrue(driver.plug_called) class TestABCDriver(TestBase): def setUp(self): super(TestABCDriver, self).setUp() mock_link_addr = mock.PropertyMock(return_value='aa:bb:cc:dd:ee:ff') type(self.ip_dev().link).address = mock_link_addr def test_get_device_name(self): bc = BaseChild(self.conf) device_name = bc.get_device_name(FakePort()) self.assertEqual('tapabcdef01-12', device_name) def test_init_router_port(self): addresses = [dict(scope='global', dynamic=False, cidr='172.16.77.240/24')] self.ip_dev().addr.list = mock.Mock(return_value=addresses) self.ip_dev().route.list_onlink_routes.return_value = [] bc = BaseChild(self.conf) ns = '12345678-1234-5678-90ab-ba0987654321' bc.init_router_port('tap0', ['192.168.1.2/24'], namespace=ns, extra_subnets=[{'cidr': '172.20.0.0/24'}]) self.ip_dev.assert_has_calls( [mock.call('tap0', namespace=ns), mock.call().addr.list(filters=['permanent']), mock.call().addr.add('192.168.1.2/24'), mock.call().addr.delete('172.16.77.240/24'), mock.call('tap0', namespace=ns), mock.call().route.list_onlink_routes(constants.IP_VERSION_4), mock.call().route.list_onlink_routes(constants.IP_VERSION_6), mock.call().route.add_onlink_route('172.20.0.0/24')]) def test_init_router_port_delete_onlink_routes(self): addresses = [dict(scope='global', dynamic=False, cidr='172.16.77.240/24')] self.ip_dev().addr.list = mock.Mock(return_value=addresses) self.ip_dev().route.list_onlink_routes.return_value = [ {'cidr': '172.20.0.0/24'}] bc = BaseChild(self.conf) ns = 
'12345678-1234-5678-90ab-ba0987654321' bc.init_router_port('tap0', ['192.168.1.2/24'], namespace=ns) self.ip_dev.assert_has_calls( [mock.call().route.list_onlink_routes(constants.IP_VERSION_4), mock.call().route.list_onlink_routes(constants.IP_VERSION_6), mock.call().route.delete_onlink_route('172.20.0.0/24')]) def test_l3_init_with_preserve(self): addresses = [dict(scope='global', dynamic=False, cidr='192.168.1.3/32')] self.ip_dev().addr.list = mock.Mock(return_value=addresses) bc = BaseChild(self.conf) ns = '12345678-1234-5678-90ab-ba0987654321' bc.init_l3('tap0', ['192.168.1.2/24'], namespace=ns, preserve_ips=['192.168.1.3/32']) self.ip_dev.assert_has_calls( [mock.call('tap0', namespace=ns), mock.call().addr.list(filters=['permanent']), mock.call().addr.add('192.168.1.2/24')]) self.assertFalse(self.ip_dev().addr.delete.called) self.assertFalse(self.ip_dev().delete_addr_and_conntrack_state.called) def _test_l3_init_clean_connections(self, clean_connections): addresses = [ dict(scope='global', dynamic=False, cidr='10.0.0.1/24'), dict(scope='global', dynamic=False, cidr='10.0.0.3/32')] self.ip_dev().addr.list = mock.Mock(return_value=addresses) bc = BaseChild(self.conf) ns = '12345678-1234-5678-90ab-ba0987654321' bc.init_l3('tap0', ['10.0.0.1/24'], namespace=ns, clean_connections=clean_connections) delete = self.ip_dev().delete_addr_and_conntrack_state if clean_connections: delete.assert_called_once_with('10.0.0.3/32') else: self.assertFalse(delete.called) def test_l3_init_with_clean_connections(self): self._test_l3_init_clean_connections(True) def test_l3_init_without_clean_connections(self): self._test_l3_init_clean_connections(False) def test_init_router_port_ipv6_with_gw_ip(self): addresses = [dict(scope='global', dynamic=False, cidr='2001:db8:a::123/64')] self.ip_dev().addr.list = mock.Mock(return_value=addresses) self.ip_dev().route.list_onlink_routes.return_value = [] bc = BaseChild(self.conf) ns = '12345678-1234-5678-90ab-ba0987654321' new_cidr = '2001:db8:a::124/64' kwargs = {'namespace': ns, 'extra_subnets': [{'cidr': '2001:db8:b::/64'}]} bc.init_router_port('tap0', [new_cidr], **kwargs) expected_calls = ( [mock.call('tap0', namespace=ns), mock.call().addr.list(filters=['permanent']), mock.call().addr.add('2001:db8:a::124/64'), mock.call().addr.delete('2001:db8:a::123/64')]) expected_calls += ( [mock.call('tap0', namespace=ns), mock.call().route.list_onlink_routes(constants.IP_VERSION_4), mock.call().route.list_onlink_routes(constants.IP_VERSION_6), mock.call().route.add_onlink_route('2001:db8:b::/64')]) self.ip_dev.assert_has_calls(expected_calls) def test_init_router_port_ext_gw_with_dual_stack(self): old_addrs = [dict(ip_version=4, scope='global', dynamic=False, cidr='172.16.77.240/24'), dict(ip_version=6, scope='global', dynamic=False, cidr='2001:db8:a::123/64')] self.ip_dev().addr.list = mock.Mock(return_value=old_addrs) self.ip_dev().route.list_onlink_routes.return_value = [] bc = BaseChild(self.conf) ns = '12345678-1234-5678-90ab-ba0987654321' new_cidrs = ['192.168.1.2/24', '2001:db8:a::124/64'] bc.init_router_port('tap0', new_cidrs, namespace=ns, extra_subnets=[{'cidr': '172.20.0.0/24'}]) self.ip_dev.assert_has_calls( [mock.call('tap0', namespace=ns), mock.call().addr.list(filters=['permanent']), mock.call().addr.add('192.168.1.2/24'), mock.call().addr.add('2001:db8:a::124/64'), mock.call().addr.delete('172.16.77.240/24'), mock.call().addr.delete('2001:db8:a::123/64'), mock.call().route.list_onlink_routes(constants.IP_VERSION_4), 
mock.call().route.list_onlink_routes(constants.IP_VERSION_6), mock.call().route.add_onlink_route('172.20.0.0/24')], any_order=True) def test_init_router_port_with_ipv6_delete_onlink_routes(self): addresses = [dict(scope='global', dynamic=False, cidr='2001:db8:a::123/64')] route = '2001:db8:a::/64' self.ip_dev().addr.list = mock.Mock(return_value=addresses) self.ip_dev().route.list_onlink_routes.return_value = [{'cidr': route}] bc = BaseChild(self.conf) ns = '12345678-1234-5678-90ab-ba0987654321' bc.init_router_port('tap0', ['2001:db8:a::124/64'], namespace=ns) self.ip_dev.assert_has_calls( [mock.call().route.list_onlink_routes(constants.IP_VERSION_4), mock.call().route.list_onlink_routes(constants.IP_VERSION_6), mock.call().route.delete_onlink_route(route)]) def test_l3_init_with_duplicated_ipv6(self): addresses = [dict(scope='global', dynamic=False, cidr='2001:db8:a::123/64')] self.ip_dev().addr.list = mock.Mock(return_value=addresses) bc = BaseChild(self.conf) ns = '12345678-1234-5678-90ab-ba0987654321' bc.init_l3('tap0', ['2001:db8:a::123/64'], namespace=ns) self.assertFalse(self.ip_dev().addr.add.called) def test_l3_init_with_duplicated_ipv6_uncompact(self): addresses = [dict(scope='global', dynamic=False, cidr='2001:db8:a::123/64')] self.ip_dev().addr.list = mock.Mock(return_value=addresses) bc = BaseChild(self.conf) ns = '12345678-1234-5678-90ab-ba0987654321' bc.init_l3('tap0', ['2001:db8:a:0000:0000:0000:0000:0123/64'], namespace=ns) self.assertFalse(self.ip_dev().addr.add.called) def test_add_ipv6_addr(self): device_name = 'tap0' cidr = '2001:db8::/64' ns = '12345678-1234-5678-90ab-ba0987654321' bc = BaseChild(self.conf) bc.add_ipv6_addr(device_name, cidr, ns) self.ip_dev.assert_has_calls( [mock.call(device_name, namespace=ns), mock.call().addr.add(cidr, 'global')]) def test_delete_ipv6_addr(self): device_name = 'tap0' cidr = '2001:db8::/64' ns = '12345678-1234-5678-90ab-ba0987654321' bc = BaseChild(self.conf) bc.delete_ipv6_addr(device_name, cidr, ns) self.ip_dev.assert_has_calls( [mock.call(device_name, namespace=ns), mock.call().delete_addr_and_conntrack_state(cidr)]) def test_delete_ipv6_addr_with_prefix(self): device_name = 'tap0' prefix = '2001:db8::/48' in_cidr = '2001:db8::/64' out_cidr = '2001:db7::/64' ns = '12345678-1234-5678-90ab-ba0987654321' in_addresses = [dict(scope='global', dynamic=False, cidr=in_cidr)] out_addresses = [dict(scope='global', dynamic=False, cidr=out_cidr)] # Initially set the address list to be empty self.ip_dev().addr.list = mock.Mock(return_value=[]) bc = BaseChild(self.conf) # Call delete_v6addr_with_prefix when the address list is empty bc.delete_ipv6_addr_with_prefix(device_name, prefix, ns) # Assert that delete isn't called self.assertFalse(self.ip_dev().delete_addr_and_conntrack_state.called) # Set the address list to contain only an address outside of the range # of the given prefix self.ip_dev().addr.list = mock.Mock(return_value=out_addresses) bc.delete_ipv6_addr_with_prefix(device_name, prefix, ns) # Assert that delete isn't called self.assertFalse(self.ip_dev().delete_addr_and_conntrack_state.called) # Set the address list to contain only an address inside of the range # of the given prefix self.ip_dev().addr.list = mock.Mock(return_value=in_addresses) bc.delete_ipv6_addr_with_prefix(device_name, prefix, ns) # Assert that delete is called self.ip_dev.assert_has_calls( [mock.call(device_name, namespace=ns), mock.call().addr.list(scope='global', filters=['permanent']), mock.call().delete_addr_and_conntrack_state(in_cidr)]) def 
    def test_get_ipv6_llas(self):
        ns = '12345678-1234-5678-90ab-ba0987654321'
        addresses = [dict(scope='link',
                          dynamic=False,
                          cidr='fe80:cafe::/64')]
        self.ip_dev().addr.list = mock.Mock(return_value=addresses)
        device_name = self.ip_dev().name
        bc = BaseChild(self.conf)

        llas = bc.get_ipv6_llas(device_name, ns)

        self.assertEqual(addresses, llas)
        self.ip_dev.assert_has_calls(
            [mock.call(device_name, namespace=ns),
             mock.call().addr.list(scope='link', ip_version=6)])


class TestOVSInterfaceDriver(TestBase):

    def test_get_device_name(self):
        br = interface.OVSInterfaceDriver(self.conf)
        device_name = br.get_device_name(FakePort())
        self.assertEqual('tapabcdef01-12', device_name)

    def test_plug_no_ns(self):
        self._test_plug()

    def test_plug_with_ns(self):
        self._test_plug(namespace='01234567-1234-1234-99')

    def test_plug_alt_bridge(self):
        self._test_plug(bridge='br-foo')

    def test_plug_configured_bridge(self):
        br = 'br-v'
        self.conf.set_override('ovs_use_veth', False)
        self.conf.set_override('ovs_integration_bridge', br)
        self.assertEqual(self.conf.ovs_integration_bridge, br)

        def device_exists(dev, namespace=None):
            return dev == br

        ovs = interface.OVSInterfaceDriver(self.conf)
        with mock.patch.object(ovs, '_ovs_add_port') as add_port:
            self.device_exists.side_effect = device_exists
            ovs.plug('01234567-1234-1234-99',
                     'port-1234',
                     'tap0',
                     'aa:bb:cc:dd:ee:ff',
                     bridge=None,
                     namespace=None)

        add_port.assert_called_once_with('br-v',
                                         'tap0',
                                         'port-1234',
                                         'aa:bb:cc:dd:ee:ff',
                                         internal=True)

    def _test_plug(self, additional_expectation=None, bridge=None,
                   namespace=None):
        additional_expectation = additional_expectation or []
        if not bridge:
            bridge = 'br-int'

        def device_exists(dev, namespace=None):
            return dev == bridge

        with mock.patch.object(ovs_lib.OVSBridge, 'replace_port') as replace:
            ovs = interface.OVSInterfaceDriver(self.conf)
            self.device_exists.side_effect = device_exists
            ovs.plug('01234567-1234-1234-99',
                     'port-1234',
                     'tap0',
                     'aa:bb:cc:dd:ee:ff',
                     bridge=bridge,
                     namespace=namespace)
            replace.assert_called_once_with(
                'tap0',
                ('type', 'internal'),
                ('external_ids', {
                    'iface-id': 'port-1234',
                    'iface-status': 'active',
                    'attached-mac': 'aa:bb:cc:dd:ee:ff'}))

        expected = [mock.call(),
                    mock.call().device('tap0'),
                    mock.call().device().link.set_address('aa:bb:cc:dd:ee:ff')]
        expected.extend(additional_expectation)
        if namespace:
            expected.extend(
                [mock.call().ensure_namespace(namespace),
                 mock.call().ensure_namespace().add_device_to_namespace(
                     mock.ANY)])
        expected.extend([mock.call().device().link.set_up()])

        self.ip.assert_has_calls(expected)

    def test_mtu_int(self):
        self.assertIsNone(self.conf.network_device_mtu)
        self.conf.set_override('network_device_mtu', 9000)
        self.assertEqual(self.conf.network_device_mtu, 9000)

    def test_validate_min_ipv6_mtu(self):
        self.conf.set_override('network_device_mtu', 1200)
        with mock.patch('neutron.common.ipv6_utils.is_enabled') as ipv6_status:
            with testtools.ExpectedException(SystemExit):
                ipv6_status.return_value = True
                BaseChild(self.conf)

    def test_plug_mtu(self):
        self.conf.set_override('network_device_mtu', 9000)
        self._test_plug([mock.call().device().link.set_mtu(9000)])

    def test_unplug(self, bridge=None):
        if not bridge:
            bridge = 'br-int'
        with mock.patch('neutron.agent.common.ovs_lib.OVSBridge') as ovs_br:
            ovs = interface.OVSInterfaceDriver(self.conf)
            ovs.unplug('tap0')
            ovs_br.assert_has_calls([mock.call(bridge),
                                     mock.call().delete_port('tap0')])


class TestOVSInterfaceDriverWithVeth(TestOVSInterfaceDriver):

    def setUp(self):
        super(TestOVSInterfaceDriverWithVeth, self).setUp()
        self.conf.set_override('ovs_use_veth', True)

    def test_get_device_name(self):
        br = interface.OVSInterfaceDriver(self.conf)
        device_name = br.get_device_name(FakePort())
        self.assertEqual('ns-abcdef01-12', device_name)

    def test_plug_with_prefix(self):
        self._test_plug(devname='qr-0', prefix='qr-')

    def _test_plug(self, devname=None, bridge=None, namespace=None,
                   prefix=None, mtu=None):
        if not devname:
            devname = 'ns-0'
        if not bridge:
            bridge = 'br-int'

        def device_exists(dev, namespace=None):
            return dev == bridge

        ovs = interface.OVSInterfaceDriver(self.conf)
        self.device_exists.side_effect = device_exists

        root_dev = mock.Mock()
        ns_dev = mock.Mock()
        self.ip().add_veth = mock.Mock(return_value=(root_dev, ns_dev))
        expected = [mock.call(),
                    mock.call().add_veth('tap0', devname,
                                         namespace2=namespace)]

        with mock.patch.object(ovs_lib.OVSBridge, 'replace_port') as replace:
            ovs.plug('01234567-1234-1234-99',
                     'port-1234',
                     devname,
                     'aa:bb:cc:dd:ee:ff',
                     bridge=bridge,
                     namespace=namespace,
                     prefix=prefix)
            replace.assert_called_once_with(
                'tap0',
                ('external_ids', {
                    'iface-id': 'port-1234',
                    'iface-status': 'active',
                    'attached-mac': 'aa:bb:cc:dd:ee:ff'}))

        ns_dev.assert_has_calls(
            [mock.call.link.set_address('aa:bb:cc:dd:ee:ff')])
        if mtu:
            ns_dev.assert_has_calls([mock.call.link.set_mtu(mtu)])
            root_dev.assert_has_calls([mock.call.link.set_mtu(mtu)])

        self.ip.assert_has_calls(expected)
        root_dev.assert_has_calls([mock.call.link.set_up()])
        ns_dev.assert_has_calls([mock.call.link.set_up()])

    def test_plug_mtu(self):
        self.conf.set_override('network_device_mtu', 9000)
        self._test_plug(mtu=9000)

    def test_unplug(self, bridge=None):
        if not bridge:
            bridge = 'br-int'
        with mock.patch('neutron.agent.common.ovs_lib.OVSBridge') as ovs_br:
            ovs = interface.OVSInterfaceDriver(self.conf)
            ovs.unplug('ns-0', bridge=bridge)
            ovs_br.assert_has_calls([mock.call(bridge),
                                     mock.call().delete_port('tap0')])
        self.ip_dev.assert_has_calls([mock.call('ns-0', namespace=None),
                                      mock.call().link.delete()])


class TestBridgeInterfaceDriver(TestBase):
    def test_get_device_name(self):
        br = interface.BridgeInterfaceDriver(self.conf)
        device_name = br.get_device_name(FakePort())
        self.assertEqual('ns-abcdef01-12', device_name)

    def test_plug_no_ns(self):
        self._test_plug()

    def test_plug_with_ns(self):
        self._test_plug(namespace='01234567-1234-1234-99')

    def _test_plug(self, namespace=None, mtu=None):
        def device_exists(device, namespace=None):
            return device.startswith('brq')

        root_veth = mock.Mock()
        ns_veth = mock.Mock()
        self.ip().add_veth = mock.Mock(return_value=(root_veth, ns_veth))

        self.device_exists.side_effect = device_exists
        br = interface.BridgeInterfaceDriver(self.conf)
        mac_address = 'aa:bb:cc:dd:ee:ff'
        br.plug('01234567-1234-1234-99',
                'port-1234',
                'ns-0',
                mac_address,
                namespace=namespace)

        ip_calls = [mock.call(),
                    mock.call().add_veth('tap0', 'ns-0', namespace2=namespace)]
        ns_veth.assert_has_calls([mock.call.link.set_address(mac_address)])
        if mtu:
            ns_veth.assert_has_calls([mock.call.link.set_mtu(mtu)])
            root_veth.assert_has_calls([mock.call.link.set_mtu(mtu)])

        self.ip.assert_has_calls(ip_calls)

        root_veth.assert_has_calls([mock.call.link.set_up()])
        ns_veth.assert_has_calls([mock.call.link.set_up()])

    def test_plug_dev_exists(self):
        self.device_exists.return_value = True
        with mock.patch('neutron.agent.linux.interface.LOG.info') as log:
            br = interface.BridgeInterfaceDriver(self.conf)
            br.plug('01234567-1234-1234-99',
                    'port-1234',
                    'tap0',
                    'aa:bb:cc:dd:ee:ff')
            self.assertFalse(self.ip_dev.called)
            self.assertEqual(log.call_count, 1)

    def test_plug_mtu(self):
        self.device_exists.return_value = False
        self.conf.set_override('network_device_mtu', 9000)
        self._test_plug(mtu=9000)

    def test_unplug_no_device(self):
        self.device_exists.return_value = False
        self.ip_dev().link.delete.side_effect = RuntimeError
        with mock.patch('neutron.agent.linux.interface.LOG') as log:
            br = interface.BridgeInterfaceDriver(self.conf)
            br.unplug('tap0')
            [mock.call(), mock.call('tap0'), mock.call().link.delete()]
            self.assertEqual(log.error.call_count, 1)

    def test_unplug(self):
        self.device_exists.return_value = True
        with mock.patch('neutron.agent.linux.interface.LOG.debug') as log:
            br = interface.BridgeInterfaceDriver(self.conf)
            br.unplug('tap0')
            self.assertEqual(log.call_count, 1)

        self.ip_dev.assert_has_calls([mock.call('tap0', namespace=None),
                                      mock.call().link.delete()])


class TestIVSInterfaceDriver(TestBase):

    def setUp(self):
        super(TestIVSInterfaceDriver, self).setUp()

    def test_get_device_name(self):
        br = interface.IVSInterfaceDriver(self.conf)
        device_name = br.get_device_name(FakePort())
        self.assertEqual('ns-abcdef01-12', device_name)

    def test_plug_with_prefix(self):
        self._test_plug(devname='qr-0', prefix='qr-')

    def _test_plug(self, devname=None, namespace=None, prefix=None, mtu=None):
        if not devname:
            devname = 'ns-0'

        def device_exists(dev, namespace=None):
            return dev == 'indigo'

        ivs = interface.IVSInterfaceDriver(self.conf)
        self.device_exists.side_effect = device_exists

        root_dev = mock.Mock()
        _ns_dev = mock.Mock()
        ns_dev = mock.Mock()
        self.ip().add_veth = mock.Mock(return_value=(root_dev, _ns_dev))
        self.ip().device = mock.Mock(return_value=(ns_dev))
        expected = [mock.call(), mock.call().add_veth('tap0', devname),
                    mock.call().device(devname)]

        ivsctl_cmd = ['ivs-ctl', 'add-port', 'tap0']

        with mock.patch.object(utils, 'execute') as execute:
            ivs.plug('01234567-1234-1234-99',
                     'port-1234',
                     devname,
                     'aa:bb:cc:dd:ee:ff',
                     namespace=namespace,
                     prefix=prefix)
            execute.assert_called_once_with(ivsctl_cmd, run_as_root=True)

        ns_dev.assert_has_calls(
            [mock.call.link.set_address('aa:bb:cc:dd:ee:ff')])
        if mtu:
            ns_dev.assert_has_calls([mock.call.link.set_mtu(mtu)])
            root_dev.assert_has_calls([mock.call.link.set_mtu(mtu)])
        if namespace:
            expected.extend(
                [mock.call().ensure_namespace(namespace),
                 mock.call().ensure_namespace().add_device_to_namespace(
                     mock.ANY)])

        self.ip.assert_has_calls(expected)
        root_dev.assert_has_calls([mock.call.link.set_up()])
        ns_dev.assert_has_calls([mock.call.link.set_up()])

    def test_plug_mtu(self):
        self.conf.set_override('network_device_mtu', 9000)
        self._test_plug(mtu=9000)

    def test_plug_namespace(self):
        self._test_plug(namespace='mynamespace')

    def test_unplug(self):
        ivs = interface.IVSInterfaceDriver(self.conf)
        ivsctl_cmd = ['ivs-ctl', 'del-port', 'tap0']
        with mock.patch.object(utils, 'execute') as execute:
            ivs.unplug('ns-0')
            execute.assert_called_once_with(ivsctl_cmd, run_as_root=True)
        self.ip_dev.assert_has_calls([mock.call('ns-0', namespace=None),
                                      mock.call().link.delete()])
neutron-8.4.0/neutron/tests/unit/agent/linux/test_keepalived.py0000664000567000056710000002647213044372760026161 0ustar jenkinsjenkins00000000000000
# Copyright (C) 2014 eNovance SAS
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import testtools

from neutron.agent.linux import keepalived
from neutron.common import constants as n_consts
from neutron.tests import base

# Keepalived user guide:
# http://www.keepalived.org/pdf/UserGuide.pdf


class KeepalivedGetFreeRangeTestCase(base.BaseTestCase):
    def test_get_free_range(self):
        free_range = keepalived.get_free_range(
            parent_range='169.254.0.0/16',
            excluded_ranges=['169.254.0.0/24',
                             '169.254.1.0/24',
                             '169.254.2.0/24'],
            size=24)
        self.assertEqual('169.254.3.0/24', free_range)

    def test_get_free_range_without_excluded(self):
        free_range = keepalived.get_free_range(
            parent_range='169.254.0.0/16',
            excluded_ranges=[],
            size=20)
        self.assertEqual('169.254.0.0/20', free_range)

    def test_get_free_range_excluded_out_of_parent(self):
        free_range = keepalived.get_free_range(
            parent_range='169.254.0.0/16',
            excluded_ranges=['255.255.255.0/24'],
            size=24)
        self.assertEqual('169.254.0.0/24', free_range)

    def test_get_free_range_not_found(self):
        tiny_parent_range = '192.168.1.0/24'
        huge_size = 8
        with testtools.ExpectedException(ValueError):
            keepalived.get_free_range(
                parent_range=tiny_parent_range,
                excluded_ranges=[],
                size=huge_size)


class KeepalivedConfBaseMixin(object):

    def _get_config(self):
        config = keepalived.KeepalivedConf()

        instance1 = keepalived.KeepalivedInstance('MASTER', 'eth0', 1,
                                                  ['169.254.192.0/18'],
                                                  advert_int=5)
        instance1.set_authentication('AH', 'pass123')
        instance1.track_interfaces.append("eth0")

        vip_address1 = keepalived.KeepalivedVipAddress('192.168.1.0/24',
                                                       'eth1')

        vip_address2 = keepalived.KeepalivedVipAddress('192.168.2.0/24',
                                                       'eth2')

        vip_address3 = keepalived.KeepalivedVipAddress('192.168.3.0/24',
                                                       'eth2')

        vip_address_ex = keepalived.KeepalivedVipAddress('192.168.55.0/24',
                                                         'eth10')

        instance1.vips.append(vip_address1)
        instance1.vips.append(vip_address2)
        instance1.vips.append(vip_address3)
        instance1.vips.append(vip_address_ex)

        virtual_route = keepalived.KeepalivedVirtualRoute(n_consts.IPv4_ANY,
                                                          "192.168.1.1",
                                                          "eth1")
        instance1.virtual_routes.gateway_routes = [virtual_route]

        instance2 = keepalived.KeepalivedInstance('MASTER', 'eth4', 2,
                                                  ['169.254.192.0/18'],
                                                  mcast_src_ip='224.0.0.1')
        instance2.track_interfaces.append("eth4")

        vip_address1 = keepalived.KeepalivedVipAddress('192.168.3.0/24',
                                                       'eth6')

        instance2.vips.append(vip_address1)
        instance2.vips.append(vip_address2)
        instance2.vips.append(vip_address_ex)

        config.add_instance(instance1)
        config.add_instance(instance2)

        return config


class KeepalivedConfTestCase(base.BaseTestCase,
                             KeepalivedConfBaseMixin):

    expected = """vrrp_instance VR_1 {
    state MASTER
    interface eth0
    virtual_router_id 1
    priority 50
    garp_master_delay 60
    advert_int 5
    authentication {
        auth_type AH
        auth_pass pass123
    }
    track_interface {
        eth0
    }
    virtual_ipaddress {
        169.254.0.1/24 dev eth0
    }
    virtual_ipaddress_excluded {
        192.168.1.0/24 dev eth1
        192.168.2.0/24 dev eth2
        192.168.3.0/24 dev eth2
        192.168.55.0/24 dev eth10
    }
    virtual_routes {
        0.0.0.0/0 via 192.168.1.1 dev eth1
    }
}
vrrp_instance VR_2 {
    state MASTER
    interface eth4
    virtual_router_id 2
    priority 50
    garp_master_delay 60
    mcast_src_ip 224.0.0.1
    track_interface {
        eth4
    }
    virtual_ipaddress {
        169.254.0.2/24 dev eth4
    }
    virtual_ipaddress_excluded {
        192.168.2.0/24 dev eth2
        192.168.3.0/24 dev eth6
        192.168.55.0/24 dev eth10
    }
}"""

    def test_config_generation(self):
        config = self._get_config()
        self.assertEqual(self.expected, config.get_config_str())

    def test_config_with_reset(self):
        config = self._get_config()
        self.assertEqual(self.expected, config.get_config_str())

        config.reset()
        self.assertEqual('', config.get_config_str())

    def test_get_existing_vip_ip_addresses_returns_list(self):
        config = self._get_config()
        instance = config.get_instance(1)
        current_vips = sorted(instance.get_existing_vip_ip_addresses('eth2'))
        self.assertEqual(['192.168.2.0/24', '192.168.3.0/24'], current_vips)


class KeepalivedStateExceptionTestCase(base.BaseTestCase):
    def test_state_exception(self):
        invalid_vrrp_state = 'a seal walks'
        self.assertRaises(keepalived.InvalidInstanceStateException,
                          keepalived.KeepalivedInstance,
                          invalid_vrrp_state, 'eth0', 33,
                          ['169.254.192.0/18'])

        invalid_auth_type = 'into a club'
        instance = keepalived.KeepalivedInstance('MASTER', 'eth0', 1,
                                                 ['169.254.192.0/18'])
        self.assertRaises(keepalived.InvalidAuthenticationTypeException,
                          instance.set_authentication,
                          invalid_auth_type, 'some_password')


class KeepalivedInstanceRoutesTestCase(base.BaseTestCase):
    @classmethod
    def _get_instance_routes(cls):
        routes = keepalived.KeepalivedInstanceRoutes()
        default_gw_eth0 = keepalived.KeepalivedVirtualRoute(
            '0.0.0.0/0', '1.0.0.254', 'eth0')
        default_gw_eth1 = keepalived.KeepalivedVirtualRoute(
            '::/0', 'fe80::3e97:eff:fe26:3bfa/64', 'eth1')
        routes.gateway_routes = [default_gw_eth0, default_gw_eth1]
        extra_routes = [
            keepalived.KeepalivedVirtualRoute('10.0.0.0/8', '1.0.0.1'),
            keepalived.KeepalivedVirtualRoute('20.0.0.0/8', '2.0.0.2')]
        routes.extra_routes = extra_routes
        extra_subnets = [
            keepalived.KeepalivedVirtualRoute(
                '30.0.0.0/8', None, 'eth0', scope='link')]
        routes.extra_subnets = extra_subnets
        return routes

    def test_routes(self):
        routes = self._get_instance_routes()
        self.assertEqual(len(routes.routes), 5)

    def test_remove_routes_on_interface(self):
        routes = self._get_instance_routes()
        routes.remove_routes_on_interface('eth0')
        self.assertEqual(len(routes.routes), 3)
        routes.remove_routes_on_interface('eth1')
        self.assertEqual(len(routes.routes), 2)

    def test_build_config(self):
        expected = """    virtual_routes {
        0.0.0.0/0 via 1.0.0.254 dev eth0
        ::/0 via fe80::3e97:eff:fe26:3bfa/64 dev eth1
        10.0.0.0/8 via 1.0.0.1
        20.0.0.0/8 via 2.0.0.2
        30.0.0.0/8 dev eth0 scope link
    }"""
        routes = self._get_instance_routes()
        self.assertEqual(expected, '\n'.join(routes.build_config()))


class KeepalivedInstanceTestCase(base.BaseTestCase,
                                 KeepalivedConfBaseMixin):
    def test_get_primary_vip(self):
        instance = keepalived.KeepalivedInstance('MASTER', 'ha0', 42,
                                                 ['169.254.192.0/18'])
        self.assertEqual('169.254.0.42/24', instance.get_primary_vip())

    def test_remove_addresses_by_interface(self):
        config = self._get_config()
        instance = config.get_instance(1)
        instance.remove_vips_vroutes_by_interface('eth2')
        instance.remove_vips_vroutes_by_interface('eth10')

        expected = """vrrp_instance VR_1 {
    state MASTER
    interface eth0
    virtual_router_id 1
    priority 50
    garp_master_delay 60
    advert_int 5
    authentication {
        auth_type AH
        auth_pass pass123
    }
    track_interface {
        eth0
    }
    virtual_ipaddress {
        169.254.0.1/24 dev eth0
    }
    virtual_ipaddress_excluded {
        192.168.1.0/24 dev eth1
    }
    virtual_routes {
        0.0.0.0/0 via 192.168.1.1 dev eth1
    }
}
vrrp_instance VR_2 {
    state MASTER
    interface eth4
    virtual_router_id 2
    priority 50
    garp_master_delay 60
    mcast_src_ip 224.0.0.1
    track_interface {
        eth4
    }
    virtual_ipaddress {
        169.254.0.2/24 dev eth4
    }
    virtual_ipaddress_excluded {
        192.168.2.0/24 dev eth2
        192.168.3.0/24 dev eth6
        192.168.55.0/24 dev eth10
    }
}"""

        self.assertEqual(expected, config.get_config_str())

    def test_build_config_no_vips(self):
        expected = """vrrp_instance VR_1 {
    state MASTER
    interface eth0
    virtual_router_id 1
    priority 50
    garp_master_delay 60
    virtual_ipaddress {
        169.254.0.1/24 dev eth0
    }
}"""
        instance = keepalived.KeepalivedInstance(
            'MASTER', 'eth0', 1, ['169.254.192.0/18'])
        self.assertEqual(expected, '\n'.join(instance.build_config()))


class KeepalivedVipAddressTestCase(base.BaseTestCase):
    def test_vip_with_scope(self):
        vip = keepalived.KeepalivedVipAddress('fe80::3e97:eff:fe26:3bfa/64',
                                              'eth1',
                                              'link')
        self.assertEqual('fe80::3e97:eff:fe26:3bfa/64 dev eth1 scope link',
                         vip.build_config())

    def test_add_vip_idempotent(self):
        instance = keepalived.KeepalivedInstance('MASTER', 'eth0', 1,
                                                 ['169.254.192.0/18'])
        instance.add_vip('192.168.222.1/32', 'eth11', None)
        instance.add_vip('192.168.222.1/32', 'eth12', 'link')
        self.assertEqual(1, len(instance.vips))


class KeepalivedVirtualRouteTestCase(base.BaseTestCase):
    def test_virtual_route_with_dev(self):
        route = keepalived.KeepalivedVirtualRoute(n_consts.IPv4_ANY,
                                                  '1.2.3.4',
                                                  'eth0')
        self.assertEqual('0.0.0.0/0 via 1.2.3.4 dev eth0',
                         route.build_config())

    def test_virtual_route_without_dev(self):
        route = keepalived.KeepalivedVirtualRoute('50.0.0.0/8', '1.2.3.4')
        self.assertEqual('50.0.0.0/8 via 1.2.3.4', route.build_config())
neutron-8.4.0/neutron/tests/unit/agent/linux/openvswitch_firewall/0000775000567000056710000000000013044373210026661 5ustar jenkinsjenkins00000000000000
neutron-8.4.0/neutron/tests/unit/agent/linux/openvswitch_firewall/test_rules.py0000664000567000056710000002764613044372760031444 0ustar jenkinsjenkins00000000000000
# Copyright 2015 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
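
# A reader-oriented sketch (hedged; the key names are taken from the
# assertions below, not from any other source): create_flows_from_rule_and_port()
# turns a security-group rule dict into an OVS flow-match template, e.g.
#
#     rule = {'ethertype': constants.IPv4,
#             'direction': firewall.INGRESS_DIRECTION,
#             'source_ip_prefix': '192.168.0.0/24',
#             'dest_ip_prefix': '0.0.0.0/0'}
#
# is expected to produce
#
#     {'priority': 70, 'dl_type': constants.ETHERTYPE_IP,
#      'reg_port': port.ofport, 'nw_src': '192.168.0.0/24'}
#
# with the all-zero destination prefix dropped from the match, since
# is_valid_prefix() treats '0.0.0.0/0' and '::/0' as "match anything".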
import mock

from neutron.agent import firewall
from neutron.agent.linux.openvswitch_firewall import firewall as ovsfw
from neutron.agent.linux.openvswitch_firewall import rules
from neutron.common import constants
from neutron.plugins.ml2.drivers.openvswitch.agent.common import constants \
    as ovs_consts
from neutron.tests import base

TESTING_VLAN_TAG = 1


class TestIsValidPrefix(base.BaseTestCase):
    def test_valid_prefix_ipv4(self):
        is_valid = rules.is_valid_prefix('10.0.0.0/0')
        self.assertTrue(is_valid)

    def test_invalid_prefix_ipv4(self):
        is_valid = rules.is_valid_prefix('0.0.0.0/0')
        self.assertFalse(is_valid)

    def test_valid_prefix_ipv6(self):
        is_valid = rules.is_valid_prefix('ffff::0/0')
        self.assertTrue(is_valid)

    def test_invalid_prefix_ipv6(self):
        is_valid = rules.is_valid_prefix('0000:0::0/0')
        self.assertFalse(is_valid)
        is_valid = rules.is_valid_prefix('::0/0')
        self.assertFalse(is_valid)
        is_valid = rules.is_valid_prefix('::/0')
        self.assertFalse(is_valid)


class TestCreateFlowsFromRuleAndPort(base.BaseTestCase):
    def setUp(self):
        super(TestCreateFlowsFromRuleAndPort, self).setUp()
        ovs_port = mock.Mock(vif_mac='00:00:00:00:00:00')
        ovs_port.ofport = 1
        port_dict = {'device': 'port_id'}
        self.port = ovsfw.OFPort(
            port_dict, ovs_port, vlan_tag=TESTING_VLAN_TAG)

        self.create_flows_mock = mock.patch.object(
            rules, 'create_protocol_flows').start()

    @property
    def passed_flow_template(self):
        return self.create_flows_mock.call_args[0][1]

    def _test_create_flows_from_rule_and_port_helper(
            self, rule, expected_template):
        rules.create_flows_from_rule_and_port(rule, self.port)
        self.assertEqual(expected_template, self.passed_flow_template)

    def test_create_flows_from_rule_and_port_no_ip_ipv4(self):
        rule = {
            'ethertype': constants.IPv4,
            'direction': firewall.INGRESS_DIRECTION,
        }
        expected_template = {
            'priority': 70,
            'dl_type': constants.ETHERTYPE_IP,
            'reg_port': self.port.ofport,
        }
        self._test_create_flows_from_rule_and_port_helper(rule,
                                                          expected_template)

    def test_create_flows_from_rule_and_port_src_and_dst_ipv4(self):
        rule = {
            'ethertype': constants.IPv4,
            'direction': firewall.INGRESS_DIRECTION,
            'source_ip_prefix': '192.168.0.0/24',
            'dest_ip_prefix': '10.0.0.1/32',
        }
        expected_template = {
            'priority': 70,
            'dl_type': constants.ETHERTYPE_IP,
            'reg_port': self.port.ofport,
            'nw_src': '192.168.0.0/24',
            'nw_dst': '10.0.0.1/32',
        }
        self._test_create_flows_from_rule_and_port_helper(rule,
                                                          expected_template)

    def test_create_flows_from_rule_and_port_src_and_dst_with_zero_ipv4(self):
        rule = {
            'ethertype': constants.IPv4,
            'direction': firewall.INGRESS_DIRECTION,
            'source_ip_prefix': '192.168.0.0/24',
            'dest_ip_prefix': '0.0.0.0/0',
        }
        expected_template = {
            'priority': 70,
            'dl_type': constants.ETHERTYPE_IP,
            'reg_port': self.port.ofport,
            'nw_src': '192.168.0.0/24',
        }
        self._test_create_flows_from_rule_and_port_helper(rule,
                                                          expected_template)

    def test_create_flows_from_rule_and_port_no_ip_ipv6(self):
        rule = {
            'ethertype': constants.IPv6,
            'direction': firewall.INGRESS_DIRECTION,
        }
        expected_template = {
            'priority': 70,
            'dl_type': constants.ETHERTYPE_IPV6,
            'reg_port': self.port.ofport,
        }
        self._test_create_flows_from_rule_and_port_helper(rule,
                                                          expected_template)

    def test_create_flows_from_rule_and_port_src_and_dst_ipv6(self):
        rule = {
            'ethertype': constants.IPv6,
            'direction': firewall.INGRESS_DIRECTION,
            'source_ip_prefix': '2001:db8:bbbb::1/64',
            'dest_ip_prefix': '2001:db8:aaaa::1/64',
        }
        expected_template = {
            'priority': 70,
            'dl_type': constants.ETHERTYPE_IPV6,
            'reg_port': self.port.ofport,
            'ipv6_src': '2001:db8:bbbb::1/64',
            'ipv6_dst': '2001:db8:aaaa::1/64',
        }
        self._test_create_flows_from_rule_and_port_helper(rule,
                                                          expected_template)

    def test_create_flows_from_rule_and_port_src_and_dst_with_zero_ipv6(self):
        rule = {
            'ethertype': constants.IPv6,
            'direction': firewall.INGRESS_DIRECTION,
            'source_ip_prefix': '2001:db8:bbbb::1/64',
            'dest_ip_prefix': '::/0',
        }
        expected_template = {
            'priority': 70,
            'dl_type': constants.ETHERTYPE_IPV6,
            'reg_port': self.port.ofport,
            'ipv6_src': '2001:db8:bbbb::1/64',
        }
        self._test_create_flows_from_rule_and_port_helper(rule,
                                                          expected_template)


class TestCreateProtocolFlows(base.BaseTestCase):
    def setUp(self):
        super(TestCreateProtocolFlows, self).setUp()
        ovs_port = mock.Mock(vif_mac='00:00:00:00:00:00')
        ovs_port.ofport = 1
        port_dict = {'device': 'port_id'}
        self.port = ovsfw.OFPort(
            port_dict, ovs_port, vlan_tag=TESTING_VLAN_TAG)

    def _test_create_protocol_flows_helper(self, direction, rule,
                                           expected_flows):
        flow_template = {'some_settings': 'foo'}
        for flow in expected_flows:
            flow.update(flow_template)
        flows = rules.create_protocol_flows(
            direction, flow_template, self.port, rule)
        self.assertEqual(expected_flows, flows)

    def test_create_protocol_flows_ingress(self):
        rule = {'protocol': constants.PROTO_NAME_TCP}
        expected_flows = [{
            'table': ovs_consts.RULES_INGRESS_TABLE,
            'dl_dst': self.port.mac,
            'actions': 'strip_vlan,output:1',
            'nw_proto': constants.PROTO_NUM_TCP,
        }]
        self._test_create_protocol_flows_helper(
            firewall.INGRESS_DIRECTION, rule, expected_flows)

    def test_create_protocol_flows_egress(self):
        rule = {'protocol': constants.PROTO_NAME_TCP}
        expected_flows = [{
            'table': ovs_consts.RULES_EGRESS_TABLE,
            'dl_src': self.port.mac,
            'actions': 'resubmit(,{:d})'.format(
                ovs_consts.ACCEPT_OR_INGRESS_TABLE),
            'nw_proto': constants.PROTO_NUM_TCP,
        }]
        self._test_create_protocol_flows_helper(
            firewall.EGRESS_DIRECTION, rule, expected_flows)

    def test_create_protocol_flows_no_protocol(self):
        rule = {}
        expected_flows = [{
            'table': ovs_consts.RULES_EGRESS_TABLE,
            'dl_src': self.port.mac,
            'actions': 'resubmit(,{:d})'.format(
                ovs_consts.ACCEPT_OR_INGRESS_TABLE),
        }]
        self._test_create_protocol_flows_helper(
            firewall.EGRESS_DIRECTION, rule, expected_flows)

    def test_create_protocol_flows_icmp6(self):
        rule = {'ethertype': constants.IPv6,
                'protocol': constants.PROTO_NAME_ICMP}
        expected_flows = [{
            'table': ovs_consts.RULES_EGRESS_TABLE,
            'dl_src': self.port.mac,
            'actions': 'resubmit(,{:d})'.format(
                ovs_consts.ACCEPT_OR_INGRESS_TABLE),
            'nw_proto': constants.PROTO_NUM_IPV6_ICMP,
        }]
        self._test_create_protocol_flows_helper(
            firewall.EGRESS_DIRECTION, rule, expected_flows)

    def test_create_protocol_flows_port_range(self):
        rule = {'ethertype': constants.IPv4,
                'protocol': constants.PROTO_NAME_TCP,
                'port_range_min': 22,
                'port_range_max': 23}
        expected_flows = [{
            'table': ovs_consts.RULES_EGRESS_TABLE,
            'dl_src': self.port.mac,
            'actions': 'resubmit(,{:d})'.format(
                ovs_consts.ACCEPT_OR_INGRESS_TABLE),
            'nw_proto': constants.PROTO_NUM_TCP,
            'tcp_dst': '0x0016/0xfffe'
        }]
        self._test_create_protocol_flows_helper(
            firewall.EGRESS_DIRECTION, rule, expected_flows)


class TestCreatePortRangeFlows(base.BaseTestCase):
    def _test_create_port_range_flows_helper(self, expected_flows, rule):
        flow_template = {'some_settings': 'foo'}
        for flow in expected_flows:
            flow.update(flow_template)
        port_range_flows = rules.create_port_range_flows(flow_template, rule)
        self.assertEqual(expected_flows, port_range_flows)

    def test_create_port_range_flows_with_source_and_destination(self):
        rule = {
            'protocol': constants.PROTO_NAME_TCP,
            'source_port_range_min': 123,
            'source_port_range_max': 124,
            'port_range_min': 10,
            'port_range_max': 11,
        }
        expected_flows = [
            {'tcp_src': '0x007b', 'tcp_dst': '0x000a/0xfffe'},
            {'tcp_src': '0x007c', 'tcp_dst': '0x000a/0xfffe'},
        ]
        self._test_create_port_range_flows_helper(expected_flows, rule)

    def test_create_port_range_flows_with_source(self):
        rule = {
            'protocol': constants.PROTO_NAME_TCP,
            'source_port_range_min': 123,
            'source_port_range_max': 124,
        }
        expected_flows = [
            {'tcp_src': '0x007b'},
            {'tcp_src': '0x007c'},
        ]
        self._test_create_port_range_flows_helper(expected_flows, rule)

    def test_create_port_range_flows_with_destination(self):
        rule = {
            'protocol': constants.PROTO_NAME_TCP,
            'port_range_min': 10,
            'port_range_max': 11,
        }
        expected_flows = [
            {'tcp_dst': '0x000a/0xfffe'},
        ]
        self._test_create_port_range_flows_helper(expected_flows, rule)

    def test_create_port_range_flows_without_port_range(self):
        rule = {
            'protocol': constants.PROTO_NAME_TCP,
        }
        expected_flows = []
        self._test_create_port_range_flows_helper(expected_flows, rule)

    def test_create_port_range_with_icmp_protocol(self):
        rule = {
            'protocol': 'icmp',
            'port_range_min': 10,
            'port_range_max': 11,
        }
        expected_flows = []
        self._test_create_port_range_flows_helper(expected_flows, rule)


class TestCreateRuleForIpAddress(base.BaseTestCase):
    def test_create_rule_for_ip_address(self):
        sg_rule = {
            'remote_group_id': 'remote_id',
            'direction': firewall.INGRESS_DIRECTION,
            'some_settings': 'foo',
        }
        expected_rule = {
            'direction': firewall.INGRESS_DIRECTION,
            'source_ip_prefix': '192.168.0.1/32',
            'some_settings': 'foo',
        }
        translated_rule = rules.create_rule_for_ip_address(
            '192.168.0.1', sg_rule)
        self.assertEqual(expected_rule, translated_rule)
neutron-8.4.0/neutron/tests/unit/agent/linux/openvswitch_firewall/__init__.py0000664000567000056710000000000013044372736030774 0ustar jenkinsjenkins00000000000000
neutron-8.4.0/neutron/tests/unit/agent/linux/openvswitch_firewall/test_firewall.py0000664000567000056710000004623113044372760032116 0ustar jenkinsjenkins00000000000000
# Copyright 2015 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
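
# Orientation for the expected flows asserted below (inferred from the test
# data itself, not an authoritative statement of the driver internals): the
# driver stamps per-port metadata into OpenFlow registers, and the tests pin
# the symbolic REG_PORT/REG_NET constants to concrete register names via
# 'reg{:d}'.format(...). In the prepare_port_filter() expectations this
# shows up as
#
#     set_field:<ofport>->reg5, set_field:<vlan_tag>->reg6
#
# so that later rule tables can match reg5 (port) and reg6 (network/VLAN,
# also reused as the conntrack zone: ct(commit,zone=NXM_NX_REG6[0..15])).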
import mock
import testtools

from neutron.agent.common import ovs_lib
from neutron.agent import firewall
from neutron.agent.linux.openvswitch_firewall import constants as ovsfw_consts
from neutron.agent.linux.openvswitch_firewall import firewall as ovsfw
from neutron.common import constants
from neutron.plugins.ml2.drivers.openvswitch.agent.common import constants \
    as ovs_consts
from neutron.tests import base

TESTING_VLAN_TAG = 1


def create_ofport(port_dict):
    ovs_port = mock.Mock(vif_mac='00:00:00:00:00:00', port_name="port-name")
    return ovsfw.OFPort(port_dict, ovs_port, vlan_tag=TESTING_VLAN_TAG)


class TestCreateRegNumbers(base.BaseTestCase):
    def test_no_registers_defined(self):
        flow = {'foo': 'bar'}
        ovsfw.create_reg_numbers(flow)
        self.assertEqual({'foo': 'bar'}, flow)

    def test_both_registers_defined(self):
        flow = {'foo': 'bar', 'reg_port': 1, 'reg_net': 2}
        expected_flow = {'foo': 'bar',
                         'reg{:d}'.format(ovsfw_consts.REG_PORT): 1,
                         'reg{:d}'.format(ovsfw_consts.REG_NET): 2}
        ovsfw.create_reg_numbers(flow)
        self.assertEqual(expected_flow, flow)


class TestSecurityGroup(base.BaseTestCase):
    def setUp(self):
        super(TestSecurityGroup, self).setUp()
        self.sg = ovsfw.SecurityGroup('123')
        self.sg.members = {'type': [1, 2, 3, 4]}

    def test_update_rules(self):
        rules = [
            {'foo': 'bar', 'rule': 'all'}, {'bar': 'foo'},
            {'remote_group_id': '123456', 'foo': 'bar'}]
        expected_raw_rules = [{'foo': 'bar', 'rule': 'all'}, {'bar': 'foo'}]
        expected_remote_rules = [{'remote_group_id': '123456', 'foo': 'bar'}]
        self.sg.update_rules(rules)

        self.assertEqual(expected_raw_rules, self.sg.raw_rules)
        self.assertEqual(expected_remote_rules, self.sg.remote_rules)

    def get_ethertype_filtered_addresses(self):
        addresses = self.sg.get_ethertype_filtered_addresses('type')
        expected_addresses = [1, 2, 3, 4]
        self.assertEqual(expected_addresses, addresses)

    def get_ethertype_filtered_addresses_with_excluded_addresses(self):
        addresses = self.sg.get_ethertype_filtered_addresses('type', [2, 3])
        expected_addresses = [1, 4]
        self.assertEqual(expected_addresses, addresses)


class TestOFPort(base.BaseTestCase):
    def setUp(self):
        super(TestOFPort, self).setUp()
        self.ipv4_addresses = ['10.0.0.1', '192.168.0.1']
        self.ipv6_addresses = ['fe80::f816:3eff:fe2e:1']
        port_dict = {'device': 1,
                     'fixed_ips': self.ipv4_addresses + self.ipv6_addresses}
        self.port = create_ofport(port_dict)

    def test_ipv4_address(self):
        ipv4_addresses = self.port.ipv4_addresses
        self.assertEqual(self.ipv4_addresses, ipv4_addresses)

    def test_ipv6_address(self):
        ipv6_addresses = self.port.ipv6_addresses
        self.assertEqual(self.ipv6_addresses, ipv6_addresses)

    def test__get_allowed_pairs(self):
        port = {
            'allowed_address_pairs': [
                {'mac_address': 'foo', 'ip_address': '10.0.0.1'},
                {'mac_address': 'bar', 'ip_address': '192.168.0.1'},
                {'mac_address': 'baz', 'ip_address': '2003::f'},
            ]}
        allowed_pairs_v4 = ovsfw.OFPort._get_allowed_pairs(port, version=4)
        allowed_pairs_v6 = ovsfw.OFPort._get_allowed_pairs(port, version=6)
        expected_aap_v4 = {('foo', '10.0.0.1'), ('bar', '192.168.0.1')}
        expected_aap_v6 = {('baz', '2003::f')}
        self.assertEqual(expected_aap_v4, allowed_pairs_v4)
        self.assertEqual(expected_aap_v6, allowed_pairs_v6)

    def test__get_allowed_pairs_empty(self):
        port = {}
        allowed_pairs = ovsfw.OFPort._get_allowed_pairs(port, version=4)
        self.assertFalse(allowed_pairs)

    def test_update(self):
        old_port_dict = self.port.neutron_port_dict
        new_port_dict = old_port_dict.copy()
        added_ips = [1, 2, 3]
        new_port_dict.update({
            'fixed_ips': added_ips,
            'allowed_address_pairs': [
                {'mac_address': '00:00:00:00:00:01',
                 'ip_address': '192.168.0.1'},
                {'mac_address': '00:00:00:00:00:01',
                 'ip_address': '2003::f'}],
        })
        self.port.update(new_port_dict)
        self.assertEqual(new_port_dict, self.port.neutron_port_dict)
        self.assertIsNot(new_port_dict, self.port.neutron_port_dict)
        self.assertEqual(added_ips, self.port.fixed_ips)
        self.assertEqual({('00:00:00:00:00:01', '192.168.0.1')},
                         self.port.allowed_pairs_v4)
        self.assertIn(('00:00:00:00:00:01', '2003::f'),
                      self.port.allowed_pairs_v6)


class TestSGPortMap(base.BaseTestCase):
    def setUp(self):
        super(TestSGPortMap, self).setUp()
        self.map = ovsfw.SGPortMap()

    def test_get_or_create_sg_existing_sg(self):
        self.map.sec_groups['id'] = mock.sentinel
        sg = self.map.get_or_create_sg('id')
        self.assertIs(mock.sentinel, sg)

    def test_get_or_create_sg_nonexisting_sg(self):
        with mock.patch.object(ovsfw, 'SecurityGroup') as sg_mock:
            sg = self.map.get_or_create_sg('id')
        self.assertEqual(sg_mock.return_value, sg)

    def _check_port(self, port_id, expected_sg_ids):
        port = self.map.ports[port_id]
        expected_sgs = [self.map.sec_groups[sg_id]
                        for sg_id in expected_sg_ids]
        self.assertEqual(port.sec_groups, expected_sgs)

    def _check_sg(self, sg_id, expected_port_ids):
        sg = self.map.sec_groups[sg_id]
        expected_ports = {self.map.ports[port_id]
                          for port_id in expected_port_ids}
        self.assertEqual(sg.ports, expected_ports)

    def _create_ports_and_sgroups(self):
        sg_1 = ovsfw.SecurityGroup(1)
        sg_2 = ovsfw.SecurityGroup(2)
        sg_3 = ovsfw.SecurityGroup(3)
        port_a = create_ofport({'device': 'a'})
        port_b = create_ofport({'device': 'b'})
        self.map.ports = {'a': port_a, 'b': port_b}
        self.map.sec_groups = {1: sg_1, 2: sg_2, 3: sg_3}
        port_a.sec_groups = [sg_1, sg_2]
        port_b.sec_groups = [sg_2, sg_3]
        sg_1.ports = {port_a}
        sg_2.ports = {port_a, port_b}
        sg_3.ports = {port_b}

    def test_create_port(self):
        port = create_ofport({'device': 'a'})
        sec_groups = ['1', '2']
        port_dict = {'security_groups': sec_groups}
        self.map.create_port(port, port_dict)
        self._check_port('a', sec_groups)
        self._check_sg('1', ['a'])
        self._check_sg('2', ['a'])

    def test_update_port_sg_added(self):
        self._create_ports_and_sgroups()
        port_dict = {'security_groups': [1, 2, 3]}
        self.map.update_port(self.map.ports['b'], port_dict)
        self._check_port('a', [1, 2])
        self._check_port('b', [1, 2, 3])
        self._check_sg(1, ['a', 'b'])
        self._check_sg(2, ['a', 'b'])
        self._check_sg(3, ['b'])

    def test_update_port_sg_removed(self):
        self._create_ports_and_sgroups()
        port_dict = {'security_groups': [1]}
        self.map.update_port(self.map.ports['b'], port_dict)
        self._check_port('a', [1, 2])
        self._check_port('b', [1])
        self._check_sg(1, ['a', 'b'])
        self._check_sg(2, ['a'])
        self._check_sg(3, [])

    def test_remove_port(self):
        self._create_ports_and_sgroups()
        self.map.remove_port(self.map.ports['a'])
        self._check_port('b', [2, 3])
        self._check_sg(1, [])
        self._check_sg(2, ['b'])
        self._check_sg(3, ['b'])
        self.assertNotIn('a', self.map.ports)

    def test_update_rules(self):
        """Just make sure it doesn't crash"""
        self.map.update_rules(1, [])

    def test_update_members(self):
        """Just make sure it doesn't crash"""
        self.map.update_members(1, [])


class FakeOVSPort(object):
    def __init__(self, name, port, mac):
        self.port_name = name
        self.ofport = port
        self.vif_mac = mac


class TestOVSFirewallDriver(base.BaseTestCase):
    def setUp(self):
        super(TestOVSFirewallDriver, self).setUp()
        mock_bridge = mock.patch.object(
            ovs_lib, 'OVSBridge', autospec=True).start()
        self.firewall = ovsfw.OVSFirewallDriver(mock_bridge)
        self.mock_bridge = self.firewall.int_br
        self.mock_bridge.reset_mock()
        self.fake_ovs_port = FakeOVSPort('port', 1, '00:00:00:00:00:00')
        self.mock_bridge.br.get_vif_port_by_id.return_value = \
            self.fake_ovs_port

    def _prepare_security_group(self):
        security_group_rules = [
            {'ethertype': constants.IPv4,
             'protocol': constants.PROTO_NAME_TCP,
             'direction': firewall.INGRESS_DIRECTION,
             'port_range_min': 123,
             'port_range_max': 123}]
        self.firewall.update_security_group_rules(1, security_group_rules)
        security_group_rules = [
            {'ethertype': constants.IPv4,
             'protocol': constants.PROTO_NAME_UDP,
             'direction': firewall.EGRESS_DIRECTION}]
        self.firewall.update_security_group_rules(2, security_group_rules)

    @property
    def port_ofport(self):
        return self.mock_bridge.br.get_vif_port_by_id.return_value.ofport

    @property
    def port_mac(self):
        return self.mock_bridge.br.get_vif_port_by_id.return_value.vif_mac

    def test_initialize_bridge(self):
        br = self.firewall.initialize_bridge(self.mock_bridge)
        self.assertEqual(br, self.mock_bridge.deferred.return_value)

    def test__add_flow_dl_type_formatted_to_string(self):
        dl_type = 0x0800
        self.firewall._add_flow(dl_type=dl_type)

    def test__add_flow_registers_are_replaced(self):
        self.firewall._add_flow(in_port=1, reg_port=1, reg_net=2)
        expected_calls = {'in_port': 1,
                          'reg{:d}'.format(ovsfw_consts.REG_PORT): 1,
                          'reg{:d}'.format(ovsfw_consts.REG_NET): 2}
        self.mock_bridge.br.add_flow.assert_called_once_with(
            **expected_calls)

    def test__drop_all_unmatched_flows(self):
        self.firewall._drop_all_unmatched_flows()
        expected_calls = [
            mock.call(actions='drop', priority=0,
                      table=ovs_consts.BASE_EGRESS_TABLE),
            mock.call(actions='drop', priority=0,
                      table=ovs_consts.RULES_EGRESS_TABLE),
            mock.call(actions='drop', priority=0,
                      table=ovs_consts.ACCEPT_OR_INGRESS_TABLE),
            mock.call(actions='drop', priority=0,
                      table=ovs_consts.BASE_INGRESS_TABLE),
            mock.call(actions='drop', priority=0,
                      table=ovs_consts.RULES_INGRESS_TABLE)]
        actual_calls = self.firewall.int_br.br.add_flow.call_args_list
        self.assertEqual(expected_calls, actual_calls)

    def test_get_or_create_ofport_non_existing(self):
        port_dict = {
            'device': 'port-id',
            'security_groups': [123, 456]}
        port = self.firewall.get_or_create_ofport(port_dict)
        sg1, sg2 = sorted(
            self.firewall.sg_port_map.sec_groups.values(),
            key=lambda x: x.id)
        self.assertIn(port, self.firewall.sg_port_map.ports.values())
        self.assertEqual(
            sorted(port.sec_groups, key=lambda x: x.id), [sg1, sg2])
        self.assertIn(port, sg1.ports)
        self.assertIn(port, sg2.ports)

    def test_get_or_create_ofport_existing(self):
        port_dict = {
            'device': 'port-id',
            'security_groups': [123, 456]}
        of_port = create_ofport(port_dict)
        self.firewall.sg_port_map.ports[of_port.id] = of_port
        port = self.firewall.get_or_create_ofport(port_dict)
        sg1, sg2 = sorted(
            self.firewall.sg_port_map.sec_groups.values(),
            key=lambda x: x.id)
        self.assertIs(of_port, port)
        self.assertIn(port, self.firewall.sg_port_map.ports.values())
        self.assertEqual(
            sorted(port.sec_groups, key=lambda x: x.id), [sg1, sg2])
        self.assertIn(port, sg1.ports)
        self.assertIn(port, sg2.ports)

    def test_get_or_create_ofport_missing(self):
        port_dict = {
            'device': 'port-id',
            'security_groups': [123, 456]}
        self.mock_bridge.br.get_vif_port_by_id.return_value = None
        with testtools.ExpectedException(ovsfw.OVSFWPortNotFound):
            self.firewall.get_or_create_ofport(port_dict)

    def test_get_or_create_ofport_not_tagged(self):
        port_dict = {
            'device': 'port-id',
            'security_groups': [123, 456]}
        self.mock_bridge.br.db_get_val.return_value = None
        port = self.firewall.get_or_create_ofport(port_dict)
        self.assertEqual(ovs_consts.DEAD_VLAN_TAG, port.vlan_tag)

    def test_is_port_managed_managed_port(self):
        port_dict = {'device': 'port-id'}
        self.firewall.sg_port_map.ports[port_dict['device']] = object()
        is_managed = self.firewall.is_port_managed(port_dict)
        self.assertTrue(is_managed)

    def test_is_port_managed_not_managed_port(self):
        port_dict = {'device': 'port-id'}
        is_managed = self.firewall.is_port_managed(port_dict)
        self.assertFalse(is_managed)

    def test_prepare_port_filter(self):
        port_dict = {'device': 'port-id',
                     'security_groups': [1]}
        self._prepare_security_group()
        self.firewall.prepare_port_filter(port_dict)
        exp_ingress_classifier = mock.call(
            actions='set_field:{:d}->reg5,set_field:{:d}->reg6,'
                    'resubmit(,{:d})'.format(
                        self.port_ofport, TESTING_VLAN_TAG,
                        ovs_consts.BASE_EGRESS_TABLE),
            in_port=self.port_ofport,
            priority=100,
            table=ovs_consts.LOCAL_SWITCHING)
        exp_egress_classifier = mock.call(
            actions='set_field:{:d}->reg5,set_field:{:d}->reg6,'
                    'resubmit(,{:d})'.format(
                        self.port_ofport, TESTING_VLAN_TAG,
                        ovs_consts.BASE_INGRESS_TABLE),
            dl_dst=self.port_mac,
            priority=90,
            table=ovs_consts.LOCAL_SWITCHING)
        filter_rule = mock.call(
            actions='ct(commit,zone=NXM_NX_REG6[0..15]),'
                    'strip_vlan,output:{:d}'.format(self.port_ofport),
            dl_dst=self.port_mac,
            dl_type="0x{:04x}".format(constants.ETHERTYPE_IP),
            nw_proto=constants.PROTO_NUM_TCP,
            priority=70,
            reg5=self.port_ofport,
            ct_state=ovsfw_consts.OF_STATE_NEW_NOT_ESTABLISHED,
            table=ovs_consts.RULES_INGRESS_TABLE,
            tcp_dst='0x007b')
        calls = self.mock_bridge.br.add_flow.call_args_list
        for call in exp_ingress_classifier, exp_egress_classifier, filter_rule:
            self.assertIn(call, calls)

    def test_prepare_port_filter_port_security_disabled(self):
        port_dict = {'device': 'port-id',
                     'security_groups': [1],
                     'port_security_enabled': False}
        self._prepare_security_group()
        self.firewall.prepare_port_filter(port_dict)
        self.assertFalse(self.mock_bridge.br.add_flow.called)

    def test_prepare_port_filter_initialized_port(self):
        port_dict = {'device': 'port-id',
                     'security_groups': [1]}
        self._prepare_security_group()
        self.firewall.prepare_port_filter(port_dict)
        self.assertFalse(self.mock_bridge.br.delete_flows.called)
        self.firewall.prepare_port_filter(port_dict)
        self.assertTrue(self.mock_bridge.br.delete_flows.called)

    def test_update_port_filter(self):
        port_dict = {'device': 'port-id',
                     'security_groups': [1]}
        self._prepare_security_group()
        self.firewall.prepare_port_filter(port_dict)
        port_dict['security_groups'] = [2]
        self.mock_bridge.reset_mock()

        self.firewall.update_port_filter(port_dict)
        self.assertTrue(self.mock_bridge.br.delete_flows.called)
        add_calls = self.mock_bridge.br.add_flow.call_args_list
        filter_rule = mock.call(
            actions='resubmit(,{:d})'.format(
                ovs_consts.ACCEPT_OR_INGRESS_TABLE),
            dl_src=self.port_mac,
            dl_type="0x{:04x}".format(constants.ETHERTYPE_IP),
            nw_proto=constants.PROTO_NUM_UDP,
            priority=70,
            ct_state=ovsfw_consts.OF_STATE_NEW_NOT_ESTABLISHED,
            reg5=self.port_ofport,
            table=ovs_consts.RULES_EGRESS_TABLE)
        self.assertIn(filter_rule, add_calls)

    def test_update_port_filter_create_new_port_if_not_present(self):
        port_dict = {'device': 'port-id',
                     'security_groups': [1]}
        self._prepare_security_group()
        with mock.patch.object(
                self.firewall, 'prepare_port_filter') as prepare_mock:
            self.firewall.update_port_filter(port_dict)
        self.assertTrue(prepare_mock.called)

    def test_update_port_filter_port_security_disabled(self):
        port_dict = {'device': 'port-id',
                     'security_groups': [1]}
        self._prepare_security_group()
        self.firewall.prepare_port_filter(port_dict)
        port_dict['port_security_enabled'] = False
        self.firewall.update_port_filter(port_dict)
        self.assertTrue(self.mock_bridge.br.delete_flows.called)

    def test_remove_port_filter(self):
        port_dict = {'device': 'port-id',
                     'security_groups': [1]}
        self._prepare_security_group()
        self.firewall.prepare_port_filter(port_dict)
        self.firewall.remove_port_filter(port_dict)
        self.assertTrue(self.mock_bridge.br.delete_flows.called)

    def test_remove_port_filter_port_security_disabled(self):
        port_dict = {'device': 'port-id',
                     'security_groups': [1]}
        self.firewall.remove_port_filter(port_dict)
        self.assertFalse(self.mock_bridge.br.delete_flows.called)

    def test_update_security_group_rules(self):
        """Just make sure it doesn't crash"""
        new_rules = [
            {'ethertype': constants.IPv4,
             'direction': firewall.INGRESS_DIRECTION,
             'protocol': constants.PROTO_NAME_ICMP},
            {'ethertype': constants.IPv4,
             'direction': firewall.EGRESS_DIRECTION,
             'remote_group_id': 2}]
        self.firewall.update_security_group_rules(1, new_rules)

    def test_update_security_group_members(self):
        """Just make sure it doesn't crash"""
        new_members = {constants.IPv4: [1, 2, 3, 4]}
        self.firewall.update_security_group_members(2, new_members)
neutron-8.4.0/neutron/tests/unit/agent/linux/test_ip_link_support.py0000664000567000056710000001630413044372736027275 0ustar jenkinsjenkins00000000000000
# Copyright 2014 Mellanox Technologies, Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
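
# These tests document the capability-probing strategy: run `ip link help`
# (iproute2 prints its usage text to stderr, hence execute() is called with
# return_stderr=True) and scan the "vf NUM ..." section for keywords such as
# state, spoofchk and vlan/qos. A hedged usage sketch built only from the
# helpers exercised below:
#
#     section = ip_link.IpLinkSupport.get_vf_mgmt_section()
#     supported = ip_link.IpLinkSupport.vf_mgmt_capability_supported(
#         section, ip_link.IpLinkConstants.IP_LINK_CAPABILITY_STATE)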
import mock from neutron.agent.linux import ip_link_support as ip_link from neutron.tests import base class TestIpLinkSupport(base.BaseTestCase): IP_LINK_HELP = """Usage: ip link add [link DEV] [ name ] NAME [ txqueuelen PACKETS ] [ address LLADDR ] [ broadcast LLADDR ] [ mtu MTU ] [index IDX ] [ numtxqueues QUEUE_COUNT ] [ numrxqueues QUEUE_COUNT ] type TYPE [ ARGS ] ip link delete DEV type TYPE [ ARGS ] ip link set { dev DEVICE | group DEVGROUP } [ { up | down } ] [ arp { on | off } ] [ dynamic { on | off } ] [ multicast { on | off } ] [ allmulticast { on | off } ] [ promisc { on | off } ] [ trailers { on | off } ] [ txqueuelen PACKETS ] [ name NEWNAME ] [ address LLADDR ] [ broadcast LLADDR ] [ mtu MTU ] [ netns PID ] [ netns NAME ] [ alias NAME ] [ vf NUM [ mac LLADDR ] [ vlan VLANID [ qos VLAN-QOS ] ] [ rate TXRATE ] ] [ spoofchk { on | off} ] ] [ state { auto | enable | disable} ] ] [ master DEVICE ] [ nomaster ] ip link show [ DEVICE | group GROUP ] [up] TYPE := { vlan | veth | vcan | dummy | ifb | macvlan | macvtap | can | bridge | bond | ipoib | ip6tnl | ipip | sit | vxlan | gre | gretap | ip6gre | ip6gretap | vti } """ IP_LINK_HELP_NO_STATE = """Usage: ip link add link DEV [ name ] NAME [ txqueuelen PACKETS ] [ address LLADDR ] [ broadcast LLADDR ] [ mtu MTU ] type TYPE [ ARGS ] ip link delete DEV type TYPE [ ARGS ] ip link set DEVICE [ { up | down } ] [ arp { on | off } ] [ dynamic { on | off } ] [ multicast { on | off } ] [ allmulticast { on | off } ] [ promisc { on | off } ] [ trailers { on | off } ] [ txqueuelen PACKETS ] [ name NEWNAME ] [ address LLADDR ] [ broadcast LLADDR ] [ mtu MTU ] [ netns PID ] [ alias NAME ] [ vf NUM [ mac LLADDR ] [ vlan VLANID [ qos VLAN-QOS ] ] [ rate TXRATE ] ] ip link show [ DEVICE ] TYPE := { vlan | veth | vcan | dummy | ifb | macvlan | can } """ IP_LINK_HELP_NO_SPOOFCHK = IP_LINK_HELP_NO_STATE IP_LINK_HELP_NO_VF = """Usage: ip link set DEVICE { up | down | arp { on | off } | dynamic { on | off } | multicast { on | off } | allmulticast { on | off } | promisc { on | off } | trailers { on | off } | txqueuelen PACKETS | name NEWNAME | address LLADDR | broadcast LLADDR | mtu MTU } ip link show [ DEVICE ] """ def _test_capability(self, capability, subcapability=None, expected=True, stdout="", stderr=""): with mock.patch("neutron.agent.linux.utils.execute") as mock_exec: mock_exec.return_value = (stdout, stderr) vf_section = ip_link.IpLinkSupport.get_vf_mgmt_section() capable = ip_link.IpLinkSupport.vf_mgmt_capability_supported( vf_section, capability, subcapability) self.assertEqual(expected, capable) mock_exec.assert_called_once_with(['ip', 'link', 'help'], check_exit_code=False, return_stderr=True, log_fail_as_error=False) def test_vf_mgmt(self): self._test_capability( ip_link.IpLinkConstants.IP_LINK_CAPABILITY_STATE, stderr=self.IP_LINK_HELP) def test_execute_with_stdout(self): self._test_capability( ip_link.IpLinkConstants.IP_LINK_CAPABILITY_STATE, stdout=self.IP_LINK_HELP) def test_vf_mgmt_no_state(self): self._test_capability( ip_link.IpLinkConstants.IP_LINK_CAPABILITY_STATE, expected=False, stderr=self.IP_LINK_HELP_NO_STATE) def test_vf_mgmt_no_spoofchk(self): self._test_capability( ip_link.IpLinkConstants.IP_LINK_CAPABILITY_SPOOFCHK, expected=False, stderr=self.IP_LINK_HELP_NO_SPOOFCHK) def test_vf_mgmt_no_vf(self): self._test_capability( ip_link.IpLinkConstants.IP_LINK_CAPABILITY_STATE, expected=False, stderr=self.IP_LINK_HELP_NO_VF) def test_vf_mgmt_unknown_capability(self): self._test_capability( "state1", expected=False, 
stderr=self.IP_LINK_HELP) def test_vf_mgmt_sub_capability(self): self._test_capability( ip_link.IpLinkConstants.IP_LINK_CAPABILITY_VLAN, ip_link.IpLinkConstants.IP_LINK_SUB_CAPABILITY_QOS, stderr=self.IP_LINK_HELP) def test_vf_mgmt_sub_capability_mismatch(self): self._test_capability( ip_link.IpLinkConstants.IP_LINK_CAPABILITY_STATE, ip_link.IpLinkConstants.IP_LINK_SUB_CAPABILITY_QOS, expected=False, stderr=self.IP_LINK_HELP) def test_vf_mgmt_sub_capability_invalid(self): self._test_capability( ip_link.IpLinkConstants.IP_LINK_CAPABILITY_VLAN, "qos1", expected=False, stderr=self.IP_LINK_HELP) def test_vf_mgmt_error(self): with mock.patch("neutron.agent.linux.utils.execute") as mock_exec: mock_exec.side_effect = Exception() self.assertRaises( ip_link.UnsupportedIpLinkCommand, ip_link.IpLinkSupport.get_vf_mgmt_section) neutron-8.4.0/neutron/tests/unit/agent/linux/test_utils.py0000664000567000056710000004341313044372760025212 0ustar jenkinsjenkins00000000000000# Copyright 2012, VMware, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import socket import mock import six import testtools import oslo_i18n from neutron.agent.linux import utils from neutron.tests import base from neutron.tests.common import helpers _marker = object() class AgentUtilsExecuteTest(base.BaseTestCase): def setUp(self): super(AgentUtilsExecuteTest, self).setUp() self.test_file = self.get_temp_file_path('test_execute.tmp') open(self.test_file, 'w').close() self.process = mock.patch('eventlet.green.subprocess.Popen').start() self.process.return_value.returncode = 0 self.mock_popen = self.process.return_value.communicate def test_without_helper(self): expected = "%s\n" % self.test_file self.mock_popen.return_value = [expected, ""] result = utils.execute(["ls", self.test_file]) self.assertEqual(result, expected) def test_with_helper(self): expected = "ls %s\n" % self.test_file self.mock_popen.return_value = [expected, ""] self.config(group='AGENT', root_helper='echo') result = utils.execute(["ls", self.test_file], run_as_root=True) self.assertEqual(result, expected) def test_stderr_true(self): expected = "%s\n" % self.test_file self.mock_popen.return_value = [expected, ""] out = utils.execute(["ls", self.test_file], return_stderr=True) self.assertIsInstance(out, tuple) self.assertEqual(out, (expected, "")) def test_check_exit_code(self): self.mock_popen.return_value = ["", ""] stdout = utils.execute(["ls", self.test_file[:-1]], check_exit_code=False) self.assertEqual("", stdout) def test_execute_raises(self): self.mock_popen.side_effect = RuntimeError self.assertRaises(RuntimeError, utils.execute, ["ls", self.test_file[:-1]]) def test_process_input(self): expected = "%s\n" % self.test_file[:-1] self.mock_popen.return_value = [expected, ""] result = utils.execute(["cat"], process_input="%s\n" % self.test_file[:-1]) self.assertEqual(result, expected) def test_with_addl_env(self): expected = "%s\n" % self.test_file self.mock_popen.return_value = [expected, ""] result = utils.execute(["ls", self.test_file], addl_env={'foo': 'bar'}) 
self.assertEqual(result, expected) def test_return_code_log_error_raise_runtime(self): self.mock_popen.return_value = ('', '') self.process.return_value.returncode = 1 with mock.patch.object(utils, 'LOG') as log: self.assertRaises(RuntimeError, utils.execute, ['ls']) self.assertTrue(log.error.called) def test_return_code_log_error_no_raise_runtime(self): self.mock_popen.return_value = ('', '') self.process.return_value.returncode = 1 with mock.patch.object(utils, 'LOG') as log: utils.execute(['ls'], check_exit_code=False) self.assertTrue(log.error.called) def test_return_code_log_debug(self): self.mock_popen.return_value = ('', '') with mock.patch.object(utils, 'LOG') as log: utils.execute(['ls']) self.assertTrue(log.debug.called) def test_return_code_log_error_change_locale(self): ja_output = 'std_out in Japanese' ja_error = 'std_err in Japanese' ja_message_out = oslo_i18n._message.Message(ja_output) ja_message_err = oslo_i18n._message.Message(ja_error) ja_translate_out = oslo_i18n._translate.translate(ja_message_out, 'ja') ja_translate_err = oslo_i18n._translate.translate(ja_message_err, 'ja') self.mock_popen.return_value = (ja_translate_out, ja_translate_err) self.process.return_value.returncode = 1 with mock.patch.object(utils, 'LOG') as log: utils.execute(['ls'], check_exit_code=False) self.assertIn(ja_translate_out, str(log.error.call_args_list)) self.assertIn(ja_translate_err, str(log.error.call_args_list)) def test_return_code_raise_runtime_do_not_log_fail_as_error(self): self.mock_popen.return_value = ('', '') self.process.return_value.returncode = 1 with mock.patch.object(utils, 'LOG') as log: self.assertRaises(utils.ProcessExecutionError, utils.execute, ['ls'], log_fail_as_error=False) self.assertFalse(log.error.called) def test_encode_process_input(self): str_idata = "%s\n" % self.test_file[:-1] str_odata = "%s\n" % self.test_file if six.PY3: bytes_idata = str_idata.encode(encoding='utf-8') bytes_odata = str_odata.encode(encoding='utf-8') self.mock_popen.return_value = [bytes_odata, b''] result = utils.execute(['cat'], process_input=str_idata) self.mock_popen.assert_called_once_with(bytes_idata) self.assertEqual(str_odata, result) else: self.mock_popen.return_value = [str_odata, ''] result = utils.execute(['cat'], process_input=str_idata) self.mock_popen.assert_called_once_with(str_idata) self.assertEqual(str_odata, result) def test_return_str_data(self): str_data = "%s\n" % self.test_file self.mock_popen.return_value = [str_data, ''] result = utils.execute(['ls', self.test_file], return_stderr=True) self.assertEqual((str_data, ''), result) @helpers.requires_py3 def test_surrogateescape_in_decoding_out_data(self): bytes_err_data = b'\xed\xa0\xbd' err_data = bytes_err_data.decode('utf-8', 'surrogateescape') out_data = "%s\n" % self.test_file bytes_out_data = out_data.encode(encoding='utf-8') self.mock_popen.return_value = [bytes_out_data, bytes_err_data] result = utils.execute(['ls', self.test_file], return_stderr=True) self.assertEqual((out_data, err_data), result) class AgentUtilsExecuteEncodeTest(base.BaseTestCase): def setUp(self): super(AgentUtilsExecuteEncodeTest, self).setUp() self.test_file = self.get_temp_file_path('test_execute.tmp') open(self.test_file, 'w').close() def test_decode_return_data(self): str_data = "%s\n" % self.test_file result = utils.execute(['ls', self.test_file], return_stderr=True) self.assertEqual((str_data, ''), result) class AgentUtilsGetInterfaceMAC(base.BaseTestCase): def test_get_interface_mac(self): expect_val = '01:02:03:04:05:06' with 
mock.patch('fcntl.ioctl') as ioctl: ioctl.return_value = ''.join(['\x00' * 18, '\x01\x02\x03\x04\x05\x06', '\x00' * 232]) actual_val = utils.get_interface_mac('eth0') self.assertEqual(actual_val, expect_val) class AgentUtilsReplaceFile(base.BaseTestCase): def _test_replace_file_helper(self, explicit_perms=None): # make file to replace with mock.patch('tempfile.NamedTemporaryFile') as ntf: ntf.return_value.name = '/baz' with mock.patch('os.chmod') as chmod: with mock.patch('os.rename') as rename: if explicit_perms is None: expected_perms = 0o644 utils.replace_file('/foo', 'bar') else: expected_perms = explicit_perms utils.replace_file('/foo', 'bar', explicit_perms) expected = [mock.call('w+', dir='/', delete=False), mock.call().write('bar'), mock.call().close()] ntf.assert_has_calls(expected) chmod.assert_called_once_with('/baz', expected_perms) rename.assert_called_once_with('/baz', '/foo') def test_replace_file_with_default_perms(self): self._test_replace_file_helper() def test_replace_file_with_0o600_perms(self): self._test_replace_file_helper(0o600) class TestFindChildPids(base.BaseTestCase): def test_returns_empty_list_for_exit_code_1(self): with mock.patch.object(utils, 'execute', side_effect=utils.ProcessExecutionError( '', returncode=1)): self.assertEqual([], utils.find_child_pids(-1)) def test_returns_empty_list_for_no_output(self): with mock.patch.object(utils, 'execute', return_value=''): self.assertEqual([], utils.find_child_pids(-1)) def test_returns_list_of_child_process_ids_for_good_ouput(self): with mock.patch.object(utils, 'execute', return_value=' 123 \n 185\n'): self.assertEqual(utils.find_child_pids(-1), ['123', '185']) def test_raises_unknown_exception(self): with testtools.ExpectedException(RuntimeError): with mock.patch.object(utils, 'execute', side_effect=RuntimeError()): utils.find_child_pids(-1) class TestGetRoothelperChildPid(base.BaseTestCase): def _test_get_root_helper_child_pid(self, expected=_marker, run_as_root=False, pids=None, cmds=None): def _find_child_pids(x): if not pids: return [] pids.pop(0) return pids mock_pid = object() pid_invoked_with_cmdline = {} if cmds: pid_invoked_with_cmdline['side_effect'] = cmds else: pid_invoked_with_cmdline['return_value'] = False with mock.patch.object(utils, 'find_child_pids', side_effect=_find_child_pids), \ mock.patch.object(utils, 'pid_invoked_with_cmdline', **pid_invoked_with_cmdline): actual = utils.get_root_helper_child_pid( mock_pid, mock.ANY, run_as_root) if expected is _marker: expected = str(mock_pid) self.assertEqual(expected, actual) def test_returns_process_pid_not_root(self): self._test_get_root_helper_child_pid() def test_returns_child_pid_as_root(self): self._test_get_root_helper_child_pid(expected='2', pids=['1', '2'], run_as_root=True, cmds=[True]) def test_returns_last_child_pid_as_root(self): self._test_get_root_helper_child_pid(expected='3', pids=['1', '2', '3'], run_as_root=True, cmds=[False, True]) def test_returns_first_non_root_helper_child(self): self._test_get_root_helper_child_pid( expected='2', pids=['1', '2', '3'], run_as_root=True, cmds=[True, False]) def test_returns_none_as_root(self): self._test_get_root_helper_child_pid(expected=None, run_as_root=True) class TestPathUtilities(base.BaseTestCase): def test_remove_abs_path(self): self.assertEqual(['ping', '8.8.8.8'], utils.remove_abs_path(['/usr/bin/ping', '8.8.8.8'])) def test_cmd_matches_expected_matches_abs_path(self): cmd = ['/bar/../foo'] self.assertTrue(utils.cmd_matches_expected(cmd, cmd)) def 
test_cmd_matches_expected_matches_script(self): self.assertTrue(utils.cmd_matches_expected(['python', 'script'], ['script'])) def test_cmd_matches_expected_doesnt_match(self): self.assertFalse(utils.cmd_matches_expected('foo', 'bar')) class FakeUser(object): def __init__(self, name): self.pw_name = name class FakeGroup(object): def __init__(self, name): self.gr_name = name class TestBaseOSUtils(base.BaseTestCase): EUID = 123 EUNAME = 'user' EGID = 456 EGNAME = 'group' @mock.patch('os.geteuid', return_value=EUID) @mock.patch('pwd.getpwuid', return_value=FakeUser(EUNAME)) def test_is_effective_user_id(self, getpwuid, geteuid): self.assertTrue(utils.is_effective_user(self.EUID)) geteuid.assert_called_once_with() self.assertFalse(getpwuid.called) @mock.patch('os.geteuid', return_value=EUID) @mock.patch('pwd.getpwuid', return_value=FakeUser(EUNAME)) def test_is_effective_user_str_id(self, getpwuid, geteuid): self.assertTrue(utils.is_effective_user(str(self.EUID))) geteuid.assert_called_once_with() self.assertFalse(getpwuid.called) @mock.patch('os.geteuid', return_value=EUID) @mock.patch('pwd.getpwuid', return_value=FakeUser(EUNAME)) def test_is_effective_user_name(self, getpwuid, geteuid): self.assertTrue(utils.is_effective_user(self.EUNAME)) geteuid.assert_called_once_with() getpwuid.assert_called_once_with(self.EUID) @mock.patch('os.geteuid', return_value=EUID) @mock.patch('pwd.getpwuid', return_value=FakeUser(EUNAME)) def test_is_not_effective_user(self, getpwuid, geteuid): self.assertFalse(utils.is_effective_user('wrong')) geteuid.assert_called_once_with() getpwuid.assert_called_once_with(self.EUID) @mock.patch('os.getegid', return_value=EGID) @mock.patch('grp.getgrgid', return_value=FakeGroup(EGNAME)) def test_is_effective_group_id(self, getgrgid, getegid): self.assertTrue(utils.is_effective_group(self.EGID)) getegid.assert_called_once_with() self.assertFalse(getgrgid.called) @mock.patch('os.getegid', return_value=EGID) @mock.patch('grp.getgrgid', return_value=FakeGroup(EGNAME)) def test_is_effective_group_str_id(self, getgrgid, getegid): self.assertTrue(utils.is_effective_group(str(self.EGID))) getegid.assert_called_once_with() self.assertFalse(getgrgid.called) @mock.patch('os.getegid', return_value=EGID) @mock.patch('grp.getgrgid', return_value=FakeGroup(EGNAME)) def test_is_effective_group_name(self, getgrgid, getegid): self.assertTrue(utils.is_effective_group(self.EGNAME)) getegid.assert_called_once_with() getgrgid.assert_called_once_with(self.EGID) @mock.patch('os.getegid', return_value=EGID) @mock.patch('grp.getgrgid', return_value=FakeGroup(EGNAME)) def test_is_not_effective_group(self, getgrgid, getegid): self.assertFalse(utils.is_effective_group('wrong')) getegid.assert_called_once_with() getgrgid.assert_called_once_with(self.EGID) class TestUnixDomainHttpConnection(base.BaseTestCase): def test_connect(self): with mock.patch.object(utils, 'cfg') as cfg: cfg.CONF.metadata_proxy_socket = '/the/path' with mock.patch('socket.socket') as socket_create: conn = utils.UnixDomainHTTPConnection('169.254.169.254', timeout=3) conn.connect() socket_create.assert_has_calls([ mock.call(socket.AF_UNIX, socket.SOCK_STREAM), mock.call().settimeout(3), mock.call().connect('/the/path')] ) self.assertEqual(conn.timeout, 3) class TestUnixDomainHttpProtocol(base.BaseTestCase): def test_init_empty_client(self): u = utils.UnixDomainHttpProtocol(mock.Mock(), '', mock.Mock()) self.assertEqual(u.client_address, ('', 0)) def test_init_with_client(self): u = utils.UnixDomainHttpProtocol(mock.Mock(), 'foo', 
class TestUnixDomainHttpProtocol(base.BaseTestCase):
    def test_init_empty_client(self):
        u = utils.UnixDomainHttpProtocol(mock.Mock(), '', mock.Mock())
        self.assertEqual(u.client_address, ('', 0))

    def test_init_with_client(self):
        u = utils.UnixDomainHttpProtocol(mock.Mock(), 'foo', mock.Mock())
        self.assertEqual(u.client_address, 'foo')


class TestUnixDomainWSGIServer(base.BaseTestCase):
    def setUp(self):
        super(TestUnixDomainWSGIServer, self).setUp()
        self.eventlet_p = mock.patch.object(utils, 'eventlet')
        self.eventlet = self.eventlet_p.start()

    def test_start(self):
        self.server = utils.UnixDomainWSGIServer('test')
        mock_app = mock.Mock()
        with mock.patch.object(self.server, '_launch') as launcher:
            self.server.start(mock_app, '/the/path',
                              workers=5, backlog=128)
            self.eventlet.assert_has_calls([
                mock.call.listen('/the/path',
                                 family=socket.AF_UNIX,
                                 backlog=128)]
            )
            launcher.assert_called_once_with(mock_app, workers=5)

    def test_run(self):
        self.server = utils.UnixDomainWSGIServer('test')
        self.server._run('app', 'sock')

        self.eventlet.wsgi.server.assert_called_once_with(
            'sock',
            'app',
            protocol=utils.UnixDomainHttpProtocol,
            log=mock.ANY,
            max_size=self.server.num_threads)

    def test_num_threads(self):
        num_threads = 8
        self.server = utils.UnixDomainWSGIServer('test',
                                                 num_threads=num_threads)
        self.server._run('app', 'sock')

        self.eventlet.wsgi.server.assert_called_once_with(
            'sock',
            'app',
            protocol=utils.UnixDomainHttpProtocol,
            log=mock.ANY,
            max_size=num_threads)
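# A sketch of the serving side the TestUnixDomainWSGIServer cases pin
# down, assuming eventlet's public listen()/wsgi.server() signatures;
# the helper name is illustrative.  Binding an AF_UNIX listener and
# handing it to the WSGI loop is essentially all the production class
# adds on top of eventlet, plus max_size to cap concurrent greenthreads.
def _sketch_serve_unix_wsgi(application, file_socket, backlog=128,
                            max_size=1024):
    import eventlet
    import eventlet.wsgi

    sock = eventlet.listen(file_socket, family=socket.AF_UNIX,
                           backlog=backlog)
    eventlet.wsgi.server(sock, application, max_size=max_size)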
neutron-8.4.0/neutron/tests/unit/agent/linux/test_daemon.py0000664000567000056710000002664513044372760025315 0ustar jenkinsjenkins00000000000000#
# Copyright 2012 New Dream Network, LLC (DreamHost)
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import logging
from logging import handlers
import os
import sys

import mock
import testtools

from neutron.agent.linux import daemon
from neutron.common import exceptions
from neutron.tests import base
from neutron.tests import tools

FAKE_FD = 8


class FakeEntry(object):
    def __init__(self, name, value):
        setattr(self, name, value)


class TestUnwatchLog(base.BaseTestCase):

    def test_unwatch_log(self):
        stream_handler = logging.StreamHandler()
        logger = logging.Logger('fake')
        logger.addHandler(stream_handler)
        logger.addHandler(handlers.WatchedFileHandler('/tmp/filename1'))

        with mock.patch('logging.getLogger', return_value=logger):
            daemon.unwatch_log()
            self.assertEqual(2, len(logger.handlers))
            logger.handlers.remove(stream_handler)
            observed = logger.handlers[0]
            self.assertEqual(logging.FileHandler, type(observed))
            self.assertEqual('/tmp/filename1', observed.baseFilename)


class TestPrivileges(base.BaseTestCase):
    def test_setuid_with_name(self):
        with mock.patch('pwd.getpwnam',
                        return_value=FakeEntry('pw_uid', 123)):
            with mock.patch('os.setuid') as setuid_mock:
                daemon.setuid('user')
                setuid_mock.assert_called_once_with(123)

    def test_setuid_with_id(self):
        with mock.patch('os.setuid') as setuid_mock:
            daemon.setuid('321')
            setuid_mock.assert_called_once_with(321)

    def test_setuid_fails(self):
        with mock.patch('os.setuid', side_effect=OSError()):
            with mock.patch.object(daemon.LOG, 'critical') as log_critical:
                self.assertRaises(exceptions.FailToDropPrivilegesExit,
                                  daemon.setuid, '321')
                log_critical.assert_called_once_with(mock.ANY)

    def test_setgid_with_name(self):
        with mock.patch('grp.getgrnam',
                        return_value=FakeEntry('gr_gid', 123)):
            with mock.patch('os.setgid') as setgid_mock:
                daemon.setgid('group')
                setgid_mock.assert_called_once_with(123)

    def test_setgid_with_id(self):
        with mock.patch('os.setgid') as setgid_mock:
            daemon.setgid('321')
            setgid_mock.assert_called_once_with(321)

    def test_setgid_fails(self):
        with mock.patch('os.setgid', side_effect=OSError()):
            with mock.patch.object(daemon.LOG, 'critical') as log_critical:
                self.assertRaises(exceptions.FailToDropPrivilegesExit,
                                  daemon.setgid, '321')
                log_critical.assert_called_once_with(mock.ANY)

    @mock.patch.object(os, 'setgroups')
    @mock.patch.object(daemon, 'setgid')
    @mock.patch.object(daemon, 'setuid')
    def test_drop_no_privileges(self, mock_setuid, mock_setgid,
                                mock_setgroups):
        daemon.drop_privileges()
        for cursor in (mock_setuid, mock_setgid, mock_setgroups):
            self.assertFalse(cursor.called)

    @mock.patch.object(os, 'geteuid', return_value=0)
    @mock.patch.object(os, 'setgroups')
    @mock.patch.object(daemon, 'setgid')
    @mock.patch.object(daemon, 'setuid')
    def _test_drop_privileges(self, setuid, setgid, setgroups,
                              geteuid, user=None, group=None):
        daemon.drop_privileges(user=user, group=group)
        if user:
            setuid.assert_called_once_with(user)
        else:
            self.assertFalse(setuid.called)
        if group:
            setgroups.assert_called_once_with([])
            setgid.assert_called_once_with(group)
        else:
            self.assertFalse(setgroups.called)
            self.assertFalse(setgid.called)

    def test_drop_user_privileges(self):
        self._test_drop_privileges(user='user')

    def test_drop_uid_privileges(self):
        self._test_drop_privileges(user='321')

    def test_drop_group_privileges(self):
        self._test_drop_privileges(group='group')

    def test_drop_gid_privileges(self):
        self._test_drop_privileges(group='654')

    def test_drop_privileges_without_root_permissions(self):
        with mock.patch('os.geteuid', return_value=1):
            with mock.patch.object(daemon.LOG, 'critical') as log_critical:
                self.assertRaises(exceptions.FailToDropPrivilegesExit,
                                  daemon.drop_privileges, 'user')
                log_critical.assert_called_once_with(mock.ANY)
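# The ordering encoded by the tests above is the classic privilege-drop
# dance: supplementary groups and the gid must be changed while the
# process is still root, before setuid() discards the right to do so.
# A minimal standard-library sketch (helper name illustrative, error
# handling elided):
def _sketch_drop_privileges(uid=None, gid=None):
    if gid is not None:
        os.setgroups([])  # clear supplementary groups first
        os.setgid(gid)    # then the primary group, while still root
    if uid is not None:
        os.setuid(uid)    # last: after this, the calls above would fail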
class TestPidfile(base.BaseTestCase):
    def setUp(self):
        super(TestPidfile, self).setUp()
        self.os_p = mock.patch.object(daemon, 'os')
        self.os = self.os_p.start()
        self.os.open.return_value = FAKE_FD

        self.fcntl_p = mock.patch.object(daemon, 'fcntl')
        self.fcntl = self.fcntl_p.start()
        self.fcntl.flock.return_value = 0

    def test_init(self):
        self.os.O_CREAT = os.O_CREAT
        self.os.O_RDWR = os.O_RDWR

        daemon.Pidfile('thefile', 'python')
        self.os.open.assert_called_once_with('thefile',
                                             os.O_CREAT | os.O_RDWR)
        self.fcntl.flock.assert_called_once_with(FAKE_FD,
                                                 self.fcntl.LOCK_EX |
                                                 self.fcntl.LOCK_NB)

    def test_init_open_fail(self):
        self.os.open.side_effect = IOError

        with mock.patch.object(daemon.sys, 'stderr'):
            with testtools.ExpectedException(SystemExit):
                daemon.Pidfile('thefile', 'python')
        # The raised SystemExit is the assertion here; the original
        # follow-up called assert_has_calls() on the real ``sys`` module
        # (not a mock) and sat after the raising statement, so it could
        # never execute.

    def test_unlock(self):
        p = daemon.Pidfile('thefile', 'python')
        p.unlock()
        self.fcntl.flock.assert_has_calls([
            mock.call(FAKE_FD, self.fcntl.LOCK_EX | self.fcntl.LOCK_NB),
            mock.call(FAKE_FD, self.fcntl.LOCK_UN)]
        )

    def test_write(self):
        p = daemon.Pidfile('thefile', 'python')
        p.write(34)

        self.os.assert_has_calls([
            mock.call.ftruncate(FAKE_FD, 0),
            mock.call.write(FAKE_FD, '34'),
            mock.call.fsync(FAKE_FD)]
        )

    def test_read(self):
        self.os.read.return_value = '34'
        p = daemon.Pidfile('thefile', 'python')

        self.assertEqual(34, p.read())

    def test_is_running(self):
        mock_open = self.useFixture(
            tools.OpenFixture('/proc/34/cmdline', 'python')).mock_open
        p = daemon.Pidfile('thefile', 'python')

        with mock.patch.object(p, 'read') as read:
            read.return_value = 34
            self.assertTrue(p.is_running())

        mock_open.assert_called_once_with('/proc/34/cmdline', 'r')

    def test_is_running_uuid_true(self):
        mock_open = self.useFixture(
            tools.OpenFixture('/proc/34/cmdline', 'python 1234')).mock_open
        p = daemon.Pidfile('thefile', 'python', uuid='1234')

        with mock.patch.object(p, 'read') as read:
            read.return_value = 34
            self.assertTrue(p.is_running())

        mock_open.assert_called_once_with('/proc/34/cmdline', 'r')

    def test_is_running_uuid_false(self):
        mock_open = self.useFixture(
            tools.OpenFixture('/proc/34/cmdline', 'python 1234')).mock_open
        p = daemon.Pidfile('thefile', 'python', uuid='6789')

        with mock.patch.object(p, 'read') as read:
            read.return_value = 34
            self.assertFalse(p.is_running())

        mock_open.assert_called_once_with('/proc/34/cmdline', 'r')
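# TestPidfile asserts the usual advisory-lock recipe for "only one
# daemon per pidfile".  A standard-library sketch (helper name
# illustrative): LOCK_NB makes a second locker fail immediately with
# IOError instead of blocking, which is what turns the lock into a
# liveness check for the previous daemon.
import fcntl as _fcntl


def _sketch_open_locked_pidfile(path):
    fd = os.open(path, os.O_CREAT | os.O_RDWR)
    _fcntl.flock(fd, _fcntl.LOCK_EX | _fcntl.LOCK_NB)
    return fd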
class TestDaemon(base.BaseTestCase):
    def setUp(self):
        super(TestDaemon, self).setUp()
        self.os_p = mock.patch.object(daemon, 'os')
        self.os = self.os_p.start()

        self.pidfile_p = mock.patch.object(daemon, 'Pidfile')
        self.pidfile = self.pidfile_p.start()

    def test_init(self):
        d = daemon.Daemon('pidfile')
        self.assertEqual(d.procname, 'python')

    def test_init_nopidfile(self):
        d = daemon.Daemon(pidfile=None)
        self.assertEqual(d.procname, 'python')
        self.assertFalse(self.pidfile.called)

    def test_fork_parent(self):
        self.os.fork.return_value = 1
        d = daemon.Daemon('pidfile')
        d._fork()
        self.os._exit.assert_called_once_with(mock.ANY)

    def test_fork_child(self):
        self.os.fork.return_value = 0
        d = daemon.Daemon('pidfile')
        self.assertIsNone(d._fork())

    def test_fork_error(self):
        self.os.fork.side_effect = OSError(1)
        with mock.patch.object(daemon.sys, 'stderr'):
            with testtools.ExpectedException(SystemExit):
                d = daemon.Daemon('pidfile', 'stdin')
                d._fork()

    def test_daemonize(self):
        self.os.devnull = '/dev/null'

        d = daemon.Daemon('pidfile')
        with mock.patch.object(d, '_fork') as fork:
            with mock.patch.object(daemon, 'atexit') as atexit:
                with mock.patch.object(daemon, 'signal') as signal:
                    signal.SIGTERM = 15
                    with mock.patch.object(daemon, 'sys') as sys:
                        sys.stdin.fileno.return_value = 0
                        sys.stdout.fileno.return_value = 1
                        sys.stderr.fileno.return_value = 2
                        d.daemonize()
                    signal.signal.assert_called_once_with(15,
                                                          d.handle_sigterm)
                atexit.register.assert_called_once_with(d.delete_pid)

        fork.assert_has_calls([mock.call(), mock.call()])

        self.os.assert_has_calls([
            mock.call.chdir('/'),
            mock.call.setsid(),
            mock.call.umask(0),
            mock.call.dup2(mock.ANY, 0),
            mock.call.dup2(mock.ANY, 1),
            mock.call.dup2(mock.ANY, 2),
            mock.call.getpid()]
        )

    def test_delete_pid(self):
        self.pidfile.return_value.__str__.return_value = 'pidfile'
        d = daemon.Daemon('pidfile')
        d.delete_pid()
        self.os.remove.assert_called_once_with('pidfile')

    def test_handle_sigterm(self):
        d = daemon.Daemon('pidfile')
        with mock.patch.object(daemon, 'sys') as sys:
            d.handle_sigterm(15, 1234)
            sys.exit.assert_called_once_with(0)

    def test_start(self):
        self.pidfile.return_value.is_running.return_value = False
        d = daemon.Daemon('pidfile')

        with mock.patch.object(d, 'daemonize') as daemonize:
            with mock.patch.object(d, 'run') as run:
                d.start()
                run.assert_called_once_with()
                daemonize.assert_called_once_with()

    def test_start_running(self):
        self.pidfile.return_value.is_running.return_value = True
        d = daemon.Daemon('pidfile')

        with mock.patch.object(daemon.sys, 'stderr'):
            with mock.patch.object(d, 'daemonize') as daemonize:
                with testtools.ExpectedException(SystemExit):
                    d.start()
                self.assertFalse(daemonize.called)
neutron-8.4.0/neutron/tests/unit/agent/linux/test_ip_monitor.py0000664000567000056710000000301013044372736026221 0ustar jenkinsjenkins00000000000000# Copyright 2015 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from neutron.agent.linux import ip_monitor
from neutron.tests import base


class TestIPMonitorEvent(base.BaseTestCase):
    def test_from_text_parses_added_line(self):
        event = ip_monitor.IPMonitorEvent.from_text(
            '3: wlp3s0 inet 192.168.3.59/24 brd 192.168.3.255 '
            'scope global dynamic wlp3s0\\ valid_lft 300sec '
            'preferred_lft 300sec')
        self.assertEqual('wlp3s0', event.interface)
        self.assertTrue(event.added)
        self.assertEqual('192.168.3.59/24', event.cidr)

    def test_from_text_parses_deleted_line(self):
        event = ip_monitor.IPMonitorEvent.from_text(
            'Deleted 1: lo inet 127.0.0.2/8 scope host secondary lo\\'
            ' valid_lft forever preferred_lft forever')
        self.assertEqual('lo', event.interface)
        self.assertFalse(event.added)
        self.assertEqual('127.0.0.2/8', event.cidr)
neutron-8.4.0/neutron/tests/unit/agent/linux/test_iptables_firewall.py0000664000567000056710000024361313044372760027546 0ustar jenkinsjenkins00000000000000# Copyright 2012, Nachi Ueno, NTT MCL, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import copy import mock from oslo_config import cfg import six import testtools from neutron.agent.common import config as a_cfg from neutron.agent.linux import ipset_manager from neutron.agent.linux import iptables_comments as ic from neutron.agent.linux import iptables_firewall from neutron.agent import securitygroups_rpc as sg_cfg from neutron.common import constants from neutron.common import exceptions as n_exc from neutron.common import utils from neutron.tests import base from neutron.tests.unit.api.v2 import test_base _uuid = test_base._uuid #TODO(mangelajo): replace all 'IPv4', 'IPv6' to constants FAKE_PREFIX = {'IPv4': '10.0.0.0/24', 'IPv6': 'fe80::/48'} FAKE_IP = {'IPv4': '10.0.0.1', 'IPv6': 'fe80::1'} #TODO(mangelajo): replace all '*_sgid' strings for the constants FAKE_SGID = 'fake_sgid' OTHER_SGID = 'other_sgid' _IPv6 = constants.IPv6 _IPv4 = constants.IPv4 RAW_TABLE_OUTPUT = """ # Generated by iptables-save v1.4.21 on Fri Jul 31 16:13:28 2015 *raw :PREROUTING ACCEPT [11561:3470468] :OUTPUT ACCEPT [11504:4064044] :neutron-openvswi-OUTPUT - [0:0] :neutron-openvswi-PREROUTING - [0:0] -A PREROUTING -j neutron-openvswi-PREROUTING -A OUTPUT -j neutron-openvswi-OUTPUT -A neutron-openvswi-PREROUTING -m physdev --physdev-in qvbe804433b-61 -j CT --zone 1 -A neutron-openvswi-PREROUTING -m physdev --physdev-in tape804433b-61 -j CT --zone 1 -A neutron-openvswi-PREROUTING -m physdev --physdev-in qvb95c24827-02 -j CT --zone 2 -A neutron-openvswi-PREROUTING -m physdev --physdev-in tap95c24827-02 -j CT --zone 2 -A neutron-openvswi-PREROUTING -m physdev --physdev-in qvb61634509-31 -j CT --zone 2 -A neutron-openvswi-PREROUTING -m physdev --physdev-in tap61634509-31 -j CT --zone 2 -A neutron-openvswi-PREROUTING -m physdev --physdev-in qvb8f46cf18-12 -j CT --zone 9 -A neutron-openvswi-PREROUTING -m physdev --physdev-in tap8f46cf18-12 -j CT --zone 9 COMMIT # Completed on Fri Jul 31 16:13:28 2015 """ # noqa class BaseIptablesFirewallTestCase(base.BaseTestCase): def setUp(self): super(BaseIptablesFirewallTestCase, self).setUp() cfg.CONF.register_opts(a_cfg.ROOT_HELPER_OPTS, 'AGENT') cfg.CONF.register_opts(sg_cfg.security_group_opts, 'SECURITYGROUP') cfg.CONF.set_override('comment_iptables_rules', False, 'AGENT') self.utils_exec_p = mock.patch( 'neutron.agent.linux.utils.execute') self.utils_exec = self.utils_exec_p.start() self.iptables_cls_p = mock.patch( 'neutron.agent.linux.iptables_manager.IptablesManager') iptables_cls = self.iptables_cls_p.start() self.iptables_inst = mock.Mock() self.v4filter_inst = mock.Mock() self.v6filter_inst = mock.Mock() self.iptables_inst.ipv4 = {'filter': self.v4filter_inst, 'raw': self.v4filter_inst } self.iptables_inst.ipv6 = {'filter': self.v6filter_inst, 'raw': self.v6filter_inst } iptables_cls.return_value = self.iptables_inst self.iptables_inst.get_rules_for_table.return_value = ( RAW_TABLE_OUTPUT.splitlines()) self.firewall = iptables_firewall.IptablesFirewallDriver() self.firewall.iptables = self.iptables_inst class IptablesFirewallTestCase(BaseIptablesFirewallTestCase): def _fake_port(self): return {'device': 'tapfake_dev', 'mac_address': 'ff:ff:ff:ff:ff:ff', 
'network_id': 'fake_net', 'fixed_ips': [FAKE_IP['IPv4'], FAKE_IP['IPv6']]} def test_prepare_port_filter_with_no_sg(self): port = self._fake_port() self.firewall.prepare_port_filter(port) calls = [mock.call.add_chain('sg-fallback'), mock.call.add_rule( 'sg-fallback', '-j DROP', comment=ic.UNMATCH_DROP), mock.call.remove_chain('sg-chain'), mock.call.add_chain('sg-chain'), mock.call.add_chain('ifake_dev'), mock.call.add_rule('FORWARD', '-m physdev --physdev-out tapfake_dev ' '--physdev-is-bridged ' '-j $sg-chain', comment=ic.VM_INT_SG), mock.call.add_rule('sg-chain', '-m physdev --physdev-out tapfake_dev ' '--physdev-is-bridged ' '-j $ifake_dev', comment=ic.SG_TO_VM_SG), mock.call.add_rule( 'ifake_dev', '-m state --state RELATED,ESTABLISHED -j RETURN', comment=None), mock.call.add_rule( 'ifake_dev', '-m state --state INVALID -j DROP', comment=None), mock.call.add_rule( 'ifake_dev', '-j $sg-fallback', comment=None), mock.call.add_chain('ofake_dev'), mock.call.add_rule('FORWARD', '-m physdev --physdev-in tapfake_dev ' '--physdev-is-bridged ' '-j $sg-chain', comment=ic.VM_INT_SG), mock.call.add_rule('sg-chain', '-m physdev --physdev-in tapfake_dev ' '--physdev-is-bridged -j $ofake_dev', comment=ic.SG_TO_VM_SG), mock.call.add_rule('INPUT', '-m physdev --physdev-in tapfake_dev ' '--physdev-is-bridged -j $ofake_dev', comment=ic.INPUT_TO_SG), mock.call.add_chain('sfake_dev'), mock.call.add_rule( 'sfake_dev', '-s 10.0.0.1/32 -m mac --mac-source FF:FF:FF:FF:FF:FF ' '-j RETURN', comment=ic.PAIR_ALLOW), mock.call.add_rule( 'sfake_dev', '-j DROP', comment=ic.PAIR_DROP), mock.call.add_rule( 'ofake_dev', '-s 0.0.0.0/32 -d 255.255.255.255/32 -p udp -m udp ' '--sport 68 --dport 67 -j RETURN', comment=None), mock.call.add_rule('ofake_dev', '-j $sfake_dev', comment=None), mock.call.add_rule( 'ofake_dev', '-p udp -m udp --sport 68 --dport 67 -j RETURN', comment=None), mock.call.add_rule( 'ofake_dev', '-p udp -m udp --sport 67 -m udp --dport 68 -j DROP', comment=None), mock.call.add_rule( 'ofake_dev', '-m state --state RELATED,ESTABLISHED -j RETURN', comment=None), mock.call.add_rule( 'ofake_dev', '-m state --state INVALID -j DROP', comment=None), mock.call.add_rule( 'ofake_dev', '-j $sg-fallback', comment=None), mock.call.add_rule('sg-chain', '-j ACCEPT')] self.v4filter_inst.assert_has_calls(calls) def test_filter_ipv4_ingress(self): rule = {'ethertype': 'IPv4', 'direction': 'ingress'} ingress = mock.call.add_rule('ifake_dev', '-j RETURN', comment=None) egress = None self._test_prepare_port_filter(rule, ingress, egress) def test_filter_ipv4_ingress_prefix(self): prefix = FAKE_PREFIX['IPv4'] rule = {'ethertype': 'IPv4', 'direction': 'ingress', 'source_ip_prefix': prefix} ingress = mock.call.add_rule( 'ifake_dev', '-s %s -j RETURN' % prefix, comment=None) egress = None self._test_prepare_port_filter(rule, ingress, egress) def test_filter_ipv4_ingress_tcp(self): rule = {'ethertype': 'IPv4', 'direction': 'ingress', 'protocol': 'tcp'} ingress = mock.call.add_rule( 'ifake_dev', '-p tcp -j RETURN', comment=None) egress = None self._test_prepare_port_filter(rule, ingress, egress) def test_filter_ipv4_ingress_tcp_prefix(self): prefix = FAKE_PREFIX['IPv4'] rule = {'ethertype': 'IPv4', 'direction': 'ingress', 'protocol': 'tcp', 'source_ip_prefix': prefix} ingress = mock.call.add_rule('ifake_dev', '-s %s -p tcp -j RETURN' % prefix, comment=None) egress = None self._test_prepare_port_filter(rule, ingress, egress) def test_filter_ipv4_ingress_icmp(self): rule = {'ethertype': 'IPv4', 'direction': 'ingress', 'protocol': 
'icmp'} ingress = mock.call.add_rule('ifake_dev', '-p icmp -j RETURN', comment=None) egress = None self._test_prepare_port_filter(rule, ingress, egress) def test_filter_ipv4_ingress_icmp_prefix(self): prefix = FAKE_PREFIX['IPv4'] rule = {'ethertype': 'IPv4', 'direction': 'ingress', 'protocol': 'icmp', 'source_ip_prefix': prefix} ingress = mock.call.add_rule( 'ifake_dev', '-s %s -p icmp -j RETURN' % prefix, comment=None) egress = None self._test_prepare_port_filter(rule, ingress, egress) def test_filter_ipv4_ingress_tcp_port(self): rule = {'ethertype': 'IPv4', 'direction': 'ingress', 'protocol': 'tcp', 'port_range_min': 10, 'port_range_max': 10} ingress = mock.call.add_rule('ifake_dev', '-p tcp -m tcp --dport 10 -j RETURN', comment=None) egress = None self._test_prepare_port_filter(rule, ingress, egress) def test_filter_ipv4_ingress_tcp_mport(self): rule = {'ethertype': 'IPv4', 'direction': 'ingress', 'protocol': 'tcp', 'port_range_min': 10, 'port_range_max': 100} ingress = mock.call.add_rule( 'ifake_dev', '-p tcp -m tcp -m multiport --dports 10:100 -j RETURN', comment=None) egress = None self._test_prepare_port_filter(rule, ingress, egress) def test_filter_ipv4_ingress_tcp_mport_prefix(self): prefix = FAKE_PREFIX['IPv4'] rule = {'ethertype': 'IPv4', 'direction': 'ingress', 'protocol': 'tcp', 'port_range_min': 10, 'port_range_max': 100, 'source_ip_prefix': prefix} ingress = mock.call.add_rule( 'ifake_dev', '-s %s -p tcp -m tcp -m multiport --dports 10:100 ' '-j RETURN' % prefix, comment=None) egress = None self._test_prepare_port_filter(rule, ingress, egress) def test_filter_ipv4_ingress_udp(self): rule = {'ethertype': 'IPv4', 'direction': 'ingress', 'protocol': 'udp'} ingress = mock.call.add_rule( 'ifake_dev', '-p udp -j RETURN', comment=None) egress = None self._test_prepare_port_filter(rule, ingress, egress) def test_filter_ipv4_ingress_udp_prefix(self): prefix = FAKE_PREFIX['IPv4'] rule = {'ethertype': 'IPv4', 'direction': 'ingress', 'protocol': 'udp', 'source_ip_prefix': prefix} ingress = mock.call.add_rule('ifake_dev', '-s %s -p udp -j RETURN' % prefix, comment=None) egress = None self._test_prepare_port_filter(rule, ingress, egress) def test_filter_ipv4_ingress_udp_port(self): rule = {'ethertype': 'IPv4', 'direction': 'ingress', 'protocol': 'udp', 'port_range_min': 10, 'port_range_max': 10} ingress = mock.call.add_rule('ifake_dev', '-p udp -m udp --dport 10 -j RETURN', comment=None) egress = None self._test_prepare_port_filter(rule, ingress, egress) def test_filter_ipv4_ingress_udp_mport(self): rule = {'ethertype': 'IPv4', 'direction': 'ingress', 'protocol': 'udp', 'port_range_min': 10, 'port_range_max': 100} ingress = mock.call.add_rule( 'ifake_dev', '-p udp -m udp -m multiport --dports 10:100 -j RETURN', comment=None) egress = None self._test_prepare_port_filter(rule, ingress, egress) def test_filter_ipv4_ingress_udp_mport_prefix(self): prefix = FAKE_PREFIX['IPv4'] rule = {'ethertype': 'IPv4', 'direction': 'ingress', 'protocol': 'udp', 'port_range_min': 10, 'port_range_max': 100, 'source_ip_prefix': prefix} ingress = mock.call.add_rule( 'ifake_dev', '-s %s -p udp -m udp -m multiport --dports 10:100 ' '-j RETURN' % prefix, comment=None) egress = None self._test_prepare_port_filter(rule, ingress, egress) def test_filter_ipv4_egress(self): rule = {'ethertype': 'IPv4', 'direction': 'egress'} egress = mock.call.add_rule('ofake_dev', '-j RETURN', comment=None) ingress = None self._test_prepare_port_filter(rule, ingress, egress) def test_filter_ipv4_egress_prefix(self): prefix = 
FAKE_PREFIX['IPv4'] rule = {'ethertype': 'IPv4', 'direction': 'egress', 'source_ip_prefix': prefix} egress = mock.call.add_rule( 'ofake_dev', '-s %s -j RETURN' % prefix, comment=None) ingress = None self._test_prepare_port_filter(rule, ingress, egress) def test_filter_ipv4_egress_tcp(self): rule = {'ethertype': 'IPv4', 'direction': 'egress', 'protocol': 'tcp'} egress = mock.call.add_rule( 'ofake_dev', '-p tcp -j RETURN', comment=None) ingress = None self._test_prepare_port_filter(rule, ingress, egress) def test_filter_ipv4_egress_tcp_prefix(self): prefix = FAKE_PREFIX['IPv4'] rule = {'ethertype': 'IPv4', 'direction': 'egress', 'protocol': 'tcp', 'source_ip_prefix': prefix} egress = mock.call.add_rule('ofake_dev', '-s %s -p tcp -j RETURN' % prefix, comment=None) ingress = None self._test_prepare_port_filter(rule, ingress, egress) def test_filter_ipv4_egress_icmp(self): rule = {'ethertype': 'IPv4', 'direction': 'egress', 'protocol': 'icmp'} egress = mock.call.add_rule('ofake_dev', '-p icmp -j RETURN', comment=None) ingress = None self._test_prepare_port_filter(rule, ingress, egress) def test_filter_ipv4_egress_icmp_prefix(self): prefix = FAKE_PREFIX['IPv4'] rule = {'ethertype': 'IPv4', 'direction': 'egress', 'protocol': 'icmp', 'source_ip_prefix': prefix} egress = mock.call.add_rule( 'ofake_dev', '-s %s -p icmp -j RETURN' % prefix, comment=None) ingress = None self._test_prepare_port_filter(rule, ingress, egress) def test_filter_ipv4_egress_icmp_type(self): prefix = FAKE_PREFIX['IPv4'] rule = {'ethertype': 'IPv4', 'direction': 'egress', 'protocol': 'icmp', 'source_port_range_min': 8, 'source_ip_prefix': prefix} egress = mock.call.add_rule( 'ofake_dev', '-s %s -p icmp -m icmp --icmp-type 8 -j RETURN' % prefix, comment=None) ingress = None self._test_prepare_port_filter(rule, ingress, egress) def test_filter_ipv4_egress_icmp_type_name(self): prefix = FAKE_PREFIX['IPv4'] rule = {'ethertype': 'IPv4', 'direction': 'egress', 'protocol': 'icmp', 'source_port_range_min': 'echo-request', 'source_ip_prefix': prefix} egress = mock.call.add_rule( 'ofake_dev', '-s %s -p icmp -m icmp --icmp-type echo-request ' '-j RETURN' % prefix, comment=None) ingress = None self._test_prepare_port_filter(rule, ingress, egress) def test_filter_ipv4_egress_icmp_type_code(self): prefix = FAKE_PREFIX['IPv4'] rule = {'ethertype': 'IPv4', 'direction': 'egress', 'protocol': 'icmp', 'source_port_range_min': 8, 'source_port_range_max': 0, 'source_ip_prefix': prefix} egress = mock.call.add_rule( 'ofake_dev', '-s %s -p icmp -m icmp --icmp-type 8/0 -j RETURN' % prefix, comment=None) ingress = None self._test_prepare_port_filter(rule, ingress, egress) def test_filter_ipv4_egress_tcp_port(self): rule = {'ethertype': 'IPv4', 'direction': 'egress', 'protocol': 'tcp', 'port_range_min': 10, 'port_range_max': 10} egress = mock.call.add_rule('ofake_dev', '-p tcp -m tcp --dport 10 -j RETURN', comment=None) ingress = None self._test_prepare_port_filter(rule, ingress, egress) def test_filter_ipv4_egress_tcp_mport(self): rule = {'ethertype': 'IPv4', 'direction': 'egress', 'protocol': 'tcp', 'port_range_min': 10, 'port_range_max': 100} egress = mock.call.add_rule( 'ofake_dev', '-p tcp -m tcp -m multiport --dports 10:100 -j RETURN', comment=None) ingress = None self._test_prepare_port_filter(rule, ingress, egress) def test_filter_ipv4_egress_tcp_mport_prefix(self): prefix = FAKE_PREFIX['IPv4'] rule = {'ethertype': 'IPv4', 'direction': 'egress', 'protocol': 'tcp', 'port_range_min': 10, 'port_range_max': 100, 'source_ip_prefix': prefix} egress = 
mock.call.add_rule( 'ofake_dev', '-s %s -p tcp -m tcp -m multiport --dports 10:100 ' '-j RETURN' % prefix, comment=None) ingress = None self._test_prepare_port_filter(rule, ingress, egress) def test_filter_ipv4_egress_udp(self): rule = {'ethertype': 'IPv4', 'direction': 'egress', 'protocol': 'udp'} egress = mock.call.add_rule( 'ofake_dev', '-p udp -j RETURN', comment=None) ingress = None self._test_prepare_port_filter(rule, ingress, egress) def test_filter_ipv4_egress_udp_prefix(self): prefix = FAKE_PREFIX['IPv4'] rule = {'ethertype': 'IPv4', 'direction': 'egress', 'protocol': 'udp', 'source_ip_prefix': prefix} egress = mock.call.add_rule('ofake_dev', '-s %s -p udp -j RETURN' % prefix, comment=None) ingress = None self._test_prepare_port_filter(rule, ingress, egress) def test_filter_ipv4_egress_udp_port(self): rule = {'ethertype': 'IPv4', 'direction': 'egress', 'protocol': 'udp', 'port_range_min': 10, 'port_range_max': 10} egress = mock.call.add_rule('ofake_dev', '-p udp -m udp --dport 10 -j RETURN', comment=None) ingress = None self._test_prepare_port_filter(rule, ingress, egress) def test_filter_ipv4_egress_udp_mport(self): rule = {'ethertype': 'IPv4', 'direction': 'egress', 'protocol': 'udp', 'port_range_min': 10, 'port_range_max': 100} egress = mock.call.add_rule( 'ofake_dev', '-p udp -m udp -m multiport --dports 10:100 -j RETURN', comment=None) ingress = None self._test_prepare_port_filter(rule, ingress, egress) def test_filter_ipv4_egress_udp_mport_prefix(self): prefix = FAKE_PREFIX['IPv4'] rule = {'ethertype': 'IPv4', 'direction': 'egress', 'protocol': 'udp', 'port_range_min': 10, 'port_range_max': 100, 'source_ip_prefix': prefix} egress = mock.call.add_rule( 'ofake_dev', '-s %s -p udp -m udp -m multiport --dports 10:100 ' '-j RETURN' % prefix, comment=None) ingress = None self._test_prepare_port_filter(rule, ingress, egress) def test_filter_ipv6_ingress(self): rule = {'ethertype': 'IPv6', 'direction': 'ingress'} ingress = mock.call.add_rule('ifake_dev', '-j RETURN', comment=None) egress = None self._test_prepare_port_filter(rule, ingress, egress) def test_filter_ipv6_ingress_prefix(self): prefix = FAKE_PREFIX['IPv6'] rule = {'ethertype': 'IPv6', 'direction': 'ingress', 'source_ip_prefix': prefix} ingress = mock.call.add_rule( 'ifake_dev', '-s %s -j RETURN' % prefix, comment=None) egress = None self._test_prepare_port_filter(rule, ingress, egress) def test_filter_ipv6_ingress_tcp(self): rule = {'ethertype': 'IPv6', 'direction': 'ingress', 'protocol': 'tcp'} ingress = mock.call.add_rule( 'ifake_dev', '-p tcp -j RETURN', comment=None) egress = None self._test_prepare_port_filter(rule, ingress, egress) def test_filter_ipv6_ingress_tcp_prefix(self): prefix = FAKE_PREFIX['IPv6'] rule = {'ethertype': 'IPv6', 'direction': 'ingress', 'protocol': 'tcp', 'source_ip_prefix': prefix} ingress = mock.call.add_rule('ifake_dev', '-s %s -p tcp -j RETURN' % prefix, comment=None) egress = None self._test_prepare_port_filter(rule, ingress, egress) def test_filter_ipv6_ingress_tcp_port(self): rule = {'ethertype': 'IPv6', 'direction': 'ingress', 'protocol': 'tcp', 'port_range_min': 10, 'port_range_max': 10} ingress = mock.call.add_rule('ifake_dev', '-p tcp -m tcp --dport 10 -j RETURN', comment=None) egress = None self._test_prepare_port_filter(rule, ingress, egress) def test_filter_ipv6_ingress_icmp(self): rule = {'ethertype': 'IPv6', 'direction': 'ingress', 'protocol': 'icmp'} ingress = mock.call.add_rule( 'ifake_dev', '-p ipv6-icmp -j RETURN', comment=None) egress = None 
self._test_prepare_port_filter(rule, ingress, egress) def test_filter_ipv6_ingress_icmp_prefix(self): prefix = FAKE_PREFIX['IPv6'] rule = {'ethertype': 'IPv6', 'direction': 'ingress', 'protocol': 'icmp', 'source_ip_prefix': prefix} ingress = mock.call.add_rule( 'ifake_dev', '-s %s -p ipv6-icmp -j RETURN' % prefix, comment=None) egress = None self._test_prepare_port_filter(rule, ingress, egress) def test_filter_ipv6_ingress_tcp_mport(self): rule = {'ethertype': 'IPv6', 'direction': 'ingress', 'protocol': 'tcp', 'port_range_min': 10, 'port_range_max': 100} ingress = mock.call.add_rule( 'ifake_dev', '-p tcp -m tcp -m multiport --dports 10:100 -j RETURN', comment=None) egress = None self._test_prepare_port_filter(rule, ingress, egress) def _test_filter_ingress_tcp_min_port_0(self, ethertype): rule = {'ethertype': ethertype, 'direction': 'ingress', 'protocol': 'tcp', 'port_range_min': 0, 'port_range_max': 100} ingress = mock.call.add_rule( 'ifake_dev', '-p tcp -m tcp -m multiport --dports 0:100 -j RETURN', comment=None) egress = None self._test_prepare_port_filter(rule, ingress, egress) def test_filter_ingress_tcp_min_port_0_for_ipv4(self): self._test_filter_ingress_tcp_min_port_0('IPv4') def test_filter_ingress_tcp_min_port_0_for_ipv6(self): self._test_filter_ingress_tcp_min_port_0('IPv6') def test_filter_ipv6_ingress_tcp_mport_prefix(self): prefix = FAKE_PREFIX['IPv6'] rule = {'ethertype': 'IPv6', 'direction': 'ingress', 'protocol': 'tcp', 'port_range_min': 10, 'port_range_max': 100, 'source_ip_prefix': prefix} ingress = mock.call.add_rule( 'ifake_dev', '-s %s -p tcp -m tcp -m multiport --dports 10:100 ' '-j RETURN' % prefix, comment=None) egress = None self._test_prepare_port_filter(rule, ingress, egress) def test_filter_ipv6_ingress_udp(self): rule = {'ethertype': 'IPv6', 'direction': 'ingress', 'protocol': 'udp'} ingress = mock.call.add_rule( 'ifake_dev', '-p udp -j RETURN', comment=None) egress = None self._test_prepare_port_filter(rule, ingress, egress) def test_filter_ipv6_ingress_udp_prefix(self): prefix = FAKE_PREFIX['IPv6'] rule = {'ethertype': 'IPv6', 'direction': 'ingress', 'protocol': 'udp', 'source_ip_prefix': prefix} ingress = mock.call.add_rule('ifake_dev', '-s %s -p udp -j RETURN' % prefix, comment=None) egress = None self._test_prepare_port_filter(rule, ingress, egress) def test_filter_ipv6_ingress_udp_port(self): rule = {'ethertype': 'IPv6', 'direction': 'ingress', 'protocol': 'udp', 'port_range_min': 10, 'port_range_max': 10} ingress = mock.call.add_rule('ifake_dev', '-p udp -m udp --dport 10 -j RETURN', comment=None) egress = None self._test_prepare_port_filter(rule, ingress, egress) def test_filter_ipv6_ingress_udp_mport(self): rule = {'ethertype': 'IPv6', 'direction': 'ingress', 'protocol': 'udp', 'port_range_min': 10, 'port_range_max': 100} ingress = mock.call.add_rule( 'ifake_dev', '-p udp -m udp -m multiport --dports 10:100 -j RETURN', comment=None) egress = None self._test_prepare_port_filter(rule, ingress, egress) def test_filter_ipv6_ingress_udp_mport_prefix(self): prefix = FAKE_PREFIX['IPv6'] rule = {'ethertype': 'IPv6', 'direction': 'ingress', 'protocol': 'udp', 'port_range_min': 10, 'port_range_max': 100, 'source_ip_prefix': prefix} ingress = mock.call.add_rule( 'ifake_dev', '-s %s -p udp -m udp -m multiport --dports 10:100 ' '-j RETURN' % prefix, comment=None) egress = None self._test_prepare_port_filter(rule, ingress, egress) def test_filter_ipv6_egress(self): rule = {'ethertype': 'IPv6', 'direction': 'egress'} egress = mock.call.add_rule('ofake_dev', '-j 
RETURN', comment=None) ingress = None self._test_prepare_port_filter(rule, ingress, egress) def test_filter_ipv6_egress_prefix(self): prefix = FAKE_PREFIX['IPv6'] rule = {'ethertype': 'IPv6', 'direction': 'egress', 'source_ip_prefix': prefix} egress = mock.call.add_rule( 'ofake_dev', '-s %s -j RETURN' % prefix, comment=None) ingress = None self._test_prepare_port_filter(rule, ingress, egress) def test_filter_ipv6_egress_tcp(self): rule = {'ethertype': 'IPv6', 'direction': 'egress', 'protocol': 'tcp'} egress = mock.call.add_rule( 'ofake_dev', '-p tcp -j RETURN', comment=None) ingress = None self._test_prepare_port_filter(rule, ingress, egress) def test_filter_ipv6_egress_tcp_prefix(self): prefix = FAKE_PREFIX['IPv6'] rule = {'ethertype': 'IPv6', 'direction': 'egress', 'protocol': 'tcp', 'source_ip_prefix': prefix} egress = mock.call.add_rule('ofake_dev', '-s %s -p tcp -j RETURN' % prefix, comment=None) ingress = None self._test_prepare_port_filter(rule, ingress, egress) def test_filter_ipv6_egress_icmp(self): rule = {'ethertype': 'IPv6', 'direction': 'egress', 'protocol': 'icmp'} egress = mock.call.add_rule( 'ofake_dev', '-p ipv6-icmp -j RETURN', comment=None) ingress = None self._test_prepare_port_filter(rule, ingress, egress) def test_filter_ipv6_egress_icmp_prefix(self): prefix = FAKE_PREFIX['IPv6'] rule = {'ethertype': 'IPv6', 'direction': 'egress', 'protocol': 'icmp', 'source_ip_prefix': prefix} egress = mock.call.add_rule( 'ofake_dev', '-s %s -p ipv6-icmp -j RETURN' % prefix, comment=None) ingress = None self._test_prepare_port_filter(rule, ingress, egress) def test_filter_ipv6_egress_icmp_type(self): prefix = FAKE_PREFIX['IPv6'] rule = {'ethertype': 'IPv6', 'direction': 'egress', 'protocol': 'icmp', 'source_port_range_min': 8, 'source_ip_prefix': prefix} egress = mock.call.add_rule( 'ofake_dev', '-s %s -p ipv6-icmp -m icmp6 --icmpv6-type 8 -j RETURN' % prefix, comment=None) ingress = None self._test_prepare_port_filter(rule, ingress, egress) def test_filter_ipv6_egress_icmp_type_name(self): prefix = FAKE_PREFIX['IPv6'] rule = {'ethertype': 'IPv6', 'direction': 'egress', 'protocol': 'icmp', 'source_port_range_min': 'echo-request', 'source_ip_prefix': prefix} egress = mock.call.add_rule( 'ofake_dev', '-s %s -p ipv6-icmp -m icmp6 --icmpv6-type echo-request ' '-j RETURN' % prefix, comment=None) ingress = None self._test_prepare_port_filter(rule, ingress, egress) def test_filter_ipv6_egress_icmp_type_code(self): prefix = FAKE_PREFIX['IPv6'] rule = {'ethertype': 'IPv6', 'direction': 'egress', 'protocol': 'icmp', 'source_port_range_min': 8, 'source_port_range_max': 0, 'source_ip_prefix': prefix} egress = mock.call.add_rule( 'ofake_dev', '-s %s -p ipv6-icmp -m icmp6 --icmpv6-type 8/0 -j RETURN' % prefix, comment=None) ingress = None self._test_prepare_port_filter(rule, ingress, egress) def test_filter_ipv6_egress_tcp_port(self): rule = {'ethertype': 'IPv6', 'direction': 'egress', 'protocol': 'tcp', 'port_range_min': 10, 'port_range_max': 10} egress = mock.call.add_rule('ofake_dev', '-p tcp -m tcp --dport 10 -j RETURN', comment=None) ingress = None self._test_prepare_port_filter(rule, ingress, egress) def test_filter_ipv6_egress_tcp_mport(self): rule = {'ethertype': 'IPv6', 'direction': 'egress', 'protocol': 'tcp', 'port_range_min': 10, 'port_range_max': 100} egress = mock.call.add_rule( 'ofake_dev', '-p tcp -m tcp -m multiport --dports 10:100 -j RETURN', comment=None) ingress = None self._test_prepare_port_filter(rule, ingress, egress) def test_filter_ipv6_egress_tcp_mport_prefix(self): 
prefix = FAKE_PREFIX['IPv6'] rule = {'ethertype': 'IPv6', 'direction': 'egress', 'protocol': 'tcp', 'port_range_min': 10, 'port_range_max': 100, 'source_ip_prefix': prefix} egress = mock.call.add_rule( 'ofake_dev', '-s %s -p tcp -m tcp -m multiport --dports 10:100 ' '-j RETURN' % prefix, comment=None) ingress = None self._test_prepare_port_filter(rule, ingress, egress) def test_filter_ipv6_egress_udp(self): rule = {'ethertype': 'IPv6', 'direction': 'egress', 'protocol': 'udp'} egress = mock.call.add_rule( 'ofake_dev', '-p udp -j RETURN', comment=None) ingress = None self._test_prepare_port_filter(rule, ingress, egress) def test_filter_ipv6_egress_udp_prefix(self): prefix = FAKE_PREFIX['IPv6'] rule = {'ethertype': 'IPv6', 'direction': 'egress', 'protocol': 'udp', 'source_ip_prefix': prefix} egress = mock.call.add_rule('ofake_dev', '-s %s -p udp -j RETURN' % prefix, comment=None) ingress = None self._test_prepare_port_filter(rule, ingress, egress) def test_filter_ipv6_egress_udp_port(self): rule = {'ethertype': 'IPv6', 'direction': 'egress', 'protocol': 'udp', 'port_range_min': 10, 'port_range_max': 10} egress = mock.call.add_rule('ofake_dev', '-p udp -m udp --dport 10 -j RETURN', comment=None) ingress = None self._test_prepare_port_filter(rule, ingress, egress) def test_filter_ipv6_egress_udp_mport(self): rule = {'ethertype': 'IPv6', 'direction': 'egress', 'protocol': 'udp', 'port_range_min': 10, 'port_range_max': 100} egress = mock.call.add_rule( 'ofake_dev', '-p udp -m udp -m multiport --dports 10:100 -j RETURN', comment=None) ingress = None self._test_prepare_port_filter(rule, ingress, egress) def test_filter_ipv6_egress_udp_mport_prefix(self): prefix = FAKE_PREFIX['IPv6'] rule = {'ethertype': 'IPv6', 'direction': 'egress', 'protocol': 'udp', 'port_range_min': 10, 'port_range_max': 100, 'source_ip_prefix': prefix} egress = mock.call.add_rule( 'ofake_dev', '-s %s -p udp -m udp -m multiport --dports 10:100 ' '-j RETURN' % prefix, comment=None) ingress = None self._test_prepare_port_filter(rule, ingress, egress) def _test_prepare_port_filter(self, rule, ingress_expected_call=None, egress_expected_call=None): port = self._fake_port() ethertype = rule['ethertype'] prefix = utils.ip_to_cidr(FAKE_IP[ethertype]) filter_inst = self.v4filter_inst dhcp_rule = [mock.call.add_rule( 'ofake_dev', '-s 0.0.0.0/32 -d 255.255.255.255/32 -p udp -m udp ' '--sport 68 --dport 67 -j RETURN', comment=None)] if ethertype == 'IPv6': filter_inst = self.v6filter_inst dhcp_rule = [mock.call.add_rule('ofake_dev', '-s ::/128 -d ff02::/16 ' '-p ipv6-icmp -m icmp6 ' '--icmpv6-type 131 -j RETURN', comment=None), mock.call.add_rule('ofake_dev', '-s ::/128 -d ff02::/16 ' '-p ipv6-icmp -m icmp6 ' '--icmpv6-type %s -j RETURN' % constants.ICMPV6_TYPE_NC, comment=None), mock.call.add_rule('ofake_dev', '-s ::/128 -d ff02::/16 ' '-p ipv6-icmp -m icmp6 ' '--icmpv6-type 143 -j RETURN', comment=None)] sg = [rule] port['security_group_rules'] = sg self.firewall.prepare_port_filter(port) calls = [mock.call.add_chain('sg-fallback'), mock.call.add_rule( 'sg-fallback', '-j DROP', comment=ic.UNMATCH_DROP), mock.call.remove_chain('sg-chain'), mock.call.add_chain('sg-chain'), mock.call.add_chain('ifake_dev'), mock.call.add_rule('FORWARD', '-m physdev --physdev-out tapfake_dev ' '--physdev-is-bridged ' '-j $sg-chain', comment=ic.VM_INT_SG), mock.call.add_rule('sg-chain', '-m physdev --physdev-out tapfake_dev ' '--physdev-is-bridged ' '-j $ifake_dev', comment=ic.SG_TO_VM_SG) ] if ethertype == 'IPv6': for icmp6_type in 
constants.ICMPV6_ALLOWED_TYPES: calls.append( mock.call.add_rule('ifake_dev', '-p ipv6-icmp -m icmp6 --icmpv6-type ' '%s -j RETURN' % icmp6_type, comment=None)) calls += [ mock.call.add_rule( 'ifake_dev', '-m state --state RELATED,ESTABLISHED -j RETURN', comment=None ) ] if ingress_expected_call: calls.append(ingress_expected_call) calls += [mock.call.add_rule( 'ifake_dev', '-m state --state INVALID -j DROP', comment=None), mock.call.add_rule('ifake_dev', '-j $sg-fallback', comment=None), mock.call.add_chain('ofake_dev'), mock.call.add_rule('FORWARD', '-m physdev --physdev-in tapfake_dev ' '--physdev-is-bridged ' '-j $sg-chain', comment=ic.VM_INT_SG), mock.call.add_rule('sg-chain', '-m physdev --physdev-in tapfake_dev ' '--physdev-is-bridged -j $ofake_dev', comment=ic.SG_TO_VM_SG), mock.call.add_rule('INPUT', '-m physdev --physdev-in tapfake_dev ' '--physdev-is-bridged -j $ofake_dev', comment=ic.INPUT_TO_SG), mock.call.add_chain('sfake_dev'), mock.call.add_rule( 'sfake_dev', '-s %s -m mac --mac-source FF:FF:FF:FF:FF:FF -j RETURN' % prefix, comment=ic.PAIR_ALLOW)] if ethertype == 'IPv6': calls.append(mock.call.add_rule('sfake_dev', '-s fe80::fdff:ffff:feff:ffff/128 -m mac ' '--mac-source FF:FF:FF:FF:FF:FF -j RETURN', comment=ic.PAIR_ALLOW)) calls.append(mock.call.add_rule('sfake_dev', '-j DROP', comment=ic.PAIR_DROP)) calls += dhcp_rule calls.append(mock.call.add_rule('ofake_dev', '-j $sfake_dev', comment=None)) if ethertype == 'IPv4': calls.append(mock.call.add_rule( 'ofake_dev', '-p udp -m udp --sport 68 --dport 67 -j RETURN', comment=None)) calls.append(mock.call.add_rule( 'ofake_dev', '-p udp -m udp --sport 67 -m udp --dport 68 -j DROP', comment=None)) if ethertype == 'IPv6': calls.append(mock.call.add_rule('ofake_dev', '-p ipv6-icmp -m icmp6 ' '--icmpv6-type %s -j DROP' % constants.ICMPV6_TYPE_RA, comment=None)) calls.append(mock.call.add_rule('ofake_dev', '-p ipv6-icmp -j RETURN', comment=None)) calls.append(mock.call.add_rule('ofake_dev', '-p udp -m udp ' '--sport 546 -m udp --dport 547 ' '-j RETURN', comment=None)) calls.append(mock.call.add_rule( 'ofake_dev', '-p udp -m udp --sport 547 -m udp --dport 546 -j DROP', comment=None)) calls += [ mock.call.add_rule( 'ofake_dev', '-m state --state RELATED,ESTABLISHED -j RETURN', comment=None), ] if egress_expected_call: calls.append(egress_expected_call) calls += [mock.call.add_rule( 'ofake_dev', '-m state --state INVALID -j DROP', comment=None), mock.call.add_rule('ofake_dev', '-j $sg-fallback', comment=None), mock.call.add_rule('sg-chain', '-j ACCEPT')] comb = zip(calls, filter_inst.mock_calls) for (l, r) in comb: self.assertEqual(l, r) filter_inst.assert_has_calls(calls) def _test_remove_conntrack_entries(self, ethertype, protocol, direction): port = self._fake_port() port['security_groups'] = 'fake_sg_id' self.firewall.filtered_ports[port['device']] = port self.firewall.updated_rule_sg_ids = set(['fake_sg_id']) self.firewall.sg_rules['fake_sg_id'] = [ {'direction': direction, 'ethertype': ethertype, 'protocol': protocol}] self.firewall.filter_defer_apply_on() self.firewall.sg_rules['fake_sg_id'] = [] self.firewall.filter_defer_apply_off() cmd = ['conntrack', '-D'] if protocol: cmd.extend(['-p', protocol]) if ethertype == 'IPv4': cmd.extend(['-f', 'ipv4']) if direction == 'ingress': cmd.extend(['-d', '10.0.0.1']) else: cmd.extend(['-s', '10.0.0.1']) else: cmd.extend(['-f', 'ipv6']) if direction == 'ingress': cmd.extend(['-d', 'fe80::1']) else: cmd.extend(['-s', 'fe80::1']) # initial data has 1, 2, and 9 in use, CT zone will start at 10. 
cmd.extend(['-w', 10]) calls = [ mock.call(cmd, run_as_root=True, check_exit_code=True, extra_ok_codes=[1])] self.utils_exec.assert_has_calls(calls) def test_remove_conntrack_entries_for_delete_rule_ipv4(self): for direction in ['ingress', 'egress']: for pro in [None, 'tcp', 'icmp', 'udp']: self._test_remove_conntrack_entries( 'IPv4', pro, direction) def test_remove_conntrack_entries_for_delete_rule_ipv6(self): for direction in ['ingress', 'egress']: for pro in [None, 'tcp', 'icmp', 'udp']: self._test_remove_conntrack_entries( 'IPv6', pro, direction) def test_remove_conntrack_entries_for_port_sec_group_change(self): port = self._fake_port() port['security_groups'] = ['fake_sg_id'] self.firewall.filtered_ports[port['device']] = port self.firewall.updated_sg_members = set(['tapfake_dev']) self.firewall.filter_defer_apply_on() new_port = copy.deepcopy(port) new_port['security_groups'] = ['fake_sg_id2'] self.firewall.filtered_ports[port['device']] = new_port self.firewall.filter_defer_apply_off() calls = [ # initial data has 1, 2, and 9 in use, CT zone will start at 10. mock.call(['conntrack', '-D', '-f', 'ipv4', '-d', '10.0.0.1', '-w', 10], run_as_root=True, check_exit_code=True, extra_ok_codes=[1]), mock.call(['conntrack', '-D', '-f', 'ipv6', '-d', 'fe80::1', '-w', 10], run_as_root=True, check_exit_code=True, extra_ok_codes=[1])] self.utils_exec.assert_has_calls(calls) def test_user_sg_rules_deduped_before_call_to_iptables_manager(self): port = self._fake_port() port['security_group_rules'] = [{'ethertype': 'IPv4', 'direction': 'ingress'}] * 2 self.firewall.prepare_port_filter(port) rules = [''.join(c[1]) for c in self.v4filter_inst.add_rule.mock_calls] self.assertEqual(len(set(rules)), len(rules)) def test_update_delete_port_filter(self): port = self._fake_port() port['security_group_rules'] = [{'ethertype': 'IPv4', 'direction': 'ingress'}] self.firewall.prepare_port_filter(port) port['security_group_rules'] = [{'ethertype': 'IPv4', 'direction': 'egress'}] self.firewall.update_port_filter(port) self.firewall.update_port_filter({'device': 'no-exist-device'}) self.firewall.remove_port_filter(port) self.firewall.remove_port_filter({'device': 'no-exist-device'}) calls = [mock.call.add_chain('sg-fallback'), mock.call.add_rule( 'sg-fallback', '-j DROP', comment=ic.UNMATCH_DROP), mock.call.remove_chain('sg-chain'), mock.call.add_chain('sg-chain'), mock.call.add_chain('ifake_dev'), mock.call.add_rule( 'FORWARD', '-m physdev --physdev-out tapfake_dev ' '--physdev-is-bridged -j $sg-chain', comment=ic.VM_INT_SG), mock.call.add_rule( 'sg-chain', '-m physdev --physdev-out tapfake_dev ' '--physdev-is-bridged -j $ifake_dev', comment=ic.SG_TO_VM_SG), mock.call.add_rule( 'ifake_dev', '-m state --state RELATED,ESTABLISHED -j RETURN', comment=None), mock.call.add_rule('ifake_dev', '-j RETURN', comment=None), mock.call.add_rule( 'ifake_dev', '-m state --state INVALID -j DROP', comment=None), mock.call.add_rule( 'ifake_dev', '-j $sg-fallback', comment=None), mock.call.add_chain('ofake_dev'), mock.call.add_rule( 'FORWARD', '-m physdev --physdev-in tapfake_dev ' '--physdev-is-bridged -j $sg-chain', comment=ic.VM_INT_SG), mock.call.add_rule( 'sg-chain', '-m physdev --physdev-in tapfake_dev ' '--physdev-is-bridged -j $ofake_dev', comment=ic.SG_TO_VM_SG), mock.call.add_rule( 'INPUT', '-m physdev --physdev-in tapfake_dev ' '--physdev-is-bridged -j $ofake_dev', comment=ic.INPUT_TO_SG), mock.call.add_chain('sfake_dev'), mock.call.add_rule( 'sfake_dev', '-s 10.0.0.1/32 -m mac --mac-source FF:FF:FF:FF:FF:FF ' '-j 
RETURN', comment=ic.PAIR_ALLOW), mock.call.add_rule( 'sfake_dev', '-j DROP', comment=ic.PAIR_DROP), mock.call.add_rule( 'ofake_dev', '-s 0.0.0.0/32 -d 255.255.255.255/32 -p udp -m udp ' '--sport 68 --dport 67 -j RETURN', comment=None), mock.call.add_rule('ofake_dev', '-j $sfake_dev', comment=None), mock.call.add_rule( 'ofake_dev', '-p udp -m udp --sport 68 --dport 67 -j RETURN', comment=None), mock.call.add_rule( 'ofake_dev', '-p udp -m udp --sport 67 -m udp --dport 68 -j DROP', comment=None), mock.call.add_rule( 'ofake_dev', '-m state --state RELATED,ESTABLISHED -j RETURN', comment=None), mock.call.add_rule( 'ofake_dev', '-m state --state INVALID -j DROP', comment=None), mock.call.add_rule( 'ofake_dev', '-j $sg-fallback', comment=None), mock.call.add_rule('sg-chain', '-j ACCEPT'), mock.call.remove_chain('ifake_dev'), mock.call.remove_chain('ofake_dev'), mock.call.remove_chain('sfake_dev'), mock.call.remove_chain('sg-chain'), mock.call.add_chain('sg-chain'), mock.call.add_chain('ifake_dev'), mock.call.add_rule( 'FORWARD', '-m physdev --physdev-out tapfake_dev ' '--physdev-is-bridged -j $sg-chain', comment=ic.VM_INT_SG), mock.call.add_rule( 'sg-chain', '-m physdev --physdev-out tapfake_dev ' '--physdev-is-bridged -j $ifake_dev', comment=ic.SG_TO_VM_SG), mock.call.add_rule( 'ifake_dev', '-m state --state RELATED,ESTABLISHED -j RETURN', comment=None), mock.call.add_rule( 'ifake_dev', '-m state --state INVALID -j DROP', comment=None), mock.call.add_rule( 'ifake_dev', '-j $sg-fallback', comment=None), mock.call.add_chain('ofake_dev'), mock.call.add_rule( 'FORWARD', '-m physdev --physdev-in tapfake_dev ' '--physdev-is-bridged -j $sg-chain', comment=ic.VM_INT_SG), mock.call.add_rule( 'sg-chain', '-m physdev --physdev-in tapfake_dev ' '--physdev-is-bridged -j $ofake_dev', comment=ic.SG_TO_VM_SG), mock.call.add_rule( 'INPUT', '-m physdev --physdev-in tapfake_dev ' '--physdev-is-bridged -j $ofake_dev', comment=ic.INPUT_TO_SG), mock.call.add_chain('sfake_dev'), mock.call.add_rule( 'sfake_dev', '-s 10.0.0.1/32 -m mac --mac-source FF:FF:FF:FF:FF:FF ' '-j RETURN', comment=ic.PAIR_ALLOW), mock.call.add_rule( 'sfake_dev', '-j DROP', comment=ic.PAIR_DROP), mock.call.add_rule( 'ofake_dev', '-s 0.0.0.0/32 -d 255.255.255.255/32 -p udp -m udp ' '--sport 68 --dport 67 -j RETURN', comment=None), mock.call.add_rule('ofake_dev', '-j $sfake_dev', comment=None), mock.call.add_rule( 'ofake_dev', '-p udp -m udp --sport 68 --dport 67 -j RETURN', comment=None), mock.call.add_rule( 'ofake_dev', '-p udp -m udp --sport 67 -m udp --dport 68 -j DROP', comment=None), mock.call.add_rule( 'ofake_dev', '-m state --state RELATED,ESTABLISHED -j RETURN', comment=None), mock.call.add_rule('ofake_dev', '-j RETURN', comment=None), mock.call.add_rule( 'ofake_dev', '-m state --state INVALID -j DROP', comment=None), mock.call.add_rule('ofake_dev', '-j $sg-fallback', comment=None), mock.call.add_rule('sg-chain', '-j ACCEPT'), mock.call.remove_chain('ifake_dev'), mock.call.remove_chain('ofake_dev'), mock.call.remove_chain('sfake_dev'), mock.call.remove_chain('sg-chain'), mock.call.add_chain('sg-chain')] self.v4filter_inst.assert_has_calls(calls) def test_remove_unknown_port(self): port = self._fake_port() self.firewall.remove_port_filter(port) # checking no exception occurs self.assertFalse(self.v4filter_inst.called) def test_defer_apply(self): with self.firewall.defer_apply(): pass self.iptables_inst.assert_has_calls([mock.call.defer_apply_on(), mock.call.defer_apply_off()]) def test_filter_defer_with_exception(self): try: with 
self.firewall.defer_apply(): raise Exception("same exception") except Exception: pass self.iptables_inst.assert_has_calls([mock.call.defer_apply_on(), mock.call.defer_apply_off()]) def _mock_chain_applies(self): class CopyingMock(mock.MagicMock): """Copies arguments so mutable arguments can be asserted on. Copied verbatim from unittest.mock documentation. """ def __call__(self, *args, **kwargs): args = copy.deepcopy(args) kwargs = copy.deepcopy(kwargs) return super(CopyingMock, self).__call__(*args, **kwargs) # Need to use CopyingMock because _{setup,remove}_chains_apply are # usually called with that's modified between calls (i.e., # self.firewall.filtered_ports). chain_applies = CopyingMock() self.firewall._setup_chains_apply = chain_applies.setup self.firewall._remove_chains_apply = chain_applies.remove return chain_applies def test_mock_chain_applies(self): chain_applies = self._mock_chain_applies() port_prepare = {'device': 'd1', 'mac_address': 'prepare'} port_update = {'device': 'd1', 'mac_address': 'update'} self.firewall.prepare_port_filter(port_prepare) self.firewall.update_port_filter(port_update) self.firewall.remove_port_filter(port_update) chain_applies.assert_has_calls([mock.call.remove({}, {}), mock.call.setup({'d1': port_prepare}, {}), mock.call.remove({'d1': port_prepare}, {}), mock.call.setup({'d1': port_update}, {}), mock.call.remove({'d1': port_update}, {}), mock.call.setup({}, {})]) def test_defer_chain_apply_need_pre_defer_copy(self): chain_applies = self._mock_chain_applies() port = self._fake_port() device2port = {port['device']: port} self.firewall.prepare_port_filter(port) with self.firewall.defer_apply(): self.firewall.remove_port_filter(port) chain_applies.assert_has_calls([mock.call.remove({}, {}), mock.call.setup(device2port, {}), mock.call.remove(device2port, {}), mock.call.setup({}, {})]) def test_defer_chain_apply_coalesce_simple(self): chain_applies = self._mock_chain_applies() port = self._fake_port() with self.firewall.defer_apply(): self.firewall.prepare_port_filter(port) self.firewall.update_port_filter(port) self.firewall.remove_port_filter(port) chain_applies.assert_has_calls([mock.call.remove({}, {}), mock.call.setup({}, {})]) def test_defer_chain_apply_coalesce_multiple_ports(self): chain_applies = self._mock_chain_applies() port1 = {'device': 'd1', 'mac_address': 'mac1', 'network_id': 'net1'} port2 = {'device': 'd2', 'mac_address': 'mac2', 'network_id': 'net1'} device2port = {'d1': port1, 'd2': port2} with self.firewall.defer_apply(): self.firewall.prepare_port_filter(port1) self.firewall.prepare_port_filter(port2) chain_applies.assert_has_calls([mock.call.remove({}, {}), mock.call.setup(device2port, {})]) def test_ip_spoofing_filter_with_multiple_ips(self): port = {'device': 'tapfake_dev', 'mac_address': 'ff:ff:ff:ff:ff:ff', 'network_id': 'fake_net', 'fixed_ips': ['10.0.0.1', 'fe80::1', '10.0.0.2']} self.firewall.prepare_port_filter(port) calls = [mock.call.add_chain('sg-fallback'), mock.call.add_rule( 'sg-fallback', '-j DROP', comment=ic.UNMATCH_DROP), mock.call.remove_chain('sg-chain'), mock.call.add_chain('sg-chain'), mock.call.add_chain('ifake_dev'), mock.call.add_rule('FORWARD', '-m physdev --physdev-out tapfake_dev ' '--physdev-is-bridged ' '-j $sg-chain', comment=ic.VM_INT_SG), mock.call.add_rule('sg-chain', '-m physdev --physdev-out tapfake_dev ' '--physdev-is-bridged ' '-j $ifake_dev', comment=ic.SG_TO_VM_SG), mock.call.add_rule( 'ifake_dev', '-m state --state RELATED,ESTABLISHED -j RETURN', comment=None), mock.call.add_rule( 
'ifake_dev', '-m state --state INVALID -j DROP', comment=None), mock.call.add_rule('ifake_dev', '-j $sg-fallback', comment=None), mock.call.add_chain('ofake_dev'), mock.call.add_rule('FORWARD', '-m physdev --physdev-in tapfake_dev ' '--physdev-is-bridged ' '-j $sg-chain', comment=ic.VM_INT_SG), mock.call.add_rule('sg-chain', '-m physdev --physdev-in tapfake_dev ' '--physdev-is-bridged -j $ofake_dev', comment=ic.SG_TO_VM_SG), mock.call.add_rule('INPUT', '-m physdev --physdev-in tapfake_dev ' '--physdev-is-bridged -j $ofake_dev', comment=ic.INPUT_TO_SG), mock.call.add_chain('sfake_dev'), mock.call.add_rule( 'sfake_dev', '-s 10.0.0.1/32 -m mac --mac-source FF:FF:FF:FF:FF:FF ' '-j RETURN', comment=ic.PAIR_ALLOW), mock.call.add_rule( 'sfake_dev', '-s 10.0.0.2/32 -m mac --mac-source FF:FF:FF:FF:FF:FF ' '-j RETURN', comment=ic.PAIR_ALLOW), mock.call.add_rule( 'sfake_dev', '-j DROP', comment=ic.PAIR_DROP), mock.call.add_rule( 'ofake_dev', '-s 0.0.0.0/32 -d 255.255.255.255/32 -p udp -m udp ' '--sport 68 --dport 67 -j RETURN', comment=None), mock.call.add_rule('ofake_dev', '-j $sfake_dev', comment=None), mock.call.add_rule( 'ofake_dev', '-p udp -m udp --sport 68 --dport 67 -j RETURN', comment=None), mock.call.add_rule( 'ofake_dev', '-p udp -m udp --sport 67 -m udp --dport 68 -j DROP', comment=None), mock.call.add_rule( 'ofake_dev', '-m state --state RELATED,ESTABLISHED -j RETURN', comment=None), mock.call.add_rule( 'ofake_dev', '-m state --state INVALID -j DROP', comment=None), mock.call.add_rule('ofake_dev', '-j $sg-fallback', comment=None), mock.call.add_rule('sg-chain', '-j ACCEPT')] self.v4filter_inst.assert_has_calls(calls) def test_ip_spoofing_no_fixed_ips(self): port = {'device': 'tapfake_dev', 'mac_address': 'ff:ff:ff:ff:ff:ff', 'network_id': 'fake_net', 'fixed_ips': []} self.firewall.prepare_port_filter(port) calls = [mock.call.add_chain('sg-fallback'), mock.call.add_rule( 'sg-fallback', '-j DROP', comment=ic.UNMATCH_DROP), mock.call.remove_chain('sg-chain'), mock.call.add_chain('sg-chain'), mock.call.add_chain('ifake_dev'), mock.call.add_rule('FORWARD', '-m physdev --physdev-out tapfake_dev ' '--physdev-is-bridged ' '-j $sg-chain', comment=ic.VM_INT_SG), mock.call.add_rule('sg-chain', '-m physdev --physdev-out tapfake_dev ' '--physdev-is-bridged ' '-j $ifake_dev', comment=ic.SG_TO_VM_SG), mock.call.add_rule( 'ifake_dev', '-m state --state RELATED,ESTABLISHED -j RETURN', comment=None), mock.call.add_rule( 'ifake_dev', '-m state --state INVALID -j DROP', comment=None), mock.call.add_rule('ifake_dev', '-j $sg-fallback', comment=None), mock.call.add_chain('ofake_dev'), mock.call.add_rule('FORWARD', '-m physdev --physdev-in tapfake_dev ' '--physdev-is-bridged ' '-j $sg-chain', comment=ic.VM_INT_SG), mock.call.add_rule('sg-chain', '-m physdev --physdev-in tapfake_dev ' '--physdev-is-bridged -j $ofake_dev', comment=ic.SG_TO_VM_SG), mock.call.add_rule('INPUT', '-m physdev --physdev-in tapfake_dev ' '--physdev-is-bridged -j $ofake_dev', comment=ic.INPUT_TO_SG), mock.call.add_chain('sfake_dev'), mock.call.add_rule( 'sfake_dev', '-m mac --mac-source FF:FF:FF:FF:FF:FF -j RETURN', comment=ic.PAIR_ALLOW), mock.call.add_rule( 'sfake_dev', '-j DROP', comment=ic.PAIR_DROP), mock.call.add_rule( 'ofake_dev', '-s 0.0.0.0/32 -d 255.255.255.255/32 -p udp -m udp ' '--sport 68 --dport 67 -j RETURN', comment=None), mock.call.add_rule('ofake_dev', '-j $sfake_dev', comment=None), mock.call.add_rule( 'ofake_dev', '-p udp -m udp --sport 68 --dport 67 -j RETURN', comment=None), mock.call.add_rule( 'ofake_dev', '-p udp 
-m udp --sport 67 -m udp --dport 68 -j DROP', comment=None), mock.call.add_rule( 'ofake_dev', '-m state --state RELATED,ESTABLISHED -j RETURN', comment=None), mock.call.add_rule( 'ofake_dev', '-m state --state INVALID -j DROP', comment=None), mock.call.add_rule('ofake_dev', '-j $sg-fallback', comment=None), mock.call.add_rule('sg-chain', '-j ACCEPT')] self.v4filter_inst.assert_has_calls(calls) class IptablesFirewallEnhancedIpsetTestCase(BaseIptablesFirewallTestCase): def setUp(self): super(IptablesFirewallEnhancedIpsetTestCase, self).setUp() self.firewall.ipset = mock.Mock() self.firewall.ipset.get_name.side_effect = ( ipset_manager.IpsetManager.get_name) self.firewall.ipset.set_name_exists.return_value = True self.firewall.ipset.set_members = mock.Mock(return_value=([], [])) def _fake_port(self, sg_id=FAKE_SGID): return {'device': 'tapfake_dev', 'mac_address': 'ff:ff:ff:ff:ff:ff', 'network_id': 'fake_net', 'fixed_ips': [FAKE_IP['IPv4'], FAKE_IP['IPv6']], 'security_groups': [sg_id], 'security_group_source_groups': [sg_id]} def _fake_sg_rule_for_ethertype(self, ethertype, remote_group): return {'direction': 'ingress', 'remote_group_id': remote_group, 'ethertype': ethertype} def _fake_sg_rules(self, sg_id=FAKE_SGID, remote_groups=None): remote_groups = remote_groups or {_IPv4: [FAKE_SGID], _IPv6: [FAKE_SGID]} rules = [] for ip_version, remote_group_list in six.iteritems(remote_groups): for remote_group in remote_group_list: rules.append(self._fake_sg_rule_for_ethertype(ip_version, remote_group)) return {sg_id: rules} def _fake_sg_members(self, sg_ids=None): return {sg_id: copy.copy(FAKE_IP) for sg_id in (sg_ids or [FAKE_SGID])} def test_update_security_group_members(self): sg_members = {'IPv4': ['10.0.0.1', '10.0.0.2'], 'IPv6': ['fe80::1']} self.firewall.update_security_group_members('fake_sgid', sg_members) calls = [ mock.call.set_members('fake_sgid', 'IPv4', ['10.0.0.1', '10.0.0.2']), mock.call.set_members('fake_sgid', 'IPv6', ['fe80::1']) ] self.firewall.ipset.assert_has_calls(calls, any_order=True) def _setup_fake_firewall_members_and_rules(self, firewall): firewall.sg_rules = self._fake_sg_rules() firewall.pre_sg_rules = self._fake_sg_rules() firewall.sg_members = self._fake_sg_members() firewall.pre_sg_members = firewall.sg_members def _prepare_rules_and_members_for_removal(self): self._setup_fake_firewall_members_and_rules(self.firewall) self.firewall.pre_sg_members[OTHER_SGID] = ( self.firewall.pre_sg_members[FAKE_SGID]) def test_determine_remote_sgs_to_remove(self): self._prepare_rules_and_members_for_removal() ports = [self._fake_port()] self.assertEqual( {_IPv4: set([OTHER_SGID]), _IPv6: set([OTHER_SGID])}, self.firewall._determine_remote_sgs_to_remove(ports)) def test_determine_remote_sgs_to_remove_ipv6_unreferenced(self): self._prepare_rules_and_members_for_removal() ports = [self._fake_port()] self.firewall.sg_rules = self._fake_sg_rules( remote_groups={_IPv4: [OTHER_SGID, FAKE_SGID], _IPv6: [FAKE_SGID]}) self.assertEqual( {_IPv4: set(), _IPv6: set([OTHER_SGID])}, self.firewall._determine_remote_sgs_to_remove(ports)) def test_get_remote_sg_ids_by_ipversion(self): self.firewall.sg_rules = self._fake_sg_rules( remote_groups={_IPv4: [FAKE_SGID], _IPv6: [OTHER_SGID]}) ports = [self._fake_port()] self.assertEqual( {_IPv4: set([FAKE_SGID]), _IPv6: set([OTHER_SGID])}, self.firewall._get_remote_sg_ids_sets_by_ipversion(ports)) def test_get_remote_sg_ids(self): self.firewall.sg_rules = self._fake_sg_rules( remote_groups={_IPv4: [FAKE_SGID, FAKE_SGID, FAKE_SGID], _IPv6: [OTHER_SGID, 
OTHER_SGID, OTHER_SGID]}) port = self._fake_port() self.assertEqual( {_IPv4: set([FAKE_SGID]), _IPv6: set([OTHER_SGID])}, self.firewall._get_remote_sg_ids(port)) def test_determine_sg_rules_to_remove(self): self.firewall.pre_sg_rules = self._fake_sg_rules(sg_id=OTHER_SGID) ports = [self._fake_port()] self.assertEqual(set([OTHER_SGID]), self.firewall._determine_sg_rules_to_remove(ports)) def test_get_sg_ids_set_for_ports(self): sg_ids = set([FAKE_SGID, OTHER_SGID]) ports = [self._fake_port(sg_id) for sg_id in sg_ids] self.assertEqual(sg_ids, self.firewall._get_sg_ids_set_for_ports(ports)) def test_remove_sg_members(self): self.firewall.sg_members = self._fake_sg_members([FAKE_SGID, OTHER_SGID]) remote_sgs_to_remove = {_IPv4: set([FAKE_SGID]), _IPv6: set([FAKE_SGID, OTHER_SGID])} self.firewall._remove_sg_members(remote_sgs_to_remove) self.assertIn(OTHER_SGID, self.firewall.sg_members) self.assertNotIn(FAKE_SGID, self.firewall.sg_members) def test_remove_unused_security_group_info_clears_unused_rules(self): self._setup_fake_firewall_members_and_rules(self.firewall) self.firewall.prepare_port_filter(self._fake_port()) # create another SG which won't be referenced by any filtered port fake_sg_rules = self.firewall.sg_rules['fake_sgid'] self.firewall.pre_sg_rules[OTHER_SGID] = fake_sg_rules self.firewall.sg_rules[OTHER_SGID] = fake_sg_rules # call the cleanup function, and check the unused sg_rules are out self.firewall._remove_unused_security_group_info() self.assertNotIn(OTHER_SGID, self.firewall.sg_rules) def test_remove_unused_security_group_info(self): self.firewall.sg_members = {OTHER_SGID: {_IPv4: [], _IPv6: []}} self.firewall.pre_sg_members = self.firewall.sg_members self.firewall.sg_rules = self._fake_sg_rules( remote_groups={_IPv4: [FAKE_SGID], _IPv6: [FAKE_SGID]}) self.firewall.pre_sg_rules = self.firewall.sg_rules port = self._fake_port() self.firewall.filtered_ports['tapfake_dev'] = port self.firewall._remove_unused_security_group_info() self.assertNotIn(OTHER_SGID, self.firewall.sg_members) def test_not_remove_used_security_group_info(self): self.firewall.sg_members = {OTHER_SGID: {_IPv4: [], _IPv6: []}} self.firewall.pre_sg_members = self.firewall.sg_members self.firewall.sg_rules = self._fake_sg_rules( remote_groups={_IPv4: [OTHER_SGID], _IPv6: [OTHER_SGID]}) self.firewall.pre_sg_rules = self.firewall.sg_rules port = self._fake_port() self.firewall.filtered_ports['tapfake_dev'] = port self.firewall._remove_unused_security_group_info() self.assertIn(OTHER_SGID, self.firewall.sg_members) def test_remove_all_unused_info(self): self._setup_fake_firewall_members_and_rules(self.firewall) self.firewall.filtered_ports = {} self.firewall._remove_unused_security_group_info() self.assertFalse(self.firewall.sg_members) self.assertFalse(self.firewall.sg_rules) def test_single_fallback_accept_rule(self): p1, p2 = self._fake_port(), self._fake_port() self.firewall._setup_chains_apply(dict(p1=p1, p2=p2), {}) v4_adds = self.firewall.iptables.ipv4['filter'].add_rule.mock_calls v6_adds = self.firewall.iptables.ipv6['filter'].add_rule.mock_calls sg_chain_v4_accept = [call for call in v4_adds if call == mock.call('sg-chain', '-j ACCEPT')] sg_chain_v6_accept = [call for call in v6_adds if call == mock.call('sg-chain', '-j ACCEPT')] self.assertEqual(1, len(sg_chain_v4_accept)) self.assertEqual(1, len(sg_chain_v6_accept)) def test_remove_port_filter_with_destroy_ipset_chain(self): self.firewall.sg_rules = self._fake_sg_rules() port = self._fake_port() self.firewall.pre_sg_members = {'fake_sgid': { 
'IPv4': [], 'IPv6': []}}
        sg_members = {'IPv4': ['10.0.0.1'], 'IPv6': ['fe80::1']}
        self.firewall.update_security_group_members('fake_sgid', sg_members)
        self.firewall.prepare_port_filter(port)
        self.firewall.filter_defer_apply_on()
        self.firewall.sg_members = {'fake_sgid': {
            'IPv4': [], 'IPv6': []}}
        self.firewall.pre_sg_members = {'fake_sgid': {
            'IPv4': ['10.0.0.1'], 'IPv6': ['fe80::1']}}
        self.firewall.remove_port_filter(port)
        self.firewall.filter_defer_apply_off()
        calls = [
            mock.call.set_members('fake_sgid', 'IPv4', ['10.0.0.1']),
            mock.call.set_members('fake_sgid', 'IPv6', ['fe80::1']),
            mock.call.get_name('fake_sgid', 'IPv4'),
            mock.call.set_name_exists('NIPv4fake_sgid'),
            mock.call.get_name('fake_sgid', 'IPv6'),
            mock.call.set_name_exists('NIPv6fake_sgid'),
            mock.call.destroy('fake_sgid', 'IPv4'),
            mock.call.destroy('fake_sgid', 'IPv6')]
        self.firewall.ipset.assert_has_calls(calls, any_order=True)

    def test_filter_defer_apply_off_with_sg_only_ipv6_rule(self):
        self.firewall.sg_rules = self._fake_sg_rules()
        self.firewall.pre_sg_rules = self._fake_sg_rules()
        self.firewall.ipset_chains = {'IPv4fake_sgid': ['10.0.0.2'],
                                      'IPv6fake_sgid': ['fe80::1']}
        self.firewall.sg_members = {'fake_sgid': {
            'IPv4': ['10.0.0.2'], 'IPv6': ['fe80::1']}}
        self.firewall.pre_sg_members = {'fake_sgid': {
            'IPv4': ['10.0.0.2'], 'IPv6': ['fe80::1']}}
        self.firewall.sg_rules['fake_sgid'].remove(
            {'direction': 'ingress', 'remote_group_id': 'fake_sgid',
             'ethertype': 'IPv4'})
        self.firewall.sg_rules.update()
        self.firewall._defer_apply = True
        port = self._fake_port()
        self.firewall.filtered_ports['tapfake_dev'] = port
        self.firewall._pre_defer_filtered_ports = {}
        self.firewall._pre_defer_unfiltered_ports = {}
        self.firewall.filter_defer_apply_off()
        calls = [mock.call.destroy('fake_sgid', 'IPv4')]
        self.firewall.ipset.assert_has_calls(calls, True)

    def test_sg_rule_expansion_with_remote_ips(self):
        other_ips = ['10.0.0.2', '10.0.0.3', '10.0.0.4']
        self.firewall.sg_members = {'fake_sgid': {
            'IPv4': [FAKE_IP['IPv4']] + other_ips,
            'IPv6': [FAKE_IP['IPv6']]}}

        port = self._fake_port()
        rule = self._fake_sg_rule_for_ethertype(_IPv4, FAKE_SGID)
        rules = self.firewall._expand_sg_rule_with_remote_ips(
            rule, port, 'ingress')
        self.assertEqual(list(rules),
                         [dict(list(rule.items()) +
                               [('source_ip_prefix', '%s/32' % ip)])
                          for ip in other_ips])

    def test_build_ipv4v6_mac_ip_list(self):
        mac_oth = 'ffff-ff0f-ffff'
        mac_unix = 'FF:FF:FF:0F:FF:FF'
        ipv4 = FAKE_IP['IPv4']
        ipv6 = FAKE_IP['IPv6']
        fake_ipv4_pair = []
        fake_ipv4_pair.append((mac_unix, ipv4))
        fake_ipv6_pair = []
        fake_ipv6_pair.append((mac_unix, ipv6))
        fake_ipv6_pair.append((mac_unix, 'fe80::fdff:ffff:fe0f:ffff'))

        mac_ipv4_pairs = []
        mac_ipv6_pairs = []

        self.firewall._build_ipv4v6_mac_ip_list(mac_oth, ipv4,
                                                mac_ipv4_pairs,
                                                mac_ipv6_pairs)
        self.assertEqual(fake_ipv4_pair, mac_ipv4_pairs)
        self.firewall._build_ipv4v6_mac_ip_list(mac_oth, ipv6,
                                                mac_ipv4_pairs,
                                                mac_ipv6_pairs)
        self.assertEqual(fake_ipv6_pair, mac_ipv6_pairs)


class OVSHybridIptablesFirewallTestCase(BaseIptablesFirewallTestCase):

    def setUp(self):
        super(OVSHybridIptablesFirewallTestCase, self).setUp()
        self.firewall = iptables_firewall.OVSHybridIptablesFirewallDriver()
        # initial data has 1, 2, and 9 in use, see RAW_TABLE_OUTPUT above.
        self._dev_zone_map = {'61634509-31': 2, '8f46cf18-12': 9,
                              '95c24827-02': 2, 'e804433b-61': 1}

    def test__populate_initial_zone_map(self):
        self.assertEqual(self._dev_zone_map, self.firewall._device_zone_map)

    def test__generate_device_zone(self):
        # initial data has 1, 2, and 9 in use.
        # we fill from top up first.
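        # The assertions below pin down the allocation order: a fresh
        # device first gets max(in-use zone) + 1 (10 here, since setUp
        # seeds zones 1, 2 and 9); only once the top of the conntrack
        # zone range is reached does the driver scan for freed gaps.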
self.assertEqual(10, self.firewall._generate_device_zone('test')) # once it's maxed out, it scans for gaps self.firewall._device_zone_map['someport'] = ( iptables_firewall.MAX_CONNTRACK_ZONES) for i in range(3, 9): self.assertEqual(i, self.firewall._generate_device_zone(i)) # 9 and 10 are taken so next should be 11 self.assertEqual(11, self.firewall._generate_device_zone('p11')) # take out zone 1 and make sure it's selected self.firewall._device_zone_map.pop('e804433b-61') self.assertEqual(1, self.firewall._generate_device_zone('p1')) # fill it up and then make sure an extra throws an error for i in range(1, 65536): self.firewall._device_zone_map['dev-%s' % i] = i with testtools.ExpectedException(n_exc.CTZoneExhaustedError): self.firewall._find_open_zone() # with it full, try again, this should trigger a cleanup and return 1 self.assertEqual(1, self.firewall._generate_device_zone('p12')) self.assertEqual({'p12': 1}, self.firewall._device_zone_map) def test_get_device_zone(self): # initial data has 1, 2, and 9 in use. self.assertEqual(10, self.firewall.get_device_zone('12345678901234567')) # should have been truncated to 11 chars self._dev_zone_map.update({'12345678901': 10}) self.assertEqual(self._dev_zone_map, self.firewall._device_zone_map) neutron-8.4.0/neutron/tests/unit/agent/linux/test_external_process.py0000664000567000056710000002572313044372760027436 0ustar jenkinsjenkins00000000000000# Copyright 2012 New Dream Network, LLC (DreamHost) # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import mock import os.path from neutron.agent.linux import external_process as ep from neutron.common import utils as common_utils from neutron.tests import base from neutron.tests import tools TEST_UUID = 'test-uuid' TEST_SERVICE = 'testsvc' TEST_PID = 1234 class BaseTestProcessMonitor(base.BaseTestCase): def setUp(self): super(BaseTestProcessMonitor, self).setUp() self.log_patch = mock.patch("neutron.agent.linux.external_process." 
"LOG.error") self.error_log = self.log_patch.start() self.spawn_patch = mock.patch("eventlet.spawn") self.eventlent_spawn = self.spawn_patch.start() # create a default process monitor self.create_child_process_monitor('respawn') def create_child_process_monitor(self, action): conf = mock.Mock() conf.AGENT.check_child_processes_action = action conf.AGENT.check_child_processes = True self.pmonitor = ep.ProcessMonitor( config=conf, resource_type='test') def get_monitored_process(self, uuid, service=None): monitored_process = mock.Mock() self.pmonitor.register(uuid=uuid, service_name=service, monitored_process=monitored_process) return monitored_process class TestProcessMonitor(BaseTestProcessMonitor): def test_error_logged(self): pm = self.get_monitored_process(TEST_UUID) pm.active = False self.pmonitor._check_child_processes() self.assertTrue(self.error_log.called) def test_exit_handler(self): self.create_child_process_monitor('exit') pm = self.get_monitored_process(TEST_UUID) pm.active = False with mock.patch.object(ep.ProcessMonitor, '_exit_handler') as exit_handler: self.pmonitor._check_child_processes() exit_handler.assert_called_once_with(TEST_UUID, None) def test_register(self): pm = self.get_monitored_process(TEST_UUID) self.assertEqual(len(self.pmonitor._monitored_processes), 1) self.assertIn(pm, self.pmonitor._monitored_processes.values()) def test_register_same_service_twice(self): self.get_monitored_process(TEST_UUID) self.get_monitored_process(TEST_UUID) self.assertEqual(len(self.pmonitor._monitored_processes), 1) def test_register_different_service_types(self): self.get_monitored_process(TEST_UUID) self.get_monitored_process(TEST_UUID, TEST_SERVICE) self.assertEqual(len(self.pmonitor._monitored_processes), 2) def test_unregister(self): self.get_monitored_process(TEST_UUID) self.pmonitor.unregister(TEST_UUID, None) self.assertEqual(len(self.pmonitor._monitored_processes), 0) def test_unregister_unknown_process(self): self.pmonitor.unregister(TEST_UUID, None) self.assertEqual(len(self.pmonitor._monitored_processes), 0) class TestProcessManager(base.BaseTestCase): def setUp(self): super(TestProcessManager, self).setUp() self.execute_p = mock.patch('neutron.agent.common.utils.execute') self.execute = self.execute_p.start() self.delete_if_exists = mock.patch( 'oslo_utils.fileutils.delete_if_exists').start() self.ensure_dir = mock.patch.object( common_utils, 'ensure_dir').start() self.conf = mock.Mock() self.conf.external_pids = '/var/path' def test_processmanager_ensures_pid_dir(self): pid_file = os.path.join(self.conf.external_pids, 'pid') ep.ProcessManager(self.conf, 'uuid', pid_file=pid_file) self.ensure_dir.assert_called_once_with(self.conf.external_pids) def test_enable_no_namespace(self): callback = mock.Mock() callback.return_value = ['the', 'cmd'] with mock.patch.object(ep.ProcessManager, 'get_pid_file_name') as name: name.return_value = 'pidfile' with mock.patch.object(ep.ProcessManager, 'active') as active: active.__get__ = mock.Mock(return_value=False) manager = ep.ProcessManager(self.conf, 'uuid') manager.enable(callback) callback.assert_called_once_with('pidfile') self.execute.assert_called_once_with(['the', 'cmd'], check_exit_code=True, extra_ok_codes=None, run_as_root=False, log_fail_as_error=True) def test_enable_with_namespace(self): callback = mock.Mock() callback.return_value = ['the', 'cmd'] with mock.patch.object(ep.ProcessManager, 'get_pid_file_name') as name: name.return_value = 'pidfile' with mock.patch.object(ep.ProcessManager, 'active') as active: 
active.__get__ = mock.Mock(return_value=False)

                manager = ep.ProcessManager(self.conf, 'uuid',
                                            namespace='ns')
                with mock.patch.object(ep, 'ip_lib') as ip_lib:
                    manager.enable(callback)
                    callback.assert_called_once_with('pidfile')
                    ip_lib.assert_has_calls([
                        mock.call.IPWrapper(namespace='ns'),
                        mock.call.IPWrapper().netns.execute(
                            ['the', 'cmd'], addl_env=None,
                            run_as_root=False)])

    def test_enable_with_namespace_process_active(self):
        callback = mock.Mock()
        callback.return_value = ['the', 'cmd']

        with mock.patch.object(ep.ProcessManager, 'active') as active:
            active.__get__ = mock.Mock(return_value=True)

            manager = ep.ProcessManager(self.conf, 'uuid', namespace='ns')
            with mock.patch.object(ep, 'ip_lib'):
                manager.enable(callback)
                self.assertFalse(callback.called)

    def test_disable_no_namespace(self):
        with mock.patch.object(ep.ProcessManager, 'pid') as pid:
            pid.__get__ = mock.Mock(return_value=4)
            with mock.patch.object(ep.ProcessManager, 'active') as active:
                active.__get__ = mock.Mock(return_value=True)

                manager = ep.ProcessManager(self.conf, 'uuid')

                with mock.patch.object(ep, 'utils') as utils:
                    manager.disable()
                    utils.assert_has_calls([
                        mock.call.execute(['kill', '-9', 4],
                                          run_as_root=True)])

    def test_disable_namespace(self):
        with mock.patch.object(ep.ProcessManager, 'pid') as pid:
            pid.__get__ = mock.Mock(return_value=4)
            with mock.patch.object(ep.ProcessManager, 'active') as active:
                active.__get__ = mock.Mock(return_value=True)

                manager = ep.ProcessManager(self.conf, 'uuid', namespace='ns')

                with mock.patch.object(ep, 'utils') as utils:
                    manager.disable()
                    utils.assert_has_calls([
                        mock.call.execute(['kill', '-9', 4],
                                          run_as_root=True)])

    def test_disable_not_active(self):
        with mock.patch.object(ep.ProcessManager, 'pid') as pid:
            pid.__get__ = mock.Mock(return_value=4)
            with mock.patch.object(ep.ProcessManager, 'active') as active:
                active.__get__ = mock.Mock(return_value=False)
                with mock.patch.object(ep.LOG, 'debug') as debug:
                    manager = ep.ProcessManager(self.conf, 'uuid')
                    manager.disable()
                    debug.assert_called_once_with(mock.ANY, mock.ANY)

    def test_disable_no_pid(self):
        with mock.patch.object(ep.ProcessManager, 'pid') as pid:
            pid.__get__ = mock.Mock(return_value=None)
            with mock.patch.object(ep.ProcessManager, 'active') as active:
                active.__get__ = mock.Mock(return_value=False)
                with mock.patch.object(ep.LOG, 'debug') as debug:
                    manager = ep.ProcessManager(self.conf, 'uuid')
                    manager.disable()
                    debug.assert_called_once_with(mock.ANY, mock.ANY)

    def test_get_pid_file_name_default(self):
        manager = ep.ProcessManager(self.conf, 'uuid')
        retval = manager.get_pid_file_name()
        self.assertEqual(retval, '/var/path/uuid.pid')

    def test_pid(self):
        self.useFixture(tools.OpenFixture('/var/path/uuid.pid', '5'))
        manager = ep.ProcessManager(self.conf, 'uuid')
        self.assertEqual(manager.pid, 5)

    def test_pid_not_an_int(self):
        self.useFixture(tools.OpenFixture('/var/path/uuid.pid', 'foo'))
        manager = ep.ProcessManager(self.conf, 'uuid')
        self.assertIsNone(manager.pid)

    def test_pid_invalid_file(self):
        with mock.patch.object(ep.ProcessManager, 'get_pid_file_name') as name:
            name.return_value = '.doesnotexist/pid'
            manager = ep.ProcessManager(self.conf, 'uuid')
            self.assertIsNone(manager.pid)

    def test_active(self):
        mock_open = self.useFixture(
            tools.OpenFixture('/proc/4/cmdline', 'python foo --router_id=uuid')
        ).mock_open
        with mock.patch.object(ep.ProcessManager, 'pid') as pid:
            pid.__get__ = mock.Mock(return_value=4)
            manager = ep.ProcessManager(self.conf, 'uuid')
            self.assertTrue(manager.active)

        mock_open.assert_called_once_with('/proc/4/cmdline',
'r') def test_active_none(self): dummy_cmd_line = 'python foo --router_id=uuid' self.execute.return_value = dummy_cmd_line with mock.patch.object(ep.ProcessManager, 'pid') as pid: pid.__get__ = mock.Mock(return_value=None) manager = ep.ProcessManager(self.conf, 'uuid') self.assertFalse(manager.active) def test_active_cmd_mismatch(self): mock_open = self.useFixture( tools.OpenFixture('/proc/4/cmdline', 'python foo --router_id=anotherid') ).mock_open with mock.patch.object(ep.ProcessManager, 'pid') as pid: pid.__get__ = mock.Mock(return_value=4) manager = ep.ProcessManager(self.conf, 'uuid') self.assertFalse(manager.active) mock_open.assert_called_once_with('/proc/4/cmdline', 'r') neutron-8.4.0/neutron/tests/unit/agent/linux/failing_process.py0000664000567000056710000000137513044372736026166 0ustar jenkinsjenkins00000000000000# Copyright 2015 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import os import sys def main(): filename = sys.argv[1] if not os.path.exists(filename): sys.exit(1) if __name__ == '__main__': main() neutron-8.4.0/neutron/tests/unit/agent/linux/test_ip_conntrack.py0000664000567000056710000000254513044372760026525 0ustar jenkinsjenkins00000000000000# All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import mock from neutron.agent.linux import ip_conntrack from neutron.tests import base class IPConntrackTestCase(base.BaseTestCase): def setUp(self): super(IPConntrackTestCase, self).setUp() self.execute = mock.Mock() self.mgr = ip_conntrack.IpConntrackManager(self._zone_lookup, self.execute) def _zone_lookup(self, dev): return 100 def test_delete_conntrack_state_dedupes(self): rule = {'ethertype': 'IPv4', 'direction': 'ingress'} dev_info = {'device': 'device', 'fixed_ips': ['1.2.3.4']} dev_info_list = [dev_info for _ in range(10)] self.mgr._delete_conntrack_state(dev_info_list, rule) self.assertEqual(1, len(self.execute.mock_calls)) neutron-8.4.0/neutron/tests/unit/agent/linux/test_pd.py0000664000567000056710000000221313044372736024451 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the
# License for the specific language governing permissions and limitations
# under the License.

import mock

from neutron.agent.linux import pd
from neutron.tests import base as tests_base


class FakeRouter(object):
    def __init__(self, router_id):
        self.router_id = router_id


class TestPrefixDelegation(tests_base.DietTestCase):
    def test_remove_router(self):
        l3_agent = mock.Mock()
        router_id = 1
        l3_agent.pd.routers = {router_id: pd.get_router_entry(None)}
        pd.remove_router(None, None, l3_agent, router=FakeRouter(router_id))
        self.assertTrue(l3_agent.pd.delete_router_pd.called)
        self.assertEqual({}, l3_agent.pd.routers)
neutron-8.4.0/neutron/tests/unit/agent/linux/test_ovsdb_monitor.py0000664000567000056710000000564313044372760026741 0ustar jenkinsjenkins00000000000000# Copyright 2013 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import mock

from neutron.agent.common import ovs_lib
from neutron.agent.linux import ovsdb_monitor
from neutron.tests import base


class TestOvsdbMonitor(base.BaseTestCase):

    def test___init__(self):
        ovsdb_monitor.OvsdbMonitor('Interface')

    def test___init___with_columns(self):
        columns = ['col1', 'col2']
        with mock.patch(
            'neutron.agent.linux.async_process.AsyncProcess.__init__') as init:
            ovsdb_monitor.OvsdbMonitor('Interface', columns=columns)
            cmd = init.call_args_list[0][0][0]
            self.assertEqual('col1,col2', cmd[-1])

    def test___init___with_format(self):
        with mock.patch(
            'neutron.agent.linux.async_process.AsyncProcess.__init__') as init:
            ovsdb_monitor.OvsdbMonitor('Interface', format='blob')
            cmd = init.call_args_list[0][0][0]
            self.assertEqual('--format=blob', cmd[-1])


class TestSimpleInterfaceMonitor(base.BaseTestCase):

    def setUp(self):
        super(TestSimpleInterfaceMonitor, self).setUp()
        self.monitor = ovsdb_monitor.SimpleInterfaceMonitor()

    def test_has_updates_is_false_if_active_with_no_output(self):
        target = ('neutron.agent.linux.ovsdb_monitor.SimpleInterfaceMonitor'
                  '.is_active')
        with mock.patch(target, return_value=True):
            self.assertFalse(self.monitor.has_updates)

    def test_has_updates_after_calling_get_events_is_false(self):
        with mock.patch.object(
                self.monitor, 'process_events') as process_events:
            self.monitor.new_events = {'added': ['foo'], 'removed': ['foo1']}
            self.assertTrue(self.monitor.has_updates)
            self.monitor.get_events()
            self.assertTrue(process_events.called)
            self.assertFalse(self.monitor.has_updates)

    def test_process_event_unassigned_of_port(self):
        output = '{"data":[["e040fbec-0579-4990-8324-d338da33ae88","insert",'
        output += '"m50",["set",[]],["map",[]]]],"headings":["row","action",'
        output += '"name","ofport","external_ids"]}'
        with mock.patch.object(
                self.monitor, 'iter_stdout', return_value=[output]):
            self.monitor.process_events()
            self.assertEqual(self.monitor.new_events['added'][0]['ofport'],
                             ovs_lib.UNASSIGNED_OFPORT)
neutron-8.4.0/neutron/tests/unit/agent/linux/test_bridge_lib.py0000664000567000056710000000771013044372760026134 0ustar jenkinsjenkins00000000000000# Copyright 2015 Intel Corporation.
# Copyright 2015 Isaku Yamahata
#
# All Rights Reserved.
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import mock from neutron.agent.linux import bridge_lib from neutron.tests import base class BridgeLibTest(base.BaseTestCase): """A test suite to exercise the bridge libraries """ _NAMESPACE = 'test-namespace' _BR_NAME = 'test-br' _IF_NAME = 'test-if' def setUp(self): super(BridgeLibTest, self).setUp() ip_wrapper = mock.patch('neutron.agent.linux.ip_lib.IPWrapper').start() self.execute = ip_wrapper.return_value.netns.execute def _verify_bridge_mock(self, cmd): self.execute.assert_called_once_with(cmd, run_as_root=True) self.execute.reset_mock() def _verify_bridge_mock_check_exit_code(self, cmd): self.execute.assert_called_once_with(cmd, run_as_root=True, check_exit_code=True) self.execute.reset_mock() def test_is_bridged_interface(self): exists = lambda path: path == "/sys/class/net/tapOK/brport" with mock.patch('os.path.exists', side_effect=exists): self.assertTrue(bridge_lib.is_bridged_interface("tapOK")) self.assertFalse(bridge_lib.is_bridged_interface("tapKO")) def test_get_interface_bridge(self): with mock.patch('os.readlink', side_effect=["prefix/br0", OSError()]): br = bridge_lib.BridgeDevice.get_interface_bridge('tap0') self.assertIsInstance(br, bridge_lib.BridgeDevice) self.assertEqual("br0", br.name) br = bridge_lib.BridgeDevice.get_interface_bridge('tap0') self.assertIsNone(br) def _test_br(self, namespace=None): br = bridge_lib.BridgeDevice.addbr(self._BR_NAME, namespace) self.assertEqual(namespace, br.namespace) self._verify_bridge_mock(['brctl', 'addbr', self._BR_NAME]) br.setfd(0) self._verify_bridge_mock(['brctl', 'setfd', self._BR_NAME, '0']) br.disable_stp() self._verify_bridge_mock(['brctl', 'stp', self._BR_NAME, 'off']) br.disable_ipv6() cmd = 'net.ipv6.conf.%s.disable_ipv6=1' % self._BR_NAME self._verify_bridge_mock_check_exit_code(['sysctl', '-w', cmd]) br.addif(self._IF_NAME) self._verify_bridge_mock( ['brctl', 'addif', self._BR_NAME, self._IF_NAME]) br.delif(self._IF_NAME) self._verify_bridge_mock( ['brctl', 'delif', self._BR_NAME, self._IF_NAME]) br.delbr() self._verify_bridge_mock(['brctl', 'delbr', self._BR_NAME]) def test_addbr_with_namespace(self): self._test_br(self._NAMESPACE) def test_addbr_without_namespace(self): self._test_br() def test_owns_interface(self): br = bridge_lib.BridgeDevice('br-int') exists = lambda path: path == "/sys/class/net/br-int/brif/abc" with mock.patch('os.path.exists', side_effect=exists): self.assertTrue(br.owns_interface("abc")) self.assertFalse(br.owns_interface("def")) def test_get_interfaces(self): br = bridge_lib.BridgeDevice('br-int') interfaces = ["tap1", "tap2"] with mock.patch('os.listdir', side_effect=[interfaces, OSError()]): self.assertEqual(interfaces, br.get_interfaces()) self.assertEqual([], br.get_interfaces()) neutron-8.4.0/neutron/tests/unit/agent/linux/test_ip_lib.py0000664000567000056710000017540213044372760025314 0ustar jenkinsjenkins00000000000000# Copyright 2012 OpenStack Foundation # All Rights Reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import mock import netaddr import testtools from neutron.agent.common import utils # noqa from neutron.agent.linux import ip_lib from neutron.common import exceptions from neutron.tests import base NETNS_SAMPLE = [ '12345678-1234-5678-abcd-1234567890ab', 'bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb', 'cccccccc-cccc-cccc-cccc-cccccccccccc'] NETNS_SAMPLE_IPROUTE2_4 = [ '12345678-1234-5678-abcd-1234567890ab (id: 1)', 'bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb (id: 0)', 'cccccccc-cccc-cccc-cccc-cccccccccccc (id: 2)'] LINK_SAMPLE = [ '1: lo: mtu 16436 qdisc noqueue state UNKNOWN \\' 'link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00 promiscuity 0', '2: eth0: mtu 1500 qdisc mq state UP ' 'qlen 1000\ link/ether cc:dd:ee:ff:ab:cd brd ff:ff:ff:ff:ff:ff' '\ alias openvswitch', '3: br-int: mtu 1500 qdisc noop state DOWN ' '\ link/ether aa:bb:cc:dd:ee:ff brd ff:ff:ff:ff:ff:ff promiscuity 0', '4: gw-ddc717df-49: mtu 1500 qdisc noop ' 'state DOWN \ link/ether fe:dc:ba:fe:dc:ba brd ff:ff:ff:ff:ff:ff ' 'promiscuity 0', '5: foo:foo: mtu 1500 qdisc mq state ' 'UP qlen 1000\ link/ether cc:dd:ee:ff:ab:cd brd ff:ff:ff:ff:ff:ff ' 'promiscuity 0', '6: foo@foo: mtu 1500 qdisc mq state ' 'UP qlen 1000\ link/ether cc:dd:ee:ff:ab:cd brd ff:ff:ff:ff:ff:ff ' 'promiscuity 0', '7: foo:foo@foo: mtu 1500 qdisc mq ' 'state UP qlen 1000' '\ link/ether cc:dd:ee:ff:ab:cd brd ff:ff:ff:ff:ff:ff promiscuity 0', '8: foo@foo:foo: mtu 1500 qdisc mq ' 'state UP qlen 1000' '\ link/ether cc:dd:ee:ff:ab:cd brd ff:ff:ff:ff:ff:ff promiscuity 0', '9: bar.9@eth0: mtu 1500 qdisc ' ' noqueue master brq0b24798c-07 state UP mode DEFAULT' '\ link/ether ab:04:49:b6:ab:a0 brd ff:ff:ff:ff:ff:ff promiscuity 0' '\ vlan protocol 802.1q id 9 ', '10: bar@eth0: mtu 1500 qdisc ' ' noqueue master brq0b24798c-07 state UP mode DEFAULT' '\ link/ether ab:04:49:b6:ab:a0 brd ff:ff:ff:ff:ff:ff promiscuity 0' '\ vlan protocol 802.1Q id 10 ', '11: bar:bar@eth0: mtu 1500 qdisc mq ' 'state UP qlen 1000' '\ link/ether cc:dd:ee:ff:ab:cd brd ff:ff:ff:ff:ff:ff promiscuity 0' '\ vlan id 11 ', '12: bar@bar@eth0: mtu 1500 qdisc mq ' 'state UP qlen 1000' '\ link/ether cc:dd:ee:ff:ab:cd brd ff:ff:ff:ff:ff:ff promiscuity 0' '\ vlan id 12 ', '13: bar:bar@bar@eth0: mtu 1500 ' 'qdisc mq state UP qlen 1000' '\ link/ether cc:dd:ee:ff:ab:cd brd ff:ff:ff:ff:ff:ff promiscuity 0' '\ vlan protocol 802.1q id 13 ', '14: bar@bar:bar@eth0: mtu 1500 ' 'qdisc mq state UP qlen 1000' '\ link/ether cc:dd:ee:ff:ab:cd brd ff:ff:ff:ff:ff:ff promiscuity 0' '\ vlan protocol 802.1Q id 14 '] ADDR_SAMPLE = (""" 2: eth0: mtu 1500 qdisc mq state UP qlen 1000 link/ether dd:cc:aa:b9:76:ce brd ff:ff:ff:ff:ff:ff inet 172.16.77.240/24 brd 172.16.77.255 scope global eth0 inet6 2001:470:9:1224:5595:dd51:6ba2:e788/64 scope global temporary dynamic valid_lft 14187sec preferred_lft 3387sec inet6 fe80::3023:39ff:febc:22ae/64 scope link tentative valid_lft forever preferred_lft forever inet6 fe80::3023:39ff:febc:22af/64 scope link tentative dadfailed valid_lft forever preferred_lft forever 
inet6 2001:470:9:1224:fd91:272:581e:3a32/64 scope global temporary """ """deprecated dynamic valid_lft 14187sec preferred_lft 0sec inet6 2001:470:9:1224:4508:b885:5fb:740b/64 scope global temporary """ """deprecated dynamic valid_lft 14187sec preferred_lft 0sec inet6 2001:470:9:1224:dfcc:aaff:feb9:76ce/64 scope global dynamic valid_lft 14187sec preferred_lft 3387sec inet6 fe80::dfcc:aaff:feb9:76ce/64 scope link valid_lft forever preferred_lft forever """) ADDR_SAMPLE2 = (""" 2: eth0: mtu 1500 qdisc mq state UP qlen 1000 link/ether dd:cc:aa:b9:76:ce brd ff:ff:ff:ff:ff:ff inet 172.16.77.240/24 scope global eth0 inet6 2001:470:9:1224:5595:dd51:6ba2:e788/64 scope global temporary dynamic valid_lft 14187sec preferred_lft 3387sec inet6 fe80::3023:39ff:febc:22ae/64 scope link tentative valid_lft forever preferred_lft forever inet6 fe80::3023:39ff:febc:22af/64 scope link tentative dadfailed valid_lft forever preferred_lft forever inet6 2001:470:9:1224:fd91:272:581e:3a32/64 scope global temporary """ """deprecated dynamic valid_lft 14187sec preferred_lft 0sec inet6 2001:470:9:1224:4508:b885:5fb:740b/64 scope global temporary """ """deprecated dynamic valid_lft 14187sec preferred_lft 0sec inet6 2001:470:9:1224:dfcc:aaff:feb9:76ce/64 scope global dynamic valid_lft 14187sec preferred_lft 3387sec inet6 fe80::dfcc:aaff:feb9:76ce/64 scope link valid_lft forever preferred_lft forever """) ADDR_SAMPLE3 = (""" 2: eth0@NONE: mtu 1500 qdisc mq state UP link/ether dd:cc:aa:b9:76:ce brd ff:ff:ff:ff:ff:ff inet 172.16.77.240/24 brd 172.16.77.255 scope global eth0 """) GATEWAY_SAMPLE1 = (""" default via 10.35.19.254 metric 100 10.35.16.0/22 proto kernel scope link src 10.35.17.97 """) GATEWAY_SAMPLE2 = (""" default via 10.35.19.254 metric 100 """) GATEWAY_SAMPLE3 = (""" 10.35.16.0/22 proto kernel scope link src 10.35.17.97 """) GATEWAY_SAMPLE4 = (""" default via 10.35.19.254 """) GATEWAY_SAMPLE5 = (""" default via 192.168.99.1 proto static """) GATEWAY_SAMPLE6 = (""" default via 192.168.99.1 proto static metric 100 """) GATEWAY_SAMPLE7 = (""" default dev qg-31cd36 metric 1 """) IPv6_GATEWAY_SAMPLE1 = (""" default via 2001:470:9:1224:4508:b885:5fb:740b metric 100 2001:db8::/64 proto kernel scope link src 2001:470:9:1224:dfcc:aaff:feb9:76ce """) IPv6_GATEWAY_SAMPLE2 = (""" default via 2001:470:9:1224:4508:b885:5fb:740b metric 100 """) IPv6_GATEWAY_SAMPLE3 = (""" 2001:db8::/64 proto kernel scope link src 2001:470:9:1224:dfcc:aaff:feb9:76ce """) IPv6_GATEWAY_SAMPLE4 = (""" default via fe80::dfcc:aaff:feb9:76ce """) IPv6_GATEWAY_SAMPLE5 = (""" default via 2001:470:9:1224:4508:b885:5fb:740b metric 1024 """) DEVICE_ROUTE_SAMPLE = ("10.0.0.0/24 scope link src 10.0.0.2") SUBNET_SAMPLE1 = ("10.0.0.0/24 dev qr-23380d11-d2 scope link src 10.0.0.1\n" "10.0.0.0/24 dev tap1d7888a7-10 scope link src 10.0.0.2") SUBNET_SAMPLE2 = ("10.0.0.0/24 dev tap1d7888a7-10 scope link src 10.0.0.2\n" "10.0.0.0/24 dev qr-23380d11-d2 scope link src 10.0.0.1") RULE_V4_SAMPLE = (""" 0: from all lookup local 32766: from all lookup main 32767: from all lookup default 101: from 192.168.45.100 lookup 2 """) RULE_V6_SAMPLE = (""" 0: from all lookup local 32766: from all lookup main 32767: from all lookup default 201: from 2001:db8::1 lookup 3 """) class TestSubProcessBase(base.BaseTestCase): def setUp(self): super(TestSubProcessBase, self).setUp() self.execute_p = mock.patch('neutron.agent.common.utils.execute') self.execute = self.execute_p.start() def test_execute_wrapper(self): ip_lib.SubProcessBase._execute(['o'], 'link', ('list',), 
run_as_root=True) self.execute.assert_called_once_with(['ip', '-o', 'link', 'list'], run_as_root=True, log_fail_as_error=True) def test_execute_wrapper_int_options(self): ip_lib.SubProcessBase._execute([4], 'link', ('list',)) self.execute.assert_called_once_with(['ip', '-4', 'link', 'list'], run_as_root=False, log_fail_as_error=True) def test_execute_wrapper_no_options(self): ip_lib.SubProcessBase._execute([], 'link', ('list',)) self.execute.assert_called_once_with(['ip', 'link', 'list'], run_as_root=False, log_fail_as_error=True) def test_run_no_namespace(self): base = ip_lib.SubProcessBase() base._run([], 'link', ('list',)) self.execute.assert_called_once_with(['ip', 'link', 'list'], run_as_root=False, log_fail_as_error=True) def test_run_namespace(self): base = ip_lib.SubProcessBase(namespace='ns') base._run([], 'link', ('list',)) self.execute.assert_called_once_with(['ip', 'netns', 'exec', 'ns', 'ip', 'link', 'list'], run_as_root=True, log_fail_as_error=True) def test_as_root_namespace(self): base = ip_lib.SubProcessBase(namespace='ns') base._as_root([], 'link', ('list',)) self.execute.assert_called_once_with(['ip', 'netns', 'exec', 'ns', 'ip', 'link', 'list'], run_as_root=True, log_fail_as_error=True) class TestIpWrapper(base.BaseTestCase): def setUp(self): super(TestIpWrapper, self).setUp() self.execute_p = mock.patch.object(ip_lib.IPWrapper, '_execute') self.execute = self.execute_p.start() @mock.patch('os.path.islink') @mock.patch('os.listdir', return_value=['lo']) def test_get_devices(self, mocked_listdir, mocked_islink): retval = ip_lib.IPWrapper().get_devices() mocked_islink.assert_called_once_with('/sys/class/net/lo') self.assertEqual(retval, [ip_lib.IPDevice('lo')]) @mock.patch('neutron.agent.common.utils.execute') def test_get_devices_namespaces(self, mocked_execute): fake_str = mock.Mock() fake_str.split.return_value = ['lo'] mocked_execute.return_value = fake_str retval = ip_lib.IPWrapper(namespace='foo').get_devices() mocked_execute.assert_called_once_with( ['ip', 'netns', 'exec', 'foo', 'find', '/sys/class/net', '-maxdepth', '1', '-type', 'l', '-printf', '%f '], run_as_root=True, log_fail_as_error=True) self.assertTrue(fake_str.split.called) self.assertEqual(retval, [ip_lib.IPDevice('lo', namespace='foo')]) @mock.patch('neutron.agent.common.utils.execute') def test_get_devices_exclude_loopback_and_gre(self, mocked_execute): device_name = 'somedevice' mocked_execute.return_value = 'lo gre0 gretap0 ' + device_name devices = ip_lib.IPWrapper(namespace='foo').get_devices( exclude_loopback=True, exclude_gre_devices=True) somedevice = devices.pop() self.assertEqual(device_name, somedevice.name) self.assertFalse(devices) @mock.patch('neutron.agent.common.utils.execute') def test_get_devices_namespaces_ns_not_exists(self, mocked_execute): mocked_execute.side_effect = RuntimeError( "Cannot open network namespace") with mock.patch.object(ip_lib.IpNetnsCommand, 'exists', return_value=False): retval = ip_lib.IPWrapper(namespace='foo').get_devices() self.assertEqual([], retval) @mock.patch('neutron.agent.common.utils.execute') def test_get_devices_namespaces_ns_exists(self, mocked_execute): mocked_execute.side_effect = RuntimeError( "Cannot open network namespace") with mock.patch.object(ip_lib.IpNetnsCommand, 'exists', return_value=True): self.assertRaises(RuntimeError, ip_lib.IPWrapper(namespace='foo').get_devices) def test_get_namespaces(self): self.execute.return_value = '\n'.join(NETNS_SAMPLE) retval = ip_lib.IPWrapper.get_namespaces() self.assertEqual(retval, 
['12345678-1234-5678-abcd-1234567890ab', 'bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb', 'cccccccc-cccc-cccc-cccc-cccccccccccc']) self.execute.assert_called_once_with([], 'netns', ('list',)) def test_get_namespaces_iproute2_4(self): self.execute.return_value = '\n'.join(NETNS_SAMPLE_IPROUTE2_4) retval = ip_lib.IPWrapper.get_namespaces() self.assertEqual(retval, ['12345678-1234-5678-abcd-1234567890ab', 'bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb', 'cccccccc-cccc-cccc-cccc-cccccccccccc']) self.execute.assert_called_once_with([], 'netns', ('list',)) def test_add_tuntap(self): ip_lib.IPWrapper().add_tuntap('tap0') self.execute.assert_called_once_with([], 'tuntap', ('add', 'tap0', 'mode', 'tap'), run_as_root=True, namespace=None, log_fail_as_error=True) def test_add_veth(self): ip_lib.IPWrapper().add_veth('tap0', 'tap1') self.execute.assert_called_once_with([], 'link', ('add', 'tap0', 'type', 'veth', 'peer', 'name', 'tap1'), run_as_root=True, namespace=None, log_fail_as_error=True) def test_add_macvtap(self): ip_lib.IPWrapper().add_macvtap('macvtap0', 'eth0', 'bridge') self.execute.assert_called_once_with([], 'link', ('add', 'link', 'eth0', 'name', 'macvtap0', 'type', 'macvtap', 'mode', 'bridge'), run_as_root=True, namespace=None, log_fail_as_error=True) def test_del_veth(self): ip_lib.IPWrapper().del_veth('fpr-1234') self.execute.assert_called_once_with([], 'link', ('del', 'fpr-1234'), run_as_root=True, namespace=None, log_fail_as_error=True) def test_add_veth_with_namespaces(self): ns2 = 'ns2' with mock.patch.object(ip_lib.IPWrapper, 'ensure_namespace') as en: ip_lib.IPWrapper().add_veth('tap0', 'tap1', namespace2=ns2) en.assert_has_calls([mock.call(ns2)]) self.execute.assert_called_once_with([], 'link', ('add', 'tap0', 'type', 'veth', 'peer', 'name', 'tap1', 'netns', ns2), run_as_root=True, namespace=None, log_fail_as_error=True) def test_add_dummy(self): ip_lib.IPWrapper().add_dummy('dummy0') self.execute.assert_called_once_with([], 'link', ('add', 'dummy0', 'type', 'dummy'), run_as_root=True, namespace=None, log_fail_as_error=True) def test_get_device(self): dev = ip_lib.IPWrapper(namespace='ns').device('eth0') self.assertEqual(dev.namespace, 'ns') self.assertEqual(dev.name, 'eth0') def test_ensure_namespace(self): with mock.patch.object(ip_lib, 'IPDevice') as ip_dev: ip = ip_lib.IPWrapper() with mock.patch.object(ip.netns, 'exists') as ns_exists: with mock.patch('neutron.agent.common.utils.execute'): ns_exists.return_value = False ip.ensure_namespace('ns') self.execute.assert_has_calls( [mock.call([], 'netns', ('add', 'ns'), run_as_root=True, namespace=None, log_fail_as_error=True)]) ip_dev.assert_has_calls([mock.call('lo', namespace='ns'), mock.call().link.set_up()]) def test_ensure_namespace_existing(self): with mock.patch.object(ip_lib, 'IpNetnsCommand') as ip_ns_cmd: ip_ns_cmd.exists.return_value = True ns = ip_lib.IPWrapper().ensure_namespace('ns') self.assertFalse(self.execute.called) self.assertEqual(ns.namespace, 'ns') def test_namespace_is_empty_no_devices(self): ip = ip_lib.IPWrapper(namespace='ns') with mock.patch.object(ip, 'get_devices') as get_devices: get_devices.return_value = [] self.assertTrue(ip.namespace_is_empty()) self.assertTrue(get_devices.called) def test_namespace_is_empty(self): ip = ip_lib.IPWrapper(namespace='ns') with mock.patch.object(ip, 'get_devices') as get_devices: get_devices.return_value = [mock.Mock()] self.assertFalse(ip.namespace_is_empty()) self.assertTrue(get_devices.called) def test_garbage_collect_namespace_does_not_exist(self): with mock.patch.object(ip_lib, 
'IpNetnsCommand') as ip_ns_cmd_cls: ip_ns_cmd_cls.return_value.exists.return_value = False ip = ip_lib.IPWrapper(namespace='ns') with mock.patch.object(ip, 'namespace_is_empty') as mock_is_empty: self.assertFalse(ip.garbage_collect_namespace()) ip_ns_cmd_cls.assert_has_calls([mock.call().exists('ns')]) self.assertNotIn(mock.call().delete('ns'), ip_ns_cmd_cls.return_value.mock_calls) self.assertEqual([], mock_is_empty.mock_calls) def test_garbage_collect_namespace_existing_empty_ns(self): with mock.patch.object(ip_lib, 'IpNetnsCommand') as ip_ns_cmd_cls: ip_ns_cmd_cls.return_value.exists.return_value = True ip = ip_lib.IPWrapper(namespace='ns') with mock.patch.object(ip, 'namespace_is_empty') as mock_is_empty: mock_is_empty.return_value = True self.assertTrue(ip.garbage_collect_namespace()) mock_is_empty.assert_called_once_with() expected = [mock.call().exists('ns'), mock.call().delete('ns')] ip_ns_cmd_cls.assert_has_calls(expected) def test_garbage_collect_namespace_existing_not_empty(self): lo_device = mock.Mock() lo_device.name = 'lo' tap_device = mock.Mock() tap_device.name = 'tap1' with mock.patch.object(ip_lib, 'IpNetnsCommand') as ip_ns_cmd_cls: ip_ns_cmd_cls.return_value.exists.return_value = True ip = ip_lib.IPWrapper(namespace='ns') with mock.patch.object(ip, 'namespace_is_empty') as mock_is_empty: mock_is_empty.return_value = False self.assertFalse(ip.garbage_collect_namespace()) mock_is_empty.assert_called_once_with() expected = [mock.call(ip), mock.call().exists('ns')] self.assertEqual(ip_ns_cmd_cls.mock_calls, expected) self.assertNotIn(mock.call().delete('ns'), ip_ns_cmd_cls.mock_calls) def test_add_vlan(self): retval = ip_lib.IPWrapper().add_vlan('eth0.1', 'eth0', '1') self.assertIsInstance(retval, ip_lib.IPDevice) self.assertEqual(retval.name, 'eth0.1') self.execute.assert_called_once_with([], 'link', ['add', 'link', 'eth0', 'name', 'eth0.1', 'type', 'vlan', 'id', '1'], run_as_root=True, namespace=None, log_fail_as_error=True) def test_add_vxlan_valid_port_length(self): retval = ip_lib.IPWrapper().add_vxlan('vxlan0', 'vni0', group='group0', dev='dev0', ttl='ttl0', tos='tos0', local='local0', proxy=True, port=('1', '2')) self.assertIsInstance(retval, ip_lib.IPDevice) self.assertEqual(retval.name, 'vxlan0') self.execute.assert_called_once_with([], 'link', ['add', 'vxlan0', 'type', 'vxlan', 'id', 'vni0', 'group', 'group0', 'dev', 'dev0', 'ttl', 'ttl0', 'tos', 'tos0', 'local', 'local0', 'proxy', 'port', '1', '2'], run_as_root=True, namespace=None, log_fail_as_error=True) def test_add_vxlan_invalid_port_length(self): wrapper = ip_lib.IPWrapper() self.assertRaises(exceptions.NetworkVxlanPortRangeError, wrapper.add_vxlan, 'vxlan0', 'vni0', group='group0', dev='dev0', ttl='ttl0', tos='tos0', local='local0', proxy=True, port=('1', '2', '3')) def test_add_device_to_namespace(self): dev = mock.Mock() ip_lib.IPWrapper(namespace='ns').add_device_to_namespace(dev) dev.assert_has_calls([mock.call.link.set_netns('ns')]) def test_add_device_to_namespace_is_none(self): dev = mock.Mock() ip_lib.IPWrapper().add_device_to_namespace(dev) self.assertEqual([], dev.mock_calls) class TestIPDevice(base.BaseTestCase): def test_eq_same_name(self): dev1 = ip_lib.IPDevice('tap0') dev2 = ip_lib.IPDevice('tap0') self.assertEqual(dev1, dev2) def test_eq_diff_name(self): dev1 = ip_lib.IPDevice('tap0') dev2 = ip_lib.IPDevice('tap1') self.assertNotEqual(dev1, dev2) def test_eq_same_namespace(self): dev1 = ip_lib.IPDevice('tap0', 'ns1') dev2 = ip_lib.IPDevice('tap0', 'ns1') self.assertEqual(dev1, dev2) def 
test_eq_diff_namespace(self): dev1 = ip_lib.IPDevice('tap0', namespace='ns1') dev2 = ip_lib.IPDevice('tap0', namespace='ns2') self.assertNotEqual(dev1, dev2) def test_eq_other_is_none(self): dev1 = ip_lib.IPDevice('tap0', namespace='ns1') self.assertIsNotNone(dev1) def test_str(self): self.assertEqual(str(ip_lib.IPDevice('tap0')), 'tap0') class TestIPCommandBase(base.BaseTestCase): def setUp(self): super(TestIPCommandBase, self).setUp() self.ip = mock.Mock() self.ip.namespace = 'namespace' self.ip_cmd = ip_lib.IpCommandBase(self.ip) self.ip_cmd.COMMAND = 'foo' def test_run(self): self.ip_cmd._run([], ('link', 'show')) self.ip.assert_has_calls([mock.call._run([], 'foo', ('link', 'show'))]) def test_run_with_options(self): self.ip_cmd._run(['o'], ('link')) self.ip.assert_has_calls([mock.call._run(['o'], 'foo', ('link'))]) def test_as_root_namespace_false(self): self.ip_cmd._as_root([], ('link')) self.ip.assert_has_calls( [mock.call._as_root([], 'foo', ('link'), use_root_namespace=False)]) def test_as_root_namespace_true(self): self.ip_cmd._as_root([], ('link'), use_root_namespace=True) self.ip.assert_has_calls( [mock.call._as_root([], 'foo', ('link'), use_root_namespace=True)]) def test_as_root_namespace_true_with_options(self): self.ip_cmd._as_root('o', 'link', use_root_namespace=True) self.ip.assert_has_calls( [mock.call._as_root('o', 'foo', ('link'), use_root_namespace=True)]) class TestIPDeviceCommandBase(base.BaseTestCase): def setUp(self): super(TestIPDeviceCommandBase, self).setUp() self.ip_dev = mock.Mock() self.ip_dev.name = 'eth0' self.ip_dev._execute = mock.Mock(return_value='executed') self.ip_cmd = ip_lib.IpDeviceCommandBase(self.ip_dev) self.ip_cmd.COMMAND = 'foo' def test_name_property(self): self.assertEqual(self.ip_cmd.name, 'eth0') class TestIPCmdBase(base.BaseTestCase): def setUp(self): super(TestIPCmdBase, self).setUp() self.parent = mock.Mock() self.parent.name = 'eth0' def _assert_call(self, options, args): self.parent._run.assert_has_calls([ mock.call(options, self.command, args)]) def _assert_sudo(self, options, args, use_root_namespace=False): self.parent._as_root.assert_has_calls( [mock.call(options, self.command, args, use_root_namespace=use_root_namespace)]) class TestIpRuleCommand(TestIPCmdBase): def setUp(self): super(TestIpRuleCommand, self).setUp() self.parent._as_root.return_value = '' self.command = 'rule' self.rule_cmd = ip_lib.IpRuleCommand(self.parent) def _test_add_rule(self, ip, table, priority): ip_version = netaddr.IPNetwork(ip).version self.rule_cmd.add(ip, table=table, priority=priority) self._assert_sudo([ip_version], (['show'])) self._assert_sudo([ip_version], ('add', 'from', ip, 'priority', str(priority), 'table', str(table), 'type', 'unicast')) def _test_add_rule_exists(self, ip, table, priority, output): self.parent._as_root.return_value = output ip_version = netaddr.IPNetwork(ip).version self.rule_cmd.add(ip, table=table, priority=priority) self._assert_sudo([ip_version], (['show'])) def _test_delete_rule(self, ip, table, priority): ip_version = netaddr.IPNetwork(ip).version self.rule_cmd.delete(ip, table=table, priority=priority) self._assert_sudo([ip_version], ('del', 'priority', str(priority), 'table', str(table), 'type', 'unicast')) def test__parse_line(self): def test(ip_version, line, expected): actual = self.rule_cmd._parse_line(ip_version, line) self.assertEqual(expected, actual) test(4, "4030201:\tfrom 1.2.3.4/24 lookup 10203040", {'from': '1.2.3.4/24', 'table': '10203040', 'type': 'unicast', 'priority': '4030201'}) test(6, "1024: 
from all iif qg-c43b1928-48 lookup noscope", {'priority': '1024', 'from': '::/0', 'type': 'unicast', 'iif': 'qg-c43b1928-48', 'table': 'noscope'}) def test__make_canonical_all_v4(self): actual = self.rule_cmd._make_canonical(4, {'from': 'all'}) self.assertEqual({'from': '0.0.0.0/0', 'type': 'unicast'}, actual) def test__make_canonical_all_v6(self): actual = self.rule_cmd._make_canonical(6, {'from': 'all'}) self.assertEqual({'from': '::/0', 'type': 'unicast'}, actual) def test__make_canonical_lookup(self): actual = self.rule_cmd._make_canonical(6, {'lookup': 'table'}) self.assertEqual({'table': 'table', 'type': 'unicast'}, actual) def test__make_canonical_iif(self): actual = self.rule_cmd._make_canonical(6, {'iif': 'iface_name'}) self.assertEqual({'iif': 'iface_name', 'type': 'unicast'}, actual) def test__make_canonical_fwmark(self): actual = self.rule_cmd._make_canonical(6, {'fwmark': '0x400'}) self.assertEqual({'fwmark': '0x400/0xffffffff', 'type': 'unicast'}, actual) def test__make_canonical_fwmark_with_mask(self): actual = self.rule_cmd._make_canonical(6, {'fwmark': '0x400/0x00ff'}) self.assertEqual({'fwmark': '0x400/0xff', 'type': 'unicast'}, actual) def test__make_canonical_fwmark_integer(self): actual = self.rule_cmd._make_canonical(6, {'fwmark': 0x400}) self.assertEqual({'fwmark': '0x400/0xffffffff', 'type': 'unicast'}, actual) def test__make_canonical_fwmark_iterable(self): actual = self.rule_cmd._make_canonical(6, {'fwmark': (0x400, 0xffff)}) self.assertEqual({'fwmark': '0x400/0xffff', 'type': 'unicast'}, actual) def test_add_rule_v4(self): self._test_add_rule('192.168.45.100', 2, 100) def test_add_rule_v4_exists(self): self._test_add_rule_exists('192.168.45.100', 2, 101, RULE_V4_SAMPLE) def test_add_rule_v6(self): self._test_add_rule('2001:db8::1', 3, 200) def test_add_rule_v6_exists(self): self._test_add_rule_exists('2001:db8::1', 3, 201, RULE_V6_SAMPLE) def test_delete_rule_v4(self): self._test_delete_rule('192.168.45.100', 2, 100) def test_delete_rule_v6(self): self._test_delete_rule('2001:db8::1', 3, 200) class TestIpLinkCommand(TestIPCmdBase): def setUp(self): super(TestIpLinkCommand, self).setUp() self.parent._run.return_value = LINK_SAMPLE[1] self.command = 'link' self.link_cmd = ip_lib.IpLinkCommand(self.parent) def test_set_address(self): self.link_cmd.set_address('aa:bb:cc:dd:ee:ff') self._assert_sudo([], ('set', 'eth0', 'address', 'aa:bb:cc:dd:ee:ff')) def test_set_allmulticast_on(self): self.link_cmd.set_allmulticast_on() self._assert_sudo([], ('set', 'eth0', 'allmulticast', 'on')) def test_set_mtu(self): self.link_cmd.set_mtu(1500) self._assert_sudo([], ('set', 'eth0', 'mtu', 1500)) def test_set_up(self): observed = self.link_cmd.set_up() self.assertEqual(self.parent._as_root.return_value, observed) self._assert_sudo([], ('set', 'eth0', 'up')) def test_set_down(self): observed = self.link_cmd.set_down() self.assertEqual(self.parent._as_root.return_value, observed) self._assert_sudo([], ('set', 'eth0', 'down')) def test_set_netns(self): self.link_cmd.set_netns('foo') self._assert_sudo([], ('set', 'eth0', 'netns', 'foo')) self.assertEqual(self.parent.namespace, 'foo') def test_set_name(self): self.link_cmd.set_name('tap1') self._assert_sudo([], ('set', 'eth0', 'name', 'tap1')) self.assertEqual(self.parent.name, 'tap1') def test_set_alias(self): self.link_cmd.set_alias('openvswitch') self._assert_sudo([], ('set', 'eth0', 'alias', 'openvswitch')) def test_delete(self): self.link_cmd.delete() self._assert_sudo([], ('delete', 'eth0')) def test_address_property(self): 
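        # Each *_property test below feeds the canned LINK_SAMPLE[1] output
        # through a stubbed _execute and checks that IpLinkCommand parses the
        # corresponding "ip link show" attribute out of it.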
self.parent._execute = mock.Mock(return_value=LINK_SAMPLE[1]) self.assertEqual(self.link_cmd.address, 'cc:dd:ee:ff:ab:cd') def test_mtu_property(self): self.parent._execute = mock.Mock(return_value=LINK_SAMPLE[1]) self.assertEqual(self.link_cmd.mtu, 1500) def test_qdisc_property(self): self.parent._execute = mock.Mock(return_value=LINK_SAMPLE[1]) self.assertEqual(self.link_cmd.qdisc, 'mq') def test_qlen_property(self): self.parent._execute = mock.Mock(return_value=LINK_SAMPLE[1]) self.assertEqual(self.link_cmd.qlen, 1000) def test_alias_property(self): self.parent._execute = mock.Mock(return_value=LINK_SAMPLE[1]) self.assertEqual(self.link_cmd.alias, 'openvswitch') def test_state_property(self): self.parent._execute = mock.Mock(return_value=LINK_SAMPLE[1]) self.assertEqual(self.link_cmd.state, 'UP') def test_settings_property(self): expected = {'mtu': 1500, 'qlen': 1000, 'state': 'UP', 'qdisc': 'mq', 'brd': 'ff:ff:ff:ff:ff:ff', 'link/ether': 'cc:dd:ee:ff:ab:cd', 'alias': 'openvswitch'} self.parent._execute = mock.Mock(return_value=LINK_SAMPLE[1]) self.assertEqual(self.link_cmd.attributes, expected) self._assert_call(['o'], ('show', 'eth0')) class TestIpAddrCommand(TestIPCmdBase): def setUp(self): super(TestIpAddrCommand, self).setUp() self.parent.name = 'tap0' self.command = 'addr' self.addr_cmd = ip_lib.IpAddrCommand(self.parent) def test_add_address(self): self.addr_cmd.add('192.168.45.100/24') self._assert_sudo([4], ('add', '192.168.45.100/24', 'scope', 'global', 'dev', 'tap0', 'brd', '192.168.45.255')) def test_add_address_scoped(self): self.addr_cmd.add('192.168.45.100/24', scope='link') self._assert_sudo([4], ('add', '192.168.45.100/24', 'scope', 'link', 'dev', 'tap0', 'brd', '192.168.45.255')) def test_add_address_no_broadcast(self): self.addr_cmd.add('192.168.45.100/24', add_broadcast=False) self._assert_sudo([4], ('add', '192.168.45.100/24', 'scope', 'global', 'dev', 'tap0')) def test_del_address(self): self.addr_cmd.delete('192.168.45.100/24') self._assert_sudo([4], ('del', '192.168.45.100/24', 'dev', 'tap0')) def test_flush(self): self.addr_cmd.flush(6) self._assert_sudo([6], ('flush', 'tap0')) def test_list(self): expected = [ dict(name='eth0', scope='global', dadfailed=False, tentative=False, dynamic=False, cidr='172.16.77.240/24'), dict(name='eth0', scope='global', dadfailed=False, tentative=False, dynamic=True, cidr='2001:470:9:1224:5595:dd51:6ba2:e788/64'), dict(name='eth0', scope='link', dadfailed=False, tentative=True, dynamic=False, cidr='fe80::3023:39ff:febc:22ae/64'), dict(name='eth0', scope='link', dadfailed=True, tentative=True, dynamic=False, cidr='fe80::3023:39ff:febc:22af/64'), dict(name='eth0', scope='global', dadfailed=False, tentative=False, dynamic=True, cidr='2001:470:9:1224:fd91:272:581e:3a32/64'), dict(name='eth0', scope='global', dadfailed=False, tentative=False, dynamic=True, cidr='2001:470:9:1224:4508:b885:5fb:740b/64'), dict(name='eth0', scope='global', dadfailed=False, tentative=False, dynamic=True, cidr='2001:470:9:1224:dfcc:aaff:feb9:76ce/64'), dict(name='eth0', scope='link', dadfailed=False, tentative=False, dynamic=False, cidr='fe80::dfcc:aaff:feb9:76ce/64')] test_cases = [ADDR_SAMPLE, ADDR_SAMPLE2] for test_case in test_cases: self.parent._run = mock.Mock(return_value=test_case) self.assertEqual(expected, self.addr_cmd.list()) self._assert_call([], ('show', 'tap0')) def test_wait_until_address_ready(self): self.parent._run.return_value = ADDR_SAMPLE # this address is not tentative or failed so it should return 
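        # wait_until_address_ready() returns None once the address is neither
        # tentative nor dadfailed; the two tests below show it raising
        # ip_lib.AddressNotReady for a missing address or an expired wait_time.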
self.assertIsNone(self.addr_cmd.wait_until_address_ready( '2001:470:9:1224:fd91:272:581e:3a32')) def test_wait_until_address_ready_non_existent_address(self): self.addr_cmd.list = mock.Mock(return_value=[]) with testtools.ExpectedException(ip_lib.AddressNotReady): self.addr_cmd.wait_until_address_ready('abcd::1234') def test_wait_until_address_ready_timeout(self): tentative_address = 'fe80::3023:39ff:febc:22ae' self.addr_cmd.list = mock.Mock(return_value=[ dict(scope='link', dadfailed=False, tentative=True, dynamic=False, cidr=tentative_address + '/64')]) with testtools.ExpectedException(ip_lib.AddressNotReady): self.addr_cmd.wait_until_address_ready(tentative_address, wait_time=1) def test_list_filtered(self): expected = [ dict(name='eth0', scope='global', tentative=False, dadfailed=False, dynamic=False, cidr='172.16.77.240/24')] test_cases = [ADDR_SAMPLE, ADDR_SAMPLE2] for test_case in test_cases: output = '\n'.join(test_case.split('\n')[0:4]) self.parent._run.return_value = output self.assertEqual(self.addr_cmd.list('global', filters=['permanent']), expected) self._assert_call([], ('show', 'tap0', 'permanent', 'scope', 'global')) def test_get_devices_with_ip(self): self.parent._run.return_value = ADDR_SAMPLE3 devices = self.addr_cmd.get_devices_with_ip('172.16.77.240/24') self.assertEqual(1, len(devices)) self.assertEqual('eth0', devices[0]['name']) class TestIpRouteCommand(TestIPCmdBase): def setUp(self): super(TestIpRouteCommand, self).setUp() self.parent.name = 'eth0' self.command = 'route' self.route_cmd = ip_lib.IpRouteCommand(self.parent) self.ip_version = 4 self.table = 14 self.metric = 100 self.cidr = '192.168.45.100/24' self.ip = '10.0.0.1' self.gateway = '192.168.45.100' self.test_cases = [{'sample': GATEWAY_SAMPLE1, 'expected': {'gateway': '10.35.19.254', 'metric': 100}}, {'sample': GATEWAY_SAMPLE2, 'expected': {'gateway': '10.35.19.254', 'metric': 100}}, {'sample': GATEWAY_SAMPLE3, 'expected': None}, {'sample': GATEWAY_SAMPLE4, 'expected': {'gateway': '10.35.19.254'}}, {'sample': GATEWAY_SAMPLE5, 'expected': {'gateway': '192.168.99.1'}}, {'sample': GATEWAY_SAMPLE6, 'expected': {'gateway': '192.168.99.1', 'metric': 100}}, {'sample': GATEWAY_SAMPLE7, 'expected': {'metric': 1}}] def test_add_gateway(self): self.route_cmd.add_gateway(self.gateway, self.metric, self.table) self._assert_sudo([self.ip_version], ('replace', 'default', 'via', self.gateway, 'metric', self.metric, 'dev', self.parent.name, 'table', self.table)) def test_add_gateway_subtable(self): self.route_cmd.table(self.table).add_gateway(self.gateway, self.metric) self._assert_sudo([self.ip_version], ('replace', 'default', 'via', self.gateway, 'metric', self.metric, 'dev', self.parent.name, 'table', self.table)) def test_del_gateway_success(self): self.route_cmd.delete_gateway(self.gateway, table=self.table) self._assert_sudo([self.ip_version], ('del', 'default', 'via', self.gateway, 'dev', self.parent.name, 'table', self.table)) def test_del_gateway_success_subtable(self): self.route_cmd.table(table=self.table).delete_gateway(self.gateway) self._assert_sudo([self.ip_version], ('del', 'default', 'via', self.gateway, 'dev', self.parent.name, 'table', self.table)) def test_del_gateway_cannot_find_device(self): self.parent._as_root.side_effect = RuntimeError("Cannot find device") exc = self.assertRaises(exceptions.DeviceNotFoundError, self.route_cmd.delete_gateway, self.gateway, table=self.table) self.assertIn(self.parent.name, str(exc)) def test_del_gateway_other_error(self): self.parent._as_root.side_effect = 
RuntimeError() self.assertRaises(RuntimeError, self.route_cmd.delete_gateway, self.gateway, table=self.table) def test_get_gateway(self): for test_case in self.test_cases: self.parent._run = mock.Mock(return_value=test_case['sample']) self.assertEqual(self.route_cmd.get_gateway(), test_case['expected']) def test_pullup_route(self): # NOTE(brian-haley) Currently we do not have any IPv6-specific use case # for pullup_route, hence skipping. Revisit, if required, in the future. if self.ip_version == 6: return # interface is not the first in the list - requires # deleting and creating existing entries output = [DEVICE_ROUTE_SAMPLE, SUBNET_SAMPLE1] def pullup_side_effect(self, *args): result = output.pop(0) return result self.parent._run = mock.Mock(side_effect=pullup_side_effect) self.route_cmd.pullup_route('tap1d7888a7-10', ip_version=4) self._assert_sudo([4], ('del', '10.0.0.0/24', 'dev', 'qr-23380d11-d2')) self._assert_sudo([4], ('append', '10.0.0.0/24', 'proto', 'kernel', 'src', '10.0.0.1', 'dev', 'qr-23380d11-d2')) def test_pullup_route_first(self): # NOTE(brian-haley) Currently we do not have any IPv6-specific use case # for pullup_route, hence skipping. Revisit, if required, in the future. if self.ip_version == 6: return # interface is first in the list - no changes output = [DEVICE_ROUTE_SAMPLE, SUBNET_SAMPLE2] def pullup_side_effect(self, *args): result = output.pop(0) return result self.parent._run = mock.Mock(side_effect=pullup_side_effect) self.route_cmd.pullup_route('tap1d7888a7-10', ip_version=4) # Check two calls - device get and subnet get self.assertEqual(len(self.parent._run.mock_calls), 2) def test_flush_route_table(self): self.route_cmd.flush(self.ip_version, self.table) self._assert_sudo([self.ip_version], ('flush', 'table', self.table)) def test_add_route(self): self.route_cmd.add_route(self.cidr, self.ip, self.table) self._assert_sudo([self.ip_version], ('replace', self.cidr, 'via', self.ip, 'dev', self.parent.name, 'table', self.table)) def test_add_route_no_via(self): self.route_cmd.add_route(self.cidr, table=self.table) self._assert_sudo([self.ip_version], ('replace', self.cidr, 'dev', self.parent.name, 'table', self.table)) def test_add_route_with_scope(self): self.route_cmd.add_route(self.cidr, scope='link') self._assert_sudo([self.ip_version], ('replace', self.cidr, 'dev', self.parent.name, 'scope', 'link')) def test_add_route_no_device(self): self.parent._as_root.side_effect = RuntimeError("Cannot find device") self.assertRaises(exceptions.DeviceNotFoundError, self.route_cmd.add_route, self.cidr, self.ip, self.table) def test_delete_route(self): self.route_cmd.delete_route(self.cidr, self.ip, self.table) self._assert_sudo([self.ip_version], ('del', self.cidr, 'via', self.ip, 'dev', self.parent.name, 'table', self.table)) def test_delete_route_no_via(self): self.route_cmd.delete_route(self.cidr, table=self.table) self._assert_sudo([self.ip_version], ('del', self.cidr, 'dev', self.parent.name, 'table', self.table)) def test_delete_route_with_scope(self): self.route_cmd.delete_route(self.cidr, scope='link') self._assert_sudo([self.ip_version], ('del', self.cidr, 'dev', self.parent.name, 'scope', 'link')) def test_delete_route_no_device(self): self.parent._as_root.side_effect = RuntimeError("Cannot find device") self.assertRaises(exceptions.DeviceNotFoundError, self.route_cmd.delete_route, self.cidr, self.ip, self.table) def test_list_routes(self): self.parent._run.return_value = ( "default via 172.124.4.1 dev eth0 metric 100\n" "10.0.0.0/22 dev eth0 scope link\n" "172.24.4.0/24
dev eth0 proto kernel src 172.24.4.2\n") routes = self.route_cmd.table(self.table).list_routes(self.ip_version) self.assertEqual([{'cidr': '0.0.0.0/0', 'dev': 'eth0', 'metric': '100', 'table': 14, 'via': '172.124.4.1'}, {'cidr': '10.0.0.0/22', 'dev': 'eth0', 'scope': 'link', 'table': 14}, {'cidr': '172.24.4.0/24', 'dev': 'eth0', 'proto': 'kernel', 'src': '172.24.4.2', 'table': 14}], routes) def test_list_onlink_routes_subtable(self): self.parent._run.return_value = ( "10.0.0.0/22\n" "172.24.4.0/24 proto kernel src 172.24.4.2\n") routes = self.route_cmd.table(self.table).list_onlink_routes( self.ip_version) self.assertEqual(['10.0.0.0/22'], [r['cidr'] for r in routes]) self._assert_call([self.ip_version], ('list', 'dev', self.parent.name, 'table', self.table, 'scope', 'link')) def test_add_onlink_route_subtable(self): self.route_cmd.table(self.table).add_onlink_route(self.cidr) self._assert_sudo([self.ip_version], ('replace', self.cidr, 'dev', self.parent.name, 'table', self.table, 'scope', 'link')) def test_delete_onlink_route_subtable(self): self.route_cmd.table(self.table).delete_onlink_route(self.cidr) self._assert_sudo([self.ip_version], ('del', self.cidr, 'dev', self.parent.name, 'table', self.table, 'scope', 'link')) class TestIPv6IpRouteCommand(TestIpRouteCommand): def setUp(self): super(TestIPv6IpRouteCommand, self).setUp() self.ip_version = 6 self.cidr = '2001:db8::/64' self.ip = '2001:db8::100' self.gateway = '2001:db8::1' self.test_cases = [{'sample': IPv6_GATEWAY_SAMPLE1, 'expected': {'gateway': '2001:470:9:1224:4508:b885:5fb:740b', 'metric': 100}}, {'sample': IPv6_GATEWAY_SAMPLE2, 'expected': {'gateway': '2001:470:9:1224:4508:b885:5fb:740b', 'metric': 100}}, {'sample': IPv6_GATEWAY_SAMPLE3, 'expected': None}, {'sample': IPv6_GATEWAY_SAMPLE4, 'expected': {'gateway': 'fe80::dfcc:aaff:feb9:76ce'}}, {'sample': IPv6_GATEWAY_SAMPLE5, 'expected': {'gateway': '2001:470:9:1224:4508:b885:5fb:740b', 'metric': 1024}}] def test_list_routes(self): self.parent._run.return_value = ( "default via 2001:db8::1 dev eth0 metric 100\n" "2001:db8::/64 dev eth0 proto kernel src 2001:db8::2\n") routes = self.route_cmd.table(self.table).list_routes(self.ip_version) self.assertEqual([{'cidr': '::/0', 'dev': 'eth0', 'metric': '100', 'table': 14, 'via': '2001:db8::1'}, {'cidr': '2001:db8::/64', 'dev': 'eth0', 'proto': 'kernel', 'src': '2001:db8::2', 'table': 14}], routes) class TestIPRoute(TestIpRouteCommand): """Leverage existing tests for IpRouteCommand for IPRoute This test leverages the tests written for IpRouteCommand. The difference is that the 'dev' argument should not be passed for each of the commands. So, this test removes the dev argument from the expected arguments in each assert. 
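When check_dev_args is False, the _remove_dev_args() helper below strips each 'dev <name>' pair from the expected argument tuples before delegating to the base class asserts.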
""" def setUp(self): super(TestIPRoute, self).setUp() self.parent = ip_lib.IPRoute() self.parent._run = mock.Mock() self.parent._as_root = mock.Mock() self.route_cmd = self.parent.route self.check_dev_args = False def _remove_dev_args(self, args): def args_without_dev(): previous = None for arg in args: if 'dev' not in (arg, previous): yield arg previous = arg return tuple(arg for arg in args_without_dev()) def _assert_call(self, options, args): if not self.check_dev_args: args = self._remove_dev_args(args) super(TestIPRoute, self)._assert_call(options, args) def _assert_sudo(self, options, args, use_root_namespace=False): if not self.check_dev_args: args = self._remove_dev_args(args) super(TestIPRoute, self)._assert_sudo(options, args) def test_pullup_route(self): # This method gets the interface name passed to it as an argument. So, # don't remove it from the expected arguments. self.check_dev_args = True super(TestIPRoute, self).test_pullup_route() def test_del_gateway_cannot_find_device(self): # This test doesn't make sense for this case since dev won't be passed pass class TestIpNetnsCommand(TestIPCmdBase): def setUp(self): super(TestIpNetnsCommand, self).setUp() self.command = 'netns' self.netns_cmd = ip_lib.IpNetnsCommand(self.parent) def test_add_namespace(self): with mock.patch('neutron.agent.common.utils.execute') as execute: ns = self.netns_cmd.add('ns') self._assert_sudo([], ('add', 'ns'), use_root_namespace=True) self.assertEqual(ns.namespace, 'ns') execute.assert_called_once_with( ['ip', 'netns', 'exec', 'ns', 'sysctl', '-w', 'net.ipv4.conf.all.promote_secondaries=1'], run_as_root=True, check_exit_code=True, extra_ok_codes=None, log_fail_as_error=True) def test_delete_namespace(self): with mock.patch('neutron.agent.common.utils.execute'): self.netns_cmd.delete('ns') self._assert_sudo([], ('delete', 'ns'), use_root_namespace=True) def test_namespace_exists_use_helper(self): self.config(group='AGENT', use_helper_for_ns_read=True) retval = '\n'.join(NETNS_SAMPLE) # need another instance to avoid mocking netns_cmd = ip_lib.IpNetnsCommand(ip_lib.SubProcessBase()) with mock.patch('neutron.agent.common.utils.execute') as execute: execute.return_value = retval self.assertTrue( netns_cmd.exists('bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb')) execute.assert_called_once_with(['ip', '-o', 'netns', 'list'], run_as_root=True, log_fail_as_error=True) def test_namespace_doest_not_exist_no_helper(self): self.config(group='AGENT', use_helper_for_ns_read=False) retval = '\n'.join(NETNS_SAMPLE) # need another instance to avoid mocking netns_cmd = ip_lib.IpNetnsCommand(ip_lib.SubProcessBase()) with mock.patch('neutron.agent.common.utils.execute') as execute: execute.return_value = retval self.assertFalse( netns_cmd.exists('bbbbbbbb-1111-2222-3333-bbbbbbbbbbbb')) execute.assert_called_once_with(['ip', '-o', 'netns', 'list'], run_as_root=False, log_fail_as_error=True) def test_execute(self): self.parent.namespace = 'ns' with mock.patch('neutron.agent.common.utils.execute') as execute: self.netns_cmd.execute(['ip', 'link', 'list']) execute.assert_called_once_with(['ip', 'netns', 'exec', 'ns', 'ip', 'link', 'list'], run_as_root=True, check_exit_code=True, extra_ok_codes=None, log_fail_as_error=True) def test_execute_env_var_prepend(self): self.parent.namespace = 'ns' with mock.patch('neutron.agent.common.utils.execute') as execute: env = dict(FOO=1, BAR=2) self.netns_cmd.execute(['ip', 'link', 'list'], env) execute.assert_called_once_with( ['ip', 'netns', 'exec', 'ns', 'env'] + ['%s=%s' % (k, v) for k, v in 
env.items()] + ['ip', 'link', 'list'], run_as_root=True, check_exit_code=True, extra_ok_codes=None, log_fail_as_error=True) def test_execute_nosudo_with_no_namespace(self): with mock.patch('neutron.agent.common.utils.execute') as execute: self.parent.namespace = None self.netns_cmd.execute(['test']) execute.assert_called_once_with(['test'], check_exit_code=True, extra_ok_codes=None, run_as_root=False, log_fail_as_error=True) class TestDeviceExists(base.BaseTestCase): def test_device_exists(self): with mock.patch.object(ip_lib.IPDevice, '_execute') as _execute: _execute.return_value = LINK_SAMPLE[1] self.assertTrue(ip_lib.device_exists('eth0')) _execute.assert_called_once_with(['o'], 'link', ('show', 'eth0'), log_fail_as_error=False) def test_device_exists_reset_fail(self): device = ip_lib.IPDevice('eth0') device.set_log_fail_as_error(True) with mock.patch.object(ip_lib.IPDevice, '_execute') as _execute: _execute.return_value = LINK_SAMPLE[1] self.assertTrue(device.exists()) self.assertTrue(device.get_log_fail_as_error()) def test_device_does_not_exist(self): with mock.patch.object(ip_lib.IPDevice, '_execute') as _execute: _execute.return_value = '' _execute.side_effect = RuntimeError self.assertFalse(ip_lib.device_exists('eth0')) def test_ensure_device_is_ready(self): ip_lib_mock = mock.Mock() with mock.patch.object(ip_lib, 'IPDevice', return_value=ip_lib_mock): self.assertTrue(ip_lib.ensure_device_is_ready("eth0")) self.assertTrue(ip_lib_mock.link.set_up.called) ip_lib_mock.reset_mock() # device doesn't exist ip_lib_mock.link.set_up.side_effect = RuntimeError self.assertFalse(ip_lib.ensure_device_is_ready("eth0")) class TestGetRoutingTable(base.BaseTestCase): @mock.patch.object(ip_lib, 'IPWrapper') def _test_get_routing_table(self, version, ip_route_output, expected, mock_ipwrapper): instance = mock_ipwrapper.return_value mock_netns = instance.netns mock_execute = mock_netns.execute mock_execute.return_value = ip_route_output self.assertEqual(expected, ip_lib.get_routing_table(version)) def test_get_routing_table_4(self): ip_route_output = (""" default via 192.168.3.120 dev wlp3s0 proto static metric 1024 10.0.0.0/8 dev tun0 proto static scope link metric 1024 10.0.1.0/8 dev tun1 proto static scope link metric 1024 linkdown """) expected = [{'destination': 'default', 'nexthop': '192.168.3.120', 'device': 'wlp3s0', 'scope': None}, {'destination': '10.0.0.0/8', 'nexthop': None, 'device': 'tun0', 'scope': 'link'}, {'destination': '10.0.1.0/8', 'nexthop': None, 'device': 'tun1', 'scope': 'link'}] self._test_get_routing_table(4, ip_route_output, expected) def test_get_routing_table_6(self): ip_route_output = (""" 2001:db8:0:f101::/64 dev tap-1 proto kernel metric 256 pref medium 2001:db8:0:f102::/64 dev tap-2 proto kernel metric 256 pref medium linkdown default via 2001:db8:0:f101::4 dev tap-1 metric 1024 pref medium """) expected = [{'destination': '2001:db8:0:f101::/64', 'nexthop': None, 'device': 'tap-1', 'scope': None}, {'destination': '2001:db8:0:f102::/64', 'nexthop': None, 'device': 'tap-2', 'scope': None}, {'destination': 'default', 'nexthop': '2001:db8:0:f101::4', 'device': 'tap-1', 'scope': None}] self._test_get_routing_table(6, ip_route_output, expected) class TestIpNeighCommand(TestIPCmdBase): def setUp(self): super(TestIpNeighCommand, self).setUp() self.parent.name = 'tap0' self.command = 'neigh' self.neigh_cmd = ip_lib.IpNeighCommand(self.parent) def test_add_entry(self): self.neigh_cmd.add('192.168.45.100', 'cc:dd:ee:ff:ab:cd') self._assert_sudo([4], ('replace',
'192.168.45.100', 'lladdr', 'cc:dd:ee:ff:ab:cd', 'nud', 'permanent', 'dev', 'tap0')) def test_delete_entry(self): self.neigh_cmd.delete('192.168.45.100', 'cc:dd:ee:ff:ab:cd') self._assert_sudo([4], ('del', '192.168.45.100', 'lladdr', 'cc:dd:ee:ff:ab:cd', 'dev', 'tap0')) def test_flush(self): self.neigh_cmd.flush(4, '192.168.0.1') self._assert_sudo([4], ('flush', 'to', '192.168.0.1')) class TestArpPing(TestIPCmdBase): @mock.patch.object(ip_lib, 'IPWrapper') @mock.patch('eventlet.spawn_n') def test_send_ipv4_addr_adv_notif(self, spawn_n, mIPWrapper): spawn_n.side_effect = lambda f: f() ARPING_COUNT = 3 address = '20.0.0.1' config = mock.Mock() config.send_arp_for_ha = ARPING_COUNT ip_lib.send_ip_addr_adv_notif(mock.sentinel.ns_name, mock.sentinel.iface_name, address, config) self.assertTrue(spawn_n.called) mIPWrapper.assert_called_once_with(namespace=mock.sentinel.ns_name) ip_wrapper = mIPWrapper(namespace=mock.sentinel.ns_name) # Just test that arping is called with the right arguments arping_cmd = ['arping', '-A', '-I', mock.sentinel.iface_name, '-c', ARPING_COUNT, '-w', mock.ANY, address] ip_wrapper.netns.execute.assert_any_call(arping_cmd, extra_ok_codes=[1]) @mock.patch('eventlet.spawn_n') def test_no_ipv6_addr_notif(self, spawn_n): ipv6_addr = 'fd00::1' config = mock.Mock() config.send_arp_for_ha = 3 ip_lib.send_ip_addr_adv_notif(mock.sentinel.ns_name, mock.sentinel.iface_name, ipv6_addr, config) self.assertFalse(spawn_n.called) class TestAddNamespaceToCmd(base.BaseTestCase): def test_add_namespace_to_cmd_with_namespace(self): cmd = ['ping', '8.8.8.8'] self.assertEqual(['ip', 'netns', 'exec', 'tmp'] + cmd, ip_lib.add_namespace_to_cmd(cmd, 'tmp')) def test_add_namespace_to_cmd_without_namespace(self): cmd = ['ping', '8.8.8.8'] self.assertEqual(cmd, ip_lib.add_namespace_to_cmd(cmd, None)) class TestSetIpNonlocalBindForHaNamespace(base.BaseTestCase): def test_setting_failure(self): """Make sure message is formatted correctly.""" with mock.patch.object( ip_lib, 'set_ip_nonlocal_bind', side_effect=RuntimeError): ip_lib.set_ip_nonlocal_bind_for_namespace('foo') neutron-8.4.0/neutron/tests/unit/agent/test_rpc.py0000664000567000056710000001747313044372760023506 0ustar jenkinsjenkins00000000000000# Copyright (c) 2012 OpenStack Foundation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
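# Unit tests for neutron.agent.rpc: the PluginApi RPC wrapper, agent state
# reporting via PluginReportStateAPI (call vs. cast), and the
# create_consumers() helper. All messaging is mocked; no broker is needed.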
import datetime import mock from oslo_context import context as oslo_context import oslo_messaging from neutron.agent import rpc from neutron.tests import base class AgentRPCPluginApi(base.BaseTestCase): def _test_rpc_call(self, method): agent = rpc.PluginApi('fake_topic') ctxt = oslo_context.RequestContext('fake_user', 'fake_project') expect_val = 'foo' with mock.patch.object(agent.client, 'call') as mock_call,\ mock.patch.object(agent.client, 'prepare') as mock_prepare: mock_prepare.return_value = agent.client mock_call.return_value = expect_val func_obj = getattr(agent, method) if method == 'tunnel_sync': actual_val = func_obj(ctxt, 'fake_tunnel_ip') else: actual_val = func_obj(ctxt, 'fake_device', 'fake_agent_id') self.assertEqual(actual_val, expect_val) def test_get_device_details(self): self._test_rpc_call('get_device_details') def test_get_devices_details_list(self): self._test_rpc_call('get_devices_details_list') def test_devices_details_list_unsupported(self): agent = rpc.PluginApi('fake_topic') ctxt = oslo_context.RequestContext('fake_user', 'fake_project') expect_val_get_device_details = 'foo' expect_val = [expect_val_get_device_details] with mock.patch.object(agent.client, 'call') as mock_call, \ mock.patch.object(agent.client, 'prepare') as mock_prepare: mock_prepare.return_value = agent.client mock_call.side_effect = [oslo_messaging.UnsupportedVersion('1.2'), expect_val_get_device_details] func_obj = getattr(agent, 'get_devices_details_list') actual_val = func_obj(ctxt, ['fake_device'], 'fake_agent_id') self.assertEqual(actual_val, expect_val) def test_update_device_down(self): self._test_rpc_call('update_device_down') def test_tunnel_sync(self): self._test_rpc_call('tunnel_sync') class AgentPluginReportState(base.BaseTestCase): def test_plugin_report_state_use_call(self): topic = 'test' reportStateAPI = rpc.PluginReportStateAPI(topic) expected_agent_state = {'agent': 'test'} with mock.patch.object(reportStateAPI.client, 'call') as mock_call, \ mock.patch.object(reportStateAPI.client, 'cast'), \ mock.patch.object(reportStateAPI.client, 'prepare' ) as mock_prepare: mock_prepare.return_value = reportStateAPI.client ctxt = oslo_context.RequestContext('fake_user', 'fake_project') reportStateAPI.report_state(ctxt, expected_agent_state, use_call=True) self.assertEqual(mock_call.call_args[0][0], ctxt) self.assertEqual(mock_call.call_args[0][1], 'report_state') self.assertEqual(mock_call.call_args[1]['agent_state'], {'agent_state': expected_agent_state}) self.assertIsInstance(mock_call.call_args[1]['time'], str) def test_plugin_report_state_cast(self): topic = 'test' reportStateAPI = rpc.PluginReportStateAPI(topic) expected_agent_state = {'agent': 'test'} with mock.patch.object(reportStateAPI.client, 'call'), \ mock.patch.object(reportStateAPI.client, 'cast' ) as mock_cast, \ mock.patch.object(reportStateAPI.client, 'prepare' ) as mock_prepare: mock_prepare.return_value = reportStateAPI.client ctxt = oslo_context.RequestContext('fake_user', 'fake_project') reportStateAPI.report_state(ctxt, expected_agent_state) self.assertEqual(mock_cast.call_args[0][0], ctxt) self.assertEqual(mock_cast.call_args[0][1], 'report_state') self.assertEqual(mock_cast.call_args[1]['agent_state'], {'agent_state': expected_agent_state}) self.assertIsInstance(mock_cast.call_args[1]['time'], str) def test_plugin_report_state_microsecond_is_0(self): topic = 'test' expected_time = datetime.datetime(2015, 7, 27, 15, 33, 30, 0) expected_time_str = '2015-07-27T15:33:30.000000' expected_agent_state = {'agent': 
'test'} with mock.patch('neutron.agent.rpc.datetime') as mock_datetime: reportStateAPI = rpc.PluginReportStateAPI(topic) mock_datetime.utcnow.return_value = expected_time with mock.patch.object(reportStateAPI.client, 'call'), \ mock.patch.object(reportStateAPI.client, 'cast' ) as mock_cast, \ mock.patch.object(reportStateAPI.client, 'prepare' ) as mock_prepare: mock_prepare.return_value = reportStateAPI.client ctxt = oslo_context.RequestContext('fake_user', 'fake_project') reportStateAPI.report_state(ctxt, expected_agent_state) self.assertEqual(expected_time_str, mock_cast.call_args[1]['time']) class AgentRPCMethods(base.BaseTestCase): def _test_create_consumers( self, endpoints, method, expected, topics, listen): call_to_patch = 'neutron.common.rpc.create_connection' with mock.patch(call_to_patch) as create_connection: rpc.create_consumers( endpoints, method, topics, start_listening=listen) create_connection.assert_has_calls(expected) def test_create_consumers_start_listening(self): endpoints = [mock.Mock()] expected = [ mock.call(), mock.call().create_consumer('foo-topic-op', endpoints, fanout=True), mock.call().consume_in_threads() ] method = 'foo' topics = [('topic', 'op')] self._test_create_consumers( endpoints, method, expected, topics, True) def test_create_consumers_do_not_listen(self): endpoints = [mock.Mock()] expected = [ mock.call(), mock.call().create_consumer('foo-topic-op', endpoints, fanout=True), ] method = 'foo' topics = [('topic', 'op')] self._test_create_consumers( endpoints, method, expected, topics, False) def test_create_consumers_with_node_name(self): endpoints = [mock.Mock()] expected = [ mock.call(), mock.call().create_consumer('foo-topic-op', endpoints, fanout=True), mock.call().create_consumer('foo-topic-op.node1', endpoints, fanout=False), mock.call().consume_in_threads() ] call_to_patch = 'neutron.common.rpc.create_connection' with mock.patch(call_to_patch) as create_connection: rpc.create_consumers(endpoints, 'foo', [('topic', 'op', 'node1')]) create_connection.assert_has_calls(expected) neutron-8.4.0/neutron/tests/unit/agent/l2/0000775000567000056710000000000013044373210021601 5ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/tests/unit/agent/l2/__init__.py0000775000567000056710000000000013044372760023714 0ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/tests/unit/agent/l2/extensions/0000775000567000056710000000000013044373210024000 5ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/tests/unit/agent/l2/extensions/__init__.py0000775000567000056710000000000013044372760026113 0ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/tests/unit/agent/l2/extensions/test_manager.py0000664000567000056710000000360513044372760027040 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
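# Unit tests for the L2 agent extensions manager. The qos agent extension is
# replaced with an autospec mock, so these tests only verify that the manager
# dispatches initialize/handle_port/delete_port to its loaded extensions.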
import mock from oslo_config import cfg from neutron.agent.l2.extensions import manager as ext_manager from neutron.tests import base class TestAgentExtensionsManager(base.BaseTestCase): def setUp(self): super(TestAgentExtensionsManager, self).setUp() mock.patch('neutron.agent.l2.extensions.qos.QosAgentExtension', autospec=True).start() conf = cfg.CONF ext_manager.register_opts(conf) cfg.CONF.set_override('extensions', ['qos'], 'agent') self.manager = ext_manager.AgentExtensionsManager(conf) def _get_extension(self): return self.manager.extensions[0].obj def test_initialize(self): connection = object() self.manager.initialize(connection, 'fake_driver_type') ext = self._get_extension() ext.initialize.assert_called_once_with(connection, 'fake_driver_type') def test_handle_port(self): context = object() data = object() self.manager.handle_port(context, data) ext = self._get_extension() ext.handle_port.assert_called_once_with(context, data) def test_delete_port(self): context = object() data = object() self.manager.delete_port(context, data) ext = self._get_extension() ext.delete_port.assert_called_once_with(context, data) neutron-8.4.0/neutron/tests/unit/agent/l2/extensions/test_qos.py0000775000567000056710000004206613044372760026237 0ustar jenkinsjenkins00000000000000# Copyright (c) 2015 Mellanox Technologies, Ltd # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
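# Unit tests for the QoS L2 agent extension. A FakeDriver defined below
# records the bandwidth-limit calls it receives, and resource RPC pulls are
# mocked to return canned QosPolicy objects, so no server side is involved.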
import mock from oslo_utils import uuidutils from neutron.agent.l2.extensions import qos from neutron.api.rpc.callbacks.consumer import registry from neutron.api.rpc.callbacks import events from neutron.api.rpc.callbacks import resources from neutron.api.rpc.handlers import resources_rpc from neutron.common import exceptions from neutron import context from neutron.objects.qos import policy from neutron.objects.qos import rule from neutron.plugins.ml2.drivers.openvswitch.agent.common import constants from neutron.services.qos import qos_consts from neutron.tests import base BASE_TEST_POLICY = {'context': None, 'name': 'test1', 'id': uuidutils.generate_uuid()} TEST_POLICY = policy.QosPolicy(**BASE_TEST_POLICY) TEST_POLICY_DESCR = policy.QosPolicy(description='fake_descr', **BASE_TEST_POLICY) TEST_POLICY2 = policy.QosPolicy(context=None, name='test2', id=uuidutils.generate_uuid()) TEST_PORT = {'port_id': 'test_port_id', 'qos_policy_id': TEST_POLICY.id} TEST_PORT2 = {'port_id': 'test_port_id_2', 'qos_policy_id': TEST_POLICY2.id} FAKE_RULE_ID = uuidutils.generate_uuid() REALLY_FAKE_RULE_ID = uuidutils.generate_uuid() class FakeDriver(qos.QosAgentDriver): SUPPORTED_RULES = {qos_consts.RULE_TYPE_BANDWIDTH_LIMIT} def __init__(self): super(FakeDriver, self).__init__() self.create_bandwidth_limit = mock.Mock() self.update_bandwidth_limit = mock.Mock() self.delete_bandwidth_limit = mock.Mock() def initialize(self): pass class QosFakeRule(rule.QosRule): rule_type = 'fake_type' class QosAgentDriverTestCase(base.BaseTestCase): def setUp(self): super(QosAgentDriverTestCase, self).setUp() self.driver = FakeDriver() self.policy = TEST_POLICY self.rule = ( rule.QosBandwidthLimitRule(context=None, id=FAKE_RULE_ID, qos_policy_id=self.policy.id, max_kbps=100, max_burst_kbps=200)) self.policy.rules = [self.rule] self.port = {'qos_policy_id': None, 'network_qos_policy_id': None, 'device_owner': 'random-device-owner'} self.fake_rule = QosFakeRule(context=None, id=REALLY_FAKE_RULE_ID, qos_policy_id=self.policy.id) def test_create(self): self.driver.create(self.port, self.policy) self.driver.create_bandwidth_limit.assert_called_with( self.port, self.rule) def test_update(self): self.driver.update(self.port, self.policy) self.driver.update_bandwidth_limit.assert_called_with( self.port, self.rule) def test_delete(self): self.driver.delete(self.port, self.policy) self.driver.delete_bandwidth_limit.assert_called_with(self.port) def test_delete_no_policy(self): self.driver.delete(self.port, qos_policy=None) self.driver.delete_bandwidth_limit.assert_called_with(self.port) def test__iterate_rules_with_unknown_rule_type(self): self.policy.rules.append(self.fake_rule) rules = list(self.driver._iterate_rules(self.policy.rules)) self.assertEqual(1, len(rules)) self.assertIsInstance(rules[0], rule.QosBandwidthLimitRule) def test__handle_update_create_rules_checks_should_apply_to_port(self): self.rule.should_apply_to_port = mock.Mock(return_value=False) self.driver.create(self.port, self.policy) self.assertFalse(self.driver.create_bandwidth_limit.called) self.rule.should_apply_to_port = mock.Mock(return_value=True) self.driver.create(self.port, self.policy) self.assertTrue(self.driver.create_bandwidth_limit.called) def test__get_max_burst_value(self): rule = self.rule rule.max_burst_kbps = 0 expected_burst = rule.max_kbps * qos_consts.DEFAULT_BURST_RATE self.assertEqual( expected_burst, self.driver._get_egress_burst_value(rule) ) class QosExtensionBaseTestCase(base.BaseTestCase): def setUp(self): 
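# Shared fixture: build the extension against a mocked RPC connection and
# stub out driver loading so no real QoS agent driver is instantiated.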
super(QosExtensionBaseTestCase, self).setUp() self.qos_ext = qos.QosAgentExtension() self.context = context.get_admin_context() self.connection = mock.Mock() # Don't rely on used driver mock.patch( 'neutron.manager.NeutronManager.load_class_for_provider', return_value=lambda: mock.Mock(spec=qos.QosAgentDriver) ).start() class QosExtensionRpcTestCase(QosExtensionBaseTestCase): def setUp(self): super(QosExtensionRpcTestCase, self).setUp() self.qos_ext.initialize( self.connection, constants.EXTENSION_DRIVER_TYPE) self.pull_mock = mock.patch.object( self.qos_ext.resource_rpc, 'pull', return_value=TEST_POLICY).start() def _create_test_port_dict(self, qos_policy_id=None): return {'port_id': uuidutils.generate_uuid(), 'qos_policy_id': qos_policy_id or TEST_POLICY.id} def test_handle_port_with_no_policy(self): port = self._create_test_port_dict() del port['qos_policy_id'] self.qos_ext._process_reset_port = mock.Mock() self.qos_ext.handle_port(self.context, port) self.qos_ext._process_reset_port.assert_called_with(port) def test_handle_unknown_port(self): port = self._create_test_port_dict() qos_policy_id = port['qos_policy_id'] port_id = port['port_id'] self.qos_ext.handle_port(self.context, port) # we make sure the underlying qos driver is called with the # right parameters self.qos_ext.qos_driver.create.assert_called_once_with( port, TEST_POLICY) self.assertEqual(port, self.qos_ext.policy_map.qos_policy_ports[qos_policy_id][port_id]) self.assertIn(port_id, self.qos_ext.policy_map.port_policies) self.assertEqual(TEST_POLICY, self.qos_ext.policy_map.known_policies[qos_policy_id]) def test_handle_known_port(self): port_obj1 = self._create_test_port_dict() port_obj2 = dict(port_obj1) self.qos_ext.handle_port(self.context, port_obj1) self.qos_ext.qos_driver.reset_mock() self.qos_ext.handle_port(self.context, port_obj2) self.assertFalse(self.qos_ext.qos_driver.create.called) def test_handle_known_port_change_policy_id(self): port = self._create_test_port_dict() self.qos_ext.handle_port(self.context, port) self.qos_ext.resource_rpc.pull.reset_mock() port['qos_policy_id'] = uuidutils.generate_uuid() self.qos_ext.handle_port(self.context, port) self.pull_mock.assert_called_once_with( self.context, resources.QOS_POLICY, port['qos_policy_id']) def test_delete_known_port(self): port = self._create_test_port_dict() self.qos_ext.handle_port(self.context, port) self.qos_ext.qos_driver.reset_mock() self.qos_ext.delete_port(self.context, port) self.qos_ext.qos_driver.delete.assert_called_with(port) self.assertIsNone(self.qos_ext.policy_map.get_port_policy(port)) def test_delete_unknown_port(self): port = self._create_test_port_dict() self.qos_ext.delete_port(self.context, port) self.assertFalse(self.qos_ext.qos_driver.delete.called) self.assertIsNone(self.qos_ext.policy_map.get_port_policy(port)) def test__handle_notification_ignores_all_event_types_except_updated(self): with mock.patch.object( self.qos_ext, '_process_update_policy') as update_mock: for event_type in set(events.VALID) - {events.UPDATED}: self.qos_ext._handle_notification(object(), event_type) self.assertFalse(update_mock.called) def test__handle_notification_passes_update_events(self): with mock.patch.object( self.qos_ext, '_process_update_policy') as update_mock: policy_obj = mock.Mock() self.qos_ext._handle_notification(policy_obj, events.UPDATED) update_mock.assert_called_with(policy_obj) def test__process_update_policy(self): port1 = self._create_test_port_dict(qos_policy_id=TEST_POLICY.id) port2 = 
self._create_test_port_dict(qos_policy_id=TEST_POLICY2.id) self.qos_ext.policy_map.set_port_policy(port1, TEST_POLICY) self.qos_ext.policy_map.set_port_policy(port2, TEST_POLICY2) self.qos_ext._policy_rules_modified = mock.Mock(return_value=True) policy_obj = mock.Mock() policy_obj.id = port1['qos_policy_id'] self.qos_ext._process_update_policy(policy_obj) self.qos_ext.qos_driver.update.assert_called_with(port1, policy_obj) self.qos_ext.qos_driver.update.reset_mock() policy_obj.id = port2['qos_policy_id'] self.qos_ext._process_update_policy(policy_obj) self.qos_ext.qos_driver.update.assert_called_with(port2, policy_obj) def test__process_update_policy_descr_not_propagated_into_driver(self): port = self._create_test_port_dict(qos_policy_id=TEST_POLICY.id) self.qos_ext.policy_map.set_port_policy(port, TEST_POLICY) self.qos_ext._policy_rules_modified = mock.Mock(return_value=False) self.qos_ext._process_update_policy(TEST_POLICY_DESCR) self.qos_ext._policy_rules_modified.assert_called_with(TEST_POLICY, TEST_POLICY_DESCR) self.assertFalse(self.qos_ext.qos_driver.delete.called) self.assertFalse(self.qos_ext.qos_driver.update.called) self.assertEqual(TEST_POLICY_DESCR, self.qos_ext.policy_map.get_policy(TEST_POLICY.id)) def test__process_update_policy_not_known(self): self.qos_ext._policy_rules_modified = mock.Mock() self.qos_ext._process_update_policy(TEST_POLICY_DESCR) self.assertFalse(self.qos_ext._policy_rules_modified.called) self.assertFalse(self.qos_ext.qos_driver.delete.called) self.assertFalse(self.qos_ext.qos_driver.update.called) self.assertIsNone(self.qos_ext.policy_map.get_policy( TEST_POLICY_DESCR.id)) def test__process_reset_port(self): port1 = self._create_test_port_dict(qos_policy_id=TEST_POLICY.id) port2 = self._create_test_port_dict(qos_policy_id=TEST_POLICY2.id) self.qos_ext.policy_map.set_port_policy(port1, TEST_POLICY) self.qos_ext.policy_map.set_port_policy(port2, TEST_POLICY2) self.qos_ext._process_reset_port(port1) self.qos_ext.qos_driver.delete.assert_called_with(port1) self.assertIsNone(self.qos_ext.policy_map.get_port_policy(port1)) self.assertIsNotNone(self.qos_ext.policy_map.get_port_policy(port2)) self.qos_ext.qos_driver.delete.reset_mock() self.qos_ext._process_reset_port(port2) self.qos_ext.qos_driver.delete.assert_called_with(port2) self.assertIsNone(self.qos_ext.policy_map.get_port_policy(port2)) class QosExtensionInitializeTestCase(QosExtensionBaseTestCase): @mock.patch.object(registry, 'subscribe') @mock.patch.object(resources_rpc, 'ResourcesPushRpcCallback') def test_initialize_subscribed_to_rpc(self, rpc_mock, subscribe_mock): self.qos_ext.initialize( self.connection, constants.EXTENSION_DRIVER_TYPE) self.connection.create_consumer.assert_has_calls( [mock.call( resources_rpc.resource_type_versioned_topic(resource_type), [rpc_mock()], fanout=True) for resource_type in self.qos_ext.SUPPORTED_RESOURCES] ) subscribe_mock.assert_called_with(mock.ANY, resources.QOS_POLICY) class QosExtensionReflushRulesTestCase(QosExtensionBaseTestCase): def setUp(self): super(QosExtensionReflushRulesTestCase, self).setUp() self.qos_ext.initialize( self.connection, constants.EXTENSION_DRIVER_TYPE) self.pull_mock = mock.patch.object( self.qos_ext.resource_rpc, 'pull', return_value=TEST_POLICY).start() self.policy = policy.QosPolicy(**BASE_TEST_POLICY) self.rule = ( rule.QosBandwidthLimitRule(context=None, id=FAKE_RULE_ID, qos_policy_id=self.policy.id, max_kbps=100, max_burst_kbps=10)) self.policy.rules = [self.rule] self.port = {'port_id': uuidutils.generate_uuid(), 
'qos_policy_id': TEST_POLICY.id} self.new_policy = policy.QosPolicy(description='descr', **BASE_TEST_POLICY) def test_is_reflush_required_change_policy_descr(self): self.qos_ext.policy_map.set_port_policy(self.port, self.policy) self.new_policy.rules = [self.rule] self.assertFalse(self.qos_ext._policy_rules_modified(self.policy, self.new_policy)) def test_is_reflush_required_change_policy_rule(self): self.qos_ext.policy_map.set_port_policy(self.port, self.policy) updated_rule = (rule.QosBandwidthLimitRule(context=None, id=FAKE_RULE_ID, qos_policy_id=self.policy.id, max_kbps=200, max_burst_kbps=20)) self.new_policy.rules = [updated_rule] self.assertTrue(self.qos_ext._policy_rules_modified(self.policy, self.new_policy)) def test_is_reflush_required_remove_rules(self): self.qos_ext.policy_map.set_port_policy(self.port, self.policy) self.new_policy.rules = [] self.assertTrue(self.qos_ext._policy_rules_modified(self.policy, self.new_policy)) def test_is_reflush_required_add_rules(self): self.qos_ext.policy_map.set_port_policy(self.port, self.policy) self.new_policy.rules = [self.rule] fake_rule = QosFakeRule(context=None, id=REALLY_FAKE_RULE_ID, qos_policy_id=self.policy.id) self.new_policy.rules.append(fake_rule) self.assertTrue(self.qos_ext._policy_rules_modified(self.policy, self.new_policy)) class PortPolicyMapTestCase(base.BaseTestCase): def setUp(self): super(PortPolicyMapTestCase, self).setUp() self.policy_map = qos.PortPolicyMap() def test_update_policy(self): self.policy_map.update_policy(TEST_POLICY) self.assertEqual(TEST_POLICY, self.policy_map.known_policies[TEST_POLICY.id]) def _set_ports(self): self.policy_map.set_port_policy(TEST_PORT, TEST_POLICY) self.policy_map.set_port_policy(TEST_PORT2, TEST_POLICY2) def test_set_port_policy(self): self._set_ports() self.assertEqual(TEST_POLICY, self.policy_map.known_policies[TEST_POLICY.id]) self.assertIn(TEST_PORT['port_id'], self.policy_map.qos_policy_ports[TEST_POLICY.id]) def test_get_port_policy(self): self._set_ports() self.assertEqual(TEST_POLICY, self.policy_map.get_port_policy(TEST_PORT)) self.assertEqual(TEST_POLICY2, self.policy_map.get_port_policy(TEST_PORT2)) def test_get_ports(self): self._set_ports() self.assertEqual([TEST_PORT], list(self.policy_map.get_ports(TEST_POLICY))) self.assertEqual([TEST_PORT2], list(self.policy_map.get_ports(TEST_POLICY2))) def test_clean_by_port(self): self._set_ports() self.policy_map.clean_by_port(TEST_PORT) self.assertNotIn(TEST_POLICY.id, self.policy_map.known_policies) self.assertNotIn(TEST_PORT['port_id'], self.policy_map.port_policies) self.assertIn(TEST_POLICY2.id, self.policy_map.known_policies) def test_clean_by_port_raises_exception_for_unknown_port(self): self.assertRaises(exceptions.PortNotFound, self.policy_map.clean_by_port, TEST_PORT) def test_has_policy_changed(self): self._set_ports() self.assertTrue( self.policy_map.has_policy_changed(TEST_PORT, 'a_new_policy_id')) self.assertFalse( self.policy_map.has_policy_changed(TEST_PORT, TEST_POLICY.id)) neutron-8.4.0/neutron/tests/unit/agent/metadata/0000775000567000056710000000000013044373210023044 5ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/tests/unit/agent/metadata/test_agent.py0000664000567000056710000004755513044372760025604 0ustar jenkinsjenkins00000000000000# Copyright 2012 New Dream Network, LLC (DreamHost) # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import mock import testtools import webob from neutron.agent.linux import utils as agent_utils from neutron.agent.metadata import agent from neutron.agent.metadata import config from neutron.agent import metadata_agent from neutron.common import constants as n_const from neutron.common import utils from neutron.tests import base class FakeConf(object): auth_ca_cert = None nova_metadata_ip = '9.9.9.9' nova_metadata_port = 8775 metadata_proxy_shared_secret = 'secret' nova_metadata_protocol = 'http' nova_metadata_insecure = True nova_client_cert = 'nova_cert' nova_client_priv_key = 'nova_priv_key' cache_url = '' class FakeConfCache(FakeConf): cache_url = 'memory://?default_ttl=5' class TestMetadataProxyHandlerBase(base.BaseTestCase): fake_conf = FakeConf def setUp(self): super(TestMetadataProxyHandlerBase, self).setUp() self.log_p = mock.patch.object(agent, 'LOG') self.log = self.log_p.start() self.handler = agent.MetadataProxyHandler(self.fake_conf) self.handler.plugin_rpc = mock.Mock() self.handler.context = mock.Mock() class TestMetadataProxyHandlerRpc(TestMetadataProxyHandlerBase): def test_get_port_filters(self): router_id = 'test_router_id' ip = '1.2.3.4' networks = ('net_id1', 'net_id2') expected = {'device_id': [router_id], 'device_owner': n_const.ROUTER_INTERFACE_OWNERS, 'network_id': networks, 'fixed_ips': {'ip_address': [ip]}} actual = self.handler._get_port_filters(router_id, ip, networks) self.assertEqual(expected, actual) def test_get_router_networks(self): router_id = 'router-id' expected = ('network_id1', 'network_id2') ports = [{'network_id': 'network_id1', 'something': 42}, {'network_id': 'network_id2', 'something_else': 32}] self.handler.plugin_rpc.get_ports.return_value = ports networks = self.handler._get_router_networks(router_id) self.assertEqual(expected, networks) def test_get_ports_for_remote_address(self): ip = '1.1.1.1' networks = ('network_id1', 'network_id2') expected = [{'port_id': 'port_id1'}, {'port_id': 'port_id2'}] self.handler.plugin_rpc.get_ports.return_value = expected ports = self.handler._get_ports_for_remote_address(ip, networks) self.assertEqual(expected, ports) class TestMetadataProxyHandlerCache(TestMetadataProxyHandlerBase): fake_conf = FakeConfCache def test_call(self): req = mock.Mock() with mock.patch.object(self.handler, '_get_instance_and_tenant_id') as get_ids: get_ids.return_value = ('instance_id', 'tenant_id') with mock.patch.object(self.handler, '_proxy_request') as proxy: proxy.return_value = 'value' retval = self.handler(req) self.assertEqual(retval, 'value') def test_call_no_instance_match(self): req = mock.Mock() with mock.patch.object(self.handler, '_get_instance_and_tenant_id') as get_ids: get_ids.return_value = None, None retval = self.handler(req) self.assertIsInstance(retval, webob.exc.HTTPNotFound) def test_call_internal_server_error(self): req = mock.Mock() with mock.patch.object(self.handler, '_get_instance_and_tenant_id') as get_ids: get_ids.side_effect = Exception retval = self.handler(req) self.assertIsInstance(retval, webob.exc.HTTPInternalServerError) self.assertEqual(len(self.log.mock_calls), 2) def 
test_get_router_networks(self): router_id = 'router-id' expected = ('network_id1', 'network_id2') ports = [{'network_id': 'network_id1', 'something': 42}, {'network_id': 'network_id2', 'something_else': 32}] mock_get_ports = self.handler.plugin_rpc.get_ports mock_get_ports.return_value = ports networks = self.handler._get_router_networks(router_id) mock_get_ports.assert_called_once_with( mock.ANY, {'device_id': [router_id], 'device_owner': n_const.ROUTER_INTERFACE_OWNERS}) self.assertEqual(expected, networks) def _test_get_router_networks_twice_helper(self): router_id = 'router-id' ports = [{'network_id': 'network_id1', 'something': 42}] expected_networks = ('network_id1',) with mock.patch( 'oslo_utils.timeutils.utcnow_ts', return_value=0): mock_get_ports = self.handler.plugin_rpc.get_ports mock_get_ports.return_value = ports networks = self.handler._get_router_networks(router_id) mock_get_ports.assert_called_once_with( mock.ANY, {'device_id': [router_id], 'device_owner': n_const.ROUTER_INTERFACE_OWNERS}) self.assertEqual(expected_networks, networks) networks = self.handler._get_router_networks(router_id) def test_get_router_networks_twice(self): self._test_get_router_networks_twice_helper() self.assertEqual( 1, self.handler.plugin_rpc.get_ports.call_count) def _get_ports_for_remote_address_cache_hit_helper(self): remote_address = 'remote_address' networks = ('net1', 'net2') mock_get_ports = self.handler.plugin_rpc.get_ports mock_get_ports.return_value = [{'network_id': 'net1', 'something': 42}] self.handler._get_ports_for_remote_address(remote_address, networks) mock_get_ports.assert_called_once_with( mock.ANY, {'network_id': networks, 'fixed_ips': {'ip_address': [remote_address]}} ) self.assertEqual(1, mock_get_ports.call_count) self.handler._get_ports_for_remote_address(remote_address, networks) def test_get_ports_for_remote_address_cache_hit(self): self._get_ports_for_remote_address_cache_hit_helper() self.assertEqual( 1, self.handler.plugin_rpc.get_ports.call_count) def test_get_ports_network_id(self): network_id = 'network-id' router_id = 'router-id' remote_address = 'remote-address' expected = ['port1'] networks = (network_id,) with mock.patch.object(self.handler, '_get_ports_for_remote_address' ) as mock_get_ip_addr,\ mock.patch.object(self.handler, '_get_router_networks' ) as mock_get_router_networks: mock_get_ip_addr.return_value = expected ports = self.handler._get_ports(remote_address, network_id, router_id) mock_get_ip_addr.assert_called_once_with(remote_address, networks) self.assertFalse(mock_get_router_networks.called) self.assertEqual(expected, ports) def test_get_ports_router_id(self): router_id = 'router-id' remote_address = 'remote-address' expected = ['port1'] networks = ('network1', 'network2') with mock.patch.object(self.handler, '_get_ports_for_remote_address', return_value=expected ) as mock_get_ip_addr,\ mock.patch.object(self.handler, '_get_router_networks', return_value=networks ) as mock_get_router_networks: ports = self.handler._get_ports(remote_address, router_id=router_id) mock_get_router_networks.assert_called_once_with(router_id) mock_get_ip_addr.assert_called_once_with(remote_address, networks) self.assertEqual(expected, ports) def test_get_ports_no_id(self): self.assertRaises(TypeError, self.handler._get_ports, 'remote_address') def _get_instance_and_tenant_id_helper(self, headers, list_ports_retval, networks=None, router_id=None): remote_address = '192.168.1.1' headers['X-Forwarded-For'] = remote_address req = mock.Mock(headers=headers) def 
mock_get_ports(*args, **kwargs): return list_ports_retval.pop(0) self.handler.plugin_rpc.get_ports.side_effect = mock_get_ports instance_id, tenant_id = self.handler._get_instance_and_tenant_id(req) expected = [] if router_id: expected.append( mock.call( mock.ANY, {'device_id': [router_id], 'device_owner': n_const.ROUTER_INTERFACE_OWNERS} ) ) expected.append( mock.call( mock.ANY, {'network_id': networks, 'fixed_ips': {'ip_address': ['192.168.1.1']}} ) ) self.handler.plugin_rpc.get_ports.assert_has_calls(expected) return (instance_id, tenant_id) def test_get_instance_id_router_id(self): router_id = 'the_id' headers = { 'X-Neutron-Router-ID': router_id } networks = ('net1', 'net2') ports = [ [{'network_id': 'net1'}, {'network_id': 'net2'}], [{'device_id': 'device_id', 'tenant_id': 'tenant_id', 'network_id': 'net1'}] ] self.assertEqual( self._get_instance_and_tenant_id_helper(headers, ports, networks=networks, router_id=router_id), ('device_id', 'tenant_id') ) def test_get_instance_id_router_id_no_match(self): router_id = 'the_id' headers = { 'X-Neutron-Router-ID': router_id } networks = ('net1', 'net2') ports = [ [{'network_id': 'net1'}, {'network_id': 'net2'}], [] ] self.assertEqual( self._get_instance_and_tenant_id_helper(headers, ports, networks=networks, router_id=router_id), (None, None) ) def test_get_instance_id_network_id(self): network_id = 'the_id' headers = { 'X-Neutron-Network-ID': network_id } ports = [ [{'device_id': 'device_id', 'tenant_id': 'tenant_id', 'network_id': 'the_id'}] ] self.assertEqual( self._get_instance_and_tenant_id_helper(headers, ports, networks=('the_id',)), ('device_id', 'tenant_id') ) def test_get_instance_id_network_id_no_match(self): network_id = 'the_id' headers = { 'X-Neutron-Network-ID': network_id } ports = [[]] self.assertEqual( self._get_instance_and_tenant_id_helper(headers, ports, networks=('the_id',)), (None, None) ) def _proxy_request_test_helper(self, response_code=200, method='GET'): hdrs = {'X-Forwarded-For': '8.8.8.8'} body = 'body' req = mock.Mock(path_info='/the_path', query_string='', headers=hdrs, method=method, body=body) resp = mock.MagicMock(status=response_code) req.response = resp with mock.patch.object(self.handler, '_sign_instance_id') as sign: sign.return_value = 'signed' with mock.patch('httplib2.Http') as mock_http: resp.__getitem__.return_value = "text/plain" mock_http.return_value.request.return_value = (resp, 'content') retval = self.handler._proxy_request('the_id', 'tenant_id', req) mock_http.assert_called_once_with( ca_certs=None, disable_ssl_certificate_validation=True) mock_http.assert_has_calls([ mock.call().add_certificate( FakeConf.nova_client_priv_key, FakeConf.nova_client_cert, "%s:%s" % (FakeConf.nova_metadata_ip, FakeConf.nova_metadata_port) ), mock.call().request( 'http://9.9.9.9:8775/the_path', method=method, headers={ 'X-Forwarded-For': '8.8.8.8', 'X-Instance-ID-Signature': 'signed', 'X-Instance-ID': 'the_id', 'X-Tenant-ID': 'tenant_id' }, body=body )] ) return retval def test_proxy_request_post(self): response = self._proxy_request_test_helper(method='POST') self.assertEqual(response.content_type, "text/plain") self.assertEqual(response.body, 'content') def test_proxy_request_200(self): response = self._proxy_request_test_helper(200) self.assertEqual(response.content_type, "text/plain") self.assertEqual(response.body, 'content') def test_proxy_request_400(self): self.assertIsInstance(self._proxy_request_test_helper(400), webob.exc.HTTPBadRequest) def test_proxy_request_403(self): 
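# _proxy_request maps non-2xx replies from nova-metadata onto the matching
# webob.exc error classes; this and the surrounding tests verify that mapping.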
self.assertIsInstance(self._proxy_request_test_helper(403), webob.exc.HTTPForbidden) def test_proxy_request_404(self): self.assertIsInstance(self._proxy_request_test_helper(404), webob.exc.HTTPNotFound) def test_proxy_request_409(self): self.assertIsInstance(self._proxy_request_test_helper(409), webob.exc.HTTPConflict) def test_proxy_request_500(self): self.assertIsInstance(self._proxy_request_test_helper(500), webob.exc.HTTPInternalServerError) def test_proxy_request_other_code(self): with testtools.ExpectedException(Exception): self._proxy_request_test_helper(302) def test_sign_instance_id(self): self.assertEqual( self.handler._sign_instance_id('foo'), '773ba44693c7553d6ee20f61ea5d2757a9a4f4a44d2841ae4e95b52e4cd62db4' ) class TestMetadataProxyHandlerNoCache(TestMetadataProxyHandlerCache): fake_conf = FakeConf def test_get_router_networks_twice(self): self._test_get_router_networks_twice_helper() self.assertEqual( 2, self.handler.plugin_rpc.get_ports.call_count) def test_get_ports_for_remote_address_cache_hit(self): self._get_ports_for_remote_address_cache_hit_helper() self.assertEqual( 2, self.handler.plugin_rpc.get_ports.call_count) class TestUnixDomainMetadataProxy(base.BaseTestCase): def setUp(self): super(TestUnixDomainMetadataProxy, self).setUp() self.cfg_p = mock.patch.object(agent, 'cfg') self.cfg = self.cfg_p.start() looping_call_p = mock.patch( 'oslo_service.loopingcall.FixedIntervalLoopingCall') self.looping_mock = looping_call_p.start() self.cfg.CONF.metadata_proxy_socket = '/the/path' self.cfg.CONF.metadata_workers = 0 self.cfg.CONF.metadata_backlog = 128 self.cfg.CONF.metadata_proxy_socket_mode = config.USER_MODE @mock.patch.object(utils, 'ensure_dir') def test_init_doesnot_exists(self, ensure_dir): agent.UnixDomainMetadataProxy(mock.Mock()) ensure_dir.assert_called_once_with('/the') def test_init_exists(self): with mock.patch('os.path.isdir') as isdir: with mock.patch('os.unlink') as unlink: isdir.return_value = True agent.UnixDomainMetadataProxy(mock.Mock()) unlink.assert_called_once_with('/the/path') def test_init_exists_unlink_no_file(self): with mock.patch('os.path.isdir') as isdir: with mock.patch('os.unlink') as unlink: with mock.patch('os.path.exists') as exists: isdir.return_value = True exists.return_value = False unlink.side_effect = OSError agent.UnixDomainMetadataProxy(mock.Mock()) unlink.assert_called_once_with('/the/path') def test_init_exists_unlink_fails_file_still_exists(self): with mock.patch('os.path.isdir') as isdir: with mock.patch('os.unlink') as unlink: with mock.patch('os.path.exists') as exists: isdir.return_value = True exists.return_value = True unlink.side_effect = OSError with testtools.ExpectedException(OSError): agent.UnixDomainMetadataProxy(mock.Mock()) unlink.assert_called_once_with('/the/path') @mock.patch.object(agent, 'MetadataProxyHandler') @mock.patch.object(agent_utils, 'UnixDomainWSGIServer') @mock.patch.object(utils, 'ensure_dir') def test_run(self, ensure_dir, server, handler): p = agent.UnixDomainMetadataProxy(self.cfg.CONF) p.run() ensure_dir.assert_called_once_with('/the') server.assert_has_calls([ mock.call('neutron-metadata-agent'), mock.call().start(handler.return_value, '/the/path', workers=0, backlog=128, mode=0o644), mock.call().wait()] ) self.looping_mock.assert_called_once_with(p._report_state) self.looping_mock.return_value.start.assert_called_once_with( interval=mock.ANY) def test_main(self): with mock.patch.object(agent, 'UnixDomainMetadataProxy') as proxy: with mock.patch.object(metadata_agent, 'config') as config: 
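# test_main() only checks the wiring: logging is set up and the proxy is
# constructed from cfg.CONF and run; the config modules are patched out.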
with mock.patch.object(metadata_agent, 'cfg') as cfg: with mock.patch.object(utils, 'cfg'): metadata_agent.main() self.assertTrue(config.setup_logging.called) proxy.assert_has_calls([ mock.call(cfg.CONF), mock.call().run()] ) def test_report_state(self): with mock.patch('neutron.agent.rpc.PluginReportStateAPI') as state_api: with mock.patch('os.makedirs'): proxy = agent.UnixDomainMetadataProxy(self.cfg.CONF) proxy._init_state_reporting() self.assertTrue(proxy.agent_state['start_flag']) proxy._report_state() self.assertNotIn('start_flag', proxy.agent_state) state_api_inst = state_api.return_value state_api_inst.report_state.assert_called_once_with( proxy.context, proxy.agent_state, use_call=True) neutron-8.4.0/neutron/tests/unit/agent/metadata/__init__.py0000664000567000056710000000000013044372736025157 0ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/tests/unit/agent/metadata/test_driver.py0000664000567000056710000001377413044372760025775 0ustar jenkinsjenkins00000000000000# Copyright 2014 OpenStack Foundation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import mock from oslo_config import cfg from oslo_utils import uuidutils from neutron.agent.common import config as agent_config from neutron.agent.l3 import agent as l3_agent from neutron.agent.l3 import config as l3_config from neutron.agent.l3 import ha as l3_ha_agent from neutron.agent.metadata import config from neutron.agent.metadata import driver as metadata_driver from neutron.common import constants from neutron.tests import base _uuid = uuidutils.generate_uuid class TestMetadataDriverRules(base.BaseTestCase): def test_metadata_nat_rules(self): rules = ('PREROUTING', '-d 169.254.169.254/32 -i qr-+ ' '-p tcp -m tcp --dport 80 -j REDIRECT --to-ports 8775') self.assertEqual( [rules], metadata_driver.MetadataDriver.metadata_nat_rules(8775)) def test_metadata_filter_rules(self): rules = [('INPUT', '-m mark --mark 0x1/%s -j ACCEPT' % constants.ROUTER_MARK_MASK), ('INPUT', '-p tcp -m tcp --dport 8775 -j DROP')] self.assertEqual( rules, metadata_driver.MetadataDriver.metadata_filter_rules(8775, '0x1')) def test_metadata_mangle_rules(self): rule = ('PREROUTING', '-d 169.254.169.254/32 -i qr-+ ' '-p tcp -m tcp --dport 80 ' '-j MARK --set-xmark 0x1/%s' % constants.ROUTER_MARK_MASK) self.assertEqual( [rule], metadata_driver.MetadataDriver.metadata_mangle_rules('0x1')) class TestMetadataDriverProcess(base.BaseTestCase): EUID = 123 EGID = 456 EUNAME = 'neutron' def setUp(self): super(TestMetadataDriverProcess, self).setUp() mock.patch('eventlet.spawn').start() agent_config.register_interface_driver_opts_helper(cfg.CONF) cfg.CONF.set_override('interface_driver', 'neutron.agent.linux.interface.NullDriver') mock.patch('neutron.agent.l3.agent.L3PluginApi').start() mock.patch('neutron.agent.l3.ha.AgentMixin' '._init_ha_conf_path').start() cfg.CONF.register_opts(l3_config.OPTS) cfg.CONF.register_opts(l3_ha_agent.OPTS) cfg.CONF.register_opts(config.SHARED_OPTS) cfg.CONF.register_opts(config.DRIVER_OPTS) def 
_test_spawn_metadata_proxy(self, expected_user, expected_group, user='', group='', watch_log=True): router_id = _uuid() router_ns = 'qrouter-%s' % router_id metadata_port = 8080 ip_class_path = 'neutron.agent.linux.ip_lib.IPWrapper' is_effective_user = 'neutron.agent.linux.utils.is_effective_user' fake_is_effective_user = lambda x: x in [self.EUNAME, str(self.EUID)] cfg.CONF.set_override('metadata_proxy_user', user) cfg.CONF.set_override('metadata_proxy_group', group) cfg.CONF.set_override('log_file', 'test.log') cfg.CONF.set_override('debug', True) agent = l3_agent.L3NATAgent('localhost') with mock.patch('os.geteuid', return_value=self.EUID),\ mock.patch('os.getegid', return_value=self.EGID),\ mock.patch(is_effective_user, side_effect=fake_is_effective_user),\ mock.patch(ip_class_path) as ip_mock: agent.metadata_driver.spawn_monitored_metadata_proxy( agent.process_monitor, router_ns, metadata_port, agent.conf, router_id=router_id) netns_execute_args = [ 'neutron-ns-metadata-proxy', mock.ANY, mock.ANY, '--router_id=%s' % router_id, mock.ANY, '--metadata_port=%s' % metadata_port, '--metadata_proxy_user=%s' % expected_user, '--metadata_proxy_group=%s' % expected_group, '--debug', '--verbose', '--log-file=neutron-ns-metadata-proxy-%s.log' % router_id] if not watch_log: netns_execute_args.append( '--nometadata_proxy_watch_log') ip_mock.assert_has_calls([ mock.call(namespace=router_ns), mock.call().netns.execute(netns_execute_args, addl_env=None, run_as_root=False) ]) def test_spawn_metadata_proxy_with_agent_user(self): self._test_spawn_metadata_proxy( self.EUNAME, str(self.EGID), user=self.EUNAME) def test_spawn_metadata_proxy_with_nonagent_user(self): self._test_spawn_metadata_proxy( 'notneutron', str(self.EGID), user='notneutron', watch_log=False) def test_spawn_metadata_proxy_with_agent_uid(self): self._test_spawn_metadata_proxy( str(self.EUID), str(self.EGID), user=str(self.EUID)) def test_spawn_metadata_proxy_with_nonagent_uid(self): self._test_spawn_metadata_proxy( '321', str(self.EGID), user='321', watch_log=False) def test_spawn_metadata_proxy_with_group(self): self._test_spawn_metadata_proxy(str(self.EUID), 'group', group='group') def test_spawn_metadata_proxy_with_gid(self): self._test_spawn_metadata_proxy(str(self.EUID), '654', group='654') def test_spawn_metadata_proxy(self): self._test_spawn_metadata_proxy(str(self.EUID), str(self.EGID)) neutron-8.4.0/neutron/tests/unit/agent/metadata/test_namespace_proxy.py0000664000567000056710000003142413044372736027672 0ustar jenkinsjenkins00000000000000# Copyright 2012 New Dream Network, LLC (DreamHost) # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
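# NOTE: the tests below exercise NetworkMetadataProxyHandler, the handler
# that runs inside a router or network namespace and relays instance
# metadata requests to the well-known address 169.254.169.254.
# httplib2.Http is mocked throughout, so no real HTTP traffic is generated:
# each helper asserts both the outgoing request (method, X-Forwarded-For
# and X-Neutron-*-ID headers, body) and the webob response class that the
# handler maps the upstream status code to.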
import mock import testtools import webob from neutron.agent.linux import utils as agent_utils from neutron.agent.metadata import namespace_proxy as ns_proxy from neutron.common import exceptions from neutron.common import utils from neutron.tests import base from neutron import wsgi class TestNetworkMetadataProxyHandler(base.BaseTestCase): def setUp(self): super(TestNetworkMetadataProxyHandler, self).setUp() self.handler = ns_proxy.NetworkMetadataProxyHandler('router_id') def test_call(self): req = mock.Mock(headers={}) with mock.patch.object(self.handler, '_proxy_request') as proxy_req: proxy_req.return_value = 'value' retval = self.handler(req) self.assertEqual(retval, 'value') proxy_req.assert_called_once_with(req.remote_addr, req.method, req.path_info, req.query_string, req.body) def test_no_argument_passed_to_init(self): with testtools.ExpectedException( exceptions.NetworkIdOrRouterIdRequiredError): ns_proxy.NetworkMetadataProxyHandler() def test_call_internal_server_error(self): req = mock.Mock(headers={}) with mock.patch.object(self.handler, '_proxy_request') as proxy_req: proxy_req.side_effect = Exception retval = self.handler(req) self.assertIsInstance(retval, webob.exc.HTTPInternalServerError) self.assertTrue(proxy_req.called) def test_proxy_request_router_200(self): self.handler.router_id = 'router_id' resp = mock.MagicMock(status=200) with mock.patch('httplib2.Http') as mock_http: resp.__getitem__.return_value = "text/plain" mock_http.return_value.request.return_value = (resp, 'content') retval = self.handler._proxy_request('192.168.1.1', 'GET', '/latest/meta-data', '', '') mock_http.assert_has_calls([ mock.call().request( 'http://169.254.169.254/latest/meta-data', method='GET', headers={ 'X-Forwarded-For': '192.168.1.1', 'X-Neutron-Router-ID': 'router_id' }, connection_type=agent_utils.UnixDomainHTTPConnection, body='' )] ) self.assertEqual(retval.headers['Content-Type'], 'text/plain') self.assertEqual(b'content', retval.body) def _test_proxy_request_network_200(self, content): self.handler.network_id = 'network_id' resp = mock.MagicMock(status=200) with mock.patch('httplib2.Http') as mock_http: resp.__getitem__.return_value = "application/json" mock_http.return_value.request.return_value = (resp, content) retval = self.handler._proxy_request('192.168.1.1', 'GET', '/latest/meta-data', '', '') mock_http.assert_has_calls([ mock.call().request( 'http://169.254.169.254/latest/meta-data', method='GET', headers={ 'X-Forwarded-For': '192.168.1.1', 'X-Neutron-Network-ID': 'network_id' }, connection_type=agent_utils.UnixDomainHTTPConnection, body='' )] ) self.assertEqual(retval.headers['Content-Type'], 'application/json') self.assertEqual(wsgi.encode_body(content), retval.body) def test_proxy_request_network_200(self): self._test_proxy_request_network_200('{}') def test_proxy_request_network_200_unicode_in_content(self): self._test_proxy_request_network_200('Gl\xfcck') def _test_proxy_request_network_4xx(self, status, method, expected): self.handler.network_id = 'network_id' resp = mock.Mock(status=status) with mock.patch('httplib2.Http') as mock_http: mock_http.return_value.request.return_value = (resp, '') retval = self.handler._proxy_request('192.168.1.1', method, '/latest/meta-data', '', '') mock_http.assert_has_calls([ mock.call().request( 'http://169.254.169.254/latest/meta-data', method=method, headers={ 'X-Forwarded-For': '192.168.1.1', 'X-Neutron-Network-ID': 'network_id' }, connection_type=agent_utils.UnixDomainHTTPConnection, body='' )] ) self.assertIsInstance(retval, 
expected) def test_proxy_request_network_400(self): self._test_proxy_request_network_4xx( 400, 'GET', webob.exc.HTTPBadRequest) def test_proxy_request_network_404(self): self._test_proxy_request_network_4xx( 404, 'GET', webob.exc.HTTPNotFound) def test_proxy_request_network_409(self): self._test_proxy_request_network_4xx( 409, 'POST', webob.exc.HTTPConflict) def test_proxy_request_network_500(self): self.handler.network_id = 'network_id' resp = mock.Mock(status=500) with mock.patch('httplib2.Http') as mock_http: mock_http.return_value.request.return_value = (resp, '') retval = self.handler._proxy_request('192.168.1.1', 'GET', '/latest/meta-data', '', '') mock_http.assert_has_calls([ mock.call().request( 'http://169.254.169.254/latest/meta-data', method='GET', headers={ 'X-Forwarded-For': '192.168.1.1', 'X-Neutron-Network-ID': 'network_id' }, connection_type=agent_utils.UnixDomainHTTPConnection, body='' )] ) self.assertIsInstance(retval, webob.exc.HTTPInternalServerError) def test_proxy_request_network_418(self): self.handler.network_id = 'network_id' resp = mock.Mock(status=418) with mock.patch('httplib2.Http') as mock_http: mock_http.return_value.request.return_value = (resp, '') with testtools.ExpectedException(Exception): self.handler._proxy_request('192.168.1.1', 'GET', '/latest/meta-data', '', '') mock_http.assert_has_calls([ mock.call().request( 'http://169.254.169.254/latest/meta-data', method='GET', headers={ 'X-Forwarded-For': '192.168.1.1', 'X-Neutron-Network-ID': 'network_id' }, connection_type=agent_utils.UnixDomainHTTPConnection, body='' )] ) def test_proxy_request_network_exception(self): self.handler.network_id = 'network_id' with mock.patch('httplib2.Http') as mock_http: mock_http.return_value.request.side_effect = Exception with testtools.ExpectedException(Exception): self.handler._proxy_request('192.168.1.1', 'GET', '/latest/meta-data', '', '') mock_http.assert_has_calls([ mock.call().request( 'http://169.254.169.254/latest/meta-data', method='GET', headers={ 'X-Forwarded-For': '192.168.1.1', 'X-Neutron-Network-ID': 'network_id' }, connection_type=agent_utils.UnixDomainHTTPConnection, body='' )] ) class TestProxyDaemon(base.BaseTestCase): def test_init(self): with mock.patch('neutron.agent.linux.daemon.Pidfile'): pd = ns_proxy.ProxyDaemon('pidfile', 9697, 'net_id', 'router_id') self.assertEqual(pd.router_id, 'router_id') self.assertEqual(pd.network_id, 'net_id') def test_run(self): with mock.patch('neutron.agent.linux.daemon.Pidfile'): with mock.patch('neutron.wsgi.Server') as Server: pd = ns_proxy.ProxyDaemon('pidfile', 9697, 'net_id', 'router_id') pd.run() Server.assert_has_calls([ mock.call('neutron-network-metadata-proxy'), mock.call().start(mock.ANY, 9697), mock.call().wait()] ) def test_main(self): with mock.patch.object(ns_proxy, 'ProxyDaemon') as daemon: with mock.patch.object(ns_proxy, 'config') as config: with mock.patch.object(ns_proxy, 'cfg') as cfg: with mock.patch.object(utils, 'cfg') as utils_cfg: cfg.CONF.router_id = 'router_id' cfg.CONF.network_id = None cfg.CONF.metadata_port = 9697 cfg.CONF.pid_file = 'pidfile' cfg.CONF.daemonize = True utils_cfg.CONF.log_opt_values.return_value = None ns_proxy.main() self.assertTrue(config.setup_logging.called) daemon.assert_has_calls([ mock.call('pidfile', 9697, router_id='router_id', network_id=None, user=mock.ANY, group=mock.ANY, watch_log=mock.ANY), mock.call().start()] ) def test_main_dont_fork(self): with mock.patch.object(ns_proxy, 'ProxyDaemon') as daemon: with mock.patch.object(ns_proxy, 
'config') as config: with mock.patch.object(ns_proxy, 'cfg') as cfg: with mock.patch.object(utils, 'cfg') as utils_cfg: cfg.CONF.router_id = 'router_id' cfg.CONF.network_id = None cfg.CONF.metadata_port = 9697 cfg.CONF.pid_file = 'pidfile' cfg.CONF.daemonize = False utils_cfg.CONF.log_opt_values.return_value = None ns_proxy.main() self.assertTrue(config.setup_logging.called) daemon.assert_has_calls([ mock.call('pidfile', 9697, router_id='router_id', network_id=None, user=mock.ANY, group=mock.ANY, watch_log=mock.ANY), mock.call().run()] ) neutron-8.4.0/neutron/tests/unit/agent/windows/0000775000567000056710000000000013044373210022756 5ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/tests/unit/agent/windows/__init__.py0000664000567000056710000000000013044372736025071 0ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/tests/unit/agent/windows/test_ip_lib.py0000664000567000056710000000631013044372760025636 0ustar jenkinsjenkins00000000000000# Copyright 2016 Cloudbase Solutions. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import mock import netifaces from neutron.agent.windows import ip_lib from neutron.tests import base class TestIpWrapper(base.BaseTestCase): def test_get_device_by_ip_no_ip(self): ret = ip_lib.IPWrapper().get_device_by_ip(None) self.assertIsNone(ret) @mock.patch.object(ip_lib.IPWrapper, 'get_devices') def test_get_device_by_ip(self, mock_get_devices): mock_dev1 = mock.MagicMock() mock_dev2 = mock.MagicMock() mock_dev1.device_has_ip.return_value = False mock_dev2.device_has_ip.return_value = True mock_get_devices.return_value = [mock_dev1, mock_dev2] ret = ip_lib.IPWrapper().get_device_by_ip('fake_ip') self.assertEqual(mock_dev2, ret) @mock.patch.object(ip_lib.IPWrapper, 'get_devices') def test_get_device_by_ip_exception(self, mock_get_devices): mock_get_devices.side_effect = OSError ret = ip_lib.IPWrapper().get_device_by_ip(mock.sentinel.fake_ip) self.assertIsNone(ret) @mock.patch('netifaces.interfaces') def test_get_devices(self, mock_interfaces): mock_interfaces.return_value = [mock.sentinel.dev1, mock.sentinel.dev2] ret = ip_lib.IPWrapper().get_devices() self.assertEqual(mock.sentinel.dev1, ret[0].device_name) self.assertEqual(mock.sentinel.dev2, ret[1].device_name) @mock.patch('netifaces.interfaces') def test_get_devices_error(self, mock_interfaces): mock_interfaces.side_effect = OSError ret = ip_lib.IPWrapper().get_devices() self.assertEqual([], ret) class TestIpDevice(base.BaseTestCase): @mock.patch('netifaces.ifaddresses') def test_device_has_ip(self, mock_netifaces): mock_address = {'addr': mock.sentinel.fake_addr} mock_netifaces.return_value = {netifaces.AF_INET: [mock_address]} ret = ip_lib.IPDevice("fake_dev").device_has_ip( mock.sentinel.fake_addr) self.assertTrue(ret) @mock.patch('netifaces.ifaddresses') def test_device_has_ip_false(self, mock_netifaces): mock_netifaces.return_value = {} ret = ip_lib.IPDevice("fake_dev").device_has_ip( mock.sentinel.fake_addr) self.assertFalse(ret) @mock.patch('netifaces.ifaddresses') def 
test_device_has_ip_error(self, mock_netifaces): mock_netifaces.side_effect = OSError ret = ip_lib.IPDevice("fake_dev").device_has_ip( mock.sentinel.fake_addr) self.assertFalse(ret) neutron-8.4.0/neutron/tests/unit/objects/0000775000567000056710000000000013044373210021617 5ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/tests/unit/objects/test_base.py0000664000567000056710000004721613044372760024163 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import copy import mock from oslo_db import exception as obj_exc from oslo_utils import uuidutils from oslo_versionedobjects import base as obj_base from oslo_versionedobjects import fields as obj_fields from neutron.common import exceptions as n_exc from neutron.common import utils as common_utils from neutron import context from neutron.db import models_v2 from neutron.objects import base from neutron.objects.db import api as obj_db_api from neutron.tests import base as test_base from neutron.tests import tools SQLALCHEMY_COMMIT = 'sqlalchemy.engine.Connection._commit_impl' OBJECTS_BASE_OBJ_FROM_PRIMITIVE = ('oslo_versionedobjects.base.' 'VersionedObject.obj_from_primitive') class FakeModel(object): def __init__(self, *args, **kwargs): pass @obj_base.VersionedObjectRegistry.register_if(False) class FakeNeutronObject(base.NeutronDbObject): # Version 1.0: Initial version VERSION = '1.0' db_model = FakeModel fields = { 'id': obj_fields.UUIDField(), 'field1': obj_fields.StringField(), 'field2': obj_fields.StringField() } primary_keys = ['id'] fields_no_update = ['field1'] synthetic_fields = ['field2'] @obj_base.VersionedObjectRegistry.register_if(False) class FakeNeutronObjectNonStandardPrimaryKey(base.NeutronDbObject): # Version 1.0: Initial version VERSION = '1.0' db_model = FakeModel primary_keys = ['weird_key'] fields = { 'weird_key': obj_fields.UUIDField(), 'field1': obj_fields.StringField(), 'field2': obj_fields.StringField() } synthetic_fields = ['field2'] @obj_base.VersionedObjectRegistry.register_if(False) class FakeNeutronObjectCompositePrimaryKey(base.NeutronDbObject): # Version 1.0: Initial version VERSION = '1.0' db_model = FakeModel primary_keys = ['weird_key', 'field1'] fields = { 'weird_key': obj_fields.UUIDField(), 'field1': obj_fields.StringField(), 'field2': obj_fields.StringField() } synthetic_fields = ['field2'] @obj_base.VersionedObjectRegistry.register_if(False) class FakeNeutronObjectRenamedField(base.NeutronDbObject): """Test renaming a DB column to a differently named object field. For tests: - db fields: id, field_db, field2 - object fields: id, field_ovo, field2 """ # Version 1.0: Initial version VERSION = '1.0' db_model = FakeModel primary_keys = ['id'] fields = { 'id': obj_fields.UUIDField(), 'field_ovo': obj_fields.StringField(), 'field2': obj_fields.StringField() } synthetic_fields = ['field2'] fields_need_translation = {'field_ovo': 'field_db'} @obj_base.VersionedObjectRegistry.register_if(False) class FakeNeutronObjectCompositePrimaryKeyWithId(base.NeutronDbObject): # Version 1.0: Initial version 
VERSION = '1.0' db_model = FakeModel primary_keys = ['id', 'field1'] fields = { 'id': obj_fields.UUIDField(), 'field1': obj_fields.StringField(), 'field2': obj_fields.StringField() } synthetic_fields = ['field2'] FIELD_TYPE_VALUE_GENERATOR_MAP = { obj_fields.BooleanField: tools.get_random_boolean, obj_fields.IntegerField: tools.get_random_integer, obj_fields.StringField: tools.get_random_string, obj_fields.UUIDField: uuidutils.generate_uuid, obj_fields.ListOfObjectsField: lambda: [] } def get_obj_db_fields(obj): return {field: getattr(obj, field) for field in obj.fields if field not in obj.synthetic_fields} class _BaseObjectTestCase(object): _test_class = FakeNeutronObject def setUp(self): super(_BaseObjectTestCase, self).setUp() self.context = context.get_admin_context() self.db_objs = list(self.get_random_fields() for _ in range(3)) self.db_obj = self.db_objs[0] self.obj_fields = [] for db_obj in self.db_objs: self.obj_fields.append( self._test_class.modify_fields_from_db(db_obj)) valid_field = [f for f in self._test_class.fields if f not in self._test_class.synthetic_fields][0] self.valid_field_filter = {valid_field: self.obj_fields[0][valid_field]} @classmethod def get_random_fields(cls, obj_cls=None): obj_cls = obj_cls or cls._test_class fields = {} for field, field_obj in obj_cls.fields.items(): if field not in obj_cls.synthetic_fields: generator = FIELD_TYPE_VALUE_GENERATOR_MAP[type(field_obj)] fields[field] = generator() return obj_cls.modify_fields_to_db(fields) @classmethod def generate_object_keys(cls, obj_cls): keys = {} for field, field_obj in obj_cls.fields.items(): if field in obj_cls.primary_keys: generator = FIELD_TYPE_VALUE_GENERATOR_MAP[type(field_obj)] keys[field] = generator() return keys def get_updatable_fields(self, fields): return base.get_updatable_fields(self._test_class, fields) @classmethod def _is_test_class(cls, obj): return isinstance(obj, cls._test_class) class BaseObjectIfaceTestCase(_BaseObjectTestCase, test_base.BaseTestCase): def test_get_object(self): with mock.patch.object(obj_db_api, 'get_object', return_value=self.db_obj) as get_object_mock: obj_keys = self.generate_object_keys(self._test_class) obj = self._test_class.get_object(self.context, **obj_keys) self.assertTrue(self._is_test_class(obj)) self.assertEqual(self.obj_fields[0], get_obj_db_fields(obj)) get_object_mock.assert_called_once_with( self.context, self._test_class.db_model, **obj_keys) def test_get_object_missing_object(self): with mock.patch.object(obj_db_api, 'get_object', return_value=None): obj_keys = self.generate_object_keys(self._test_class) obj = self._test_class.get_object(self.context, **obj_keys) self.assertIsNone(obj) def test_get_object_missing_primary_key(self): obj_keys = self.generate_object_keys(self._test_class) obj_keys.popitem() self.assertRaises(base.NeutronPrimaryKeyMissing, self._test_class.get_object, self.context, **obj_keys) def test_get_objects(self): with mock.patch.object(obj_db_api, 'get_objects', return_value=self.db_objs) as get_objects_mock: objs = self._test_class.get_objects(self.context) self._validate_objects(self.db_objs, objs) get_objects_mock.assert_called_once_with( self.context, self._test_class.db_model) def test_get_objects_valid_fields(self): with mock.patch.object( obj_db_api, 'get_objects', return_value=[self.db_obj]) as get_objects_mock: objs = self._test_class.get_objects(self.context, **self.valid_field_filter) self._validate_objects([self.db_obj], objs) get_objects_mock.assert_called_with( self.context, self._test_class.db_model, 
**self.valid_field_filter) def test_get_objects_mixed_fields(self): synthetic_fields = self._test_class.synthetic_fields if not synthetic_fields: self.skipTest('No synthetic fields found in test class %r' % self._test_class) filters = copy.copy(self.valid_field_filter) filters[synthetic_fields[0]] = 'xxx' with mock.patch.object(obj_db_api, 'get_objects', return_value=self.db_objs): self.assertRaises(base.exceptions.InvalidInput, self._test_class.get_objects, self.context, **filters) def test_get_objects_synthetic_fields(self): synthetic_fields = self._test_class.synthetic_fields if not synthetic_fields: self.skipTest('No synthetic fields found in test class %r' % self._test_class) with mock.patch.object(obj_db_api, 'get_objects', return_value=self.db_objs): self.assertRaises(base.exceptions.InvalidInput, self._test_class.get_objects, self.context, **{synthetic_fields[0]: 'xxx'}) def test_get_objects_invalid_fields(self): with mock.patch.object(obj_db_api, 'get_objects', return_value=self.db_objs): self.assertRaises(base.exceptions.InvalidInput, self._test_class.get_objects, self.context, fake_field='xxx') def _validate_objects(self, expected, observed): self.assertTrue(all(self._is_test_class(obj) for obj in observed)) self.assertEqual( sorted([self._test_class.modify_fields_from_db(db_obj) for db_obj in expected], key=common_utils.safe_sort_key), sorted([get_obj_db_fields(obj) for obj in observed], key=common_utils.safe_sort_key)) def _check_equal(self, obj, db_obj): self.assertEqual( sorted(db_obj), sorted(get_obj_db_fields(obj))) def test_create(self): with mock.patch.object(obj_db_api, 'create_object', return_value=self.db_obj) as create_mock: obj = self._test_class(self.context, **self.obj_fields[0]) self._check_equal(obj, self.obj_fields[0]) obj.create() self._check_equal(obj, self.obj_fields[0]) create_mock.assert_called_once_with( self.context, self._test_class.db_model, self.db_obj) def test_create_updates_from_db_object(self): with mock.patch.object(obj_db_api, 'create_object', return_value=self.db_obj): obj = self._test_class(self.context, **self.obj_fields[1]) self._check_equal(obj, self.obj_fields[1]) obj.create() self._check_equal(obj, self.obj_fields[0]) def test_create_duplicates(self): with mock.patch.object(obj_db_api, 'create_object', side_effect=obj_exc.DBDuplicateEntry): obj = self._test_class(self.context, **self.obj_fields[0]) self.assertRaises(base.NeutronDbObjectDuplicateEntry, obj.create) @mock.patch.object(obj_db_api, 'update_object') def test_update_no_changes(self, update_mock): with mock.patch.object(base.NeutronDbObject, '_get_changed_persistent_fields', return_value={}): obj_keys = self.generate_object_keys(self._test_class) obj = self._test_class(self.context, **obj_keys) obj.update() self.assertFalse(update_mock.called) @mock.patch.object(obj_db_api, 'update_object') def test_update_changes(self, update_mock): fields_to_update = self.get_updatable_fields(self.db_obj) if not fields_to_update: self.skipTest('No updatable fields found in test class %r' % self._test_class) with mock.patch.object(base.NeutronDbObject, '_get_changed_persistent_fields', return_value=fields_to_update): obj = self._test_class(self.context, **self.obj_fields[0]) obj.update() update_mock.assert_called_once_with( self.context, self._test_class.db_model, fields_to_update, **obj._get_composite_keys()) @mock.patch.object(base.NeutronDbObject, '_get_changed_persistent_fields', return_value={'a': 'a', 'b': 'b', 'c': 'c'}) def test_update_changes_forbidden(self, *mocks): with 
mock.patch.object( self._test_class, 'fields_no_update', new_callable=mock.PropertyMock(return_value=['a', 'c']), create=True): obj = self._test_class(self.context, **self.db_obj) self.assertRaises(base.NeutronObjectUpdateForbidden, obj.update) def test_update_updates_from_db_object(self): with mock.patch.object(obj_db_api, 'update_object', return_value=self.db_obj): obj = self._test_class(self.context, **self.obj_fields[1]) fields_to_update = self.get_updatable_fields(self.obj_fields[1]) if not fields_to_update: self.skipTest('No updatable fields found in test class %r' % self._test_class) with mock.patch.object(base.NeutronDbObject, '_get_changed_persistent_fields', return_value=fields_to_update): obj.update() self._check_equal(obj, self.obj_fields[0]) @mock.patch.object(obj_db_api, 'delete_object') def test_delete(self, delete_mock): obj = self._test_class(self.context, **self.obj_fields[0]) self._check_equal(obj, self.obj_fields[0]) obj.delete() self._check_equal(obj, self.obj_fields[0]) delete_mock.assert_called_once_with( self.context, self._test_class.db_model, **obj._get_composite_keys()) @mock.patch(OBJECTS_BASE_OBJ_FROM_PRIMITIVE) def test_clean_obj_from_primitive(self, get_prim_m): expected_obj = get_prim_m.return_value observed_obj = self._test_class.clean_obj_from_primitive('foo', 'bar') self.assertIs(expected_obj, observed_obj) self.assertTrue(observed_obj.obj_reset_changes.called) def test_update_primary_key_forbidden_fail(self): obj = self._test_class(self.context, **self.db_obj) obj.obj_reset_changes() if not self._test_class.primary_keys: self.skipTest( 'All non-updatable fields found in test class %r ' 'are primary keys' % self._test_class) for key, val in self.db_obj.items(): if key in self._test_class.primary_keys: setattr(obj, key, val) self.assertRaises(base.NeutronObjectUpdateForbidden, obj.update) class BaseDbObjectNonStandardPrimaryKeyTestCase(BaseObjectIfaceTestCase): _test_class = FakeNeutronObjectNonStandardPrimaryKey class BaseDbObjectCompositePrimaryKeyTestCase(BaseObjectIfaceTestCase): _test_class = FakeNeutronObjectCompositePrimaryKey class BaseDbObjectCompositePrimaryKeyWithIdTestCase(BaseObjectIfaceTestCase): _test_class = FakeNeutronObjectCompositePrimaryKeyWithId class BaseDbObjectRenamedFieldTestCase(BaseObjectIfaceTestCase): _test_class = FakeNeutronObjectRenamedField class BaseDbObjectTestCase(_BaseObjectTestCase): def _create_test_network(self): # TODO(ihrachys): replace with network.create() once we get an object # implementation for networks self._network = obj_db_api.create_object(self.context, models_v2.Network, {'name': 'test-network1'}) def _create_test_port(self, network): # TODO(ihrachys): replace with port.create() once we get an object # implementation for ports self._port = obj_db_api.create_object(self.context, models_v2.Port, {'tenant_id': 'fake_tenant_id', 'name': 'test-port1', 'network_id': network['id'], 'mac_address': 'fake_mac', 'admin_state_up': True, 'status': 'ACTIVE', 'device_id': 'fake_device', 'device_owner': 'fake_owner'}) def test_get_object_create_update_delete(self): obj = self._test_class(self.context, **self.obj_fields[0]) obj.create() new = self._test_class.get_object(self.context, **obj._get_composite_keys()) self.assertEqual(obj, new) obj = new for key, val in self.get_updatable_fields(self.obj_fields[1]).items(): setattr(obj, key, val) obj.update() new = self._test_class.get_object(self.context, **obj._get_composite_keys()) self.assertEqual(obj, new) obj = new new.delete() new = 
self._test_class.get_object(self.context, **obj._get_composite_keys()) self.assertIsNone(new) def test_update_non_existent_object_raises_not_found(self): obj = self._test_class(self.context, **self.obj_fields[0]) obj.obj_reset_changes() fields_to_update = self.get_updatable_fields(self.obj_fields[0]) if not fields_to_update: self.skipTest('No updatable fields found in test class %r' % self._test_class) for key, val in fields_to_update.items(): setattr(obj, key, val) self.assertRaises(n_exc.ObjectNotFound, obj.update) def test_delete_non_existent_object_raises_not_found(self): obj = self._test_class(self.context, **self.obj_fields[0]) self.assertRaises(n_exc.ObjectNotFound, obj.delete) @mock.patch(SQLALCHEMY_COMMIT) def test_create_single_transaction(self, mock_commit): obj = self._test_class(self.context, **self.obj_fields[0]) obj.create() self.assertEqual(1, mock_commit.call_count) def test_update_single_transaction(self): obj = self._test_class(self.context, **self.obj_fields[0]) obj.create() fields_to_update = self.get_updatable_fields(self.obj_fields[1]) if not fields_to_update: self.skipTest('No updatable fields found in test class %r' % self._test_class) for key, val in fields_to_update.items(): setattr(obj, key, val) with mock.patch(SQLALCHEMY_COMMIT) as mock_commit: obj.update() self.assertEqual(1, mock_commit.call_count) def test_delete_single_transaction(self): obj = self._test_class(self.context, **self.obj_fields[0]) obj.create() with mock.patch(SQLALCHEMY_COMMIT) as mock_commit: obj.delete() self.assertEqual(1, mock_commit.call_count) @mock.patch(SQLALCHEMY_COMMIT) def test_get_objects_single_transaction(self, mock_commit): self._test_class.get_objects(self.context) self.assertEqual(1, mock_commit.call_count) @mock.patch(SQLALCHEMY_COMMIT) def test_get_object_single_transaction(self, mock_commit): obj = self._test_class(self.context, **self.obj_fields[0]) obj.create() obj = self._test_class.get_object(self.context, **obj._get_composite_keys()) self.assertEqual(2, mock_commit.call_count) neutron-8.4.0/neutron/tests/unit/objects/test_objects.py0000664000567000056710000000412313044372760024672 0ustar jenkinsjenkins00000000000000# Copyright 2015 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import os import pprint from oslo_versionedobjects import base as obj_base from oslo_versionedobjects import fixture from neutron import objects from neutron.tests import base as test_base from neutron.tests import tools # NOTE: The hashes in this list should only be changed if they come with a # corresponding version bump in the affected objects. 
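# A mismatch usually means object fields were added, removed or retyped
# without a corresponding VERSION bump. To regenerate the fingerprints,
# run this module with the GENERATE_HASHES environment variable set (see
# test_versions below) and copy the values it writes to object_hashes.txt,
# e.g. (assuming the test virtualenv is already active):
#
#     GENERATE_HASHES=1 python -m testtools.run \
#         neutron.tests.unit.objects.test_objects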
object_data = { 'QosBandwidthLimitRule': '1.0-4e44a8f5c2895ab1278399f87b40a13d', 'QosRuleType': '1.0-d0df298d49eeffab91af18d1a4cf7eaf', 'QosPolicy': '1.0-721fa60ea8f0e8f15d456d6e917dfe59', } class TestObjectVersions(test_base.BaseTestCase): def setUp(self): super(TestObjectVersions, self).setUp() # NOTE(ihrachys): seed registry with all objects under neutron.objects # before validating the hashes tools.import_modules_recursively(os.path.dirname(objects.__file__)) def test_versions(self): checker = fixture.ObjectVersionChecker( obj_base.VersionedObjectRegistry.obj_classes()) fingerprints = checker.get_hashes() if os.getenv('GENERATE_HASHES'): with open('object_hashes.txt', 'w') as hashes_file: hashes_file.write(pprint.pformat(fingerprints)) expected, actual = checker.test_hashes(object_data) self.assertEqual(expected, actual, 'Some objects have changed; please make sure the ' 'versions have been bumped, and then update their ' 'hashes in the object_data map in this test module.') neutron-8.4.0/neutron/tests/unit/objects/test_common_types.py0000664000567000056710000000466613044372760025761 0ustar jenkinsjenkins00000000000000# Copyright 2016 OpenStack Foundation # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron.common import constants from neutron.objects import common_types from neutron.tests import base as test_base class TestField(object): def test_coerce_good_values(self): for in_val, out_val in self.coerce_good_values: self.assertEqual(out_val, self.field.coerce('obj', 'attr', in_val)) def test_coerce_bad_values(self): for in_val in self.coerce_bad_values: self.assertRaises((TypeError, ValueError), self.field.coerce, 'obj', 'attr', in_val) def test_to_primitive(self): for in_val, prim_val in self.to_primitive_values: self.assertEqual(prim_val, self.field.to_primitive('obj', 'attr', in_val)) def test_from_primitive(self): class ObjectLikeThing(object): _context = 'context' for prim_val, out_val in self.from_primitive_values: self.assertEqual(out_val, self.field.from_primitive( ObjectLikeThing, 'attr', prim_val)) def test_stringify(self): for in_val, out_val in self.coerce_good_values: self.assertEqual("'%s'" % in_val, self.field.stringify(in_val)) def test_stringify_invalid(self): for in_val in self.coerce_bad_values: self.assertRaises(ValueError, self.field.stringify, in_val) class IPV6ModeEnumFieldTest(test_base.BaseTestCase, TestField): def setUp(self): super(IPV6ModeEnumFieldTest, self).setUp() self.field = common_types.IPV6ModeEnumField() self.coerce_good_values = [(mode, mode) for mode in constants.IPV6_MODES] self.coerce_bad_values = ['6', 4, 'type', 'slaacc'] self.to_primitive_values = self.coerce_good_values self.from_primitive_values = self.coerce_good_values neutron-8.4.0/neutron/tests/unit/objects/__init__.py0000664000567000056710000000000013044372736023732 0ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/tests/unit/objects/test_rbac_db.py0000664000567000056710000003404113044372760024617 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use 
this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import mock from oslo_versionedobjects import base as obj_base from oslo_versionedobjects import fields as obj_fields from six import add_metaclass import sqlalchemy as sa from neutron.callbacks import events from neutron.common import exceptions as n_exc from neutron.db import model_base from neutron.db import rbac_db_models from neutron.extensions import rbac as ext_rbac from neutron.objects import base from neutron.objects.db import api as obj_db_api from neutron.objects import rbac_db from neutron.tests.unit.objects import test_base from neutron.tests.unit import testlib_api class FakeDbModel(object): def __init__(self, *args, **kwargs): pass class FakeRbacModel(rbac_db_models.RBACColumns, model_base.BASEV2): object_id = sa.Column(sa.String(36), nullable=False) object_type = 'fake_rbac_object' def get_valid_actions(self): return (rbac_db_models.ACCESS_SHARED,) @obj_base.VersionedObjectRegistry.register_if(False) @add_metaclass(rbac_db.RbacNeutronMetaclass) class FakeNeutronDbObject(base.NeutronDbObject): # Version 1.0: Initial version VERSION = '1.0' rbac_db_model = FakeRbacModel db_model = FakeDbModel fields = { 'id': obj_fields.UUIDField(), 'field1': obj_fields.StringField(), 'field2': obj_fields.StringField(), 'shared': obj_fields.BooleanField(default=False), } fields_no_update = ['id'] synthetic_fields = ['field2'] def get_bound_tenant_ids(cls, context, policy_id): pass class RbacNeutronDbObjectTestCase(test_base.BaseObjectIfaceTestCase, testlib_api.SqlTestCase): _test_class = FakeNeutronDbObject def setUp(self): super(RbacNeutronDbObjectTestCase, self).setUp() FakeNeutronDbObject.update_post = mock.Mock() @mock.patch.object(_test_class, 'rbac_db_model') def test_get_tenants_with_shared_access_to_db_obj_return_tenant_ids( self, *mocks): ctx = mock.Mock() fake_ids = {'tenant_id_' + str(i) for i in range(10)} ctx.session.query.return_value.filter.return_value = [ (fake_id,) for fake_id in fake_ids] ret_ids = self._test_class._get_tenants_with_shared_access_to_db_obj( ctx, 'fake_db_obj_id') self.assertEqual(fake_ids, ret_ids) def test_is_accessible_for_admin(self): ctx = mock.Mock(is_admin=True, tenant_id='we_dont_care') self.assertTrue(self._test_class.is_accessible(ctx, None)) def test_is_accessible_for_db_object_owner(self): ctx = mock.Mock(is_admin=False, tenant_id='db_object_owner') db_obj = mock.Mock(tenant_id=ctx.tenant_id) self.assertTrue(self._test_class.is_accessible(ctx, db_obj)) @mock.patch.object(_test_class, 'is_shared_with_tenant', return_value=True) def test_is_accessible_if_shared_with_tenant(self, mock_is_shared): ctx = mock.Mock(is_admin=False, tenant_id='db_object_shareholder') db_obj = mock.Mock(tenant_id='db_object_owner') self.assertTrue(self._test_class.is_accessible(ctx, db_obj)) mock_is_shared.assert_called_once_with( mock.ANY, db_obj.id, ctx.tenant_id) @mock.patch.object(_test_class, 'is_shared_with_tenant', return_value=False) def test_is_accessible_fails_for_unauthorized_tenant(self, mock_is_shared): ctx = mock.Mock(is_admin=False, tenant_id='Billy_the_kid') db_obj = mock.Mock(tenant_id='db_object_owner') 
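# The context is neither an admin nor the owner of the object, and the
# sharing check is mocked to return False, so access must be denied and
# the check consulted exactly once.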
self.assertFalse(self._test_class.is_accessible(ctx, db_obj)) mock_is_shared.assert_called_once_with( mock.ANY, db_obj.id, ctx.tenant_id) def _rbac_policy_generate_change_events(self, resource, trigger, context, object_type, policy, event_list): for event in event_list: self._test_class.validate_rbac_policy_change( resource, event, trigger, context, object_type, policy) @mock.patch.object(_test_class, 'validate_rbac_policy_update') def test_validate_rbac_policy_change_handles_only_object_type( self, mock_validate_rbac_update): self._rbac_policy_generate_change_events( resource=None, trigger='dummy_trigger', context=None, object_type='dummy_object_type', policy=None, event_list=(events.BEFORE_CREATE, events.BEFORE_UPDATE, events.BEFORE_DELETE)) mock_validate_rbac_update.assert_not_called() @mock.patch.object(_test_class, 'validate_rbac_policy_update') @mock.patch.object(_test_class, 'get_object', return_value={'tenant_id': 'tyrion_lannister'}) def test_validate_rbac_policy_change_allowed_for_admin_or_owner( self, mock_get_object, mock_validate_update): context = mock.Mock(is_admin=True, tenant_id='db_obj_owner_id') self._rbac_policy_generate_change_events( resource=None, trigger='dummy_trigger', context=context, object_type=self._test_class.rbac_db_model.object_type, policy={'object_id': 'fake_object_id'}, event_list=(events.BEFORE_CREATE, events.BEFORE_UPDATE)) self.assertTrue(self._test_class.validate_rbac_policy_update.called) @mock.patch.object(_test_class, 'validate_rbac_policy_update') @mock.patch.object(_test_class, 'get_object', return_value={'tenant_id': 'king_beyond_the_wall'}) def test_validate_rbac_policy_change_forbidden_for_outsiders( self, mock_get_object, mock_validate_update): context = mock.Mock(is_admin=False, tenant_id='db_obj_owner_id') self.assertRaises( n_exc.InvalidInput, self._rbac_policy_generate_change_events, resource=mock.Mock(), trigger='dummy_trigger', context=context, object_type=self._test_class.rbac_db_model.object_type, policy={'object_id': 'fake_object_id'}, event_list=(events.BEFORE_CREATE, events.BEFORE_UPDATE)) self.assertFalse(mock_validate_update.called) @mock.patch.object(_test_class, '_validate_rbac_policy_delete') def _test_validate_rbac_policy_delete_handles_policy( self, policy, mock_validate_delete): self._test_class.validate_rbac_policy_delete( resource=mock.Mock(), event=events.BEFORE_DELETE, trigger='dummy_trigger', context=mock.Mock(), object_type=self._test_class.rbac_db_model.object_type, policy=policy) mock_validate_delete.assert_not_called() def test_validate_rbac_policy_delete_handles_shared_action(self): self._test_validate_rbac_policy_delete_handles_policy( {'action': 'unknown_action'}) @mock.patch.object(_test_class, 'get_object') def test_validate_rbac_policy_delete_skips_db_object_owner(self, mock_get_object): policy = {'action': rbac_db_models.ACCESS_SHARED, 'target_tenant': 'fake_tenant_id', 'object_id': 'fake_obj_id', 'tenant_id': 'fake_tenant_id'} mock_get_object.return_value.tenant_id = policy['target_tenant'] self._test_validate_rbac_policy_delete_handles_policy(policy) @mock.patch.object(_test_class, 'get_object') @mock.patch.object(_test_class, 'get_bound_tenant_ids', return_value='tenant_id_shared_with') def test_validate_rbac_policy_delete_fails_single_tenant_and_in_use( self, get_bound_tenant_ids_mock, mock_get_object): policy = {'action': rbac_db_models.ACCESS_SHARED, 'target_tenant': 'tenant_id_shared_with', 'tenant_id': 'object_owner_tenant_id', 'object_id': 'fake_obj_id'} context = mock.Mock() with 
mock.patch.object( self._test_class, '_get_db_obj_rbac_entries') as target_tenants_mock: filter_mock = target_tenants_mock.return_value.filter filter_mock.return_value.count.return_value = 0 self.assertRaises( ext_rbac.RbacPolicyInUse, self._test_class.validate_rbac_policy_delete, resource=None, event=events.BEFORE_DELETE, trigger='dummy_trigger', context=context, object_type=self._test_class.rbac_db_model.object_type, policy=policy) def test_validate_rbac_policy_delete_not_bound_tenant_success(self): context = mock.Mock() with mock.patch.object( self._test_class, 'get_bound_tenant_ids', return_value={'fake_tid2', 'fake_tid3'}), \ mock.patch.object(self._test_class, '_get_db_obj_rbac_entries') as get_rbac_entries_mock, \ mock.patch.object( self._test_class, '_get_tenants_with_shared_access_to_db_obj') as sh_tids: get_rbac_entries_mock.filter.return_value.count.return_value = 0 self._test_class._validate_rbac_policy_delete( context=context, obj_id='fake_obj_id', target_tenant='fake_tid1') sh_tids.assert_not_called() @mock.patch.object(_test_class, '_get_db_obj_rbac_entries') @mock.patch.object(_test_class, '_get_tenants_with_shared_access_to_db_obj', return_value=['some_other_tenant']) @mock.patch.object(_test_class, 'get_bound_tenant_ids', return_value={'fake_id1'}) def test_validate_rbac_policy_delete_fails_single_used_wildcarded( self, get_bound_tenant_ids_mock, mock_tenants_with_shared_access, _get_db_obj_rbac_entries_mock): policy = {'action': rbac_db_models.ACCESS_SHARED, 'target_tenant': '*', 'tenant_id': 'object_owner_tenant_id', 'object_id': 'fake_obj_id'} context = mock.Mock() with mock.patch.object(self._test_class, 'get_object'): self.assertRaises( ext_rbac.RbacPolicyInUse, self._test_class.validate_rbac_policy_delete, resource=mock.Mock(), event=events.BEFORE_DELETE, trigger='dummy_trigger', context=context, object_type=self._test_class.rbac_db_model.object_type, policy=policy) @mock.patch.object(_test_class, 'attach_rbac') @mock.patch.object(obj_db_api, 'get_object', return_value=['fake_rbac_policy']) @mock.patch.object(_test_class, '_validate_rbac_policy_delete') def test_update_shared_avoid_duplicate_update( self, mock_validate_delete, get_object_mock, attach_rbac_mock): obj_id = 'fake_obj_id' self._test_class(mock.Mock()).update_shared(is_shared_new=True, obj_id=obj_id) get_object_mock.assert_called_with( mock.ANY, self._test_class.rbac_db_model, object_id=obj_id, target_tenant='*', action=rbac_db_models.ACCESS_SHARED) self.assertFalse(mock_validate_delete.called) self.assertFalse(attach_rbac_mock.called) @mock.patch.object(_test_class, 'attach_rbac') @mock.patch.object(obj_db_api, 'get_object', return_value=[]) @mock.patch.object(_test_class, '_validate_rbac_policy_delete') def test_update_shared_wildcard( self, mock_validate_delete, get_object_mock, attach_rbac_mock): obj_id = 'fake_obj_id' test_neutron_obj = self._test_class(mock.Mock()) test_neutron_obj.update_shared(is_shared_new=True, obj_id=obj_id) get_object_mock.assert_called_with( mock.ANY, self._test_class.rbac_db_model, object_id=obj_id, target_tenant='*', action=rbac_db_models.ACCESS_SHARED) attach_rbac_mock.assert_called_with( obj_id, test_neutron_obj._context.tenant_id) @mock.patch.object(_test_class, 'attach_rbac') @mock.patch.object(obj_db_api, 'get_object', return_value=['fake_rbac_policy']) @mock.patch.object(_test_class, '_validate_rbac_policy_delete') def test_update_shared_remove_wildcard_sharing( self, mock_validate_delete, get_object_mock, attach_rbac_mock): obj_id = 'fake_obj_id' 
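# get_object is mocked to report an existing wildcard ('*') RBAC entry,
# so clearing the shared flag must trigger validation of that entry's
# deletion instead of attaching a new policy.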
self._test_class(mock.Mock()).update_shared(is_shared_new=False, obj_id=obj_id) get_object_mock.assert_called_with( mock.ANY, self._test_class.rbac_db_model, object_id=obj_id, target_tenant='*', action=rbac_db_models.ACCESS_SHARED) self.assertFalse(attach_rbac_mock.attach_rbac.called) mock_validate_delete.assert_called_with(mock.ANY, obj_id, '*') @mock.patch.object(_test_class, 'create_rbac_policy') def test_attach_rbac_returns_type(self, create_rbac_mock): obj_id = 'fake_obj_id' tenant_id = 'fake_tenant_id' target_tenant = 'fake_target_tenant' self._test_class(mock.Mock()).attach_rbac(obj_id, tenant_id, target_tenant) rbac_pol = create_rbac_mock.call_args_list[0][0][1]['rbac_policy'] self.assertEqual(rbac_pol['object_id'], obj_id) self.assertEqual(rbac_pol['target_tenant'], target_tenant) self.assertEqual(rbac_pol['action'], rbac_db_models.ACCESS_SHARED) self.assertEqual(rbac_pol['object_type'], self._test_class.rbac_db_model.object_type) neutron-8.4.0/neutron/tests/unit/objects/qos/0000775000567000056710000000000013044373210022421 5ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/tests/unit/objects/qos/__init__.py0000664000567000056710000000000013044372736024534 0ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/tests/unit/objects/qos/test_policy.py0000664000567000056710000002577713044372760025364 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
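# NOTE: QosPolicyObjectTestCase below fakes the persistence layer with an
# in-memory model map, while QosPolicyDbObjectTestCase also mixes in
# testlib_api.SqlTestCase and runs against a real database session, which
# is why its setUp() creates genuine network and port rows for policies to
# bind to.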
import mock from neutron.common import exceptions as n_exc from neutron.objects.db import api as db_api from neutron.objects.qos import policy from neutron.objects.qos import rule from neutron.tests.unit.objects import test_base from neutron.tests.unit import testlib_api class QosPolicyObjectTestCase(test_base.BaseObjectIfaceTestCase): _test_class = policy.QosPolicy def setUp(self): super(QosPolicyObjectTestCase, self).setUp() # qos_policy_ids will be incorrect, but we don't care in this test self.db_qos_bandwidth_rules = [ self.get_random_fields(rule.QosBandwidthLimitRule) for _ in range(3)] self.model_map = { self._test_class.db_model: self.db_objs, self._test_class.rbac_db_model: [], self._test_class.port_binding_model: [], self._test_class.network_binding_model: [], rule.QosBandwidthLimitRule.db_model: self.db_qos_bandwidth_rules} self.get_object = mock.patch.object( db_api, 'get_object', side_effect=self.fake_get_object).start() self.get_objects = mock.patch.object( db_api, 'get_objects', side_effect=self.fake_get_objects).start() def fake_get_objects(self, context, model, **kwargs): return self.model_map[model] def fake_get_object(self, context, model, **kwargs): objects = self.model_map[model] if not objects: return None return [obj for obj in objects if obj['id'] == kwargs['id']][0] def test_get_objects(self): admin_context = self.context.elevated() with mock.patch.object(self.context, 'elevated', return_value=admin_context) as context_mock: objs = self._test_class.get_objects(self.context) context_mock.assert_called_once_with() self.get_objects.assert_any_call( admin_context, self._test_class.db_model) self._validate_objects(self.db_objs, objs) def test_get_objects_valid_fields(self): admin_context = self.context.elevated() with mock.patch.object( db_api, 'get_objects', return_value=[self.db_obj]) as get_objects_mock: with mock.patch.object( self.context, 'elevated', return_value=admin_context) as context_mock: objs = self._test_class.get_objects( self.context, **self.valid_field_filter) context_mock.assert_called_once_with() get_objects_mock.assert_any_call( admin_context, self._test_class.db_model, **self.valid_field_filter) self._validate_objects([self.db_obj], objs) def test_get_object(self): admin_context = self.context.elevated() with mock.patch.object(db_api, 'get_object', return_value=self.db_obj) as get_object_mock: with mock.patch.object(self.context, 'elevated', return_value=admin_context) as context_mock: obj = self._test_class.get_object(self.context, id='fake_id') self.assertTrue(self._is_test_class(obj)) self.assertEqual(self.db_obj, test_base.get_obj_db_fields(obj)) context_mock.assert_called_once_with() get_object_mock.assert_called_once_with( admin_context, self._test_class.db_model, id='fake_id') class QosPolicyDbObjectTestCase(test_base.BaseDbObjectTestCase, testlib_api.SqlTestCase): _test_class = policy.QosPolicy def setUp(self): super(QosPolicyDbObjectTestCase, self).setUp() self._create_test_network() self._create_test_port(self._network) def _create_test_policy(self): policy_obj = policy.QosPolicy(self.context, **self.db_obj) policy_obj.create() return policy_obj def _create_test_policy_with_rule(self): policy_obj = self._create_test_policy() rule_fields = self.get_random_fields( obj_cls=rule.QosBandwidthLimitRule) rule_fields['qos_policy_id'] = policy_obj.id rule_obj = rule.QosBandwidthLimitRule(self.context, **rule_fields) rule_obj.create() return policy_obj, rule_obj def test_attach_network_get_network_policy(self): obj = self._create_test_policy() 
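# No binding exists yet, so the lookup below must return None before
# attach_network() is exercised.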
policy_obj = policy.QosPolicy.get_network_policy(self.context, self._network['id']) self.assertIsNone(policy_obj) # Now attach policy and repeat obj.attach_network(self._network['id']) policy_obj = policy.QosPolicy.get_network_policy(self.context, self._network['id']) self.assertEqual(obj, policy_obj) def test_attach_network_nonexistent_network(self): obj = self._create_test_policy() self.assertRaises(n_exc.NetworkQosBindingNotFound, obj.attach_network, 'non-existent-network') def test_attach_port_nonexistent_port(self): obj = self._create_test_policy() self.assertRaises(n_exc.PortQosBindingNotFound, obj.attach_port, 'non-existent-port') def test_attach_network_nonexistent_policy(self): policy_obj = policy.QosPolicy(self.context, **self.db_obj) self.assertRaises(n_exc.NetworkQosBindingNotFound, policy_obj.attach_network, self._network['id']) def test_attach_port_nonexistent_policy(self): policy_obj = policy.QosPolicy(self.context, **self.db_obj) self.assertRaises(n_exc.PortQosBindingNotFound, policy_obj.attach_port, self._port['id']) def test_attach_port_get_port_policy(self): obj = self._create_test_policy() policy_obj = policy.QosPolicy.get_port_policy(self.context, self._port['id']) self.assertIsNone(policy_obj) # Now attach policy and repeat obj.attach_port(self._port['id']) policy_obj = policy.QosPolicy.get_port_policy(self.context, self._port['id']) self.assertEqual(obj, policy_obj) def test_detach_port(self): obj = self._create_test_policy() obj.attach_port(self._port['id']) obj.detach_port(self._port['id']) policy_obj = policy.QosPolicy.get_port_policy(self.context, self._port['id']) self.assertIsNone(policy_obj) def test_detach_network(self): obj = self._create_test_policy() obj.attach_network(self._network['id']) obj.detach_network(self._network['id']) policy_obj = policy.QosPolicy.get_network_policy(self.context, self._network['id']) self.assertIsNone(policy_obj) def test_detach_port_nonexistent_port(self): obj = self._create_test_policy() self.assertRaises(n_exc.PortQosBindingNotFound, obj.detach_port, 'non-existent-port') def test_detach_network_nonexistent_network(self): obj = self._create_test_policy() self.assertRaises(n_exc.NetworkQosBindingNotFound, obj.detach_network, 'non-existent-network') def test_detach_port_nonexistent_policy(self): policy_obj = policy.QosPolicy(self.context, **self.db_obj) self.assertRaises(n_exc.PortQosBindingNotFound, policy_obj.detach_port, self._port['id']) def test_detach_network_nonexistent_policy(self): policy_obj = policy.QosPolicy(self.context, **self.db_obj) self.assertRaises(n_exc.NetworkQosBindingNotFound, policy_obj.detach_network, self._network['id']) def test_synthetic_rule_fields(self): policy_obj, rule_obj = self._create_test_policy_with_rule() policy_obj = policy.QosPolicy.get_object(self.context, id=policy_obj.id) self.assertEqual([rule_obj], policy_obj.rules) def test_get_object_fetches_rules_non_lazily(self): policy_obj, rule_obj = self._create_test_policy_with_rule() policy_obj = policy.QosPolicy.get_object(self.context, id=policy_obj.id) primitive = policy_obj.obj_to_primitive() self.assertNotEqual([], (primitive['versioned_object.data']['rules'])) def test_to_dict_returns_rules_as_dicts(self): policy_obj, rule_obj = self._create_test_policy_with_rule() policy_obj = policy.QosPolicy.get_object(self.context, id=policy_obj.id) obj_dict = policy_obj.to_dict() rule_dict = rule_obj.to_dict() # first make sure that to_dict() is still sane and does not return # objects for obj in (rule_dict, obj_dict): self.assertIsInstance(obj, 
dict) self.assertEqual(rule_dict, obj_dict['rules'][0]) def test_shared_default(self): obj = self._test_class(self.context, **self.db_obj) self.assertFalse(obj.shared) def test_delete_not_allowed_if_policy_in_use_by_port(self): obj = self._create_test_policy() obj.attach_port(self._port['id']) self.assertRaises(n_exc.QosPolicyInUse, obj.delete) obj.detach_port(self._port['id']) obj.delete() def test_delete_not_allowed_if_policy_in_use_by_network(self): obj = self._create_test_policy() obj.attach_network(self._network['id']) self.assertRaises(n_exc.QosPolicyInUse, obj.delete) obj.detach_network(self._network['id']) obj.delete() def test_reload_rules_reloads_rules(self): policy_obj, rule_obj = self._create_test_policy_with_rule() self.assertEqual([], policy_obj.rules) policy_obj.reload_rules() self.assertEqual([rule_obj], policy_obj.rules) def test_get_bound_tenant_ids_returns_set_of_tenant_ids(self): obj = self._create_test_policy() obj.attach_port(self._port['id']) ids = self._test_class.get_bound_tenant_ids(self.context, obj['id']) self.assertEqual(ids.pop(), self._port['tenant_id']) self.assertEqual(len(ids), 0) obj.detach_port(self._port['id']) obj.delete() neutron-8.4.0/neutron/tests/unit/objects/qos/test_rule.py0000664000567000056710000000663313044372760025022 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
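# NOTE: whether a rule applies to a port depends on the port's
# device_owner: network-owned ports (e.g. router interfaces) only honour
# rules belonging to the port-level policy, while compute ports accept
# both port- and network-level policies. The four
# test_should_apply_to_port_* cases below enumerate that matrix.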
from neutron.common import constants from neutron.objects.qos import policy from neutron.objects.qos import rule from neutron.services.qos import qos_consts from neutron.tests import base as neutron_test_base from neutron.tests.unit.objects import test_base from neutron.tests.unit import testlib_api POLICY_ID_A = 'policy-id-a' POLICY_ID_B = 'policy-id-b' DEVICE_OWNER_COMPUTE = constants.DEVICE_OWNER_COMPUTE_PREFIX + 'fake' class QosRuleObjectTestCase(neutron_test_base.BaseTestCase): def _test_should_apply_to_port(self, rule_policy_id, port_policy_id, device_owner, expected_result): test_rule = rule.QosRule(qos_policy_id=rule_policy_id) port = {qos_consts.QOS_POLICY_ID: port_policy_id, 'device_owner': device_owner} self.assertEqual(expected_result, test_rule.should_apply_to_port(port)) def test_should_apply_to_port_with_network_port_and_net_policy(self): self._test_should_apply_to_port( rule_policy_id=POLICY_ID_B, port_policy_id=POLICY_ID_A, device_owner=constants.DEVICE_OWNER_ROUTER_INTF, expected_result=False) def test_should_apply_to_port_with_network_port_and_port_policy(self): self._test_should_apply_to_port( rule_policy_id=POLICY_ID_A, port_policy_id=POLICY_ID_A, device_owner=constants.DEVICE_OWNER_ROUTER_INTF, expected_result=True) def test_should_apply_to_port_with_compute_port_and_net_policy(self): self._test_should_apply_to_port( rule_policy_id=POLICY_ID_B, port_policy_id=POLICY_ID_A, device_owner=DEVICE_OWNER_COMPUTE, expected_result=True) def test_should_apply_to_port_with_compute_port_and_port_policy(self): self._test_should_apply_to_port( rule_policy_id=POLICY_ID_A, port_policy_id=POLICY_ID_A, device_owner=DEVICE_OWNER_COMPUTE, expected_result=True) class QosBandwidthLimitRuleObjectTestCase(test_base.BaseObjectIfaceTestCase): _test_class = rule.QosBandwidthLimitRule def test_to_dict_returns_type(self): obj = rule.QosBandwidthLimitRule(self.context, **self.db_obj) dict_ = obj.to_dict() self.assertEqual(qos_consts.RULE_TYPE_BANDWIDTH_LIMIT, dict_['type']) class QosBandwidthLimitRuleDbObjectTestCase(test_base.BaseDbObjectTestCase, testlib_api.SqlTestCase): _test_class = rule.QosBandwidthLimitRule def setUp(self): super(QosBandwidthLimitRuleDbObjectTestCase, self).setUp() # Prepare policy to be able to insert a rule generated_qos_policy_id = self.db_obj['qos_policy_id'] policy_obj = policy.QosPolicy(self.context, id=generated_qos_policy_id) policy_obj.create() neutron-8.4.0/neutron/tests/unit/objects/qos/test_rule_type.py0000664000567000056710000000343613044372760026061 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
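# NOTE: QosRuleType objects are read-only views over the rule types that
# the loaded core plugin advertises via its supported_qos_rule_types
# attribute, so the test below stubs that attribute on the plugin rather
# than a database model.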
# rule types are so different from other objects that we don't base the test # class on the common base class for all objects import mock from neutron import manager from neutron.objects.qos import rule_type from neutron.services.qos import qos_consts from neutron.tests import base as test_base DB_PLUGIN_KLASS = 'neutron.db.db_base_plugin_v2.NeutronDbPluginV2' class QosRuleTypeObjectTestCase(test_base.BaseTestCase): def setUp(self): self.config_parse() self.setup_coreplugin(DB_PLUGIN_KLASS) super(QosRuleTypeObjectTestCase, self).setUp() def test_get_objects(self): core_plugin = manager.NeutronManager.get_plugin() rule_types_mock = mock.PropertyMock( return_value=qos_consts.VALID_RULE_TYPES) with mock.patch.object(core_plugin, 'supported_qos_rule_types', new_callable=rule_types_mock, create=True): types = rule_type.QosRuleType.get_objects() self.assertEqual(sorted(qos_consts.VALID_RULE_TYPES), sorted(type_['type'] for type_ in types)) def test_wrong_type(self): self.assertRaises(ValueError, rule_type.QosRuleType, type='bad_type') neutron-8.4.0/neutron/tests/unit/test_context.py0000664000567000056710000001432213044372760023276 0ustar jenkinsjenkins00000000000000# Copyright 2012 VMware, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import mock from oslo_context import context as oslo_context from testtools import matchers from neutron import context from neutron.tests import base class TestNeutronContext(base.BaseTestCase): def setUp(self): super(TestNeutronContext, self).setUp() db_api = 'neutron.db.api.get_session' self._db_api_session_patcher = mock.patch(db_api) self.db_api_session = self._db_api_session_patcher.start() def test_neutron_context_create(self): ctx = context.Context('user_id', 'tenant_id') self.assertEqual('user_id', ctx.user_id) self.assertEqual('tenant_id', ctx.project_id) self.assertEqual('tenant_id', ctx.tenant_id) request_id = ctx.request_id if isinstance(request_id, bytes): request_id = request_id.decode('utf-8') self.assertThat(request_id, matchers.StartsWith('req-')) self.assertEqual('user_id', ctx.user) self.assertEqual('tenant_id', ctx.tenant) self.assertIsNone(ctx.user_name) self.assertIsNone(ctx.tenant_name) self.assertIsNone(ctx.auth_token) def test_neutron_context_create_with_name(self): ctx = context.Context('user_id', 'tenant_id', tenant_name='tenant_name', user_name='user_name') # Check name is set self.assertEqual('user_name', ctx.user_name) self.assertEqual('tenant_name', ctx.tenant_name) # Check user/tenant contains its ID even if user/tenant_name is passed self.assertEqual('user_id', ctx.user) self.assertEqual('tenant_id', ctx.tenant) def test_neutron_context_create_with_request_id(self): ctx = context.Context('user_id', 'tenant_id', request_id='req_id_xxx') self.assertEqual('req_id_xxx', ctx.request_id) def test_neutron_context_create_with_auth_token(self): ctx = context.Context('user_id', 'tenant_id', auth_token='auth_token_xxx') self.assertEqual('auth_token_xxx', ctx.auth_token) def test_neutron_context_to_dict(self): ctx = 
context.Context('user_id', 'tenant_id') ctx_dict = ctx.to_dict() self.assertEqual('user_id', ctx_dict['user_id']) self.assertEqual('tenant_id', ctx_dict['project_id']) self.assertEqual(ctx.request_id, ctx_dict['request_id']) self.assertEqual('user_id', ctx_dict['user']) self.assertEqual('tenant_id', ctx_dict['tenant']) self.assertIsNone(ctx_dict['user_name']) self.assertIsNone(ctx_dict['tenant_name']) self.assertIsNone(ctx_dict['project_name']) self.assertIsNone(ctx_dict['auth_token']) def test_neutron_context_to_dict_with_name(self): ctx = context.Context('user_id', 'tenant_id', tenant_name='tenant_name', user_name='user_name') ctx_dict = ctx.to_dict() self.assertEqual('user_name', ctx_dict['user_name']) self.assertEqual('tenant_name', ctx_dict['tenant_name']) self.assertEqual('tenant_name', ctx_dict['project_name']) def test_neutron_context_to_dict_with_auth_token(self): ctx = context.Context('user_id', 'tenant_id', auth_token='auth_token_xxx') ctx_dict = ctx.to_dict() self.assertEqual('auth_token_xxx', ctx_dict['auth_token']) def test_neutron_context_admin_to_dict(self): self.db_api_session.return_value = 'fakesession' ctx = context.get_admin_context() ctx_dict = ctx.to_dict() self.assertIsNone(ctx_dict['user_id']) self.assertIsNone(ctx_dict['tenant_id']) self.assertIsNone(ctx_dict['auth_token']) self.assertIsNotNone(ctx.session) self.assertNotIn('session', ctx_dict) def test_neutron_context_admin_without_session_to_dict(self): ctx = context.get_admin_context_without_session() ctx_dict = ctx.to_dict() self.assertIsNone(ctx_dict['user_id']) self.assertIsNone(ctx_dict['tenant_id']) self.assertIsNone(ctx_dict['auth_token']) self.assertFalse(hasattr(ctx, 'session')) def test_neutron_context_elevated_retains_request_id(self): ctx = context.Context('user_id', 'tenant_id') self.assertFalse(ctx.is_admin) req_id_before = ctx.request_id elevated_ctx = ctx.elevated() self.assertTrue(elevated_ctx.is_admin) self.assertEqual(req_id_before, elevated_ctx.request_id) def test_neutron_context_overwrite(self): ctx1 = context.Context('user_id', 'tenant_id') self.assertEqual(ctx1.request_id, oslo_context.get_current().request_id) # If overwrite is not specified, request_id should be updated. ctx2 = context.Context('user_id', 'tenant_id') self.assertNotEqual(ctx2.request_id, ctx1.request_id) self.assertEqual(ctx2.request_id, oslo_context.get_current().request_id) # If overwrite is specified, request_id should be kept. ctx3 = context.Context('user_id', 'tenant_id', overwrite=False) self.assertNotEqual(ctx3.request_id, ctx2.request_id) self.assertEqual(ctx2.request_id, oslo_context.get_current().request_id) def test_neutron_context_get_admin_context_not_update_local_store(self): ctx = context.Context('user_id', 'tenant_id') req_id_before = oslo_context.get_current().request_id self.assertEqual(ctx.request_id, req_id_before) ctx_admin = context.get_admin_context() self.assertEqual(req_id_before, oslo_context.get_current().request_id) self.assertNotEqual(req_id_before, ctx_admin.request_id) neutron-8.4.0/neutron/tests/unit/test_wsgi.py0000664000567000056710000006246213044372760022573 0ustar jenkinsjenkins00000000000000# Copyright 2013 OpenStack Foundation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import os import socket import ssl import mock from oslo_config import cfg import six.moves.urllib.request as urlrequest import testtools import webob import webob.exc from neutron.common import exceptions as exception from neutron.db import api from neutron.tests import base from neutron.tests.common import helpers from neutron import wsgi CONF = cfg.CONF TEST_VAR_DIR = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', 'var')) def open_no_proxy(*args, **kwargs): # NOTE(jamespage): # Deal with more secure certification chain verification # introduced in python 2.7.9 under PEP-0476 # https://github.com/python/peps/blob/master/pep-0476.txt if hasattr(ssl, "_create_unverified_context"): opener = urlrequest.build_opener( urlrequest.ProxyHandler({}), urlrequest.HTTPSHandler(context=ssl._create_unverified_context()) ) else: opener = urlrequest.build_opener(urlrequest.ProxyHandler({})) return opener.open(*args, **kwargs) class TestServiceBase(base.BaseTestCase): """Service tests base.""" @mock.patch("neutron.policy.refresh") @mock.patch("neutron.common.config.setup_logging") def _test_reset(self, worker_service, setup_logging_mock, refresh_mock): worker_service.reset() setup_logging_mock.assert_called_once_with() refresh_mock.assert_called_once_with() class TestWorkerService(TestServiceBase): """WorkerService tests.""" @mock.patch('neutron.db.api.get_engine') def test_start_withoutdb_call(self, apimock): # clear engine from other tests api._FACADE = None _service = mock.Mock() _service.pool.spawn.return_value = None _app = mock.Mock() workerservice = wsgi.WorkerService(_service, _app) workerservice.start() self.assertFalse(apimock.called) def test_reset(self): _service = mock.Mock() _app = mock.Mock() worker_service = wsgi.WorkerService(_service, _app) self._test_reset(worker_service) class TestWSGIServer(base.BaseTestCase): """WSGI server tests.""" def test_start_random_port(self): server = wsgi.Server("test_random_port") server.start(None, 0, host="127.0.0.1") self.assertNotEqual(0, server.port) server.stop() server.wait() @mock.patch('oslo_service.service.ProcessLauncher') def test_start_multiple_workers(self, ProcessLauncher): launcher = ProcessLauncher.return_value server = wsgi.Server("test_multiple_processes") server.start(None, 0, host="127.0.0.1", workers=2) launcher.launch_service.assert_called_once_with(mock.ANY, workers=2) server.stop() launcher.stop.assert_called_once_with() server.wait() launcher.wait.assert_called_once_with() def test_start_random_port_with_ipv6(self): server = wsgi.Server("test_random_port") server.start(None, 0, host="::1") self.assertEqual("::1", server.host) self.assertNotEqual(0, server.port) server.stop() server.wait() def test_ipv6_listen_called_with_scope(self): server = wsgi.Server("test_app") with mock.patch.object(wsgi.eventlet, 'listen') as mock_listen: with mock.patch.object(socket, 'getaddrinfo') as mock_get_addr: mock_get_addr.return_value = [ (socket.AF_INET6, socket.SOCK_STREAM, socket.IPPROTO_TCP, '', ('fe80::204:acff:fe96:da87%eth0', 1234, 0, 2)) ] with mock.patch.object(server, 'pool') as mock_pool: server.start(None, 1234, 
host="fe80::204:acff:fe96:da87%eth0") mock_get_addr.assert_called_once_with( "fe80::204:acff:fe96:da87%eth0", 1234, socket.AF_UNSPEC, socket.SOCK_STREAM ) mock_listen.assert_called_once_with( ('fe80::204:acff:fe96:da87%eth0', 1234, 0, 2), family=socket.AF_INET6, backlog=cfg.CONF.backlog ) mock_pool.spawn.assert_has_calls([ mock.call( server._run, None, mock_listen.return_value.dup.return_value) ]) def test_app(self): greetings = 'Hello, World!!!' def hello_world(env, start_response): if env['PATH_INFO'] != '/': start_response('404 Not Found', [('Content-Type', 'text/plain')]) return ['Not Found\r\n'] start_response('200 OK', [('Content-Type', 'text/plain')]) return [greetings] server = wsgi.Server("test_app") server.start(hello_world, 0, host="127.0.0.1") response = open_no_proxy('http://127.0.0.1:%d/' % server.port) self.assertEqual(greetings.encode('utf-8'), response.read()) server.stop() def test_disable_ssl(self): CONF.set_default('use_ssl', True) greetings = 'Hello, World!!!' def hello_world(env, start_response): if env['PATH_INFO'] != '/': start_response('404 Not Found', [('Content-Type', 'text/plain')]) return ['Not Found\r\n'] start_response('200 OK', [('Content-Type', 'text/plain')]) return [greetings] server = wsgi.Server("test_app", disable_ssl=True) server.start(hello_world, 0, host="127.0.0.1") response = open_no_proxy('http://127.0.0.1:%d/' % server.port) self.assertEqual(greetings.encode('utf-8'), response.read()) server.stop() @mock.patch.object(wsgi, 'eventlet') def test__run(self, eventlet_mock): server = wsgi.Server('test') server._run("app", "socket") eventlet_mock.wsgi.server.assert_called_once_with( 'socket', 'app', max_size=server.num_threads, log=mock.ANY, keepalive=CONF.wsgi_keep_alive, socket_timeout=server.client_socket_timeout ) class SerializerTest(base.BaseTestCase): def test_serialize_unknown_content_type(self): """Verify that exception InvalidContentType is raised.""" input_dict = {'servers': {'test': 'pass'}} content_type = 'application/unknown' serializer = wsgi.Serializer() self.assertRaises( exception.InvalidContentType, serializer.serialize, input_dict, content_type) def test_get_deserialize_handler_unknown_content_type(self): """Verify that exception InvalidContentType is raised.""" content_type = 'application/unknown' serializer = wsgi.Serializer() self.assertRaises( exception.InvalidContentType, serializer.get_deserialize_handler, content_type) def test_serialize_content_type_json(self): """Test serialize with content type json.""" input_data = {'servers': ['test=pass']} content_type = 'application/json' serializer = wsgi.Serializer() result = serializer.serialize(input_data, content_type) self.assertEqual(b'{"servers": ["test=pass"]}', result) def test_deserialize_raise_bad_request(self): """Test serialize verifies that exception is raises.""" content_type = 'application/unknown' data_string = 'test' serializer = wsgi.Serializer() self.assertRaises( webob.exc.HTTPBadRequest, serializer.deserialize, data_string, content_type) def test_deserialize_json_content_type(self): """Test Serializer.deserialize with content type json.""" content_type = 'application/json' data_string = '{"servers": ["test=pass"]}' serializer = wsgi.Serializer() result = serializer.deserialize(data_string, content_type) self.assertEqual({'body': {u'servers': [u'test=pass']}}, result) class RequestDeserializerTest(testtools.TestCase): def setUp(self): super(RequestDeserializerTest, self).setUp() class JSONDeserializer(object): def deserialize(self, data, action='default'): return 
'pew_json' self.body_deserializers = {'application/json': JSONDeserializer()} self.deserializer = wsgi.RequestDeserializer(self.body_deserializers) def test_get_deserializer(self): """Test RequestDeserializer.get_body_deserializer.""" expected_json_serializer = self.deserializer.get_body_deserializer( 'application/json') self.assertEqual( expected_json_serializer, self.body_deserializers['application/json']) def test_get_expected_content_type(self): """Test RequestDeserializer.get_expected_content_type.""" request = wsgi.Request.blank('/') request.headers['Accept'] = 'application/json' self.assertEqual('application/json', self.deserializer.get_expected_content_type(request)) def test_get_action_args(self): """Test RequestDeserializer.get_action_args.""" env = { 'wsgiorg.routing_args': [None, { 'controller': None, 'format': None, 'action': 'update', 'id': 12}]} expected = {'action': 'update', 'id': 12} self.assertEqual(expected, self.deserializer.get_action_args(env)) def test_deserialize(self): """Test RequestDeserializer.deserialize.""" with mock.patch.object( self.deserializer, 'get_action_args') as mock_method: mock_method.return_value = {'action': 'create'} request = wsgi.Request.blank('/') request.headers['Accept'] = 'application/json' deserialized = self.deserializer.deserialize(request) expected = ('create', {}, 'application/json') self.assertEqual(expected, deserialized) def test_get_body_deserializer_unknown_content_type(self): """Verify that exception InvalidContentType is raised.""" content_type = 'application/unknown' deserializer = wsgi.RequestDeserializer() self.assertRaises( exception.InvalidContentType, deserializer.get_body_deserializer, content_type) class ResponseSerializerTest(testtools.TestCase): def setUp(self): super(ResponseSerializerTest, self).setUp() class JSONSerializer(object): def serialize(self, data, action='default'): return b'pew_json' class HeadersSerializer(object): def serialize(self, response, data, action): response.status_int = 404 self.body_serializers = {'application/json': JSONSerializer()} self.serializer = wsgi.ResponseSerializer( self.body_serializers, HeadersSerializer()) def test_serialize_unknown_content_type(self): """Verify that exception InvalidContentType is raised.""" self.assertRaises( exception.InvalidContentType, self.serializer.serialize, {}, 'application/unknown') def test_get_body_serializer(self): """Verify that exception InvalidContentType is raised.""" self.assertRaises( exception.InvalidContentType, self.serializer.get_body_serializer, 'application/unknown') def test_get_serializer(self): """Test ResponseSerializer.get_body_serializer.""" content_type = 'application/json' self.assertEqual(self.body_serializers[content_type], self.serializer.get_body_serializer(content_type)) def test_serialize_json_response(self): response = self.serializer.serialize({}, 'application/json') self.assertEqual('application/json', response.headers['Content-Type']) self.assertEqual(b'pew_json', response.body) self.assertEqual(404, response.status_int) def test_serialize_response_None(self): response = self.serializer.serialize( None, 'application/json') self.assertEqual('application/json', response.headers['Content-Type']) self.assertEqual(b'', response.body) self.assertEqual(404, response.status_int) class RequestTest(base.BaseTestCase): def test_content_type_missing(self): request = wsgi.Request.blank('/tests/123', method='POST') request.body = b"" self.assertIsNone(request.get_content_type()) def test_content_type_unsupported(self): request = 
wsgi.Request.blank('/tests/123', method='POST')
        request.headers["Content-Type"] = "text/html"
        request.body = b"fake"
        self.assertIsNone(request.get_content_type())

    def test_content_type_with_charset(self):
        request = wsgi.Request.blank('/tests/123')
        request.headers["Content-Type"] = "application/json; charset=UTF-8"
        result = request.get_content_type()
        self.assertEqual("application/json", result)

    def test_content_type_with_given_content_types(self):
        request = wsgi.Request.blank('/tests/123')
        request.headers["Content-Type"] = "application/new-type;"
        self.assertIsNone(request.get_content_type())

    def test_content_type_from_accept(self):
        request = wsgi.Request.blank('/tests/123')
        request.headers["Accept"] = "application/json"
        result = request.best_match_content_type()
        self.assertEqual("application/json", result)

        request = wsgi.Request.blank('/tests/123')
        request.headers["Accept"] = ("application/json; q=0.3")
        result = request.best_match_content_type()
        self.assertEqual("application/json", result)

    def test_content_type_from_query_extension(self):
        request = wsgi.Request.blank('/tests/123.json')
        result = request.best_match_content_type()
        self.assertEqual("application/json", result)

        request = wsgi.Request.blank('/tests/123.invalid')
        result = request.best_match_content_type()
        self.assertEqual("application/json", result)

    def test_content_type_accept_and_query_extension(self):
        request = wsgi.Request.blank('/tests/123.json')
        request.headers["Accept"] = "application/json"
        result = request.best_match_content_type()
        self.assertEqual("application/json", result)

    def test_content_type_accept_default(self):
        request = wsgi.Request.blank('/tests/123.unsupported')
        request.headers["Accept"] = "application/unsupported1"
        result = request.best_match_content_type()
        self.assertEqual("application/json", result)

    def test_content_type_accept_with_given_content_types(self):
        request = wsgi.Request.blank('/tests/123')
        request.headers["Accept"] = "application/new_type"
        result = request.best_match_content_type()
        self.assertEqual("application/json", result)


class ActionDispatcherTest(base.BaseTestCase):

    def test_dispatch(self):
        """Test ActionDispatcher.dispatch."""
        serializer = wsgi.ActionDispatcher()
        serializer.create = lambda x: x
        self.assertEqual('pants',
                         serializer.dispatch('pants', action='create'))

    def test_dispatch_action_None(self):
        """Test ActionDispatcher.dispatch with none action."""
        serializer = wsgi.ActionDispatcher()
        serializer.create = lambda x: x + ' pants'
        serializer.default = lambda x: x + ' trousers'
        self.assertEqual('Two trousers',
                         serializer.dispatch('Two', action=None))

    def test_dispatch_default(self):
        serializer = wsgi.ActionDispatcher()
        serializer.create = lambda x: x + ' pants'
        serializer.default = lambda x: x + ' trousers'
        self.assertEqual('Two trousers',
                         serializer.dispatch('Two', action='update'))


class ResponseHeadersSerializerTest(base.BaseTestCase):

    def test_default(self):
        serializer = wsgi.ResponseHeaderSerializer()
        response = webob.Response()
        serializer.serialize(response, {'v': '123'}, 'fake')
        self.assertEqual(200, response.status_int)

    def test_custom(self):
        class Serializer(wsgi.ResponseHeaderSerializer):
            def update(self, response, data):
                response.status_int = 404
                response.headers['X-Custom-Header'] = data['v']

        serializer = Serializer()
        response = webob.Response()
        serializer.serialize(response, {'v': '123'}, 'update')
        self.assertEqual(404, response.status_int)
        self.assertEqual('123', response.headers['X-Custom-Header'])


class DictSerializerTest(base.BaseTestCase):

    def test_dispatch_default(self):
        serializer = wsgi.DictSerializer()
        self.assertEqual('',
                         serializer.serialize({}, 'NonExistentAction'))


class
JSONDictSerializerTest(base.BaseTestCase): def test_json(self): input_dict = dict(servers=dict(a=(2, 3))) expected_json = b'{"servers":{"a":[2,3]}}' serializer = wsgi.JSONDictSerializer() result = serializer.serialize(input_dict) result = result.replace(b'\n', b'').replace(b' ', b'') self.assertEqual(expected_json, result) # The tested behaviour is only meant to be witnessed in Python 2, so it is # OK to skip this test with Python 3. @helpers.requires_py2 def test_json_with_utf8(self): input_dict = dict(servers=dict(a=(2, '\xe7\xbd\x91\xe7\xbb\x9c'))) expected_json = b'{"servers":{"a":[2,"\\u7f51\\u7edc"]}}' serializer = wsgi.JSONDictSerializer() result = serializer.serialize(input_dict) result = result.replace(b'\n', b'').replace(b' ', b'') self.assertEqual(expected_json, result) def test_json_with_unicode(self): input_dict = dict(servers=dict(a=(2, u'\u7f51\u7edc'))) expected_json = b'{"servers":{"a":[2,"\\u7f51\\u7edc"]}}' serializer = wsgi.JSONDictSerializer() result = serializer.serialize(input_dict) result = result.replace(b'\n', b'').replace(b' ', b'') self.assertEqual(expected_json, result) class TextDeserializerTest(base.BaseTestCase): def test_dispatch_default(self): deserializer = wsgi.TextDeserializer() self.assertEqual({}, deserializer.deserialize({}, 'update')) class JSONDeserializerTest(base.BaseTestCase): def test_json(self): data = """{"a": { "a1": "1", "a2": "2", "bs": ["1", "2", "3", {"c": {"c1": "1"}}], "d": {"e": "1"}, "f": "1"}}""" as_dict = { 'body': { 'a': { 'a1': '1', 'a2': '2', 'bs': ['1', '2', '3', {'c': {'c1': '1'}}], 'd': {'e': '1'}, 'f': '1'}}} deserializer = wsgi.JSONDeserializer() self.assertEqual(as_dict, deserializer.deserialize(data)) def test_default_raise_Malformed_Exception(self): """Test JsonDeserializer.default. Test verifies JsonDeserializer.default raises exception MalformedRequestBody correctly. 
""" data_string = "" deserializer = wsgi.JSONDeserializer() self.assertRaises( exception.MalformedRequestBody, deserializer.default, data_string) def test_json_with_utf8(self): data = b'{"a": "\xe7\xbd\x91\xe7\xbb\x9c"}' as_dict = {'body': {'a': u'\u7f51\u7edc'}} deserializer = wsgi.JSONDeserializer() self.assertEqual(as_dict, deserializer.deserialize(data)) def test_json_with_unicode(self): data = b'{"a": "\u7f51\u7edc"}' as_dict = {'body': {'a': u'\u7f51\u7edc'}} deserializer = wsgi.JSONDeserializer() self.assertEqual(as_dict, deserializer.deserialize(data)) class RequestHeadersDeserializerTest(base.BaseTestCase): def test_default(self): deserializer = wsgi.RequestHeadersDeserializer() req = wsgi.Request.blank('/') self.assertEqual({}, deserializer.deserialize(req, 'nonExistent')) def test_custom(self): class Deserializer(wsgi.RequestHeadersDeserializer): def update(self, request): return {'a': request.headers['X-Custom-Header']} deserializer = Deserializer() req = wsgi.Request.blank('/') req.headers['X-Custom-Header'] = 'b' self.assertEqual({'a': 'b'}, deserializer.deserialize(req, 'update')) class ResourceTest(base.BaseTestCase): @staticmethod def my_fault_body_function(): return 'off' class Controller(object): def index(self, request, index=None): return index def test_dispatch(self): resource = wsgi.Resource(self.Controller(), self.my_fault_body_function) actual = resource.dispatch( resource.controller, 'index', action_args={'index': 'off'}) expected = 'off' self.assertEqual(expected, actual) def test_dispatch_unknown_controller_action(self): resource = wsgi.Resource(self.Controller(), self.my_fault_body_function) self.assertRaises( AttributeError, resource.dispatch, resource.controller, 'create', {}) def test_malformed_request_body_throws_bad_request(self): resource = wsgi.Resource(None, self.my_fault_body_function) request = wsgi.Request.blank( "/", body=b"{mal:formed", method='POST', headers={'Content-Type': "application/json"}) response = resource(request) self.assertEqual(400, response.status_int) def test_wrong_content_type_throws_unsupported_media_type_error(self): resource = wsgi.Resource(None, self.my_fault_body_function) request = wsgi.Request.blank( "/", body=b"{some:json}", method='POST', headers={'Content-Type': "xxx"}) response = resource(request) self.assertEqual(400, response.status_int) def test_wrong_content_type_server_error(self): resource = wsgi.Resource(None, self.my_fault_body_function) request = wsgi.Request.blank( "/", method='POST', headers={'Content-Type': "unknow"}) response = resource(request) self.assertEqual(500, response.status_int) def test_call_resource_class_bad_request(self): class FakeRequest(object): def __init__(self): self.url = 'http://where.no' self.environ = 'environ' self.body = 'body' def method(self): pass def best_match_content_type(self): return 'best_match_content_type' resource = wsgi.Resource(self.Controller(), self.my_fault_body_function) request = FakeRequest() result = resource(request) self.assertEqual(400, result.status_int) def test_type_error(self): resource = wsgi.Resource(self.Controller(), self.my_fault_body_function) request = wsgi.Request.blank( "/", method='POST', headers={'Content-Type': "json"}) response = resource.dispatch( request, action='index', action_args='test') self.assertEqual(400, response.status_int) def test_call_resource_class_internal_error(self): class FakeRequest(object): def __init__(self): self.url = 'http://where.no' self.environ = 'environ' self.body = '{"Content-Type": "json"}' def method(self): 
pass def best_match_content_type(self): return 'application/json' resource = wsgi.Resource(self.Controller(), self.my_fault_body_function) request = FakeRequest() result = resource(request) self.assertEqual(500, result.status_int) class FaultTest(base.BaseTestCase): def test_call_fault(self): class MyException(object): status_int = 415 explanation = 'test' my_exceptions = MyException() my_fault = wsgi.Fault(exception=my_exceptions) request = wsgi.Request.blank( "/", method='POST', headers={'Content-Type': "unknow"}) response = my_fault(request) self.assertEqual(415, response.status_int) neutron-8.4.0/neutron/tests/unit/ipam/0000775000567000056710000000000013044373210021114 5ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/tests/unit/ipam/__init__.py0000664000567000056710000000000013044372736023227 0ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/tests/unit/ipam/test_subnet_alloc.py0000664000567000056710000002405213044372760025213 0ustar jenkinsjenkins00000000000000# Copyright (c) 2015 Hewlett-Packard Development Company, L.P. # All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import mock import netaddr from oslo_config import cfg from oslo_db import exception as db_exc from oslo_utils import uuidutils from neutron.api.v2 import attributes from neutron.common import constants from neutron.common import exceptions as n_exc from neutron import context from neutron.ipam import requests as ipam_req from neutron.ipam import subnet_alloc from neutron import manager from neutron.tests.unit.db import test_db_base_plugin_v2 from neutron.tests.unit import testlib_api class TestSubnetAllocation(testlib_api.SqlTestCase): def setUp(self): super(TestSubnetAllocation, self).setUp() self._tenant_id = 'test-tenant' self.setup_coreplugin(test_db_base_plugin_v2.DB_PLUGIN_KLASS) self.plugin = manager.NeutronManager.get_plugin() self.ctx = context.get_admin_context() cfg.CONF.set_override('allow_overlapping_ips', True) def _create_subnet_pool(self, plugin, ctx, name, prefix_list, min_prefixlen, ip_version, max_prefixlen=attributes.ATTR_NOT_SPECIFIED, default_prefixlen=attributes.ATTR_NOT_SPECIFIED, default_quota=attributes.ATTR_NOT_SPECIFIED, shared=False, is_default=False): subnetpool = {'subnetpool': {'name': name, 'tenant_id': self._tenant_id, 'prefixes': prefix_list, 'min_prefixlen': min_prefixlen, 'max_prefixlen': max_prefixlen, 'default_prefixlen': default_prefixlen, 'shared': shared, 'is_default': is_default, 'default_quota': default_quota}} return plugin.create_subnetpool(ctx, subnetpool) def _get_subnetpool(self, ctx, plugin, id): return plugin.get_subnetpool(ctx, id) def test_allocate_any_subnet(self): prefix_list = ['10.1.0.0/16', '192.168.1.0/24'] sp = self._create_subnet_pool(self.plugin, self.ctx, 'test-sp', prefix_list, 21, 4) sp = self.plugin._get_subnetpool(self.ctx, sp['id']) with self.ctx.session.begin(subtransactions=True): sa = subnet_alloc.SubnetAllocator(sp, self.ctx) req = ipam_req.AnySubnetRequest(self._tenant_id, uuidutils.generate_uuid(), constants.IPv4, 21) res = 
sa.allocate_subnet(req) detail = res.get_details() prefix_set = netaddr.IPSet(iterable=prefix_list) allocated_set = netaddr.IPSet(iterable=[detail.subnet_cidr]) self.assertTrue(allocated_set.issubset(prefix_set)) self.assertEqual(21, detail.prefixlen) def test_allocate_specific_subnet(self): sp = self._create_subnet_pool(self.plugin, self.ctx, 'test-sp', ['10.1.0.0/16', '192.168.1.0/24'], 21, 4) with self.ctx.session.begin(subtransactions=True): sp = self.plugin._get_subnetpool(self.ctx, sp['id']) sa = subnet_alloc.SubnetAllocator(sp, self.ctx) req = ipam_req.SpecificSubnetRequest(self._tenant_id, uuidutils.generate_uuid(), '10.1.2.0/24') res = sa.allocate_subnet(req) detail = res.get_details() sp = self._get_subnetpool(self.ctx, self.plugin, sp['id']) self.assertEqual('10.1.2.0/24', str(detail.subnet_cidr)) self.assertEqual(24, detail.prefixlen) def test_insufficient_prefix_space_for_any_allocation(self): sp = self._create_subnet_pool(self.plugin, self.ctx, 'test-sp', ['10.1.1.0/24', '192.168.1.0/24'], 21, 4) sp = self.plugin._get_subnetpool(self.ctx, sp['id']) sa = subnet_alloc.SubnetAllocator(sp, self.ctx) req = ipam_req.AnySubnetRequest(self._tenant_id, uuidutils.generate_uuid(), constants.IPv4, 21) self.assertRaises(n_exc.SubnetAllocationError, sa.allocate_subnet, req) def test_insufficient_prefix_space_for_specific_allocation(self): sp = self._create_subnet_pool(self.plugin, self.ctx, 'test-sp', ['10.1.0.0/24'], 21, 4) sp = self.plugin._get_subnetpool(self.ctx, sp['id']) sa = subnet_alloc.SubnetAllocator(sp, self.ctx) req = ipam_req.SpecificSubnetRequest(self._tenant_id, uuidutils.generate_uuid(), '10.1.0.0/21') self.assertRaises(n_exc.SubnetAllocationError, sa.allocate_subnet, req) def test_allocate_any_subnet_gateway(self): sp = self._create_subnet_pool(self.plugin, self.ctx, 'test-sp', ['10.1.0.0/16', '192.168.1.0/24'], 21, 4) sp = self.plugin._get_subnetpool(self.ctx, sp['id']) with self.ctx.session.begin(subtransactions=True): sa = subnet_alloc.SubnetAllocator(sp, self.ctx) req = ipam_req.AnySubnetRequest(self._tenant_id, uuidutils.generate_uuid(), constants.IPv4, 21) res = sa.allocate_subnet(req) detail = res.get_details() self.assertEqual(detail.gateway_ip, detail.subnet_cidr.network + 1) def test_allocate_specific_subnet_specific_gateway(self): sp = self._create_subnet_pool(self.plugin, self.ctx, 'test-sp', ['10.1.0.0/16', '192.168.1.0/24'], 21, 4) sp = self.plugin._get_subnetpool(self.ctx, sp['id']) with self.ctx.session.begin(subtransactions=True): sa = subnet_alloc.SubnetAllocator(sp, self.ctx) req = ipam_req.SpecificSubnetRequest(self._tenant_id, uuidutils.generate_uuid(), '10.1.2.0/24', gateway_ip='10.1.2.254') res = sa.allocate_subnet(req) detail = res.get_details() self.assertEqual(netaddr.IPAddress('10.1.2.254'), detail.gateway_ip) def test_allocate_specific_ipv6_subnet_specific_gateway(self): # Same scenario as described in bug #1466322 sp = self._create_subnet_pool(self.plugin, self.ctx, 'test-sp', ['2210::/64'], 64, 6) sp = self.plugin._get_subnetpool(self.ctx, sp['id']) with self.ctx.session.begin(subtransactions=True): sa = subnet_alloc.SubnetAllocator(sp, self.ctx) req = ipam_req.SpecificSubnetRequest(self._tenant_id, uuidutils.generate_uuid(), '2210::/64', '2210::ffff:ffff:ffff:ffff') res = sa.allocate_subnet(req) detail = res.get_details() self.assertEqual(netaddr.IPAddress('2210::ffff:ffff:ffff:ffff'), detail.gateway_ip) def test__allocation_value_for_tenant_no_allocations(self): sp = self._create_subnet_pool(self.plugin, self.ctx, 'test-sp', ['10.1.0.0/16', 
'192.168.1.0/24'], 21, 4) sa = subnet_alloc.SubnetAllocator(sp, self.ctx) value = sa._allocations_used_by_tenant(32) self.assertEqual(0, value) def test_subnetpool_default_quota_exceeded(self): sp = self._create_subnet_pool(self.plugin, self.ctx, 'test-sp', ['fe80::/48'], 48, 6, default_quota=1) sp = self.plugin._get_subnetpool(self.ctx, sp['id']) sa = subnet_alloc.SubnetAllocator(sp, self.ctx) req = ipam_req.SpecificSubnetRequest(self._tenant_id, uuidutils.generate_uuid(), 'fe80::/63') self.assertRaises(n_exc.SubnetPoolQuotaExceeded, sa.allocate_subnet, req) def test_subnetpool_concurrent_allocation_exception(self): sp = self._create_subnet_pool(self.plugin, self.ctx, 'test-sp', ['fe80::/48'], 48, 6, default_quota=1) sp = self.plugin._get_subnetpool(self.ctx, sp['id']) sa = subnet_alloc.SubnetAllocator(sp, self.ctx) req = ipam_req.SpecificSubnetRequest(self._tenant_id, uuidutils.generate_uuid(), 'fe80::/63') with mock.patch("sqlalchemy.orm.query.Query.update", return_value=0): self.assertRaises(db_exc.RetryRequest, sa.allocate_subnet, req) neutron-8.4.0/neutron/tests/unit/ipam/test_utils.py0000664000567000056710000000726713044372736023715 0ustar jenkinsjenkins00000000000000# Copyright (c) 2015 Mirantis, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
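# NOTE: an illustrative helper (not part of the original suite) that
# restates the no-gateway rule the generate_pools() tests below assert:
# IPv4 pools exclude the network and broadcast addresses, IPv6 pools
# exclude only the network address. The /31 and /32 special cases tested
# below are deliberately outside this sketch.
import netaddr as _netaddr_sketch

def _expected_single_pool(cidr):
    net = _netaddr_sketch.IPNetwork(cidr)
    last = net.last - 1 if net.version == 4 else net.last
    return [_netaddr_sketch.IPRange(
        _netaddr_sketch.IPAddress(net.first + 1, net.version),
        _netaddr_sketch.IPAddress(last, net.version))]

assert (_expected_single_pool('192.168.0.0/24') ==
        [_netaddr_sketch.IPRange('192.168.0.1', '192.168.0.254')])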
import netaddr

from neutron.ipam import utils
from neutron.tests import base


class TestIpamUtils(base.BaseTestCase):

    def test_check_subnet_ip_v4_network(self):
        self.assertFalse(utils.check_subnet_ip('1.1.1.0/24', '1.1.1.0'))

    def test_check_subnet_ip_v4_broadcast(self):
        self.assertFalse(utils.check_subnet_ip('1.1.1.0/24', '1.1.1.255'))

    def test_check_subnet_ip_v4_valid(self):
        self.assertTrue(utils.check_subnet_ip('1.1.1.0/24', '1.1.1.1'))
        self.assertTrue(utils.check_subnet_ip('1.1.1.0/24', '1.1.1.254'))

    def test_check_subnet_ip_v6_network(self):
        self.assertFalse(utils.check_subnet_ip('F111::0/64', 'F111::0'))

    def test_check_subnet_ip_v6_valid(self):
        self.assertTrue(utils.check_subnet_ip('F111::0/64', 'F111::1'))
        self.assertTrue(utils.check_subnet_ip('F111::0/64',
                                              'F111::FFFF:FFFF:FFFF:FFFF'))

    def test_generate_pools_v4_nogateway(self):
        cidr = '192.168.0.0/24'
        expected = [netaddr.IPRange('192.168.0.1', '192.168.0.254')]
        self.assertEqual(expected, utils.generate_pools(cidr, None))

    def test_generate_pools_v4_gateway_first(self):
        cidr = '192.168.0.0/24'
        gateway = '192.168.0.1'
        expected = [netaddr.IPRange('192.168.0.2', '192.168.0.254')]
        self.assertEqual(expected, utils.generate_pools(cidr, gateway))

    def test_generate_pools_v4_gateway_last(self):
        cidr = '192.168.0.0/24'
        gateway = '192.168.0.254'
        expected = [netaddr.IPRange('192.168.0.1', '192.168.0.253')]
        self.assertEqual(expected, utils.generate_pools(cidr, gateway))

    def test_generate_pools_v4_32(self):
        # 32 is special because it should have 1 usable address
        cidr = '192.168.0.0/32'
        expected = [netaddr.IPRange('192.168.0.0', '192.168.0.0')]
        self.assertEqual(expected, utils.generate_pools(cidr, None))

    def test_generate_pools_v4_31(self):
        cidr = '192.168.0.0/31'
        expected = []
        self.assertEqual(expected, utils.generate_pools(cidr, None))

    def test_generate_pools_v4_gateway_middle(self):
        cidr = '192.168.0.0/24'
        gateway = '192.168.0.128'
        expected = [netaddr.IPRange('192.168.0.1', '192.168.0.127'),
                    netaddr.IPRange('192.168.0.129', '192.168.0.254')]
        self.assertEqual(expected, utils.generate_pools(cidr, gateway))

    def test_generate_pools_v6_nogateway(self):
        # other than the difference in the last address, the rest of the
        # logic is the same as v4 so we only need one test
        cidr = 'F111::0/64'
        expected = [netaddr.IPRange('F111::1', 'F111::FFFF:FFFF:FFFF:FFFF')]
        self.assertEqual(expected, utils.generate_pools(cidr, None))

    def test_generate_pools_v6_empty(self):
        # We want to be sure the range will begin and end with an IPv6
        # address, even if an ambiguous ::/64 cidr is given.
        cidr = '::/64'
        expected = [netaddr.IPRange('::1', '::FFFF:FFFF:FFFF:FFFF')]
        self.assertEqual(expected, utils.generate_pools(cidr, None))
neutron-8.4.0/neutron/tests/unit/ipam/fake_driver.py0000664000567000056710000000206213044372760023760 0ustar jenkinsjenkins00000000000000# Copyright (c) 2015 Infoblox Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
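# NOTE: an illustrative usage sketch, not part of the original file. A
# test could drive this stub the same way the reference driver is built
# elsewhere in the suite; the argument values below are assumptions:
#
#     pool = FakeDriver(None, ctx)            # (subnetpool, context)
#     ipam_subnet = pool.allocate_subnet(subnet_request)
#     pool.remove_subnet('10.0.0.0/24')
#
# Each call just hands back an opaque driver.Subnet, which is all the
# Pool interface promises.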
from neutron.ipam import driver class FakeDriver(driver.Pool): """Fake IPAM driver for tests only Just implement IPAM Driver interface without any functionality inside """ def allocate_subnet(self, subnet): return driver.Subnet() def get_subnet(self, cidr): return driver.Subnet() def update_subnet(self, request): return driver.Subnet() def remove_subnet(self, cidr): pass neutron-8.4.0/neutron/tests/unit/ipam/drivers/0000775000567000056710000000000013044373210022572 5ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/tests/unit/ipam/drivers/__init__.py0000664000567000056710000000000013044372736024705 0ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/tests/unit/ipam/drivers/neutrondb_ipam/0000775000567000056710000000000013044373210025600 5ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/tests/unit/ipam/drivers/neutrondb_ipam/__init__.py0000664000567000056710000000000013044372736027713 0ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/tests/unit/ipam/drivers/neutrondb_ipam/test_driver.py0000664000567000056710000005466113044372760030531 0ustar jenkinsjenkins00000000000000# Copyright 2015 OpenStack Foundation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import mock import netaddr from neutron.api.v2 import attributes from neutron.common import constants from neutron.common import exceptions as n_exc from neutron import context from neutron.db import api as ndb_api from neutron.ipam.drivers.neutrondb_ipam import db_models from neutron.ipam.drivers.neutrondb_ipam import driver from neutron.ipam import exceptions as ipam_exc from neutron.ipam import requests as ipam_req from neutron import manager from neutron.tests.unit.db import test_db_base_plugin_v2 as test_db_plugin from neutron.tests.unit import testlib_api def convert_firstip_to_ipaddress(range_item): return netaddr.IPAddress(range_item['first_ip']) class TestNeutronDbIpamMixin(object): def _create_network(self, plugin, ctx, shared=False): network = {'network': {'name': 'net', 'shared': shared, 'admin_state_up': True, 'tenant_id': self._tenant_id}} created_network = plugin.create_network(ctx, network) return (created_network, created_network['id']) def _create_subnet(self, plugin, ctx, network_id, cidr, ip_version=4, v6_address_mode=attributes.ATTR_NOT_SPECIFIED, allocation_pools=attributes.ATTR_NOT_SPECIFIED): subnet = {'subnet': {'name': 'sub', 'cidr': cidr, 'ip_version': ip_version, 'gateway_ip': attributes.ATTR_NOT_SPECIFIED, 'allocation_pools': allocation_pools, 'enable_dhcp': True, 'dns_nameservers': attributes.ATTR_NOT_SPECIFIED, 'host_routes': attributes.ATTR_NOT_SPECIFIED, 'ipv6_address_mode': v6_address_mode, 'ipv6_ra_mode': attributes.ATTR_NOT_SPECIFIED, 'network_id': network_id, 'tenant_id': self._tenant_id}} return plugin.create_subnet(ctx, subnet) class TestNeutronDbIpamPool(testlib_api.SqlTestCase, TestNeutronDbIpamMixin): """Test case for the Neutron's DB IPAM driver subnet pool interface.""" def setUp(self): super(TestNeutronDbIpamPool, self).setUp() self._tenant_id = 'test-tenant' # 
Configure plugin for tests self.setup_coreplugin(test_db_plugin.DB_PLUGIN_KLASS) # Prepare environment for tests self.plugin = manager.NeutronManager.get_plugin() self.ctx = context.get_admin_context() self.network, self.net_id = self._create_network(self.plugin, self.ctx) # Allocate IPAM driver self.ipam_pool = driver.NeutronDbPool(None, self.ctx) def _verify_ipam_subnet_details(self, ipam_subnet, cidr=None, tenant_id=None, gateway_ip=None, allocation_pools=None): ipam_subnet_details = ipam_subnet.get_details() gateway_ip_address = None cidr_ip_network = None if gateway_ip: gateway_ip_address = netaddr.IPAddress(gateway_ip) if cidr: cidr_ip_network = netaddr.IPNetwork(cidr) self.assertEqual(tenant_id, ipam_subnet_details.tenant_id) self.assertEqual(gateway_ip_address, ipam_subnet_details.gateway_ip) self.assertEqual(cidr_ip_network, ipam_subnet_details.subnet_cidr) self.assertEqual(allocation_pools, ipam_subnet_details.allocation_pools) def test_allocate_ipam_subnet_no_neutron_subnet_id(self): cidr = '10.0.0.0/24' allocation_pools = [netaddr.IPRange('10.0.0.100', '10.0.0.150'), netaddr.IPRange('10.0.0.200', '10.0.0.250')] subnet_req = ipam_req.SpecificSubnetRequest( self._tenant_id, None, cidr, allocation_pools=allocation_pools, gateway_ip='10.0.0.101') ipam_subnet = self.ipam_pool.allocate_subnet(subnet_req) self._verify_ipam_subnet_details(ipam_subnet, cidr, self._tenant_id, '10.0.0.101', allocation_pools) def _prepare_specific_subnet_request(self, cidr): subnet = self._create_subnet( self.plugin, self.ctx, self.net_id, cidr) subnet_req = ipam_req.SpecificSubnetRequest( self._tenant_id, subnet['id'], cidr, gateway_ip=subnet['gateway_ip']) return subnet, subnet_req def test_allocate_ipam_subnet_with_neutron_subnet_id(self): cidr = '10.0.0.0/24' subnet, subnet_req = self._prepare_specific_subnet_request(cidr) ipam_subnet = self.ipam_pool.allocate_subnet(subnet_req) self._verify_ipam_subnet_details( ipam_subnet, cidr, self._tenant_id, subnet['gateway_ip'], [netaddr.IPRange('10.0.0.2', '10.0.0.254')]) def test_allocate_any_subnet_fails(self): self.assertRaises( ipam_exc.InvalidSubnetRequestType, self.ipam_pool.allocate_subnet, ipam_req.AnySubnetRequest(self._tenant_id, 'meh', constants.IPv4, 24)) def test_update_subnet_pools(self): cidr = '10.0.0.0/24' subnet, subnet_req = self._prepare_specific_subnet_request(cidr) self.ipam_pool.allocate_subnet(subnet_req) allocation_pools = [netaddr.IPRange('10.0.0.100', '10.0.0.150'), netaddr.IPRange('10.0.0.200', '10.0.0.250')] update_subnet_req = ipam_req.SpecificSubnetRequest( self._tenant_id, subnet['id'], cidr, gateway_ip=subnet['gateway_ip'], allocation_pools=allocation_pools) ipam_subnet = self.ipam_pool.update_subnet(update_subnet_req) self._verify_ipam_subnet_details( ipam_subnet, cidr, self._tenant_id, subnet['gateway_ip'], allocation_pools) def test_get_subnet(self): cidr = '10.0.0.0/24' subnet, subnet_req = self._prepare_specific_subnet_request(cidr) self.ipam_pool.allocate_subnet(subnet_req) # Retrieve the subnet ipam_subnet = self.ipam_pool.get_subnet(subnet['id']) self._verify_ipam_subnet_details( ipam_subnet, cidr, self._tenant_id, subnet['gateway_ip'], [netaddr.IPRange('10.0.0.2', '10.0.0.254')]) def test_get_non_existing_subnet_fails(self): self.assertRaises(n_exc.SubnetNotFound, self.ipam_pool.get_subnet, 'boo') def test_remove_ipam_subnet(self): cidr = '10.0.0.0/24' subnet, subnet_req = self._prepare_specific_subnet_request(cidr) self.ipam_pool.allocate_subnet(subnet_req) # Remove ipam subnet by neutron subnet id 
self.ipam_pool.remove_subnet(subnet['id']) def test_remove_non_existent_subnet_fails(self): self.assertRaises(n_exc.SubnetNotFound, self.ipam_pool.remove_subnet, 'non-existent-id') def test_get_details_for_invalid_subnet_id_fails(self): cidr = '10.0.0.0/24' subnet_req = ipam_req.SpecificSubnetRequest( self._tenant_id, 'non-existent-id', cidr) self.ipam_pool.allocate_subnet(subnet_req) # Neutron subnet does not exist, so get_subnet should fail self.assertRaises(n_exc.SubnetNotFound, self.ipam_pool.get_subnet, 'non-existent-id') class TestNeutronDbIpamSubnet(testlib_api.SqlTestCase, TestNeutronDbIpamMixin): """Test case for Subnet interface for Neutron's DB IPAM driver. This test case exercises the reference IPAM driver. Even if it loads a plugin, the unit tests in this class do not exercise it at all; they simply perform white box testing on the IPAM driver. The plugin is exclusively used to create the neutron objects on which the IPAM driver will operate. """ def _create_and_allocate_ipam_subnet( self, cidr, allocation_pools=attributes.ATTR_NOT_SPECIFIED, ip_version=4, v6_auto_address=False, tenant_id=None): v6_address_mode = attributes.ATTR_NOT_SPECIFIED if v6_auto_address: # set ip version to 6 regardless of what's been passed to the # method ip_version = 6 v6_address_mode = constants.IPV6_SLAAC subnet = self._create_subnet( self.plugin, self.ctx, self.net_id, cidr, ip_version=ip_version, allocation_pools=allocation_pools, v6_address_mode=v6_address_mode) # Build netaddr.IPRanges from allocation pools since IPAM SubnetRequest # objects are strongly typed allocation_pool_ranges = [netaddr.IPRange( pool['start'], pool['end']) for pool in subnet['allocation_pools']] subnet_req = ipam_req.SpecificSubnetRequest( tenant_id, subnet['id'], cidr, gateway_ip=subnet['gateway_ip'], allocation_pools=allocation_pool_ranges) ipam_subnet = self.ipam_pool.allocate_subnet(subnet_req) return ipam_subnet, subnet def setUp(self): super(TestNeutronDbIpamSubnet, self).setUp() self._tenant_id = 'test-tenant' # Configure plugin for tests self.setup_coreplugin(test_db_plugin.DB_PLUGIN_KLASS) # Prepare environment for tests self.plugin = manager.NeutronManager.get_plugin() self.ctx = context.get_admin_context() self.network, self.net_id = self._create_network(self.plugin, self.ctx) # Allocate IPAM driver self.ipam_pool = driver.NeutronDbPool(None, self.ctx) def test__verify_ip_succeeds(self): cidr = '10.0.0.0/24' ipam_subnet = self._create_and_allocate_ipam_subnet(cidr)[0] ipam_subnet._verify_ip(self.ctx.session, '10.0.0.2') def test__verify_ip_not_in_subnet_fails(self): cidr = '10.0.0.0/24' ipam_subnet = self._create_and_allocate_ipam_subnet(cidr)[0] self.assertRaises(ipam_exc.InvalidIpForSubnet, ipam_subnet._verify_ip, self.ctx.session, '192.168.0.2') def test__verify_ip_bcast_and_network_fail(self): cidr = '10.0.0.0/24' ipam_subnet = self._create_and_allocate_ipam_subnet(cidr)[0] self.assertRaises(ipam_exc.InvalidIpForSubnet, ipam_subnet._verify_ip, self.ctx.session, '10.0.0.255') self.assertRaises(ipam_exc.InvalidIpForSubnet, ipam_subnet._verify_ip, self.ctx.session, '10.0.0.0') def test__allocate_specific_ip(self): cidr = '10.0.0.0/24' ipam_subnet = self._create_and_allocate_ipam_subnet(cidr)[0] with self.ctx.session.begin(): ranges = ipam_subnet._allocate_specific_ip( self.ctx.session, '10.0.0.33') self.assertEqual(2, len(ranges)) # 10.0.0.1 should be allocated for gateway ip ranges.sort(key=convert_firstip_to_ipaddress) self.assertEqual('10.0.0.2', ranges[0]['first_ip']) self.assertEqual('10.0.0.32', 
ranges[0]['last_ip']) self.assertEqual('10.0.0.34', ranges[1]['first_ip']) self.assertEqual('10.0.0.254', ranges[1]['last_ip']) # Limit test - first address in range ranges = ipam_subnet._allocate_specific_ip( self.ctx.session, '10.0.0.2') self.assertEqual(2, len(ranges)) ranges.sort(key=convert_firstip_to_ipaddress) self.assertEqual('10.0.0.3', ranges[0]['first_ip']) self.assertEqual('10.0.0.32', ranges[0]['last_ip']) self.assertEqual('10.0.0.34', ranges[1]['first_ip']) self.assertEqual('10.0.0.254', ranges[1]['last_ip']) # Limit test - last address in range ranges = ipam_subnet._allocate_specific_ip( self.ctx.session, '10.0.0.254') self.assertEqual(2, len(ranges)) ranges.sort(key=convert_firstip_to_ipaddress) self.assertEqual('10.0.0.3', ranges[0]['first_ip']) self.assertEqual('10.0.0.32', ranges[0]['last_ip']) self.assertEqual('10.0.0.34', ranges[1]['first_ip']) self.assertEqual('10.0.0.253', ranges[1]['last_ip']) def test__allocate_specific_ips_multiple_ranges(self): cidr = '10.0.0.0/24' ipam_subnet = self._create_and_allocate_ipam_subnet( cidr, allocation_pools=[{'start': '10.0.0.10', 'end': '10.0.0.19'}, {'start': '10.0.0.30', 'end': '10.0.0.39'}])[0] with self.ctx.session.begin(): ranges = ipam_subnet._allocate_specific_ip( self.ctx.session, '10.0.0.33') self.assertEqual(3, len(ranges)) # 10.0.0.1 should be allocated for gateway ip ranges.sort(key=convert_firstip_to_ipaddress) self.assertEqual('10.0.0.10', ranges[0]['first_ip']) self.assertEqual('10.0.0.19', ranges[0]['last_ip']) self.assertEqual('10.0.0.30', ranges[1]['first_ip']) self.assertEqual('10.0.0.32', ranges[1]['last_ip']) self.assertEqual('10.0.0.34', ranges[2]['first_ip']) self.assertEqual('10.0.0.39', ranges[2]['last_ip']) def test__allocate_specific_ip_out_of_range(self): cidr = '10.0.0.0/24' subnet = self._create_subnet( self.plugin, self.ctx, self.net_id, cidr) subnet_req = ipam_req.SpecificSubnetRequest( 'tenant_id', subnet['id'], cidr, gateway_ip=subnet['gateway_ip']) ipam_subnet = self.ipam_pool.allocate_subnet(subnet_req) with self.ctx.session.begin(): ranges = ipam_subnet._allocate_specific_ip( self.ctx.session, '192.168.0.1') # In this case _allocate_specific_ips does not fail, but # simply does not update availability ranges at all self.assertEqual(1, len(ranges)) # 10.0.0.1 should be allocated for gateway ip ranges.sort(key=convert_firstip_to_ipaddress) self.assertEqual('10.0.0.2', ranges[0]['first_ip']) self.assertEqual('10.0.0.254', ranges[0]['last_ip']) def _allocate_address(self, cidr, ip_version, address_request): ipam_subnet = self._create_and_allocate_ipam_subnet( cidr, ip_version=ip_version)[0] return ipam_subnet.allocate(address_request) def test_allocate_any_v4_address_succeeds(self): ip_address = self._allocate_address( '10.0.0.0/24', 4, ipam_req.AnyAddressRequest) # As the DB IPAM driver allocation logic is strictly sequential, we can # expect this test to allocate the .2 address as .1 is used by default # as subnet gateway self.assertEqual('10.0.0.2', ip_address) def test_allocate_any_v6_address_succeeds(self): ip_address = self._allocate_address( 'fde3:abcd:4321:1::/64', 6, ipam_req.AnyAddressRequest) # As the DB IPAM driver allocation logic is strictly sequential, we can # expect this test to allocate the .2 address as .1 is used by default # as subnet gateway self.assertEqual('fde3:abcd:4321:1::2', ip_address) def test_allocate_specific_v4_address_succeeds(self): ip_address = self._allocate_address( '10.0.0.0/24', 4, ipam_req.SpecificAddressRequest('10.0.0.33')) self.assertEqual('10.0.0.33', 
ip_address) def test_allocate_specific_v6_address_succeeds(self): ip_address = self._allocate_address( 'fde3:abcd:4321:1::/64', 6, ipam_req.SpecificAddressRequest('fde3:abcd:4321:1::33')) self.assertEqual('fde3:abcd:4321:1::33', ip_address) def test_allocate_specific_v4_address_out_of_range_fails(self): self.assertRaises(ipam_exc.InvalidIpForSubnet, self._allocate_address, '10.0.0.0/24', 4, ipam_req.SpecificAddressRequest('192.168.0.1')) def test_allocate_specific_v6_address_out_of_range_fails(self): self.assertRaises(ipam_exc.InvalidIpForSubnet, self._allocate_address, 'fde3:abcd:4321:1::/64', 6, ipam_req.SpecificAddressRequest( 'fde3:abcd:eeee:1::33')) def test_allocate_specific_address_in_use_fails(self): ipam_subnet = self._create_and_allocate_ipam_subnet( 'fde3:abcd:4321:1::/64', ip_version=6)[0] addr_req = ipam_req.SpecificAddressRequest('fde3:abcd:4321:1::33') ipam_subnet.allocate(addr_req) self.assertRaises(ipam_exc.IpAddressAlreadyAllocated, ipam_subnet.allocate, addr_req) def test_allocate_any_address_exhausted_pools_fails(self): # Same as above, the ranges will be recalculated always ipam_subnet = self._create_and_allocate_ipam_subnet( '192.168.0.0/30', ip_version=4)[0] ipam_subnet.allocate(ipam_req.AnyAddressRequest) # The second address generation request on a /30 for v4 net must fail self.assertRaises(ipam_exc.IpAddressGenerationFailure, ipam_subnet.allocate, ipam_req.AnyAddressRequest) def _test_deallocate_address(self, cidr, ip_version): ipam_subnet = self._create_and_allocate_ipam_subnet( cidr, ip_version=ip_version)[0] ip_address = ipam_subnet.allocate(ipam_req.AnyAddressRequest) ipam_subnet.deallocate(ip_address) def test_deallocate_v4_address(self): self._test_deallocate_address('10.0.0.0/24', 4) def test_deallocate_v6_address(self): # This test does not really exercise any different code path wrt # test_deallocate_v4_address. It is provided for completeness and for # future proofing in case v6-specific logic will be added. self._test_deallocate_address('fde3:abcd:4321:1::/64', 6) def test_allocate_unallocated_address_fails(self): ipam_subnet = self._create_and_allocate_ipam_subnet( '10.0.0.0/24', ip_version=4)[0] self.assertRaises(ipam_exc.IpAddressAllocationNotFound, ipam_subnet.deallocate, '10.0.0.2') def test_allocate_all_pool_addresses_triggers_range_recalculation(self): # This test instead might be made to pass, but for the wrong reasons! pass def test_allocate_subnet_for_non_existent_subnet_pass(self): # This test should pass because ipam subnet is no longer # have foreign key relationship with neutron subnet. # Creating ipam subnet before neutron subnet is a valid case. 
subnet_req = ipam_req.SpecificSubnetRequest( 'tenant_id', 'meh', '192.168.0.0/24') self.ipam_pool.allocate_subnet(subnet_req) def test__allocate_specific_ip_raises_exception(self): cidr = '10.0.0.0/24' ip = '10.0.0.15' ipam_subnet = self._create_and_allocate_ipam_subnet(cidr)[0] ipam_subnet.subnet_manager = mock.Mock() ipam_subnet.subnet_manager.list_ranges_by_subnet_id.return_value = [{ 'first_ip': '10.0.0.15', 'last_ip': '10.0.0.15'}] ipam_subnet.subnet_manager.delete_range.return_value = 0 @ndb_api.retry_db_errors def go(): ipam_subnet._allocate_specific_ip(self.ctx.session, ip) self.assertRaises(ipam_exc.IPAllocationFailed, go) def test_update_allocation_pools_with_no_pool_change(self): cidr = '10.0.0.0/24' ipam_subnet = self._create_and_allocate_ipam_subnet( cidr)[0] ipam_subnet.subnet_manager.delete_allocation_pools = mock.Mock() ipam_subnet.create_allocation_pools = mock.Mock() alloc_pools = [netaddr.IPRange('10.0.0.2', '10.0.0.254')] # Make sure allocation pools recreation does not happen in case of # unchanged allocation pools ipam_subnet.update_allocation_pools(alloc_pools, cidr) self.assertFalse( ipam_subnet.subnet_manager.delete_allocation_pools.called) self.assertFalse(ipam_subnet.create_allocation_pools.called) def _test__no_pool_changes(self, new_pools): id = 'some-id' ipam_subnet = driver.NeutronDbSubnet(id, self.ctx) pools = [db_models.IpamAllocationPool(ipam_subnet_id=id, first_ip='192.168.10.20', last_ip='192.168.10.41'), db_models.IpamAllocationPool(ipam_subnet_id=id, first_ip='192.168.10.50', last_ip='192.168.10.60')] ipam_subnet.subnet_manager.list_pools = mock.Mock(return_value=pools) return ipam_subnet._no_pool_changes(self.ctx.session, new_pools) def test__no_pool_changes_negative(self): pool_list = [[netaddr.IPRange('192.168.10.2', '192.168.10.254')], [netaddr.IPRange('192.168.10.20', '192.168.10.41')], [netaddr.IPRange('192.168.10.20', '192.168.10.41'), netaddr.IPRange('192.168.10.51', '192.168.10.60')]] for pools in pool_list: self.assertFalse(self._test__no_pool_changes(pools)) def test__no_pool_changes_positive(self): pools = [netaddr.IPRange('192.168.10.20', '192.168.10.41'), netaddr.IPRange('192.168.10.50', '192.168.10.60')] self.assertTrue(self._test__no_pool_changes(pools)) neutron-8.4.0/neutron/tests/unit/ipam/drivers/neutrondb_ipam/test_db_api.py0000664000567000056710000002213213044372760030440 0ustar jenkinsjenkins00000000000000# Copyright 2015 OpenStack Foundation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
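# NOTE: an illustrative sketch (not part of the original suite) of the
# availability-range bookkeeping exercised below: allocating one address
# out of a free range behaves like removing it from an IPSet, leaving at
# most two smaller ranges on either side of the allocation.
import netaddr as _netaddr_sketch

_free = _netaddr_sketch.IPSet(_netaddr_sketch.IPRange('1.2.3.4', '1.2.3.10'))
_free.remove('1.2.3.6')
assert (sorted(str(r) for r in _free.iter_ipranges()) ==
        ['1.2.3.4-1.2.3.5', '1.2.3.7-1.2.3.10'])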
import mock
from oslo_utils import uuidutils
from sqlalchemy.orm import exc as orm_exc

from neutron import context
from neutron.db import api as ndb_api
from neutron.ipam.drivers.neutrondb_ipam import db_api
from neutron.ipam.drivers.neutrondb_ipam import db_models
from neutron.ipam import exceptions as ipam_exc
from neutron.tests.unit import testlib_api


class TestIpamSubnetManager(testlib_api.SqlTestCase):
    """Test case for SubnetManager DB helper class"""

    def setUp(self):
        super(TestIpamSubnetManager, self).setUp()
        self.ctx = context.get_admin_context()
        self.neutron_subnet_id = uuidutils.generate_uuid()
        self.ipam_subnet_id = uuidutils.generate_uuid()
        self.subnet_ip = '1.2.3.4'
        self.single_pool = ('1.2.3.4', '1.2.3.10')
        self.multi_pool = (('1.2.3.2', '1.2.3.12'), ('1.2.3.15', '1.2.3.24'))
        self.subnet_manager = db_api.IpamSubnetManager(self.ipam_subnet_id,
                                                       self.neutron_subnet_id)
        self.subnet_manager_id = self.subnet_manager.create(self.ctx.session)
        self.ctx.session.flush()

    def test_create(self):
        self.assertEqual(self.ipam_subnet_id, self.subnet_manager_id)
        subnets = self.ctx.session.query(db_models.IpamSubnet).filter_by(
            id=self.ipam_subnet_id).all()
        self.assertEqual(1, len(subnets))

    def test_remove(self):
        count = db_api.IpamSubnetManager.delete(self.ctx.session,
                                                self.neutron_subnet_id)
        self.assertEqual(1, count)
        subnets = self.ctx.session.query(db_models.IpamSubnet).filter_by(
            id=self.ipam_subnet_id).all()
        self.assertEqual(0, len(subnets))

    def test_remove_non_existent_subnet(self):
        count = db_api.IpamSubnetManager.delete(self.ctx.session,
                                                'non-existent')
        self.assertEqual(0, count)

    def _create_pools(self, pools):
        db_pools = []
        for pool in pools:
            db_pool = self.subnet_manager.create_pool(self.ctx.session,
                                                      pool[0],
                                                      pool[1])
            db_pools.append(db_pool)
        return db_pools

    def _validate_ips(self, pools, db_pool):
        self.assertTrue(
            any(pool == (db_pool.first_ip, db_pool.last_ip)
                for pool in pools))

    def test_create_pool(self):
        db_pools = self._create_pools([self.single_pool])

        ipam_pool = self.ctx.session.query(db_models.IpamAllocationPool).\
            filter_by(ipam_subnet_id=self.ipam_subnet_id).first()
        self._validate_ips([self.single_pool], ipam_pool)

        db_range = self.ctx.session.query(db_models.IpamAvailabilityRange).\
            filter_by(allocation_pool_id=db_pools[0].id).first()
        self._validate_ips([self.single_pool], db_range)

    def test_get_first_range(self):
        self._create_pools(self.multi_pool)
        db_range = self.subnet_manager.get_first_range(self.ctx.session)
        self._validate_ips(self.multi_pool, db_range)

    def test_list_ranges_by_subnet_id(self):
        self._create_pools(self.multi_pool)
        db_ranges = self.subnet_manager.list_ranges_by_subnet_id(
            self.ctx.session).all()
        self.assertEqual(2, len(db_ranges))
        self.assertEqual(db_models.IpamAvailabilityRange, type(db_ranges[0]))

    def test_list_ranges_by_allocation_pool(self):
        db_pools = self._create_pools([self.single_pool])
        # generate ids for allocation pools on flush
        self.ctx.session.flush()
        db_ranges = self.subnet_manager.list_ranges_by_allocation_pool(
            self.ctx.session, db_pools[0].id).all()
        self.assertEqual(1, len(db_ranges))
        self.assertEqual(db_models.IpamAvailabilityRange, type(db_ranges[0]))
        self._validate_ips([self.single_pool], db_ranges[0])

    def test_create_range(self):
        self._create_pools([self.single_pool])
        pool = self.ctx.session.query(db_models.IpamAllocationPool).\
            filter_by(ipam_subnet_id=self.ipam_subnet_id).first()
        self._validate_ips([self.single_pool], pool)
        allocation_pool_id = pool.id
        # delete the range
        db_range = self.subnet_manager.list_ranges_by_allocation_pool(
self.ctx.session, pool.id).first() self._validate_ips([self.single_pool], db_range) self.ctx.session.delete(db_range) # create a new range range_start = '1.2.3.5' range_end = '1.2.3.9' new_range = self.subnet_manager.create_range(self.ctx.session, allocation_pool_id, range_start, range_end) self.assertEqual(range_start, new_range.first_ip) self.assertEqual(range_end, new_range.last_ip) def test_update_range(self): self._create_pools([self.single_pool]) db_range = self.subnet_manager.get_first_range(self.ctx.session) updated_count = self.subnet_manager.update_range(self.ctx.session, db_range, first_ip='1.2.3.6', last_ip='1.2.3.8') self.assertEqual(1, updated_count) def test_update_range_no_new_values(self): self._create_pools([self.single_pool]) db_range = self.subnet_manager.get_first_range(self.ctx.session) self.assertRaises(ipam_exc.IpamAvailabilityRangeNoChanges, self.subnet_manager.update_range, self.ctx.session, db_range) def test_update_range_reraise_error(self): session = mock.Mock() session.query.side_effect = orm_exc.ObjectDeletedError(None, None) @ndb_api.retry_db_errors def go(): self.subnet_manager.update_range(session, mock.Mock(), first_ip='1.2.3.5') self.assertRaises(ipam_exc.IPAllocationFailed, go) def test_delete_range(self): self._create_pools([self.single_pool]) db_range = self.subnet_manager.get_first_range(self.ctx.session) deleted_count = self.subnet_manager.delete_range(self.ctx.session, db_range) self.assertEqual(1, deleted_count) def test_delete_range_reraise_error(self): session = mock.Mock() session.query.side_effect = orm_exc.ObjectDeletedError(None, None) @ndb_api.retry_db_errors def go(): self.subnet_manager.delete_range(session, mock.Mock()) self.assertRaises(ipam_exc.IPAllocationFailed, go) def test_check_unique_allocation(self): self.assertTrue(self.subnet_manager.check_unique_allocation( self.ctx.session, self.subnet_ip)) def test_check_unique_allocation_negative(self): self.subnet_manager.create_allocation(self.ctx.session, self.subnet_ip) self.assertFalse(self.subnet_manager.check_unique_allocation( self.ctx.session, self.subnet_ip)) def test_list_allocations(self): ips = ['1.2.3.4', '1.2.3.6', '1.2.3.7'] for ip in ips: self.subnet_manager.create_allocation(self.ctx.session, ip) allocs = self.subnet_manager.list_allocations(self.ctx.session).all() self.assertEqual(len(ips), len(allocs)) for allocation in allocs: self.assertIn(allocation.ip_address, ips) def _test_create_allocation(self): self.subnet_manager.create_allocation(self.ctx.session, self.subnet_ip) alloc = self.ctx.session.query(db_models.IpamAllocation).filter_by( ipam_subnet_id=self.ipam_subnet_id).all() self.assertEqual(1, len(alloc)) self.assertEqual(self.subnet_ip, alloc[0].ip_address) return alloc def test_create_allocation(self): self._test_create_allocation() def test_delete_allocation(self): allocs = self._test_create_allocation() self.subnet_manager.delete_allocation(self.ctx.session, allocs[0].ip_address) allocs = self.ctx.session.query(db_models.IpamAllocation).filter_by( ipam_subnet_id=self.ipam_subnet_id).all() self.assertEqual(0, len(allocs)) neutron-8.4.0/neutron/tests/unit/ipam/test_requests.py0000664000567000056710000004061013044372760024412 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import mock import netaddr from oslo_config import cfg from oslo_utils import uuidutils from neutron.common import constants from neutron.common import ipv6_utils from neutron import context from neutron.ipam import driver from neutron.ipam import exceptions as ipam_exc from neutron.ipam import requests as ipam_req from neutron import manager from neutron.tests import base from neutron.tests.unit.ipam import fake_driver FAKE_IPAM_CLASS = 'neutron.tests.unit.ipam.fake_driver.FakeDriver' class IpamSubnetRequestTestCase(base.BaseTestCase): def setUp(self): super(IpamSubnetRequestTestCase, self).setUp() self.tenant_id = uuidutils.generate_uuid() self.subnet_id = uuidutils.generate_uuid() class TestIpamSubnetRequests(IpamSubnetRequestTestCase): def test_subnet_request(self): pool = ipam_req.SubnetRequest(self.tenant_id, self.subnet_id) self.assertEqual(self.tenant_id, pool.tenant_id) self.assertEqual(self.subnet_id, pool.subnet_id) self.assertIsNone(pool.gateway_ip) self.assertIsNone(pool.allocation_pools) def test_subnet_request_gateway(self): request = ipam_req.SubnetRequest(self.tenant_id, self.subnet_id, gateway_ip='1.2.3.1') self.assertEqual('1.2.3.1', str(request.gateway_ip)) def test_subnet_request_bad_gateway(self): self.assertRaises(netaddr.core.AddrFormatError, ipam_req.SubnetRequest, self.tenant_id, self.subnet_id, gateway_ip='1.2.3.') def test_subnet_request_with_range(self): allocation_pools = [netaddr.IPRange('1.2.3.4', '1.2.3.5'), netaddr.IPRange('1.2.3.7', '1.2.3.9')] request = ipam_req.SubnetRequest(self.tenant_id, self.subnet_id, allocation_pools=allocation_pools) self.assertEqual(allocation_pools, request.allocation_pools) def test_subnet_request_range_not_list(self): self.assertRaises(TypeError, ipam_req.SubnetRequest, self.tenant_id, self.subnet_id, allocation_pools=1) def test_subnet_request_bad_range(self): self.assertRaises(TypeError, ipam_req.SubnetRequest, self.tenant_id, self.subnet_id, allocation_pools=['1.2.3.4']) def test_subnet_request_different_versions(self): pools = [netaddr.IPRange('0.0.0.1', '0.0.0.2'), netaddr.IPRange('::1', '::2')] self.assertRaises(ValueError, ipam_req.SubnetRequest, self.tenant_id, self.subnet_id, allocation_pools=pools) def test_subnet_request_overlap(self): pools = [netaddr.IPRange('0.0.0.10', '0.0.0.20'), netaddr.IPRange('0.0.0.8', '0.0.0.10')] self.assertRaises(ValueError, ipam_req.SubnetRequest, self.tenant_id, self.subnet_id, allocation_pools=pools) class TestIpamAnySubnetRequest(IpamSubnetRequestTestCase): def test_subnet_request(self): request = ipam_req.AnySubnetRequest(self.tenant_id, self.subnet_id, constants.IPv4, 24, gateway_ip='0.0.0.1') self.assertEqual(24, request.prefixlen) def test_subnet_request_bad_prefix_type(self): self.assertRaises(netaddr.core.AddrFormatError, ipam_req.AnySubnetRequest, self.tenant_id, self.subnet_id, constants.IPv4, 'A') def test_subnet_request_bad_prefix(self): self.assertRaises(netaddr.core.AddrFormatError, ipam_req.AnySubnetRequest, self.tenant_id, self.subnet_id, constants.IPv4, 33) self.assertRaises(netaddr.core.AddrFormatError, ipam_req.AnySubnetRequest, self.tenant_id, self.subnet_id, 
constants.IPv6, 129) def test_subnet_request_bad_gateway(self): cfg.CONF.set_override('force_gateway_on_subnet', True) self.assertRaises(ipam_exc.IpamValueInvalid, ipam_req.AnySubnetRequest, self.tenant_id, self.subnet_id, constants.IPv6, 64, gateway_ip='2000::1') def test_subnet_request_good_gateway(self): cfg.CONF.set_override('force_gateway_on_subnet', False) request = ipam_req.AnySubnetRequest(self.tenant_id, self.subnet_id, constants.IPv6, 64, gateway_ip='2000::1') self.assertEqual(netaddr.IPAddress('2000::1'), request.gateway_ip) def test_subnet_request_allocation_pool_wrong_version(self): pools = [netaddr.IPRange('0.0.0.4', '0.0.0.5')] self.assertRaises(ipam_exc.IpamValueInvalid, ipam_req.AnySubnetRequest, self.tenant_id, self.subnet_id, constants.IPv6, 64, allocation_pools=pools) def test_subnet_request_allocation_pool_not_in_net(self): pools = [netaddr.IPRange('0.0.0.64', '0.0.0.128')] self.assertRaises(ipam_exc.IpamValueInvalid, ipam_req.AnySubnetRequest, self.tenant_id, self.subnet_id, constants.IPv4, 25, allocation_pools=pools) class TestIpamSpecificSubnetRequest(IpamSubnetRequestTestCase): def test_subnet_request(self): request = ipam_req.SpecificSubnetRequest(self.tenant_id, self.subnet_id, '1.2.3.0/24', gateway_ip='1.2.3.1') self.assertEqual(24, request.prefixlen) self.assertEqual(netaddr.IPAddress('1.2.3.1'), request.gateway_ip) self.assertEqual(netaddr.IPNetwork('1.2.3.0/24'), request.subnet_cidr) def test_subnet_request_bad_gateway(self): cfg.CONF.set_override('force_gateway_on_subnet', True) self.assertRaises(ipam_exc.IpamValueInvalid, ipam_req.SpecificSubnetRequest, self.tenant_id, self.subnet_id, '2001::1', gateway_ip='2000::1') def test_subnet_request_good_gateway(self): cfg.CONF.set_override('force_gateway_on_subnet', False) request = ipam_req.SpecificSubnetRequest(self.tenant_id, self.subnet_id, '2001::1', gateway_ip='2000::1') self.assertEqual(netaddr.IPAddress('2000::1'), request.gateway_ip) class TestAddressRequest(base.BaseTestCase): # This class doesn't test much. At least running through all of the # constructors may shake out some trivial bugs. 
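    #
    # One constructor that does carry real logic is AutomaticAddressRequest
    # with EUI-64 addressing, where the host bits of the address are derived
    # from the port MAC. A minimal sketch of that derivation, mirroring
    # test_automatic_address_request_eui64 below (values are illustrative):
    #
    #     addr = ipv6_utils.get_ipv6_addr_by_EUI64('2607:f0d0:1002:51::/64',
    #                                              'aa:bb:cc:dd:ee:ff')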
EUI64 = ipam_req.AutomaticAddressRequest.EUI64 def setUp(self): super(TestAddressRequest, self).setUp() def test_specific_address_ipv6(self): request = ipam_req.SpecificAddressRequest('2000::45') self.assertEqual(netaddr.IPAddress('2000::45'), request.address) def test_specific_address_ipv4(self): request = ipam_req.SpecificAddressRequest('1.2.3.32') self.assertEqual(netaddr.IPAddress('1.2.3.32'), request.address) def test_any_address(self): ipam_req.AnyAddressRequest() def test_automatic_address_request_eui64(self): subnet_cidr = '2607:f0d0:1002:51::/64' port_mac = 'aa:bb:cc:dd:ee:ff' eui_addr = str(ipv6_utils.get_ipv6_addr_by_EUI64(subnet_cidr, port_mac)) request = ipam_req.AutomaticAddressRequest( address_type=self.EUI64, prefix=subnet_cidr, mac=port_mac) self.assertEqual(request.address, netaddr.IPAddress(eui_addr)) def test_automatic_address_request_invalid_address_type_raises(self): self.assertRaises(ipam_exc.InvalidAddressType, ipam_req.AutomaticAddressRequest, address_type='kaboom') def test_automatic_address_request_eui64_no_mac_raises(self): self.assertRaises(ipam_exc.AddressCalculationFailure, ipam_req.AutomaticAddressRequest, address_type=self.EUI64, prefix='meh') def test_automatic_address_request_eui64_alien_param_raises(self): self.assertRaises(ipam_exc.AddressCalculationFailure, ipam_req.AutomaticAddressRequest, address_type=self.EUI64, mac='meh', alien='et', prefix='meh') class TestIpamDriverLoader(base.BaseTestCase): def setUp(self): super(TestIpamDriverLoader, self).setUp() self.ctx = context.get_admin_context() def _verify_fake_ipam_driver_is_loaded(self, driver_name): mgr = manager.NeutronManager ipam_driver = mgr.load_class_for_provider('neutron.ipam_drivers', driver_name) self.assertEqual( fake_driver.FakeDriver, ipam_driver, "loaded ipam driver should be FakeDriver") def _verify_import_error_is_generated(self, driver_name): mgr = manager.NeutronManager self.assertRaises(ImportError, mgr.load_class_for_provider, 'neutron.ipam_drivers', driver_name) def test_ipam_driver_is_loaded_by_class(self): self._verify_fake_ipam_driver_is_loaded(FAKE_IPAM_CLASS) def test_ipam_driver_is_loaded_by_name(self): self._verify_fake_ipam_driver_is_loaded('fake') def test_ipam_driver_raises_import_error(self): self._verify_import_error_is_generated( 'neutron.tests.unit.ipam_req.SomeNonExistentClass') def test_ipam_driver_raises_import_error_for_none(self): self._verify_import_error_is_generated(None) def _load_ipam_driver(self, driver_name, subnet_pool_id): cfg.CONF.set_override("ipam_driver", driver_name) return driver.Pool.get_instance(subnet_pool_id, self.ctx) def test_ipam_driver_is_loaded_from_ipam_driver_config_value(self): ipam_driver = self._load_ipam_driver('fake', None) self.assertIsInstance( ipam_driver, fake_driver.FakeDriver, "loaded ipam driver should be of type FakeDriver") @mock.patch(FAKE_IPAM_CLASS) def test_ipam_driver_is_loaded_with_subnet_pool_id(self, ipam_mock): subnet_pool_id = 'SomePoolID' self._load_ipam_driver('fake', subnet_pool_id) ipam_mock.assert_called_once_with(subnet_pool_id, self.ctx) class TestAddressRequestFactory(base.BaseTestCase): def test_specific_address_request_is_loaded(self): for address in ('10.12.0.15', 'fffe::1'): ip = {'ip_address': address} self.assertIsInstance( ipam_req.AddressRequestFactory.get_request(None, None, ip), ipam_req.SpecificAddressRequest) def test_any_address_request_is_loaded(self): for addr in [None, '']: ip = {'ip_address': addr} self.assertIsInstance( ipam_req.AddressRequestFactory.get_request(None, None, ip), 
ipam_req.AnyAddressRequest) def test_automatic_address_request_is_loaded(self): ip = {'mac': '6c:62:6d:de:cf:49', 'subnet_cidr': '2001:470:abcd::/64', 'eui64_address': True} self.assertIsInstance( ipam_req.AddressRequestFactory.get_request(None, None, ip), ipam_req.AutomaticAddressRequest) class TestSubnetRequestFactory(IpamSubnetRequestTestCase): def _build_subnet_dict(self, id=None, cidr='192.168.1.0/24', prefixlen=8, ip_version=4): subnet = {'cidr': cidr, 'prefixlen': prefixlen, 'ip_version': ip_version, 'tenant_id': self.tenant_id, 'gateway_ip': None, 'allocation_pools': None, 'id': id or self.subnet_id} subnetpool = {'ip_version': ip_version, 'default_prefixlen': prefixlen} return subnet, subnetpool def test_specific_subnet_request_is_loaded(self): addresses = [ '10.12.0.15/24', '10.12.0.0/24', 'fffe::1/64', 'fffe::/64'] for address in addresses: subnet, subnetpool = self._build_subnet_dict(cidr=address) self.assertIsInstance( ipam_req.SubnetRequestFactory.get_request(None, subnet, subnetpool), ipam_req.SpecificSubnetRequest) def test_any_address_request_is_loaded_for_ipv4(self): subnet, subnetpool = self._build_subnet_dict(cidr=None, ip_version=4) self.assertIsInstance( ipam_req.SubnetRequestFactory.get_request(None, subnet, subnetpool), ipam_req.AnySubnetRequest) def test_any_address_request_is_loaded_for_ipv6(self): subnet, subnetpool = self._build_subnet_dict(cidr=None, ip_version=6) self.assertIsInstance( ipam_req.SubnetRequestFactory.get_request(None, subnet, subnetpool), ipam_req.AnySubnetRequest) def test_args_are_passed_to_specific_request(self): subnet, subnetpool = self._build_subnet_dict() request = ipam_req.SubnetRequestFactory.get_request(None, subnet, subnetpool) self.assertIsInstance(request, ipam_req.SpecificSubnetRequest) self.assertEqual(self.tenant_id, request.tenant_id) self.assertEqual(self.subnet_id, request.subnet_id) self.assertIsNone(request.gateway_ip) self.assertIsNone(request.allocation_pools) class TestGetRequestFactory(base.BaseTestCase): def setUp(self): super(TestGetRequestFactory, self).setUp() cfg.CONF.set_override('ipam_driver', 'fake') self.driver = driver.Pool.get_instance(None, None) def test_get_subnet_request_factory(self): self.assertEqual( self.driver.get_subnet_request_factory(), ipam_req.SubnetRequestFactory) def test_get_address_request_factory(self): self.assertEqual( self.driver.get_address_request_factory(), ipam_req.AddressRequestFactory) neutron-8.4.0/neutron/tests/unit/api/0000775000567000056710000000000013044373210020737 5ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/tests/unit/api/__init__.py0000664000567000056710000000000013044372736023052 0ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/tests/unit/api/v2/0000775000567000056710000000000013044373210021266 5ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/tests/unit/api/v2/test_base.py0000664000567000056710000020434313044372760023630 0ustar jenkinsjenkins00000000000000# Copyright (c) 2012 OpenStack Foundation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. import os import mock from oslo_config import cfg from oslo_db import exception as db_exc from oslo_policy import policy as oslo_policy from oslo_utils import uuidutils import six from six import moves import six.moves.urllib.parse as urlparse import webob from webob import exc import webtest from neutron.api import api_common from neutron.api import extensions from neutron.api.rpc.agentnotifiers import dhcp_rpc_agent_api from neutron.api.v2 import attributes from neutron.api.v2 import base as v2_base from neutron.api.v2 import router from neutron.common import exceptions as n_exc from neutron import context from neutron import manager from neutron import policy from neutron import quota from neutron.quota import resource_registry from neutron.tests import base from neutron.tests import fake_notifier from neutron.tests import tools from neutron.tests.unit import testlib_api EXTDIR = os.path.join(base.ROOTDIR, 'unit/extensions') _uuid = uuidutils.generate_uuid def _get_path(resource, id=None, action=None, fmt=None): path = '/%s' % resource if id is not None: path = path + '/%s' % id if action is not None: path = path + '/%s' % action if fmt is not None: path = path + '.%s' % fmt return path class ResourceIndexTestCase(base.BaseTestCase): def test_index_json(self): index = webtest.TestApp(router.Index({'foo': 'bar'})) res = index.get('') self.assertIn('resources', res.json) self.assertEqual(1, len(res.json['resources'])) resource = res.json['resources'][0] self.assertIn('collection', resource) self.assertEqual('bar', resource['collection']) self.assertIn('name', resource) self.assertEqual('foo', resource['name']) self.assertIn('links', resource) self.assertEqual(1, len(resource['links'])) link = resource['links'][0] self.assertIn('href', link) self.assertEqual(link['href'], 'http://localhost/bar') self.assertIn('rel', link) self.assertEqual('self', link['rel']) class APIv2TestBase(base.BaseTestCase): def setUp(self): super(APIv2TestBase, self).setUp() plugin = 'neutron.neutron_plugin_base_v2.NeutronPluginBaseV2' # Ensure existing ExtensionManager is not used extensions.PluginAwareExtensionManager._instance = None # Create the default configurations self.config_parse() # Update the plugin self.setup_coreplugin(plugin) cfg.CONF.set_override('allow_pagination', True) cfg.CONF.set_override('allow_sorting', True) self._plugin_patcher = mock.patch(plugin, autospec=True) self.plugin = self._plugin_patcher.start() instance = self.plugin.return_value instance._NeutronPluginBaseV2__native_pagination_support = True instance._NeutronPluginBaseV2__native_sorting_support = True api = router.APIRouter() self.api = webtest.TestApp(api) quota.QUOTAS._driver = None cfg.CONF.set_override('quota_driver', 'neutron.quota.ConfDriver', group='QUOTAS') # APIRouter initialization resets policy module, re-initializing it policy.init() class _ArgMatcher(object): """An adapter to assist mock assertions, used to custom compare.""" def __init__(self, cmp, obj): self.cmp = cmp self.obj = obj def __eq__(self, other): return self.cmp(self.obj, other) def _list_cmp(l1, l2): return set(l1) == set(l2) class APIv2TestCase(APIv2TestBase): def _do_field_list(self, resource, base_fields): attr_info = attributes.RESOURCE_ATTRIBUTE_MAP[resource] policy_attrs = [name for (name, info) in attr_info.items() if info.get('required_by_policy')] for name, info in attr_info.items(): if info.get('primary_key'): policy_attrs.append(name) 
fields = base_fields fields.extend(policy_attrs) return fields def _get_collection_kwargs(self, skipargs=None, **kwargs): skipargs = skipargs or [] args_list = ['filters', 'fields', 'sorts', 'limit', 'marker', 'page_reverse'] args_dict = dict( (arg, mock.ANY) for arg in set(args_list) - set(skipargs)) args_dict.update(kwargs) return args_dict def test_fields(self): instance = self.plugin.return_value instance.get_networks.return_value = [] self.api.get(_get_path('networks'), {'fields': 'foo'}) fields = self._do_field_list('networks', ['foo']) kwargs = self._get_collection_kwargs(fields=fields) instance.get_networks.assert_called_once_with(mock.ANY, **kwargs) def test_fields_multiple(self): instance = self.plugin.return_value instance.get_networks.return_value = [] fields = self._do_field_list('networks', ['foo', 'bar']) self.api.get(_get_path('networks'), {'fields': ['foo', 'bar']}) kwargs = self._get_collection_kwargs(fields=fields) instance.get_networks.assert_called_once_with(mock.ANY, **kwargs) def test_fields_multiple_with_empty(self): instance = self.plugin.return_value instance.get_networks.return_value = [] fields = self._do_field_list('networks', ['foo']) self.api.get(_get_path('networks'), {'fields': ['foo', '']}) kwargs = self._get_collection_kwargs(fields=fields) instance.get_networks.assert_called_once_with(mock.ANY, **kwargs) def test_fields_empty(self): instance = self.plugin.return_value instance.get_networks.return_value = [] self.api.get(_get_path('networks'), {'fields': ''}) kwargs = self._get_collection_kwargs(fields=[]) instance.get_networks.assert_called_once_with(mock.ANY, **kwargs) def test_fields_multiple_empty(self): instance = self.plugin.return_value instance.get_networks.return_value = [] self.api.get(_get_path('networks'), {'fields': ['', '']}) kwargs = self._get_collection_kwargs(fields=[]) instance.get_networks.assert_called_once_with(mock.ANY, **kwargs) def test_filters(self): instance = self.plugin.return_value instance.get_networks.return_value = [] self.api.get(_get_path('networks'), {'name': 'bar'}) filters = {'name': ['bar']} kwargs = self._get_collection_kwargs(filters=filters) instance.get_networks.assert_called_once_with(mock.ANY, **kwargs) def test_filters_empty(self): instance = self.plugin.return_value instance.get_networks.return_value = [] self.api.get(_get_path('networks'), {'name': ''}) filters = {} kwargs = self._get_collection_kwargs(filters=filters) instance.get_networks.assert_called_once_with(mock.ANY, **kwargs) def test_filters_multiple_empty(self): instance = self.plugin.return_value instance.get_networks.return_value = [] self.api.get(_get_path('networks'), {'name': ['', '']}) filters = {} kwargs = self._get_collection_kwargs(filters=filters) instance.get_networks.assert_called_once_with(mock.ANY, **kwargs) def test_filters_multiple_with_empty(self): instance = self.plugin.return_value instance.get_networks.return_value = [] self.api.get(_get_path('networks'), {'name': ['bar', '']}) filters = {'name': ['bar']} kwargs = self._get_collection_kwargs(filters=filters) instance.get_networks.assert_called_once_with(mock.ANY, **kwargs) def test_filters_multiple_values(self): instance = self.plugin.return_value instance.get_networks.return_value = [] self.api.get(_get_path('networks'), {'name': ['bar', 'bar2']}) filters = {'name': ['bar', 'bar2']} kwargs = self._get_collection_kwargs(filters=filters) instance.get_networks.assert_called_once_with(mock.ANY, **kwargs) def test_filters_multiple(self): instance = self.plugin.return_value 
instance.get_networks.return_value = [] self.api.get(_get_path('networks'), {'name': 'bar', 'tenant_id': 'bar2'}) filters = {'name': ['bar'], 'tenant_id': ['bar2']} kwargs = self._get_collection_kwargs(filters=filters) instance.get_networks.assert_called_once_with(mock.ANY, **kwargs) def test_filters_with_fields(self): instance = self.plugin.return_value instance.get_networks.return_value = [] self.api.get(_get_path('networks'), {'name': 'bar', 'fields': 'foo'}) filters = {'name': ['bar']} fields = self._do_field_list('networks', ['foo']) kwargs = self._get_collection_kwargs(filters=filters, fields=fields) instance.get_networks.assert_called_once_with(mock.ANY, **kwargs) def test_filters_with_convert_to(self): instance = self.plugin.return_value instance.get_ports.return_value = [] self.api.get(_get_path('ports'), {'admin_state_up': 'true'}) filters = {'admin_state_up': [True]} kwargs = self._get_collection_kwargs(filters=filters) instance.get_ports.assert_called_once_with(mock.ANY, **kwargs) def test_filters_with_convert_list_to(self): instance = self.plugin.return_value instance.get_ports.return_value = [] self.api.get(_get_path('ports'), {'fixed_ips': ['ip_address=foo', 'subnet_id=bar']}) filters = {'fixed_ips': {'ip_address': ['foo'], 'subnet_id': ['bar']}} kwargs = self._get_collection_kwargs(filters=filters) instance.get_ports.assert_called_once_with(mock.ANY, **kwargs) def test_limit(self): instance = self.plugin.return_value instance.get_networks.return_value = [] self.api.get(_get_path('networks'), {'limit': '10'}) kwargs = self._get_collection_kwargs(limit=10) instance.get_networks.assert_called_once_with(mock.ANY, **kwargs) def test_limit_with_great_than_max_limit(self): cfg.CONF.set_default('pagination_max_limit', '1000') instance = self.plugin.return_value instance.get_networks.return_value = [] self.api.get(_get_path('networks'), {'limit': '1001'}) kwargs = self._get_collection_kwargs(limit=1000) instance.get_networks.assert_called_once_with(mock.ANY, **kwargs) def test_limit_with_zero(self): cfg.CONF.set_default('pagination_max_limit', '1000') instance = self.plugin.return_value instance.get_networks.return_value = [] self.api.get(_get_path('networks'), {'limit': '0'}) kwargs = self._get_collection_kwargs(limit=1000) instance.get_networks.assert_called_once_with(mock.ANY, **kwargs) def test_limit_with_unspecific(self): cfg.CONF.set_default('pagination_max_limit', '1000') instance = self.plugin.return_value instance.get_networks.return_value = [] self.api.get(_get_path('networks')) kwargs = self._get_collection_kwargs(limit=1000) instance.get_networks.assert_called_once_with(mock.ANY, **kwargs) def test_limit_with_negative_value(self): cfg.CONF.set_default('pagination_max_limit', '1000') instance = self.plugin.return_value instance.get_networks.return_value = [] res = self.api.get(_get_path('networks'), {'limit': -1}, expect_errors=True) self.assertEqual(exc.HTTPBadRequest.code, res.status_int) def test_limit_with_non_integer(self): instance = self.plugin.return_value instance.get_networks.return_value = [] res = self.api.get(_get_path('networks'), {'limit': 'abc'}, expect_errors=True) self.assertEqual(exc.HTTPBadRequest.code, res.status_int) self.assertIn('abc', res) def test_limit_with_infinite_pagination_max_limit(self): instance = self.plugin.return_value instance.get_networks.return_value = [] cfg.CONF.set_override('pagination_max_limit', 'Infinite') self.api.get(_get_path('networks')) kwargs = self._get_collection_kwargs(limit=None) 
instance.get_networks.assert_called_once_with(mock.ANY, **kwargs) def test_limit_with_negative_pagination_max_limit(self): instance = self.plugin.return_value instance.get_networks.return_value = [] cfg.CONF.set_default('pagination_max_limit', '-1') self.api.get(_get_path('networks')) kwargs = self._get_collection_kwargs(limit=None) instance.get_networks.assert_called_once_with(mock.ANY, **kwargs) def test_limit_with_non_integer_pagination_max_limit(self): instance = self.plugin.return_value instance.get_networks.return_value = [] cfg.CONF.set_default('pagination_max_limit', 'abc') self.api.get(_get_path('networks')) kwargs = self._get_collection_kwargs(limit=None) instance.get_networks.assert_called_once_with(mock.ANY, **kwargs) def test_marker(self): cfg.CONF.set_override('pagination_max_limit', '1000') instance = self.plugin.return_value instance.get_networks.return_value = [] marker = _uuid() self.api.get(_get_path('networks'), {'marker': marker}) kwargs = self._get_collection_kwargs(limit=1000, marker=marker) instance.get_networks.assert_called_once_with(mock.ANY, **kwargs) def test_page_reverse(self): calls = [] instance = self.plugin.return_value instance.get_networks.return_value = [] self.api.get(_get_path('networks'), {'page_reverse': 'True'}) kwargs = self._get_collection_kwargs(page_reverse=True) calls.append(mock.call.get_networks(mock.ANY, **kwargs)) instance.get_networks.assert_called_once_with(mock.ANY, **kwargs) instance.get_networks.reset_mock() self.api.get(_get_path('networks'), {'page_reverse': 'False'}) kwargs = self._get_collection_kwargs(page_reverse=False) calls.append(mock.call.get_networks(mock.ANY, **kwargs)) instance.get_networks.assert_called_once_with(mock.ANY, **kwargs) def test_page_reverse_with_non_bool(self): instance = self.plugin.return_value instance.get_networks.return_value = [] self.api.get(_get_path('networks'), {'page_reverse': 'abc'}) kwargs = self._get_collection_kwargs(page_reverse=False) instance.get_networks.assert_called_once_with(mock.ANY, **kwargs) def test_page_reverse_with_unspecific(self): instance = self.plugin.return_value instance.get_networks.return_value = [] self.api.get(_get_path('networks')) kwargs = self._get_collection_kwargs(page_reverse=False) instance.get_networks.assert_called_once_with(mock.ANY, **kwargs) def test_sort(self): instance = self.plugin.return_value instance.get_networks.return_value = [] self.api.get(_get_path('networks'), {'sort_key': ['name', 'admin_state_up'], 'sort_dir': ['desc', 'asc']}) kwargs = self._get_collection_kwargs(sorts=[('name', False), ('admin_state_up', True), ('id', True)]) instance.get_networks.assert_called_once_with(mock.ANY, **kwargs) def test_sort_with_primary_key(self): instance = self.plugin.return_value instance.get_networks.return_value = [] self.api.get(_get_path('networks'), {'sort_key': ['name', 'admin_state_up', 'id'], 'sort_dir': ['desc', 'asc', 'desc']}) kwargs = self._get_collection_kwargs(sorts=[('name', False), ('admin_state_up', True), ('id', False)]) instance.get_networks.assert_called_once_with(mock.ANY, **kwargs) def test_sort_without_direction(self): instance = self.plugin.return_value instance.get_networks.return_value = [] res = self.api.get(_get_path('networks'), {'sort_key': ['name']}, expect_errors=True) self.assertEqual(exc.HTTPBadRequest.code, res.status_int) def test_sort_with_invalid_attribute(self): instance = self.plugin.return_value instance.get_networks.return_value = [] res = self.api.get(_get_path('networks'), {'sort_key': 'abc', 'sort_dir': 'asc'}, 
expect_errors=True) self.assertEqual(exc.HTTPBadRequest.code, res.status_int) def test_sort_with_invalid_dirs(self): instance = self.plugin.return_value instance.get_networks.return_value = [] res = self.api.get(_get_path('networks'), {'sort_key': 'name', 'sort_dir': 'abc'}, expect_errors=True) self.assertEqual(exc.HTTPBadRequest.code, res.status_int) def test_emulated_sort(self): instance = self.plugin.return_value instance._NeutronPluginBaseV2__native_pagination_support = False instance._NeutronPluginBaseV2__native_sorting_support = False instance.get_networks.return_value = [] api = webtest.TestApp(router.APIRouter()) api.get(_get_path('networks'), {'sort_key': ['name', 'status'], 'sort_dir': ['desc', 'asc']}) kwargs = self._get_collection_kwargs( skipargs=['sorts', 'limit', 'marker', 'page_reverse']) instance.get_networks.assert_called_once_with(mock.ANY, **kwargs) def test_emulated_sort_without_sort_field(self): instance = self.plugin.return_value instance._NeutronPluginBaseV2__native_pagination_support = False instance._NeutronPluginBaseV2__native_sorting_support = False instance.get_networks.return_value = [] api = webtest.TestApp(router.APIRouter()) api.get(_get_path('networks'), {'sort_key': ['name', 'status'], 'sort_dir': ['desc', 'asc'], 'fields': ['subnets']}) kwargs = self._get_collection_kwargs( skipargs=['sorts', 'limit', 'marker', 'page_reverse'], fields=_ArgMatcher(_list_cmp, ['name', 'status', 'id', 'subnets', 'shared', 'tenant_id'])) instance.get_networks.assert_called_once_with(mock.ANY, **kwargs) def test_emulated_pagination(self): instance = self.plugin.return_value instance._NeutronPluginBaseV2__native_pagination_support = False instance.get_networks.return_value = [] api = webtest.TestApp(router.APIRouter()) api.get(_get_path('networks'), {'limit': 10, 'marker': 'foo', 'page_reverse': False}) kwargs = self._get_collection_kwargs(skipargs=['limit', 'marker', 'page_reverse']) instance.get_networks.assert_called_once_with(mock.ANY, **kwargs) def test_native_pagination_without_native_sorting(self): instance = self.plugin.return_value instance._NeutronPluginBaseV2__native_sorting_support = False self.assertRaises(n_exc.Invalid, router.APIRouter) def test_native_pagination_without_allow_sorting(self): cfg.CONF.set_override('allow_sorting', False) instance = self.plugin.return_value instance.get_networks.return_value = [] api = webtest.TestApp(router.APIRouter()) api.get(_get_path('networks'), {'sort_key': ['name', 'admin_state_up'], 'sort_dir': ['desc', 'asc']}) kwargs = self._get_collection_kwargs(sorts=[('name', False), ('admin_state_up', True), ('id', True)]) instance.get_networks.assert_called_once_with(mock.ANY, **kwargs) # Note: since all resources use the same controller and validation # logic, we actually get really good coverage from testing just networks. 
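
# A sketch of what the shared controller looks like: each collection is wired
# up through v2_base.create_resource(), which builds one generic Controller
# from the collection name, member name, plugin and attribute map (the
# SubresourceTest class further down does exactly this). The names below are
# hypothetical, for illustration only:
#
#     controller = v2_base.create_resource(
#         'widgets', 'widget', plugin,
#         {'name': {'allow_post': True, 'allow_put': True,
#                   'is_visible': True}})
#
# Validation, policy enforcement and pagination all live in that single
# controller, which is why exercising it for networks also covers the other
# core resources.
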
class JSONV2TestCase(APIv2TestBase, testlib_api.WebTestCase): def _test_list(self, req_tenant_id, real_tenant_id): env = {} if req_tenant_id: env = {'neutron.context': context.Context('', req_tenant_id)} input_dict = {'id': uuidutils.generate_uuid(), 'name': 'net1', 'admin_state_up': True, 'status': "ACTIVE", 'tenant_id': real_tenant_id, 'shared': False, 'subnets': []} return_value = [input_dict] instance = self.plugin.return_value instance.get_networks.return_value = return_value res = self.api.get(_get_path('networks', fmt=self.fmt), extra_environ=env) res = self.deserialize(res) self.assertIn('networks', res) if not req_tenant_id or req_tenant_id == real_tenant_id: # expect full list returned self.assertEqual(1, len(res['networks'])) output_dict = res['networks'][0] input_dict['shared'] = False self.assertEqual(len(input_dict), len(output_dict)) for k, v in six.iteritems(input_dict): self.assertEqual(v, output_dict[k]) else: # expect no results self.assertEqual(0, len(res['networks'])) def test_list_noauth(self): self._test_list(None, _uuid()) def test_list_keystone(self): tenant_id = _uuid() self._test_list(tenant_id, tenant_id) def test_list_keystone_bad(self): tenant_id = _uuid() self._test_list(tenant_id + "bad", tenant_id) def test_list_pagination(self): id1 = str(_uuid()) id2 = str(_uuid()) input_dict1 = {'id': id1, 'name': 'net1', 'admin_state_up': True, 'status': "ACTIVE", 'tenant_id': '', 'shared': False, 'subnets': []} input_dict2 = {'id': id2, 'name': 'net2', 'admin_state_up': True, 'status': "ACTIVE", 'tenant_id': '', 'shared': False, 'subnets': []} return_value = [input_dict1, input_dict2] instance = self.plugin.return_value instance.get_networks.return_value = return_value params = {'limit': ['2'], 'marker': [str(_uuid())], 'sort_key': ['name'], 'sort_dir': ['asc']} res = self.api.get(_get_path('networks'), params=params).json self.assertEqual(2, len(res['networks'])) self.assertEqual(sorted([id1, id2]), sorted([res['networks'][0]['id'], res['networks'][1]['id']])) self.assertIn('networks_links', res) next_links = [] previous_links = [] for r in res['networks_links']: if r['rel'] == 'next': next_links.append(r) if r['rel'] == 'previous': previous_links.append(r) self.assertEqual(1, len(next_links)) self.assertEqual(1, len(previous_links)) url = urlparse.urlparse(next_links[0]['href']) self.assertEqual(url.path, _get_path('networks')) params['marker'] = [id2] self.assertEqual(params, urlparse.parse_qs(url.query)) url = urlparse.urlparse(previous_links[0]['href']) self.assertEqual(url.path, _get_path('networks')) params['marker'] = [id1] params['page_reverse'] = ['True'] self.assertEqual(params, urlparse.parse_qs(url.query)) def test_list_pagination_with_last_page(self): id = str(_uuid()) input_dict = {'id': id, 'name': 'net1', 'admin_state_up': True, 'status': "ACTIVE", 'tenant_id': '', 'shared': False, 'subnets': []} return_value = [input_dict] instance = self.plugin.return_value instance.get_networks.return_value = return_value params = {'limit': ['2'], 'marker': str(_uuid())} res = self.api.get(_get_path('networks'), params=params).json self.assertEqual(1, len(res['networks'])) self.assertEqual(id, res['networks'][0]['id']) self.assertIn('networks_links', res) previous_links = [] for r in res['networks_links']: self.assertNotEqual(r['rel'], 'next') if r['rel'] == 'previous': previous_links.append(r) self.assertEqual(1, len(previous_links)) url = urlparse.urlparse(previous_links[0]['href']) self.assertEqual(url.path, _get_path('networks')) expect_params = params.copy() 
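        # A 'previous' link points back at the current page: it carries the
        # first element of the page as the marker and sets page_reverse=True,
        # which is exactly what the expected query parameters built below
        # encode.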
expect_params['marker'] = [id] expect_params['page_reverse'] = ['True'] self.assertEqual(expect_params, urlparse.parse_qs(url.query)) def test_list_pagination_with_empty_page(self): return_value = [] instance = self.plugin.return_value instance.get_networks.return_value = return_value params = {'limit': ['2'], 'marker': str(_uuid())} res = self.api.get(_get_path('networks'), params=params).json self.assertEqual([], res['networks']) previous_links = [] if 'networks_links' in res: for r in res['networks_links']: self.assertNotEqual(r['rel'], 'next') if r['rel'] == 'previous': previous_links.append(r) self.assertEqual(1, len(previous_links)) url = urlparse.urlparse(previous_links[0]['href']) self.assertEqual(url.path, _get_path('networks')) expect_params = params.copy() del expect_params['marker'] expect_params['page_reverse'] = ['True'] self.assertEqual(expect_params, urlparse.parse_qs(url.query)) def test_list_pagination_reverse_with_last_page(self): id = str(_uuid()) input_dict = {'id': id, 'name': 'net1', 'admin_state_up': True, 'status': "ACTIVE", 'tenant_id': '', 'shared': False, 'subnets': []} return_value = [input_dict] instance = self.plugin.return_value instance.get_networks.return_value = return_value params = {'limit': ['2'], 'marker': [str(_uuid())], 'page_reverse': ['True']} res = self.api.get(_get_path('networks'), params=params).json self.assertEqual(len(res['networks']), 1) self.assertEqual(id, res['networks'][0]['id']) self.assertIn('networks_links', res) next_links = [] for r in res['networks_links']: self.assertNotEqual(r['rel'], 'previous') if r['rel'] == 'next': next_links.append(r) self.assertEqual(1, len(next_links)) url = urlparse.urlparse(next_links[0]['href']) self.assertEqual(url.path, _get_path('networks')) expected_params = params.copy() del expected_params['page_reverse'] expected_params['marker'] = [id] self.assertEqual(expected_params, urlparse.parse_qs(url.query)) def test_list_pagination_reverse_with_empty_page(self): return_value = [] instance = self.plugin.return_value instance.get_networks.return_value = return_value params = {'limit': ['2'], 'marker': [str(_uuid())], 'page_reverse': ['True']} res = self.api.get(_get_path('networks'), params=params).json self.assertEqual([], res['networks']) next_links = [] if 'networks_links' in res: for r in res['networks_links']: self.assertNotEqual(r['rel'], 'previous') if r['rel'] == 'next': next_links.append(r) self.assertEqual(1, len(next_links)) url = urlparse.urlparse(next_links[0]['href']) self.assertEqual(url.path, _get_path('networks')) expect_params = params.copy() del expect_params['marker'] del expect_params['page_reverse'] self.assertEqual(expect_params, urlparse.parse_qs(url.query)) def test_create(self): net_id = _uuid() data = {'network': {'name': 'net1', 'admin_state_up': True, 'tenant_id': _uuid()}} return_value = {'subnets': [], 'status': "ACTIVE", 'id': net_id} return_value.update(data['network'].copy()) instance = self.plugin.return_value instance.create_network.return_value = return_value instance.get_networks_count.return_value = 0 res = self.api.post(_get_path('networks', fmt=self.fmt), self.serialize(data), content_type='application/' + self.fmt) self.assertEqual(exc.HTTPCreated.code, res.status_int) res = self.deserialize(res) self.assertIn('network', res) net = res['network'] self.assertEqual(net_id, net['id']) self.assertEqual("ACTIVE", net['status']) def test_create_use_defaults(self): net_id = _uuid() initial_input = {'network': {'name': 'net1', 'tenant_id': _uuid()}} full_input = 
{'network': {'admin_state_up': True, 'shared': False}} full_input['network'].update(initial_input['network']) return_value = {'id': net_id, 'status': "ACTIVE"} return_value.update(full_input['network']) instance = self.plugin.return_value instance.create_network.return_value = return_value instance.get_networks_count.return_value = 0 res = self.api.post(_get_path('networks', fmt=self.fmt), self.serialize(initial_input), content_type='application/' + self.fmt) instance.create_network.assert_called_with(mock.ANY, network=full_input) self.assertEqual(exc.HTTPCreated.code, res.status_int) res = self.deserialize(res) self.assertIn('network', res) net = res['network'] self.assertEqual(net_id, net['id']) self.assertTrue(net['admin_state_up']) self.assertEqual("ACTIVE", net['status']) def test_create_no_keystone_env(self): data = {'name': 'net1'} self._test_create_failure_bad_request('networks', data) def test_create_with_keystone_env(self): tenant_id = _uuid() net_id = _uuid() env = {'neutron.context': context.Context('', tenant_id)} # tenant_id should be fetched from env initial_input = {'network': {'name': 'net1'}} full_input = {'network': {'admin_state_up': True, 'shared': False, 'tenant_id': tenant_id}} full_input['network'].update(initial_input['network']) return_value = {'id': net_id, 'status': "ACTIVE"} return_value.update(full_input['network']) instance = self.plugin.return_value instance.create_network.return_value = return_value instance.get_networks_count.return_value = 0 res = self.api.post(_get_path('networks', fmt=self.fmt), self.serialize(initial_input), content_type='application/' + self.fmt, extra_environ=env) instance.create_network.assert_called_with(mock.ANY, network=full_input) self.assertEqual(exc.HTTPCreated.code, res.status_int) def test_create_bad_keystone_tenant(self): tenant_id = _uuid() data = {'network': {'name': 'net1', 'tenant_id': tenant_id}} env = {'neutron.context': context.Context('', tenant_id + "bad")} self._test_create_failure_bad_request('networks', data, extra_environ=env) def test_create_no_body(self): data = {'whoa': None} self._test_create_failure_bad_request('networks', data) def test_create_body_string_not_json(self): data = 'a string' self._test_create_failure_bad_request('networks', data) def test_create_body_boolean_not_json(self): data = True self._test_create_failure_bad_request('networks', data) def test_create_no_resource(self): data = {} self._test_create_failure_bad_request('networks', data) def test_create_missing_attr(self): data = {'port': {'what': 'who', 'tenant_id': _uuid()}} self._test_create_failure_bad_request('ports', data) def test_create_readonly_attr(self): data = {'network': {'name': 'net1', 'tenant_id': _uuid(), 'status': "ACTIVE"}} self._test_create_failure_bad_request('networks', data) def test_create_with_too_long_name(self): data = {'network': {'name': "12345678" * 32, 'admin_state_up': True, 'tenant_id': _uuid()}} res = self.api.post(_get_path('networks', fmt=self.fmt), self.serialize(data), content_type='application/' + self.fmt, expect_errors=True) self.assertEqual(exc.HTTPBadRequest.code, res.status_int) def test_create_bulk(self): data = {'networks': [{'name': 'net1', 'admin_state_up': True, 'tenant_id': _uuid()}, {'name': 'net2', 'admin_state_up': True, 'tenant_id': _uuid()}]} def side_effect(context, network): net = network.copy() net['network'].update({'subnets': []}) return net['network'] instance = self.plugin.return_value instance.create_network.side_effect = side_effect instance.get_networks_count.return_value = 0 
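        # Note: NeutronPluginBaseV2 exposes no native bulk call, so the API
        # layer emulates bulk create by invoking create_network() once per
        # element of the 'networks' list; the side_effect above is therefore
        # applied to each item in turn.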
res = self.api.post(_get_path('networks', fmt=self.fmt), self.serialize(data), content_type='application/' + self.fmt) self.assertEqual(exc.HTTPCreated.code, res.status_int) def _test_create_failure_bad_request(self, resource, data, **kwargs): res = self.api.post(_get_path(resource, fmt=self.fmt), self.serialize(data), content_type='application/' + self.fmt, expect_errors=True, **kwargs) self.assertEqual(exc.HTTPBadRequest.code, res.status_int) def test_create_bulk_networks_none(self): self._test_create_failure_bad_request('networks', {'networks': None}) def test_create_bulk_networks_empty_list(self): self._test_create_failure_bad_request('networks', {'networks': []}) def test_create_bulk_missing_attr(self): data = {'ports': [{'what': 'who', 'tenant_id': _uuid()}]} self._test_create_failure_bad_request('ports', data) def test_create_bulk_partial_body(self): data = {'ports': [{'device_id': 'device_1', 'tenant_id': _uuid()}, {'tenant_id': _uuid()}]} self._test_create_failure_bad_request('ports', data) def test_create_attr_not_specified(self): net_id = _uuid() tenant_id = _uuid() device_id = _uuid() initial_input = {'port': {'name': '', 'network_id': net_id, 'tenant_id': tenant_id, 'device_id': device_id, 'admin_state_up': True}} full_input = {'port': {'admin_state_up': True, 'mac_address': attributes.ATTR_NOT_SPECIFIED, 'fixed_ips': attributes.ATTR_NOT_SPECIFIED, 'device_owner': ''}} full_input['port'].update(initial_input['port']) return_value = {'id': _uuid(), 'status': 'ACTIVE', 'admin_state_up': True, 'mac_address': 'ca:fe:de:ad:be:ef', 'device_id': device_id, 'device_owner': ''} return_value.update(initial_input['port']) instance = self.plugin.return_value instance.get_network.return_value = { 'tenant_id': six.text_type(tenant_id) } instance.get_ports_count.return_value = 1 instance.create_port.return_value = return_value res = self.api.post(_get_path('ports', fmt=self.fmt), self.serialize(initial_input), content_type='application/' + self.fmt) instance.create_port.assert_called_with(mock.ANY, port=full_input) self.assertEqual(exc.HTTPCreated.code, res.status_int) res = self.deserialize(res) self.assertIn('port', res) port = res['port'] self.assertEqual(net_id, port['network_id']) self.assertEqual('ca:fe:de:ad:be:ef', port['mac_address']) def test_create_return_extra_attr(self): net_id = _uuid() data = {'network': {'name': 'net1', 'admin_state_up': True, 'tenant_id': _uuid()}} return_value = {'subnets': [], 'status': "ACTIVE", 'id': net_id, 'v2attrs:something': "123"} return_value.update(data['network'].copy()) instance = self.plugin.return_value instance.create_network.return_value = return_value instance.get_networks_count.return_value = 0 res = self.api.post(_get_path('networks', fmt=self.fmt), self.serialize(data), content_type='application/' + self.fmt) self.assertEqual(exc.HTTPCreated.code, res.status_int) res = self.deserialize(res) self.assertIn('network', res) net = res['network'] self.assertEqual(net_id, net['id']) self.assertEqual("ACTIVE", net['status']) self.assertNotIn('v2attrs:something', net) def test_fields(self): return_value = {'name': 'net1', 'admin_state_up': True, 'subnets': []} instance = self.plugin.return_value instance.get_network.return_value = return_value self.api.get(_get_path('networks', id=uuidutils.generate_uuid(), fmt=self.fmt)) def _test_delete(self, req_tenant_id, real_tenant_id, expected_code, expect_errors=False): env = {} if req_tenant_id: env = {'neutron.context': context.Context('', req_tenant_id)} instance = self.plugin.return_value 
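        # The controller fetches the object for the policy check before
        # deleting it, so get_network must be mocked even though the
        # operation under test is delete_network; a tenant mismatch then
        # surfaces as a 404 rather than a 403 (see
        # test_delete_keystone_bad_tenant below).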
instance.get_network.return_value = {'tenant_id': real_tenant_id, 'shared': False} instance.delete_network.return_value = None res = self.api.delete(_get_path('networks', id=uuidutils.generate_uuid(), fmt=self.fmt), extra_environ=env, expect_errors=expect_errors) self.assertEqual(expected_code, res.status_int) def test_delete_noauth(self): self._test_delete(None, _uuid(), exc.HTTPNoContent.code) def test_delete_keystone(self): tenant_id = _uuid() self._test_delete(tenant_id, tenant_id, exc.HTTPNoContent.code) def test_delete_keystone_bad_tenant(self): tenant_id = _uuid() self._test_delete(tenant_id + "bad", tenant_id, exc.HTTPNotFound.code, expect_errors=True) def _test_get(self, req_tenant_id, real_tenant_id, expected_code, expect_errors=False): env = {} shared = False if req_tenant_id: env = {'neutron.context': context.Context('', req_tenant_id)} if req_tenant_id.endswith('another'): shared = True env['neutron.context'].roles = ['tenant_admin'] data = {'tenant_id': real_tenant_id, 'shared': shared} instance = self.plugin.return_value instance.get_network.return_value = data res = self.api.get(_get_path('networks', id=uuidutils.generate_uuid(), fmt=self.fmt), extra_environ=env, expect_errors=expect_errors) self.assertEqual(expected_code, res.status_int) return res def test_get_noauth(self): self._test_get(None, _uuid(), 200) def test_get_keystone(self): tenant_id = _uuid() self._test_get(tenant_id, tenant_id, 200) def test_get_keystone_bad_tenant(self): tenant_id = _uuid() self._test_get(tenant_id + "bad", tenant_id, exc.HTTPNotFound.code, expect_errors=True) def test_get_keystone_shared_network(self): tenant_id = _uuid() self._test_get(tenant_id + "another", tenant_id, 200) def test_get_keystone_strip_admin_only_attribute(self): tenant_id = _uuid() # Inject rule in policy engine rules = oslo_policy.Rules.from_dict( {'get_network:name': "rule:admin_only"}) policy.set_rules(rules, overwrite=False) res = self._test_get(tenant_id, tenant_id, 200) res = self.deserialize(res) self.assertNotIn('name', res['network']) def _test_update(self, req_tenant_id, real_tenant_id, expected_code, expect_errors=False): env = {} if req_tenant_id: env = {'neutron.context': context.Context('', req_tenant_id)} # leave out 'name' field intentionally data = {'network': {'admin_state_up': True}} return_value = {'subnets': []} return_value.update(data['network'].copy()) instance = self.plugin.return_value instance.get_network.return_value = {'tenant_id': real_tenant_id, 'shared': False} instance.update_network.return_value = return_value res = self.api.put(_get_path('networks', id=uuidutils.generate_uuid(), fmt=self.fmt), self.serialize(data), extra_environ=env, expect_errors=expect_errors) # Ensure id attribute is included in fields returned by GET call # in update procedure. 
        self.assertEqual(1, instance.get_network.call_count)
        self.assertIn('id', instance.get_network.call_args[1]['fields'])
        self.assertEqual(expected_code, res.status_int)

    def test_update_noauth(self):
        self._test_update(None, _uuid(), 200)

    def test_update_keystone(self):
        tenant_id = _uuid()
        self._test_update(tenant_id, tenant_id, 200)

    def test_update_keystone_bad_tenant(self):
        tenant_id = _uuid()
        self._test_update(tenant_id + "bad", tenant_id,
                          exc.HTTPNotFound.code, expect_errors=True)

    def test_update_keystone_no_tenant(self):
        tenant_id = _uuid()
        self._test_update(tenant_id, None,
                          exc.HTTPNotFound.code, expect_errors=True)

    def test_update_readonly_field(self):
        data = {'network': {'status': "NANANA"}}
        res = self.api.put(_get_path('networks', id=_uuid()),
                           self.serialize(data),
                           content_type='application/' + self.fmt,
                           expect_errors=True)
        self.assertEqual(400, res.status_int)

    def test_invalid_attribute_field(self):
        data = {'network': {'invalid_key1': "foo1", 'invalid_key2': "foo2"}}
        res = self.api.put(_get_path('networks', id=_uuid()),
                           self.serialize(data),
                           content_type='application/' + self.fmt,
                           expect_errors=True)
        self.assertEqual(400, res.status_int)

    def test_retry_on_index(self):
        instance = self.plugin.return_value
        instance.get_networks.side_effect = [db_exc.RetryRequest(None), []]
        api = webtest.TestApp(router.APIRouter())
        api.get(_get_path('networks', fmt=self.fmt))
        self.assertTrue(instance.get_networks.called)

    def test_retry_on_show(self):
        instance = self.plugin.return_value
        instance.get_network.side_effect = [db_exc.RetryRequest(None), {}]
        api = webtest.TestApp(router.APIRouter())
        api.get(_get_path('networks', _uuid(), fmt=self.fmt))
        self.assertTrue(instance.get_network.called)


class SubresourceTest(base.BaseTestCase):
    def setUp(self):
        super(SubresourceTest, self).setUp()

        plugin = 'neutron.tests.unit.api.v2.test_base.TestSubresourcePlugin'
        extensions.PluginAwareExtensionManager._instance = None

        self.useFixture(tools.AttributeMapMemento())

        self.config_parse()
        self.setup_coreplugin(plugin)

        self._plugin_patcher = mock.patch(plugin, autospec=True)
        self.plugin = self._plugin_patcher.start()

        api = router.APIRouter()

        SUB_RESOURCES = {}
        RESOURCE_ATTRIBUTE_MAP = {}
        SUB_RESOURCES['dummy'] = {
            'collection_name': 'dummies',
            'parent': {'collection_name': 'networks',
                       'member_name': 'network'}
        }
        RESOURCE_ATTRIBUTE_MAP['dummies'] = {
            'foo': {'allow_post': True, 'allow_put': True,
                    'validate': {'type:string': None},
                    'default': '', 'is_visible': True},
            'tenant_id': {'allow_post': True, 'allow_put': False,
                          'validate': {'type:string': None},
                          'required_by_policy': True,
                          'is_visible': True}
        }
        collection_name = SUB_RESOURCES['dummy'].get('collection_name')
        resource_name = 'dummy'
        parent = SUB_RESOURCES['dummy'].get('parent')
        params = RESOURCE_ATTRIBUTE_MAP['dummies']
        member_actions = {'mactions': 'GET'}
        _plugin = manager.NeutronManager.get_plugin()
        controller = v2_base.create_resource(collection_name, resource_name,
                                             _plugin, params,
                                             member_actions=member_actions,
                                             parent=parent,
                                             allow_bulk=True,
                                             allow_pagination=True,
                                             allow_sorting=True)

        path_prefix = "/%s/{%s_id}/%s" % (parent['collection_name'],
                                          parent['member_name'],
                                          collection_name)
        mapper_kwargs = dict(controller=controller,
                             path_prefix=path_prefix)
        api.map.collection(collection_name, resource_name, **mapper_kwargs)
        api.map.resource(collection_name, collection_name,
                         controller=controller,
                         parent_resource=parent,
                         member=member_actions)
        self.api = webtest.TestApp(api)

    def tearDown(self):
        super(SubresourceTest, self).tearDown()

    def test_index_sub_resource(self):
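        # The mapper assembled in setUp() exposes the sub-resource under
        # /networks/{network_id}/dummies, and the generic controller turns
        # each verb into a plugin call named after the parent and the
        # sub-resource; a GET on the collection thus becomes the
        # get_network_dummies() call asserted below.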
        instance = self.plugin.return_value
        self.api.get('/networks/id1/dummies')
        instance.get_network_dummies.assert_called_once_with(mock.ANY,
                                                             filters=mock.ANY,
                                                             fields=mock.ANY,
                                                             network_id='id1')

    def test_show_sub_resource(self):
        instance = self.plugin.return_value
        dummy_id = _uuid()
        self.api.get('/networks/id1' + _get_path('dummies', id=dummy_id))
        instance.get_network_dummy.assert_called_once_with(mock.ANY,
                                                           dummy_id,
                                                           network_id='id1',
                                                           fields=mock.ANY)

    def test_create_sub_resource(self):
        instance = self.plugin.return_value
        body = {'dummy': {'foo': 'bar', 'tenant_id': _uuid()}}
        self.api.post_json('/networks/id1/dummies', body)
        instance.create_network_dummy.assert_called_once_with(
            mock.ANY, network_id='id1', dummy=body)

    def test_update_sub_resource(self):
        instance = self.plugin.return_value
        dummy_id = _uuid()
        body = {'dummy': {'foo': 'bar'}}
        self.api.put_json('/networks/id1' + _get_path('dummies',
                                                      id=dummy_id),
                          body)
        instance.update_network_dummy.assert_called_once_with(
            mock.ANY, dummy_id, network_id='id1', dummy=body)

    def test_update_subresource_to_none(self):
        instance = self.plugin.return_value
        dummy_id = _uuid()
        body = {'dummy': {}}
        self.api.put_json('/networks/id1' + _get_path('dummies',
                                                      id=dummy_id),
                          body)
        instance.update_network_dummy.assert_called_once_with(
            mock.ANY, dummy_id, network_id='id1', dummy=body)

    def test_delete_sub_resource(self):
        instance = self.plugin.return_value
        dummy_id = _uuid()
        self.api.delete('/networks/id1' + _get_path('dummies', id=dummy_id))
        instance.delete_network_dummy.assert_called_once_with(
            mock.ANY, dummy_id, network_id='id1')

    def test_sub_resource_member_actions(self):
        instance = self.plugin.return_value
        dummy_id = _uuid()
        self.api.get('/networks/id1' + _get_path('dummies', id=dummy_id,
                                                 action='mactions'))
        instance.mactions.assert_called_once_with(mock.ANY,
                                                  dummy_id,
                                                  network_id='id1')


# Note: since all resources use the same controller and validation
# logic, we actually get really good coverage from testing just networks.
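
# A minimal sketch (not exercised by the suite) of the URL-to-plugin-method
# convention that the SubresourceTest cases above rely on. The helper name
# and the literal mapping are written here purely for illustration; only the
# plugin method names come from TestSubresourcePlugin defined later in this
# module.
def _subresource_dispatch_example():
    """Return the assumed HTTP-route to plugin-method mapping for dummies."""
    return {
        ('GET', '/networks/<network_id>/dummies'): 'get_network_dummies',
        ('GET', '/networks/<network_id>/dummies/<id>'): 'get_network_dummy',
        ('POST', '/networks/<network_id>/dummies'): 'create_network_dummy',
        ('PUT', '/networks/<network_id>/dummies/<id>'): 'update_network_dummy',
        ('DELETE',
         '/networks/<network_id>/dummies/<id>'): 'delete_network_dummy',
        ('GET',
         '/networks/<network_id>/dummies/<id>/mactions'): 'mactions',
    }
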
class V2Views(base.BaseTestCase): def _view(self, keys, collection, resource): data = dict((key, 'value') for key in keys) data['fake'] = 'value' attr_info = attributes.RESOURCE_ATTRIBUTE_MAP[collection] controller = v2_base.Controller(None, collection, resource, attr_info) res = controller._view(context.get_admin_context(), data) self.assertNotIn('fake', res) for key in keys: self.assertIn(key, res) def test_network(self): keys = ('id', 'name', 'subnets', 'admin_state_up', 'status', 'tenant_id') self._view(keys, 'networks', 'network') def test_port(self): keys = ('id', 'network_id', 'mac_address', 'fixed_ips', 'device_id', 'admin_state_up', 'tenant_id', 'status') self._view(keys, 'ports', 'port') def test_subnet(self): keys = ('id', 'network_id', 'tenant_id', 'gateway_ip', 'ip_version', 'cidr', 'enable_dhcp') self._view(keys, 'subnets', 'subnet') class NotificationTest(APIv2TestBase): def setUp(self): super(NotificationTest, self).setUp() fake_notifier.reset() def _resource_op_notifier(self, opname, resource, expected_errors=False): initial_input = {resource: {'name': 'myname'}} instance = self.plugin.return_value instance.get_networks.return_value = initial_input instance.get_networks_count.return_value = 0 expected_code = exc.HTTPCreated.code if opname == 'create': initial_input[resource]['tenant_id'] = _uuid() res = self.api.post_json( _get_path('networks'), initial_input, expect_errors=expected_errors) if opname == 'update': res = self.api.put_json( _get_path('networks', id=_uuid()), initial_input, expect_errors=expected_errors) expected_code = exc.HTTPOk.code if opname == 'delete': initial_input[resource]['tenant_id'] = _uuid() res = self.api.delete( _get_path('networks', id=_uuid()), expect_errors=expected_errors) expected_code = exc.HTTPNoContent.code expected_events = ('.'.join([resource, opname, "start"]), '.'.join([resource, opname, "end"])) self.assertEqual(len(expected_events), len(fake_notifier.NOTIFICATIONS)) for msg, event in zip(fake_notifier.NOTIFICATIONS, expected_events): self.assertEqual('INFO', msg['priority']) self.assertEqual(event, msg['event_type']) self.assertEqual(expected_code, res.status_int) def test_network_create_notifer(self): self._resource_op_notifier('create', 'network') def test_network_delete_notifer(self): self._resource_op_notifier('delete', 'network') def test_network_update_notifer(self): self._resource_op_notifier('update', 'network') class DHCPNotificationTest(APIv2TestBase): def setUp(self): # This test does not have database support so tracking cannot be used cfg.CONF.set_override('track_quota_usage', False, group='QUOTAS') super(DHCPNotificationTest, self).setUp() def _test_dhcp_notifier(self, opname, resource, initial_input=None): instance = self.plugin.return_value instance.get_networks.return_value = initial_input instance.get_networks_count.return_value = 0 expected_code = exc.HTTPCreated.code with mock.patch.object(dhcp_rpc_agent_api.DhcpAgentNotifyAPI, 'notify') as dhcp_notifier: if opname == 'create': res = self.api.post_json( _get_path('networks'), initial_input) if opname == 'update': res = self.api.put_json( _get_path('networks', id=_uuid()), initial_input) expected_code = exc.HTTPOk.code if opname == 'delete': res = self.api.delete(_get_path('networks', id=_uuid())) expected_code = exc.HTTPNoContent.code expected_item = mock.call(mock.ANY, mock.ANY, resource + "." 
+ opname + ".end") if initial_input and resource not in initial_input: resource += 's' num = len(initial_input[resource]) if initial_input and isinstance( initial_input[resource], list) else 1 expected = [expected_item for x in moves.range(num)] self.assertEqual(expected, dhcp_notifier.call_args_list) self.assertEqual(num, dhcp_notifier.call_count) self.assertEqual(expected_code, res.status_int) def test_network_create_dhcp_notifer(self): input = {'network': {'name': 'net', 'tenant_id': _uuid()}} self._test_dhcp_notifier('create', 'network', input) def test_network_delete_dhcp_notifer(self): self._test_dhcp_notifier('delete', 'network') def test_network_update_dhcp_notifer(self): input = {'network': {'name': 'net'}} self._test_dhcp_notifier('update', 'network', input) def test_networks_create_bulk_dhcp_notifer(self): input = {'networks': [{'name': 'net1', 'tenant_id': _uuid()}, {'name': 'net2', 'tenant_id': _uuid()}]} self._test_dhcp_notifier('create', 'network', input) class QuotaTest(APIv2TestBase): def setUp(self): # This test does not have database support so tracking cannot be used cfg.CONF.set_override('track_quota_usage', False, group='QUOTAS') super(QuotaTest, self).setUp() # Use mock to let the API use a different QuotaEngine instance for # unit test in this class. This will ensure resource are registered # again and instantiated with neutron.quota.resource.CountableResource replacement_registry = resource_registry.ResourceRegistry() registry_patcher = mock.patch('neutron.quota.resource_registry.' 'ResourceRegistry.get_instance') mock_registry = registry_patcher.start().return_value mock_registry.get_resource = replacement_registry.get_resource mock_registry.resources = replacement_registry.resources # Register a resource replacement_registry.register_resource_by_name('network') def test_create_network_quota(self): cfg.CONF.set_override('quota_network', 1, group='QUOTAS') initial_input = {'network': {'name': 'net1', 'tenant_id': _uuid()}} full_input = {'network': {'admin_state_up': True, 'subnets': []}} full_input['network'].update(initial_input['network']) instance = self.plugin.return_value instance.get_networks_count.return_value = 1 res = self.api.post_json( _get_path('networks'), initial_input, expect_errors=True) instance.get_networks_count.assert_called_with(mock.ANY, filters=mock.ANY) self.assertIn("Quota exceeded for resources", res.json['NeutronError']['message']) def test_create_network_quota_no_counts(self): cfg.CONF.set_override('quota_network', 1, group='QUOTAS') initial_input = {'network': {'name': 'net1', 'tenant_id': _uuid()}} full_input = {'network': {'admin_state_up': True, 'subnets': []}} full_input['network'].update(initial_input['network']) instance = self.plugin.return_value instance.get_networks_count.side_effect = ( NotImplementedError()) instance.get_networks.return_value = ["foo"] res = self.api.post_json( _get_path('networks'), initial_input, expect_errors=True) instance.get_networks_count.assert_called_with(mock.ANY, filters=mock.ANY) self.assertIn("Quota exceeded for resources", res.json['NeutronError']['message']) def test_create_network_quota_without_limit(self): cfg.CONF.set_override('quota_network', -1, group='QUOTAS') initial_input = {'network': {'name': 'net1', 'tenant_id': _uuid()}} instance = self.plugin.return_value instance.get_networks_count.return_value = 3 res = self.api.post_json( _get_path('networks'), initial_input) self.assertEqual(exc.HTTPCreated.code, res.status_int) class ExtensionTestCase(base.BaseTestCase): def setUp(self): # 
This test does not have database support so tracking cannot be used cfg.CONF.set_override('track_quota_usage', False, group='QUOTAS') super(ExtensionTestCase, self).setUp() plugin = 'neutron.neutron_plugin_base_v2.NeutronPluginBaseV2' # Ensure existing ExtensionManager is not used extensions.PluginAwareExtensionManager._instance = None self.useFixture(tools.AttributeMapMemento()) # Create the default configurations self.config_parse() # Update the plugin and extensions path self.setup_coreplugin(plugin) cfg.CONF.set_override('api_extensions_path', EXTDIR) self._plugin_patcher = mock.patch(plugin, autospec=True) self.plugin = self._plugin_patcher.start() # Instantiate mock plugin and enable the V2attributes extension manager.NeutronManager.get_plugin().supported_extension_aliases = ( ["v2attrs"]) api = router.APIRouter() self.api = webtest.TestApp(api) quota.QUOTAS._driver = None cfg.CONF.set_override('quota_driver', 'neutron.quota.ConfDriver', group='QUOTAS') def tearDown(self): super(ExtensionTestCase, self).tearDown() self.api = None self.plugin = None def test_extended_create(self): net_id = _uuid() initial_input = {'network': {'name': 'net1', 'tenant_id': _uuid(), 'v2attrs:something_else': "abc"}} data = {'network': {'admin_state_up': True, 'shared': False}} data['network'].update(initial_input['network']) return_value = {'subnets': [], 'status': "ACTIVE", 'id': net_id, 'v2attrs:something': "123"} return_value.update(data['network'].copy()) instance = self.plugin.return_value instance.create_network.return_value = return_value instance.get_networks_count.return_value = 0 res = self.api.post_json(_get_path('networks'), initial_input) instance.create_network.assert_called_with(mock.ANY, network=data) self.assertEqual(exc.HTTPCreated.code, res.status_int) self.assertIn('network', res.json) net = res.json['network'] self.assertEqual(net_id, net['id']) self.assertEqual("ACTIVE", net['status']) self.assertEqual("123", net['v2attrs:something']) self.assertNotIn('v2attrs:something_else', net) class TestSubresourcePlugin(object): def get_network_dummies(self, context, network_id, filters=None, fields=None): return [] def get_network_dummy(self, context, id, network_id, fields=None): return {} def create_network_dummy(self, context, network_id, dummy): return {} def update_network_dummy(self, context, id, network_id, dummy): return {} def delete_network_dummy(self, context, id, network_id): return def mactions(self, context, id, network_id): return class ListArgsTestCase(base.BaseTestCase): def test_list_args(self): path = '/?fields=4&foo=3&fields=2&bar=1' request = webob.Request.blank(path) expect_val = ['2', '4'] actual_val = api_common.list_args(request, 'fields') self.assertEqual(expect_val, sorted(actual_val)) def test_list_args_with_empty(self): path = '/?foo=4&bar=3&baz=2&qux=1' request = webob.Request.blank(path) self.assertEqual([], api_common.list_args(request, 'fields')) class FiltersTestCase(base.BaseTestCase): def test_all_skip_args(self): path = '/?fields=4&fields=3&fields=2&fields=1' request = webob.Request.blank(path) self.assertEqual({}, api_common.get_filters(request, None, ["fields"])) def test_blank_values(self): path = '/?foo=&bar=&baz=&qux=' request = webob.Request.blank(path) self.assertEqual({}, api_common.get_filters(request, {})) def test_no_attr_info(self): path = '/?foo=4&bar=3&baz=2&qux=1' request = webob.Request.blank(path) expect_val = {'foo': ['4'], 'bar': ['3'], 'baz': ['2'], 'qux': ['1']} actual_val = api_common.get_filters(request, {}) 
        self.assertEqual(expect_val, actual_val)

    def test_attr_info_without_conversion(self):
        path = '/?foo=4&bar=3&baz=2&qux=1'
        request = webob.Request.blank(path)
        attr_info = {'foo': {'key': 'val'}}
        expect_val = {'foo': ['4'], 'bar': ['3'], 'baz': ['2'], 'qux': ['1']}
        actual_val = api_common.get_filters(request, attr_info)
        self.assertEqual(expect_val, actual_val)

    def test_attr_info_with_convert_list_to(self):
        path = '/?foo=key=4&bar=3&foo=key=2&qux=1'
        request = webob.Request.blank(path)
        attr_info = {
            'foo': {
                'convert_list_to': attributes.convert_kvp_list_to_dict,
            }
        }
        expect_val = {'foo': {'key': ['2', '4']}, 'bar': ['3'], 'qux': ['1']}
        actual_val = api_common.get_filters(request, attr_info)
        self.assertOrderedEqual(expect_val, actual_val)

    def test_attr_info_with_convert_to(self):
        path = '/?foo=4&bar=3&baz=2&qux=1'
        request = webob.Request.blank(path)
        attr_info = {'foo': {'convert_to': attributes.convert_to_int}}
        expect_val = {'foo': [4], 'bar': ['3'], 'baz': ['2'], 'qux': ['1']}
        actual_val = api_common.get_filters(request, attr_info)
        self.assertEqual(expect_val, actual_val)


class CreateResourceTestCase(base.BaseTestCase):
    def test_resource_creation(self):
        resource = v2_base.create_resource('fakes', 'fake', None, {})
        self.assertIsInstance(resource, webob.dec.wsgify)
neutron-8.4.0/neutron/tests/unit/api/v2/test_resource.py0000664000567000056710000003632613044372760024541 0ustar jenkinsjenkins00000000000000# Copyright (c) 2012 Intel Corporation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
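# The cases below exercise neutron.api.v2.resource and the neutron.wsgi
# Request class: content-type negotiation on incoming requests (which falls
# back to JSON for unsupported types), propagation of the neutron.context
# stored in the WSGI environ, and the translation of plugin and webob
# exceptions into the JSON fault documents consumed by python-neutronclient.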
import mock
import oslo_i18n
from webob import exc
import webtest

from neutron._i18n import _
from neutron.api.v2 import resource as wsgi_resource
from neutron.common import exceptions as n_exc
from neutron import context
from neutron.tests import base
from neutron import wsgi


class RequestTestCase(base.BaseTestCase):
    def setUp(self):
        super(RequestTestCase, self).setUp()
        self.req = wsgi_resource.Request({'foo': 'bar'})

    def test_content_type_missing(self):
        request = wsgi.Request.blank('/tests/123', method='POST')
        request.body = b""
        self.assertIsNone(request.get_content_type())

    def test_content_type_with_charset(self):
        request = wsgi.Request.blank('/tests/123')
        request.headers["Content-Type"] = "application/json; charset=UTF-8"
        result = request.get_content_type()
        self.assertEqual("application/json", result)

    def test_content_type_from_accept(self):
        content_type = 'application/json'
        request = wsgi.Request.blank('/tests/123')
        request.headers["Accept"] = content_type
        result = request.best_match_content_type()
        self.assertEqual(content_type, result)

    def test_content_type_from_accept_best(self):
        request = wsgi.Request.blank('/tests/123')
        request.headers["Accept"] = "application/json"
        result = request.best_match_content_type()
        self.assertEqual("application/json", result)

        request = wsgi.Request.blank('/tests/123')
        request.headers["Accept"] = ("application/json; q=0.3, "
                                     "application/xml; q=0.9")
        result = request.best_match_content_type()
        self.assertEqual("application/json", result)

    def test_content_type_from_query_extension(self):
        request = wsgi.Request.blank('/tests/123.json')
        result = request.best_match_content_type()
        self.assertEqual("application/json", result)

        request = wsgi.Request.blank('/tests/123.invalid')
        result = request.best_match_content_type()
        self.assertEqual("application/json", result)

    def test_content_type_accept_and_query_extension(self):
        request = wsgi.Request.blank('/tests/123.json')
        request.headers["Accept"] = "application/xml"
        result = request.best_match_content_type()
        self.assertEqual("application/json", result)

    def test_content_type_accept_default(self):
        request = wsgi.Request.blank('/tests/123.unsupported')
        request.headers["Accept"] = "application/unsupported1"
        result = request.best_match_content_type()
        self.assertEqual("application/json", result)

    def test_context_with_neutron_context(self):
        ctxt = context.Context('fake_user', 'fake_tenant')
        self.req.environ['neutron.context'] = ctxt
        self.assertEqual(self.req.context, ctxt)

    def test_context_without_neutron_context(self):
        self.assertTrue(self.req.context.is_admin)

    def test_request_context_elevated(self):
        user_context = context.Context(
            'fake_user', 'fake_project', admin=False)
        self.assertFalse(user_context.is_admin)
        admin_context = user_context.elevated()
        self.assertFalse(user_context.is_admin)
        self.assertTrue(admin_context.is_admin)
        self.assertNotIn('admin', user_context.roles)
        self.assertIn('admin', admin_context.roles)

    def test_best_match_language(self):
        # Test that we are actually invoking language negotiation by webob
        request = wsgi.Request.blank('/')
        oslo_i18n.get_available_languages = mock.MagicMock()
        oslo_i18n.get_available_languages.return_value = ['known-language',
                                                          'es', 'zh']
        request.headers['Accept-Language'] = 'known-language'
        language = request.best_match_language()
        self.assertEqual('known-language', language)

        # If the Accept-Language header is an unknown language, missing or
        # empty, the best match locale should be None
        request.headers['Accept-Language'] = 'unknown-language'
        language = request.best_match_language()
self.assertIsNone(language) request.headers['Accept-Language'] = '' language = request.best_match_language() self.assertIsNone(language) request.headers.pop('Accept-Language') language = request.best_match_language() self.assertIsNone(language) class ResourceTestCase(base.BaseTestCase): @staticmethod def _get_deserializer(): return wsgi.JSONDeserializer() def test_unmapped_neutron_error_with_json(self): msg = u'\u7f51\u7edc' class TestException(n_exc.NeutronException): message = msg expected_res = {'body': { 'NeutronError': { 'type': 'TestException', 'message': msg, 'detail': ''}}} controller = mock.MagicMock() controller.test.side_effect = TestException() resource = webtest.TestApp(wsgi_resource.Resource(controller)) environ = {'wsgiorg.routing_args': (None, {'action': 'test', 'format': 'json'})} res = resource.get('', extra_environ=environ, expect_errors=True) self.assertEqual(exc.HTTPInternalServerError.code, res.status_int) self.assertEqual(expected_res, wsgi.JSONDeserializer().deserialize(res.body)) @mock.patch('oslo_i18n.translate') def test_unmapped_neutron_error_localized(self, mock_translation): msg_translation = 'Translated error' mock_translation.return_value = msg_translation msg = _('Unmapped error') class TestException(n_exc.NeutronException): message = msg controller = mock.MagicMock() controller.test.side_effect = TestException() resource = webtest.TestApp(wsgi_resource.Resource(controller)) environ = {'wsgiorg.routing_args': (None, {'action': 'test', 'format': 'json'})} res = resource.get('', extra_environ=environ, expect_errors=True) self.assertEqual(exc.HTTPInternalServerError.code, res.status_int) self.assertIn(msg_translation, str(wsgi.JSONDeserializer().deserialize(res.body))) def test_mapped_neutron_error_with_json(self): msg = u'\u7f51\u7edc' class TestException(n_exc.NeutronException): message = msg expected_res = {'body': { 'NeutronError': { 'type': 'TestException', 'message': msg, 'detail': ''}}} controller = mock.MagicMock() controller.test.side_effect = TestException() faults = {TestException: exc.HTTPGatewayTimeout} resource = webtest.TestApp(wsgi_resource.Resource(controller, faults=faults)) environ = {'wsgiorg.routing_args': (None, {'action': 'test', 'format': 'json'})} res = resource.get('', extra_environ=environ, expect_errors=True) self.assertEqual(exc.HTTPGatewayTimeout.code, res.status_int) self.assertEqual(expected_res, wsgi.JSONDeserializer().deserialize(res.body)) @mock.patch('oslo_i18n.translate') def test_mapped_neutron_error_localized(self, mock_translation): msg_translation = 'Translated error' mock_translation.return_value = msg_translation msg = _('Unmapped error') class TestException(n_exc.NeutronException): message = msg controller = mock.MagicMock() controller.test.side_effect = TestException() faults = {TestException: exc.HTTPGatewayTimeout} resource = webtest.TestApp(wsgi_resource.Resource(controller, faults=faults)) environ = {'wsgiorg.routing_args': (None, {'action': 'test', 'format': 'json'})} res = resource.get('', extra_environ=environ, expect_errors=True) self.assertEqual(exc.HTTPGatewayTimeout.code, res.status_int) self.assertIn(msg_translation, str(wsgi.JSONDeserializer().deserialize(res.body))) @staticmethod def _make_request_with_side_effect(side_effect): controller = mock.MagicMock() controller.test.side_effect = side_effect resource = webtest.TestApp(wsgi_resource.Resource(controller)) routing_args = {'action': 'test'} environ = {'wsgiorg.routing_args': (None, routing_args)} res = resource.get('', extra_environ=environ, 
expect_errors=True) return res def test_http_error(self): res = self._make_request_with_side_effect(exc.HTTPGatewayTimeout()) # verify that the exception structure is the one expected # by the python-neutronclient self.assertEqual(exc.HTTPGatewayTimeout().explanation, res.json['NeutronError']['message']) self.assertEqual('HTTPGatewayTimeout', res.json['NeutronError']['type']) self.assertEqual('', res.json['NeutronError']['detail']) self.assertEqual(exc.HTTPGatewayTimeout.code, res.status_int) def test_unhandled_error(self): expected_res = {'body': {'NeutronError': {'detail': '', 'message': _( 'Request Failed: internal server ' 'error while processing your request.'), 'type': 'HTTPInternalServerError'}}} res = self._make_request_with_side_effect(side_effect=Exception()) self.assertEqual(exc.HTTPInternalServerError.code, res.status_int) self.assertEqual(expected_res, self._get_deserializer().deserialize(res.body)) def test_not_implemented_error(self): expected_res = {'body': {'NeutronError': {'detail': '', 'message': _( 'The server has either erred or is ' 'incapable of performing the requested ' 'operation.'), 'type': 'HTTPNotImplemented'}}} res = self._make_request_with_side_effect(exc.HTTPNotImplemented()) self.assertEqual(exc.HTTPNotImplemented.code, res.status_int) self.assertEqual(expected_res, self._get_deserializer().deserialize(res.body)) def test_status_200(self): controller = mock.MagicMock() controller.test = lambda request: {'foo': 'bar'} resource = webtest.TestApp(wsgi_resource.Resource(controller)) environ = {'wsgiorg.routing_args': (None, {'action': 'test'})} res = resource.get('', extra_environ=environ) self.assertEqual(200, res.status_int) def test_status_204(self): controller = mock.MagicMock() controller.test = lambda request: {'foo': 'bar'} resource = webtest.TestApp(wsgi_resource.Resource(controller)) environ = {'wsgiorg.routing_args': (None, {'action': 'delete'})} res = resource.delete('', extra_environ=environ) self.assertEqual(204, res.status_int) def test_action_status(self): controller = mock.MagicMock() controller.test = lambda request: {'foo': 'bar'} action_status = {'test_200': 200, 'test_201': 201, 'test_204': 204} resource = webtest.TestApp( wsgi_resource.Resource(controller, action_status=action_status)) for action in action_status: environ = {'wsgiorg.routing_args': (None, {'action': action})} res = resource.get('', extra_environ=environ) self.assertEqual(action_status[action], res.status_int) def _test_error_log_level(self, expected_webob_exc, expect_log_info=False, use_fault_map=True, exc_raised=None): if not exc_raised: class TestException(n_exc.NeutronException): message = 'Test Exception' exc_raised = TestException controller = mock.MagicMock() controller.test.side_effect = exc_raised() faults = {exc_raised: expected_webob_exc} if use_fault_map else {} resource = webtest.TestApp(wsgi_resource.Resource(controller, faults)) environ = {'wsgiorg.routing_args': (None, {'action': 'test'})} with mock.patch.object(wsgi_resource, 'LOG') as log: res = resource.get('', extra_environ=environ, expect_errors=True) self.assertEqual(expected_webob_exc.code, res.status_int) self.assertEqual(expect_log_info, log.info.called) self.assertNotEqual(expect_log_info, log.exception.called) def test_4xx_error_logged_info_level(self): self._test_error_log_level(exc.HTTPNotFound, expect_log_info=True) def test_non_4xx_error_logged_exception_level(self): self._test_error_log_level(exc.HTTPServiceUnavailable, expect_log_info=False) def 
test_unmapped_error_logged_exception_level(self): self._test_error_log_level(exc.HTTPInternalServerError, expect_log_info=False, use_fault_map=False) def test_webob_4xx_logged_info_level(self): self._test_error_log_level(exc.HTTPNotFound, use_fault_map=False, expect_log_info=True, exc_raised=exc.HTTPNotFound) def test_webob_5xx_logged_info_level(self): self._test_error_log_level(exc.HTTPServiceUnavailable, use_fault_map=False, expect_log_info=False, exc_raised=exc.HTTPServiceUnavailable) def test_no_route_args(self): controller = mock.MagicMock() resource = webtest.TestApp(wsgi_resource.Resource(controller)) environ = {} res = resource.get('', extra_environ=environ, expect_errors=True) self.assertEqual(exc.HTTPInternalServerError.code, res.status_int) def test_post_with_body(self): controller = mock.MagicMock() controller.test = lambda request, body: {'foo': 'bar'} resource = webtest.TestApp(wsgi_resource.Resource(controller)) environ = {'wsgiorg.routing_args': (None, {'action': 'test'})} res = resource.post('', params='{"key": "val"}', extra_environ=environ) self.assertEqual(200, res.status_int) neutron-8.4.0/neutron/tests/unit/api/v2/__init__.py0000664000567000056710000000000013044372736023401 0ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/tests/unit/api/v2/test_attributes.py0000664000567000056710000012365113044372760025106 0ustar jenkinsjenkins00000000000000# Copyright 2012 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import string import mock import netaddr from oslo_utils import uuidutils import testtools import webob.exc from neutron._i18n import _ from neutron.api.v2 import attributes from neutron.common import constants from neutron.common import exceptions as n_exc from neutron import context from neutron.tests import base from neutron.tests import tools class TestAttributes(base.BaseTestCase): def _construct_dict_and_constraints(self): """Constructs a test dictionary and a definition of constraints. 
:return: A (dictionary, constraint) tuple """ constraints = {'key1': {'type:values': ['val1', 'val2'], 'required': True}, 'key2': {'type:string': None, 'required': False}, 'key3': {'type:dict': {'k4': {'type:string': None, 'required': True}}, 'required': True}} dictionary = {'key1': 'val1', 'key2': 'a string value', 'key3': {'k4': 'a string value'}} return dictionary, constraints def test_is_attr_set(self): data = attributes.ATTR_NOT_SPECIFIED self.assertIs(attributes.is_attr_set(data), False) data = None self.assertIs(attributes.is_attr_set(data), False) data = "I'm set" self.assertIs(attributes.is_attr_set(data), True) def test_validate_values(self): msg = attributes._validate_values(4, [4, 6]) self.assertIsNone(msg) msg = attributes._validate_values(4, (4, 6)) self.assertIsNone(msg) msg = attributes._validate_values(7, [4, 6]) self.assertEqual("'7' is not in [4, 6]", msg) msg = attributes._validate_values(7, (4, 6)) self.assertEqual("'7' is not in (4, 6)", msg) def test_validate_not_empty_string(self): msg = attributes._validate_not_empty_string(' ', None) self.assertEqual(u"' ' Blank strings are not permitted", msg) def test_validate_not_empty_string_or_none(self): msg = attributes._validate_not_empty_string_or_none(' ', None) self.assertEqual(u"' ' Blank strings are not permitted", msg) msg = attributes._validate_not_empty_string_or_none(None, None) self.assertIsNone(msg) def test_validate_string_or_none(self): msg = attributes._validate_not_empty_string_or_none('test', None) self.assertIsNone(msg) msg = attributes._validate_not_empty_string_or_none(None, None) self.assertIsNone(msg) def test_validate_string(self): msg = attributes._validate_string(None, None) self.assertEqual("'None' is not a valid string", msg) # 0 == len(data) == max_len msg = attributes._validate_string("", 0) self.assertIsNone(msg) # 0 == len(data) < max_len msg = attributes._validate_string("", 9) self.assertIsNone(msg) # 0 < len(data) < max_len msg = attributes._validate_string("123456789", 10) self.assertIsNone(msg) # 0 < len(data) == max_len msg = attributes._validate_string("123456789", 9) self.assertIsNone(msg) # 0 < max_len < len(data) msg = attributes._validate_string("1234567890", 9) self.assertEqual("'1234567890' exceeds maximum length of 9", msg) msg = attributes._validate_string("123456789", None) self.assertIsNone(msg) def test_validate_list_of_unique_strings(self): data = "TEST" msg = attributes.validate_list_of_unique_strings(data, None) self.assertEqual("'TEST' is not a list", msg) data = ["TEST01", "TEST02", "TEST01"] msg = attributes.validate_list_of_unique_strings(data, None) self.assertEqual( "Duplicate items in the list: 'TEST01, TEST02, TEST01'", msg) data = ["12345678", "123456789"] msg = attributes.validate_list_of_unique_strings(data, 8) self.assertEqual("'123456789' exceeds maximum length of 8", msg) data = ["TEST01", "TEST02", "TEST03"] msg = attributes.validate_list_of_unique_strings(data, None) self.assertIsNone(msg) def test_validate_no_whitespace(self): data = 'no_white_space' result = attributes._validate_no_whitespace(data) self.assertEqual(data, result) self.assertRaises(n_exc.InvalidInput, attributes._validate_no_whitespace, 'i have whitespace') self.assertRaises(n_exc.InvalidInput, attributes._validate_no_whitespace, 'i\thave\twhitespace') for ws in string.whitespace: self.assertRaises(n_exc.InvalidInput, attributes._validate_no_whitespace, '%swhitespace-at-head' % ws) self.assertRaises(n_exc.InvalidInput, attributes._validate_no_whitespace, 'whitespace-at-tail%s' % ws) def 
test_validate_range(self): msg = attributes._validate_range(1, [1, 9]) self.assertIsNone(msg) msg = attributes._validate_range(5, [1, 9]) self.assertIsNone(msg) msg = attributes._validate_range(9, [1, 9]) self.assertIsNone(msg) msg = attributes._validate_range(1, (1, 9)) self.assertIsNone(msg) msg = attributes._validate_range(5, (1, 9)) self.assertIsNone(msg) msg = attributes._validate_range(9, (1, 9)) self.assertIsNone(msg) msg = attributes._validate_range(0, [1, 9]) self.assertEqual("'0' is too small - must be at least '1'", msg) msg = attributes._validate_range(10, (1, 9)) self.assertEqual("'10' is too large - must be no larger than '9'", msg) msg = attributes._validate_range("bogus", (1, 9)) self.assertEqual("'bogus' is not an integer", msg) msg = attributes._validate_range(10, (attributes.UNLIMITED, attributes.UNLIMITED)) self.assertIsNone(msg) msg = attributes._validate_range(10, (1, attributes.UNLIMITED)) self.assertIsNone(msg) msg = attributes._validate_range(1, (attributes.UNLIMITED, 9)) self.assertIsNone(msg) msg = attributes._validate_range(-1, (0, attributes.UNLIMITED)) self.assertEqual("'-1' is too small - must be at least '0'", msg) msg = attributes._validate_range(10, (attributes.UNLIMITED, 9)) self.assertEqual("'10' is too large - must be no larger than '9'", msg) def _test_validate_mac_address(self, validator, allow_none=False): mac_addr = "ff:16:3e:4f:00:00" msg = validator(mac_addr) self.assertIsNone(msg) mac_addr = "ffa:16:3e:4f:00:00" msg = validator(mac_addr) err_msg = "'%s' is not a valid MAC address" self.assertEqual(err_msg % mac_addr, msg) for invalid_mac_addr in constants.INVALID_MAC_ADDRESSES: msg = validator(invalid_mac_addr) self.assertEqual(err_msg % invalid_mac_addr, msg) mac_addr = "123" msg = validator(mac_addr) self.assertEqual(err_msg % mac_addr, msg) mac_addr = None msg = validator(mac_addr) if allow_none: self.assertIsNone(msg) else: self.assertEqual(err_msg % mac_addr, msg) mac_addr = "ff:16:3e:4f:00:00\r" msg = validator(mac_addr) self.assertEqual(err_msg % mac_addr, msg) def test_validate_mac_address(self): self._test_validate_mac_address(attributes._validate_mac_address) def test_validate_mac_address_or_none(self): self._test_validate_mac_address( attributes._validate_mac_address_or_none, allow_none=True) def test_validate_ip_address(self): ip_addr = '1.1.1.1' msg = attributes._validate_ip_address(ip_addr) self.assertIsNone(msg) ip_addr = '1111.1.1.1' msg = attributes._validate_ip_address(ip_addr) self.assertEqual("'%s' is not a valid IP address" % ip_addr, msg) # Depending on platform to run UTs, this case might or might not be # an equivalent to test_validate_ip_address_bsd. 
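        # (netaddr on NetBSD and OS X inherits libc's lenient inet_aton(),
        # which can parse a long run of digits as an address; the mocked
        # test_validate_ip_address_bsd below pins the rejection of
        # '1' * 59 regardless of the platform the tests run on.)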
ip_addr = '1' * 59 msg = attributes._validate_ip_address(ip_addr) self.assertEqual("'%s' is not a valid IP address" % ip_addr, msg) ip_addr = '1.1.1.1 has whitespace' msg = attributes._validate_ip_address(ip_addr) self.assertEqual("'%s' is not a valid IP address" % ip_addr, msg) ip_addr = '111.1.1.1\twhitespace' msg = attributes._validate_ip_address(ip_addr) self.assertEqual("'%s' is not a valid IP address" % ip_addr, msg) ip_addr = '111.1.1.1\nwhitespace' msg = attributes._validate_ip_address(ip_addr) self.assertEqual("'%s' is not a valid IP address" % ip_addr, msg) for ws in string.whitespace: ip_addr = '%s111.1.1.1' % ws msg = attributes._validate_ip_address(ip_addr) self.assertEqual("'%s' is not a valid IP address" % ip_addr, msg) for ws in string.whitespace: ip_addr = '111.1.1.1%s' % ws msg = attributes._validate_ip_address(ip_addr) self.assertEqual("'%s' is not a valid IP address" % ip_addr, msg) def test_validate_ip_address_with_leading_zero(self): ip_addr = '1.1.1.01' expected_msg = ("'%(data)s' is not an accepted IP address, " "'%(ip)s' is recommended") msg = attributes._validate_ip_address(ip_addr) self.assertEqual(expected_msg % {"data": ip_addr, "ip": '1.1.1.1'}, msg) ip_addr = '1.1.1.011' msg = attributes._validate_ip_address(ip_addr) self.assertEqual(expected_msg % {"data": ip_addr, "ip": '1.1.1.11'}, msg) ip_addr = '1.1.1.09' msg = attributes._validate_ip_address(ip_addr) self.assertEqual(expected_msg % {"data": ip_addr, "ip": '1.1.1.9'}, msg) ip_addr = "fe80:0:0:0:0:0:0:0001" msg = attributes._validate_ip_address(ip_addr) self.assertIsNone(msg) def test_validate_ip_address_bsd(self): # NOTE(yamamoto): On NetBSD and OS X, netaddr.IPAddress() accepts # '1' * 59 as a valid address. The behaviour is inherited from # libc behaviour there. This test ensures that our validator reject # such addresses on such platforms by mocking netaddr to emulate # the behaviour. 
ip_addr = '1' * 59 with mock.patch('netaddr.IPAddress') as ip_address_cls: msg = attributes._validate_ip_address(ip_addr) ip_address_cls.assert_called_once_with(ip_addr, flags=netaddr.core.ZEROFILL) self.assertEqual("'%s' is not a valid IP address" % ip_addr, msg) def test_validate_ip_pools(self): pools = [[{'end': '10.0.0.254'}], [{'start': '10.0.0.254'}], [{'start': '1000.0.0.254', 'end': '1.1.1.1'}], [{'start': '10.0.0.2', 'end': '10.0.0.254', 'forza': 'juve'}], [{'start': '10.0.0.2', 'end': '10.0.0.254'}, {'end': '10.0.0.254'}], [None], None] for pool in pools: msg = attributes._validate_ip_pools(pool) self.assertIsNotNone(msg) pools = [[{'end': '10.0.0.254', 'start': '10.0.0.2'}, {'start': '11.0.0.2', 'end': '11.1.1.1'}], [{'start': '11.0.0.2', 'end': '11.0.0.100'}]] for pool in pools: msg = attributes._validate_ip_pools(pool) self.assertIsNone(msg) invalid_ip = '10.0.0.2\r' pools = [[{'end': '10.0.0.254', 'start': invalid_ip}]] for pool in pools: msg = attributes._validate_ip_pools(pool) self.assertEqual("'%s' is not a valid IP address" % invalid_ip, msg) def test_validate_fixed_ips(self): fixed_ips = [ {'data': [{'subnet_id': '00000000-ffff-ffff-ffff-000000000000', 'ip_address': '1111.1.1.1'}], 'error_msg': "'1111.1.1.1' is not a valid IP address"}, {'data': [{'subnet_id': 'invalid', 'ip_address': '1.1.1.1'}], 'error_msg': "'invalid' is not a valid UUID"}, {'data': None, 'error_msg': "Invalid data format for fixed IP: 'None'"}, {'data': "1.1.1.1", 'error_msg': "Invalid data format for fixed IP: '1.1.1.1'"}, {'data': ['00000000-ffff-ffff-ffff-000000000000', '1.1.1.1'], 'error_msg': "Invalid data format for fixed IP: " "'00000000-ffff-ffff-ffff-000000000000'"}, {'data': [['00000000-ffff-ffff-ffff-000000000000', '1.1.1.1']], 'error_msg': "Invalid data format for fixed IP: " "'['00000000-ffff-ffff-ffff-000000000000', " "'1.1.1.1']'"}, {'data': [{'subnet_id': '00000000-0fff-ffff-ffff-000000000000', 'ip_address': '1.1.1.1'}, {'subnet_id': '00000000-ffff-ffff-ffff-000000000000', 'ip_address': '1.1.1.1'}], 'error_msg': "Duplicate IP address '1.1.1.1'"}] for fixed in fixed_ips: msg = attributes._validate_fixed_ips(fixed['data']) self.assertEqual(fixed['error_msg'], msg) fixed_ips = [[{'subnet_id': '00000000-ffff-ffff-ffff-000000000000', 'ip_address': '1.1.1.1'}], [{'subnet_id': '00000000-0fff-ffff-ffff-000000000000', 'ip_address': '1.1.1.1'}, {'subnet_id': '00000000-ffff-ffff-ffff-000000000000', 'ip_address': '1.1.1.2'}]] for fixed in fixed_ips: msg = attributes._validate_fixed_ips(fixed) self.assertIsNone(msg) def test_validate_nameservers(self): ns_pools = [['1.1.1.2', '1.1.1.2'], ['www.hostname.com', 'www.hostname.com'], ['1000.0.0.1'], ['www.hostname.com'], ['www.great.marathons.to.travel'], ['valid'], ['77.hostname.com'], ['1' * 59], ['www.internal.hostname.com'], None] for ns in ns_pools: msg = attributes._validate_nameservers(ns, None) self.assertIsNotNone(msg) ns_pools = [['100.0.0.2'], ['1.1.1.1', '1.1.1.2']] for ns in ns_pools: msg = attributes._validate_nameservers(ns, None) self.assertIsNone(msg) def test_validate_hostroutes(self): hostroute_pools = [[{'destination': '100.0.0.0/24'}], [{'nexthop': '10.0.2.20'}], [{'nexthop': '10.0.2.20', 'forza': 'juve', 'destination': '100.0.0.0/8'}], [{'nexthop': '1110.0.2.20', 'destination': '100.0.0.0/8'}], [{'nexthop': '10.0.2.20', 'destination': '100.0.0.0'}], [{'nexthop': '10.0.2.20', 'destination': '100.0.0.0/8'}, {'nexthop': '10.0.2.20', 'destination': '100.0.0.0/8'}], [None], None] for host_routes in hostroute_pools: msg = 
attributes._validate_hostroutes(host_routes, None) self.assertIsNotNone(msg) hostroute_pools = [[{'destination': '100.0.0.0/24', 'nexthop': '10.0.2.20'}], [{'nexthop': '10.0.2.20', 'destination': '100.0.0.0/8'}, {'nexthop': '10.0.2.20', 'destination': '101.0.0.0/8'}]] for host_routes in hostroute_pools: msg = attributes._validate_hostroutes(host_routes, None) self.assertIsNone(msg) def test_validate_ip_address_or_none(self): ip_addr = None msg = attributes._validate_ip_address_or_none(ip_addr) self.assertIsNone(msg) ip_addr = '1.1.1.1' msg = attributes._validate_ip_address_or_none(ip_addr) self.assertIsNone(msg) ip_addr = '1111.1.1.1' msg = attributes._validate_ip_address_or_none(ip_addr) self.assertEqual("'%s' is not a valid IP address" % ip_addr, msg) def test_uuid_pattern(self): data = 'garbage' msg = attributes._validate_regex(data, attributes.UUID_PATTERN) self.assertIsNotNone(msg) data = '00000000-ffff-ffff-ffff-000000000000' msg = attributes._validate_regex(data, attributes.UUID_PATTERN) self.assertIsNone(msg) def test_mac_pattern(self): # Valid - 3 octets base_mac = "fa:16:3e:00:00:00" msg = attributes._validate_regex(base_mac, attributes.MAC_PATTERN) self.assertIsNone(msg) # Valid - 4 octets base_mac = "fa:16:3e:4f:00:00" msg = attributes._validate_regex(base_mac, attributes.MAC_PATTERN) self.assertIsNone(msg) # Invalid - not unicast base_mac = "01:16:3e:4f:00:00" msg = attributes._validate_regex(base_mac, attributes.MAC_PATTERN) self.assertIsNotNone(msg) # Invalid - invalid format base_mac = "a:16:3e:4f:00:00" msg = attributes._validate_regex(base_mac, attributes.MAC_PATTERN) self.assertIsNotNone(msg) # Invalid - invalid format base_mac = "ffa:16:3e:4f:00:00" msg = attributes._validate_regex(base_mac, attributes.MAC_PATTERN) self.assertIsNotNone(msg) # Invalid - invalid format base_mac = "01163e4f0000" msg = attributes._validate_regex(base_mac, attributes.MAC_PATTERN) self.assertIsNotNone(msg) # Invalid - invalid format base_mac = "01-16-3e-4f-00-00" msg = attributes._validate_regex(base_mac, attributes.MAC_PATTERN) self.assertIsNotNone(msg) # Invalid - invalid format base_mac = "00:16:3:f:00:00" msg = attributes._validate_regex(base_mac, attributes.MAC_PATTERN) self.assertIsNotNone(msg) # Invalid - invalid format base_mac = "12:3:4:5:67:89ab" msg = attributes._validate_regex(base_mac, attributes.MAC_PATTERN) self.assertIsNotNone(msg) def _test_validate_subnet(self, validator, allow_none=False): # Valid - IPv4 cidr = "10.0.2.0/24" msg = validator(cidr, None) self.assertIsNone(msg) # Valid - IPv6 without final octets cidr = "fe80::/24" msg = validator(cidr, None) self.assertIsNone(msg) # Valid - IPv6 with final octets cidr = "fe80::/24" msg = validator(cidr, None) self.assertIsNone(msg) # Valid - uncompressed ipv6 address cidr = "fe80:0:0:0:0:0:0:0/128" msg = validator(cidr, None) self.assertIsNone(msg) # Valid - ipv6 address with multiple consecutive zero cidr = "2001:0db8:0:0:1::1/128" msg = validator(cidr, None) self.assertIsNone(msg) # Valid - ipv6 address with multiple consecutive zero cidr = "2001:0db8::1:0:0:1/128" msg = validator(cidr, None) self.assertIsNone(msg) # Valid - ipv6 address with multiple consecutive zero cidr = "2001::0:1:0:0:1100/120" msg = validator(cidr, None) self.assertIsNone(msg) # Invalid - abbreviated ipv4 address cidr = "10/24" msg = validator(cidr, None) error = _("'%(data)s' isn't a recognized IP subnet cidr," " '%(cidr)s' is recommended") % {"data": cidr, "cidr": "10.0.0.0/24"} self.assertEqual(error, msg) # Invalid - IPv4 missing mask cidr = 
"10.0.2.0" msg = validator(cidr, None) error = _("'%(data)s' isn't a recognized IP subnet cidr," " '%(cidr)s' is recommended") % {"data": cidr, "cidr": "10.0.2.0/32"} self.assertEqual(error, msg) # Valid - IPv4 with non-zero masked bits is ok for i in range(1, 255): cidr = "192.168.1.%s/24" % i msg = validator(cidr, None) self.assertIsNone(msg) # Invalid - IPv6 without final octets, missing mask cidr = "fe80::" msg = validator(cidr, None) error = _("'%(data)s' isn't a recognized IP subnet cidr," " '%(cidr)s' is recommended") % {"data": cidr, "cidr": "fe80::/128"} self.assertEqual(error, msg) # Invalid - IPv6 with final octets, missing mask cidr = "fe80::0" msg = validator(cidr, None) error = _("'%(data)s' isn't a recognized IP subnet cidr," " '%(cidr)s' is recommended") % {"data": cidr, "cidr": "fe80::/128"} self.assertEqual(error, msg) # Invalid - Address format error cidr = 'invalid' msg = validator(cidr, None) error = "'%s' is not a valid IP subnet" % cidr self.assertEqual(error, msg) cidr = None msg = validator(cidr, None) if allow_none: self.assertIsNone(msg) else: error = "'%s' is not a valid IP subnet" % cidr self.assertEqual(error, msg) # Invalid - IPv4 with trailing CR cidr = "10.0.2.0/24\r" msg = validator(cidr, None) error = "'%s' is not a valid IP subnet" % cidr self.assertEqual(error, msg) def test_validate_subnet(self): self._test_validate_subnet(attributes._validate_subnet) def test_validate_subnet_or_none(self): self._test_validate_subnet(attributes._validate_subnet_or_none, allow_none=True) def _test_validate_regex(self, validator, allow_none=False): pattern = '[hc]at' data = None msg = validator(data, pattern) if allow_none: self.assertIsNone(msg) else: self.assertEqual("'None' is not a valid input", msg) data = 'bat' msg = validator(data, pattern) self.assertEqual("'%s' is not a valid input" % data, msg) data = 'hat' msg = validator(data, pattern) self.assertIsNone(msg) data = 'cat' msg = validator(data, pattern) self.assertIsNone(msg) def test_validate_regex(self): self._test_validate_regex(attributes._validate_regex) def test_validate_regex_or_none(self): self._test_validate_regex(attributes._validate_regex_or_none, allow_none=True) def test_validate_uuid(self): invalid_uuids = [None, 123, '123', 't5069610-744b-42a7-8bd8-ceac1a229cd4', 'e5069610-744bb-42a7-8bd8-ceac1a229cd4'] for uuid in invalid_uuids: msg = attributes._validate_uuid(uuid) error = "'%s' is not a valid UUID" % uuid self.assertEqual(error, msg) msg = attributes._validate_uuid('00000000-ffff-ffff-ffff-000000000000') self.assertIsNone(msg) def test__validate_list_of_items(self): # check not a list items = [None, 123, 'e5069610-744b-42a7-8bd8-ceac1a229cd4', '12345678123456781234567812345678', {'uuid': 'e5069610-744b-42a7-8bd8-ceac1a229cd4'}] for item in items: msg = attributes._validate_list_of_items(mock.Mock(), item) error = "'%s' is not a list" % item self.assertEqual(error, msg) # check duplicate items in a list duplicate_items = ['e5069610-744b-42a7-8bd8-ceac1a229cd4', 'f3eeab00-8367-4524-b662-55e64d4cacb5', 'e5069610-744b-42a7-8bd8-ceac1a229cd4'] msg = attributes._validate_list_of_items(mock.Mock(), duplicate_items) error = ("Duplicate items in the list: " "'%s'" % ', '.join(duplicate_items)) self.assertEqual(error, msg) # check valid lists valid_lists = [[], [1, 2, 3], ['a', 'b', 'c']] for list_obj in valid_lists: msg = attributes._validate_list_of_items( mock.Mock(return_value=None), list_obj) self.assertIsNone(msg) def test_validate_dict_type(self): for value in (None, True, '1', []): 
self.assertEqual("'%s' is not a dictionary" % value, attributes._validate_dict(value)) def test_validate_dict_without_constraints(self): msg = attributes._validate_dict({}) self.assertIsNone(msg) # Validate a dictionary without constraints. msg = attributes._validate_dict({'key': 'value'}) self.assertIsNone(msg) def test_validate_a_valid_dict_with_constraints(self): dictionary, constraints = self._construct_dict_and_constraints() msg = attributes._validate_dict(dictionary, constraints) self.assertIsNone(msg, 'Validation of a valid dictionary failed.') def test_validate_dict_with_invalid_validator(self): dictionary, constraints = self._construct_dict_and_constraints() constraints['key1'] = {'type:unsupported': None, 'required': True} msg = attributes._validate_dict(dictionary, constraints) self.assertEqual("Validator 'type:unsupported' does not exist.", msg) def test_validate_dict_not_required_keys(self): dictionary, constraints = self._construct_dict_and_constraints() del dictionary['key2'] msg = attributes._validate_dict(dictionary, constraints) self.assertIsNone(msg, 'Field that was not required by the specs was' 'required by the validator.') def test_validate_dict_required_keys(self): dictionary, constraints = self._construct_dict_and_constraints() del dictionary['key1'] msg = attributes._validate_dict(dictionary, constraints) self.assertIn('Expected keys:', msg) def test_validate_dict_wrong_values(self): dictionary, constraints = self._construct_dict_and_constraints() dictionary['key1'] = 'UNSUPPORTED' msg = attributes._validate_dict(dictionary, constraints) self.assertIsNotNone(msg) def test_validate_dict_convert_boolean(self): dictionary, constraints = self._construct_dict_and_constraints() constraints['key_bool'] = { 'type:boolean': None, 'required': False, 'convert_to': attributes.convert_to_boolean} dictionary['key_bool'] = 'true' msg = attributes._validate_dict(dictionary, constraints) self.assertIsNone(msg) # Explicitly comparing with literal 'True' as assertTrue # succeeds also for 'true' self.assertIs(True, dictionary['key_bool']) def test_subdictionary(self): dictionary, constraints = self._construct_dict_and_constraints() del dictionary['key3']['k4'] dictionary['key3']['k5'] = 'a string value' msg = attributes._validate_dict(dictionary, constraints) self.assertIn('Expected keys:', msg) def test_validate_dict_or_none(self): dictionary, constraints = self._construct_dict_and_constraints() # Check whether None is a valid value. msg = attributes._validate_dict_or_none(None, constraints) self.assertIsNone(msg, 'Validation of a None dictionary failed.') # Check validation of a regular dictionary. msg = attributes._validate_dict_or_none(dictionary, constraints) self.assertIsNone(msg, 'Validation of a valid dictionary failed.') def test_validate_dict_or_empty(self): dictionary, constraints = self._construct_dict_and_constraints() # Check whether an empty dictionary is valid. msg = attributes._validate_dict_or_empty({}, constraints) self.assertIsNone(msg, 'Validation of a None dictionary failed.') # Check validation of a regular dictionary. 
msg = attributes._validate_dict_or_none(dictionary, constraints) self.assertIsNone(msg, 'Validation of a valid dictionary failed.') self.assertIsNone(msg, 'Validation of a valid dictionary failed.') def test_validate_non_negative(self): for value in (-1, '-2'): self.assertEqual("'%s' should be non-negative" % value, attributes._validate_non_negative(value)) for value in (0, 1, '2', True, False): msg = attributes._validate_non_negative(value) self.assertIsNone(msg) class TestConvertToBoolean(base.BaseTestCase): def test_convert_to_boolean_bool(self): self.assertIs(attributes.convert_to_boolean(True), True) self.assertIs(attributes.convert_to_boolean(False), False) def test_convert_to_boolean_int(self): self.assertIs(attributes.convert_to_boolean(0), False) self.assertIs(attributes.convert_to_boolean(1), True) self.assertRaises(n_exc.InvalidInput, attributes.convert_to_boolean, 7) def test_convert_to_boolean_str(self): self.assertIs(attributes.convert_to_boolean('True'), True) self.assertIs(attributes.convert_to_boolean('true'), True) self.assertIs(attributes.convert_to_boolean('False'), False) self.assertIs(attributes.convert_to_boolean('false'), False) self.assertIs(attributes.convert_to_boolean('0'), False) self.assertIs(attributes.convert_to_boolean('1'), True) self.assertRaises(n_exc.InvalidInput, attributes.convert_to_boolean, '7') class TestConvertToInt(base.BaseTestCase): def test_convert_to_int_int(self): self.assertEqual(-1, attributes.convert_to_int(-1)) self.assertEqual(0, attributes.convert_to_int(0)) self.assertEqual(1, attributes.convert_to_int(1)) def test_convert_to_int_if_not_none(self): self.assertEqual(-1, attributes.convert_to_int_if_not_none(-1)) self.assertEqual(0, attributes.convert_to_int_if_not_none(0)) self.assertEqual(1, attributes.convert_to_int_if_not_none(1)) self.assertIsNone(attributes.convert_to_int_if_not_none(None)) def test_convert_to_int_str(self): self.assertEqual(4, attributes.convert_to_int('4')) self.assertEqual(6, attributes.convert_to_int('6')) self.assertRaises(n_exc.InvalidInput, attributes.convert_to_int, 'garbage') def test_convert_to_int_none(self): self.assertRaises(n_exc.InvalidInput, attributes.convert_to_int, None) def test_convert_none_to_empty_list_none(self): self.assertEqual([], attributes.convert_none_to_empty_list(None)) def test_convert_none_to_empty_dict(self): self.assertEqual({}, attributes.convert_none_to_empty_dict(None)) def test_convert_none_to_empty_list_value(self): values = ['1', 3, [], [1], {}, {'a': 3}] for value in values: self.assertEqual( value, attributes.convert_none_to_empty_list(value)) class TestConvertToFloat(base.BaseTestCase): # NOTE: the routine being tested here is a plugin-specific extension # module. 
    # As the plugin split proceeds towards its second phase, this test
    # should either be removed, or the validation routine moved into
    # neutron.api.v2.attributes
    def test_convert_to_float_positive_value(self):
        self.assertEqual(
            1.111, attributes.convert_to_positive_float_or_none(1.111))
        self.assertEqual(1, attributes.convert_to_positive_float_or_none(1))
        self.assertEqual(0, attributes.convert_to_positive_float_or_none(0))

    def test_convert_to_float_negative_value(self):
        self.assertRaises(n_exc.InvalidInput,
                          attributes.convert_to_positive_float_or_none,
                          -1.11)

    def test_convert_to_float_string(self):
        self.assertEqual(4, attributes.convert_to_positive_float_or_none('4'))
        self.assertEqual(
            4.44, attributes.convert_to_positive_float_or_none('4.44'))
        self.assertRaises(n_exc.InvalidInput,
                          attributes.convert_to_positive_float_or_none,
                          'garbage')

    def test_convert_to_float_none_value(self):
        self.assertIsNone(attributes.convert_to_positive_float_or_none(None))


class TestConvertKvp(base.BaseTestCase):
    def test_convert_kvp_list_to_dict_succeeds_for_missing_values(self):
        result = attributes.convert_kvp_list_to_dict(['True'])
        self.assertEqual({}, result)

    def test_convert_kvp_list_to_dict_succeeds_for_multiple_values(self):
        result = attributes.convert_kvp_list_to_dict(
            ['a=b', 'a=c', 'a=c', 'b=a'])
        expected = {'a': tools.UnorderedList(['c', 'b']), 'b': ['a']}
        self.assertEqual(expected, result)

    def test_convert_kvp_list_to_dict_succeeds_for_values(self):
        result = attributes.convert_kvp_list_to_dict(['a=b', 'c=d'])
        self.assertEqual({'a': ['b'], 'c': ['d']}, result)

    def test_convert_kvp_str_to_list_fails_for_missing_key(self):
        with testtools.ExpectedException(n_exc.InvalidInput):
            attributes.convert_kvp_str_to_list('=a')

    def test_convert_kvp_str_to_list_fails_for_missing_equals(self):
        with testtools.ExpectedException(n_exc.InvalidInput):
            attributes.convert_kvp_str_to_list('a')

    def test_convert_kvp_str_to_list_succeeds_for_one_equals(self):
        result = attributes.convert_kvp_str_to_list('a=')
        self.assertEqual(['a', ''], result)

    def test_convert_kvp_str_to_list_succeeds_for_two_equals(self):
        result = attributes.convert_kvp_str_to_list('a=a=a')
        self.assertEqual(['a', 'a=a'], result)


class TestConvertToList(base.BaseTestCase):
    def test_convert_to_empty_list(self):
        for item in (None, [], (), {}):
            self.assertEqual([], attributes.convert_to_list(item))

    def test_convert_to_list_string(self):
        for item in ('', 'foo'):
            self.assertEqual([item], attributes.convert_to_list(item))

    def test_convert_to_list_iterable(self):
        for item in ([None], [1, 2, 3], (1, 2, 3), set([1, 2, 3]), ['foo']):
            self.assertEqual(list(item), attributes.convert_to_list(item))

    def test_convert_to_list_non_iterable(self):
        for item in (True, False, 1, 1.2, object()):
            self.assertEqual([item], attributes.convert_to_list(item))


class TestResDict(base.BaseTestCase):
    class _MyException(Exception):
        pass
    _EXC_CLS = _MyException

    def _test_fill_default_value(self, attr_info, expected, res_dict):
        attributes.fill_default_value(attr_info, res_dict)
        self.assertEqual(expected, res_dict)

    def test_fill_default_value(self):
        attr_info = {
            'key': {
                'allow_post': True,
                'default': attributes.ATTR_NOT_SPECIFIED,
            },
        }
        self._test_fill_default_value(attr_info, {'key': 'X'}, {'key': 'X'})
        self._test_fill_default_value(
            attr_info, {'key': attributes.ATTR_NOT_SPECIFIED}, {})

        attr_info = {
            'key': {
                'allow_post': True,
            },
        }
        self._test_fill_default_value(attr_info, {'key': 'X'}, {'key': 'X'})
        self.assertRaises(ValueError, self._test_fill_default_value,
                          attr_info, {'key': 'X'}, {})
self.assertRaises(self._EXC_CLS, attributes.fill_default_value, attr_info, {}, self._EXC_CLS) attr_info = { 'key': { 'allow_post': False, }, } self.assertRaises(ValueError, self._test_fill_default_value, attr_info, {'key': 'X'}, {'key': 'X'}) self._test_fill_default_value(attr_info, {}, {}) self.assertRaises(self._EXC_CLS, attributes.fill_default_value, attr_info, {'key': 'X'}, self._EXC_CLS) def _test_convert_value(self, attr_info, expected, res_dict): attributes.convert_value(attr_info, res_dict) self.assertEqual(expected, res_dict) def test_convert_value(self): attr_info = { 'key': { }, } self._test_convert_value(attr_info, {'key': attributes.ATTR_NOT_SPECIFIED}, {'key': attributes.ATTR_NOT_SPECIFIED}) self._test_convert_value(attr_info, {'key': 'X'}, {'key': 'X'}) self._test_convert_value(attr_info, {'other_key': 'X'}, {'other_key': 'X'}) attr_info = { 'key': { 'convert_to': attributes.convert_to_int, }, } self._test_convert_value(attr_info, {'key': attributes.ATTR_NOT_SPECIFIED}, {'key': attributes.ATTR_NOT_SPECIFIED}) self._test_convert_value(attr_info, {'key': 1}, {'key': '1'}) self._test_convert_value(attr_info, {'key': 1}, {'key': 1}) self.assertRaises(n_exc.InvalidInput, self._test_convert_value, attr_info, {'key': 1}, {'key': 'a'}) attr_info = { 'key': { 'validate': {'type:uuid': None}, }, } self._test_convert_value(attr_info, {'key': attributes.ATTR_NOT_SPECIFIED}, {'key': attributes.ATTR_NOT_SPECIFIED}) uuid_str = '01234567-1234-1234-1234-1234567890ab' self._test_convert_value(attr_info, {'key': uuid_str}, {'key': uuid_str}) self.assertRaises(ValueError, self._test_convert_value, attr_info, {'key': 1}, {'key': 1}) self.assertRaises(self._EXC_CLS, attributes.convert_value, attr_info, {'key': 1}, self._EXC_CLS) def test_populate_tenant_id(self): tenant_id_1 = uuidutils.generate_uuid() tenant_id_2 = uuidutils.generate_uuid() # apart from the admin, nobody can create a res on behalf of another # tenant ctx = context.Context(user_id=None, tenant_id=tenant_id_1) res_dict = {'tenant_id': tenant_id_2} self.assertRaises(webob.exc.HTTPBadRequest, attributes.populate_tenant_id, ctx, res_dict, None, None) ctx.is_admin = True self.assertIsNone(attributes.populate_tenant_id(ctx, res_dict, None, None)) # for each create request, the tenant_id should be added to the # req body res_dict2 = {} attributes.populate_tenant_id(ctx, res_dict2, None, True) self.assertEqual({'tenant_id': ctx.tenant_id}, res_dict2) # if the tenant_id is mandatory for the resource and not specified # in the request nor in the context, an exception should be raised res_dict3 = {} attr_info = {'tenant_id': {'allow_post': True}, } ctx.tenant_id = None self.assertRaises(webob.exc.HTTPBadRequest, attributes.populate_tenant_id, ctx, res_dict3, attr_info, True) class TestHelpers(base.DietTestCase): def _verify_port_attributes(self, attrs): for test_attribute in ('id', 'name', 'mac_address', 'network_id', 'tenant_id', 'fixed_ips', 'status'): self.assertIn(test_attribute, attrs) def test_get_collection_info(self): attrs = attributes.get_collection_info('ports') self._verify_port_attributes(attrs) def test_get_collection_info_missing(self): self.assertFalse(attributes.get_collection_info('meh')) def test_get_resource_info(self): attributes.REVERSED_PLURALS.pop('port', None) attrs = attributes.get_resource_info('port') self._verify_port_attributes(attrs) # verify side effect self.assertIn('port', attributes.REVERSED_PLURALS) def test_get_resource_info_missing(self): self.assertFalse(attributes.get_resource_info('meh')) def 
test_get_resource_info_cached(self): with mock.patch('neutron.api.v2.attributes.PLURALS') as mock_plurals: attributes.REVERSED_PLURALS['port'] = 'ports' attrs = attributes.get_resource_info('port') self._verify_port_attributes(attrs) self.assertEqual(0, mock_plurals.items.call_count) neutron-8.4.0/neutron/tests/unit/api/test_api_common.py0000664000567000056710000000553013044372760024505 0ustar jenkinsjenkins00000000000000# Copyright (c) 2013 Intel Corporation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from testtools import matchers from webob import exc from neutron.api import api_common as common from neutron.tests import base class FakeController(common.NeutronController): _resource_name = 'fake' class APICommonTestCase(base.BaseTestCase): def setUp(self): super(APICommonTestCase, self).setUp() self.controller = FakeController(None) def test_prepare_request_body(self): body = { 'fake': { 'name': 'terminator', 'model': 'T-800', } } params = [ {'param-name': 'name', 'required': True}, {'param-name': 'model', 'required': True}, {'param-name': 'quote', 'required': False, 'default-value': "i'll be back"}, ] expect = { 'fake': { 'name': 'terminator', 'model': 'T-800', 'quote': "i'll be back", } } actual = self.controller._prepare_request_body(body, params) self.assertThat(expect, matchers.Equals(actual)) def test_prepare_request_body_none(self): body = None params = [ {'param-name': 'quote', 'required': False, 'default-value': "I'll be back"}, ] expect = { 'fake': { 'quote': "I'll be back", } } actual = self.controller._prepare_request_body(body, params) self.assertThat(expect, matchers.Equals(actual)) def test_prepare_request_body_keyerror(self): body = {'t2': {}} params = [] self.assertRaises(exc.HTTPBadRequest, self.controller._prepare_request_body, body, params) def test_prepare_request_param_value_none(self): body = { 'fake': { 'name': None, } } params = [ {'param-name': 'name', 'required': True}, ] self.assertRaises(exc.HTTPBadRequest, self.controller._prepare_request_body, body, params) neutron-8.4.0/neutron/tests/unit/api/test_extensions.py0000664000567000056710000012045313044372760024565 0ustar jenkinsjenkins00000000000000# Copyright (c) 2011 OpenStack Foundation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
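# The _prepare_request_body tests that close the previous file
# (test_api_common.py) describe a simple defaulting contract: absent
# optional params receive their 'default-value', while an absent
# required param or a missing resource key is rejected with a 400. A
# minimal sketch of that contract, illustrative only (the helper name
# is hypothetical, and the real controller raises webob HTTP
# exceptions rather than ValueError):
#
#     def prepare_request_body_sketch(body, params, resource='fake'):
#         body = body or {resource: {}}
#         if resource not in body:
#             raise ValueError('resource %s missing from body' % resource)
#         res_body = body[resource]
#         for param in params:
#             name = param['param-name']
#             value = res_body.get(name, param.get('default-value'))
#             if param.get('required') and value is None:
#                 raise ValueError('required param %s missing' % name)
#             res_body[name] = value
#         return body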
import abc import mock from oslo_config import cfg from oslo_log import log as logging from oslo_serialization import jsonutils from oslo_service import wsgi as base_wsgi import routes import six import webob import webob.exc as webexc import webtest import neutron from neutron.api import extensions from neutron.api.v2 import attributes from neutron.common import config from neutron.common import exceptions from neutron import manager from neutron.plugins.common import constants from neutron import quota from neutron.tests import base from neutron.tests.unit.api.v2 import test_base from neutron.tests.unit import extension_stubs as ext_stubs import neutron.tests.unit.extensions from neutron.tests.unit.extensions import extendedattribute as extattr from neutron.tests.unit import testlib_api from neutron import wsgi LOG = logging.getLogger(__name__) _uuid = test_base._uuid _get_path = test_base._get_path extensions_path = ':'.join(neutron.tests.unit.extensions.__path__) class ExtensionsTestApp(base_wsgi.Router): def __init__(self, options=None): options = options or {} mapper = routes.Mapper() controller = ext_stubs.StubBaseAppController() mapper.resource("dummy_resource", "/dummy_resources", controller=controller) super(ExtensionsTestApp, self).__init__(mapper) class FakePluginWithExtension(object): """A fake plugin used only for extension testing in this file.""" supported_extension_aliases = ["FOXNSOX"] def method_to_support_foxnsox_extension(self, context): self._log("method_to_support_foxnsox_extension", context) class ExtensionPathTest(base.BaseTestCase): def setUp(self): self.base_path = extensions.get_extensions_path() super(ExtensionPathTest, self).setUp() def test_get_extensions_path_with_plugins(self): path = extensions.get_extensions_path( {constants.CORE: FakePluginWithExtension()}) self.assertEqual(path, '%s:neutron/tests/unit/extensions' % self.base_path) def test_get_extensions_path_no_extensions(self): # Reset to default value, as it's overridden by base class cfg.CONF.set_override('api_extensions_path', '') path = extensions.get_extensions_path() self.assertEqual(path, self.base_path) def test_get_extensions_path_single_extension(self): cfg.CONF.set_override('api_extensions_path', 'path1') path = extensions.get_extensions_path() self.assertEqual(path, '%s:path1' % self.base_path) def test_get_extensions_path_multiple_extensions(self): cfg.CONF.set_override('api_extensions_path', 'path1:path2') path = extensions.get_extensions_path() self.assertEqual(path, '%s:path1:path2' % self.base_path) def test_get_extensions_path_duplicate_extensions(self): cfg.CONF.set_override('api_extensions_path', 'path1:path1') path = extensions.get_extensions_path() self.assertEqual(path, '%s:path1' % self.base_path) class PluginInterfaceTest(base.BaseTestCase): def test_issubclass_hook(self): class A(object): def f(self): pass class B(extensions.PluginInterface): @abc.abstractmethod def f(self): pass self.assertTrue(issubclass(A, B)) def test_issubclass_hook_class_without_abstract_methods(self): class A(object): def f(self): pass class B(extensions.PluginInterface): def f(self): pass self.assertFalse(issubclass(A, B)) def test_issubclass_hook_not_all_methods_implemented(self): class A(object): def f(self): pass class B(extensions.PluginInterface): @abc.abstractmethod def f(self): pass @abc.abstractmethod def g(self): pass self.assertFalse(issubclass(A, B)) class ResourceExtensionTest(base.BaseTestCase): class ResourceExtensionController(wsgi.Controller): def index(self, request): return "resource 
index" def show(self, request, id): return {'data': {'id': id}} def notimplemented_function(self, request, id): return webob.exc.HTTPNotImplemented() def custom_member_action(self, request, id): return {'member_action': 'value'} def custom_collection_method(self, request, **kwargs): return {'collection': 'value'} def custom_collection_action(self, request, **kwargs): return {'collection': 'value'} class DummySvcPlugin(wsgi.Controller): def get_plugin_type(self): return constants.DUMMY def index(self, request, **kwargs): return "resource index" def custom_member_action(self, request, **kwargs): return {'member_action': 'value'} def collection_action(self, request, **kwargs): return {'collection': 'value'} def show(self, request, id): return {'data': {'id': id}} def test_exceptions_notimplemented(self): controller = self.ResourceExtensionController() member = {'notimplemented_function': "GET"} res_ext = extensions.ResourceExtension('tweedles', controller, member_actions=member) test_app = _setup_extensions_test_app(SimpleExtensionManager(res_ext)) # Ideally we would check for a 501 code here, but webtest treats # any status below 200 or above 400 as an error and raises # webtest.AppError instead, so we assert on the error text. try: test_app.get("/tweedles/some_id/notimplemented_function") # Shouldn't be reached self.assertTrue(False) except webtest.AppError as e: self.assertIn('501', str(e)) def test_resource_can_be_added_as_extension(self): res_ext = extensions.ResourceExtension( 'tweedles', self.ResourceExtensionController()) test_app = _setup_extensions_test_app(SimpleExtensionManager(res_ext)) index_response = test_app.get("/tweedles") self.assertEqual(200, index_response.status_int) self.assertEqual(b"resource index", index_response.body) show_response = test_app.get("/tweedles/25266") self.assertEqual({'data': {'id': "25266"}}, show_response.json) def test_resource_gets_prefix_of_plugin(self): class DummySvcPlugin(wsgi.Controller): def index(self, request): return "" def get_plugin_type(self): return constants.DUMMY res_ext = extensions.ResourceExtension( 'tweedles', DummySvcPlugin(), path_prefix="/dummy_svc") test_app = _setup_extensions_test_app(SimpleExtensionManager(res_ext)) index_response = test_app.get("/dummy_svc/tweedles") self.assertEqual(200, index_response.status_int) def test_resource_extension_with_custom_member_action(self): controller = self.ResourceExtensionController() member = {'custom_member_action': "GET"} res_ext = extensions.ResourceExtension('tweedles', controller, member_actions=member) test_app = _setup_extensions_test_app(SimpleExtensionManager(res_ext)) response = test_app.get("/tweedles/some_id/custom_member_action") self.assertEqual(200, response.status_int) self.assertEqual(jsonutils.loads(response.body)['member_action'], "value") def test_resource_ext_with_custom_member_action_gets_plugin_prefix(self): controller = self.DummySvcPlugin() member = {'custom_member_action': "GET"} collections = {'collection_action': "GET"} res_ext = extensions.ResourceExtension('tweedles', controller, path_prefix="/dummy_svc", member_actions=member, collection_actions=collections) test_app = _setup_extensions_test_app(SimpleExtensionManager(res_ext)) response = test_app.get("/dummy_svc/tweedles/1/custom_member_action") self.assertEqual(200, response.status_int) self.assertEqual(jsonutils.loads(response.body)['member_action'], "value") response = test_app.get("/dummy_svc/tweedles/collection_action") self.assertEqual(200, response.status_int)
self.assertEqual(jsonutils.loads(response.body)['collection'], "value") def test_plugin_prefix_with_parent_resource(self): controller = self.DummySvcPlugin() parent = dict(member_name="tenant", collection_name="tenants") member = {'custom_member_action': "GET"} collections = {'collection_action': "GET"} res_ext = extensions.ResourceExtension('tweedles', controller, parent, path_prefix="/dummy_svc", member_actions=member, collection_actions=collections) test_app = _setup_extensions_test_app(SimpleExtensionManager(res_ext)) index_response = test_app.get("/dummy_svc/tenants/1/tweedles") self.assertEqual(200, index_response.status_int) response = test_app.get("/dummy_svc/tenants/1/" "tweedles/1/custom_member_action") self.assertEqual(200, response.status_int) self.assertEqual(jsonutils.loads(response.body)['member_action'], "value") response = test_app.get("/dummy_svc/tenants/2/" "tweedles/collection_action") self.assertEqual(200, response.status_int) self.assertEqual(jsonutils.loads(response.body)['collection'], "value") def test_resource_extension_for_get_custom_collection_action(self): controller = self.ResourceExtensionController() collections = {'custom_collection_action': "GET"} res_ext = extensions.ResourceExtension('tweedles', controller, collection_actions=collections) test_app = _setup_extensions_test_app(SimpleExtensionManager(res_ext)) response = test_app.get("/tweedles/custom_collection_action") self.assertEqual(200, response.status_int) LOG.debug(jsonutils.loads(response.body)) self.assertEqual(jsonutils.loads(response.body)['collection'], "value") def test_resource_extension_for_put_custom_collection_action(self): controller = self.ResourceExtensionController() collections = {'custom_collection_action': "PUT"} res_ext = extensions.ResourceExtension('tweedles', controller, collection_actions=collections) test_app = _setup_extensions_test_app(SimpleExtensionManager(res_ext)) response = test_app.put("/tweedles/custom_collection_action") self.assertEqual(200, response.status_int) self.assertEqual(jsonutils.loads(response.body)['collection'], 'value') def test_resource_extension_for_post_custom_collection_action(self): controller = self.ResourceExtensionController() collections = {'custom_collection_action': "POST"} res_ext = extensions.ResourceExtension('tweedles', controller, collection_actions=collections) test_app = _setup_extensions_test_app(SimpleExtensionManager(res_ext)) response = test_app.post("/tweedles/custom_collection_action") self.assertEqual(200, response.status_int) self.assertEqual(jsonutils.loads(response.body)['collection'], 'value') def test_resource_extension_for_delete_custom_collection_action(self): controller = self.ResourceExtensionController() collections = {'custom_collection_action': "DELETE"} res_ext = extensions.ResourceExtension('tweedles', controller, collection_actions=collections) test_app = _setup_extensions_test_app(SimpleExtensionManager(res_ext)) response = test_app.delete("/tweedles/custom_collection_action") self.assertEqual(200, response.status_int) self.assertEqual(jsonutils.loads(response.body)['collection'], 'value') def test_resource_ext_for_formatted_req_on_custom_collection_action(self): controller = self.ResourceExtensionController() collections = {'custom_collection_action': "GET"} res_ext = extensions.ResourceExtension('tweedles', controller, collection_actions=collections) test_app = _setup_extensions_test_app(SimpleExtensionManager(res_ext)) response = test_app.get("/tweedles/custom_collection_action.json") self.assertEqual(200, 
response.status_int) self.assertEqual(jsonutils.loads(response.body)['collection'], "value") def test_resource_ext_for_nested_resource_custom_collection_action(self): controller = self.ResourceExtensionController() collections = {'custom_collection_action': "GET"} parent = dict(collection_name='beetles', member_name='beetle') res_ext = extensions.ResourceExtension('tweedles', controller, collection_actions=collections, parent=parent) test_app = _setup_extensions_test_app(SimpleExtensionManager(res_ext)) response = test_app.get("/beetles/beetle_id" "/tweedles/custom_collection_action") self.assertEqual(200, response.status_int) self.assertEqual(jsonutils.loads(response.body)['collection'], "value") def test_resource_extension_for_get_custom_collection_method(self): controller = self.ResourceExtensionController() collections = {'custom_collection_method': "GET"} res_ext = extensions.ResourceExtension('tweedles', controller, collection_methods=collections) test_app = _setup_extensions_test_app(SimpleExtensionManager(res_ext)) response = test_app.get("/tweedles") self.assertEqual(200, response.status_int) self.assertEqual("value", jsonutils.loads(response.body)['collection']) def test_resource_extension_for_put_custom_collection_method(self): controller = self.ResourceExtensionController() collections = {'custom_collection_method': "PUT"} res_ext = extensions.ResourceExtension('tweedles', controller, collection_methods=collections) test_app = _setup_extensions_test_app(SimpleExtensionManager(res_ext)) response = test_app.put("/tweedles") self.assertEqual(200, response.status_int) self.assertEqual('value', jsonutils.loads(response.body)['collection']) def test_resource_extension_for_post_custom_collection_method(self): controller = self.ResourceExtensionController() collections = {'custom_collection_method': "POST"} res_ext = extensions.ResourceExtension('tweedles', controller, collection_methods=collections) test_app = _setup_extensions_test_app(SimpleExtensionManager(res_ext)) response = test_app.post("/tweedles") self.assertEqual(200, response.status_int) self.assertEqual('value', jsonutils.loads(response.body)['collection']) def test_resource_extension_for_delete_custom_collection_method(self): controller = self.ResourceExtensionController() collections = {'custom_collection_method': "DELETE"} res_ext = extensions.ResourceExtension('tweedles', controller, collection_methods=collections) test_app = _setup_extensions_test_app(SimpleExtensionManager(res_ext)) response = test_app.delete("/tweedles") self.assertEqual(200, response.status_int) self.assertEqual('value', jsonutils.loads(response.body)['collection']) def test_resource_ext_for_formatted_req_on_custom_collection_method(self): controller = self.ResourceExtensionController() collections = {'custom_collection_method': "GET"} res_ext = extensions.ResourceExtension('tweedles', controller, collection_methods=collections) test_app = _setup_extensions_test_app(SimpleExtensionManager(res_ext)) response = test_app.get("/tweedles.json") self.assertEqual(200, response.status_int) self.assertEqual("value", jsonutils.loads(response.body)['collection']) def test_resource_ext_for_nested_resource_custom_collection_method(self): controller = self.ResourceExtensionController() collections = {'custom_collection_method': "GET"} parent = {'collection_name': 'beetles', 'member_name': 'beetle'} res_ext = extensions.ResourceExtension('tweedles', controller, collection_methods=collections, parent=parent) test_app = 
_setup_extensions_test_app(SimpleExtensionManager(res_ext)) response = test_app.get("/beetles/beetle_id/tweedles") self.assertEqual(200, response.status_int) self.assertEqual("value", jsonutils.loads(response.body)['collection']) def test_resource_extension_with_custom_member_action_and_attr_map(self): controller = self.ResourceExtensionController() member = {'custom_member_action': "GET"} params = { 'tweedles': { 'id': {'allow_post': False, 'allow_put': False, 'validate': {'type:uuid': None}, 'is_visible': True}, 'name': {'allow_post': True, 'allow_put': True, 'validate': {'type:string': None}, 'default': '', 'is_visible': True}, } } res_ext = extensions.ResourceExtension('tweedles', controller, member_actions=member, attr_map=params) test_app = _setup_extensions_test_app(SimpleExtensionManager(res_ext)) response = test_app.get("/tweedles/some_id/custom_member_action") self.assertEqual(200, response.status_int) self.assertEqual(jsonutils.loads(response.body)['member_action'], "value") def test_returns_404_for_non_existent_extension(self): test_app = _setup_extensions_test_app(SimpleExtensionManager(None)) response = test_app.get("/non_extistant_extension", status='*') self.assertEqual(404, response.status_int) class ActionExtensionTest(base.BaseTestCase): def setUp(self): super(ActionExtensionTest, self).setUp() self.extension_app = _setup_extensions_test_app() def test_extended_action_for_adding_extra_data(self): action_name = 'FOXNSOX:add_tweedle' action_params = dict(name='Beetle') req_body = jsonutils.dumps({action_name: action_params}) response = self.extension_app.post('/dummy_resources/1/action', req_body, content_type='application/json') self.assertEqual(b"Tweedle Beetle Added.", response.body) def test_extended_action_for_deleting_extra_data(self): action_name = 'FOXNSOX:delete_tweedle' action_params = dict(name='Bailey') req_body = jsonutils.dumps({action_name: action_params}) response = self.extension_app.post("/dummy_resources/1/action", req_body, content_type='application/json') self.assertEqual(b"Tweedle Bailey Deleted.", response.body) def test_returns_404_for_non_existent_action(self): non_existent_action = 'blah_action' action_params = dict(name="test") req_body = jsonutils.dumps({non_existent_action: action_params}) response = self.extension_app.post("/dummy_resources/1/action", req_body, content_type='application/json', status='*') self.assertEqual(404, response.status_int) def test_returns_404_for_non_existent_resource(self): action_name = 'add_tweedle' action_params = dict(name='Beetle') req_body = jsonutils.dumps({action_name: action_params}) response = self.extension_app.post("/asdf/1/action", req_body, content_type='application/json', status='*') self.assertEqual(404, response.status_int) class RequestExtensionTest(base.BaseTestCase): def test_headers_can_be_extended(self): def extend_headers(req, res): assert req.headers['X-NEW-REQUEST-HEADER'] == "sox" res.headers['X-NEW-RESPONSE-HEADER'] = "response_header_data" return res app = self._setup_app_with_request_handler(extend_headers, 'GET') response = app.get("/dummy_resources/1", headers={'X-NEW-REQUEST-HEADER': "sox"}) self.assertEqual(response.headers['X-NEW-RESPONSE-HEADER'], "response_header_data") def test_extend_get_resource_response(self): def extend_response_data(req, res): data = jsonutils.loads(res.body) data['FOXNSOX:extended_key'] = req.GET.get('extended_key') res.body = jsonutils.dumps(data).encode('utf-8') return res app = self._setup_app_with_request_handler(extend_response_data, 'GET') response = 
app.get("/dummy_resources/1?extended_key=extended_data") self.assertEqual(200, response.status_int) response_data = jsonutils.loads(response.body) self.assertEqual('extended_data', response_data['FOXNSOX:extended_key']) self.assertEqual('knox', response_data['fort']) def test_get_resources(self): app = _setup_extensions_test_app() response = app.get("/dummy_resources/1?chewing=newblue") response_data = jsonutils.loads(response.body) self.assertEqual('newblue', response_data['FOXNSOX:googoose']) self.assertEqual("Pig Bands!", response_data['FOXNSOX:big_bands']) def test_edit_previously_uneditable_field(self): def _update_handler(req, res): data = jsonutils.loads(res.body) data['uneditable'] = req.params['uneditable'] res.body = jsonutils.dumps(data).encode('utf-8') return res base_app = webtest.TestApp(setup_base_app(self)) response = base_app.put("/dummy_resources/1", {'uneditable': "new_value"}) self.assertEqual(response.json['uneditable'], "original_value") ext_app = self._setup_app_with_request_handler(_update_handler, 'PUT') ext_response = ext_app.put("/dummy_resources/1", {'uneditable': "new_value"}) self.assertEqual(ext_response.json['uneditable'], "new_value") def _setup_app_with_request_handler(self, handler, verb): req_ext = extensions.RequestExtension(verb, '/dummy_resources/:(id)', handler) manager = SimpleExtensionManager(None, None, req_ext) return _setup_extensions_test_app(manager) class ExtensionManagerTest(base.BaseTestCase): def test_optional_extensions_no_error(self): ext_mgr = extensions.ExtensionManager('') attr_map = {} ext_mgr.add_extension(ext_stubs.StubExtension('foo_alias', optional=['cats'])) ext_mgr.extend_resources("2.0", attr_map) self.assertIn('foo_alias', ext_mgr.extensions) def test_missing_required_extensions_raise_error(self): ext_mgr = extensions.ExtensionManager('') attr_map = {} ext_mgr.add_extension(ext_stubs.StubExtensionWithReqs('foo_alias')) self.assertRaises(exceptions.ExtensionsNotFound, ext_mgr.extend_resources, "2.0", attr_map) def test_missing_required_extensions_gracefully_error(self): ext_mgr = extensions.ExtensionManager('') attr_map = {} default_ext = list(constants.DEFAULT_SERVICE_PLUGINS.values())[0] ext_mgr.add_extension(ext_stubs.StubExtensionWithReqs(default_ext)) ext_mgr.extend_resources("2.0", attr_map) self.assertIn(default_ext, ext_mgr.extensions) def test_invalid_extensions_are_not_registered(self): class InvalidExtension(object): """Invalid extension. This Extension doesn't implement the extension methods: get_name, get_description and get_updated """ def get_alias(self): return "invalid_extension" ext_mgr = extensions.ExtensionManager('') ext_mgr.add_extension(InvalidExtension()) ext_mgr.add_extension(ext_stubs.StubExtension("valid_extension")) self.assertIn('valid_extension', ext_mgr.extensions) self.assertNotIn('invalid_extension', ext_mgr.extensions) def test_assignment_of_attr_map(self): """Unit test for bug 1443342 In this bug, an extension that extended multiple resources with the same dict would cause future extensions to inadvertently modify the attributes of all of the resources since they were referencing the same dictionary. """ class MultiResourceExtension(ext_stubs.StubExtension): """Generated Extended Resources. This extension assigns its extended resources to more than one resource.
""" def get_extended_resources(self, version): EXTENDED_TIMESTAMP = { 'created_at': {'allow_post': False, 'allow_put': False, 'is_visible': True}} EXTENDED_RESOURCES = ["ext1", "ext2"] attrs = {} for resources in EXTENDED_RESOURCES: attrs[resources] = EXTENDED_TIMESTAMP return attrs class AttrExtension(ext_stubs.StubExtension): def get_extended_resources(self, version): attrs = { self.alias: { '%s-attr' % self.alias: {'allow_post': False, 'allow_put': False, 'is_visible': True}}} return attrs ext_mgr = extensions.ExtensionManager('') attr_map = {} ext_mgr.add_extension(MultiResourceExtension('timestamp')) ext_mgr.extend_resources("2.0", attr_map) ext_mgr.add_extension(AttrExtension("ext1")) ext_mgr.add_extension(AttrExtension("ext2")) ext_mgr.extend_resources("2.0", attr_map) self.assertIn('created_at', attr_map['ext2']) self.assertIn('created_at', attr_map['ext1']) # now we need to make sure the attrextensions didn't leak across self.assertNotIn('ext1-attr', attr_map['ext2']) self.assertNotIn('ext2-attr', attr_map['ext1']) class PluginAwareExtensionManagerTest(base.BaseTestCase): def test_unsupported_extensions_are_not_loaded(self): stub_plugin = ext_stubs.StubPlugin(supported_extensions=["e1", "e3"]) plugin_info = {constants.CORE: stub_plugin} with mock.patch("neutron.api.extensions.PluginAwareExtensionManager." "check_if_plugin_extensions_loaded"): ext_mgr = extensions.PluginAwareExtensionManager('', plugin_info) ext_mgr.add_extension(ext_stubs.StubExtension("e1")) ext_mgr.add_extension(ext_stubs.StubExtension("e2")) ext_mgr.add_extension(ext_stubs.StubExtension("e3")) self.assertIn("e1", ext_mgr.extensions) self.assertNotIn("e2", ext_mgr.extensions) self.assertIn("e3", ext_mgr.extensions) def test_extensions_are_not_loaded_for_plugins_unaware_of_extensions(self): class ExtensionUnawarePlugin(object): """This plugin does not implement supports_extension method. Extensions will not be loaded when this plugin is used. """ pass plugin_info = {constants.CORE: ExtensionUnawarePlugin()} ext_mgr = extensions.PluginAwareExtensionManager('', plugin_info) ext_mgr.add_extension(ext_stubs.StubExtension("e1")) self.assertNotIn("e1", ext_mgr.extensions) def test_extensions_not_loaded_for_plugin_without_expected_interface(self): class PluginWithoutExpectedIface(object): """Does not implement get_foo method as expected by extension.""" supported_extension_aliases = ["supported_extension"] plugin_info = {constants.CORE: PluginWithoutExpectedIface()} with mock.patch("neutron.api.extensions.PluginAwareExtensionManager." "check_if_plugin_extensions_loaded"): ext_mgr = extensions.PluginAwareExtensionManager('', plugin_info) ext_mgr.add_extension(ext_stubs.ExtensionExpectingPluginInterface( "supported_extension")) self.assertNotIn("e1", ext_mgr.extensions) def test_extensions_are_loaded_for_plugin_with_expected_interface(self): class PluginWithExpectedInterface(object): """Implements get_foo method as expected by extension.""" supported_extension_aliases = ["supported_extension"] def get_foo(self, bar=None): pass plugin_info = {constants.CORE: PluginWithExpectedInterface()} with mock.patch("neutron.api.extensions.PluginAwareExtensionManager."
"check_if_plugin_extensions_loaded"): ext_mgr = extensions.PluginAwareExtensionManager('', plugin_info) ext_mgr.add_extension(ext_stubs.ExtensionExpectingPluginInterface( "supported_extension")) self.assertIn("supported_extension", ext_mgr.extensions) def test_extensions_expecting_neutron_plugin_interface_are_loaded(self): class ExtensionForQuantumPluginInterface(ext_stubs.StubExtension): """This Extension does not implement the get_plugin_interface method. This will work with any plugin implementing NeutronPluginBase """ pass stub_plugin = ext_stubs.StubPlugin(supported_extensions=["e1"]) plugin_info = {constants.CORE: stub_plugin} with mock.patch("neutron.api.extensions.PluginAwareExtensionManager." "check_if_plugin_extensions_loaded"): ext_mgr = extensions.PluginAwareExtensionManager('', plugin_info) ext_mgr.add_extension(ExtensionForQuantumPluginInterface("e1")) self.assertIn("e1", ext_mgr.extensions) def test_extensions_without_need_for_plugin_interface_are_loaded(self): class ExtensionWithNoNeedForPluginInterface(ext_stubs.StubExtension): """This Extension does not need any plugin interface. This will work with any plugin implementing NeutronPluginBase """ def get_plugin_interface(self): return None stub_plugin = ext_stubs.StubPlugin(supported_extensions=["e1"]) plugin_info = {constants.CORE: stub_plugin} with mock.patch("neutron.api.extensions.PluginAwareExtensionManager." "check_if_plugin_extensions_loaded"): ext_mgr = extensions.PluginAwareExtensionManager('', plugin_info) ext_mgr.add_extension(ExtensionWithNoNeedForPluginInterface("e1")) self.assertIn("e1", ext_mgr.extensions) def test_extension_loaded_for_non_core_plugin(self): class NonCorePluginExtension(ext_stubs.StubExtension): def get_plugin_interface(self): return None stub_plugin = ext_stubs.StubPlugin(supported_extensions=["e1"]) plugin_info = {constants.DUMMY: stub_plugin} with mock.patch("neutron.api.extensions.PluginAwareExtensionManager." "check_if_plugin_extensions_loaded"): ext_mgr = extensions.PluginAwareExtensionManager('', plugin_info) ext_mgr.add_extension(NonCorePluginExtension("e1")) self.assertIn("e1", ext_mgr.extensions) def test_unloaded_supported_extensions_raises_exception(self): stub_plugin = ext_stubs.StubPlugin( supported_extensions=["unloaded_extension"]) plugin_info = {constants.CORE: stub_plugin} self.assertRaises(exceptions.ExtensionsNotFound, extensions.PluginAwareExtensionManager, '', plugin_info) class ExtensionControllerTest(testlib_api.WebTestCase): def setUp(self): super(ExtensionControllerTest, self).setUp() self.test_app = _setup_extensions_test_app() def test_index_gets_all_registered_extensions(self): response = self.test_app.get("/extensions." + self.fmt) res_body = self.deserialize(response) foxnsox = res_body["extensions"][0] self.assertEqual(foxnsox["alias"], "FOXNSOX") def test_extension_can_be_accessed_by_alias(self): response = self.test_app.get("/extensions/FOXNSOX."
+ self.fmt) foxnsox_extension = self.deserialize(response) foxnsox_extension = foxnsox_extension['extension'] self.assertEqual(foxnsox_extension["alias"], "FOXNSOX") def test_show_returns_not_found_for_non_existent_extension(self): response = self.test_app.get("/extensions/non_existent" + self.fmt, status="*") self.assertEqual(response.status_int, 404) def app_factory(global_conf, **local_conf): conf = global_conf.copy() conf.update(local_conf) return ExtensionsTestApp(conf) def setup_base_app(test): base.BaseTestCase.config_parse() app = config.load_paste_app('extensions_test_app') return app def setup_extensions_middleware(extension_manager=None): extension_manager = (extension_manager or extensions.PluginAwareExtensionManager( extensions_path, {constants.CORE: FakePluginWithExtension()})) base.BaseTestCase.config_parse() app = config.load_paste_app('extensions_test_app') return extensions.ExtensionMiddleware(app, ext_mgr=extension_manager) def _setup_extensions_test_app(extension_manager=None): return webtest.TestApp(setup_extensions_middleware(extension_manager)) class SimpleExtensionManager(object): def __init__(self, resource_ext=None, action_ext=None, request_ext=None): self.resource_ext = resource_ext self.action_ext = action_ext self.request_ext = request_ext def get_resources(self): resource_exts = [] if self.resource_ext: resource_exts.append(self.resource_ext) return resource_exts def get_actions(self): action_exts = [] if self.action_ext: action_exts.append(self.action_ext) return action_exts def get_request_extensions(self): request_extensions = [] if self.request_ext: request_extensions.append(self.request_ext) return request_extensions class ExtensionExtendedAttributeTestPlugin(object): supported_extension_aliases = [ 'ext-obj-test', "extended-ext-attr" ] def __init__(self, configfile=None): super(ExtensionExtendedAttributeTestPlugin, self).__init__() self.objs = [] self.objh = {} def create_ext_test_resource(self, context, ext_test_resource): obj = ext_test_resource['ext_test_resource'] id = _uuid() obj['id'] = id self.objs.append(obj) self.objh.update({id: obj}) return obj def get_ext_test_resources(self, context, filters=None, fields=None): return self.objs def get_ext_test_resource(self, context, id, fields=None): return self.objh[id] class ExtensionExtendedAttributeTestCase(base.BaseTestCase): def setUp(self): super(ExtensionExtendedAttributeTestCase, self).setUp() plugin = ( "neutron.tests.unit.api.test_extensions."
"ExtensionExtendedAttributeTestPlugin" ) # point config file to: neutron/tests/etc/neutron.conf self.config_parse() self.setup_coreplugin(plugin) ext_mgr = extensions.PluginAwareExtensionManager( extensions_path, {constants.CORE: ExtensionExtendedAttributeTestPlugin()} ) ext_mgr.extend_resources("2.0", {}) extensions.PluginAwareExtensionManager._instance = ext_mgr app = config.load_paste_app('extensions_test_app') self._api = extensions.ExtensionMiddleware(app, ext_mgr=ext_mgr) self._tenant_id = "8c70909f-b081-452d-872b-df48e6c355d1" # Save the global RESOURCE_ATTRIBUTE_MAP self.saved_attr_map = {} for res, attrs in six.iteritems(attributes.RESOURCE_ATTRIBUTE_MAP): self.saved_attr_map[res] = attrs.copy() # Add the resources to the global attribute map # This is done here as the setup process won't # initialize the main API router which extends # the global attribute map attributes.RESOURCE_ATTRIBUTE_MAP.update( extattr.EXTENDED_ATTRIBUTES_2_0) self.agentscheduler_dbMixin = manager.NeutronManager.get_plugin() self.addCleanup(self.restore_attribute_map) quota.QUOTAS._driver = None cfg.CONF.set_override('quota_driver', 'neutron.quota.ConfDriver', group='QUOTAS') def restore_attribute_map(self): # Restore the original RESOURCE_ATTRIBUTE_MAP attributes.RESOURCE_ATTRIBUTE_MAP = self.saved_attr_map def _do_request(self, method, path, data=None, params=None, action=None): content_type = 'application/json' body = None if data is not None: # empty dict is valid body = wsgi.Serializer().serialize(data, content_type) req = testlib_api.create_request( path, body, content_type, method, query_string=params) res = req.get_response(self._api) if res.status_code >= 400: raise webexc.HTTPClientError(detail=res.body, code=res.status_code) if res.status_code != webexc.HTTPNoContent.code: return res.json def _ext_test_resource_create(self, attr=None): data = { "ext_test_resource": { "tenant_id": self._tenant_id, "name": "test", extattr.EXTENDED_ATTRIBUTE: attr } } res = self._do_request('POST', _get_path('ext_test_resources'), data) return res['ext_test_resource'] def test_ext_test_resource_create(self): ext_test_resource = self._ext_test_resource_create() attr = _uuid() ext_test_resource = self._ext_test_resource_create(attr) self.assertEqual(ext_test_resource[extattr.EXTENDED_ATTRIBUTE], attr) def test_ext_test_resource_get(self): attr = _uuid() obj = self._ext_test_resource_create(attr) obj_id = obj['id'] res = self._do_request('GET', _get_path( 'ext_test_resources/{0}'.format(obj_id))) obj2 = res['ext_test_resource'] self.assertEqual(obj2[extattr.EXTENDED_ATTRIBUTE], attr) neutron-8.4.0/neutron/tests/unit/api/rpc/0000775000567000056710000000000013044373210021523 5ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/tests/unit/api/rpc/handlers/0000775000567000056710000000000013044373210023323 5ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/tests/unit/api/rpc/handlers/test_l3_rpc.py0000664000567000056710000000547413044372760026131 0ustar jenkinsjenkins00000000000000# Copyright (c) 2015 Cisco Systems # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied.
# See the License for the specific language governing permissions and # limitations under the License. from oslo_config import cfg from neutron.api.rpc.handlers import l3_rpc from neutron.common import constants from neutron import context from neutron import manager from neutron.tests.unit.db import test_db_base_plugin_v2 from neutron.tests.unit import testlib_api class TestL3RpcCallback(testlib_api.SqlTestCase): def setUp(self): super(TestL3RpcCallback, self).setUp() self.setup_coreplugin(test_db_base_plugin_v2.DB_PLUGIN_KLASS) self.plugin = manager.NeutronManager.get_plugin() self.ctx = context.get_admin_context() cfg.CONF.set_override('ipv6_pd_enabled', True) self.callbacks = l3_rpc.L3RpcCallback() self.network = self._prepare_network() def _prepare_network(self): network = {'network': {'name': 'abc', 'shared': False, 'tenant_id': 'tenant_id', 'admin_state_up': True}} return self.plugin.create_network(self.ctx, network) def _prepare_ipv6_pd_subnet(self): subnet = {'subnet': {'network_id': self.network['id'], 'tenant_id': 'tenant_id', 'cidr': None, 'ip_version': 6, 'use_default_subnetpool': True, 'name': 'ipv6_pd', 'enable_dhcp': True, 'host_routes': None, 'dns_nameservers': None, 'allocation_pools': None, 'ipv6_ra_mode': constants.IPV6_SLAAC, 'ipv6_address_mode': constants.IPV6_SLAAC}} return self.plugin.create_subnet(self.ctx, subnet) def test_process_prefix_update(self): subnet = self._prepare_ipv6_pd_subnet() data = {subnet['id']: '2001:db8::/64'} allocation_pools = [{'start': '2001:db8::2', 'end': '2001:db8::ffff:ffff:ffff:ffff'}] res = self.callbacks.process_prefix_update(self.ctx, subnets=data) updated_subnet = res[0] self.assertEqual(updated_subnet['cidr'], data[subnet['id']]) self.assertEqual(updated_subnet['allocation_pools'], allocation_pools) neutron-8.4.0/neutron/tests/unit/api/rpc/handlers/__init__.py0000664000567000056710000000000013044372736025436 0ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/tests/unit/api/rpc/handlers/test_securitygroups_rpc.py0000664000567000056710000000456113044372736030731 0ustar jenkinsjenkins00000000000000# All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
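# The process_prefix_update expectation in the previous file
# (test_l3_rpc.py) can be sanity-checked by hand: in a delegated
# 2001:db8::/64 the first usable address (::1) is reserved for the
# gateway, so the allocation pool runs from ::2 to the last address of
# the /64. An illustrative standalone check using the Python 3 stdlib
# 'ipaddress' module (not part of this test suite):
#
#     import ipaddress
#
#     net = ipaddress.ip_network('2001:db8::/64')
#     pool = {'start': str(net.network_address + 2),
#             'end': str(net.broadcast_address)}
#     assert pool == {'start': '2001:db8::2',
#                     'end': '2001:db8::ffff:ffff:ffff:ffff'}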
import mock from neutron.api.rpc.handlers import securitygroups_rpc from neutron.tests import base class SecurityGroupServerRpcApiTestCase(base.BaseTestCase): def test_security_group_rules_for_devices(self): rpcapi = securitygroups_rpc.SecurityGroupServerRpcApi('fake_topic') with mock.patch.object(rpcapi.client, 'call') as rpc_mock,\ mock.patch.object(rpcapi.client, 'prepare') as prepare_mock: prepare_mock.return_value = rpcapi.client rpcapi.security_group_rules_for_devices('context', ['fake_device']) rpc_mock.assert_called_once_with( 'context', 'security_group_rules_for_devices', devices=['fake_device']) class SGAgentRpcCallBackMixinTestCase(base.BaseTestCase): def setUp(self): super(SGAgentRpcCallBackMixinTestCase, self).setUp() self.rpc = securitygroups_rpc.SecurityGroupAgentRpcCallbackMixin() self.rpc.sg_agent = mock.Mock() def test_security_groups_rule_updated(self): self.rpc.security_groups_rule_updated(None, security_groups=['fake_sgid']) self.rpc.sg_agent.assert_has_calls( [mock.call.security_groups_rule_updated(['fake_sgid'])]) def test_security_groups_member_updated(self): self.rpc.security_groups_member_updated(None, security_groups=['fake_sgid']) self.rpc.sg_agent.assert_has_calls( [mock.call.security_groups_member_updated(['fake_sgid'])]) def test_security_groups_provider_updated(self): self.rpc.security_groups_provider_updated(None) self.rpc.sg_agent.assert_has_calls( [mock.call.security_groups_provider_updated(None)]) neutron-8.4.0/neutron/tests/unit/api/rpc/handlers/test_dvr_rpc.py0000664000567000056710000000407713044372736026417 0ustar jenkinsjenkins00000000000000# Copyright (c) 2015 OpenStack Foundation. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
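# The securitygroups RPC tests above all mock the same oslo.messaging
# client shape: prepare() returns a call context, and call() performs a
# blocking RPC named after the server-side method with keyword
# arguments. A minimal sketch of the client code those mocks describe
# (illustrative only; the real class is SecurityGroupServerRpcApi and
# its prepare() arguments may differ):
#
#     def security_group_rules_for_devices_sketch(client, context,
#                                                 devices):
#         cctxt = client.prepare()
#         return cctxt.call(context,
#                           'security_group_rules_for_devices',
#                           devices=devices)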
import mock from neutron.api.rpc.handlers import dvr_rpc from neutron.tests import base class DVRServerRpcApiTestCase(base.BaseTestCase): def setUp(self): self.client_p = mock.patch.object(dvr_rpc.n_rpc, "get_client") self.client = self.client_p.start() self.rpc = dvr_rpc.DVRServerRpcApi('fake_topic') self.mock_cctxt = self.rpc.client.prepare.return_value self.ctxt = mock.ANY super(DVRServerRpcApiTestCase, self).setUp() def test_get_dvr_mac_address_by_host(self): self.rpc.get_dvr_mac_address_by_host(self.ctxt, 'foo_host') self.mock_cctxt.call.assert_called_with( self.ctxt, 'get_dvr_mac_address_by_host', host='foo_host') def test_get_dvr_mac_address_list(self): self.rpc.get_dvr_mac_address_list(self.ctxt) self.mock_cctxt.call.assert_called_with( self.ctxt, 'get_dvr_mac_address_list') def test_get_ports_on_host_by_subnet(self): self.rpc.get_ports_on_host_by_subnet( self.ctxt, 'foo_host', 'foo_subnet') self.mock_cctxt.call.assert_called_with( self.ctxt, 'get_ports_on_host_by_subnet', host='foo_host', subnet='foo_subnet') def test_get_subnet_for_dvr(self): self.rpc.get_subnet_for_dvr( self.ctxt, 'foo_subnet', fixed_ips='foo_fixed_ips') self.mock_cctxt.call.assert_called_with( self.ctxt, 'get_subnet_for_dvr', subnet='foo_subnet', fixed_ips='foo_fixed_ips') neutron-8.4.0/neutron/tests/unit/api/rpc/handlers/test_resources_rpc.py0000775000567000056710000002371413044372760027635 0ustar jenkinsjenkins00000000000000# Copyright (c) 2015 Mellanox Technologies, Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import copy import mock from oslo_versionedobjects import base as obj_base from oslo_versionedobjects import fields as obj_fields import testtools from neutron.api.rpc.callbacks import resources from neutron.api.rpc.callbacks import version_manager from neutron.api.rpc.handlers import resources_rpc from neutron.common import topics from neutron import context from neutron.objects import base as objects_base from neutron.tests import base def _create_test_dict(): return {'id': 'uuid', 'field': 'foo'} def _create_test_resource(context=None): resource_dict = _create_test_dict() resource = FakeResource(context, **resource_dict) resource.obj_reset_changes() return resource class FakeResource(objects_base.NeutronObject): # Version 1.0: Initial version VERSION = '1.0' fields = { 'id': obj_fields.UUIDField(), 'field': obj_fields.StringField() } @classmethod def get_objects(cls, context, **kwargs): return list() class ResourcesRpcBaseTestCase(base.BaseTestCase): def setUp(self): super(ResourcesRpcBaseTestCase, self).setUp() # TODO(mhickey) This is using temp registry pattern. The # pattern solution is to backup the object registry, register # a class locally, and then restore the original registry. # Refer to https://review.openstack.org/#/c/263800/ for more # details. This code should be updated when the patch is merged. 
self._base_test_backup = copy.copy( obj_base.VersionedObjectRegistry._registry._obj_classes) self.addCleanup(self._restore_obj_registry) self.context = context.get_admin_context() def _restore_obj_registry(self): obj_base.VersionedObjectRegistry._registry._obj_classes = ( self._base_test_backup) class _ValidateResourceTypeTestCase(base.BaseTestCase): def setUp(self): super(_ValidateResourceTypeTestCase, self).setUp() self.is_valid_mock = mock.patch.object( resources_rpc.resources, 'is_valid_resource_type').start() def test_valid_type(self): self.is_valid_mock.return_value = True resources_rpc._validate_resource_type('foo') def test_invalid_type(self): self.is_valid_mock.return_value = False with testtools.ExpectedException( resources_rpc.InvalidResourceTypeClass): resources_rpc._validate_resource_type('foo') class _ResourceTypeVersionedTopicTestCase(base.BaseTestCase): @mock.patch.object(resources_rpc, '_validate_resource_type') def test_resource_type_versioned_topic(self, validate_mock): obj_name = FakeResource.obj_name() expected = topics.RESOURCE_TOPIC_PATTERN % { 'resource_type': 'FakeResource', 'version': '1.0'} with mock.patch.object(resources_rpc.resources, 'get_resource_cls', return_value=FakeResource): observed = resources_rpc.resource_type_versioned_topic(obj_name) self.assertEqual(expected, observed) class ResourcesPullRpcApiTestCase(ResourcesRpcBaseTestCase): def setUp(self): super(ResourcesPullRpcApiTestCase, self).setUp() mock.patch.object(resources_rpc, '_validate_resource_type').start() mock.patch('neutron.api.rpc.callbacks.resources.get_resource_cls', return_value=FakeResource).start() self.rpc = resources_rpc.ResourcesPullRpcApi() mock.patch.object(self.rpc, 'client').start() self.cctxt_mock = self.rpc.client.prepare.return_value def test_is_singleton(self): self.assertIs(self.rpc, resources_rpc.ResourcesPullRpcApi()) def test_pull(self): obj_base.VersionedObjectRegistry.register(FakeResource) expected_obj = _create_test_resource(self.context) resource_id = expected_obj.id self.cctxt_mock.call.return_value = expected_obj.obj_to_primitive() result = self.rpc.pull( self.context, FakeResource.obj_name(), resource_id) self.cctxt_mock.call.assert_called_once_with( self.context, 'pull', resource_type='FakeResource', version=FakeResource.VERSION, resource_id=resource_id) self.assertEqual(expected_obj, result) def test_pull_resource_not_found(self): resource_dict = _create_test_dict() resource_id = resource_dict['id'] self.cctxt_mock.call.return_value = None with testtools.ExpectedException(resources_rpc.ResourceNotFound): self.rpc.pull(self.context, FakeResource.obj_name(), resource_id) class ResourcesPushToServerRpcCallbackTestCase(ResourcesRpcBaseTestCase): def test_report_versions(self): callbacks = resources_rpc.ResourcesPushToServerRpcCallback() with mock.patch('neutron.api.rpc.callbacks.version_manager' '.update_versions') as update_versions: version_map = {'A': '1.0'} callbacks.report_agent_resource_versions(context=mock.ANY, agent_type='DHCP Agent', agent_host='fake-host', version_map=version_map) update_versions.assert_called_once_with(mock.ANY, version_map) class ResourcesPullRpcCallbackTestCase(ResourcesRpcBaseTestCase): def setUp(self): super(ResourcesPullRpcCallbackTestCase, self).setUp() obj_base.VersionedObjectRegistry.register(FakeResource) self.callbacks = resources_rpc.ResourcesPullRpcCallback() self.resource_obj = _create_test_resource(self.context) def test_pull(self): resource_dict = _create_test_dict() with mock.patch.object( resources_rpc.prod_registry, 
'pull', return_value=self.resource_obj) as registry_mock: primitive = self.callbacks.pull( self.context, resource_type=FakeResource.obj_name(), version=FakeResource.VERSION, resource_id=self.resource_obj.id) registry_mock.assert_called_once_with( 'FakeResource', self.resource_obj.id, context=self.context) self.assertEqual(resource_dict, primitive['versioned_object.data']) self.assertEqual(self.resource_obj.obj_to_primitive(), primitive) @mock.patch.object(FakeResource, 'obj_to_primitive') def test_pull_backports_to_older_version(self, to_prim_mock): with mock.patch.object(resources_rpc.prod_registry, 'pull', return_value=self.resource_obj): self.callbacks.pull( self.context, resource_type=FakeResource.obj_name(), version='0.9', # less than initial version 1.0 resource_id=self.resource_obj.id) to_prim_mock.assert_called_with(target_version='0.9') class ResourcesPushRpcApiTestCase(ResourcesRpcBaseTestCase): def setUp(self): super(ResourcesPushRpcApiTestCase, self).setUp() mock.patch.object(resources_rpc.n_rpc, 'get_client').start() mock.patch.object(resources_rpc, '_validate_resource_type').start() self.rpc = resources_rpc.ResourcesPushRpcApi() self.cctxt_mock = self.rpc.client.prepare.return_value self.resource_obj = _create_test_resource(self.context) def test__prepare_object_fanout_context(self): expected_topic = topics.RESOURCE_TOPIC_PATTERN % { 'resource_type': resources.get_resource_type(self.resource_obj), 'version': self.resource_obj.VERSION} with mock.patch.object(resources_rpc.resources, 'get_resource_cls', return_value=FakeResource): observed = self.rpc._prepare_object_fanout_context( self.resource_obj, self.resource_obj.VERSION) self.rpc.client.prepare.assert_called_once_with( fanout=True, topic=expected_topic) self.assertEqual(self.cctxt_mock, observed) def test_pushy(self): with mock.patch.object(resources_rpc.resources, 'get_resource_cls', return_value=FakeResource): with mock.patch.object(version_manager, 'get_resource_versions', return_value=set([FakeResource.VERSION])): self.rpc.push( self.context, self.resource_obj, 'TYPE') self.cctxt_mock.cast.assert_called_once_with( self.context, 'push', resource=self.resource_obj.obj_to_primitive(), event_type='TYPE') class ResourcesPushRpcCallbackTestCase(ResourcesRpcBaseTestCase): def setUp(self): super(ResourcesPushRpcCallbackTestCase, self).setUp() mock.patch.object(resources_rpc, '_validate_resource_type').start() mock.patch.object( resources_rpc.resources, 'get_resource_cls', return_value=FakeResource).start() self.resource_obj = _create_test_resource(self.context) self.resource_prim = self.resource_obj.obj_to_primitive() self.callbacks = resources_rpc.ResourcesPushRpcCallback() @mock.patch.object(resources_rpc.cons_registry, 'push') def test_push(self, reg_push_mock): obj_base.VersionedObjectRegistry.register(FakeResource) self.callbacks.push(self.context, self.resource_prim, 'TYPE') reg_push_mock.assert_called_once_with(self.resource_obj.obj_name(), self.resource_obj, 'TYPE') neutron-8.4.0/neutron/tests/unit/api/rpc/handlers/test_bgp_speaker_rpc.py0000664000567000056710000000323613044372760030077 0ustar jenkinsjenkins00000000000000# Copyright 2016 Huawei Technologies India Pvt. Ltd. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import mock

from neutron.api.rpc.handlers import bgp_speaker_rpc
from neutron.tests import base


class TestBgpSpeakerRpcCallback(base.BaseTestCase):

    def setUp(self):
        self.plugin_p = mock.patch('neutron.manager.NeutronManager.'
                                   'get_service_plugins')
        self.plugin = self.plugin_p.start()
        self.callback = bgp_speaker_rpc.BgpSpeakerRpcCallback()
        super(TestBgpSpeakerRpcCallback, self).setUp()

    def test_get_bgp_speaker_info(self):
        self.callback.get_bgp_speaker_info(mock.Mock(),
                                           bgp_speaker_id='id1')
        # len() never returns None, so the original assertIsNotNone(len(...))
        # could not fail; assert that the plugin was actually called instead.
        self.assertTrue(self.plugin.mock_calls)

    def test_get_bgp_peer_info(self):
        self.callback.get_bgp_peer_info(mock.Mock(),
                                        bgp_peer_id='id1')
        self.assertTrue(self.plugin.mock_calls)

    def test_get_bgp_speakers(self):
        self.callback.get_bgp_speakers(mock.Mock(),
                                       host='host')
        self.assertTrue(self.plugin.mock_calls)
neutron-8.4.0/neutron/tests/unit/api/rpc/handlers/test_dhcp_rpc.py0000664000567000056710000002576113044372760026542 0ustar jenkinsjenkins00000000000000
# Copyright (c) 2012 OpenStack Foundation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import mock
from oslo_db import exception as db_exc

from neutron.api.rpc.handlers import dhcp_rpc
from neutron.common import constants
from neutron.common import exceptions as n_exc
from neutron.common import utils
from neutron.extensions import portbindings
from neutron.tests import base


class TestDhcpRpcCallback(base.BaseTestCase):

    def setUp(self):
        super(TestDhcpRpcCallback, self).setUp()
        self.plugin_p = mock.patch('neutron.manager.NeutronManager.'
                                   'get_plugin')
        get_plugin = self.plugin_p.start()
        self.plugin = mock.MagicMock()
        get_plugin.return_value = self.plugin
        self.callbacks = dhcp_rpc.DhcpRpcCallback()
        self.log_p = mock.patch('neutron.api.rpc.handlers.dhcp_rpc.LOG')
        self.log = self.log_p.start()
        set_dirty_p = mock.patch('neutron.quota.resource_registry.'
                                 'set_resources_dirty')
        self.mock_set_dirty = set_dirty_p.start()
        self.utils_p = mock.patch('neutron.plugins.common.utils.create_port')
        self.utils = self.utils_p.start()

    def test_get_active_networks(self):
        plugin_retval = [dict(id='a'), dict(id='b')]
        self.plugin.get_networks.return_value = plugin_retval
        networks = self.callbacks.get_active_networks(mock.Mock(),
                                                      host='host')
        # Expected value goes first, per the assertEqual hacking checks.
        self.assertEqual(['a', 'b'], networks)
        self.plugin.assert_has_calls(
            [mock.call.get_networks(mock.ANY,
                                    filters=dict(admin_state_up=[True]))])
        self.assertEqual(1, len(self.log.mock_calls))

    def test_group_by_network_id(self):
        port1 = {'network_id': 'a'}
        port2 = {'network_id': 'b'}
        port3 = {'network_id': 'a'}
        grouped_ports = self.callbacks._group_by_network_id(
            [port1, port2, port3])
        expected = {'a': [port1, port3], 'b': [port2]}
        self.assertEqual(expected, grouped_ports)

    def test_get_active_networks_info(self):
        plugin_retval = [{'id': 'a'}, {'id': 'b'}]
        self.plugin.get_networks.return_value = plugin_retval
        port = {'network_id': 'a'}
        subnet = {'network_id': 'b', 'id': 'c'}
        self.plugin.get_ports.return_value = [port]
        self.plugin.get_subnets.return_value = [subnet]
        networks = self.callbacks.get_active_networks_info(mock.Mock(),
                                                           host='host')
        expected = [{'id': 'a', 'subnets': [], 'ports': [port]},
                    {'id': 'b', 'subnets': [subnet], 'ports': []}]
        self.assertEqual(expected, networks)

    def _test__port_action_with_failures(self, exc=None, action=None):
        port = {
            'network_id': 'foo_network_id',
            'device_owner': constants.DEVICE_OWNER_DHCP,
            'fixed_ips': [{'subnet_id': 'foo_subnet_id'}]
        }
        self.plugin.create_port.side_effect = exc
        self.utils.side_effect = exc
        self.assertIsNone(self.callbacks._port_action(self.plugin,
                                                      mock.Mock(),
                                                      {'port': port},
                                                      action))

    def _test__port_action_good_action(self, action, port, expected_call):
        self.callbacks._port_action(self.plugin, mock.Mock(),
                                    port, action)
        if action == 'create_port':
            self.utils.assert_called_once_with(mock.ANY, mock.ANY, mock.ANY)
        else:
            self.plugin.assert_has_calls([expected_call])

    def test_port_action_create_port(self):
        self._test__port_action_good_action(
            'create_port', mock.Mock(),
            mock.call.create_port(mock.ANY, mock.ANY))

    def test_port_action_update_port(self):
        fake_port = {'id': 'foo_port_id', 'port': mock.Mock()}
        self._test__port_action_good_action(
            'update_port', fake_port,
            mock.call.update_port(mock.ANY, 'foo_port_id', mock.ANY))

    def test__port_action_bad_action(self):
        self.assertRaises(
            n_exc.Invalid,
            self._test__port_action_with_failures,
            exc=None,
            action='foo_action')

    def test_create_port_catch_network_not_found(self):
        self._test__port_action_with_failures(
            exc=n_exc.NetworkNotFound(net_id='foo_network_id'),
            action='create_port')

    def test_create_port_catch_subnet_not_found(self):
        self._test__port_action_with_failures(
            exc=n_exc.SubnetNotFound(subnet_id='foo_subnet_id'),
            action='create_port')

    def test_create_port_catch_db_error(self):
        self._test__port_action_with_failures(exc=db_exc.DBError(),
                                              action='create_port')

    def test_create_port_catch_ip_generation_failure_reraise(self):
        self.assertRaises(
            n_exc.IpAddressGenerationFailure,
            self._test__port_action_with_failures,
            exc=n_exc.IpAddressGenerationFailure(net_id='foo_network_id'),
            action='create_port')

    def test_create_port_catch_and_handle_ip_generation_failure(self):
        self.plugin.get_subnet.side_effect = (
            n_exc.SubnetNotFound(subnet_id='foo_subnet_id'))
        self._test__port_action_with_failures(
            exc=n_exc.IpAddressGenerationFailure(net_id='foo_network_id'),
            action='create_port')

    def test_get_network_info_return_none_on_not_found(self):
        self.plugin.get_network.side_effect = n_exc.NetworkNotFound(
            net_id='a')
        retval = self.callbacks.get_network_info(mock.Mock(), network_id='a')
        self.assertIsNone(retval)

    def test_get_network_info(self):
        network_retval = dict(id='a')
        subnet_retval = [dict(id='a'), dict(id='c'), dict(id='b')]
        port_retval = mock.Mock()
        self.plugin.get_network.return_value = network_retval
        self.plugin.get_subnets.return_value = subnet_retval
        self.plugin.get_ports.return_value = port_retval
        retval = self.callbacks.get_network_info(mock.Mock(), network_id='a')
        # Expected value goes first, per the assertEqual hacking checks.
        self.assertEqual(network_retval, retval)
        sorted_subnet_retval = [dict(id='a'), dict(id='b'), dict(id='c')]
        self.assertEqual(sorted_subnet_retval, retval['subnets'])
        self.assertEqual(port_retval, retval['ports'])

    def test_update_dhcp_port_verify_port_action_port_dict(self):
        port = {'port': {'network_id': 'foo_network_id',
                         'device_owner': constants.DEVICE_OWNER_DHCP,
                         'fixed_ips': [{'subnet_id': 'foo_subnet_id'}]}
                }
        expected_port = {'port': {'network_id': 'foo_network_id',
                                  'device_owner':
                                      constants.DEVICE_OWNER_DHCP,
                                  portbindings.HOST_ID: 'foo_host',
                                  'fixed_ips': [
                                      {'subnet_id': 'foo_subnet_id'}]
                                  },
                         'id': 'foo_port_id'
                         }

        def _fake_port_action(plugin, context, port, action):
            self.assertEqual(expected_port, port)

        self.plugin.get_port.return_value = {
            'device_id': constants.DEVICE_ID_RESERVED_DHCP_PORT}
        self.callbacks._port_action = _fake_port_action
        self.callbacks.update_dhcp_port(mock.Mock(),
                                        host='foo_host',
                                        port_id='foo_port_id',
                                        port=port)

    def test_update_reserved_dhcp_port(self):
        port = {'port': {'network_id': 'foo_network_id',
                         'device_owner': constants.DEVICE_OWNER_DHCP,
                         'fixed_ips': [{'subnet_id': 'foo_subnet_id'}]}
                }
        expected_port = {'port': {'network_id': 'foo_network_id',
                                  'device_owner':
                                      constants.DEVICE_OWNER_DHCP,
                                  portbindings.HOST_ID: 'foo_host',
                                  'fixed_ips': [
                                      {'subnet_id': 'foo_subnet_id'}]
                                  },
                         'id': 'foo_port_id'
                         }

        def _fake_port_action(plugin, context, port, action):
            self.assertEqual(expected_port, port)

        self.plugin.get_port.return_value = {
            'device_id': utils.get_dhcp_agent_device_id('foo_network_id',
                                                        'foo_host')}
        self.callbacks._port_action = _fake_port_action
        self.callbacks.update_dhcp_port(
            mock.Mock(), host='foo_host', port_id='foo_port_id', port=port)

        self.plugin.get_port.return_value = {
            'device_id': 'other_id'}
        self.assertRaises(n_exc.DhcpPortInUse,
                          self.callbacks.update_dhcp_port,
                          mock.Mock(),
                          host='foo_host',
                          port_id='foo_port_id',
                          port=port)

    def test_update_dhcp_port(self):
        port = {'port': {'network_id': 'foo_network_id',
                         'device_owner': constants.DEVICE_OWNER_DHCP,
                         'fixed_ips': [{'subnet_id': 'foo_subnet_id'}]}
                }
        expected_port = {'port': {'network_id': 'foo_network_id',
                                  'device_owner':
                                      constants.DEVICE_OWNER_DHCP,
                                  portbindings.HOST_ID: 'foo_host',
                                  'fixed_ips': [
                                      {'subnet_id': 'foo_subnet_id'}]
                                  },
                         'id': 'foo_port_id'
                         }
        self.plugin.get_port.return_value = {
            'device_id': constants.DEVICE_ID_RESERVED_DHCP_PORT}
        self.callbacks.update_dhcp_port(mock.Mock(),
                                        host='foo_host',
                                        port_id='foo_port_id',
                                        port=port)
        self.plugin.assert_has_calls([
            mock.call.update_port(mock.ANY, 'foo_port_id', expected_port)])

    def test_release_dhcp_port(self):
        port_retval = dict(id='port_id', fixed_ips=[dict(subnet_id='a')])
        self.plugin.get_ports.return_value = [port_retval]
        self.callbacks.release_dhcp_port(mock.ANY, network_id='netid',
                                         device_id='devid')
        self.plugin.assert_has_calls([
            mock.call.delete_ports_by_device_id(mock.ANY, 'devid', 'netid')])
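
# Illustrative sketch only, not part of the original module: the grouping
# contract exercised by test_group_by_network_id above can be reproduced
# with a plain defaultdict. Only the dict shapes come from the tests; the
# standalone helper shown here is hypothetical.
#
#     import collections
#
#     def _group_by_network_id(ports):
#         grouped = collections.defaultdict(list)
#         for port in ports:
#             grouped[port['network_id']].append(port)
#         return grouped
#
#     _group_by_network_id([{'network_id': 'a'}, {'network_id': 'b'},
#                           {'network_id': 'a'}])
#     # => {'a': [{'network_id': 'a'}, {'network_id': 'a'}],
#     #     'b': [{'network_id': 'b'}]}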
neutron-8.4.0/neutron/tests/unit/api/rpc/callbacks/0000775000567000056710000000000013044373210023442 5ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/tests/unit/api/rpc/callbacks/test_resource_manager.py0000664000567000056710000001267513044372736030423 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import mock from neutron.api.rpc.callbacks import exceptions as rpc_exc from neutron.api.rpc.callbacks import resource_manager from neutron.callbacks import exceptions as exceptions from neutron.tests.unit.services.qos import base IS_VALID_RESOURCE_TYPE = ( 'neutron.api.rpc.callbacks.resources.is_valid_resource_type') class ResourceCallbacksManagerTestCaseMixin(object): def test_register_fails_on_invalid_type(self): self.assertRaises( exceptions.Invalid, self.mgr.register, lambda: None, 'TYPE') @mock.patch(IS_VALID_RESOURCE_TYPE, return_value=True) def test_clear_unregisters_all_callbacks(self, *mocks): self.mgr.register(lambda: None, 'TYPE1') self.mgr.register(lambda: None, 'TYPE2') self.mgr.clear() self.assertEqual([], self.mgr.get_subscribed_types()) def test_unregister_fails_on_invalid_type(self): self.assertRaises( exceptions.Invalid, self.mgr.unregister, lambda: None, 'TYPE') @mock.patch(IS_VALID_RESOURCE_TYPE, return_value=True) def test_unregister_fails_on_unregistered_callback(self, *mocks): self.assertRaises( rpc_exc.CallbackNotFound, self.mgr.unregister, lambda: None, 'TYPE') @mock.patch(IS_VALID_RESOURCE_TYPE, return_value=True) def test_unregister_unregisters_callback(self, *mocks): callback = lambda: None self.mgr.register(callback, 'TYPE') self.mgr.unregister(callback, 'TYPE') self.assertEqual([], self.mgr.get_subscribed_types()) @mock.patch(IS_VALID_RESOURCE_TYPE, return_value=True) def test___init___does_not_reset_callbacks(self, *mocks): callback = lambda: None self.mgr.register(callback, 'TYPE') resource_manager.ProducerResourceCallbacksManager() self.assertEqual(['TYPE'], self.mgr.get_subscribed_types()) class ProducerResourceCallbacksManagerTestCase( base.BaseQosTestCase, ResourceCallbacksManagerTestCaseMixin): def setUp(self): super(ProducerResourceCallbacksManagerTestCase, self).setUp() self.mgr = self.prod_mgr @mock.patch(IS_VALID_RESOURCE_TYPE, return_value=True) def test_register_registers_callback(self, *mocks): callback = lambda: None self.mgr.register(callback, 'TYPE') self.assertEqual(callback, self.mgr.get_callback('TYPE')) @mock.patch(IS_VALID_RESOURCE_TYPE, return_value=True) def test_register_fails_on_multiple_calls(self, *mocks): self.mgr.register(lambda: None, 'TYPE') self.assertRaises( rpc_exc.CallbacksMaxLimitReached, self.mgr.register, lambda: None, 'TYPE') def test_get_callback_fails_on_invalid_type(self): self.assertRaises( exceptions.Invalid, self.mgr.get_callback, 'TYPE') @mock.patch(IS_VALID_RESOURCE_TYPE, return_value=True) def test_get_callback_fails_on_unregistered_callback( self, *mocks): self.assertRaises( rpc_exc.CallbackNotFound, self.mgr.get_callback, 'TYPE') @mock.patch(IS_VALID_RESOURCE_TYPE, return_value=True) def 
test_get_callback_returns_proper_callback(self, *mocks): callback1 = lambda: None callback2 = lambda: None self.mgr.register(callback1, 'TYPE1') self.mgr.register(callback2, 'TYPE2') self.assertEqual(callback1, self.mgr.get_callback('TYPE1')) self.assertEqual(callback2, self.mgr.get_callback('TYPE2')) class ConsumerResourceCallbacksManagerTestCase( base.BaseQosTestCase, ResourceCallbacksManagerTestCaseMixin): def setUp(self): super(ConsumerResourceCallbacksManagerTestCase, self).setUp() self.mgr = self.cons_mgr @mock.patch(IS_VALID_RESOURCE_TYPE, return_value=True) def test_register_registers_callback(self, *mocks): callback = lambda: None self.mgr.register(callback, 'TYPE') self.assertEqual({callback}, self.mgr.get_callbacks('TYPE')) @mock.patch(IS_VALID_RESOURCE_TYPE, return_value=True) def test_register_succeeds_on_multiple_calls(self, *mocks): callback1 = lambda: None callback2 = lambda: None self.mgr.register(callback1, 'TYPE') self.mgr.register(callback2, 'TYPE') @mock.patch(IS_VALID_RESOURCE_TYPE, return_value=True) def test_get_callbacks_fails_on_unregistered_callback( self, *mocks): self.assertRaises( rpc_exc.CallbackNotFound, self.mgr.get_callbacks, 'TYPE') @mock.patch(IS_VALID_RESOURCE_TYPE, return_value=True) def test_get_callbacks_returns_proper_callbacks(self, *mocks): callback1 = lambda: None callback2 = lambda: None self.mgr.register(callback1, 'TYPE1') self.mgr.register(callback2, 'TYPE2') self.assertEqual(set([callback1]), self.mgr.get_callbacks('TYPE1')) self.assertEqual(set([callback2]), self.mgr.get_callbacks('TYPE2')) neutron-8.4.0/neutron/tests/unit/api/rpc/callbacks/test_resources.py0000664000567000056710000000374313044372736027110 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
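
# Illustrative sketch only, not part of the original module: the helper
# functions covered below map between resource-type names and NeutronObject
# classes; QosPolicy is simply the registered object these tests use.
#
#     from neutron.api.rpc.callbacks import resources
#     from neutron.objects.qos import policy
#
#     resources.get_resource_type(policy.QosPolicy())   # -> 'QosPolicy'
#     resources.is_valid_resource_type('QosPolicy')     # -> True
#     resources.get_resource_cls(resources.QOS_POLICY)  # -> policy.QosPolicy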
from neutron.api.rpc.callbacks import resources
from neutron.objects.qos import policy
from neutron.tests import base


class GetResourceTypeTestCase(base.BaseTestCase):

    def test_get_resource_type_none(self):
        self.assertIsNone(resources.get_resource_type(None))

    def test_get_resource_type_wrong_type(self):
        self.assertIsNone(resources.get_resource_type(object()))

    def test_get_resource_type(self):
        # we could use any other registered NeutronObject type here
        self.assertEqual(policy.QosPolicy.obj_name(),
                         resources.get_resource_type(policy.QosPolicy()))


class IsValidResourceTypeTestCase(base.BaseTestCase):

    def test_known_type(self):
        # it could be any other NeutronObject, assuming it's known to RPC
        # callbacks
        self.assertTrue(resources.is_valid_resource_type(
            policy.QosPolicy.obj_name()))

    def test_unknown_type(self):
        self.assertFalse(
            resources.is_valid_resource_type('unknown-resource-type'))


class GetResourceClsTestCase(base.BaseTestCase):

    def test_known_type(self):
        # it could be any other NeutronObject, assuming it's known to RPC
        # callbacks
        self.assertEqual(policy.QosPolicy,
                         resources.get_resource_cls(resources.QOS_POLICY))

    def test_unknown_type(self):
        self.assertIsNone(resources.get_resource_cls('unknown-resource-type'))
neutron-8.4.0/neutron/tests/unit/api/rpc/callbacks/__init__.py0000664000567000056710000000000013044372736025555 0ustar jenkinsjenkins00000000000000
neutron-8.4.0/neutron/tests/unit/api/rpc/callbacks/producer/0000775000567000056710000000000013044373210025265 5ustar jenkinsjenkins00000000000000
neutron-8.4.0/neutron/tests/unit/api/rpc/callbacks/producer/__init__.py0000664000567000056710000000000013044372736027400 0ustar jenkinsjenkins00000000000000
neutron-8.4.0/neutron/tests/unit/api/rpc/callbacks/producer/test_registry.py0000664000567000056710000000536013044372736030566 0ustar jenkinsjenkins00000000000000
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
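
# Illustrative sketch only, not part of the original module, of the producer
# registry flow the tests below exercise; '_policy_callback' is a
# hypothetical name, everything else comes from the tests themselves.
#
#     from neutron.api.rpc.callbacks.producer import registry
#     from neutron.api.rpc.callbacks import resources
#
#     def _policy_callback(resource_id, **kwargs):
#         return None  # a real producer would return a QosPolicy object
#
#     registry.provide(_policy_callback, resources.QOS_POLICY)
#     registry.pull(resources.QOS_POLICY, 'fake_id')  # -> None in this sketch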
from neutron.api.rpc.callbacks import exceptions
from neutron.api.rpc.callbacks.producer import registry
from neutron.api.rpc.callbacks import resources
from neutron.objects.qos import policy
from neutron.tests.unit.services.qos import base


class ProducerRegistryTestCase(base.BaseQosTestCase):

    def test_pull_returns_callback_result(self):
        policy_obj = policy.QosPolicy(context=None)

        def _fake_policy_cb(*args, **kwargs):
            return policy_obj

        registry.provide(_fake_policy_cb, resources.QOS_POLICY)
        self.assertEqual(
            policy_obj, registry.pull(resources.QOS_POLICY, 'fake_id'))

    def test_pull_does_not_raise_on_none(self):
        def _none_cb(*args, **kwargs):
            pass

        registry.provide(_none_cb, resources.QOS_POLICY)
        obj = registry.pull(resources.QOS_POLICY, 'fake_id')
        self.assertIsNone(obj)

    def test_pull_raises_on_wrong_object_type(self):
        def _wrong_type_cb(*args, **kwargs):
            return object()

        registry.provide(_wrong_type_cb, resources.QOS_POLICY)
        self.assertRaises(
            exceptions.CallbackWrongResourceType,
            registry.pull, resources.QOS_POLICY, 'fake_id')

    def test_pull_raises_on_callback_not_found(self):
        self.assertRaises(
            exceptions.CallbackNotFound,
            registry.pull, resources.QOS_POLICY, 'fake_id')

    def test__get_manager_is_singleton(self):
        self.assertIs(registry._get_manager(), registry._get_manager())

    def test_unprovide(self):
        def _fake_policy_cb(*args, **kwargs):
            pass

        registry.provide(_fake_policy_cb, resources.QOS_POLICY)
        registry.unprovide(_fake_policy_cb, resources.QOS_POLICY)
        self.assertRaises(
            exceptions.CallbackNotFound,
            registry.pull, resources.QOS_POLICY, 'fake_id')

    def test_clear_unprovides_all_producers(self):
        def _fake_policy_cb(*args, **kwargs):
            pass

        registry.provide(_fake_policy_cb, resources.QOS_POLICY)
        registry.clear()
        self.assertRaises(
            exceptions.CallbackNotFound,
            registry.pull, resources.QOS_POLICY, 'fake_id')
neutron-8.4.0/neutron/tests/unit/api/rpc/callbacks/consumer/0000775000567000056710000000000013044373210025275 5ustar jenkinsjenkins00000000000000
neutron-8.4.0/neutron/tests/unit/api/rpc/callbacks/consumer/__init__.py0000664000567000056710000000000013044372736027410 0ustar jenkinsjenkins00000000000000
neutron-8.4.0/neutron/tests/unit/api/rpc/callbacks/consumer/test_registry.py0000664000567000056710000000404613044372760030573 0ustar jenkinsjenkins00000000000000
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
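
# Illustrative sketch only, not part of the original module, of the consumer
# flow tested below: callbacks subscribed for a resource type receive
# (resource, event_type) whenever push() is invoked; '_handler' is a
# hypothetical name.
#
#     from neutron.api.rpc.callbacks.consumer import registry
#
#     def _handler(resource, event_type):
#         pass  # react to the changed resource here
#
#     registry.subscribe(_handler, 'QosPolicy')
#     registry.push('QosPolicy', object(), 'updated')  # invokes _handler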
import mock

from neutron.api.rpc.callbacks.consumer import registry
from neutron.tests import base


class ConsumerRegistryTestCase(base.BaseTestCase):

    def setUp(self):
        super(ConsumerRegistryTestCase, self).setUp()

    def test__get_manager_is_singleton(self):
        self.assertIs(registry._get_manager(), registry._get_manager())

    @mock.patch.object(registry, '_get_manager')
    def test_subscribe(self, manager_mock):
        callback = lambda: None
        registry.subscribe(callback, 'TYPE')
        manager_mock().register.assert_called_with(callback, 'TYPE')

    @mock.patch.object(registry, '_get_manager')
    def test_unsubscribe(self, manager_mock):
        callback = lambda: None
        registry.unsubscribe(callback, 'TYPE')
        manager_mock().unregister.assert_called_with(callback, 'TYPE')

    @mock.patch.object(registry, '_get_manager')
    def test_clear(self, manager_mock):
        registry.clear()
        manager_mock().clear.assert_called_with()

    @mock.patch.object(registry, '_get_manager')
    def test_push(self, manager_mock):
        resource_type_ = object()
        resource_ = object()
        event_type_ = object()
        callback1 = mock.Mock()
        callback2 = mock.Mock()
        callbacks = {callback1, callback2}
        manager_mock().get_callbacks.return_value = callbacks
        registry.push(resource_type_, resource_, event_type_)
        for callback in callbacks:
            callback.assert_called_with(resource_, event_type_)
neutron-8.4.0/neutron/tests/unit/api/rpc/callbacks/test_version_manager.py0000664000567000056710000001644313044372760030253 0ustar jenkinsjenkins00000000000000
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
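
# Illustrative sketch only, not part of the original module, of the tracker
# behaviour tested below: each consumer reports the object versions it can
# handle, and the tracker exposes the known versions per resource type; the
# type name 'QosPolicy' is just an example.
#
#     from neutron.api.rpc.callbacks import version_manager
#
#     consumer = version_manager.AgentConsumer('dhcp-agent', 'host-1')
#     tracker = version_manager.ResourceConsumerTracker()
#     tracker.set_versions(consumer, {'QosPolicy': '1.1'})
#     tracker.get_resource_versions('QosPolicy')  # -> a set containing '1.1'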
import collections import mock from neutron.api.rpc.callbacks import exceptions from neutron.api.rpc.callbacks import resources from neutron.api.rpc.callbacks import version_manager from neutron.db import agents_db from neutron.tests import base TEST_RESOURCE_TYPE = 'TestResourceType' TEST_VERSION_A = '1.11' TEST_VERSION_B = '1.12' TEST_RESOURCE_TYPE_2 = 'AnotherResource' AGENT_HOST_1 = 'host-1' AGENT_HOST_2 = 'host-2' AGENT_TYPE_1 = 'dhcp-agent' AGENT_TYPE_2 = 'openvswitch-agent' CONSUMER_1 = version_manager.AgentConsumer(AGENT_TYPE_1, AGENT_HOST_1) CONSUMER_2 = version_manager.AgentConsumer(AGENT_TYPE_2, AGENT_HOST_2) class ResourceConsumerTrackerTest(base.BaseTestCase): def test_consumer_set_versions(self): cv = version_manager.ResourceConsumerTracker() cv.set_versions(CONSUMER_1, {TEST_RESOURCE_TYPE: TEST_VERSION_A}) self.assertIn(TEST_VERSION_A, cv.get_resource_versions(TEST_RESOURCE_TYPE)) def test_consumer_updates_version(self): cv = version_manager.ResourceConsumerTracker() for version in [TEST_VERSION_A, TEST_VERSION_B]: cv.set_versions(CONSUMER_1, {TEST_RESOURCE_TYPE: version}) self.assertEqual(set([TEST_VERSION_B]), cv.get_resource_versions(TEST_RESOURCE_TYPE)) def test_multiple_consumer_version_update(self): cv = version_manager.ResourceConsumerTracker() cv.set_versions(CONSUMER_1, {TEST_RESOURCE_TYPE: TEST_VERSION_A}) cv.set_versions(CONSUMER_2, {TEST_RESOURCE_TYPE: TEST_VERSION_A}) cv.set_versions(CONSUMER_1, {TEST_RESOURCE_TYPE: TEST_VERSION_B}) self.assertEqual(set([TEST_VERSION_A, TEST_VERSION_B]), cv.get_resource_versions(TEST_RESOURCE_TYPE)) def test_consumer_downgrades_removing_resource(self): cv = version_manager.ResourceConsumerTracker() cv.set_versions(CONSUMER_1, {TEST_RESOURCE_TYPE: TEST_VERSION_B, TEST_RESOURCE_TYPE_2: TEST_VERSION_A}) cv.set_versions(CONSUMER_1, {TEST_RESOURCE_TYPE: TEST_VERSION_A}) self.assertEqual(set(), cv.get_resource_versions(TEST_RESOURCE_TYPE_2)) self.assertEqual(set([TEST_VERSION_A]), cv.get_resource_versions(TEST_RESOURCE_TYPE)) def test_consumer_downgrades_stops_reporting(self): cv = version_manager.ResourceConsumerTracker() cv.set_versions(CONSUMER_1, {TEST_RESOURCE_TYPE: TEST_VERSION_B, TEST_RESOURCE_TYPE_2: TEST_VERSION_A}) cv.set_versions(CONSUMER_1, {}) for resource_type in [TEST_RESOURCE_TYPE, TEST_RESOURCE_TYPE_2]: self.assertEqual(set(), cv.get_resource_versions(resource_type)) def test_compatibility_liberty_sriov_and_ovs_agents(self): def _fake_local_versions(self): local_versions = collections.defaultdict(set) local_versions[resources.QOS_POLICY].add('1.11') return local_versions for agent_type in version_manager.NON_REPORTING_AGENT_TYPES: consumer_id = version_manager.AgentConsumer(agent_type, AGENT_HOST_1) cv = version_manager.ResourceConsumerTracker() cv._get_local_resource_versions = _fake_local_versions cv._versions = _fake_local_versions(mock.ANY) cv.set_versions(consumer_id, {}) self.assertEqual(set(['1.0', '1.11']), cv.get_resource_versions(resources.QOS_POLICY)) def test_different_adds_triggers_recalculation(self): cv = version_manager.ResourceConsumerTracker() for version in [TEST_VERSION_A, TEST_VERSION_B]: cv.set_versions(CONSUMER_1, {TEST_RESOURCE_TYPE: version}) self.assertTrue(cv._needs_recalculation) cv._recalculate_versions = mock.Mock() cv.get_resource_versions(TEST_RESOURCE_TYPE) cv._recalculate_versions.assert_called_once_with() class CachedResourceConsumerTrackerTest(base.BaseTestCase): def setUp(self): super(CachedResourceConsumerTrackerTest, self).setUp() self.refreshed = False class 
_FakePlugin(agents_db.AgentDbMixin): @staticmethod def get_agents_resource_versions(tracker): self.refreshed = True tracker.set_versions(CONSUMER_1, {TEST_RESOURCE_TYPE: TEST_VERSION_A}) self.get_plugin = mock.patch('neutron.manager.NeutronManager' '.get_plugin').start() self.get_plugin.return_value = _FakePlugin() def test_plugin_does_not_implement_agentsdb_exception(self): self.get_plugin.return_value = object() cached_tracker = version_manager.CachedResourceConsumerTracker() self.assertRaises(exceptions.NoAgentDbMixinImplemented, cached_tracker.get_resource_versions, resources.QOS_POLICY) def test_consumer_versions_callback(self): cached_tracker = version_manager.CachedResourceConsumerTracker() self.assertIn(TEST_VERSION_A, cached_tracker.get_resource_versions( TEST_RESOURCE_TYPE)) def test_update_versions(self): cached_tracker = version_manager.CachedResourceConsumerTracker() initial_versions = cached_tracker.get_resource_versions( TEST_RESOURCE_TYPE) initial_versions_2 = cached_tracker.get_resource_versions( TEST_RESOURCE_TYPE_2) cached_tracker.update_versions( CONSUMER_1, {TEST_RESOURCE_TYPE: TEST_VERSION_B, TEST_RESOURCE_TYPE_2: TEST_VERSION_A}) final_versions = cached_tracker.get_resource_versions( TEST_RESOURCE_TYPE) final_versions_2 = cached_tracker.get_resource_versions( TEST_RESOURCE_TYPE_2) self.assertNotEqual(initial_versions, final_versions) self.assertNotEqual(initial_versions_2, final_versions_2) def test_versions_ttl(self): cached_tracker = version_manager.CachedResourceConsumerTracker() with mock.patch('time.time') as time_patch: time_patch.return_value = 1 cached_tracker.get_resource_versions(TEST_RESOURCE_TYPE) self.assertTrue(self.refreshed) self.refreshed = False time_patch.return_value = 2 cached_tracker.get_resource_versions(TEST_RESOURCE_TYPE) self.assertFalse(self.refreshed) time_patch.return_value = 2 + version_manager.VERSIONS_TTL cached_tracker.get_resource_versions(TEST_RESOURCE_TYPE) self.assertTrue(self.refreshed) neutron-8.4.0/neutron/tests/unit/api/rpc/__init__.py0000664000567000056710000000000013044372736023636 0ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/tests/unit/api/rpc/agentnotifiers/0000775000567000056710000000000013044373210024544 5ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/tests/unit/api/rpc/agentnotifiers/test_bgp_dr_rpc_agent_api.py0000664000567000056710000000676713044372760032316 0ustar jenkinsjenkins00000000000000# Copyright 2016 Huawei Technologies India Pvt. Ltd. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
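
# Illustrative sketch only, not part of the original module: every notifier
# method tested below is expected to resolve to exactly one host-directed
# cast (and no call), e.g.:
#
#     from neutron.api.rpc.agentnotifiers import bgp_dr_rpc_agent_api
#     from neutron import context
#
#     notifier = bgp_dr_rpc_agent_api.BgpDrAgentNotifyApi()
#     notifier.bgp_speaker_created(context.get_admin_context(),
#                                  'bgp-speaker-1', 'host-1')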
import mock from neutron.api.rpc.agentnotifiers import bgp_dr_rpc_agent_api from neutron import context from neutron.tests import base class TestBgpDrAgentNotifyApi(base.BaseTestCase): def setUp(self): super(TestBgpDrAgentNotifyApi, self).setUp() self.notifier = ( bgp_dr_rpc_agent_api.BgpDrAgentNotifyApi()) mock_cast_p = mock.patch.object(self.notifier, '_notification_host_cast') self.mock_cast = mock_cast_p.start() mock_call_p = mock.patch.object(self.notifier, '_notification_host_call') self.mock_call = mock_call_p.start() self.context = context.get_admin_context() self.host = 'host-1' def test_notify_dragent_bgp_routes_advertisement(self): bgp_speaker_id = 'bgp-speaker-1' routes = [{'destination': '1.1.1.1', 'next_hop': '2.2.2.2'}] self.notifier.bgp_routes_advertisement(self.context, bgp_speaker_id, routes, self.host) self.assertEqual(1, self.mock_cast.call_count) self.assertEqual(0, self.mock_call.call_count) def test_notify_dragent_bgp_routes_withdrawal(self): bgp_speaker_id = 'bgp-speaker-1' routes = [{'destination': '1.1.1.1'}] self.notifier.bgp_routes_withdrawal(self.context, bgp_speaker_id, routes, self.host) self.assertEqual(1, self.mock_cast.call_count) self.assertEqual(0, self.mock_call.call_count) def test_notify_bgp_peer_disassociated(self): bgp_speaker_id = 'bgp-speaker-1' bgp_peer_ip = '1.1.1.1' self.notifier.bgp_peer_disassociated(self.context, bgp_speaker_id, bgp_peer_ip, self.host) self.assertEqual(1, self.mock_cast.call_count) self.assertEqual(0, self.mock_call.call_count) def test_notify_bgp_peer_associated(self): bgp_speaker_id = 'bgp-speaker-1' bgp_peer_id = 'bgp-peer-1' self.notifier.bgp_peer_associated(self.context, bgp_speaker_id, bgp_peer_id, self.host) self.assertEqual(1, self.mock_cast.call_count) self.assertEqual(0, self.mock_call.call_count) def test_notify_bgp_speaker_created(self): bgp_speaker_id = 'bgp-speaker-1' self.notifier.bgp_speaker_created(self.context, bgp_speaker_id, self.host) self.assertEqual(1, self.mock_cast.call_count) self.assertEqual(0, self.mock_call.call_count) def test_notify_bgp_speaker_removed(self): bgp_speaker_id = 'bgp-speaker-1' self.notifier.bgp_speaker_removed(self.context, bgp_speaker_id, self.host) self.assertEqual(1, self.mock_cast.call_count) self.assertEqual(0, self.mock_call.call_count) neutron-8.4.0/neutron/tests/unit/api/rpc/agentnotifiers/test_dhcp_rpc_agent_api.py0000664000567000056710000002155313044372760031765 0ustar jenkinsjenkins00000000000000# Copyright (c) 2013 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
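
# Illustrative sketch only, not part of the original module, of the
# scheduling behaviour verified below: _schedule_network() returns any newly
# scheduled agents appended to the agents already hosting the network, and
# one cast is sent per newly scheduled agent. 'plugin' and 'context' stand
# in for real objects here.
#
#     notifier = dhcp_rpc_agent_api.DhcpAgentNotifyAPI(plugin=plugin)
#     agents = notifier._schedule_network(context, {'id': 'net-1'},
#                                         existing_agents=[])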
import datetime import mock from oslo_utils import timeutils from neutron.api.rpc.agentnotifiers import dhcp_rpc_agent_api from neutron.common import utils from neutron.db import agents_db from neutron.db.agentschedulers_db import cfg from neutron.tests import base class TestDhcpAgentNotifyAPI(base.BaseTestCase): def setUp(self): super(TestDhcpAgentNotifyAPI, self).setUp() self.notifier = ( dhcp_rpc_agent_api.DhcpAgentNotifyAPI(plugin=mock.Mock())) mock_util_p = mock.patch.object(utils, 'is_extension_supported') mock_log_p = mock.patch.object(dhcp_rpc_agent_api, 'LOG') mock_fanout_p = mock.patch.object(self.notifier, '_fanout_message') mock_cast_p = mock.patch.object(self.notifier, '_cast_message') self.mock_util = mock_util_p.start() self.mock_log = mock_log_p.start() self.mock_fanout = mock_fanout_p.start() self.mock_cast = mock_cast_p.start() def _test__schedule_network(self, network, new_agents=None, existing_agents=None, expected_casts=0, expected_warnings=0): self.notifier.plugin.schedule_network.return_value = new_agents agents = self.notifier._schedule_network( mock.ANY, network, existing_agents) if new_agents is None: new_agents = [] self.assertEqual(new_agents + existing_agents, agents) self.assertEqual(expected_casts, self.mock_cast.call_count) self.assertEqual(expected_warnings, self.mock_log.warning.call_count) def test__schedule_network(self): agent = agents_db.Agent() agent.admin_state_up = True agent.heartbeat_timestamp = timeutils.utcnow() network = {'id': 'foo_net_id'} self._test__schedule_network(network, new_agents=[agent], existing_agents=[], expected_casts=1, expected_warnings=0) def test__schedule_network_no_existing_agents(self): agent = agents_db.Agent() agent.admin_state_up = True agent.heartbeat_timestamp = timeutils.utcnow() network = {'id': 'foo_net_id'} self._test__schedule_network(network, new_agents=None, existing_agents=[agent], expected_casts=0, expected_warnings=0) def test__schedule_network_no_new_agents(self): network = {'id': 'foo_net_id'} self._test__schedule_network(network, new_agents=None, existing_agents=[], expected_casts=0, expected_warnings=1) def _test__get_enabled_agents(self, network, agents=None, port_count=0, expected_warnings=0, expected_errors=0): self.notifier.plugin.get_ports_count.return_value = port_count enabled_agents = self.notifier._get_enabled_agents( mock.ANY, network, agents, mock.ANY, mock.ANY) if not cfg.CONF.enable_services_on_agents_with_admin_state_down: agents = [x for x in agents if x.admin_state_up] self.assertEqual(agents, enabled_agents) self.assertEqual(expected_warnings, self.mock_log.warning.call_count) self.assertEqual(expected_errors, self.mock_log.error.call_count) def test__get_enabled_agents(self): agent1 = agents_db.Agent() agent1.admin_state_up = True agent1.heartbeat_timestamp = timeutils.utcnow() agent2 = agents_db.Agent() agent2.admin_state_up = False agent2.heartbeat_timestamp = timeutils.utcnow() network = {'id': 'foo_network_id'} self._test__get_enabled_agents(network, agents=[agent1]) def test__get_enabled_agents_with_inactive_ones(self): agent1 = agents_db.Agent() agent1.admin_state_up = True agent1.heartbeat_timestamp = timeutils.utcnow() agent2 = agents_db.Agent() agent2.admin_state_up = True # This is effectively an inactive agent agent2.heartbeat_timestamp = datetime.datetime(2000, 1, 1, 0, 0) network = {'id': 'foo_network_id'} self._test__get_enabled_agents(network, agents=[agent1, agent2], expected_warnings=1, expected_errors=0) def test__get_enabled_agents_with_notification_required(self): 
network = {'id': 'foo_network_id', 'subnets': ['foo_subnet_id']} agent = agents_db.Agent() agent.admin_state_up = False agent.heartbeat_timestamp = timeutils.utcnow() self._test__get_enabled_agents(network, [agent], port_count=20, expected_warnings=0, expected_errors=1) def test__get_enabled_agents_with_admin_state_down(self): cfg.CONF.set_override( 'enable_services_on_agents_with_admin_state_down', True) agent1 = agents_db.Agent() agent1.admin_state_up = True agent1.heartbeat_timestamp = timeutils.utcnow() agent2 = agents_db.Agent() agent2.admin_state_up = False agent2.heartbeat_timestamp = timeutils.utcnow() network = {'id': 'foo_network_id'} self._test__get_enabled_agents(network, agents=[agent1, agent2]) def test__notify_agents_fanout_required(self): self.notifier._notify_agents(mock.ANY, 'network_delete_end', mock.ANY, 'foo_network_id') self.assertEqual(1, self.mock_fanout.call_count) def _test__notify_agents_with_function( self, function, expected_scheduling=0, expected_casts=0): with mock.patch.object(self.notifier, '_schedule_network') as f: with mock.patch.object(self.notifier, '_get_enabled_agents') as g: agent = agents_db.Agent() agent.admin_state_up = True agent.heartbeat_timestamp = timeutils.utcnow() g.return_value = [agent] function() self.assertEqual(expected_scheduling, f.call_count) self.assertEqual(expected_casts, self.mock_cast.call_count) def _test__notify_agents(self, method, expected_scheduling=0, expected_casts=0): self._test__notify_agents_with_function( lambda: self.notifier._notify_agents( mock.Mock(), method, {'port': {}}, 'foo_network_id'), expected_scheduling, expected_casts) def test__notify_agents_cast_required_with_scheduling(self): self._test__notify_agents('port_create_end', expected_scheduling=1, expected_casts=1) def test__notify_agents_cast_required_wo_scheduling_on_port_update(self): self._test__notify_agents('port_update_end', expected_scheduling=0, expected_casts=1) def test__notify_agents_cast_required_with_scheduling_subnet_create(self): self._test__notify_agents('subnet_create_end', expected_scheduling=1, expected_casts=1) def test__notify_agents_no_action(self): self._test__notify_agents('network_create_end', expected_scheduling=0, expected_casts=0) def test__notify_agents_with_router_interface_add(self): self._test__notify_agents_with_function( lambda: self.notifier._after_router_interface_created( mock.ANY, mock.ANY, mock.ANY, context=mock.Mock(), port={'id': 'foo_port_id', 'network_id': 'foo_network_id'}), expected_scheduling=1, expected_casts=1) def test__notify_agents_with_router_interface_delete(self): self._test__notify_agents_with_function( lambda: self.notifier._after_router_interface_deleted( mock.ANY, mock.ANY, mock.ANY, context=mock.Mock(), port={'id': 'foo_port_id', 'network_id': 'foo_network_id'}), expected_scheduling=0, expected_casts=1) def test__fanout_message(self): self.notifier._fanout_message(mock.ANY, mock.ANY, mock.ANY) self.assertEqual(1, self.mock_fanout.call_count) def test__cast_message(self): self.notifier._cast_message(mock.ANY, mock.ANY, mock.ANY) self.assertEqual(1, self.mock_cast.call_count) neutron-8.4.0/neutron/tests/unit/api/rpc/agentnotifiers/__init__.py0000664000567000056710000000000013044372736026657 0ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/tests/unit/api/rpc/agentnotifiers/test_l3_rpc_agent_api.py0000664000567000056710000000333413044372736031365 0ustar jenkinsjenkins00000000000000# Copyright (c) 2016 OpenStack Foundation. 
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import mock

from neutron.api.rpc.agentnotifiers import l3_rpc_agent_api
from neutron.tests import base


class TestL3AgentNotifyAPI(base.BaseTestCase):

    def setUp(self):
        super(TestL3AgentNotifyAPI, self).setUp()
        self.rpc_client_mock = mock.patch(
            'neutron.common.rpc.get_client').start().return_value
        self.l3_notifier = l3_rpc_agent_api.L3AgentNotifyAPI()

    def _test_arp_update(self, method):
        arp_table = {'ip_address': '1.1.1.1',
                     'mac_address': '22:f1:6c:9c:79:4a',
                     'subnet_id': 'subnet_id'}
        router_id = 'router_id'
        getattr(self.l3_notifier, method)(mock.Mock(), router_id, arp_table)
        self.rpc_client_mock.prepare.assert_called_once_with(
            fanout=True, version='1.2')
        cctxt = self.rpc_client_mock.prepare.return_value
        cctxt.cast.assert_called_once_with(
            mock.ANY, method,
            payload={'router_id': router_id, 'arp_table': arp_table})

    def test_add_arp_entry(self):
        self._test_arp_update('add_arp_entry')

    def test_del_arp_entry(self):
        self._test_arp_update('del_arp_entry')
neutron-8.4.0/neutron/tests/unit/db/0000775000567000056710000000000013044373210020553 5ustar jenkinsjenkins00000000000000
neutron-8.4.0/neutron/tests/unit/db/test_agentschedulers_db.py0000664000567000056710000022513213044372760026027 0ustar jenkinsjenkins00000000000000
# Copyright (c) 2013 OpenStack Foundation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
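
# Illustrative note, not part of the original module: the scheduler REST
# paths built by AgentSchedulerTestMixIn below all follow the patterns
#
#     /agents/<agent-id>/<collection>.<fmt>
#     /networks/<network-id>/<collection>.<fmt>
#     /routers/<router-id>/<collection>.<fmt>
#
# where <collection> comes from the dhcpagentscheduler and l3agentscheduler
# extensions (DHCP_NETS, DHCP_AGENTS, L3_ROUTERS, L3_AGENTS).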
import datetime import mock from oslo_config import cfg from oslo_db import exception as db_exc import oslo_messaging from oslo_utils import uuidutils from webob import exc from neutron.api import extensions from neutron.api.rpc.agentnotifiers import dhcp_rpc_agent_api from neutron.api.rpc.handlers import dhcp_rpc from neutron.api.rpc.handlers import l3_rpc from neutron.api.v2 import attributes from neutron.common import constants from neutron import context from neutron.db import agents_db from neutron.db import agentschedulers_db from neutron.db import l3_agentschedulers_db from neutron.extensions import agent from neutron.extensions import dhcpagentscheduler from neutron.extensions import l3agentscheduler from neutron import manager from neutron.plugins.common import constants as service_constants from neutron.tests.common import helpers from neutron.tests import fake_notifier from neutron.tests import tools from neutron.tests.unit.api import test_extensions from neutron.tests.unit.db import test_db_base_plugin_v2 as test_plugin from neutron.tests.unit.extensions import test_agent from neutron.tests.unit.extensions import test_l3 from neutron.tests.unit import testlib_api from neutron import wsgi L3_HOSTA = 'hosta' DHCP_HOSTA = 'hosta' L3_HOSTB = 'hostb' DHCP_HOSTC = 'hostc' DEVICE_OWNER_COMPUTE = ''.join([constants.DEVICE_OWNER_COMPUTE_PREFIX, 'test:', DHCP_HOSTA]) class AgentSchedulerTestMixIn(object): def _request_list(self, path, admin_context=True, expected_code=exc.HTTPOk.code): req = self._path_req(path, admin_context=admin_context) res = req.get_response(self.ext_api) self.assertEqual(expected_code, res.status_int) return self.deserialize(self.fmt, res) def _path_req(self, path, method='GET', data=None, query_string=None, admin_context=True): content_type = 'application/%s' % self.fmt body = None if data is not None: # empty dict is valid body = wsgi.Serializer().serialize(data, content_type) if admin_context: return testlib_api.create_request( path, body, content_type, method, query_string=query_string) else: return testlib_api.create_request( path, body, content_type, method, query_string=query_string, context=context.Context('', 'tenant_id')) def _path_create_request(self, path, data, admin_context=True): return self._path_req(path, method='POST', data=data, admin_context=admin_context) def _path_show_request(self, path, admin_context=True): return self._path_req(path, admin_context=admin_context) def _path_delete_request(self, path, admin_context=True): return self._path_req(path, method='DELETE', admin_context=admin_context) def _path_update_request(self, path, data, admin_context=True): return self._path_req(path, method='PUT', data=data, admin_context=admin_context) def _list_routers_hosted_by_l3_agent(self, agent_id, expected_code=exc.HTTPOk.code, admin_context=True): path = "/agents/%s/%s.%s" % (agent_id, l3agentscheduler.L3_ROUTERS, self.fmt) return self._request_list(path, expected_code=expected_code, admin_context=admin_context) def _list_networks_hosted_by_dhcp_agent(self, agent_id, expected_code=exc.HTTPOk.code, admin_context=True): path = "/agents/%s/%s.%s" % (agent_id, dhcpagentscheduler.DHCP_NETS, self.fmt) return self._request_list(path, expected_code=expected_code, admin_context=admin_context) def _list_l3_agents_hosting_router(self, router_id, expected_code=exc.HTTPOk.code, admin_context=True): path = "/routers/%s/%s.%s" % (router_id, l3agentscheduler.L3_AGENTS, self.fmt) return self._request_list(path, expected_code=expected_code, 
admin_context=admin_context) def _list_dhcp_agents_hosting_network(self, network_id, expected_code=exc.HTTPOk.code, admin_context=True): path = "/networks/%s/%s.%s" % (network_id, dhcpagentscheduler.DHCP_AGENTS, self.fmt) return self._request_list(path, expected_code=expected_code, admin_context=admin_context) def _add_router_to_l3_agent(self, id, router_id, expected_code=exc.HTTPCreated.code, admin_context=True): path = "/agents/%s/%s.%s" % (id, l3agentscheduler.L3_ROUTERS, self.fmt) req = self._path_create_request(path, {'router_id': router_id}, admin_context=admin_context) res = req.get_response(self.ext_api) self.assertEqual(expected_code, res.status_int) def _add_network_to_dhcp_agent(self, id, network_id, expected_code=exc.HTTPCreated.code, admin_context=True): path = "/agents/%s/%s.%s" % (id, dhcpagentscheduler.DHCP_NETS, self.fmt) req = self._path_create_request(path, {'network_id': network_id}, admin_context=admin_context) res = req.get_response(self.ext_api) self.assertEqual(expected_code, res.status_int) def _remove_network_from_dhcp_agent(self, id, network_id, expected_code=exc.HTTPNoContent.code, admin_context=True): path = "/agents/%s/%s/%s.%s" % (id, dhcpagentscheduler.DHCP_NETS, network_id, self.fmt) req = self._path_delete_request(path, admin_context=admin_context) res = req.get_response(self.ext_api) self.assertEqual(expected_code, res.status_int) def _remove_router_from_l3_agent(self, id, router_id, expected_code=exc.HTTPNoContent.code, admin_context=True): path = "/agents/%s/%s/%s.%s" % (id, l3agentscheduler.L3_ROUTERS, router_id, self.fmt) req = self._path_delete_request(path, admin_context=admin_context) res = req.get_response(self.ext_api) self.assertEqual(expected_code, res.status_int) def _assert_notify(self, notifications, expected_event_type): event_types = [event['event_type'] for event in notifications] self.assertIn(expected_event_type, event_types) def test_agent_registration_bad_timestamp(self): callback = agents_db.AgentExtRpcCallback() delta_time = datetime.datetime.now() - datetime.timedelta(days=1) str_time = delta_time.strftime('%Y-%m-%dT%H:%M:%S.%f') callback.report_state( self.adminContext, agent_state={ 'agent_state': helpers._get_dhcp_agent_dict(DHCP_HOSTA)}, time=str_time) def test_agent_registration_invalid_timestamp_allowed(self): callback = agents_db.AgentExtRpcCallback() utc_time = datetime.datetime.utcnow() delta_time = utc_time - datetime.timedelta(seconds=10) str_time = delta_time.strftime('%Y-%m-%dT%H:%M:%S.%f') callback.report_state( self.adminContext, agent_state={ 'agent_state': helpers._get_dhcp_agent_dict(DHCP_HOSTA)}, time=str_time) def _disable_agent(self, agent_id, admin_state_up=False): new_agent = {} new_agent['agent'] = {} new_agent['agent']['admin_state_up'] = admin_state_up self._update('agents', agent_id, new_agent) def _get_agent_id(self, agent_type, host): agents = self._list_agents() for agent_data in agents['agents']: if (agent_data['agent_type'] == agent_type and agent_data['host'] == host): return agent_data['id'] class OvsAgentSchedulerTestCaseBase(test_l3.L3NatTestCaseMixin, test_agent.AgentDBTestMixIn, AgentSchedulerTestMixIn, test_plugin.NeutronDbPluginV2TestCase): fmt = 'json' plugin_str = 'neutron.plugins.ml2.plugin.Ml2Plugin' l3_plugin = ('neutron.tests.unit.extensions.test_l3.' 
'TestL3NatAgentSchedulingServicePlugin') def setUp(self): self.useFixture(tools.AttributeMapMemento()) if self.l3_plugin: service_plugins = {'l3_plugin_name': self.l3_plugin} else: service_plugins = None mock.patch('neutron.common.rpc.get_client').start() super(OvsAgentSchedulerTestCaseBase, self).setUp( self.plugin_str, service_plugins=service_plugins) ext_mgr = extensions.PluginAwareExtensionManager.get_instance() self.ext_api = test_extensions.setup_extensions_middleware(ext_mgr) self.adminContext = context.get_admin_context() # Add the resources to the global attribute map # This is done here as the setup process won't # initialize the main API router which extends # the global attribute map attributes.RESOURCE_ATTRIBUTE_MAP.update( agent.RESOURCE_ATTRIBUTE_MAP) self.l3plugin = manager.NeutronManager.get_service_plugins().get( service_constants.L3_ROUTER_NAT) self.l3_notify_p = mock.patch( 'neutron.extensions.l3agentscheduler.notify') self.patched_l3_notify = self.l3_notify_p.start() self.l3_periodic_p = mock.patch('neutron.db.l3_agentschedulers_db.' 'L3AgentSchedulerDbMixin.' 'start_periodic_l3_agent_status_check') self.patched_l3_periodic = self.l3_periodic_p.start() self.dhcp_notify_p = mock.patch( 'neutron.extensions.dhcpagentscheduler.notify') self.patched_dhcp_notify = self.dhcp_notify_p.start() class OvsAgentSchedulerTestCase(OvsAgentSchedulerTestCaseBase): def test_report_states(self): self._register_agent_states() agents = self._list_agents() self.assertEqual(4, len(agents['agents'])) def test_network_scheduling_on_network_creation(self): self._register_agent_states() with self.network() as net: dhcp_agents = self._list_dhcp_agents_hosting_network( net['network']['id']) self.assertEqual(0, len(dhcp_agents['agents'])) def test_network_auto_schedule_with_disabled(self): cfg.CONF.set_override('allow_overlapping_ips', True) with self.subnet(), self.subnet(): dhcp_rpc_cb = dhcp_rpc.DhcpRpcCallback() self._register_agent_states() hosta_id = self._get_agent_id(constants.AGENT_TYPE_DHCP, DHCP_HOSTA) hostc_id = self._get_agent_id(constants.AGENT_TYPE_DHCP, DHCP_HOSTC) self._disable_agent(hosta_id) dhcp_rpc_cb.get_active_networks(self.adminContext, host=DHCP_HOSTA) # second agent will host all the networks since first is disabled. 
dhcp_rpc_cb.get_active_networks(self.adminContext, host=DHCP_HOSTC) networks = self._list_networks_hosted_by_dhcp_agent(hostc_id) num_hostc_nets = len(networks['networks']) networks = self._list_networks_hosted_by_dhcp_agent(hosta_id) num_hosta_nets = len(networks['networks']) self.assertEqual(0, num_hosta_nets) self.assertEqual(2, num_hostc_nets) def test_network_auto_schedule_with_no_dhcp(self): cfg.CONF.set_override('allow_overlapping_ips', True) with self.subnet(enable_dhcp=False), self.subnet(enable_dhcp=False): dhcp_rpc_cb = dhcp_rpc.DhcpRpcCallback() self._register_agent_states() hosta_id = self._get_agent_id(constants.AGENT_TYPE_DHCP, DHCP_HOSTA) hostc_id = self._get_agent_id(constants.AGENT_TYPE_DHCP, DHCP_HOSTC) self._disable_agent(hosta_id) dhcp_rpc_cb.get_active_networks(self.adminContext, host=DHCP_HOSTA) dhcp_rpc_cb.get_active_networks(self.adminContext, host=DHCP_HOSTC) networks = self._list_networks_hosted_by_dhcp_agent(hostc_id) num_hostc_nets = len(networks['networks']) networks = self._list_networks_hosted_by_dhcp_agent(hosta_id) num_hosta_nets = len(networks['networks']) self.assertEqual(0, num_hosta_nets) self.assertEqual(0, num_hostc_nets) def test_network_auto_schedule_with_multiple_agents(self): cfg.CONF.set_override('dhcp_agents_per_network', 2) cfg.CONF.set_override('allow_overlapping_ips', True) with self.subnet(), self.subnet(): dhcp_rpc_cb = dhcp_rpc.DhcpRpcCallback() self._register_agent_states() hosta_id = self._get_agent_id(constants.AGENT_TYPE_DHCP, DHCP_HOSTA) hostc_id = self._get_agent_id(constants.AGENT_TYPE_DHCP, DHCP_HOSTC) dhcp_rpc_cb.get_active_networks(self.adminContext, host=DHCP_HOSTA) dhcp_rpc_cb.get_active_networks(self.adminContext, host=DHCP_HOSTC) networks = self._list_networks_hosted_by_dhcp_agent(hostc_id) num_hostc_nets = len(networks['networks']) networks = self._list_networks_hosted_by_dhcp_agent(hosta_id) num_hosta_nets = len(networks['networks']) self.assertEqual(2, num_hosta_nets) self.assertEqual(2, num_hostc_nets) def test_network_auto_schedule_restart_dhcp_agent(self): cfg.CONF.set_override('dhcp_agents_per_network', 2) with self.subnet() as sub1: dhcp_rpc_cb = dhcp_rpc.DhcpRpcCallback() self._register_agent_states() dhcp_rpc_cb.get_active_networks(self.adminContext, host=DHCP_HOSTA) dhcp_rpc_cb.get_active_networks(self.adminContext, host=DHCP_HOSTA) dhcp_agents = self._list_dhcp_agents_hosting_network( sub1['subnet']['network_id']) self.assertEqual(1, len(dhcp_agents['agents'])) def test_network_auto_schedule_with_hosted(self): # one agent hosts all the networks, other hosts none cfg.CONF.set_override('allow_overlapping_ips', True) with self.subnet() as sub1, self.subnet(): dhcp_rpc_cb = dhcp_rpc.DhcpRpcCallback() self._register_agent_states() dhcp_rpc_cb.get_active_networks(self.adminContext, host=DHCP_HOSTA) # second agent will not host the network since first has got it. 
dhcp_rpc_cb.get_active_networks(self.adminContext, host=DHCP_HOSTC) dhcp_agents = self._list_dhcp_agents_hosting_network( sub1['subnet']['network_id']) hosta_id = self._get_agent_id(constants.AGENT_TYPE_DHCP, DHCP_HOSTA) hostc_id = self._get_agent_id(constants.AGENT_TYPE_DHCP, DHCP_HOSTC) hosta_nets = self._list_networks_hosted_by_dhcp_agent(hosta_id) num_hosta_nets = len(hosta_nets['networks']) hostc_nets = self._list_networks_hosted_by_dhcp_agent(hostc_id) num_hostc_nets = len(hostc_nets['networks']) self.assertEqual(2, num_hosta_nets) self.assertEqual(0, num_hostc_nets) self.assertEqual(1, len(dhcp_agents['agents'])) self.assertEqual(DHCP_HOSTA, dhcp_agents['agents'][0]['host']) def test_network_auto_schedule_with_hosted_2(self): # one agent hosts one network dhcp_rpc_cb = dhcp_rpc.DhcpRpcCallback() cfg.CONF.set_override('allow_overlapping_ips', True) with self.subnet() as sub1: helpers.register_dhcp_agent(DHCP_HOSTA) dhcp_rpc_cb.get_active_networks(self.adminContext, host=DHCP_HOSTA) hosta_id = self._get_agent_id(constants.AGENT_TYPE_DHCP, DHCP_HOSTA) self._disable_agent(hosta_id, admin_state_up=False) with self.subnet() as sub2: helpers.register_dhcp_agent(DHCP_HOSTC) dhcp_rpc_cb.get_active_networks(self.adminContext, host=DHCP_HOSTC) dhcp_agents_1 = self._list_dhcp_agents_hosting_network( sub1['subnet']['network_id']) dhcp_agents_2 = self._list_dhcp_agents_hosting_network( sub2['subnet']['network_id']) hosta_nets = self._list_networks_hosted_by_dhcp_agent(hosta_id) num_hosta_nets = len(hosta_nets['networks']) hostc_id = self._get_agent_id( constants.AGENT_TYPE_DHCP, DHCP_HOSTC) hostc_nets = self._list_networks_hosted_by_dhcp_agent(hostc_id) num_hostc_nets = len(hostc_nets['networks']) self.assertEqual(1, num_hosta_nets) self.assertEqual(1, num_hostc_nets) self.assertEqual(1, len(dhcp_agents_1['agents'])) self.assertEqual(1, len(dhcp_agents_2['agents'])) self.assertEqual(DHCP_HOSTA, dhcp_agents_1['agents'][0]['host']) self.assertEqual(DHCP_HOSTC, dhcp_agents_2['agents'][0]['host']) def test_network_scheduling_on_port_creation(self): with self.subnet() as subnet: dhcp_agents = self._list_dhcp_agents_hosting_network( subnet['subnet']['network_id']) result0 = len(dhcp_agents['agents']) self._register_agent_states() with self.port(subnet=subnet, device_owner=DEVICE_OWNER_COMPUTE) as port: dhcp_agents = self._list_dhcp_agents_hosting_network( port['port']['network_id']) result1 = len(dhcp_agents['agents']) self.assertEqual(0, result0) self.assertEqual(1, result1) def test_network_ha_scheduling_on_port_creation(self): cfg.CONF.set_override('dhcp_agents_per_network', 2) with self.subnet() as subnet: dhcp_agents = self._list_dhcp_agents_hosting_network( subnet['subnet']['network_id']) result0 = len(dhcp_agents['agents']) self._register_agent_states() with self.port(subnet=subnet, device_owner=DEVICE_OWNER_COMPUTE) as port: dhcp_agents = self._list_dhcp_agents_hosting_network( port['port']['network_id']) result1 = len(dhcp_agents['agents']) self.assertEqual(0, result0) self.assertEqual(2, result1) def test_network_ha_scheduling_on_port_creation_with_new_agent(self): cfg.CONF.set_override('dhcp_agents_per_network', 3) with self.subnet() as subnet: dhcp_agents = self._list_dhcp_agents_hosting_network( subnet['subnet']['network_id']) result0 = len(dhcp_agents['agents']) self._register_agent_states() with self.port(subnet=subnet, device_owner=DEVICE_OWNER_COMPUTE) as port: dhcp_agents = self._list_dhcp_agents_hosting_network( port['port']['network_id']) result1 = len(dhcp_agents['agents']) 
helpers.register_dhcp_agent('host1') with self.port(subnet=subnet, device_owner=DEVICE_OWNER_COMPUTE) as port: dhcp_agents = self._list_dhcp_agents_hosting_network( port['port']['network_id']) result2 = len(dhcp_agents['agents']) self.assertEqual(0, result0) self.assertEqual(2, result1) self.assertEqual(3, result2) def test_network_scheduler_with_disabled_agent(self): helpers.register_dhcp_agent(DHCP_HOSTA) with self.port() as port1: dhcp_agents = self._list_dhcp_agents_hosting_network( port1['port']['network_id']) self._delete('ports', port1['port']['id']) self._delete('networks', port1['port']['network_id']) self.assertEqual(1, len(dhcp_agents['agents'])) agents = self._list_agents() self._disable_agent(agents['agents'][0]['id']) with self.port() as port2: dhcp_agents = self._list_dhcp_agents_hosting_network( port2['port']['network_id']) self._delete('ports', port2['port']['id']) self.assertEqual(0, len(dhcp_agents['agents'])) def test_is_eligible_agent(self): agent_startup = ('neutron.db.agentschedulers_db.' 'DhcpAgentSchedulerDbMixin.agent_starting_up') is_eligible_agent = ('neutron.db.agentschedulers_db.' 'AgentSchedulerDbMixin.is_eligible_agent') dhcp_mixin = agentschedulers_db.DhcpAgentSchedulerDbMixin() with mock.patch(agent_startup) as startup,\ mock.patch(is_eligible_agent) as elig: tests = [(True, True), (True, False), (False, True), (False, False)] for rv1, rv2 in tests: startup.return_value = rv1 elig.return_value = rv2 self.assertEqual(rv1 or rv2, dhcp_mixin.is_eligible_agent(None, None, None)) def test_network_scheduler_with_down_agent(self): helpers.register_dhcp_agent(DHCP_HOSTA) eligible_agent_str = ('neutron.db.agentschedulers_db.' 'DhcpAgentSchedulerDbMixin.is_eligible_agent') with mock.patch(eligible_agent_str) as eligible_agent: eligible_agent.return_value = True with self.port() as port: dhcp_agents = self._list_dhcp_agents_hosting_network( port['port']['network_id']) self._delete('ports', port['port']['id']) self._delete('networks', port['port']['network_id']) self.assertEqual(1, len(dhcp_agents['agents'])) with mock.patch(eligible_agent_str) as eligible_agent: eligible_agent.return_value = False with self.port() as port: dhcp_agents = self._list_dhcp_agents_hosting_network( port['port']['network_id']) self._delete('ports', port['port']['id']) self.assertEqual(0, len(dhcp_agents['agents'])) def test_network_scheduler_with_hosted_network(self): plugin = manager.NeutronManager.get_plugin() helpers.register_dhcp_agent(DHCP_HOSTA) with self.port() as port1: dhcp_agents = self._list_dhcp_agents_hosting_network( port1['port']['network_id']) self.assertEqual(1, len(dhcp_agents['agents'])) with mock.patch.object(plugin, 'get_dhcp_agents_hosting_networks', autospec=True) as mock_hosting_agents: mock_hosting_agents.return_value = plugin.get_agents_db( self.adminContext) with self.network('test') as net1: pass with self.subnet(network=net1, cidr='10.0.1.0/24') as subnet1: pass with self.port(subnet=subnet1) as port2: pass dhcp_agents = self._list_dhcp_agents_hosting_network( port2['port']['network_id']) self.assertEqual(0, len(dhcp_agents['agents'])) def test_network_policy(self): with self.network() as net1: self._register_agent_states() hosta_id = self._get_agent_id(constants.AGENT_TYPE_DHCP, DHCP_HOSTA) self._list_networks_hosted_by_dhcp_agent( hosta_id, expected_code=exc.HTTPForbidden.code, admin_context=False) self._add_network_to_dhcp_agent( hosta_id, net1['network']['id'], expected_code=exc.HTTPForbidden.code, admin_context=False) 
self._add_network_to_dhcp_agent(hosta_id, net1['network']['id']) self._remove_network_from_dhcp_agent( hosta_id, net1['network']['id'], expected_code=exc.HTTPForbidden.code, admin_context=False) self._list_dhcp_agents_hosting_network( net1['network']['id'], expected_code=exc.HTTPForbidden.code, admin_context=False) def _test_network_add_to_dhcp_agent(self, admin_state_up=True): with self.network() as net1: self._register_agent_states() hosta_id = self._get_agent_id(constants.AGENT_TYPE_DHCP, DHCP_HOSTA) if not admin_state_up: self._set_agent_admin_state_up(DHCP_HOSTA, False) num_before_add = len( self._list_networks_hosted_by_dhcp_agent( hosta_id)['networks']) self._add_network_to_dhcp_agent(hosta_id, net1['network']['id']) num_after_add = len( self._list_networks_hosted_by_dhcp_agent( hosta_id)['networks']) self.assertEqual(0, num_before_add) self.assertEqual(1, num_after_add) def test_network_add_to_dhcp_agent(self): self._test_network_add_to_dhcp_agent() def test_network_add_to_dhcp_agent_with_admin_state_down(self): cfg.CONF.set_override( 'enable_services_on_agents_with_admin_state_down', True) self._test_network_add_to_dhcp_agent(admin_state_up=False) def test_network_remove_from_dhcp_agent(self): agent = helpers.register_dhcp_agent(DHCP_HOSTA) hosta_id = agent.id with self.port() as port1: num_before_remove = len( self._list_networks_hosted_by_dhcp_agent( hosta_id)['networks']) self._remove_network_from_dhcp_agent(hosta_id, port1['port']['network_id']) num_after_remove = len( self._list_networks_hosted_by_dhcp_agent( hosta_id)['networks']) self.assertEqual(1, num_before_remove) self.assertEqual(0, num_after_remove) def test_list_active_networks_on_not_registered_yet_dhcp_agent(self): plugin = manager.NeutronManager.get_plugin() nets = plugin.list_active_networks_on_active_dhcp_agent( self.adminContext, host=DHCP_HOSTA) self.assertEqual([], nets) def test_reserved_port_after_network_remove_from_dhcp_agent(self): helpers.register_dhcp_agent(DHCP_HOSTA) hosta_id = self._get_agent_id(constants.AGENT_TYPE_DHCP, DHCP_HOSTA) with self.port(device_owner=constants.DEVICE_OWNER_DHCP, host=DHCP_HOSTA) as port1: self._remove_network_from_dhcp_agent(hosta_id, port1['port']['network_id']) port_res = self._list_ports( 'json', 200, network_id=port1['port']['network_id']) port_list = self.deserialize('json', port_res) self.assertEqual(port_list['ports'][0]['device_id'], constants.DEVICE_ID_RESERVED_DHCP_PORT) def _test_get_active_networks_from_admin_state_down_agent(self, keep_services): if keep_services: cfg.CONF.set_override( 'enable_services_on_agents_with_admin_state_down', True) helpers.register_dhcp_agent(DHCP_HOSTA) dhcp_rpc_cb = dhcp_rpc.DhcpRpcCallback() with self.port(): nets = dhcp_rpc_cb.get_active_networks(self.adminContext, host=DHCP_HOSTA) self.assertEqual(1, len(nets)) self._set_agent_admin_state_up(DHCP_HOSTA, False) nets = dhcp_rpc_cb.get_active_networks(self.adminContext, host=DHCP_HOSTA) if keep_services: self.assertEqual(1, len(nets)) else: self.assertEqual(0, len(nets)) def test_dhcp_agent_keep_services_off(self): self._test_get_active_networks_from_admin_state_down_agent(False) def test_dhcp_agent_keep_services_on(self): self._test_get_active_networks_from_admin_state_down_agent(True) def _take_down_agent_and_run_reschedule(self, host): # take down the agent on host A and ensure B is alive self.adminContext.session.begin(subtransactions=True) query = self.adminContext.session.query(agents_db.Agent) agt = query.filter_by(host=host).first() agt.heartbeat_timestamp = ( 
        self.adminContext.session.commit()

        plugin = manager.NeutronManager.get_service_plugins().get(
            service_constants.L3_ROUTER_NAT)
        plugin.reschedule_routers_from_down_agents()

    def _set_agent_admin_state_up(self, host, state):
        self.adminContext.session.begin(subtransactions=True)
        query = self.adminContext.session.query(agents_db.Agent)
        agt_db = query.filter_by(host=host).first()
        agt_db.admin_state_up = state
        self.adminContext.session.commit()

    def test_router_rescheduler_catches_rpc_db_and_reschedule_exceptions(self):
        with self.router():
            l3_rpc_cb = l3_rpc.L3RpcCallback()
            self._register_agent_states()
            # schedule the router to host A
            l3_rpc_cb.get_router_ids(self.adminContext, host=L3_HOSTA)

            plugin = manager.NeutronManager.get_service_plugins().get(
                service_constants.L3_ROUTER_NAT)
            mock.patch.object(
                plugin, 'reschedule_router',
                side_effect=[
                    db_exc.DBError(),
                    oslo_messaging.RemoteError(),
                    l3agentscheduler.RouterReschedulingFailed(router_id='f',
                                                              agent_id='f'),
                    ValueError('this raises'),
                    Exception()
                ]).start()
            self._take_down_agent_and_run_reschedule(L3_HOSTA)  # DBError
            self._take_down_agent_and_run_reschedule(L3_HOSTA)  # RemoteError
            self._take_down_agent_and_run_reschedule(L3_HOSTA)  # schedule err
            self._take_down_agent_and_run_reschedule(L3_HOSTA)  # Value error
            self._take_down_agent_and_run_reschedule(L3_HOSTA)  # Exception

    def test_router_rescheduler_catches_exceptions_on_fetching_bindings(self):
        with mock.patch('neutron.context.get_admin_context') as get_ctx:
            mock_ctx = mock.Mock()
            get_ctx.return_value = mock_ctx
            mock_ctx.session.query.side_effect = db_exc.DBError()
            plugin = manager.NeutronManager.get_service_plugins().get(
                service_constants.L3_ROUTER_NAT)
            # check that no exception is raised
            plugin.reschedule_routers_from_down_agents()

    def test_router_rescheduler_iterates_after_reschedule_failure(self):
        plugin = manager.NeutronManager.get_service_plugins().get(
            service_constants.L3_ROUTER_NAT)
        l3_rpc_cb = l3_rpc.L3RpcCallback()
        self._register_agent_states()
        with self.router() as r1, self.router() as r2:
            # schedule the routers to host A
            l3_rpc_cb.get_router_ids(self.adminContext, host=L3_HOSTA)

            rs_mock = mock.patch.object(
                plugin, 'reschedule_router',
                side_effect=l3agentscheduler.RouterReschedulingFailed(
                    router_id='f', agent_id='f'),
            ).start()
            self._take_down_agent_and_run_reschedule(L3_HOSTA)
            # make sure both had a reschedule attempt even though first failed
            rs_mock.assert_has_calls(
                [mock.call(mock.ANY, r1['router']['id']),
                 mock.call(mock.ANY, r2['router']['id'])],
                any_order=True)

    def test_router_is_not_rescheduled_from_alive_agent(self):
        with self.router():
            l3_rpc_cb = l3_rpc.L3RpcCallback()
            self._register_agent_states()
            # schedule the router to host A
            l3_rpc_cb.get_router_ids(self.adminContext, host=L3_HOSTA)
            with mock.patch('neutron.db.l3_agentschedulers_db.'
                            'L3AgentSchedulerDbMixin.reschedule_router') as rr:
                # take down some unrelated agent and run reschedule check
                self._take_down_agent_and_run_reschedule(DHCP_HOSTC)
                self.assertFalse(rr.called)

    def test_router_is_not_rescheduled_if_agent_is_back_online(self):
        plugin = manager.NeutronManager.get_service_plugins().get(
            service_constants.L3_ROUTER_NAT)
        l3_rpc_cb = l3_rpc.L3RpcCallback()
        agent = helpers.register_l3_agent(host=L3_HOSTA)
        with self.router(),\
                self.router(),\
                mock.patch.object(plugin, 'reschedule_router') as rs_mock,\
                mock.patch.object(plugin, '_get_agent') as get_agent_mock:

            # schedule the routers to the agent
            l3_rpc_cb.get_router_ids(self.adminContext, host=L3_HOSTA)
            self._take_down_agent_and_run_reschedule(L3_HOSTA)

            # since _get_agent is mocked it will return a Mock object and
            # agent.is_active will return True, so no rescheduling will be
            # done
            self.assertFalse(rs_mock.called)
            # should be called only once, as for the second router the alive
            # agent id will already be in the cache
            get_agent_mock.assert_called_once_with(mock.ANY, agent['id'])

    def test_router_reschedule_from_dead_agent(self):
        with self.router():
            l3_rpc_cb = l3_rpc.L3RpcCallback()
            self._register_agent_states()
            # schedule the router to host A
            ret_a = l3_rpc_cb.get_router_ids(self.adminContext, host=L3_HOSTA)
            self._take_down_agent_and_run_reschedule(L3_HOSTA)
            # B should now pick up the router
            ret_b = l3_rpc_cb.get_router_ids(self.adminContext, host=L3_HOSTB)
        self.assertEqual(ret_b, ret_a)

    def test_router_no_reschedule_from_dead_admin_down_agent(self):
        with self.router() as r:
            l3_rpc_cb = l3_rpc.L3RpcCallback()
            self._register_agent_states()
            # schedule the router to host A
            l3_rpc_cb.get_router_ids(self.adminContext, host=L3_HOSTA)
            self._set_agent_admin_state_up(L3_HOSTA, False)
            self._take_down_agent_and_run_reschedule(L3_HOSTA)

            # A should still have it even though it was inactive due to the
            # admin_state being down
            rab = l3_agentschedulers_db.RouterL3AgentBinding
            binding = (self.adminContext.session.query(rab).
                       filter(rab.router_id == r['router']['id']).first())
            self.assertEqual(L3_HOSTA, binding.l3_agent.host)

            # B should not pick up the router
            ret_b = l3_rpc_cb.get_router_ids(self.adminContext, host=L3_HOSTB)
            self.assertFalse(ret_b)

    def test_router_reschedule_succeeded_after_failed_notification(self):
        l3_plugin = (manager.NeutronManager.get_service_plugins()
                     [service_constants.L3_ROUTER_NAT])
        l3_notifier = l3_plugin.agent_notifiers[constants.AGENT_TYPE_L3]
        l3_rpc_cb = l3_rpc.L3RpcCallback()
        self._register_agent_states()
        with self.router() as router:
            # schedule the router to host A
            l3_rpc_cb.get_router_ids(self.adminContext, host=L3_HOSTA)
            with mock.patch.object(
                    l3_notifier,
                    'router_added_to_agent') as notification_mock:
                notification_mock.side_effect = [
                    oslo_messaging.MessagingTimeout, None]
                self._take_down_agent_and_run_reschedule(L3_HOSTA)
                self.assertEqual(
                    2, l3_notifier.router_added_to_agent.call_count)
            # make sure router was rescheduled even when first attempt
            # failed to notify l3 agent
            l3_agents = self._list_l3_agents_hosting_router(
                router['router']['id'])['agents']
            self.assertEqual(1, len(l3_agents))
            self.assertEqual(L3_HOSTB, l3_agents[0]['host'])

    def test_router_reschedule_failed_notification_all_attempts(self):
        l3_plugin = (manager.NeutronManager.get_service_plugins()
                     [service_constants.L3_ROUTER_NAT])
        l3_notifier = l3_plugin.agent_notifiers[constants.AGENT_TYPE_L3]
        l3_rpc_cb = l3_rpc.L3RpcCallback()
        self._register_agent_states()
        with self.router() as router:
            # schedule the router to host A
            l3_rpc_cb.get_router_ids(self.adminContext, host=L3_HOSTA)
            with mock.patch.object(
                    l3_notifier,
                    'router_added_to_agent') as notification_mock:
                notification_mock.side_effect = (
                    oslo_messaging.MessagingTimeout)
                self._take_down_agent_and_run_reschedule(L3_HOSTA)
                self.assertEqual(
                    l3_agentschedulers_db.AGENT_NOTIFY_MAX_ATTEMPTS,
                    l3_notifier.router_added_to_agent.call_count)
            l3_agents = self._list_l3_agents_hosting_router(
                router['router']['id'])['agents']
            self.assertEqual(0, len(l3_agents))

    def test_router_auto_schedule_with_invalid_router(self):
        with self.router() as router:
            l3_rpc_cb = l3_rpc.L3RpcCallback()
            self._register_agent_states()
            self._delete('routers', router['router']['id'])

            # deleted router
            ret_a = l3_rpc_cb.sync_routers(
                self.adminContext, host=L3_HOSTA,
                router_ids=[router['router']['id']])
            self.assertFalse(ret_a)
            # non-existent router
            ret_a = l3_rpc_cb.sync_routers(
                self.adminContext, host=L3_HOSTA,
                router_ids=[uuidutils.generate_uuid()])
            self.assertFalse(ret_a)

    def test_router_auto_schedule_with_hosted(self):
        with self.router() as router:
            l3_rpc_cb = l3_rpc.L3RpcCallback()
            self._register_agent_states()
            ret_a = l3_rpc_cb.get_router_ids(self.adminContext, host=L3_HOSTA)
            ret_b = l3_rpc_cb.get_router_ids(self.adminContext, host=L3_HOSTB)
            l3_agents = self._list_l3_agents_hosting_router(
                router['router']['id'])
            self.assertEqual(1, len(ret_a))
            self.assertIn(router['router']['id'], ret_a)
            self.assertFalse(len(ret_b))
            self.assertEqual(1, len(l3_agents['agents']))
            self.assertEqual(L3_HOSTA, l3_agents['agents'][0]['host'])

    def test_router_auto_schedule_restart_l3_agent(self):
        with self.router():
            l3_rpc_cb = l3_rpc.L3RpcCallback()
            self._register_agent_states()
            l3_rpc_cb.get_router_ids(self.adminContext, host=L3_HOSTA)
            l3_rpc_cb.get_router_ids(self.adminContext, host=L3_HOSTA)

    def test_router_auto_schedule_with_hosted_2(self):
        # one agent hosts one router
        l3_rpc_cb = l3_rpc.L3RpcCallback()
        with self.router() as router1:
            hosta_id = helpers.register_l3_agent(host=L3_HOSTA).id
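            # get_router_ids auto-schedules any unhosted routers to the
            # requesting agent before returning the ids it hosts.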
            l3_rpc_cb.get_router_ids(self.adminContext, host=L3_HOSTA)
            self._disable_agent(hosta_id, admin_state_up=False)
            with self.router() as router2:
                hostb_id = helpers.register_l3_agent(host=L3_HOSTB).id
                l3_rpc_cb.get_router_ids(self.adminContext, host=L3_HOSTB)
                l3_agents_1 = self._list_l3_agents_hosting_router(
                    router1['router']['id'])
                l3_agents_2 = self._list_l3_agents_hosting_router(
                    router2['router']['id'])
                hosta_routers = self._list_routers_hosted_by_l3_agent(
                    hosta_id)
                num_hosta_routers = len(hosta_routers['routers'])
                hostb_routers = self._list_routers_hosted_by_l3_agent(
                    hostb_id)
                num_hostb_routers = len(hostb_routers['routers'])

        self.assertEqual(1, num_hosta_routers)
        self.assertEqual(1, num_hostb_routers)
        self.assertEqual(1, len(l3_agents_1['agents']))
        self.assertEqual(1, len(l3_agents_2['agents']))
        self.assertEqual(L3_HOSTA, l3_agents_1['agents'][0]['host'])
        self.assertEqual(L3_HOSTB, l3_agents_2['agents'][0]['host'])

    def test_router_auto_schedule_with_disabled(self):
        with self.router(), self.router():
            l3_rpc_cb = l3_rpc.L3RpcCallback()
            self._register_agent_states()
            hosta_id = self._get_agent_id(constants.AGENT_TYPE_L3,
                                          L3_HOSTA)
            hostb_id = self._get_agent_id(constants.AGENT_TYPE_L3,
                                          L3_HOSTB)
            self._disable_agent(hosta_id)
            # first agent will not host router since it is disabled
            l3_rpc_cb.get_router_ids(self.adminContext, host=L3_HOSTA)
            # second agent will host all the routers since first is disabled.
            l3_rpc_cb.get_router_ids(self.adminContext, host=L3_HOSTB)
            hostb_routers = self._list_routers_hosted_by_l3_agent(hostb_id)
            num_hostb_routers = len(hostb_routers['routers'])
            hosta_routers = self._list_routers_hosted_by_l3_agent(hosta_id)
            num_hosta_routers = len(hosta_routers['routers'])
        self.assertEqual(2, num_hostb_routers)
        self.assertEqual(0, num_hosta_routers)

    def test_router_auto_schedule_with_candidates(self):
        with self.router() as router1, self.router() as router2:
            l3_rpc_cb = l3_rpc.L3RpcCallback()
            agent = helpers.register_l3_agent(
                host=L3_HOSTA, router_id=router1['router']['id'])
            l3_rpc_cb.get_router_ids(self.adminContext, host=L3_HOSTA)
            hosta_routers = self._list_routers_hosted_by_l3_agent(agent.id)
            num_hosta_routers = len(hosta_routers['routers'])
            l3_agents_1 = self._list_l3_agents_hosting_router(
                router1['router']['id'])
            l3_agents_2 = self._list_l3_agents_hosting_router(
                router2['router']['id'])
        # L3 agent will host only the compatible router.
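        # (The agent was registered with router1's id in its configuration,
        # so the scheduler only treats it as a candidate for that router.)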
        self.assertEqual(1, num_hosta_routers)
        self.assertEqual(1, len(l3_agents_1['agents']))
        self.assertEqual(0, len(l3_agents_2['agents']))

    def test_rpc_sync_routers(self):
        l3_rpc_cb = l3_rpc.L3RpcCallback()
        self._register_agent_states()

        # No routers
        ret_a = l3_rpc_cb.sync_routers(self.adminContext, host=L3_HOSTA)
        self.assertEqual(0, len(ret_a))

        with self.router() as v1, self.router() as v2, self.router() as v3:
            routers = (v1, v2, v3)
            router_ids = [r['router']['id'] for r in routers]

            # auto schedule routers first
            l3_rpc_cb.get_router_ids(self.adminContext, host=L3_HOSTA)

            # Get all routers
            ret_a = l3_rpc_cb.sync_routers(self.adminContext, host=L3_HOSTA)
            self.assertEqual(3, len(ret_a))
            self.assertEqual(set(router_ids), set([r['id'] for r in ret_a]))

            # Get all routers (router_ids=None)
            ret_a = l3_rpc_cb.sync_routers(self.adminContext, host=L3_HOSTA,
                                           router_ids=None)
            self.assertEqual(3, len(ret_a))
            self.assertEqual(set(router_ids), set([r['id'] for r in ret_a]))

            # Get router2 only
            ret_a = l3_rpc_cb.sync_routers(self.adminContext, host=L3_HOSTA,
                                           router_ids=[router_ids[1]])
            self.assertEqual(1, len(ret_a))
            self.assertIn(router_ids[1], [r['id'] for r in ret_a])

            # Get router1 and router3
            ret_a = l3_rpc_cb.sync_routers(self.adminContext, host=L3_HOSTA,
                                           router_ids=[router_ids[0],
                                                       router_ids[2]])
            self.assertEqual(2, len(ret_a))
            self.assertIn(router_ids[0], [r['id'] for r in ret_a])
            self.assertIn(router_ids[2], [r['id'] for r in ret_a])

    def test_sync_router(self):
        l3_rpc_cb = l3_rpc.L3RpcCallback()
        self._register_agent_states()
        hosta_id = self._get_agent_id(constants.AGENT_TYPE_L3,
                                      L3_HOSTA)

        with self.router() as r1:
            ret_a = l3_rpc_cb.sync_routers(self.adminContext, host=L3_HOSTA,
                                           router_ids=[r1['router']['id']])
            # Not return router to agent if the router is not bound to it.
            self.assertEqual([], ret_a)
            host_routers = self._list_routers_hosted_by_l3_agent(hosta_id)
            # No router will be auto scheduled.
            self.assertEqual(0, len(host_routers['routers']))

    def test_sync_dvr_router(self):
        l3_rpc_cb = l3_rpc.L3RpcCallback()
        dvr_agents = self._register_dvr_agents()

        with self.router() as r1, \
                mock.patch.object(self.l3plugin, 'get_subnet_ids_on_router',
                                  return_value=['fake_subnet_id']), \
                mock.patch.object(self.l3plugin,
                                  '_check_dvr_serviceable_ports_on_host',
                                  return_value=True):
            for l3_agent in dvr_agents:
                host = l3_agent['host']
                ret_a = l3_rpc_cb.sync_routers(
                    self.adminContext, host=host,
                    router_ids=[r1['router']['id']])
                router_ids = [r['id'] for r in ret_a]
                # Return router to agent if there is dvr service port
                # in agent.
                self.assertIn(r1['router']['id'], router_ids)
                host_routers = self._list_routers_hosted_by_l3_agent(
                    l3_agent['id'])
                # No router will be auto scheduled.
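                # NOTE: for DVR the router is returned to the agent purely
                # because the host has serviceable ports; no scheduler
                # binding is created, so the hosted-routers list stays empty.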
                self.assertEqual(0, len(host_routers['routers']))

    def test_router_schedule_with_candidates(self):
        with self.router() as router1,\
                self.router() as router2,\
                self.subnet() as subnet1,\
                self.subnet(cidr='10.0.3.0/24') as subnet2:
            agent = helpers.register_l3_agent(
                host=L3_HOSTA, router_id=router1['router']['id'])
            self._router_interface_action('add',
                                          router1['router']['id'],
                                          subnet1['subnet']['id'],
                                          None)
            self._router_interface_action('add',
                                          router2['router']['id'],
                                          subnet2['subnet']['id'],
                                          None)
            hosta_routers = self._list_routers_hosted_by_l3_agent(agent.id)
            num_hosta_routers = len(hosta_routers['routers'])
            l3_agents_1 = self._list_l3_agents_hosting_router(
                router1['router']['id'])
            l3_agents_2 = self._list_l3_agents_hosting_router(
                router2['router']['id'])
            # safe cleanup
            self._router_interface_action('remove',
                                          router1['router']['id'],
                                          subnet1['subnet']['id'],
                                          None)
            self._router_interface_action('remove',
                                          router2['router']['id'],
                                          subnet2['subnet']['id'],
                                          None)

        # L3 agent will host only the compatible router.
        self.assertEqual(1, num_hosta_routers)
        self.assertEqual(1, len(l3_agents_1['agents']))
        self.assertEqual(0, len(l3_agents_2['agents']))

    def test_router_without_l3_agents(self):
        with self.subnet() as s:
            self._set_net_external(s['subnet']['network_id'])
            data = {'router': {'tenant_id': uuidutils.generate_uuid()}}
            data['router']['name'] = 'router1'
            data['router']['external_gateway_info'] = {
                'network_id': s['subnet']['network_id']}
            router_req = self.new_create_request('routers', data, self.fmt)
            res = router_req.get_response(self.ext_api)
            router = self.deserialize(self.fmt, res)
            l3agents = (
                self.l3plugin.get_l3_agents_hosting_routers(
                    self.adminContext, [router['router']['id']]))
            self._delete('routers', router['router']['id'])
        self.assertEqual(0, len(l3agents))

    def test_dvr_router_scheduling_to_only_dvr_snat_agent(self):
        self._register_dvr_agents()
        with self.subnet() as s:
            net_id = s['subnet']['network_id']
            self._set_net_external(net_id)

            router = {'name': 'router1',
                      'external_gateway_info': {'network_id': net_id},
                      'tenant_id': 'tenant_id',
                      'admin_state_up': True,
                      'distributed': True}
            r = self.l3plugin.create_router(self.adminContext,
                                            {'router': router})
            with mock.patch.object(
                    self.l3plugin,
                    '_check_dvr_serviceable_ports_on_host') as ports_exist:
                # emulating dvr serviceable ports exist on compute node
                ports_exist.return_value = True
                self.l3plugin.schedule_router(
                    self.adminContext, r['id'])

        l3agents = self._list_l3_agents_hosting_router(r['id'])
        self.assertEqual(1, len(l3agents['agents']))
        agent = l3agents['agents'][0]
        self.assertEqual('dvr_snat',
                         agent['configurations']['agent_mode'])

    def test_dvr_router_csnat_rescheduling(self):
        helpers.register_l3_agent(
            host=L3_HOSTA, agent_mode=constants.L3_AGENT_MODE_DVR_SNAT)
        helpers.register_l3_agent(
            host=L3_HOSTB, agent_mode=constants.L3_AGENT_MODE_DVR_SNAT)
        with self.subnet() as s:
            net_id = s['subnet']['network_id']
            self._set_net_external(net_id)

            router = {'name': 'router1',
                      'external_gateway_info': {'network_id': net_id},
                      'tenant_id': 'tenant_id',
                      'admin_state_up': True,
                      'distributed': True}
            r = self.l3plugin.create_router(self.adminContext,
                                            {'router': router})
            self.l3plugin.schedule_router(
                self.adminContext, r['id'])
            l3agents = self._list_l3_agents_hosting_router(r['id'])
            self.assertEqual(1, len(l3agents['agents']))
            agent_host = l3agents['agents'][0]['host']
            self._take_down_agent_and_run_reschedule(agent_host)
            l3agents = self._list_l3_agents_hosting_router(r['id'])
            self.assertEqual(1, len(l3agents['agents']))
            new_agent_host = l3agents['agents'][0]['host']
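            # the csnat portion of the DVR router should have been
            # rescheduled to the remaining dvr_snat agent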
            self.assertNotEqual(agent_host, new_agent_host)

    def test_dvr_router_manual_rescheduling(self):
        helpers.register_l3_agent(
            host=L3_HOSTA, agent_mode=constants.L3_AGENT_MODE_DVR_SNAT)
        helpers.register_l3_agent(
            host=L3_HOSTB, agent_mode=constants.L3_AGENT_MODE_DVR_SNAT)
        with self.subnet() as s:
            net_id = s['subnet']['network_id']
            self._set_net_external(net_id)

            router = {'name': 'router1',
                      'external_gateway_info': {'network_id': net_id},
                      'tenant_id': 'tenant_id',
                      'admin_state_up': True,
                      'distributed': True}
            r = self.l3plugin.create_router(self.adminContext,
                                            {'router': router})
            self.l3plugin.schedule_router(
                self.adminContext, r['id'])
            l3agents = self.l3plugin.list_l3_agents_hosting_router(
                self.adminContext, r['id'])
            self.assertEqual(1, len(l3agents['agents']))
            agent = l3agents['agents'][0]
            # NOTE: removing the router from the l3 agent will also remove
            # the namespace, since there are no other serviceable ports on
            # the node that require it.
            self.l3plugin.remove_router_from_l3_agent(
                self.adminContext, agent['id'], r['id'])
            l3agents = self.l3plugin.list_l3_agents_hosting_router(
                self.adminContext, r['id'])
            self.assertEqual(0, len(l3agents['agents']))
            self.l3plugin.add_router_to_l3_agent(
                self.adminContext, agent['id'], r['id'])
            l3agents = self.l3plugin.list_l3_agents_hosting_router(
                self.adminContext, r['id'])
            self.assertEqual(1, len(l3agents['agents']))
            new_agent = l3agents['agents'][0]
            self.assertEqual(agent['id'], new_agent['id'])

    def test_router_sync_data(self):
        with self.subnet() as s1,\
                self.subnet(cidr='10.0.2.0/24') as s2,\
                self.subnet(cidr='10.0.3.0/24') as s3:
            self._register_agent_states()
            self._set_net_external(s1['subnet']['network_id'])
            data = {'router': {'tenant_id': uuidutils.generate_uuid()}}
            data['router']['name'] = 'router1'
            data['router']['external_gateway_info'] = {
                'network_id': s1['subnet']['network_id']}
            router_req = self.new_create_request('routers', data, self.fmt)
            res = router_req.get_response(self.ext_api)
            router = self.deserialize(self.fmt, res)
            self._router_interface_action('add',
                                          router['router']['id'],
                                          s2['subnet']['id'],
                                          None)
            self._router_interface_action('add',
                                          router['router']['id'],
                                          s3['subnet']['id'],
                                          None)
            l3agents = self._list_l3_agents_hosting_router(
                router['router']['id'])
            self.assertEqual(1, len(l3agents['agents']))
            agents = self._list_agents()
            another_l3_agent_id = None
            another_l3_agent_host = None
            default = l3agents['agents'][0]['id']
            for com in agents['agents']:
                if (com['id'] != default and
                        com['agent_type'] == constants.AGENT_TYPE_L3):
                    another_l3_agent_id = com['id']
                    another_l3_agent_host = com['host']
                    break
            self.assertIsNotNone(another_l3_agent_id)
            self._add_router_to_l3_agent(another_l3_agent_id,
                                         router['router']['id'],
                                         expected_code=exc.HTTPConflict.code)
            self._remove_router_from_l3_agent(default,
                                              router['router']['id'])
            self._add_router_to_l3_agent(another_l3_agent_id,
                                         router['router']['id'])
            l3agents = self._list_l3_agents_hosting_router(
                router['router']['id'])
            self.assertEqual(another_l3_agent_host,
                             l3agents['agents'][0]['host'])
            self._remove_router_from_l3_agent(another_l3_agent_id,
                                              router['router']['id'])
            self._router_interface_action('remove',
                                          router['router']['id'],
                                          s2['subnet']['id'],
                                          None)
            l3agents = self._list_l3_agents_hosting_router(
                router['router']['id'])
            self.assertEqual(1, len(l3agents['agents']))
            self._router_interface_action('remove',
                                          router['router']['id'],
                                          s3['subnet']['id'],
                                          None)
            self._delete('routers', router['router']['id'])

    def _test_router_add_to_l3_agent(self, admin_state_up=True):
        with self.router() as router1:
            self._register_agent_states()
            hosta_id = self._get_agent_id(constants.AGENT_TYPE_L3,
                                          L3_HOSTA)
            if not admin_state_up:
                self._set_agent_admin_state_up(L3_HOSTA, False)
            num_before_add = len(
                self._list_routers_hosted_by_l3_agent(
                    hosta_id)['routers'])
            self._add_router_to_l3_agent(hosta_id,
                                         router1['router']['id'])
            hostb_id = self._get_agent_id(constants.AGENT_TYPE_L3,
                                          L3_HOSTB)
            self._add_router_to_l3_agent(hostb_id,
                                         router1['router']['id'],
                                         expected_code=exc.HTTPConflict.code)
            num_after_add = len(
                self._list_routers_hosted_by_l3_agent(
                    hosta_id)['routers'])
            self.assertEqual(0, num_before_add)
            self.assertEqual(1, num_after_add)

    def test_router_add_to_l3_agent(self):
        self._test_router_add_to_l3_agent()

    def test_router_add_to_l3_agent_with_admin_state_down(self):
        cfg.CONF.set_override(
            'enable_services_on_agents_with_admin_state_down', True)
        self._test_router_add_to_l3_agent(admin_state_up=False)

    def test_router_add_to_l3_agent_two_times(self):
        with self.router() as router1:
            self._register_agent_states()
            hosta_id = self._get_agent_id(constants.AGENT_TYPE_L3,
                                          L3_HOSTA)
            self._add_router_to_l3_agent(hosta_id,
                                         router1['router']['id'])
            # scheduling twice on the same agent is fine
            self._add_router_to_l3_agent(hosta_id,
                                         router1['router']['id'])

    def test_router_add_to_two_l3_agents(self):
        with self.router() as router1:
            self._register_agent_states()
            hosta_id = self._get_agent_id(constants.AGENT_TYPE_L3,
                                          L3_HOSTA)
            hostb_id = self._get_agent_id(constants.AGENT_TYPE_L3,
                                          L3_HOSTB)
            self._add_router_to_l3_agent(hosta_id,
                                         router1['router']['id'])
            self._add_router_to_l3_agent(hostb_id,
                                         router1['router']['id'],
                                         expected_code=exc.HTTPConflict.code)

    def test_router_policy(self):
        with self.router() as router1:
            self._register_agent_states()
            hosta_id = self._get_agent_id(constants.AGENT_TYPE_L3,
                                          L3_HOSTA)
            self._list_routers_hosted_by_l3_agent(
                hosta_id, expected_code=exc.HTTPForbidden.code,
                admin_context=False)
            self._add_router_to_l3_agent(
                hosta_id, router1['router']['id'],
                expected_code=exc.HTTPForbidden.code,
                admin_context=False)
            self._add_router_to_l3_agent(
                hosta_id, router1['router']['id'])
            self._remove_router_from_l3_agent(
                hosta_id, router1['router']['id'],
                expected_code=exc.HTTPForbidden.code,
                admin_context=False)
            self._list_l3_agents_hosting_router(
                router1['router']['id'],
                expected_code=exc.HTTPForbidden.code,
                admin_context=False)

    def _test_sync_routers_from_admin_state_down_agent(self, keep_services):
        if keep_services:
            cfg.CONF.set_override(
                'enable_services_on_agents_with_admin_state_down', True)
        l3_rpc_cb = l3_rpc.L3RpcCallback()
        self._register_agent_states()
        hosta_id = self._get_agent_id(constants.AGENT_TYPE_L3,
                                      L3_HOSTA)
        with self.router() as router:
            self._add_router_to_l3_agent(hosta_id,
                                         router['router']['id'])
            routers = l3_rpc_cb.sync_routers(self.adminContext,
                                             host=L3_HOSTA)
            self.assertEqual(1, len(routers))
            self._set_agent_admin_state_up(L3_HOSTA, False)
            routers = l3_rpc_cb.sync_routers(self.adminContext,
                                             host=L3_HOSTA)
            if keep_services:
                self.assertEqual(1, len(routers))
            else:
                self.assertEqual(0, len(routers))

    def test_l3_agent_keep_services_off(self):
        self._test_sync_routers_from_admin_state_down_agent(False)

    def test_l3_agent_keep_services_on(self):
        self._test_sync_routers_from_admin_state_down_agent(True)

    def test_list_routers_hosted_by_l3_agent_with_invalid_agent(self):
        invalid_agentid = 'non_existing_agent'
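        # an unknown agent id should produce a 404 from the API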
        self._list_routers_hosted_by_l3_agent(invalid_agentid,
                                              exc.HTTPNotFound.code)

    def test_list_networks_hosted_by_dhcp_agent_with_invalid_agent(self):
        invalid_agentid = 'non_existing_agent'
        self._list_networks_hosted_by_dhcp_agent(invalid_agentid,
                                                 exc.HTTPNotFound.code)


class OvsDhcpAgentNotifierTestCase(test_agent.AgentDBTestMixIn,
                                   AgentSchedulerTestMixIn,
                                   test_plugin.NeutronDbPluginV2TestCase):
    plugin_str = 'neutron.plugins.ml2.plugin.Ml2Plugin'

    def setUp(self):
        self.useFixture(tools.AttributeMapMemento())
        super(OvsDhcpAgentNotifierTestCase, self).setUp(self.plugin_str)
        self.dhcp_notifier = dhcp_rpc_agent_api.DhcpAgentNotifyAPI()
        self.dhcp_notifier_cast = mock.patch(
            'neutron.api.rpc.agentnotifiers.dhcp_rpc_agent_api.'
            'DhcpAgentNotifyAPI._cast_message').start()
        ext_mgr = extensions.PluginAwareExtensionManager.get_instance()
        self.ext_api = test_extensions.setup_extensions_middleware(ext_mgr)
        self.adminContext = context.get_admin_context()
        # Add the resources to the global attribute map
        # This is done here as the setup process won't
        # initialize the main API router which extends
        # the global attribute map
        attributes.RESOURCE_ATTRIBUTE_MAP.update(
            agent.RESOURCE_ATTRIBUTE_MAP)
        fake_notifier.reset()

    def test_network_add_to_dhcp_agent_notification(self):
        with self.network() as net1:
            network_id = net1['network']['id']
            self._register_agent_states()
            hosta_id = self._get_agent_id(constants.AGENT_TYPE_DHCP,
                                          DHCP_HOSTA)
            self._add_network_to_dhcp_agent(hosta_id, network_id)
            self.dhcp_notifier_cast.assert_called_with(
                mock.ANY, 'network_create_end',
                {'network': {'id': network_id}}, DHCP_HOSTA)
            notifications = fake_notifier.NOTIFICATIONS
            expected_event_type = 'dhcp_agent.network.add'
            self._assert_notify(notifications, expected_event_type)

    def test_network_remove_from_dhcp_agent_notification(self):
        with self.network() as net1:
            network_id = net1['network']['id']
            self._register_agent_states()
            hosta_id = self._get_agent_id(constants.AGENT_TYPE_DHCP,
                                          DHCP_HOSTA)
            self._add_network_to_dhcp_agent(hosta_id, network_id)
            self._remove_network_from_dhcp_agent(hosta_id, network_id)
            self.dhcp_notifier_cast.assert_called_with(
                mock.ANY, 'network_delete_end',
                {'network_id': network_id}, DHCP_HOSTA)
            notifications = fake_notifier.NOTIFICATIONS
            expected_event_type = 'dhcp_agent.network.remove'
            self._assert_notify(notifications, expected_event_type)

    def test_agent_updated_dhcp_agent_notification(self):
        self._register_agent_states()
        hosta_id = self._get_agent_id(constants.AGENT_TYPE_DHCP,
                                      DHCP_HOSTA)
        self._disable_agent(hosta_id, admin_state_up=False)
        self.dhcp_notifier_cast.assert_called_with(
            mock.ANY, 'agent_updated',
            {'admin_state_up': False}, DHCP_HOSTA)

    def _network_port_create(
            self, hosts, gateway=attributes.ATTR_NOT_SPECIFIED, owner=None):
        for host in hosts:
            helpers.register_dhcp_agent(host)
        with self.network() as net1:
            with self.subnet(network=net1,
                             gateway_ip=gateway) as subnet1:
                if owner:
                    with self.port(subnet=subnet1,
                                   device_owner=owner) as port:
                        return [net1, subnet1, port]
                else:
                    with self.port(subnet=subnet1) as port:
                        return [net1, subnet1, port]

    def _notification_mocks(self, hosts, net, subnet, port):
        host_calls = {}
        for host in hosts:
            expected_calls = [
                mock.call(
                    mock.ANY,
                    'network_create_end',
                    {'network': {'id': net['network']['id']}},
                    host),
                mock.call(
                    mock.ANY,
                    'subnet_create_end',
                    subnet,
                    host, 'dhcp_agent'),
                mock.call(
                    mock.ANY,
                    'port_create_end',
                    {'port': port['port']},
                    host, 'dhcp_agent')]
            host_calls[host] = expected_calls
        return host_calls

    def test_network_port_create_notification(self):
        hosts = [DHCP_HOSTA]
        net, subnet, port = self._network_port_create(hosts)
        expected_calls = self._notification_mocks(hosts, net, subnet, port)
        self.assertEqual(
            expected_calls[DHCP_HOSTA],
            self.dhcp_notifier_cast.call_args_list)
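    # NOTE: with dhcp_agents_per_network set to 2, the create notifications
    # below are fanned out to both hosting agents and their relative order
    # is not guaranteed, so membership is checked with assertIn rather than
    # comparing the full call list.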
    def test_network_ha_port_create_notification(self):
        cfg.CONF.set_override('dhcp_agents_per_network', 2)
        hosts = [DHCP_HOSTA, DHCP_HOSTC]
        net, subnet, port = self._network_port_create(hosts)
        expected_calls = self._notification_mocks(hosts, net, subnet, port)
        for expected in expected_calls[DHCP_HOSTA]:
            self.assertIn(expected, self.dhcp_notifier_cast.call_args_list)
        for expected in expected_calls[DHCP_HOSTC]:
            self.assertIn(expected, self.dhcp_notifier_cast.call_args_list)

    def _is_schedule_network_called(self, device_id):
        plugin = manager.NeutronManager.get_plugin()
        notifier = plugin.agent_notifiers[constants.AGENT_TYPE_DHCP]
        with self.subnet() as subnet,\
                mock.patch.object(plugin,
                                  'get_dhcp_agents_hosting_networks',
                                  return_value=[]),\
                mock.patch.object(notifier,
                                  '_schedule_network',
                                  return_value=[]) as mock_sched:
            with self.port(subnet=subnet,
                           device_id=device_id):
                return mock_sched.called

    def test_reserved_dhcp_port_creation(self):
        device_id = constants.DEVICE_ID_RESERVED_DHCP_PORT
        self.assertFalse(self._is_schedule_network_called(device_id))

    def test_unreserved_dhcp_port_creation(self):
        device_id = 'not_reserved'
        self.assertTrue(self._is_schedule_network_called(device_id))


class OvsL3AgentNotifierTestCase(test_l3.L3NatTestCaseMixin,
                                 test_agent.AgentDBTestMixIn,
                                 AgentSchedulerTestMixIn,
                                 test_plugin.NeutronDbPluginV2TestCase):
    plugin_str = 'neutron.plugins.ml2.plugin.Ml2Plugin'
    l3_plugin = ('neutron.tests.unit.extensions.test_l3.'
                 'TestL3NatAgentSchedulingServicePlugin')

    def setUp(self):
        self.dhcp_notifier_cls_p = mock.patch(
            'neutron.api.rpc.agentnotifiers.dhcp_rpc_agent_api.'
            'DhcpAgentNotifyAPI')
        self.dhcp_notifier = mock.Mock(name='dhcp_notifier')
        self.dhcp_notifier_cls = self.dhcp_notifier_cls_p.start()
        self.dhcp_notifier_cls.return_value = self.dhcp_notifier

        self.useFixture(tools.AttributeMapMemento())

        if self.l3_plugin:
            service_plugins = {'l3_plugin_name': self.l3_plugin}
        else:
            service_plugins = None
        super(OvsL3AgentNotifierTestCase, self).setUp(
            self.plugin_str, service_plugins=service_plugins)
        ext_mgr = extensions.PluginAwareExtensionManager.get_instance()
        self.ext_api = test_extensions.setup_extensions_middleware(ext_mgr)
        self.adminContext = context.get_admin_context()
        # Add the resources to the global attribute map
        # This is done here as the setup process won't
        # initialize the main API router which extends
        # the global attribute map
        attributes.RESOURCE_ATTRIBUTE_MAP.update(
            agent.RESOURCE_ATTRIBUTE_MAP)
        fake_notifier.reset()

    def test_router_add_to_l3_agent_notification(self):
        l3_plugin = (manager.NeutronManager.get_service_plugins()
                     [service_constants.L3_ROUTER_NAT])
        l3_notifier = l3_plugin.agent_notifiers[constants.AGENT_TYPE_L3]
        with mock.patch.object(
                l3_notifier.client,
                'prepare',
                return_value=l3_notifier.client) as mock_prepare,\
                mock.patch.object(l3_notifier.client, 'call') as mock_call,\
                self.router() as router1:
            self._register_agent_states()
            hosta_id = self._get_agent_id(constants.AGENT_TYPE_L3,
                                          L3_HOSTA)
            self._add_router_to_l3_agent(hosta_id,
                                         router1['router']['id'])
            routers = [router1['router']['id']]
            mock_prepare.assert_called_with(server='hosta')
            mock_call.assert_called_with(
                mock.ANY, 'router_added_to_agent', payload=routers)
            notifications = fake_notifier.NOTIFICATIONS
            expected_event_type = 'l3_agent.router.add'
            self._assert_notify(notifications, expected_event_type)

    def test_router_remove_from_l3_agent_notification(self):
        l3_plugin = (manager.NeutronManager.get_service_plugins()
                     [service_constants.L3_ROUTER_NAT])
        l3_notifier = l3_plugin.agent_notifiers[constants.AGENT_TYPE_L3]
        with mock.patch.object(
                l3_notifier.client,
                'prepare',
                return_value=l3_notifier.client) as mock_prepare,\
                mock.patch.object(l3_notifier.client, 'cast') as mock_cast,\
                mock.patch.object(l3_notifier.client, 'call'),\
                self.router() as router1:
            self._register_agent_states()
            hosta_id = self._get_agent_id(constants.AGENT_TYPE_L3,
                                          L3_HOSTA)
            self._add_router_to_l3_agent(hosta_id,
                                         router1['router']['id'])
            self._remove_router_from_l3_agent(hosta_id,
                                              router1['router']['id'])
            mock_prepare.assert_called_with(server='hosta')
            mock_cast.assert_called_with(
                mock.ANY, 'router_removed_from_agent',
                payload={'router_id': router1['router']['id']})
            notifications = fake_notifier.NOTIFICATIONS
            expected_event_type = 'l3_agent.router.remove'
            self._assert_notify(notifications, expected_event_type)

    def test_agent_updated_l3_agent_notification(self):
        l3_plugin = (manager.NeutronManager.get_service_plugins()
                     [service_constants.L3_ROUTER_NAT])
        l3_notifier = l3_plugin.agent_notifiers[constants.AGENT_TYPE_L3]
        with mock.patch.object(
                l3_notifier.client,
                'prepare',
                return_value=l3_notifier.client) as mock_prepare,\
                mock.patch.object(l3_notifier.client, 'cast') as mock_cast:
            agent_id = helpers.register_l3_agent(L3_HOSTA).id
            self._disable_agent(agent_id, admin_state_up=False)

            mock_prepare.assert_called_with(server=L3_HOSTA)

            mock_cast.assert_called_with(
                mock.ANY, 'agent_updated', payload={'admin_state_up': False})
neutron-8.4.0/neutron/tests/unit/db/test_l3_dvr_db.py0000664000567000056710000006761013044372760024035 0ustar jenkinsjenkins00000000000000# Copyright (c) 2014 OpenStack Foundation, all rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
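# NOTE: the tests below exercise the DVR-aware L3 DB mixin (l3_dvr_db)
# largely in isolation: FakeL3Plugin composes just the DB mixins under test
# rather than loading a full L3 service plugin, and most collaborators are
# replaced with mocks.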
import mock
from oslo_utils import uuidutils

from neutron.common import constants as l3_const
from neutron.common import exceptions
from neutron import context
from neutron.db import agents_db
from neutron.db import common_db_mixin
from neutron.db import l3_agentschedulers_db
from neutron.db import l3_dvr_db
from neutron.extensions import portbindings
from neutron import manager
from neutron.plugins.common import constants as plugin_const
from neutron.tests.unit.db import test_db_base_plugin_v2

_uuid = uuidutils.generate_uuid


class FakeL3Plugin(common_db_mixin.CommonDbMixin,
                   l3_dvr_db.L3_NAT_with_dvr_db_mixin,
                   l3_agentschedulers_db.L3AgentSchedulerDbMixin,
                   agents_db.AgentDbMixin):
    pass


class L3DvrTestCase(test_db_base_plugin_v2.NeutronDbPluginV2TestCase):

    def setUp(self):
        core_plugin = 'neutron.plugins.ml2.plugin.Ml2Plugin'
        super(L3DvrTestCase, self).setUp(plugin=core_plugin)
        self.core_plugin = manager.NeutronManager.get_plugin()
        self.ctx = context.get_admin_context()
        self.mixin = FakeL3Plugin()

    def _create_router(self, router):
        with self.ctx.session.begin(subtransactions=True):
            return self.mixin._create_router_db(self.ctx, router,
                                                'foo_tenant')

    def _test__create_router_db(self, expected=False, distributed=None):
        router = {'name': 'foo_router', 'admin_state_up': True}
        if distributed is not None:
            router['distributed'] = distributed
        result = self._create_router(router)
        self.assertEqual(expected, result.extra_attributes['distributed'])

    def test_create_router_db_default(self):
        self._test__create_router_db(expected=False)

    def test_create_router_db_centralized(self):
        self._test__create_router_db(expected=False, distributed=False)

    def test_create_router_db_distributed(self):
        self._test__create_router_db(expected=True, distributed=True)

    def test__validate_router_migration_on_router_update(self):
        router = {
            'name': 'foo_router',
            'admin_state_up': True,
            'distributed': True
        }
        router_db = self._create_router(router)
        self.assertIsNone(self.mixin._validate_router_migration(
            self.ctx, router_db, {'name': 'foo_router_2'}))

    def test__validate_router_migration_raise_error(self):
        router = {
            'name': 'foo_router',
            'admin_state_up': True,
            'distributed': True
        }
        router_db = self._create_router(router)
        self.assertRaises(exceptions.BadRequest,
                          self.mixin._validate_router_migration,
                          self.ctx, router_db, {'distributed': False})

    def test_upgrade_active_router_to_distributed_validation_failure(self):
        router = {'name': 'foo_router', 'admin_state_up': True}
        router_db = self._create_router(router)
        update = {'distributed': True}
        self.assertRaises(exceptions.BadRequest,
                          self.mixin._validate_router_migration,
                          self.ctx, router_db, update)

    def test_update_router_db_centralized_to_distributed(self):
        router = {'name': 'foo_router', 'admin_state_up': True}
        agent = {'id': _uuid()}
        distributed = {'distributed': True}
        router_db = self._create_router(router)
        router_id = router_db['id']
        self.assertFalse(router_db.extra_attributes.distributed)
        self.mixin._get_router = mock.Mock(return_value=router_db)
        self.mixin._validate_router_migration = mock.Mock()
        self.mixin._update_distributed_attr = mock.Mock()
        self.mixin.list_l3_agents_hosting_router = mock.Mock(
            return_value={'agents': [agent]})
        self.mixin._unbind_router = mock.Mock()
        router_db = self.mixin._update_router_db(
            self.ctx, router_id, distributed)
        # Assert that the DB value has changed
        self.assertTrue(router_db.extra_attributes.distributed)
        self.assertEqual(1,
                         self.mixin._update_distributed_attr.call_count)

    def _test_get_device_owner(self, is_distributed=False,
                               expected=l3_const.DEVICE_OWNER_ROUTER_INTF,
                               pass_router_id=True):
        router = {
            'name': 'foo_router',
            'admin_state_up': True,
            'distributed': is_distributed
        }
        router_db = self._create_router(router)
        router_pass = router_db['id'] if pass_router_id else router_db
        with mock.patch.object(self.mixin, '_get_router') as f:
            f.return_value = router_db
            result = self.mixin._get_device_owner(self.ctx, router_pass)
            self.assertEqual(expected, result)

    def test_get_device_owner_by_router_id(self):
        self._test_get_device_owner()

    def test__get_device_owner_centralized(self):
        self._test_get_device_owner(pass_router_id=False)

    def test__get_device_owner_distributed(self):
        self._test_get_device_owner(
            is_distributed=True,
            expected=l3_const.DEVICE_OWNER_DVR_INTERFACE,
            pass_router_id=False)

    def _test__is_distributed_router(self, router, expected):
        result = l3_dvr_db.is_distributed_router(router)
        self.assertEqual(expected, result)

    def test__is_distributed_router_by_db_object(self):
        router = {'name': 'foo_router', 'admin_state_up': True}
        router_db = self._create_router(router)
        self.mixin._get_device_owner(mock.ANY, router_db)

    def test__is_distributed_router_default(self):
        router = {'id': 'foo_router_id'}
        self._test__is_distributed_router(router, False)

    def test__is_distributed_router_centralized(self):
        router = {'id': 'foo_router_id', 'distributed': False}
        self._test__is_distributed_router(router, False)

    def test__is_distributed_router_distributed(self):
        router = {'id': 'foo_router_id', 'distributed': True}
        self._test__is_distributed_router(router, True)

    def test__get_agent_gw_ports_exist_for_network(self):
        with mock.patch.object(manager.NeutronManager, 'get_plugin') as gp:
            plugin = mock.Mock()
            gp.return_value = plugin
            plugin.get_ports.return_value = []
            self.mixin._get_agent_gw_ports_exist_for_network(
                self.ctx, 'network_id', 'host', 'agent_id')
        plugin.get_ports.assert_called_with(self.ctx, {
            'network_id': ['network_id'],
            'device_id': ['agent_id'],
            'device_owner': [l3_const.DEVICE_OWNER_AGENT_GW]})

    def _test_prepare_direct_delete_dvr_internal_ports(self, port):
        with mock.patch.object(manager.NeutronManager, 'get_plugin') as gp:
            plugin = mock.Mock()
            gp.return_value = plugin
            plugin.get_port.return_value = port
            self.mixin._router_exists = mock.Mock(return_value=True)
            self.assertRaises(exceptions.ServicePortInUse,
                              self.mixin.prevent_l3_port_deletion,
                              self.ctx,
                              port['id'])

    def test_prevent_delete_floatingip_agent_gateway_port(self):
        port = {
            'id': 'my_port_id',
            'fixed_ips': mock.ANY,
            'device_id': 'r_id',
            'device_owner': l3_const.DEVICE_OWNER_AGENT_GW
        }
        self._test_prepare_direct_delete_dvr_internal_ports(port)

    def test_prevent_delete_csnat_port(self):
        port = {
            'id': 'my_port_id',
            'fixed_ips': mock.ANY,
            'device_id': 'r_id',
            'device_owner': l3_const.DEVICE_OWNER_ROUTER_SNAT
        }
        self._test_prepare_direct_delete_dvr_internal_ports(port)

    def test__create_gw_port_with_no_gateway(self):
        router = {
            'name': 'foo_router',
            'admin_state_up': True,
            'distributed': True,
        }
        router_db = self._create_router(router)
        router_id = router_db['id']
        self.assertTrue(router_db.extra_attributes.distributed)
        with mock.patch.object(l3_dvr_db.l3_db.L3_NAT_db_mixin,
                               '_create_gw_port'),\
                mock.patch.object(
                    self.mixin,
                    '_create_snat_intf_ports_if_not_exists') as cs:
            self.mixin._create_gw_port(
                self.ctx, router_id, router_db, mock.ANY,
                mock.ANY)
            self.assertFalse(cs.call_count)

    def test_build_routers_list_with_gw_port_mismatch(self):
        routers = [{'gw_port_id': 'foo_gw_port_id', 'id': 'foo_router_id'}]
        gw_ports = {}
        routers = self.mixin._build_routers_list(self.ctx, routers, gw_ports)
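        # With an empty gw_ports mapping the router's gw_port_id cannot be
        # resolved, so no 'gw_port' key should be attached to the router.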
        self.assertIsNone(routers[0].get('gw_port'))

    def setup_port_has_ipv6_address(self, port):
        with mock.patch.object(l3_dvr_db.l3_db.L3_NAT_db_mixin,
                               '_port_has_ipv6_address') as pv6:
            pv6.return_value = True
            result = self.mixin._port_has_ipv6_address(port)
            return result, pv6

    def test__port_has_ipv6_address_for_dvr_snat_port(self):
        port = {
            'id': 'my_port_id',
            'device_owner': l3_const.DEVICE_OWNER_ROUTER_SNAT,
        }
        result, pv6 = self.setup_port_has_ipv6_address(port)
        self.assertFalse(result)
        self.assertFalse(pv6.called)

    def test__port_has_ipv6_address_for_non_snat_ports(self):
        port = {
            'id': 'my_port_id',
            'device_owner': l3_const.DEVICE_OWNER_DVR_INTERFACE,
        }
        result, pv6 = self.setup_port_has_ipv6_address(port)
        self.assertTrue(result)
        self.assertTrue(pv6.called)

    def _helper_delete_floatingip_agent_gateway_port(self, port_host):
        ports = [{
            'id': 'my_port_id',
            portbindings.HOST_ID: 'foo_host',
            'network_id': 'ext_network_id',
            'device_owner': l3_const.DEVICE_OWNER_ROUTER_GW
        }, {
            'id': 'my_new_port_id',
            portbindings.HOST_ID: 'my_foo_host',
            'network_id': 'ext_network_id',
            'device_owner': l3_const.DEVICE_OWNER_ROUTER_GW
        }]
        with mock.patch.object(manager.NeutronManager, 'get_plugin') as gp:
            plugin = mock.Mock()
            gp.return_value = plugin
            plugin.get_ports.return_value = ports
            self.mixin.delete_floatingip_agent_gateway_port(
                self.ctx, port_host, 'ext_network_id')
        plugin.get_ports.assert_called_with(self.ctx, filters={
            'network_id': ['ext_network_id'],
            'device_owner': [l3_const.DEVICE_OWNER_AGENT_GW]})
        if port_host:
            plugin.ipam.delete_port.assert_called_once_with(
                self.ctx, 'my_port_id')
        else:
            plugin.ipam.delete_port.assert_called_with(
                self.ctx, 'my_new_port_id')

    def test_delete_floatingip_agent_gateway_port_without_host_id(self):
        self._helper_delete_floatingip_agent_gateway_port(None)

    def test_delete_floatingip_agent_gateway_port_with_host_id(self):
        self._helper_delete_floatingip_agent_gateway_port(
            'foo_host')

    def _setup_delete_current_gw_port_deletes_fip_agent_gw_port(
            self, port=None, gw_port=True):
        router = mock.MagicMock()
        router.extra_attributes.distributed = True
        if gw_port:
            gw_port_db = {
                'id': 'my_gw_id',
                'network_id': 'ext_net_id',
                'device_owner': l3_const.DEVICE_OWNER_ROUTER_GW
            }
            router.gw_port = gw_port_db
        else:
            router.gw_port = None

        with mock.patch.object(manager.NeutronManager, 'get_plugin') as gp,\
                mock.patch.object(l3_dvr_db.l3_db.L3_NAT_db_mixin,
                                  '_delete_current_gw_port'),\
                mock.patch.object(
                    self.mixin,
                    '_get_router') as grtr,\
                mock.patch.object(
                    self.mixin,
                    'delete_csnat_router_interface_ports') as del_csnat_port,\
                mock.patch.object(
                    self.mixin,
                    'delete_floatingip_agent_gateway_port'
                ) as del_agent_gw_port,\
                mock.patch.object(
                    self.mixin.l3_rpc_notifier,
                    'delete_fipnamespace_for_ext_net') as del_fip:
            plugin = mock.Mock()
            gp.return_value = plugin
            plugin.get_ports.return_value = port
            grtr.return_value = router
            self.mixin._delete_current_gw_port(
                self.ctx, router['id'], router, 'ext_network_id')
            return (router, plugin, del_csnat_port,
                    del_agent_gw_port, del_fip)

    def test_delete_current_gw_port_deletes_fip_agent_gw_port_and_fipnamespace(
            self):
        rtr, plugin, d_csnat_port, d_agent_gw_port, del_fip = (
            self._setup_delete_current_gw_port_deletes_fip_agent_gw_port())
        self.assertTrue(d_csnat_port.called)
        self.assertTrue(d_agent_gw_port.called)
        d_csnat_port.assert_called_once_with(
            mock.ANY, rtr)
        d_agent_gw_port.assert_called_once_with(mock.ANY, None, 'ext_net_id')
        del_fip.assert_called_once_with(mock.ANY, 'ext_net_id')
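    # Negative paths: if other router gateway ports still exist on the
    # external network, or if the router has no gateway port at all, the
    # FIP agent gateway port and the FIP namespace must be left untouched.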
    def test_delete_current_gw_port_never_calls_delete_fip_agent_gw_port(
            self):
        port = [{
            'id': 'my_port_id',
            'network_id': 'ext_net_id',
            'device_owner': l3_const.DEVICE_OWNER_ROUTER_GW
        }, {
            'id': 'my_new_port_id',
            'network_id': 'ext_net_id',
            'device_owner': l3_const.DEVICE_OWNER_ROUTER_GW
        }]
        rtr, plugin, d_csnat_port, d_agent_gw_port, del_fip = (
            self._setup_delete_current_gw_port_deletes_fip_agent_gw_port(
                port=port))
        self.assertTrue(d_csnat_port.called)
        self.assertFalse(d_agent_gw_port.called)
        self.assertFalse(del_fip.called)
        d_csnat_port.assert_called_once_with(
            mock.ANY, rtr)

    def test_delete_current_gw_port_never_calls_delete_fipnamespace(self):
        rtr, plugin, d_csnat_port, d_agent_gw_port, del_fip = (
            self._setup_delete_current_gw_port_deletes_fip_agent_gw_port(
                gw_port=False))
        self.assertFalse(d_csnat_port.called)
        self.assertFalse(d_agent_gw_port.called)
        self.assertFalse(del_fip.called)

    def _floatingip_on_port_test_setup(self, hostid):
        router = {'id': 'foo_router_id', 'distributed': True}
        floatingip = {
            'id': _uuid(),
            'port_id': _uuid(),
            'router_id': 'foo_router_id',
            'host': hostid
        }
        if not hostid:
            hostid = 'not_my_host_id'
        routers = {
            'foo_router_id': router
        }
        fipagent = {
            'id': _uuid()
        }

        # NOTE: mock.patch is not needed here since self.mixin is created
        # fresh for each test. It doesn't work with some methods since the
        # mixin is tested in isolation (e.g. _get_agent_by_type_and_host).
        self.mixin._get_dvr_service_port_hostid = mock.Mock(
            return_value=hostid)
        self.mixin._get_agent_by_type_and_host = mock.Mock(
            return_value=fipagent)
        self.mixin._get_fip_sync_interfaces = mock.Mock(
            return_value='fip_interface')
        agent = mock.Mock()
        agent.id = fipagent['id']

        self.mixin._process_floating_ips_dvr(self.ctx, routers, [floatingip],
                                             hostid, agent)
        return (router, floatingip)

    def test_floatingip_on_port_not_host(self):
        router, fip = self._floatingip_on_port_test_setup(None)

        self.assertNotIn(l3_const.FLOATINGIP_KEY, router)
        self.assertNotIn(l3_const.FLOATINGIP_AGENT_INTF_KEY, router)

    def test_floatingip_on_port_with_host(self):
        router, fip = self._floatingip_on_port_test_setup(_uuid())

        self.assertTrue(self.mixin._get_fip_sync_interfaces.called)

        self.assertIn(l3_const.FLOATINGIP_KEY, router)
        self.assertIn(l3_const.FLOATINGIP_AGENT_INTF_KEY, router)
        self.assertIn(fip, router[l3_const.FLOATINGIP_KEY])
        self.assertIn('fip_interface',
                      router[l3_const.FLOATINGIP_AGENT_INTF_KEY])

    def _setup_test_create_floatingip(
            self, fip, floatingip_db, router_db):
        port = {
            'id': '1234',
            portbindings.HOST_ID: 'myhost',
            'network_id': 'external_net'
        }

        with mock.patch.object(self.mixin, 'get_router') as grtr,\
                mock.patch.object(self.mixin,
                                  '_get_dvr_service_port_hostid') as vmp,\
                mock.patch.object(
                    self.mixin,
                    '_get_dvr_migrating_service_port_hostid'
                ) as mvmp,\
                mock.patch.object(
                    self.mixin,
                    'create_fip_agent_gw_port_if_not_exists') as c_fip,\
                mock.patch.object(l3_dvr_db.l3_db.L3_NAT_db_mixin,
                                  '_update_fip_assoc'):
            grtr.return_value = router_db
            vmp.return_value = 'my-host'
            mvmp.return_value = 'my-future-host'
            self.mixin._update_fip_assoc(
                self.ctx, fip, floatingip_db, port)
            return c_fip

    def test_create_floatingip_agent_gw_port_with_dvr_router(self):
        floatingip = {
            'id': _uuid(),
            'router_id': 'foo_router_id'
        }
        router = {'id': 'foo_router_id', 'distributed': True}
        fip = {
            'id': _uuid(),
            'port_id': _uuid()
        }
        create_fip = (
            self._setup_test_create_floatingip(
                fip, floatingip, router))
        self.assertTrue(create_fip.called)

    def test_create_floatingip_agent_gw_port_with_non_dvr_router(self):
        floatingip = {
            'id': _uuid(),
            'router_id':
                'foo_router_id'
        }
        router = {'id': 'foo_router_id', 'distributed': False}
        fip = {
            'id': _uuid(),
            'port_id': _uuid()
        }
        create_fip = (
            self._setup_test_create_floatingip(
                fip, floatingip, router))
        self.assertFalse(create_fip.called)

    def test_remove_router_interface_csnat_ports_removal(self):
        router_dict = {'name': 'test_router', 'admin_state_up': True,
                       'distributed': True}
        router = self._create_router(router_dict)
        plugin = mock.MagicMock()
        with self.network() as net_ext,\
                self.subnet() as subnet1,\
                self.subnet(cidr='20.0.0.0/24') as subnet2:
            ext_net_id = net_ext['network']['id']
            self.core_plugin.update_network(
                self.ctx, ext_net_id,
                {'network': {'router:external': True}})
            self.mixin.update_router(
                self.ctx, router['id'],
                {'router': {'external_gateway_info':
                            {'network_id': ext_net_id}}})
            self.mixin.add_router_interface(
                self.ctx, router['id'],
                {'subnet_id': subnet1['subnet']['id']})
            self.mixin.add_router_interface(
                self.ctx, router['id'],
                {'subnet_id': subnet2['subnet']['id']})

            csnat_filters = {'device_owner':
                             [l3_const.DEVICE_OWNER_ROUTER_SNAT]}
            csnat_ports = self.core_plugin.get_ports(
                self.ctx, filters=csnat_filters)
            self.assertEqual(2, len(csnat_ports))

            dvr_filters = {'device_owner':
                           [l3_const.DEVICE_OWNER_DVR_INTERFACE]}
            dvr_ports = self.core_plugin.get_ports(
                self.ctx, filters=dvr_filters)
            self.assertEqual(2, len(dvr_ports))

            with mock.patch.object(manager.NeutronManager,
                                   'get_service_plugins') as get_svc_plugin:
                get_svc_plugin.return_value = {
                    plugin_const.L3_ROUTER_NAT: plugin}
                self.mixin.manager = manager
                self.mixin.remove_router_interface(
                    self.ctx, router['id'],
                    {'port_id': dvr_ports[0]['id']})

            csnat_ports = self.core_plugin.get_ports(
                self.ctx, filters=csnat_filters)
            self.assertEqual(1, len(csnat_ports))
            self.assertEqual(dvr_ports[1]['fixed_ips'][0]['subnet_id'],
                             csnat_ports[0]['fixed_ips'][0]['subnet_id'])

            dvr_ports = self.core_plugin.get_ports(
                self.ctx, filters=dvr_filters)
            self.assertEqual(1, len(dvr_ports))

    def test_remove_router_interface_csnat_ports_removal_with_ipv6(self):
        router_dict = {'name': 'test_router', 'admin_state_up': True,
                       'distributed': True}
        router = self._create_router(router_dict)
        plugin = mock.MagicMock()
        with self.network() as net_ext, self.network() as net_int:
            ext_net_id = net_ext['network']['id']
            self.core_plugin.update_network(
                self.ctx, ext_net_id,
                {'network': {'router:external': True}})
            self.mixin.update_router(
                self.ctx, router['id'],
                {'router': {'external_gateway_info':
                            {'network_id': ext_net_id}}})
            with self.subnet(
                network=net_int, cidr='20.0.0.0/24') as subnet_v4,\
                self.subnet(
                    network=net_int, cidr='fe80::/64',
                    gateway_ip='fe80::1', ip_version=6) as subnet_v6:
                self.mixin.add_router_interface(
                    self.ctx, router['id'],
                    {'subnet_id': subnet_v4['subnet']['id']})
                self.mixin.add_router_interface(
                    self.ctx, router['id'],
                    {'subnet_id': subnet_v6['subnet']['id']})

                csnat_filters = {'device_owner':
                                 [l3_const.DEVICE_OWNER_ROUTER_SNAT]}
                csnat_ports = self.core_plugin.get_ports(
                    self.ctx, filters=csnat_filters)
                self.assertEqual(2, len(csnat_ports))

                dvr_filters = {'device_owner':
                               [l3_const.DEVICE_OWNER_DVR_INTERFACE]}
                dvr_ports = self.core_plugin.get_ports(
                    self.ctx, filters=dvr_filters)
                self.assertEqual(2, len(dvr_ports))

                with mock.patch.object(
                        manager.NeutronManager,
                        'get_service_plugins') as get_svc_plugin:
                    get_svc_plugin.return_value = {
                        plugin_const.L3_ROUTER_NAT: plugin}
                    self.mixin.manager = manager
                    self.mixin.remove_router_interface(
                        self.ctx, router['id'],
                        {'subnet_id': subnet_v4['subnet']['id']})

                csnat_ports = self.core_plugin.get_ports(
                    self.ctx,
                    filters=csnat_filters)
                self.assertEqual(1, len(csnat_ports))
                self.assertEqual(
                    subnet_v6['subnet']['id'],
                    csnat_ports[0]['fixed_ips'][0]['subnet_id'])

                dvr_ports = self.core_plugin.get_ports(
                    self.ctx, filters=dvr_filters)
                self.assertEqual(1, len(dvr_ports))

    def test__validate_router_migration_notify_advanced_services(self):
        router = {'name': 'foo_router', 'admin_state_up': False}
        router_db = self._create_router(router)
        with mock.patch.object(l3_dvr_db.registry, 'notify') as mock_notify:
            self.mixin._validate_router_migration(
                self.ctx, router_db, {'distributed': True})
            kwargs = {'context': self.ctx, 'router': router_db}
            mock_notify.assert_called_once_with(
                'router', 'before_update', self.mixin, **kwargs)

    def _test_update_arp_entry_for_dvr_service_port(
            self, device_owner, action):
        router_dict = {'name': 'test_router', 'admin_state_up': True,
                       'distributed': True}
        router = self._create_router(router_dict)
        with mock.patch.object(manager.NeutronManager, 'get_plugin') as gp:
            plugin = mock.Mock()
            l3_notify = self.mixin.l3_rpc_notifier = mock.Mock()
            gp.return_value = plugin
            port = {
                'id': 'my_port_id',
                'fixed_ips': [
                    {'subnet_id': '51edc9e0-24f9-47f2-8e1e-2a41cb691323',
                     'ip_address': '10.0.0.11'},
                    {'subnet_id': '2b7c8a07-6f8e-4937-8701-f1d5da1a807c',
                     'ip_address': '10.0.0.21'},
                    {'subnet_id': '48534187-f077-4e81-93ff-81ec4cc0ad3b',
                     'ip_address': 'fd45:1515:7e0:0:f816:3eff:fe1a:1111'}],
                'mac_address': 'my_mac',
                'device_owner': device_owner
            }
            dvr_port = {
                'id': 'dvr_port_id',
                'fixed_ips': mock.ANY,
                'device_owner': l3_const.DEVICE_OWNER_DVR_INTERFACE,
                'device_id': router['id']
            }
            plugin.get_ports.return_value = [dvr_port]
            if action == 'add':
                self.mixin.update_arp_entry_for_dvr_service_port(
                    self.ctx, port)
                self.assertEqual(3, l3_notify.add_arp_entry.call_count)
            elif action == 'del':
                self.mixin.delete_arp_entry_for_dvr_service_port(
                    self.ctx, port)
                self.assertEqual(3, l3_notify.del_arp_entry.call_count)

    def test_update_arp_entry_for_dvr_service_port_added(self):
        action = 'add'
        device_owner = l3_const.DEVICE_OWNER_LOADBALANCER
        self._test_update_arp_entry_for_dvr_service_port(device_owner, action)

    def test_update_arp_entry_for_dvr_service_port_deleted(self):
        action = 'del'
        device_owner = l3_const.DEVICE_OWNER_LOADBALANCER
        self._test_update_arp_entry_for_dvr_service_port(device_owner, action)

    def test_add_router_interface_csnat_ports_failure(self):
        router_dict = {'name': 'test_router', 'admin_state_up': True,
                       'distributed': True}
        router = self._create_router(router_dict)
        with self.network() as net_ext,\
                self.subnet() as subnet:
            ext_net_id = net_ext['network']['id']
            self.core_plugin.update_network(
                self.ctx, ext_net_id,
                {'network': {'router:external': True}})
            self.mixin.update_router(
                self.ctx, router['id'],
                {'router': {'external_gateway_info':
                            {'network_id': ext_net_id}}})
            with mock.patch.object(
                    self.mixin, '_add_csnat_router_interface_port') as f:
                f.side_effect = RuntimeError()
                self.assertRaises(
                    RuntimeError,
                    self.mixin.add_router_interface,
                    self.ctx, router['id'],
                    {'subnet_id': subnet['subnet']['id']})
                filters = {
                    'device_id': [router['id']],
                }
                router_ports = self.core_plugin.get_ports(self.ctx, filters)
                self.assertEqual(1, len(router_ports))
                self.assertEqual(l3_const.DEVICE_OWNER_ROUTER_GW,
                                 router_ports[0]['device_owner'])
neutron-8.4.0/neutron/tests/unit/db/test_bgp_dragentscheduler_db.py0000664000567000056710000002115513044372760027021 0ustar jenkinsjenkins00000000000000# Copyright (c) 2016 Hewlett Packard Enterprise Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from oslo_config import cfg
from oslo_utils import importutils

from neutron.api.v2 import attributes
from neutron import context
from neutron.db import bgp_db
from neutron.db import bgp_dragentscheduler_db as bgp_dras_db
from neutron.extensions import agent
from neutron.extensions import bgp
from neutron.extensions import bgp_dragentscheduler as bgp_dras_ext
from neutron import manager
from neutron.tests.unit.db import test_bgp_db
from neutron.tests.unit.db import test_db_base_plugin_v2 as test_db_base_plugin
from neutron.tests.unit.extensions import test_agent

from webob import exc


class BgpDrSchedulerTestExtensionManager(object):

    def get_resources(self):
        attributes.RESOURCE_ATTRIBUTE_MAP.update(
            agent.RESOURCE_ATTRIBUTE_MAP)
        resources = agent.Agent.get_resources()
        resources.extend(bgp_dras_ext.Bgp_dragentscheduler.get_resources())
        return resources

    def get_actions(self):
        return []

    def get_request_extensions(self):
        return []


class TestBgpDrSchedulerPlugin(bgp_db.BgpDbMixin,
                               bgp_dras_db.BgpDrAgentSchedulerDbMixin):

    bgp_drscheduler = importutils.import_object(
        cfg.CONF.bgp_drscheduler_driver)

    supported_extension_aliases = ["bgp_dragent_scheduler"]

    def get_plugin_description(self):
        return ("BGP dynamic routing service Plugin test class that test "
                "BGP speaker functionality, with scheduler.")


class BgpDrSchedulingTestCase(test_agent.AgentDBTestMixIn,
                              test_bgp_db.BgpEntityCreationMixin):

    def test_schedule_bgp_speaker(self):
        """Test happy path over full scheduling cycle."""
        with self.bgp_speaker(4, 1234) as ri:
            bgp_speaker_id = ri['id']
            self._register_bgp_dragent(host='host1')
            agent = self._list('agents')['agents'][0]
            agent_id = agent['id']

            data = {'bgp_speaker_id': bgp_speaker_id}
            req = self.new_create_request('agents', data, self.fmt,
                                          agent_id, 'bgp-drinstances')
            res = req.get_response(self.ext_api)
            self.assertEqual(exc.HTTPCreated.code, res.status_int)

            req_show = self.new_show_request('agents', agent_id, self.fmt,
                                             'bgp-drinstances')
            res = req_show.get_response(self.ext_api)
            self.assertEqual(exc.HTTPOk.code, res.status_int)
            res = self.deserialize(self.fmt, res)
            self.assertIn('bgp_speakers', res)
            self.assertEqual(bgp_speaker_id,
                             res['bgp_speakers'][0]['id'])

            req = self.new_delete_request('agents',
                                          agent_id,
                                          self.fmt,
                                          'bgp-drinstances',
                                          bgp_speaker_id)
            res = req.get_response(self.ext_api)
            self.assertEqual(exc.HTTPNoContent.code, res.status_int)

            res = req_show.get_response(self.ext_api)
            self.assertEqual(exc.HTTPOk.code, res.status_int)
            res = self.deserialize(self.fmt, res)
            self.assertIn('bgp_speakers', res)
            self.assertEqual([], res['bgp_speakers'])

    def test_schedule_bgp_speaker_on_invalid_agent(self):
        """Test error while scheduling BGP speaker on an invalid agent."""
        with self.bgp_speaker(4, 1234) as ri:
            bgp_speaker_id = ri['id']
            self._register_l3_agent(host='host1')  # Register wrong agent
            agent = self._list('agents')['agents'][0]
            data = {'bgp_speaker_id': bgp_speaker_id}
            req = self.new_create_request(
                'agents', data, self.fmt, agent['id'], 'bgp-drinstances')
            res = req.get_response(self.ext_api)
req.get_response(self.ext_api) # Raises an AgentNotFound exception if the agent is invalid self.assertEqual(exc.HTTPNotFound.code, res.status_int) def test_schedule_bgp_speaker_twice_on_same_agent(self): """Test error if a BGP speaker is scheduled twice on same agent""" with self.bgp_speaker(4, 1234) as ri: bgp_speaker_id = ri['id'] self._register_bgp_dragent(host='host1') agent = self._list('agents')['agents'][0] data = {'bgp_speaker_id': bgp_speaker_id} req = self.new_create_request( 'agents', data, self.fmt, agent['id'], 'bgp-drinstances') res = req.get_response(self.ext_api) self.assertEqual(exc.HTTPCreated.code, res.status_int) # Try second time, should raise conflict res = req.get_response(self.ext_api) self.assertEqual(exc.HTTPConflict.code, res.status_int) def test_schedule_bgp_speaker_on_two_different_agents(self): """Test that a BGP speaker can be associated to two agents.""" with self.bgp_speaker(4, 1234) as ri: bgp_speaker_id = ri['id'] self._register_bgp_dragent(host='host1') self._register_bgp_dragent(host='host2') data = {'bgp_speaker_id': bgp_speaker_id} agent1 = self._list('agents')['agents'][0] req = self.new_create_request( 'agents', data, self.fmt, agent1['id'], 'bgp-drinstances') res = req.get_response(self.ext_api) self.assertEqual(exc.HTTPCreated.code, res.status_int) agent2 = self._list('agents')['agents'][1] req = self.new_create_request( 'agents', data, self.fmt, agent2['id'], 'bgp-drinstances') res = req.get_response(self.ext_api) self.assertEqual(exc.HTTPCreated.code, res.status_int) def test_schedule_multi_bgp_speaker_on_one_dragent(self): """Test only one BGP speaker can be associated to one dragent.""" with self.bgp_speaker(4, 1) as ri1, self.bgp_speaker(4, 2) as ri2: self._register_bgp_dragent(host='host1') agent = self._list('agents')['agents'][0] data = {'bgp_speaker_id': ri1['id']} req = self.new_create_request( 'agents', data, self.fmt, agent['id'], 'bgp-drinstances') res = req.get_response(self.ext_api) self.assertEqual(exc.HTTPCreated.code, res.status_int) data = {'bgp_speaker_id': ri2['id']} req = self.new_create_request( 'agents', data, self.fmt, agent['id'], 'bgp-drinstances') res = req.get_response(self.ext_api) self.assertEqual(exc.HTTPConflict.code, res.status_int) def test_non_scheduled_bgp_speaker_binding_removal(self): """Test exception while removing an invalid binding.""" with self.bgp_speaker(4, 1234) as ri1: self._register_bgp_dragent(host='host1') agent = self._list('agents')['agents'][0] agent_id = agent['id'] self.assertRaises(bgp_dras_ext.DrAgentNotHostingBgpSpeaker, self.bgp_plugin.remove_bgp_speaker_from_dragent, self.context, agent_id, ri1['id']) class BgpDrPluginSchedulerTests(test_db_base_plugin.NeutronDbPluginV2TestCase, BgpDrSchedulingTestCase): def setUp(self, plugin=None, ext_mgr=None, service_plugins=None): if not plugin: plugin = ('neutron.tests.unit.db.' 'test_bgp_dragentscheduler_db.TestBgpDrSchedulerPlugin') if not service_plugins: service_plugins = {bgp.BGP_EXT_ALIAS: 'neutron.services.bgp.bgp_plugin.BgpPlugin'} ext_mgr = ext_mgr or BgpDrSchedulerTestExtensionManager() super(BgpDrPluginSchedulerTests, self).setUp( plugin=plugin, ext_mgr=ext_mgr, service_plugins=service_plugins) self.bgp_plugin = manager.NeutronManager.get_service_plugins().get( bgp.BGP_EXT_ALIAS) self.context = context.get_admin_context() neutron-8.4.0/neutron/tests/unit/db/test_ipam_pluggable_backend.py0000664000567000056710000007143113044372760026622 0ustar jenkinsjenkins00000000000000# Copyright (c) 2015 Infoblox Inc. # All Rights Reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import mock import netaddr from oslo_config import cfg from oslo_utils import uuidutils import webob.exc from neutron.common import constants from neutron.common import exceptions as n_exc from neutron.common import ipv6_utils from neutron.db import ipam_backend_mixin from neutron.db import ipam_pluggable_backend from neutron.ipam import requests as ipam_req from neutron.tests.unit.db import test_db_base_plugin_v2 as test_db_base class UseIpamMixin(object): def setUp(self): cfg.CONF.set_override("ipam_driver", 'internal') super(UseIpamMixin, self).setUp() class TestIpamHTTPResponse(UseIpamMixin, test_db_base.TestV2HTTPResponse): pass class TestIpamPorts(UseIpamMixin, test_db_base.TestPortsV2): pass class TestIpamNetworks(UseIpamMixin, test_db_base.TestNetworksV2): pass class TestIpamSubnets(UseIpamMixin, test_db_base.TestSubnetsV2): pass class TestIpamSubnetPool(UseIpamMixin, test_db_base.TestSubnetPoolsV2): pass class TestDbBasePluginIpam(test_db_base.NeutronDbPluginV2TestCase): def setUp(self): cfg.CONF.set_override("ipam_driver", 'internal') super(TestDbBasePluginIpam, self).setUp() self.tenant_id = uuidutils.generate_uuid() self.subnet_id = uuidutils.generate_uuid() def _prepare_mocks(self, address_factory=None, subnet_factory=None): if address_factory is None: address_factory = ipam_req.AddressRequestFactory if subnet_factory is None: subnet_factory = ipam_req.SubnetRequestFactory mocks = { 'driver': mock.Mock(), 'subnet': mock.Mock(), 'subnet_request': ipam_req.SpecificSubnetRequest( self.tenant_id, self.subnet_id, '10.0.0.0/24', '10.0.0.1', [netaddr.IPRange('10.0.0.2', '10.0.0.254')]), } mocks['driver'].get_subnet.return_value = mocks['subnet'] mocks['driver'].allocate_subnet.return_value = mocks['subnet'] mocks['driver'].get_subnet_request_factory.return_value = ( subnet_factory) mocks['driver'].get_address_request_factory.return_value = ( address_factory) mocks['subnet'].get_details.return_value = mocks['subnet_request'] return mocks def _prepare_ipam(self): mocks = self._prepare_mocks() mocks['ipam'] = ipam_pluggable_backend.IpamPluggableBackend() return mocks def _prepare_mocks_with_pool_mock(self, pool_mock, address_factory=None, subnet_factory=None): mocks = self._prepare_mocks(address_factory=address_factory, subnet_factory=subnet_factory) pool_mock.get_instance.return_value = mocks['driver'] return mocks def _get_allocate_mock(self, auto_ip='10.0.0.2', fail_ip='127.0.0.1', error_message='SomeError'): def allocate_mock(request): if type(request) == ipam_req.SpecificAddressRequest: if request.address == netaddr.IPAddress(fail_ip): raise n_exc.InvalidInput(error_message=error_message) else: return str(request.address) else: return auto_ip return allocate_mock def _validate_allocate_calls(self, expected_calls, mocks): self.assertTrue(mocks['subnet'].allocate.called) actual_calls = mocks['subnet'].allocate.call_args_list self.assertEqual(len(expected_calls), len(actual_calls)) i = 0 for call in expected_calls: if call['ip_address']: 
self.assertIsInstance(actual_calls[i][0][0], ipam_req.SpecificAddressRequest) self.assertEqual(netaddr.IPAddress(call['ip_address']), actual_calls[i][0][0].address) else: self.assertIsInstance(actual_calls[i][0][0], ipam_req.AnyAddressRequest) i += 1 def _convert_to_ips(self, data): ips = [{'ip_address': ip, 'subnet_id': data[ip][1], 'subnet_cidr': data[ip][0]} for ip in data] return sorted(ips, key=lambda t: t['subnet_cidr']) def _gen_subnet_id(self): return uuidutils.generate_uuid() def test_deallocate_single_ip(self): mocks = self._prepare_ipam() ip = '192.168.12.45' data = {ip: ['192.168.12.0/24', self._gen_subnet_id()]} ips = self._convert_to_ips(data) mocks['ipam']._ipam_deallocate_ips(mock.ANY, mocks['driver'], mock.ANY, ips) mocks['driver'].get_subnet.assert_called_once_with(data[ip][1]) mocks['subnet'].deallocate.assert_called_once_with(ip) def test_deallocate_multiple_ips(self): mocks = self._prepare_ipam() data = {'192.168.43.15': ['192.168.43.0/24', self._gen_subnet_id()], '172.23.158.84': ['172.23.128.0/17', self._gen_subnet_id()], '8.8.8.8': ['8.0.0.0/8', self._gen_subnet_id()]} ips = self._convert_to_ips(data) mocks['ipam']._ipam_deallocate_ips(mock.ANY, mocks['driver'], mock.ANY, ips) get_calls = [mock.call(data[ip][1]) for ip in data] mocks['driver'].get_subnet.assert_has_calls(get_calls, any_order=True) ip_calls = [mock.call(ip) for ip in data] mocks['subnet'].deallocate.assert_has_calls(ip_calls, any_order=True) def _single_ip_allocate_helper(self, mocks, ip, network, subnet): ips = [{'subnet_cidr': network, 'subnet_id': subnet}] if ip: ips[0]['ip_address'] = ip allocated_ips = mocks['ipam']._ipam_allocate_ips( mock.ANY, mocks['driver'], mock.ANY, ips) mocks['driver'].get_subnet.assert_called_once_with(subnet) self.assertTrue(mocks['subnet'].allocate.called) request = mocks['subnet'].allocate.call_args[0][0] return {'ips': allocated_ips, 'request': request} def test_allocate_single_fixed_ip(self): mocks = self._prepare_ipam() ip = '192.168.15.123' mocks['subnet'].allocate.return_value = ip results = self._single_ip_allocate_helper(mocks, ip, '192.168.15.0/24', self._gen_subnet_id()) self.assertIsInstance(results['request'], ipam_req.SpecificAddressRequest) self.assertEqual(netaddr.IPAddress(ip), results['request'].address) self.assertEqual(ip, results['ips'][0]['ip_address'], 'Should allocate the same ip as passed') def test_allocate_single_any_ip(self): mocks = self._prepare_ipam() network = '192.168.15.0/24' ip = '192.168.15.83' mocks['subnet'].allocate.return_value = ip results = self._single_ip_allocate_helper(mocks, '', network, self._gen_subnet_id()) self.assertIsInstance(results['request'], ipam_req.AnyAddressRequest) self.assertEqual(ip, results['ips'][0]['ip_address']) def test_allocate_eui64_ip(self): mocks = self._prepare_ipam() ip = {'subnet_id': self._gen_subnet_id(), 'subnet_cidr': '2001:470:abcd::/64', 'mac': '6c:62:6d:de:cf:49', 'eui64_address': True} eui64_ip = ipv6_utils.get_ipv6_addr_by_EUI64(ip['subnet_cidr'], ip['mac']) mocks['ipam']._ipam_allocate_ips(mock.ANY, mocks['driver'], mock.ANY, [ip]) request = mocks['subnet'].allocate.call_args[0][0] self.assertIsInstance(request, ipam_req.AutomaticAddressRequest) self.assertEqual(eui64_ip, request.address) def test_allocate_multiple_ips(self): mocks = self._prepare_ipam() data = {'': ['172.23.128.0/17', self._gen_subnet_id()], '192.168.43.15': ['192.168.43.0/24', self._gen_subnet_id()], '8.8.8.8': ['8.0.0.0/8', self._gen_subnet_id()]} ips = self._convert_to_ips(data) mocks['subnet'].allocate.side_effect = 
self._get_allocate_mock( auto_ip='172.23.128.94') mocks['ipam']._ipam_allocate_ips( mock.ANY, mocks['driver'], mock.ANY, ips) get_calls = [mock.call(data[ip][1]) for ip in data] mocks['driver'].get_subnet.assert_has_calls(get_calls, any_order=True) self._validate_allocate_calls(ips, mocks) def test_allocate_multiple_ips_with_exception(self): mocks = self._prepare_ipam() auto_ip = '172.23.128.94' fail_ip = '192.168.43.15' data = {'': ['172.23.128.0/17', self._gen_subnet_id()], fail_ip: ['192.168.43.0/24', self._gen_subnet_id()], '8.8.8.8': ['8.0.0.0/8', self._gen_subnet_id()]} ips = self._convert_to_ips(data) mocks['subnet'].allocate.side_effect = self._get_allocate_mock( auto_ip=auto_ip, fail_ip=fail_ip) # Exception should be raised on attempt to allocate second ip. # Revert action should be performed for the already allocated ips, # In this test case only one ip should be deallocated # and original error should be reraised self.assertRaises(n_exc.InvalidInput, mocks['ipam']._ipam_allocate_ips, mock.ANY, mocks['driver'], mock.ANY, ips) # get_subnet should be called only for the first two networks get_calls = [mock.call(data[ip][1]) for ip in ['', fail_ip]] mocks['driver'].get_subnet.assert_has_calls(get_calls, any_order=True) # Allocate should be called for the first two ips only self._validate_allocate_calls(ips[:-1], mocks) # Deallocate should be called for the first ip only mocks['subnet'].deallocate.assert_called_once_with(auto_ip) @mock.patch('neutron.ipam.driver.Pool') def test_create_subnet_over_ipam(self, pool_mock): mocks = self._prepare_mocks_with_pool_mock(pool_mock) cidr = '192.168.0.0/24' allocation_pools = [{'start': '192.168.0.2', 'end': '192.168.0.254'}] with self.subnet(allocation_pools=allocation_pools, cidr=cidr): pool_mock.get_instance.assert_called_once_with(None, mock.ANY) self.assertTrue(mocks['driver'].allocate_subnet.called) request = mocks['driver'].allocate_subnet.call_args[0][0] self.assertIsInstance(request, ipam_req.SpecificSubnetRequest) self.assertEqual(netaddr.IPNetwork(cidr), request.subnet_cidr) @mock.patch('neutron.ipam.driver.Pool') def test_create_ipv6_pd_subnet_over_ipam(self, pool_mock): mocks = self._prepare_mocks_with_pool_mock(pool_mock) cfg.CONF.set_override('ipv6_pd_enabled', True) cidr = constants.PROVISIONAL_IPV6_PD_PREFIX allocation_pools = [netaddr.IPRange('::2', '::ffff:ffff:ffff:ffff')] with self.subnet(cidr=None, ip_version=6, subnetpool_id=constants.IPV6_PD_POOL_ID, ipv6_ra_mode=constants.IPV6_SLAAC, ipv6_address_mode=constants.IPV6_SLAAC): self.assertEqual(2, pool_mock.get_instance.call_count) self.assertTrue(mocks['driver'].allocate_subnet.called) request = mocks['driver'].allocate_subnet.call_args[0][0] self.assertIsInstance(request, ipam_req.SpecificSubnetRequest) self.assertEqual(netaddr.IPNetwork(cidr), request.subnet_cidr) self.assertEqual(allocation_pools, request.allocation_pools) @mock.patch('neutron.ipam.driver.Pool') def test_create_subnet_over_ipam_with_rollback(self, pool_mock): mocks = self._prepare_mocks_with_pool_mock(pool_mock) mocks['driver'].allocate_subnet.side_effect = ValueError cidr = '10.0.2.0/24' with self.network() as network: self._create_subnet(self.fmt, network['network']['id'], cidr, expected_res_status=500) pool_mock.get_instance.assert_called_once_with(None, mock.ANY) self.assertTrue(mocks['driver'].allocate_subnet.called) request = mocks['driver'].allocate_subnet.call_args[0][0] self.assertIsInstance(request, ipam_req.SpecificSubnetRequest) self.assertEqual(netaddr.IPNetwork(cidr), request.subnet_cidr) # 
Verify no subnet was created for network req = self.new_show_request('networks', network['network']['id']) res = req.get_response(self.api) net = self.deserialize(self.fmt, res) self.assertEqual(0, len(net['network']['subnets'])) @mock.patch('neutron.ipam.driver.Pool') def test_ipam_subnet_deallocated_if_create_fails(self, pool_mock): mocks = self._prepare_mocks_with_pool_mock(pool_mock) cidr = '10.0.2.0/24' with mock.patch.object( ipam_backend_mixin.IpamBackendMixin, '_save_subnet', side_effect=ValueError), self.network() as network: self._create_subnet(self.fmt, network['network']['id'], cidr, expected_res_status=500) pool_mock.get_instance.assert_any_call(None, mock.ANY) self.assertEqual(2, pool_mock.get_instance.call_count) self.assertTrue(mocks['driver'].allocate_subnet.called) request = mocks['driver'].allocate_subnet.call_args[0][0] self.assertIsInstance(request, ipam_req.SpecificSubnetRequest) self.assertEqual(netaddr.IPNetwork(cidr), request.subnet_cidr) # Verify remove ipam subnet was called mocks['driver'].remove_subnet.assert_called_once_with( self.subnet_id) @mock.patch('neutron.ipam.driver.Pool') def test_update_subnet_over_ipam(self, pool_mock): mocks = self._prepare_mocks_with_pool_mock(pool_mock) cidr = '10.0.0.0/24' allocation_pools = [{'start': '10.0.0.2', 'end': '10.0.0.254'}] with self.subnet(allocation_pools=allocation_pools, cidr=cidr) as subnet: data = {'subnet': {'allocation_pools': [ {'start': '10.0.0.10', 'end': '10.0.0.20'}, {'start': '10.0.0.30', 'end': '10.0.0.40'}]}} req = self.new_update_request('subnets', data, subnet['subnet']['id']) res = req.get_response(self.api) self.assertEqual(200, res.status_code) pool_mock.get_instance.assert_any_call(None, mock.ANY) self.assertEqual(2, pool_mock.get_instance.call_count) self.assertTrue(mocks['driver'].update_subnet.called) request = mocks['driver'].update_subnet.call_args[0][0] self.assertIsInstance(request, ipam_req.SpecificSubnetRequest) self.assertEqual(netaddr.IPNetwork(cidr), request.subnet_cidr) ip_ranges = [netaddr.IPRange(p['start'], p['end']) for p in data['subnet']['allocation_pools']] self.assertEqual(ip_ranges, request.allocation_pools) @mock.patch('neutron.ipam.driver.Pool') def test_delete_subnet_over_ipam(self, pool_mock): mocks = self._prepare_mocks_with_pool_mock(pool_mock) gateway_ip = '10.0.0.1' cidr = '10.0.0.0/24' res = self._create_network(fmt=self.fmt, name='net', admin_state_up=True) network = self.deserialize(self.fmt, res) subnet = self._make_subnet(self.fmt, network, gateway_ip, cidr, ip_version=4) req = self.new_delete_request('subnets', subnet['subnet']['id']) res = req.get_response(self.api) self.assertEqual(webob.exc.HTTPNoContent.code, res.status_int) pool_mock.get_instance.assert_any_call(None, mock.ANY) self.assertEqual(2, pool_mock.get_instance.call_count) mocks['driver'].remove_subnet.assert_called_once_with( subnet['subnet']['id']) @mock.patch('neutron.ipam.driver.Pool') def test_delete_subnet_over_ipam_with_rollback(self, pool_mock): mocks = self._prepare_mocks_with_pool_mock(pool_mock) mocks['driver'].remove_subnet.side_effect = ValueError gateway_ip = '10.0.0.1' cidr = '10.0.0.0/24' res = self._create_network(fmt=self.fmt, name='net', admin_state_up=True) network = self.deserialize(self.fmt, res) subnet = self._make_subnet(self.fmt, network, gateway_ip, cidr, ip_version=4) req = self.new_delete_request('subnets', subnet['subnet']['id']) res = req.get_response(self.api) self.assertEqual(webob.exc.HTTPServerError.code, res.status_int) 
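        # The mocked ipam driver raises on remove_subnet, so the delete
        # request above fails with a 500; the plugin is then expected to
        # restore the subnet it had already removed from the neutron DB.
        # The assertions that follow verify both the driver interaction
        # and the restored subnet.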
        pool_mock.get_instance.assert_any_call(None, mock.ANY)
        self.assertEqual(2, pool_mock.get_instance.call_count)
        mocks['driver'].remove_subnet.assert_called_once_with(
            subnet['subnet']['id'])
        # Verify subnet was recreated after the failed ipam call.
        # assertEqual is the intended check here; assertIn only passed
        # because equal strings trivially contain each other.
        subnet_req = self.new_show_request('subnets',
                                           subnet['subnet']['id'])
        raw_res = subnet_req.get_response(self.api)
        sub_res = self.deserialize(self.fmt, raw_res)
        self.assertEqual(cidr, sub_res['subnet']['cidr'])
        self.assertEqual(gateway_ip, sub_res['subnet']['gateway_ip'])

    @mock.patch('neutron.ipam.driver.Pool')
    def test_create_port_ipam(self, pool_mock):
        mocks = self._prepare_mocks_with_pool_mock(pool_mock)
        auto_ip = '10.0.0.2'
        expected_calls = [{'ip_address': ''}]
        mocks['subnet'].allocate.side_effect = self._get_allocate_mock(
            auto_ip=auto_ip)
        with self.subnet() as subnet:
            with self.port(subnet=subnet) as port:
                ips = port['port']['fixed_ips']
                self.assertEqual(1, len(ips))
                # expected value goes first in assertEqual
                self.assertEqual(auto_ip, ips[0]['ip_address'])
                self.assertEqual(subnet['subnet']['id'],
                                 ips[0]['subnet_id'])
                self._validate_allocate_calls(expected_calls, mocks)

    @mock.patch('neutron.ipam.driver.Pool')
    def test_create_port_ipam_with_rollback(self, pool_mock):
        mocks = self._prepare_mocks_with_pool_mock(pool_mock)
        mocks['subnet'].allocate.side_effect = ValueError
        with self.network() as network:
            with self.subnet(network=network):
                net_id = network['network']['id']
                data = {
                    'port': {'network_id': net_id,
                             'tenant_id': network['network']['tenant_id']}}
                port_req = self.new_create_request('ports', data)
                res = port_req.get_response(self.api)
                self.assertEqual(webob.exc.HTTPServerError.code,
                                 res.status_int)

                # verify no port left after failure
                req = self.new_list_request('ports', self.fmt,
                                            "network_id=%s" % net_id)
                res = self.deserialize(self.fmt, req.get_response(self.api))
                self.assertEqual(0, len(res['ports']))

    @mock.patch('neutron.ipam.driver.Pool')
    def test_update_port_ipam(self, pool_mock):
        mocks = self._prepare_mocks_with_pool_mock(pool_mock)
        auto_ip = '10.0.0.2'
        new_ip = '10.0.0.15'
        expected_calls = [{'ip_address': ip} for ip in ['', new_ip]]
        mocks['subnet'].allocate.side_effect = self._get_allocate_mock(
            auto_ip=auto_ip)
        with self.subnet() as subnet:
            with self.port(subnet=subnet) as port:
                ips = port['port']['fixed_ips']
                self.assertEqual(1, len(ips))
                self.assertEqual(auto_ip, ips[0]['ip_address'])

                # Update port with another new ip
                data = {"port": {"fixed_ips": [{
                        'subnet_id': subnet['subnet']['id'],
                        'ip_address': new_ip}]}}
                req = self.new_update_request('ports', data,
                                              port['port']['id'])
                res = self.deserialize(self.fmt, req.get_response(self.api))
                ips = res['port']['fixed_ips']
                self.assertEqual(1, len(ips))
                self.assertEqual(new_ip, ips[0]['ip_address'])

                # Allocate should be called for both the initial and the
                # updated ip address
                self._validate_allocate_calls(expected_calls, mocks)
                # Deallocate should be called only for the original ip
                mocks['subnet'].deallocate.assert_called_once_with(auto_ip)

    @mock.patch('neutron.ipam.driver.Pool')
    def test_delete_port_ipam(self, pool_mock):
        mocks = self._prepare_mocks_with_pool_mock(pool_mock)
        auto_ip = '10.0.0.2'
        mocks['subnet'].allocate.side_effect = self._get_allocate_mock(
            auto_ip=auto_ip)
        with self.subnet() as subnet:
            with self.port(subnet=subnet) as port:
                ips = port['port']['fixed_ips']
                self.assertEqual(1, len(ips))
                self.assertEqual(auto_ip, ips[0]['ip_address'])
                req = self.new_delete_request('ports', port['port']['id'])
                res = req.get_response(self.api)

                self.assertEqual(webob.exc.HTTPNoContent.code,
                                 res.status_int)
                mocks['subnet'].deallocate.assert_called_once_with(auto_ip)

    def
test_recreate_port_ipam(self): ip = '10.0.0.2' with self.subnet() as subnet: with self.port(subnet=subnet) as port: ips = port['port']['fixed_ips'] self.assertEqual(1, len(ips)) self.assertEqual(ip, ips[0]['ip_address']) req = self.new_delete_request('ports', port['port']['id']) res = req.get_response(self.api) self.assertEqual(webob.exc.HTTPNoContent.code, res.status_int) with self.port(subnet=subnet, fixed_ips=ips) as port: ips = port['port']['fixed_ips'] self.assertEqual(1, len(ips)) self.assertEqual(ip, ips[0]['ip_address']) @mock.patch('neutron.ipam.driver.Pool') def test_update_ips_for_port_passes_port_dict_to_factory(self, pool_mock): address_factory = mock.Mock() mocks = self._prepare_mocks_with_pool_mock( pool_mock, address_factory=address_factory) context = mock.Mock() new_ips = mock.Mock() original_ips = mock.Mock() mac = mock.Mock() ip_dict = {'ip_address': '192.1.1.10', 'subnet_id': uuidutils.generate_uuid()} changes = ipam_pluggable_backend.IpamPluggableBackend.Changes( add=[ip_dict], original=[], remove=[]) changes_mock = mock.Mock(return_value=changes) fixed_ips_mock = mock.Mock(return_value=changes.add) mocks['ipam'] = ipam_pluggable_backend.IpamPluggableBackend() mocks['ipam']._get_changed_ips_for_port = changes_mock mocks['ipam']._test_fixed_ips_for_port = fixed_ips_mock port_dict = {'device_owner': uuidutils.generate_uuid(), 'network_id': uuidutils.generate_uuid()} mocks['ipam']._update_ips_for_port(context, port_dict, original_ips, new_ips, mac) mocks['driver'].get_address_request_factory.assert_called_once_with() # Validate port_dict is passed into address_factory address_factory.get_request.assert_called_once_with(context, port_dict, ip_dict) @mock.patch('neutron.ipam.driver.Pool') def test_update_ips_for_port_passes_port_id_to_factory(self, pool_mock): port_id = mock.Mock() network_id = uuidutils.generate_uuid() address_factory = mock.Mock() mocks = self._prepare_mocks_with_pool_mock( pool_mock, address_factory=address_factory) context = mock.Mock() ip_dict = {'ip_address': '192.1.1.10', 'subnet_id': uuidutils.generate_uuid()} port_dict = {'port': {'device_owner': uuidutils.generate_uuid(), 'network_id': network_id, 'fixed_ips': [ip_dict]}} subnets = [{'id': ip_dict['subnet_id'], 'network_id': network_id, 'cidr': '192.1.1.0/24', 'ipv6_address_mode': None, 'ipv6_ra_mode': None}] get_subnets_mock = mock.Mock(return_value=subnets) get_subnet_mock = mock.Mock(return_value=subnets[0]) mocks['ipam'] = ipam_pluggable_backend.IpamPluggableBackend() mocks['ipam']._get_subnets = get_subnets_mock mocks['ipam']._get_subnet = get_subnet_mock mocks['ipam'].allocate_ips_for_port_and_store(context, port_dict, port_id) mocks['driver'].get_address_request_factory.assert_called_once_with() port_dict_with_id = port_dict['port'].copy() port_dict_with_id['id'] = port_id # Validate port id is added to port dict before address_factory call address_factory.get_request.assert_called_once_with(context, port_dict_with_id, ip_dict) # Verify incoming port dict is not changed ('id' is not added to it) self.assertIsNone(port_dict['port'].get('id')) def _test_update_db_subnet(self, pool_mock, subnet, expected_subnet, old_pools): subnet_factory = mock.Mock() context = mock.Mock() mocks = self._prepare_mocks_with_pool_mock( pool_mock, subnet_factory=subnet_factory) mocks['ipam'] = ipam_pluggable_backend.IpamPluggableBackend() mocks['ipam'].update_db_subnet(context, id, subnet, old_pools) mocks['driver'].get_subnet_request_factory.assert_called_once_with() 
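        # The driver only hands back the factory; the get_request assertion
        # below pins down the exact subnet dict (including any merged
        # allocation pools) that update_db_subnet passes to it.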
subnet_factory.get_request.assert_called_once_with(context, expected_subnet, None) @mock.patch('neutron.ipam.driver.Pool') def test_update_db_subnet_unchanged_pools(self, pool_mock): old_pools = [netaddr.IPRange('192.1.1.2', '192.1.1.254')] subnet = {'id': uuidutils.generate_uuid(), 'network_id': uuidutils.generate_uuid(), 'cidr': '192.1.1.0/24', 'ipv6_address_mode': None, 'ipv6_ra_mode': None} subnet_with_pools = subnet.copy() subnet_with_pools['allocation_pools'] = old_pools # if subnet has no allocation pools set, then old pools has to # be added to subnet dict passed to request factory self._test_update_db_subnet(pool_mock, subnet, subnet_with_pools, old_pools) @mock.patch('neutron.ipam.driver.Pool') def test_update_db_subnet_new_pools(self, pool_mock): old_pools = [netaddr.IPRange('192.1.1.2', '192.1.1.254')] subnet = {'id': uuidutils.generate_uuid(), 'network_id': uuidutils.generate_uuid(), 'cidr': '192.1.1.0/24', 'allocation_pools': [ netaddr.IPRange('192.1.1.10', '192.1.1.254')], 'ipv6_address_mode': None, 'ipv6_ra_mode': None} # make a copy of subnet for validation, since update_subnet changes # incoming subnet dict expected_subnet = subnet.copy() # validate that subnet passed to request factory is the same as # incoming one, i.e. new pools in it are not overwritten by old pools self._test_update_db_subnet(pool_mock, subnet, expected_subnet, old_pools) neutron-8.4.0/neutron/tests/unit/db/__init__.py0000664000567000056710000000000013044372736022666 0ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/tests/unit/db/quota/0000775000567000056710000000000013044373210021704 5ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/tests/unit/db/quota/__init__.py0000664000567000056710000000000013044372736024017 0ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/tests/unit/db/quota/test_api.py0000664000567000056710000003540213044372736024106 0ustar jenkinsjenkins00000000000000# Copyright (c) 2015 OpenStack Foundation. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
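# The cases below exercise the quota DB API (neutron.db.quota.api): usage
# tracking via set_quota_usage()/set_quota_usage_dirty() and reservations
# via create_reservation()/get_reservations_for_resources().  As a rough
# usage sketch (hypothetical `ctx` and `tenant` values, calls as used in
# the tests below):
#
#     usage = quota_api.set_quota_usage(ctx, 'port', tenant, in_use=3)
#     quota_api.set_quota_usage_dirty(ctx, 'port', tenant)
#     resv = quota_api.create_reservation(ctx, tenant, {'port': 2})
#
# The resource names used in the tests ('goals', 'assists', 'bookings')
# are arbitrary strings; the quota API does not interpret them.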
import datetime import mock from neutron import context from neutron.db.quota import api as quota_api from neutron.tests.unit import testlib_api class TestQuotaDbApi(testlib_api.SqlTestCaseLight): def _set_context(self): self.tenant_id = 'Higuain' self.context = context.Context('Gonzalo', self.tenant_id, is_admin=False, is_advsvc=False) def _create_reservation(self, resource_deltas, tenant_id=None, expiration=None): tenant_id = tenant_id or self.tenant_id return quota_api.create_reservation( self.context, tenant_id, resource_deltas, expiration) def _create_quota_usage(self, resource, used, tenant_id=None): tenant_id = tenant_id or self.tenant_id return quota_api.set_quota_usage( self.context, resource, tenant_id, in_use=used) def _verify_quota_usage(self, usage_info, expected_resource=None, expected_used=None, expected_dirty=None): self.assertEqual(self.tenant_id, usage_info.tenant_id) if expected_resource: self.assertEqual(expected_resource, usage_info.resource) if expected_dirty is not None: self.assertEqual(expected_dirty, usage_info.dirty) if expected_used is not None: self.assertEqual(expected_used, usage_info.used) def setUp(self): super(TestQuotaDbApi, self).setUp() self._set_context() def test_create_quota_usage(self): usage_info = self._create_quota_usage('goals', 26) self._verify_quota_usage(usage_info, expected_resource='goals', expected_used=26) def test_update_quota_usage(self): self._create_quota_usage('goals', 26) # Higuain scores a double usage_info_1 = quota_api.set_quota_usage( self.context, 'goals', self.tenant_id, in_use=28) self._verify_quota_usage(usage_info_1, expected_used=28) usage_info_2 = quota_api.set_quota_usage( self.context, 'goals', self.tenant_id, in_use=24) self._verify_quota_usage(usage_info_2, expected_used=24) def test_update_quota_usage_with_deltas(self): self._create_quota_usage('goals', 26) # Higuain scores a double usage_info_1 = quota_api.set_quota_usage( self.context, 'goals', self.tenant_id, in_use=2, delta=True) self._verify_quota_usage(usage_info_1, expected_used=28) def test_set_quota_usage_dirty(self): self._create_quota_usage('goals', 26) # Higuain needs a shower after the match self.assertEqual(1, quota_api.set_quota_usage_dirty( self.context, 'goals', self.tenant_id)) usage_info = quota_api.get_quota_usage_by_resource_and_tenant( self.context, 'goals', self.tenant_id) self._verify_quota_usage(usage_info, expected_dirty=True) # Higuain is clean now self.assertEqual(1, quota_api.set_quota_usage_dirty( self.context, 'goals', self.tenant_id, dirty=False)) usage_info = quota_api.get_quota_usage_by_resource_and_tenant( self.context, 'goals', self.tenant_id) self._verify_quota_usage(usage_info, expected_dirty=False) def test_set_dirty_non_existing_quota_usage(self): self.assertEqual(0, quota_api.set_quota_usage_dirty( self.context, 'meh', self.tenant_id)) def test_set_resources_quota_usage_dirty(self): self._create_quota_usage('goals', 26) self._create_quota_usage('assists', 11) self._create_quota_usage('bookings', 3) self.assertEqual(2, quota_api.set_resources_quota_usage_dirty( self.context, ['goals', 'bookings'], self.tenant_id)) usage_info_goals = quota_api.get_quota_usage_by_resource_and_tenant( self.context, 'goals', self.tenant_id) usage_info_assists = quota_api.get_quota_usage_by_resource_and_tenant( self.context, 'assists', self.tenant_id) usage_info_bookings = quota_api.get_quota_usage_by_resource_and_tenant( self.context, 'bookings', self.tenant_id) self._verify_quota_usage(usage_info_goals, expected_dirty=True) 
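        # 'assists' was not in the resource list passed to
        # set_resources_quota_usage_dirty() above, so it must stay clean.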
self._verify_quota_usage(usage_info_assists, expected_dirty=False) self._verify_quota_usage(usage_info_bookings, expected_dirty=True) def test_set_resources_quota_usage_dirty_with_empty_list(self): self._create_quota_usage('goals', 26) self._create_quota_usage('assists', 11) self._create_quota_usage('bookings', 3) # Expect all the resources for the tenant to be set dirty self.assertEqual(3, quota_api.set_resources_quota_usage_dirty( self.context, [], self.tenant_id)) usage_info_goals = quota_api.get_quota_usage_by_resource_and_tenant( self.context, 'goals', self.tenant_id) usage_info_assists = quota_api.get_quota_usage_by_resource_and_tenant( self.context, 'assists', self.tenant_id) usage_info_bookings = quota_api.get_quota_usage_by_resource_and_tenant( self.context, 'bookings', self.tenant_id) self._verify_quota_usage(usage_info_goals, expected_dirty=True) self._verify_quota_usage(usage_info_assists, expected_dirty=True) self._verify_quota_usage(usage_info_bookings, expected_dirty=True) # Higuain is clean now self.assertEqual(1, quota_api.set_quota_usage_dirty( self.context, 'goals', self.tenant_id, dirty=False)) usage_info = quota_api.get_quota_usage_by_resource_and_tenant( self.context, 'goals', self.tenant_id) self._verify_quota_usage(usage_info, expected_dirty=False) def _test_set_all_quota_usage_dirty(self, expected): self._create_quota_usage('goals', 26) self._create_quota_usage('goals', 12, tenant_id='Callejon') self.assertEqual(expected, quota_api.set_all_quota_usage_dirty( self.context, 'goals')) def test_set_all_quota_usage_dirty(self): # All goal scorers need a shower after the match, but since this is not # admin context we can clean only one self._test_set_all_quota_usage_dirty(expected=1) def test_get_quota_usage_by_tenant(self): self._create_quota_usage('goals', 26) self._create_quota_usage('assists', 11) # Create a resource for a different tenant self._create_quota_usage('mehs', 99, tenant_id='buffon') usage_infos = quota_api.get_quota_usage_by_tenant_id( self.context, self.tenant_id) self.assertEqual(2, len(usage_infos)) resources = [info.resource for info in usage_infos] self.assertIn('goals', resources) self.assertIn('assists', resources) def test_get_quota_usage_by_resource(self): self._create_quota_usage('goals', 26) self._create_quota_usage('assists', 11) self._create_quota_usage('goals', 12, tenant_id='Callejon') usage_infos = quota_api.get_quota_usage_by_resource( self.context, 'goals') # Only 1 result expected in tenant context self.assertEqual(1, len(usage_infos)) self._verify_quota_usage(usage_infos[0], expected_resource='goals', expected_used=26) def test_get_quota_usage_by_tenant_and_resource(self): self._create_quota_usage('goals', 26) usage_info = quota_api.get_quota_usage_by_resource_and_tenant( self.context, 'goals', self.tenant_id) self._verify_quota_usage(usage_info, expected_resource='goals', expected_used=26) def test_get_non_existing_quota_usage_returns_none(self): self.assertIsNone(quota_api.get_quota_usage_by_resource_and_tenant( self.context, 'goals', self.tenant_id)) def _verify_reserved_resources(self, expected, actual): for (resource, delta) in actual.items(): self.assertIn(resource, expected) self.assertEqual(delta, expected[resource]) del expected[resource] self.assertFalse(expected) def test_create_reservation(self): resources = {'goals': 2, 'assists': 1} resv = self._create_reservation(resources) self.assertEqual(self.tenant_id, resv.tenant_id) self._verify_reserved_resources(resources, resv.deltas) def 
test_create_reservation_with_expiration(self): resources = {'goals': 2, 'assists': 1} exp_date = datetime.datetime(2016, 3, 31, 14, 30) resv = self._create_reservation(resources, expiration=exp_date) self.assertEqual(self.tenant_id, resv.tenant_id) self.assertEqual(exp_date, resv.expiration) self._verify_reserved_resources(resources, resv.deltas) def test_remove_non_existent_reservation(self): self.assertIsNone(quota_api.remove_reservation(self.context, 'meh')) def _get_reservations_for_resource_helper(self): # create three reservation, 1 expired resources_1 = {'goals': 2, 'assists': 1} resources_2 = {'goals': 3, 'bookings': 1} resources_3 = {'bookings': 2, 'assists': 2} exp_date_1 = datetime.datetime(2016, 3, 31, 14, 30) exp_date_2 = datetime.datetime(2015, 3, 31, 14, 30) self._create_reservation(resources_1, expiration=exp_date_1) self._create_reservation(resources_2, expiration=exp_date_1) self._create_reservation(resources_3, expiration=exp_date_2) def test_get_reservations_for_resources(self): with mock.patch('neutron.db.quota.api.utcnow') as mock_utcnow: self._get_reservations_for_resource_helper() mock_utcnow.return_value = datetime.datetime( 2015, 5, 20, 0, 0) deltas = quota_api.get_reservations_for_resources( self.context, self.tenant_id, ['goals', 'assists', 'bookings']) self.assertIn('goals', deltas) self.assertEqual(5, deltas['goals']) self.assertIn('assists', deltas) self.assertEqual(1, deltas['assists']) self.assertIn('bookings', deltas) self.assertEqual(1, deltas['bookings']) self.assertEqual(3, len(deltas)) def test_get_expired_reservations_for_resources(self): with mock.patch('neutron.db.quota.api.utcnow') as mock_utcnow: mock_utcnow.return_value = datetime.datetime( 2015, 5, 20, 0, 0) self._get_reservations_for_resource_helper() deltas = quota_api.get_reservations_for_resources( self.context, self.tenant_id, ['goals', 'assists', 'bookings'], expired=True) self.assertIn('assists', deltas) self.assertEqual(2, deltas['assists']) self.assertIn('bookings', deltas) self.assertEqual(2, deltas['bookings']) self.assertEqual(2, len(deltas)) def test_get_reservation_for_resources_with_empty_list(self): self.assertIsNone(quota_api.get_reservations_for_resources( self.context, self.tenant_id, [])) def test_remove_expired_reservations(self): with mock.patch('neutron.db.quota.api.utcnow') as mock_utcnow: mock_utcnow.return_value = datetime.datetime( 2015, 5, 20, 0, 0) resources = {'goals': 2, 'assists': 1} exp_date_1 = datetime.datetime(2016, 3, 31, 14, 30) resv_1 = self._create_reservation(resources, expiration=exp_date_1) exp_date_2 = datetime.datetime(2015, 3, 31, 14, 30) resv_2 = self._create_reservation(resources, expiration=exp_date_2) self.assertEqual(1, quota_api.remove_expired_reservations( self.context, self.tenant_id)) self.assertIsNone(quota_api.get_reservation( self.context, resv_2.reservation_id)) self.assertIsNotNone(quota_api.get_reservation( self.context, resv_1.reservation_id)) def test_remove_expired_reservations_no_tenant(self): with mock.patch('neutron.db.quota.api.utcnow') as mock_utcnow: mock_utcnow.return_value = datetime.datetime( 2015, 5, 20, 0, 0) resources = {'goals': 2, 'assists': 1} exp_date_1 = datetime.datetime(2014, 3, 31, 14, 30) resv_1 = self._create_reservation(resources, expiration=exp_date_1) exp_date_2 = datetime.datetime(2015, 3, 31, 14, 30) resv_2 = self._create_reservation(resources, expiration=exp_date_2, tenant_id='Callejon') self.assertEqual(2, quota_api.remove_expired_reservations( self.context)) self.assertIsNone(quota_api.get_reservation( 
self.context, resv_2.reservation_id)) self.assertIsNone(quota_api.get_reservation( self.context, resv_1.reservation_id)) class TestQuotaDbApiAdminContext(TestQuotaDbApi): def _set_context(self): self.tenant_id = 'Higuain' self.context = context.Context('Gonzalo', self.tenant_id, is_admin=True, is_advsvc=True) def test_get_quota_usage_by_resource(self): self._create_quota_usage('goals', 26) self._create_quota_usage('assists', 11) self._create_quota_usage('goals', 12, tenant_id='Callejon') usage_infos = quota_api.get_quota_usage_by_resource( self.context, 'goals') # 2 results expected in admin context self.assertEqual(2, len(usage_infos)) for usage_info in usage_infos: self.assertEqual('goals', usage_info.resource) def test_set_all_quota_usage_dirty(self): # All goal scorers need a shower after the match, and with admin # context we should be able to clean all of them self._test_set_all_quota_usage_dirty(expected=2) neutron-8.4.0/neutron/tests/unit/db/quota/test_driver.py0000664000567000056710000002114513044372760024624 0ustar jenkinsjenkins00000000000000# Copyright (c) 2014 OpenStack Foundation. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. from neutron.common import exceptions from neutron import context from neutron.db import db_base_plugin_v2 as base_plugin from neutron.db.quota import driver from neutron.tests.unit import testlib_api class FakePlugin(base_plugin.NeutronDbPluginV2, driver.DbQuotaDriver): """A fake plugin class containing all DB methods.""" class TestResource(object): """Describe a test resource for quota checking.""" def __init__(self, name, default, fake_count=0): self.name = name self.quota = default self.fake_count = fake_count @property def default(self): return self.quota def count(self, *args, **kwargs): return self.fake_count PROJECT = 'prj_test' RESOURCE = 'res_test' ALT_RESOURCE = 'res_test_meh' class TestDbQuotaDriver(testlib_api.SqlTestCase): def setUp(self): super(TestDbQuotaDriver, self).setUp() self.plugin = FakePlugin() self.context = context.get_admin_context() def test_create_quota_limit(self): defaults = {RESOURCE: TestResource(RESOURCE, 4)} self.plugin.update_quota_limit(self.context, PROJECT, RESOURCE, 2) quotas = self.plugin.get_tenant_quotas(self.context, defaults, PROJECT) self.assertEqual(2, quotas[RESOURCE]) def test_update_quota_limit(self): defaults = {RESOURCE: TestResource(RESOURCE, 4)} self.plugin.update_quota_limit(self.context, PROJECT, RESOURCE, 2) self.plugin.update_quota_limit(self.context, PROJECT, RESOURCE, 3) quotas = self.plugin.get_tenant_quotas(self.context, defaults, PROJECT) self.assertEqual(3, quotas[RESOURCE]) def test_delete_tenant_quota_restores_default_limit(self): defaults = {RESOURCE: TestResource(RESOURCE, 4)} self.plugin.update_quota_limit(self.context, PROJECT, RESOURCE, 2) self.plugin.delete_tenant_quota(self.context, PROJECT) quotas = self.plugin.get_tenant_quotas(self.context, defaults, PROJECT) self.assertEqual(4, quotas[RESOURCE]) def test_get_tenant_quotas(self): user_ctx = context.Context(user_id=PROJECT, tenant_id=PROJECT) 
self.plugin.update_quota_limit(self.context, PROJECT, RESOURCE, 2) quotas = self.plugin.get_tenant_quotas(user_ctx, {}, PROJECT) self.assertEqual(2, quotas[RESOURCE]) def test_get_tenant_quotas_different_tenant(self): user_ctx = context.Context(user_id=PROJECT, tenant_id='another_project') self.plugin.update_quota_limit(self.context, PROJECT, RESOURCE, 2) # It is appropriate to use assertFalse here as the expected return # value is an empty dict (the defaults passed in the statement below # after the request context) self.assertFalse(self.plugin.get_tenant_quotas(user_ctx, {}, PROJECT)) def test_get_all_quotas(self): project_1 = 'prj_test_1' project_2 = 'prj_test_2' resource_1 = 'res_test_1' resource_2 = 'res_test_2' resources = {resource_1: TestResource(resource_1, 3), resource_2: TestResource(resource_2, 5)} self.plugin.update_quota_limit(self.context, project_1, resource_1, 7) self.plugin.update_quota_limit(self.context, project_2, resource_2, 9) quotas = self.plugin.get_all_quotas(self.context, resources) # Expect two tenants' quotas self.assertEqual(2, len(quotas)) # But not quotas for the same tenant twice self.assertNotEqual(quotas[0]['tenant_id'], quotas[1]['tenant_id']) # Check the expected limits. The quotas can be in any order. for quota in quotas: self.assertEqual(3, len(quota)) project = quota['tenant_id'] self.assertIn(project, (project_1, project_2)) if project == project_1: expected_limit_r1 = 7 expected_limit_r2 = 5 if project == project_2: expected_limit_r1 = 3 expected_limit_r2 = 9 self.assertEqual(expected_limit_r1, quota[resource_1]) self.assertEqual(expected_limit_r2, quota[resource_2]) def test_limit_check(self): resources = {RESOURCE: TestResource(RESOURCE, 2)} values = {RESOURCE: 1} self.plugin.update_quota_limit(self.context, PROJECT, RESOURCE, 2) self.plugin.limit_check(self.context, PROJECT, resources, values) def test_limit_check_over_quota(self): resources = {RESOURCE: TestResource(RESOURCE, 2)} values = {RESOURCE: 3} self.plugin.update_quota_limit(self.context, PROJECT, RESOURCE, 2) self.assertRaises(exceptions.OverQuota, self.plugin.limit_check, context.get_admin_context(), PROJECT, resources, values) def test_limit_check_equals_to_quota(self): resources = {RESOURCE: TestResource(RESOURCE, 2)} values = {RESOURCE: 2} self.plugin.update_quota_limit(self.context, PROJECT, RESOURCE, 2) self.plugin.limit_check(self.context, PROJECT, resources, values) def test_limit_check_value_lower_than_zero(self): resources = {RESOURCE: TestResource(RESOURCE, 2)} values = {RESOURCE: -1} self.plugin.update_quota_limit(self.context, PROJECT, RESOURCE, 2) self.assertRaises(exceptions.InvalidQuotaValue, self.plugin.limit_check, context.get_admin_context(), PROJECT, resources, values) def _test_make_reservation_success(self, quota_driver, resource_name, deltas): resources = {resource_name: TestResource(resource_name, 2)} self.plugin.update_quota_limit(self.context, PROJECT, resource_name, 2) reservation = quota_driver.make_reservation( self.context, self.context.tenant_id, resources, deltas, self.plugin) self.assertIn(resource_name, reservation.deltas) self.assertEqual(deltas[resource_name], reservation.deltas[resource_name]) self.assertEqual(self.context.tenant_id, reservation.tenant_id) def test_make_reservation_single_resource(self): quota_driver = driver.DbQuotaDriver() self._test_make_reservation_success( quota_driver, RESOURCE, {RESOURCE: 1}) def test_make_reservation_fill_quota(self): quota_driver = driver.DbQuotaDriver() self._test_make_reservation_success( quota_driver, 
RESOURCE, {RESOURCE: 2}) def test_make_reservation_multiple_resources(self): quota_driver = driver.DbQuotaDriver() resources = {RESOURCE: TestResource(RESOURCE, 2), ALT_RESOURCE: TestResource(ALT_RESOURCE, 2)} deltas = {RESOURCE: 1, ALT_RESOURCE: 2} self.plugin.update_quota_limit(self.context, PROJECT, RESOURCE, 2) self.plugin.update_quota_limit(self.context, PROJECT, ALT_RESOURCE, 2) reservation = quota_driver.make_reservation( self.context, self.context.tenant_id, resources, deltas, self.plugin) self.assertIn(RESOURCE, reservation.deltas) self.assertIn(ALT_RESOURCE, reservation.deltas) self.assertEqual(1, reservation.deltas[RESOURCE]) self.assertEqual(2, reservation.deltas[ALT_RESOURCE]) self.assertEqual(self.context.tenant_id, reservation.tenant_id) def test_make_reservation_over_quota_fails(self): quota_driver = driver.DbQuotaDriver() resources = {RESOURCE: TestResource(RESOURCE, 2, fake_count=2)} deltas = {RESOURCE: 1} self.plugin.update_quota_limit(self.context, PROJECT, RESOURCE, 2) self.assertRaises(exceptions.OverQuota, quota_driver.make_reservation, self.context, self.context.tenant_id, resources, deltas, self.plugin) neutron-8.4.0/neutron/tests/unit/db/test_ipam_backend_mixin.py0000664000567000056710000002062313044372760026001 0ustar jenkinsjenkins00000000000000# Copyright (c) 2015 Infoblox Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
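# The cases below target IpamBackendMixin._get_changed_ips_for_port(), which
# diffs a port's original fixed_ips against the requested ones and returns a
# Changes tuple (add, original, remove).  Using the fixtures defined in
# setUp() below as an illustration:
#
#     original: ('id-1', '192.168.1.1'), ('id-5', '172.20.16.5')
#     new:      ('id-1', '192.168.1.1'), ('id-2', '192.168.1.2')
#
# yields add=[the id-2 entry], original=[the id-1 entry] and remove=[the
# id-5 entry].  Auto-addressed (SLAAC) allocations are the exception: they
# are kept unless the request carries 'delete_subnet': True for them.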
import mock from neutron.common import constants from neutron.db import ipam_backend_mixin from neutron.tests import base class TestIpamBackendMixin(base.BaseTestCase): def setUp(self): super(TestIpamBackendMixin, self).setUp() self.mixin = ipam_backend_mixin.IpamBackendMixin() self.ctx = mock.Mock() self.default_new_ips = (('id-1', '192.168.1.1'), ('id-2', '192.168.1.2')) self.default_original_ips = (('id-1', '192.168.1.1'), ('id-5', '172.20.16.5')) self.owner_non_router = constants.DEVICE_OWNER_DHCP self.owner_router = constants.DEVICE_OWNER_ROUTER_INTF def _prepare_ips(self, ips): results = [] for ip in ips: ip_dict = {'ip_address': ip[1], 'subnet_id': ip[0]} if len(ip) > 2: ip_dict['delete_subnet'] = ip[2] results.append(ip_dict) return results def _mock_slaac_subnet_on(self): slaac_subnet = {'ipv6_address_mode': constants.IPV6_SLAAC, 'ipv6_ra_mode': constants.IPV6_SLAAC} self.mixin._get_subnet = mock.Mock(return_value=slaac_subnet) def _mock_slaac_subnet_off(self): non_slaac_subnet = {'ipv6_address_mode': None, 'ipv6_ra_mode': None} self.mixin._get_subnet = mock.Mock(return_value=non_slaac_subnet) def _mock_slaac_for_subnet_ids(self, subnet_ids): """Mock incoming subnets as autoaddressed.""" def _get_subnet(context, subnet_id): if subnet_id in subnet_ids: return {'ipv6_address_mode': constants.IPV6_SLAAC, 'ipv6_ra_mode': constants.IPV6_SLAAC} else: return {'ipv6_address_mode': None, 'ipv6_ra_mode': None} self.mixin._get_subnet = mock.Mock(side_effect=_get_subnet) def _test_get_changed_ips_for_port(self, expected_change, original_ips, new_ips, owner): change = self.mixin._get_changed_ips_for_port(self.ctx, original_ips, new_ips, owner) self.assertEqual(expected_change, change) def test__get_changed_ips_for_port(self): new_ips = self._prepare_ips(self.default_new_ips) original_ips = self._prepare_ips(self.default_original_ips) expected_change = self.mixin.Changes(add=[new_ips[1]], original=[original_ips[0]], remove=[original_ips[1]]) self._test_get_changed_ips_for_port(expected_change, original_ips, new_ips, self.owner_router) def test__get_changed_ips_for_port_autoaddress(self): new_ips = self._prepare_ips(self.default_new_ips) original = (('id-1', '192.168.1.1'), ('id-5', '2000:1234:5678::12FF:FE34:5678')) original_ips = self._prepare_ips(original) self._mock_slaac_subnet_on() expected_change = self.mixin.Changes(add=[new_ips[1]], original=original_ips, remove=[]) self._test_get_changed_ips_for_port(expected_change, original_ips, new_ips, self.owner_non_router) def test__get_changed_ips_for_port_remove_autoaddress(self): new = (('id-5', '2000:1234:5678::12FF:FE34:5678', True), ('id-1', '192.168.1.1')) new_ips = self._prepare_ips(new) reference_ips = [ip for ip in new_ips if ip['subnet_id'] == 'id-1'] original = (('id-5', '2000:1234:5678::12FF:FE34:5678'),) original_ips = self._prepare_ips(original) # mock ipv6 subnet as auto addressed and leave ipv4 as regular self._mock_slaac_for_subnet_ids([new[0][0]]) # Autoaddressed ip allocation has to be removed # if it has 'delete_subnet' flag set to True expected_change = self.mixin.Changes(add=reference_ips, original=[], remove=original_ips) self._test_get_changed_ips_for_port(expected_change, original_ips, new_ips, self.owner_non_router) def test__get_changed_ips_for_port_autoaddress_ipv6_pd_enabled(self): owner_not_router = constants.DEVICE_OWNER_DHCP new_ips = self._prepare_ips(self.default_new_ips) original = (('id-1', '192.168.1.1'), ('id-5', '2000:1234:5678::12FF:FE34:5678')) original_ips = self._prepare_ips(original) # mock to test 
auto address part pd_subnet = {'subnetpool_id': constants.IPV6_PD_POOL_ID, 'ipv6_address_mode': constants.IPV6_SLAAC, 'ipv6_ra_mode': constants.IPV6_SLAAC} self.mixin._get_subnet = mock.Mock(return_value=pd_subnet) # make a copy of original_ips # since it is changed by _get_changed_ips_for_port expected_change = self.mixin.Changes(add=[new_ips[1]], original=[original_ips[0]], remove=[original_ips[1]]) self._test_get_changed_ips_for_port(expected_change, original_ips, new_ips, owner_not_router) def _test_get_changed_ips_for_port_no_ip_address(self): # IP address should be added if only subnet_id is provided, # independently from auto_address status for subnet new_ips = [{'subnet_id': 'id-3'}] original_ips = [] expected_change = self.mixin.Changes(add=[new_ips[0]], original=[], remove=[]) self._test_get_changed_ips_for_port(expected_change, original_ips, new_ips, self.owner_non_router) def test__get_changed_ips_for_port_no_ip_address_no_slaac(self): self._mock_slaac_subnet_off() self._test_get_changed_ips_for_port_no_ip_address() def test__get_changed_ips_for_port_no_ip_address_slaac(self): self._mock_slaac_subnet_on() self._test_get_changed_ips_for_port_no_ip_address() def test__is_ip_required_by_subnet_for_router_port(self): # Owner -> router: # _get_subnet should not be called, # expected True self._mock_slaac_subnet_off() result = self.mixin._is_ip_required_by_subnet(self.ctx, 'id', self.owner_router) self.assertTrue(result) self.assertFalse(self.mixin._get_subnet.called) def test__is_ip_required_by_subnet_for_non_router_port(self): # Owner -> not router: # _get_subnet should be called, # expected True, because subnet is not slaac self._mock_slaac_subnet_off() result = self.mixin._is_ip_required_by_subnet(self.ctx, 'id', self.owner_non_router) self.assertTrue(result) self.assertTrue(self.mixin._get_subnet.called) def test__is_ip_required_by_subnet_for_non_router_port_and_slaac(self): # Owner -> not router: # _get_subnet should be called, # expected False, because subnet is slaac self._mock_slaac_subnet_on() result = self.mixin._is_ip_required_by_subnet(self.ctx, 'id', self.owner_non_router) self.assertFalse(result) self.assertTrue(self.mixin._get_subnet.called) neutron-8.4.0/neutron/tests/unit/db/test_allowedaddresspairs_db.py0000664000567000056710000003706013044372760026704 0ustar jenkinsjenkins00000000000000# Copyright (c) 2013 OpenStack Foundation. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. from oslo_config import cfg from webob import exc as web_exc from neutron.api.v2 import attributes as attr from neutron.db import allowedaddresspairs_db as addr_pair_db from neutron.db import db_base_plugin_v2 from neutron.db import portsecurity_db from neutron.extensions import allowedaddresspairs as addr_pair from neutron.extensions import portsecurity as psec from neutron.extensions import securitygroup as secgroup from neutron import manager from neutron.tests.unit.db import test_db_base_plugin_v2 DB_PLUGIN_KLASS = ('neutron.tests.unit.db.test_allowedaddresspairs_db.' 
'AllowedAddressPairTestPlugin') class AllowedAddressPairTestCase( test_db_base_plugin_v2.NeutronDbPluginV2TestCase): def setUp(self, plugin=None, ext_mgr=None): super(AllowedAddressPairTestCase, self).setUp(plugin) # Check if a plugin supports security groups plugin_obj = manager.NeutronManager.get_plugin() self._skip_port_security = ('port-security' not in plugin_obj.supported_extension_aliases) class AllowedAddressPairTestPlugin(portsecurity_db.PortSecurityDbMixin, db_base_plugin_v2.NeutronDbPluginV2, addr_pair_db.AllowedAddressPairsMixin): """Test plugin that implements necessary calls on create/delete port for associating ports with port security and allowed address pairs. """ supported_extension_aliases = ["allowed-address-pairs"] def create_port(self, context, port): p = port['port'] with context.session.begin(subtransactions=True): neutron_db = super(AllowedAddressPairTestPlugin, self).create_port( context, port) p.update(neutron_db) if attr.is_attr_set(p.get(addr_pair.ADDRESS_PAIRS)): self._process_create_allowed_address_pairs( context, p, p[addr_pair.ADDRESS_PAIRS]) else: p[addr_pair.ADDRESS_PAIRS] = None return port['port'] def update_port(self, context, id, port): delete_addr_pairs = self._check_update_deletes_allowed_address_pairs( port) has_addr_pairs = self._check_update_has_allowed_address_pairs(port) with context.session.begin(subtransactions=True): ret_port = super(AllowedAddressPairTestPlugin, self).update_port( context, id, port) # copy values over - but not fixed_ips port['port'].pop('fixed_ips', None) ret_port.update(port['port']) if (delete_addr_pairs or has_addr_pairs): # delete address pairs and readd them self._delete_allowed_address_pairs(context, id) self._process_create_allowed_address_pairs( context, ret_port, ret_port[addr_pair.ADDRESS_PAIRS]) return ret_port class AllowedAddressPairDBTestCase(AllowedAddressPairTestCase): def setUp(self, plugin=None, ext_mgr=None): plugin = plugin or DB_PLUGIN_KLASS super(AllowedAddressPairDBTestCase, self).setUp(plugin=plugin, ext_mgr=ext_mgr) class TestAllowedAddressPairs(AllowedAddressPairDBTestCase): def test_create_port_allowed_address_pairs_bad_format(self): with self.network() as net: bad_values = [False, True, 1.1, 1] for value in bad_values: self._create_port( self.fmt, net['network']['id'], expected_res_status=web_exc.HTTPBadRequest.code, arg_list=(addr_pair.ADDRESS_PAIRS,), allowed_address_pairs=value) def test_create_port_allowed_address_pairs(self): with self.network() as net: address_pairs = [{'mac_address': '00:00:00:00:00:01', 'ip_address': '10.0.0.1'}] res = self._create_port(self.fmt, net['network']['id'], arg_list=(addr_pair.ADDRESS_PAIRS,), allowed_address_pairs=address_pairs) port = self.deserialize(self.fmt, res) self.assertEqual(port['port'][addr_pair.ADDRESS_PAIRS], address_pairs) self._delete('ports', port['port']['id']) def test_create_port_security_true_allowed_address_pairs(self): if self._skip_port_security: self.skipTest("Plugin does not implement port-security extension") with self.network() as net: address_pairs = [{'mac_address': '00:00:00:00:00:01', 'ip_address': '10.0.0.1'}] res = self._create_port(self.fmt, net['network']['id'], arg_list=('port_security_enabled', addr_pair.ADDRESS_PAIRS,), port_security_enabled=True, allowed_address_pairs=address_pairs) port = self.deserialize(self.fmt, res) self.assertTrue(port['port'][psec.PORTSECURITY]) self.assertEqual(port['port'][addr_pair.ADDRESS_PAIRS], address_pairs) self._delete('ports', port['port']['id']) def 
test_create_port_security_false_allowed_address_pairs(self): if self._skip_port_security: self.skipTest("Plugin does not implement port-security extension") with self.network() as net: address_pairs = [{'mac_address': '00:00:00:00:00:01', 'ip_address': '10.0.0.1'}] res = self._create_port(self.fmt, net['network']['id'], arg_list=('port_security_enabled', addr_pair.ADDRESS_PAIRS,), port_security_enabled=False, allowed_address_pairs=address_pairs) self.deserialize(self.fmt, res) self.assertEqual(409, res.status_int) address_pairs = [] res = self._create_port(self.fmt, net['network']['id'], arg_list=('port_security_enabled', addr_pair.ADDRESS_PAIRS,), port_security_enabled=False, allowed_address_pairs=address_pairs) port = self.deserialize(self.fmt, res) self.assertFalse(port['port'][psec.PORTSECURITY]) self.assertEqual(port['port'][addr_pair.ADDRESS_PAIRS], address_pairs) self._delete('ports', port['port']['id']) def test_create_port_bad_mac(self): address_pairs = [{'mac_address': 'invalid_mac', 'ip_address': '10.0.0.1'}] self._create_port_with_address_pairs(address_pairs, 400) def test_create_port_bad_ip(self): address_pairs = [{'mac_address': '00:00:00:00:00:01', 'ip_address': '10.0.0.1222'}] self._create_port_with_address_pairs(address_pairs, 400) def test_create_missing_ip_field(self): address_pairs = [{'mac_address': '00:00:00:00:00:01'}] self._create_port_with_address_pairs(address_pairs, 400) def test_create_duplicate_mac_ip(self): address_pairs = [{'mac_address': '00:00:00:00:00:01', 'ip_address': '10.0.0.1'}, {'mac_address': '00:00:00:00:00:01', 'ip_address': '10.0.0.1'}] self._create_port_with_address_pairs(address_pairs, 400) def test_more_than_max_allowed_address_pair(self): cfg.CONF.set_default('max_allowed_address_pair', 3) address_pairs = [{'mac_address': '00:00:00:00:00:01', 'ip_address': '10.0.0.1'}, {'mac_address': '00:00:00:00:00:02', 'ip_address': '10.0.0.2'}, {'mac_address': '00:00:00:00:00:03', 'ip_address': '10.0.0.3'}, {'mac_address': '00:00:00:00:00:04', 'ip_address': '10.0.0.4'}] self._create_port_with_address_pairs(address_pairs, 400) def test_equal_to_max_allowed_address_pair(self): cfg.CONF.set_default('max_allowed_address_pair', 3) address_pairs = [{'mac_address': '00:00:00:00:00:01', 'ip_address': '10.0.0.1'}, {'mac_address': '00:00:00:00:00:02', 'ip_address': '10.0.0.2'}, {'mac_address': '00:00:00:00:00:03', 'ip_address': '10.0.0.3'}] self._create_port_with_address_pairs(address_pairs, 201) def test_create_overlap_with_fixed_ip(self): address_pairs = [{'mac_address': '00:00:00:00:00:01', 'ip_address': '10.0.0.2'}] with self.network() as network: with self.subnet(network=network, cidr='10.0.0.0/24') as subnet: fixed_ips = [{'subnet_id': subnet['subnet']['id'], 'ip_address': '10.0.0.2'}] res = self._create_port(self.fmt, network['network']['id'], arg_list=(addr_pair.ADDRESS_PAIRS, 'fixed_ips'), allowed_address_pairs=address_pairs, fixed_ips=fixed_ips) self.assertEqual(res.status_int, 201) port = self.deserialize(self.fmt, res) self._delete('ports', port['port']['id']) def test_create_port_extra_args(self): address_pairs = [{'mac_address': '00:00:00:00:00:01', 'ip_address': '10.0.0.1', 'icbb': 'agreed'}] self._create_port_with_address_pairs(address_pairs, 400) def _create_port_with_address_pairs(self, address_pairs, ret_code): with self.network() as net: res = self._create_port(self.fmt, net['network']['id'], arg_list=(addr_pair.ADDRESS_PAIRS,), allowed_address_pairs=address_pairs) port = self.deserialize(self.fmt, res) self.assertEqual(res.status_int, ret_code) 
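# Only a successful create (201) leaves a port behind to clean up, so the
# delete below is guarded by the expected return code.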
if ret_code == 201: self._delete('ports', port['port']['id']) def test_update_add_address_pairs(self): with self.network() as net: res = self._create_port(self.fmt, net['network']['id']) port = self.deserialize(self.fmt, res) address_pairs = [{'mac_address': '00:00:00:00:00:01', 'ip_address': '10.0.0.1'}] update_port = {'port': {addr_pair.ADDRESS_PAIRS: address_pairs}} req = self.new_update_request('ports', update_port, port['port']['id']) port = self.deserialize(self.fmt, req.get_response(self.api)) self.assertEqual(port['port'][addr_pair.ADDRESS_PAIRS], address_pairs) self._delete('ports', port['port']['id']) def test_create_address_gets_port_mac(self): with self.network() as net: address_pairs = [{'ip_address': '23.23.23.23'}] res = self._create_port(self.fmt, net['network']['id'], arg_list=('port_security_enabled', addr_pair.ADDRESS_PAIRS,), allowed_address_pairs=address_pairs) port = self.deserialize(self.fmt, res)['port'] port_addr_mac = port[addr_pair.ADDRESS_PAIRS][0]['mac_address'] self.assertEqual(port_addr_mac, port['mac_address']) self._delete('ports', port['id']) def test_update_port_security_off_address_pairs(self): if self._skip_port_security: self.skipTest("Plugin does not implement port-security extension") with self.network() as net: with self.subnet(network=net) as subnet: address_pairs = [{'mac_address': '00:00:00:00:00:01', 'ip_address': '10.0.0.1'}] # The port should not have any security-groups associated to it with self.port(subnet=subnet, arg_list=(psec.PORTSECURITY, addr_pair.ADDRESS_PAIRS, secgroup.SECURITYGROUPS), port_security_enabled=True, allowed_address_pairs=address_pairs, security_groups=[]) as port: update_port = {'port': {psec.PORTSECURITY: False}} req = self.new_update_request('ports', update_port, port['port']['id']) res = req.get_response(self.api) self.assertEqual(409, res.status_int) def test_update_with_none_and_own_mac_for_duplicate_ip(self): with self.network() as net: res = self._create_port(self.fmt, net['network']['id']) port = self.deserialize(self.fmt, res) mac_address = port['port']['mac_address'] address_pairs = [{'ip_address': '10.0.0.1'}, {'mac_address': mac_address, 'ip_address': '10.0.0.1'}] update_port = {'port': {addr_pair.ADDRESS_PAIRS: address_pairs}} req = self.new_update_request('ports', update_port, port['port']['id']) res = req.get_response(self.api) self.assertEqual(400, res.status_int) def test_create_port_remove_allowed_address_pairs_with_list(self): self._test_create_port_remove_allowed_address_pairs([]) def test_create_port_remove_allowed_address_pairs_with_none(self): self._test_create_port_remove_allowed_address_pairs(None) def _test_create_port_remove_allowed_address_pairs(self, update_value): with self.network() as net: address_pairs = [{'mac_address': '00:00:00:00:00:01', 'ip_address': '10.0.0.1'}] res = self._create_port(self.fmt, net['network']['id'], arg_list=(addr_pair.ADDRESS_PAIRS,), allowed_address_pairs=address_pairs) port = self.deserialize(self.fmt, res) update_port = {'port': {addr_pair.ADDRESS_PAIRS: update_value}} req = self.new_update_request('ports', update_port, port['port']['id']) port = self.deserialize(self.fmt, req.get_response(self.api)) self.assertEqual([], port['port'][addr_pair.ADDRESS_PAIRS]) self._delete('ports', port['port']['id']) neutron-8.4.0/neutron/tests/unit/db/test_sqlalchemytypes.py0000664000567000056710000002037213044372760025430 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with 
the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import abc import netaddr from oslo_db import exception from oslo_db.sqlalchemy import test_base from oslo_utils import uuidutils import six import sqlalchemy as sa from neutron import context from neutron.db import sqlalchemytypes from neutron.tests import tools @six.add_metaclass(abc.ABCMeta) class SqlAlchemyTypesBaseTestCase(test_base.DbTestCase): def setUp(self): super(SqlAlchemyTypesBaseTestCase, self).setUp() meta = sa.MetaData(bind=self.engine) self.test_table = self._get_test_table(meta) self.test_table.create() self.addCleanup(meta.drop_all) self.ctxt = context.get_admin_context() @abc.abstractmethod def _get_test_table(self, meta): """Returns a new sa.Table() object for this test case.""" def _add_row(self, **kargs): self.engine.execute(self.test_table.insert().values(**kargs)) def _get_all(self): rows_select = self.test_table.select() return self.engine.execute(rows_select).fetchall() def _update_row(self, **kargs): self.engine.execute(self.test_table.update().values(**kargs)) def _delete_rows(self): self.engine.execute(self.test_table.delete()) def _validate_crud(self, data_field_name, expected=None): objs = self._get_all() self.assertEqual(len(expected) if expected else 0, len(objs)) if expected: for obj in objs: name = obj['id'] self.assertEqual(expected[name], obj[data_field_name]) class IPAddressTestCase(SqlAlchemyTypesBaseTestCase): def _get_test_table(self, meta): return sa.Table( 'fakeipaddressmodels', meta, sa.Column('id', sa.String(36), primary_key=True, nullable=False), sa.Column('ip', sqlalchemytypes.IPAddress)) def _validate_ip_address(self, data_field_name, expected=None): objs = self._get_all() self.assertEqual(len(expected) if expected else 0, len(objs)) if expected: for obj in objs: name = obj['id'] self.assertEqual(expected[name], obj[data_field_name]) def _test_crud(self, ip_addresses): ip = netaddr.IPAddress(ip_addresses[0]) self._add_row(id='fake_id', ip=ip) self._validate_ip_address(data_field_name='ip', expected={'fake_id': ip}) ip2 = netaddr.IPAddress(ip_addresses[1]) self._update_row(ip=ip2) self._validate_ip_address(data_field_name='ip', expected={'fake_id': ip2}) self._delete_rows() self._validate_ip_address(data_field_name='ip', expected=None) def test_crud(self): ip_addresses = ["10.0.0.1", "10.0.0.2"] self._test_crud(ip_addresses) ip_addresses = [ "2210::ffff:ffff:ffff:ffff", "2120::ffff:ffff:ffff:ffff" ] self._test_crud(ip_addresses) def test_wrong_type(self): self.assertRaises(exception.DBError, self._add_row, id='fake_id', ip="") self.assertRaises(exception.DBError, self._add_row, id='fake_id', ip="10.0.0.5") def _test_multiple_create(self, entries): reference = {} for entry in entries: ip = netaddr.IPAddress(entry['ip']) name = entry['name'] self._add_row(id=name, ip=ip) reference[name] = ip self._validate_ip_address(data_field_name='ip', expected=reference) self._delete_rows() self._validate_ip_address(data_field_name='ip', expected=None) def test_multiple_create(self): ip_addresses = [ {'name': 'fake_id1', 'ip': "10.0.0.5"}, {'name': 'fake_id2', 'ip': "10.0.0.1"}, {'name': 'fake_id3', 'ip': "2210::ffff:ffff:ffff:ffff"}, {'name': 
'fake_id4', 'ip': "2120::ffff:ffff:ffff:ffff"} ] self._test_multiple_create(ip_addresses) class CIDRTestCase(SqlAlchemyTypesBaseTestCase): def _get_test_table(self, meta): return sa.Table( 'fakecidrmodels', meta, sa.Column('id', sa.String(36), primary_key=True, nullable=False), sa.Column('cidr', sqlalchemytypes.CIDR) ) def _get_one(self, value): row_select = self.test_table.select().\ where(self.test_table.c.cidr == value) return self.engine.execute(row_select).first() def _update_row(self, key, cidr): self.engine.execute( self.test_table.update().values(cidr=cidr). where(self.test_table.c.cidr == key)) def test_crud(self): cidrs = ["10.0.0.0/24", "10.123.250.9/32", "2001:db8::/42", "fe80::21e:67ff:fed0:56f0/64"] for cidr_str in cidrs: cidr = netaddr.IPNetwork(cidr_str) self._add_row(id=uuidutils.generate_uuid(), cidr=cidr) obj = self._get_one(cidr) self.assertEqual(cidr, obj['cidr']) random_cidr = netaddr.IPNetwork(tools.get_random_cidr()) self._update_row(cidr, random_cidr) obj = self._get_one(random_cidr) self.assertEqual(random_cidr, obj['cidr']) objs = self._get_all() self.assertEqual(len(cidrs), len(objs)) self._delete_rows() objs = self._get_all() self.assertEqual(0, len(objs)) def test_wrong_cidr(self): wrong_cidrs = ["10.500.5.0/24", "10.0.0.1/40", "10.0.0.10.0/24", "cidr", "", '2001:db8:5000::/64', '2001:db8::/130'] for cidr in wrong_cidrs: self.assertRaises(exception.DBError, self._add_row, id=uuidutils.generate_uuid(), cidr=cidr) class MACAddressTestCase(SqlAlchemyTypesBaseTestCase): def _get_test_table(self, meta): return sa.Table( 'fakemacaddressmodels', meta, sa.Column('id', sa.String(36), primary_key=True, nullable=False), sa.Column('mac', sqlalchemytypes.MACAddress) ) def _get_one(self, value): row_select = self.test_table.select().\ where(self.test_table.c.mac == value) return self.engine.execute(row_select).first() def _get_all(self): rows_select = self.test_table.select() return self.engine.execute(rows_select).fetchall() def _update_row(self, key, mac): self.engine.execute( self.test_table.update().values(mac=mac). where(self.test_table.c.mac == key)) def _delete_row(self): self.engine.execute( self.test_table.delete()) def test_crud(self): mac_addresses = ['FA:16:3E:00:00:01', 'FA:16:3E:00:00:02'] for mac in mac_addresses: mac = netaddr.EUI(mac) self._add_row(id=uuidutils.generate_uuid(), mac=mac) obj = self._get_one(mac) self.assertEqual(mac, obj['mac']) random_mac = netaddr.EUI(tools.get_random_mac()) self._update_row(mac, random_mac) obj = self._get_one(random_mac) self.assertEqual(random_mac, obj['mac']) objs = self._get_all() self.assertEqual(len(mac_addresses), len(objs)) self._delete_rows() objs = self._get_all() self.assertEqual(0, len(objs)) def test_wrong_mac(self): wrong_macs = ["fake", "", -1, "FK:16:3E:00:00:02", "FA:16:3E:00:00:020"] for mac in wrong_macs: self.assertRaises(exception.DBError, self._add_row, id=uuidutils.generate_uuid(), mac=mac) neutron-8.4.0/neutron/tests/unit/db/test_migration.py0000664000567000056710000010224413044372760024171 0ustar jenkinsjenkins00000000000000# Copyright 2012 New Dream Network, LLC (DreamHost) # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import copy import os import re import sys import textwrap from alembic.autogenerate import api as alembic_ag_api from alembic import config as alembic_config from alembic.operations import ops as alembic_ops from alembic import script as alembic_script import fixtures import mock from oslo_utils import fileutils import pkg_resources import sqlalchemy as sa from testtools import matchers from neutron.db import migration from neutron.db.migration import autogen from neutron.db.migration import cli from neutron.tests import base from neutron.tests import tools from neutron.tests.unit import testlib_api class FakeConfig(object): service = '' class FakeRevision(object): path = 'fakepath' def __init__(self, labels=None, down_revision=None, is_branch_point=False): if not labels: labels = set() self.branch_labels = labels self.down_revision = down_revision self.is_branch_point = is_branch_point self.revision = tools.get_random_string() self.module = mock.MagicMock() class MigrationEntrypointsMemento(fixtures.Fixture): '''Create a copy of the migration entrypoints map so it can be restored during test cleanup. ''' def _setUp(self): self.ep_backup = {} for proj, ep in cli.migration_entrypoints.items(): self.ep_backup[proj] = copy.copy(ep) self.addCleanup(self.restore) def restore(self): cli.migration_entrypoints = self.ep_backup class TestDbMigration(base.BaseTestCase): def setUp(self): super(TestDbMigration, self).setUp() mock.patch('alembic.op.get_bind').start() self.mock_alembic_is_offline = mock.patch( 'alembic.context.is_offline_mode', return_value=False).start() self.mock_alembic_is_offline.return_value = False self.mock_sa_inspector = mock.patch( 'sqlalchemy.engine.reflection.Inspector').start() def _prepare_mocked_sqlalchemy_inspector(self): mock_inspector = mock.MagicMock() mock_inspector.get_table_names.return_value = ['foo', 'bar'] mock_inspector.get_columns.return_value = [{'name': 'foo_column'}, {'name': 'bar_column'}] self.mock_sa_inspector.from_engine.return_value = mock_inspector def test_schema_has_table(self): self._prepare_mocked_sqlalchemy_inspector() self.assertTrue(migration.schema_has_table('foo')) def test_schema_has_table_raises_if_offline(self): self.mock_alembic_is_offline.return_value = True self.assertRaises(RuntimeError, migration.schema_has_table, 'foo') def test_schema_has_column_missing_table(self): self._prepare_mocked_sqlalchemy_inspector() self.assertFalse(migration.schema_has_column('meh', 'meh')) def test_schema_has_column(self): self._prepare_mocked_sqlalchemy_inspector() self.assertTrue(migration.schema_has_column('foo', 'foo_column')) def test_schema_has_column_raises_if_offline(self): self.mock_alembic_is_offline.return_value = True self.assertRaises(RuntimeError, migration.schema_has_column, 'foo', 'foo_col') def test_schema_has_column_missing_column(self): self._prepare_mocked_sqlalchemy_inspector() self.assertFalse(migration.schema_has_column( 'foo', column_name='meh')) class TestCli(base.BaseTestCase): def setUp(self): super(TestCli, self).setUp() self.do_alembic_cmd_p = mock.patch.object(cli, 'do_alembic_command') self.do_alembic_cmd = 
self.do_alembic_cmd_p.start() self.mock_alembic_err = mock.patch('alembic.util.err').start() self.mock_alembic_warn = mock.patch('alembic.util.warn').start() self.mock_alembic_err.side_effect = SystemExit def mocked_root_dir(cfg): return os.path.join('/fake/dir', cli._get_project_base(cfg)) mock_root = mock.patch.object(cli, '_get_package_root_dir').start() mock_root.side_effect = mocked_root_dir # Avoid creating fake directories mock.patch('neutron.common.utils.ensure_dir').start() # Set up some configs and entrypoints for tests to chew on self.configs = [] self.projects = ('neutron', 'networking-foo', 'neutron-fwaas') ini = os.path.join(os.path.dirname(cli.__file__), 'alembic.ini') self.useFixture(MigrationEntrypointsMemento()) cli.migration_entrypoints = {} for project in self.projects: config = alembic_config.Config(ini) config.set_main_option('neutron_project', project) module_name = project.replace('-', '_') + '.db.migration' attrs = ('alembic_migrations',) script_location = ':'.join([module_name, attrs[0]]) config.set_main_option('script_location', script_location) self.configs.append(config) entrypoint = pkg_resources.EntryPoint(project, module_name, attrs=attrs) cli.migration_entrypoints[project] = entrypoint def _main_test_helper(self, argv, func_name, exp_kwargs=[{}]): with mock.patch.object(sys, 'argv', argv),\ mock.patch.object(cli, 'run_sanity_checks'),\ mock.patch.object(cli, 'validate_revisions'),\ mock.patch.object(cli, '_use_separate_migration_branches'): cli.main() self.do_alembic_cmd.assert_has_calls( [mock.call(mock.ANY, func_name, **kwargs) for kwargs in exp_kwargs] ) def test_stamp(self): self._main_test_helper( ['prog', 'stamp', 'foo'], 'stamp', [{'revision': 'foo', 'sql': False}] ) self._main_test_helper( ['prog', 'stamp', 'foo', '--sql'], 'stamp', [{'revision': 'foo', 'sql': True}] ) def _validate_cmd(self, cmd): self._main_test_helper( ['prog', cmd], cmd, [{'verbose': False}]) self._main_test_helper( ['prog', cmd, '--verbose'], cmd, [{'verbose': True}]) def test_branches(self): self._validate_cmd('branches') def test_current(self): self._validate_cmd('current') def test_history(self): self._validate_cmd('history') def test_heads(self): self._validate_cmd('heads') def test_check_migration(self): with mock.patch.object(cli, 'validate_head_file') as validate: self._main_test_helper(['prog', 'check_migration'], 'branches') self.assertEqual(len(self.projects), validate.call_count) def _test_database_sync_revision(self, separate_branches=True): with mock.patch.object(cli, 'update_head_files') as update,\ mock.patch.object(cli, '_use_separate_migration_branches', return_value=separate_branches): if separate_branches: mock.patch('os.path.exists').start() expected_kwargs = [{ 'message': 'message', 'sql': False, 'autogenerate': True, }] self._main_test_helper( ['prog', 'revision', '--autogenerate', '-m', 'message'], 'revision', expected_kwargs ) self.assertEqual(len(self.projects), update.call_count) update.reset_mock() for kwarg in expected_kwargs: kwarg['autogenerate'] = False kwarg['sql'] = True self._main_test_helper( ['prog', 'revision', '--sql', '-m', 'message'], 'revision', expected_kwargs ) self.assertEqual(len(self.projects), update.call_count) update.reset_mock() for kwarg in expected_kwargs: kwarg['sql'] = False kwarg['head'] = 'expand@head' self._main_test_helper( ['prog', 'revision', '-m', 'message', '--expand'], 'revision', expected_kwargs ) self.assertEqual(len(self.projects), update.call_count) update.reset_mock() for kwarg in expected_kwargs: kwarg['head'] 
= 'contract@head' self._main_test_helper( ['prog', 'revision', '-m', 'message', '--contract'], 'revision', expected_kwargs ) self.assertEqual(len(self.projects), update.call_count) def test_database_sync_revision(self): self._test_database_sync_revision() def test_database_sync_revision_no_branches(self): # Test that old branchless approach is still supported self._test_database_sync_revision(separate_branches=False) def test_upgrade_revision(self): self._main_test_helper( ['prog', 'upgrade', '--sql', 'head'], 'upgrade', [{'desc': None, 'revision': 'heads', 'sql': True}] ) def test_upgrade_delta(self): self._main_test_helper( ['prog', 'upgrade', '--delta', '3'], 'upgrade', [{'desc': None, 'revision': '+3', 'sql': False}] ) def test_upgrade_revision_delta(self): self._main_test_helper( ['prog', 'upgrade', 'kilo', '--delta', '3'], 'upgrade', [{'desc': None, 'revision': 'kilo+3', 'sql': False}] ) def test_upgrade_expand(self): self._main_test_helper( ['prog', 'upgrade', '--expand'], 'upgrade', [{'desc': cli.EXPAND_BRANCH, 'revision': 'expand@head', 'sql': False}] ) def test_upgrade_expand_contract_are_mutually_exclusive(self): with testlib_api.ExpectedException(SystemExit): self._main_test_helper( ['prog', 'upgrade', '--expand --contract'], 'upgrade') def _test_upgrade_conflicts_with_revision(self, mode): with testlib_api.ExpectedException(SystemExit): self._main_test_helper( ['prog', 'upgrade', '--%s revision1' % mode], 'upgrade') def _test_upgrade_conflicts_with_delta(self, mode): with testlib_api.ExpectedException(SystemExit): self._main_test_helper( ['prog', 'upgrade', '--%s +3' % mode], 'upgrade') def _test_revision_autogenerate_conflicts_with_branch(self, branch): with testlib_api.ExpectedException(SystemExit): self._main_test_helper( ['prog', 'revision', '--autogenerate', '--%s' % branch], 'revision') def test_revision_autogenerate_conflicts_with_expand(self): self._test_revision_autogenerate_conflicts_with_branch( cli.EXPAND_BRANCH) def test_revision_autogenerate_conflicts_with_contract(self): self._test_revision_autogenerate_conflicts_with_branch( cli.CONTRACT_BRANCH) def test_upgrade_expand_conflicts_with_revision(self): self._test_upgrade_conflicts_with_revision('expand') def test_upgrade_contract_conflicts_with_revision(self): self._test_upgrade_conflicts_with_revision('contract') def test_upgrade_expand_conflicts_with_delta(self): self._test_upgrade_conflicts_with_delta('expand') def test_upgrade_contract_conflicts_with_delta(self): self._test_upgrade_conflicts_with_delta('contract') def test_upgrade_contract(self): self._main_test_helper( ['prog', 'upgrade', '--contract'], 'upgrade', [{'desc': cli.CONTRACT_BRANCH, 'revision': 'contract@head', 'sql': False}] ) @mock.patch('alembic.script.ScriptDirectory.walk_revisions') def test_upgrade_milestone_expand_before_contract(self, walk_mock): c_revs = [FakeRevision(labels={cli.CONTRACT_BRANCH}) for r in range(5)] c_revs[1].module.neutron_milestone = [migration.LIBERTY] e_revs = [FakeRevision(labels={cli.EXPAND_BRANCH}) for r in range(5)] e_revs[3].module.neutron_milestone = [migration.LIBERTY] walk_mock.return_value = c_revs + e_revs self._main_test_helper( ['prog', '--subproject', 'neutron', 'upgrade', 'liberty'], 'upgrade', [{'desc': cli.EXPAND_BRANCH, 'revision': e_revs[3].revision, 'sql': False}, {'desc': cli.CONTRACT_BRANCH, 'revision': c_revs[1].revision, 'sql': False}] ) def assert_command_fails(self, command): # Avoid cluttering stdout with argparse error messages mock.patch('argparse.ArgumentParser._print_message').start() 
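# Run cli.main() against the fake argv with sanity checks stubbed out; the
# only acceptable outcome is the SystemExit raised by argparse.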
with mock.patch.object(sys, 'argv', command), mock.patch.object( cli, 'run_sanity_checks'): self.assertRaises(SystemExit, cli.main) def test_downgrade_fails(self): self.assert_command_fails(['prog', 'downgrade', '--sql', 'juno']) @mock.patch.object(cli, '_use_separate_migration_branches') def test_upgrade_negative_relative_revision_fails(self, use_mock): self.assert_command_fails(['prog', 'upgrade', '-2']) @mock.patch.object(cli, '_use_separate_migration_branches') def test_upgrade_negative_delta_fails(self, use_mock): self.assert_command_fails(['prog', 'upgrade', '--delta', '-2']) @mock.patch.object(cli, '_use_separate_migration_branches') def test_upgrade_rejects_delta_with_relative_revision(self, use_mock): self.assert_command_fails(['prog', 'upgrade', '+2', '--delta', '3']) def _test_validate_head_file_helper(self, heads, file_heads=None): if file_heads is None: file_heads = [] fake_config = self.configs[0] mock_open = self.useFixture( tools.OpenFixture(cli._get_head_file_path(fake_config), '\n'.join(file_heads))).mock_open with mock.patch('alembic.script.ScriptDirectory.from_config') as fc,\ mock.patch.object(cli, '_use_separate_migration_branches', return_value=False): fc.return_value.get_heads.return_value = heads if all(head in file_heads for head in heads): cli.validate_head_file(fake_config) else: self.assertRaises( SystemExit, cli.validate_head_file, fake_config ) self.assertTrue(self.mock_alembic_err.called) mock_open.assert_called_with( cli._get_head_file_path(fake_config)) fc.assert_called_once_with(fake_config) def _test_validate_head_files_helper(self, heads, contract_head='', expand_head=''): fake_config = self.configs[0] head_files_not_exist = (contract_head == expand_head == '') with mock.patch('alembic.script.ScriptDirectory.from_config') as fc,\ mock.patch('os.path.exists') as os_mock,\ mock.patch.object(cli, '_use_separate_migration_branches', return_value=True): if head_files_not_exist: os_mock.return_value = False else: os_mock.return_value = True fc.return_value.get_heads.return_value = heads revs = {heads[0]: FakeRevision(labels=cli.CONTRACT_BRANCH), heads[1]: FakeRevision(labels=cli.EXPAND_BRANCH)} fc.return_value.get_revision.side_effect = revs.__getitem__ mock_open_con = self.useFixture( tools.OpenFixture(cli._get_contract_head_file_path( fake_config), contract_head + '\n')).mock_open mock_open_ex = self.useFixture( tools.OpenFixture(cli._get_expand_head_file_path( fake_config), expand_head + '\n')).mock_open if contract_head in heads and expand_head in heads: cli.validate_head_file(fake_config) elif head_files_not_exist: cli.validate_head_file(fake_config) self.assertTrue(self.mock_alembic_warn.called) else: self.assertRaises( SystemExit, cli.validate_head_file, fake_config ) self.assertTrue(self.mock_alembic_err.called) if contract_head in heads and expand_head in heads: mock_open_ex.assert_called_with( cli._get_expand_head_file_path(fake_config)) mock_open_con.assert_called_with( cli._get_contract_head_file_path(fake_config)) if not head_files_not_exist: fc.assert_called_once_with(fake_config) def test_validate_head_files_success(self): self._test_validate_head_files_helper(['a', 'b'], contract_head='a', expand_head='b') def test_validate_head_files_missing_file(self): self._test_validate_head_files_helper(['a', 'b']) def test_validate_head_files_wrong_contents(self): self._test_validate_head_files_helper(['a', 'b'], contract_head='c', expand_head='d') def test_validate_head_file_branchless_wrong_contents(self): self._test_validate_head_file_helper(['a'], 
['b']) def test_validate_head_file_branchless_success(self): self._test_validate_head_file_helper(['a'], ['a']) def test_validate_head_file_branchless_missing_file(self): self._test_validate_head_file_helper(['a']) def test_update_head_file_success(self): head = ['b'] mock_open = self.useFixture( tools.OpenFixture(cli._get_head_file_path( self.configs[0]))).mock_open with mock.patch('alembic.script.ScriptDirectory.from_config') as fc: fc.return_value.get_heads.return_value = head cli.update_head_file(self.configs[0]) mock_open.return_value.write.assert_called_with( '\n'.join(head)) @mock.patch.object(cli, '_use_separate_migration_branches', return_value=True) @mock.patch.object(fileutils, 'delete_if_exists') def test_update_head_files_success(self, *mocks): heads = ['a', 'b'] mock_open_con = self.useFixture( tools.OpenFixture(cli._get_contract_head_file_path( self.configs[0]))).mock_open mock_open_ex = self.useFixture( tools.OpenFixture(cli._get_expand_head_file_path( self.configs[0]))).mock_open with mock.patch('alembic.script.ScriptDirectory.from_config') as fc: fc.return_value.get_heads.return_value = heads revs = {heads[0]: FakeRevision(labels=cli.CONTRACT_BRANCH), heads[1]: FakeRevision(labels=cli.EXPAND_BRANCH)} fc.return_value.get_revision.side_effect = revs.__getitem__ cli.update_head_files(self.configs[0]) mock_open_con.return_value.write.assert_called_with( heads[0] + '\n') mock_open_ex.return_value.write.assert_called_with(heads[1] + '\n') old_head_file = cli._get_head_file_path( self.configs[0]) old_heads_file = cli._get_heads_file_path( self.configs[0]) delete_if_exists = mocks[0] self.assertIn(mock.call(old_head_file), delete_if_exists.call_args_list) self.assertIn(mock.call(old_heads_file), delete_if_exists.call_args_list) def test_get_project_base(self): config = alembic_config.Config() config.set_main_option('script_location', 'a.b.c:d') proj_base = cli._get_project_base(config) self.assertEqual('a', proj_base) def test_get_root_versions_dir(self): config = alembic_config.Config() config.set_main_option('script_location', 'a.b.c:d') versions_dir = cli._get_root_versions_dir(config) self.assertEqual('/fake/dir/a/a/b/c/d/versions', versions_dir) def test_get_subproject_script_location(self): foo_ep = cli._get_subproject_script_location('networking-foo') expected = 'networking_foo.db.migration:alembic_migrations' self.assertEqual(expected, foo_ep) def test_get_subproject_script_location_not_installed(self): self.assertRaises( SystemExit, cli._get_subproject_script_location, 'not-installed') def test_get_service_script_location(self): fwaas_ep = cli._get_service_script_location('fwaas') expected = 'neutron_fwaas.db.migration:alembic_migrations' self.assertEqual(expected, fwaas_ep) def test_get_service_script_location_not_installed(self): self.assertRaises( SystemExit, cli._get_service_script_location, 'myaas') def test_get_subproject_base_not_installed(self): self.assertRaises( SystemExit, cli._get_subproject_base, 'not-installed') def test__compare_labels_ok(self): labels = {'label1', 'label2'} fake_revision = FakeRevision(labels) cli._compare_labels(fake_revision, {'label1', 'label2'}) def test__compare_labels_fail_unexpected_labels(self): labels = {'label1', 'label2', 'label3'} fake_revision = FakeRevision(labels) self.assertRaises( SystemExit, cli._compare_labels, fake_revision, {'label1', 'label2'}) @mock.patch.object(cli, '_compare_labels') def test__validate_single_revision_labels_branchless_fail_different_labels( self, compare_mock): fake_down_revision = FakeRevision() 
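# Chain a revision onto its down-revision so the label validation is
# exercised against both ends of the (branchless) chain.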
fake_revision = FakeRevision(down_revision=fake_down_revision) script_dir = mock.Mock() script_dir.get_revision.return_value = fake_down_revision cli._validate_single_revision_labels(script_dir, fake_revision, label=None) expected_labels = set() compare_mock.assert_has_calls( [mock.call(revision, expected_labels) for revision in (fake_revision, fake_down_revision)] ) @mock.patch.object(cli, '_compare_labels') def test__validate_single_revision_labels_branches_fail_different_labels( self, compare_mock): fake_down_revision = FakeRevision() fake_revision = FakeRevision(down_revision=fake_down_revision) script_dir = mock.Mock() script_dir.get_revision.return_value = fake_down_revision cli._validate_single_revision_labels( script_dir, fake_revision, label='fakebranch') expected_labels = {'fakebranch'} compare_mock.assert_has_calls( [mock.call(revision, expected_labels) for revision in (fake_revision, fake_down_revision)] ) @mock.patch.object(cli, '_validate_single_revision_labels') def test__validate_revision_validates_branches(self, validate_mock): script_dir = mock.Mock() fake_revision = FakeRevision() branch = cli.MIGRATION_BRANCHES[0] fake_revision.path = os.path.join('/fake/path', branch) cli._validate_revision(script_dir, fake_revision) validate_mock.assert_called_with( script_dir, fake_revision, label=branch) @mock.patch.object(cli, '_validate_single_revision_labels') def test__validate_revision_validates_branchless_migrations( self, validate_mock): script_dir = mock.Mock() fake_revision = FakeRevision() cli._validate_revision(script_dir, fake_revision) validate_mock.assert_called_with(script_dir, fake_revision) @mock.patch.object(cli, '_validate_revision') @mock.patch('alembic.script.ScriptDirectory.walk_revisions') def test_validate_revisions_walks_thru_all_revisions( self, walk_mock, validate_mock): revisions = [FakeRevision() for i in range(10)] walk_mock.return_value = revisions cli.validate_revisions(self.configs[0]) validate_mock.assert_has_calls( [mock.call(mock.ANY, revision) for revision in revisions] ) @mock.patch.object(cli, '_validate_revision') @mock.patch('alembic.script.ScriptDirectory.walk_revisions') def test_validate_revisions_fails_on_multiple_branch_points( self, walk_mock, validate_mock): revisions = [FakeRevision(is_branch_point=True) for i in range(2)] walk_mock.return_value = revisions self.assertRaises( SystemExit, cli.validate_revisions, self.configs[0]) @mock.patch('alembic.script.ScriptDirectory.walk_revisions') def test__get_branch_points(self, walk_mock): revisions = [FakeRevision(is_branch_point=tools.get_random_boolean) for i in range(50)] walk_mock.return_value = revisions script_dir = alembic_script.ScriptDirectory.from_config( self.configs[0]) self.assertEqual(set(rev for rev in revisions if rev.is_branch_point), set(cli._get_branch_points(script_dir))) @mock.patch.object(cli, '_use_separate_migration_branches') @mock.patch.object(cli, '_get_version_branch_path') def test_autogen_process_directives( self, get_version_branch_path, use_separate_migration_branches): use_separate_migration_branches.return_value = True get_version_branch_path.side_effect = lambda cfg, release, branch: ( "/foo/expand" if branch == 'expand' else "/foo/contract") migration_script = alembic_ops.MigrationScript( 'eced083f5df', # these directives will be split into separate # expand/contract scripts alembic_ops.UpgradeOps( ops=[ alembic_ops.CreateTableOp( 'organization', [ sa.Column('id', sa.Integer(), primary_key=True), sa.Column('name', sa.String(50), nullable=False) ] ), 
alembic_ops.ModifyTableOps( 'user', ops=[ alembic_ops.AddColumnOp( 'user', sa.Column('organization_id', sa.Integer()) ), alembic_ops.CreateForeignKeyOp( 'org_fk', 'user', 'organization', ['organization_id'], ['id'] ), alembic_ops.DropConstraintOp( 'user', 'uq_user_org' ), alembic_ops.DropColumnOp( 'user', 'organization_name' ) ] ) ] ), # these will be discarded alembic_ops.DowngradeOps( ops=[ alembic_ops.AddColumnOp( 'user', sa.Column( 'organization_name', sa.String(50), nullable=True) ), alembic_ops.CreateUniqueConstraintOp( 'uq_user_org', 'user', ['user_name', 'organization_name'] ), alembic_ops.ModifyTableOps( 'user', ops=[ alembic_ops.DropConstraintOp('org_fk', 'user'), alembic_ops.DropColumnOp('user', 'organization_id') ] ), alembic_ops.DropTableOp('organization') ] ), message='create the organization table and ' 'replace user.organization_name' ) directives = [migration_script] autogen.process_revision_directives( mock.Mock(), mock.Mock(), directives ) expand = directives[0] contract = directives[1] self.assertEqual("/foo/expand", expand.version_path) self.assertEqual("/foo/contract", contract.version_path) self.assertTrue(expand.downgrade_ops.is_empty()) self.assertTrue(contract.downgrade_ops.is_empty()) def _get_regex(s): s = textwrap.dedent(s) s = re.escape(s) # alembic 0.8.9 added additional leading '# ' before comments return s.replace('\\#\\#\\#\\ ', '(# )?### ') expected_regex = ("""\ ### commands auto generated by Alembic - please adjust! ### op.create_table('organization', sa.Column('id', sa.Integer(), nullable=False), sa.Column('name', sa.String(length=50), nullable=False), sa.PrimaryKeyConstraint('id') ) op.add_column('user', """ """sa.Column('organization_id', sa.Integer(), nullable=True)) op.create_foreign_key('org_fk', 'user', """ """'organization', ['organization_id'], ['id']) ### end Alembic commands ###""") self.assertThat( alembic_ag_api.render_python_code(expand.upgrade_ops), matchers.MatchesRegex(_get_regex(expected_regex))) expected_regex = ("""\ ### commands auto generated by Alembic - please adjust! 
### op.drop_constraint('user', 'uq_user_org', type_=None) op.drop_column('user', 'organization_name') ### end Alembic commands ###""") self.assertThat( alembic_ag_api.render_python_code(contract.upgrade_ops), matchers.MatchesRegex(_get_regex(expected_regex))) @mock.patch.object(cli, '_get_branch_points', return_value=[]) @mock.patch.object(cli.CONF, 'split_branches', new_callable=mock.PropertyMock, return_value=True, create=True) def test__use_separate_migration_branches_enforced(self, *mocks): self.assertTrue(cli._use_separate_migration_branches(self.configs[0])) @mock.patch.object(cli, '_get_branch_points', return_value=[]) def test__use_separate_migration_branches_no_branch_points(self, *mocks): self.assertFalse(cli._use_separate_migration_branches(self.configs[0])) @mock.patch.object(cli, '_get_branch_points', return_value=['fake1']) def test__use_separate_migration_branches_with_branch_points(self, *mocks): self.assertTrue(cli._use_separate_migration_branches(self.configs[0])) @mock.patch('alembic.script.ScriptDirectory.walk_revisions') def test__find_milestone_revisions_one_branch(self, walk_mock): c_revs = [FakeRevision(labels={cli.CONTRACT_BRANCH}) for r in range(5)] c_revs[1].module.neutron_milestone = [migration.LIBERTY] walk_mock.return_value = c_revs m = cli._find_milestone_revisions(self.configs[0], 'liberty', cli.CONTRACT_BRANCH) self.assertEqual(1, len(m)) m = cli._find_milestone_revisions(self.configs[0], 'liberty', cli.EXPAND_BRANCH) self.assertEqual(0, len(m)) @mock.patch('alembic.script.ScriptDirectory.walk_revisions') def test__find_milestone_revisions_two_branches(self, walk_mock): c_revs = [FakeRevision(labels={cli.CONTRACT_BRANCH}) for r in range(5)] c_revs[1].module.neutron_milestone = [migration.LIBERTY] e_revs = [FakeRevision(labels={cli.EXPAND_BRANCH}) for r in range(5)] e_revs[3].module.neutron_milestone = [migration.LIBERTY] walk_mock.return_value = c_revs + e_revs m = cli._find_milestone_revisions(self.configs[0], 'liberty') self.assertEqual(2, len(m)) m = cli._find_milestone_revisions(self.configs[0], 'mitaka') self.assertEqual(0, len(m)) @mock.patch('alembic.script.ScriptDirectory.walk_revisions') def test__find_milestone_revisions_branchless(self, walk_mock): revisions = [FakeRevision() for r in range(5)] revisions[2].module.neutron_milestone = [migration.LIBERTY] walk_mock.return_value = revisions m = cli._find_milestone_revisions(self.configs[0], 'liberty') self.assertEqual(1, len(m)) m = cli._find_milestone_revisions(self.configs[0], 'mitaka') self.assertEqual(0, len(m)) class TestSafetyChecks(base.BaseTestCase): def test_validate_revisions(self, *mocks): cli.validate_revisions(cli.get_neutron_config()) neutron-8.4.0/neutron/tests/unit/db/test_portsecurity_db_common.py0000664000567000056710000000567313044372760027001 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
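# --- Illustrative sketch, not part of the original suite ---
# test_autogen_process_directives above relies on alembic's
# process_revision_directives hook splitting a single MigrationScript into
# an additive "expand" script and a destructive "contract" script. A
# minimal, hypothetical splitter in the same spirit (the two-bucket policy
# below is an assumption for illustration, not neutron's exact rule set,
# and it only inspects top-level ops, ignoring nested ModifyTableOps):
from alembic.operations import ops as alembic_ops

_EXPAND_OP_TYPES = (alembic_ops.CreateTableOp,
                    alembic_ops.AddColumnOp,
                    alembic_ops.CreateForeignKeyOp)

def split_upgrade_ops(upgrade_ops):
    """Partition top-level upgrade ops into (expand, contract) lists."""
    expand, contract = [], []
    for op in upgrade_ops.ops:
        target = expand if isinstance(op, _EXPAND_OP_TYPES) else contract
        target.append(op)
    return expand, contract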
import mock from sqlalchemy.orm import exc from neutron.db import portsecurity_db_common as pdc from neutron.extensions import portsecurity as psec from neutron.tests import base common = pdc.PortSecurityDbCommon class PortSecurityDbCommonTestCase(base.BaseTestCase): def setUp(self): super(PortSecurityDbCommonTestCase, self).setUp() self.common = common() def _test__get_security_binding_no_binding(self, getter): port_sec_enabled = True req = {psec.PORTSECURITY: port_sec_enabled} res = {} with mock.patch.object( self.common, '_model_query', create=True, side_effect=exc.NoResultFound): val = getter(req, res) self.assertEqual(port_sec_enabled, val) def test__get_port_security_binding_no_binding(self): self._test__get_security_binding_no_binding( self.common._get_port_security_binding) def test__get_network_security_binding_no_binding(self): self._test__get_security_binding_no_binding( self.common._get_network_security_binding) def _test__process_security_update_no_binding(self, creator, updater): req = {psec.PORTSECURITY: False} res = {} context = mock.Mock() with mock.patch.object( self.common, '_model_query', create=True, side_effect=exc.NoResultFound): updater(context, req, res) creator.assert_called_with(context, req, res) @mock.patch.object(common, '_process_port_port_security_create') def test__process_port_port_security_update_no_binding(self, creator): self._test__process_security_update_no_binding( creator, self.common._process_port_port_security_update) @mock.patch.object(common, '_process_network_port_security_create') def test__process_network_port_security_update_no_binding(self, creator): self._test__process_security_update_no_binding( creator, self.common._process_network_port_security_update) def test__extend_port_security_dict_no_port_security(self): for db_data in ({'port_security': None, 'name': 'net1'}, {}): response_data = {} self.common._extend_port_security_dict(response_data, db_data) self.assertTrue(response_data[psec.PORTSECURITY]) neutron-8.4.0/neutron/tests/unit/db/test_l3_db.py0000664000567000056710000002677713044372760023203 0ustar jenkinsjenkins00000000000000# Copyright 2015 Hewlett-Packard Development Company, L.P. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
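# --- Illustrative sketch, not part of the original suite ---
# test__extend_port_security_dict_no_port_security above pins down the
# fallback behaviour: a port or network row without a port-security
# binding is reported as having port security enabled. A hypothetical
# stand-alone equivalent of that default (the helper name and the dict
# shape are assumptions for illustration):
def port_security_enabled(db_data):
    # The binding is None (or absent) when no port-security binding row
    # exists; treat that as the secure default.
    binding = db_data.get('port_security')
    if binding is None:
        return True
    return binding['port_security_enabled']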
import mock import testtools from neutron.callbacks import events from neutron.callbacks import registry from neutron.callbacks import resources from neutron.common import exceptions as n_exc from neutron.db import l3_db from neutron.extensions import l3 from neutron import manager from neutron.tests import base class TestL3_NAT_dbonly_mixin(base.BaseTestCase): def setUp(self): super(TestL3_NAT_dbonly_mixin, self).setUp() self.db = l3_db.L3_NAT_dbonly_mixin() def test__each_port_having_fixed_ips_none(self): """Be sure the method returns an empty list when None is passed""" filtered = l3_db.L3_NAT_dbonly_mixin._each_port_having_fixed_ips(None) self.assertEqual([], list(filtered)) def test__each_port_having_fixed_ips(self): """Basic test that ports without fixed ips are filtered out""" ports = [{'id': 'a', 'fixed_ips': [mock.sentinel.fixedip]}, {'id': 'b'}] filtered = l3_db.L3_NAT_dbonly_mixin._each_port_having_fixed_ips(ports) ids = [p['id'] for p in filtered] self.assertEqual(['a'], ids) def test__get_subnets_by_network_no_query(self): """Basic test that no query is performed if no Ports are passed""" context = mock.Mock() with mock.patch.object(manager.NeutronManager, 'get_plugin') as get_p: self.db._get_subnets_by_network_list(context, []) self.assertFalse(context.session.query.called) self.assertFalse(get_p.called) def test__get_subnets_by_network(self): """Basic test that the right query is called""" context = mock.MagicMock() query = context.session.query().outerjoin().filter() query.__iter__.return_value = [(mock.sentinel.subnet_db, mock.sentinel.address_scope_id)] with mock.patch.object(manager.NeutronManager, 'get_plugin') as get_p: get_p()._make_subnet_dict.return_value = { 'network_id': mock.sentinel.network_id} subnets = self.db._get_subnets_by_network_list( context, [mock.sentinel.network_id]) self.assertEqual({ mock.sentinel.network_id: [{ 'address_scope_id': mock.sentinel.address_scope_id, 'network_id': mock.sentinel.network_id}]}, subnets) def test__populate_ports_for_subnets_none(self): """Basic test that the method runs correctly with no ports""" ports = [] with mock.patch.object(manager.NeutronManager, 'get_plugin') as get_p: get_p().get_networks.return_value = [] self.db._populate_mtu_and_subnets_for_ports(mock.sentinel.context, ports) self.assertEqual([], ports) @mock.patch.object(l3_db.L3_NAT_dbonly_mixin, '_get_subnets_by_network_list') def test__populate_ports_for_subnets(self, get_subnets_by_network): cidr = "2001:db8::/64" subnet = {'id': mock.sentinel.subnet_id, 'cidr': cidr, 'gateway_ip': mock.sentinel.gateway_ip, 'dns_nameservers': mock.sentinel.dns_nameservers, 'ipv6_ra_mode': mock.sentinel.ipv6_ra_mode, 'subnetpool_id': mock.sentinel.subnetpool_id, 'address_scope_id': mock.sentinel.address_scope_id} get_subnets_by_network.return_value = {'net_id': [subnet]} ports = [{'network_id': 'net_id', 'id': 'port_id', 'fixed_ips': [{'subnet_id': mock.sentinel.subnet_id}]}] with mock.patch.object(manager.NeutronManager, 'get_plugin') as get_p: get_p().get_networks.return_value = [{'id': 'net_id', 'mtu': 1446}] self.db._populate_mtu_and_subnets_for_ports(mock.sentinel.context, ports) keys = ('id', 'cidr', 'gateway_ip', 'ipv6_ra_mode', 'subnetpool_id', 'dns_nameservers') address_scopes = {4: None, 6: mock.sentinel.address_scope_id} self.assertEqual([{'extra_subnets': [], 'fixed_ips': [{'subnet_id': mock.sentinel.subnet_id, 'prefixlen': 64}], 'id': 'port_id', 'mtu': 1446, 'network_id': 'net_id', 'subnets': [{k: subnet[k] for k in keys}], 'address_scopes': address_scopes}], 
ports) def test__get_sync_floating_ips_no_query(self): """Basic test that no query is performed if no router ids are passed""" db = l3_db.L3_NAT_dbonly_mixin() context = mock.Mock() db._get_sync_floating_ips(context, []) self.assertFalse(context.session.query.called) @mock.patch.object(l3_db.L3_NAT_dbonly_mixin, '_make_floatingip_dict') def test__make_floatingip_dict_with_scope(self, make_fip_dict): db = l3_db.L3_NAT_dbonly_mixin() make_fip_dict.return_value = {'id': mock.sentinel.fip_ip} result = db._make_floatingip_dict_with_scope( mock.sentinel.floating_ip_db, mock.sentinel.address_scope_id) self.assertEqual({ 'fixed_ip_address_scope': mock.sentinel.address_scope_id, 'id': mock.sentinel.fip_ip}, result) def test__unique_floatingip_iterator(self): query = mock.MagicMock() query.order_by().__iter__.return_value = [ ({'id': 'id1'}, 'scope1'), ({'id': 'id1'}, 'scope1'), ({'id': 'id2'}, 'scope2'), ({'id': 'id2'}, 'scope2'), ({'id': 'id2'}, 'scope2'), ({'id': 'id3'}, 'scope3')] query.reset_mock() result = list( l3_db.L3_NAT_dbonly_mixin._unique_floatingip_iterator(query)) query.order_by.assert_called_once_with(l3_db.FloatingIP.id) self.assertEqual([({'id': 'id1'}, 'scope1'), ({'id': 'id2'}, 'scope2'), ({'id': 'id3'}, 'scope3')], result) @mock.patch.object(manager.NeutronManager, 'get_plugin') def test_prevent_l3_port_deletion_port_not_found(self, gp): # port not found doesn't prevent gp.return_value.get_port.side_effect = n_exc.PortNotFound(port_id='1') self.db.prevent_l3_port_deletion(None, None) @mock.patch.object(manager.NeutronManager, 'get_plugin') def test_prevent_l3_port_device_owner_not_router(self, gp): # ignores other device owners gp.return_value.get_port.return_value = {'device_owner': 'cat'} self.db.prevent_l3_port_deletion(None, None) @mock.patch.object(manager.NeutronManager, 'get_plugin') def test_prevent_l3_port_no_fixed_ips(self, gp): # without fixed IPs is allowed gp.return_value.get_port.return_value = { 'device_owner': 'network:router_interface', 'fixed_ips': [], 'id': 'f' } self.db.prevent_l3_port_deletion(None, None) @mock.patch.object(manager.NeutronManager, 'get_plugin') def test_prevent_l3_port_no_router(self, gp): # without router is allowed gp.return_value.get_port.return_value = { 'device_owner': 'network:router_interface', 'device_id': '44', 'id': 'f', 'fixed_ips': [{'ip_address': '1.1.1.1', 'subnet_id': '4'}]} self.db.get_router = mock.Mock() self.db.get_router.side_effect = l3.RouterNotFound(router_id='44') self.db.prevent_l3_port_deletion(mock.Mock(), None) @mock.patch.object(manager.NeutronManager, 'get_plugin') def test_prevent_l3_port_existing_router(self, gp): gp.return_value.get_port.return_value = { 'device_owner': 'network:router_interface', 'device_id': 'some_router', 'id': 'f', 'fixed_ips': [{'ip_address': '1.1.1.1', 'subnet_id': '4'}]} self.db.get_router = mock.Mock() with testtools.ExpectedException(n_exc.ServicePortInUse): self.db.prevent_l3_port_deletion(mock.Mock(), None) @mock.patch.object(manager.NeutronManager, 'get_plugin') def test_prevent_l3_port_existing_floating_ip(self, gp): gp.return_value.get_port.return_value = { 'device_owner': 'network:floatingip', 'device_id': 'some_flip', 'id': 'f', 'fixed_ips': [{'ip_address': '1.1.1.1', 'subnet_id': '4'}]} self.db.get_floatingip = mock.Mock() with testtools.ExpectedException(n_exc.ServicePortInUse): self.db.prevent_l3_port_deletion(mock.Mock(), None) @mock.patch.object(l3_db, '_notify_subnetpool_address_scope_update') def test_subscribe_address_scope_of_subnetpool(self, notify): 
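# l3_db.subscribe() wires _notify_subnetpool_address_scope_update into the
# callbacks registry, so firing AFTER_UPDATE for SUBNETPOOL_ADDRESS_SCOPE
# must reach the mocked notifier exactly once.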
l3_db.subscribe() registry.notify(resources.SUBNETPOOL_ADDRESS_SCOPE, events.AFTER_UPDATE, mock.ANY, context=mock.ANY, subnetpool_id='fake_id') notify.assert_called_once_with(resources.SUBNETPOOL_ADDRESS_SCOPE, events.AFTER_UPDATE, mock.ANY, context=mock.ANY, subnetpool_id='fake_id') class L3_NAT_db_mixin(base.BaseTestCase): def setUp(self): super(L3_NAT_db_mixin, self).setUp() self.db = l3_db.L3_NAT_db_mixin() def _test_create_router(self, external_gateway_info=None): router_db = l3_db.Router(id='123') router_dict = {'id': '123', 'tenant_id': '456', 'external_gateway_info': external_gateway_info} # Need to use a copy here as the create_router method pops the gateway # information router_input = {'router': router_dict.copy()} with mock.patch.object(l3_db.L3_NAT_dbonly_mixin, '_create_router_db', return_value=router_db) as crd,\ mock.patch.object(l3_db.L3_NAT_dbonly_mixin, '_make_router_dict', return_value=router_dict),\ mock.patch.object(l3_db.L3_NAT_dbonly_mixin, '_update_router_gw_info') as urgi,\ mock.patch.object(l3_db.L3_NAT_db_mixin, 'notify_router_updated')\ as nru: self.db.create_router(mock.ANY, router_input) self.assertTrue(crd.called) if external_gateway_info: self.assertTrue(urgi.called) self.assertTrue(nru.called) else: self.assertFalse(urgi.called) self.assertFalse(nru.called) def test_create_router_no_gateway(self): self._test_create_router() def test_create_router_gateway(self): ext_gateway_info = {'network_id': 'net-id', 'enable_snat': True, 'external_fixed_ips': [ {'subnet_id': 'subnet-id', 'ip_address': 'ip'}]} self._test_create_router(ext_gateway_info) neutron-8.4.0/neutron/tests/unit/db/test_bgp_db.py0000664000567000056710000017516613044372760023432 0ustar jenkinsjenkins00000000000000# Copyright 2016 Hewlett Packard Enterprise Development Company LP # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
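# --- Illustrative sketch, not part of the original suite ---
# test_subscribe_address_scope_of_subnetpool above exercises the
# neutron.callbacks publish/subscribe pattern. A minimal, hypothetical
# subscriber for the same resource/event pair (the handler name and its
# body are invented for illustration):
from neutron.callbacks import events
from neutron.callbacks import registry
from neutron.callbacks import resources

def _log_scope_change(resource, event, trigger, **kwargs):
    # kwargs carries whatever the notifier passed, e.g. subnetpool_id.
    print('%s/%s fired for subnetpool %s'
          % (resource, event, kwargs.get('subnetpool_id')))

registry.subscribe(_log_scope_change,
                   resources.SUBNETPOOL_ADDRESS_SCOPE,
                   events.AFTER_UPDATE)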
import contextlib import netaddr from oslo_config import cfg from oslo_utils import uuidutils from neutron.api.v2 import attributes as attrs from neutron.common import exceptions as n_exc from neutron.db import l3_dvr_ha_scheduler_db from neutron.db import l3_hamode_db from neutron.extensions import bgp from neutron.extensions import external_net from neutron.extensions import portbindings from neutron import manager from neutron.plugins.common import constants as p_const from neutron.services.bgp import bgp_plugin from neutron.tests.unit.extensions import test_l3 from neutron.tests.unit.plugins.ml2 import test_plugin _uuid = uuidutils.generate_uuid ADVERTISE_FIPS_KEY = 'advertise_floating_ip_host_routes' class TestL3Plugin(test_l3.TestL3NatAgentSchedulingServicePlugin, l3_hamode_db.L3_HA_NAT_db_mixin, l3_dvr_ha_scheduler_db.L3_DVR_HA_scheduler_db_mixin): pass class BgpEntityCreationMixin(object): @contextlib.contextmanager def bgp_speaker(self, ip_version, local_as, name='my-speaker', advertise_fip_host_routes=True, advertise_tenant_networks=True, networks=None, peers=None): data = {'ip_version': ip_version, ADVERTISE_FIPS_KEY: advertise_fip_host_routes, 'advertise_tenant_networks': advertise_tenant_networks, 'local_as': local_as, 'name': name} bgp_speaker = self.bgp_plugin.create_bgp_speaker(self.context, {'bgp_speaker': data}) bgp_speaker_id = bgp_speaker['id'] if networks: for network_id in networks: self.bgp_plugin.add_gateway_network( self.context, bgp_speaker_id, {'network_id': network_id}) if peers: for peer_id in peers: self.bgp_plugin.add_bgp_peer(self.context, bgp_speaker_id, {'bgp_peer_id': peer_id}) yield self.bgp_plugin.get_bgp_speaker(self.context, bgp_speaker_id) @contextlib.contextmanager def bgp_peer(self, tenant_id=_uuid(), remote_as='4321', peer_ip="192.168.1.1", auth_type="md5", password="my-secret", name="my-peer"): data = {'peer_ip': peer_ip, 'tenant_id': tenant_id, 'remote_as': remote_as, 'auth_type': auth_type, 'password': password, 'name': name} bgp_peer = self.bgp_plugin.create_bgp_peer(self.context, {'bgp_peer': data}) yield bgp_peer self.bgp_plugin.delete_bgp_peer(self.context, bgp_peer['id']) @contextlib.contextmanager def bgp_speaker_with_gateway_network(self, address_scope_id, local_as, advertise_fip_host_routes=True, advertise_tenant_networks=True, network_external=True, fmt=None, set_context=False): pass @contextlib.contextmanager def bgp_speaker_with_router(self, address_scope_id, local_as, gw_network_id=None, gw_subnet_ids=None, tenant_subnet_ids=None, advertise_fip_host_routes=True, advertise_tenant_networks=True, fmt=None, set_context=False, router_distributed=False): pass @contextlib.contextmanager def router(self, name='bgp-test-router', tenant_id=_uuid(), admin_state_up=True, **kwargs): request = {'router': {'tenant_id': tenant_id, 'name': name, 'admin_state_up': admin_state_up}} for arg in kwargs: request['router'][arg] = kwargs[arg] router = self.l3plugin.create_router(self.context, request) yield router @contextlib.contextmanager def router_with_external_and_tenant_networks( self, tenant_id=_uuid(), gw_prefix='8.8.8.0/24', tenant_prefix='192.168.0.0/16', address_scope=None, distributed=False, ha=False): prefixes = [gw_prefix, tenant_prefix] gw_ip_net = netaddr.IPNetwork(gw_prefix) tenant_ip_net = netaddr.IPNetwork(tenant_prefix) subnetpool_args = {'tenant_id': tenant_id, 'name': 'bgp-pool'} if address_scope: subnetpool_args['address_scope_id'] = address_scope['id'] with self.network() as ext_net, self.network() as int_net,\ 
self.subnetpool(prefixes, **subnetpool_args) as pool: subnetpool_id = pool['subnetpool']['id'] gw_net_id = ext_net['network']['id'] with self.subnet(ext_net, cidr=gw_prefix, subnetpool_id=subnetpool_id, ip_version=gw_ip_net.version),\ self.subnet(int_net, cidr=tenant_prefix, subnetpool_id=subnetpool_id, ip_version=tenant_ip_net.version) as int_subnet: self._update('networks', gw_net_id, {'network': {external_net.EXTERNAL: True}}) ext_gw_info = {'network_id': gw_net_id} with self.router(external_gateway_info=ext_gw_info, distributed=distributed, ha=ha) as router: router_id = router['id'] router_interface_info = {'subnet_id': int_subnet['subnet']['id']} self.l3plugin.add_router_interface(self.context, router_id, router_interface_info) yield router, ext_net, int_net class BgpTests(test_plugin.Ml2PluginV2TestCase, BgpEntityCreationMixin): fmt = 'json' def setup_parent(self): self.l3_plugin = ('neutron.tests.unit.db.test_bgp_db.' 'TestL3Plugin') super(BgpTests, self).setup_parent() def setUp(self): super(BgpTests, self).setUp() self.l3plugin = manager.NeutronManager.get_service_plugins().get( p_const.L3_ROUTER_NAT) self.bgp_plugin = bgp_plugin.BgpPlugin() self.plugin = manager.NeutronManager.get_plugin() self.l3plugin = manager.NeutronManager.get_service_plugins().get( p_const.L3_ROUTER_NAT) @contextlib.contextmanager def subnetpool_with_address_scope(self, ip_version, prefixes=None, shared=False, admin=True, name='test-pool', is_default_pool=False, tenant_id=None, **kwargs): if not tenant_id: tenant_id = _uuid() scope_data = {'tenant_id': tenant_id, 'ip_version': ip_version, 'shared': shared, 'name': name + '-scope'} address_scope = self.plugin.create_address_scope( self.context, {'address_scope': scope_data}) address_scope_id = address_scope['id'] pool_data = {'tenant_id': tenant_id, 'shared': shared, 'name': name, 'address_scope_id': address_scope_id, 'prefixes': prefixes, 'is_default': is_default_pool} for key in kwargs: pool_data[key] = kwargs[key] yield self.plugin.create_subnetpool(self.context, {'subnetpool': pool_data}) @contextlib.contextmanager def floatingip_from_address_scope_assoc(self, prefixes, address_scope_id, ext_prefixlen=24, int_prefixlen=24): pass def test_add_duplicate_bgp_peer_ip(self): peer_ip = '192.168.1.10' with self.bgp_peer(peer_ip=peer_ip) as peer1,\ self.bgp_peer(peer_ip=peer_ip) as peer2,\ self.subnetpool_with_address_scope(4, prefixes=['8.0.0.0/8']) as sp: with self.bgp_speaker(sp['ip_version'], 1234, peers=[peer1['id']]) as speaker: self.assertRaises(bgp.DuplicateBgpPeerIpException, self.bgp_plugin.add_bgp_peer, self.context, speaker['id'], {'bgp_peer_id': peer2['id']}) def test_bgpspeaker_create(self): with self.subnetpool_with_address_scope(4, prefixes=['8.0.0.0/8']) as sp: speaker_name = 'test-speaker' expected_values = [('ip_version', sp['ip_version']), ('name', speaker_name)] with self.bgp_speaker(sp['ip_version'], 1234, name=speaker_name) as bgp_speaker: for k, v in expected_values: self.assertEqual(v, bgp_speaker[k]) def test_bgp_speaker_list(self): with self.subnetpool_with_address_scope(4, prefixes=['8.0.0.0/8']) as sp1,\ self.subnetpool_with_address_scope(4, prefixes=['9.0.0.0/8']) as sp2: with self.bgp_speaker(sp1['ip_version'], 1234, name='speaker1'),\ self.bgp_speaker(sp2['ip_version'], 4321, name='speaker2'): speakers = self.bgp_plugin.get_bgp_speakers(self.context) self.assertEqual(2, len(speakers)) def test_bgp_speaker_update_local_as(self): local_as_1 = 1234 local_as_2 = 4321 with self.subnetpool_with_address_scope(4, prefixes=['8.0.0.0/8']) 
as sp: with self.bgp_speaker(sp['ip_version'], local_as_1) as speaker: self.assertEqual(local_as_1, speaker['local_as']) new_speaker = self.bgp_plugin.update_bgp_speaker( self.context, speaker['id'], {'bgp_speaker': {'local_as': local_as_2}}) self.assertEqual(local_as_2, new_speaker['local_as']) def test_bgp_speaker_show_non_existent(self): self.assertRaises(bgp.BgpSpeakerNotFound, self.bgp_plugin.get_bgp_speaker, self.context, _uuid()) def test_create_bgp_peer(self): args = {'tenant_id': _uuid(), 'remote_as': '1111', 'peer_ip': '10.10.10.10', 'auth_type': 'md5'} with self.bgp_peer(tenant_id=args['tenant_id'], remote_as=args['remote_as'], peer_ip=args['peer_ip'], auth_type='md5', password='my-secret') as peer: self.assertIsNone(peer.get('password')) for key in args: self.assertEqual(args[key], peer[key]) def test_update_bgp_peer_auth_type_none(self): args = {'tenant_id': _uuid(), 'remote_as': '1111', 'peer_ip': '10.10.10.10', 'auth_type': 'md5'} with self.bgp_peer(tenant_id=args['tenant_id'], remote_as=args['remote_as'], peer_ip=args['peer_ip'], auth_type='none', name="my-peer") as peer: data = {'bgp_peer': {'password': "my-secret", 'name': "my-peer1"}} self.assertRaises(bgp.BgpPeerNotAuthenticated, self.bgp_plugin.update_bgp_peer, self.context, peer['id'], data) def test_update_bgp_peer_password_none(self): args = {'tenant_id': _uuid(), 'remote_as': 1111, 'peer_ip': '10.10.10.10', 'name': 'my-peer', 'auth_type': 'none'} with self.bgp_peer(tenant_id=args['tenant_id'], remote_as=args['remote_as'], peer_ip=args['peer_ip'], auth_type=args['auth_type'], name=args['name']) as peer: data = {'bgp_peer': {'name': "my-peer1"}} updated_peer = self.bgp_plugin.update_bgp_peer(self.context, peer['id'], data) for key in args: if key == 'name': self.assertEqual('my-peer1', updated_peer[key]) else: self.assertEqual(peer[key], updated_peer[key]) def test_bgp_peer_show_non_existent(self): self.assertRaises(bgp.BgpPeerNotFound, self.bgp_plugin.get_bgp_peer, self.context, 'unreal-bgp-peer-id') def test_associate_bgp_peer(self): with self.bgp_peer() as peer,\ self.subnetpool_with_address_scope(4, prefixes=['8.0.0.0/8']) as sp: with self.bgp_speaker(sp['ip_version'], 1234) as speaker: self.bgp_plugin.add_bgp_peer(self.context, speaker['id'], {'bgp_peer_id': peer['id']}) new_speaker = self.bgp_plugin.get_bgp_speaker(self.context, speaker['id']) self.assertIn('peers', new_speaker) self.assertIn(peer['id'], new_speaker['peers']) self.assertEqual(1, len(new_speaker['peers'])) def test_remove_bgp_peer(self): with self.bgp_peer() as peer,\ self.subnetpool_with_address_scope(4, prefixes=['8.0.0.0/8']) as sp: with self.bgp_speaker(sp['ip_version'], 1234, peers=[peer['id']]) as speaker: self.bgp_plugin.remove_bgp_peer(self.context, speaker['id'], {'bgp_peer_id': peer['id']}) new_speaker = self.bgp_plugin.get_bgp_speaker(self.context, speaker['id']) self.assertIn('peers', new_speaker) self.assertTrue(not new_speaker['peers']) def test_remove_unassociated_bgp_peer(self): with self.bgp_peer() as peer,\ self.subnetpool_with_address_scope(4, prefixes=['8.0.0.0/8']) as sp: with self.bgp_speaker(sp['ip_version'], 1234) as speaker: self.assertRaises(bgp.BgpSpeakerPeerNotAssociated, self.bgp_plugin.remove_bgp_peer, self.context, speaker['id'], {'bgp_peer_id': peer['id']}) def test_remove_non_existent_bgp_peer(self): bgp_peer_id = "imaginary" with self.subnetpool_with_address_scope(4, prefixes=['8.0.0.0/8']) as sp: with self.bgp_speaker(sp['ip_version'], 1234) as speaker: self.assertRaises(bgp.BgpSpeakerPeerNotAssociated, 
self.bgp_plugin.remove_bgp_peer, self.context, speaker['id'], {'bgp_peer_id': bgp_peer_id}) def test_add_non_existent_bgp_peer(self): bgp_peer_id = "imaginary" with self.subnetpool_with_address_scope(4, prefixes=['8.0.0.0/8']) as sp: with self.bgp_speaker(sp['ip_version'], 1234) as speaker: self.assertRaises(bgp.BgpPeerNotFound, self.bgp_plugin.add_bgp_peer, self.context, speaker['id'], {'bgp_peer_id': bgp_peer_id}) def test_add_gateway_network(self): with self.subnetpool_with_address_scope(4, prefixes=['8.0.0.0/8']) as sp: with self.bgp_speaker(sp['ip_version'], 1234) as speaker,\ self.network() as network: network_id = network['network']['id'] self.bgp_plugin.add_gateway_network(self.context, speaker['id'], {'network_id': network_id}) new_speaker = self.bgp_plugin.get_bgp_speaker(self.context, speaker['id']) self.assertEqual(1, len(new_speaker['networks'])) self.assertTrue(network_id in new_speaker['networks']) def test_create_bgp_speaker_with_network(self): with self.subnetpool_with_address_scope(4, prefixes=['8.0.0.0/8']) as sp: network = self.plugin.create_network(self.context, {'network': {'name': 'test-net', 'tenant_id': _uuid(), 'admin_state_up': True, 'shared': True}}) with self.bgp_speaker(sp['ip_version'], 1234, networks=[network['id']]) as speaker: self.assertEqual(1, len(speaker['networks'])) self.assertTrue(network['id'] in speaker['networks']) def test_remove_gateway_network(self): with self.network() as network1,\ self.network() as network2,\ self.subnetpool_with_address_scope(4, prefixes=['8.0.0.0/8']) as sp: network1_id = network1['network']['id'] network2_id = network2['network']['id'] with self.bgp_speaker(sp['ip_version'], 1234, networks=[network1_id, network2_id]) as speaker: self.bgp_plugin.remove_gateway_network( self.context, speaker['id'], {'network_id': network1_id}) new_speaker = self.bgp_plugin.get_bgp_speaker(self.context, speaker['id']) self.assertEqual(1, len(new_speaker['networks'])) def test_add_non_existent_gateway_network(self): network_id = "imaginary" with self.subnetpool_with_address_scope(4, prefixes=['8.0.0.0/8']) as sp: with self.bgp_speaker(sp['ip_version'], 1234) as speaker: self.assertRaises(n_exc.NetworkNotFound, self.bgp_plugin.add_gateway_network, self.context, speaker['id'], {'network_id': network_id}) def test_remove_non_existent_gateway_network(self): network_id = "imaginary" with self.subnetpool_with_address_scope(4, prefixes=['8.0.0.0/8']) as sp: with self.bgp_speaker(sp['ip_version'], 1234) as speaker: self.assertRaises(bgp.BgpSpeakerNetworkNotAssociated, self.bgp_plugin.remove_gateway_network, self.context, speaker['id'], {'network_id': network_id}) def test_add_gateway_network_two_bgp_speakers_same_scope(self): with self.subnetpool_with_address_scope(4, prefixes=['8.0.0.0/8']) as sp: with self.bgp_speaker(sp['ip_version'], 1234) as speaker1,\ self.bgp_speaker(sp['ip_version'], 4321) as speaker2,\ self.network() as network: network_id = network['network']['id'] self.bgp_plugin.add_gateway_network(self.context, speaker1['id'], {'network_id': network_id}) self.bgp_plugin.add_gateway_network(self.context, speaker2['id'], {'network_id': network_id}) speaker1 = self.bgp_plugin.get_bgp_speaker(self.context, speaker1['id']) speaker2 = self.bgp_plugin.get_bgp_speaker(self.context, speaker2['id']) for speaker in [speaker1, speaker2]: self.assertEqual(1, len(speaker['networks'])) self.assertEqual(network_id, speaker['networks'][0]) def test_create_bgp_peer_md5_auth_no_password(self): bgp_peer = {'bgp_peer': {'auth_type': 'md5', 'password': 
                                                 None}}
        self.assertRaises(bgp.InvalidBgpPeerMd5Authentication,
                          self.bgp_plugin.create_bgp_peer,
                          self.context, bgp_peer)

    def test__get_address_scope_ids_for_bgp_speaker(self):
        prefixes1 = ['8.0.0.0/8']
        prefixes2 = ['9.0.0.0/8']
        prefixes3 = ['10.0.0.0/8']
        tenant_id = _uuid()
        with self.bgp_speaker(4, 1234) as speaker,\
                self.subnetpool_with_address_scope(
                    4, prefixes=prefixes1, tenant_id=tenant_id) as sp1,\
                self.subnetpool_with_address_scope(
                    4, prefixes=prefixes2, tenant_id=tenant_id) as sp2,\
                self.subnetpool_with_address_scope(
                    4, prefixes=prefixes3, tenant_id=tenant_id) as sp3,\
                self.network() as network1, self.network() as network2,\
                self.network() as network3:
            network1_id = network1['network']['id']
            network2_id = network2['network']['id']
            network3_id = network3['network']['id']
            base_subnet_data = {
                'allocation_pools': attrs.ATTR_NOT_SPECIFIED,
                'cidr': attrs.ATTR_NOT_SPECIFIED,
                'prefixlen': attrs.ATTR_NOT_SPECIFIED,
                'ip_version': 4,
                'enable_dhcp': True,
                'dns_nameservers': attrs.ATTR_NOT_SPECIFIED,
                'host_routes': attrs.ATTR_NOT_SPECIFIED}
            subnet1_data = {'network_id': network1_id,
                            'subnetpool_id': sp1['id'],
                            'name': 'subnet1',
                            'tenant_id': tenant_id}
            subnet2_data = {'network_id': network2_id,
                            'subnetpool_id': sp2['id'],
                            'name': 'subnet2',
                            'tenant_id': tenant_id}
            # The original also named this subnet 'subnet2', apparently a
            # copy-and-paste slip; 'subnet3' matches the rest of its data.
            subnet3_data = {'network_id': network3_id,
                            'subnetpool_id': sp3['id'],
                            'name': 'subnet3',
                            'tenant_id': tenant_id}
            for k in base_subnet_data:
                subnet1_data[k] = base_subnet_data[k]
                subnet2_data[k] = base_subnet_data[k]
                subnet3_data[k] = base_subnet_data[k]

            self.plugin.create_subnet(self.context, {'subnet': subnet1_data})
            self.plugin.create_subnet(self.context, {'subnet': subnet2_data})
            self.plugin.create_subnet(self.context, {'subnet': subnet3_data})
            self.bgp_plugin.add_gateway_network(self.context, speaker['id'],
                                                {'network_id': network1_id})
            self.bgp_plugin.add_gateway_network(self.context, speaker['id'],
                                                {'network_id': network2_id})
            scopes = self.bgp_plugin._get_address_scope_ids_for_bgp_speaker(
                self.context, speaker['id'])
            self.assertEqual(2, len(scopes))
            # assertIn reads better than assertTrue(x in y) and gives a
            # useful failure message (cf. hacking check N331).
            self.assertIn(sp1['address_scope_id'], scopes)
            self.assertIn(sp2['address_scope_id'], scopes)

    def test_get_routes_by_bgp_speaker_binding(self):
        gw_prefix = '172.16.10.0/24'
        tenant_prefix = '10.10.10.0/24'
        tenant_id = _uuid()
        scope_data = {'tenant_id': tenant_id, 'ip_version': 4,
                      'shared': True, 'name': 'bgp-scope'}
        scope = self.plugin.create_address_scope(
            self.context, {'address_scope': scope_data})
        with self.router_with_external_and_tenant_networks(
                tenant_id=tenant_id,
                gw_prefix=gw_prefix,
                tenant_prefix=tenant_prefix,
                address_scope=scope) as res:
            router, ext_net, int_net = res
            ext_gw_info = router['external_gateway_info']
            gw_net_id = ext_net['network']['id']
            with self.bgp_speaker(4, 1234, networks=[gw_net_id]) as speaker:
                bgp_speaker_id = speaker['id']
                routes = self.bgp_plugin.get_routes_by_bgp_speaker_binding(
                    self.context, bgp_speaker_id, gw_net_id)
                routes = list(routes)
                next_hop = ext_gw_info['external_fixed_ips'][0]['ip_address']
                self.assertEqual(1, len(routes))
                self.assertEqual(tenant_prefix, routes[0]['destination'])
                self.assertEqual(next_hop, routes[0]['next_hop'])

    def test_get_routes_by_binding_network(self):
        gw_prefix = '172.16.10.0/24'
        tenant_prefix = '10.10.10.0/24'
        tenant_id = _uuid()
        scope_data = {'tenant_id': tenant_id, 'ip_version': 4,
                      'shared': True, 'name': 'bgp-scope'}
        scope = self.plugin.create_address_scope(
            self.context, {'address_scope': scope_data})
        with self.router_with_external_and_tenant_networks(
                tenant_id=tenant_id,
                gw_prefix=gw_prefix,
tenant_prefix=tenant_prefix, address_scope=scope) as res: router, ext_net, int_net = res ext_gw_info = router['external_gateway_info'] gw_net_id = ext_net['network']['id'] with self.bgp_speaker(4, 1234, networks=[gw_net_id]) as speaker: bgp_speaker_id = speaker['id'] routes = self.bgp_plugin.get_routes_by_bgp_speaker_binding( self.context, bgp_speaker_id, gw_net_id) routes = list(routes) next_hop = ext_gw_info['external_fixed_ips'][0]['ip_address'] self.assertEqual(1, len(routes)) self.assertEqual(tenant_prefix, routes[0]['destination']) self.assertEqual(next_hop, routes[0]['next_hop']) def _advertised_routes_by_bgp_speaker(self, bgp_speaker_ip_version, local_as, tenant_cidr, gateway_cidr, fip_routes=True, router_distributed=False): tenant_id = _uuid() scope_data = {'tenant_id': tenant_id, 'ip_version': bgp_speaker_ip_version, 'shared': True, 'name': 'bgp-scope'} scope = self.plugin.create_address_scope( self.context, {'address_scope': scope_data}) with self.router_with_external_and_tenant_networks( tenant_id=tenant_id, gw_prefix=gateway_cidr, tenant_prefix=tenant_cidr, address_scope=scope, distributed=router_distributed) as res: router, ext_net, int_net = res gw_net_id = ext_net['network']['id'] with self.bgp_speaker( bgp_speaker_ip_version, local_as, networks=[gw_net_id], advertise_fip_host_routes=fip_routes) as speaker: routes = self.bgp_plugin.get_advertised_routes( self.context, speaker['id']) return routes['advertised_routes'] def test__tenant_prefixes_by_router_no_gateway_port(self): with self.network() as net1, self.network() as net2,\ self.subnetpool_with_address_scope(6, tenant_id='test-tenant', prefixes=['2001:db8::/63']) as pool: subnetpool_id = pool['id'] with self.subnet(network=net1, cidr=None, subnetpool_id=subnetpool_id, ip_version=6) as ext_subnet,\ self.subnet(network=net2, cidr=None, subnetpool_id=subnetpool_id, ip_version=6) as int_subnet,\ self.router() as router: router_id = router['id'] int_subnet_id = int_subnet['subnet']['id'] ext_subnet_id = ext_subnet['subnet']['id'] self.l3plugin.add_router_interface(self.context, router_id, {'subnet_id': int_subnet_id}) self.l3plugin.add_router_interface(self.context, router_id, {'subnet_id': ext_subnet_id}) with self.bgp_speaker(6, 1234) as speaker: bgp_speaker_id = speaker['id'] cidrs = list(self.bgp_plugin._tenant_prefixes_by_router( self.context, router_id, bgp_speaker_id)) self.assertFalse(cidrs) def test_get_ipv6_tenant_subnet_routes_by_bgp_speaker_ipv6(self): tenant_cidr = '2001:db8::/64' binding_cidr = '2001:ab8::/64' routes = self._advertised_routes_by_bgp_speaker(6, 1234, tenant_cidr, binding_cidr) self.assertEqual(1, len(routes)) dest_prefix = routes[0]['destination'] next_hop = routes[0]['next_hop'] self.assertEqual(tenant_cidr, dest_prefix) self.assertTrue(netaddr.IPSet([binding_cidr]).__contains__(next_hop)) def test_get_ipv4_tenant_subnet_routes_by_bgp_speaker_ipv4(self): tenant_cidr = '172.16.10.0/24' binding_cidr = '20.10.1.0/24' routes = self._advertised_routes_by_bgp_speaker(4, 1234, tenant_cidr, binding_cidr) routes = list(routes) self.assertEqual(1, len(routes)) dest_prefix = routes[0]['destination'] next_hop = routes[0]['next_hop'] self.assertEqual(tenant_cidr, dest_prefix) self.assertTrue(netaddr.IPSet([binding_cidr]).__contains__(next_hop)) def test_get_ipv4_tenant_subnet_routes_by_bgp_speaker_dvr_router(self): tenant_cidr = '172.16.10.0/24' binding_cidr = '20.10.1.0/24' routes = self._advertised_routes_by_bgp_speaker( 4, 1234, tenant_cidr, binding_cidr, router_distributed=True) routes = list(routes) 
self.assertEqual(1, len(routes)) def test_all_routes_by_bgp_speaker_different_tenant_address_scope(self): binding_cidr = '2001:db8::/64' tenant_cidr = '2002:ab8::/64' with self.subnetpool_with_address_scope(6, tenant_id='test-tenant', prefixes=[binding_cidr]) as ext_pool,\ self.subnetpool_with_address_scope(6, tenant_id='test-tenant', prefixes=[tenant_cidr]) as int_pool,\ self.network() as ext_net, self.network() as int_net: gw_net_id = ext_net['network']['id'] ext_pool_id = ext_pool['id'] int_pool_id = int_pool['id'] self._update('networks', gw_net_id, {'network': {external_net.EXTERNAL: True}}) with self.subnet(cidr=None, subnetpool_id=ext_pool_id, network=ext_net, ip_version=6) as ext_subnet,\ self.subnet(cidr=None, subnetpool_id=int_pool_id, network=int_net, ip_version=6) as int_subnet,\ self.router() as router: router_id = router['id'] int_subnet_id = int_subnet['subnet']['id'] ext_subnet_id = ext_subnet['subnet']['id'] self.l3plugin.add_router_interface(self.context, router_id, {'subnet_id': int_subnet_id}) self.l3plugin.add_router_interface(self.context, router_id, {'subnet_id': ext_subnet_id}) with self.bgp_speaker(6, 1234, networks=[gw_net_id]) as speaker: bgp_speaker_id = speaker['id'] cidrs = self.bgp_plugin.get_routes_by_bgp_speaker_id( self.context, bgp_speaker_id) self.assertEqual(0, len(list(cidrs))) def test__get_routes_by_router_with_fip(self): gw_prefix = '172.16.10.0/24' tenant_prefix = '10.10.10.0/24' tenant_id = _uuid() scope_data = {'tenant_id': tenant_id, 'ip_version': 4, 'shared': True, 'name': 'bgp-scope'} scope = self.plugin.create_address_scope( self.context, {'address_scope': scope_data}) with self.router_with_external_and_tenant_networks( tenant_id=tenant_id, gw_prefix=gw_prefix, tenant_prefix=tenant_prefix, address_scope=scope) as res: router, ext_net, int_net = res ext_gw_info = router['external_gateway_info'] gw_net_id = ext_net['network']['id'] tenant_net_id = int_net['network']['id'] fixed_port_data = {'port': {'name': 'test', 'network_id': tenant_net_id, 'tenant_id': tenant_id, 'admin_state_up': True, 'device_id': _uuid(), 'device_owner': 'compute:nova', 'mac_address': attrs.ATTR_NOT_SPECIFIED, 'fixed_ips': attrs.ATTR_NOT_SPECIFIED}} fixed_port = self.plugin.create_port(self.context, fixed_port_data) fip_data = {'floatingip': {'floating_network_id': gw_net_id, 'tenant_id': tenant_id, 'port_id': fixed_port['id']}} fip = self.l3plugin.create_floatingip(self.context, fip_data) fip_prefix = fip['floating_ip_address'] + '/32' with self.bgp_speaker(4, 1234, networks=[gw_net_id]) as speaker: bgp_speaker_id = speaker['id'] routes = self.bgp_plugin._get_routes_by_router(self.context, router['id']) routes = routes[bgp_speaker_id] next_hop = ext_gw_info['external_fixed_ips'][0]['ip_address'] self.assertEqual(2, len(routes)) tenant_prefix_found = False fip_prefix_found = False for route in routes: self.assertEqual(next_hop, route['next_hop']) if route['destination'] == tenant_prefix: tenant_prefix_found = True if route['destination'] == fip_prefix: fip_prefix_found = True self.assertTrue(tenant_prefix_found) self.assertTrue(fip_prefix_found) def test_get_routes_by_bgp_speaker_id_with_fip(self): gw_prefix = '172.16.10.0/24' tenant_prefix = '10.10.10.0/24' tenant_id = _uuid() scope_data = {'tenant_id': tenant_id, 'ip_version': 4, 'shared': True, 'name': 'bgp-scope'} scope = self.plugin.create_address_scope( self.context, {'address_scope': scope_data}) with self.router_with_external_and_tenant_networks( tenant_id=tenant_id, gw_prefix=gw_prefix, 
tenant_prefix=tenant_prefix, address_scope=scope) as res: router, ext_net, int_net = res ext_gw_info = router['external_gateway_info'] gw_net_id = ext_net['network']['id'] tenant_net_id = int_net['network']['id'] fixed_port_data = {'port': {'name': 'test', 'network_id': tenant_net_id, 'tenant_id': tenant_id, 'admin_state_up': True, 'device_id': _uuid(), 'device_owner': 'compute:nova', 'mac_address': attrs.ATTR_NOT_SPECIFIED, 'fixed_ips': attrs.ATTR_NOT_SPECIFIED}} fixed_port = self.plugin.create_port(self.context, fixed_port_data) fip_data = {'floatingip': {'floating_network_id': gw_net_id, 'tenant_id': tenant_id, 'port_id': fixed_port['id']}} fip = self.l3plugin.create_floatingip(self.context, fip_data) fip_prefix = fip['floating_ip_address'] + '/32' with self.bgp_speaker(4, 1234, networks=[gw_net_id]) as speaker: bgp_speaker_id = speaker['id'] routes = self.bgp_plugin.get_routes_by_bgp_speaker_id( self.context, bgp_speaker_id) routes = list(routes) next_hop = ext_gw_info['external_fixed_ips'][0]['ip_address'] self.assertEqual(2, len(routes)) tenant_prefix_found = False fip_prefix_found = False for route in routes: self.assertEqual(next_hop, route['next_hop']) if route['destination'] == tenant_prefix: tenant_prefix_found = True if route['destination'] == fip_prefix: fip_prefix_found = True self.assertTrue(tenant_prefix_found) self.assertTrue(fip_prefix_found) def test_get_routes_by_bgp_speaker_id_with_fip_dvr(self): gw_prefix = '172.16.10.0/24' tenant_prefix = '10.10.10.0/24' tenant_id = _uuid() scope_data = {'tenant_id': tenant_id, 'ip_version': 4, 'shared': True, 'name': 'bgp-scope'} scope = self.plugin.create_address_scope( self.context, {'address_scope': scope_data}) with self.router_with_external_and_tenant_networks( tenant_id=tenant_id, gw_prefix=gw_prefix, tenant_prefix=tenant_prefix, address_scope=scope, distributed=True) as res: router, ext_net, int_net = res ext_gw_info = router['external_gateway_info'] gw_net_id = ext_net['network']['id'] tenant_net_id = int_net['network']['id'] fixed_port_data = {'port': {'name': 'test', 'network_id': tenant_net_id, 'tenant_id': tenant_id, 'admin_state_up': True, 'device_id': _uuid(), 'device_owner': 'compute:nova', 'mac_address': attrs.ATTR_NOT_SPECIFIED, 'fixed_ips': attrs.ATTR_NOT_SPECIFIED, portbindings.HOST_ID: 'test-host'}} fixed_port = self.plugin.create_port(self.context, fixed_port_data) self.plugin._create_or_update_agent(self.context, {'agent_type': 'L3 agent', 'host': 'test-host', 'binary': 'neutron-l3-agent', 'topic': 'test'}) fip_gw = self.l3plugin.create_fip_agent_gw_port_if_not_exists( self.context, gw_net_id, 'test-host') fip_data = {'floatingip': {'floating_network_id': gw_net_id, 'tenant_id': tenant_id, 'port_id': fixed_port['id']}} fip = self.l3plugin.create_floatingip(self.context, fip_data) fip_prefix = fip['floating_ip_address'] + '/32' with self.bgp_speaker(4, 1234, networks=[gw_net_id]) as speaker: bgp_speaker_id = speaker['id'] routes = self.bgp_plugin.get_routes_by_bgp_speaker_id( self.context, bgp_speaker_id) routes = list(routes) cvr_gw_ip = ext_gw_info['external_fixed_ips'][0]['ip_address'] dvr_gw_ip = fip_gw['fixed_ips'][0]['ip_address'] self.assertEqual(2, len(routes)) tenant_route_verified = False fip_route_verified = False for route in routes: if route['destination'] == tenant_prefix: self.assertEqual(cvr_gw_ip, route['next_hop']) tenant_route_verified = True if route['destination'] == fip_prefix: self.assertEqual(dvr_gw_ip, route['next_hop']) fip_route_verified = True self.assertTrue(tenant_route_verified) 
self.assertTrue(fip_route_verified) def test__get_dvr_fip_host_routes_by_binding(self): gw_prefix = '172.16.10.0/24' tenant_prefix = '10.10.10.0/24' tenant_id = _uuid() scope_data = {'tenant_id': tenant_id, 'ip_version': 4, 'shared': True, 'name': 'bgp-scope'} scope = self.plugin.create_address_scope( self.context, {'address_scope': scope_data}) with self.router_with_external_and_tenant_networks( tenant_id=tenant_id, gw_prefix=gw_prefix, tenant_prefix=tenant_prefix, address_scope=scope, distributed=True) as res: router, ext_net, int_net = res gw_net_id = ext_net['network']['id'] tenant_net_id = int_net['network']['id'] fixed_port_data = {'port': {'name': 'test', 'network_id': tenant_net_id, 'tenant_id': tenant_id, 'admin_state_up': True, 'device_id': _uuid(), 'device_owner': 'compute:nova', 'mac_address': attrs.ATTR_NOT_SPECIFIED, 'fixed_ips': attrs.ATTR_NOT_SPECIFIED, portbindings.HOST_ID: 'test-host'}} fixed_port = self.plugin.create_port(self.context, fixed_port_data) self.plugin._create_or_update_agent(self.context, {'agent_type': 'L3 agent', 'host': 'test-host', 'binary': 'neutron-l3-agent', 'topic': 'test'}) fip_gw = self.l3plugin.create_fip_agent_gw_port_if_not_exists( self.context, gw_net_id, 'test-host') fip_data = {'floatingip': {'floating_network_id': gw_net_id, 'tenant_id': tenant_id, 'port_id': fixed_port['id']}} fip = self.l3plugin.create_floatingip(self.context, fip_data) fip_prefix = fip['floating_ip_address'] + '/32' with self.bgp_speaker(4, 1234, networks=[gw_net_id]) as speaker: bgp_speaker_id = speaker['id'] routes = self.bgp_plugin._get_dvr_fip_host_routes_by_binding( self.context, gw_net_id, bgp_speaker_id) routes = list(routes) dvr_gw_ip = fip_gw['fixed_ips'][0]['ip_address'] self.assertEqual(1, len(routes)) self.assertEqual(dvr_gw_ip, routes[0]['next_hop']) self.assertEqual(fip_prefix, routes[0]['destination']) def test__get_dvr_fip_host_routes_by_router(self): gw_prefix = '172.16.10.0/24' tenant_prefix = '10.10.10.0/24' tenant_id = _uuid() scope_data = {'tenant_id': tenant_id, 'ip_version': 4, 'shared': True, 'name': 'bgp-scope'} scope = self.plugin.create_address_scope( self.context, {'address_scope': scope_data}) with self.router_with_external_and_tenant_networks( tenant_id=tenant_id, gw_prefix=gw_prefix, tenant_prefix=tenant_prefix, address_scope=scope, distributed=True) as res: router, ext_net, int_net = res gw_net_id = ext_net['network']['id'] tenant_net_id = int_net['network']['id'] fixed_port_data = {'port': {'name': 'test', 'network_id': tenant_net_id, 'tenant_id': tenant_id, 'admin_state_up': True, 'device_id': _uuid(), 'device_owner': 'compute:nova', 'mac_address': attrs.ATTR_NOT_SPECIFIED, 'fixed_ips': attrs.ATTR_NOT_SPECIFIED, portbindings.HOST_ID: 'test-host'}} fixed_port = self.plugin.create_port(self.context, fixed_port_data) self.plugin._create_or_update_agent(self.context, {'agent_type': 'L3 agent', 'host': 'test-host', 'binary': 'neutron-l3-agent', 'topic': 'test'}) fip_gw = self.l3plugin.create_fip_agent_gw_port_if_not_exists( self.context, gw_net_id, 'test-host') fip_data = {'floatingip': {'floating_network_id': gw_net_id, 'tenant_id': tenant_id, 'port_id': fixed_port['id']}} fip = self.l3plugin.create_floatingip(self.context, fip_data) fip_prefix = fip['floating_ip_address'] + '/32' with self.bgp_speaker(4, 1234, networks=[gw_net_id]) as speaker: bgp_speaker_id = speaker['id'] routes = self.bgp_plugin._get_dvr_fip_host_routes_by_router( self.context, bgp_speaker_id, router['id']) routes = list(routes) dvr_gw_ip = 
fip_gw['fixed_ips'][0]['ip_address'] self.assertEqual(1, len(routes)) self.assertEqual(dvr_gw_ip, routes[0]['next_hop']) self.assertEqual(fip_prefix, routes[0]['destination']) def test_get_routes_by_bgp_speaker_binding_with_fip(self): gw_prefix = '172.16.10.0/24' tenant_prefix = '10.10.10.0/24' tenant_id = _uuid() scope_data = {'tenant_id': tenant_id, 'ip_version': 4, 'shared': True, 'name': 'bgp-scope'} scope = self.plugin.create_address_scope( self.context, {'address_scope': scope_data}) with self.router_with_external_and_tenant_networks( tenant_id=tenant_id, gw_prefix=gw_prefix, tenant_prefix=tenant_prefix, address_scope=scope) as res: router, ext_net, int_net = res ext_gw_info = router['external_gateway_info'] gw_net_id = ext_net['network']['id'] tenant_net_id = int_net['network']['id'] fixed_port_data = {'port': {'name': 'test', 'network_id': tenant_net_id, 'tenant_id': tenant_id, 'admin_state_up': True, 'device_id': _uuid(), 'device_owner': 'compute:nova', 'mac_address': attrs.ATTR_NOT_SPECIFIED, 'fixed_ips': attrs.ATTR_NOT_SPECIFIED}} fixed_port = self.plugin.create_port(self.context, fixed_port_data) fip_data = {'floatingip': {'floating_network_id': gw_net_id, 'tenant_id': tenant_id, 'port_id': fixed_port['id']}} fip = self.l3plugin.create_floatingip(self.context, fip_data) fip_prefix = fip['floating_ip_address'] + '/32' with self.bgp_speaker(4, 1234, networks=[gw_net_id]) as speaker: bgp_speaker_id = speaker['id'] routes = self.bgp_plugin.get_routes_by_bgp_speaker_binding( self.context, bgp_speaker_id, gw_net_id) routes = list(routes) next_hop = ext_gw_info['external_fixed_ips'][0]['ip_address'] self.assertEqual(2, len(routes)) tenant_prefix_found = False fip_prefix_found = False for route in routes: self.assertEqual(next_hop, route['next_hop']) if route['destination'] == tenant_prefix: tenant_prefix_found = True if route['destination'] == fip_prefix: fip_prefix_found = True self.assertTrue(tenant_prefix_found) self.assertTrue(fip_prefix_found) def test__bgp_speakers_for_gateway_network_by_ip_version(self): with self.network() as ext_net, self.bgp_speaker(6, 1234) as s1,\ self.bgp_speaker(6, 4321) as s2: gw_net_id = ext_net['network']['id'] self._update('networks', gw_net_id, {'network': {external_net.EXTERNAL: True}}) self.bgp_plugin.add_gateway_network(self.context, s1['id'], {'network_id': gw_net_id}) self.bgp_plugin.add_gateway_network(self.context, s2['id'], {'network_id': gw_net_id}) speakers = self.bgp_plugin._bgp_speakers_for_gw_network_by_family( self.context, gw_net_id, 6) self.assertEqual(2, len(speakers)) def test__bgp_speakers_for_gateway_network_by_ip_version_no_binding(self): with self.network() as ext_net, self.bgp_speaker(6, 1234),\ self.bgp_speaker(6, 4321): gw_net_id = ext_net['network']['id'] self._update('networks', gw_net_id, {'network': {external_net.EXTERNAL: True}}) speakers = self.bgp_plugin._bgp_speakers_for_gw_network_by_family( self.context, gw_net_id, 6) self.assertTrue(not speakers) def _create_scenario_test_l3_agents(self, agent_confs): for item in agent_confs: self.plugin._create_or_update_agent( self.context, {'agent_type': 'L3 agent', 'host': item['host'], 'binary': 'neutron-l3-agent', 'topic': 'test', 'configurations': {"agent_mode": item['mode']}}) def _create_scenario_test_ports(self, tenant_id, port_configs): ports = [] for item in port_configs: port_data = { 'port': {'name': 'test1', 'network_id': item['net_id'], 'tenant_id': tenant_id, 'admin_state_up': True, 'device_id': _uuid(), 'device_owner': 'compute:nova', 'mac_address': 
attrs.ATTR_NOT_SPECIFIED, 'fixed_ips': attrs.ATTR_NOT_SPECIFIED, portbindings.HOST_ID: item['host']}} port = self.plugin.create_port(self.context, port_data) ports.append(port) return ports def _create_scenario_test_fips(self, ext_net_id, tenant_id, port_ids): for port_id in port_ids: fip_data = {'floatingip': {'floating_network_id': ext_net_id, 'tenant_id': tenant_id, 'port_id': port_id}} self.l3plugin.create_floatingip(self.context, fip_data) def _test_legacy_router_fips_next_hop(self, router_ha=False): if router_ha: cfg.CONF.set_override('l3_ha', True) cfg.CONF.set_override('max_l3_agents_per_router', 2) cfg.CONF.set_override('min_l3_agents_per_router', 2) gw_prefix = '172.16.10.0/24' tenant_prefix = '10.10.10.0/24' tenant_id = _uuid() agent_confs = [{"host": "compute1", "mode": "dvr"}, {"host": "compute2", "mode": "dvr"}, {"host": "network1", "mode": "dvr_snat"}] if router_ha: agent_confs.append({"host": "network2", "mode": "dvr_snat"}) self._create_scenario_test_l3_agents(agent_confs) with self.router_with_external_and_tenant_networks( tenant_id=tenant_id, gw_prefix=gw_prefix, tenant_prefix=tenant_prefix, ha=router_ha) as res: router, ext_net, int_net = res gw_net_id = ext_net['network']['id'] ext_gw_info = router['external_gateway_info'] self.l3plugin.create_fip_agent_gw_port_if_not_exists( self.context, gw_net_id, 'compute1') self.l3plugin.create_fip_agent_gw_port_if_not_exists( self.context, gw_net_id, 'compute2') port_configs = [{'net_id': int_net['network']['id'], 'host': 'compute1'}, {'net_id': int_net['network']['id'], 'host': 'compute2'}] ports = self._create_scenario_test_ports(tenant_id, port_configs) port_ids = [port['id'] for port in ports] self._create_scenario_test_fips(gw_net_id, tenant_id, port_ids) next_hop = ext_gw_info[ 'external_fixed_ips'][0]['ip_address'] with self.bgp_speaker(4, 1234, networks=[gw_net_id]) as speaker: bgp_speaker_id = speaker['id'] routes = self.bgp_plugin.get_routes_by_bgp_speaker_id( self.context, bgp_speaker_id) routes = list(routes) self.assertEqual(2, len(routes)) for route in routes: self.assertEqual(next_hop, route['next_hop']) def test_legacy_router_fips_has_no_next_hop_to_fip_agent_gateway(self): self._test_legacy_router_fips_next_hop() def test_ha_router_fips_has_no_next_hop_to_fip_agent_gateway(self): self._test_legacy_router_fips_next_hop(router_ha=True) neutron-8.4.0/neutron/tests/unit/db/test_ipam_non_pluggable_backend.py0000664000567000056710000002257313044372760027477 0ustar jenkinsjenkins00000000000000# Copyright (c) 2012 OpenStack Foundation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
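# ---------------------------------------------------------------------------
# Editor's note: the tests in this module exercise
# _rebuild_availability_ranges(), which recomputes the free ranges of each
# allocation pool after some addresses have been handed out.  The sketch
# below is NOT neutron code; it is a minimal, self-contained illustration
# (using only netaddr, and walking every address, so sensible only for the
# small pools used in the tests) of the same range-splitting idea.
import netaddr


def rebuild_free_ranges(first_ip, last_ip, allocated):
    """Yield (first, last) string pairs of still-free addresses."""
    taken = set(netaddr.IPAddress(ip) for ip in allocated)
    start = prev = None
    for ip in netaddr.IPRange(first_ip, last_ip):
        if ip in taken:
            if start is not None:
                # An allocated address closes the current free run.
                yield str(start), str(prev)
                start = None
        elif start is None:
            start = ip
        prev = ip
    if start is not None:
        yield str(start), str(prev)


# Matches the expectations of test_rebuild_availability_ranges below:
assert list(rebuild_free_ranges(
    '192.168.1.3', '192.168.1.10',
    ['192.168.1.3', '192.168.1.4', '192.168.1.7'])) == [
        ('192.168.1.5', '192.168.1.6'), ('192.168.1.8', '192.168.1.10')]
# ---------------------------------------------------------------------------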
import mock from oslo_config import cfg from neutron.api.v2 import attributes from neutron.common import constants from neutron.common import exceptions as n_exc from neutron.common import ipv6_utils from neutron.db import db_base_plugin_common from neutron.db import db_base_plugin_v2 from neutron.db import ipam_non_pluggable_backend as non_ipam from neutron.db import models_v2 from neutron.tests import base class TestIpamNonPluggableBackend(base.BaseTestCase): """Unit Tests for non pluggable IPAM Logic.""" def test_generate_ip(self): with mock.patch.object(non_ipam.IpamNonPluggableBackend, '_try_generate_ip') as generate: with mock.patch.object(non_ipam.IpamNonPluggableBackend, '_rebuild_availability_ranges') as rebuild: non_ipam.IpamNonPluggableBackend._generate_ip('c', 's') generate.assert_called_once_with('c', 's') self.assertEqual(0, rebuild.call_count) def test_generate_ip_exhausted_pool(self): with mock.patch.object(non_ipam.IpamNonPluggableBackend, '_try_generate_ip') as generate: with mock.patch.object(non_ipam.IpamNonPluggableBackend, '_rebuild_availability_ranges') as rebuild: exception = n_exc.IpAddressGenerationFailure(net_id='n') # fail first call but not second generate.side_effect = [exception, None] non_ipam.IpamNonPluggableBackend._generate_ip('c', 's') self.assertEqual(2, generate.call_count) rebuild.assert_called_once_with('c', 's') def _validate_rebuild_availability_ranges(self, pools, allocations, expected): ip_qry = mock.Mock() ip_qry.with_lockmode.return_value = ip_qry ip_qry.filter_by.return_value = allocations pool_qry = mock.Mock() pool_qry.options.return_value = pool_qry pool_qry.with_lockmode.return_value = pool_qry pool_qry.filter_by.return_value = pools def return_queries_side_effect(*args, **kwargs): if args[0] == models_v2.IPAllocation: return ip_qry if args[0] == models_v2.IPAllocationPool: return pool_qry context = mock.Mock() context.session.query.side_effect = return_queries_side_effect subnets = [mock.MagicMock()] non_ipam.IpamNonPluggableBackend._rebuild_availability_ranges( context, subnets) actual = [[args[0].allocation_pool_id, args[0].first_ip, args[0].last_ip] for _name, args, _kwargs in context.session.add.mock_calls] self.assertEqual(expected, actual) def test_rebuild_availability_ranges(self): pools = [{'id': 'a', 'first_ip': '192.168.1.3', 'last_ip': '192.168.1.10'}, {'id': 'b', 'first_ip': '192.168.1.100', 'last_ip': '192.168.1.120'}] allocations = [{'ip_address': '192.168.1.3'}, {'ip_address': '192.168.1.78'}, {'ip_address': '192.168.1.7'}, {'ip_address': '192.168.1.110'}, {'ip_address': '192.168.1.11'}, {'ip_address': '192.168.1.4'}, {'ip_address': '192.168.1.111'}] expected = [['a', '192.168.1.5', '192.168.1.6'], ['a', '192.168.1.8', '192.168.1.10'], ['b', '192.168.1.100', '192.168.1.109'], ['b', '192.168.1.112', '192.168.1.120']] self._validate_rebuild_availability_ranges(pools, allocations, expected) def test_rebuild_ipv6_availability_ranges(self): pools = [{'id': 'a', 'first_ip': '2001::1', 'last_ip': '2001::50'}, {'id': 'b', 'first_ip': '2001::100', 'last_ip': '2001::ffff:ffff:ffff:fffe'}] allocations = [{'ip_address': '2001::10'}, {'ip_address': '2001::45'}, {'ip_address': '2001::60'}, {'ip_address': '2001::111'}, {'ip_address': '2001::200'}, {'ip_address': '2001::ffff:ffff:ffff:ff10'}, {'ip_address': '2001::ffff:ffff:ffff:f2f0'}] expected = [['a', '2001::1', '2001::f'], ['a', '2001::11', '2001::44'], ['a', '2001::46', '2001::50'], ['b', '2001::100', '2001::110'], ['b', '2001::112', '2001::1ff'], ['b', '2001::201', 
'2001::ffff:ffff:ffff:f2ef'], ['b', '2001::ffff:ffff:ffff:f2f1', '2001::ffff:ffff:ffff:ff0f'], ['b', '2001::ffff:ffff:ffff:ff11', '2001::ffff:ffff:ffff:fffe']] self._validate_rebuild_availability_ranges(pools, allocations, expected) def _test__allocate_ips_for_port(self, subnets, port, expected): # this test is incompatible with pluggable ipam, because subnets # were not actually created, so no ipam_subnet exists cfg.CONF.set_override("ipam_driver", None) plugin = db_base_plugin_v2.NeutronDbPluginV2() with mock.patch.object(db_base_plugin_common.DbBasePluginCommon, '_get_subnets') as get_subnets: with mock.patch.object(non_ipam.IpamNonPluggableBackend, '_check_unique_ip') as check_unique: context = mock.Mock() get_subnets.return_value = subnets check_unique.return_value = True actual = plugin.ipam._allocate_ips_for_port(context, port) self.assertEqual(expected, actual) def test__allocate_ips_for_port_2_slaac_subnets(self): subnets = [ { 'cidr': u'2001:100::/64', 'enable_dhcp': True, 'gateway_ip': u'2001:100::1', 'id': u'd1a28edd-bd83-480a-bd40-93d036c89f13', 'network_id': 'fbb9b578-95eb-4b79-a116-78e5c4927176', 'ip_version': 6, 'ipv6_address_mode': None, 'ipv6_ra_mode': u'slaac'}, { 'cidr': u'2001:200::/64', 'enable_dhcp': True, 'gateway_ip': u'2001:200::1', 'id': u'dc813d3d-ed66-4184-8570-7325c8195e28', 'network_id': 'fbb9b578-95eb-4b79-a116-78e5c4927176', 'ip_version': 6, 'ipv6_address_mode': None, 'ipv6_ra_mode': u'slaac'}] port = {'port': { 'network_id': 'fbb9b578-95eb-4b79-a116-78e5c4927176', 'fixed_ips': attributes.ATTR_NOT_SPECIFIED, 'mac_address': '12:34:56:78:44:ab', 'device_owner': 'compute'}} expected = [] for subnet in subnets: addr = str(ipv6_utils.get_ipv6_addr_by_EUI64( subnet['cidr'], port['port']['mac_address'])) expected.append({'ip_address': addr, 'subnet_id': subnet['id']}) self._test__allocate_ips_for_port(subnets, port, expected) def test__allocate_ips_for_port_2_slaac_pd_subnets(self): subnets = [ { 'cidr': constants.PROVISIONAL_IPV6_PD_PREFIX, 'enable_dhcp': True, 'gateway_ip': '::1', 'id': 'd1a28edd-bd83-480a-bd40-93d036c89f13', 'network_id': 'fbb9b578-95eb-4b79-a116-78e5c4927176', 'ip_version': 6, 'ipv6_address_mode': None, 'ipv6_ra_mode': 'slaac'}, { 'cidr': constants.PROVISIONAL_IPV6_PD_PREFIX, 'enable_dhcp': True, 'gateway_ip': '::1', 'id': 'dc813d3d-ed66-4184-8570-7325c8195e28', 'network_id': 'fbb9b578-95eb-4b79-a116-78e5c4927176', 'ip_version': 6, 'ipv6_address_mode': None, 'ipv6_ra_mode': 'slaac'}] port = {'port': { 'network_id': 'fbb9b578-95eb-4b79-a116-78e5c4927176', 'fixed_ips': attributes.ATTR_NOT_SPECIFIED, 'mac_address': '12:34:56:78:44:ab', 'device_owner': 'compute'}} expected = [] for subnet in subnets: addr = str(ipv6_utils.get_ipv6_addr_by_EUI64( subnet['cidr'], port['port']['mac_address'])) expected.append({'ip_address': addr, 'subnet_id': subnet['id']}) self._test__allocate_ips_for_port(subnets, port, expected) neutron-8.4.0/neutron/tests/unit/db/test_api.py0000664000567000056710000000625613044372760022757 0ustar jenkinsjenkins00000000000000# # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. 
# See the License for the specific language governing permissions and # limitations under the License. from oslo_db import exception as db_exc from sqlalchemy.orm import exc import testtools from neutron.common import exceptions from neutron.db import api as db_api from neutron.tests import base class TestExceptionToRetryContextManager(base.BaseTestCase): def test_translates_single_exception(self): with testtools.ExpectedException(db_exc.RetryRequest): with db_api.exc_to_retry(ValueError): raise ValueError() def test_translates_multiple_exception_types(self): with testtools.ExpectedException(db_exc.RetryRequest): with db_api.exc_to_retry((ValueError, TypeError)): raise TypeError() def test_passes_other_exceptions(self): with testtools.ExpectedException(ValueError): with db_api.exc_to_retry(TypeError): raise ValueError() def test_inner_exception_preserved_in_retryrequest(self): try: exc = ValueError('test') with db_api.exc_to_retry(ValueError): raise exc except db_exc.RetryRequest as e: self.assertEqual(exc, e.inner_exc) def test_retries_on_multi_exception_containing_target(self): with testtools.ExpectedException(db_exc.RetryRequest): with db_api.exc_to_retry(ValueError): e = exceptions.MultipleExceptions([ValueError(), TypeError()]) raise e class TestDeadLockDecorator(base.BaseTestCase): @db_api.retry_db_errors def _decorated_function(self, fail_count, exc_to_raise): self.fail_count = getattr(self, 'fail_count', fail_count + 1) - 1 if self.fail_count: raise exc_to_raise def test_regular_exception_excluded(self): with testtools.ExpectedException(ValueError): self._decorated_function(1, ValueError) def test_staledata_error_caught(self): e = exc.StaleDataError() self.assertIsNone(self._decorated_function(1, e)) def test_multi_exception_contains_deadlock(self): e = exceptions.MultipleExceptions([ValueError(), db_exc.DBDeadlock()]) self.assertIsNone(self._decorated_function(1, e)) def test_multi_nested_exception_contains_deadlock(self): i = exceptions.MultipleExceptions([ValueError(), db_exc.DBDeadlock()]) e = exceptions.MultipleExceptions([ValueError(), i]) self.assertIsNone(self._decorated_function(1, e)) def test_multi_exception_raised_on_exceed(self): e = exceptions.MultipleExceptions([ValueError(), db_exc.DBDeadlock()]) with testtools.ExpectedException(exceptions.MultipleExceptions): self._decorated_function(db_api.MAX_RETRIES + 1, e) neutron-8.4.0/neutron/tests/unit/db/test_dvr_mac_db.py0000664000567000056710000002207313044372760024261 0ustar jenkinsjenkins00000000000000# Copyright (c) 2014 OpenStack Foundation, all rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
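# ---------------------------------------------------------------------------
# Editor's note: several tests below (e.g.
# test__create_dvr_mac_address_retries_exceeded_retry_logic) check a bounded
# retry loop around random per-host MAC generation.  This is a standalone
# sketch of that pattern, not the neutron implementation; the function name,
# ``existing`` set, and base octets are illustrative stand-ins.
import random


def allocate_unique_mac(existing, retries=3, base_mac=(0x12, 0x34, 0x56)):
    """Return a MAC absent from ``existing`` or raise after ``retries``."""
    for _ in range(retries):
        candidate = ':'.join(
            '%02x' % octet
            for octet in base_mac + tuple(random.randint(0, 255)
                                          for _ in range(3)))
        if candidate not in existing:
            return candidate
    # Mirrors the MacAddressGenerationFailure behavior the tests expect.
    raise RuntimeError('MAC generation retries (%d) exceeded' % retries)
# ---------------------------------------------------------------------------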
import mock from oslo_config import cfg from neutron.callbacks import events from neutron.callbacks import registry from neutron.callbacks import resources from neutron.common import constants from neutron import context from neutron.db import dvr_mac_db from neutron.extensions import dvr from neutron.extensions import portbindings from neutron import manager from neutron.tests.unit.plugins.ml2 import test_plugin class DVRDbMixinImpl(dvr_mac_db.DVRDbMixin): def __init__(self, notifier): self.notifier = notifier class DvrDbMixinTestCase(test_plugin.Ml2PluginV2TestCase): def setUp(self): super(DvrDbMixinTestCase, self).setUp() self.ctx = context.get_admin_context() self.mixin = DVRDbMixinImpl(mock.Mock()) def _create_dvr_mac_entry(self, host, mac_address): with self.ctx.session.begin(subtransactions=True): entry = dvr_mac_db.DistributedVirtualRouterMacAddress( host=host, mac_address=mac_address) self.ctx.session.add(entry) def test__get_dvr_mac_address_by_host(self): with self.ctx.session.begin(subtransactions=True): entry = dvr_mac_db.DistributedVirtualRouterMacAddress( host='foo_host', mac_address='foo_mac_address') self.ctx.session.add(entry) result = self.mixin._get_dvr_mac_address_by_host(self.ctx, 'foo_host') self.assertEqual(entry, result) def test__get_dvr_mac_address_by_host_not_found(self): self.assertRaises(dvr.DVRMacAddressNotFound, self.mixin._get_dvr_mac_address_by_host, self.ctx, 'foo_host') def test__create_dvr_mac_address_success(self): entry = {'host': 'foo_host', 'mac_address': '00:11:22:33:44:55:66'} with mock.patch.object(dvr_mac_db.utils, 'get_random_mac') as f: f.return_value = entry['mac_address'] expected = self.mixin._create_dvr_mac_address( self.ctx, entry['host']) self.assertEqual(expected, entry) def test__create_dvr_mac_address_retries_exceeded_retry_logic(self): new_retries = 8 cfg.CONF.set_override('mac_generation_retries', new_retries) self._create_dvr_mac_entry('foo_host_1', 'non_unique_mac') with mock.patch.object(dvr_mac_db.utils, 'get_random_mac') as f: f.return_value = 'non_unique_mac' self.assertRaises(dvr.MacAddressGenerationFailure, self.mixin._create_dvr_mac_address, self.ctx, "foo_host_2") self.assertEqual(new_retries, f.call_count) def test_mac_not_cleared_on_agent_delete_event_with_remaining_agents(self): plugin = manager.NeutronManager.get_plugin() self._create_dvr_mac_entry('host_1', 'mac_1') self._create_dvr_mac_entry('host_2', 'mac_2') agent1 = {'host': 'host_1', 'id': 'a1'} agent2 = {'host': 'host_1', 'id': 'a2'} with mock.patch.object(plugin, 'get_agents', return_value=[agent2]): with mock.patch.object(plugin, 'notifier') as notifier: registry.notify(resources.AGENT, events.BEFORE_DELETE, self, context=self.ctx, agent=agent1) mac_list = self.mixin.get_dvr_mac_address_list(self.ctx) self.assertEqual(2, len(mac_list)) self.assertFalse(notifier.dvr_mac_address_update.called) def test_mac_cleared_on_agent_delete_event(self): plugin = manager.NeutronManager.get_plugin() self._create_dvr_mac_entry('host_1', 'mac_1') self._create_dvr_mac_entry('host_2', 'mac_2') agent = {'host': 'host_1', 'id': 'a1'} with mock.patch.object(plugin, 'notifier') as notifier: registry.notify(resources.AGENT, events.BEFORE_DELETE, self, context=self.ctx, agent=agent) mac_list = self.mixin.get_dvr_mac_address_list(self.ctx) self.assertEqual(1, len(mac_list)) self.assertEqual('host_2', mac_list[0]['host']) notifier.dvr_mac_address_update.assert_called_once_with( self.ctx, mac_list) def test_get_dvr_mac_address_list(self): self._create_dvr_mac_entry('host_1', 'mac_1') 
self._create_dvr_mac_entry('host_2', 'mac_2') mac_list = self.mixin.get_dvr_mac_address_list(self.ctx) self.assertEqual(2, len(mac_list)) def test_get_dvr_mac_address_by_host_existing_host(self): self._create_dvr_mac_entry('foo_host', 'foo_mac') with mock.patch.object(self.mixin, '_get_dvr_mac_address_by_host') as f: self.mixin.get_dvr_mac_address_by_host(self.ctx, 'foo_host') self.assertEqual(1, f.call_count) def test_get_dvr_mac_address_by_host_missing_host(self): with mock.patch.object(self.mixin, '_create_dvr_mac_address') as f: self.mixin.get_dvr_mac_address_by_host(self.ctx, 'foo_host') self.assertEqual(1, f.call_count) def test_get_subnet_for_dvr_returns_correct_mac(self): with self.subnet() as subnet,\ self.port(subnet=subnet),\ self.port(subnet=subnet): dvr_subnet = self.mixin.get_subnet_for_dvr(self.ctx, subnet['subnet']['id']) # no gateway port should be found so no info should be returned self.assertEqual({}, dvr_subnet) with self.port( subnet=subnet, fixed_ips=[{'ip_address': subnet['subnet'][ 'gateway_ip']}]) as gw_port: dvr_subnet = self.mixin.get_subnet_for_dvr( self.ctx, subnet['subnet']['id']) self.assertEqual(gw_port['port']['mac_address'], dvr_subnet['gateway_mac']) def test_get_subnet_for_dvr_returns_correct_mac_fixed_ips_passed(self): with self.subnet() as subnet,\ self.port(subnet=subnet, fixed_ips=[{'ip_address': '10.0.0.2'}]),\ self.port(subnet=subnet, fixed_ips=[{'ip_address': '10.0.0.3'}]): fixed_ips = [{'subnet_id': subnet['subnet']['id'], 'ip_address': '10.0.0.4'}] dvr_subnet = self.mixin.get_subnet_for_dvr( self.ctx, subnet['subnet']['id'], fixed_ips) # no gateway port should be found so no info should be returned self.assertEqual({}, dvr_subnet) with self.port( subnet=subnet, fixed_ips=[{'ip_address': '10.0.0.4'}]) as gw_port: dvr_subnet = self.mixin.get_subnet_for_dvr( self.ctx, subnet['subnet']['id'], fixed_ips) self.assertEqual(gw_port['port']['mac_address'], dvr_subnet['gateway_mac']) def test_get_ports_on_host_by_subnet(self): HOST = 'host1' host_arg = {portbindings.HOST_ID: HOST} arg_list = (portbindings.HOST_ID,) with self.subnet() as subnet,\ self.port(subnet=subnet, device_owner=constants.DEVICE_OWNER_COMPUTE_PREFIX, arg_list=arg_list, **host_arg) as compute_port,\ self.port(subnet=subnet, device_owner=constants.DEVICE_OWNER_DHCP, arg_list=arg_list, **host_arg) as dhcp_port,\ self.port(subnet=subnet, device_owner=constants.DEVICE_OWNER_LOADBALANCER, arg_list=arg_list, **host_arg) as lb_port,\ self.port(device_owner=constants.DEVICE_OWNER_COMPUTE_PREFIX, arg_list=arg_list, **host_arg),\ self.port(subnet=subnet, device_owner=constants.DEVICE_OWNER_COMPUTE_PREFIX, arg_list=arg_list, **{portbindings.HOST_ID: 'other'}),\ self.port(subnet=subnet, device_owner=constants.DEVICE_OWNER_NETWORK_PREFIX, arg_list=arg_list, **host_arg): expected_ids = [port['port']['id'] for port in [compute_port, dhcp_port, lb_port]] dvr_ports = self.mixin.get_ports_on_host_by_subnet( self.ctx, HOST, subnet['subnet']['id']) self.assertEqual(len(expected_ids), len(dvr_ports)) self.assertEqual(expected_ids, [port['id'] for port in dvr_ports]) neutron-8.4.0/neutron/tests/unit/db/test_db_base_plugin_common.py0000664000567000056710000000617013044372736026511 0ustar jenkinsjenkins00000000000000# Copyright (c) 2015 Red Hat, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron.db import db_base_plugin_common from neutron.tests import base class DummyObject(object): def __init__(self, **kwargs): self.kwargs = kwargs def to_dict(self): return self.kwargs class ConvertToDictTestCase(base.BaseTestCase): @db_base_plugin_common.convert_result_to_dict def method_dict(self, fields=None): return DummyObject(one=1, two=2, three=3) @db_base_plugin_common.convert_result_to_dict def method_list(self): return [DummyObject(one=1, two=2, three=3)] * 3 def test_simple_object(self): expected = {'one': 1, 'two': 2, 'three': 3} observed = self.method_dict() self.assertEqual(expected, observed) def test_list_of_objects(self): expected = [{'one': 1, 'two': 2, 'three': 3}] * 3 observed = self.method_list() self.assertEqual(expected, observed) class FilterFieldsTestCase(base.BaseTestCase): @db_base_plugin_common.filter_fields def method_dict(self, fields=None): return {'one': 1, 'two': 2, 'three': 3} @db_base_plugin_common.filter_fields def method_list(self, fields=None): return [self.method_dict() for _ in range(3)] @db_base_plugin_common.filter_fields def method_multiple_arguments(self, not_used, fields=None, also_not_used=None): return {'one': 1, 'two': 2, 'three': 3} def test_no_fields(self): expected = {'one': 1, 'two': 2, 'three': 3} observed = self.method_dict() self.assertEqual(expected, observed) def test_dict(self): expected = {'two': 2} observed = self.method_dict(['two']) self.assertEqual(expected, observed) def test_list(self): expected = [{'two': 2}, {'two': 2}, {'two': 2}] observed = self.method_list(['two']) self.assertEqual(expected, observed) def test_multiple_arguments_positional(self): expected = {'two': 2} observed = self.method_multiple_arguments(list(), ['two']) self.assertEqual(expected, observed) def test_multiple_arguments_positional_and_keywords(self): expected = {'two': 2} observed = self.method_multiple_arguments(fields=['two'], not_used=None) self.assertEqual(expected, observed) def test_multiple_arguments_keyword(self): expected = {'two': 2} observed = self.method_multiple_arguments(list(), fields=['two']) self.assertEqual(expected, observed) neutron-8.4.0/neutron/tests/unit/db/metering/0000775000567000056710000000000013044373210022365 5ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/tests/unit/db/metering/__init__.py0000664000567000056710000000000013044372736024500 0ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/tests/unit/db/metering/test_metering_db.py0000664000567000056710000003165313044372760026276 0ustar jenkinsjenkins00000000000000# Copyright (C) 2013 eNovance SAS # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
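# ---------------------------------------------------------------------------
# Editor's note: like the BGP and DVR modules above, the metering tests wrap
# resource creation in @contextlib.contextmanager helpers so each test body
# reads as nested ``with`` blocks.  A stripped-down sketch of that fixture
# pattern follows; ``plugin`` and its methods are hypothetical stand-ins,
# and unlike this sketch the mixin below relies on test-database teardown
# rather than explicit deletion.
import contextlib


@contextlib.contextmanager
def metering_label_fixture(plugin, context, name='label', description='desc'):
    # Create the resource, hand it to the test body, clean up afterwards.
    label = plugin.create_metering_label(
        context, {'metering_label': {'name': name,
                                     'description': description}})
    try:
        yield label
    finally:
        plugin.delete_metering_label(context, label['id'])
# ---------------------------------------------------------------------------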
import contextlib import webob.exc from neutron.api import extensions from neutron.common import config from neutron.common import constants as n_consts from neutron import context import neutron.extensions from neutron.extensions import metering from neutron.plugins.common import constants from neutron.services.metering import metering_plugin from neutron.tests.unit.db import test_db_base_plugin_v2 DB_METERING_PLUGIN_KLASS = ( "neutron.services.metering." "metering_plugin.MeteringPlugin" ) extensions_path = ':'.join(neutron.extensions.__path__) class MeteringPluginDbTestCaseMixin(object): def _create_metering_label(self, fmt, name, description, **kwargs): data = {'metering_label': {'name': name, 'tenant_id': kwargs.get('tenant_id', 'test-tenant'), 'shared': kwargs.get('shared', False), 'description': description}} req = self.new_create_request('metering-labels', data, fmt) if kwargs.get('set_context') and 'tenant_id' in kwargs: # create a specific auth context for this request req.environ['neutron.context'] = ( context.Context('', kwargs['tenant_id'], is_admin=kwargs.get('is_admin', True))) return req.get_response(self.ext_api) def _make_metering_label(self, fmt, name, description, **kwargs): res = self._create_metering_label(fmt, name, description, **kwargs) if res.status_int >= 400: raise webob.exc.HTTPClientError(code=res.status_int) return self.deserialize(fmt, res) def _create_metering_label_rule(self, fmt, metering_label_id, direction, remote_ip_prefix, excluded, **kwargs): data = {'metering_label_rule': {'metering_label_id': metering_label_id, 'tenant_id': kwargs.get('tenant_id', 'test-tenant'), 'direction': direction, 'excluded': excluded, 'remote_ip_prefix': remote_ip_prefix}} req = self.new_create_request('metering-label-rules', data, fmt) if kwargs.get('set_context') and 'tenant_id' in kwargs: # create a specific auth context for this request req.environ['neutron.context'] = ( context.Context('', kwargs['tenant_id'])) return req.get_response(self.ext_api) def _make_metering_label_rule(self, fmt, metering_label_id, direction, remote_ip_prefix, excluded, **kwargs): res = self._create_metering_label_rule(fmt, metering_label_id, direction, remote_ip_prefix, excluded, **kwargs) if res.status_int >= 400: raise webob.exc.HTTPClientError(code=res.status_int) return self.deserialize(fmt, res) @contextlib.contextmanager def metering_label(self, name='label', description='desc', fmt=None, **kwargs): if not fmt: fmt = self.fmt metering_label = self._make_metering_label(fmt, name, description, **kwargs) yield metering_label @contextlib.contextmanager def metering_label_rule(self, metering_label_id=None, direction='ingress', remote_ip_prefix='10.0.0.0/24', excluded='false', fmt=None): if not fmt: fmt = self.fmt metering_label_rule = self._make_metering_label_rule(fmt, metering_label_id, direction, remote_ip_prefix, excluded) yield metering_label_rule class MeteringPluginDbTestCase( test_db_base_plugin_v2.NeutronDbPluginV2TestCase, MeteringPluginDbTestCaseMixin): fmt = 'json' resource_prefix_map = dict( (k.replace('_', '-'), "/metering") for k in metering.RESOURCE_ATTRIBUTE_MAP.keys() ) def setUp(self, plugin=None): service_plugins = {'metering_plugin_name': DB_METERING_PLUGIN_KLASS} super(MeteringPluginDbTestCase, self).setUp( plugin=plugin, service_plugins=service_plugins ) self.plugin = metering_plugin.MeteringPlugin() ext_mgr = extensions.PluginAwareExtensionManager( extensions_path, {constants.METERING: self.plugin} ) app = config.load_paste_app('extensions_test_app') self.ext_api = 
extensions.ExtensionMiddleware(app, ext_mgr=ext_mgr) class TestMetering(MeteringPluginDbTestCase): def test_create_metering_label(self): name = 'my label' description = 'my metering label' keys = [('name', name,), ('description', description)] with self.metering_label(name, description) as metering_label: for k, v, in keys: self.assertEqual(metering_label['metering_label'][k], v) def test_create_metering_label_shared(self): name = 'my label' description = 'my metering label' shared = True keys = [('name', name,), ('description', description), ('shared', shared)] with self.metering_label(name, description, shared=shared) as metering_label: for k, v, in keys: self.assertEqual(metering_label['metering_label'][k], v) def test_delete_metering_label(self): name = 'my label' description = 'my metering label' with self.metering_label(name, description) as metering_label: metering_label_id = metering_label['metering_label']['id'] self._delete('metering-labels', metering_label_id, 204) def test_list_metering_label(self): name = 'my label' description = 'my metering label' with self.metering_label(name, description) as v1,\ self.metering_label(name, description) as v2: metering_label = (v1, v2) self._test_list_resources('metering-label', metering_label) def test_create_metering_label_rule(self): name = 'my label' description = 'my metering label' with self.metering_label(name, description) as metering_label: metering_label_id = metering_label['metering_label']['id'] direction = 'egress' remote_ip_prefix = '192.168.0.0/24' excluded = True keys = [('metering_label_id', metering_label_id), ('direction', direction), ('excluded', excluded), ('remote_ip_prefix', remote_ip_prefix)] with self.metering_label_rule(metering_label_id, direction, remote_ip_prefix, excluded) as label_rule: for k, v, in keys: self.assertEqual(label_rule['metering_label_rule'][k], v) def test_delete_metering_label_rule(self): name = 'my label' description = 'my metering label' with self.metering_label(name, description) as metering_label: metering_label_id = metering_label['metering_label']['id'] direction = 'egress' remote_ip_prefix = '192.168.0.0/24' excluded = True with self.metering_label_rule(metering_label_id, direction, remote_ip_prefix, excluded) as label_rule: rule_id = label_rule['metering_label_rule']['id'] self._delete('metering-label-rules', rule_id, 204) def test_list_metering_label_rule(self): name = 'my label' description = 'my metering label' with self.metering_label(name, description) as metering_label: metering_label_id = metering_label['metering_label']['id'] direction = 'egress' remote_ip_prefix = '192.168.0.0/24' excluded = True with self.metering_label_rule(metering_label_id, direction, remote_ip_prefix, excluded) as v1,\ self.metering_label_rule(metering_label_id, 'ingress', remote_ip_prefix, excluded) as v2: metering_label_rule = (v1, v2) self._test_list_resources('metering-label-rule', metering_label_rule) def test_create_metering_label_rules(self): name = 'my label' description = 'my metering label' with self.metering_label(name, description) as metering_label: metering_label_id = metering_label['metering_label']['id'] direction = 'egress' remote_ip_prefix = '192.168.0.0/24' excluded = True with self.metering_label_rule(metering_label_id, direction, remote_ip_prefix, excluded) as v1,\ self.metering_label_rule(metering_label_id, direction, n_consts.IPv4_ANY, False) as v2: metering_label_rule = (v1, v2) self._test_list_resources('metering-label-rule', metering_label_rule) def 
test_create_overlap_metering_label_rules(self): name = 'my label' description = 'my metering label' with self.metering_label(name, description) as metering_label: metering_label_id = metering_label['metering_label']['id'] direction = 'egress' remote_ip_prefix1 = '192.168.0.0/24' remote_ip_prefix2 = '192.168.0.0/16' excluded = True with self.metering_label_rule(metering_label_id, direction, remote_ip_prefix1, excluded): res = self._create_metering_label_rule(self.fmt, metering_label_id, direction, remote_ip_prefix2, excluded) self.assertEqual(webob.exc.HTTPConflict.code, res.status_int) def test_create_metering_label_rule_two_labels(self): name1 = 'my label 1' name2 = 'my label 2' description = 'my metering label' with self.metering_label(name1, description) as metering_label1: metering_label_id1 = metering_label1['metering_label']['id'] with self.metering_label(name2, description) as metering_label2: metering_label_id2 = metering_label2['metering_label']['id'] direction = 'egress' remote_ip_prefix = '192.168.0.0/24' excluded = True with self.metering_label_rule(metering_label_id1, direction, remote_ip_prefix, excluded) as v1,\ self.metering_label_rule(metering_label_id2, direction, remote_ip_prefix, excluded) as v2: metering_label_rule = (v1, v2) self._test_list_resources('metering-label-rule', metering_label_rule) neutron-8.4.0/neutron/tests/unit/db/test_db_base_plugin_v2.py0000664000567000056710000112360313044372760025547 0ustar jenkinsjenkins00000000000000# Copyright (c) 2012 OpenStack Foundation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import contextlib import copy import itertools import mock import netaddr from oslo_config import cfg from oslo_utils import importutils import six from sqlalchemy import event from sqlalchemy import orm import testtools from testtools import matchers import webob.exc import neutron from neutron.api import api_common from neutron.api import extensions from neutron.api.v2 import attributes from neutron.api.v2 import router from neutron.callbacks import exceptions from neutron.callbacks import registry from neutron.common import constants from neutron.common import exceptions as n_exc from neutron.common import ipv6_utils from neutron.common import test_lib from neutron.common import utils from neutron import context from neutron.db import api as db_api from neutron.db import db_base_plugin_common from neutron.db import ipam_non_pluggable_backend as non_ipam from neutron.db import l3_db from neutron.db import models_v2 from neutron.db import securitygroups_db as sgdb from neutron import manager from neutron.tests import base from neutron.tests import tools from neutron.tests.unit.api import test_extensions from neutron.tests.unit import testlib_api DB_PLUGIN_KLASS = 'neutron.db.db_base_plugin_v2.NeutronDbPluginV2' DEVICE_OWNER_COMPUTE = constants.DEVICE_OWNER_COMPUTE_PREFIX + 'fake' DEVICE_OWNER_NOT_COMPUTE = constants.DEVICE_OWNER_DHCP def optional_ctx(obj, fallback, **kwargs): if not obj: return fallback(**kwargs) @contextlib.contextmanager def context_wrapper(): yield obj return context_wrapper() def _fake_get_pagination_helper(self, request): return api_common.PaginationEmulatedHelper(request, self._primary_key) def _fake_get_sorting_helper(self, request): return api_common.SortingEmulatedHelper(request, self._attr_info) # TODO(banix): Move the following method to ML2 db test module when ML2 # mechanism driver unit tests are corrected to use Ml2PluginV2TestCase # instead of directly using NeutronDbPluginV2TestCase def _get_create_db_method(resource): ml2_method = '_create_%s_db' % resource if hasattr(manager.NeutronManager.get_plugin(), ml2_method): return ml2_method else: return 'create_%s' % resource class NeutronDbPluginV2TestCase(testlib_api.WebTestCase): fmt = 'json' resource_prefix_map = {} def setUp(self, plugin=None, service_plugins=None, ext_mgr=None): super(NeutronDbPluginV2TestCase, self).setUp() cfg.CONF.set_override('notify_nova_on_port_status_changes', False) cfg.CONF.set_override('allow_overlapping_ips', True) # Make sure at each test according extensions for the plugin is loaded extensions.PluginAwareExtensionManager._instance = None # Save the attributes map in case the plugin will alter it # loading extensions self.useFixture(tools.AttributeMapMemento()) self._tenant_id = 'test-tenant' if not plugin: plugin = DB_PLUGIN_KLASS # Update the plugin self.setup_coreplugin(plugin) cfg.CONF.set_override( 'service_plugins', [test_lib.test_config.get(key, default) for key, default in six.iteritems(service_plugins or {})] ) cfg.CONF.set_override('base_mac', "12:34:56:78:90:ab") cfg.CONF.set_override('max_dns_nameservers', 2) cfg.CONF.set_override('max_subnet_host_routes', 2) cfg.CONF.set_override('allow_pagination', True) cfg.CONF.set_override('allow_sorting', True) self.api = router.APIRouter() # Set the default status self.net_create_status = 'ACTIVE' self.port_create_status = 'ACTIVE' def _is_native_bulk_supported(): plugin_obj = manager.NeutronManager.get_plugin() native_bulk_attr_name = ("_%s__native_bulk_support" % plugin_obj.__class__.__name__) return getattr(plugin_obj, 
native_bulk_attr_name, False) self._skip_native_bulk = not _is_native_bulk_supported() def _is_native_pagination_support(): native_pagination_attr_name = ( "_%s__native_pagination_support" % manager.NeutronManager.get_plugin().__class__.__name__) return (cfg.CONF.allow_pagination and getattr(manager.NeutronManager.get_plugin(), native_pagination_attr_name, False)) self._skip_native_pagination = not _is_native_pagination_support() def _is_native_sorting_support(): native_sorting_attr_name = ( "_%s__native_sorting_support" % manager.NeutronManager.get_plugin().__class__.__name__) return (cfg.CONF.allow_sorting and getattr(manager.NeutronManager.get_plugin(), native_sorting_attr_name, False)) self.plugin = manager.NeutronManager.get_plugin() self._skip_native_sorting = not _is_native_sorting_support() if ext_mgr: self.ext_api = test_extensions.setup_extensions_middleware(ext_mgr) def tearDown(self): self.api = None self._deserializers = None self._skip_native_bulk = None self._skip_native_pagination = None self._skip_native_sorting = None self.ext_api = None super(NeutronDbPluginV2TestCase, self).tearDown() def setup_config(self): # Create the default configurations args = ['--config-file', base.etcdir('neutron.conf')] # If test_config specifies some config-file, use it as well for config_file in test_lib.test_config.get('config_files', []): args.extend(['--config-file', config_file]) super(NeutronDbPluginV2TestCase, self).setup_config(args=args) def _req(self, method, resource, data=None, fmt=None, id=None, params=None, action=None, subresource=None, sub_id=None, context=None): fmt = fmt or self.fmt path = '/%s.%s' % ( '/'.join(p for p in (resource, id, subresource, sub_id, action) if p), fmt ) prefix = self.resource_prefix_map.get(resource) if prefix: path = prefix + path content_type = 'application/%s' % fmt body = None if data is not None: # empty dict is valid body = self.serialize(data) return testlib_api.create_request(path, body, content_type, method, query_string=params, context=context) def new_create_request(self, resource, data, fmt=None, id=None, subresource=None, context=None): return self._req('POST', resource, data, fmt, id=id, subresource=subresource, context=context) def new_list_request(self, resource, fmt=None, params=None, subresource=None): return self._req( 'GET', resource, None, fmt, params=params, subresource=subresource ) def new_show_request(self, resource, id, fmt=None, subresource=None, fields=None): if fields: params = "&".join(["fields=%s" % x for x in fields]) else: params = None return self._req('GET', resource, None, fmt, id=id, params=params, subresource=subresource) def new_delete_request(self, resource, id, fmt=None, subresource=None, sub_id=None, data=None): return self._req( 'DELETE', resource, data, fmt, id=id, subresource=subresource, sub_id=sub_id ) def new_update_request(self, resource, data, id, fmt=None, subresource=None, context=None): return self._req( 'PUT', resource, data, fmt, id=id, subresource=subresource, context=context ) def new_action_request(self, resource, data, id, action, fmt=None, subresource=None): return self._req( 'PUT', resource, data, fmt, id=id, action=action, subresource=subresource ) def deserialize(self, content_type, response): ctype = 'application/%s' % content_type data = self._deserializers[ctype].deserialize(response.body)['body'] return data def _create_bulk_from_list(self, fmt, resource, objects, **kwargs): """Creates a bulk request from a list of objects.""" collection = "%ss" % resource req_data = {collection:
objects} req = self.new_create_request(collection, req_data, fmt) if ('set_context' in kwargs and kwargs['set_context'] is True and 'tenant_id' in kwargs): # create a specific auth context for this request req.environ['neutron.context'] = context.Context( '', kwargs['tenant_id']) elif 'context' in kwargs: req.environ['neutron.context'] = kwargs['context'] return req.get_response(self.api) def _create_bulk(self, fmt, number, resource, data, name='test', **kwargs): """Creates a bulk request for any kind of resource.""" objects = [] collection = "%ss" % resource for i in range(number): obj = copy.deepcopy(data) obj[resource]['name'] = "%s_%s" % (name, i) if 'override' in kwargs and i in kwargs['override']: obj[resource].update(kwargs['override'][i]) objects.append(obj) req_data = {collection: objects} req = self.new_create_request(collection, req_data, fmt) if ('set_context' in kwargs and kwargs['set_context'] is True and 'tenant_id' in kwargs): # create a specific auth context for this request req.environ['neutron.context'] = context.Context( '', kwargs['tenant_id']) elif 'context' in kwargs: req.environ['neutron.context'] = kwargs['context'] return req.get_response(self.api) def _create_network(self, fmt, name, admin_state_up, arg_list=None, set_context=False, tenant_id=None, **kwargs): tenant_id = tenant_id or self._tenant_id data = {'network': {'name': name, 'admin_state_up': admin_state_up, 'tenant_id': tenant_id}} for arg in (('admin_state_up', 'tenant_id', 'shared', 'vlan_transparent', 'availability_zone_hints') + (arg_list or ())): # Arg must be present if arg in kwargs: data['network'][arg] = kwargs[arg] network_req = self.new_create_request('networks', data, fmt) if set_context and tenant_id: # create a specific auth context for this request network_req.environ['neutron.context'] = context.Context( '', tenant_id) return network_req.get_response(self.api) def _create_network_bulk(self, fmt, number, name, admin_state_up, **kwargs): base_data = {'network': {'admin_state_up': admin_state_up, 'tenant_id': self._tenant_id}} return self._create_bulk(fmt, number, 'network', base_data, **kwargs) def _create_subnet(self, fmt, net_id, cidr, expected_res_status=None, **kwargs): data = {'subnet': {'network_id': net_id, 'ip_version': 4, 'tenant_id': self._tenant_id}} if cidr: data['subnet']['cidr'] = cidr for arg in ('ip_version', 'tenant_id', 'subnetpool_id', 'prefixlen', 'enable_dhcp', 'allocation_pools', 'dns_nameservers', 'host_routes', 'shared', 'ipv6_ra_mode', 'ipv6_address_mode'): # Arg must be present and not null (but can be false) if kwargs.get(arg) is not None: data['subnet'][arg] = kwargs[arg] if ('gateway_ip' in kwargs and kwargs['gateway_ip'] is not attributes.ATTR_NOT_SPECIFIED): data['subnet']['gateway_ip'] = kwargs['gateway_ip'] subnet_req = self.new_create_request('subnets', data, fmt) if (kwargs.get('set_context') and 'tenant_id' in kwargs): # create a specific auth context for this request subnet_req.environ['neutron.context'] = context.Context( '', kwargs['tenant_id']) subnet_res = subnet_req.get_response(self.api) if expected_res_status: self.assertEqual(expected_res_status, subnet_res.status_int) return subnet_res def _create_subnet_bulk(self, fmt, number, net_id, name, ip_version=4, **kwargs): base_data = {'subnet': {'network_id': net_id, 'ip_version': ip_version, 'tenant_id': self._tenant_id}} # auto-generate cidrs as they should not overlap overrides = dict((k, v) for (k, v) in zip(range(number), [{'cidr': "10.0.%s.0/24" % num} for num in range(number)])) 
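# e.g. number=3 produces the overrides {0: {'cidr': '10.0.0.0/24'}, 1: {'cidr': '10.0.1.0/24'}, 2: {'cidr': '10.0.2.0/24'}}, so no two bulk-created subnets overlap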
kwargs.update({'override': overrides}) return self._create_bulk(fmt, number, 'subnet', base_data, **kwargs) def _create_subnetpool(self, fmt, prefixes, expected_res_status=None, admin=False, **kwargs): subnetpool = {'subnetpool': {'prefixes': prefixes}} for k, v in kwargs.items(): subnetpool['subnetpool'][k] = str(v) api = self._api_for_resource('subnetpools') subnetpools_req = self.new_create_request('subnetpools', subnetpool, fmt) if not admin: neutron_context = context.Context('', kwargs['tenant_id']) subnetpools_req.environ['neutron.context'] = neutron_context subnetpool_res = subnetpools_req.get_response(api) if expected_res_status: self.assertEqual(expected_res_status, subnetpool_res.status_int) return subnetpool_res def _create_port(self, fmt, net_id, expected_res_status=None, arg_list=None, set_context=False, tenant_id=None, **kwargs): tenant_id = tenant_id or self._tenant_id data = {'port': {'network_id': net_id, 'tenant_id': tenant_id}} for arg in (('admin_state_up', 'device_id', 'mac_address', 'name', 'fixed_ips', 'tenant_id', 'device_owner', 'security_groups') + (arg_list or ())): # Arg must be present if arg in kwargs: data['port'][arg] = kwargs[arg] # create a dhcp port device id if one hasn't been supplied if ('device_owner' in kwargs and kwargs['device_owner'] == constants.DEVICE_OWNER_DHCP and 'host' in kwargs and 'device_id' not in kwargs): device_id = utils.get_dhcp_agent_device_id(net_id, kwargs['host']) data['port']['device_id'] = device_id port_req = self.new_create_request('ports', data, fmt) if set_context and tenant_id: # create a specific auth context for this request port_req.environ['neutron.context'] = context.Context( '', tenant_id) port_res = port_req.get_response(self.api) if expected_res_status: self.assertEqual(expected_res_status, port_res.status_int) return port_res def _list_ports(self, fmt, expected_res_status=None, net_id=None, **kwargs): query_params = [] if net_id: query_params.append("network_id=%s" % net_id) if kwargs.get('device_owner'): query_params.append("device_owner=%s" % kwargs.get('device_owner')) port_req = self.new_list_request('ports', fmt, '&'.join(query_params)) if ('set_context' in kwargs and kwargs['set_context'] is True and 'tenant_id' in kwargs): # create a specific auth context for this request port_req.environ['neutron.context'] = context.Context( '', kwargs['tenant_id']) port_res = port_req.get_response(self.api) if expected_res_status: self.assertEqual(expected_res_status, port_res.status_int) return port_res def _create_port_bulk(self, fmt, number, net_id, name, admin_state_up, **kwargs): base_data = {'port': {'network_id': net_id, 'admin_state_up': admin_state_up, 'tenant_id': self._tenant_id}} return self._create_bulk(fmt, number, 'port', base_data, **kwargs) def _make_network(self, fmt, name, admin_state_up, **kwargs): res = self._create_network(fmt, name, admin_state_up, **kwargs) # TODO(salvatore-orlando): do exception handling in this test module # in a uniform way (we do it differently for ports, subnets, and nets) # Things can go wrong - raise HTTP exc with res code only # so it can be caught by unit tests if res.status_int >= webob.exc.HTTPClientError.code: raise webob.exc.HTTPClientError(code=res.status_int) return self.deserialize(fmt, res) def _make_subnet(self, fmt, network, gateway, cidr, subnetpool_id=None, allocation_pools=None, ip_version=4, enable_dhcp=True, dns_nameservers=None, host_routes=None, shared=None, ipv6_ra_mode=None, ipv6_address_mode=None, tenant_id=None, set_context=False): res =
self._create_subnet(fmt, net_id=network['network']['id'], cidr=cidr, subnetpool_id=subnetpool_id, gateway_ip=gateway, tenant_id=(tenant_id or network['network']['tenant_id']), allocation_pools=allocation_pools, ip_version=ip_version, enable_dhcp=enable_dhcp, dns_nameservers=dns_nameservers, host_routes=host_routes, shared=shared, ipv6_ra_mode=ipv6_ra_mode, ipv6_address_mode=ipv6_address_mode, set_context=set_context) # Things can go wrong - raise HTTP exc with res code only # so it can be caught by unit tests if res.status_int >= webob.exc.HTTPClientError.code: raise webob.exc.HTTPClientError(code=res.status_int) return self.deserialize(fmt, res) def _make_subnetpool(self, fmt, prefixes, admin=False, **kwargs): res = self._create_subnetpool(fmt, prefixes, None, admin, **kwargs) # Things can go wrong - raise HTTP exc with res code only # so it can be caught by unit tests if res.status_int >= webob.exc.HTTPClientError.code: raise webob.exc.HTTPClientError(code=res.status_int) return self.deserialize(fmt, res) def _make_port(self, fmt, net_id, expected_res_status=None, **kwargs): res = self._create_port(fmt, net_id, expected_res_status, **kwargs) # Things can go wrong - raise HTTP exc with res code only # so it can be caught by unit tests if res.status_int >= webob.exc.HTTPClientError.code: raise webob.exc.HTTPClientError(code=res.status_int) return self.deserialize(fmt, res) def _api_for_resource(self, resource): if resource in ['networks', 'subnets', 'ports', 'subnetpools']: return self.api else: return self.ext_api def _delete(self, collection, id, expected_code=webob.exc.HTTPNoContent.code, neutron_context=None): req = self.new_delete_request(collection, id) if neutron_context: # create a specific auth context for this request req.environ['neutron.context'] = neutron_context res = req.get_response(self._api_for_resource(collection)) self.assertEqual(expected_code, res.status_int) def _show_response(self, resource, id, neutron_context=None): req = self.new_show_request(resource, id) if neutron_context: # create a specific auth context for this request req.environ['neutron.context'] = neutron_context return req.get_response(self._api_for_resource(resource)) def _show(self, resource, id, expected_code=webob.exc.HTTPOk.code, neutron_context=None): res = self._show_response(resource, id, neutron_context=neutron_context) self.assertEqual(expected_code, res.status_int) return self.deserialize(self.fmt, res) def _update(self, resource, id, new_data, expected_code=webob.exc.HTTPOk.code, neutron_context=None): req = self.new_update_request(resource, new_data, id) if neutron_context: # create a specific auth context for this request req.environ['neutron.context'] = neutron_context res = req.get_response(self._api_for_resource(resource)) self.assertEqual(expected_code, res.status_int) return self.deserialize(self.fmt, res) def _list(self, resource, fmt=None, neutron_context=None, query_params=None): fmt = fmt or self.fmt req = self.new_list_request(resource, fmt, query_params) if neutron_context: req.environ['neutron.context'] = neutron_context res = req.get_response(self._api_for_resource(resource)) self.assertEqual(webob.exc.HTTPOk.code, res.status_int) return self.deserialize(fmt, res) def _fail_second_call(self, patched_plugin, orig, *args, **kwargs): """Invoked by test cases for injecting failures in plugin.""" def second_call(*args, **kwargs): raise n_exc.NeutronException() patched_plugin.side_effect = second_call return orig(*args, **kwargs) def _validate_behavior_on_bulk_failure( self, res, 
collection, errcode=webob.exc.HTTPClientError.code): self.assertEqual(errcode, res.status_int) req = self.new_list_request(collection) res = req.get_response(self.api) self.assertEqual(webob.exc.HTTPOk.code, res.status_int) items = self.deserialize(self.fmt, res) self.assertEqual(0, len(items[collection])) def _validate_behavior_on_bulk_success(self, res, collection, names=['test_0', 'test_1']): self.assertEqual(webob.exc.HTTPCreated.code, res.status_int) items = self.deserialize(self.fmt, res)[collection] self.assertEqual(len(items), 2) self.assertEqual(items[0]['name'], 'test_0') self.assertEqual(items[1]['name'], 'test_1') def _test_list_resources(self, resource, items, neutron_context=None, query_params=None): res = self._list('%ss' % resource, neutron_context=neutron_context, query_params=query_params) resource = resource.replace('-', '_') self.assertItemsEqual([i['id'] for i in res['%ss' % resource]], [i[resource]['id'] for i in items]) @contextlib.contextmanager def network(self, name='net1', admin_state_up=True, fmt=None, **kwargs): network = self._make_network(fmt or self.fmt, name, admin_state_up, **kwargs) yield network @contextlib.contextmanager def subnet(self, network=None, gateway_ip=attributes.ATTR_NOT_SPECIFIED, cidr='10.0.0.0/24', subnetpool_id=None, fmt=None, ip_version=4, allocation_pools=None, enable_dhcp=True, dns_nameservers=None, host_routes=None, shared=None, ipv6_ra_mode=None, ipv6_address_mode=None, tenant_id=None, set_context=False): with optional_ctx(network, self.network, set_context=set_context, tenant_id=tenant_id) as network_to_use: subnet = self._make_subnet(fmt or self.fmt, network_to_use, gateway_ip, cidr, subnetpool_id, allocation_pools, ip_version, enable_dhcp, dns_nameservers, host_routes, shared=shared, ipv6_ra_mode=ipv6_ra_mode, ipv6_address_mode=ipv6_address_mode, tenant_id=tenant_id, set_context=set_context) yield subnet @contextlib.contextmanager def subnetpool(self, prefixes, admin=False, **kwargs): subnetpool = self._make_subnetpool(self.fmt, prefixes, admin, **kwargs) yield subnetpool @contextlib.contextmanager def port(self, subnet=None, fmt=None, set_context=False, tenant_id=None, **kwargs): with optional_ctx( subnet, self.subnet, set_context=set_context, tenant_id=tenant_id) as subnet_to_use: net_id = subnet_to_use['subnet']['network_id'] port = self._make_port( fmt or self.fmt, net_id, set_context=set_context, tenant_id=tenant_id, **kwargs) yield port def _test_list_with_sort(self, resource, items, sorts, resources=None, query_params=''): query_str = query_params for key, direction in sorts: query_str = query_str + "&sort_key=%s&sort_dir=%s" % (key, direction) if not resources: resources = '%ss' % resource req = self.new_list_request(resources, params=query_str) api = self._api_for_resource(resources) res = self.deserialize(self.fmt, req.get_response(api)) resource = resource.replace('-', '_') resources = resources.replace('-', '_') expected_res = [item[resource]['id'] for item in items] self.assertEqual(expected_res, [n['id'] for n in res[resources]]) def _test_list_with_pagination(self, resource, items, sort, limit, expected_page_num, resources=None, query_params='', verify_key='id'): if not resources: resources = '%ss' % resource query_str = query_params + '&' if query_params else '' query_str = query_str + ("limit=%s&sort_key=%s&" "sort_dir=%s") % (limit, sort[0], sort[1]) req = self.new_list_request(resources, params=query_str) items_res = [] page_num = 0 api = self._api_for_resource(resources) resource = resource.replace('-', '_') 
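# The loop below walks the collection page by page: each response may advertise a '<resources>_links' entry whose 'next' href is requested in turn, until no further page is offered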
resources = resources.replace('-', '_') while req: page_num = page_num + 1 res = self.deserialize(self.fmt, req.get_response(api)) self.assertThat(len(res[resources]), matchers.LessThan(limit + 1)) items_res = items_res + res[resources] req = None if '%s_links' % resources in res: for link in res['%s_links' % resources]: if link['rel'] == 'next': content_type = 'application/%s' % self.fmt req = testlib_api.create_request(link['href'], '', content_type) self.assertEqual(len(res[resources]), limit) self.assertEqual(expected_page_num, page_num) self.assertEqual([item[resource][verify_key] for item in items], [n[verify_key] for n in items_res]) def _test_list_with_pagination_reverse(self, resource, items, sort, limit, expected_page_num, resources=None, query_params=''): if not resources: resources = '%ss' % resource resource = resource.replace('-', '_') api = self._api_for_resource(resources) marker = items[-1][resource]['id'] query_str = query_params + '&' if query_params else '' query_str = query_str + ("limit=%s&page_reverse=True&" "sort_key=%s&sort_dir=%s&" "marker=%s") % (limit, sort[0], sort[1], marker) req = self.new_list_request(resources, params=query_str) item_res = [items[-1][resource]] page_num = 0 resources = resources.replace('-', '_') while req: page_num = page_num + 1 res = self.deserialize(self.fmt, req.get_response(api)) self.assertThat(len(res[resources]), matchers.LessThan(limit + 1)) res[resources].reverse() item_res = item_res + res[resources] req = None if '%s_links' % resources in res: for link in res['%s_links' % resources]: if link['rel'] == 'previous': content_type = 'application/%s' % self.fmt req = testlib_api.create_request(link['href'], '', content_type) self.assertEqual(len(res[resources]), limit) self.assertEqual(expected_page_num, page_num) expected_res = [item[resource]['id'] for item in items] expected_res.reverse() self.assertEqual(expected_res, [n['id'] for n in item_res]) def _compare_resource(self, observed_res, expected_res, res_name): ''' Compare the observed and expected resources (ie compare subnets) ''' for k in expected_res: self.assertIn(k, observed_res[res_name]) if isinstance(expected_res[k], list): self.assertEqual(sorted(observed_res[res_name][k]), sorted(expected_res[k])) else: self.assertEqual(observed_res[res_name][k], expected_res[k]) def _validate_resource(self, resource, keys, res_name): for k in keys: self.assertIn(k, resource[res_name]) if isinstance(keys[k], list): self.assertEqual( sorted(resource[res_name][k], key=utils.safe_sort_key), sorted(keys[k], key=utils.safe_sort_key)) else: self.assertEqual(resource[res_name][k], keys[k]) class TestBasicGet(NeutronDbPluginV2TestCase): def test_single_get_admin(self): plugin = neutron.db.db_base_plugin_v2.NeutronDbPluginV2() with self.network() as network: net_id = network['network']['id'] ctx = context.get_admin_context() n = plugin._get_network(ctx, net_id) self.assertEqual(net_id, n.id) def test_single_get_tenant(self): plugin = neutron.db.db_base_plugin_v2.NeutronDbPluginV2() with self.network() as network: net_id = network['network']['id'] ctx = context.get_admin_context() n = plugin._get_network(ctx, net_id) self.assertEqual(net_id, n.id) class TestV2HTTPResponse(NeutronDbPluginV2TestCase): def test_create_returns_201(self): res = self._create_network(self.fmt, 'net2', True) self.assertEqual(webob.exc.HTTPCreated.code, res.status_int) def test_list_returns_200(self): req = self.new_list_request('networks') res = req.get_response(self.api) self.assertEqual(webob.exc.HTTPOk.code, 
res.status_int) def _check_list_with_fields(self, res, field_name): self.assertEqual(webob.exc.HTTPOk.code, res.status_int) body = self.deserialize(self.fmt, res) # further checks: 1 networks self.assertEqual(1, len(body['networks'])) # 1 field in the network record self.assertEqual(1, len(body['networks'][0])) # field is 'name' self.assertIn(field_name, body['networks'][0]) def test_list_with_fields(self): self._create_network(self.fmt, 'some_net', True) req = self.new_list_request('networks', params="fields=name") res = req.get_response(self.api) self._check_list_with_fields(res, 'name') def test_list_with_fields_noadmin(self): tenant_id = 'some_tenant' self._create_network(self.fmt, 'some_net', True, tenant_id=tenant_id, set_context=True) req = self.new_list_request('networks', params="fields=name") req.environ['neutron.context'] = context.Context('', tenant_id) res = req.get_response(self.api) self._check_list_with_fields(res, 'name') def test_list_with_fields_noadmin_and_policy_field(self): """If a field used by policy is selected, do not duplicate it. Verifies that if the field parameter explicitly specifies a field which is used by the policy engine, then it is not duplicated in the response. """ tenant_id = 'some_tenant' self._create_network(self.fmt, 'some_net', True, tenant_id=tenant_id, set_context=True) req = self.new_list_request('networks', params="fields=tenant_id") req.environ['neutron.context'] = context.Context('', tenant_id) res = req.get_response(self.api) self._check_list_with_fields(res, 'tenant_id') def test_show_returns_200(self): with self.network() as net: req = self.new_show_request('networks', net['network']['id']) res = req.get_response(self.api) self.assertEqual(webob.exc.HTTPOk.code, res.status_int) def test_delete_returns_204(self): res = self._create_network(self.fmt, 'net1', True) net = self.deserialize(self.fmt, res) req = self.new_delete_request('networks', net['network']['id']) res = req.get_response(self.api) self.assertEqual(webob.exc.HTTPNoContent.code, res.status_int) def test_delete_with_req_body_returns_400(self): res = self._create_network(self.fmt, 'net1', True) net = self.deserialize(self.fmt, res) data = {"network": {"id": net['network']['id']}} req = self.new_delete_request('networks', net['network']['id'], data=data) res = req.get_response(self.api) self.assertEqual(webob.exc.HTTPBadRequest.code, res.status_int) def test_update_returns_200(self): with self.network() as net: req = self.new_update_request('networks', {'network': {'name': 'steve'}}, net['network']['id']) res = req.get_response(self.api) self.assertEqual(webob.exc.HTTPOk.code, res.status_int) def test_update_invalid_json_400(self): with self.network() as net: req = self.new_update_request('networks', '{{"name": "aaa"}}', net['network']['id']) res = req.get_response(self.api) self.assertEqual(webob.exc.HTTPClientError.code, res.status_int) def test_bad_route_404(self): req = self.new_list_request('doohickeys') res = req.get_response(self.api) self.assertEqual(webob.exc.HTTPNotFound.code, res.status_int) class TestPortsV2(NeutronDbPluginV2TestCase): def test_create_port_json(self): keys = [('admin_state_up', True), ('status', self.port_create_status)] with self.port(name='myname') as port: for k, v in keys: self.assertEqual(port['port'][k], v) self.assertIn('mac_address', port['port']) ips = port['port']['fixed_ips'] self.assertEqual(1, len(ips)) self.assertEqual('10.0.0.2', ips[0]['ip_address']) self.assertEqual('myname', port['port']['name']) def test_create_port_as_admin(self): 
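# set_context is left False here, so the request runs with implicit admin rights and creating a port for a foreign (bogus) tenant_id is expected to succeed; the bad_tenant test below flips set_context and expects a 404 instead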
with self.network() as network: self._create_port(self.fmt, network['network']['id'], webob.exc.HTTPCreated.code, tenant_id='bad_tenant_id', device_id='fake_device', device_owner='fake_owner', fixed_ips=[], set_context=False) def test_create_port_bad_tenant(self): with self.network() as network: self._create_port(self.fmt, network['network']['id'], webob.exc.HTTPNotFound.code, tenant_id='bad_tenant_id', device_id='fake_device', device_owner='fake_owner', fixed_ips=[], set_context=True) def test_create_port_public_network(self): keys = [('admin_state_up', True), ('status', self.port_create_status)] with self.network(shared=True) as network: port_res = self._create_port(self.fmt, network['network']['id'], webob.exc.HTTPCreated.code, tenant_id='another_tenant', set_context=True) port = self.deserialize(self.fmt, port_res) for k, v in keys: self.assertEqual(port['port'][k], v) self.assertIn('mac_address', port['port']) self._delete('ports', port['port']['id']) def test_create_port_public_network_with_ip(self): with self.network(shared=True) as network: with self.subnet(network=network, cidr='10.0.0.0/24') as subnet: keys = [('admin_state_up', True), ('status', self.port_create_status), ('fixed_ips', [{'subnet_id': subnet['subnet']['id'], 'ip_address': '10.0.0.2'}])] port_res = self._create_port(self.fmt, network['network']['id'], webob.exc.HTTPCreated.code, tenant_id='another_tenant', set_context=True) port = self.deserialize(self.fmt, port_res) for k, v in keys: self.assertEqual(port['port'][k], v) self.assertIn('mac_address', port['port']) self._delete('ports', port['port']['id']) def test_create_port_anticipating_allocation(self): with self.network(shared=True) as network: with self.subnet(network=network, cidr='10.0.0.0/24') as subnet: fixed_ips = [{'subnet_id': subnet['subnet']['id']}, {'subnet_id': subnet['subnet']['id'], 'ip_address': '10.0.0.2'}] self._create_port(self.fmt, network['network']['id'], webob.exc.HTTPCreated.code, fixed_ips=fixed_ips) def test_create_port_public_network_with_invalid_ip_no_subnet_id(self, expected_error='InvalidIpForNetwork'): with self.network(shared=True) as network: with self.subnet(network=network, cidr='10.0.0.0/24'): ips = [{'ip_address': '1.1.1.1'}] res = self._create_port(self.fmt, network['network']['id'], webob.exc.HTTPBadRequest.code, fixed_ips=ips, set_context=True) data = self.deserialize(self.fmt, res) msg = str(n_exc.InvalidIpForNetwork(ip_address='1.1.1.1')) self.assertEqual(expected_error, data['NeutronError']['type']) self.assertEqual(msg, data['NeutronError']['message']) def test_create_port_public_network_with_invalid_ip_and_subnet_id(self, expected_error='InvalidIpForSubnet'): with self.network(shared=True) as network: with self.subnet(network=network, cidr='10.0.0.0/24') as subnet: ips = [{'subnet_id': subnet['subnet']['id'], 'ip_address': '1.1.1.1'}] res = self._create_port(self.fmt, network['network']['id'], webob.exc.HTTPBadRequest.code, fixed_ips=ips, set_context=True) data = self.deserialize(self.fmt, res) msg = str(n_exc.InvalidIpForSubnet(ip_address='1.1.1.1')) self.assertEqual(expected_error, data['NeutronError']['type']) self.assertEqual(msg, data['NeutronError']['message']) def test_create_port_with_too_many_fixed_ips(self): with self.network() as network: with self.subnet(network=network, cidr='10.0.0.0/24') as subnet: fixed_ips = [{'subnet_id': subnet['subnet']['id'], 'ip_address': '10.0.0.%s' % id} for id in range(3, cfg.CONF.max_fixed_ips_per_port + 4)] res = self._create_port(self.fmt, network['network']['id'], 
webob.exc.HTTPBadRequest.code, fixed_ips=fixed_ips, set_context=True) data = self.deserialize(self.fmt, res) expected_error = 'InvalidInput' self.assertEqual(expected_error, data['NeutronError']['type']) def test_create_ports_bulk_native(self): if self._skip_native_bulk: self.skipTest("Plugin does not support native bulk port create") with self.network() as net: res = self._create_port_bulk(self.fmt, 2, net['network']['id'], 'test', True) self._validate_behavior_on_bulk_success(res, 'ports') for p in self.deserialize(self.fmt, res)['ports']: self._delete('ports', p['id']) def test_create_ports_bulk_emulated(self): real_has_attr = hasattr # ensures the API chooses the emulation code path def fakehasattr(item, attr): if attr.endswith('__native_bulk_support'): return False return real_has_attr(item, attr) with mock.patch('six.moves.builtins.hasattr', new=fakehasattr): with self.network() as net: res = self._create_port_bulk(self.fmt, 2, net['network']['id'], 'test', True) self._validate_behavior_on_bulk_success(res, 'ports') for p in self.deserialize(self.fmt, res)['ports']: self._delete('ports', p['id']) def test_create_ports_bulk_wrong_input(self): with self.network() as net: overrides = {1: {'admin_state_up': 'doh'}} res = self._create_port_bulk(self.fmt, 2, net['network']['id'], 'test', True, override=overrides) self.assertEqual(webob.exc.HTTPClientError.code, res.status_int) req = self.new_list_request('ports') res = req.get_response(self.api) self.assertEqual(webob.exc.HTTPOk.code, res.status_int) ports = self.deserialize(self.fmt, res) self.assertEqual(0, len(ports['ports'])) def test_get_ports_count(self): with self.port(), self.port(), self.port(), self.port() as p: tenid = p['port']['tenant_id'] ctx = context.Context(user_id=None, tenant_id=tenid, is_admin=False) pl = manager.NeutronManager.get_plugin() count = pl.get_ports_count(ctx, filters={'tenant_id': [tenid]}) self.assertEqual(4, count) def test_create_ports_bulk_emulated_plugin_failure(self): real_has_attr = hasattr # ensures the API chooses the emulation code path def fakehasattr(item, attr): if attr.endswith('__native_bulk_support'): return False return real_has_attr(item, attr) with mock.patch('six.moves.builtins.hasattr', new=fakehasattr): orig = manager.NeutronManager.get_plugin().create_port method_to_patch = _get_create_db_method('port') with mock.patch.object(manager.NeutronManager.get_plugin(), method_to_patch) as patched_plugin: def side_effect(*args, **kwargs): return self._fail_second_call(patched_plugin, orig, *args, **kwargs) patched_plugin.side_effect = side_effect with self.network() as net: res = self._create_port_bulk(self.fmt, 2, net['network']['id'], 'test', True) # We expect a 500 as we injected a fault in the plugin self._validate_behavior_on_bulk_failure( res, 'ports', webob.exc.HTTPServerError.code ) def test_create_ports_bulk_native_plugin_failure(self): if self._skip_native_bulk: self.skipTest("Plugin does not support native bulk port create") ctx = context.get_admin_context() with self.network() as net: plugin = manager.NeutronManager.get_plugin() orig = plugin.create_port method_to_patch = _get_create_db_method('port') with mock.patch.object(plugin, method_to_patch) as patched_plugin: def side_effect(*args, **kwargs): return self._fail_second_call(patched_plugin, orig, *args, **kwargs) patched_plugin.side_effect = side_effect res = self._create_port_bulk(self.fmt, 2, net['network']['id'], 'test', True, context=ctx) # We expect a 500 as we injected a fault in the plugin
self._validate_behavior_on_bulk_failure( res, 'ports', webob.exc.HTTPServerError.code) def test_list_ports(self): # for this test we need to enable overlapping ips cfg.CONF.set_default('allow_overlapping_ips', True) with self.port() as v1, self.port() as v2, self.port() as v3: ports = (v1, v2, v3) self._test_list_resources('port', ports) def test_list_ports_filtered_by_fixed_ip(self): # for this test we need to enable overlapping ips cfg.CONF.set_default('allow_overlapping_ips', True) with self.port() as port1, self.port(): fixed_ips = port1['port']['fixed_ips'][0] query_params = """ fixed_ips=ip_address%%3D%s&fixed_ips=ip_address%%3D%s&fixed_ips=subnet_id%%3D%s """.strip() % (fixed_ips['ip_address'], '192.168.126.5', fixed_ips['subnet_id']) self._test_list_resources('port', [port1], query_params=query_params) def test_list_ports_public_network(self): with self.network(shared=True) as network: with self.subnet(network) as subnet: with self.port(subnet, tenant_id='tenant_1') as port1,\ self.port(subnet, tenant_id='tenant_2') as port2: # Admin request - must return both ports self._test_list_resources('port', [port1, port2]) # Tenant_1 request - must return single port n_context = context.Context('', 'tenant_1') self._test_list_resources('port', [port1], neutron_context=n_context) # Tenant_2 request - must return single port n_context = context.Context('', 'tenant_2') self._test_list_resources('port', [port2], neutron_context=n_context) def test_list_ports_for_network_owner(self): with self.network(tenant_id='tenant_1') as network: with self.subnet(network) as subnet: with self.port(subnet, tenant_id='tenant_1') as port1,\ self.port(subnet, tenant_id='tenant_2') as port2: # network owner request, should return all ports port_res = self._list_ports( 'json', set_context=True, tenant_id='tenant_1') port_list = self.deserialize('json', port_res)['ports'] port_ids = [p['id'] for p in port_list] self.assertEqual(2, len(port_list)) self.assertIn(port1['port']['id'], port_ids) self.assertIn(port2['port']['id'], port_ids) # another tenant request, only return ports belong to it port_res = self._list_ports( 'json', set_context=True, tenant_id='tenant_2') port_list = self.deserialize('json', port_res)['ports'] port_ids = [p['id'] for p in port_list] self.assertEqual(1, len(port_list)) self.assertNotIn(port1['port']['id'], port_ids) self.assertIn(port2['port']['id'], port_ids) def test_list_ports_with_sort_native(self): if self._skip_native_sorting: self.skipTest("Skip test for not implemented sorting feature") cfg.CONF.set_default('allow_overlapping_ips', True) with self.port(admin_state_up='True', mac_address='00:00:00:00:00:01') as port1,\ self.port(admin_state_up='False', mac_address='00:00:00:00:00:02') as port2,\ self.port(admin_state_up='False', mac_address='00:00:00:00:00:03') as port3: self._test_list_with_sort('port', (port3, port2, port1), [('admin_state_up', 'asc'), ('mac_address', 'desc')]) def test_list_ports_with_sort_emulated(self): helper_patcher = mock.patch( 'neutron.api.v2.base.Controller._get_sorting_helper', new=_fake_get_sorting_helper) helper_patcher.start() cfg.CONF.set_default('allow_overlapping_ips', True) with self.port(admin_state_up='True', mac_address='00:00:00:00:00:01') as port1,\ self.port(admin_state_up='False', mac_address='00:00:00:00:00:02') as port2,\ self.port(admin_state_up='False', mac_address='00:00:00:00:00:03') as port3: self._test_list_with_sort('port', (port3, port2, port1), [('admin_state_up', 'asc'), ('mac_address', 'desc')]) def 
test_list_ports_with_pagination_native(self): if self._skip_native_pagination: self.skipTest("Skip test for not implemented pagination feature") cfg.CONF.set_default('allow_overlapping_ips', True) with self.port(mac_address='00:00:00:00:00:01') as port1,\ self.port(mac_address='00:00:00:00:00:02') as port2,\ self.port(mac_address='00:00:00:00:00:03') as port3: self._test_list_with_pagination('port', (port1, port2, port3), ('mac_address', 'asc'), 2, 2) def test_list_ports_with_pagination_emulated(self): helper_patcher = mock.patch( 'neutron.api.v2.base.Controller._get_pagination_helper', new=_fake_get_pagination_helper) helper_patcher.start() cfg.CONF.set_default('allow_overlapping_ips', True) with self.port(mac_address='00:00:00:00:00:01') as port1,\ self.port(mac_address='00:00:00:00:00:02') as port2,\ self.port(mac_address='00:00:00:00:00:03') as port3: self._test_list_with_pagination('port', (port1, port2, port3), ('mac_address', 'asc'), 2, 2) def test_list_ports_with_pagination_reverse_native(self): if self._skip_native_pagination: self.skipTest("Skip test for not implemented pagination feature") cfg.CONF.set_default('allow_overlapping_ips', True) with self.port(mac_address='00:00:00:00:00:01') as port1,\ self.port(mac_address='00:00:00:00:00:02') as port2,\ self.port(mac_address='00:00:00:00:00:03') as port3: self._test_list_with_pagination_reverse('port', (port1, port2, port3), ('mac_address', 'asc'), 2, 2) def test_list_ports_with_pagination_reverse_emulated(self): helper_patcher = mock.patch( 'neutron.api.v2.base.Controller._get_pagination_helper', new=_fake_get_pagination_helper) helper_patcher.start() cfg.CONF.set_default('allow_overlapping_ips', True) with self.port(mac_address='00:00:00:00:00:01') as port1,\ self.port(mac_address='00:00:00:00:00:02') as port2,\ self.port(mac_address='00:00:00:00:00:03') as port3: self._test_list_with_pagination_reverse('port', (port1, port2, port3), ('mac_address', 'asc'), 2, 2) def test_show_port(self): with self.port() as port: req = self.new_show_request('ports', port['port']['id'], self.fmt) sport = self.deserialize(self.fmt, req.get_response(self.api)) self.assertEqual(port['port']['id'], sport['port']['id']) def test_delete_port(self): with self.port() as port: self._delete('ports', port['port']['id']) self._show('ports', port['port']['id'], expected_code=webob.exc.HTTPNotFound.code) def test_delete_port_public_network(self): with self.network(shared=True) as network: port_res = self._create_port(self.fmt, network['network']['id'], webob.exc.HTTPCreated.code, tenant_id='another_tenant', set_context=True) port = self.deserialize(self.fmt, port_res) self._delete('ports', port['port']['id']) self._show('ports', port['port']['id'], expected_code=webob.exc.HTTPNotFound.code) def test_delete_port_by_network_owner(self): with self.network(tenant_id='tenant_1') as network: with self.subnet(network) as subnet: with self.port(subnet, tenant_id='tenant_2') as port: self._delete( 'ports', port['port']['id'], neutron_context=context.Context('', 'tenant_1')) self._show('ports', port['port']['id'], expected_code=webob.exc.HTTPNotFound.code) def test_update_port(self): with self.port() as port: data = {'port': {'admin_state_up': False}} req = self.new_update_request('ports', data, port['port']['id']) res = self.deserialize(self.fmt, req.get_response(self.api)) self.assertEqual(res['port']['admin_state_up'], data['port']['admin_state_up']) def update_port_mac(self, port, updated_fixed_ips=None): orig_mac = port['mac_address'] mac = orig_mac.split(':') 
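# toggle the last octet between '00' and '01' so the updated MAC is guaranteed to differ from the original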
mac[5] = '01' if mac[5] != '01' else '00' new_mac = ':'.join(mac) data = {'port': {'mac_address': new_mac}} if updated_fixed_ips: data['port']['fixed_ips'] = updated_fixed_ips req = self.new_update_request('ports', data, port['id']) return req.get_response(self.api), new_mac def _check_v6_auto_address_address(self, port, subnet): if ipv6_utils.is_auto_address_subnet(subnet['subnet']): port_mac = port['port']['mac_address'] subnet_cidr = subnet['subnet']['cidr'] eui_addr = str(ipv6_utils.get_ipv6_addr_by_EUI64(subnet_cidr, port_mac)) self.assertEqual(port['port']['fixed_ips'][0]['ip_address'], eui_addr) def check_update_port_mac( self, expected_status=webob.exc.HTTPOk.code, expected_error='StateInvalid', subnet=None, device_owner=DEVICE_OWNER_COMPUTE, updated_fixed_ips=None, host_arg=None, arg_list=None): host_arg = host_arg or {} arg_list = arg_list or [] with self.port(device_owner=device_owner, subnet=subnet, arg_list=arg_list, **host_arg) as port: self.assertIn('mac_address', port['port']) res, new_mac = self.update_port_mac( port['port'], updated_fixed_ips=updated_fixed_ips) self.assertEqual(expected_status, res.status_int) if expected_status == webob.exc.HTTPOk.code: result = self.deserialize(self.fmt, res) self.assertIn('port', result) self.assertEqual(new_mac, result['port']['mac_address']) if subnet and subnet['subnet']['ip_version'] == 6: self._check_v6_auto_address_address(port, subnet) else: error = self.deserialize(self.fmt, res) self.assertEqual(expected_error, error['NeutronError']['type']) def test_update_port_mac(self): self.check_update_port_mac() # sub-classes for plugins/drivers that support mac address update # override this method def test_update_dhcp_port_with_exceeding_fixed_ips(self): """ The maximum number of fixed IPs per port is set by the max_fixed_ips_per_port option in the configuration file. DHCP ports are not restricted by this parameter.
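For illustration only (a sketch, not exercised by this test; the value shown is an assumption rather than a value read from the test configuration), the limit would be tuned like this in neutron.conf: [DEFAULT] max_fixed_ips_per_port = 5 An update pushing a regular port past the limit fails, while the same update on a port whose device_owner is network:dhcp succeeds, as verified below.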
""" with self.subnet() as subnet: updated_fixed_ips = [{'subnet_id': subnet['subnet']['id'], 'ip_address': '10.0.0.%s' % id} for id in range(3, cfg.CONF.max_fixed_ips_per_port + 4)] host_arg = None or {} arg_list = None or [] with self.port(device_owner=constants.DEVICE_OWNER_DHCP, subnet=subnet, arg_list=arg_list, **host_arg) as port: data = {'port': {'fixed_ips': updated_fixed_ips}} req = self.new_update_request('ports', data, port['port']['id']) res = req.get_response(self.api) self.assertEqual(webob.exc.HTTPOk.code, res.status_int) result = self.deserialize(self.fmt, res) for fixed_ip in updated_fixed_ips: self.assertIn(fixed_ip, result['port']['fixed_ips']) def test_update_port_mac_ip(self): with self.subnet() as subnet: updated_fixed_ips = [{'subnet_id': subnet['subnet']['id'], 'ip_address': '10.0.0.3'}] self.check_update_port_mac(subnet=subnet, updated_fixed_ips=updated_fixed_ips) def test_update_port_mac_v6_slaac(self): with self.subnet(gateway_ip='fe80::1', cidr='2607:f0d0:1002:51::/64', ip_version=6, ipv6_address_mode=constants.IPV6_SLAAC) as subnet: self.assertTrue( ipv6_utils.is_auto_address_subnet(subnet['subnet'])) self.check_update_port_mac(subnet=subnet) def test_update_port_mac_bad_owner(self): self.check_update_port_mac( device_owner=DEVICE_OWNER_NOT_COMPUTE, expected_status=webob.exc.HTTPConflict.code, expected_error='UnsupportedPortDeviceOwner') def check_update_port_mac_used(self, expected_error='MacAddressInUse'): with self.subnet() as subnet: with self.port(subnet=subnet) as port: with self.port(subnet=subnet) as port2: self.assertIn('mac_address', port['port']) new_mac = port2['port']['mac_address'] data = {'port': {'mac_address': new_mac}} req = self.new_update_request('ports', data, port['port']['id']) res = req.get_response(self.api) self.assertEqual(webob.exc.HTTPConflict.code, res.status_int) error = self.deserialize(self.fmt, res) self.assertEqual(expected_error, error['NeutronError']['type']) def test_update_port_mac_used(self): self.check_update_port_mac_used() def test_update_port_not_admin(self): res = self._create_network(self.fmt, 'net1', True, tenant_id='not_admin', set_context=True) net1 = self.deserialize(self.fmt, res) res = self._create_port(self.fmt, net1['network']['id'], tenant_id='not_admin', set_context=True) port = self.deserialize(self.fmt, res) data = {'port': {'admin_state_up': False}} neutron_context = context.Context('', 'not_admin') port = self._update('ports', port['port']['id'], data, neutron_context=neutron_context) self.assertFalse(port['port']['admin_state_up']) def test_update_device_id_unchanged(self): with self.port() as port: data = {'port': {'admin_state_up': True, 'device_id': port['port']['device_id']}} req = self.new_update_request('ports', data, port['port']['id']) res = self.deserialize(self.fmt, req.get_response(self.api)) self.assertTrue(res['port']['admin_state_up']) def test_update_device_id_null(self): with self.port() as port: data = {'port': {'device_id': None}} req = self.new_update_request('ports', data, port['port']['id']) res = req.get_response(self.api) self.assertEqual(webob.exc.HTTPClientError.code, res.status_int) def test_delete_network_if_port_exists(self): with self.port() as port: req = self.new_delete_request('networks', port['port']['network_id']) res = req.get_response(self.api) self.assertEqual(webob.exc.HTTPConflict.code, res.status_int) def test_delete_network_port_exists_owned_by_network(self): res = self._create_network(fmt=self.fmt, name='net', admin_state_up=True) network = 
def test_delete_network_port_exists_owned_by_network(self): res = self._create_network(fmt=self.fmt, name='net', admin_state_up=True) network = self.deserialize(self.fmt, res) network_id = network['network']['id'] self._create_port(self.fmt, network_id, device_owner=constants.DEVICE_OWNER_DHCP) req = self.new_delete_request('networks', network_id) res = req.get_response(self.api) self.assertEqual(webob.exc.HTTPNoContent.code, res.status_int) def test_update_port_delete_ip(self): with self.subnet() as subnet: with self.port(subnet=subnet) as port: data = {'port': {'admin_state_up': False, 'fixed_ips': []}} req = self.new_update_request('ports', data, port['port']['id']) res = self.deserialize(self.fmt, req.get_response(self.api)) self.assertEqual(data['port']['admin_state_up'], res['port']['admin_state_up']) self.assertEqual(data['port']['fixed_ips'], res['port']['fixed_ips']) def test_no_more_port_exception(self): with self.subnet(cidr='10.0.0.0/31', enable_dhcp=False) as subnet: id = subnet['subnet']['network_id'] res = self._create_port(self.fmt, id) data = self.deserialize(self.fmt, res) msg = str(n_exc.IpAddressGenerationFailure(net_id=id)) self.assertEqual(data['NeutronError']['message'], msg) self.assertEqual(webob.exc.HTTPConflict.code, res.status_int) def test_update_port_update_ip(self): """Test update of port IP. Check that a configured IP 10.0.0.2 is replaced by 10.0.0.10. """ with self.subnet() as subnet: with self.port(subnet=subnet) as port: ips = port['port']['fixed_ips'] self.assertEqual(1, len(ips)) self.assertEqual('10.0.0.2', ips[0]['ip_address']) self.assertEqual(ips[0]['subnet_id'], subnet['subnet']['id']) data = {'port': {'fixed_ips': [{'subnet_id': subnet['subnet']['id'], 'ip_address': "10.0.0.10"}]}} req = self.new_update_request('ports', data, port['port']['id']) res = self.deserialize(self.fmt, req.get_response(self.api)) ips = res['port']['fixed_ips'] self.assertEqual(1, len(ips)) self.assertEqual('10.0.0.10', ips[0]['ip_address']) self.assertEqual(ips[0]['subnet_id'], subnet['subnet']['id']) def test_update_port_update_ip_address_only(self): with self.subnet() as subnet: with self.port(subnet=subnet) as port: ips = port['port']['fixed_ips'] self.assertEqual(1, len(ips)) self.assertEqual('10.0.0.2', ips[0]['ip_address']) self.assertEqual(ips[0]['subnet_id'], subnet['subnet']['id']) data = {'port': {'fixed_ips': [{'subnet_id': subnet['subnet']['id'], 'ip_address': "10.0.0.10"}, {'ip_address': "10.0.0.2"}]}} req = self.new_update_request('ports', data, port['port']['id']) res = self.deserialize(self.fmt, req.get_response(self.api)) ips = res['port']['fixed_ips'] self.assertEqual(2, len(ips)) self.assertIn({'ip_address': '10.0.0.2', 'subnet_id': subnet['subnet']['id']}, ips) self.assertIn({'ip_address': '10.0.0.10', 'subnet_id': subnet['subnet']['id']}, ips) def test_update_port_update_ips(self): """Update IP and associate new IP on port. Check a port update with the specified subnet_ids. An IP address will be allocated for each subnet_id.
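The update request issued below has roughly this shape (illustrative only; <subnet-id> stands for the id of the subnet created by the test): {'port': {'admin_state_up': False, 'fixed_ips': [{'subnet_id': <subnet-id>, 'ip_address': '10.0.0.3'}]}}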
""" with self.subnet() as subnet: with self.port(subnet=subnet) as port: data = {'port': {'admin_state_up': False, 'fixed_ips': [{'subnet_id': subnet['subnet']['id'], 'ip_address': '10.0.0.3'}]}} req = self.new_update_request('ports', data, port['port']['id']) res = self.deserialize(self.fmt, req.get_response(self.api)) self.assertEqual(data['port']['admin_state_up'], res['port']['admin_state_up']) ips = res['port']['fixed_ips'] self.assertEqual(1, len(ips)) self.assertEqual('10.0.0.3', ips[0]['ip_address'], '10.0.0.3') self.assertEqual(ips[0]['subnet_id'], subnet['subnet']['id']) def test_update_port_add_additional_ip(self): """Test update of port with additional IP.""" with self.subnet() as subnet: with self.port(subnet=subnet) as port: data = {'port': {'admin_state_up': False, 'fixed_ips': [{'subnet_id': subnet['subnet']['id']}, {'subnet_id': subnet['subnet']['id']}]}} req = self.new_update_request('ports', data, port['port']['id']) res = self.deserialize(self.fmt, req.get_response(self.api)) self.assertEqual(data['port']['admin_state_up'], res['port']['admin_state_up']) ips = res['port']['fixed_ips'] self.assertEqual(2, len(ips)) self.assertIn({'ip_address': '10.0.0.3', 'subnet_id': subnet['subnet']['id']}, ips) self.assertIn({'ip_address': '10.0.0.4', 'subnet_id': subnet['subnet']['id']}, ips) def test_update_port_invalid_fixed_ip_address_v6_slaac(self): with self.subnet( cidr='2607:f0d0:1002:51::/64', ip_version=6, ipv6_address_mode=constants.IPV6_SLAAC, gateway_ip=attributes.ATTR_NOT_SPECIFIED) as subnet: with self.port(subnet=subnet) as port: ips = port['port']['fixed_ips'] self.assertEqual(1, len(ips)) port_mac = port['port']['mac_address'] subnet_cidr = subnet['subnet']['cidr'] eui_addr = str(ipv6_utils.get_ipv6_addr_by_EUI64(subnet_cidr, port_mac)) self.assertEqual(ips[0]['ip_address'], eui_addr) self.assertEqual(ips[0]['subnet_id'], subnet['subnet']['id']) data = {'port': {'fixed_ips': [{'subnet_id': subnet['subnet']['id'], 'ip_address': '2607:f0d0:1002:51::5'}]}} req = self.new_update_request('ports', data, port['port']['id']) res = req.get_response(self.api) err = self.deserialize(self.fmt, res) self.assertEqual(webob.exc.HTTPClientError.code, res.status_int) self.assertEqual('InvalidInput', err['NeutronError']['type']) def test_requested_duplicate_mac(self): with self.port() as port: mac = port['port']['mac_address'] # check that MAC address matches base MAC base_mac = cfg.CONF.base_mac[0:2] self.assertTrue(mac.startswith(base_mac)) kwargs = {"mac_address": mac} net_id = port['port']['network_id'] res = self._create_port(self.fmt, net_id=net_id, **kwargs) self.assertEqual(webob.exc.HTTPConflict.code, res.status_int) def test_mac_generation(self): cfg.CONF.set_override('base_mac', "12:34:56:00:00:00") with self.port() as port: mac = port['port']['mac_address'] self.assertTrue(mac.startswith("12:34:56")) def test_mac_generation_4octet(self): cfg.CONF.set_override('base_mac', "12:34:56:78:00:00") with self.port() as port: mac = port['port']['mac_address'] self.assertTrue(mac.startswith("12:34:56:78")) def test_bad_mac_format(self): cfg.CONF.set_override('base_mac', "bad_mac") try: self.plugin._check_base_mac_format() except Exception: return self.fail("No exception for illegal base_mac format") def test_mac_exhaustion(self): # rather than actually consuming all MAC (would take a LONG time) # we try to allocate an already allocated mac address cfg.CONF.set_override('mac_generation_retries', 3) res = self._create_network(fmt=self.fmt, name='net1', admin_state_up=True) network = 
def test_mac_exhaustion(self): # rather than actually consuming all MAC addresses (which would take a # LONG time) we try to allocate an already allocated mac address cfg.CONF.set_override('mac_generation_retries', 3) res = self._create_network(fmt=self.fmt, name='net1', admin_state_up=True) network = self.deserialize(self.fmt, res) net_id = network['network']['id'] error = n_exc.MacAddressInUse(net_id=net_id, mac='00:11:22:33:44:55') with mock.patch.object( neutron.db.db_base_plugin_v2.NeutronDbPluginV2, '_create_port_with_mac', side_effect=error) as create_mock: res = self._create_port(self.fmt, net_id=net_id) self.assertEqual(webob.exc.HTTPServiceUnavailable.code, res.status_int) self.assertEqual(3, create_mock.call_count) def test_requested_duplicate_ip(self): with self.subnet() as subnet: with self.port(subnet=subnet) as port: ips = port['port']['fixed_ips'] self.assertEqual(1, len(ips)) self.assertEqual('10.0.0.2', ips[0]['ip_address']) self.assertEqual(ips[0]['subnet_id'], subnet['subnet']['id']) # Check that configuring a duplicate IP fails kwargs = {"fixed_ips": [{'subnet_id': subnet['subnet']['id'], 'ip_address': ips[0]['ip_address']}]} net_id = port['port']['network_id'] res = self._create_port(self.fmt, net_id=net_id, **kwargs) self.assertEqual(webob.exc.HTTPConflict.code, res.status_int) def test_requested_subnet_id(self): with self.subnet() as subnet: with self.port(subnet=subnet) as port: ips = port['port']['fixed_ips'] self.assertEqual(1, len(ips)) self.assertEqual('10.0.0.2', ips[0]['ip_address']) self.assertEqual(ips[0]['subnet_id'], subnet['subnet']['id']) # Request an IP from a specific subnet kwargs = {"fixed_ips": [{'subnet_id': subnet['subnet']['id']}]} net_id = port['port']['network_id'] res = self._create_port(self.fmt, net_id=net_id, **kwargs) port2 = self.deserialize(self.fmt, res) ips = port2['port']['fixed_ips'] self.assertEqual(1, len(ips)) self.assertEqual('10.0.0.3', ips[0]['ip_address']) self.assertEqual(ips[0]['subnet_id'], subnet['subnet']['id']) self._delete('ports', port2['port']['id']) def test_requested_subnet_id_not_on_network(self): with self.subnet() as subnet: with self.port(subnet=subnet) as port: # Create new network res = self._create_network(fmt=self.fmt, name='net2', admin_state_up=True) network2 = self.deserialize(self.fmt, res) subnet2 = self._make_subnet(self.fmt, network2, "1.1.1.1", "1.1.1.0/24", ip_version=4) net_id = port['port']['network_id'] # Request an IP from a specific subnet kwargs = {"fixed_ips": [{'subnet_id': subnet2['subnet']['id']}]} res = self._create_port(self.fmt, net_id=net_id, **kwargs) self.assertEqual(webob.exc.HTTPClientError.code, res.status_int) def test_overlapping_subnets(self): with self.subnet() as subnet: tenant_id = subnet['subnet']['tenant_id'] net_id = subnet['subnet']['network_id'] res = self._create_subnet(self.fmt, tenant_id=tenant_id, net_id=net_id, cidr='10.0.0.225/28', ip_version=4, gateway_ip=attributes.ATTR_NOT_SPECIFIED) self.assertEqual(webob.exc.HTTPClientError.code, res.status_int) def test_requested_subnet_id_v4_and_v6(self): with self.subnet() as subnet: # Get an IPv4 and an IPv6 address tenant_id = subnet['subnet']['tenant_id'] net_id = subnet['subnet']['network_id'] res = self._create_subnet( self.fmt, tenant_id=tenant_id, net_id=net_id, cidr='2607:f0d0:1002:51::/124', ip_version=6, gateway_ip=attributes.ATTR_NOT_SPECIFIED) subnet2 = self.deserialize(self.fmt, res) kwargs = {"fixed_ips": [{'subnet_id': subnet['subnet']['id']}, {'subnet_id': subnet2['subnet']['id']}]} res = self._create_port(self.fmt, net_id=net_id, **kwargs) port3 = self.deserialize(self.fmt, res) ips = port3['port']['fixed_ips'] self.assertEqual(2, len(ips)) self.assertIn({'ip_address': '10.0.0.2', 'subnet_id': subnet['subnet']['id']}, ips) self.assertIn({'ip_address': '2607:f0d0:1002:51::2',
'subnet_id': subnet2['subnet']['id']}, ips) res = self._create_port(self.fmt, net_id=net_id) port4 = self.deserialize(self.fmt, res) # Check that a v4 and a v6 address are allocated ips = port4['port']['fixed_ips'] self.assertEqual(2, len(ips)) self.assertIn({'ip_address': '10.0.0.3', 'subnet_id': subnet['subnet']['id']}, ips) self.assertIn({'ip_address': '2607:f0d0:1002:51::3', 'subnet_id': subnet2['subnet']['id']}, ips) self._delete('ports', port3['port']['id']) self._delete('ports', port4['port']['id']) def test_requested_invalid_fixed_ip_address_v6_slaac(self): with self.subnet(gateway_ip='fe80::1', cidr='2607:f0d0:1002:51::/64', ip_version=6, ipv6_address_mode=constants.IPV6_SLAAC) as subnet: kwargs = {"fixed_ips": [{'subnet_id': subnet['subnet']['id'], 'ip_address': '2607:f0d0:1002:51::5'}]} net_id = subnet['subnet']['network_id'] res = self._create_port(self.fmt, net_id=net_id, **kwargs) self.assertEqual(webob.exc.HTTPClientError.code, res.status_int) @mock.patch.object(non_ipam.IpamNonPluggableBackend, '_allocate_specific_ip') def test_requested_fixed_ip_address_v6_slaac_router_iface( self, alloc_specific_ip): with self.subnet(gateway_ip='fe80::1', cidr='fe80::/64', ip_version=6, ipv6_address_mode=constants.IPV6_SLAAC) as subnet: kwargs = {"fixed_ips": [{'subnet_id': subnet['subnet']['id'], 'ip_address': 'fe80::1'}]} net_id = subnet['subnet']['network_id'] device_owner = constants.DEVICE_OWNER_ROUTER_INTF res = self._create_port(self.fmt, net_id=net_id, device_owner=device_owner, **kwargs) port = self.deserialize(self.fmt, res) self.assertEqual(len(port['port']['fixed_ips']), 1) self.assertEqual(port['port']['fixed_ips'][0]['ip_address'], 'fe80::1') self.assertFalse(alloc_specific_ip.called) def test_requested_subnet_id_v6_slaac(self): with self.subnet(gateway_ip='fe80::1', cidr='2607:f0d0:1002:51::/64', ip_version=6, ipv6_address_mode=constants.IPV6_SLAAC) as subnet: with self.port(subnet, fixed_ips=[{'subnet_id': subnet['subnet']['id']}]) as port: port_mac = port['port']['mac_address'] subnet_cidr = subnet['subnet']['cidr'] eui_addr = str(ipv6_utils.get_ipv6_addr_by_EUI64(subnet_cidr, port_mac)) self.assertEqual(port['port']['fixed_ips'][0]['ip_address'], eui_addr) def test_requested_subnet_id_v4_and_v6_slaac(self): with self.network() as network: with self.subnet(network) as subnet,\ self.subnet( network, cidr='2607:f0d0:1002:51::/64', ip_version=6, gateway_ip='fe80::1', ipv6_address_mode=constants.IPV6_SLAAC) as subnet2: with self.port( subnet, fixed_ips=[{'subnet_id': subnet['subnet']['id']}, {'subnet_id': subnet2['subnet']['id']}] ) as port: ips = port['port']['fixed_ips'] self.assertEqual(2, len(ips)) self.assertIn({'ip_address': '10.0.0.2', 'subnet_id': subnet['subnet']['id']}, ips) port_mac = port['port']['mac_address'] subnet_cidr = subnet2['subnet']['cidr'] eui_addr = str(ipv6_utils.get_ipv6_addr_by_EUI64( subnet_cidr, port_mac)) self.assertIn({'ip_address': eui_addr, 'subnet_id': subnet2['subnet']['id']}, ips) def test_create_router_port_ipv4_and_ipv6_slaac_no_fixed_ips(self): with self.network() as network: # Create an IPv4 and an IPv6 SLAAC subnet on the network with self.subnet(network),\ self.subnet(network, cidr='2607:f0d0:1002:51::/64', ip_version=6, gateway_ip='fe80::1', ipv6_address_mode=constants.IPV6_SLAAC): # Create a router port without specifying fixed_ips port = self._make_port( self.fmt, network['network']['id'], device_owner=constants.DEVICE_OWNER_ROUTER_INTF) # Router port should only have an IPv4 address fixed_ips = port['port']['fixed_ips'] 
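All of the SLAAC assertions above reduce to the modified EUI-64 derivation of RFC 4291: ff:fe is inserted into the middle of the 48-bit MAC, the universal/local bit is flipped, and the result is appended to the /64 prefix. The sketch below (the helper name slaac_addr is ours, not the tree's) approximates what ipv6_utils.get_ipv6_addr_by_EUI64 returns for a given prefix and MAC:

import netaddr

def slaac_addr(prefix, mac):
    # Illustrative sketch only, not part of this test suite.
    eui64 = int(netaddr.EUI(mac).eui64())   # 48-bit MAC with ff:fe inserted
    net = netaddr.IPNetwork(prefix)
    # XOR with 1 << 57 flips the universal/local bit of the interface ID.
    return netaddr.IPAddress(net.first + (eui64 ^ (1 << 57)), version=6)

# slaac_addr('2607:f0d0:1002:51::/64', '00:16:3e:33:44:55')
# -> IPAddress('2607:f0d0:1002:51:216:3eff:fe33:4455')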
self.assertEqual(1, len(fixed_ips)) self.assertEqual('10.0.0.2', fixed_ips[0]['ip_address']) def _make_v6_subnet(self, network, ra_addr_mode, ipv6_pd=False): cidr = 'fe80::/64' gateway = 'fe80::1' subnetpool_id = None if ipv6_pd: cidr = None gateway = None subnetpool_id = constants.IPV6_PD_POOL_ID cfg.CONF.set_override('ipv6_pd_enabled', True) return (self._make_subnet(self.fmt, network, gateway=gateway, subnetpool_id=subnetpool_id, cidr=cidr, ip_version=6, ipv6_ra_mode=ra_addr_mode, ipv6_address_mode=ra_addr_mode)) @staticmethod def _calc_ipv6_addr_by_EUI64(port, subnet): port_mac = port['port']['mac_address'] subnet_cidr = subnet['subnet']['cidr'] return str(ipv6_utils.get_ipv6_addr_by_EUI64(subnet_cidr, port_mac)) def test_ip_allocation_for_ipv6_subnet_slaac_address_mode(self): res = self._create_network(fmt=self.fmt, name='net', admin_state_up=True) network = self.deserialize(self.fmt, res) subnet = self._make_v6_subnet(network, constants.IPV6_SLAAC) port = self._make_port(self.fmt, network['network']['id']) self.assertEqual(1, len(port['port']['fixed_ips'])) self.assertEqual(self._calc_ipv6_addr_by_EUI64(port, subnet), port['port']['fixed_ips'][0]['ip_address']) def _test_create_port_with_ipv6_subnet_in_fixed_ips(self, addr_mode, ipv6_pd=False): """Test port create with an IPv6 subnet incl in fixed IPs.""" with self.network(name='net') as network: subnet = self._make_v6_subnet(network, addr_mode, ipv6_pd) subnet_id = subnet['subnet']['id'] fixed_ips = [{'subnet_id': subnet_id}] with self.port(subnet=subnet, fixed_ips=fixed_ips) as port: if addr_mode == constants.IPV6_SLAAC: exp_ip_addr = self._calc_ipv6_addr_by_EUI64(port, subnet) else: exp_ip_addr = 'fe80::2' port_fixed_ips = port['port']['fixed_ips'] self.assertEqual(1, len(port_fixed_ips)) self.assertEqual(exp_ip_addr, port_fixed_ips[0]['ip_address']) def test_create_port_with_ipv6_slaac_subnet_in_fixed_ips(self): self._test_create_port_with_ipv6_subnet_in_fixed_ips( addr_mode=constants.IPV6_SLAAC) def test_create_port_with_ipv6_pd_subnet_in_fixed_ips(self): self._test_create_port_with_ipv6_subnet_in_fixed_ips( addr_mode=constants.IPV6_SLAAC, ipv6_pd=True) def test_create_port_with_ipv6_dhcp_stateful_subnet_in_fixed_ips(self): self._test_create_port_with_ipv6_subnet_in_fixed_ips( addr_mode=constants.DHCPV6_STATEFUL) def test_create_port_with_multiple_ipv4_and_ipv6_subnets(self): """Test port create with multiple IPv4, IPv6 DHCP/SLAAC subnets.""" res = self._create_network(fmt=self.fmt, name='net', admin_state_up=True) network = self.deserialize(self.fmt, res) sub_dicts = [ {'gateway': '10.0.0.1', 'cidr': '10.0.0.0/24', 'ip_version': 4, 'ra_addr_mode': None}, {'gateway': '10.0.1.1', 'cidr': '10.0.1.0/24', 'ip_version': 4, 'ra_addr_mode': None}, {'gateway': 'fe80::1', 'cidr': 'fe80::/64', 'ip_version': 6, 'ra_addr_mode': constants.IPV6_SLAAC}, {'gateway': 'fe81::1', 'cidr': 'fe81::/64', 'ip_version': 6, 'ra_addr_mode': constants.IPV6_SLAAC}, {'gateway': 'fe82::1', 'cidr': 'fe82::/64', 'ip_version': 6, 'ra_addr_mode': constants.DHCPV6_STATEFUL}, {'gateway': 'fe83::1', 'cidr': 'fe83::/64', 'ip_version': 6, 'ra_addr_mode': constants.DHCPV6_STATEFUL}] subnets = {} for sub_dict in sub_dicts: subnet = self._make_subnet( self.fmt, network, gateway=sub_dict['gateway'], cidr=sub_dict['cidr'], ip_version=sub_dict['ip_version'], ipv6_ra_mode=sub_dict['ra_addr_mode'], ipv6_address_mode=sub_dict['ra_addr_mode']) subnets[subnet['subnet']['id']] = sub_dict res = self._create_port(self.fmt, net_id=network['network']['id']) port = 
self.deserialize(self.fmt, res) # Since the create port request was made without a list of fixed IPs, # the port should be associated with addresses for one of the # IPv4 subnets, one of the DHCPv6 subnets, and both of the IPv6 # SLAAC subnets. self.assertEqual(4, len(port['port']['fixed_ips'])) addr_mode_count = {None: 0, constants.DHCPV6_STATEFUL: 0, constants.IPV6_SLAAC: 0} for fixed_ip in port['port']['fixed_ips']: subnet_id = fixed_ip['subnet_id'] if subnet_id in subnets: addr_mode_count[subnets[subnet_id]['ra_addr_mode']] += 1 self.assertEqual(1, addr_mode_count[None]) self.assertEqual(1, addr_mode_count[constants.DHCPV6_STATEFUL]) self.assertEqual(2, addr_mode_count[constants.IPV6_SLAAC]) def test_delete_port_with_ipv6_slaac_address(self): """Test that a port with an IPv6 SLAAC address can be deleted.""" res = self._create_network(fmt=self.fmt, name='net', admin_state_up=True) network = self.deserialize(self.fmt, res) # Create a port that has an associated IPv6 SLAAC address self._make_v6_subnet(network, constants.IPV6_SLAAC) res = self._create_port(self.fmt, net_id=network['network']['id']) port = self.deserialize(self.fmt, res) self.assertEqual(1, len(port['port']['fixed_ips'])) # Confirm that the port can be deleted self._delete('ports', port['port']['id']) self._show('ports', port['port']['id'], expected_code=webob.exc.HTTPNotFound.code) def test_update_port_with_ipv6_slaac_subnet_in_fixed_ips(self): """Test port update with an IPv6 SLAAC subnet in fixed IPs.""" res = self._create_network(fmt=self.fmt, name='net', admin_state_up=True) network = self.deserialize(self.fmt, res) # Create a port using an IPv4 subnet and an IPv6 SLAAC subnet self._make_subnet(self.fmt, network, gateway='10.0.0.1', cidr='10.0.0.0/24', ip_version=4) subnet_v6 = self._make_v6_subnet(network, constants.IPV6_SLAAC) res = self._create_port(self.fmt, net_id=network['network']['id']) port = self.deserialize(self.fmt, res) self.assertEqual(2, len(port['port']['fixed_ips'])) # Update port including only the IPv6 SLAAC subnet data = {'port': {'fixed_ips': [{'subnet_id': subnet_v6['subnet']['id']}]}} req = self.new_update_request('ports', data, port['port']['id']) res = self.deserialize(self.fmt, req.get_response(self.api)) # Port should only have an address corresponding to IPv6 SLAAC subnet ips = res['port']['fixed_ips'] self.assertEqual(1, len(ips)) self.assertEqual(self._calc_ipv6_addr_by_EUI64(port, subnet_v6), ips[0]['ip_address']) def test_update_port_excluding_ipv6_slaac_subnet_from_fixed_ips(self): """Test port update excluding IPv6 SLAAC subnet from fixed ips.""" res = self._create_network(fmt=self.fmt, name='net', admin_state_up=True) network = self.deserialize(self.fmt, res) # Create a port using an IPv4 subnet and an IPv6 SLAAC subnet subnet_v4 = self._make_subnet(self.fmt, network, gateway='10.0.0.1', cidr='10.0.0.0/24', ip_version=4) subnet_v6 = self._make_v6_subnet(network, constants.IPV6_SLAAC) res = self._create_port(self.fmt, net_id=network['network']['id']) port = self.deserialize(self.fmt, res) self.assertEqual(2, len(port['port']['fixed_ips'])) # Update port including only the IPv4 subnet data = {'port': {'fixed_ips': [{'subnet_id': subnet_v4['subnet']['id'], 'ip_address': "10.0.0.10"}]}} req = self.new_update_request('ports', data, port['port']['id']) res = self.deserialize(self.fmt, req.get_response(self.api)) # Port should still have an addr corresponding to IPv6 SLAAC subnet ips = res['port']['fixed_ips'] self.assertEqual(2, len(ips)) eui_addr = self._calc_ipv6_addr_by_EUI64(port, 
subnet_v6) expected_v6_ip = {'subnet_id': subnet_v6['subnet']['id'], 'ip_address': eui_addr} self.assertIn(expected_v6_ip, ips) def test_ip_allocation_for_ipv6_2_subnet_slaac_mode(self): res = self._create_network(fmt=self.fmt, name='net', admin_state_up=True) network = self.deserialize(self.fmt, res) v6_subnet_1 = self._make_subnet(self.fmt, network, gateway='2001:100::1', cidr='2001:100::0/64', ip_version=6, ipv6_ra_mode=constants.IPV6_SLAAC) v6_subnet_2 = self._make_subnet(self.fmt, network, gateway='2001:200::1', cidr='2001:200::0/64', ip_version=6, ipv6_ra_mode=constants.IPV6_SLAAC) port = self._make_port(self.fmt, network['network']['id']) port_mac = port['port']['mac_address'] cidr_1 = v6_subnet_1['subnet']['cidr'] cidr_2 = v6_subnet_2['subnet']['cidr'] eui_addr_1 = str(ipv6_utils.get_ipv6_addr_by_EUI64(cidr_1, port_mac)) eui_addr_2 = str(ipv6_utils.get_ipv6_addr_by_EUI64(cidr_2, port_mac)) self.assertEqual({eui_addr_1, eui_addr_2}, {fixed_ip['ip_address'] for fixed_ip in port['port']['fixed_ips']}) def test_range_allocation(self): with self.subnet(gateway_ip='10.0.0.3', cidr='10.0.0.0/29') as subnet: kwargs = {"fixed_ips": [{'subnet_id': subnet['subnet']['id']}, {'subnet_id': subnet['subnet']['id']}, {'subnet_id': subnet['subnet']['id']}, {'subnet_id': subnet['subnet']['id']}, {'subnet_id': subnet['subnet']['id']}]} net_id = subnet['subnet']['network_id'] res = self._create_port(self.fmt, net_id=net_id, **kwargs) port = self.deserialize(self.fmt, res) ips = port['port']['fixed_ips'] self.assertEqual(5, len(ips)) alloc = ['10.0.0.1', '10.0.0.2', '10.0.0.4', '10.0.0.5', '10.0.0.6'] for ip in ips: self.assertIn(ip['ip_address'], alloc) self.assertEqual(ip['subnet_id'], subnet['subnet']['id']) alloc.remove(ip['ip_address']) self.assertEqual(0, len(alloc)) self._delete('ports', port['port']['id']) with self.subnet(gateway_ip='11.0.0.6', cidr='11.0.0.0/29') as subnet: kwargs = {"fixed_ips": [{'subnet_id': subnet['subnet']['id']}, {'subnet_id': subnet['subnet']['id']}, {'subnet_id': subnet['subnet']['id']}, {'subnet_id': subnet['subnet']['id']}, {'subnet_id': subnet['subnet']['id']}]} net_id = subnet['subnet']['network_id'] res = self._create_port(self.fmt, net_id=net_id, **kwargs) port = self.deserialize(self.fmt, res) ips = port['port']['fixed_ips'] self.assertEqual(5, len(ips)) alloc = ['11.0.0.1', '11.0.0.2', '11.0.0.3', '11.0.0.4', '11.0.0.5'] for ip in ips: self.assertIn(ip['ip_address'], alloc) self.assertEqual(ip['subnet_id'], subnet['subnet']['id']) alloc.remove(ip['ip_address']) self.assertEqual(0, len(alloc)) self._delete('ports', port['port']['id']) def test_requested_invalid_fixed_ips(self): with self.subnet() as subnet: with self.port(subnet=subnet) as port: ips = port['port']['fixed_ips'] self.assertEqual(1, len(ips)) self.assertEqual('10.0.0.2', ips[0]['ip_address']) self.assertEqual(ips[0]['subnet_id'], subnet['subnet']['id']) # Test invalid subnet_id kwargs = {"fixed_ips": [{'subnet_id': subnet['subnet']['id']}, {'subnet_id': '00000000-ffff-ffff-ffff-000000000000'}]} net_id = port['port']['network_id'] res = self._create_port(self.fmt, net_id=net_id, **kwargs) port2 = self.deserialize(self.fmt, res) self.assertEqual(webob.exc.HTTPNotFound.code, res.status_int) # Test invalid IP address on specified subnet_id kwargs = {"fixed_ips": [{'subnet_id': subnet['subnet']['id'], 'ip_address': '1.1.1.1'}]} net_id = port['port']['network_id'] res = self._create_port(self.fmt, net_id=net_id, **kwargs) port2 = self.deserialize(self.fmt, res) 
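The 4xx responses asserted in this test boil down to a containment check: a requested fixed IP must fall within the CIDR of the subnet it names. For reference, a minimal netaddr illustration of that check (ours, not the plugin's actual validation code):

import netaddr

subnet_cidr = netaddr.IPNetwork('10.0.0.0/24')
netaddr.IPAddress('1.1.1.1') in subnet_cidr    # False -> request is rejected
netaddr.IPAddress('10.0.0.5') in subnet_cidr   # True -> address is allocatable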
self.assertEqual(webob.exc.HTTPClientError.code, res.status_int) # Test invalid addresses - IP's not on subnet or network # address or broadcast address bad_ips = ['1.1.1.1', '10.0.0.0', '10.0.0.255'] net_id = port['port']['network_id'] for ip in bad_ips: kwargs = {"fixed_ips": [{'ip_address': ip}]} res = self._create_port(self.fmt, net_id=net_id, **kwargs) port2 = self.deserialize(self.fmt, res) self.assertEqual(webob.exc.HTTPClientError.code, res.status_int) # Enable allocation of gateway address kwargs = {"fixed_ips": [{'subnet_id': subnet['subnet']['id'], 'ip_address': '10.0.0.1'}]} net_id = port['port']['network_id'] res = self._create_port(self.fmt, net_id=net_id, **kwargs) port2 = self.deserialize(self.fmt, res) ips = port2['port']['fixed_ips'] self.assertEqual(1, len(ips)) self.assertEqual('10.0.0.1', ips[0]['ip_address']) self.assertEqual(subnet['subnet']['id'], ips[0]['subnet_id']) self._delete('ports', port2['port']['id']) def test_invalid_ip(self): with self.subnet() as subnet: # Allocate specific IP kwargs = {"fixed_ips": [{'subnet_id': subnet['subnet']['id'], 'ip_address': '1011.0.0.5'}]} net_id = subnet['subnet']['network_id'] res = self._create_port(self.fmt, net_id=net_id, **kwargs) self.assertEqual(webob.exc.HTTPClientError.code, res.status_int) def test_requested_split(self): with self.subnet() as subnet: with self.port(subnet=subnet) as port: ports_to_delete = [] ips = port['port']['fixed_ips'] self.assertEqual(1, len(ips)) self.assertEqual('10.0.0.2', ips[0]['ip_address']) self.assertEqual(ips[0]['subnet_id'], subnet['subnet']['id']) # Allocate specific IP kwargs = {"fixed_ips": [{'subnet_id': subnet['subnet']['id'], 'ip_address': '10.0.0.5'}]} net_id = port['port']['network_id'] res = self._create_port(self.fmt, net_id=net_id, **kwargs) port2 = self.deserialize(self.fmt, res) ports_to_delete.append(port2) ips = port2['port']['fixed_ips'] self.assertEqual(1, len(ips)) self.assertEqual('10.0.0.5', ips[0]['ip_address']) self.assertEqual(subnet['subnet']['id'], ips[0]['subnet_id']) # Allocate specific IP's allocated = ['10.0.0.3', '10.0.0.4', '10.0.0.6'] for a in allocated: res = self._create_port(self.fmt, net_id=net_id) port2 = self.deserialize(self.fmt, res) ports_to_delete.append(port2) ips = port2['port']['fixed_ips'] self.assertEqual(1, len(ips)) self.assertEqual(a, ips[0]['ip_address']) self.assertEqual(subnet['subnet']['id'], ips[0]['subnet_id']) for p in ports_to_delete: self._delete('ports', p['port']['id']) def test_duplicate_ips(self): with self.subnet() as subnet: # Allocate specific IP kwargs = {"fixed_ips": [{'subnet_id': subnet['subnet']['id'], 'ip_address': '10.0.0.5'}, {'subnet_id': subnet['subnet']['id'], 'ip_address': '10.0.0.5'}]} net_id = subnet['subnet']['network_id'] res = self._create_port(self.fmt, net_id=net_id, **kwargs) self.assertEqual(webob.exc.HTTPClientError.code, res.status_int) def test_fixed_ip_invalid_subnet_id(self): with self.subnet() as subnet: # Allocate specific IP kwargs = {"fixed_ips": [{'subnet_id': 'i am invalid', 'ip_address': '10.0.0.5'}]} net_id = subnet['subnet']['network_id'] res = self._create_port(self.fmt, net_id=net_id, **kwargs) self.assertEqual(webob.exc.HTTPClientError.code, res.status_int) def test_fixed_ip_invalid_ip(self): with self.subnet() as subnet: # Allocate specific IP kwargs = {"fixed_ips": [{'subnet_id': subnet['subnet']['id'], 'ip_address': '10.0.0.55555'}]} net_id = subnet['subnet']['network_id'] res = self._create_port(self.fmt, net_id=net_id, **kwargs) self.assertEqual(webob.exc.HTTPClientError.code, 
res.status_int) def test_requested_ips_only(self): with self.subnet() as subnet: with self.port(subnet=subnet) as port: ips = port['port']['fixed_ips'] self.assertEqual(1, len(ips)) self.assertEqual('10.0.0.2', ips[0]['ip_address']) self.assertEqual(ips[0]['subnet_id'], subnet['subnet']['id']) ips_only = ['10.0.0.18', '10.0.0.20', '10.0.0.22', '10.0.0.21', '10.0.0.3', '10.0.0.17', '10.0.0.19'] ports_to_delete = [] for i in ips_only: kwargs = {"fixed_ips": [{'ip_address': i}]} net_id = port['port']['network_id'] res = self._create_port(self.fmt, net_id=net_id, **kwargs) port = self.deserialize(self.fmt, res) ports_to_delete.append(port) ips = port['port']['fixed_ips'] self.assertEqual(1, len(ips)) self.assertEqual(i, ips[0]['ip_address']) self.assertEqual(subnet['subnet']['id'], ips[0]['subnet_id']) for p in ports_to_delete: self._delete('ports', p['port']['id']) def test_invalid_admin_state(self): with self.network() as network: data = {'port': {'network_id': network['network']['id'], 'tenant_id': network['network']['tenant_id'], 'admin_state_up': 7, 'fixed_ips': []}} port_req = self.new_create_request('ports', data) res = port_req.get_response(self.api) self.assertEqual(webob.exc.HTTPClientError.code, res.status_int) def test_invalid_mac_address(self): with self.network() as network: data = {'port': {'network_id': network['network']['id'], 'tenant_id': network['network']['tenant_id'], 'admin_state_up': 1, 'mac_address': 'mac', 'fixed_ips': []}} port_req = self.new_create_request('ports', data) res = port_req.get_response(self.api) self.assertEqual(webob.exc.HTTPClientError.code, res.status_int) def test_max_fixed_ips_exceeded(self): with self.subnet(gateway_ip='10.0.0.3', cidr='10.0.0.0/24') as subnet: kwargs = {"fixed_ips": [{'subnet_id': subnet['subnet']['id']}, {'subnet_id': subnet['subnet']['id']}, {'subnet_id': subnet['subnet']['id']}, {'subnet_id': subnet['subnet']['id']}, {'subnet_id': subnet['subnet']['id']}, {'subnet_id': subnet['subnet']['id']}]} net_id = subnet['subnet']['network_id'] res = self._create_port(self.fmt, net_id=net_id, **kwargs) self.assertEqual(webob.exc.HTTPClientError.code, res.status_int) def test_update_max_fixed_ips_exceeded(self): with self.subnet(gateway_ip='10.0.0.3', cidr='10.0.0.0/24') as subnet: with self.port(subnet) as port: data = {'port': {'fixed_ips': [{'subnet_id': subnet['subnet']['id'], 'ip_address': '10.0.0.2'}, {'subnet_id': subnet['subnet']['id'], 'ip_address': '10.0.0.4'}, {'subnet_id': subnet['subnet']['id']}, {'subnet_id': subnet['subnet']['id']}, {'subnet_id': subnet['subnet']['id']}, {'subnet_id': subnet['subnet']['id']}]}} req = self.new_update_request('ports', data, port['port']['id']) res = req.get_response(self.api) self.assertEqual(webob.exc.HTTPClientError.code, res.status_int) def test_delete_ports_by_device_id(self): plugin = manager.NeutronManager.get_plugin() ctx = context.get_admin_context() with self.subnet() as subnet: with self.port(subnet=subnet, device_id='owner1') as p1,\ self.port(subnet=subnet, device_id='owner1') as p2,\ self.port(subnet=subnet, device_id='owner2') as p3: network_id = subnet['subnet']['network_id'] plugin.delete_ports_by_device_id(ctx, 'owner1', network_id) self._show('ports', p1['port']['id'], expected_code=webob.exc.HTTPNotFound.code) self._show('ports', p2['port']['id'], expected_code=webob.exc.HTTPNotFound.code) self._show('ports', p3['port']['id'], expected_code=webob.exc.HTTPOk.code) def _test_delete_ports_by_device_id_second_call_failure(self, plugin): ctx = context.get_admin_context() with 
self.subnet() as subnet: with self.port(subnet=subnet, device_id='owner1') as p1,\ self.port(subnet=subnet, device_id='owner1') as p2,\ self.port(subnet=subnet, device_id='owner2') as p3: orig = plugin.delete_port with mock.patch.object(plugin, 'delete_port') as del_port: def side_effect(*args, **kwargs): return self._fail_second_call(del_port, orig, *args, **kwargs) del_port.side_effect = side_effect network_id = subnet['subnet']['network_id'] self.assertRaises(n_exc.NeutronException, plugin.delete_ports_by_device_id, ctx, 'owner1', network_id) statuses = { self._show_response('ports', p['port']['id']).status_int for p in [p1, p2]} expected = {webob.exc.HTTPNotFound.code, webob.exc.HTTPOk.code} self.assertEqual(expected, statuses) self._show('ports', p3['port']['id'], expected_code=webob.exc.HTTPOk.code) def test_delete_ports_by_device_id_second_call_failure(self): plugin = manager.NeutronManager.get_plugin() self._test_delete_ports_by_device_id_second_call_failure(plugin) def _test_delete_ports_ignores_port_not_found(self, plugin): ctx = context.get_admin_context() with self.subnet() as subnet: with self.port(subnet=subnet, device_id='owner1') as p,\ mock.patch.object(plugin, 'delete_port') as del_port: del_port.side_effect = n_exc.PortNotFound( port_id=p['port']['id'] ) network_id = subnet['subnet']['network_id'] try: plugin.delete_ports_by_device_id(ctx, 'owner1', network_id) except n_exc.PortNotFound: self.fail("delete_ports_by_device_id unexpectedly raised " "a PortNotFound exception. It should ignore " "this exception because it is often called at " "the same time other concurrent operations are " "deleting some of the same ports.") def test_delete_ports_ignores_port_not_found(self): plugin = manager.NeutronManager.get_plugin() self._test_delete_ports_ignores_port_not_found(plugin) class TestNetworksV2(NeutronDbPluginV2TestCase): # NOTE(cerberus): successful network update and delete are # effectively tested above def test_create_network(self): name = 'net1' keys = [('subnets', []), ('name', name), ('admin_state_up', True), ('status', self.net_create_status), ('shared', False)] with self.network(name=name) as net: for k, v in keys: self.assertEqual(net['network'][k], v) def test_create_public_network(self): name = 'public_net' keys = [('subnets', []), ('name', name), ('admin_state_up', True), ('status', self.net_create_status), ('shared', True)] with self.network(name=name, shared=True) as net: for k, v in keys: self.assertEqual(net['network'][k], v) def test_create_public_network_no_admin_tenant(self): name = 'public_net' with testlib_api.ExpectedException( webob.exc.HTTPClientError) as ctx_manager: with self.network(name=name, shared=True, tenant_id="another_tenant", set_context=True): pass self.assertEqual(webob.exc.HTTPForbidden.code, ctx_manager.exception.code) def test_update_network(self): with self.network() as network: data = {'network': {'name': 'a_brand_new_name'}} req = self.new_update_request('networks', data, network['network']['id']) res = self.deserialize(self.fmt, req.get_response(self.api)) self.assertEqual(data['network']['name'], res['network']['name']) def test_update_shared_network_noadmin_returns_403(self): with self.network(shared=True) as network: data = {'network': {'name': 'a_brand_new_name'}} req = self.new_update_request('networks', data, network['network']['id']) req.environ['neutron.context'] = context.Context('', 'somebody') res = req.get_response(self.api) # The API layer always returns 404 on updates in place of 403 
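The substitution is deliberate: answering 403 would confirm to an unauthorized caller that the resource exists, so authorization failures on update surface as if the resource were absent. A hypothetical condensation of that convention (translate_update_auth_failure is our name, not the API layer's):

import webob.exc

def translate_update_auth_failure(policy_error):
    # Hypothetical sketch: report "not found" rather than "forbidden" so
    # the response does not reveal whether the resource exists.
    return webob.exc.HTTPNotFound(str(policy_error))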
self.assertEqual(webob.exc.HTTPNotFound.code, res.status_int) def test_update_network_set_shared(self): with self.network(shared=False) as network: data = {'network': {'shared': True}} req = self.new_update_request('networks', data, network['network']['id']) res = self.deserialize(self.fmt, req.get_response(self.api)) self.assertTrue(res['network']['shared']) def test_update_network_set_shared_owner_returns_403(self): with self.network(shared=False) as network: net_owner = network['network']['tenant_id'] data = {'network': {'shared': True}} req = self.new_update_request('networks', data, network['network']['id']) req.environ['neutron.context'] = context.Context('u', net_owner) res = req.get_response(self.api) self.assertEqual(webob.exc.HTTPForbidden.code, res.status_int) def test_update_network_with_subnet_set_shared(self): with self.network(shared=False) as network: with self.subnet(network=network) as subnet: data = {'network': {'shared': True}} req = self.new_update_request('networks', data, network['network']['id']) res = self.deserialize(self.fmt, req.get_response(self.api)) self.assertTrue(res['network']['shared']) # must query db to see whether subnet's shared attribute # has been updated or not ctx = context.Context('', '', is_admin=True) subnet_db = manager.NeutronManager.get_plugin().get_subnet( ctx, subnet['subnet']['id']) self.assertTrue(subnet_db['shared']) def test_update_network_set_not_shared_single_tenant(self): with self.network(shared=True) as network: res1 = self._create_port(self.fmt, network['network']['id'], webob.exc.HTTPCreated.code, tenant_id=network['network']['tenant_id'], set_context=True) data = {'network': {'shared': False}} req = self.new_update_request('networks', data, network['network']['id']) res = self.deserialize(self.fmt, req.get_response(self.api)) self.assertFalse(res['network']['shared']) port1 = self.deserialize(self.fmt, res1) self._delete('ports', port1['port']['id']) def test_update_network_set_not_shared_other_tenant_returns_409(self): with self.network(shared=True) as network: res1 = self._create_port(self.fmt, network['network']['id'], webob.exc.HTTPCreated.code, tenant_id='somebody_else', set_context=True) data = {'network': {'shared': False}} req = self.new_update_request('networks', data, network['network']['id']) self.assertEqual(webob.exc.HTTPConflict.code, req.get_response(self.api).status_int) port1 = self.deserialize(self.fmt, res1) self._delete('ports', port1['port']['id']) def test_update_network_set_not_shared_multi_tenants_returns_409(self): with self.network(shared=True) as network: res1 = self._create_port(self.fmt, network['network']['id'], webob.exc.HTTPCreated.code, tenant_id='somebody_else', set_context=True) res2 = self._create_port(self.fmt, network['network']['id'], webob.exc.HTTPCreated.code, tenant_id=network['network']['tenant_id'], set_context=True) data = {'network': {'shared': False}} req = self.new_update_request('networks', data, network['network']['id']) self.assertEqual(webob.exc.HTTPConflict.code, req.get_response(self.api).status_int) port1 = self.deserialize(self.fmt, res1) port2 = self.deserialize(self.fmt, res2) self._delete('ports', port1['port']['id']) self._delete('ports', port2['port']['id']) def test_update_network_set_not_shared_multi_tenants2_returns_409(self): with self.network(shared=True) as network: res1 = self._create_port(self.fmt, network['network']['id'], webob.exc.HTTPCreated.code, tenant_id='somebody_else', set_context=True) self._create_subnet(self.fmt, network['network']['id'], 
'10.0.0.0/24', webob.exc.HTTPCreated.code, tenant_id=network['network']['tenant_id'], set_context=True) data = {'network': {'shared': False}} req = self.new_update_request('networks', data, network['network']['id']) self.assertEqual(webob.exc.HTTPConflict.code, req.get_response(self.api).status_int) port1 = self.deserialize(self.fmt, res1) self._delete('ports', port1['port']['id']) def test_create_networks_bulk_native(self): if self._skip_native_bulk: self.skipTest("Plugin does not support native bulk network create") res = self._create_network_bulk(self.fmt, 2, 'test', True) self._validate_behavior_on_bulk_success(res, 'networks') def test_create_networks_bulk_native_quotas(self): if self._skip_native_bulk: self.skipTest("Plugin does not support native bulk network create") quota = 4 cfg.CONF.set_override('quota_network', quota, group='QUOTAS') res = self._create_network_bulk(self.fmt, quota + 1, 'test', True) self._validate_behavior_on_bulk_failure( res, 'networks', errcode=webob.exc.HTTPConflict.code) def test_create_networks_bulk_tenants_and_quotas(self): if self._skip_native_bulk: self.skipTest("Plugin does not support native bulk network create") quota = 2 cfg.CONF.set_override('quota_network', quota, group='QUOTAS') networks = [{'network': {'name': 'n1', 'tenant_id': self._tenant_id}}, {'network': {'name': 'n2', 'tenant_id': self._tenant_id}}, {'network': {'name': 'n1', 'tenant_id': 't1'}}, {'network': {'name': 'n2', 'tenant_id': 't1'}}] res = self._create_bulk_from_list(self.fmt, 'network', networks) self.assertEqual(webob.exc.HTTPCreated.code, res.status_int) def test_create_networks_bulk_tenants_and_quotas_fail(self): if self._skip_native_bulk: self.skipTest("Plugin does not support native bulk network create") quota = 2 cfg.CONF.set_override('quota_network', quota, group='QUOTAS') networks = [{'network': {'name': 'n1', 'tenant_id': self._tenant_id}}, {'network': {'name': 'n2', 'tenant_id': self._tenant_id}}, {'network': {'name': 'n1', 'tenant_id': 't1'}}, {'network': {'name': 'n3', 'tenant_id': self._tenant_id}}, {'network': {'name': 'n2', 'tenant_id': 't1'}}] res = self._create_bulk_from_list(self.fmt, 'network', networks) self.assertEqual(webob.exc.HTTPConflict.code, res.status_int) def test_create_networks_bulk_emulated(self): real_has_attr = hasattr #ensures the API choose the emulation code path def fakehasattr(item, attr): if attr.endswith('__native_bulk_support'): return False return real_has_attr(item, attr) with mock.patch('six.moves.builtins.hasattr', new=fakehasattr): res = self._create_network_bulk(self.fmt, 2, 'test', True) self._validate_behavior_on_bulk_success(res, 'networks') def test_create_networks_bulk_wrong_input(self): res = self._create_network_bulk(self.fmt, 2, 'test', True, override={1: {'admin_state_up': 'doh'}}) self.assertEqual(webob.exc.HTTPClientError.code, res.status_int) req = self.new_list_request('networks') res = req.get_response(self.api) self.assertEqual(webob.exc.HTTPOk.code, res.status_int) nets = self.deserialize(self.fmt, res) self.assertEqual(0, len(nets['networks'])) def test_create_networks_bulk_emulated_plugin_failure(self): real_has_attr = hasattr def fakehasattr(item, attr): if attr.endswith('__native_bulk_support'): return False return real_has_attr(item, attr) orig = manager.NeutronManager.get_plugin().create_network #ensures the API choose the emulation code path with mock.patch('six.moves.builtins.hasattr', new=fakehasattr): method_to_patch = _get_create_db_method('network') with 
mock.patch.object(manager.NeutronManager.get_plugin(), method_to_patch) as patched_plugin: def side_effect(*args, **kwargs): return self._fail_second_call(patched_plugin, orig, *args, **kwargs) patched_plugin.side_effect = side_effect res = self._create_network_bulk(self.fmt, 2, 'test', True) # We expect a 500 as we injected a fault in the plugin self._validate_behavior_on_bulk_failure( res, 'networks', webob.exc.HTTPServerError.code ) def test_create_networks_bulk_native_plugin_failure(self): if self._skip_native_bulk: self.skipTest("Plugin does not support native bulk network create") orig = manager.NeutronManager.get_plugin().create_network method_to_patch = _get_create_db_method('network') with mock.patch.object(manager.NeutronManager.get_plugin(), method_to_patch) as patched_plugin: def side_effect(*args, **kwargs): return self._fail_second_call(patched_plugin, orig, *args, **kwargs) patched_plugin.side_effect = side_effect res = self._create_network_bulk(self.fmt, 2, 'test', True) # We expect a 500 as we injected a fault in the plugin self._validate_behavior_on_bulk_failure( res, 'networks', webob.exc.HTTPServerError.code ) def test_list_networks(self): with self.network() as v1, self.network() as v2, self.network() as v3: networks = (v1, v2, v3) self._test_list_resources('network', networks) def test_list_networks_with_sort_native(self): if self._skip_native_sorting: self.skipTest("Skip test for not implemented sorting feature") with self.network(admin_state_up=True, name='net1') as net1,\ self.network(admin_state_up=False, name='net2') as net2,\ self.network(admin_state_up=False, name='net3') as net3: self._test_list_with_sort('network', (net3, net2, net1), [('admin_state_up', 'asc'), ('name', 'desc')]) def test_list_networks_with_sort_extended_attr_native_returns_400(self): if self._skip_native_sorting: self.skipTest("Skip test for not implemented sorting feature") with self.network(admin_state_up=True, name='net1'),\ self.network(admin_state_up=False, name='net2'),\ self.network(admin_state_up=False, name='net3'): req = self.new_list_request( 'networks', params='sort_key=provider:segmentation_id&sort_dir=asc') res = req.get_response(self.api) self.assertEqual(webob.exc.HTTPClientError.code, res.status_int) def test_list_networks_with_sort_remote_key_native_returns_400(self): if self._skip_native_sorting: self.skipTest("Skip test for not implemented sorting feature") with self.network(admin_state_up=True, name='net1'),\ self.network(admin_state_up=False, name='net2'),\ self.network(admin_state_up=False, name='net3'): req = self.new_list_request( 'networks', params='sort_key=subnets&sort_dir=asc') res = req.get_response(self.api) self.assertEqual(webob.exc.HTTPClientError.code, res.status_int) def test_list_networks_with_sort_emulated(self): helper_patcher = mock.patch( 'neutron.api.v2.base.Controller._get_sorting_helper', new=_fake_get_sorting_helper) helper_patcher.start() with self.network(admin_state_up=True, name='net1') as net1,\ self.network(admin_state_up=False, name='net2') as net2,\ self.network(admin_state_up=False, name='net3') as net3: self._test_list_with_sort('network', (net3, net2, net1), [('admin_state_up', 'asc'), ('name', 'desc')]) def test_list_networks_with_pagination_native(self): if self._skip_native_pagination: self.skipTest("Skip test for not implemented pagination feature") with self.network(name='net1') as net1,\ self.network(name='net2') as net2,\ self.network(name='net3') as net3: self._test_list_with_pagination('network', (net1, net2, net3), ('name', 
'asc'), 2, 2) def test_list_networks_with_pagination_emulated(self): helper_patcher = mock.patch( 'neutron.api.v2.base.Controller._get_pagination_helper', new=_fake_get_pagination_helper) helper_patcher.start() with self.network(name='net1') as net1,\ self.network(name='net2') as net2,\ self.network(name='net3') as net3: self._test_list_with_pagination('network', (net1, net2, net3), ('name', 'asc'), 2, 2) def test_list_networks_without_pk_in_fields_pagination_emulated(self): helper_patcher = mock.patch( 'neutron.api.v2.base.Controller._get_pagination_helper', new=_fake_get_pagination_helper) helper_patcher.start() with self.network(name='net1', shared=True) as net1,\ self.network(name='net2', shared=False) as net2,\ self.network(name='net3', shared=True) as net3: self._test_list_with_pagination('network', (net1, net2, net3), ('name', 'asc'), 2, 2, query_params="fields=name", verify_key='name') def test_list_networks_without_pk_in_fields_pagination_native(self): if self._skip_native_pagination: self.skipTest("Skip test for not implemented pagination feature") with self.network(name='net1') as net1,\ self.network(name='net2') as net2,\ self.network(name='net3') as net3: self._test_list_with_pagination('network', (net1, net2, net3), ('name', 'asc'), 2, 2, query_params="fields=shared", verify_key='shared') def test_list_networks_with_pagination_reverse_native(self): if self._skip_native_pagination: self.skipTest("Skip test for not implemented pagination feature") with self.network(name='net1') as net1,\ self.network(name='net2') as net2,\ self.network(name='net3') as net3: self._test_list_with_pagination_reverse('network', (net1, net2, net3), ('name', 'asc'), 2, 2) def test_list_networks_with_pagination_reverse_emulated(self): helper_patcher = mock.patch( 'neutron.api.v2.base.Controller._get_pagination_helper', new=_fake_get_pagination_helper) helper_patcher.start() with self.network(name='net1') as net1,\ self.network(name='net2') as net2,\ self.network(name='net3') as net3: self._test_list_with_pagination_reverse('network', (net1, net2, net3), ('name', 'asc'), 2, 2) def test_list_networks_with_parameters(self): with self.network(name='net1', admin_state_up=False) as net1,\ self.network(name='net2') as net2: query_params = 'admin_state_up=False' self._test_list_resources('network', [net1], query_params=query_params) query_params = 'admin_state_up=True' self._test_list_resources('network', [net2], query_params=query_params) def test_list_networks_with_fields(self): with self.network(name='net1') as net1: req = self.new_list_request('networks', params='fields=name') res = self.deserialize(self.fmt, req.get_response(self.api)) self.assertEqual(1, len(res['networks'])) self.assertEqual(res['networks'][0]['name'], net1['network']['name']) self.assertIsNone(res['networks'][0].get('id')) def test_list_networks_with_parameters_invalid_values(self): with self.network(name='net1', admin_state_up=False),\ self.network(name='net2'): req = self.new_list_request('networks', params='admin_state_up=fake') res = req.get_response(self.api) self.assertEqual(webob.exc.HTTPClientError.code, res.status_int) def test_list_shared_networks_with_non_admin_user(self): with self.network(shared=False, name='net1', tenant_id='tenant1') as net1,\ self.network(shared=True, name='net2', tenant_id='another_tenant') as net2,\ self.network(shared=False, name='net3', tenant_id='another_tenant'): ctx = context.Context(user_id='non_admin', tenant_id='tenant1', is_admin=False) self._test_list_resources('network', (net1, net2), ctx) 
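The sorting and pagination tests above all exercise the same marker/limit contract: results are ordered on the sort keys, the marker identifies the last item of the previous page, and at most limit items come back. A self-contained simplification of that contract (paginate is our toy model; the real helpers support multiple sort keys and reverse paging):

def paginate(items, sort_key, limit, marker=None, page_reverse=False):
    # Toy model of marker/limit pagination, not the real helper.
    items = sorted(items, key=lambda item: item[sort_key],
                   reverse=page_reverse)
    if marker is not None:
        # A page starts strictly after the marker item of the previous one.
        index = next(i for i, item in enumerate(items)
                     if item['id'] == marker)
        items = items[index + 1:]
    return items[:limit]

nets = [{'id': n, 'name': 'net%d' % n} for n in (3, 1, 2)]
page1 = paginate(nets, 'name', limit=2)                          # net1, net2
page2 = paginate(nets, 'name', limit=2, marker=page1[-1]['id'])  # net3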
def test_show_network(self): with self.network(name='net1') as net: req = self.new_show_request('networks', net['network']['id']) res = self.deserialize(self.fmt, req.get_response(self.api)) self.assertEqual(res['network']['name'], net['network']['name']) def test_show_network_with_subnet(self): with self.network(name='net1') as net: with self.subnet(net) as subnet: req = self.new_show_request('networks', net['network']['id']) res = self.deserialize(self.fmt, req.get_response(self.api)) self.assertEqual(res['network']['subnets'][0], subnet['subnet']['id']) def test_invalid_admin_status(self): value = [[7, False, webob.exc.HTTPClientError.code], [True, True, webob.exc.HTTPCreated.code], ["True", True, webob.exc.HTTPCreated.code], ["true", True, webob.exc.HTTPCreated.code], [1, True, webob.exc.HTTPCreated.code], ["False", False, webob.exc.HTTPCreated.code], [False, False, webob.exc.HTTPCreated.code], ["false", False, webob.exc.HTTPCreated.code], ["7", False, webob.exc.HTTPClientError.code]] for v in value: data = {'network': {'name': 'net', 'admin_state_up': v[0], 'tenant_id': self._tenant_id}} network_req = self.new_create_request('networks', data) req = network_req.get_response(self.api) self.assertEqual(req.status_int, v[2]) if v[2] == webob.exc.HTTPCreated.code: res = self.deserialize(self.fmt, req) self.assertEqual(res['network']['admin_state_up'], v[1]) class TestSubnetsV2(NeutronDbPluginV2TestCase): def _test_create_subnet(self, network=None, expected=None, **kwargs): keys = kwargs.copy() keys.setdefault('cidr', '10.0.0.0/24') keys.setdefault('ip_version', 4) keys.setdefault('enable_dhcp', True) with self.subnet(network=network, **keys) as subnet: # verify the response has each key with the correct value self._validate_resource(subnet, keys, 'subnet') # verify the configured validations are correct if expected: self._compare_resource(subnet, expected, 'subnet') self._delete('subnets', subnet['subnet']['id']) return subnet def test_create_subnet(self): gateway_ip = '10.0.0.1' cidr = '10.0.0.0/24' subnet = self._test_create_subnet(gateway_ip=gateway_ip, cidr=cidr) self.assertEqual(4, subnet['subnet']['ip_version']) self.assertIn('name', subnet['subnet']) def test_create_subnet_with_network_different_tenant(self): with self.network(shared=False, tenant_id='tenant1') as network: ctx = context.Context(user_id='non_admin', tenant_id='tenant2', is_admin=False) data = {'subnet': {'network_id': network['network']['id'], 'cidr': '10.0.2.0/24', 'ip_version': '4', 'gateway_ip': '10.0.2.1'}} req = self.new_create_request('subnets', data, self.fmt, context=ctx) res = req.get_response(self.api) self.assertEqual(webob.exc.HTTPNotFound.code, res.status_int) def test_create_two_subnets(self): gateway_ips = ['10.0.0.1', '10.0.1.1'] cidrs = ['10.0.0.0/24', '10.0.1.0/24'] with self.network() as network: with self.subnet(network=network, gateway_ip=gateway_ips[0], cidr=cidrs[0]): with self.subnet(network=network, gateway_ip=gateway_ips[1], cidr=cidrs[1]): net_req = self.new_show_request('networks', network['network']['id']) raw_res = net_req.get_response(self.api) net_res = self.deserialize(self.fmt, raw_res) for subnet_id in net_res['network']['subnets']: sub_req = self.new_show_request('subnets', subnet_id) raw_res = sub_req.get_response(self.api) sub_res = self.deserialize(self.fmt, raw_res) self.assertIn(sub_res['subnet']['cidr'], cidrs) self.assertIn(sub_res['subnet']['gateway_ip'], gateway_ips) def test_create_two_subnets_same_cidr_returns_400(self): gateway_ip_1 = '10.0.0.1' cidr_1 = '10.0.0.0/24' 
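This test, like the allow_overlapping_ips tests later in the class, rests on CIDR overlap detection: two identical or nested networks conflict. A quick netaddr illustration (ours, for reference only):

import netaddr

existing = netaddr.IPNetwork('10.0.0.0/24')
requested = netaddr.IPNetwork('10.0.0.0/24')
# Identical or nested CIDRs overlap, so the second create is refused:
conflict = requested in existing or existing in requested   # True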
gateway_ip_2 = '10.0.0.10' cidr_2 = '10.0.0.0/24' with self.network() as network: with self.subnet(network=network, gateway_ip=gateway_ip_1, cidr=cidr_1): with testlib_api.ExpectedException( webob.exc.HTTPClientError) as ctx_manager: with self.subnet(network=network, gateway_ip=gateway_ip_2, cidr=cidr_2): pass self.assertEqual(webob.exc.HTTPClientError.code, ctx_manager.exception.code) def test_create_subnet_bad_V4_cidr(self): with self.network() as network: data = {'subnet': {'network_id': network['network']['id'], 'cidr': '10.0.2.0', 'ip_version': '4', 'tenant_id': network['network']['tenant_id'], 'gateway_ip': '10.0.2.1'}} subnet_req = self.new_create_request('subnets', data) res = subnet_req.get_response(self.api) self.assertEqual(webob.exc.HTTPClientError.code, res.status_int) def test_create_subnet_with_cidr_and_default_subnetpool(self): """Expect subnet-create to keep semantic with default pools.""" with self.network() as network: tenant_id = network['network']['tenant_id'] subnetpool_prefix = '10.0.0.0/8' with self.subnetpool(prefixes=[subnetpool_prefix], admin=True, name="My subnet pool", tenant_id=tenant_id, min_prefixlen='25', is_default=True): data = {'subnet': {'network_id': network['network']['id'], 'cidr': '10.0.0.0/24', 'ip_version': '4', 'tenant_id': tenant_id}} subnet_req = self.new_create_request('subnets', data) res = subnet_req.get_response(self.api) subnet = self.deserialize(self.fmt, res)['subnet'] self.assertIsNone(subnet['subnetpool_id']) def test_create_subnet_no_cidr_and_default_subnetpool(self): """Expect subnet-create to keep semantic with default pools.""" with self.network() as network: tenant_id = network['network']['tenant_id'] subnetpool_prefix = '10.0.0.0/8' with self.subnetpool(prefixes=[subnetpool_prefix], admin=True, name="My subnet pool", tenant_id=tenant_id, min_prefixlen='25', is_default=True): data = {'subnet': {'network_id': network['network']['id'], 'ip_version': '4', 'tenant_id': tenant_id}} subnet_req = self.new_create_request('subnets', data) res = subnet_req.get_response(self.api) self.assertEqual( webob.exc.HTTPClientError.code, res.status_int) def test_create_subnet_no_ip_version(self): with self.network() as network: cfg.CONF.set_override('default_ipv4_subnet_pool', None) cfg.CONF.set_override('default_ipv6_subnet_pool', None) data = {'subnet': {'network_id': network['network']['id'], 'tenant_id': network['network']['tenant_id']}} subnet_req = self.new_create_request('subnets', data) res = subnet_req.get_response(self.api) self.assertEqual(webob.exc.HTTPClientError.code, res.status_int) def test_create_subnet_only_ip_version_v6_no_pool(self): with self.network() as network: tenant_id = network['network']['tenant_id'] cfg.CONF.set_override('ipv6_pd_enabled', False) cfg.CONF.set_override('default_ipv6_subnet_pool', None) data = {'subnet': {'network_id': network['network']['id'], 'ip_version': '6', 'tenant_id': tenant_id}} subnet_req = self.new_create_request('subnets', data) res = subnet_req.get_response(self.api) self.assertEqual(webob.exc.HTTPClientError.code, res.status_int) def test_create_subnet_bad_V4_cidr_prefix_len(self): with self.network() as network: data = {'subnet': {'network_id': network['network']['id'], 'cidr': constants.IPv4_ANY, 'ip_version': '4', 'tenant_id': network['network']['tenant_id'], 'gateway_ip': '0.0.0.1'}} subnet_req = self.new_create_request('subnets', data) res = subnet_req.get_response(self.api) self.assertEqual(webob.exc.HTTPClientError.code, res.status_int) def test_create_subnet_bad_V6_cidr(self): with 
self.network() as network: data = {'subnet': {'network_id': network['network']['id'], 'cidr': 'fe80::', 'ip_version': '6', 'tenant_id': network['network']['tenant_id'], 'gateway_ip': 'fe80::1'}} subnet_req = self.new_create_request('subnets', data) res = subnet_req.get_response(self.api) self.assertEqual(webob.exc.HTTPClientError.code, res.status_int) def test_create_subnet_V6_slaac_big_prefix(self): with self.network() as network: data = {'subnet': {'network_id': network['network']['id'], 'cidr': '2014::/65', 'ip_version': '6', 'tenant_id': network['network']['tenant_id'], 'gateway_ip': 'fe80::1', 'ipv6_address_mode': 'slaac'}} subnet_req = self.new_create_request('subnets', data) res = subnet_req.get_response(self.api) self.assertEqual(webob.exc.HTTPClientError.code, res.status_int) def test_create_2_subnets_overlapping_cidr_allowed_returns_200(self): cidr_1 = '10.0.0.0/23' cidr_2 = '10.0.0.0/24' cfg.CONF.set_override('allow_overlapping_ips', True) with self.subnet(cidr=cidr_1), self.subnet(cidr=cidr_2): pass def test_create_2_subnets_overlapping_cidr_not_allowed_returns_400(self): cidr_1 = '10.0.0.0/23' cidr_2 = '10.0.0.0/24' cfg.CONF.set_override('allow_overlapping_ips', False) with testlib_api.ExpectedException( webob.exc.HTTPClientError) as ctx_manager: with self.subnet(cidr=cidr_1), self.subnet(cidr=cidr_2): pass self.assertEqual(webob.exc.HTTPClientError.code, ctx_manager.exception.code) def test_create_subnets_bulk_native(self): if self._skip_native_bulk: self.skipTest("Plugin does not support native bulk subnet create") with self.network() as net: res = self._create_subnet_bulk(self.fmt, 2, net['network']['id'], 'test') self._validate_behavior_on_bulk_success(res, 'subnets') def test_create_subnets_bulk_emulated(self): real_has_attr = hasattr #ensures the API choose the emulation code path def fakehasattr(item, attr): if attr.endswith('__native_bulk_support'): return False return real_has_attr(item, attr) with mock.patch('six.moves.builtins.hasattr', new=fakehasattr): with self.network() as net: res = self._create_subnet_bulk(self.fmt, 2, net['network']['id'], 'test') self._validate_behavior_on_bulk_success(res, 'subnets') def test_create_subnets_bulk_emulated_plugin_failure(self): real_has_attr = hasattr #ensures the API choose the emulation code path def fakehasattr(item, attr): if attr.endswith('__native_bulk_support'): return False return real_has_attr(item, attr) with mock.patch('six.moves.builtins.hasattr', new=fakehasattr): orig = manager.NeutronManager.get_plugin().create_subnet method_to_patch = _get_create_db_method('subnet') with mock.patch.object(manager.NeutronManager.get_plugin(), method_to_patch) as patched_plugin: def side_effect(*args, **kwargs): self._fail_second_call(patched_plugin, orig, *args, **kwargs) patched_plugin.side_effect = side_effect with self.network() as net: res = self._create_subnet_bulk(self.fmt, 2, net['network']['id'], 'test') self._delete('networks', net['network']['id']) # We expect a 500 as we injected a fault in the plugin self._validate_behavior_on_bulk_failure( res, 'subnets', webob.exc.HTTPServerError.code ) def test_create_subnets_bulk_native_plugin_failure(self): if self._skip_native_bulk: self.skipTest("Plugin does not support native bulk subnet create") plugin = manager.NeutronManager.get_plugin() orig = plugin.create_subnet method_to_patch = _get_create_db_method('subnet') with mock.patch.object(plugin, method_to_patch) as patched_plugin: def side_effect(*args, **kwargs): return self._fail_second_call(patched_plugin, orig, *args, 
**kwargs) patched_plugin.side_effect = side_effect with self.network() as net: res = self._create_subnet_bulk(self.fmt, 2, net['network']['id'], 'test') # We expect a 500 as we injected a fault in the plugin self._validate_behavior_on_bulk_failure( res, 'subnets', webob.exc.HTTPServerError.code ) def test_delete_subnet(self): gateway_ip = '10.0.0.1' cidr = '10.0.0.0/24' # Create new network res = self._create_network(fmt=self.fmt, name='net', admin_state_up=True) network = self.deserialize(self.fmt, res) subnet = self._make_subnet(self.fmt, network, gateway_ip, cidr, ip_version=4) req = self.new_delete_request('subnets', subnet['subnet']['id']) res = req.get_response(self.api) self.assertEqual(webob.exc.HTTPNoContent.code, res.status_int) def test_delete_subnet_port_exists_owned_by_network(self): gateway_ip = '10.0.0.1' cidr = '10.0.0.0/24' # Create new network res = self._create_network(fmt=self.fmt, name='net', admin_state_up=True) network = self.deserialize(self.fmt, res) subnet = self._make_subnet(self.fmt, network, gateway_ip, cidr, ip_version=4) self._create_port(self.fmt, network['network']['id'], device_owner=constants.DEVICE_OWNER_DHCP) req = self.new_delete_request('subnets', subnet['subnet']['id']) res = req.get_response(self.api) self.assertEqual(webob.exc.HTTPNoContent.code, res.status_int) def test_delete_subnet_dhcp_port_associated_with_other_subnets(self): res = self._create_network(fmt=self.fmt, name='net', admin_state_up=True) network = self.deserialize(self.fmt, res) subnet1 = self._make_subnet(self.fmt, network, '10.0.0.1', '10.0.0.0/24', ip_version=4) subnet2 = self._make_subnet(self.fmt, network, '10.0.1.1', '10.0.1.0/24', ip_version=4) res = self._create_port(self.fmt, network['network']['id'], device_owner=constants.DEVICE_OWNER_DHCP, fixed_ips=[ {'subnet_id': subnet1['subnet']['id']}, {'subnet_id': subnet2['subnet']['id']} ]) port = self.deserialize(self.fmt, res) expected_subnets = [subnet1['subnet']['id'], subnet2['subnet']['id']] self.assertEqual(expected_subnets, [s['subnet_id'] for s in port['port']['fixed_ips']]) req = self.new_delete_request('subnets', subnet1['subnet']['id']) res = req.get_response(self.api) self.assertEqual(204, res.status_int) port = self._show('ports', port['port']['id']) expected_subnets = [subnet2['subnet']['id']] self.assertEqual(expected_subnets, [s['subnet_id'] for s in port['port']['fixed_ips']]) req = self.new_delete_request('subnets', subnet2['subnet']['id']) res = req.get_response(self.api) self.assertEqual(204, res.status_int) port = self._show('ports', port['port']['id']) self.assertFalse(port['port']['fixed_ips']) def test_delete_subnet_port_exists_owned_by_other(self): with self.subnet() as subnet: with self.port(subnet=subnet): id = subnet['subnet']['id'] req = self.new_delete_request('subnets', id) res = req.get_response(self.api) data = self.deserialize(self.fmt, res) self.assertEqual(webob.exc.HTTPConflict.code, res.status_int) msg = str(n_exc.SubnetInUse(subnet_id=id)) self.assertEqual(data['NeutronError']['message'], msg) def test_delete_subnet_with_other_subnet_on_network_still_in_use(self): with self.network() as network: with self.subnet(network=network) as subnet1,\ self.subnet(network=network, cidr='10.0.1.0/24') as subnet2: subnet1_id = subnet1['subnet']['id'] subnet2_id = subnet2['subnet']['id'] with self.port( subnet=subnet1, fixed_ips=[{'subnet_id': subnet1_id}]): req = self.new_delete_request('subnets', subnet2_id) res = req.get_response(self.api) self.assertEqual(webob.exc.HTTPNoContent.code, res.status_int) 
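Collectively, the subnet-deletion tests above encode one rule: deletion succeeds (204) only when every remaining IP allocation on the subnet belongs to a port the network itself owns, such as the DHCP port, whose allocation is simply recycled; any other owner yields SubnetInUse (409). The SLAAC tests that follow add a further exception for automatically derived addresses. A compact, hypothetical restatement (subnet_delete_allowed is our name, not the plugin's code):

# 'network:dhcp' is the value of constants.DEVICE_OWNER_DHCP.
NETWORK_OWNED = ('network:dhcp',)

def subnet_delete_allowed(allocations):
    # Hypothetical sketch, not the plugin's implementation.
    return all(alloc['device_owner'] in NETWORK_OWNED
               for alloc in allocations)

# subnet_delete_allowed([{'device_owner': 'network:dhcp'}])  -> True  (204)
# subnet_delete_allowed([{'device_owner': 'compute:nova'}])  -> False (409)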
    def _create_slaac_subnet_and_port(self, port_owner=None):
        # Create an IPv6 SLAAC subnet and a port using that subnet
        res = self._create_network(fmt=self.fmt, name='net',
                                   admin_state_up=True)
        network = self.deserialize(self.fmt, res)
        subnet = self._make_subnet(self.fmt, network, gateway='fe80::1',
                                   cidr='fe80::/64', ip_version=6,
                                   ipv6_ra_mode=constants.IPV6_SLAAC,
                                   ipv6_address_mode=constants.IPV6_SLAAC)
        kwargs = {}
        if port_owner:
            kwargs['device_owner'] = port_owner
            if port_owner in constants.ROUTER_INTERFACE_OWNERS:
                kwargs['fixed_ips'] = [{'ip_address': 'fe80::1'}]
        res = self._create_port(self.fmt, net_id=network['network']['id'],
                                **kwargs)
        port = self.deserialize(self.fmt, res)
        self.assertEqual(1, len(port['port']['fixed_ips']))
        # The port should have an address from the subnet
        req = self.new_show_request('ports', port['port']['id'], self.fmt)
        res = req.get_response(self.api)
        sport = self.deserialize(self.fmt, res)
        self.assertEqual(1, len(sport['port']['fixed_ips']))
        return subnet, port

    def test_delete_subnet_ipv6_slaac_port_exists(self):
        """Test IPv6 SLAAC subnet delete when a port is still using subnet."""
        subnet, port = self._create_slaac_subnet_and_port()
        # Delete the subnet
        req = self.new_delete_request('subnets', subnet['subnet']['id'])
        res = req.get_response(self.api)
        self.assertEqual(webob.exc.HTTPNoContent.code, res.status_int)
        # The port should no longer have an address from the deleted subnet
        req = self.new_show_request('ports', port['port']['id'], self.fmt)
        res = req.get_response(self.api)
        sport = self.deserialize(self.fmt, res)
        self.assertEqual(0, len(sport['port']['fixed_ips']))

    def test_delete_subnet_ipv6_slaac_router_port_exists(self):
        """Test IPv6 SLAAC subnet delete with a router port using the subnet"""
        subnet, port = self._create_slaac_subnet_and_port(
            constants.DEVICE_OWNER_ROUTER_INTF)
        # Delete the subnet and assert that we get a HTTP 409 error
        req = self.new_delete_request('subnets', subnet['subnet']['id'])
        res = req.get_response(self.api)
        self.assertEqual(webob.exc.HTTPConflict.code, res.status_int)
        # The subnet should still exist and the port should still have an
        # address from the subnet
        req = self.new_show_request('subnets', subnet['subnet']['id'],
                                    self.fmt)
        res = req.get_response(self.api)
        ssubnet = self.deserialize(self.fmt, res)
        self.assertIsNotNone(ssubnet)
        req = self.new_show_request('ports', port['port']['id'], self.fmt)
        res = req.get_response(self.api)
        sport = self.deserialize(self.fmt, res)
        self.assertEqual(1, len(sport['port']['fixed_ips']))
        port_subnet_ids = [fip['subnet_id'] for fip in
                           sport['port']['fixed_ips']]
        self.assertIn(subnet['subnet']['id'], port_subnet_ids)

    def test_delete_network(self):
        gateway_ip = '10.0.0.1'
        cidr = '10.0.0.0/24'
        # Create new network
        res = self._create_network(fmt=self.fmt, name='net',
                                   admin_state_up=True)
        network = self.deserialize(self.fmt, res)
        subnet = self._make_subnet(self.fmt, network, gateway_ip, cidr,
                                   ip_version=4)
        req = self.new_delete_request('networks', network['network']['id'])
        res = req.get_response(self.api)
        self.assertEqual(webob.exc.HTTPNoContent.code, res.status_int)
        req = self.new_show_request('subnets', subnet['subnet']['id'])
        res = req.get_response(self.api)
        self.assertEqual(webob.exc.HTTPNotFound.code, res.status_int)

    def test_create_subnet_bad_tenant(self):
        with self.network() as network:
            self._create_subnet(self.fmt,
                                network['network']['id'],
                                '10.0.2.0/24',
                                webob.exc.HTTPNotFound.code,
                                ip_version=4, 
tenant_id='bad_tenant_id', gateway_ip='10.0.2.1', device_owner='fake_owner', set_context=True) def test_create_subnet_as_admin(self): with self.network() as network: self._create_subnet(self.fmt, network['network']['id'], '10.0.2.0/24', webob.exc.HTTPCreated.code, ip_version=4, tenant_id='bad_tenant_id', gateway_ip='10.0.2.1', device_owner='fake_owner', set_context=False) def test_create_subnet_nonzero_cidr(self): # Pass None as gateway_ip to prevent ip auto allocation for gw # Previously gateway ip was allocated after validations, # so no errors were raised if gw ip was out of range. with self.subnet(cidr='10.129.122.5/8') as v1,\ self.subnet(cidr='11.129.122.5/15') as v2,\ self.subnet(cidr='12.129.122.5/16') as v3,\ self.subnet(cidr='13.129.122.5/18') as v4,\ self.subnet(cidr='14.129.122.5/22') as v5,\ self.subnet(cidr='15.129.122.5/24') as v6,\ self.subnet(cidr='16.129.122.5/28') as v7,\ self.subnet(cidr='17.129.122.5/32', gateway_ip=None, enable_dhcp=False) as v8: subs = (v1, v2, v3, v4, v5, v6, v7, v8) # the API should accept and correct these for users self.assertEqual('10.0.0.0/8', subs[0]['subnet']['cidr']) self.assertEqual('11.128.0.0/15', subs[1]['subnet']['cidr']) self.assertEqual('12.129.0.0/16', subs[2]['subnet']['cidr']) self.assertEqual('13.129.64.0/18', subs[3]['subnet']['cidr']) self.assertEqual('14.129.120.0/22', subs[4]['subnet']['cidr']) self.assertEqual('15.129.122.0/24', subs[5]['subnet']['cidr']) self.assertEqual('16.129.122.0/28', subs[6]['subnet']['cidr']) self.assertEqual('17.129.122.5/32', subs[7]['subnet']['cidr']) def _test_create_subnet_with_invalid_netmask_returns_400(self, *args): with self.network() as network: for cidr in args: ip_version = netaddr.IPNetwork(cidr).version self._create_subnet(self.fmt, network['network']['id'], cidr, webob.exc.HTTPClientError.code, ip_version=ip_version) def test_create_subnet_with_invalid_netmask_returns_400_ipv4(self): self._test_create_subnet_with_invalid_netmask_returns_400( '10.0.0.0/31', '10.0.0.0/32') def test_create_subnet_with_invalid_netmask_returns_400_ipv6(self): self._test_create_subnet_with_invalid_netmask_returns_400( 'cafe:cafe::/127', 'cafe:cafe::/128') def test_create_subnet_bad_ip_version(self): with self.network() as network: # Check bad IP version data = {'subnet': {'network_id': network['network']['id'], 'cidr': '10.0.2.0/24', 'ip_version': 'abc', 'tenant_id': network['network']['tenant_id'], 'gateway_ip': '10.0.2.1'}} subnet_req = self.new_create_request('subnets', data) res = subnet_req.get_response(self.api) self.assertEqual(webob.exc.HTTPClientError.code, res.status_int) def test_create_subnet_bad_ip_version_null(self): with self.network() as network: # Check bad IP version data = {'subnet': {'network_id': network['network']['id'], 'cidr': '10.0.2.0/24', 'ip_version': None, 'tenant_id': network['network']['tenant_id'], 'gateway_ip': '10.0.2.1'}} subnet_req = self.new_create_request('subnets', data) res = subnet_req.get_response(self.api) self.assertEqual(webob.exc.HTTPClientError.code, res.status_int) def test_create_subnet_bad_uuid(self): with self.network() as network: # Check invalid UUID data = {'subnet': {'network_id': None, 'cidr': '10.0.2.0/24', 'ip_version': 4, 'tenant_id': network['network']['tenant_id'], 'gateway_ip': '10.0.2.1'}} subnet_req = self.new_create_request('subnets', data) res = subnet_req.get_response(self.api) self.assertEqual(webob.exc.HTTPClientError.code, res.status_int) def test_create_subnet_bad_boolean(self): with self.network() as network: # Check invalid boolean data 

    def test_create_subnet_bad_boolean(self):
        with self.network() as network:
            # Check invalid boolean
            data = {'subnet': {'network_id': network['network']['id'],
                               'cidr': '10.0.2.0/24',
                               'ip_version': '4',
                               'enable_dhcp': None,
                               'tenant_id': network['network']['tenant_id'],
                               'gateway_ip': '10.0.2.1'}}
            subnet_req = self.new_create_request('subnets', data)
            res = subnet_req.get_response(self.api)
            self.assertEqual(webob.exc.HTTPClientError.code, res.status_int)

    def test_create_subnet_bad_pools(self):
        with self.network() as network:
            # Check allocation pools
            allocation_pools = [[{'end': '10.0.0.254'}],
                                [{'start': '10.0.0.254'}],
                                [{'start': '1000.0.0.254'}],
                                [{'start': '10.0.0.2', 'end': '10.0.0.254'},
                                 {'end': '10.0.0.254'}],
                                None,
                                [{'start': '10.0.0.200', 'end': '10.0.3.20'}],
                                [{'start': '10.0.2.250', 'end': '10.0.3.5'}],
                                [{'start': '10.0.2.10', 'end': '10.0.2.5'}],
                                [{'start': '10.0.0.2', 'end': '10.0.0.3'},
                                 {'start': '10.0.0.2', 'end': '10.0.0.3'}]]
            tenant_id = network['network']['tenant_id']
            for pool in allocation_pools:
                data = {'subnet': {'network_id': network['network']['id'],
                                   'cidr': '10.0.2.0/24',
                                   'ip_version': '4',
                                   'tenant_id': tenant_id,
                                   'gateway_ip': '10.0.2.1',
                                   'allocation_pools': pool}}
                subnet_req = self.new_create_request('subnets', data)
                res = subnet_req.get_response(self.api)
                self.assertEqual(webob.exc.HTTPClientError.code,
                                 res.status_int)

    def test_create_subnet_bad_nameserver(self):
        with self.network() as network:
            # Check nameservers
            nameserver_pools = [['1100.0.0.2'],
                                ['1.1.1.2', '1.1000.1.3'],
                                ['1.1.1.2', '1.1.1.2']]
            tenant_id = network['network']['tenant_id']
            for nameservers in nameserver_pools:
                data = {'subnet': {'network_id': network['network']['id'],
                                   'cidr': '10.0.2.0/24',
                                   'ip_version': '4',
                                   'tenant_id': tenant_id,
                                   'gateway_ip': '10.0.2.1',
                                   'dns_nameservers': nameservers}}
                subnet_req = self.new_create_request('subnets', data)
                res = subnet_req.get_response(self.api)
                self.assertEqual(webob.exc.HTTPClientError.code,
                                 res.status_int)

    def test_create_subnet_bad_hostroutes(self):
        with self.network() as network:
            # Check hostroutes
            hostroute_pools = [[{'destination': '100.0.0.0/24'}],
                               [{'nexthop': '10.0.2.20'}],
                               [{'nexthop': '10.0.2.20',
                                 'destination': '100.0.0.0/8'},
                                {'nexthop': '10.0.2.20',
                                 'destination': '100.0.0.0/8'}]]
            tenant_id = network['network']['tenant_id']
            for hostroutes in hostroute_pools:
                data = {'subnet': {'network_id': network['network']['id'],
                                   'cidr': '10.0.2.0/24',
                                   'ip_version': '4',
                                   'tenant_id': tenant_id,
                                   'gateway_ip': '10.0.2.1',
                                   'host_routes': hostroutes}}
                subnet_req = self.new_create_request('subnets', data)
                res = subnet_req.get_response(self.api)
                self.assertEqual(webob.exc.HTTPClientError.code,
                                 res.status_int)

    def test_create_subnet_defaults(self):
        gateway = '10.0.0.1'
        cidr = '10.0.0.0/24'
        allocation_pools = [{'start': '10.0.0.2',
                             'end': '10.0.0.254'}]
        enable_dhcp = True
        subnet = self._test_create_subnet()
        # verify cidr & gw have been correctly generated
        self.assertEqual(cidr, subnet['subnet']['cidr'])
        self.assertEqual(gateway, subnet['subnet']['gateway_ip'])
        self.assertEqual(enable_dhcp, subnet['subnet']['enable_dhcp'])
        self.assertEqual(allocation_pools,
                         subnet['subnet']['allocation_pools'])

    def test_create_subnet_gw_values(self):
        cidr = '10.0.0.0/24'
        # Gateway is last IP in range
        gateway = '10.0.0.254'
        allocation_pools = [{'start': '10.0.0.1',
                             'end': '10.0.0.253'}]
        expected = {'gateway_ip': gateway,
                    'cidr': cidr,
                    'allocation_pools': allocation_pools}
        self._test_create_subnet(expected=expected, gateway_ip=gateway)
        # Gateway is first in subnet
        gateway = '10.0.0.1'
        allocation_pools = [{'start': '10.0.0.2',
                             'end': '10.0.0.254'}]
        expected = {'gateway_ip': gateway,
                    'cidr': cidr,
                    'allocation_pools': allocation_pools}
        self._test_create_subnet(expected=expected, gateway_ip=gateway)

    def test_create_subnet_ipv6_gw_values(self):
        cidr = '2001::/64'
        # Gateway is last IP in IPv6 DHCPv6 stateful subnet
        gateway = '2001::ffff:ffff:ffff:ffff'
        allocation_pools = [{'start': '2001::1',
                             'end': '2001::ffff:ffff:ffff:fffe'}]
        expected = {'gateway_ip': gateway,
                    'cidr': cidr,
                    'allocation_pools': allocation_pools}
        self._test_create_subnet(expected=expected, gateway_ip=gateway,
                                 cidr=cidr, ip_version=6,
                                 ipv6_ra_mode=constants.DHCPV6_STATEFUL,
                                 ipv6_address_mode=constants.DHCPV6_STATEFUL)
        # Gateway is first IP in IPv6 DHCPv6 stateful subnet
        gateway = '2001::1'
        allocation_pools = [{'start': '2001::2',
                             'end': '2001::ffff:ffff:ffff:ffff'}]
        expected = {'gateway_ip': gateway,
                    'cidr': cidr,
                    'allocation_pools': allocation_pools}
        self._test_create_subnet(expected=expected, gateway_ip=gateway,
                                 cidr=cidr, ip_version=6,
                                 ipv6_ra_mode=constants.DHCPV6_STATEFUL,
                                 ipv6_address_mode=constants.DHCPV6_STATEFUL)
        # If gateway_ip is not specified, allocate first IP from the subnet
        expected = {'gateway_ip': gateway,
                    'cidr': cidr}
        self._test_create_subnet(expected=expected,
                                 cidr=cidr, ip_version=6,
                                 ipv6_ra_mode=constants.IPV6_SLAAC,
                                 ipv6_address_mode=constants.IPV6_SLAAC)

    @testtools.skipIf(tools.is_bsd(), 'bug/1484837')
    def test_create_subnet_ipv6_pd_gw_values(self):
        cidr = constants.PROVISIONAL_IPV6_PD_PREFIX
        # Gateway is last IP in IPv6 DHCPv6 Stateless subnet
        gateway = '::ffff:ffff:ffff:ffff'
        allocation_pools = [{'start': '::1',
                             'end': '::ffff:ffff:ffff:fffe'}]
        expected = {'gateway_ip': gateway,
                    'cidr': cidr,
                    'allocation_pools': allocation_pools}
        self._test_create_subnet(expected=expected, gateway_ip=gateway,
                                 cidr=cidr, ip_version=6,
                                 ipv6_ra_mode=constants.DHCPV6_STATELESS,
                                 ipv6_address_mode=constants.DHCPV6_STATELESS)
        # Gateway is first IP in IPv6 DHCPv6 Stateless subnet
        gateway = '::1'
        allocation_pools = [{'start': '::2',
                             'end': '::ffff:ffff:ffff:ffff'}]
        expected = {'gateway_ip': gateway,
                    'cidr': cidr,
                    'allocation_pools': allocation_pools}
        self._test_create_subnet(expected=expected, gateway_ip=gateway,
                                 cidr=cidr, ip_version=6,
                                 ipv6_ra_mode=constants.DHCPV6_STATELESS,
                                 ipv6_address_mode=constants.DHCPV6_STATELESS)
        # If gateway_ip is not specified, allocate first IP from the subnet
        expected = {'gateway_ip': gateway,
                    'cidr': cidr}
        self._test_create_subnet(expected=expected,
                                 cidr=cidr, ip_version=6,
                                 ipv6_ra_mode=constants.IPV6_SLAAC,
                                 ipv6_address_mode=constants.IPV6_SLAAC)

    def test_create_subnet_gw_outside_cidr_returns_400(self):
        cfg.CONF.set_override('force_gateway_on_subnet', True)
        with self.network() as network:
            self._create_subnet(self.fmt,
                                network['network']['id'],
                                '10.0.0.0/24',
                                webob.exc.HTTPClientError.code,
                                gateway_ip='100.0.0.1')

    def test_create_subnet_gw_outside_cidr_returns_201(self):
        cfg.CONF.set_override('force_gateway_on_subnet', False)
        with self.network() as network:
            self._create_subnet(self.fmt,
                                network['network']['id'],
                                '10.0.0.0/24',
                                webob.exc.HTTPCreated.code,
                                gateway_ip='100.0.0.1')

    def test_create_subnet_gw_is_nw_addr_returns_400(self):
        cfg.CONF.set_override('force_gateway_on_subnet', False)
        with self.network() as network:
            self._create_subnet(self.fmt,
                                network['network']['id'],
                                '10.0.0.0/24',
                                webob.exc.HTTPClientError.code,
                                gateway_ip='10.0.0.0')

    def test_create_subnet_gw_is_broadcast_addr_returns_400(self):
        cfg.CONF.set_override('force_gateway_on_subnet', False)
        with self.network() as network:
            self._create_subnet(self.fmt,
                                network['network']['id'],
                                '10.0.0.0/24',
                                webob.exc.HTTPClientError.code,
                                gateway_ip='10.0.0.255')

    def test_create_subnet_gw_of_network_returns_400(self):
        with self.network() as network:
            self._create_subnet(self.fmt,
                                network['network']['id'],
                                '10.0.0.0/24',
                                webob.exc.HTTPClientError.code,
                                gateway_ip='10.0.0.0')

    def test_create_subnet_gw_bcast_returns_400(self):
        with self.network() as network:
            self._create_subnet(self.fmt,
                                network['network']['id'],
                                '10.0.0.0/24',
                                webob.exc.HTTPClientError.code,
                                gateway_ip='10.0.0.255')

    def test_create_subnet_with_allocation_pool(self):
        gateway_ip = '10.0.0.1'
        cidr = '10.0.0.0/24'
        allocation_pools = [{'start': '10.0.0.2',
                             'end': '10.0.0.100'}]
        self._test_create_subnet(gateway_ip=gateway_ip,
                                 cidr=cidr,
                                 allocation_pools=allocation_pools)

    def test_create_subnet_with_none_gateway(self):
        cidr = '10.0.0.0/24'
        self._test_create_subnet(gateway_ip=None,
                                 cidr=cidr)

    def test_create_subnet_with_none_gateway_fully_allocated(self):
        cidr = '10.0.0.0/24'
        allocation_pools = [{'start': '10.0.0.1',
                             'end': '10.0.0.254'}]
        self._test_create_subnet(gateway_ip=None,
                                 cidr=cidr,
                                 allocation_pools=allocation_pools)

    def test_subnet_with_allocation_range(self):
        with self.network() as network:
            net_id = network['network']['id']
            data = {'subnet': {'network_id': net_id,
                               'cidr': '10.0.0.0/24',
                               'ip_version': 4,
                               'gateway_ip': '10.0.0.1',
                               'tenant_id': network['network']['tenant_id'],
                               'allocation_pools': [{'start': '10.0.0.100',
                                                     'end': '10.0.0.120'}]}}
            subnet_req = self.new_create_request('subnets', data)
            subnet = self.deserialize(self.fmt,
                                      subnet_req.get_response(self.api))
            # Check fixed IP not in allocation range
            kwargs = {"fixed_ips": [{'subnet_id': subnet['subnet']['id'],
                                     'ip_address': '10.0.0.10'}]}
            res = self._create_port(self.fmt, net_id=net_id, **kwargs)
            self.assertEqual(webob.exc.HTTPCreated.code, res.status_int)
            port = self.deserialize(self.fmt, res)
            # delete the port
            self._delete('ports', port['port']['id'])

            # Check when fixed IP is gateway
            kwargs = {"fixed_ips": [{'subnet_id': subnet['subnet']['id'],
                                     'ip_address': '10.0.0.1'}]}
            res = self._create_port(self.fmt, net_id=net_id, **kwargs)
            self.assertEqual(webob.exc.HTTPCreated.code, res.status_int)
            port = self.deserialize(self.fmt, res)
            # delete the port
            self._delete('ports', port['port']['id'])

    def test_create_subnet_with_none_gateway_allocation_pool(self):
        cidr = '10.0.0.0/24'
        allocation_pools = [{'start': '10.0.0.2',
                             'end': '10.0.0.100'}]
        self._test_create_subnet(gateway_ip=None,
                                 cidr=cidr,
                                 allocation_pools=allocation_pools)

    def test_create_subnet_with_v6_allocation_pool(self):
        gateway_ip = 'fe80::1'
        cidr = 'fe80::/80'
        allocation_pools = [{'start': 'fe80::2',
                             'end': 'fe80::ffff:fffa:ffff'}]
        self._test_create_subnet(gateway_ip=gateway_ip,
                                 cidr=cidr, ip_version=6,
                                 allocation_pools=allocation_pools)

    @testtools.skipIf(tools.is_bsd(), 'bug/1484837')
    def test_create_subnet_with_v6_pd_allocation_pool(self):
        gateway_ip = '::1'
        cidr = constants.PROVISIONAL_IPV6_PD_PREFIX
        allocation_pools = [{'start': '::2',
                             'end': '::ffff:ffff:ffff:fffe'}]
        self._test_create_subnet(gateway_ip=gateway_ip,
                                 cidr=cidr, ip_version=6,
                                 allocation_pools=allocation_pools)

    def test_create_subnet_with_large_allocation_pool(self):
        gateway_ip = '10.0.0.1'
        cidr = '10.0.0.0/8'
        allocation_pools = [{'start': '10.0.0.2',
                             'end': '10.0.0.100'},
                            {'start': '10.1.0.0',
                             'end': '10.200.0.100'}]
        self._test_create_subnet(gateway_ip=gateway_ip,
                                 cidr=cidr,
                                 allocation_pools=allocation_pools)

    def test_create_subnet_multiple_allocation_pools(self):
        gateway_ip = '10.0.0.1'
        cidr = '10.0.0.0/24'
        allocation_pools = [{'start': '10.0.0.2',
                             'end': '10.0.0.100'},
                            {'start': '10.0.0.110',
                             'end': '10.0.0.150'}]
        self._test_create_subnet(gateway_ip=gateway_ip,
                                 cidr=cidr,
                                 allocation_pools=allocation_pools)

    def test_create_subnet_with_dhcp_disabled(self):
        enable_dhcp = False
        self._test_create_subnet(enable_dhcp=enable_dhcp)

    def test_create_subnet_default_gw_conflict_allocation_pool_returns_409(
            self):
        cidr = '10.0.0.0/24'
        allocation_pools = [{'start': '10.0.0.1',
                             'end': '10.0.0.5'}]
        with testlib_api.ExpectedException(
                webob.exc.HTTPClientError) as ctx_manager:
            self._test_create_subnet(cidr=cidr,
                                     allocation_pools=allocation_pools)
            self.assertEqual(webob.exc.HTTPConflict.code,
                             ctx_manager.exception.code)

    def test_create_subnet_gateway_in_allocation_pool_returns_409(self):
        gateway_ip = '10.0.0.50'
        cidr = '10.0.0.0/24'
        allocation_pools = [{'start': '10.0.0.1',
                             'end': '10.0.0.100'}]
        with testlib_api.ExpectedException(
                webob.exc.HTTPClientError) as ctx_manager:
            self._test_create_subnet(gateway_ip=gateway_ip,
                                     cidr=cidr,
                                     allocation_pools=allocation_pools)
            self.assertEqual(webob.exc.HTTPConflict.code,
                             ctx_manager.exception.code)

    def test_create_subnet_overlapping_allocation_pools_returns_409(self):
        gateway_ip = '10.0.0.1'
        cidr = '10.0.0.0/24'
        allocation_pools = [{'start': '10.0.0.2',
                             'end': '10.0.0.150'},
                            {'start': '10.0.0.140',
                             'end': '10.0.0.180'}]
        with testlib_api.ExpectedException(
                webob.exc.HTTPClientError) as ctx_manager:
            self._test_create_subnet(gateway_ip=gateway_ip,
                                     cidr=cidr,
                                     allocation_pools=allocation_pools)
            self.assertEqual(webob.exc.HTTPConflict.code,
                             ctx_manager.exception.code)

    def test_create_subnet_invalid_allocation_pool_returns_400(self):
        gateway_ip = '10.0.0.1'
        cidr = '10.0.0.0/24'
        allocation_pools = [{'start': '10.0.0.2',
                             'end': '10.0.0.256'}]
        with testlib_api.ExpectedException(
                webob.exc.HTTPClientError) as ctx_manager:
            self._test_create_subnet(gateway_ip=gateway_ip,
                                     cidr=cidr,
                                     allocation_pools=allocation_pools)
            self.assertEqual(webob.exc.HTTPClientError.code,
                             ctx_manager.exception.code)

    def test_create_subnet_out_of_range_allocation_pool_returns_400(self):
        gateway_ip = '10.0.0.1'
        cidr = '10.0.0.0/24'
        allocation_pools = [{'start': '10.0.0.2',
                             'end': '10.0.1.6'}]
        with testlib_api.ExpectedException(
                webob.exc.HTTPClientError) as ctx_manager:
            self._test_create_subnet(gateway_ip=gateway_ip,
                                     cidr=cidr,
                                     allocation_pools=allocation_pools)
            self.assertEqual(webob.exc.HTTPClientError.code,
                             ctx_manager.exception.code)

    def test_create_subnet_shared_returns_400(self):
        cidr = '10.0.0.0/24'
        with testlib_api.ExpectedException(
                webob.exc.HTTPClientError) as ctx_manager:
            self._test_create_subnet(cidr=cidr,
                                     shared=True)
            self.assertEqual(webob.exc.HTTPClientError.code,
                             ctx_manager.exception.code)

    def test_create_subnet_inconsistent_ipv6_cidrv4(self):
        with self.network() as network:
            data = {'subnet': {'network_id': network['network']['id'],
                               'cidr': '10.0.2.0/24',
                               'ip_version': 6,
                               'tenant_id': network['network']['tenant_id']}}
            subnet_req = self.new_create_request('subnets', data)
            res = subnet_req.get_response(self.api)
            self.assertEqual(webob.exc.HTTPClientError.code, res.status_int)

    def test_create_subnet_inconsistent_ipv4_cidrv6(self):
        with self.network() as network:
            data = {'subnet': {'network_id': network['network']['id'],
                               'cidr': 'fe80::0/80',
                               'ip_version': 4,
                               'tenant_id': network['network']['tenant_id']}}
            subnet_req = self.new_create_request('subnets', data)
            res = subnet_req.get_response(self.api)
            self.assertEqual(webob.exc.HTTPClientError.code, res.status_int)
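
    # Illustrative sketch only, not Neutron's validation code: the
    # overlapping-pool 409s exercised above reduce to interval
    # intersection, which netaddr's set arithmetic expresses directly.
    # Underscore-prefixed so the runner does not collect it.
    def _example_pool_overlap_check(self):
        pool_a = netaddr.IPSet(netaddr.IPRange('10.0.0.2', '10.0.0.150'))
        pool_b = netaddr.IPSet(netaddr.IPRange('10.0.0.140', '10.0.0.180'))
        # a non-empty intersection is what the API reports as a conflict
        self.assertTrue(pool_a & pool_b)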

    def test_create_subnet_inconsistent_ipv4_gatewayv6(self):
        with self.network() as network:
            data = {'subnet': {'network_id': network['network']['id'],
                               'cidr': '10.0.2.0/24',
                               'ip_version': 4,
                               'gateway_ip': 'fe80::1',
                               'tenant_id': network['network']['tenant_id']}}
            subnet_req = self.new_create_request('subnets', data)
            res = subnet_req.get_response(self.api)
            self.assertEqual(webob.exc.HTTPClientError.code, res.status_int)

    def test_create_subnet_inconsistent_ipv6_gatewayv4(self):
        with self.network() as network:
            data = {'subnet': {'network_id': network['network']['id'],
                               'cidr': 'fe80::0/80',
                               'ip_version': 6,
                               'gateway_ip': '192.168.0.1',
                               'tenant_id': network['network']['tenant_id']}}
            subnet_req = self.new_create_request('subnets', data)
            res = subnet_req.get_response(self.api)
            self.assertEqual(webob.exc.HTTPClientError.code, res.status_int)

    def test_create_subnet_inconsistent_ipv6_dns_v4(self):
        with self.network() as network:
            data = {'subnet': {'network_id': network['network']['id'],
                               'cidr': 'fe80::0/80',
                               'ip_version': 6,
                               'dns_nameservers': ['192.168.0.1'],
                               'tenant_id': network['network']['tenant_id']}}
            subnet_req = self.new_create_request('subnets', data)
            res = subnet_req.get_response(self.api)
            self.assertEqual(webob.exc.HTTPClientError.code, res.status_int)

    def test_create_subnet_inconsistent_ipv4_hostroute_dst_v6(self):
        host_routes = [{'destination': 'fe80::0/48',
                        'nexthop': '10.0.2.20'}]
        with self.network() as network:
            data = {'subnet': {'network_id': network['network']['id'],
                               'cidr': '10.0.2.0/24',
                               'ip_version': 4,
                               'host_routes': host_routes,
                               'tenant_id': network['network']['tenant_id']}}
            subnet_req = self.new_create_request('subnets', data)
            res = subnet_req.get_response(self.api)
            self.assertEqual(webob.exc.HTTPClientError.code, res.status_int)

    def test_create_subnet_inconsistent_ipv4_hostroute_np_v6(self):
        host_routes = [{'destination': '172.16.0.0/24',
                        'nexthop': 'fe80::1'}]
        with self.network() as network:
            data = {'subnet': {'network_id': network['network']['id'],
                               'cidr': '10.0.2.0/24',
                               'ip_version': 4,
                               'host_routes': host_routes,
                               'tenant_id': network['network']['tenant_id']}}
            subnet_req = self.new_create_request('subnets', data)
            res = subnet_req.get_response(self.api)
            self.assertEqual(webob.exc.HTTPClientError.code, res.status_int)

    def _test_validate_subnet_ipv6_modes(self, cur_subnet=None,
                                         expect_success=True, **modes):
        plugin = manager.NeutronManager.get_plugin()
        ctx = context.get_admin_context()
        new_subnet = {'ip_version': 6,
                      'cidr': 'fe80::/64',
                      'enable_dhcp': True,
                      'ipv6_address_mode': None,
                      'ipv6_ra_mode': None}
        for mode, value in modes.items():
            new_subnet[mode] = value
        if expect_success:
            plugin._validate_subnet(ctx, new_subnet, cur_subnet)
        else:
            self.assertRaises(n_exc.InvalidInput, plugin._validate_subnet,
                              ctx, new_subnet, cur_subnet)

    def _test_validate_subnet_ipv6_pd_modes(self, cur_subnet=None,
                                            expect_success=True, **modes):
        plugin = manager.NeutronManager.get_plugin()
        ctx = context.get_admin_context()
        new_subnet = {'ip_version': 6,
                      'cidr': constants.PROVISIONAL_IPV6_PD_PREFIX,
                      'enable_dhcp': True,
                      'ipv6_address_mode': None,
                      'ipv6_ra_mode': None}
        for mode, value in modes.items():
            new_subnet[mode] = value
        if expect_success:
            plugin._validate_subnet(ctx, new_subnet, cur_subnet)
        else:
            self.assertRaises(n_exc.InvalidInput, plugin._validate_subnet,
                              ctx, new_subnet, cur_subnet)

    def test_create_subnet_ipv6_ra_modes(self):
        # Test all RA modes with no address mode specified
        for ra_mode in constants.IPV6_MODES:
            self._test_validate_subnet_ipv6_modes(
                ipv6_ra_mode=ra_mode)
            self._test_validate_subnet_ipv6_pd_modes(
                ipv6_ra_mode=ra_mode)

    def test_create_subnet_ipv6_addr_modes(self):
        # Test all address modes with no RA mode specified
        for addr_mode in constants.IPV6_MODES:
            self._test_validate_subnet_ipv6_modes(
                ipv6_address_mode=addr_mode)
            self._test_validate_subnet_ipv6_pd_modes(
                ipv6_address_mode=addr_mode)

    def test_create_subnet_ipv6_same_ra_and_addr_modes(self):
        # Test all ipv6 modes with ra_mode==addr_mode
        for ipv6_mode in constants.IPV6_MODES:
            self._test_validate_subnet_ipv6_modes(
                ipv6_ra_mode=ipv6_mode,
                ipv6_address_mode=ipv6_mode)
            self._test_validate_subnet_ipv6_pd_modes(
                ipv6_ra_mode=ipv6_mode,
                ipv6_address_mode=ipv6_mode)

    def test_create_subnet_ipv6_different_ra_and_addr_modes(self):
        # Test all ipv6 modes with ra_mode!=addr_mode
        for ra_mode, addr_mode in itertools.permutations(
                constants.IPV6_MODES, 2):
            self._test_validate_subnet_ipv6_modes(
                expect_success=not (ra_mode and addr_mode),
                ipv6_ra_mode=ra_mode, ipv6_address_mode=addr_mode)
            self._test_validate_subnet_ipv6_pd_modes(
                expect_success=not (ra_mode and addr_mode),
                ipv6_ra_mode=ra_mode, ipv6_address_mode=addr_mode)

    def test_create_subnet_ipv6_out_of_cidr_global_returns_400(self):
        cfg.CONF.set_override('force_gateway_on_subnet', True)
        gateway_ip = '2000::1'
        cidr = '2001::/64'
        with testlib_api.ExpectedException(
                webob.exc.HTTPClientError) as ctx_manager:
            self._test_create_subnet(
                gateway_ip=gateway_ip, cidr=cidr,
                ip_version=constants.IP_VERSION_6,
                ipv6_ra_mode=constants.DHCPV6_STATEFUL,
                ipv6_address_mode=constants.DHCPV6_STATEFUL)
            self.assertEqual(webob.exc.HTTPClientError.code,
                             ctx_manager.exception.code)

    def test_create_subnet_ipv6_out_of_cidr_global(self):
        cfg.CONF.set_override('force_gateway_on_subnet', False)
        gateway_ip = '2000::1'
        cidr = '2001::/64'
        subnet = self._test_create_subnet(
            gateway_ip=gateway_ip, cidr=cidr,
            ip_version=constants.IP_VERSION_6,
            ipv6_ra_mode=constants.DHCPV6_STATEFUL,
            ipv6_address_mode=constants.DHCPV6_STATEFUL)
        self.assertEqual(constants.IP_VERSION_6,
                         subnet['subnet']['ip_version'])
        self.assertEqual(gateway_ip,
                         subnet['subnet']['gateway_ip'])
        self.assertEqual(cidr,
                         subnet['subnet']['cidr'])

    def test_create_subnet_ipv6_gw_is_nw_addr_returns_400(self):
        cfg.CONF.set_override('force_gateway_on_subnet', False)
        gateway_ip = '2001::0'
        cidr = '2001::/64'
        with testlib_api.ExpectedException(
                webob.exc.HTTPClientError) as ctx_manager:
            self._test_create_subnet(
                gateway_ip=gateway_ip, cidr=cidr,
                ip_version=constants.IP_VERSION_6,
                ipv6_ra_mode=constants.DHCPV6_STATEFUL,
                ipv6_address_mode=constants.DHCPV6_STATEFUL)
            self.assertEqual(webob.exc.HTTPClientError.code,
                             ctx_manager.exception.code)

    def test_create_subnet_ipv6_gw_is_nw_end_addr_returns_201(self):
        cfg.CONF.set_override('force_gateway_on_subnet', False)
        gateway_ip = '2001::ffff'
        cidr = '2001::/112'
        subnet = self._test_create_subnet(
            gateway_ip=gateway_ip, cidr=cidr,
            ip_version=constants.IP_VERSION_6,
            ipv6_ra_mode=constants.DHCPV6_STATEFUL,
            ipv6_address_mode=constants.DHCPV6_STATEFUL)
        self.assertEqual(constants.IP_VERSION_6,
                         subnet['subnet']['ip_version'])
        self.assertEqual(gateway_ip,
                         subnet['subnet']['gateway_ip'])
        self.assertEqual(cidr,
                         subnet['subnet']['cidr'])

    def test_create_subnet_ipv6_out_of_cidr_lla(self):
        gateway_ip = 'fe80::1'
        cidr = '2001::/64'
        self._test_create_subnet(
            gateway_ip=gateway_ip, cidr=cidr, ip_version=6,
            ipv6_ra_mode=constants.IPV6_SLAAC,
            ipv6_address_mode=constants.IPV6_SLAAC)

    def test_create_subnet_ipv6_attributes_no_dhcp_enabled(self):
        gateway_ip = 'fe80::1'
        cidr = 'fe80::/64'
        with testlib_api.ExpectedException(
                webob.exc.HTTPClientError) as ctx_manager:
            for mode in constants.IPV6_MODES:
                self._test_create_subnet(gateway_ip=gateway_ip,
                                         cidr=cidr, ip_version=6,
                                         enable_dhcp=False,
                                         ipv6_ra_mode=mode,
                                         ipv6_address_mode=mode)
                self.assertEqual(webob.exc.HTTPClientError.code,
                                 ctx_manager.exception.code)

    def test_create_subnet_invalid_ipv6_ra_mode(self):
        gateway_ip = 'fe80::1'
        cidr = 'fe80::/80'
        with testlib_api.ExpectedException(
                webob.exc.HTTPClientError) as ctx_manager:
            self._test_create_subnet(gateway_ip=gateway_ip,
                                     cidr=cidr, ip_version=6,
                                     ipv6_ra_mode='foo',
                                     ipv6_address_mode='slaac')
            self.assertEqual(webob.exc.HTTPClientError.code,
                             ctx_manager.exception.code)

    def test_create_subnet_invalid_ipv6_address_mode(self):
        gateway_ip = 'fe80::1'
        cidr = 'fe80::/80'
        with testlib_api.ExpectedException(
                webob.exc.HTTPClientError) as ctx_manager:
            self._test_create_subnet(gateway_ip=gateway_ip,
                                     cidr=cidr, ip_version=6,
                                     ipv6_ra_mode='slaac',
                                     ipv6_address_mode='baz')
            self.assertEqual(webob.exc.HTTPClientError.code,
                             ctx_manager.exception.code)

    def test_create_subnet_ipv6_ra_mode_ip_version_4(self):
        cidr = '10.0.2.0/24'
        with testlib_api.ExpectedException(
                webob.exc.HTTPClientError) as ctx_manager:
            self._test_create_subnet(cidr=cidr, ip_version=4,
                                     ipv6_ra_mode=constants.DHCPV6_STATEFUL)
            self.assertEqual(webob.exc.HTTPClientError.code,
                             ctx_manager.exception.code)

    def test_create_subnet_ipv6_address_mode_ip_version_4(self):
        cidr = '10.0.2.0/24'
        with testlib_api.ExpectedException(
                webob.exc.HTTPClientError) as ctx_manager:
            self._test_create_subnet(
                cidr=cidr, ip_version=4,
                ipv6_address_mode=constants.DHCPV6_STATEFUL)
            self.assertEqual(webob.exc.HTTPClientError.code,
                             ctx_manager.exception.code)

    def _test_create_subnet_ipv6_auto_addr_with_port_on_network(
            self, addr_mode, device_owner=DEVICE_OWNER_COMPUTE,
            insert_db_reference_error=False):
        # Create a network with one IPv4 subnet and one port
        with self.network() as network,\
                self.subnet(network=network) as v4_subnet,\
                self.port(subnet=v4_subnet,
                          device_owner=device_owner) as port:
            if insert_db_reference_error:
                orig_fn = orm.Session.add

                def db_ref_err_for_ipalloc(s, instance):
                    if instance.__class__.__name__ == 'IPAllocation':
                        # tweak port_id to cause a FK violation,
                        # thus DBReferenceError
                        instance.port_id = 'nonexistent'
                    return orig_fn(s, instance)

                mock.patch.object(orm.Session, 'add',
                                  side_effect=db_ref_err_for_ipalloc,
                                  autospec=True).start()
                v6_subnet = {'ip_version': 6,
                             'cidr': 'fe80::/64',
                             'gateway_ip': 'fe80::1',
                             'tenant_id': v4_subnet['subnet']['tenant_id']}
                mock.patch.object(db_base_plugin_common.DbBasePluginCommon,
                                  '_get_subnet',
                                  return_value=v6_subnet).start()
            # Add an IPv6 auto-address subnet to the network
            with mock.patch.object(manager.NeutronManager.get_plugin(),
                                   'update_port') as mock_updated_port:
                v6_subnet = self._make_subnet(self.fmt, network, 'fe80::1',
                                              'fe80::/64', ip_version=6,
                                              ipv6_ra_mode=addr_mode,
                                              ipv6_address_mode=addr_mode)
            if (insert_db_reference_error
                    or device_owner == constants.DEVICE_OWNER_ROUTER_SNAT
                    or device_owner in constants.ROUTER_INTERFACE_OWNERS):
                # DVR SNAT and router interfaces should not have been
                # updated with addresses from the new auto-address subnet
                self.assertEqual(1, len(port['port']['fixed_ips']))
            else:
                # Confirm that the port has been updated with an address
                # from the new auto-address subnet
                mock_updated_port.assert_called_with(mock.ANY,
                                                     port['port']['id'],
                                                     mock.ANY)
                req = self.new_show_request('ports', port['port']['id'],
                                            self.fmt)
                sport = self.deserialize(self.fmt,
                                         req.get_response(self.api))
                fixed_ips = sport['port']['fixed_ips']
                self.assertEqual(2, len(fixed_ips))
                self.assertIn(v6_subnet['subnet']['id'],
                              [fixed_ip['subnet_id']
                               for fixed_ip in fixed_ips])

    def test_create_subnet_ipv6_slaac_with_port_on_network(self):
        self._test_create_subnet_ipv6_auto_addr_with_port_on_network(
            constants.IPV6_SLAAC)

    def test_create_subnet_dhcpv6_stateless_with_port_on_network(self):
        self._test_create_subnet_ipv6_auto_addr_with_port_on_network(
            constants.DHCPV6_STATELESS)

    def test_create_subnet_ipv6_slaac_with_dhcp_port_on_network(self):
        self._test_create_subnet_ipv6_auto_addr_with_port_on_network(
            constants.IPV6_SLAAC,
            device_owner=constants.DEVICE_OWNER_DHCP)

    def test_create_subnet_ipv6_slaac_with_router_intf_on_network(self):
        self._test_create_subnet_ipv6_auto_addr_with_port_on_network(
            constants.IPV6_SLAAC,
            device_owner=constants.DEVICE_OWNER_ROUTER_INTF)

    def test_create_subnet_ipv6_slaac_with_snat_intf_on_network(self):
        self._test_create_subnet_ipv6_auto_addr_with_port_on_network(
            constants.IPV6_SLAAC,
            device_owner=constants.DEVICE_OWNER_ROUTER_SNAT)

    def test_create_subnet_ipv6_slaac_with_db_reference_error(self):
        self._test_create_subnet_ipv6_auto_addr_with_port_on_network(
            constants.IPV6_SLAAC, insert_db_reference_error=True)

    def test_update_subnet_no_gateway(self):
        with self.subnet() as subnet:
            data = {'subnet': {'gateway_ip': '10.0.0.1'}}
            req = self.new_update_request('subnets', data,
                                          subnet['subnet']['id'])
            res = self.deserialize(self.fmt, req.get_response(self.api))
            self.assertEqual(data['subnet']['gateway_ip'],
                             res['subnet']['gateway_ip'])
            data = {'subnet': {'gateway_ip': None}}
            req = self.new_update_request('subnets', data,
                                          subnet['subnet']['id'])
            res = self.deserialize(self.fmt, req.get_response(self.api))
            # assert on the response, not the request body, so the test
            # actually verifies the gateway was cleared
            self.assertIsNone(res['subnet']['gateway_ip'])

    def test_subnet_usable_after_update(self):
        with self.subnet() as subnet:
            data = {'subnet': {'name': 'newname'}}
            req = self.new_update_request('subnets', data,
                                          subnet['subnet']['id'])
            res = self.deserialize(self.fmt, req.get_response(self.api))
            self.assertEqual(data['subnet']['name'], res['subnet']['name'])
            with self.port(subnet=subnet):
                pass

    def test_update_subnet(self):
        with self.subnet() as subnet:
            data = {'subnet': {'gateway_ip': '10.0.0.1'}}
            req = self.new_update_request('subnets', data,
                                          subnet['subnet']['id'])
            res = self.deserialize(self.fmt, req.get_response(self.api))
            self.assertEqual(data['subnet']['gateway_ip'],
                             res['subnet']['gateway_ip'])

    def test_update_subnet_adding_additional_host_routes_and_dns(self):
        host_routes = [{'destination': '172.16.0.0/24',
                        'nexthop': '10.0.2.2'}]
        with self.network() as network:
            data = {'subnet': {'network_id': network['network']['id'],
                               'cidr': '10.0.2.0/24',
                               'ip_version': 4,
                               'dns_nameservers': ['192.168.0.1'],
                               'host_routes': host_routes,
                               'tenant_id': network['network']['tenant_id']}}
            subnet_req = self.new_create_request('subnets', data)
            res = self.deserialize(self.fmt,
                                   subnet_req.get_response(self.api))

            host_routes = [{'destination': '172.16.0.0/24',
                            'nexthop': '10.0.2.2'},
                           {'destination': '192.168.0.0/24',
                            'nexthop': '10.0.2.3'}]
            dns_nameservers = ['192.168.0.1', '192.168.0.2']
            data = {'subnet': {'host_routes': host_routes,
                               'dns_nameservers': dns_nameservers}}
            req = self.new_update_request('subnets', data,
                                          res['subnet']['id'])
            res = self.deserialize(self.fmt, req.get_response(self.api))
            self.assertEqual(
                sorted(res['subnet']['host_routes'],
                       key=utils.safe_sort_key),
                sorted(host_routes, key=utils.safe_sort_key))
            self.assertEqual(dns_nameservers,
                             res['subnet']['dns_nameservers'])

    def test_update_subnet_shared_returns_400(self):
        with self.network(shared=True) as network:
            with self.subnet(network=network) as subnet:
                data = {'subnet': {'shared': True}}
                req = self.new_update_request('subnets', data,
                                              subnet['subnet']['id'])
                res = req.get_response(self.api)
                self.assertEqual(webob.exc.HTTPClientError.code,
                                 res.status_int)

    def test_update_subnet_gw_outside_cidr_returns_400(self):
        cfg.CONF.set_override('force_gateway_on_subnet', True)
        with self.network() as network:
            with self.subnet(network=network) as subnet:
                data = {'subnet': {'gateway_ip': '100.0.0.1'}}
                req = self.new_update_request('subnets', data,
                                              subnet['subnet']['id'])
                res = req.get_response(self.api)
                self.assertEqual(webob.exc.HTTPClientError.code,
                                 res.status_int)

    def test_update_subnet_gw_outside_cidr_returns_200(self):
        cfg.CONF.set_override('force_gateway_on_subnet', False)
        with self.network() as network:
            with self.subnet(network=network) as subnet:
                data = {'subnet': {'gateway_ip': '100.0.0.1'}}
                req = self.new_update_request('subnets', data,
                                              subnet['subnet']['id'])
                res = req.get_response(self.api)
                self.assertEqual(webob.exc.HTTPOk.code,
                                 res.status_int)

    def test_update_subnet_gw_ip_in_use_by_router_returns_409(self):
        with self.network() as network:
            with self.subnet(network=network,
                             allocation_pools=[{'start': '10.0.0.2',
                                                'end': '10.0.0.8'}]) as subnet:
                s = subnet['subnet']
                with self.port(
                        subnet=subnet,
                        fixed_ips=[{'subnet_id': s['id'],
                                    'ip_address': s['gateway_ip']}]) as port:
                    # this protection only applies to router ports so we need
                    # to make this port belong to a router
                    ctx = context.get_admin_context()
                    with ctx.session.begin():
                        router = l3_db.Router()
                        ctx.session.add(router)
                    with ctx.session.begin():
                        rp = l3_db.RouterPort(router_id=router.id,
                                              port_id=port['port']['id'])
                        ctx.session.add(rp)

                    data = {'subnet': {'gateway_ip': '10.0.0.99'}}
                    req = self.new_update_request('subnets', data,
                                                  s['id'])
                    res = req.get_response(self.api)
                    self.assertEqual(409, res.status_int)
                    # should work fine if it's not a router port
                    with ctx.session.begin():
                        ctx.session.delete(rp)
                        ctx.session.delete(router)
                    res = req.get_response(self.api)
                    self.assertEqual(200, res.status_int)

    def test_update_subnet_inconsistent_ipv4_gatewayv6(self):
        with self.network() as network:
            with self.subnet(network=network) as subnet:
                data = {'subnet': {'gateway_ip': 'fe80::1'}}
                req = self.new_update_request('subnets', data,
                                              subnet['subnet']['id'])
                res = req.get_response(self.api)
                self.assertEqual(webob.exc.HTTPClientError.code,
                                 res.status_int)

    def test_update_subnet_inconsistent_ipv6_gatewayv4(self):
        with self.network() as network:
            with self.subnet(network=network,
                             ip_version=6, cidr='fe80::/48') as subnet:
                data = {'subnet': {'gateway_ip': '10.1.1.1'}}
                req = self.new_update_request('subnets', data,
                                              subnet['subnet']['id'])
                res = req.get_response(self.api)
                self.assertEqual(webob.exc.HTTPClientError.code,
                                 res.status_int)

    def test_update_subnet_inconsistent_ipv4_dns_v6(self):
        dns_nameservers = ['fe80::1']
        with self.network() as network:
            with self.subnet(network=network) as subnet:
                data = {'subnet': {'dns_nameservers': dns_nameservers}}
                req = self.new_update_request('subnets', data,
                                              subnet['subnet']['id'])
                res = req.get_response(self.api)
                self.assertEqual(webob.exc.HTTPClientError.code,
                                 res.status_int)

    def test_update_subnet_inconsistent_ipv6_hostroute_dst_v4(self):
        host_routes = [{'destination': 'fe80::0/48',
                        'nexthop': '10.0.2.20'}]
        with self.network() as network:
            with self.subnet(network=network,
                             ip_version=6, cidr='fe80::/48') as subnet:
                data = {'subnet': {'host_routes': host_routes}}
                req = self.new_update_request('subnets', data,
                                              subnet['subnet']['id'])
                res = req.get_response(self.api)
                self.assertEqual(webob.exc.HTTPClientError.code,
                                 res.status_int)

    def test_update_subnet_inconsistent_ipv6_hostroute_np_v4(self):
        host_routes = [{'destination': '172.16.0.0/24',
                        'nexthop': 'fe80::1'}]
        with self.network() as network:
            with self.subnet(network=network,
                             ip_version=6, cidr='fe80::/48') as subnet:
                data = {'subnet': {'host_routes': host_routes}}
                req = self.new_update_request('subnets', data,
                                              subnet['subnet']['id'])
                res = req.get_response(self.api)
                self.assertEqual(webob.exc.HTTPClientError.code,
                                 res.status_int)

    def test_update_subnet_gateway_in_allocation_pool_returns_409(self):
        allocation_pools = [{'start': '10.0.0.2', 'end': '10.0.0.254'}]
        with self.network() as network:
            with self.subnet(network=network,
                             allocation_pools=allocation_pools,
                             cidr='10.0.0.0/24') as subnet:
                data = {'subnet': {'gateway_ip': '10.0.0.50'}}
                req = self.new_update_request('subnets', data,
                                              subnet['subnet']['id'])
                res = req.get_response(self.api)
                self.assertEqual(webob.exc.HTTPConflict.code,
                                 res.status_int)

    def test_update_subnet_ipv6_attributes_fails(self):
        with self.subnet(ip_version=6, cidr='fe80::/64',
                         ipv6_ra_mode=constants.IPV6_SLAAC,
                         ipv6_address_mode=constants.IPV6_SLAAC) as subnet:
            data = {'subnet': {'ipv6_ra_mode': constants.DHCPV6_STATEFUL,
                               'ipv6_address_mode':
                               constants.DHCPV6_STATEFUL}}
            req = self.new_update_request('subnets', data,
                                          subnet['subnet']['id'])
            res = req.get_response(self.api)
            self.assertEqual(webob.exc.HTTPClientError.code,
                             res.status_int)

    def test_update_subnet_ipv6_ra_mode_fails(self):
        with self.subnet(ip_version=6, cidr='fe80::/64',
                         ipv6_ra_mode=constants.IPV6_SLAAC) as subnet:
            data = {'subnet': {'ipv6_ra_mode': constants.DHCPV6_STATEFUL}}
            req = self.new_update_request('subnets', data,
                                          subnet['subnet']['id'])
            res = req.get_response(self.api)
            self.assertEqual(webob.exc.HTTPClientError.code,
                             res.status_int)

    def test_update_subnet_ipv6_address_mode_fails(self):
        with self.subnet(ip_version=6, cidr='fe80::/64',
                         ipv6_address_mode=constants.IPV6_SLAAC) as subnet:
            data = {'subnet': {'ipv6_address_mode':
                               constants.DHCPV6_STATEFUL}}
            req = self.new_update_request('subnets', data,
                                          subnet['subnet']['id'])
            res = req.get_response(self.api)
            self.assertEqual(webob.exc.HTTPClientError.code,
                             res.status_int)

    def test_update_subnet_ipv6_cannot_disable_dhcp(self):
        with self.subnet(ip_version=6, cidr='fe80::/64',
                         ipv6_ra_mode=constants.IPV6_SLAAC,
                         ipv6_address_mode=constants.IPV6_SLAAC) as subnet:
            data = {'subnet': {'enable_dhcp': False}}
            req = self.new_update_request('subnets', data,
                                          subnet['subnet']['id'])
            res = req.get_response(self.api)
            self.assertEqual(webob.exc.HTTPClientError.code,
                             res.status_int)

    def test_update_subnet_ipv6_ra_mode_ip_version_4(self):
        with self.network() as network:
            with self.subnet(network=network) as subnet:
                data = {'subnet': {'ipv6_ra_mode':
                                   constants.DHCPV6_STATEFUL}}
                req = self.new_update_request('subnets', data,
                                              subnet['subnet']['id'])
                res = req.get_response(self.api)
                self.assertEqual(webob.exc.HTTPClientError.code,
                                 res.status_int)

    def test_update_subnet_ipv6_address_mode_ip_version_4(self):
        with self.network() as network:
            with self.subnet(network=network) as subnet:
                data = {'subnet': {'ipv6_address_mode':
                                   constants.DHCPV6_STATEFUL}}
                req = self.new_update_request('subnets', data,
                                              subnet['subnet']['id'])
                res = req.get_response(self.api)
                self.assertEqual(webob.exc.HTTPClientError.code,
                                 res.status_int)

    def _verify_updated_subnet_allocation_pools(self, res, with_gateway_ip):
        res = self.deserialize(self.fmt, res)
        self.assertEqual(2, len(res['subnet']['allocation_pools']))
        res_vals = (
            list(res['subnet']['allocation_pools'][0].values()) +
            list(res['subnet']['allocation_pools'][1].values())
        )
        for pool_val in ['10', '20', '30', '40']:
            self.assertIn('192.168.0.%s' % pool_val, res_vals)
        if with_gateway_ip:
            self.assertEqual('192.168.0.9',
                             res['subnet']['gateway_ip'])

    def _test_update_subnet_allocation_pools(self, with_gateway_ip=False):
        """Test that we can successfully update with sane params.

        This will create a subnet with specified allocation_pools
        Then issue an update (PUT) to update these using correct
        (i.e. non erroneous) params. Finally retrieve the updated
        subnet and verify.
        """
        allocation_pools = [{'start': '192.168.0.2', 'end': '192.168.0.254'}]
        with self.network() as network:
            with self.subnet(network=network,
                             allocation_pools=allocation_pools,
                             cidr='192.168.0.0/24') as subnet:
                data = {'subnet': {'allocation_pools': [
                    {'start': '192.168.0.10', 'end': '192.168.0.20'},
                    {'start': '192.168.0.30', 'end': '192.168.0.40'}]}}
                if with_gateway_ip:
                    data['subnet']['gateway_ip'] = '192.168.0.9'
                req = self.new_update_request('subnets', data,
                                              subnet['subnet']['id'])
                # check response code and contents
                res = req.get_response(self.api)
                self.assertEqual(200, res.status_code)
                self._verify_updated_subnet_allocation_pools(res,
                                                             with_gateway_ip)
                # GET subnet to verify DB updated correctly
                req = self.new_show_request('subnets',
                                            subnet['subnet']['id'],
                                            self.fmt)
                res = req.get_response(self.api)
                self._verify_updated_subnet_allocation_pools(res,
                                                             with_gateway_ip)

    def test_update_subnet_allocation_pools(self):
        self._test_update_subnet_allocation_pools()

    def test_update_subnet_allocation_pools_and_gateway_ip(self):
        self._test_update_subnet_allocation_pools(with_gateway_ip=True)

    # updating alloc pool to something outside subnet.cidr
    def test_update_subnet_allocation_pools_invalid_pool_for_cidr(self):
        """Test update alloc pool to something outside subnet.cidr.

        This makes sure that an erroneous allocation_pool specified
        in a subnet update (outside subnet cidr) will result in an error.
""" allocation_pools = [{'start': '192.168.0.2', 'end': '192.168.0.254'}] with self.network() as network: with self.subnet(network=network, allocation_pools=allocation_pools, cidr='192.168.0.0/24') as subnet: data = {'subnet': {'allocation_pools': [ {'start': '10.0.0.10', 'end': '10.0.0.20'}]}} req = self.new_update_request('subnets', data, subnet['subnet']['id']) res = req.get_response(self.api) self.assertEqual(webob.exc.HTTPClientError.code, res.status_int) #updating alloc pool on top of existing subnet.gateway_ip def test_update_subnet_allocation_pools_over_gateway_ip_returns_409(self): allocation_pools = [{'start': '10.0.0.2', 'end': '10.0.0.254'}] with self.network() as network: with self.subnet(network=network, allocation_pools=allocation_pools, cidr='10.0.0.0/24') as subnet: data = {'subnet': {'allocation_pools': [ {'start': '10.0.0.1', 'end': '10.0.0.254'}]}} req = self.new_update_request('subnets', data, subnet['subnet']['id']) res = req.get_response(self.api) self.assertEqual(webob.exc.HTTPConflict.code, res.status_int) def test_update_subnet_allocation_pools_invalid_returns_400(self): allocation_pools = [{'start': '10.0.0.2', 'end': '10.0.0.254'}] with self.network() as network: with self.subnet(network=network, allocation_pools=allocation_pools, cidr='10.0.0.0/24') as subnet: # Check allocation pools invalid_pools = [[{'end': '10.0.0.254'}], [{'start': '10.0.0.254'}], [{'start': '1000.0.0.254'}], [{'start': '10.0.0.2', 'end': '10.0.0.254'}, {'end': '10.0.0.254'}], None, [{'start': '10.0.0.200', 'end': '10.0.3.20'}], [{'start': '10.0.2.250', 'end': '10.0.3.5'}], [{'start': '10.0.0.0', 'end': '10.0.0.50'}], [{'start': '10.0.2.10', 'end': '10.0.2.5'}], [{'start': 'fe80::2', 'end': 'fe80::ffff'}]] for pool in invalid_pools: data = {'subnet': {'allocation_pools': pool}} req = self.new_update_request('subnets', data, subnet['subnet']['id']) res = req.get_response(self.api) self.assertEqual(webob.exc.HTTPClientError.code, res.status_int) def test_update_subnet_allocation_pools_overlapping_returns_409(self): allocation_pools = [{'start': '10.0.0.2', 'end': '10.0.0.254'}] with self.network() as network: with self.subnet(network=network, allocation_pools=allocation_pools, cidr='10.0.0.0/24') as subnet: data = {'subnet': {'allocation_pools': [ {'start': '10.0.0.20', 'end': '10.0.0.40'}, {'start': '10.0.0.30', 'end': '10.0.0.50'}]}} req = self.new_update_request('subnets', data, subnet['subnet']['id']) res = req.get_response(self.api) self.assertEqual(webob.exc.HTTPConflict.code, res.status_int) def test_show_subnet(self): with self.network() as network: with self.subnet(network=network) as subnet: req = self.new_show_request('subnets', subnet['subnet']['id']) res = self.deserialize(self.fmt, req.get_response(self.api)) self.assertEqual(res['subnet']['id'], subnet['subnet']['id']) self.assertEqual(res['subnet']['network_id'], network['network']['id']) def test_list_subnets(self): with self.network() as network: with self.subnet(network=network, gateway_ip='10.0.0.1', cidr='10.0.0.0/24') as v1,\ self.subnet(network=network, gateway_ip='10.0.1.1', cidr='10.0.1.0/24') as v2,\ self.subnet(network=network, gateway_ip='10.0.2.1', cidr='10.0.2.0/24') as v3: subnets = (v1, v2, v3) self._test_list_resources('subnet', subnets) def test_list_subnets_shared(self): with self.network(shared=True) as network: with self.subnet(network=network, cidr='10.0.0.0/24') as subnet: with self.subnet(cidr='10.0.1.0/24') as priv_subnet: # normal user should see only 1 subnet req = self.new_list_request('subnets') 
                    req.environ['neutron.context'] = context.Context(
                        '', 'some_tenant')
                    res = self.deserialize(self.fmt,
                                           req.get_response(self.api))
                    self.assertEqual(1, len(res['subnets']))
                    self.assertEqual(res['subnets'][0]['cidr'],
                                     subnet['subnet']['cidr'])
                    # admin will see both subnets
                    admin_req = self.new_list_request('subnets')
                    admin_res = self.deserialize(
                        self.fmt, admin_req.get_response(self.api))
                    self.assertEqual(2, len(admin_res['subnets']))
                    cidrs = [sub['cidr'] for sub in admin_res['subnets']]
                    self.assertIn(subnet['subnet']['cidr'], cidrs)
                    self.assertIn(priv_subnet['subnet']['cidr'], cidrs)

    def test_list_subnets_with_parameter(self):
        with self.network() as network:
            with self.subnet(network=network,
                             gateway_ip='10.0.0.1',
                             cidr='10.0.0.0/24') as v1,\
                    self.subnet(network=network,
                                gateway_ip='10.0.1.1',
                                cidr='10.0.1.0/24') as v2:
                subnets = (v1, v2)
                query_params = 'ip_version=4&ip_version=6'
                self._test_list_resources('subnet', subnets,
                                          query_params=query_params)
                query_params = 'ip_version=6'
                self._test_list_resources('subnet', [],
                                          query_params=query_params)

    def test_list_subnets_with_sort_native(self):
        if self._skip_native_sorting:
            self.skipTest("Skip test for not implemented sorting feature")
        with self.subnet(enable_dhcp=True, cidr='10.0.0.0/24') as subnet1,\
                self.subnet(enable_dhcp=False,
                            cidr='11.0.0.0/24') as subnet2,\
                self.subnet(enable_dhcp=False,
                            cidr='12.0.0.0/24') as subnet3:
            self._test_list_with_sort('subnet', (subnet3, subnet2, subnet1),
                                      [('enable_dhcp', 'asc'),
                                       ('cidr', 'desc')])

    def test_list_subnets_with_sort_emulated(self):
        helper_patcher = mock.patch(
            'neutron.api.v2.base.Controller._get_sorting_helper',
            new=_fake_get_sorting_helper)
        helper_patcher.start()
        with self.subnet(enable_dhcp=True, cidr='10.0.0.0/24') as subnet1,\
                self.subnet(enable_dhcp=False,
                            cidr='11.0.0.0/24') as subnet2,\
                self.subnet(enable_dhcp=False,
                            cidr='12.0.0.0/24') as subnet3:
            self._test_list_with_sort('subnet', (subnet3, subnet2, subnet1),
                                      [('enable_dhcp', 'asc'),
                                       ('cidr', 'desc')])

    def test_list_subnets_with_pagination_native(self):
        if self._skip_native_pagination:
            self.skipTest("Skip test for not implemented pagination feature")
        with self.subnet(cidr='10.0.0.0/24') as subnet1,\
                self.subnet(cidr='11.0.0.0/24') as subnet2,\
                self.subnet(cidr='12.0.0.0/24') as subnet3:
            self._test_list_with_pagination('subnet',
                                            (subnet1, subnet2, subnet3),
                                            ('cidr', 'asc'), 2, 2)

    def test_list_subnets_with_pagination_emulated(self):
        helper_patcher = mock.patch(
            'neutron.api.v2.base.Controller._get_pagination_helper',
            new=_fake_get_pagination_helper)
        helper_patcher.start()
        with self.subnet(cidr='10.0.0.0/24') as subnet1,\
                self.subnet(cidr='11.0.0.0/24') as subnet2,\
                self.subnet(cidr='12.0.0.0/24') as subnet3:
            self._test_list_with_pagination('subnet',
                                            (subnet1, subnet2, subnet3),
                                            ('cidr', 'asc'), 2, 2)

    def test_list_subnets_with_pagination_reverse_native(self):
        if self._skip_native_sorting:
            self.skipTest("Skip test for not implemented sorting feature")
        with self.subnet(cidr='10.0.0.0/24') as subnet1,\
                self.subnet(cidr='11.0.0.0/24') as subnet2,\
                self.subnet(cidr='12.0.0.0/24') as subnet3:
            self._test_list_with_pagination_reverse('subnet',
                                                    (subnet1, subnet2,
                                                     subnet3),
                                                    ('cidr', 'asc'), 2, 2)

    def test_list_subnets_with_pagination_reverse_emulated(self):
        helper_patcher = mock.patch(
            'neutron.api.v2.base.Controller._get_pagination_helper',
            new=_fake_get_pagination_helper)
        helper_patcher.start()
        with self.subnet(cidr='10.0.0.0/24') as subnet1,\
                self.subnet(cidr='11.0.0.0/24') as subnet2,\
                self.subnet(cidr='12.0.0.0/24') as subnet3:
            self._test_list_with_pagination_reverse('subnet',
                                                    (subnet1, subnet2,
                                                     subnet3),
                                                    ('cidr', 'asc'), 2, 2)

    def test_invalid_ip_version(self):
        with self.network() as network:
            data = {'subnet': {'network_id': network['network']['id'],
                               'cidr': '10.0.2.0/24',
                               'ip_version': 7,
                               'tenant_id': network['network']['tenant_id'],
                               'gateway_ip': '10.0.2.1'}}
            subnet_req = self.new_create_request('subnets', data)
            res = subnet_req.get_response(self.api)
            self.assertEqual(webob.exc.HTTPClientError.code, res.status_int)

    def test_invalid_subnet(self):
        with self.network() as network:
            data = {'subnet': {'network_id': network['network']['id'],
                               'cidr': 'invalid',
                               'ip_version': 4,
                               'tenant_id': network['network']['tenant_id'],
                               'gateway_ip': '10.0.2.1'}}
            subnet_req = self.new_create_request('subnets', data)
            res = subnet_req.get_response(self.api)
            self.assertEqual(webob.exc.HTTPClientError.code, res.status_int)

    def _test_unsupported_subnet_cidr(self, subnet_cidr):
        with self.network() as network:
            subnet = {'network_id': network['network']['id'],
                      'cidr': subnet_cidr,
                      'ip_version': 4,
                      'enable_dhcp': True,
                      'tenant_id': network['network']['tenant_id']}
            plugin = manager.NeutronManager.get_plugin()
            if hasattr(plugin, '_validate_subnet'):
                self.assertRaises(n_exc.InvalidInput,
                                  plugin._validate_subnet,
                                  context.get_admin_context(),
                                  subnet)

    def test_unsupported_subnet_cidr_multicast(self):
        self._test_unsupported_subnet_cidr("224.0.0.1/16")

    def test_unsupported_subnet_cidr_loopback(self):
        self._test_unsupported_subnet_cidr("127.0.0.1/8")
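
    # Illustrative sketch only: the two rejected CIDRs above fall into
    # address classes that netaddr exposes as predicates, so the property
    # that makes them "unsupported" can be checked in isolation.  This
    # assumes nothing beyond the netaddr package imported by this module;
    # underscore-prefixed so the runner does not collect it.
    def _example_unsupported_cidr_predicates(self):
        self.assertTrue(netaddr.IPAddress('224.0.0.1').is_multicast())
        self.assertTrue(netaddr.IPAddress('127.0.0.1').is_loopback())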

    def test_invalid_ip_address(self):
        with self.network() as network:
            data = {'subnet': {'network_id': network['network']['id'],
                               'cidr': '10.0.2.0/24',
                               'ip_version': 4,
                               'tenant_id': network['network']['tenant_id'],
                               'gateway_ip': 'ipaddress'}}
            subnet_req = self.new_create_request('subnets', data)
            res = subnet_req.get_response(self.api)
            self.assertEqual(webob.exc.HTTPClientError.code, res.status_int)

    def test_invalid_uuid(self):
        with self.network() as network:
            data = {'subnet': {'network_id': 'invalid-uuid',
                               'cidr': '10.0.2.0/24',
                               'ip_version': 4,
                               'tenant_id': network['network']['tenant_id'],
                               'gateway_ip': '10.0.0.1'}}
            subnet_req = self.new_create_request('subnets', data)
            res = subnet_req.get_response(self.api)
            self.assertEqual(webob.exc.HTTPClientError.code, res.status_int)

    def test_create_subnet_with_one_dns(self):
        gateway_ip = '10.0.0.1'
        cidr = '10.0.0.0/24'
        allocation_pools = [{'start': '10.0.0.2',
                             'end': '10.0.0.100'}]
        dns_nameservers = ['1.2.3.4']
        self._test_create_subnet(gateway_ip=gateway_ip,
                                 cidr=cidr,
                                 allocation_pools=allocation_pools,
                                 dns_nameservers=dns_nameservers)

    def test_create_subnet_with_two_dns(self):
        gateway_ip = '10.0.0.1'
        cidr = '10.0.0.0/24'
        allocation_pools = [{'start': '10.0.0.2',
                             'end': '10.0.0.100'}]
        dns_nameservers = ['1.2.3.4', '4.3.2.1']
        self._test_create_subnet(gateway_ip=gateway_ip,
                                 cidr=cidr,
                                 allocation_pools=allocation_pools,
                                 dns_nameservers=dns_nameservers)

    def test_create_subnet_with_too_many_dns(self):
        with self.network() as network:
            dns_list = ['1.1.1.1', '2.2.2.2', '3.3.3.3']
            data = {'subnet': {'network_id': network['network']['id'],
                               'cidr': '10.0.2.0/24',
                               'ip_version': 4,
                               'tenant_id': network['network']['tenant_id'],
                               'gateway_ip': '10.0.0.1',
                               'dns_nameservers': dns_list}}
            subnet_req = self.new_create_request('subnets', data)
            res = subnet_req.get_response(self.api)
            self.assertEqual(webob.exc.HTTPClientError.code, res.status_int)

    def test_create_subnet_with_one_host_route(self):
        gateway_ip = '10.0.0.1'
        cidr = '10.0.0.0/24'
        allocation_pools = [{'start': '10.0.0.2',
                             'end': '10.0.0.100'}]
        host_routes = [{'destination': '135.207.0.0/16',
                        'nexthop': '1.2.3.4'}]
        self._test_create_subnet(gateway_ip=gateway_ip,
                                 cidr=cidr,
                                 allocation_pools=allocation_pools,
                                 host_routes=host_routes)

    def test_create_subnet_with_two_host_routes(self):
        gateway_ip = '10.0.0.1'
        cidr = '10.0.0.0/24'
        allocation_pools = [{'start': '10.0.0.2',
                             'end': '10.0.0.100'}]
        host_routes = [{'destination': '135.207.0.0/16',
                        'nexthop': '1.2.3.4'},
                       {'destination': '12.0.0.0/8',
                        'nexthop': '4.3.2.1'}]
        self._test_create_subnet(gateway_ip=gateway_ip,
                                 cidr=cidr,
                                 allocation_pools=allocation_pools,
                                 host_routes=host_routes)

    def test_create_subnet_with_too_many_routes(self):
        with self.network() as network:
            host_routes = [{'destination': '135.207.0.0/16',
                            'nexthop': '1.2.3.4'},
                           {'destination': '12.0.0.0/8',
                            'nexthop': '4.3.2.1'},
                           {'destination': '141.212.0.0/16',
                            'nexthop': '2.2.2.2'}]
            data = {'subnet': {'network_id': network['network']['id'],
                               'cidr': '10.0.2.0/24',
                               'ip_version': 4,
                               'tenant_id': network['network']['tenant_id'],
                               'gateway_ip': '10.0.0.1',
                               'host_routes': host_routes}}
            subnet_req = self.new_create_request('subnets', data)
            res = subnet_req.get_response(self.api)
            self.assertEqual(webob.exc.HTTPClientError.code, res.status_int)

    def test_update_subnet_dns(self):
        with self.subnet() as subnet:
            data = {'subnet': {'dns_nameservers': ['11.0.0.1']}}
            req = self.new_update_request('subnets', data,
                                          subnet['subnet']['id'])
            res = self.deserialize(self.fmt, req.get_response(self.api))
            self.assertEqual(data['subnet']['dns_nameservers'],
                             res['subnet']['dns_nameservers'])

    def test_subnet_lifecycle_dns_retains_order(self):
        cfg.CONF.set_override('max_dns_nameservers', 3)
        with self.subnet(dns_nameservers=['1.1.1.1', '2.2.2.2',
                                          '3.3.3.3']) as subnet:
            subnets = self._show('subnets', subnet['subnet']['id'],
                                 expected_code=webob.exc.HTTPOk.code)
            self.assertEqual(['1.1.1.1', '2.2.2.2', '3.3.3.3'],
                             subnets['subnet']['dns_nameservers'])
            data = {'subnet': {'dns_nameservers': ['2.2.2.2', '3.3.3.3',
                                                   '1.1.1.1']}}
            req = self.new_update_request('subnets', data,
                                          subnet['subnet']['id'])
            res = self.deserialize(self.fmt, req.get_response(self.api))
            self.assertEqual(data['subnet']['dns_nameservers'],
                             res['subnet']['dns_nameservers'])
            subnets = self._show('subnets', subnet['subnet']['id'],
                                 expected_code=webob.exc.HTTPOk.code)
            self.assertEqual(data['subnet']['dns_nameservers'],
                             subnets['subnet']['dns_nameservers'])

    def test_update_subnet_dns_to_None(self):
        with self.subnet(dns_nameservers=['11.0.0.1']) as subnet:
            data = {'subnet': {'dns_nameservers': None}}
            req = self.new_update_request('subnets', data,
                                          subnet['subnet']['id'])
            res = self.deserialize(self.fmt, req.get_response(self.api))
            self.assertEqual([], res['subnet']['dns_nameservers'])
            data = {'subnet': {'dns_nameservers': ['11.0.0.3']}}
            req = self.new_update_request('subnets', data,
                                          subnet['subnet']['id'])
            res = self.deserialize(self.fmt, req.get_response(self.api))
            self.assertEqual(data['subnet']['dns_nameservers'],
                             res['subnet']['dns_nameservers'])

    def test_update_subnet_dns_with_too_many_entries(self):
        with self.subnet() as subnet:
            dns_list = ['1.1.1.1', '2.2.2.2', '3.3.3.3']
            data = {'subnet': {'dns_nameservers': dns_list}}
            req = self.new_update_request('subnets', data,
                                          subnet['subnet']['id'])
            res = req.get_response(self.api)
            self.assertEqual(webob.exc.HTTPClientError.code, res.status_int)

    def test_update_subnet_route(self):
        with self.subnet() as subnet:
            data = {'subnet': {'host_routes':
                               [{'destination': '12.0.0.0/8',
                                 'nexthop': '1.2.3.4'}]}}
            req = self.new_update_request('subnets', data,
                                          subnet['subnet']['id'])
            res = self.deserialize(self.fmt, req.get_response(self.api))
            self.assertEqual(data['subnet']['host_routes'],
                             res['subnet']['host_routes'])

    def test_update_subnet_route_to_None(self):
        with self.subnet(host_routes=[{'destination': '12.0.0.0/8',
                                       'nexthop': '1.2.3.4'}]) as subnet:
            data = {'subnet': {'host_routes': None}}
            req = self.new_update_request('subnets', data,
                                          subnet['subnet']['id'])
            res = self.deserialize(self.fmt, req.get_response(self.api))
            self.assertEqual([], res['subnet']['host_routes'])
            data = {'subnet': {'host_routes': [{'destination': '12.0.0.0/8',
                                                'nexthop': '1.2.3.4'}]}}
            req = self.new_update_request('subnets', data,
                                          subnet['subnet']['id'])
            res = self.deserialize(self.fmt, req.get_response(self.api))
            self.assertEqual(data['subnet']['host_routes'],
                             res['subnet']['host_routes'])

    def _test_update_subnet(self, old_gw=None, new_gw=None,
                            check_gateway=False):
        allocation_pools = [{'start': '192.168.0.16',
                             'end': '192.168.0.254'}]
        with self.network() as network:
            with self.subnet(network=network,
                             gateway_ip=old_gw,
                             allocation_pools=allocation_pools,
                             cidr='192.168.0.0/24') as subnet:
                data = {
                    'subnet': {
                        'allocation_pools': [
                            {'start': '192.168.0.10',
                             'end': '192.168.0.20'},
                            {'start': '192.168.0.30',
                             'end': '192.168.0.40'}],
                        'gateway_ip': new_gw}}
                req = self.new_update_request('subnets', data,
                                              subnet['subnet']['id'])
                res = req.get_response(self.api)
                self.assertEqual(200, res.status_code)
                self._verify_updated_subnet_allocation_pools(
                    res, with_gateway_ip=check_gateway)

    def test_update_subnet_from_no_gw_to_no_gw(self):
        self._test_update_subnet()

    def test_update_subnet_from_gw_to_no_gw(self):
        self._test_update_subnet(old_gw='192.168.0.15')

    def test_update_subnet_from_gw_to_new_gw(self):
        self._test_update_subnet(old_gw='192.168.0.15',
                                 new_gw='192.168.0.9',
                                 check_gateway=True)

    def test_update_subnet_route_with_too_many_entries(self):
        with self.subnet() as subnet:
            data = {'subnet': {'host_routes': [
                    {'destination': '12.0.0.0/8', 'nexthop': '1.2.3.4'},
                    {'destination': '13.0.0.0/8', 'nexthop': '1.2.3.5'},
                    {'destination': '14.0.0.0/8', 'nexthop': '1.2.3.6'}]}}
            req = self.new_update_request('subnets', data,
                                          subnet['subnet']['id'])
            res = req.get_response(self.api)
            self.assertEqual(webob.exc.HTTPClientError.code, res.status_int)

    def test_delete_subnet_with_dns(self):
        gateway_ip = '10.0.0.1'
        cidr = '10.0.0.0/24'
        dns_nameservers = ['1.2.3.4']
        # Create new network
        res = self._create_network(fmt=self.fmt, name='net',
                                   admin_state_up=True)
        network = self.deserialize(self.fmt, res)
        subnet = self._make_subnet(self.fmt, network, gateway_ip,
                                   cidr, ip_version=4,
                                   dns_nameservers=dns_nameservers)
        req = self.new_delete_request('subnets', subnet['subnet']['id'])
        res = req.get_response(self.api)
        self.assertEqual(webob.exc.HTTPNoContent.code, res.status_int)

    def test_delete_subnet_with_route(self):
        gateway_ip = '10.0.0.1'
        cidr = '10.0.0.0/24'
        host_routes = [{'destination': '135.207.0.0/16',
                        'nexthop': '1.2.3.4'}]
        # Create new network
        res = self._create_network(fmt=self.fmt, name='net',
                                   admin_state_up=True)
        network = self.deserialize(self.fmt, res)
        subnet = self._make_subnet(self.fmt, network, gateway_ip,
                                   cidr, ip_version=4,
                                   host_routes=host_routes)
        req = self.new_delete_request('subnets', subnet['subnet']['id'])
        res = req.get_response(self.api)
        self.assertEqual(webob.exc.HTTPNoContent.code, res.status_int)

    def test_delete_subnet_with_dns_and_route(self):
        gateway_ip = '10.0.0.1'
        cidr = '10.0.0.0/24'
        dns_nameservers = ['1.2.3.4']
        host_routes = [{'destination': '135.207.0.0/16',
                        'nexthop': '1.2.3.4'}]
        # Create new network
        res = self._create_network(fmt=self.fmt, name='net',
                                   admin_state_up=True)
        network = self.deserialize(self.fmt, res)
        subnet = self._make_subnet(self.fmt, network, gateway_ip,
                                   cidr, ip_version=4,
                                   dns_nameservers=dns_nameservers,
                                   host_routes=host_routes)
        req = self.new_delete_request('subnets', subnet['subnet']['id'])
        res = req.get_response(self.api)
        self.assertEqual(webob.exc.HTTPNoContent.code, res.status_int)

    def test_delete_subnet_with_callback(self):
        with self.subnet() as subnet,\
                mock.patch.object(registry, 'notify') as notify:

            errors = [
                exceptions.NotificationError(
                    'fake_id', n_exc.NeutronException()),
            ]
            notify.side_effect = [
                exceptions.CallbackFailure(errors=errors), None
            ]

            # Make sure the delete request fails
            delete_request = self.new_delete_request('subnets',
                                                     subnet['subnet']['id'])
            delete_response = delete_request.get_response(self.api)

            self.assertIn('NeutronError', delete_response.json)
            self.assertEqual('SubnetInUse',
                             delete_response.json['NeutronError']['type'])

            # Make sure the subnet wasn't deleted
            list_request = self.new_list_request(
                'subnets', params="id=%s" % subnet['subnet']['id'])
            list_response = list_request.get_response(self.api)
            self.assertEqual(subnet['subnet']['id'],
                             list_response.json['subnets'][0]['id'])

    def _helper_test_validate_subnet(self, option, exception):
        cfg.CONF.set_override(option, 0)
        with self.network() as network:
            subnet = {'network_id': network['network']['id'],
                      'cidr': '10.0.2.0/24',
                      'ip_version': 4,
                      'tenant_id': network['network']['tenant_id'],
                      'gateway_ip': '10.0.2.1',
                      'dns_nameservers': ['8.8.8.8'],
                      'host_routes': [{'destination': '135.207.0.0/16',
                                       'nexthop': '1.2.3.4'}]}
            plugin = manager.NeutronManager.get_plugin()
            e = self.assertRaises(exception,
                                  plugin._validate_subnet,
                                  context.get_admin_context(),
                                  subnet)
            self.assertThat(
                str(e),
                matchers.Not(matchers.Contains('built-in function id')))

    def test_validate_subnet_dns_nameservers_exhausted(self):
        self._helper_test_validate_subnet(
            'max_dns_nameservers',
            n_exc.DNSNameServersExhausted)

    def test_validate_subnet_host_routes_exhausted(self):
        self._helper_test_validate_subnet(
            'max_subnet_host_routes',
            n_exc.HostRoutesExhausted)

    def test_port_prevents_network_deletion(self):
        with self.port() as p:
            self._delete('networks', p['port']['network_id'],
                         expected_code=webob.exc.HTTPConflict.code)

    def test_port_prevents_subnet_deletion(self):
        with self.port() as p:
            self._delete('subnets',
                         p['port']['fixed_ips'][0]['subnet_id'],
                         expected_code=webob.exc.HTTPConflict.code)


class TestSubnetPoolsV2(NeutronDbPluginV2TestCase):

    _POOL_NAME = 'test-pool'

    def _test_create_subnetpool(self, prefixes, expected=None,
                                admin=False, **kwargs):
        keys = kwargs.copy()
        keys.setdefault('tenant_id', self._tenant_id)
        with self.subnetpool(prefixes, admin, **keys) as subnetpool:
            self._validate_resource(subnetpool, keys, 'subnetpool')
            if expected:
                self._compare_resource(subnetpool, expected, 'subnetpool')
        return subnetpool

    def _validate_default_prefix(self, prefix, subnetpool):
        self.assertEqual(subnetpool['subnetpool']['default_prefixlen'],
                         prefix)

    def _validate_min_prefix(self, prefix, subnetpool):
        self.assertEqual(subnetpool['subnetpool']['min_prefixlen'], prefix)

    def _validate_max_prefix(self, prefix, subnetpool):
        self.assertEqual(subnetpool['subnetpool']['max_prefixlen'], prefix)

    def _validate_is_default(self, subnetpool):
        self.assertTrue(subnetpool['subnetpool']['is_default'])

    def test_create_subnetpool_empty_prefix_list(self):
        self.assertRaises(webob.exc.HTTPClientError,
self._test_create_subnetpool, [], name=self._POOL_NAME, tenant_id=self._tenant_id, min_prefixlen='21') def test_create_default_subnetpools(self): for cidr, min_prefixlen in (['fe80::/48', '64'], ['10.10.10.0/24', '24']): pool = self._test_create_subnetpool([cidr], admin=True, tenant_id=self._tenant_id, name=self._POOL_NAME, min_prefixlen=min_prefixlen, is_default=True) self._validate_is_default(pool) def test_cannot_create_multiple_default_subnetpools(self): for cidr1, cidr2, min_prefixlen in (['fe80::/48', '2001::/48', '64'], ['10.10.10.0/24', '10.10.20.0/24', '24']): pool = self._test_create_subnetpool([cidr1], admin=True, tenant_id=self._tenant_id, name=self._POOL_NAME, min_prefixlen=min_prefixlen, is_default=True) self._validate_is_default(pool) self.assertRaises(webob.exc.HTTPClientError, self._test_create_subnetpool, [cidr2], admin=True, tenant_id=self._tenant_id, name=self._POOL_NAME, min_prefixlen=min_prefixlen, is_default=True) def test_create_subnetpool_ipv4_24_with_defaults(self): subnet = netaddr.IPNetwork('10.10.10.0/24') subnetpool = self._test_create_subnetpool([subnet.cidr], name=self._POOL_NAME, tenant_id=self._tenant_id, min_prefixlen='21') self._validate_default_prefix('21', subnetpool) self._validate_min_prefix('21', subnetpool) def test_create_subnetpool_ipv4_21_with_defaults(self): subnet = netaddr.IPNetwork('10.10.10.0/21') subnetpool = self._test_create_subnetpool([subnet.cidr], name=self._POOL_NAME, tenant_id=self._tenant_id, min_prefixlen='21') self._validate_default_prefix('21', subnetpool) self._validate_min_prefix('21', subnetpool) def test_create_subnetpool_ipv4_default_prefix_too_small(self): subnet = netaddr.IPNetwork('10.10.10.0/21') self.assertRaises(webob.exc.HTTPClientError, self._test_create_subnetpool, [subnet.cidr], tenant_id=self._tenant_id, name=self._POOL_NAME, min_prefixlen='21', default_prefixlen='20') def test_create_subnetpool_ipv4_default_prefix_too_large(self): subnet = netaddr.IPNetwork('10.10.10.0/21') self.assertRaises(webob.exc.HTTPClientError, self._test_create_subnetpool, [subnet.cidr], tenant_id=self._tenant_id, name=self._POOL_NAME, max_prefixlen=24, default_prefixlen='32') def test_create_subnetpool_ipv4_default_prefix_bounds(self): subnet = netaddr.IPNetwork('10.10.10.0/21') subnetpool = self._test_create_subnetpool([subnet.cidr], tenant_id=self._tenant_id, name=self._POOL_NAME) self._validate_min_prefix('8', subnetpool) self._validate_default_prefix('8', subnetpool) self._validate_max_prefix('32', subnetpool) def test_create_subnetpool_ipv6_default_prefix_bounds(self): subnet = netaddr.IPNetwork('fe80::/48') subnetpool = self._test_create_subnetpool([subnet.cidr], tenant_id=self._tenant_id, name=self._POOL_NAME) self._validate_min_prefix('64', subnetpool) self._validate_default_prefix('64', subnetpool) self._validate_max_prefix('128', subnetpool) def test_create_subnetpool_ipv4_supported_default_prefix(self): subnet = netaddr.IPNetwork('10.10.10.0/21') subnetpool = self._test_create_subnetpool([subnet.cidr], tenant_id=self._tenant_id, name=self._POOL_NAME, min_prefixlen='21', default_prefixlen='26') self._validate_default_prefix('26', subnetpool) def test_create_subnetpool_ipv4_supported_min_prefix(self): subnet = netaddr.IPNetwork('10.10.10.0/24') subnetpool = self._test_create_subnetpool([subnet.cidr], tenant_id=self._tenant_id, name=self._POOL_NAME, min_prefixlen='26') self._validate_min_prefix('26', subnetpool) self._validate_default_prefix('26', subnetpool) def test_create_subnetpool_ipv4_default_prefix_smaller_than_min(self): 
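        # Subnetpool prefix-length attributes must satisfy the ordering
        # min_prefixlen <= default_prefixlen <= max_prefixlen; this test and
        # the surrounding ones assert that requests violating it (here the
        # default prefixlen of 22 is numerically smaller than the minimum of
        # 23) get a client error. A minimal sketch of the ordering check,
        # using hypothetical names rather than the plugin's actual validator:
        #
        #     def check_prefixlen_bounds(min_pl, default_pl, max_pl):
        #         if not min_pl <= default_pl <= max_pl:
        #             raise ValueError('default_prefixlen must lie between '
        #                              'min_prefixlen and max_prefixlen')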
subnet = netaddr.IPNetwork('10.10.10.0/21') self.assertRaises(webob.exc.HTTPClientError, self._test_create_subnetpool, [subnet.cidr], tenant_id=self._tenant_id, name=self._POOL_NAME, default_prefixlen='22', min_prefixlen='23') def test_create_subnetpool_mixed_ip_version(self): subnet_v4 = netaddr.IPNetwork('10.10.10.0/21') subnet_v6 = netaddr.IPNetwork('fe80::/48') self.assertRaises(webob.exc.HTTPClientError, self._test_create_subnetpool, [subnet_v4.cidr, subnet_v6.cidr], tenant_id=self._tenant_id, name=self._POOL_NAME, min_prefixlen='21') def test_create_subnetpool_ipv6_with_defaults(self): subnet = netaddr.IPNetwork('fe80::/48') subnetpool = self._test_create_subnetpool([subnet.cidr], tenant_id=self._tenant_id, name=self._POOL_NAME, min_prefixlen='48') self._validate_default_prefix('48', subnetpool) self._validate_min_prefix('48', subnetpool) def test_get_subnetpool(self): subnetpool = self._test_create_subnetpool(['10.10.10.0/24'], tenant_id=self._tenant_id, name=self._POOL_NAME, min_prefixlen='24') req = self.new_show_request('subnetpools', subnetpool['subnetpool']['id']) res = self.deserialize(self.fmt, req.get_response(self.api)) self.assertEqual(subnetpool['subnetpool']['id'], res['subnetpool']['id']) def test_get_subnetpool_different_tenants_not_shared(self): subnetpool = self._test_create_subnetpool(['10.10.10.0/24'], shared=False, tenant_id=self._tenant_id, name=self._POOL_NAME, min_prefixlen='24') req = self.new_show_request('subnetpools', subnetpool['subnetpool']['id']) neutron_context = context.Context('', 'not-the-owner') req.environ['neutron.context'] = neutron_context res = req.get_response(self.api) self.assertEqual(404, res.status_int) def test_get_subnetpool_different_tenants_shared(self): subnetpool = self._test_create_subnetpool(['10.10.10.0/24'], None, True, name=self._POOL_NAME, min_prefixlen='24', shared=True) req = self.new_show_request('subnetpools', subnetpool['subnetpool']['id']) neutron_context = context.Context('', self._tenant_id) req.environ['neutron.context'] = neutron_context res = self.deserialize(self.fmt, req.get_response(self.api)) self.assertEqual(subnetpool['subnetpool']['id'], res['subnetpool']['id']) def test_list_subnetpools_different_tenants_shared(self): self._test_create_subnetpool(['10.10.10.0/24'], None, True, name=self._POOL_NAME, min_prefixlen='24', shared=True) admin_res = self._list('subnetpools') mortal_res = self._list('subnetpools', neutron_context=context.Context('', 'not-the-owner')) self.assertEqual(1, len(admin_res['subnetpools'])) self.assertEqual(1, len(mortal_res['subnetpools'])) def test_list_subnetpools_different_tenants_not_shared(self): self._test_create_subnetpool(['10.10.10.0/24'], None, True, name=self._POOL_NAME, min_prefixlen='24', shared=False) admin_res = self._list('subnetpools') mortal_res = self._list('subnetpools', neutron_context=context.Context('', 'not-the-owner')) self.assertEqual(1, len(admin_res['subnetpools'])) self.assertEqual(0, len(mortal_res['subnetpools'])) def test_delete_subnetpool(self): subnetpool = self._test_create_subnetpool(['10.10.10.0/24'], tenant_id=self._tenant_id, name=self._POOL_NAME, min_prefixlen='24') req = self.new_delete_request('subnetpools', subnetpool['subnetpool']['id']) res = req.get_response(self.api) self.assertEqual(204, res.status_int) def test_delete_nonexistent_subnetpool(self): req = self.new_delete_request('subnetpools', 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa') res = req.get_response(self._api_for_resource('subnetpools')) self.assertEqual(404, res.status_int) def 
test_update_subnetpool_prefix_list_append(self): initial_subnetpool = self._test_create_subnetpool(['10.10.8.0/21'], tenant_id=self._tenant_id, name=self._POOL_NAME, min_prefixlen='24') data = {'subnetpool': {'prefixes': ['10.10.8.0/21', '3.3.3.0/24', '2.2.2.0/24']}} req = self.new_update_request('subnetpools', data, initial_subnetpool['subnetpool']['id']) api = self._api_for_resource('subnetpools') res = self.deserialize(self.fmt, req.get_response(api)) self.assertItemsEqual(res['subnetpool']['prefixes'], ['10.10.8.0/21', '3.3.3.0/24', '2.2.2.0/24']) def test_update_subnetpool_prefix_list_compaction(self): initial_subnetpool = self._test_create_subnetpool(['10.10.10.0/24'], tenant_id=self._tenant_id, name=self._POOL_NAME, min_prefixlen='24') data = {'subnetpool': {'prefixes': ['10.10.10.0/24', '10.10.11.0/24']}} req = self.new_update_request('subnetpools', data, initial_subnetpool['subnetpool']['id']) api = self._api_for_resource('subnetpools') res = self.deserialize(self.fmt, req.get_response(api)) self.assertItemsEqual(res['subnetpool']['prefixes'], ['10.10.10.0/23']) def test_illegal_subnetpool_prefix_list_update(self): initial_subnetpool = self._test_create_subnetpool(['10.10.10.0/24'], tenant_id=self._tenant_id, name=self._POOL_NAME, min_prefixlen='24') data = {'subnetpool': {'prefixes': ['10.10.11.0/24']}} req = self.new_update_request('subnetpools', data, initial_subnetpool['subnetpool']['id']) api = self._api_for_resource('subnetpools') res = req.get_response(api) self.assertEqual(400, res.status_int) def test_update_subnetpool_default_prefix(self): initial_subnetpool = self._test_create_subnetpool(['10.10.8.0/21'], tenant_id=self._tenant_id, name=self._POOL_NAME, min_prefixlen='24') data = {'subnetpool': {'default_prefixlen': '26'}} req = self.new_update_request('subnetpools', data, initial_subnetpool['subnetpool']['id']) api = self._api_for_resource('subnetpools') res = self.deserialize(self.fmt, req.get_response(api)) self.assertEqual(26, res['subnetpool']['default_prefixlen']) def test_update_subnetpool_min_prefix(self): initial_subnetpool = self._test_create_subnetpool(['10.10.10.0/24'], tenant_id=self._tenant_id, name=self._POOL_NAME, min_prefixlen='24') data = {'subnetpool': {'min_prefixlen': '21'}} req = self.new_update_request('subnetpools', data, initial_subnetpool['subnetpool']['id']) res = self.deserialize(self.fmt, req.get_response(self.api)) self.assertEqual(21, res['subnetpool']['min_prefixlen']) def test_update_subnetpool_min_prefix_larger_than_max(self): initial_subnetpool = self._test_create_subnetpool(['10.10.10.0/24'], tenant_id=self._tenant_id, name=self._POOL_NAME, min_prefixlen='21', max_prefixlen='24') data = {'subnetpool': {'min_prefixlen': '28'}} req = self.new_update_request('subnetpools', data, initial_subnetpool['subnetpool']['id']) res = req.get_response(self.api) self.assertEqual(400, res.status_int) def test_update_subnetpool_max_prefix(self): initial_subnetpool = self._test_create_subnetpool(['10.10.10.0/24'], tenant_id=self._tenant_id, name=self._POOL_NAME, min_prefixlen='21', max_prefixlen='24') data = {'subnetpool': {'max_prefixlen': '26'}} req = self.new_update_request('subnetpools', data, initial_subnetpool['subnetpool']['id']) res = self.deserialize(self.fmt, req.get_response(self.api)) self.assertEqual(26, res['subnetpool']['max_prefixlen']) def test_update_subnetpool_max_prefix_less_than_min(self): initial_subnetpool = self._test_create_subnetpool(['10.10.10.0/24'], tenant_id=self._tenant_id, name=self._POOL_NAME, min_prefixlen='24') data = 
{'subnetpool': {'max_prefixlen': '21'}} req = self.new_update_request('subnetpools', data, initial_subnetpool['subnetpool']['id']) res = req.get_response(self.api) self.assertEqual(400, res.status_int) def test_update_subnetpool_max_prefix_less_than_default(self): initial_subnetpool = self._test_create_subnetpool(['10.10.10.0/24'], tenant_id=self._tenant_id, name=self._POOL_NAME, min_prefixlen='21', default_prefixlen='24') data = {'subnetpool': {'max_prefixlen': '22'}} req = self.new_update_request('subnetpools', data, initial_subnetpool['subnetpool']['id']) res = req.get_response(self.api) self.assertEqual(400, res.status_int) def test_update_subnetpool_default_prefix_less_than_min(self): initial_subnetpool = self._test_create_subnetpool(['10.10.10.0/24'], tenant_id=self._tenant_id, name=self._POOL_NAME, min_prefixlen='21') data = {'subnetpool': {'default_prefixlen': '20'}} req = self.new_update_request('subnetpools', data, initial_subnetpool['subnetpool']['id']) res = req.get_response(self.api) self.assertEqual(400, res.status_int) def test_update_subnetpool_default_prefix_larger_than_max(self): initial_subnetpool = self._test_create_subnetpool(['10.10.10.0/24'], tenant_id=self._tenant_id, name=self._POOL_NAME, min_prefixlen='21', max_prefixlen='24') data = {'subnetpool': {'default_prefixlen': '28'}} req = self.new_update_request('subnetpools', data, initial_subnetpool['subnetpool']['id']) res = req.get_response(self.api) self.assertEqual(400, res.status_int) def test_update_subnetpool_prefix_list_mixed_ip_version(self): initial_subnetpool = self._test_create_subnetpool(['10.10.10.0/24'], tenant_id=self._tenant_id, name=self._POOL_NAME, min_prefixlen='24') data = {'subnetpool': {'prefixes': ['fe80::/48']}} req = self.new_update_request('subnetpools', data, initial_subnetpool['subnetpool']['id']) res = req.get_response(self.api) self.assertEqual(400, res.status_int) def test_update_subnetpool_default_quota(self): initial_subnetpool = self._test_create_subnetpool(['10.10.10.0/24'], tenant_id=self._tenant_id, name=self._POOL_NAME, min_prefixlen='24', default_quota=10) self.assertEqual(10, initial_subnetpool['subnetpool']['default_quota']) data = {'subnetpool': {'default_quota': '1'}} req = self.new_update_request('subnetpools', data, initial_subnetpool['subnetpool']['id']) res = self.deserialize(self.fmt, req.get_response(self.api)) self.assertEqual(1, res['subnetpool']['default_quota']) def test_allocate_subnet_bad_gateway(self): with self.network() as network: sp = self._test_create_subnetpool(['10.10.0.0/8'], tenant_id=self._tenant_id, name=self._POOL_NAME, default_prefixlen='24') # Request a subnet allocation (no CIDR) data = {'subnet': {'network_id': network['network']['id'], 'subnetpool_id': sp['subnetpool']['id'], 'prefixlen': 32, 'ip_version': 4, 'tenant_id': network['network']['tenant_id']}} req = self.new_create_request('subnets', data) result = req.get_response(self.api) self.assertEqual(409, result.status_int) def test_allocate_any_subnet_with_prefixlen(self): with self.network() as network: sp = self._test_create_subnetpool(['10.10.0.0/16'], tenant_id=self._tenant_id, name=self._POOL_NAME, min_prefixlen='21') # Request a subnet allocation (no CIDR) data = {'subnet': {'network_id': network['network']['id'], 'subnetpool_id': sp['subnetpool']['id'], 'prefixlen': 24, 'ip_version': 4, 'tenant_id': network['network']['tenant_id']}} req = self.new_create_request('subnets', data) res = self.deserialize(self.fmt, req.get_response(self.api)) subnet = 
netaddr.IPNetwork(res['subnet']['cidr']) self.assertEqual(24, subnet.prefixlen) # Assert the allocated subnet CIDR is a subnet of our pool prefix supernet = netaddr.smallest_matching_cidr( subnet, sp['subnetpool']['prefixes']) self.assertEqual(supernet, netaddr.IPNetwork('10.10.0.0/16')) def test_allocate_any_subnet_with_default_prefixlen(self): with self.network() as network: sp = self._test_create_subnetpool(['10.10.0.0/16'], tenant_id=self._tenant_id, name=self._POOL_NAME, min_prefixlen='21') # Request any subnet allocation using default prefix data = {'subnet': {'network_id': network['network']['id'], 'subnetpool_id': sp['subnetpool']['id'], 'ip_version': 4, 'tenant_id': network['network']['tenant_id']}} req = self.new_create_request('subnets', data) res = self.deserialize(self.fmt, req.get_response(self.api)) subnet = netaddr.IPNetwork(res['subnet']['cidr']) self.assertEqual(subnet.prefixlen, int(sp['subnetpool']['default_prefixlen'])) def test_allocate_specific_subnet_with_mismatch_prefixlen(self): with self.network() as network: sp = self._test_create_subnetpool(['10.10.0.0/16'], tenant_id=self._tenant_id, name=self._POOL_NAME, min_prefixlen='21') data = {'subnet': {'network_id': network['network']['id'], 'subnetpool_id': sp['subnetpool']['id'], 'cidr': '10.10.1.0/24', 'prefixlen': 26, 'ip_version': 4, 'tenant_id': network['network']['tenant_id']}} req = self.new_create_request('subnets', data) res = req.get_response(self.api) self.assertEqual(400, res.status_int) def test_allocate_specific_subnet_with_matching_prefixlen(self): with self.network() as network: sp = self._test_create_subnetpool(['10.10.0.0/16'], tenant_id=self._tenant_id, name=self._POOL_NAME, min_prefixlen='21') data = {'subnet': {'network_id': network['network']['id'], 'subnetpool_id': sp['subnetpool']['id'], 'cidr': '10.10.1.0/24', 'prefixlen': 24, 'ip_version': 4, 'tenant_id': network['network']['tenant_id']}} req = self.new_create_request('subnets', data) res = req.get_response(self.api) self.assertEqual(400, res.status_int) def test_allocate_specific_subnet(self): with self.network() as network: sp = self._test_create_subnetpool(['10.10.0.0/16'], tenant_id=self._tenant_id, name=self._POOL_NAME, min_prefixlen='21') # Request a specific subnet allocation data = {'subnet': {'network_id': network['network']['id'], 'subnetpool_id': sp['subnetpool']['id'], 'cidr': '10.10.1.0/24', 'ip_version': 4, 'tenant_id': network['network']['tenant_id']}} req = self.new_create_request('subnets', data) res = self.deserialize(self.fmt, req.get_response(self.api)) # Assert the allocated subnet CIDR is what we expect subnet = netaddr.IPNetwork(res['subnet']['cidr']) self.assertEqual(netaddr.IPNetwork('10.10.1.0/24'), subnet) def test_allocate_specific_subnet_non_existent_prefix(self): with self.network() as network: sp = self._test_create_subnetpool(['10.10.0.0/16'], tenant_id=self._tenant_id, name=self._POOL_NAME, min_prefixlen='21') # Request a specific subnet allocation data = {'subnet': {'network_id': network['network']['id'], 'subnetpool_id': sp['subnetpool']['id'], 'cidr': '192.168.1.0/24', 'ip_version': 4, 'tenant_id': network['network']['tenant_id']}} req = self.new_create_request('subnets', data) res = req.get_response(self.api) self.assertEqual(500, res.status_int) def test_allocate_specific_subnet_already_allocated(self): with self.network() as network: sp = self._test_create_subnetpool(['10.10.10.0/24'], tenant_id=self._tenant_id, name=self._POOL_NAME, min_prefixlen='21') # Request a specific subnet allocation data = 
{'subnet': {'network_id': network['network']['id'], 'subnetpool_id': sp['subnetpool']['id'], 'cidr': '10.10.10.0/24', 'ip_version': 4, 'tenant_id': network['network']['tenant_id']}} req = self.new_create_request('subnets', data) # Allocate the subnet res = req.get_response(self.api) self.assertEqual(201, res.status_int) # Attempt to allocate it again res = req.get_response(self.api) # Assert error self.assertEqual(500, res.status_int) def test_allocate_specific_subnet_prefix_too_small(self): with self.network() as network: sp = self._test_create_subnetpool(['10.10.0.0/16'], tenant_id=self._tenant_id, name=self._POOL_NAME, min_prefixlen='21') # Request a specific subnet allocation data = {'subnet': {'network_id': network['network']['id'], 'subnetpool_id': sp['subnetpool']['id'], 'cidr': '10.10.0.0/20', 'ip_version': 4, 'tenant_id': network['network']['tenant_id']}} req = self.new_create_request('subnets', data) res = req.get_response(self.api) self.assertEqual(400, res.status_int) def test_allocate_specific_subnet_prefix_specific_gw(self): with self.network() as network: sp = self._test_create_subnetpool(['10.10.0.0/16'], tenant_id=self._tenant_id, name=self._POOL_NAME, min_prefixlen='21') # Request a specific subnet allocation data = {'subnet': {'network_id': network['network']['id'], 'subnetpool_id': sp['subnetpool']['id'], 'cidr': '10.10.1.0/24', 'gateway_ip': '10.10.1.254', 'ip_version': 4, 'tenant_id': network['network']['tenant_id']}} req = self.new_create_request('subnets', data) res = self.deserialize(self.fmt, req.get_response(self.api)) self.assertEqual('10.10.1.254', res['subnet']['gateway_ip']) def test_allocate_specific_subnet_prefix_allocation_pools(self): with self.network() as network: sp = self._test_create_subnetpool(['10.10.0.0/16'], tenant_id=self._tenant_id, name=self._POOL_NAME, min_prefixlen='21') # Request a specific subnet allocation pools = [{'start': '10.10.1.2', 'end': '10.10.1.253'}] data = {'subnet': {'network_id': network['network']['id'], 'subnetpool_id': sp['subnetpool']['id'], 'cidr': '10.10.1.0/24', 'gateway_ip': '10.10.1.1', 'ip_version': 4, 'allocation_pools': pools, 'tenant_id': network['network']['tenant_id']}} req = self.new_create_request('subnets', data) res = self.deserialize(self.fmt, req.get_response(self.api)) self.assertEqual(pools[0]['start'], res['subnet']['allocation_pools'][0]['start']) self.assertEqual(pools[0]['end'], res['subnet']['allocation_pools'][0]['end']) def test_allocate_any_subnet_prefix_allocation_pools(self): with self.network() as network: sp = self._test_create_subnetpool(['10.10.10.0/24'], tenant_id=self._tenant_id, name=self._POOL_NAME, min_prefixlen='21') # Request an any subnet allocation pools = [{'start': '10.10.10.1', 'end': '10.10.10.254'}] data = {'subnet': {'network_id': network['network']['id'], 'subnetpool_id': sp['subnetpool']['id'], 'prefixlen': '24', 'ip_version': 4, 'allocation_pools': pools, 'tenant_id': network['network']['tenant_id']}} req = self.new_create_request('subnets', data) res = req.get_response(self.api) self.assertEqual(400, res.status_int) def test_allocate_specific_subnet_prefix_too_large(self): with self.network() as network: sp = self._test_create_subnetpool(['10.10.0.0/16'], tenant_id=self._tenant_id, name=self._POOL_NAME, min_prefixlen='21', max_prefixlen='21') # Request a specific subnet allocation data = {'subnet': {'network_id': network['network']['id'], 'subnetpool_id': sp['subnetpool']['id'], 'cidr': '10.10.0.0/24', 'ip_version': 4, 'tenant_id': network['network']['tenant_id']}} req = 
self.new_create_request('subnets', data) res = req.get_response(self.api) self.assertEqual(400, res.status_int) def test_delete_subnetpool_existing_allocations(self): with self.network() as network: sp = self._test_create_subnetpool(['10.10.0.0/16'], tenant_id=self._tenant_id, name=self._POOL_NAME, min_prefixlen='21') data = {'subnet': {'network_id': network['network']['id'], 'subnetpool_id': sp['subnetpool']['id'], 'cidr': '10.10.0.0/24', 'ip_version': 4, 'tenant_id': network['network']['tenant_id']}} req = self.new_create_request('subnets', data) req.get_response(self.api) req = self.new_delete_request('subnetpools', sp['subnetpool']['id']) res = req.get_response(self.api) self.assertEqual(400, res.status_int) def test_allocate_subnet_over_quota(self): with self.network() as network: sp = self._test_create_subnetpool(['10.10.0.0/16'], tenant_id=self._tenant_id, name=self._POOL_NAME, min_prefixlen='21', default_quota=2048) # Request a specific subnet allocation data = {'subnet': {'network_id': network['network']['id'], 'subnetpool_id': sp['subnetpool']['id'], 'ip_version': 4, 'prefixlen': 21, 'tenant_id': network['network']['tenant_id']}} req = self.new_create_request('subnets', data) # Allocate a subnet to fill the quota res = req.get_response(self.api) self.assertEqual(201, res.status_int) # Attempt to allocate a /21 again res = req.get_response(self.api) # Assert error self.assertEqual(409, res.status_int) def test_allocate_any_ipv4_subnet_ipv6_pool(self): with self.network() as network: sp = self._test_create_subnetpool(['2001:db8:1:2::/63'], tenant_id=self._tenant_id, name=self._POOL_NAME) # Request a specific subnet allocation data = {'subnet': {'network_id': network['network']['id'], 'subnetpool_id': sp['subnetpool']['id'], 'ip_version': 4, 'tenant_id': network['network']['tenant_id']}} req = self.new_create_request('subnets', data) res = req.get_response(self.api) self.assertEqual(400, res.status_int) class DbModelTestCase(testlib_api.SqlTestCase): """DB model tests.""" def test_repr(self): """testing the string representation of 'model' classes.""" network = models_v2.Network(name="net_net", status="OK", admin_state_up=True) actual_repr_output = repr(network) exp_start_with = "<neutron.db.models_v2.Network" exp_middle = "[object at %x]" % id(network) exp_end_with = (" {tenant_id=None, id=None, " "name='net_net', status='OK', " "admin_state_up=True, mtu=None, " "vlan_transparent=None, " "availability_zone_hints=None}>") final_exp = exp_start_with + exp_middle + exp_end_with self.assertEqual(final_exp, actual_repr_output) def _make_network(self, ctx): with ctx.session.begin(): network = models_v2.Network(name="net_net", status="OK", tenant_id='dbcheck', admin_state_up=True) ctx.session.add(network) return network def _make_subnet(self, ctx, network_id): with ctx.session.begin(): subnet = models_v2.Subnet(name="subsub", ip_version=4, tenant_id='dbcheck', cidr='turn_down_for_what', network_id=network_id) ctx.session.add(subnet) return subnet def _make_port(self, ctx, network_id): with ctx.session.begin(): port = models_v2.Port(network_id=network_id, mac_address='1', tenant_id='dbcheck', admin_state_up=True, status="COOL", device_id="devid", device_owner="me") ctx.session.add(port) return port def _make_subnetpool(self, ctx): with ctx.session.begin(): subnetpool = models_v2.SubnetPool( ip_version=4, default_prefixlen=4, min_prefixlen=4, max_prefixlen=4, shared=False, default_quota=4, address_scope_id='f', tenant_id='dbcheck', is_default=False ) ctx.session.add(subnetpool) return subnetpool def _make_security_group_and_rule(self, ctx): with ctx.session.begin(): sg = sgdb.SecurityGroup(name='sg', description='sg') rule = sgdb.SecurityGroupRule(security_group=sg, port_range_min=1, port_range_max=2, protocol='TCP',
ethertype='v4', direction='ingress', remote_ip_prefix='0.0.0.0/0') ctx.session.add(sg) ctx.session.add(rule) return sg, rule def _make_floating_ip(self, ctx, port_id): with ctx.session.begin(): flip = l3_db.FloatingIP(floating_ip_address='1.2.3.4', floating_network_id='somenet', floating_port_id=port_id) ctx.session.add(flip) return flip def _make_router(self, ctx): with ctx.session.begin(): router = l3_db.Router() ctx.session.add(router) return router def _get_neutron_attr(self, ctx, attr_id): return ctx.session.query( models_v2.model_base.StandardAttribute).filter( models_v2.model_base.StandardAttribute.id == attr_id).one() def _test_standardattr_removed_on_obj_delete(self, ctx, obj): attr_id = obj.standard_attr_id self.assertEqual( obj.__table__.name, self._get_neutron_attr(ctx, attr_id).resource_type) with ctx.session.begin(): ctx.session.delete(obj) with testtools.ExpectedException(orm.exc.NoResultFound): # we want to make sure that the attr resource was removed self._get_neutron_attr(ctx, attr_id) def test_standardattr_removed_on_subnet_delete(self): ctx = context.get_admin_context() network = self._make_network(ctx) subnet = self._make_subnet(ctx, network.id) self._test_standardattr_removed_on_obj_delete(ctx, subnet) def test_standardattr_removed_on_network_delete(self): ctx = context.get_admin_context() network = self._make_network(ctx) self._test_standardattr_removed_on_obj_delete(ctx, network) def test_standardattr_removed_on_subnetpool_delete(self): ctx = context.get_admin_context() spool = self._make_subnetpool(ctx) self._test_standardattr_removed_on_obj_delete(ctx, spool) def test_standardattr_removed_on_port_delete(self): ctx = context.get_admin_context() network = self._make_network(ctx) port = self._make_port(ctx, network.id) self._test_standardattr_removed_on_obj_delete(ctx, port) def test_standardattr_removed_on_sg_delete(self): ctx = context.get_admin_context() sg, rule = self._make_security_group_and_rule(ctx) self._test_standardattr_removed_on_obj_delete(ctx, sg) # make sure the attr entry was wiped out for the rule as well with testtools.ExpectedException(orm.exc.NoResultFound): self._get_neutron_attr(ctx, rule.standard_attr_id) def test_standardattr_removed_on_floating_ip_delete(self): ctx = context.get_admin_context() network = self._make_network(ctx) port = self._make_port(ctx, network.id) flip = self._make_floating_ip(ctx, port.id) self._test_standardattr_removed_on_obj_delete(ctx, flip) def test_standardattr_removed_on_router_delete(self): ctx = context.get_admin_context() router = self._make_router(ctx) self._test_standardattr_removed_on_obj_delete(ctx, router) def test_resource_type_fields(self): ctx = context.get_admin_context() network = self._make_network(ctx) port = self._make_port(ctx, network.id) subnet = self._make_subnet(ctx, network.id) spool = self._make_subnetpool(ctx) for disc, obj in (('ports', port), ('networks', network), ('subnets', subnet), ('subnetpools', spool)): self.assertEqual( disc, obj.standard_attr.resource_type) class NeutronDbPluginV2AsMixinTestCase(NeutronDbPluginV2TestCase, testlib_api.SqlTestCase): """Tests for NeutronDbPluginV2 as Mixin. While NeutronDbPluginV2TestCase checks NeutronDbPlugin and all plugins as a complete plugin, this test case verifies abilities of NeutronDbPlugin which are provided to other plugins (e.g. DB operations). This test case may include tests only for NeutronDbPlugin, so this should not be used in unit tests for other plugins. 
""" def setUp(self): super(NeutronDbPluginV2AsMixinTestCase, self).setUp() self.plugin = importutils.import_object(DB_PLUGIN_KLASS) self.context = context.get_admin_context() self.net_data = {'network': {'id': 'fake-id', 'name': 'net1', 'admin_state_up': True, 'tenant_id': 'test-tenant', 'shared': False}} def test_create_network_with_default_status(self): net = self.plugin.create_network(self.context, self.net_data) default_net_create_status = 'ACTIVE' expected = [('id', 'fake-id'), ('name', 'net1'), ('admin_state_up', True), ('tenant_id', 'test-tenant'), ('shared', False), ('status', default_net_create_status)] for k, v in expected: self.assertEqual(net[k], v) def test_create_network_with_status_BUILD(self): self.net_data['network']['status'] = 'BUILD' net = self.plugin.create_network(self.context, self.net_data) self.assertEqual(net['status'], 'BUILD') def test_get_user_allocation_for_dhcp_port_returns_none(self): plugin = manager.NeutronManager.get_plugin() with self.network() as net, self.network() as net1: with self.subnet(network=net, cidr='10.0.0.0/24') as subnet,\ self.subnet(network=net1, cidr='10.0.1.0/24') as subnet1: with self.port(subnet=subnet, device_owner=constants.DEVICE_OWNER_DHCP),\ self.port(subnet=subnet1): # check that user allocations on another network don't # affect _subnet_get_user_allocation method res = plugin._subnet_get_user_allocation( context.get_admin_context(), subnet['subnet']['id']) self.assertIsNone(res) def test__validate_network_subnetpools(self): network = models_v2.Network() network.subnets = [models_v2.Subnet(subnetpool_id='test_id', ip_version=4)] new_subnetpool_id = None self.assertRaises(n_exc.NetworkSubnetPoolAffinityError, self.plugin.ipam._validate_network_subnetpools, network, new_subnetpool_id, 4) class TestNetworks(testlib_api.SqlTestCase): def setUp(self): super(TestNetworks, self).setUp() self._tenant_id = 'test-tenant' # Update the plugin self.setup_coreplugin(DB_PLUGIN_KLASS) def _create_network(self, plugin, ctx, shared=True): network = {'network': {'name': 'net', 'shared': shared, 'admin_state_up': True, 'tenant_id': self._tenant_id}} created_network = plugin.create_network(ctx, network) return (network, created_network['id']) def _create_port(self, plugin, ctx, net_id, device_owner, tenant_id): port = {'port': {'name': 'port', 'network_id': net_id, 'mac_address': attributes.ATTR_NOT_SPECIFIED, 'fixed_ips': attributes.ATTR_NOT_SPECIFIED, 'admin_state_up': True, 'device_id': 'device_id', 'device_owner': device_owner, 'tenant_id': tenant_id}} plugin.create_port(ctx, port) def _test_update_shared_net_used(self, device_owner, expected_exception=None): plugin = manager.NeutronManager.get_plugin() ctx = context.get_admin_context() network, net_id = self._create_network(plugin, ctx) self._create_port(plugin, ctx, net_id, device_owner, self._tenant_id + '1') network['network']['shared'] = False if (expected_exception): with testlib_api.ExpectedException(expected_exception): plugin.update_network(ctx, net_id, network) else: plugin.update_network(ctx, net_id, network) def test_update_shared_net_used_fails(self): self._test_update_shared_net_used('', n_exc.InvalidSharedSetting) def test_update_shared_net_used_as_router_gateway(self): self._test_update_shared_net_used( constants.DEVICE_OWNER_ROUTER_GW) def test_update_shared_net_used_by_floating_ip(self): self._test_update_shared_net_used( constants.DEVICE_OWNER_FLOATINGIP) class DbOperationBoundMixin(object): """Mixin to support tests that assert constraints on DB operations.""" admin = True 
def setUp(self, *args, **kwargs): super(DbOperationBoundMixin, self).setUp(*args, **kwargs) self._db_execute_count = 0 def _event_incrementer(*args, **kwargs): self._db_execute_count += 1 engine = db_api.get_engine() event.listen(engine, 'after_execute', _event_incrementer) self.addCleanup(event.remove, engine, 'after_execute', _event_incrementer) def _get_context(self): if self.admin: return context.get_admin_context() return context.Context('', 'fake') def get_api_kwargs(self): context_ = self._get_context() return {'set_context': True, 'tenant_id': context_.tenant} def _list_and_count_queries(self, resource): self._db_execute_count = 0 self.assertNotEqual([], self._list(resource, neutron_context=self._get_context())) query_count = self._db_execute_count # sanity check to make sure queries are being observed self.assertNotEqual(0, query_count) return query_count def _assert_object_list_queries_constant(self, obj_creator, plural): obj_creator() before_count = self._list_and_count_queries(plural) # one more thing shouldn't change the db query count obj_creator() self.assertEqual(before_count, self._list_and_count_queries(plural)) neutron-8.4.0/neutron/tests/unit/db/test_common_db_mixin.py0000664000567000056710000000370613044372760025344 0ustar jenkinsjenkins00000000000000# Copyright 2016 # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import mock from neutron import context from neutron.db import common_db_mixin from neutron.tests.unit import testlib_api class TestCommonHelpFunctions(testlib_api.SqlTestCase): def setUp(self): super(TestCommonHelpFunctions, self).setUp() self.admin_ctx = context.get_admin_context() def test__safe_creation_create_bindings_fails(self): create_fn = mock.Mock(return_value={'id': 1234}) create_bindings = mock.Mock(side_effect=ValueError) tx_check = lambda i: setattr(self, '_active', self.admin_ctx.session.is_active) delete_fn = mock.Mock(side_effect=tx_check) self.assertRaises(ValueError, common_db_mixin.safe_creation, self.admin_ctx, create_fn, delete_fn, create_bindings) delete_fn.assert_called_once_with(1234) self.assertTrue(self._active) def test__safe_creation_deletion_fails(self): create_fn = mock.Mock(return_value={'id': 1234}) create_bindings = mock.Mock(side_effect=ValueError) delete_fn = mock.Mock(side_effect=EnvironmentError) self.assertRaises(ValueError, common_db_mixin.safe_creation, self.admin_ctx, create_fn, delete_fn, create_bindings) delete_fn.assert_called_once_with(1234) neutron-8.4.0/neutron/tests/unit/db/test_l3_hamode_db.py0000664000567000056710000015103513044372760024502 0ustar jenkinsjenkins00000000000000# Copyright (C) 2014 eNovance SAS # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import mock from oslo_config import cfg from oslo_db import exception as db_exc from oslo_utils import uuidutils import sqlalchemy as sa from sqlalchemy import orm import testtools from neutron.api.rpc.handlers import l3_rpc from neutron.api.v2 import attributes from neutron.common import constants from neutron.common import exceptions as n_exc from neutron import context from neutron.db import agents_db from neutron.db import common_db_mixin from neutron.db import l3_agentschedulers_db from neutron.db import l3_hamode_db from neutron.extensions import external_net from neutron.extensions import l3 from neutron.extensions import l3_ext_ha_mode from neutron.extensions import portbindings from neutron.extensions import providernet from neutron import manager from neutron.scheduler import l3_agent_scheduler from neutron.tests.common import helpers from neutron.tests.unit import testlib_api _uuid = uuidutils.generate_uuid class FakeL3PluginWithAgents(common_db_mixin.CommonDbMixin, l3_hamode_db.L3_HA_NAT_db_mixin, l3_agentschedulers_db.L3AgentSchedulerDbMixin, agents_db.AgentDbMixin): pass class L3HATestFramework(testlib_api.SqlTestCase): def setUp(self): super(L3HATestFramework, self).setUp() self.admin_ctx = context.get_admin_context() self.setup_coreplugin('neutron.plugins.ml2.plugin.Ml2Plugin') self.core_plugin = manager.NeutronManager.get_plugin() notif_p = mock.patch.object(l3_hamode_db.L3_HA_NAT_db_mixin, '_notify_ha_interfaces_updated') self.notif_m = notif_p.start() cfg.CONF.set_override('allow_overlapping_ips', True) self.plugin = FakeL3PluginWithAgents() self.plugin.router_scheduler = l3_agent_scheduler.ChanceScheduler() self.agent1 = helpers.register_l3_agent() self.agent2 = helpers.register_l3_agent( 'host_2', constants.L3_AGENT_MODE_DVR_SNAT) def _create_router(self, ha=True, tenant_id='tenant1', distributed=None, ctx=None, admin_state_up=True): if ctx is None: ctx = self.admin_ctx ctx.tenant_id = tenant_id router = {'name': 'router1', 'admin_state_up': admin_state_up, 'tenant_id': tenant_id} if ha is not None: router['ha'] = ha if distributed is not None: router['distributed'] = distributed return self.plugin.create_router(ctx, {'router': router}) def _migrate_router(self, router_id, ha): self._update_router(router_id, admin_state=False) self._update_router(router_id, ha=ha) return self._update_router(router_id, admin_state=True) def _update_router(self, router_id, ha=None, distributed=None, ctx=None, admin_state=None): if ctx is None: ctx = self.admin_ctx data = {'ha': ha} if ha is not None else {} if distributed is not None: data['distributed'] = distributed if admin_state is not None: data['admin_state_up'] = admin_state return self.plugin._update_router_db(ctx, router_id, data) class L3HATestCase(L3HATestFramework): def test_verify_configuration_succeed(self): # Default configuration should pass self.plugin._verify_configuration() def test_verify_configuration_l3_ha_net_cidr_is_not_a_cidr(self): cfg.CONF.set_override('l3_ha_net_cidr', 'not a cidr') self.assertRaises( l3_ext_ha_mode.HANetworkCIDRNotValid, self.plugin._verify_configuration) def 
test_verify_configuration_l3_ha_net_cidr_is_not_a_subnet(self): cfg.CONF.set_override('l3_ha_net_cidr', '10.0.0.1/8') self.assertRaises( l3_ext_ha_mode.HANetworkCIDRNotValid, self.plugin._verify_configuration) def test_verify_configuration_min_l3_agents_per_router_below_minimum(self): cfg.CONF.set_override('min_l3_agents_per_router', 0) self.assertRaises( l3_ext_ha_mode.HAMinimumAgentsNumberNotValid, self.plugin._check_num_agents_per_router) def test_verify_configuration_min_l3_agents_per_router_eq_one(self): cfg.CONF.set_override('min_l3_agents_per_router', 1) self.plugin._check_num_agents_per_router() def test_verify_configuration_max_l3_agents_below_min_l3_agents(self): cfg.CONF.set_override('max_l3_agents_per_router', 3) cfg.CONF.set_override('min_l3_agents_per_router', 4) self.assertRaises( l3_ext_ha_mode.HAMaximumAgentsNumberNotValid, self.plugin._check_num_agents_per_router) def test_verify_configuration_max_l3_agents_unlimited(self): cfg.CONF.set_override('max_l3_agents_per_router', l3_hamode_db.UNLIMITED_AGENTS_PER_ROUTER) self.plugin._check_num_agents_per_router() def test_get_ha_router_port_bindings(self): router = self._create_router() bindings = self.plugin.get_ha_router_port_bindings( self.admin_ctx, [router['id']]) binding_dicts = [{'router_id': binding['router_id'], 'l3_agent_id': binding['l3_agent_id']} for binding in bindings] self.assertIn({'router_id': router['id'], 'l3_agent_id': self.agent1['id']}, binding_dicts) self.assertIn({'router_id': router['id'], 'l3_agent_id': self.agent2['id']}, binding_dicts) def test_get_l3_bindings_hosting_router_with_ha_states_ha_router(self): router = self._create_router() self.plugin.update_routers_states( self.admin_ctx, {router['id']: 'active'}, self.agent1['host']) bindings = self.plugin.get_l3_bindings_hosting_router_with_ha_states( self.admin_ctx, router['id']) agent_ids = [(agent[0]['id'], agent[1]) for agent in bindings] self.assertIn((self.agent1['id'], 'active'), agent_ids) self.assertIn((self.agent2['id'], 'standby'), agent_ids) def test_get_l3_bindings_hosting_router_with_ha_states_agent_none(self): with mock.patch.object(self.plugin, 'schedule_router'): # Do not bind router to leave agents as None router = self._create_router() res = self.admin_ctx.session.query( l3_hamode_db.L3HARouterAgentPortBinding).filter( l3_hamode_db.L3HARouterAgentPortBinding.router_id == router['id'] ).all() # Check that agents are None self.assertEqual([None, None], [r.agent for r in res]) bindings = self.plugin.get_l3_bindings_hosting_router_with_ha_states( self.admin_ctx, router['id']) self.assertEqual([], bindings) def test_get_l3_bindings_hosting_router_with_ha_states_not_scheduled(self): router = self._create_router(ha=False) # Check that there no L3 agents scheduled for this router res = self.admin_ctx.session.query( l3_hamode_db.L3HARouterAgentPortBinding).filter( l3_hamode_db.L3HARouterAgentPortBinding.router_id == router['id'] ).all() self.assertEqual([], [r.agent for r in res]) bindings = self.plugin.get_l3_bindings_hosting_router_with_ha_states( self.admin_ctx, router['id']) self.assertEqual([], bindings) def test_get_l3_bindings_hosting_router_with_ha_states_active_and_dead( self): router = self._create_router() with mock.patch.object(agents_db.Agent, 'is_active', new_callable=mock.PropertyMock, return_value=False): self.plugin.update_routers_states( self.admin_ctx, {router['id']: 'active'}, self.agent1['host']) bindings = ( self.plugin.get_l3_bindings_hosting_router_with_ha_states( self.admin_ctx, router['id'])) agent_ids = 
[(agent[0]['id'], agent[1]) for agent in bindings] self.assertIn((self.agent1['id'], 'standby'), agent_ids) def test_router_created_in_active_state(self): router = self._create_router() self.assertEqual(constants.ROUTER_STATUS_ACTIVE, router['status']) def test_router_update_stay_active(self): router = self._create_router() router['name'] = 'test_update' router_updated = self.plugin._update_router_db(self.admin_ctx, router['id'], router) self.assertEqual(constants.ROUTER_STATUS_ACTIVE, router_updated['status']) def test_allocating_router_hidden_from_sync(self): r1, r2 = self._create_router(), self._create_router() r1['status'] = constants.ROUTER_STATUS_ALLOCATING self.plugin._update_router_db(self.admin_ctx, r1['id'], r1) # store shorter name for readability get_method = self.plugin._get_active_l3_agent_routers_sync_data # r1 should be hidden expected = [self.plugin.get_router(self.admin_ctx, r2['id'])] self.assertEqual(expected, get_method(self.admin_ctx, None, None, [r1['id'], r2['id']])) # but once it transitions back, all is well in the world again! r1['status'] = constants.ROUTER_STATUS_ACTIVE self.plugin._update_router_db(self.admin_ctx, r1['id'], r1) expected.append(self.plugin.get_router(self.admin_ctx, r1['id'])) # just compare ids since python3 won't let us sort dicts expected = sorted([r['id'] for r in expected]) result = sorted([r['id'] for r in get_method( self.admin_ctx, None, None, [r1['id'], r2['id']])]) self.assertEqual(expected, result) def test_router_ha_update_allocating_then_active(self): router = self._create_router() _orig = self.plugin._delete_ha_interfaces def check_state(context, router_id): self.assertEqual( constants.ROUTER_STATUS_ALLOCATING, self.plugin._get_router(context, router_id)['status']) return _orig(context, router_id) with mock.patch.object(self.plugin, '_delete_ha_interfaces', side_effect=check_state) as ha_mock: router = self._migrate_router(router['id'], ha=False) self.assertTrue(ha_mock.called) self.assertEqual(constants.ROUTER_STATUS_ACTIVE, router['status']) def test_router_created_allocating_state_during_interface_create(self): _orig = self.plugin._create_ha_interfaces def check_state(context, router_db, ha_network): self.assertEqual(constants.ROUTER_STATUS_ALLOCATING, router_db.status) return _orig(context, router_db, ha_network) with mock.patch.object(self.plugin, '_create_ha_interfaces', side_effect=check_state) as ha_mock: router = self._create_router() self.assertTrue(ha_mock.called) self.assertEqual(constants.ROUTER_STATUS_ACTIVE, router['status']) def test_ha_router_create(self): router = self._create_router() self.assertTrue(router['ha']) def test_ha_router_create_with_distributed(self): router = self._create_router(ha=True, distributed=True) self.assertTrue(router['ha']) self.assertTrue(router['distributed']) ha_network = self.plugin.get_ha_network(self.admin_ctx, router['tenant_id']) self.assertIsNotNone(ha_network) def test_no_ha_router_create(self): router = self._create_router(ha=False) self.assertFalse(router['ha']) def test_add_ha_network_settings(self): cfg.CONF.set_override('l3_ha_network_type', 'abc') cfg.CONF.set_override('l3_ha_network_physical_name', 'def') network = {} self.plugin._add_ha_network_settings(network) self.assertEqual('abc', network[providernet.NETWORK_TYPE]) self.assertEqual('def', network[providernet.PHYSICAL_NETWORK]) def test_router_create_with_ha_conf_enabled(self): cfg.CONF.set_override('l3_ha', True) router = self._create_router(ha=None) self.assertTrue(router['ha']) def 
test_ha_router_delete_with_distributed(self): router = self._create_router(ha=True, distributed=True) self.plugin.delete_router(self.admin_ctx, router['id']) self.assertRaises(l3.RouterNotFound, self.plugin._get_router, self.admin_ctx, router['id']) def test_migration_from_ha(self): router = self._create_router() self.assertTrue(router['ha']) router = self._migrate_router(router['id'], False) self.assertFalse(router.extra_attributes['ha']) self.assertIsNone(router.extra_attributes['ha_vr_id']) def test_migration_to_ha(self): router = self._create_router(ha=False) self.assertFalse(router['ha']) router = self._migrate_router(router['id'], True) self.assertTrue(router.extra_attributes['ha']) self.assertIsNotNone(router.extra_attributes['ha_vr_id']) def test_migration_requires_admin_state_down(self): router = self._create_router(ha=False) self.assertRaises(n_exc.BadRequest, self._update_router, router['id'], ha=True) def test_migrate_ha_router_to_distributed_and_ha(self): router = self._create_router(ha=True, admin_state_up=False, distributed=False) self.assertTrue(router['ha']) self.assertRaises(l3_ext_ha_mode.DVRmodeUpdateOfHaNotSupported, self._update_router, router['id'], ha=True, distributed=True) def test_migrate_ha_router_to_distributed_and_not_ha(self): router = self._create_router(ha=True, admin_state_up=False, distributed=False) self.assertTrue(router['ha']) self.assertRaises(l3_ext_ha_mode.DVRmodeUpdateOfHaNotSupported, self._update_router, router['id'], ha=False, distributed=True) def test_migrate_dvr_router_to_ha_and_not_dvr(self): router = self._create_router(ha=False, admin_state_up=False, distributed=True) self.assertTrue(router['distributed']) self.assertRaises(l3_ext_ha_mode.HAmodeUpdateOfDvrNotSupported, self._update_router, router['id'], ha=True, distributed=True) def test_migrate_dvr_router_to_ha_and_dvr(self): router = self._create_router(ha=False, admin_state_up=False, distributed=True) self.assertTrue(router['distributed']) self.assertRaises(l3_ext_ha_mode.HAmodeUpdateOfDvrNotSupported, self._update_router, router['id'], ha=True, distributed=True) def test_migrate_distributed_router_to_ha(self): router = self._create_router(ha=False, distributed=True) self.assertFalse(router['ha']) self.assertTrue(router['distributed']) self.assertRaises(l3_ext_ha_mode.HAmodeUpdateOfDvrNotSupported, self._update_router, router['id'], ha=True) def test_migrate_legacy_router_to_distributed_and_ha(self): router = self._create_router(ha=False, distributed=False) self.assertFalse(router['ha']) self.assertFalse(router['distributed']) self.assertRaises(l3_ext_ha_mode.UpdateToDvrHamodeNotSupported, self._update_router, router['id'], ha=True, distributed=True) def test_migrate_legacy_router_to_ha_not_enough_agents(self): router = self._create_router(ha=False, distributed=False) self.assertFalse(router['ha']) self.assertFalse(router['distributed']) helpers.set_agent_admin_state(self.agent2['id'], admin_state_up=False) self.assertRaises(l3_ext_ha_mode.HANotEnoughAvailableAgents, self._migrate_router, router['id'], ha=True) def test_unbind_ha_router(self): router = self._create_router() bound_agents = self.plugin.get_l3_agents_hosting_routers( self.admin_ctx, [router['id']]) self.assertEqual(2, len(bound_agents)) with mock.patch.object(manager.NeutronManager, 'get_service_plugins') as mock_manager: self.plugin._unbind_ha_router(self.admin_ctx, router['id']) bound_agents = self.plugin.get_l3_agents_hosting_routers( self.admin_ctx, [router['id']]) self.assertEqual(0, len(bound_agents)) 
self.assertEqual(2, mock_manager.call_count) def test_get_ha_sync_data_for_host_with_non_dvr_agent(self): with mock.patch.object(self.plugin, '_get_dvr_sync_data') as mock_get_sync: self.plugin.supported_extension_aliases = ['dvr', 'l3-ha'] self.plugin.get_ha_sync_data_for_host(self.admin_ctx, self.agent1['host'], self.agent1) self.assertFalse(mock_get_sync.called) def test_get_ha_sync_data_for_host_with_dvr_agent(self): with mock.patch.object(self.plugin, '_get_dvr_sync_data') as mock_get_sync: self.plugin.supported_extension_aliases = ['dvr', 'l3-ha'] self.plugin.get_ha_sync_data_for_host(self.admin_ctx, self.agent2['host'], self.agent2) self.assertTrue(mock_get_sync.called) def test_l3_agent_routers_query_interface(self): router = self._create_router() routers = self.plugin.get_ha_sync_data_for_host(self.admin_ctx, self.agent1['host'], self.agent1) self.assertEqual(1, len(routers)) router = routers[0] self.assertIsNotNone(router.get('ha')) interface = router.get(constants.HA_INTERFACE_KEY) self.assertIsNotNone(interface) self.assertEqual(constants.DEVICE_OWNER_ROUTER_HA_INTF, interface['device_owner']) subnets = interface['subnets'] self.assertEqual(1, len(subnets)) self.assertEqual(cfg.CONF.l3_ha_net_cidr, subnets[0]['cidr']) def test_unique_ha_network_per_tenant(self): tenant1 = _uuid() tenant2 = _uuid() self._create_router(tenant_id=tenant1) self._create_router(tenant_id=tenant2) ha_network1 = self.plugin.get_ha_network(self.admin_ctx, tenant1) ha_network2 = self.plugin.get_ha_network(self.admin_ctx, tenant2) self.assertNotEqual( ha_network1['network_id'], ha_network2['network_id']) def _deployed_router_change_ha_flag(self, to_ha): router1 = self._create_router(ha=not to_ha) routers = self.plugin.get_ha_sync_data_for_host( self.admin_ctx, self.agent1['host'], self.agent1) router = routers[0] interface = router.get(constants.HA_INTERFACE_KEY) if to_ha: self.assertIsNone(interface) else: self.assertIsNotNone(interface) self._migrate_router(router['id'], to_ha) self.plugin.schedule_router(self.admin_ctx, router1['id']) routers = self.plugin.get_ha_sync_data_for_host( self.admin_ctx, self.agent1['host'], self.agent1) router = routers[0] interface = router.get(constants.HA_INTERFACE_KEY) if to_ha: self.assertIsNotNone(interface) else: self.assertIsNone(interface) def test_deployed_router_can_have_ha_enabled(self): self._deployed_router_change_ha_flag(to_ha=True) def test_deployed_router_can_have_ha_disabled(self): self._deployed_router_change_ha_flag(to_ha=False) def test_create_ha_router_notifies_agent(self): self._create_router() self.assertTrue(self.notif_m.called) def test_update_router_to_ha_notifies_agent(self): router = self._create_router(ha=False) self.notif_m.reset_mock() self._migrate_router(router['id'], True) self.assertTrue(self.notif_m.called) def test_unique_vr_id_between_routers(self): self._create_router() self._create_router() routers = self.plugin.get_ha_sync_data_for_host( self.admin_ctx, self.agent1['host'], self.agent1) self.assertEqual(2, len(routers)) self.assertNotEqual(routers[0]['ha_vr_id'], routers[1]['ha_vr_id']) @mock.patch('neutron.db.l3_hamode_db.VR_ID_RANGE', new=set(range(1, 1))) def test_vr_id_depleted(self): self.assertRaises(l3_ext_ha_mode.NoVRIDAvailable, self._create_router) @mock.patch('neutron.db.l3_hamode_db.VR_ID_RANGE', new=set(range(1, 2))) def test_vr_id_unique_range_per_tenant(self): self._create_router() self._create_router(tenant_id=_uuid()) routers = self.plugin.get_ha_sync_data_for_host( self.admin_ctx, self.agent1['host'], self.agent1) 
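        # VR IDs are the VRRP virtual router identifiers handed to
        # keepalived. They only need to be unique within a single HA
        # network, and each tenant gets its own HA network, so even with
        # VR_ID_RANGE patched to the single value {1} above, one router per
        # tenant can still be created; both simply end up with the same
        # ha_vr_id, as the assertions below verify.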
self.assertEqual(2, len(routers)) self.assertEqual(routers[0]['ha_vr_id'], routers[1]['ha_vr_id']) @mock.patch('neutron.db.l3_hamode_db.MAX_ALLOCATION_TRIES', new=2) def test_vr_id_allocation_constraint_conflict(self): router = self._create_router() network = self.plugin.get_ha_network(self.admin_ctx, router['tenant_id']) with mock.patch.object(self.plugin, '_get_allocated_vr_id', return_value=set()) as alloc: self.assertRaises(l3_ext_ha_mode.MaxVRIDAllocationTriesReached, self.plugin._allocate_vr_id, self.admin_ctx, network.network_id, router['id']) self.assertEqual(2, len(alloc.mock_calls)) def test_vr_id_allocation_delete_router(self): router = self._create_router() network = self.plugin.get_ha_network(self.admin_ctx, router['tenant_id']) allocs_before = self.plugin._get_allocated_vr_id(self.admin_ctx, network.network_id) router = self._create_router() allocs_current = self.plugin._get_allocated_vr_id(self.admin_ctx, network.network_id) self.assertNotEqual(allocs_before, allocs_current) self.plugin.delete_router(self.admin_ctx, router['id']) allocs_after = self.plugin._get_allocated_vr_id(self.admin_ctx, network.network_id) self.assertEqual(allocs_before, allocs_after) def test_vr_id_allocation_router_migration(self): router = self._create_router() network = self.plugin.get_ha_network(self.admin_ctx, router['tenant_id']) allocs_before = self.plugin._get_allocated_vr_id(self.admin_ctx, network.network_id) router = self._create_router() self._migrate_router(router['id'], False) allocs_after = self.plugin._get_allocated_vr_id(self.admin_ctx, network.network_id) self.assertEqual(allocs_before, allocs_after) def test_one_ha_router_one_not(self): self._create_router(ha=False) self._create_router() routers = self.plugin.get_ha_sync_data_for_host( self.admin_ctx, self.agent1['host'], self.agent1) ha0 = routers[0]['ha'] ha1 = routers[1]['ha'] self.assertNotEqual(ha0, ha1) def test_add_ha_port_subtransactions_blocked(self): with self.admin_ctx.session.begin(): self.assertRaises(RuntimeError, self.plugin.add_ha_port, self.admin_ctx, 'id', 'id', 'id') def test_add_ha_port_binding_failure_rolls_back_port(self): router = self._create_router() device_filter = {'device_id': [router['id']]} ports_before = self.core_plugin.get_ports( self.admin_ctx, filters=device_filter) network = self.plugin.get_ha_network(self.admin_ctx, router['tenant_id']) with mock.patch.object(l3_hamode_db, 'L3HARouterAgentPortBinding', side_effect=ValueError): self.assertRaises(ValueError, self.plugin.add_ha_port, self.admin_ctx, router['id'], network.network_id, router['tenant_id']) ports_after = self.core_plugin.get_ports( self.admin_ctx, filters=device_filter) self.assertEqual(ports_before, ports_after) def test_create_ha_network_binding_failure_rolls_back_network(self): networks_before = self.core_plugin.get_networks(self.admin_ctx) with mock.patch.object(l3_hamode_db, 'L3HARouterNetwork', side_effect=ValueError): self.assertRaises(ValueError, self.plugin._create_ha_network, self.admin_ctx, _uuid()) networks_after = self.core_plugin.get_networks(self.admin_ctx) self.assertEqual(networks_before, networks_after) def test_create_ha_network_subnet_failure_rolls_back_network(self): networks_before = self.core_plugin.get_networks(self.admin_ctx) with mock.patch.object(self.plugin, '_create_ha_subnet', side_effect=ValueError): self.assertRaises(ValueError, self.plugin._create_ha_network, self.admin_ctx, _uuid()) networks_after = self.core_plugin.get_networks(self.admin_ctx) self.assertEqual(networks_before, networks_after)
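    # The _create_ha_interfaces_and_ensure_network tests that follow
    # exercise races where two API workers try to create the per-tenant HA
    # network at the same time. The pattern under test is create-or-reuse:
    # attempt the insert, and if another worker already created the row,
    # fall back to reading it. A minimal sketch with a hypothetical helper
    # name (get_ha_network, _create_ha_network and DBDuplicateEntry are the
    # real names used by these tests):
    #
    #     from oslo_db import exception as db_exc
    #
    #     def ensure_ha_network(plugin, ctx, tenant_id):
    #         net = plugin.get_ha_network(ctx, tenant_id)
    #         if net:
    #             return net
    #         try:
    #             return plugin._create_ha_network(ctx, tenant_id)
    #         except db_exc.DBDuplicateEntry:
    #             # another worker won the race; reuse its network
    #             return plugin.get_ha_network(ctx, tenant_id)
    def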
test_create_ha_interfaces_and_ensure_network_net_exists(self): router = self._create_router() router_db = self.plugin._get_router(self.admin_ctx, router['id']) with mock.patch.object(self.plugin, '_create_ha_network') as create: self.plugin._create_ha_interfaces_and_ensure_network( self.admin_ctx, router_db) self.assertFalse(create.called) def test_create_ha_interfaces_and_ensure_network_concurrent_create(self): # create a non-ha router so we can manually invoke the create ha # interfaces call down below router = self._create_router(ha=False) router_db = self.plugin._get_router(self.admin_ctx, router['id']) orig_create = self.plugin._create_ha_network created_nets = [] def _create_ha_network(*args, **kwargs): # create the network and then raise the error to simulate another # worker creating the network before us. created_nets.append(orig_create(*args, **kwargs)) raise db_exc.DBDuplicateEntry(columns=['tenant_id']) with mock.patch.object(self.plugin, '_create_ha_network', new=_create_ha_network): net = self.plugin._create_ha_interfaces_and_ensure_network( self.admin_ctx, router_db)[1] # ensure that it used the concurrently created network self.assertEqual([net], created_nets) def _test_ensure_with_patched_int_create(self, _create_ha_interfaces): # create a non-ha router so we can manually invoke the create ha # interfaces call down below router = self._create_router(ha=False) router_db = self.plugin._get_router(self.admin_ctx, router['id']) with mock.patch.object(self.plugin, '_create_ha_interfaces', new=_create_ha_interfaces): self.plugin._create_ha_interfaces_and_ensure_network( self.admin_ctx, router_db) self.assertTrue(_create_ha_interfaces.called) def test_create_ha_interfaces_and_ensure_network_concurrent_delete(self): orig_create = self.plugin._create_ha_interfaces def _create_ha_interfaces(ctx, rdb, ha_net): # concurrent delete on the first attempt if not getattr(_create_ha_interfaces, 'called', False): setattr(_create_ha_interfaces, 'called', True) self.core_plugin.delete_network(self.admin_ctx, ha_net['network_id']) return orig_create(ctx, rdb, ha_net) self._test_ensure_with_patched_int_create(_create_ha_interfaces) def test_create_ha_interfaces_and_ensure_network_concurrent_swap(self): orig_create = self.plugin._create_ha_interfaces def _create_ha_interfaces(ctx, rdb, ha_net): # concurrent delete on the first attempt if not getattr(_create_ha_interfaces, 'called', False): setattr(_create_ha_interfaces, 'called', True) self.core_plugin.delete_network(self.admin_ctx, ha_net['network_id']) self.plugin._create_ha_network(self.admin_ctx, rdb.tenant_id) return orig_create(ctx, rdb, ha_net) self._test_ensure_with_patched_int_create(_create_ha_interfaces) def test_create_ha_network_tenant_binding_raises_duplicate(self): router = self._create_router() network = self.plugin.get_ha_network(self.admin_ctx, router['tenant_id']) self.plugin._create_ha_network_tenant_binding( self.admin_ctx, 't1', network['network_id']) with testtools.ExpectedException(db_exc.DBDuplicateEntry): self.plugin._create_ha_network_tenant_binding( self.admin_ctx, 't1', network['network_id']) def test_create_ha_interfaces_binding_failure_rolls_back_ports(self): router = self._create_router() network = self.plugin.get_ha_network(self.admin_ctx, router['tenant_id']) device_filter = {'device_id': [router['id']]} ports_before = self.core_plugin.get_ports( self.admin_ctx, filters=device_filter) router_db = self.plugin._get_router(self.admin_ctx, router['id']) with mock.patch.object(l3_hamode_db, 
'L3HARouterAgentPortBinding', side_effect=ValueError): self.assertRaises(ValueError, self.plugin._create_ha_interfaces, self.admin_ctx, router_db, network) ports_after = self.core_plugin.get_ports( self.admin_ctx, filters=device_filter) self.assertEqual(ports_before, ports_after) def test_create_router_db_ha_attribute_failure_rolls_back_router(self): routers_before = self.plugin.get_routers(self.admin_ctx) for method in ('_set_vr_id', '_create_ha_interfaces', '_notify_ha_interfaces_updated'): with mock.patch.object(self.plugin, method, side_effect=ValueError): self.assertRaises(ValueError, self._create_router) routers_after = self.plugin.get_routers(self.admin_ctx) self.assertEqual(routers_before, routers_after) def test_get_active_host_for_ha_router(self): router = self._create_router() self.assertIsNone( self.plugin.get_active_host_for_ha_router( self.admin_ctx, router['id'])) self.plugin.update_routers_states( self.admin_ctx, {router['id']: 'active'}, self.agent2['host']) self.assertEqual( self.agent2['host'], self.plugin.get_active_host_for_ha_router( self.admin_ctx, router['id'])) def test_update_routers_states(self): router1 = self._create_router() router2 = self._create_router() routers = self.plugin.get_ha_sync_data_for_host( self.admin_ctx, self.agent1['host'], self.agent1) for router in routers: self.assertEqual('standby', router[constants.HA_ROUTER_STATE_KEY]) states = {router1['id']: 'active', router2['id']: 'standby'} self.plugin.update_routers_states( self.admin_ctx, states, self.agent1['host']) routers = self.plugin.get_ha_sync_data_for_host( self.admin_ctx, self.agent1['host'], self.agent1) for router in routers: self.assertEqual(states[router['id']], router[constants.HA_ROUTER_STATE_KEY]) def test_sync_ha_router_info_ha_interface_port_concurrently_deleted(self): router1 = self._create_router() router2 = self._create_router() # retrieve all router ha port bindings bindings = self.plugin.get_ha_router_port_bindings( self.admin_ctx, [router1['id'], router2['id']]) self.assertEqual(4, len(bindings)) routers = self.plugin.get_ha_sync_data_for_host( self.admin_ctx, self.agent1['host'], self.agent1) self.assertEqual(2, len(routers)) bindings = self.plugin.get_ha_router_port_bindings( self.admin_ctx, [router1['id'], router2['id']], self.agent1['host']) self.assertEqual(2, len(bindings)) fake_binding = mock.Mock() fake_binding.router_id = router2['id'] fake_binding.port = None with mock.patch.object( self.plugin, "get_ha_router_port_bindings", return_value=[bindings[0], fake_binding]): routers = self.plugin.get_ha_sync_data_for_host( self.admin_ctx, self.agent1['host'], self.agent1) self.assertEqual(1, len(routers)) def test_set_router_states_handles_concurrently_deleted_router(self): router1 = self._create_router() router2 = self._create_router() bindings = self.plugin.get_ha_router_port_bindings( self.admin_ctx, [router1['id'], router2['id']]) self.plugin.delete_router(self.admin_ctx, router1['id']) self.plugin._set_router_states( self.admin_ctx, bindings, {router1['id']: 'active', router2['id']: 'active'}) routers = self.plugin.get_ha_sync_data_for_host( self.admin_ctx, self.agent1['host'], self.agent1) self.assertEqual('active', routers[0][constants.HA_ROUTER_STATE_KEY]) def test_update_routers_states_port_not_found(self): router1 = self._create_router() port = {'id': 'foo', 'device_id': router1['id']} with mock.patch.object(self.core_plugin, 'get_ports', return_value=[port]): with mock.patch.object( self.core_plugin, 'update_port', 
side_effect=n_exc.PortNotFound(port_id='foo')): states = {router1['id']: 'active'} self.plugin.update_routers_states( self.admin_ctx, states, self.agent1['host']) def test_exclude_dvr_agents_for_ha_candidates(self): """Test dvr agents configured with "dvr" only, as opposed to "dvr_snat", are excluded. This test case tests that when get_number_of_agents_for_scheduling is called, it does not count dvr-only agents. """ # Test setup registers two l3 agents. # Register another l3 agent with dvr mode and assert that # get_number_of_agents_for_scheduling still returns 2. helpers.register_l3_agent('host_3', constants.L3_AGENT_MODE_DVR) num_ha_candidates = self.plugin.get_number_of_agents_for_scheduling( self.admin_ctx) self.assertEqual(2, num_ha_candidates) def test_include_dvr_snat_agents_for_ha_candidates(self): """Test dvr agents configured with "dvr_snat" are included. This test case tests that when get_number_of_agents_for_scheduling is called, it counts dvr_snat agents. """ # Test setup registers two l3 agents. # Register another l3 agent with dvr_snat mode and assert that # get_number_of_agents_for_scheduling returns 3. helpers.register_l3_agent('host_3', constants.L3_AGENT_MODE_DVR_SNAT) num_ha_candidates = self.plugin.get_number_of_agents_for_scheduling( self.admin_ctx) self.assertEqual(3, num_ha_candidates) def test_get_number_of_agents_for_scheduling_not_enough_agents(self): cfg.CONF.set_override('min_l3_agents_per_router', 3) helpers.kill_agent(helpers.register_l3_agent(host='l3host_3')['id']) self.assertRaises(l3_ext_ha_mode.HANotEnoughAvailableAgents, self.plugin.get_number_of_agents_for_scheduling, self.admin_ctx) def test_ha_network_deleted_if_no_ha_router_present_two_tenants(self): # Create two routers in different tenants. router1 = self._create_router() router2 = self._create_router(tenant_id='tenant2') nets_before = [net['name'] for net in self.core_plugin.get_networks(self.admin_ctx)] # Check that HA networks were created for each tenant self.assertIn('HA network tenant %s' % router1['tenant_id'], nets_before) self.assertIn('HA network tenant %s' % router2['tenant_id'], nets_before) # Delete router1 self.plugin.delete_router(self.admin_ctx, router1['id']) nets_after = [net['name'] for net in self.core_plugin.get_networks(self.admin_ctx)] # Check that HA network for tenant1 is deleted and for tenant2 is not. self.assertNotIn('HA network tenant %s' % router1['tenant_id'], nets_after) self.assertIn('HA network tenant %s' % router2['tenant_id'], nets_after) def test_ha_network_is_not_deleted_if_ha_router_is_present(self): # Create 2 routers in one tenant and check that if one is deleted, the # HA network still exists. router1 = self._create_router() router2 = self._create_router() nets_before = [net['name'] for net in self.core_plugin.get_networks(self.admin_ctx)] self.assertIn('HA network tenant %s' % router1['tenant_id'], nets_before) self.plugin.delete_router(self.admin_ctx, router2['id']) nets_after = [net['name'] for net in self.core_plugin.get_networks(self.admin_ctx)] self.assertIn('HA network tenant %s' % router1['tenant_id'], nets_after) def test_ha_network_delete_ha_and_non_ha_router(self): # Create an HA and a non-HA router. Check that after the HA router is # deleted, the HA network is deleted too. 
router1 = self._create_router(ha=False) router2 = self._create_router() nets_before = [net['name'] for net in self.core_plugin.get_networks(self.admin_ctx)] self.assertIn('HA network tenant %s' % router1['tenant_id'], nets_before) self.plugin.delete_router(self.admin_ctx, router2['id']) nets_after = [net['name'] for net in self.core_plugin.get_networks(self.admin_ctx)] self.assertNotIn('HA network tenant %s' % router1['tenant_id'], nets_after) def _test_ha_network_is_not_deleted_raise_exception(self, exception): router1 = self._create_router() nets_before = [net['name'] for net in self.core_plugin.get_networks(self.admin_ctx)] self.assertIn('HA network tenant %s' % router1['tenant_id'], nets_before) with mock.patch.object(self.plugin, '_delete_ha_network', side_effect=exception): self.plugin.delete_router(self.admin_ctx, router1['id']) nets_after = [net['name'] for net in self.core_plugin.get_networks(self.admin_ctx)] self.assertIn('HA network tenant %s' % router1['tenant_id'], nets_after) def test_ha_network_is_not_deleted_if_another_ha_router_is_created(self): # If another router was created during deletion of the current router, # _delete_ha_network will fail with InvalidRequestError. Check that the # HA network won't be deleted. self._test_ha_network_is_not_deleted_raise_exception( sa.exc.InvalidRequestError) def test_ha_network_is_not_deleted_if_network_in_use(self): self._test_ha_network_is_not_deleted_raise_exception( n_exc.NetworkInUse(net_id="foo_net_id")) def test_ha_network_is_not_deleted_if_db_deleted_error(self): self._test_ha_network_is_not_deleted_raise_exception( orm.exc.ObjectDeletedError(None)) def test_ha_router_create_failed_no_ha_network_delete(self): tenant_id = "foo_tenant_id" nets_before = self.core_plugin.get_networks(self.admin_ctx) self.assertNotIn('HA network tenant %s' % tenant_id, nets_before) # Unable to create HA network with mock.patch.object(self.core_plugin, 'create_network', side_effect=n_exc.NoNetworkAvailable): self.assertRaises(n_exc.NoNetworkAvailable, self._create_router, True, tenant_id) nets_after = self.core_plugin.get_networks(self.admin_ctx) self.assertEqual(nets_before, nets_after) self.assertNotIn('HA network tenant %s' % tenant_id, nets_after) def test_update_port_status_port_binding_deleted_concurrently(self): router1 = self._create_router() states = {router1['id']: 'active'} with mock.patch.object(self.plugin, 'get_ha_router_port_bindings'): (self.admin_ctx.session.query( l3_hamode_db.L3HARouterAgentPortBinding). 
filter_by(router_id=router1['id']).delete()) self.plugin.update_routers_states( self.admin_ctx, states, self.agent1['host']) class L3HAModeDbTestCase(L3HATestFramework): def _create_network(self, plugin, ctx, name='net', tenant_id='tenant1', external=False): network = {'network': {'name': name, 'shared': False, 'admin_state_up': True, 'tenant_id': tenant_id, external_net.EXTERNAL: external}} return plugin.create_network(ctx, network)['id'] def _create_subnet(self, plugin, ctx, network_id, cidr='10.0.0.0/8', name='subnet', tenant_id='tenant1'): subnet = {'subnet': {'name': name, 'ip_version': 4, 'network_id': network_id, 'cidr': cidr, 'gateway_ip': attributes.ATTR_NOT_SPECIFIED, 'allocation_pools': attributes.ATTR_NOT_SPECIFIED, 'dns_nameservers': attributes.ATTR_NOT_SPECIFIED, 'host_routes': attributes.ATTR_NOT_SPECIFIED, 'tenant_id': tenant_id, 'enable_dhcp': True, 'ipv6_ra_mode': attributes.ATTR_NOT_SPECIFIED}} created_subnet = plugin.create_subnet(ctx, subnet) return created_subnet def test_remove_ha_in_use(self): router = self._create_router(ctx=self.admin_ctx) network_id = self._create_network(self.core_plugin, self.admin_ctx) subnet = self._create_subnet(self.core_plugin, self.admin_ctx, network_id) interface_info = {'subnet_id': subnet['id']} self.plugin.add_router_interface(self.admin_ctx, router['id'], interface_info) self.assertRaises(l3.RouterInUse, self.plugin.delete_router, self.admin_ctx, router['id']) bindings = self.plugin.get_ha_router_port_bindings( self.admin_ctx, [router['id']]) self.assertEqual(2, len(bindings)) def test_update_router_port_bindings_no_ports(self): self.plugin._update_router_port_bindings( self.admin_ctx, {}, self.agent1['host']) def _get_first_interface(self, router_id): device_filter = {'device_id': [router_id], 'device_owner': [constants.DEVICE_OWNER_ROUTER_INTF]} return self.core_plugin.get_ports( self.admin_ctx, filters=device_filter)[0] def test_update_router_port_bindings_updates_host(self): network_id = self._create_network(self.core_plugin, self.admin_ctx) subnet = self._create_subnet(self.core_plugin, self.admin_ctx, network_id) interface_info = {'subnet_id': subnet['id']} router = self._create_router() self.plugin.add_router_interface(self.admin_ctx, router['id'], interface_info) self.plugin._update_router_port_bindings( self.admin_ctx, {router['id']: 'active'}, self.agent1['host']) port = self._get_first_interface(router['id']) self.assertEqual(self.agent1['host'], port[portbindings.HOST_ID]) self.plugin._update_router_port_bindings( self.admin_ctx, {router['id']: 'active'}, self.agent2['host']) port = self._get_first_interface(router['id']) self.assertEqual(self.agent2['host'], port[portbindings.HOST_ID]) def test_ensure_host_set_on_ports_dvr_ha_binds_to_active(self): agent3 = helpers.register_l3_agent('host_3', constants.L3_AGENT_MODE_DVR_SNAT) ext_net = self._create_network(self.core_plugin, self.admin_ctx, external=True) int_net = self._create_network(self.core_plugin, self.admin_ctx) subnet = self._create_subnet(self.core_plugin, self.admin_ctx, int_net) interface_info = {'subnet_id': subnet['id']} router = self._create_router(ha=True, distributed=True) self.plugin._update_router_gw_info(self.admin_ctx, router['id'], {'network_id': ext_net}) self.plugin.add_router_interface(self.admin_ctx, router['id'], interface_info) bindings = self.plugin.get_ha_router_port_bindings( self.admin_ctx, router_ids=[router['id']], host=self.agent2['host']) self.plugin._set_router_states(self.admin_ctx, bindings, {router['id']: 'active'}) callback = 
l3_rpc.L3RpcCallback() callback._l3plugin = self.plugin # Get router with interfaces router = self.plugin._get_dvr_sync_data(self.admin_ctx, self.agent2['host'], self.agent2, [router['id']])[0] callback._ensure_host_set_on_ports(self.admin_ctx, agent3['host'], [router]) device_filter = {'device_id': [router['id']], 'device_owner': [constants.DEVICE_OWNER_ROUTER_SNAT] } port = self.core_plugin.get_ports(self.admin_ctx, filters=device_filter)[0] self.assertNotEqual(agent3['host'], port[portbindings.HOST_ID]) callback._ensure_host_set_on_ports(self.admin_ctx, self.agent2['host'], [router]) port = self.core_plugin.get_ports(self.admin_ctx, filters=device_filter)[0] self.assertEqual(self.agent2['host'], port[portbindings.HOST_ID]) def test_ensure_host_set_on_ports_binds_correctly(self): network_id = self._create_network(self.core_plugin, self.admin_ctx) subnet = self._create_subnet(self.core_plugin, self.admin_ctx, network_id) interface_info = {'subnet_id': subnet['id']} router = self._create_router() self.plugin.add_router_interface(self.admin_ctx, router['id'], interface_info) port = self._get_first_interface(router['id']) self.assertEqual('', port[portbindings.HOST_ID]) # Update the router object to include the first interface router = ( self.plugin.list_active_sync_routers_on_active_l3_agent( self.admin_ctx, self.agent1['host'], [router['id']]))[0] # ensure_host_set_on_ports binds an unbound port callback = l3_rpc.L3RpcCallback() callback._l3plugin = self.plugin callback._ensure_host_set_on_ports( self.admin_ctx, self.agent1['host'], [router]) port = self._get_first_interface(router['id']) self.assertEqual(self.agent1['host'], port[portbindings.HOST_ID]) # ensure_host_set_on_ports does not rebind a bound port router = ( self.plugin.list_active_sync_routers_on_active_l3_agent( self.admin_ctx, self.agent1['host'], [router['id']]))[0] callback._ensure_host_set_on_ports( self.admin_ctx, self.agent2['host'], [router]) port = self._get_first_interface(router['id']) self.assertEqual(self.agent1['host'], port[portbindings.HOST_ID]) def test_is_ha_router_port(self): network_id = self._create_network(self.core_plugin, self.admin_ctx) subnet = self._create_subnet(self.core_plugin, self.admin_ctx, network_id) interface_info = {'subnet_id': subnet['id']} router = self._create_router() self.plugin.add_router_interface(self.admin_ctx, router['id'], interface_info) port = self._get_first_interface(router['id']) self.assertTrue(l3_hamode_db.is_ha_router_port( port['device_owner'], port['device_id'])) def test_is_ha_router_port_for_normal_port(self): network_id = self._create_network(self.core_plugin, self.admin_ctx) subnet = self._create_subnet(self.core_plugin, self.admin_ctx, network_id) interface_info = {'subnet_id': subnet['id']} router = self._create_router(ha=False) self.plugin.add_router_interface(self.admin_ctx, router['id'], interface_info) device_filter = {'device_id': [router['id']], 'device_owner': [constants.DEVICE_OWNER_ROUTER_INTF]} port = self.core_plugin.get_ports( self.admin_ctx, filters=device_filter)[0] self.assertFalse(l3_hamode_db.is_ha_router_port( port['device_owner'], port['device_id'])) class L3HAUserTestCase(L3HATestFramework): def setUp(self): super(L3HAUserTestCase, self).setUp() self.user_ctx = context.Context('', _uuid()) def test_create_ha_router(self): self._create_router(ctx=self.user_ctx) def test_update_router(self): router = self._create_router(ctx=self.user_ctx) self._update_router(router['id'], ctx=self.user_ctx) def test_delete_router(self): router = 
self._create_router(ctx=self.user_ctx) self.plugin.delete_router(self.user_ctx, router['id']) neutron-8.4.0/neutron/tests/unit/db/test_agents_db.py0000664000567000056710000003634713044372760024140 0ustar jenkinsjenkins00000000000000# pylint: disable=pointless-string-statement # Copyright (c) 2013 OpenStack Foundation. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import copy import datetime import mock from oslo_config import cfg from oslo_db import exception as exc from oslo_utils import timeutils import testscenarios from neutron.common import constants from neutron.common import exceptions as n_exc from neutron import context from neutron.db import agents_db from neutron.db import db_base_plugin_v2 as base_plugin from neutron.tests.unit import testlib_api # the below code is required for the following reason # (as documented in testscenarios) """Multiply tests depending on their 'scenarios' attribute. This can be assigned to 'load_tests' in any test module to make this automatically work across tests in the module. """ load_tests = testscenarios.load_tests_apply_scenarios TEST_RESOURCE_VERSIONS = {"A": "1.0"} AGENT_STATUS = {'agent_type': 'Open vSwitch agent', 'binary': 'neutron-openvswitch-agent', 'host': 'overcloud-notcompute', 'topic': 'N/A', 'resource_versions': TEST_RESOURCE_VERSIONS} TEST_TIME = '2016-02-26T17:08:06.116' class FakePlugin(base_plugin.NeutronDbPluginV2, agents_db.AgentDbMixin): """A fake plugin class containing all DB methods.""" class TestAgentsDbBase(testlib_api.SqlTestCase): def setUp(self): super(TestAgentsDbBase, self).setUp() self.context = context.get_admin_context() self.plugin = FakePlugin() def _get_agents(self, hosts, agent_type): return [ agents_db.Agent( binary='foo-agent', host=host, agent_type=agent_type, topic='foo_topic', configurations="{}", resource_versions="{}", created_at=timeutils.utcnow(), started_at=timeutils.utcnow(), heartbeat_timestamp=timeutils.utcnow()) for host in hosts ] def _save_agents(self, agents): for agent in agents: with self.context.session.begin(subtransactions=True): self.context.session.add(agent) def _create_and_save_agents(self, hosts, agent_type, down_agents_count=0, down_but_version_considered=0): agents = self._get_agents(hosts, agent_type) # bring down the specified agents for agent in agents[:down_agents_count]: agent['heartbeat_timestamp'] -= datetime.timedelta(minutes=60) # bring down just enough so their version is still considered for agent in agents[down_agents_count:( down_but_version_considered + down_agents_count)]: agent['heartbeat_timestamp'] -= datetime.timedelta( seconds=(cfg.CONF.agent_down_time + 1)) self._save_agents(agents) return agents class TestAgentsDbMixin(TestAgentsDbBase): def setUp(self): super(TestAgentsDbMixin, self).setUp() self.agent_status = dict(AGENT_STATUS) def test_get_enabled_agent_on_host_found(self): agents = self._create_and_save_agents(['foo_host'], constants.AGENT_TYPE_L3) expected = self.plugin.get_enabled_agent_on_host( self.context, constants.AGENT_TYPE_L3, 'foo_host') 
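# the lookup should hand back exactly the agent row persisted above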
self.assertEqual(expected, agents[0]) def test_get_enabled_agent_on_host_not_found(self): with mock.patch.object(agents_db.LOG, 'debug') as mock_log: agent = self.plugin.get_enabled_agent_on_host( self.context, constants.AGENT_TYPE_L3, 'foo_agent') self.assertIsNone(agent) self.assertTrue(mock_log.called) def _assert_ref_fields_are_equal(self, reference, result): """Compare (key, value) pairs of a reference dict with the result Note: the result MAY have additional keys """ for field, value in reference.items(): self.assertEqual(value, result[field], field) def test_create_or_update_agent_new_entry(self): self.plugin.create_or_update_agent(self.context, self.agent_status) agent = self.plugin.get_agents(self.context)[0] self._assert_ref_fields_are_equal(self.agent_status, agent) def test_create_or_update_agent_existing_entry(self): self.plugin.create_or_update_agent(self.context, self.agent_status) self.plugin.create_or_update_agent(self.context, self.agent_status) self.plugin.create_or_update_agent(self.context, self.agent_status) agents = self.plugin.get_agents(self.context) self.assertEqual(1, len(agents)) agent = agents[0] self._assert_ref_fields_are_equal(self.agent_status, agent) def test_create_or_update_agent_logs_heartbeat(self): status = self.agent_status.copy() status['configurations'] = {'log_agent_heartbeats': True} with mock.patch.object(agents_db.LOG, 'info') as info: self.plugin.create_or_update_agent(self.context, status) self.assertTrue(info.called) status['configurations'] = {'log_agent_heartbeats': False} info.reset_mock() self.plugin.create_or_update_agent(self.context, status) self.assertFalse(info.called) def test_create_or_update_agent_concurrent_insert(self): # NOTE(rpodolyaka): emulate violation of the unique constraint caused # by a concurrent insert. 
Ensure we make another # attempt on failure with mock.patch('sqlalchemy.orm.Session.add') as add_mock: add_mock.side_effect = [ exc.DBDuplicateEntry(), None ] self.plugin.create_or_update_agent(self.context, self.agent_status) self.assertEqual(2, add_mock.call_count, "Agent entry creation hasn't been retried") def test_create_or_update_agent_disable_new_agents(self): cfg.CONF.set_override('enable_new_agents', False) self.plugin.create_or_update_agent(self.context, self.agent_status) agent = self.plugin.get_agents(self.context)[0] self.assertFalse(agent['admin_state_up']) def test_agent_health_check(self): agents = [{'agent_type': "DHCP Agent", 'heartbeat_timestamp': '2015-05-06 22:40:40.432295', 'host': 'some.node', 'alive': True}] with mock.patch.object(self.plugin, 'get_agents', return_value=agents),\ mock.patch.object(agents_db.LOG, 'warning') as warn,\ mock.patch.object(agents_db.LOG, 'debug') as debug: self.plugin.agent_health_check() self.assertTrue(debug.called) self.assertFalse(warn.called) agents[0]['alive'] = False self.plugin.agent_health_check() warn.assert_called_once_with( mock.ANY, {'count': 1, 'total': 1, 'data': " Type Last heartbeat host\n" " DHCP Agent 2015-05-06 22:40:40.432295 some.node"} ) def test__get_dict(self): db_obj = mock.Mock(conf1='{"test": "1234"}') conf1 = self.plugin._get_dict(db_obj, 'conf1') self.assertIn('test', conf1) self.assertEqual("1234", conf1['test']) def test__get_dict_missing(self): with mock.patch.object(agents_db.LOG, 'warning') as warn: db_obj = mock.Mock(spec=['agent_type', 'host']) self.plugin._get_dict(db_obj, 'missing_conf') self.assertTrue(warn.called) def test__get_dict_ignore_missing(self): with mock.patch.object(agents_db.LOG, 'warning') as warn: db_obj = mock.Mock(spec=['agent_type', 'host']) missing_conf = self.plugin._get_dict(db_obj, 'missing_conf', ignore_missing=True) self.assertEqual({}, missing_conf) warn.assert_not_called() def test__get_dict_broken(self): with mock.patch.object(agents_db.LOG, 'warning') as warn: db_obj = mock.Mock(conf1='{"test": BROKEN') conf1 = self.plugin._get_dict(db_obj, 'conf1', ignore_missing=True) self.assertEqual({}, conf1) self.assertTrue(warn.called) def test_get_configurations_dict(self): db_obj = mock.Mock(configurations='{"cfg1": "val1"}') cfg_dict = self.plugin.get_configuration_dict(db_obj) self.assertIn('cfg1', cfg_dict) def test_get_agents_resource_versions(self): tracker = mock.Mock() self._create_and_save_agents( ['host-%d' % i for i in range(5)], constants.AGENT_TYPE_L3, down_agents_count=3, down_but_version_considered=2) self.plugin.get_agents_resource_versions(tracker) self.assertEqual(2, tracker.set_versions.call_count) class TestAgentsDbGetAgents(TestAgentsDbBase): scenarios = [ ('Get all agents', dict(agents=5, down_agents=2, agents_alive=None, expected_agents=5)), ('Get alive agents (True)', dict(agents=5, down_agents=2, agents_alive='True', expected_agents=3)), ('Get down agents (False)', dict(agents=5, down_agents=2, agents_alive='False', expected_agents=2)), ('Get alive agents (true)', dict(agents=5, down_agents=2, agents_alive='true', expected_agents=3)), ('Get down agents (false)', dict(agents=5, down_agents=2, agents_alive='false', expected_agents=2)), ('Get agents invalid alive filter', dict(agents=5, down_agents=2, agents_alive='invalid', expected_agents=None)), ] def setUp(self): # ensure that the first scenario will execute with nosetests if not hasattr(self, 'agents'): self.__dict__.update(self.scenarios[0][1]) super(TestAgentsDbGetAgents, self).setUp() def test_get_agents(self): hosts = 
['host-%s' % i for i in range(self.agents)] self._create_and_save_agents(hosts, constants.AGENT_TYPE_L3, down_agents_count=self.down_agents) if self.agents_alive == 'invalid': self.assertRaises(n_exc.InvalidInput, self.plugin.get_agents, self.context, filters={'alive': [self.agents_alive]}) else: returned_agents = self.plugin.get_agents( self.context, filters={'alive': [self.agents_alive]} if self.agents_alive else None) self.assertEqual(self.expected_agents, len(returned_agents)) if self.agents_alive: alive = (self.agents_alive == 'True' or self.agents_alive == 'true') for agent in returned_agents: self.assertEqual(alive, agent['alive']) class TestAgentExtRpcCallback(TestAgentsDbBase): def setUp(self): super(TestAgentExtRpcCallback, self).setUp() self.callback = agents_db.AgentExtRpcCallback(self.plugin) self.callback.server_versions_rpc = mock.Mock() self.versions_rpc = self.callback.server_versions_rpc self.callback.START_TIME = datetime.datetime(datetime.MINYEAR, 1, 1) self.update_versions = mock.patch( 'neutron.api.rpc.callbacks.version_manager.' 'update_versions').start() self.agent_state = {'agent_state': dict(AGENT_STATUS)} def test_create_or_update_agent_updates_version_manager(self): self.callback.report_state(self.context, agent_state=self.agent_state, time=TEST_TIME) self.update_versions.assert_called_once_with( mock.ANY, TEST_RESOURCE_VERSIONS) def test_create_or_update_agent_updates_other_servers(self): callback = self.callback callback.report_state(self.context, agent_state=self.agent_state, time=TEST_TIME) report_agent_resource_versions = ( self.versions_rpc.report_agent_resource_versions) report_agent_resource_versions.assert_called_once_with( mock.ANY, mock.ANY, mock.ANY, TEST_RESOURCE_VERSIONS) def test_no_version_updates_on_further_state_reports(self): self.test_create_or_update_agent_updates_version_manager() # agents include resource_versions only in the first report after # start so versions should not be updated on the second report second_agent_state = copy.deepcopy(self.agent_state) second_agent_state['agent_state'].pop('resource_versions') self.update_versions.reset_mock() report_agent_resource_versions = ( self.versions_rpc.report_agent_resource_versions) report_agent_resource_versions.reset_mock() self.callback.report_state(self.context, agent_state=second_agent_state, time=TEST_TIME) self.assertFalse(self.update_versions.called) self.assertFalse(report_agent_resource_versions.called) def test_version_updates_on_agent_revival(self): self.test_create_or_update_agent_updates_version_manager() second_agent_state = copy.deepcopy(self.agent_state) second_agent_state['agent_state'].pop('resource_versions') self._take_down_agent() self.update_versions.reset_mock() report_agent_resource_versions = ( self.versions_rpc.report_agent_resource_versions) report_agent_resource_versions.reset_mock() # agent didn't include resource_versions in report but server will # take them from db for the revived agent self.callback.report_state(self.context, agent_state=second_agent_state, time=TEST_TIME) self.update_versions.assert_called_once_with( mock.ANY, TEST_RESOURCE_VERSIONS) report_agent_resource_versions.assert_called_once_with( mock.ANY, mock.ANY, mock.ANY, TEST_RESOURCE_VERSIONS) def _take_down_agent(self): with self.context.session.begin(subtransactions=True): query = self.context.session.query(agents_db.Agent) agt = query.first() agt.heartbeat_timestamp = ( agt.heartbeat_timestamp - datetime.timedelta(hours=1)) 
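# The concurrent-insert test above presumes an upsert-with-one-retry shape in
# create_or_update_agent. A minimal sketch of that pattern, under that
# assumption, follows; `_insert_row` and `_update_row` are hypothetical
# stand-ins, not the real AgentDbMixin internals.
def _upsert_with_retry(context, agent_state, _insert_row, _update_row):
    # oslo.db maps the backend's unique-constraint violation onto
    # DBDuplicateEntry, so losing the insert race is detected portably
    try:
        return _insert_row(context, agent_state)
    except exc.DBDuplicateEntry:
        # another worker inserted the row first; fall back to updating it
        return _update_row(context, agent_state)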
neutron-8.4.0/neutron/tests/unit/db/test_securitygroups_db.py0000664000567000056710000003353213044372760025757 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import mock import sqlalchemy import testtools from neutron.callbacks import events from neutron.callbacks import exceptions from neutron.callbacks import registry from neutron.callbacks import resources from neutron.common import constants from neutron import context from neutron.db import common_db_mixin from neutron.db import securitygroups_db from neutron.extensions import securitygroup from neutron.tests.unit import testlib_api FAKE_SECGROUP = {'security_group': {"tenant_id": 'fake', 'description': 'fake', 'name': 'fake'}} FAKE_SECGROUP_RULE = {'security_group_rule': {"tenant_id": 'fake', 'description': 'fake', 'name': 'fake', 'port_range_min': '21', 'protocol': 'tcp', 'port_range_max': '23', 'remote_ip_prefix': '10.0.0.1', 'ethertype': 'IPv4', 'remote_group_id': None, 'security_group_id': 'None', 'direction': 'ingress'}} def fake_callback(resource, event, *args, **kwargs): raise KeyError('bar') class SecurityGroupDbMixinImpl(securitygroups_db.SecurityGroupDbMixin, common_db_mixin.CommonDbMixin): pass class SecurityGroupDbMixinTestCase(testlib_api.SqlTestCase): def setUp(self): super(SecurityGroupDbMixinTestCase, self).setUp() self.ctx = context.get_admin_context() self.mixin = SecurityGroupDbMixinImpl() def test_create_security_group_conflict(self): with mock.patch.object(registry, "notify") as mock_notify: mock_notify.side_effect = exceptions.CallbackFailure(Exception()) secgroup = {'security_group': mock.ANY} with testtools.ExpectedException( securitygroup.SecurityGroupConflict): self.mixin.create_security_group(self.ctx, secgroup) def test_delete_security_group_in_use(self): with mock.patch.object(self.mixin, '_get_port_security_group_bindings'),\ mock.patch.object(self.mixin, '_get_security_group'),\ mock.patch.object(registry, "notify") as mock_notify: mock_notify.side_effect = exceptions.CallbackFailure(Exception()) with testtools.ExpectedException( securitygroup.SecurityGroupInUse): self.mixin.delete_security_group(self.ctx, mock.ANY) def test_update_security_group_conflict(self): with mock.patch.object(registry, "notify") as mock_notify: mock_notify.side_effect = exceptions.CallbackFailure(Exception()) secgroup = {'security_group': mock.ANY} with testtools.ExpectedException( securitygroup.SecurityGroupConflict): self.mixin.update_security_group(self.ctx, 'foo_id', secgroup) def test_create_security_group_rule_conflict(self): with mock.patch.object(self.mixin, '_validate_security_group_rule'),\ mock.patch.object(self.mixin, '_check_for_duplicate_rules_in_db'),\ mock.patch.object(registry, "notify") as mock_notify: mock_notify.side_effect = exceptions.CallbackFailure(Exception()) with testtools.ExpectedException( securitygroup.SecurityGroupConflict): self.mixin.create_security_group_rule( self.ctx, mock.MagicMock()) def test__check_for_duplicate_rules_in_db_does_not_drop_protocol(self): with 
mock.patch.object(self.mixin, 'get_security_group_rules', return_value=[mock.Mock()]): context = mock.Mock() rule_dict = { 'security_group_rule': {'protocol': None, 'tenant_id': 'fake', 'security_group_id': 'fake', 'direction': 'fake'} } self.mixin._check_for_duplicate_rules_in_db(context, rule_dict) self.assertIn('protocol', rule_dict['security_group_rule']) def test_delete_security_group_rule_in_use(self): with mock.patch.object(registry, "notify") as mock_notify: mock_notify.side_effect = exceptions.CallbackFailure(Exception()) with testtools.ExpectedException( securitygroup.SecurityGroupRuleInUse): self.mixin.delete_security_group_rule(self.ctx, mock.ANY) def test_delete_security_group_rule_raise_error_on_not_found(self): with testtools.ExpectedException( securitygroup.SecurityGroupRuleNotFound): self.mixin.delete_security_group_rule(self.ctx, 'foo_rule') def test_validate_ethertype_and_protocol(self): fake_ipv4_rules = [{'protocol': constants.PROTO_NAME_IPV6_ICMP, 'ethertype': constants.IPv4}, {'protocol': constants.PROTO_NAME_IPV6_ICMP_LEGACY, 'ethertype': constants.IPv4}, {'protocol': constants.PROTO_NAME_IPV6_ENCAP, 'ethertype': constants.IPv4}, {'protocol': constants.PROTO_NAME_IPV6_ROUTE, 'ethertype': constants.IPv4}, {'protocol': constants.PROTO_NAME_IPV6_FRAG, 'ethertype': constants.IPv4}, {'protocol': constants.PROTO_NAME_IPV6_NONXT, 'ethertype': constants.IPv4}, {'protocol': constants.PROTO_NAME_IPV6_OPTS, 'ethertype': constants.IPv4}] # test wrong protocols for rule in fake_ipv4_rules: with testtools.ExpectedException( securitygroup.SecurityGroupEthertypeConflictWithProtocol): self.mixin._validate_ethertype_and_protocol(rule) def test_security_group_precommit_create_event_fail(self): registry.subscribe(fake_callback, resources.SECURITY_GROUP, events.PRECOMMIT_CREATE) with mock.patch.object(sqlalchemy.orm.session.SessionTransaction, 'rollback') as mock_rollback: self.assertRaises(securitygroup.SecurityGroupConflict, self.mixin.create_security_group, self.ctx, FAKE_SECGROUP) self.assertTrue(mock_rollback.called) def test_security_group_precommit_update_event_fail(self): registry.subscribe(fake_callback, resources.SECURITY_GROUP, events.PRECOMMIT_UPDATE) sg_dict = self.mixin.create_security_group(self.ctx, FAKE_SECGROUP) with mock.patch.object(sqlalchemy.orm.session.SessionTransaction, 'rollback') as mock_rollback: self.assertRaises(securitygroup.SecurityGroupConflict, self.mixin.update_security_group, self.ctx, sg_dict['id'], FAKE_SECGROUP) self.assertTrue(mock_rollback.called) def test_security_group_precommit_delete_event_fail(self): registry.subscribe(fake_callback, resources.SECURITY_GROUP, events.PRECOMMIT_DELETE) sg_dict = self.mixin.create_security_group(self.ctx, FAKE_SECGROUP) with mock.patch.object(sqlalchemy.orm.session.SessionTransaction, 'rollback') as mock_rollback: self.assertRaises(securitygroup.SecurityGroupInUse, self.mixin.delete_security_group, self.ctx, sg_dict['id']) self.assertTrue(mock_rollback.called) def test_security_group_precommit_create_event(self): with mock.patch.object(registry, "notify") as mock_notify: self.mixin.create_security_group(self.ctx, FAKE_SECGROUP) mock_notify.assert_has_calls([mock.call('security_group', 'precommit_create', mock.ANY, context=mock.ANY, is_default=mock.ANY, security_group=mock.ANY)]) def test_security_group_precommit_update_event(self): sg_dict = self.mixin.create_security_group(self.ctx, FAKE_SECGROUP) with mock.patch.object(registry, "notify") as mock_notify: self.mixin.update_security_group(self.ctx, 
sg_dict['id'], FAKE_SECGROUP) mock_notify.assert_has_calls([mock.call('security_group', 'precommit_update', mock.ANY, context=mock.ANY, security_group=mock.ANY, security_group_id=sg_dict['id'])]) def test_security_group_precommit_delete_event(self): sg_dict = self.mixin.create_security_group(self.ctx, FAKE_SECGROUP) with mock.patch.object(registry, "notify") as mock_notify: self.mixin.delete_security_group(self.ctx, sg_dict['id']) mock_notify.assert_has_calls([mock.call('security_group', 'precommit_delete', mock.ANY, context=mock.ANY, security_group=mock.ANY, security_group_id=sg_dict['id'])]) def test_security_group_rule_precommit_create_event_fail(self): registry.subscribe(fake_callback, resources.SECURITY_GROUP_RULE, events.PRECOMMIT_CREATE) sg_dict = self.mixin.create_security_group(self.ctx, FAKE_SECGROUP) fake_rule = FAKE_SECGROUP_RULE fake_rule['security_group_rule']['security_group_id'] = sg_dict['id'] with mock.patch.object(sqlalchemy.orm.session.SessionTransaction, 'rollback') as mock_rollback,\ mock.patch.object(self.mixin, '_get_security_group'): self.assertRaises(securitygroup.SecurityGroupConflict, self.mixin.create_security_group_rule, self.ctx, fake_rule) self.assertTrue(mock_rollback.called) def test_security_group_rule_precommit_delete_event_fail(self): registry.subscribe(fake_callback, resources.SECURITY_GROUP_RULE, events.PRECOMMIT_DELETE) sg_dict = self.mixin.create_security_group(self.ctx, FAKE_SECGROUP) fake_rule = FAKE_SECGROUP_RULE fake_rule['security_group_rule']['security_group_id'] = sg_dict['id'] with mock.patch.object(sqlalchemy.orm.session.SessionTransaction, 'rollback') as mock_rollback,\ mock.patch.object(self.mixin, '_get_security_group'): sg_rule_dict = self.mixin.create_security_group_rule(self.ctx, fake_rule) self.assertRaises(securitygroup.SecurityGroupRuleInUse, self.mixin.delete_security_group_rule, self.ctx, sg_rule_dict['id']) self.assertTrue(mock_rollback.called) def test_security_group_rule_precommit_create_event(self): sg_dict = self.mixin.create_security_group(self.ctx, FAKE_SECGROUP) fake_rule = FAKE_SECGROUP_RULE fake_rule['security_group_rule']['security_group_id'] = sg_dict['id'] with mock.patch.object(registry, "notify") as mock_notify, \ mock.patch.object(self.mixin, '_get_security_group'): self.mixin.create_security_group_rule(self.ctx, fake_rule) mock_notify.assert_has_calls([mock.call('security_group_rule', 'precommit_create', mock.ANY, context=mock.ANY, security_group_rule=mock.ANY)]) def test_security_group_rule_precommit_delete_event(self): sg_dict = self.mixin.create_security_group(self.ctx, FAKE_SECGROUP) fake_rule = FAKE_SECGROUP_RULE fake_rule['security_group_rule']['security_group_id'] = sg_dict['id'] with mock.patch.object(registry, "notify") as mock_notify, \ mock.patch.object(self.mixin, '_get_security_group'): sg_rule_dict = self.mixin.create_security_group_rule(self.ctx, fake_rule) self.mixin.delete_security_group_rule(self.ctx, sg_rule_dict['id']) mock_notify.assert_has_calls([mock.call('security_group_rule', 'precommit_delete', mock.ANY, context=mock.ANY, security_group_rule_id=mock.ANY)]) def test_get_ip_proto_name_and_num(self): protocols = [constants.PROTO_NAME_UDP, str(constants.PROTO_NUM_TCP), 'blah', '111'] protocol_names_nums = ( [[constants.PROTO_NAME_UDP, str(constants.PROTO_NUM_UDP)], [constants.PROTO_NAME_TCP, str(constants.PROTO_NUM_TCP)], ['blah', 'blah'], ['111', '111']]) for i, protocol in enumerate(protocols): self.assertEqual(protocol_names_nums[i], self.mixin._get_ip_proto_name_and_num(protocol)) def 
test__validate_port_range_for_icmp_exception(self): states = [(1, 256, securitygroup.SecurityGroupInvalidIcmpValue), (None, 6, securitygroup.SecurityGroupMissingIcmpType), (300, 1, securitygroup.SecurityGroupInvalidIcmpValue)] for protocol in (constants.PROTO_NAME_ICMP, constants.PROTO_NAME_IPV6_ICMP, constants.PROTO_NAME_IPV6_ICMP_LEGACY): for pmin, pmax, exception in states: self.assertRaises(exception, self.mixin._validate_port_range, {'port_range_min': pmin, 'port_range_max': pmax, 'protocol': protocol}) neutron-8.4.0/neutron/tests/unit/db/test_portsecurity_db.py0000664000567000056710000000335013044372736025422 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import mock from neutron.db import portsecurity_db as pd from neutron.db import portsecurity_db_common as pdc from neutron.tests import base common = pdc.PortSecurityDbCommon class FakePlugin(pd.PortSecurityDbMixin): supported_extension_aliases = ['port-security'] class PortSecurityDbMixinTestCase(base.BaseTestCase): def setUp(self): super(PortSecurityDbMixinTestCase, self).setUp() self.plugin = FakePlugin() @mock.patch.object(common, '_extend_port_security_dict') def test__extend_port_security_dict_relies_on_common(self, extend): response = mock.Mock() dbdata = mock.Mock() self.plugin._extend_port_security_dict(response, dbdata) extend.assert_called_once_with(response, dbdata) @mock.patch.object(common, '_extend_port_security_dict') def test__extend_port_security_dict_ignored_if_extension_disabled(self, extend): response = mock.Mock() dbdata = mock.Mock() self.plugin.supported_extension_aliases = [] self.plugin._extend_port_security_dict(response, dbdata) self.assertFalse(extend.called) neutron-8.4.0/neutron/tests/unit/tests/0000775000567000056710000000000013044373210021330 5ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/tests/unit/tests/test_base.py0000664000567000056710000000474113044372760023672 0ustar jenkinsjenkins00000000000000# Copyright 2014 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
"""Tests to test the test framework""" import sys import unittest2 from neutron.tests import base class BrokenExceptionHandlerTestCase(base.DietTestCase): # Embedded to hide from the regular test discovery class MyTestCase(base.DietTestCase): def setUp(self): super(BrokenExceptionHandlerTestCase.MyTestCase, self).setUp() self.addOnException(self._diag_collect) def _diag_collect(self, exc_info): raise ValueError('whoopsie daisy') def runTest(self): raise IndexError("Thou shalt not pass by reference") def test_broken_exception_handler(self): result = self.MyTestCase().run() # ensure both exceptions are logged self.assertIn('Thou shalt', result.errors[0][1]) self.assertIn('whoopsie', result.errors[0][1]) self.assertFalse(result.wasSuccessful()) class SystemExitTestCase(base.DietTestCase): # Embedded to hide from the regular test discovery class MyTestCase(base.DietTestCase): def __init__(self, exitcode): super(SystemExitTestCase.MyTestCase, self).__init__() self.exitcode = exitcode def runTest(self): if self.exitcode is not None: sys.exit(self.exitcode) def test_no_sysexit(self): result = self.MyTestCase(exitcode=None).run() self.assertTrue(result.wasSuccessful()) def test_sysexit(self): expectedFails = [self.MyTestCase(exitcode) for exitcode in (0, 1)] suite = unittest2.TestSuite(tests=expectedFails) result = self.defaultTestResult() try: suite.run(result) except SystemExit: self.fail('SystemExit escaped!') self.assertEqual([], result.errors) self.assertItemsEqual(set(id(t) for t in expectedFails), set(id(t) for (t, traceback) in result.failures)) neutron-8.4.0/neutron/tests/unit/tests/example/0000775000567000056710000000000013044373210022763 5ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/tests/unit/tests/example/__init__.py0000664000567000056710000000000013044372736025076 0ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/tests/unit/tests/example/README0000664000567000056710000000014013044372736023652 0ustar jenkinsjenkins00000000000000This directory is used by: neutron.tests.unit.tests.test_tools.ImportModulesRecursivelyTestCase neutron-8.4.0/neutron/tests/unit/tests/example/dir/0000775000567000056710000000000013044373210023541 5ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/tests/unit/tests/example/dir/__init__.py0000664000567000056710000000000013044372736025654 0ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/tests/unit/tests/example/dir/example_module.py0000664000567000056710000000000013044372736027115 0ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/tests/unit/tests/__init__.py0000664000567000056710000000000013044372736023443 0ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/tests/unit/tests/common/0000775000567000056710000000000013044373210022620 5ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/tests/unit/tests/common/__init__.py0000664000567000056710000000000013044372736024733 0ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/tests/unit/tests/common/test_net_helpers.py0000664000567000056710000000567113044372760026563 0ustar jenkinsjenkins00000000000000# Copyright 2016 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. import mock from neutron.common import constants as n_const from neutron.tests import base from neutron.tests.common import net_helpers ss_output = """ State Recv-Q Send-Q Local Address:Port Peer Address:Port LISTEN 0 10 127.0.0.1:6640 *:* LISTEN 0 128 *:46675 *:* LISTEN 0 128 *:22 *:* LISTEN 0 128 *:5432 *:* LISTEN 0 128 *:3260 *:* LISTEN 0 50 *:3306 *:* ESTAB 0 36 10.0.0.202:22 10.0.0.44:45258 ESTAB 0 0 127.0.0.1:32965 127.0.0.1:4369 ESTAB 0 0 10.0.0.202:22 10.0.0.44:36104 LISTEN 0 128 :::80 :::* LISTEN 0 128 :::4369 :::* LISTEN 0 128 :::22 :::* LISTEN 0 128 :::5432 :::* LISTEN 0 128 :::3260 :::* LISTEN 0 128 :::5672 :::* ESTAB 0 0 ::ffff:127.0.0.1:4369 ::ffff:127.0.0.1:32965 """ ss_output_template = """ LISTEN 0 10 127.0.0.1:%d *:* """ class PortAllocationTestCase(base.DietTestCase): def test__get_source_ports_from_ss_output(self): result = net_helpers._get_source_ports_from_ss_output(ss_output) expected = {6640, 46675, 5432, 3260, 3306, 22, 32965, 4369, 5672, 80} self.assertEqual(expected, result) def test_get_free_namespace_port(self): ss_output2 = ss_output for p in range(1024, 65535): ss_output2 += ss_output_template % p with mock.patch('neutron.agent.linux.ip_lib.IPWrapper') \ as ipwrapper: m = mock.MagicMock() m.netns.execute.return_value = ss_output2 ipwrapper.return_value = m result = net_helpers.get_free_namespace_port( n_const.PROTO_NAME_TCP) self.assertEqual(65535, result) neutron-8.4.0/neutron/tests/unit/tests/test_post_mortem_debug.py0000664000567000056710000000770013044372736026477 0ustar jenkinsjenkins00000000000000# Copyright 2013 Red Hat, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import sys import mock from six import moves from neutron.tests import base from neutron.tests import post_mortem_debug class TestTesttoolsExceptionHandler(base.BaseTestCase): def test_exception_handler(self): try: self.assertTrue(False) except Exception: exc_info = sys.exc_info() with mock.patch('traceback.print_exception') as mock_print_exception: with mock.patch('pdb.post_mortem') as mock_post_mortem: with mock.patch.object(post_mortem_debug, 'get_ignored_traceback', return_value=mock.Mock()): post_mortem_debug.get_exception_handler('pdb')(exc_info) # traceback will become post_mortem_debug.FilteredTraceback filtered_exc_info = (exc_info[0], exc_info[1], mock.ANY) mock_print_exception.assert_called_once_with(*filtered_exc_info) mock_post_mortem.assert_called_once_with(mock.ANY) def test__get_debugger(self): def import_mock(name, *args): mod_mock = mock.Mock() mod_mock.__name__ = name mod_mock.post_mortem = mock.Mock() return mod_mock with mock.patch('six.moves.builtins.__import__', side_effect=import_mock): pdb_debugger = post_mortem_debug._get_debugger('pdb') pudb_debugger = post_mortem_debug._get_debugger('pudb') self.assertEqual('pdb', pdb_debugger.__name__) self.assertEqual('pudb', pudb_debugger.__name__) class TestFilteredTraceback(base.BaseTestCase): def test_filter_traceback(self): tb1 = mock.Mock() tb2 = mock.Mock() tb1.tb_next = tb2 tb2.tb_next = None ftb1 = post_mortem_debug.FilteredTraceback(tb1, tb2) for attr in ['lasti', 'lineno', 'frame']: attr_name = 'tb_%s' % attr self.assertEqual(getattr(tb1, attr_name, None), getattr(ftb1, attr_name, None)) self.assertIsNone(ftb1.tb_next) class TestGetIgnoredTraceback(base.BaseTestCase): def _test_get_ignored_traceback(self, ignored_bit_array, expected): root_tb = mock.Mock() tb = root_tb tracebacks = [tb] for x in moves.range(len(ignored_bit_array) - 1): tb.tb_next = mock.Mock() tb = tb.tb_next tracebacks.append(tb) tb.tb_next = None tb = root_tb for ignored in ignored_bit_array: if ignored: tb.tb_frame.f_globals = ['__unittest'] else: tb.tb_frame.f_globals = [] tb = tb.tb_next actual = post_mortem_debug.get_ignored_traceback(root_tb) if expected is not None: expected = tracebacks[expected] self.assertEqual(expected, actual) def test_no_ignored_tracebacks(self): self._test_get_ignored_traceback([0, 0, 0], None) def test_single_member_trailing_chain(self): self._test_get_ignored_traceback([0, 0, 1], 2) def test_two_member_trailing_chain(self): self._test_get_ignored_traceback([0, 1, 1], 1) def test_first_traceback_ignored(self): self._test_get_ignored_traceback([1, 0, 0], None) def test_middle_traceback_ignored(self): self._test_get_ignored_traceback([0, 1, 0], None) neutron-8.4.0/neutron/tests/unit/tests/test_tools.py0000664000567000056710000000223213044372760024111 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import os import sys from neutron.tests import base from neutron.tests import tools from neutron.tests.unit import tests # noqa EXAMPLE_MODULE = 'neutron.tests.unit.tests.example.dir.example_module' class ImportModulesRecursivelyTestCase(base.BaseTestCase): def test_object_modules(self): sys.modules.pop(EXAMPLE_MODULE, None) modules = tools.import_modules_recursively( os.path.dirname(tests.__file__)) self.assertIn( 'neutron.tests.unit.tests.example.dir.example_module', modules) self.assertIn(EXAMPLE_MODULE, sys.modules) neutron-8.4.0/neutron/tests/unit/notifiers/0000775000567000056710000000000013044373210022170 5ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/tests/unit/notifiers/__init__.py0000664000567000056710000000000013044372736024303 0ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/tests/unit/notifiers/test_batch_notifier.py0000664000567000056710000000376013044372736026603 0ustar jenkinsjenkins00000000000000# Copyright (c) 2014 OpenStack Foundation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import mock from neutron.notifiers import batch_notifier from neutron.tests import base class TestBatchNotifier(base.BaseTestCase): def setUp(self): super(TestBatchNotifier, self).setUp() self.notifier = batch_notifier.BatchNotifier(0.1, lambda x: x) self.spawn_n = mock.patch('eventlet.spawn_n').start() def test_queue_event_no_event(self): self.notifier.queue_event(None) self.assertEqual(0, len(self.notifier.pending_events)) self.assertEqual(0, self.spawn_n.call_count) def test_queue_event_first_event(self): self.notifier.queue_event(mock.Mock()) self.assertEqual(1, len(self.notifier.pending_events)) self.assertEqual(1, self.spawn_n.call_count) def test_queue_event_multiple_events(self): events = 6 for i in range(0, events): self.notifier.queue_event(mock.Mock()) self.assertEqual(events, len(self.notifier.pending_events)) self.assertEqual(1, self.spawn_n.call_count) def test_queue_event_call_send_events(self): with mock.patch.object(self.notifier, 'callback') as send_events: self.spawn_n.side_effect = lambda func: func() self.notifier.queue_event(mock.Mock()) self.assertFalse(self.notifier._waiting_to_send) self.assertTrue(send_events.called) neutron-8.4.0/neutron/tests/unit/notifiers/test_nova.py0000664000567000056710000003724113044372760024564 0ustar jenkinsjenkins00000000000000# Copyright 2014 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
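# These tests exercise neutron.notifiers.nova.Notifier, which batches
# 'network-changed' and 'network-vif-*' events and ships them to Nova via
# novaclient's server_external_events API.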
import mock from novaclient import exceptions as nova_exceptions from oslo_config import cfg from oslo_utils import uuidutils from sqlalchemy.orm import attributes as sql_attr from neutron.common import constants as n_const from neutron.common import exceptions as n_exc from neutron.db import models_v2 from neutron.notifiers import nova from neutron.tests import base DEVICE_OWNER_COMPUTE = n_const.DEVICE_OWNER_COMPUTE_PREFIX + 'fake' class TestNovaNotify(base.BaseTestCase): def setUp(self, plugin=None): super(TestNovaNotify, self).setUp() class FakePlugin(object): def get_port(self, context, port_id): device_id = '32102d7b-1cf4-404d-b50a-97aae1f55f87' return {'device_id': device_id, 'device_owner': DEVICE_OWNER_COMPUTE} self.nova_notifier = nova.Notifier() self.nova_notifier._plugin_ref = FakePlugin() def test_notify_port_status_all_values(self): states = [n_const.PORT_STATUS_ACTIVE, n_const.PORT_STATUS_DOWN, n_const.PORT_STATUS_ERROR, n_const.PORT_STATUS_BUILD, sql_attr.NO_VALUE] device_id = '32102d7b-1cf4-404d-b50a-97aae1f55f87' # test all combinations for previous_port_status in states: for current_port_status in states: port = models_v2.Port(id='port-uuid', device_id=device_id, device_owner=DEVICE_OWNER_COMPUTE, status=current_port_status) self._record_port_status_changed_helper(current_port_status, previous_port_status, port) def test_port_without_uuid_device_id_no_notify(self): port = models_v2.Port(id='port-uuid', device_id='compute_probe:', device_owner=DEVICE_OWNER_COMPUTE, status=n_const.PORT_STATUS_ACTIVE) self._record_port_status_changed_helper(n_const.PORT_STATUS_ACTIVE, sql_attr.NO_VALUE, port) def test_port_without_device_owner_no_notify(self): device_id = '32102d7b-1cf4-404d-b50a-97aae1f55f87' port = models_v2.Port(id='port-uuid', device_id=device_id, status=n_const.PORT_STATUS_ACTIVE) self._record_port_status_changed_helper(n_const.PORT_STATUS_ACTIVE, sql_attr.NO_VALUE, port) def test_port_without_device_id_no_notify(self): port = models_v2.Port(id='port-uuid', device_owner=n_const.DEVICE_OWNER_DHCP, status=n_const.PORT_STATUS_ACTIVE) self._record_port_status_changed_helper(n_const.PORT_STATUS_ACTIVE, sql_attr.NO_VALUE, port) def test_port_without_id_no_notify(self): device_id = '32102d7b-1cf4-404d-b50a-97aae1f55f87' port = models_v2.Port(device_id=device_id, device_owner=DEVICE_OWNER_COMPUTE, status=n_const.PORT_STATUS_ACTIVE) self._record_port_status_changed_helper(n_const.PORT_STATUS_ACTIVE, sql_attr.NO_VALUE, port) def test_non_compute_instances_no_notify(self): device_id = '32102d7b-1cf4-404d-b50a-97aae1f55f87' port = models_v2.Port(id='port-uuid', device_id=device_id, device_owner=n_const.DEVICE_OWNER_DHCP, status=n_const.PORT_STATUS_ACTIVE) self._record_port_status_changed_helper(n_const.PORT_STATUS_ACTIVE, sql_attr.NO_VALUE, port) def _record_port_status_changed_helper(self, current_port_status, previous_port_status, port): if not (port.device_id and port.id and port.device_owner and port.device_owner.startswith( n_const.DEVICE_OWNER_COMPUTE_PREFIX) and uuidutils.is_uuid_like(port.device_id)): return if (previous_port_status == n_const.PORT_STATUS_ACTIVE and current_port_status == n_const.PORT_STATUS_DOWN): event_name = nova.VIF_UNPLUGGED elif (previous_port_status in [sql_attr.NO_VALUE, n_const.PORT_STATUS_DOWN, n_const.PORT_STATUS_BUILD] and current_port_status in [n_const.PORT_STATUS_ACTIVE, n_const.PORT_STATUS_ERROR]): event_name = nova.VIF_PLUGGED else: return status = nova.NEUTRON_NOVA_EVENT_STATUS_MAP.get(current_port_status) 
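# drive the notifier with the same transition and verify that it stashed an
# identical event dict on the port as _notify_event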
        self.nova_notifier.record_port_status_changed(port,
                                                      current_port_status,
                                                      previous_port_status,
                                                      None)
        event = {'server_uuid': port.device_id, 'status': status,
                 'name': event_name, 'tag': 'port-uuid'}
        self.assertEqual(event, port._notify_event)

    def test_update_fixed_ip_changed(self):
        device_id = '32102d7b-1cf4-404d-b50a-97aae1f55f87'
        returned_obj = {'port':
                        {'device_owner': DEVICE_OWNER_COMPUTE,
                         'id': u'bee50827-bcee-4cc8-91c1-a27b0ce54222',
                         'device_id': device_id}}
        expected_event = {'server_uuid': device_id,
                          'name': 'network-changed'}
        event = self.nova_notifier.create_port_changed_event('update_port',
                                                             {}, returned_obj)
        self.assertEqual(expected_event, event)

    def test_create_floatingip_notify(self):
        device_id = '32102d7b-1cf4-404d-b50a-97aae1f55f87'
        returned_obj = {'floatingip':
                        {'port_id': u'bee50827-bcee-4cc8-91c1-a27b0ce54222'}}
        expected_event = {'server_uuid': device_id,
                          'name': 'network-changed'}
        event = self.nova_notifier.create_port_changed_event(
            'create_floatingip', {}, returned_obj)
        self.assertEqual(expected_event, event)

    def test_create_floatingip_no_port_id_no_notify(self):
        returned_obj = {'floatingip': {'port_id': None}}
        event = self.nova_notifier.create_port_changed_event(
            'create_floatingip', {}, returned_obj)
        self.assertIsNone(event)

    def test_delete_floatingip_notify(self):
        device_id = '32102d7b-1cf4-404d-b50a-97aae1f55f87'
        returned_obj = {'floatingip':
                        {'port_id': u'bee50827-bcee-4cc8-91c1-a27b0ce54222'}}
        expected_event = {'server_uuid': device_id,
                          'name': 'network-changed'}
        event = self.nova_notifier.create_port_changed_event(
            'delete_floatingip', {}, returned_obj)
        self.assertEqual(expected_event, event)

    def test_delete_floatingip_deleted_port_no_notify(self):
        port_id = 'bee50827-bcee-4cc8-91c1-a27b0ce54222'
        with mock.patch.object(
                self.nova_notifier._plugin_ref, 'get_port',
                side_effect=n_exc.PortNotFound(port_id=port_id)):
            returned_obj = {'floatingip': {'port_id': port_id}}
            event = self.nova_notifier.create_port_changed_event(
                'delete_floatingip', {}, returned_obj)
            self.assertIsNone(event)

    def test_delete_floatingip_no_port_id_no_notify(self):
        returned_obj = {'floatingip': {'port_id': None}}
        event = self.nova_notifier.create_port_changed_event(
            'delete_floatingip', {}, returned_obj)
        self.assertIsNone(event)

    def test_associate_floatingip_notify(self):
        device_id = '32102d7b-1cf4-404d-b50a-97aae1f55f87'
        returned_obj = {'floatingip':
                        {'port_id': u'5a39def4-3d3f-473d-9ff4-8e90064b9cc1'}}
        original_obj = {'port_id': None}
        expected_event = {'server_uuid': device_id,
                          'name': 'network-changed'}
        event = self.nova_notifier.create_port_changed_event(
            'update_floatingip', original_obj, returned_obj)
        self.assertEqual(expected_event, event)

    def test_disassociate_floatingip_notify(self):
        device_id = '32102d7b-1cf4-404d-b50a-97aae1f55f87'
        returned_obj = {'floatingip': {'port_id': None}}
        original_obj = {'port_id': '5a39def4-3d3f-473d-9ff4-8e90064b9cc1'}
        expected_event = {'server_uuid': device_id,
                          'name': 'network-changed'}
        event = self.nova_notifier.create_port_changed_event(
            'update_floatingip', original_obj, returned_obj)
        self.assertEqual(expected_event, event)

    def test_no_notification_notify_nova_on_port_data_changes_false(self):
        cfg.CONF.set_override('notify_nova_on_port_data_changes', False)
        with mock.patch.object(self.nova_notifier,
                               'send_events') as send_events:
            self.nova_notifier.send_network_change('update_floatingip',
                                                   {}, {})
            self.assertFalse(send_events.called)

    def test_nova_send_events_returns_bad_list(self):
        with mock.patch.object(
                self.nova_notifier.nclient.server_external_events,
                'create') as nclient_create:
            nclient_create.return_value = 'i am a string!'
            self.nova_notifier.send_events([])

    def test_nova_send_event_raises_404(self):
        with mock.patch.object(
                self.nova_notifier.nclient.server_external_events,
                'create') as nclient_create:
            nclient_create.side_effect = nova_exceptions.NotFound
            self.nova_notifier.send_events([])

    def test_nova_send_events_raises(self):
        with mock.patch.object(
                self.nova_notifier.nclient.server_external_events,
                'create') as nclient_create:
            nclient_create.side_effect = Exception
            self.nova_notifier.send_events([])

    def test_nova_send_events_returns_non_200(self):
        device_id = '32102d7b-1cf4-404d-b50a-97aae1f55f87'
        with mock.patch.object(
                self.nova_notifier.nclient.server_external_events,
                'create') as nclient_create:
            nclient_create.return_value = [{'code': 404,
                                            'name': 'network-changed',
                                            'server_uuid': device_id}]
            self.nova_notifier.send_events(
                [{'name': 'network-changed', 'server_uuid': device_id}])

    def test_nova_send_events_return_200(self):
        device_id = '32102d7b-1cf4-404d-b50a-97aae1f55f87'
        with mock.patch.object(
                self.nova_notifier.nclient.server_external_events,
                'create') as nclient_create:
            nclient_create.return_value = [{'code': 200,
                                            'name': 'network-changed',
                                            'server_uuid': device_id}]
            self.nova_notifier.send_events(
                [{'name': 'network-changed', 'server_uuid': device_id}])

    def test_nova_send_events_multiple(self):
        device_id = '32102d7b-1cf4-404d-b50a-97aae1f55f87'
        with mock.patch.object(
                self.nova_notifier.nclient.server_external_events,
                'create') as nclient_create:
            nclient_create.return_value = [{'code': 200,
                                            'name': 'network-changed',
                                            'server_uuid': device_id},
                                           {'code': 200,
                                            'name': 'network-changed',
                                            'server_uuid': device_id}]
            self.nova_notifier.send_events([
                {'name': 'network-changed', 'server_uuid': device_id},
                {'name': 'network-changed', 'server_uuid': device_id}])

    def test_reassociate_floatingip_without_disassociate_event(self):
        returned_obj = {'floatingip':
                        {'port_id': 'f5348a16-609a-4971-b0f0-4b8def5235fb'}}
        original_obj = {'port_id': '5a39def4-3d3f-473d-9ff4-8e90064b9cc1'}
        self.nova_notifier._waiting_to_send = True
        self.nova_notifier.send_network_change(
            'update_floatingip', original_obj, returned_obj)
        self.assertEqual(
            2, len(self.nova_notifier.batch_notifier.pending_events))

        returned_obj_non = {'floatingip': {'port_id': None}}
        event_dis = self.nova_notifier.create_port_changed_event(
            'update_floatingip', original_obj, returned_obj_non)
        event_assoc = self.nova_notifier.create_port_changed_event(
            'update_floatingip', original_obj, returned_obj)
        self.assertEqual(
            self.nova_notifier.batch_notifier.pending_events[0], event_dis)
        self.assertEqual(
            self.nova_notifier.batch_notifier.pending_events[1], event_assoc)

    def test_delete_port_notify(self):
        device_id = '32102d7b-1cf4-404d-b50a-97aae1f55f87'
        port_id = 'bee50827-bcee-4cc8-91c1-a27b0ce54222'
        returned_obj = {'port':
                        {'device_owner': DEVICE_OWNER_COMPUTE,
                         'id': port_id,
                         'device_id': device_id}}
        expected_event = {'server_uuid': device_id,
                          'name': nova.VIF_DELETED,
                          'tag': port_id}
        event = self.nova_notifier.create_port_changed_event('delete_port',
                                                             {}, returned_obj)
        self.assertEqual(expected_event, event)

    @mock.patch('novaclient.client.Client')
    def test_endpoint_types(self, mock_client):
        nova.Notifier()
        mock_client.assert_called_once_with(
            nova.NOVA_API_VERSION,
            session=mock.ANY,
            region_name=cfg.CONF.nova.region_name,
            endpoint_type='public',
            extensions=mock.ANY)

        mock_client.reset_mock()
        cfg.CONF.set_override('endpoint_type', 'internal',
'nova') nova.Notifier() mock_client.assert_called_once_with( nova.NOVA_API_VERSION, session=mock.ANY, region_name=cfg.CONF.nova.region_name, endpoint_type='internal', extensions=mock.ANY) neutron-8.4.0/neutron/tests/unit/cmd/0000775000567000056710000000000013044373210020731 5ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/tests/unit/cmd/__init__.py0000664000567000056710000000000013044372736023044 0ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/tests/unit/cmd/server/0000775000567000056710000000000013044373210022237 5ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/tests/unit/cmd/server/__init__.py0000664000567000056710000000245313044372736024370 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import mock from oslo_config import cfg from neutron.cmd.eventlet import server from neutron.tests import base @mock.patch('neutron.server.wsgi_eventlet.eventlet_wsgi_server') @mock.patch('neutron.server.wsgi_pecan.pecan_wsgi_server') class TestNeutronServer(base.BaseTestCase): def test_legacy_server(self, pecan_mock, legacy_mock): cfg.CONF.set_override('web_framework', 'legacy') server._main_neutron_server() pecan_mock.assert_not_called() legacy_mock.assert_called_with() def test_pecan_server(self, pecan_mock, legacy_mock): cfg.CONF.set_override('web_framework', 'pecan') server._main_neutron_server() pecan_mock.assert_called_with() legacy_mock.assert_not_called() neutron-8.4.0/neutron/tests/unit/cmd/test_ovs_cleanup.py0000664000567000056710000000647413044372760024704 0ustar jenkinsjenkins00000000000000# Copyright (c) 2012 OpenStack Foundation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
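# These ovs_cleanup tests replace ovs_lib and ip_lib with mocks, so the
# port-collection and port-deletion logic can be verified without a running
# Open vSwitch or root privileges.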
import itertools import mock from oslo_utils import uuidutils from neutron.agent.common import ovs_lib from neutron.agent.linux import ip_lib from neutron.cmd import ovs_cleanup as util from neutron.tests import base class TestOVSCleanup(base.BaseTestCase): @mock.patch('neutron.common.config.setup_logging') @mock.patch('neutron.cmd.ovs_cleanup.setup_conf') @mock.patch('neutron.agent.common.ovs_lib.BaseOVS.get_bridges') @mock.patch('neutron.agent.common.ovs_lib.OVSBridge') @mock.patch.object(util, 'collect_neutron_ports') @mock.patch.object(util, 'delete_neutron_ports') def test_main(self, mock_delete, mock_collect, mock_ovs, mock_get_bridges, mock_conf, mock_logging): bridges = ['br-int', 'br-ex'] ports = ['p1', 'p2', 'p3'] conf = mock.Mock() conf.ovs_all_ports = False conf.ovs_integration_bridge = 'br-int' conf.external_network_bridge = 'br-ex' mock_conf.return_value = conf mock_get_bridges.return_value = bridges mock_collect.return_value = ports util.main() mock_ovs.assert_has_calls([mock.call().delete_ports( all_ports=False)]) mock_collect.assert_called_once_with(set(bridges)) mock_delete.assert_called_once_with(ports) def test_collect_neutron_ports(self): port1 = ovs_lib.VifPort('tap1234', 1, uuidutils.generate_uuid(), '11:22:33:44:55:66', 'br') port2 = ovs_lib.VifPort('tap5678', 2, uuidutils.generate_uuid(), '77:88:99:aa:bb:cc', 'br') port3 = ovs_lib.VifPort('tap90ab', 3, uuidutils.generate_uuid(), '99:00:aa:bb:cc:dd', 'br') ports = [[port1, port2], [port3]] portnames = [p.port_name for p in itertools.chain(*ports)] with mock.patch('neutron.agent.common.ovs_lib.OVSBridge') as ovs: ovs.return_value.get_vif_ports.side_effect = ports bridges = ['br-int', 'br-ex'] ret = util.collect_neutron_ports(bridges) self.assertEqual(ret, portnames) @mock.patch.object(ip_lib, 'IPDevice') def test_delete_neutron_ports(self, mock_ip): ports = ['tap1234', 'tap5678', 'tap09ab'] port_found = [True, False, True] mock_ip.return_value.exists.side_effect = port_found util.delete_neutron_ports(ports) mock_ip.assert_has_calls( [mock.call('tap1234'), mock.call().exists(), mock.call().link.delete(), mock.call('tap5678'), mock.call().exists(), mock.call('tap09ab'), mock.call().exists(), mock.call().link.delete()]) neutron-8.4.0/neutron/tests/unit/cmd/test_netns_cleanup.py0000664000567000056710000002553413044372760025222 0ustar jenkinsjenkins00000000000000# Copyright (c) 2012 OpenStack Foundation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
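# The netns-cleanup tests below replace IPWrapper and the DHCP driver with
# mocks, so namespace eligibility, device unplugging and namespace
# destruction can be asserted without touching real network namespaces.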
import mock from neutron.cmd import netns_cleanup as util from neutron.tests import base class TestNetnsCleanup(base.BaseTestCase): def test_kill_dhcp(self, dhcp_active=True): conf = mock.Mock() conf.dhcp_driver = 'driver' method_to_patch = 'oslo_utils.importutils.import_object' with mock.patch(method_to_patch) as import_object: driver = mock.Mock() driver.active = dhcp_active import_object.return_value = driver util.kill_dhcp(conf, 'ns') expected_params = {'conf': conf, 'network': mock.ANY, 'process_monitor': mock.ANY, 'plugin': mock.ANY} import_object.assert_called_once_with('driver', **expected_params) if dhcp_active: driver.assert_has_calls([mock.call.disable()]) else: self.assertFalse(driver.called) def test_kill_dhcp_no_active(self): self.test_kill_dhcp(False) def test_eligible_for_deletion_ns_not_uuid(self): conf = mock.Mock() conf.agent_type = None ns = 'not_a_uuid' self.assertFalse(util.eligible_for_deletion(conf, ns)) def _test_eligible_for_deletion_helper(self, prefix, force, is_empty, expected): ns = prefix + '6e322ac7-ab50-4f53-9cdc-d1d3c1164b6d' conf = mock.Mock() conf.agent_type = None with mock.patch('neutron.agent.linux.ip_lib.IPWrapper') as ip_wrap: ip_wrap.return_value.namespace_is_empty.return_value = is_empty self.assertEqual(util.eligible_for_deletion(conf, ns, force), expected) expected_calls = [mock.call(namespace=ns)] if not force: expected_calls.append(mock.call().namespace_is_empty()) ip_wrap.assert_has_calls(expected_calls) def test_eligible_for_deletion_empty(self): self._test_eligible_for_deletion_helper('qrouter-', False, True, True) def test_eligible_for_deletion_not_empty(self): self._test_eligible_for_deletion_helper('qdhcp-', False, False, False) def test_eligible_for_deletion_not_empty_forced(self): self._test_eligible_for_deletion_helper('qdhcp-', True, False, True) def test_eligible_for_deletion_fip_namespace(self): self._test_eligible_for_deletion_helper('fip-', False, True, True) def test_eligible_for_deletion_lbaas_namespace(self): self._test_eligible_for_deletion_helper('qlbaas-', False, True, True) def test_eligible_for_deletion_snat_namespace(self): self._test_eligible_for_deletion_helper('snat-', False, True, True) def test_eligible_for_deletion_filtered_by_agent_type(self): ns_dhcp = 'qdhcp-' + '6e322ac7-ab50-4f53-9cdc-d1d3c1164b6d' ns_l3 = 'qrouter-' + '6e322ac7-ab50-4f53-9cdc-d1d3c1164b6d' conf = mock.Mock() conf.agent_type = 'dhcp' with mock.patch('neutron.agent.linux.ip_lib.IPWrapper') as ip_wrap: ip_wrap.return_value.namespace_is_empty.return_value = True self.assertEqual(True, util.eligible_for_deletion(conf, ns_dhcp, False)) self.assertEqual(False, util.eligible_for_deletion(conf, ns_l3, False)) expected_calls = [mock.call(namespace=ns_dhcp), mock.call().namespace_is_empty()] ip_wrap.assert_has_calls(expected_calls) def test_unplug_device_regular_device(self): conf = mock.Mock() device = mock.Mock() util.unplug_device(conf, device) device.assert_has_calls([mock.call.link.delete()]) def test_unplug_device_ovs_port(self): conf = mock.Mock() conf.ovs_integration_bridge = 'br-int' device = mock.Mock() device.name = 'tap1' device.link.delete.side_effect = RuntimeError with mock.patch( 'neutron.agent.common.ovs_lib.OVSBridge') as ovs_br_cls: br_patch = mock.patch( 'neutron.agent.common.ovs_lib.BaseOVS.get_bridge_for_iface') with br_patch as mock_get_bridge_for_iface: mock_get_bridge_for_iface.return_value = 'br-int' ovs_bridge = mock.Mock() ovs_br_cls.return_value = ovs_bridge util.unplug_device(conf, device) 
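                    # Deleting the link raised RuntimeError above, so
                    # unplug_device is expected to fall back to looking up
                    # the owning bridge and removing the port via OVS.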
mock_get_bridge_for_iface.assert_called_once_with('tap1') ovs_br_cls.assert_called_once_with('br-int') ovs_bridge.assert_has_calls( [mock.call.delete_port(device.name)]) def test_unplug_device_cannot_determine_bridge_port(self): conf = mock.Mock() conf.ovs_integration_bridge = 'br-int' device = mock.Mock() device.name = 'tap1' device.link.delete.side_effect = RuntimeError with mock.patch( 'neutron.agent.common.ovs_lib.OVSBridge') as ovs_br_cls: br_patch = mock.patch( 'neutron.agent.common.ovs_lib.BaseOVS.get_bridge_for_iface') with br_patch as mock_get_bridge_for_iface: with mock.patch.object(util.LOG, 'debug') as debug: mock_get_bridge_for_iface.return_value = None ovs_bridge = mock.Mock() ovs_br_cls.return_value = ovs_bridge util.unplug_device(conf, device) mock_get_bridge_for_iface.assert_called_once_with('tap1') self.assertEqual([], ovs_br_cls.mock_calls) self.assertTrue(debug.called) def _test_destroy_namespace_helper(self, force, num_devices): ns = 'qrouter-6e322ac7-ab50-4f53-9cdc-d1d3c1164b6d' conf = mock.Mock() lo_device = mock.Mock() lo_device.name = 'lo' devices = [lo_device] while num_devices: dev = mock.Mock() dev.name = 'tap%d' % num_devices devices.append(dev) num_devices -= 1 with mock.patch('neutron.agent.linux.ip_lib.IPWrapper') as ip_wrap: ip_wrap.return_value.get_devices.return_value = devices ip_wrap.return_value.netns.exists.return_value = True with mock.patch.object(util, 'unplug_device') as unplug: with mock.patch.object(util, 'kill_dhcp') as kill_dhcp: util.destroy_namespace(conf, ns, force) expected = [mock.call(namespace=ns)] if force: expected.extend([ mock.call().netns.exists(ns), mock.call().get_devices(exclude_loopback=True)]) self.assertTrue(kill_dhcp.called) unplug.assert_has_calls( [mock.call(conf, d) for d in devices[1:]]) expected.append(mock.call().garbage_collect_namespace()) ip_wrap.assert_has_calls(expected) def test_destroy_namespace_empty(self): self._test_destroy_namespace_helper(False, 0) def test_destroy_namespace_not_empty(self): self._test_destroy_namespace_helper(False, 1) def test_destroy_namespace_not_empty_forced(self): self._test_destroy_namespace_helper(True, 2) def test_destroy_namespace_exception(self): ns = 'qrouter-6e322ac7-ab50-4f53-9cdc-d1d3c1164b6d' conf = mock.Mock() with mock.patch('neutron.agent.linux.ip_lib.IPWrapper') as ip_wrap: ip_wrap.side_effect = Exception() util.destroy_namespace(conf, ns) def test_main(self): namespaces = ['ns1', 'ns2'] with mock.patch('neutron.agent.linux.ip_lib.IPWrapper') as ip_wrap: ip_wrap.get_namespaces.return_value = namespaces with mock.patch('time.sleep') as time_sleep: conf = mock.Mock() conf.force = False methods_to_mock = dict( eligible_for_deletion=mock.DEFAULT, destroy_namespace=mock.DEFAULT, setup_conf=mock.DEFAULT) with mock.patch.multiple(util, **methods_to_mock) as mocks: mocks['eligible_for_deletion'].return_value = True mocks['setup_conf'].return_value = conf with mock.patch('neutron.common.config.setup_logging'): util.main() mocks['eligible_for_deletion'].assert_has_calls( [mock.call(conf, 'ns1', False), mock.call(conf, 'ns2', False)]) mocks['destroy_namespace'].assert_has_calls( [mock.call(conf, 'ns1', False), mock.call(conf, 'ns2', False)]) ip_wrap.assert_has_calls( [mock.call.get_namespaces()]) time_sleep.assert_called_once_with(2) def test_main_no_candidates(self): namespaces = ['ns1', 'ns2'] with mock.patch('neutron.agent.linux.ip_lib.IPWrapper') as ip_wrap: ip_wrap.get_namespaces.return_value = namespaces with mock.patch('time.sleep') as time_sleep: conf = mock.Mock() 
conf.force = False methods_to_mock = dict( eligible_for_deletion=mock.DEFAULT, destroy_namespace=mock.DEFAULT, setup_conf=mock.DEFAULT) with mock.patch.multiple(util, **methods_to_mock) as mocks: mocks['eligible_for_deletion'].return_value = False mocks['setup_conf'].return_value = conf with mock.patch('neutron.common.config.setup_logging'): util.main() ip_wrap.assert_has_calls( [mock.call.get_namespaces()]) mocks['eligible_for_deletion'].assert_has_calls( [mock.call(conf, 'ns1', False), mock.call(conf, 'ns2', False)]) self.assertFalse(mocks['destroy_namespace'].called) self.assertFalse(time_sleep.called) neutron-8.4.0/neutron/tests/unit/cmd/test_sanity_check.py0000664000567000056710000000155013044372736025023 0ustar jenkinsjenkins00000000000000# Copyright 2015 Red Hat, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron.cmd import sanity_check from neutron.tests import base class TestSanityCheck(base.BaseTestCase): def test_setup_conf(self): # verify that configuration can be successfully imported sanity_check.setup_conf() neutron-8.4.0/neutron/tests/unit/extension_stubs.py0000664000567000056710000000420013044372760024001 0ustar jenkinsjenkins00000000000000# Copyright 2011 OpenStack Foundation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import abc from neutron.api import extensions from neutron import wsgi class StubExtension(extensions.ExtensionDescriptor): def __init__(self, alias="stub_extension", optional=None): self.alias = alias self.optional = optional or [] def get_name(self): return "Stub Extension" def get_alias(self): return self.alias def get_description(self): return "" def get_updated(self): return "" def get_optional_extensions(self): return self.optional class StubExtensionWithReqs(StubExtension): def get_required_extensions(self): return ["foo"] class StubPlugin(object): def __init__(self, supported_extensions=None): supported_extensions = supported_extensions or [] self.supported_extension_aliases = supported_extensions class ExtensionExpectingPluginInterface(StubExtension): """Expect plugin to implement all methods in StubPluginInterface. This extension expects plugin to implement all the methods defined in StubPluginInterface. 
""" def get_plugin_interface(self): return StubPluginInterface class StubPluginInterface(extensions.PluginInterface): @abc.abstractmethod def get_foo(self, bar=None): pass class StubBaseAppController(wsgi.Controller): def index(self, request): return "base app index" def show(self, request, id): return {'fort': 'knox'} def update(self, request, id): return {'uneditable': 'original_value'} neutron-8.4.0/neutron/tests/functional/0000775000567000056710000000000013044373210021351 5ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/tests/functional/test_service.py0000664000567000056710000000305413044372736024440 0ustar jenkinsjenkins00000000000000# Copyright 2014 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_concurrency import processutils from oslo_config import cfg from oslo_service import service from neutron import service as neutron_service from neutron.tests import base from neutron.tests.functional import test_server class TestService(base.BaseTestCase): def test_api_workers_default(self): self.assertEqual(processutils.get_worker_count(), neutron_service._get_api_workers()) def test_api_workers_from_config(self): cfg.CONF.set_override('api_workers', 1234) self.assertEqual(1234, neutron_service._get_api_workers()) class TestServiceRestart(test_server.TestNeutronServer): def _start_service(self, host, binary, topic, manager, workers, *args, **kwargs): server = neutron_service.Service(host, binary, topic, manager, *args, **kwargs) service.launch(cfg.CONF, server, workers).wait() neutron-8.4.0/neutron/tests/functional/plugins/0000775000567000056710000000000013044373210023032 5ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/tests/functional/plugins/__init__.py0000664000567000056710000000000013044372736025145 0ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/tests/functional/plugins/ml2/0000775000567000056710000000000013044373210023524 5ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/tests/functional/plugins/ml2/__init__.py0000664000567000056710000000000013044372736025637 0ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/tests/functional/plugins/ml2/drivers/0000775000567000056710000000000013044373210025202 5ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/tests/functional/plugins/ml2/drivers/__init__.py0000664000567000056710000000000013044372736027315 0ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/tests/functional/plugins/ml2/drivers/macvtap/0000775000567000056710000000000013044373210026635 5ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/tests/functional/plugins/ml2/drivers/macvtap/__init__.py0000664000567000056710000000000013044372736030750 0ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/tests/functional/plugins/ml2/drivers/macvtap/agent/0000775000567000056710000000000013044373210027733 5ustar jenkinsjenkins00000000000000././@LongLink0000000000000000000000000000014700000000000011217 Lustar 
00000000000000neutron-8.4.0/neutron/tests/functional/plugins/ml2/drivers/macvtap/agent/test_macvtap_neutron_agent.pyneutron-8.4.0/neutron/tests/functional/plugins/ml2/drivers/macvtap/agent/test_macvtap_neutron_agent.0000664000567000056710000000300613044372760035366 0ustar jenkinsjenkins00000000000000# Copyright (c) 2016 IBM Corp. # # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron.common import constants from neutron.plugins.ml2.drivers.macvtap.agent import macvtap_neutron_agent from neutron.tests.common import net_helpers from neutron.tests.functional import base as functional_base class MacvtapAgentTestCase(functional_base.BaseSudoTestCase): def setUp(self): super(MacvtapAgentTestCase, self).setUp() self.mgr = macvtap_neutron_agent.MacvtapManager({}) def test_get_all_devices(self): # Veth is simulating the hosts eth device. In this test it is used as # src_dev for the macvtap veth1, veth2 = self.useFixture(net_helpers.VethFixture()).ports macvtap = self.useFixture(net_helpers.MacvtapFixture( src_dev=veth1.name, mode='bridge', prefix=constants.MACVTAP_DEVICE_PREFIX)).ip_dev self.assertEqual(set([macvtap.link.address]), self.mgr.get_all_devices()) neutron-8.4.0/neutron/tests/functional/plugins/ml2/drivers/macvtap/agent/__init__.py0000664000567000056710000000000013044372736032046 0ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/tests/functional/plugins/ml2/test_plugin.py0000664000567000056710000000664313044372760026455 0ustar jenkinsjenkins00000000000000# Copyright (c) 2016 Red Hat, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
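# The port-binding tests below register an OVS agent row directly in the
# database via test helpers so that ML2 can bind ports against it; killing
# and later reviving that agent exercises the binding-retry path.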
from neutron.common import constants from neutron import context from neutron.db import agents_db from neutron.extensions import portbindings from neutron.tests.common import helpers from neutron.tests.unit.plugins.ml2 import base as ml2_test_base DEVICE_OWNER_COMPUTE = constants.DEVICE_OWNER_COMPUTE_PREFIX + 'fake' class TestMl2PortBinding(ml2_test_base.ML2TestFramework, agents_db.AgentDbMixin): def setUp(self): super(TestMl2PortBinding, self).setUp() self.admin_context = context.get_admin_context() self.host_args = {portbindings.HOST_ID: helpers.HOST, 'admin_state_up': True} def test_port_bind_successfully(self): helpers.register_ovs_agent(host=helpers.HOST) with self.network() as network: with self.subnet(network=network) as subnet: with self.port( subnet=subnet, device_owner=DEVICE_OWNER_COMPUTE, arg_list=(portbindings.HOST_ID, 'admin_state_up',), **self.host_args) as port: # Note: Port creation invokes _bind_port_if_needed(), # therefore it is all we need in order to test a successful # binding self.assertEqual(port['port']['binding:vif_type'], portbindings.VIF_TYPE_OVS) def test_port_bind_retry(self): agent = helpers.register_ovs_agent(host=helpers.HOST) helpers.kill_agent(agent_id=agent.id) with self.network() as network: with self.subnet(network=network) as subnet: with self.port( subnet=subnet, device_owner=DEVICE_OWNER_COMPUTE, arg_list=(portbindings.HOST_ID, 'admin_state_up',), **self.host_args) as port: # Since the agent is dead, expect binding to fail self.assertEqual(port['port']['binding:vif_type'], portbindings.VIF_TYPE_BINDING_FAILED) helpers.revive_agent(agent.id) # When an agent starts, The RPC call get_device_details() # will invoke get_bound_port_context() which eventually use # _bind_port_if_needed() bound_context = self.plugin.get_bound_port_context( self.admin_context, port['port']['id'], helpers.HOST) # Since the agent is back online, expect binding to succeed self.assertEqual(bound_context.vif_type, portbindings.VIF_TYPE_OVS) self.assertEqual(bound_context.current['binding:vif_type'], portbindings.VIF_TYPE_OVS) neutron-8.4.0/neutron/tests/functional/scheduler/0000775000567000056710000000000013044373210023327 5ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/tests/functional/scheduler/__init__.py0000664000567000056710000000000013044372736025442 0ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/tests/functional/scheduler/test_l3_agent_scheduler.py0000664000567000056710000007643713044372760030524 0ustar jenkinsjenkins00000000000000# Copyright (c) 2015 Hewlett-Packard Development Company, L.P. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
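# The scheduler test classes in this module are data-driven via
# testscenarios: each class defines a ``scenarios`` list of (name, attrs)
# pairs, and the ``load_tests`` hook clones every test method once per
# scenario with those attrs bound to the test instance. A minimal,
# hypothetical illustration (not part of this module):
#
#     class ExampleTest(testtools.TestCase):
#         scenarios = [('one', {'count': 1}), ('two', {'count': 2})]
#
#         def test_count(self):  # runs twice, once per scenario
#             self.assertIn(self.count, (1, 2))
#
#     load_tests = testscenarios.load_tests_apply_scenarios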
import collections import random import testscenarios from oslo_utils import uuidutils from neutron.api.v2 import attributes from neutron.common import constants from neutron import context from neutron.db import external_net_db from neutron.scheduler import l3_agent_scheduler from neutron.services.l3_router import l3_router_plugin from neutron.tests.common import helpers from neutron.tests.unit.db import test_db_base_plugin_v2 _uuid = uuidutils.generate_uuid PLUGIN_NAME = 'neutron.plugins.ml2.plugin.Ml2Plugin' # Required to generate tests from scenarios. Not compatible with nose. load_tests = testscenarios.load_tests_apply_scenarios class L3SchedulerBaseTest(test_db_base_plugin_v2.NeutronDbPluginV2TestCase): """Base class for functional test of L3 schedulers. Provides basic setup and utility functions. """ def setUp(self): super(L3SchedulerBaseTest, self).setUp(PLUGIN_NAME) self.l3_plugin = l3_router_plugin.L3RouterPlugin() self.adminContext = context.get_admin_context() self.adminContext.tenant_id = _uuid() def _create_l3_agent(self, host, context, agent_mode='legacy', state=True, ext_net_id=''): agent = helpers.register_l3_agent(host, agent_mode, ext_net_id=ext_net_id) helpers.set_agent_admin_state(agent.id, state) return agent def _create_router(self, name): router = {'name': name, 'admin_state_up': True, 'tenant_id': self.adminContext.tenant_id} return self.l3_plugin.create_router( self.adminContext, {'router': router}) def _create_legacy_agents(self, agent_count, down_agent_count): # Creates legacy l3 agents and sets admin state based on # down agent count. self.hosts = ['host-%s' % i for i in range(agent_count)] self.l3_agents = [self._create_l3_agent(self.hosts[i], self.adminContext, 'legacy', (i >= down_agent_count)) for i in range(agent_count)] def _create_routers(self, scheduled_router_count, expected_scheduled_router_count): routers = [] if (scheduled_router_count + expected_scheduled_router_count): for i in range(scheduled_router_count + expected_scheduled_router_count): router = self._create_router('schd_rtr' + str(i)) routers.append(router) else: # create at least one router to test scheduling routers.append(self._create_router('schd_rtr0')) return routers def _pre_scheduler_routers(self, scheduler, count): hosting_agents = [] # schedule routers before calling schedule: for i in range(count): router = self.routers[i] agent = random.choice(self.l3_agents) scheduler.bind_router(self.adminContext, router['id'], agent) hosting_agents.append(agent) return hosting_agents def _test_auto_schedule(self, expected_count): router_ids = [rtr['id'] for rtr in self.routers] did_it_schedule = False # Try scheduling on each host for host in self.hosts: did_it_schedule = self.scheduler.auto_schedule_routers( self.l3_plugin, self.adminContext, host, router_ids) if did_it_schedule: break if expected_count: self.assertTrue(did_it_schedule, 'Failed to schedule agent') else: self.assertFalse(did_it_schedule, 'Agent scheduled, not expected') class L3ChanceSchedulerTestCase(L3SchedulerBaseTest): """Test various scenarios for chance scheduler. agent_count Number of l3 agents (also number of hosts). down_agent_count Number of l3 agents which are down. scheduled_router_count Number of routers that have been previously scheduled. expected_scheduled_router_count Number of newly scheduled routers. 
""" scenarios = [ ('No routers scheduled if no agents are present', dict(agent_count=0, down_agent_count=0, scheduled_router_count=0, expected_scheduled_router_count=0)), ('No routers scheduled if it is already hosted', dict(agent_count=1, down_agent_count=0, scheduled_router_count=1, expected_scheduled_router_count=0)), ('No routers scheduled if all agents are down', dict(agent_count=2, down_agent_count=2, scheduled_router_count=0, expected_scheduled_router_count=0)), ('Router scheduled to the agent if router is not yet hosted', dict(agent_count=1, down_agent_count=0, scheduled_router_count=0, expected_scheduled_router_count=1)), ('Router scheduled to the agent even if it already hosts a router', dict(agent_count=1, down_agent_count=0, scheduled_router_count=1, expected_scheduled_router_count=1)), ] def setUp(self): super(L3ChanceSchedulerTestCase, self).setUp() self._create_legacy_agents(self.agent_count, self.down_agent_count) self.routers = self._create_routers(self.scheduled_router_count, self.expected_scheduled_router_count) self.scheduler = l3_agent_scheduler.ChanceScheduler() def test_chance_schedule_router(self): # Pre schedule routers self._pre_scheduler_routers(self.scheduler, self.scheduled_router_count) # schedule: actual_scheduled_agent = self.scheduler.schedule( self.l3_plugin, self.adminContext, self.routers[-1]['id']) if self.expected_scheduled_router_count: self.assertIsNotNone(actual_scheduled_agent, message='Failed to schedule agent') else: self.assertIsNone(actual_scheduled_agent, message='Agent scheduled but not expected') def test_auto_schedule_routers(self): # Pre schedule routers self._pre_scheduler_routers(self.scheduler, self.scheduled_router_count) # The test self._test_auto_schedule(self.expected_scheduled_router_count) class L3LeastRoutersSchedulerTestCase(L3SchedulerBaseTest): """Test various scenarios for least router scheduler. agent_count Number of l3 agents (also number of hosts). down_agent_count Number of l3 agents which are down. 
scheduled_router_count Number of routers that have been previously scheduled expected_scheduled_router_count Number of newly scheduled routers """ scenarios = [ ('No routers scheduled if no agents are present', dict(agent_count=0, down_agent_count=0, scheduled_router_count=0, expected_scheduled_router_count=0)), ('No routers scheduled if it is already hosted', dict(agent_count=1, down_agent_count=0, scheduled_router_count=1, expected_scheduled_router_count=1)), ('No routers scheduled if all agents are down', dict(agent_count=2, down_agent_count=2, scheduled_router_count=0, expected_scheduled_router_count=0)), ('Router scheduled to the agent if router is not yet hosted', dict(agent_count=1, down_agent_count=0, scheduled_router_count=0, expected_scheduled_router_count=1)), ('Router scheduled to the agent even if it already hosts a router', dict(agent_count=1, down_agent_count=0, scheduled_router_count=1, expected_scheduled_router_count=1)), ('Router is scheduled to agent hosting least routers', dict(agent_count=2, down_agent_count=0, scheduled_router_count=1, expected_scheduled_router_count=1)), ] def setUp(self): super(L3LeastRoutersSchedulerTestCase, self).setUp() self._create_legacy_agents(self.agent_count, self.down_agent_count) self.routers = self._create_routers(self.scheduled_router_count, self.expected_scheduled_router_count) self.scheduler = l3_agent_scheduler.LeastRoutersScheduler() def test_least_routers_schedule(self): # Pre schedule routers hosting_agents = self._pre_scheduler_routers(self.scheduler, self.scheduled_router_count) actual_scheduled_agent = self.scheduler.schedule( self.l3_plugin, self.adminContext, self.routers[-1]['id']) if self.expected_scheduled_router_count: # For case where there is just one agent: if self.agent_count == 1: self.assertEqual(actual_scheduled_agent.id, self.l3_agents[0].id) else: self.assertNotIn(actual_scheduled_agent.id, [x.id for x in hosting_agents], message='The expected agent was not scheduled') else: self.assertIsNone(actual_scheduled_agent, message='Expected no agent to be scheduled,' ' but it got scheduled') def test_auto_schedule_routers(self): # Pre schedule routers self._pre_scheduler_routers(self.scheduler, self.scheduled_router_count) # The test self._test_auto_schedule(self.expected_scheduled_router_count) class L3AZSchedulerBaseTest(test_db_base_plugin_v2.NeutronDbPluginV2TestCase): def setUp(self): core_plugin = 'neutron.plugins.ml2.plugin.Ml2Plugin' super(L3AZSchedulerBaseTest, self).setUp(plugin=core_plugin) self.l3_plugin = l3_router_plugin.L3RouterPlugin() self.l3_plugin.router_scheduler = None self.adminContext = context.get_admin_context() self.adminContext.tenant_id = '_func_test_tenant_' def _create_l3_agent(self, host, context, agent_mode='legacy', plugin=None, state=True, az='nova'): agent = helpers.register_l3_agent(host, agent_mode, az=az) helpers.set_agent_admin_state(agent.id, state) return agent def _create_legacy_agents(self, agent_count, down_agent_count, az): # Creates legacy l3 agents and sets admin state based on # down agent count. 
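        # Agents with index < down_agent_count are created admin-down (the
        # state argument is ``i >= down_agent_count``), so schedulers are
        # expected to skip them.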
hosts = ['%s-host-%s' % (az, i) for i in range(agent_count)] l3_agents = [ self._create_l3_agent(hosts[i], self.adminContext, 'legacy', self.l3_plugin, (i >= down_agent_count), az=az) for i in range(agent_count)] return l3_agents def _create_router(self, az_hints, ha): router = {'name': 'router1', 'admin_state_up': True, 'availability_zone_hints': az_hints, 'tenant_id': self._tenant_id} if ha: router['ha'] = True return self.l3_plugin.create_router( self.adminContext, {'router': router}) class L3AZLeastRoutersSchedulerTestCase(L3AZSchedulerBaseTest): """Test various scenarios for AZ router scheduler. az_count Number of AZs. router_az_hints Number of AZs in availability_zone_hints of the router. agent_count[each az] Number of l3 agents (also number of hosts). max_l3_agents_per_router Maximum number of agents on which a router will be scheduled. 0 means test for regular router. min_l3_agents_per_router Minimum number of agents on which a router will be scheduled. N/A for regular router test. down_agent_count[each az] Number of l3 agents which are down. expected_scheduled_agent_count[each az] Number of newly scheduled l3 agents. """ scenarios = [ ('Regular router, Scheduled specified AZ', dict(az_count=2, router_az_hints=1, agent_count=[1, 1], max_l3_agents_per_router=0, min_l3_agents_per_router=0, down_agent_count=[0, 0], expected_scheduled_agent_count=[1, 0])), ('HA router, Scheduled specified AZs', dict(az_count=3, router_az_hints=2, agent_count=[1, 1, 1], max_l3_agents_per_router=2, min_l3_agents_per_router=2, down_agent_count=[0, 0, 0], expected_scheduled_agent_count=[1, 1, 0])), ('HA router, max_l3_agents_per_routers > az_hints', dict(az_count=2, router_az_hints=2, agent_count=[2, 1], max_l3_agents_per_router=3, min_l3_agents_per_router=2, down_agent_count=[0, 0], expected_scheduled_agent_count=[2, 1])), ('HA router, not enough agents', dict(az_count=3, router_az_hints=2, agent_count=[2, 2, 2], max_l3_agents_per_router=3, min_l3_agents_per_router=2, down_agent_count=[1, 1, 0], expected_scheduled_agent_count=[1, 1, 0])), ] def test_schedule_router(self): scheduler = l3_agent_scheduler.AZLeastRoutersScheduler() ha = False if self.max_l3_agents_per_router: self.config(max_l3_agents_per_router=self.max_l3_agents_per_router) self.config(min_l3_agents_per_router=self.min_l3_agents_per_router) ha = True # create l3 agents for i in range(self.az_count): az = 'az%s' % i self._create_legacy_agents(self.agent_count[i], self.down_agent_count[i], az) # create router. # note that ha-router needs enough agents beforehand. az_hints = ['az%s' % i for i in range(self.router_az_hints)] router = self._create_router(az_hints, ha) scheduler.schedule(self.l3_plugin, self.adminContext, router['id']) # schedule returns only one agent. so get all agents scheduled. scheduled_agents = self.l3_plugin.get_l3_agents_hosting_routers( self.adminContext, [router['id']]) scheduled_azs = collections.defaultdict(int) for agent in scheduled_agents: scheduled_azs[agent['availability_zone']] += 1 for i in range(self.az_count): self.assertEqual(self.expected_scheduled_agent_count[i], scheduled_azs.get('az%s' % i, 0)) class L3AZAutoScheduleTestCaseBase(L3AZSchedulerBaseTest): """Test various scenarios for AZ router scheduler. az_count Number of AZs. router_az_hints Number of AZs in availability_zone_hints of the router. agent_az AZ of newly activated l3 agent. agent_count[each az] Number of l3 agents (also number of hosts). max_l3_agents_per_router Maximum number of agents on which a router will be scheduled. 
0 means test for regular router. min_l3_agents_per_router Minimum number of agents on which a router will be scheduled. N/A for regular router test. down_agent_count[each az] Number of l3 agents which are down. scheduled_agent_count[each az] Number of l3 agents that have been previously scheduled expected_scheduled_agent_count[each az] Number of newly scheduled l3 agents """ scenarios = [ ('Regular router, not scheduled, agent in specified AZ activated', dict(az_count=2, router_az_hints=1, agent_az='az0', agent_count=[1, 1], max_l3_agents_per_router=0, min_l3_agents_per_router=0, down_agent_count=[1, 1], scheduled_agent_count=[0, 0], expected_scheduled_agent_count=[1, 0])), ('Regular router, not scheduled, agent not in specified AZ activated', dict(az_count=2, router_az_hints=1, agent_az='az1', agent_count=[1, 1], max_l3_agents_per_router=0, min_l3_agents_per_router=0, down_agent_count=[1, 1], scheduled_agent_count=[0, 0], expected_scheduled_agent_count=[0, 0])), ('HA router, not scheduled, agent in specified AZ activated', dict(az_count=3, router_az_hints=2, agent_az='az1', agent_count=[1, 1, 1], max_l3_agents_per_router=2, min_l3_agents_per_router=2, down_agent_count=[0, 1, 0], scheduled_agent_count=[0, 0, 0], expected_scheduled_agent_count=[0, 1, 0])), ('HA router, not scheduled, agent not in specified AZ activated', dict(az_count=3, router_az_hints=2, agent_az='az2', agent_count=[1, 1, 1], max_l3_agents_per_router=2, min_l3_agents_per_router=2, down_agent_count=[0, 0, 1], scheduled_agent_count=[0, 0, 0], expected_scheduled_agent_count=[0, 0, 0])), ('HA router, partial scheduled, agent in specified AZ activated', dict(az_count=3, router_az_hints=2, agent_az='az1', agent_count=[1, 1, 1], max_l3_agents_per_router=2, min_l3_agents_per_router=2, down_agent_count=[0, 1, 0], scheduled_agent_count=[1, 0, 0], expected_scheduled_agent_count=[1, 1, 0])), ] def test_auto_schedule_router(self): scheduler = l3_agent_scheduler.AZLeastRoutersScheduler() ha = False if self.max_l3_agents_per_router: self.config(max_l3_agents_per_router=self.max_l3_agents_per_router) self.config(min_l3_agents_per_router=self.min_l3_agents_per_router) ha = True # create l3 agents l3_agents = {} for i in range(self.az_count): az = 'az%s' % i l3_agents[az] = self._create_legacy_agents( self.agent_count[i], self.down_agent_count[i], az) # create router. # note that ha-router needs enough agents beforehand. az_hints = ['az%s' % i for i in range(self.router_az_hints)] router = self._create_router(az_hints, ha) # schedule some agents before calling auto schedule for i in range(self.az_count): az = 'az%s' % i for j in range(self.scheduled_agent_count[i]): agent = l3_agents[az][j + self.down_agent_count[i]] scheduler.bind_router(self.adminContext, router['id'], agent) # activate down agent and call auto_schedule_routers activate_agent = l3_agents[self.agent_az][0] helpers.set_agent_admin_state(activate_agent['id'], admin_state_up=True) scheduler.auto_schedule_routers(self.l3_plugin, self.adminContext, activate_agent['host'], None) scheduled_agents = self.l3_plugin.get_l3_agents_hosting_routers( self.adminContext, [router['id']]) scheduled_azs = collections.defaultdict(int) for agent in scheduled_agents: scheduled_azs[agent['availability_zone']] += 1 for i in range(self.az_count): self.assertEqual(self.expected_scheduled_agent_count[i], scheduled_azs.get('az%s' % i, 0)) class L3DVRSchedulerBaseTest(L3SchedulerBaseTest): """Base class for functional test of DVR L3 schedulers. Provides basic setup and utility functions. 
""" def setUp(self): super(L3DVRSchedulerBaseTest, self).setUp() self.default_ext_net_id = _uuid() self.default_ext_subnet_id = _uuid() self.router_ext_net_id = _uuid() self.router_ext_subnet_id = _uuid() def _create_router(self, name, distributed, ext_net_id=None): router = {'name': name, 'admin_state_up': True, 'tenant_id': self.adminContext.tenant_id, 'distributed': distributed} if ext_net_id: router['external_gateway_info'] = {'network_id': ext_net_id} return self.l3_plugin.create_router(self.adminContext, {'router': router}) def _create_network(self, net_id, name=None, external=False): network_dict = {'tenant_id': self.adminContext.tenant_id, 'id': net_id, 'name': name, 'admin_state_up': True, 'shared': False, 'status': constants.NET_STATUS_ACTIVE} network = self.plugin.create_network(self.adminContext, {'network': network_dict}) if external: with self.adminContext.session.begin(): network = external_net_db.ExternalNetwork(network_id=net_id) self.adminContext.session.add(network) return network def _create_subnet(self, sub_id, network_id, cidr, gw_ip, name='test_sub'): subnet = {'tenant_id': self.adminContext.tenant_id, 'id': sub_id, 'name': name, 'network_id': network_id, 'ip_version': 4, 'cidr': cidr, 'enable_dhcp': False, 'gateway_ip': gw_ip, 'shared': False, 'allocation_pools': attributes.ATTR_NOT_SPECIFIED, 'dns_nameservers': attributes.ATTR_NOT_SPECIFIED, 'host_routes': attributes.ATTR_NOT_SPECIFIED} return self.plugin.create_subnet(self.adminContext, {'subnet': subnet}) class L3DVRSchedulerTestCase(L3DVRSchedulerBaseTest): """Test various scenarios for L3 DVR schedulers: agent_mode L3 agent mode. second_agent_mode Second L3 agent mode for scenarios with two agents. agent_has_ext_network Is there external network on the host. router_is_distributed Is router distributed. router_already_hosted Is router already hosted. router_has_ext_gw Does router have external gateway. router_agent_have_same_ext_net Do router and agent have the same external network. expected_router_scheduled To verify do we expect router to get scheduled. 
""" def get_scenario(agent_mode=constants.L3_AGENT_MODE_DVR_SNAT, second_agent_mode=None, agent_has_ext_network=False, router_is_distributed=False, router_already_hosted=False, router_has_ext_gw=False, router_agent_have_same_ext_net=False, expected_router_scheduled=False): return dict(agent_mode=agent_mode, second_agent_mode=second_agent_mode, agent_has_ext_network=agent_has_ext_network, router_is_distributed=router_is_distributed, router_already_hosted=router_already_hosted, router_has_ext_gw=router_has_ext_gw, router_agent_have_same_ext_net=router_agent_have_same_ext_net, expected_router_scheduled=expected_router_scheduled) scenarios = [ ('Legacy router not scheduled on dvr agent', get_scenario(agent_mode=constants.L3_AGENT_MODE_DVR)), ('Legacy router scheduled on dvr_snat agent', get_scenario(expected_router_scheduled=True)), ('Distributed router not scheduled on legacy agent', get_scenario(agent_mode=constants.L3_AGENT_MODE_LEGACY, router_is_distributed=True)), ('Distributed router not scheduled on dvr agent', get_scenario(agent_mode=constants.L3_AGENT_MODE_DVR, router_is_distributed=True)), ('Distributed router scheduled on dvr_snat agent', get_scenario(router_is_distributed=True, expected_router_scheduled=True)), ('Already hosted legacy router not scheduled on dvr agent', get_scenario(agent_mode=constants.L3_AGENT_MODE_DVR, router_already_hosted=True)), ('Already hosted legacy router not scheduled on dvr_snat agent', get_scenario(router_already_hosted=True)), ('Already hosted distributed router not scheduled on legacy agent', get_scenario(agent_mode=constants.L3_AGENT_MODE_LEGACY, router_already_hosted=True, router_is_distributed=True)), ('Already hosted distributed router not scheduled on dvr agent', get_scenario(agent_mode=constants.L3_AGENT_MODE_DVR, router_is_distributed=True, router_already_hosted=True)), ('Already hosted distributed router not scheduled on dvr_snat agent', get_scenario(router_is_distributed=True, router_already_hosted=True)), ('Already hosted legacy router not scheduled on additional dvr agent', get_scenario(agent_mode=constants.L3_AGENT_MODE_LEGACY, second_agent_mode=constants.L3_AGENT_MODE_DVR_SNAT, router_already_hosted=True)), ('Distributed router not scheduled if it is on a different ' 'external network than the dvr_snat agent', get_scenario(agent_has_ext_network=True, router_is_distributed=True, router_has_ext_gw=True, router_agent_have_same_ext_net=False)), ] def setUp(self): super(L3DVRSchedulerTestCase, self).setUp() agent_cnt = 2 if self.second_agent_mode else 1 # create hosts for each agent self.hosts = ['host-%s' % i for i in range(agent_cnt)] # create default external network self._create_network(self.default_ext_net_id, name='_test-ext-net', external=True) self._create_subnet(self.default_ext_subnet_id, self.default_ext_net_id, '10.10.9.0/24', '10.10.9.1', '_test-ext-net-subnet') if self.router_has_ext_gw and not self.router_agent_have_same_ext_net: # for the test cases in which router and agent are not on same # external network, we create an external network for router self._create_network(self.router_ext_net_id, name='_test-ext-net2', external=True) self._create_subnet(self.router_ext_subnet_id, self.router_ext_net_id, '10.10.8.0/24', '10.10.8.1', '_test-ext-net2-subnet') # create agents: self.l3_agents = [self._create_l3_agent(self.hosts[0], self.adminContext, self.agent_mode, True, self.default_ext_net_id if self.agent_has_ext_network else '')] if self.second_agent_mode: self.l3_agents.append(self._create_l3_agent(self.hosts[1], 
self.adminContext, self.second_agent_mode, True, self.default_ext_net_id if self.agent_has_ext_network else '')) # The router to schedule: self.router_to_schedule = self._create_router_to_schedule() def _create_router_to_schedule(self): router_to_schedule = None if self.router_has_ext_gw: if self.router_agent_have_same_ext_net: router_to_schedule = self._create_router('schd_rtr', self.router_is_distributed, self.default_ext_net_id) else: router_to_schedule = self._create_router('schd_rtr', self.router_is_distributed, self.router_ext_net_id) else: router_to_schedule = self._create_router('schd_rtr', self.router_is_distributed) return router_to_schedule def _test_schedule_router(self): if self.router_already_hosted: self.scheduler.bind_router(self.adminContext, self.router_to_schedule['id'], self.l3_agents[0]) # schedule: actual_scheduled_agent = self.scheduler.schedule( self.l3_plugin, self.adminContext, self.router_to_schedule['id']) # check for router scheduling: self.assertEqual(self.expected_router_scheduled, bool(actual_scheduled_agent), message='Failed to schedule agent') def _test_auto_schedule_routers(self): if self.router_already_hosted: self.scheduler.bind_router(self.adminContext, self.router_to_schedule['id'], self.l3_agents[0]) did_it_schedule = False # schedule: for host in self.hosts: did_it_schedule = self.scheduler.auto_schedule_routers( self.l3_plugin, self.adminContext, host, [self.router_to_schedule['id']]) if did_it_schedule: break if self.router_already_hosted: self.assertFalse(did_it_schedule, 'Agent pre scheduled, yet no binding found!') elif self.expected_router_scheduled: self.assertTrue(did_it_schedule, 'Agent not scheduled, not expected') else: self.assertFalse(did_it_schedule, 'Agent scheduled, not expected') def test_least_routers_schedule_router(self): self.scheduler = l3_agent_scheduler.LeastRoutersScheduler() self._test_schedule_router() def test_least_routers_auto_schedule_routers(self): self.scheduler = l3_agent_scheduler.LeastRoutersScheduler() self._test_auto_schedule_routers() def test_chance_schedule_router(self): self.scheduler = l3_agent_scheduler.ChanceScheduler() self._test_schedule_router() def test_chance_auto_schedule_routers(self): self.scheduler = l3_agent_scheduler.ChanceScheduler() self._test_auto_schedule_routers() neutron-8.4.0/neutron/tests/functional/scheduler/test_dhcp_agent_scheduler.py0000664000567000056710000005631413044372760031114 0ustar jenkinsjenkins00000000000000# Copyright (c) 2015 Red Hat, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import collections from operator import attrgetter import six import testscenarios from neutron import context from neutron.db import agents_db from neutron.db import agentschedulers_db from neutron.db import common_db_mixin from neutron.scheduler import dhcp_agent_scheduler from neutron.tests.unit.scheduler import (test_dhcp_agent_scheduler as test_dhcp_sch) # Required to generate tests from scenarios. Not compatible with nose. 
load_tests = testscenarios.load_tests_apply_scenarios class BaseTestScheduleNetwork(object): """Base class which defines scenarios for schedulers. agent_count Number of dhcp agents (also number of hosts). max_agents_per_network Maximum DHCP Agents that can be scheduled for a network. scheduled_agent_count Number of agents the network has previously scheduled down_agent_count Number of dhcp agents which are down expected_scheduled_agent_count Number of scheduled agents the schedule() should return or 'None' if the schedule() cannot schedule the network. """ scenarios = [ ('No agents scheduled if no agents are present', dict(agent_count=0, max_agents_per_network=1, scheduled_agent_count=0, down_agent_count=0, expected_scheduled_agent_count=None)), ('No agents scheduled if network already hosted and' ' max_agents_per_network reached', dict(agent_count=1, max_agents_per_network=1, scheduled_agent_count=1, down_agent_count=0, expected_scheduled_agent_count=None)), ('No agents scheduled if all agents are down', dict(agent_count=2, max_agents_per_network=1, scheduled_agent_count=0, down_agent_count=2, expected_scheduled_agent_count=None)), ('Agent scheduled to the network if network is not yet hosted', dict(agent_count=1, max_agents_per_network=1, scheduled_agent_count=0, down_agent_count=0, expected_scheduled_agent_count=1)), ('Additional Agents scheduled to the network if max_agents_per_network' ' is not yet reached', dict(agent_count=3, max_agents_per_network=3, scheduled_agent_count=1, down_agent_count=0, expected_scheduled_agent_count=2)), ('No agent scheduled if agent is dead', dict(agent_count=3, max_agents_per_network=3, scheduled_agent_count=1, down_agent_count=1, expected_scheduled_agent_count=1)), ] class TestChanceScheduleNetwork(test_dhcp_sch.TestDhcpSchedulerBaseTestCase, agentschedulers_db.DhcpAgentSchedulerDbMixin, agents_db.AgentDbMixin, common_db_mixin.CommonDbMixin, BaseTestScheduleNetwork): """Test various scenarios for ChanceScheduler.schedule.""" def test_schedule_network(self): self.config(dhcp_agents_per_network=self.max_agents_per_network) scheduler = dhcp_agent_scheduler.ChanceScheduler() # create dhcp agents hosts = ['host-%s' % i for i in range(self.agent_count)] dhcp_agents = self._create_and_set_agents_down( hosts, down_agent_count=self.down_agent_count) active_agents = dhcp_agents[self.down_agent_count:] # schedule some agents before calling schedule if self.scheduled_agent_count: # schedule the network schedule_agents = active_agents[:self.scheduled_agent_count] scheduler.resource_filter.bind(self.ctx, schedule_agents, self.network_id) actual_scheduled_agents = scheduler.schedule(self, self.ctx, self.network) if self.expected_scheduled_agent_count: self.assertEqual(self.expected_scheduled_agent_count, len(actual_scheduled_agents)) hosted_agents = self.list_dhcp_agents_hosting_network( self.ctx, self.network_id) self.assertEqual(self.scheduled_agent_count + len(actual_scheduled_agents), len(hosted_agents['agents'])) else: self.assertEqual([], actual_scheduled_agents) class TestWeightScheduleNetwork(test_dhcp_sch.TestDhcpSchedulerBaseTestCase, agentschedulers_db.DhcpAgentSchedulerDbMixin, agents_db.AgentDbMixin, common_db_mixin.CommonDbMixin, BaseTestScheduleNetwork): """Test various scenarios for WeightScheduler.schedule.""" def test_weight_schedule_network(self): self.config(dhcp_agents_per_network=self.max_agents_per_network) scheduler = dhcp_agent_scheduler.WeightScheduler() # create dhcp agents hosts = ['host-%s' % i for i in range(self.agent_count)] dhcp_agents 
= self._create_and_set_agents_down( hosts, down_agent_count=self.down_agent_count) active_agents = dhcp_agents[self.down_agent_count:] unscheduled_active_agents = list(active_agents) # schedule some agents before calling schedule if self.scheduled_agent_count: # schedule the network schedule_agents = active_agents[:self.scheduled_agent_count] scheduler.resource_filter.bind(self.ctx, schedule_agents, self.network_id) for agent in schedule_agents: unscheduled_active_agents.remove(agent) actual_scheduled_agents = scheduler.schedule(self, self.ctx, self.network) if self.expected_scheduled_agent_count: sorted_unscheduled_active_agents = sorted( unscheduled_active_agents, key=attrgetter('load'))[0:self.expected_scheduled_agent_count] self.assertItemsEqual( (agent['id'] for agent in actual_scheduled_agents), (agent['id'] for agent in sorted_unscheduled_active_agents)) self.assertEqual(self.expected_scheduled_agent_count, len(actual_scheduled_agents)) hosted_agents = self.list_dhcp_agents_hosting_network( self.ctx, self.network_id) self.assertEqual(self.scheduled_agent_count + len(actual_scheduled_agents), len(hosted_agents['agents'])) else: self.assertEqual([], actual_scheduled_agents) class TestAutoSchedule(test_dhcp_sch.TestDhcpSchedulerBaseTestCase, agentschedulers_db.DhcpAgentSchedulerDbMixin, agents_db.AgentDbMixin, common_db_mixin.CommonDbMixin): """Test various scenarios for ChanceScheduler.auto_schedule_networks. Below is the brief description of the scenario variables -------------------------------------------------------- agent_count number of DHCP agents (also number of hosts). max_agents_per_network Maximum DHCP Agents that can be scheduled for a network. network_count Number of networks. networks_with_dhcp_disabled List of networks with dhcp disabled hosted_networks A mapping of agent id to the ids of the networks that they should be initially hosting. expected_auto_schedule_return_value Expected return value of 'auto_schedule_networks'. expected_hosted_networks This stores the expected networks that should have been scheduled (or that could have already been scheduled) for each agent after the 'auto_schedule_networks' function is called. no_network_with_az_match If this parameter is True, there is no unscheduled network with availability_zone_hints matches to an availability_zone of agents to be scheduled. The default is False. 
""" scenarios = [ ('Agent scheduled to the network if network is not yet hosted', dict(agent_count=1, max_agents_per_network=1, network_count=1, networks_with_dhcp_disabled=[], hosted_networks={}, expected_auto_schedule_return_value=True, expected_hosted_networks={'agent-0': ['network-0']})), ('No agent scheduled if no networks are present', dict(agent_count=1, max_agents_per_network=1, network_count=0, networks_with_dhcp_disabled=[], hosted_networks={}, expected_auto_schedule_return_value=False, expected_hosted_networks={'agent-0': []})), ('Agents scheduled to the networks if networks are not yet hosted', dict(agent_count=2, max_agents_per_network=3, network_count=2, networks_with_dhcp_disabled=[], hosted_networks={}, expected_auto_schedule_return_value=True, expected_hosted_networks={'agent-0': ['network-0', 'network-1'], 'agent-1': ['network-0', 'network-1']})), ('No new agents scheduled if networks are already hosted', dict(agent_count=2, max_agents_per_network=3, network_count=2, networks_with_dhcp_disabled=[], hosted_networks={'agent-0': ['network-0', 'network-1'], 'agent-1': ['network-0', 'network-1']}, expected_auto_schedule_return_value=True, expected_hosted_networks={'agent-0': ['network-0', 'network-1'], 'agent-1': ['network-0', 'network-1']})), ('Additional agents scheduled to the networks if' ' max_agents_per_network is not yet reached', dict(agent_count=4, max_agents_per_network=3, network_count=4, networks_with_dhcp_disabled=[], hosted_networks={'agent-0': ['network-0', 'network-1'], 'agent-1': ['network-0'], 'agent-2': ['network-2'], 'agent-3': ['network-0', 'network-2']}, expected_auto_schedule_return_value=True, expected_hosted_networks={'agent-0': ['network-0', 'network-1', 'network-2', 'network-3'], 'agent-1': ['network-0', 'network-1', 'network-2', 'network-3'], 'agent-2': ['network-1', 'network-2', 'network-3'], 'agent-3': ['network-0', 'network-1', 'network-2', 'network-3']})), ('No agents scheduled if networks already hosted and' ' max_agents_per_network reached', dict(agent_count=4, max_agents_per_network=1, network_count=4, networks_with_dhcp_disabled=[], hosted_networks={'agent-0': ['network-0'], 'agent-1': ['network-2'], 'agent-2': ['network-1'], 'agent-3': ['network-3']}, expected_auto_schedule_return_value=True, expected_hosted_networks={'agent-0': ['network-0'], 'agent-1': ['network-2'], 'agent-2': ['network-1'], 'agent-3': ['network-3']})), ('No agents scheduled to the network with dhcp disabled', dict(agent_count=2, max_agents_per_network=3, network_count=2, networks_with_dhcp_disabled=['network-1'], hosted_networks={}, expected_auto_schedule_return_value=True, expected_hosted_networks={'agent-0': ['network-0'], 'agent-1': ['network-0']})), ('No agents scheduled if all networks have dhcp disabled', dict(agent_count=2, max_agents_per_network=3, network_count=2, networks_with_dhcp_disabled=['network-0', 'network-1'], hosted_networks={}, expected_auto_schedule_return_value=False, expected_hosted_networks={'agent-0': [], 'agent-1': []})), ('No agents scheduled if unscheduled network does not match AZ', dict(agent_count=1, max_agents_per_network=1, network_count=1, networks_with_dhcp_disabled=[], hosted_networks={}, expected_auto_schedule_return_value=True, expected_hosted_networks={'agent-0': []}, no_network_with_az_match=True)), ] def _strip_host_index(self, name): """Strips the host index. Eg. if name = '2-agent-3', then 'agent-3' is returned. """ return name[name.find('-') + 1:] def _extract_index(self, name): """Extracts the index number and returns. Eg. 
if name = '2-agent-3', then 3 is returned """ return int(name.split('-')[-1]) def get_subnets(self, context, fields=None): subnets = [] for net_id in self._networks: enable_dhcp = (not self._strip_host_index(net_id) in self.networks_with_dhcp_disabled) subnets.append({'network_id': net_id, 'enable_dhcp': enable_dhcp}) return subnets def get_network(self, context, net_id): az_hints = [] if getattr(self, 'no_network_with_az_match', False): az_hints = ['not-match'] return {'availability_zone_hints': az_hints} def _get_hosted_networks_on_dhcp_agent(self, agent_id): query = self.ctx.session.query( agentschedulers_db.NetworkDhcpAgentBinding.network_id) query = query.filter( agentschedulers_db.NetworkDhcpAgentBinding.dhcp_agent_id == agent_id) return [item[0] for item in query] def _test_auto_schedule(self, host_index): self.config(dhcp_agents_per_network=self.max_agents_per_network) scheduler = dhcp_agent_scheduler.ChanceScheduler() self.ctx = context.get_admin_context() msg = 'host_index = %s' % host_index # create dhcp agents hosts = ['%s-agent-%s' % (host_index, i) for i in range(self.agent_count)] dhcp_agents = self._create_and_set_agents_down(hosts) # create networks self._networks = ['%s-network-%s' % (host_index, i) for i in range(self.network_count)] self._save_networks(self._networks) # pre schedule the networks to the agents defined in # self.hosted_networks before calling auto_schedule_network for agent, networks in six.iteritems(self.hosted_networks): agent_index = self._extract_index(agent) for net in networks: net_index = self._extract_index(net) scheduler.resource_filter.bind(self.ctx, [dhcp_agents[agent_index]], self._networks[net_index]) retval = scheduler.auto_schedule_networks(self, self.ctx, hosts[host_index]) self.assertEqual(self.expected_auto_schedule_return_value, retval, message=msg) agent_id = dhcp_agents[host_index].id hosted_networks = self._get_hosted_networks_on_dhcp_agent(agent_id) hosted_net_ids = [self._strip_host_index(net) for net in hosted_networks] expected_hosted_networks = self.expected_hosted_networks['agent-%s' % host_index] self.assertItemsEqual(hosted_net_ids, expected_hosted_networks, msg) def test_auto_schedule(self): for i in range(self.agent_count): self._test_auto_schedule(i) class TestAZAwareWeightScheduler(test_dhcp_sch.TestDhcpSchedulerBaseTestCase, agentschedulers_db.DhcpAgentSchedulerDbMixin, agents_db.AgentDbMixin, common_db_mixin.CommonDbMixin): """Test various scenarios for AZAwareWeightScheduler.schedule. az_count Number of AZs. network_az_hints Number of AZs in availability_zone_hints of the network. agent_count[each az] Number of dhcp agents (also number of hosts). max_agents_per_network Maximum DHCP Agents that can be scheduled for a network. scheduled_agent_count[each az] Number of agents the network has previously scheduled down_agent_count[each az] Number of dhcp agents which are down expected_scheduled_agent_count[each az] Number of scheduled agents the schedule() should return or 'None' if the schedule() cannot schedule the network. 
""" scenarios = [ ('Single hint, Single agent, Scheduled an agent of the specified AZ', dict(az_count=2, network_az_hints=1, agent_count=[1, 1], max_agents_per_network=1, scheduled_agent_count=[0, 0], down_agent_count=[0, 0], expected_scheduled_agent_count=[1, 0])), ('Multi hints, Multi agents Scheduled agents of the specified AZs', dict(az_count=3, network_az_hints=2, agent_count=[1, 1, 1], max_agents_per_network=2, scheduled_agent_count=[0, 0, 0], down_agent_count=[0, 0, 0], expected_scheduled_agent_count=[1, 1, 0])), ('Single hint, Multi agents, Scheduled agents of the specified AZ', dict(az_count=2, network_az_hints=1, agent_count=[2, 1], max_agents_per_network=2, scheduled_agent_count=[0, 0], down_agent_count=[0, 0], expected_scheduled_agent_count=[2, 0])), ('Multi hints, Multi agents, Only single AZ available', dict(az_count=2, network_az_hints=2, agent_count=[2, 1], max_agents_per_network=2, scheduled_agent_count=[0, 0], down_agent_count=[0, 1], expected_scheduled_agent_count=[2, 0])), ('Multi hints, Multi agents, Not enough agents', dict(az_count=3, network_az_hints=3, agent_count=[1, 1, 1], max_agents_per_network=3, scheduled_agent_count=[0, 0, 0], down_agent_count=[0, 1, 0], expected_scheduled_agent_count=[1, 0, 1])), ('Multi hints, Multi agents, Partially scheduled, Another AZ selected', dict(az_count=3, network_az_hints=2, agent_count=[1, 1, 1], max_agents_per_network=2, scheduled_agent_count=[1, 0, 0], down_agent_count=[0, 0, 0], expected_scheduled_agent_count=[0, 1, 0])), ('No hint, Scheduled independent to AZ', dict(az_count=3, network_az_hints=0, agent_count=[1, 1, 1], max_agents_per_network=3, scheduled_agent_count=[0, 0, 0], down_agent_count=[0, 0, 0], expected_scheduled_agent_count=[1, 1, 1])), ] def _set_network_az_hints(self): self.network['availability_zone_hints'] = [] for i in range(self.network_az_hints): self.network['availability_zone_hints'].append('az%s' % i) def test_schedule_network(self): self.config(dhcp_agents_per_network=self.max_agents_per_network) scheduler = dhcp_agent_scheduler.AZAwareWeightScheduler() self._set_network_az_hints() # create dhcp agents for i in range(self.az_count): az = 'az%s' % i hosts = ['%s-host-%s' % (az, j) for j in range(self.agent_count[i])] dhcp_agents = self._create_and_set_agents_down( hosts, down_agent_count=self.down_agent_count[i], az=az) active_agents = dhcp_agents[self.down_agent_count[i]:] # schedule some agents before calling schedule if self.scheduled_agent_count[i]: # schedule the network schedule_agents = active_agents[:self.scheduled_agent_count[i]] scheduler.resource_filter.bind( self.ctx, schedule_agents, self.network_id) actual_scheduled_agents = scheduler.schedule(self, self.ctx, self.network) scheduled_azs = collections.defaultdict(int) for agent in actual_scheduled_agents: scheduled_azs[agent['availability_zone']] += 1 hosted_agents = self.list_dhcp_agents_hosting_network( self.ctx, self.network_id) hosted_azs = collections.defaultdict(int) for agent in hosted_agents['agents']: hosted_azs[agent['availability_zone']] += 1 for i in range(self.az_count): self.assertEqual(self.expected_scheduled_agent_count[i], scheduled_azs.get('az%s' % i, 0)) self.assertEqual(self.scheduled_agent_count[i] + scheduled_azs.get('az%s' % i, 0), hosted_azs.get('az%s' % i, 0)) neutron-8.4.0/neutron/tests/functional/services/0000775000567000056710000000000013044373210023174 5ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/tests/functional/services/__init__.py0000664000567000056710000000000013044372736025307 0ustar 
jenkinsjenkins00000000000000neutron-8.4.0/neutron/tests/functional/services/l3_router/0000775000567000056710000000000013044373210025112 5ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/tests/functional/services/l3_router/__init__.py0000664000567000056710000000000013044372736027225 0ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/tests/functional/services/l3_router/test_l3_dvr_ha_router_plugin.py0000664000567000056710000003423213044372760033357 0ustar jenkinsjenkins00000000000000# Copyright (c) 2016 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import mock from neutron.common import constants from neutron.common import topics from neutron.extensions import external_net from neutron.extensions import l3_ext_ha_mode from neutron.extensions import portbindings from neutron.tests.common import helpers from neutron.tests.functional.services.l3_router import \ test_l3_dvr_router_plugin DEVICE_OWNER_COMPUTE = constants.DEVICE_OWNER_COMPUTE_PREFIX + 'fake' class L3DvrHATestCase(test_l3_dvr_router_plugin.L3DvrTestCase): def setUp(self): super(L3DvrHATestCase, self).setUp() self.l3_agent_2 = helpers.register_l3_agent( host="standby", agent_mode=constants.L3_AGENT_MODE_DVR_SNAT) def _create_router(self, distributed=True, ha=True): return (super(L3DvrHATestCase, self). 
_create_router(distributed=distributed, ha=ha)) def test_update_router_db_cvr_to_dvrha(self): router = self._create_router(distributed=False, ha=False) self.assertRaises( l3_ext_ha_mode.UpdateToDvrHamodeNotSupported, self.l3_plugin.update_router, self.context, router['id'], {'router': {'distributed': True, 'ha': True}} ) router = self.l3_plugin.get_router(self.context, router['id']) self.assertFalse(router['distributed']) self.assertFalse(router['ha']) def test_update_router_db_dvrha_to_cvr(self): router = self._create_router(distributed=True, ha=True) self.assertRaises( l3_ext_ha_mode.DVRmodeUpdateOfDvrHaNotSupported, self.l3_plugin.update_router, self.context, router['id'], {'router': {'distributed': False, 'ha': False}} ) router = self.l3_plugin.get_router(self.context, router['id']) self.assertTrue(router['distributed']) self.assertTrue(router['ha']) def test_update_router_db_dvrha_to_dvr(self): router = self._create_router(distributed=True, ha=True) self.l3_plugin.update_router( self.context, router['id'], {'router': {'admin_state_up': False}}) self.assertRaises( l3_ext_ha_mode.HAmodeUpdateOfDvrHaNotSupported, self.l3_plugin.update_router, self.context, router['id'], {'router': {'distributed': True, 'ha': False}} ) router = self.l3_plugin.get_router(self.context, router['id']) self.assertTrue(router['distributed']) self.assertTrue(router['ha']) def test_update_router_db_dvrha_to_cvrha(self): router = self._create_router(distributed=True, ha=True) self.assertRaises( l3_ext_ha_mode.DVRmodeUpdateOfDvrHaNotSupported, self.l3_plugin.update_router, self.context, router['id'], {'router': {'distributed': False, 'ha': True}} ) router = self.l3_plugin.get_router(self.context, router['id']) self.assertTrue(router['distributed']) self.assertTrue(router['ha']) def test_update_router_db_dvr_to_dvrha(self): router = self._create_router(distributed=True, ha=False) self.assertRaises( l3_ext_ha_mode.HAmodeUpdateOfDvrNotSupported, self.l3_plugin.update_router, self.context, router['id'], {'router': {'distributed': True, 'ha': True}} ) router = self.l3_plugin.get_router(self.context, router['id']) self.assertTrue(router['distributed']) self.assertFalse(router['ha']) def test_update_router_db_cvrha_to_dvrha(self): router = self._create_router(distributed=False, ha=True) self.assertRaises( l3_ext_ha_mode.DVRmodeUpdateOfHaNotSupported, self.l3_plugin.update_router, self.context, router['id'], {'router': {'distributed': True, 'ha': True}} ) router = self.l3_plugin.get_router(self.context, router['id']) self.assertFalse(router['distributed']) self.assertTrue(router['ha']) def _assert_router_is_hosted_on_both_dvr_snat_agents(self, router): agents = self.l3_plugin.list_l3_agents_hosting_router( self.context, router['id']) self.assertEqual(2, len(agents['agents'])) dvr_snat_agents = self.l3_plugin.get_ha_router_port_bindings( self.context, [router['id']]) dvr_snat_agent_ids = [a.l3_agent_id for a in dvr_snat_agents] self.assertIn(self.l3_agent['id'], dvr_snat_agent_ids) self.assertIn(self.l3_agent_2['id'], dvr_snat_agent_ids) def test_router_notifications(self): """Check that notifications go to the right hosts in different conditions """ # register l3 agents in dvr mode in addition to existing dvr_snat agent HOST1, HOST2, HOST3 = 'host1', 'host2', 'host3' for host in [HOST1, HOST2, HOST3]: helpers.register_l3_agent( host=host, agent_mode=constants.L3_AGENT_MODE_DVR) router = self._create_router(distributed=True, ha=True) arg_list = (portbindings.HOST_ID,) with self.subnet() as ext_subnet, \ 
self.subnet(cidr='20.0.0.0/24') as subnet1, \ self.subnet(cidr='30.0.0.0/24') as subnet2, \ self.subnet(cidr='40.0.0.0/24') as subnet3, \ self.port(subnet=subnet1, device_owner=DEVICE_OWNER_COMPUTE, arg_list=arg_list, **{portbindings.HOST_ID: HOST1}), \ self.port(subnet=subnet2, device_owner=constants.DEVICE_OWNER_DHCP, arg_list=arg_list, **{portbindings.HOST_ID: HOST2}), \ self.port(subnet=subnet3, device_owner=constants.DEVICE_OWNER_NEUTRON_PREFIX, arg_list=arg_list, **{portbindings.HOST_ID: HOST3}): # make net external ext_net_id = ext_subnet['subnet']['network_id'] self._update('networks', ext_net_id, {'network': {external_net.EXTERNAL: True}}) with mock.patch.object(self.l3_plugin.l3_rpc_notifier.client, 'prepare') as mock_prepare: # add external gateway to router self.l3_plugin.update_router( self.context, router['id'], {'router': { 'external_gateway_info': {'network_id': ext_net_id}}}) # router has no interfaces so notification goes # to only dvr_snat agents (self.l3_agent and self.l3_agent_2) self.assertEqual(2, mock_prepare.call_count) expected = [mock.call(server=self.l3_agent['host'], topic=topics.L3_AGENT, version='1.1'), mock.call(server=self.l3_agent_2['host'], topic=topics.L3_AGENT, version='1.1')] mock_prepare.assert_has_calls(expected, any_order=True) mock_prepare.reset_mock() self.l3_plugin.add_router_interface( self.context, router['id'], {'subnet_id': subnet1['subnet']['id']}) self.assertEqual(3, mock_prepare.call_count) expected = [mock.call(server=self.l3_agent['host'], topic=topics.L3_AGENT, version='1.1'), mock.call(server=self.l3_agent_2['host'], topic=topics.L3_AGENT, version='1.1'), mock.call(server=HOST1, topic=topics.L3_AGENT, version='1.1')] mock_prepare.assert_has_calls(expected, any_order=True) mock_prepare.reset_mock() self.l3_plugin.add_router_interface( self.context, router['id'], {'subnet_id': subnet2['subnet']['id']}) self.assertEqual(4, mock_prepare.call_count) expected = [mock.call(server=self.l3_agent['host'], topic=topics.L3_AGENT, version='1.1'), mock.call(server=self.l3_agent_2['host'], topic=topics.L3_AGENT, version='1.1'), mock.call(server=HOST1, topic=topics.L3_AGENT, version='1.1'), mock.call(server=HOST2, topic=topics.L3_AGENT, version='1.1')] mock_prepare.assert_has_calls(expected, any_order=True) mock_prepare.reset_mock() self.l3_plugin.add_router_interface( self.context, router['id'], {'subnet_id': subnet3['subnet']['id']}) # there are no dvr serviceable ports on HOST3, so notification # goes to the same hosts self.assertEqual(4, mock_prepare.call_count) expected = [mock.call(server=self.l3_agent['host'], topic=topics.L3_AGENT, version='1.1'), mock.call(server=self.l3_agent_2['host'], topic=topics.L3_AGENT, version='1.1'), mock.call(server=HOST1, topic=topics.L3_AGENT, version='1.1'), mock.call(server=HOST2, topic=topics.L3_AGENT, version='1.1')] mock_prepare.assert_has_calls(expected, any_order=True) def test_router_is_not_removed_from_snat_agent_on_interface_removal(self): """Check that dvr router is not removed from dvr_snat l3 agents on router interface removal """ router = self._create_router(distributed=True, ha=True) kwargs = {'arg_list': (external_net.EXTERNAL,), external_net.EXTERNAL: True} with self.subnet() as subnet, \ self.network(**kwargs) as ext_net, \ self.subnet(network=ext_net, cidr='20.0.0.0/24'): self.l3_plugin._update_router_gw_info( self.context, router['id'], {'network_id': ext_net['network']['id']}) self.l3_plugin.add_router_interface( self.context, router['id'], {'subnet_id': subnet['subnet']['id']}) 
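# Note (explanatory, inferred from the test's docstring and assertions):
# the router retains its external gateway here, so SNAT keeps it bound to
# the dvr_snat agents; the helper called next verifies the HA bindings on
# both registered dvr_snat agents, and is re-checked after the interface
# removal below.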
self._assert_router_is_hosted_on_both_dvr_snat_agents(router) with mock.patch.object(self.l3_plugin, '_l3_rpc_notifier') as l3_notifier: self.l3_plugin.remove_router_interface( self.context, router['id'], {'subnet_id': subnet['subnet']['id']}) self._assert_router_is_hosted_on_both_dvr_snat_agents(router) self.assertFalse(l3_notifier.router_removed_from_agent.called) def test_router_is_not_removed_from_snat_agent_on_dhcp_port_deletion(self): """Check that dvr router is not removed from l3 agent hosting SNAT for it on DHCP port removal """ router = self._create_router(distributed=True, ha=True) kwargs = {'arg_list': (external_net.EXTERNAL,), external_net.EXTERNAL: True} with self.network(**kwargs) as ext_net, \ self.subnet(network=ext_net), \ self.subnet(cidr='20.0.0.0/24') as subnet, \ self.port(subnet=subnet, device_owner=constants.DEVICE_OWNER_DHCP) as port: self.core_plugin.update_port( self.context, port['port']['id'], {'port': {'binding:host_id': self.l3_agent['host']}}) self.l3_plugin._update_router_gw_info( self.context, router['id'], {'network_id': ext_net['network']['id']}) self.l3_plugin.add_router_interface( self.context, router['id'], {'subnet_id': subnet['subnet']['id']}) # router should be scheduled to both dvr_snat l3 agents self._assert_router_is_hosted_on_both_dvr_snat_agents(router) notifier = self.l3_plugin.agent_notifiers[ constants.AGENT_TYPE_L3] with mock.patch.object( notifier, 'router_removed_from_agent', side_effect=Exception("BOOOOOOM!")) as remove_mock: self._delete('ports', port['port']['id']) # now when port is deleted the router still has external # gateway and should still be scheduled to the snat agent remove_mock.assert_not_called() self._assert_router_is_hosted_on_both_dvr_snat_agents(router) def test_update_router_db_centralized_to_distributed(self): self.skipTest('Valid for DVR-only routers') def test__get_router_ids_for_agent(self): self.skipTest('Valid for DVR-only routers') def test_router_auto_scheduling(self): self.skipTest('Valid for DVR-only routers') neutron-8.4.0/neutron/tests/functional/services/l3_router/test_l3_dvr_router_plugin.py0000664000567000056710000022613013044372760032707 0ustar jenkinsjenkins00000000000000# Copyright (c) 2015 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import mock from neutron.api.rpc.handlers import l3_rpc from neutron.api.v2 import attributes from neutron.common import constants from neutron.common import topics from neutron import context from neutron.extensions import external_net from neutron.extensions import portbindings from neutron.tests.common import helpers from neutron.tests.unit.plugins.ml2 import base as ml2_test_base DEVICE_OWNER_COMPUTE = constants.DEVICE_OWNER_COMPUTE_PREFIX + 'fake' class L3DvrTestCase(ml2_test_base.ML2TestFramework): def setUp(self): super(L3DvrTestCase, self).setUp() self.l3_agent = helpers.register_l3_agent( agent_mode=constants.L3_AGENT_MODE_DVR_SNAT) def _create_router(self, distributed=True, ha=False): return (super(L3DvrTestCase, self). 
_create_router(distributed=distributed, ha=ha)) def test_update_router_db_centralized_to_distributed(self): router = self._create_router(distributed=False) # router needs to be in admin state down in order to be upgraded to DVR self.l3_plugin.update_router( self.context, router['id'], {'router': {'admin_state_up': False}}) self.assertFalse(router['distributed']) self.l3_plugin.update_router( self.context, router['id'], {'router': {'distributed': True}}) router = self.l3_plugin.get_router(self.context, router['id']) self.assertTrue(router['distributed']) def test_get_device_owner_distributed_router_object(self): router = self._create_router() self.assertEqual( constants.DEVICE_OWNER_DVR_INTERFACE, self.l3_plugin._get_device_owner(self.context, router)) def test_get_device_owner_distributed_router_id(self): router = self._create_router() self.assertEqual( constants.DEVICE_OWNER_DVR_INTERFACE, self.l3_plugin._get_device_owner(self.context, router['id'])) def test_get_device_owner_centralized(self): router = self._create_router(distributed=False) self.assertEqual( constants.DEVICE_OWNER_ROUTER_INTF, self.l3_plugin._get_device_owner(self.context, router['id'])) def test_get_agent_gw_ports_exist_for_network_no_port(self): self.assertIsNone( self.l3_plugin._get_agent_gw_ports_exist_for_network( self.context, 'network_id', 'host', 'agent_id')) def _test_remove_router_interface_leaves_snat_intact(self, by_subnet): with self.subnet() as subnet1, \ self.subnet(cidr='20.0.0.0/24') as subnet2: kwargs = {'arg_list': (external_net.EXTERNAL,), external_net.EXTERNAL: True} with self.network(**kwargs) as ext_net, \ self.subnet(network=ext_net, cidr='30.0.0.0/24'): router = self._create_router() self.l3_plugin.add_router_interface( self.context, router['id'], {'subnet_id': subnet1['subnet']['id']}) self.l3_plugin.add_router_interface( self.context, router['id'], {'subnet_id': subnet2['subnet']['id']}) self.l3_plugin._update_router_gw_info( self.context, router['id'], {'network_id': ext_net['network']['id']}) snat_router_intfs = self.l3_plugin._get_snat_sync_interfaces( self.context, [router['id']]) self.assertEqual( 2, len(snat_router_intfs[router['id']])) if by_subnet: self.l3_plugin.remove_router_interface( self.context, router['id'], {'subnet_id': subnet1['subnet']['id']}) else: port = self.core_plugin.get_ports( self.context, filters={ 'network_id': [subnet1['subnet']['network_id']], 'device_owner': [constants.DEVICE_OWNER_DVR_INTERFACE]})[0] self.l3_plugin.remove_router_interface( self.context, router['id'], {'port_id': port['id']}) self.assertEqual( 1, len(self.l3_plugin._get_snat_sync_interfaces( self.context, [router['id']]))) def test_remove_router_interface_by_subnet_leaves_snat_intact(self): self._test_remove_router_interface_leaves_snat_intact(by_subnet=True) def test_remove_router_interface_by_port_leaves_snat_intact(self): self._test_remove_router_interface_leaves_snat_intact( by_subnet=False) def setup_create_agent_gw_port_for_network(self, network=None): if not network: network = self._make_network(self.fmt, '', True) network_id = network['network']['id'] port = self.core_plugin.create_port( self.context, {'port': {'tenant_id': '', 'network_id': network_id, 'mac_address': attributes.ATTR_NOT_SPECIFIED, 'fixed_ips': attributes.ATTR_NOT_SPECIFIED, 'device_id': self.l3_agent['id'], 'device_owner': constants.DEVICE_OWNER_AGENT_GW, portbindings.HOST_ID: '', 'admin_state_up': True, 'name': ''}}) return network_id, port def test_get_agent_gw_port_for_network(self): network_id, port = ( 
self.setup_create_agent_gw_port_for_network()) self.assertEqual( port['id'], self.l3_plugin._get_agent_gw_ports_exist_for_network( self.context, network_id, None, self.l3_agent['id'])['id']) def test_delete_agent_gw_port_for_network(self): network_id, port = ( self.setup_create_agent_gw_port_for_network()) self.l3_plugin.delete_floatingip_agent_gateway_port( self.context, "", network_id) self.assertIsNone( self.l3_plugin._get_agent_gw_ports_exist_for_network( self.context, network_id, "", self.l3_agent['id'])) def test_get_fip_sync_interfaces(self): self.setup_create_agent_gw_port_for_network() self.assertEqual( 1, len(self.l3_plugin._get_fip_sync_interfaces( self.context, self.l3_agent['id']))) def test_process_routers(self): router = self._create_router() result = self.l3_plugin._process_routers(self.context, [router]) self.assertEqual( router['id'], result[router['id']]['id']) def test_agent_gw_port_delete_when_last_gateway_for_ext_net_removed(self): kwargs = {'arg_list': (external_net.EXTERNAL,), external_net.EXTERNAL: True} net1 = self._make_network(self.fmt, 'net1', True) net2 = self._make_network(self.fmt, 'net2', True) subnet1 = self._make_subnet( self.fmt, net1, '10.1.0.1', '10.1.0.0/24', enable_dhcp=True) subnet2 = self._make_subnet( self.fmt, net2, '10.1.0.1', '10.1.0.0/24', enable_dhcp=True) ext_net = self._make_network(self.fmt, 'ext_net', True, **kwargs) self._make_subnet( self.fmt, ext_net, '20.0.0.1', '20.0.0.0/24', enable_dhcp=True) # Create first router and add an interface router1 = self._create_router() ext_net_id = ext_net['network']['id'] self.l3_plugin.add_router_interface( self.context, router1['id'], {'subnet_id': subnet1['subnet']['id']}) # Set gateway to first router self.l3_plugin._update_router_gw_info( self.context, router1['id'], {'network_id': ext_net_id}) # Create second router and add an interface router2 = self._create_router() self.l3_plugin.add_router_interface( self.context, router2['id'], {'subnet_id': subnet2['subnet']['id']}) # Set gateway to second router self.l3_plugin._update_router_gw_info( self.context, router2['id'], {'network_id': ext_net_id}) # Create an agent gateway port for the external network net_id, agent_gw_port = ( self.setup_create_agent_gw_port_for_network(network=ext_net)) # Check for agent gateway ports self.assertIsNotNone( self.l3_plugin._get_agent_gw_ports_exist_for_network( self.context, ext_net_id, "", self.l3_agent['id'])) self.l3_plugin._update_router_gw_info( self.context, router1['id'], {}) # Check for agent gateway port after deleting one of the gw self.assertIsNotNone( self.l3_plugin._get_agent_gw_ports_exist_for_network( self.context, ext_net_id, "", self.l3_agent['id'])) self.l3_plugin._update_router_gw_info( self.context, router2['id'], {}) # Check for agent gateway port after deleting last gw self.assertIsNone( self.l3_plugin._get_agent_gw_ports_exist_for_network( self.context, ext_net_id, "", self.l3_agent['id'])) def _test_create_floating_ip_agent_notification(self, dvr=True): with self.subnet() as ext_subnet,\ self.subnet(cidr='20.0.0.0/24') as int_subnet,\ self.port(subnet=int_subnet, device_owner=DEVICE_OWNER_COMPUTE) as int_port: # make net external ext_net_id = ext_subnet['subnet']['network_id'] self._update('networks', ext_net_id, {'network': {external_net.EXTERNAL: True}}) router = self._create_router(distributed=dvr) self.l3_plugin.update_router( self.context, router['id'], {'router': { 'external_gateway_info': {'network_id': ext_net_id}}}) self.l3_plugin.add_router_interface( self.context, router['id'], 
{'subnet_id': int_subnet['subnet']['id']}) floating_ip = {'floating_network_id': ext_net_id, 'router_id': router['id'], 'port_id': int_port['port']['id'], 'tenant_id': int_port['port']['tenant_id'], 'dns_name': '', 'dns_domain': ''} with mock.patch.object( self.l3_plugin, '_l3_rpc_notifier') as l3_notif: self.l3_plugin.create_floatingip( self.context, {'floatingip': floating_ip}) if dvr: l3_notif.routers_updated_on_host.assert_called_once_with( self.context, [router['id']], int_port['port'][portbindings.HOST_ID]) self.assertFalse(l3_notif.routers_updated.called) else: l3_notif.routers_updated.assert_called_once_with( self.context, [router['id']], None) self.assertFalse( l3_notif.routers_updated_on_host.called) def test_create_floating_ip_agent_notification(self): self._test_create_floating_ip_agent_notification() def test_create_floating_ip_agent_notification_non_dvr(self): self._test_create_floating_ip_agent_notification(dvr=False) def _test_update_floating_ip_agent_notification(self, dvr=True): with self.subnet() as ext_subnet,\ self.subnet(cidr='20.0.0.0/24') as int_subnet1,\ self.subnet(cidr='30.0.0.0/24') as int_subnet2,\ self.port(subnet=int_subnet1, device_owner=DEVICE_OWNER_COMPUTE) as int_port1,\ self.port(subnet=int_subnet2, device_owner=DEVICE_OWNER_COMPUTE) as int_port2: # locate internal ports on different hosts self.core_plugin.update_port( self.context, int_port1['port']['id'], {'port': {portbindings.HOST_ID: 'host1'}}) self.core_plugin.update_port( self.context, int_port2['port']['id'], {'port': {portbindings.HOST_ID: 'host2'}}) # and create l3 agents on corresponding hosts helpers.register_l3_agent(host='host1', agent_mode=constants.L3_AGENT_MODE_DVR) helpers.register_l3_agent(host='host2', agent_mode=constants.L3_AGENT_MODE_DVR) # make net external ext_net_id = ext_subnet['subnet']['network_id'] self._update('networks', ext_net_id, {'network': {external_net.EXTERNAL: True}}) router1 = self._create_router(distributed=dvr) router2 = self._create_router(distributed=dvr) for router in (router1, router2): self.l3_plugin.update_router( self.context, router['id'], {'router': { 'external_gateway_info': {'network_id': ext_net_id}}}) self.l3_plugin.add_router_interface( self.context, router1['id'], {'subnet_id': int_subnet1['subnet']['id']}) self.l3_plugin.add_router_interface( self.context, router2['id'], {'subnet_id': int_subnet2['subnet']['id']}) floating_ip = {'floating_network_id': ext_net_id, 'router_id': router1['id'], 'port_id': int_port1['port']['id'], 'tenant_id': int_port1['port']['tenant_id'], 'dns_name': '', 'dns_domain': ''} floating_ip = self.l3_plugin.create_floatingip( self.context, {'floatingip': floating_ip}) with mock.patch.object( self.l3_plugin, '_l3_rpc_notifier') as l3_notif: updated_floating_ip = {'router_id': router2['id'], 'port_id': int_port2['port']['id']} self.l3_plugin.update_floatingip( self.context, floating_ip['id'], {'floatingip': updated_floating_ip}) if dvr: self.assertEqual( 2, l3_notif.routers_updated_on_host.call_count) expected_calls = [ mock.call(self.context, [router1['id']], 'host1'), mock.call(self.context, [router2['id']], 'host2')] l3_notif.routers_updated_on_host.assert_has_calls( expected_calls) self.assertFalse(l3_notif.routers_updated.called) else: self.assertEqual( 2, l3_notif.routers_updated.call_count) expected_calls = [ mock.call(self.context, [router1['id']], None), mock.call(self.context, [router2['id']], None)] l3_notif.routers_updated.assert_has_calls( expected_calls) 
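# In the legacy (non-DVR) branch the notifications carry no target host,
# so the host-scoped notifier should never have been invoked; the check
# below confirms that.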
self.assertFalse(l3_notif.routers_updated_on_host.called) def test_update_floating_ip_agent_notification(self): self._test_update_floating_ip_agent_notification() def test_update_floating_ip_agent_notification_non_dvr(self): self._test_update_floating_ip_agent_notification(dvr=False) def _test_delete_floating_ip_agent_notification(self, dvr=True): with self.subnet() as ext_subnet,\ self.subnet(cidr='20.0.0.0/24') as int_subnet,\ self.port(subnet=int_subnet, device_owner=DEVICE_OWNER_COMPUTE) as int_port: # make net external ext_net_id = ext_subnet['subnet']['network_id'] self._update('networks', ext_net_id, {'network': {external_net.EXTERNAL: True}}) router = self._create_router(distributed=dvr) self.l3_plugin.update_router( self.context, router['id'], {'router': { 'external_gateway_info': {'network_id': ext_net_id}}}) self.l3_plugin.add_router_interface( self.context, router['id'], {'subnet_id': int_subnet['subnet']['id']}) floating_ip = {'floating_network_id': ext_net_id, 'router_id': router['id'], 'port_id': int_port['port']['id'], 'tenant_id': int_port['port']['tenant_id'], 'dns_name': '', 'dns_domain': ''} floating_ip = self.l3_plugin.create_floatingip( self.context, {'floatingip': floating_ip}) with mock.patch.object( self.l3_plugin, '_l3_rpc_notifier') as l3_notif: self.l3_plugin.delete_floatingip( self.context, floating_ip['id']) if dvr: l3_notif.routers_updated_on_host.assert_called_once_with( self.context, [router['id']], int_port['port'][portbindings.HOST_ID]) self.assertFalse(l3_notif.routers_updated.called) else: l3_notif.routers_updated.assert_called_once_with( self.context, [router['id']], None) self.assertFalse( l3_notif.routers_updated_on_host.called) def test_delete_floating_ip_agent_notification(self): self._test_delete_floating_ip_agent_notification() def test_delete_floating_ip_agent_notification_non_dvr(self): self._test_delete_floating_ip_agent_notification(dvr=False) def test_router_with_ipv4_and_multiple_ipv6_on_same_network(self): kwargs = {'arg_list': (external_net.EXTERNAL,), external_net.EXTERNAL: True} ext_net = self._make_network(self.fmt, '', True, **kwargs) self._make_subnet( self.fmt, ext_net, '10.0.0.1', '10.0.0.0/24', ip_version=4, enable_dhcp=True) self._make_subnet( self.fmt, ext_net, '2001:db8::1', '2001:db8::/64', ip_version=6, enable_dhcp=True) router1 = self._create_router() self.l3_plugin._update_router_gw_info( self.context, router1['id'], {'network_id': ext_net['network']['id']}) snat_router_intfs = self.l3_plugin._get_snat_sync_interfaces( self.context, [router1['id']]) self.assertEqual(0, len(snat_router_intfs[router1['id']])) private_net1 = self._make_network(self.fmt, 'net1', True) private_ipv6_subnet1 = self._make_subnet(self.fmt, private_net1, 'fd00::1', cidr='fd00::1/64', ip_version=6, ipv6_ra_mode='slaac', ipv6_address_mode='slaac') private_ipv6_subnet2 = self._make_subnet(self.fmt, private_net1, 'fd01::1', cidr='fd01::1/64', ip_version=6, ipv6_ra_mode='slaac', ipv6_address_mode='slaac') # Add the first IPv6 subnet to the router self.l3_plugin.add_router_interface( self.context, router1['id'], {'subnet_id': private_ipv6_subnet1['subnet']['id']}) # Check for the internal snat port interfaces snat_router_intfs = self.l3_plugin._get_snat_sync_interfaces( self.context, [router1['id']]) self.assertEqual(1, len(snat_router_intfs[router1['id']])) # Add the second IPv6 subnet to the router self.l3_plugin.add_router_interface( self.context, router1['id'], {'subnet_id': private_ipv6_subnet2['subnet']['id']}) # Check for the internal snat port 
interfaces snat_router_intfs = self.l3_plugin._get_snat_sync_interfaces( self.context, [router1['id']]) snat_intf_list = snat_router_intfs[router1['id']] fixed_ips = snat_intf_list[0]['fixed_ips'] self.assertEqual(1, len(snat_router_intfs[router1['id']])) self.assertEqual(2, len(fixed_ips)) # Now delete the router interface and it should update the # SNAT port with the right fixed_ips instead of deleting it. self.l3_plugin.remove_router_interface( self.context, router1['id'], {'subnet_id': private_ipv6_subnet2['subnet']['id']}) # Check for the internal snat port interfaces snat_router_intfs = self.l3_plugin._get_snat_sync_interfaces( self.context, [router1['id']]) snat_intf_list = snat_router_intfs[router1['id']] fixed_ips = snat_intf_list[0]['fixed_ips'] self.assertEqual(1, len(snat_router_intfs[router1['id']])) self.assertEqual(1, len(fixed_ips)) def test_allowed_addr_pairs_arp_update_for_port_with_original_owner(self): HOST1 = 'host1' helpers.register_l3_agent( host=HOST1, agent_mode=constants.L3_AGENT_MODE_DVR) router = self._create_router() private_net1 = self._make_network(self.fmt, 'net1', True) test_allocation_pools = [{'start': '10.1.0.2', 'end': '10.1.0.20'}] fixed_vrrp_ip = [{'ip_address': '10.1.0.201'}] kwargs = {'arg_list': (external_net.EXTERNAL,), external_net.EXTERNAL: True} ext_net = self._make_network(self.fmt, '', True, **kwargs) self._make_subnet( self.fmt, ext_net, '10.20.0.1', '10.20.0.0/24', ip_version=4, enable_dhcp=True) # Set gateway to router self.l3_plugin._update_router_gw_info( self.context, router['id'], {'network_id': ext_net['network']['id']}) private_subnet1 = self._make_subnet( self.fmt, private_net1, '10.1.0.1', cidr='10.1.0.0/24', ip_version=4, allocation_pools=test_allocation_pools, enable_dhcp=True) vrrp_port = self._make_port( self.fmt, private_net1['network']['id'], device_owner=constants.DEVICE_OWNER_LOADBALANCER, fixed_ips=fixed_vrrp_ip) allowed_address_pairs = [ {'ip_address': '10.1.0.201', 'mac_address': vrrp_port['port']['mac_address']}] with self.port( subnet=private_subnet1, device_owner=DEVICE_OWNER_COMPUTE) as int_port: self.l3_plugin.add_router_interface( self.context, router['id'], {'subnet_id': private_subnet1['subnet']['id']}) with mock.patch.object(self.l3_plugin, '_l3_rpc_notifier') as l3_notifier: vm_port = self.core_plugin.update_port( self.context, int_port['port']['id'], {'port': {portbindings.HOST_ID: HOST1}}) l3_notifier.routers_updated_on_host.assert_called_once_with( self.context, {router['id']}, HOST1) self.assertEqual(1, l3_notifier.add_arp_entry.call_count) l3_notifier.reset_mock() floating_ip = {'floating_network_id': ext_net['network']['id'], 'router_id': router['id'], 'port_id': vrrp_port['port']['id'], 'tenant_id': vrrp_port['port']['tenant_id']} floating_ip = self.l3_plugin.create_floatingip( self.context, {'floatingip': floating_ip}) vrrp_port_db = self.core_plugin.get_port( self.context, vrrp_port['port']['id']) self.assertNotEqual(vrrp_port_db[portbindings.HOST_ID], HOST1) # Now update the VM port with the allowed_address_pair l3_notifier.reset_mock() self.core_plugin.update_port( self.context, vm_port['id'], {'port': { 'allowed_address_pairs': allowed_address_pairs}}) updated_vm_port = self.core_plugin.get_port( self.context, vm_port['id']) expected_allowed_address_pairs = updated_vm_port.get( 'allowed_address_pairs') self.assertEqual(expected_allowed_address_pairs, allowed_address_pairs) cur_vrrp_port_db = self.core_plugin.get_port( self.context, vrrp_port['port']['id']) 
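# After the allowed_address_pairs update, the VRRP port is expected to
# inherit the compute port's host binding, and the binding profile should
# record the original device_owner under 'original_owner'; the assertions
# that follow verify exactly that.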
self.assertEqual(cur_vrrp_port_db[portbindings.HOST_ID], HOST1) self.assertTrue(cur_vrrp_port_db.get(portbindings.PROFILE)) port_profile = cur_vrrp_port_db.get(portbindings.PROFILE) self.assertTrue(port_profile) self.assertEqual(port_profile['original_owner'], constants.DEVICE_OWNER_LOADBALANCER) l3_notifier.reset_mock() port_profile['new_owner'] = 'test_owner' self.core_plugin.update_port( self.context, cur_vrrp_port_db['id'], {'port': {portbindings.PROFILE: port_profile}}) # Now the vrrp port should have an 'original_owner' # and gets updated with a new profile. In this case # the update triggers a notification to the neutron # server, but this should not trigger another arp # update of this port or router_updated event to the # agent, otherwise this will mess up the arp # table in the router namespace. self.assertEqual(0, l3_notifier.add_arp_entry.call_count) self.assertEqual( 0, l3_notifier.routers_updated_on_host.call_count) def test_allowed_addr_pairs_delayed_fip_and_update_arp_entry(self): HOST1 = 'host1' helpers.register_l3_agent( host=HOST1, agent_mode=constants.L3_AGENT_MODE_DVR) HOST2 = 'host2' helpers.register_l3_agent( host=HOST2, agent_mode=constants.L3_AGENT_MODE_DVR) router = self._create_router() private_net1 = self._make_network(self.fmt, 'net1', True) test_allocation_pools = [{'start': '10.1.0.2', 'end': '10.1.0.20'}] fixed_vrrp_ip = [{'ip_address': '10.1.0.201'}] kwargs = {'arg_list': (external_net.EXTERNAL,), external_net.EXTERNAL: True} ext_net = self._make_network(self.fmt, '', True, **kwargs) self._make_subnet( self.fmt, ext_net, '10.20.0.1', '10.20.0.0/24', ip_version=4, enable_dhcp=True) # Set gateway to router self.l3_plugin._update_router_gw_info( self.context, router['id'], {'network_id': ext_net['network']['id']}) private_subnet1 = self._make_subnet( self.fmt, private_net1, '10.1.0.1', cidr='10.1.0.0/24', ip_version=4, allocation_pools=test_allocation_pools, enable_dhcp=True) vrrp_port = self._make_port( self.fmt, private_net1['network']['id'], fixed_ips=fixed_vrrp_ip) allowed_address_pairs = [ {'ip_address': '10.1.0.201', 'mac_address': vrrp_port['port']['mac_address']}] with self.port( subnet=private_subnet1, device_owner=DEVICE_OWNER_COMPUTE) as int_port,\ self.port(subnet=private_subnet1, device_owner=DEVICE_OWNER_COMPUTE) as int_port2: self.l3_plugin.add_router_interface( self.context, router['id'], {'subnet_id': private_subnet1['subnet']['id']}) with mock.patch.object(self.l3_plugin, '_l3_rpc_notifier') as l3_notifier: vm_port = self.core_plugin.update_port( self.context, int_port['port']['id'], {'port': {portbindings.HOST_ID: HOST1}}) vm_port_mac = vm_port['mac_address'] vm_port_fixed_ips = vm_port['fixed_ips'] vm_port_subnet_id = vm_port_fixed_ips[0]['subnet_id'] vm_arp_table = { 'ip_address': vm_port_fixed_ips[0]['ip_address'], 'mac_address': vm_port_mac, 'subnet_id': vm_port_subnet_id} vm_port2 = self.core_plugin.update_port( self.context, int_port2['port']['id'], {'port': {portbindings.HOST_ID: HOST2}}) l3_notifier.reset_mock() # Now update the VM port with the allowed_address_pair self.core_plugin.update_port( self.context, vm_port['id'], {'port': { 'allowed_address_pairs': allowed_address_pairs}}) self.core_plugin.update_port( self.context, vm_port2['id'], {'port': { 'allowed_address_pairs': allowed_address_pairs}}) self.assertEqual( 0, l3_notifier.routers_updated_on_host.call_count) updated_vm_port1 = self.core_plugin.get_port( self.context, vm_port['id']) updated_vm_port2 = self.core_plugin.get_port( self.context, vm_port2['id'])
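# Four add_arp_entry notifications are expected below: two VM ports were
# updated with the allowed_address_pair, and each update appears to add
# one entry for the port's own fixed IP and one for the shared pair
# address (this reading of the count is an interpretation).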
self.assertEqual(4, l3_notifier.add_arp_entry.call_count) expected_allowed_address_pairs = updated_vm_port1.get( 'allowed_address_pairs') self.assertEqual(expected_allowed_address_pairs, allowed_address_pairs) expected_allowed_address_pairs_2 = updated_vm_port2.get( 'allowed_address_pairs') self.assertEqual(expected_allowed_address_pairs_2, allowed_address_pairs) # Now the VRRP port is attached to the VM port. At this # point, the VRRP port should not have inherited the # port host bindings from the parent VM port. cur_vrrp_port_db = self.core_plugin.get_port( self.context, vrrp_port['port']['id']) self.assertNotEqual( cur_vrrp_port_db[portbindings.HOST_ID], HOST1) self.assertNotEqual( cur_vrrp_port_db[portbindings.HOST_ID], HOST2) # Before we try to associate a floatingip, make sure that # only one of the service ports associated with the # allowed_address_pair port is active and the other one # is DOWN mod_vm_port2 = self.core_plugin.update_port( self.context, updated_vm_port2['id'], {'port': { 'admin_state_up': False}}) self.assertFalse(mod_vm_port2['admin_state_up']) # Next we can try to associate the floatingip to the # VRRP port that is already attached to the VM port l3_notifier.reset_mock() floating_ip = {'floating_network_id': ext_net['network']['id'], 'router_id': router['id'], 'port_id': vrrp_port['port']['id'], 'tenant_id': vrrp_port['port']['tenant_id']} floating_ip = self.l3_plugin.create_floatingip( self.context, {'floatingip': floating_ip}) self.assertEqual( 2, l3_notifier.routers_updated_on_host.call_count) self.assertEqual(3, l3_notifier.add_arp_entry.call_count) post_update_vrrp_port_db = self.core_plugin.get_port( self.context, vrrp_port['port']['id']) vrrp_port_fixed_ips = post_update_vrrp_port_db['fixed_ips'] vrrp_port_subnet_id = vrrp_port_fixed_ips[0]['subnet_id'] vrrp_arp_table = { 'ip_address': vrrp_port_fixed_ips[0]['ip_address'], 'mac_address': vm_port_mac, 'subnet_id': vrrp_port_subnet_id} vrrp_arp_table1 = { 'ip_address': vrrp_port_fixed_ips[0]['ip_address'], 'mac_address': vrrp_port['port']['mac_address'], 'subnet_id': vrrp_port_subnet_id} self.assertEqual( post_update_vrrp_port_db[portbindings.HOST_ID], HOST1) expected_calls = [ mock.call(self.context, router['id'], vrrp_arp_table1), mock.call(self.context, router['id'], vm_arp_table), mock.call(self.context, router['id'], vrrp_arp_table)] l3_notifier.add_arp_entry.assert_has_calls( expected_calls) def test_allowed_address_pairs_update_arp_entry(self): HOST1 = 'host1' helpers.register_l3_agent( host=HOST1, agent_mode=constants.L3_AGENT_MODE_DVR) router = self._create_router() private_net1 = self._make_network(self.fmt, 'net1', True) test_allocation_pools = [{'start': '10.1.0.2', 'end': '10.1.0.20'}] fixed_vrrp_ip = [{'ip_address': '10.1.0.201'}] kwargs = {'arg_list': (external_net.EXTERNAL,), external_net.EXTERNAL: True} ext_net = self._make_network(self.fmt, '', True, **kwargs) self._make_subnet( self.fmt, ext_net, '10.20.0.1', '10.20.0.0/24', ip_version=4, enable_dhcp=True) # Set gateway to router self.l3_plugin._update_router_gw_info( self.context, router['id'], {'network_id': ext_net['network']['id']}) private_subnet1 = self._make_subnet( self.fmt, private_net1, '10.1.0.1', cidr='10.1.0.0/24', ip_version=4, allocation_pools=test_allocation_pools, enable_dhcp=True) vrrp_port = self._make_port( self.fmt, private_net1['network']['id'], fixed_ips=fixed_vrrp_ip) allowed_address_pairs = [ {'ip_address': '10.1.0.201', 'mac_address': vrrp_port['port']['mac_address']}] with self.port( subnet=private_subnet1,
device_owner=DEVICE_OWNER_COMPUTE) as int_port: self.l3_plugin.add_router_interface( self.context, router['id'], {'subnet_id': private_subnet1['subnet']['id']}) with mock.patch.object(self.l3_plugin, '_l3_rpc_notifier') as l3_notifier: vm_port = self.core_plugin.update_port( self.context, int_port['port']['id'], {'port': {portbindings.HOST_ID: HOST1}}) vm_port_mac = vm_port['mac_address'] vm_port_fixed_ips = vm_port['fixed_ips'] vm_port_subnet_id = vm_port_fixed_ips[0]['subnet_id'] vm_arp_table = { 'ip_address': vm_port_fixed_ips[0]['ip_address'], 'mac_address': vm_port_mac, 'subnet_id': vm_port_subnet_id} l3_notifier.routers_updated_on_host.assert_called_once_with( self.context, {router['id']}, HOST1) self.assertEqual(1, l3_notifier.add_arp_entry.call_count) l3_notifier.reset_mock() floating_ip = {'floating_network_id': ext_net['network']['id'], 'router_id': router['id'], 'port_id': vrrp_port['port']['id'], 'tenant_id': vrrp_port['port']['tenant_id']} floating_ip = self.l3_plugin.create_floatingip( self.context, {'floatingip': floating_ip}) vrrp_port_db = self.core_plugin.get_port( self.context, vrrp_port['port']['id']) self.assertNotEqual(vrrp_port_db[portbindings.HOST_ID], HOST1) # Now update the VM port with the allowed_address_pair l3_notifier.reset_mock() self.core_plugin.update_port( self.context, vm_port['id'], {'port': { 'allowed_address_pairs': allowed_address_pairs}}) self.assertEqual( 2, l3_notifier.routers_updated_on_host.call_count) updated_vm_port = self.core_plugin.get_port( self.context, vm_port['id']) self.assertEqual(3, l3_notifier.add_arp_entry.call_count) expected_allowed_address_pairs = updated_vm_port.get( 'allowed_address_pairs') self.assertEqual(expected_allowed_address_pairs, allowed_address_pairs) cur_vrrp_port_db = self.core_plugin.get_port( self.context, vrrp_port['port']['id']) vrrp_port_fixed_ips = cur_vrrp_port_db['fixed_ips'] vrrp_port_subnet_id = vrrp_port_fixed_ips[0]['subnet_id'] vrrp_arp_table = { 'ip_address': vrrp_port_fixed_ips[0]['ip_address'], 'mac_address': vm_port_mac, 'subnet_id': vrrp_port_subnet_id} vrrp_arp_table1 = { 'ip_address': vrrp_port_fixed_ips[0]['ip_address'], 'mac_address': vrrp_port['port']['mac_address'], 'subnet_id': vrrp_port_subnet_id} self.assertEqual(cur_vrrp_port_db[portbindings.HOST_ID], HOST1) expected_calls = [ mock.call(self.context, router['id'], vrrp_arp_table1), mock.call(self.context, router['id'], vm_arp_table), mock.call(self.context, router['id'], vrrp_arp_table)] l3_notifier.add_arp_entry.assert_has_calls( expected_calls) def test_update_service_port_with_allowed_address_pairs(self): HOST1 = 'host1' helpers.register_l3_agent( host=HOST1, agent_mode=constants.L3_AGENT_MODE_DVR) router = self._create_router() private_net1 = self._make_network(self.fmt, 'net1', True) test_allocation_pools = [{'start': '10.1.0.2', 'end': '10.1.0.20'}] fixed_vrrp_ip = [{'ip_address': '10.1.0.201'}] kwargs = {'arg_list': (external_net.EXTERNAL,), external_net.EXTERNAL: True} ext_net = self._make_network(self.fmt, '', True, **kwargs) self._make_subnet( self.fmt, ext_net, '10.20.0.1', '10.20.0.0/24', ip_version=4, enable_dhcp=True) # Set gateway to router self.l3_plugin._update_router_gw_info( self.context, router['id'], {'network_id': ext_net['network']['id']}) private_subnet1 = self._make_subnet( self.fmt, private_net1, '10.1.0.1', cidr='10.1.0.0/24', ip_version=4, allocation_pools=test_allocation_pools, enable_dhcp=True) vrrp_port = self._make_port( self.fmt, private_net1['network']['id'], 
device_owner=constants.DEVICE_OWNER_LOADBALANCER, fixed_ips=fixed_vrrp_ip) allowed_address_pairs = [ {'ip_address': '10.1.0.201', 'mac_address': vrrp_port['port']['mac_address']}] with self.port( subnet=private_subnet1, device_owner=DEVICE_OWNER_COMPUTE) as int_port: self.l3_plugin.add_router_interface( self.context, router['id'], {'subnet_id': private_subnet1['subnet']['id']}) with mock.patch.object(self.l3_plugin, '_l3_rpc_notifier') as l3_notifier: self.core_plugin.update_port( self.context, int_port['port']['id'], {'port': {portbindings.HOST_ID: HOST1}}) l3_notifier.routers_updated_on_host.assert_called_once_with( self.context, {router['id']}, HOST1) floating_ip = {'floating_network_id': ext_net['network']['id'], 'router_id': router['id'], 'port_id': vrrp_port['port']['id'], 'tenant_id': vrrp_port['port']['tenant_id']} floating_ip = self.l3_plugin.create_floatingip( self.context, {'floatingip': floating_ip}) vrrp_port_db = self.core_plugin.get_port( self.context, vrrp_port['port']['id']) self.assertNotEqual(vrrp_port_db[portbindings.HOST_ID], HOST1) # Now update the VM port with the allowed_address_pair cur_int_port = self.core_plugin.update_port( self.context, int_port['port']['id'], {'port': { 'allowed_address_pairs': allowed_address_pairs}}) cur_vrrp_port_db = self.core_plugin.get_port( self.context, vrrp_port['port']['id']) # Check to make sure that we are not changing the existing # device_owner for the allowed_address_pair port. self.assertEqual( cur_vrrp_port_db['device_owner'], constants.DEVICE_OWNER_LOADBALANCER) self.assertEqual(cur_vrrp_port_db[portbindings.HOST_ID], HOST1) self.assertTrue(cur_vrrp_port_db.get(portbindings.PROFILE)) port_profile = cur_vrrp_port_db.get(portbindings.PROFILE) self.assertTrue(port_profile) self.assertEqual(port_profile['original_owner'], constants.DEVICE_OWNER_LOADBALANCER) # Now change the compute port admin_state_up from True to # False, and see if the vrrp port's device_owner and binding # inheritance revert back to normal mod_int_port = self.core_plugin.update_port( self.context, cur_int_port['id'], {'port': { 'admin_state_up': False}}) self.assertFalse(mod_int_port['admin_state_up']) new_vrrp_port_db = self.core_plugin.get_port( self.context, cur_vrrp_port_db['id']) new_port_profile = new_vrrp_port_db.get(portbindings.PROFILE) self.assertEqual({}, new_port_profile) self.assertNotEqual( new_vrrp_port_db[portbindings.HOST_ID], HOST1) # Now change the compute port admin_state_up from False to # True, and see if the vrrp port's device_owner and binding # inherit from the associated parent compute port. new_mod_int_port = self.core_plugin.update_port( self.context, mod_int_port['id'], {'port': { 'admin_state_up': True}}) self.assertTrue(new_mod_int_port['admin_state_up']) cur_new_vrrp_port_db = self.core_plugin.get_port( self.context, new_vrrp_port_db['id']) self.assertNotEqual( cur_new_vrrp_port_db['device_owner'], DEVICE_OWNER_COMPUTE) self.assertEqual( cur_new_vrrp_port_db[portbindings.HOST_ID], HOST1) # Now let us try to remove vrrp_port device_owner and see # how it inherits from the compute port.
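# (The update that follows clears both the device_owner and the recorded
# 'original_owner' in the binding profile; after that, toggling the
# compute port's admin_state_up is expected to re-derive the VRRP port's
# device_owner and host binding from the parent compute port.)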
updated_vrrp_port = self.core_plugin.update_port( self.context, cur_new_vrrp_port_db['id'], {'port': {'device_owner': "", portbindings.PROFILE: {'original_owner': ""}}}) updated_vm_port = self.core_plugin.update_port( self.context, new_mod_int_port['id'], {'port': { 'admin_state_up': False}}) self.assertFalse(updated_vm_port['admin_state_up']) # This port admin_state down should not cause any issue # with the existing vrrp port device_owner, but should # only change the port_binding HOST_ID. cur_new_vrrp_port_db = self.core_plugin.get_port( self.context, updated_vrrp_port['id']) self.assertEqual( "", cur_new_vrrp_port_db['device_owner']) self.assertEqual( "", cur_new_vrrp_port_db[portbindings.HOST_ID]) updated_vm_port = self.core_plugin.update_port( self.context, new_mod_int_port['id'], {'port': { 'admin_state_up': True}}) self.assertTrue(updated_vm_port['admin_state_up']) updated_vrrp_port_db = self.core_plugin.get_port( self.context, new_vrrp_port_db['id']) self.assertEqual( updated_vrrp_port_db['device_owner'], DEVICE_OWNER_COMPUTE) self.assertEqual( updated_vrrp_port_db[portbindings.HOST_ID], HOST1) def test_update_vm_port_host_router_update(self): # register l3 agents in dvr mode in addition to existing dvr_snat agent HOST1 = 'host1' helpers.register_l3_agent( host=HOST1, agent_mode=constants.L3_AGENT_MODE_DVR) HOST2 = 'host2' helpers.register_l3_agent( host=HOST2, agent_mode=constants.L3_AGENT_MODE_DVR) router = self._create_router() with self.subnet() as subnet: self.l3_plugin.add_router_interface( self.context, router['id'], {'subnet_id': subnet['subnet']['id']}) with mock.patch.object(self.l3_plugin, '_l3_rpc_notifier') as l3_notifier,\ self.port(subnet=subnet, device_owner=DEVICE_OWNER_COMPUTE) as port: self.l3_plugin.agent_notifiers[ constants.AGENT_TYPE_L3] = l3_notifier self.core_plugin.update_port( self.context, port['port']['id'], {'port': {portbindings.HOST_ID: HOST1}}) l3_notifier.routers_updated_on_host.assert_called_once_with( self.context, {router['id']}, HOST1) self.assertFalse(l3_notifier.routers_updated.called) # updating port's host (instance migration) l3_notifier.reset_mock() self.core_plugin.update_port( self.context, port['port']['id'], {'port': {portbindings.HOST_ID: HOST2}}) l3_notifier.routers_updated_on_host.assert_called_once_with( self.context, {router['id']}, HOST2) l3_notifier.router_removed_from_agent.assert_called_once_with( mock.ANY, router['id'], HOST1) def test_dvr_router_manual_rescheduling_removes_router(self): router = self._create_router() kwargs = {'arg_list': (external_net.EXTERNAL,), external_net.EXTERNAL: True} with self.network(**kwargs) as ext_net,\ self.subnet(network=ext_net),\ self.subnet(cidr='20.0.0.0/24') as subnet,\ self.port(subnet=subnet): self.l3_plugin._update_router_gw_info( self.context, router['id'], {'network_id': ext_net['network']['id']}) self.l3_plugin.add_router_interface( self.context, router['id'], {'subnet_id': subnet['subnet']['id']}) self.l3_plugin.schedule_router(self.context, router['id'], candidates=[self.l3_agent]) # Now the VM should be also scheduled on the node notifier = self.l3_plugin.agent_notifiers[ constants.AGENT_TYPE_L3] with mock.patch.object( notifier, 'router_removed_from_agent') as rtr_remove_mock: self.l3_plugin.remove_router_from_l3_agent( self.context, self.l3_agent['id'], router['id']) rtr_remove_mock.assert_called_once_with( self.context, router['id'], self.l3_agent['host']) def test_dvr_router_manual_rescheduling_updates_router(self): router = self._create_router() kwargs = {'arg_list': 
(external_net.EXTERNAL,), external_net.EXTERNAL: True} with self.network(**kwargs) as ext_net,\ self.subnet(network=ext_net),\ self.subnet(cidr='20.0.0.0/24') as subnet,\ self.port(subnet=subnet, device_owner=DEVICE_OWNER_COMPUTE) as port: self.core_plugin.update_port( self.context, port['port']['id'], {'port': {'binding:host_id': self.l3_agent['host']}}) self.l3_plugin._update_router_gw_info( self.context, router['id'], {'network_id': ext_net['network']['id']}) self.l3_plugin.add_router_interface( self.context, router['id'], {'subnet_id': subnet['subnet']['id']}) self.l3_plugin.schedule_router(self.context, router['id'], candidates=[self.l3_agent]) # Now the VM should also be scheduled on the node notifier = self.l3_plugin.agent_notifiers[ constants.AGENT_TYPE_L3] with mock.patch.object( notifier, 'routers_updated_on_host') as rtr_update_mock: self.l3_plugin.remove_router_from_l3_agent( self.context, self.l3_agent['id'], router['id']) rtr_update_mock.assert_called_once_with( self.context, [router['id']], self.l3_agent['host']) def _test_router_remove_from_agent_on_vm_port_deletion( self, non_admin_port=False): # register l3 agent in dvr mode in addition to existing dvr_snat agent HOST = 'host1' non_admin_tenant = 'tenant1' helpers.register_l3_agent( host=HOST, agent_mode=constants.L3_AGENT_MODE_DVR) router = self._create_router() with self.network(shared=True) as net,\ self.subnet(network=net) as subnet,\ self.port(subnet=subnet, device_owner=DEVICE_OWNER_COMPUTE, tenant_id=non_admin_tenant, set_context=non_admin_port) as port: self.core_plugin.update_port( self.context, port['port']['id'], {'port': {portbindings.HOST_ID: HOST}}) self.l3_plugin.add_router_interface( self.context, router['id'], {'subnet_id': subnet['subnet']['id']}) with mock.patch.object(self.l3_plugin.l3_rpc_notifier, 'router_removed_from_agent') as remove_mock: ctx = context.Context( '', non_admin_tenant) if non_admin_port else self.context self._delete('ports', port['port']['id'], neutron_context=ctx) remove_mock.assert_called_once_with( mock.ANY, router['id'], HOST) def test_router_remove_from_agent_on_vm_port_deletion(self): self._test_router_remove_from_agent_on_vm_port_deletion() def test_admin_router_remove_from_agent_on_vm_port_deletion(self): self._test_router_remove_from_agent_on_vm_port_deletion( non_admin_port=True) def test_dvr_router_notifications_for_live_migration_with_fip(self): self._dvr_router_notifications_for_live_migration( with_floatingip=True) def test_dvr_router_notifications_for_live_migration_without_fip(self): self._dvr_router_notifications_for_live_migration() def _dvr_router_notifications_for_live_migration( self, with_floatingip=False): """Check that the router notifications go to the right hosts during live migration, before the host binding on the port is updated.
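The notification is keyed off the 'migrating_to' entry in the port's binding profile (set directly in this test); the port binding itself is still expected to point at the source host.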
""" # register l3 agents in dvr mode in addition to existing dvr_snat agent HOST1, HOST2 = 'host1', 'host2' for host in [HOST1, HOST2]: helpers.register_l3_agent( host=host, agent_mode=constants.L3_AGENT_MODE_DVR) router = self._create_router() arg_list = (portbindings.HOST_ID,) with self.subnet() as ext_subnet,\ self.subnet(cidr='20.0.0.0/24') as subnet1,\ self.port(subnet=subnet1, device_owner=DEVICE_OWNER_COMPUTE, arg_list=arg_list, **{portbindings.HOST_ID: HOST1}) as vm_port: # make net external ext_net_id = ext_subnet['subnet']['network_id'] self._update('networks', ext_net_id, {'network': {external_net.EXTERNAL: True}}) # add external gateway to router self.l3_plugin.update_router( self.context, router['id'], {'router': { 'external_gateway_info': {'network_id': ext_net_id}}}) self.l3_plugin.add_router_interface( self.context, router['id'], {'subnet_id': subnet1['subnet']['id']}) if with_floatingip: floating_ip = {'floating_network_id': ext_net_id, 'router_id': router['id'], 'port_id': vm_port['port']['id'], 'tenant_id': vm_port['port']['tenant_id'], 'dns_name': '', 'dns_domain': ''} floating_ip = self.l3_plugin.create_floatingip( self.context, {'floatingip': floating_ip}) with mock.patch.object(self.l3_plugin, '_l3_rpc_notifier') as l3_notifier,\ mock.patch.object( self.l3_plugin, 'create_fip_agent_gw_port_if_not_exists' ) as fip_agent: live_migration_port_profile = { 'migrating_to': HOST2 } # Update the VM Port with Migration porbinding Profile. # With this change, it should trigger a notification to # the Destination host to create a Router ahead of time # before the VM Port binding has changed to HOST2. updated_port = self.core_plugin.update_port( self.context, vm_port['port']['id'], {'port': { portbindings.PROFILE: live_migration_port_profile}}) l3_notifier.routers_updated_on_host.assert_called_once_with( self.context, {router['id']}, HOST2) # Check the port-binding is still with the old HOST1, but # the router update notification has been sent to the new # host 'HOST2' based on the live migration profile change. self.assertEqual(updated_port[portbindings.HOST_ID], HOST1) self.assertNotEqual(updated_port[portbindings.HOST_ID], HOST2) if with_floatingip: fip_agent.return_value = True # Since we have already created the floatingip for the # port, it should be creating the floatingip agent gw # port for the new host if it does not exist. 
fip_agent.assert_called_once_with( mock.ANY, floating_ip['floating_network_id'], HOST2) def test_router_notifications(self): """Check that notifications go to the right hosts in different conditions """ # register l3 agents in dvr mode in addition to existing dvr_snat agent HOST1, HOST2, HOST3 = 'host1', 'host2', 'host3' for host in [HOST1, HOST2, HOST3]: helpers.register_l3_agent( host=host, agent_mode=constants.L3_AGENT_MODE_DVR) router = self._create_router() arg_list = (portbindings.HOST_ID,) with self.subnet() as ext_subnet,\ self.subnet(cidr='20.0.0.0/24') as subnet1,\ self.subnet(cidr='30.0.0.0/24') as subnet2,\ self.subnet(cidr='40.0.0.0/24') as subnet3,\ self.port(subnet=subnet1, device_owner=DEVICE_OWNER_COMPUTE, arg_list=arg_list, **{portbindings.HOST_ID: HOST1}),\ self.port(subnet=subnet2, device_owner=constants.DEVICE_OWNER_DHCP, arg_list=arg_list, **{portbindings.HOST_ID: HOST2}),\ self.port(subnet=subnet3, device_owner=constants.DEVICE_OWNER_NEUTRON_PREFIX, arg_list=arg_list, **{portbindings.HOST_ID: HOST3}): # make net external ext_net_id = ext_subnet['subnet']['network_id'] self._update('networks', ext_net_id, {'network': {external_net.EXTERNAL: True}}) with mock.patch.object(self.l3_plugin.l3_rpc_notifier.client, 'prepare') as mock_prepare: # add external gateway to router self.l3_plugin.update_router( self.context, router['id'], {'router': { 'external_gateway_info': {'network_id': ext_net_id}}}) # router has no interfaces so notification goes # to only dvr_snat agent mock_prepare.assert_called_once_with( server=self.l3_agent['host'], topic=topics.L3_AGENT, version='1.1') mock_prepare.reset_mock() self.l3_plugin.add_router_interface( self.context, router['id'], {'subnet_id': subnet1['subnet']['id']}) self.assertEqual(2, mock_prepare.call_count) expected = [mock.call(server=self.l3_agent['host'], topic=topics.L3_AGENT, version='1.1'), mock.call(server=HOST1, topic=topics.L3_AGENT, version='1.1')] mock_prepare.assert_has_calls(expected, any_order=True) mock_prepare.reset_mock() self.l3_plugin.add_router_interface( self.context, router['id'], {'subnet_id': subnet2['subnet']['id']}) self.assertEqual(3, mock_prepare.call_count) expected = [mock.call(server=self.l3_agent['host'], topic=topics.L3_AGENT, version='1.1'), mock.call(server=HOST1, topic=topics.L3_AGENT, version='1.1'), mock.call(server=HOST2, topic=topics.L3_AGENT, version='1.1')] mock_prepare.assert_has_calls(expected, any_order=True) mock_prepare.reset_mock() self.l3_plugin.add_router_interface( self.context, router['id'], {'subnet_id': subnet3['subnet']['id']}) # there are no dvr serviceable ports on HOST3, so notification # goes to the same hosts self.assertEqual(3, mock_prepare.call_count) expected = [mock.call(server=self.l3_agent['host'], topic=topics.L3_AGENT, version='1.1'), mock.call(server=HOST1, topic=topics.L3_AGENT, version='1.1'), mock.call(server=HOST2, topic=topics.L3_AGENT, version='1.1')] mock_prepare.assert_has_calls(expected, any_order=True) def test_router_is_not_removed_from_snat_agent_on_interface_removal(self): """Check that dvr router is not removed from l3 agent hosting SNAT for it on router interface removal """ router = self._create_router() kwargs = {'arg_list': (external_net.EXTERNAL,), external_net.EXTERNAL: True} with self.subnet() as subnet,\ self.network(**kwargs) as ext_net,\ self.subnet(network=ext_net, cidr='20.0.0.0/24'): self.l3_plugin._update_router_gw_info( self.context, router['id'], {'network_id': ext_net['network']['id']}) self.l3_plugin.add_router_interface( self.context, 
router['id'], {'subnet_id': subnet['subnet']['id']}) agents = self.l3_plugin.list_l3_agents_hosting_router( self.context, router['id']) self.assertEqual(1, len(agents['agents'])) with mock.patch.object(self.l3_plugin, '_l3_rpc_notifier') as l3_notifier: self.l3_plugin.remove_router_interface( self.context, router['id'], {'subnet_id': subnet['subnet']['id']}) agents = self.l3_plugin.list_l3_agents_hosting_router( self.context, router['id']) self.assertEqual(1, len(agents['agents'])) self.assertFalse(l3_notifier.router_removed_from_agent.called) def test_router_is_not_removed_from_snat_agent_on_dhcp_port_deletion(self): """Check that dvr router is not removed from l3 agent hosting SNAT for it on DHCP port removal """ router = self._create_router() kwargs = {'arg_list': (external_net.EXTERNAL,), external_net.EXTERNAL: True} with self.network(**kwargs) as ext_net,\ self.subnet(network=ext_net),\ self.subnet(cidr='20.0.0.0/24') as subnet,\ self.port(subnet=subnet, device_owner=constants.DEVICE_OWNER_DHCP) as port: self.core_plugin.update_port( self.context, port['port']['id'], {'port': {'binding:host_id': self.l3_agent['host']}}) self.l3_plugin._update_router_gw_info( self.context, router['id'], {'network_id': ext_net['network']['id']}) self.l3_plugin.add_router_interface( self.context, router['id'], {'subnet_id': subnet['subnet']['id']}) # router should be scheduled to the dvr_snat l3 agent agents = self.l3_plugin.list_l3_agents_hosting_router( self.context, router['id']) self.assertEqual(1, len(agents['agents'])) self.assertEqual(self.l3_agent['id'], agents['agents'][0]['id']) notifier = self.l3_plugin.agent_notifiers[ constants.AGENT_TYPE_L3] with mock.patch.object( notifier, 'router_removed_from_agent') as remove_mock: self._delete('ports', port['port']['id']) # now when port is deleted the router still has external # gateway and should still be scheduled to the snat agent agents = self.l3_plugin.list_l3_agents_hosting_router( self.context, router['id']) self.assertEqual(1, len(agents['agents'])) self.assertEqual(self.l3_agent['id'], agents['agents'][0]['id']) self.assertFalse(remove_mock.called) def test__get_dvr_subnet_ids_on_host_query(self): with self.subnet(cidr='20.0.0.0/24') as subnet1,\ self.subnet(cidr='30.0.0.0/24') as subnet2,\ self.subnet(cidr='40.0.0.0/24') as subnet3,\ self.port(subnet=subnet1, device_owner=DEVICE_OWNER_COMPUTE) as p1,\ self.port(subnet=subnet2, device_owner=constants.DEVICE_OWNER_DHCP) as p2,\ self.port(subnet=subnet3, device_owner=constants.DEVICE_OWNER_NEUTRON_PREFIX)\ as p3,\ self.port(subnet=subnet3, device_owner=constants.DEVICE_OWNER_COMPUTE_PREFIX)\ as p4: host = 'host1' subnet_ids = [item[0] for item in self.l3_plugin._get_dvr_subnet_ids_on_host_query( self.context, host)] self.assertEqual([], subnet_ids) self.core_plugin.update_port( self.context, p1['port']['id'], {'port': {portbindings.HOST_ID: host}}) expected = {subnet1['subnet']['id']} subnet_ids = [item[0] for item in self.l3_plugin._get_dvr_subnet_ids_on_host_query( self.context, host)] self.assertEqual(expected, set(subnet_ids)) self.core_plugin.update_port( self.context, p2['port']['id'], {'port': {portbindings.HOST_ID: host}}) expected.add(subnet2['subnet']['id']) subnet_ids = [item[0] for item in self.l3_plugin._get_dvr_subnet_ids_on_host_query( self.context, host)] self.assertEqual(expected, set(subnet_ids)) self.core_plugin.update_port( self.context, p3['port']['id'], {'port': {portbindings.HOST_ID: host}}) # p3 is non dvr serviceable so no subnet3 expected subnet_ids = [item[0] for item 
in self.l3_plugin._get_dvr_subnet_ids_on_host_query( self.context, host)] self.assertEqual(expected, set(subnet_ids)) other_host = 'other' + host self.core_plugin.update_port( self.context, p4['port']['id'], {'port': {portbindings.HOST_ID: other_host}}) # p4 is on other host so no subnet3 expected subnet_ids = [item[0] for item in self.l3_plugin._get_dvr_subnet_ids_on_host_query( self.context, host)] self.assertEqual(expected, set(subnet_ids)) self.core_plugin.update_port( self.context, p4['port']['id'], {'port': {portbindings.HOST_ID: host}}) # finally p4 is on the right host so subnet3 is expected expected.add(subnet3['subnet']['id']) subnet_ids = [item[0] for item in self.l3_plugin._get_dvr_subnet_ids_on_host_query( self.context, host)] self.assertEqual(expected, set(subnet_ids)) def test__get_dvr_router_ids_for_host(self): router1 = self._create_router() router2 = self._create_router() host = 'host1' arg_list = (portbindings.HOST_ID,) with self.subnet(cidr='20.0.0.0/24') as subnet1,\ self.subnet(cidr='30.0.0.0/24') as subnet2,\ self.port(subnet=subnet1, device_owner=DEVICE_OWNER_COMPUTE, arg_list=arg_list, **{portbindings.HOST_ID: host}),\ self.port(subnet=subnet2, device_owner=constants.DEVICE_OWNER_DHCP, arg_list=arg_list, **{portbindings.HOST_ID: host}): router_ids = self.l3_plugin._get_dvr_router_ids_for_host( self.context, host) self.assertEqual([], router_ids) self.l3_plugin.add_router_interface( self.context, router1['id'], {'subnet_id': subnet1['subnet']['id']}) router_ids = self.l3_plugin._get_dvr_router_ids_for_host( self.context, host) expected = {router1['id']} self.assertEqual(expected, set(router_ids)) self.l3_plugin.add_router_interface( self.context, router2['id'], {'subnet_id': subnet2['subnet']['id']}) router_ids = self.l3_plugin._get_dvr_router_ids_for_host( self.context, host) expected.add(router2['id']) self.assertEqual(expected, set(router_ids)) def test__get_router_ids_for_agent(self): router1 = self._create_router() router2 = self._create_router() router3 = self._create_router() arg_list = (portbindings.HOST_ID,) host = self.l3_agent['host'] with self.subnet() as ext_subnet,\ self.subnet(cidr='20.0.0.0/24') as subnet1,\ self.subnet(cidr='30.0.0.0/24') as subnet2,\ self.port(subnet=subnet1, device_owner=DEVICE_OWNER_COMPUTE, arg_list=arg_list, **{portbindings.HOST_ID: host}),\ self.port(subnet=subnet2, device_owner=constants.DEVICE_OWNER_DHCP, arg_list=arg_list, **{portbindings.HOST_ID: host}): ids = self.l3_plugin._get_router_ids_for_agent( self.context, self.l3_agent, []) self.assertEqual([], ids) ids = self.l3_plugin._get_router_ids_for_agent( self.context, self.l3_agent, [router1['id'], router2['id']]) self.assertEqual([], ids) self.l3_plugin.add_router_interface( self.context, router1['id'], {'subnet_id': subnet1['subnet']['id']}) ids = self.l3_plugin._get_router_ids_for_agent( self.context, self.l3_agent, []) self.assertEqual([router1['id']], ids) ids = self.l3_plugin._get_router_ids_for_agent( self.context, self.l3_agent, [router1['id']]) self.assertEqual([router1['id']], ids) ids = self.l3_plugin._get_router_ids_for_agent( self.context, self.l3_agent, [router1['id'], router2['id']]) self.assertEqual([router1['id']], ids) ids = self.l3_plugin._get_router_ids_for_agent( self.context, self.l3_agent, [router2['id']]) self.assertEqual([], ids) self.l3_plugin.add_router_interface( self.context, router2['id'], {'subnet_id': subnet2['subnet']['id']}) ids = self.l3_plugin._get_router_ids_for_agent( self.context, self.l3_agent, []) self.assertEqual({router1['id'], 
router2['id']}, set(ids)) ids = self.l3_plugin._get_router_ids_for_agent( self.context, self.l3_agent, [router1['id']]) self.assertEqual([router1['id']], ids) ids = self.l3_plugin._get_router_ids_for_agent( self.context, self.l3_agent, [router1['id'], router2['id']]) self.assertEqual({router1['id'], router2['id']}, set(ids)) ids = self.l3_plugin._get_router_ids_for_agent( self.context, self.l3_agent, [router2['id']]) self.assertEqual([router2['id']], ids) # make net external ext_net_id = ext_subnet['subnet']['network_id'] self._update('networks', ext_net_id, {'network': {external_net.EXTERNAL: True}}) # add external gateway to router self.l3_plugin.update_router( self.context, router3['id'], {'router': { 'external_gateway_info': {'network_id': ext_net_id}}}) ids = self.l3_plugin._get_router_ids_for_agent( self.context, self.l3_agent, []) self.assertEqual({router1['id'], router2['id'], router3['id']}, set(ids)) ids = self.l3_plugin._get_router_ids_for_agent( self.context, self.l3_agent, [router3['id']]) self.assertEqual([router3['id']], ids) ids = self.l3_plugin._get_router_ids_for_agent( self.context, self.l3_agent, [router1['id'], router3['id']]) self.assertEqual({router1['id'], router3['id']}, set(ids)) def test_remove_router_interface(self): HOST1 = 'host1' helpers.register_l3_agent( host=HOST1, agent_mode=constants.L3_AGENT_MODE_DVR) router = self._create_router() arg_list = (portbindings.HOST_ID,) with self.subnet() as subnet,\ self.port(subnet=subnet, device_owner=DEVICE_OWNER_COMPUTE, arg_list=arg_list, **{portbindings.HOST_ID: HOST1}): l3_notifier = mock.Mock() self.l3_plugin.l3_rpc_notifier = l3_notifier self.l3_plugin.agent_notifiers[ constants.AGENT_TYPE_L3] = l3_notifier self.l3_plugin.add_router_interface( self.context, router['id'], {'subnet_id': subnet['subnet']['id']}) self.l3_plugin.schedule_router(self.context, router['id']) self.l3_plugin.remove_router_interface( self.context, router['id'], {'subnet_id': subnet['subnet']['id']}) l3_notifier.router_removed_from_agent.assert_called_once_with( self.context, router['id'], HOST1) def test_router_auto_scheduling(self): router = self._create_router() agents = self.l3_plugin.list_l3_agents_hosting_router( self.context, router['id']) # router is not scheduled yet self.assertEqual([], agents['agents']) l3_rpc_handler = l3_rpc.L3RpcCallback() # router should be auto scheduled once l3 agent requests router ids l3_rpc_handler.get_router_ids(self.context, self.l3_agent['host']) agents = self.l3_plugin.list_l3_agents_hosting_router( self.context, router['id']) self.assertEqual(1, len(agents['agents'])) self.assertEqual(self.l3_agent['id'], agents['agents'][0]['id']) neutron-8.4.0/neutron/tests/functional/services/bgp/0000775000567000056710000000000013044373210023744 5ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/tests/functional/services/bgp/scheduler/0000775000567000056710000000000013044373210025722 5ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/tests/functional/services/bgp/scheduler/__init__.py0000664000567000056710000000000013044372760030032 0ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/tests/functional/services/bgp/scheduler/test_bgp_dragent_scheduler.py0000664000567000056710000002012113044372760033652 0ustar jenkinsjenkins00000000000000# Copyright 2016 Huawei Technologies India Pvt. Ltd. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import testscenarios from neutron import context from neutron.db import agents_db from neutron.db import bgp_db from neutron.db import bgp_dragentscheduler_db as bgp_dras_db from neutron.db import common_db_mixin from neutron.services.bgp.scheduler import bgp_dragent_scheduler as bgp_dras from neutron.tests.common import helpers from neutron.tests.unit import testlib_api # Required to generate tests from scenarios. Not compatible with nose. load_tests = testscenarios.load_tests_apply_scenarios class TestAutoSchedule(testlib_api.SqlTestCase, bgp_dras_db.BgpDrAgentSchedulerDbMixin, agents_db.AgentDbMixin, common_db_mixin.CommonDbMixin): """Test various scenarios for schedule_unscheduled_bgp_speakers. Below is a brief description of the scenario variables -------------------------------------------------------- host_count number of hosts. agent_count number of BGP dynamic routing agents. down_agent_count number of DRAgents which are inactive. bgp_speaker_count number of bgp_speakers. hosted_bgp_speakers A mapping of agent id to the ids of the bgp_speakers that they should be initially hosting. expected_schedule_return_value Expected return value of 'schedule_unscheduled_bgp_speakers'. expected_hosted_bgp_speakers This stores the expected bgp_speakers that should have been scheduled (or that could have already been scheduled) for each agent after the 'schedule_unscheduled_bgp_speakers' function is called. """ scenarios = [ ('No BgpDrAgent scheduled, if no DRAgent is present', dict(host_count=1, agent_count=0, down_agent_count=0, bgp_speaker_count=1, hosted_bgp_speakers={}, expected_schedule_return_value=False)), ('No BgpDrAgent scheduled, if no BGP speakers are present', dict(host_count=1, agent_count=1, down_agent_count=0, bgp_speaker_count=0, hosted_bgp_speakers={}, expected_schedule_return_value=False, expected_hosted_bgp_speakers={'agent-0': []})), ('No BgpDrAgent scheduled, if BGP speaker already hosted', dict(host_count=1, agent_count=1, down_agent_count=0, bgp_speaker_count=1, hosted_bgp_speakers={'agent-0': ['bgp-speaker-0']}, expected_schedule_return_value=False, expected_hosted_bgp_speakers={'agent-0': ['bgp-speaker-0']})), ('BgpDrAgent scheduled to the speaker, if the speaker is not hosted', dict(host_count=1, agent_count=1, down_agent_count=0, bgp_speaker_count=1, hosted_bgp_speakers={}, expected_schedule_return_value=True, expected_hosted_bgp_speakers={'agent-0': ['bgp-speaker-0']})), ('No BgpDrAgent scheduled, if all the agents are down', dict(host_count=2, agent_count=2, down_agent_count=2, bgp_speaker_count=1, hosted_bgp_speakers={}, expected_schedule_return_value=False, expected_hosted_bgp_speakers={'agent-0': [], 'agent-1': [], })), ] def _strip_host_index(self, name): """Strips the host index. Eg. if name = '2-agent-3', then 'agent-3' is returned. """ return name[name.find('-') + 1:] def _extract_index(self, name): """Extracts the index number and returns it. Eg.
if name = '2-agent-3', then 3 is returned """ return int(name.split('-')[-1]) def _get_hosted_bgp_speakers_on_dragent(self, agent_id): query = self.ctx.session.query( bgp_dras_db.BgpSpeakerDrAgentBinding.bgp_speaker_id) query = query.filter( bgp_dras_db.BgpSpeakerDrAgentBinding.agent_id == agent_id) return [item[0] for item in query] def _create_and_set_agents_down(self, hosts, agent_count=0, down_agent_count=0, admin_state_up=True): agents = [] if agent_count: for i, host in enumerate(hosts): is_alive = i >= down_agent_count agents.append(helpers.register_bgp_dragent( host, admin_state_up=admin_state_up, alive=is_alive)) return agents def _save_bgp_speakers(self, bgp_speakers): cls = bgp_db.BgpDbMixin() bgp_speaker_body = { 'bgp_speaker': {'name': 'fake_bgp_speaker', 'ip_version': '4', 'local_as': '123', 'advertise_floating_ip_host_routes': '0', 'advertise_tenant_networks': '0', 'peers': [], 'networks': []}} i = 1 for bgp_speaker_id in bgp_speakers: bgp_speaker_body['bgp_speaker']['local_as'] = i cls._save_bgp_speaker(self.ctx, bgp_speaker_body, uuid=bgp_speaker_id) i = i + 1 def _test_auto_schedule(self, host_index): scheduler = bgp_dras.ChanceScheduler() self.ctx = context.get_admin_context() msg = 'host_index = %s' % host_index # create hosts hosts = ['%s-agent-%s' % (host_index, i) for i in range(self.host_count)] bgp_dragents = self._create_and_set_agents_down(hosts, self.agent_count, self.down_agent_count) # create bgp_speakers self._bgp_speakers = ['%s-bgp-speaker-%s' % (host_index, i) for i in range(self.bgp_speaker_count)] self._save_bgp_speakers(self._bgp_speakers) # pre schedule the bgp_speakers to the agents defined in # self.hosted_bgp_speakers before calling # schedule_unscheduled_bgp_speakers for agent, bgp_speakers in self.hosted_bgp_speakers.items(): agent_index = self._extract_index(agent) for bgp_speaker in bgp_speakers: bs_index = self._extract_index(bgp_speaker) scheduler.bind(self.ctx, [bgp_dragents[agent_index]], self._bgp_speakers[bs_index]) retval = scheduler.schedule_unscheduled_bgp_speakers(self.ctx, hosts[host_index]) self.assertEqual(self.expected_schedule_return_value, retval, message=msg) if self.agent_count: agent_id = bgp_dragents[host_index].id hosted_bgp_speakers = self._get_hosted_bgp_speakers_on_dragent( agent_id) hosted_bs_ids = [self._strip_host_index(net) for net in hosted_bgp_speakers] expected_hosted_bgp_speakers = self.expected_hosted_bgp_speakers[ 'agent-%s' % host_index] self.assertItemsEqual(hosted_bs_ids, expected_hosted_bgp_speakers, msg) def test_auto_schedule(self): for i in range(self.host_count): self._test_auto_schedule(i) neutron-8.4.0/neutron/tests/functional/services/bgp/__init__.py0000664000567000056710000000000013044372760026054 0ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/tests/functional/__init__.py0000664000567000056710000000217313044372760023476 0ustar jenkinsjenkins00000000000000# Copyright 2015 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License.
""" In order to save gate resources, test paths that have similar environmental requirements to the functional path are marked for discovery. """ import os.path def load_tests(loader, tests, pattern): this_dir = os.path.dirname(__file__) parent_dir = os.path.dirname(this_dir) target_dirs = [ this_dir, os.path.join(parent_dir, 'retargetable'), ] for start_dir in target_dirs: new_tests = loader.discover(start_dir=start_dir, pattern=pattern) tests.addTests(new_tests) return tests neutron-8.4.0/neutron/tests/functional/common/0000775000567000056710000000000013044373210022641 5ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/tests/functional/common/__init__.py0000664000567000056710000000000013044372736024754 0ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/tests/functional/common/test_utils.py0000664000567000056710000000354513044372760025432 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import os.path import stat from neutron.common import utils from neutron.tests import base class TestReplaceFile(base.BaseTestCase): def setUp(self): super(TestReplaceFile, self).setUp() temp_dir = self.get_default_temp_dir().path self.file_name = os.path.join(temp_dir, "new_file") self.data = "data to copy" def _verify_result(self, file_mode): self.assertTrue(os.path.exists(self.file_name)) with open(self.file_name) as f: content = f.read() self.assertEqual(self.data, content) mode = os.stat(self.file_name).st_mode self.assertEqual(file_mode, stat.S_IMODE(mode)) def test_replace_file_default_mode(self): file_mode = 0o644 utils.replace_file(self.file_name, self.data) self._verify_result(file_mode) def test_replace_file_custom_mode(self): file_mode = 0o722 utils.replace_file(self.file_name, self.data, file_mode) self._verify_result(file_mode) def test_replace_file_custom_mode_twice(self): file_mode = 0o722 utils.replace_file(self.file_name, self.data, file_mode) self.data = "new data to copy" file_mode = 0o777 utils.replace_file(self.file_name, self.data, file_mode) self._verify_result(file_mode) neutron-8.4.0/neutron/tests/functional/pecan_wsgi/0000775000567000056710000000000013044373210023470 5ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/tests/functional/pecan_wsgi/config.py0000664000567000056710000000165113044372736025326 0ustar jenkinsjenkins00000000000000# Copyright (c) 2015 Mirantis, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
# use main app settings except for the port number so testing doesn't need to # listen on the main neutron port app = { 'root': 'neutron.pecan_wsgi.controllers.root.RootController', 'modules': ['neutron.pecan_wsgi'], 'errors': { 400: '/error', '__force_dict__': True } } neutron-8.4.0/neutron/tests/functional/pecan_wsgi/test_controllers.py0000664000567000056710000006013213044372760027462 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from collections import namedtuple import mock from oslo_config import cfg from oslo_policy import policy as oslo_policy from oslo_serialization import jsonutils import pecan from pecan import request from neutron.api import extensions from neutron.api.v2 import attributes from neutron.common import constants as n_const from neutron import context from neutron import manager from neutron.pecan_wsgi.controllers import root as controllers from neutron.plugins.common import constants from neutron import policy from neutron.tests.common import helpers from neutron.tests.functional.pecan_wsgi import test_functional from neutron.tests.functional.pecan_wsgi import utils as pecan_utils _SERVICE_PLUGIN_RESOURCE = 'serviceplugin' _SERVICE_PLUGIN_COLLECTION = _SERVICE_PLUGIN_RESOURCE + 's' _SERVICE_PLUGIN_INDEX_BODY = {_SERVICE_PLUGIN_COLLECTION: []} class FakeServicePluginController(object): resource = _SERVICE_PLUGIN_RESOURCE collection = _SERVICE_PLUGIN_COLLECTION @pecan.expose(generic=True, content_type='application/json', template='json') def index(self): return _SERVICE_PLUGIN_INDEX_BODY class TestRootController(test_functional.PecanFunctionalTest): """Test version listing on root URI.""" base_url = '/' def setUp(self): super(TestRootController, self).setUp() self.setup_service_plugin() self.plugin = manager.NeutronManager.get_plugin() self.ctx = context.get_admin_context() def setup_service_plugin(self): manager.NeutronManager.set_controller_for_resource( _SERVICE_PLUGIN_COLLECTION, FakeServicePluginController()) def _test_method_returns_code(self, method, code=200): api_method = getattr(self.app, method) response = api_method(self.base_url, expect_errors=True) self.assertEqual(response.status_int, code) def test_get(self): response = self.app.get(self.base_url) self.assertEqual(response.status_int, 200) json_body = jsonutils.loads(response.body) versions = json_body.get('versions') self.assertEqual(1, len(versions)) for (attr, value) in controllers.V2Controller.version_info.items(): self.assertIn(attr, versions[0]) self.assertEqual(value, versions[0][attr]) def test_methods(self): self._test_method_returns_code('post') self._test_method_returns_code('patch') self._test_method_returns_code('delete') self._test_method_returns_code('head') self._test_method_returns_code('put') class TestV2Controller(TestRootController): base_url = '/v2.0' def test_get(self): """Verify that the current version info is returned.""" response = self.app.get(self.base_url) self.assertEqual(response.status_int, 200) json_body = jsonutils.loads(response.body)
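# The endpoint returns a single 'version' document, which should # identify itself as v2.0 with CURRENT status.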
self.assertEqual('v2.0', json_body['version']['id']) self.assertEqual('CURRENT', json_body['version']['status']) def test_routing_success(self): """Test dispatch to controller for existing resource.""" response = self.app.get('%s/ports.json' % self.base_url) self.assertEqual(response.status_int, 200) def test_routing_failure(self): """Test dispatch to controller for non-existing resource.""" response = self.app.get('%s/idonotexist.json' % self.base_url, expect_errors=True) self.assertEqual(response.status_int, 404) def test_methods(self): self._test_method_returns_code('post', 405) self._test_method_returns_code('put', 405) self._test_method_returns_code('patch', 405) self._test_method_returns_code('delete', 405) self._test_method_returns_code('head', 405) class TestExtensionsController(TestRootController): """Test extension listing and detail reporting.""" base_url = '/v2.0/extensions' def _get_supported_extensions(self): ext_mgr = extensions.PluginAwareExtensionManager.get_instance() return ext_mgr.get_supported_extension_aliases() def test_index(self): response = self.app.get(self.base_url) self.assertEqual(response.status_int, 200) json_body = jsonutils.loads(response.body) returned_aliases = [ext['alias'] for ext in json_body['extensions']] supported_extensions = self._get_supported_extensions() self.assertEqual(supported_extensions, set(returned_aliases)) def test_get(self): # Fetch any extension supported by plugins test_alias = self._get_supported_extensions().pop() response = self.app.get('%s/%s' % (self.base_url, test_alias)) self.assertEqual(response.status_int, 200) json_body = jsonutils.loads(response.body) self.assertEqual(test_alias, json_body['extension']['alias']) def test_methods(self): self._test_method_returns_code('post', 405) self._test_method_returns_code('put', 405) self._test_method_returns_code('patch', 405) self._test_method_returns_code('delete', 405) self._test_method_returns_code('head', 405) class TestQuotasController(test_functional.PecanFunctionalTest): """Test quota management API controller.""" base_url = '/v2.0/quotas' default_expected_limits = { 'network': 10, 'port': 50, 'subnet': 10} def _verify_limits(self, response, limits): for resource, limit in limits.items(): self.assertEqual(limit, response['quota'][resource]) def _verify_default_limits(self, response): self._verify_limits(response, self.default_expected_limits) def _verify_after_update(self, response, updated_limits): expected_limits = self.default_expected_limits.copy() expected_limits.update(updated_limits) self._verify_limits(response, expected_limits) def test_index_admin(self): # NOTE(salv-orlando): The quota controller has a hardcoded check for # admin-ness for this operation, which is supposed to return quotas for # all tenants.
Such check is "vestigial" from the home-grown WSGI and # shall be removed response = self.app.get('%s.json' % self.base_url, headers={'X-Project-Id': 'admin', 'X-Roles': 'admin'}) self.assertEqual(200, response.status_int) def test_index(self): response = self.app.get('%s.json' % self.base_url, expect_errors=True) self.assertEqual(403, response.status_int) def test_get_admin(self): response = self.app.get('%s/foo.json' % self.base_url, headers={'X-Project-Id': 'admin', 'X-Roles': 'admin'}) self.assertEqual(200, response.status_int) # As quota limits have not been updated, expect default values json_body = jsonutils.loads(response.body) self._verify_default_limits(json_body) def test_get(self): # It is not ok to access another tenant's limits url = '%s/foo.json' % self.base_url response = self.app.get(url, expect_errors=True) self.assertEqual(403, response.status_int) # It is however ok to retrieve your own limits response = self.app.get(url, headers={'X-Project-Id': 'foo'}) self.assertEqual(200, response.status_int) json_body = jsonutils.loads(response.body) self._verify_default_limits(json_body) def test_put_get_delete(self): # PUT and DELETE actions are in the same test as a meaningful DELETE # test would require a put anyway url = '%s/foo.json' % self.base_url response = self.app.put_json(url, params={'quota': {'network': 99}}, headers={'X-Project-Id': 'admin', 'X-Roles': 'admin'}) self.assertEqual(200, response.status_int) json_body = jsonutils.loads(response.body) self._verify_after_update(json_body, {'network': 99}) response = self.app.get(url, headers={'X-Project-Id': 'foo'}) self.assertEqual(200, response.status_int) json_body = jsonutils.loads(response.body) self._verify_after_update(json_body, {'network': 99}) response = self.app.delete(url, headers={'X-Project-Id': 'admin', 'X-Roles': 'admin'}) self.assertEqual(204, response.status_int) # As DELETE does not return a body we need another GET response = self.app.get(url, headers={'X-Project-Id': 'foo'}) self.assertEqual(200, response.status_int) json_body = jsonutils.loads(response.body) self._verify_default_limits(json_body) class TestResourceController(TestRootController): """Test generic controller""" # TODO(salv-orlando): This test case must not explicitly test the 'port' # resource. 
Also it should validate correct plugin/resource association base_url = '/v2.0' def setUp(self): super(TestResourceController, self).setUp() self._gen_port() def _gen_port(self): network_id = self.plugin.create_network(context.get_admin_context(), { 'network': {'name': 'pecannet', 'tenant_id': 'tenid', 'shared': False, 'admin_state_up': True, 'status': 'ACTIVE'}})['id'] self.port = self.plugin.create_port(context.get_admin_context(), { 'port': {'tenant_id': 'tenid', 'network_id': network_id, 'fixed_ips': attributes.ATTR_NOT_SPECIFIED, 'mac_address': '00:11:22:33:44:55', 'admin_state_up': True, 'device_id': 'FF', 'device_owner': 'pecan', 'name': 'pecan'}}) def test_get(self): response = self.app.get('/v2.0/ports.json') self.assertEqual(response.status_int, 200) def test_post(self): response = self.app.post_json( '/v2.0/ports.json', params={'port': {'network_id': self.port['network_id'], 'admin_state_up': True, 'tenant_id': 'tenid'}}, headers={'X-Project-Id': 'tenid'}) self.assertEqual(response.status_int, 201) def test_put(self): response = self.app.put_json('/v2.0/ports/%s.json' % self.port['id'], params={'port': {'name': 'test'}}, headers={'X-Project-Id': 'tenid'}) self.assertEqual(response.status_int, 200) json_body = jsonutils.loads(response.body) self.assertEqual(1, len(json_body)) self.assertIn('port', json_body) self.assertEqual('test', json_body['port']['name']) self.assertEqual('tenid', json_body['port']['tenant_id']) def test_delete(self): response = self.app.delete('/v2.0/ports/%s.json' % self.port['id'], headers={'X-Project-Id': 'tenid'}) self.assertEqual(response.status_int, 204) def test_plugin_initialized(self): self.assertIsNotNone(manager.NeutronManager._instance) def test_methods(self): self._test_method_returns_code('post', 405) self._test_method_returns_code('put', 405) self._test_method_returns_code('patch', 405) self._test_method_returns_code('delete', 405) self._test_method_returns_code('head', 405) class TestRequestProcessing(TestResourceController): def setUp(self): super(TestRequestProcessing, self).setUp() # request.context is thread-local storage so it has to be accessed by # the controller. We capture it here so we can assert on it after # the request finishes. def capture_request_details(*args, **kwargs): self.captured_context = request.context mock.patch('neutron.pecan_wsgi.controllers.resource.' 'CollectionsController.get', side_effect=capture_request_details).start() mock.patch('neutron.pecan_wsgi.controllers.resource.' 'CollectionsController.create', side_effect=capture_request_details).start() mock.patch('neutron.pecan_wsgi.controllers.resource.'
'ItemController.get', side_effect=capture_request_details).start() # TODO(kevinbenton): add context tests for X-Roles etc def test_context_set_in_request(self): self.app.get('/v2.0/ports.json', headers={'X-Project-Id': 'tenant_id'}) self.assertEqual('tenant_id', self.captured_context['neutron_context'].tenant_id) def test_core_resource_identified(self): self.app.get('/v2.0/ports.json') self.assertEqual('port', self.captured_context['resource']) self.assertEqual('ports', self.captured_context['collection']) def test_lookup_identifies_resource_id(self): # We know this will return a 404 but that's not the point, as it is # mocked self.app.get('/v2.0/ports/reina.json') self.assertEqual('port', self.captured_context['resource']) self.assertEqual('ports', self.captured_context['collection']) self.assertEqual('reina', self.captured_context['resource_id']) def test_resource_processing_post(self): self.app.post_json( '/v2.0/ports.json', params={'port': {'network_id': self.port['network_id'], 'name': 'the_port', 'admin_state_up': True}}, headers={'X-Project-Id': 'tenid'}) self.assertEqual('port', self.captured_context['resource']) self.assertEqual('ports', self.captured_context['collection']) resources = self.captured_context['resources'] self.assertEqual(1, len(resources)) self.assertEqual(self.port['network_id'], resources[0]['network_id']) self.assertEqual('the_port', resources[0]['name']) def test_resource_processing_post_bulk(self): self.app.post_json( '/v2.0/ports.json', params={'ports': [{'network_id': self.port['network_id'], 'name': 'the_port_1', 'admin_state_up': True}, {'network_id': self.port['network_id'], 'name': 'the_port_2', 'admin_state_up': True}]}, headers={'X-Project-Id': 'tenid'}) resources = self.captured_context['resources'] self.assertEqual(2, len(resources)) self.assertEqual(self.port['network_id'], resources[0]['network_id']) self.assertEqual('the_port_1', resources[0]['name']) self.assertEqual(self.port['network_id'], resources[1]['network_id']) self.assertEqual('the_port_2', resources[1]['name']) def test_resource_processing_post_unknown_attribute_returns_400(self): response = self.app.post_json( '/v2.0/ports.json', params={'port': {'network_id': self.port['network_id'], 'name': 'the_port', 'alien': 'E.T.', 'admin_state_up': True}}, headers={'X-Project-Id': 'tenid'}, expect_errors=True) self.assertEqual(400, response.status_int) def test_resource_processing_post_validation_error_returns_400(self): response = self.app.post_json( '/v2.0/ports.json', params={'port': {'network_id': self.port['network_id'], 'name': 'the_port', 'admin_state_up': 'invalid_value'}}, headers={'X-Project-Id': 'tenid'}, expect_errors=True) self.assertEqual(400, response.status_int) def test_service_plugin_identified(self): # TODO(kevinbenton): fix the unit test setup to include an l3 plugin self.skipTest("A dummy l3 plugin needs to be setup") self.app.get('/v2.0/routers.json') self.assertEqual('router', self.req_stash['resource_type']) # make sure the l3 plugin was identified as the handler for routers self.assertEqual( manager.NeutronManager.get_service_plugins()['L3_ROUTER_NAT'], self.req_stash['plugin']) def test_service_plugin_uri(self): service_plugin = namedtuple('DummyServicePlugin', 'path_prefix') service_plugin.path_prefix = 'dummy' nm = manager.NeutronManager.get_instance() nm.service_plugins['dummy_sp'] = service_plugin response = self.do_request('/v2.0/dummy/serviceplugins.json') self.assertEqual(200, response.status_int) self.assertEqual(_SERVICE_PLUGIN_INDEX_BODY, response.json_body)
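# NOTE: the member actions exercised below are dispatched as # PUT /v2.0/routers/<router_id>/<action>.json; an unknown action is # expected to return 404 and other HTTP methods on a member action # URL are expected to return 405.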
class TestRouterController(TestResourceController): """Specialized tests for the router resource controller This test class adds tests specific to the router controller in order to verify the 'member_action' functionality, which this controller uses for adding and removing router interfaces. """ def setUp(self): cfg.CONF.set_override( 'service_plugins', ['neutron.services.l3_router.l3_router_plugin.L3RouterPlugin']) super(TestRouterController, self).setUp() plugin = manager.NeutronManager.get_plugin() ctx = context.get_admin_context() service_plugins = manager.NeutronManager.get_service_plugins() l3_plugin = service_plugins[constants.L3_ROUTER_NAT] network_id = pecan_utils.create_network(ctx, plugin)['id'] self.subnet = pecan_utils.create_subnet(ctx, plugin, network_id) self.router = pecan_utils.create_router(ctx, l3_plugin) def test_member_actions_processing(self): response = self.app.put_json( '/v2.0/routers/%s/add_router_interface.json' % self.router['id'], params={'subnet_id': self.subnet['id']}, headers={'X-Project-Id': 'tenid'}) self.assertEqual(200, response.status_int) def test_non_existing_member_action_returns_404(self): response = self.app.put_json( '/v2.0/routers/%s/do_meh.json' % self.router['id'], params={'subnet_id': 'doesitevenmatter'}, headers={'X-Project-Id': 'tenid'}, expect_errors=True) self.assertEqual(404, response.status_int) def test_unsupported_method_member_action(self): response = self.app.post_json( '/v2.0/routers/%s/add_router_interface.json' % self.router['id'], params={'subnet_id': self.subnet['id']}, headers={'X-Project-Id': 'tenid'}, expect_errors=True) self.assertEqual(405, response.status_int) response = self.app.get( '/v2.0/routers/%s/add_router_interface.json' % self.router['id'], headers={'X-Project-Id': 'tenid'}, expect_errors=True) self.assertEqual(405, response.status_int) class TestDHCPAgentShimControllers(test_functional.PecanFunctionalTest): def setUp(self): super(TestDHCPAgentShimControllers, self).setUp() policy.init() policy._ENFORCER.set_rules( oslo_policy.Rules.from_dict( {'get_dhcp-agents': 'role:admin', 'get_dhcp-networks': 'role:admin', 'create_dhcp-networks': 'role:admin', 'delete_dhcp-networks': 'role:admin'}), overwrite=False) plugin = manager.NeutronManager.get_plugin() ctx = context.get_admin_context() self.network = pecan_utils.create_network(ctx, plugin) self.agent = helpers.register_dhcp_agent() # NOTE(blogan): Not sending notifications because this test is for # testing the shim controllers plugin.agent_notifiers[n_const.AGENT_TYPE_DHCP] = None def test_list_dhcp_agents_hosting_network(self): response = self.app.get( '/v2.0/networks/%s/dhcp-agents.json' % self.network['id'], headers={'X-Roles': 'admin'}) self.assertEqual(200, response.status_int) def test_list_networks_on_dhcp_agent(self): response = self.app.get( '/v2.0/agents/%s/dhcp-networks.json' % self.agent.id, headers={'X-Project-Id': 'tenid', 'X-Roles': 'admin'}) self.assertEqual(200, response.status_int) def test_add_remove_dhcp_agent(self): headers = {'X-Project-Id': 'tenid', 'X-Roles': 'admin'} self.app.post_json( '/v2.0/agents/%s/dhcp-networks.json' % self.agent.id, headers=headers, params={'network_id': self.network['id']}) response = self.app.get( '/v2.0/networks/%s/dhcp-agents.json' % self.network['id'], headers=headers) self.assertIn(self.agent.id, [a['id'] for a in response.json['agents']]) self.app.delete('/v2.0/agents/%(a)s/dhcp-networks/%(n)s.json' % { 'a': self.agent.id, 'n': self.network['id']}, headers=headers) response = self.app.get(
'/v2.0/networks/%s/dhcp-agents.json' % self.network['id'], headers=headers) self.assertNotIn(self.agent.id, [a['id'] for a in response.json['agents']]) class TestL3AgentShimControllers(test_functional.PecanFunctionalTest): def setUp(self): cfg.CONF.set_override( 'service_plugins', ['neutron.services.l3_router.l3_router_plugin.L3RouterPlugin']) super(TestL3AgentShimControllers, self).setUp() policy.init() policy._ENFORCER.set_rules( oslo_policy.Rules.from_dict( {'get_l3-agents': 'role:admin', 'get_l3-routers': 'role:admin'}), overwrite=False) ctx = context.get_admin_context() service_plugins = manager.NeutronManager.get_service_plugins() l3_plugin = service_plugins[constants.L3_ROUTER_NAT] self.router = pecan_utils.create_router(ctx, l3_plugin) self.agent = helpers.register_l3_agent() # NOTE(blogan): Not sending notifications because this test is for # testing the shim controllers l3_plugin.agent_notifiers[n_const.AGENT_TYPE_L3] = None def test_list_l3_agents_hosting_router(self): response = self.app.get( '/v2.0/routers/%s/l3-agents.json' % self.router['id'], headers={'X-Roles': 'admin'}) self.assertEqual(200, response.status_int) def test_list_routers_on_l3_agent(self): response = self.app.get( '/v2.0/agents/%s/l3-routers.json' % self.agent.id, headers={'X-Roles': 'admin'}) self.assertEqual(200, response.status_int) def test_add_remove_l3_agent(self): headers = {'X-Project-Id': 'tenid', 'X-Roles': 'admin'} self.app.post_json( '/v2.0/agents/%s/l3-routers.json' % self.agent.id, headers=headers, params={'router_id': self.router['id']}) response = self.app.get( '/v2.0/routers/%s/l3-agents.json' % self.router['id'], headers=headers) self.assertIn(self.agent.id, [a['id'] for a in response.json['agents']]) self.app.delete('/v2.0/agents/%(a)s/l3-routers/%(n)s.json' % { 'a': self.agent.id, 'n': self.router['id']}, headers=headers) response = self.app.get( '/v2.0/routers/%s/l3-agents.json' % self.router['id'], headers=headers) self.assertNotIn(self.agent.id, [a['id'] for a in response.json['agents']]) neutron-8.4.0/neutron/tests/functional/pecan_wsgi/__init__.py0000664000567000056710000000220213044372760025606 0ustar jenkinsjenkins00000000000000# Copyright (c) 2015 Mirantis, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import os from pecan import set_config from pecan.testing import load_test_app from unittest import TestCase __all__ = ['FunctionalTest'] class FunctionalTest(TestCase): """ Used for functional tests where you need to test your literal application and its integration with the framework. """ def setUp(self): self.app = load_test_app(os.path.join( os.path.dirname(__file__), 'config.py' )) def tearDown(self): set_config({}, overwrite=True) neutron-8.4.0/neutron/tests/functional/pecan_wsgi/utils.py0000664000567000056710000000304213044372760025212 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. def create_network(context, plugin): return plugin.create_network( context, {'network': {'name': 'pecannet', 'tenant_id': 'tenid', 'shared': False, 'admin_state_up': True, 'status': 'ACTIVE'}}) def create_subnet(context, plugin, network_id): return plugin.create_subnet( context, {'subnet': {'tenant_id': 'tenid', 'network_id': network_id, 'name': 'pecansub', 'ip_version': 4, 'cidr': '10.20.30.0/24', 'gateway_ip': '10.20.30.1', 'enable_dhcp': True, 'allocation_pools': [ {'start': '10.20.30.2', 'end': '10.20.30.254'}], 'dns_nameservers': [], 'host_routes': []}}) def create_router(context, l3_plugin): return l3_plugin.create_router( context, {'router': {'name': 'pecanrtr', 'tenant_id': 'tenid', 'admin_state_up': True}}) neutron-8.4.0/neutron/tests/functional/pecan_wsgi/test_functional.py0000664000567000056710000001053513044372760027260 0ustar jenkinsjenkins00000000000000# Copyright (c) 2015 Mirantis, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import os import mock from oslo_config import cfg from oslo_utils import uuidutils from pecan import set_config from pecan.testing import load_test_app import testtools from neutron.api import extensions from neutron.common import exceptions as n_exc from neutron.tests.unit import testlib_api class PecanFunctionalTest(testlib_api.SqlTestCase): def setUp(self): self.setup_coreplugin('neutron.plugins.ml2.plugin.Ml2Plugin') super(PecanFunctionalTest, self).setUp() self.addCleanup(extensions.PluginAwareExtensionManager.clear_instance) self.addCleanup(set_config, {}, overwrite=True) self.set_config_overrides() self.setup_app() def setup_app(self): self.app = load_test_app(os.path.join( os.path.dirname(__file__), 'config.py' )) def set_config_overrides(self): cfg.CONF.set_override('auth_strategy', 'noauth') def do_request(self, url, tenant_id=None, admin=False, expect_errors=False): if admin: if not tenant_id: tenant_id = 'admin' headers = {'X-Tenant-Id': tenant_id, 'X-Roles': 'admin'} else: headers = {'X-Tenant-ID': tenant_id or ''} return self.app.get(url, headers=headers, expect_errors=expect_errors) class TestErrors(PecanFunctionalTest): def test_404(self): response = self.app.get('/assert_called_once', expect_errors=True) self.assertEqual(response.status_int, 404) def test_bad_method(self): response = self.app.patch('/v2.0/ports/44.json', expect_errors=True) self.assertEqual(response.status_int, 405) class TestRequestID(PecanFunctionalTest): def test_request_id(self): response = self.app.get('/v2.0/') self.assertIn('x-openstack-request-id', response.headers) self.assertTrue( response.headers['x-openstack-request-id'].startswith('req-')) id_part = response.headers['x-openstack-request-id'].split('req-')[1] self.assertTrue(uuidutils.is_uuid_like(id_part)) class TestKeystoneAuth(PecanFunctionalTest): def set_config_overrides(self): # default auth strategy is keystone so we pass pass def test_auth_enforced(self): response = self.app.get('/v2.0/', expect_errors=True) self.assertEqual(response.status_int, 401) class TestInvalidAuth(PecanFunctionalTest): def setup_app(self): # disable normal app setup since it will fail pass def test_invalid_auth_strategy(self): cfg.CONF.set_override('auth_strategy', 'badvalue') with testtools.ExpectedException(n_exc.InvalidConfigurationOption): load_test_app(os.path.join(os.path.dirname(__file__), 'config.py')) class TestExceptionTranslationHook(PecanFunctionalTest): def test_neutron_notfound_to_webob_exception(self): # this endpoint raises a Neutron notfound exception. make sure it gets # translated into a 404 error with mock.patch( 'neutron.pecan_wsgi.controllers.resource.' 'CollectionsController.get', side_effect=n_exc.NotFound() ): response = self.app.get('/v2.0/ports.json', expect_errors=True) self.assertEqual(response.status_int, 404) def test_unexpected_exception(self): with mock.patch( 'neutron.pecan_wsgi.controllers.resource.' 'CollectionsController.get', side_effect=ValueError('secretpassword') ): response = self.app.get('/v2.0/ports.json', expect_errors=True) self.assertNotIn('secretpassword', response.body) self.assertEqual(response.status_int, 500) neutron-8.4.0/neutron/tests/functional/pecan_wsgi/test_hooks.py0000664000567000056710000005270213044372760026243 0ustar jenkinsjenkins00000000000000# Copyright (c) 2015 Mirantis, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License.
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import mock from oslo_config import cfg from oslo_policy import policy as oslo_policy from oslo_serialization import jsonutils from neutron.api.v2 import attributes from neutron import context from neutron.db.quota import driver as quota_driver from neutron import manager from neutron.pecan_wsgi.controllers import resource from neutron.pecan_wsgi.hooks import policy_enforcement as pe from neutron import policy from neutron.tests.functional.pecan_wsgi import test_functional class TestOwnershipHook(test_functional.PecanFunctionalTest): def test_network_ownership_check(self): net_response = self.app.post_json( '/v2.0/networks.json', params={'network': {'name': 'meh'}}, headers={'X-Project-Id': 'tenid'}) network_id = jsonutils.loads(net_response.body)['network']['id'] port_response = self.app.post_json( '/v2.0/ports.json', params={'port': {'network_id': network_id, 'admin_state_up': True}}, headers={'X-Project-Id': 'tenid'}) self.assertEqual(201, port_response.status_int) class TestQuotaEnforcementHook(test_functional.PecanFunctionalTest): def test_quota_enforcement_single(self): ctx = context.get_admin_context() quota_driver.DbQuotaDriver.update_quota_limit( ctx, 'tenid', 'network', 1) # There is enough headroom for creating a network response = self.app.post_json( '/v2.0/networks.json', params={'network': {'name': 'meh'}}, headers={'X-Project-Id': 'tenid'}) self.assertEqual(response.status_int, 201) # But a second request will fail response = self.app.post_json( '/v2.0/networks.json', params={'network': {'name': 'meh-2'}}, headers={'X-Project-Id': 'tenid'}, expect_errors=True) self.assertEqual(response.status_int, 409) def test_quota_enforcement_bulk_request(self): ctx = context.get_admin_context() quota_driver.DbQuotaDriver.update_quota_limit( ctx, 'tenid', 'network', 3) # There is enough headroom for a bulk request creating 2 networks response = self.app.post_json( '/v2.0/networks.json', params={'networks': [ {'name': 'meh1'}, {'name': 'meh2'}]}, headers={'X-Project-Id': 'tenid'}) self.assertEqual(response.status_int, 201) # But it won't be possible to create 2 more networks... response = self.app.post_json( '/v2.0/networks.json', params={'networks': [ {'name': 'meh3'}, {'name': 'meh4'}]}, headers={'X-Project-Id': 'tenid'}, expect_errors=True) self.assertEqual(response.status_int, 409) class TestPolicyEnforcementHook(test_functional.PecanFunctionalTest): FAKE_RESOURCE = { 'mehs': { 'id': {'allow_post': False, 'allow_put': False, 'is_visible': True, 'primary_key': True}, 'attr': {'allow_post': True, 'allow_put': True, 'is_visible': True, 'default': ''}, 'restricted_attr': {'allow_post': True, 'allow_put': True, 'is_visible': True, 'default': ''}, 'tenant_id': {'allow_post': True, 'allow_put': False, 'required_by_policy': True, 'validate': {'type:string': attributes.TENANT_ID_MAX_LEN}, 'is_visible': True} } } def setUp(self): # Create a controller for a fake resource. 
This will make the tests
        # independent of the evolution of the API (so if someone changes the
        # API or the default policies, there is no risk of breaking these
        # tests, or at least so we hope).
        super(TestPolicyEnforcementHook, self).setUp()
        self.mock_plugin = mock.Mock()
        attributes.RESOURCE_ATTRIBUTE_MAP.update(self.FAKE_RESOURCE)
        attributes.PLURALS['mehs'] = 'meh'
        manager.NeutronManager.set_plugin_for_resource('meh',
                                                       self.mock_plugin)
        fake_controller = resource.CollectionsController('mehs', 'meh')
        manager.NeutronManager.set_controller_for_resource(
            'mehs', fake_controller)
        # Inject policies for the fake resource
        policy.init()
        policy._ENFORCER.set_rules(
            oslo_policy.Rules.from_dict(
                {'create_meh': '',
                 'update_meh': 'rule:admin_only',
                 'delete_meh': 'rule:admin_only',
                 'get_meh': 'rule:admin_only or field:mehs:id=xxx',
                 'get_meh:restricted_attr': 'rule:admin_only'}),
            overwrite=False)

    def test_before_on_create_authorized(self):
        # Mock a return value for a hypothetical create operation
        self.mock_plugin.create_meh.return_value = {
            'id': 'xxx',
            'attr': 'meh',
            'restricted_attr': '',
            'tenant_id': 'tenid'}
        response = self.app.post_json('/v2.0/mehs.json',
                                      params={'meh': {'attr': 'meh'}},
                                      headers={'X-Project-Id': 'tenid'})
        # We expect this operation to succeed
        self.assertEqual(201, response.status_int)
        self.assertEqual(0, self.mock_plugin.get_meh.call_count)
        self.assertEqual(1, self.mock_plugin.create_meh.call_count)

    def test_before_on_put_not_authorized(self):
        # The policy hook here should load the resource, and therefore we
        # must mock a get response
        self.mock_plugin.get_meh.return_value = {
            'id': 'xxx',
            'attr': 'meh',
            'restricted_attr': '',
            'tenant_id': 'tenid'}
        # The policy engine should trigger an exception in 'before', and
        # the plugin method should not be called at all
        response = self.app.put_json('/v2.0/mehs/xxx.json',
                                     params={'meh': {'attr': 'meh'}},
                                     headers={'X-Project-Id': 'tenid'},
                                     expect_errors=True)
        self.assertEqual(403, response.status_int)
        self.assertEqual(1, self.mock_plugin.get_meh.call_count)
        self.assertEqual(0, self.mock_plugin.update_meh.call_count)

    def test_before_on_delete_not_authorized(self):
        # The policy hook here should load the resource, and therefore we
        # must mock a get response
        self.mock_plugin.delete_meh.return_value = None
        self.mock_plugin.get_meh.return_value = {
            'id': 'xxx',
            'attr': 'meh',
            'restricted_attr': '',
            'tenant_id': 'tenid'}
        # The policy engine should trigger an exception in 'before', and
        # the plugin method should not be called
        response = self.app.delete_json('/v2.0/mehs/xxx.json',
                                        headers={'X-Project-Id': 'tenid'},
                                        expect_errors=True)
        self.assertEqual(403, response.status_int)
        self.assertEqual(1, self.mock_plugin.get_meh.call_count)
        self.assertEqual(0, self.mock_plugin.delete_meh.call_count)

    def test_after_on_get_not_authorized(self):
        # The GET test policy will deny access to anything whose id is not
        # 'xxx', so the following request should be forbidden
        self.mock_plugin.get_meh.return_value = {
            'id': 'yyy',
            'attr': 'meh',
            'restricted_attr': '',
            'tenant_id': 'tenid'}
        # The policy engine should trigger an exception in 'after', and the
        # plugin method should be called
        response = self.app.get('/v2.0/mehs/yyy.json',
                                headers={'X-Project-Id': 'tenid'},
                                expect_errors=True)
        self.assertEqual(403, response.status_int)
        self.assertEqual(1, self.mock_plugin.get_meh.call_count)

    def test_after_on_get_excludes_admin_attribute(self):
        self.mock_plugin.get_meh.return_value = {
            'id': 'xxx',
            'attr': 'meh',
            'restricted_attr': '',
            'tenant_id': 'tenid'}
        response =
self.app.get('/v2.0/mehs/xxx.json', headers={'X-Project-Id': 'tenid'}) self.assertEqual(200, response.status_int) json_response = jsonutils.loads(response.body) self.assertNotIn('restricted_attr', json_response['meh']) def test_after_on_list_excludes_admin_attribute(self): self.mock_plugin.get_mehs.return_value = [{ 'id': 'xxx', 'attr': 'meh', 'restricted_attr': '', 'tenant_id': 'tenid'}] response = self.app.get('/v2.0/mehs', headers={'X-Project-Id': 'tenid'}) self.assertEqual(200, response.status_int) json_response = jsonutils.loads(response.body) self.assertNotIn('restricted_attr', json_response['mehs'][0]) class TestDHCPNotifierHook(test_functional.PecanFunctionalTest): def setUp(self): # the DHCP notifier needs to be mocked so that correct operations can # be easily validated. For the purpose of this test it is indeed not # necessary that the notification is actually received and processed by # the agent patcher = mock.patch('neutron.api.rpc.agentnotifiers.' 'dhcp_rpc_agent_api.DhcpAgentNotifyAPI.notify') self.mock_notifier = patcher.start() super(TestDHCPNotifierHook, self).setUp() def test_dhcp_notifications_disabled(self): cfg.CONF.set_override('dhcp_agent_notification', False) self.app.post_json( '/v2.0/networks.json', params={'network': {'name': 'meh'}}, headers={'X-Project-Id': 'tenid'}) self.assertEqual(0, self.mock_notifier.call_count) def test_get_does_not_trigger_notification(self): self.do_request('/v2.0/networks', tenant_id='tenid') self.assertEqual(0, self.mock_notifier.call_count) def test_post_put_delete_triggers_notification(self): req_headers = {'X-Project-Id': 'tenid', 'X-Roles': 'admin'} response = self.app.post_json( '/v2.0/networks.json', params={'network': {'name': 'meh'}}, headers=req_headers) self.assertEqual(201, response.status_int) json_body = jsonutils.loads(response.body) self.assertEqual(1, self.mock_notifier.call_count) self.assertEqual(mock.call(mock.ANY, json_body, 'network.create.end'), self.mock_notifier.mock_calls[-1]) network_id = json_body['network']['id'] response = self.app.put_json( '/v2.0/networks/%s.json' % network_id, params={'network': {'name': 'meh-2'}}, headers=req_headers) self.assertEqual(200, response.status_int) json_body = jsonutils.loads(response.body) self.assertEqual(2, self.mock_notifier.call_count) self.assertEqual(mock.call(mock.ANY, json_body, 'network.update.end'), self.mock_notifier.mock_calls[-1]) response = self.app.delete( '/v2.0/networks/%s.json' % network_id, headers=req_headers) self.assertEqual(204, response.status_int) self.assertEqual(3, self.mock_notifier.call_count) # No need to validate data content sent to the notifier as it's just # going to load the object from the database self.assertEqual(mock.call(mock.ANY, mock.ANY, 'network.delete.end'), self.mock_notifier.mock_calls[-1]) def test_bulk_create_triggers_notifications(self): req_headers = {'X-Project-Id': 'tenid', 'X-Roles': 'admin'} response = self.app.post_json( '/v2.0/networks.json', params={'networks': [{'name': 'meh_1'}, {'name': 'meh_2'}]}, headers=req_headers) self.assertEqual(201, response.status_int) json_body = jsonutils.loads(response.body) item_1 = json_body['networks'][0] item_2 = json_body['networks'][1] self.assertEqual(2, self.mock_notifier.call_count) self.mock_notifier.assert_has_calls( [mock.call(mock.ANY, {'network': item_1}, 'network.create.end'), mock.call(mock.ANY, {'network': item_2}, 'network.create.end')]) class TestNovaNotifierHook(test_functional.PecanFunctionalTest): def setUp(self): patcher = 
mock.patch('neutron.pecan_wsgi.hooks.notifier.NotifierHook.' '_nova_notify') self.mock_notifier = patcher.start() super(TestNovaNotifierHook, self).setUp() def test_nova_notifications_disabled(self): cfg.CONF.set_override('notify_nova_on_port_data_changes', False) self.app.post_json( '/v2.0/networks.json', params={'network': {'name': 'meh'}}, headers={'X-Project-Id': 'tenid'}) self.assertFalse(self.mock_notifier.called) def test_post_put_delete_triggers_notification(self): req_headers = {'X-Project-Id': 'tenid', 'X-Roles': 'admin'} response = self.app.post_json( '/v2.0/networks.json', params={'network': {'name': 'meh'}}, headers=req_headers) self.assertEqual(201, response.status_int) json_body = jsonutils.loads(response.body) self.mock_notifier.assert_called_once_with('create', 'network', {}, json_body) self.mock_notifier.reset_mock() network_id = json_body['network']['id'] # NOTE(kevinbenton): the original passed into the notifier does # not contain all of the fields of the object. Only those required # by the policy engine are included. orig = pe.fetch_resource(context.get_admin_context(), 'network', network_id) response = self.app.put_json( '/v2.0/networks/%s.json' % network_id, params={'network': {'name': 'meh-2'}}, headers=req_headers) self.assertEqual(200, response.status_int) json_body = jsonutils.loads(response.body) self.mock_notifier.assert_called_once_with('update', 'network', orig, json_body) self.mock_notifier.reset_mock() orig = pe.fetch_resource(context.get_admin_context(), 'network', network_id) response = self.app.delete( '/v2.0/networks/%s.json' % network_id, headers=req_headers) self.assertEqual(204, response.status_int) # No need to validate data content sent to the notifier as it's just # going to load the object from the database self.mock_notifier.assert_called_once_with('delete', 'network', {}, {'network': orig}) def test_bulk_create_triggers_notifications(self): req_headers = {'X-Project-Id': 'tenid', 'X-Roles': 'admin'} response = self.app.post_json( '/v2.0/networks.json', params={'networks': [{'name': 'meh_1'}, {'name': 'meh_2'}]}, headers=req_headers) self.assertEqual(201, response.status_int) json_body = jsonutils.loads(response.body) item_1 = json_body['networks'][0] item_2 = json_body['networks'][1] self.assertEqual( [mock.call('create', 'network', {}, {'network': item_1}), mock.call('create', 'network', {}, {'network': item_2})], self.mock_notifier.mock_calls) class TestMetricsNotifierHook(test_functional.PecanFunctionalTest): def setUp(self): patcher = mock.patch('neutron.pecan_wsgi.hooks.notifier.NotifierHook.' 
'_notifier') self.mock_notifier = patcher.start().info super(TestMetricsNotifierHook, self).setUp() def test_post_put_delete_triggers_notification(self): req_headers = {'X-Project-Id': 'tenid', 'X-Roles': 'admin'} payload = {'network': {'name': 'meh'}} response = self.app.post_json( '/v2.0/networks.json', params=payload, headers=req_headers) self.assertEqual(201, response.status_int) json_body = jsonutils.loads(response.body) self.assertEqual( [mock.call(mock.ANY, 'network.create.start', payload), mock.call(mock.ANY, 'network.create.end', json_body)], self.mock_notifier.mock_calls) self.mock_notifier.reset_mock() network_id = json_body['network']['id'] payload = {'network': {'name': 'meh-2'}} response = self.app.put_json( '/v2.0/networks/%s.json' % network_id, params=payload, headers=req_headers) self.assertEqual(200, response.status_int) json_body = jsonutils.loads(response.body) # id should be in payload sent to notifier payload['id'] = network_id self.assertEqual( [mock.call(mock.ANY, 'network.update.start', payload), mock.call(mock.ANY, 'network.update.end', json_body)], self.mock_notifier.mock_calls) self.mock_notifier.reset_mock() response = self.app.delete( '/v2.0/networks/%s.json' % network_id, headers=req_headers) self.assertEqual(204, response.status_int) payload = {'network_id': network_id} self.assertEqual( [mock.call(mock.ANY, 'network.delete.start', payload), mock.call(mock.ANY, 'network.delete.end', payload)], self.mock_notifier.mock_calls) def test_bulk_create_triggers_notification(self): req_headers = {'X-Project-Id': 'tenid', 'X-Roles': 'admin'} payload = {'networks': [{'name': 'meh_1'}, {'name': 'meh_2'}]} response = self.app.post_json( '/v2.0/networks.json', params=payload, headers=req_headers) self.assertEqual(201, response.status_int) json_body = jsonutils.loads(response.body) self.assertEqual(2, self.mock_notifier.call_count) self.mock_notifier.assert_has_calls( [mock.call(mock.ANY, 'network.create.start', payload), mock.call(mock.ANY, 'network.create.end', json_body)]) def test_bad_create_doesnt_emit_end(self): req_headers = {'X-Project-Id': 'tenid', 'X-Roles': 'admin'} payload = {'network': {'name': 'meh'}} plugin = manager.NeutronManager.get_plugin() with mock.patch.object(plugin, 'create_network', side_effect=ValueError): response = self.app.post_json( '/v2.0/networks.json', params=payload, headers=req_headers, expect_errors=True) self.assertEqual(500, response.status_int) self.assertEqual( [mock.call(mock.ANY, 'network.create.start', mock.ANY)], self.mock_notifier.mock_calls) def test_bad_update_doesnt_emit_end(self): req_headers = {'X-Project-Id': 'tenid', 'X-Roles': 'admin'} payload = {'network': {'name': 'meh'}} response = self.app.post_json( '/v2.0/networks.json', params=payload, headers=req_headers, expect_errors=True) self.assertEqual(201, response.status_int) json_body = jsonutils.loads(response.body) self.mock_notifier.reset_mock() plugin = manager.NeutronManager.get_plugin() with mock.patch.object(plugin, 'update_network', side_effect=ValueError): response = self.app.put_json( '/v2.0/networks/%s.json' % json_body['network']['id'], params=payload, headers=req_headers, expect_errors=True) self.assertEqual(500, response.status_int) self.assertEqual( [mock.call(mock.ANY, 'network.update.start', mock.ANY)], self.mock_notifier.mock_calls) def test_bad_delete_doesnt_emit_end(self): req_headers = {'X-Project-Id': 'tenid', 'X-Roles': 'admin'} payload = {'network': {'name': 'meh'}} response = self.app.post_json( '/v2.0/networks.json', params=payload, 
headers=req_headers, expect_errors=True) self.assertEqual(201, response.status_int) json_body = jsonutils.loads(response.body) self.mock_notifier.reset_mock() plugin = manager.NeutronManager.get_plugin() with mock.patch.object(plugin, 'delete_network', side_effect=ValueError): response = self.app.delete( '/v2.0/networks/%s.json' % json_body['network']['id'], headers=req_headers, expect_errors=True) self.assertEqual(500, response.status_int) self.assertEqual( [mock.call(mock.ANY, 'network.delete.start', mock.ANY)], self.mock_notifier.mock_calls) neutron-8.4.0/neutron/tests/functional/sanity/0000775000567000056710000000000013044373210022660 5ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/tests/functional/sanity/__init__.py0000664000567000056710000000000013044372736024773 0ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/tests/functional/sanity/test_sanity.py0000664000567000056710000000540313044372760025613 0ustar jenkinsjenkins00000000000000# Copyright (c) 2014 OpenStack Foundation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron.cmd.sanity import checks from neutron.tests import base from neutron.tests.functional import base as functional_base class SanityTestCase(base.BaseTestCase): """Sanity checks that do not require root access. Tests that just call checks.some_function() are to ensure that neutron-sanity-check runs without throwing an exception, as in the case where someone modifies the API without updating the check script. """ def setUp(self): super(SanityTestCase, self).setUp() def test_nova_notify_runs(self): checks.nova_notify_supported() def test_dnsmasq_version(self): checks.dnsmasq_version_supported() def test_dibbler_version(self): checks.dibbler_version_supported() def test_ipset_support(self): checks.ipset_supported() def test_ip6tables_support(self): checks.ip6tables_supported() class SanityTestCaseRoot(functional_base.BaseSudoTestCase): """Sanity checks that require root access. Tests that just call checks.some_function() are to ensure that neutron-sanity-check runs without throwing an exception, as in the case where someone modifies the API without updating the check script. 
""" def test_ovs_vxlan_support_runs(self): checks.ovs_vxlan_supported() def test_ovs_geneve_support_runs(self): checks.ovs_geneve_supported() def test_iproute2_vxlan_support_runs(self): checks.iproute2_vxlan_supported() def test_ovs_patch_support_runs(self): checks.patch_supported() def test_arp_responder_runs(self): checks.arp_responder_supported() def test_arp_header_match_runs(self): checks.arp_header_match_supported() def test_icmpv6_header_match_runs(self): checks.icmpv6_header_match_supported() def test_vf_management_runs(self): checks.vf_management_supported() def test_namespace_root_read_detection_runs(self): checks.netns_read_requires_helper() def test_ovsdb_native_supported_runs(self): checks.ovsdb_native_supported() def test_keepalived_ipv6_support(self): checks.keepalived_ipv6_supported() neutron-8.4.0/neutron/tests/functional/agent/0000775000567000056710000000000013044373210022447 5ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/tests/functional/agent/ovsdb/0000775000567000056710000000000013044373210023564 5ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/tests/functional/agent/ovsdb/test_impl_idl.py0000664000567000056710000000577013044372760027010 0ustar jenkinsjenkins00000000000000# Copyright (c) 2016 Red Hat, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import mock from neutron.agent.common import ovs_lib from neutron.agent.ovsdb import api from neutron.agent.ovsdb import impl_idl from neutron.tests import base as test_base from neutron.tests.common import net_helpers from neutron.tests.functional import base # NOTE(twilson) functools.partial does not work for this def trpatch(*args, **kwargs): def wrapped(fn): return mock.patch.object(impl_idl.NeutronOVSDBTransaction, *args, **kwargs)(fn) return wrapped class ImplIdlTestCase(base.BaseSudoTestCase): def setUp(self): super(ImplIdlTestCase, self).setUp() self.config(group='OVS', ovsdb_interface='native') self.ovs = ovs_lib.BaseOVS() self.brname = test_base.get_rand_device_name(net_helpers.BR_PREFIX) # Make sure exceptions pass through by calling do_post_commit directly mock.patch.object( impl_idl.NeutronOVSDBTransaction, "post_commit", side_effect=impl_idl.NeutronOVSDBTransaction.do_post_commit, autospec=True).start() def _add_br(self): # NOTE(twilson) we will be raising exceptions with add_br, so schedule # cleanup before that. 
self.addCleanup(self.ovs.delete_bridge, self.brname) ovsdb = self.ovs.ovsdb with ovsdb.transaction(check_error=True) as tr: tr.add(ovsdb.add_br(self.brname)) return tr def _add_br_and_test(self): self._add_br() ofport = self.ovs.db_get_val("Interface", self.brname, "ofport") self.assertTrue(int(ofport)) self.assertTrue(ofport > -1) def test_post_commit_vswitchd_completed_no_failures(self): self._add_br_and_test() @trpatch("vswitchd_has_completed", return_value=True) @trpatch("post_commit_failed_interfaces", return_value=["failed_if1"]) @trpatch("timeout_exceeded", return_value=False) def test_post_commit_vswitchd_completed_failures(self, *args): self.assertRaises(impl_idl.VswitchdInterfaceAddException, self._add_br) @trpatch("vswitchd_has_completed", return_value=False) def test_post_commit_vswitchd_incomplete_timeout(self, *args): # Due to timing issues we may rarely hit the global timeout, which # raises RuntimeError to match the vsctl implementation self.ovs.vsctl_timeout = 3 self.assertRaises((api.TimeoutException, RuntimeError), self._add_br) neutron-8.4.0/neutron/tests/functional/agent/ovsdb/__init__.py0000664000567000056710000000000013044372736025677 0ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/tests/functional/agent/l3/0000775000567000056710000000000013044373210022765 5ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/tests/functional/agent/l3/test_namespace_manager.py0000664000567000056710000000674013044372736030047 0ustar jenkinsjenkins00000000000000# Copyright (c) 2015 Rackspace # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import mock from oslo_utils import uuidutils from neutron.agent.l3 import dvr_snat_ns from neutron.agent.l3 import namespace_manager from neutron.agent.l3 import namespaces from neutron.agent.linux import ip_lib from neutron.tests.functional import base _uuid = uuidutils.generate_uuid class NamespaceManagerTestFramework(base.BaseSudoTestCase): def setUp(self): super(NamespaceManagerTestFramework, self).setUp() self.agent_conf = mock.MagicMock() self.metadata_driver_mock = mock.Mock() self.namespace_manager = namespace_manager.NamespaceManager( self.agent_conf, driver=None, metadata_driver=self.metadata_driver_mock) def _create_namespace(self, router_id, ns_class): namespace = ns_class(router_id, self.agent_conf, driver=None, use_ipv6=False) namespace.create() self.addCleanup(self._delete_namespace, namespace) return namespace.name def _delete_namespace(self, namespace): try: namespace.delete() except RuntimeError as e: # If the namespace didn't exist when delete was attempted, mission # accomplished. 
Otherwise, re-raise the exception if 'No such file or directory' not in str(e): raise e def _namespace_exists(self, namespace): ip = ip_lib.IPWrapper(namespace=namespace) return ip.netns.exists(namespace) class NamespaceManagerTestCase(NamespaceManagerTestFramework): def test_namespace_manager(self): router_id = _uuid() router_id_to_delete = _uuid() to_keep = set() to_delete = set() to_retrieve = set() to_keep.add(self._create_namespace(router_id, namespaces.RouterNamespace)) to_keep.add(self._create_namespace(router_id, dvr_snat_ns.SnatNamespace)) to_delete.add(self._create_namespace(router_id_to_delete, dvr_snat_ns.SnatNamespace)) to_retrieve = to_keep | to_delete with mock.patch.object(namespace_manager.NamespaceManager, 'list_all', return_value=to_retrieve): with self.namespace_manager as ns_manager: for ns_name in to_keep: id_to_keep = ns_manager.get_prefix_and_id(ns_name)[1] ns_manager.keep_router(id_to_keep) for ns_name in to_keep: self.assertTrue(self._namespace_exists(ns_name)) for ns_name in to_delete: (self.metadata_driver_mock.destroy_monitored_metadata_proxy. assert_called_once_with(mock.ANY, router_id_to_delete, self.agent_conf)) self.assertFalse(self._namespace_exists(ns_name)) neutron-8.4.0/neutron/tests/functional/agent/l3/test_ha_router.py0000664000567000056710000003530713044372760026407 0ustar jenkinsjenkins00000000000000# Copyright (c) 2014 Red Hat, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
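# Usage sketch (illustrative only): NamespaceManager is a context manager;
# namespaces that are not marked with keep_router() before the block exits
# are deleted on __exit__. Assuming a manager built as in the framework
# above and a hypothetical ``known_router_ids`` set:
#
#     with ns_manager as nm:
#         for ns_name in nm.list_all():
#             _prefix, router_id = nm.get_prefix_and_id(ns_name)
#             if router_id in known_router_ids:
#                 nm.keep_router(router_id)
#     # anything not kept is cleaned up when the block exits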
import copy import mock import six from neutron.agent.l3 import agent as neutron_l3_agent from neutron.agent.l3 import namespaces from neutron.agent.linux import ip_lib from neutron.agent.linux import utils from neutron.common import constants as l3_constants from neutron.common import utils as common_utils from neutron.tests.common import l3_test_common from neutron.tests.common import net_helpers from neutron.tests.functional.agent.l3 import framework class L3HATestCase(framework.L3AgentTestFramework): def test_ha_router_update_floatingip_statuses(self): self._test_update_floatingip_statuses( self.generate_router_info(enable_ha=True)) def test_keepalived_state_change_notification(self): enqueue_mock = mock.patch.object( self.agent, 'enqueue_state_change').start() router_info = self.generate_router_info(enable_ha=True) router = self.manage_router(self.agent, router_info) utils.wait_until_true(lambda: router.ha_state == 'master') self.fail_ha_router(router) utils.wait_until_true(lambda: router.ha_state == 'backup') utils.wait_until_true(lambda: enqueue_mock.call_count == 3) calls = [args[0] for args in enqueue_mock.call_args_list] self.assertEqual((router.router_id, 'backup'), calls[0]) self.assertEqual((router.router_id, 'master'), calls[1]) self.assertEqual((router.router_id, 'backup'), calls[2]) def _expected_rpc_report(self, expected): calls = (args[0][1] for args in self.agent.plugin_rpc.update_ha_routers_states.call_args_list) # Get the last state reported for each router actual_router_states = {} for call in calls: for router_id, state in six.iteritems(call): actual_router_states[router_id] = state return actual_router_states == expected def test_keepalived_state_change_bulk_rpc(self): router_info = self.generate_router_info(enable_ha=True) router1 = self.manage_router(self.agent, router_info) self.fail_ha_router(router1) router_info = self.generate_router_info(enable_ha=True) router2 = self.manage_router(self.agent, router_info) utils.wait_until_true(lambda: router1.ha_state == 'backup') utils.wait_until_true(lambda: router2.ha_state == 'master') utils.wait_until_true( lambda: self._expected_rpc_report( {router1.router_id: 'standby', router2.router_id: 'active'})) def test_ha_router_lifecycle(self): router_info = self._router_lifecycle(enable_ha=True) # ensure everything was cleaned up self._router_lifecycle(enable_ha=True, router_info=router_info) def test_conntrack_disassociate_fip_ha_router(self): self._test_conntrack_disassociate_fip(ha=True) def test_ipv6_ha_router_lifecycle(self): self._router_lifecycle(enable_ha=True, ip_version=6) def test_ipv6_ha_router_lifecycle_with_no_gw_subnet(self): self.agent.conf.set_override('ipv6_gateway', 'fe80::f816:3eff:fe2e:1') self._router_lifecycle(enable_ha=True, ip_version=6, v6_ext_gw_with_sub=False) def test_ipv6_ha_router_lifecycle_with_no_gw_subnet_for_router_advts(self): # Verify that router gw interface is configured to receive Router # Advts from upstream router when no external gateway is configured. 
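        # (When the gateway port has no IPv6 subnet and ipv6_gateway is not
        # set, the agent is expected to leave the gateway device open to
        # SLAAC/router advertisements instead of programming a static v6
        # default route.)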
self._router_lifecycle(enable_ha=True, dual_stack=True, v6_ext_gw_with_sub=False) def test_keepalived_configuration(self): router_info = self.generate_router_info(enable_ha=True) router = self.manage_router(self.agent, router_info) expected = self.get_expected_keepalive_configuration(router) self.assertEqual(expected, router.keepalived_manager.get_conf_on_disk()) # Add a new FIP and change the GW IP address router.router = copy.deepcopy(router.router) existing_fip = '19.4.4.2' new_fip = '19.4.4.3' self._add_fip(router, new_fip) subnet_id = framework._uuid() fixed_ips = [{'ip_address': '19.4.4.10', 'prefixlen': 24, 'subnet_id': subnet_id}] subnets = [{'id': subnet_id, 'cidr': '19.4.4.0/24', 'gateway_ip': '19.4.4.5'}] router.router['gw_port']['subnets'] = subnets router.router['gw_port']['fixed_ips'] = fixed_ips router.process(self.agent) # Get the updated configuration and assert that both FIPs are in, # and that the GW IP address was updated. new_config = router.keepalived_manager.config.get_config_str() old_gw = '0.0.0.0/0 via 19.4.4.1' new_gw = '0.0.0.0/0 via 19.4.4.5' old_external_device_ip = '19.4.4.4' new_external_device_ip = '19.4.4.10' self.assertIn(existing_fip, new_config) self.assertIn(new_fip, new_config) self.assertNotIn(old_gw, new_config) self.assertIn(new_gw, new_config) external_port = router.get_ex_gw_port() external_device_name = router.get_external_device_name( external_port['id']) self.assertNotIn('%s/24 dev %s' % (old_external_device_ip, external_device_name), new_config) self.assertIn('%s/24 dev %s' % (new_external_device_ip, external_device_name), new_config) def test_ha_router_conf_on_restarted_agent(self): router_info = self.generate_router_info(enable_ha=True) router1 = self.manage_router(self.agent, router_info) self._add_fip(router1, '192.168.111.12') restarted_agent = neutron_l3_agent.L3NATAgentWithStateReport( self.agent.host, self.agent.conf) self.manage_router(restarted_agent, router1.router) utils.wait_until_true(lambda: self.floating_ips_configured(router1)) self.assertIn( router1._get_primary_vip(), self._get_addresses_on_device( router1.ns_name, router1.get_ha_device_name())) def test_ha_router_ipv6_radvd_status(self): router_info = self.generate_router_info(ip_version=6, enable_ha=True) router1 = self.manage_router(self.agent, router_info) utils.wait_until_true(lambda: router1.ha_state == 'master') utils.wait_until_true(lambda: router1.radvd.enabled) def _check_lla_status(router, expected): internal_devices = router.router[l3_constants.INTERFACE_KEY] for device in internal_devices: lladdr = ip_lib.get_ipv6_lladdr(device['mac_address']) exists = ip_lib.device_exists_with_ips_and_mac( router.get_internal_device_name(device['id']), [lladdr], device['mac_address'], router.ns_name) self.assertEqual(expected, exists) _check_lla_status(router1, True) device_name = router1.get_ha_device_name() ha_device = ip_lib.IPDevice(device_name, namespace=router1.ns_name) ha_device.link.set_down() utils.wait_until_true(lambda: router1.ha_state == 'backup') utils.wait_until_true(lambda: not router1.radvd.enabled, timeout=10) _check_lla_status(router1, False) def test_ha_router_process_ipv6_subnets_to_existing_port(self): router_info = self.generate_router_info(enable_ha=True, ip_version=6) router = self.manage_router(self.agent, router_info) def verify_ip_in_keepalived_config(router, iface): config = router.keepalived_manager.config.get_config_str() ip_cidrs = common_utils.fixed_ip_cidrs(iface['fixed_ips']) for ip_addr in ip_cidrs: self.assertIn(ip_addr, config) 
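        # The helper above is reused after each reconfiguration below to
        # check that every fixed IP CIDR of the interface appears in the
        # generated keepalived configuration.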
interface_id = router.router[l3_constants.INTERFACE_KEY][0]['id'] slaac = l3_constants.IPV6_SLAAC slaac_mode = {'ra_mode': slaac, 'address_mode': slaac} # Add a second IPv6 subnet to the router internal interface. self._add_internal_interface_by_subnet(router.router, count=1, ip_version=6, ipv6_subnet_modes=[slaac_mode], interface_id=interface_id) router.process(self.agent) utils.wait_until_true(lambda: router.ha_state == 'master') # Verify that router internal interface is present and is configured # with IP address from both the subnets. internal_iface = router.router[l3_constants.INTERFACE_KEY][0] self.assertEqual(2, len(internal_iface['fixed_ips'])) self._assert_internal_devices(router) # Verify that keepalived config is properly updated. verify_ip_in_keepalived_config(router, internal_iface) # Remove one subnet from the router internal iface interfaces = copy.deepcopy(router.router.get( l3_constants.INTERFACE_KEY, [])) fixed_ips, subnets = [], [] fixed_ips.append(interfaces[0]['fixed_ips'][0]) subnets.append(interfaces[0]['subnets'][0]) interfaces[0].update({'fixed_ips': fixed_ips, 'subnets': subnets}) router.router[l3_constants.INTERFACE_KEY] = interfaces router.process(self.agent) # Verify that router internal interface has a single ipaddress internal_iface = router.router[l3_constants.INTERFACE_KEY][0] self.assertEqual(1, len(internal_iface['fixed_ips'])) self._assert_internal_devices(router) # Verify that keepalived config is properly updated. verify_ip_in_keepalived_config(router, internal_iface) def test_delete_external_gateway_on_standby_router(self): router_info = self.generate_router_info(enable_ha=True) router = self.manage_router(self.agent, router_info) self.fail_ha_router(router) utils.wait_until_true(lambda: router.ha_state == 'backup') # The purpose of the test is to simply make sure no exception is raised port = router.get_ex_gw_port() interface_name = router.get_external_device_name(port['id']) router.external_gateway_removed(port, interface_name) def test_removing_floatingip_immediately(self): router_info = self.generate_router_info(enable_ha=True) router = self.manage_router(self.agent, router_info) ex_gw_port = router.get_ex_gw_port() interface_name = router.get_external_device_interface_name(ex_gw_port) utils.wait_until_true(lambda: router.ha_state == 'master') self._add_fip(router, '172.168.1.20', fixed_address='10.0.0.3') router.process(self.agent) router.router[l3_constants.FLOATINGIP_KEY] = [] # The purpose of the test is to simply make sure no exception is raised # Because router.process will consume the FloatingIpSetupException, # call the configure_fip_addresses directly here router.configure_fip_addresses(interface_name) def test_ha_port_status_update(self): router_info = self.generate_router_info(enable_ha=True) router_info[l3_constants.HA_INTERFACE_KEY]['status'] = ( l3_constants.PORT_STATUS_DOWN) router1 = self.manage_router(self.agent, router_info) utils.wait_until_true(lambda: router1.ha_state == 'backup') router1.router[l3_constants.HA_INTERFACE_KEY]['status'] = ( l3_constants.PORT_STATUS_ACTIVE) self.agent._process_updated_router(router1.router) utils.wait_until_true(lambda: router1.ha_state == 'master') def test_ha_router_namespace_has_ip_nonlocal_bind_disabled(self): router_info = self.generate_router_info(enable_ha=True) router = self.manage_router(self.agent, router_info) try: ip_nonlocal_bind_value = ip_lib.get_ip_nonlocal_bind( router.router_namespace.name) except RuntimeError as rte: stat_message = 'cannot stat 
/proc/sys/net/ipv4/ip_nonlocal_bind' if stat_message in str(rte): raise self.skipException( "This kernel doesn't support %s in network namespaces." % ( ip_lib.IP_NONLOCAL_BIND)) raise self.assertEqual(0, ip_nonlocal_bind_value) class L3HATestFailover(framework.L3AgentTestFramework): NESTED_NAMESPACE_SEPARATOR = '@' def setUp(self): super(L3HATestFailover, self).setUp() conf = self._configure_agent('agent2') self.failover_agent = neutron_l3_agent.L3NATAgentWithStateReport( 'agent2', conf) br_int_1 = self._get_agent_ovs_integration_bridge(self.agent) br_int_2 = self._get_agent_ovs_integration_bridge(self.failover_agent) veth1, veth2 = self.useFixture(net_helpers.VethFixture()).ports br_int_1.add_port(veth1.name) br_int_2.add_port(veth2.name) def test_ha_router_failover(self): router_info = self.generate_router_info(enable_ha=True) get_ns_name = mock.patch.object( namespaces.RouterNamespace, '_get_ns_name').start() get_ns_name.return_value = "%s%s%s" % ( 'qrouter-' + router_info['id'], self.NESTED_NAMESPACE_SEPARATOR, self.agent.host) router1 = self.manage_router(self.agent, router_info) router_info_2 = copy.deepcopy(router_info) router_info_2[l3_constants.HA_INTERFACE_KEY] = ( l3_test_common.get_ha_interface(ip='169.254.192.2', mac='22:22:22:22:22:22')) get_ns_name.return_value = "%s%s%s" % ( namespaces.RouterNamespace._get_ns_name(router_info_2['id']), self.NESTED_NAMESPACE_SEPARATOR, self.failover_agent.host) router2 = self.manage_router(self.failover_agent, router_info_2) utils.wait_until_true(lambda: router1.ha_state == 'master') utils.wait_until_true(lambda: router2.ha_state == 'backup') self.fail_ha_router(router1) utils.wait_until_true(lambda: router2.ha_state == 'master') utils.wait_until_true(lambda: router1.ha_state == 'backup') class LinuxBridgeL3HATestCase(L3HATestCase): INTERFACE_DRIVER = 'neutron.agent.linux.interface.BridgeInterfaceDriver' neutron-8.4.0/neutron/tests/functional/agent/l3/test_legacy_router.py0000664000567000056710000004242113044372760027256 0ustar jenkinsjenkins00000000000000# Copyright (c) 2014 Red Hat, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import copy import mock from neutron.agent.l3 import namespace_manager from neutron.agent.l3 import namespaces from neutron.agent.linux import ip_lib from neutron.callbacks import events from neutron.callbacks import registry from neutron.callbacks import resources from neutron.common import constants as l3_constants from neutron.tests.common import machine_fixtures from neutron.tests.common import net_helpers from neutron.tests.functional.agent.l3 import framework class L3AgentTestCase(framework.L3AgentTestFramework): def test_agent_notifications_for_router_events(self): """Test notifications for router create, update, and delete. Make sure that when the agent sends notifications of router events for router create, update, and delete, that the correct handler is called with the right resource, event, and router information. 
""" event_handler = mock.Mock() registry.subscribe(event_handler, resources.ROUTER, events.BEFORE_CREATE) registry.subscribe(event_handler, resources.ROUTER, events.AFTER_CREATE) registry.subscribe(event_handler, resources.ROUTER, events.BEFORE_UPDATE) registry.subscribe(event_handler, resources.ROUTER, events.AFTER_UPDATE) registry.subscribe(event_handler, resources.ROUTER, events.BEFORE_DELETE) registry.subscribe(event_handler, resources.ROUTER, events.AFTER_DELETE) router_info = self.generate_router_info(enable_ha=False) router = self.manage_router(self.agent, router_info) self.agent._process_updated_router(router.router) self._delete_router(self.agent, router.router_id) expected_calls = [ mock.call('router', 'before_create', self.agent, router=router), mock.call('router', 'after_create', self.agent, router=router), mock.call('router', 'before_update', self.agent, router=router), mock.call('router', 'after_update', self.agent, router=router), mock.call('router', 'before_delete', self.agent, router=router), mock.call('router', 'after_delete', self.agent, router=router)] event_handler.assert_has_calls(expected_calls) def test_legacy_router_update_floatingip_statuses(self): self._test_update_floatingip_statuses( self.generate_router_info(enable_ha=False)) def test_legacy_router_lifecycle(self): self._router_lifecycle(enable_ha=False, dual_stack=True) def test_legacy_router_lifecycle_with_no_gateway_subnet(self): self.agent.conf.set_override('ipv6_gateway', 'fe80::f816:3eff:fe2e:1') self._router_lifecycle(enable_ha=False, dual_stack=True, v6_ext_gw_with_sub=False) def test_legacy_router_gateway_update_to_none(self): router_info = self.generate_router_info(False) router = self.manage_router(self.agent, router_info) gw_port = router.get_ex_gw_port() interface_name = router.get_external_device_name(gw_port['id']) device = ip_lib.IPDevice(interface_name, namespace=router.ns_name) self.assertIn('gateway', device.route.get_gateway()) # Make this copy, so that the agent will think there is change in # external gateway port. 
router.ex_gw_port = copy.deepcopy(router.ex_gw_port) for subnet in gw_port['subnets']: subnet['gateway_ip'] = None router.process(self.agent) self.assertIsNone(device.route.get_gateway()) def test_legacy_router_ns_rebuild(self): router_info = self.generate_router_info(False) router = self.manage_router(self.agent, router_info) gw_port = router.router['gw_port'] gw_inf_name = router.get_external_device_name(gw_port['id']) gw_device = ip_lib.IPDevice(gw_inf_name, namespace=router.ns_name) router_ports = [gw_device] for i_port in router_info.get(l3_constants.INTERFACE_KEY, []): interface_name = router.get_internal_device_name(i_port['id']) router_ports.append( ip_lib.IPDevice(interface_name, namespace=router.ns_name)) namespaces.Namespace.delete(router.router_namespace) # l3 agent should be able to rebuild the ns when it is deleted self.manage_router(self.agent, router_info) # Assert the router ports are there in namespace self.assertTrue(all([port.exists() for port in router_ports])) self._delete_router(self.agent, router.router_id) def test_conntrack_disassociate_fip_legacy_router(self): self._test_conntrack_disassociate_fip(ha=False) def _test_periodic_sync_routers_task(self, routers_to_keep, routers_deleted, routers_deleted_during_resync): ns_names_to_retrieve = set() deleted_routers_info = [] for r in routers_to_keep: ri = self.manage_router(self.agent, r) ns_names_to_retrieve.add(ri.ns_name) for r in routers_deleted + routers_deleted_during_resync: ri = self.manage_router(self.agent, r) deleted_routers_info.append(ri) ns_names_to_retrieve.add(ri.ns_name) mocked_get_router_ids = self.mock_plugin_api.get_router_ids mocked_get_router_ids.return_value = [r['id'] for r in routers_to_keep + routers_deleted_during_resync] mocked_get_routers = self.mock_plugin_api.get_routers mocked_get_routers.return_value = (routers_to_keep + routers_deleted_during_resync) # clear agent router_info as it will be after restart self.agent.router_info = {} # Synchronize the agent with the plug-in with mock.patch.object(namespace_manager.NamespaceManager, 'list_all', return_value=ns_names_to_retrieve): self.agent.periodic_sync_routers_task(self.agent.context) # Mock the plugin RPC API so a known external network id is returned # when the router updates are processed by the agent external_network_id = framework._uuid() self.mock_plugin_api.get_external_network_id.return_value = ( external_network_id) # Plug external_gateway_info in the routers that are not going to be # deleted by the agent when it processes the updates. 
Otherwise, # _process_router_if_compatible in the agent fails for r in routers_to_keep: r['external_gateway_info'] = {'network_id': external_network_id} # while sync updates are still in the queue, higher priority # router_deleted events may be added there as well for r in routers_deleted_during_resync: self.agent.router_deleted(self.agent.context, r['id']) # make sure all events are processed while not self.agent._queue._queue.empty(): self.agent._process_router_update() for r in routers_to_keep: self.assertIn(r['id'], self.agent.router_info) self.assertTrue(self._namespace_exists(namespaces.NS_PREFIX + r['id'])) for ri in deleted_routers_info: self.assertNotIn(ri.router_id, self.agent.router_info) self._assert_router_does_not_exist(ri) def test_periodic_sync_routers_task(self): routers_to_keep = [] for i in range(2): routers_to_keep.append(self.generate_router_info(False)) self._test_periodic_sync_routers_task(routers_to_keep, routers_deleted=[], routers_deleted_during_resync=[]) def test_periodic_sync_routers_task_routers_deleted_while_agent_down(self): routers_to_keep = [] routers_deleted = [] for i in range(2): routers_to_keep.append(self.generate_router_info(False)) for i in range(2): routers_deleted.append(self.generate_router_info(False)) self._test_periodic_sync_routers_task(routers_to_keep, routers_deleted, routers_deleted_during_resync=[]) def test_periodic_sync_routers_task_routers_deleted_while_agent_sync(self): routers_to_keep = [] routers_deleted_during_resync = [] for i in range(2): routers_to_keep.append(self.generate_router_info(False)) for i in range(2): routers_deleted_during_resync.append( self.generate_router_info(False)) self._test_periodic_sync_routers_task( routers_to_keep, routers_deleted=[], routers_deleted_during_resync=routers_deleted_during_resync) def _setup_fip_with_fixed_ip_from_same_subnet(self, enable_snat): """Setup 2 FakeMachines from same subnet, one with floatingip associated. """ router_info = self.generate_router_info(enable_ha=False, enable_snat=enable_snat) router = self.manage_router(self.agent, router_info) router_ip_cidr = self._port_first_ip_cidr(router.internal_ports[0]) router_ip = router_ip_cidr.partition('/')[0] br_int = framework.get_ovs_bridge( self.agent.conf.ovs_integration_bridge) src_machine, dst_machine = self.useFixture( machine_fixtures.PeerMachines( br_int, net_helpers.increment_ip_cidr(router_ip_cidr), router_ip)).machines dst_fip = '19.4.4.10' router.router[l3_constants.FLOATINGIP_KEY] = [] self._add_fip(router, dst_fip, fixed_address=dst_machine.ip) router.process(self.agent) return src_machine, dst_machine, dst_fip def test_fip_connection_from_same_subnet(self): '''Test connection to floatingip which is associated with fixed_ip on the same subnet of the source fixed_ip. In other words it confirms that return packets surely go through the router. 
'''
        src_machine, dst_machine, dst_fip = (
            self._setup_fip_with_fixed_ip_from_same_subnet(enable_snat=True))
        protocol_port = net_helpers.get_free_namespace_port(
            l3_constants.PROTO_NAME_TCP, dst_machine.namespace)
        # client sends to fip
        netcat = net_helpers.NetcatTester(
            src_machine.namespace, dst_machine.namespace, dst_fip,
            protocol_port, protocol=net_helpers.NetcatTester.TCP)
        self.addCleanup(netcat.stop_processes)
        self.assertTrue(netcat.test_connectivity())

    def test_ping_floatingip_reply_with_floatingip(self):
        src_machine, _, dst_fip = (
            self._setup_fip_with_fixed_ip_from_same_subnet(enable_snat=False))
        # Verify that the ping replies with the fip
        ns_ip_wrapper = ip_lib.IPWrapper(src_machine.namespace)
        result = ns_ip_wrapper.netns.execute(
            ['ping', '-c', 1, '-W', 5, dst_fip])
        self._assert_ping_reply_from_expected_address(result, dst_fip)

    def _setup_address_scope(self, internal_address_scope1,
                             internal_address_scope2, gw_address_scope=None):
        router_info = self.generate_router_info(enable_ha=False,
                                                num_internal_ports=2)
        address_scope1 = {
            str(l3_constants.IP_VERSION_4): internal_address_scope1}
        address_scope2 = {
            str(l3_constants.IP_VERSION_4): internal_address_scope2}
        if gw_address_scope:
            router_info['gw_port']['address_scopes'] = {
                str(l3_constants.IP_VERSION_4): gw_address_scope}
        router_info[l3_constants.INTERFACE_KEY][0]['address_scopes'] = (
            address_scope1)
        router_info[l3_constants.INTERFACE_KEY][1]['address_scopes'] = (
            address_scope2)
        router = self.manage_router(self.agent, router_info)
        router_ip_cidr1 = self._port_first_ip_cidr(router.internal_ports[0])
        router_ip1 = router_ip_cidr1.partition('/')[0]
        router_ip_cidr2 = self._port_first_ip_cidr(router.internal_ports[1])
        router_ip2 = router_ip_cidr2.partition('/')[0]
        br_int = framework.get_ovs_bridge(
            self.agent.conf.ovs_integration_bridge)
        test_machine1 = self.useFixture(
            machine_fixtures.FakeMachine(
                br_int,
                net_helpers.increment_ip_cidr(router_ip_cidr1),
                router_ip1))
        test_machine2 = self.useFixture(
            machine_fixtures.FakeMachine(
                br_int,
                net_helpers.increment_ip_cidr(router_ip_cidr2),
                router_ip2))
        return test_machine1, test_machine2, router

    def test_connection_from_same_address_scope(self):
        test_machine1, test_machine2, _ = self._setup_address_scope(
            'scope1', 'scope1')
        # Internal networks that are in the same address scope can connect
        # to each other
        net_helpers.assert_ping(test_machine1.namespace, test_machine2.ip, 5)
        net_helpers.assert_ping(test_machine2.namespace, test_machine1.ip, 5)

    def test_connection_from_diff_address_scope(self):
        test_machine1, test_machine2, _ = self._setup_address_scope(
            'scope1', 'scope2')
        # Internal networks that are not in the same address scope should
        # not reach each other
        test_machine1.assert_no_ping(test_machine2.ip)
        test_machine2.assert_no_ping(test_machine1.ip)

    def test_fip_connection_for_address_scope(self):
        (machine_same_scope, machine_diff_scope,
            router) = self._setup_address_scope('scope1', 'scope2', 'scope1')
        router.router[l3_constants.FLOATINGIP_KEY] = []
        fip_same_scope = '19.4.4.10'
        self._add_fip(router, fip_same_scope,
                      fixed_address=machine_same_scope.ip,
                      fixed_ip_address_scope='scope1')
        fip_diff_scope = '19.4.4.11'
        self._add_fip(router, fip_diff_scope,
                      fixed_address=machine_diff_scope.ip,
                      fixed_ip_address_scope='scope2')
        router.process(self.agent)
        br_ex = framework.get_ovs_bridge(
            self.agent.conf.external_network_bridge)
        src_machine = self.useFixture(
            machine_fixtures.FakeMachine(br_ex, '19.4.4.12/24'))
        # A floating IP should work regardless of the address scope
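        # (DNAT to the fixed address happens inside the router, so external
        # reachability of a floating IP does not depend on which scope the
        # backing subnet belongs to.)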
net_helpers.assert_ping(src_machine.namespace, fip_same_scope, 5) net_helpers.assert_ping(src_machine.namespace, fip_diff_scope, 5) def test_direct_route_for_address_scope(self): (machine_same_scope, machine_diff_scope, router) = self._setup_address_scope('scope1', 'scope2', 'scope1') gw_port = router.get_ex_gw_port() gw_ip = self._port_first_ip_cidr(gw_port).partition('/')[0] br_ex = framework.get_ovs_bridge( self.agent.conf.external_network_bridge) src_machine = self.useFixture( machine_fixtures.FakeMachine(br_ex, '19.4.4.12/24', gw_ip)) # For the internal networks that are in the same address scope as # external network, they can directly route to external network net_helpers.assert_ping( src_machine.namespace, machine_same_scope.ip, 5) # For the internal networks that are not in the same address scope as # external networks. SNAT will be used. Direct route will not work # here. src_machine.assert_no_ping(machine_diff_scope.ip) def test_connection_from_diff_address_scope_with_fip(self): (machine_same_scope, machine_diff_scope, router) = self._setup_address_scope('scope1', 'scope2', 'scope1') router.router[l3_constants.FLOATINGIP_KEY] = [] fip = '19.4.4.11' self._add_fip(router, fip, fixed_address=machine_diff_scope.ip, fixed_ip_address_scope='scope2') router.process(self.agent) # For the internal networks that are in the same address scope as # external network, they should be able to reach the floating ip net_helpers.assert_ping(machine_same_scope.namespace, fip, 5) # For the port with fip, it should be able to reach the internal # networks that are in the same address scope as external network net_helpers.assert_ping(machine_diff_scope.namespace, machine_same_scope.ip, 5) neutron-8.4.0/neutron/tests/functional/agent/l3/__init__.py0000664000567000056710000000000013044372736025100 0ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/tests/functional/agent/l3/test_metadata_proxy.py0000664000567000056710000001315013044372736027433 0ustar jenkinsjenkins00000000000000# All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
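# The module below exercises the metadata path end to end. Rough shape of
# the flow a test asserts (illustrative only; not actual helper names):
#
#     vm namespace --curl--> http://169.254.169.254 (proxy in router ns)
#         proxy adds X-Forwarded-For / X-Neutron-Router-Id headers
#         --unix socket--> fake metadata server --> response back to the vm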
import os.path import time import webob import webob.dec import webob.exc from neutron.agent.linux import dhcp from neutron.agent.linux import utils from neutron.tests.common import machine_fixtures from neutron.tests.common import net_helpers from neutron.tests.functional.agent.l3 import framework from neutron.tests.functional.agent.linux import helpers METADATA_REQUEST_TIMEOUT = 60 METADATA_REQUEST_SLEEP = 5 class MetadataFakeProxyHandler(object): def __init__(self, status): self.status = status @webob.dec.wsgify() def __call__(self, req): return webob.Response(status=self.status) class MetadataL3AgentTestCase(framework.L3AgentTestFramework): SOCKET_MODE = 0o644 def _create_metadata_fake_server(self, status): server = utils.UnixDomainWSGIServer('metadata-fake-server') self.addCleanup(server.stop) # NOTE(cbrandily): TempDir fixture creates a folder with 0o700 # permissions but metadata_proxy_socket folder must be readable by all # users self.useFixture( helpers.RecursivePermDirFixture( os.path.dirname(self.agent.conf.metadata_proxy_socket), 0o555)) server.start(MetadataFakeProxyHandler(status), self.agent.conf.metadata_proxy_socket, workers=0, backlog=4096, mode=self.SOCKET_MODE) def _query_metadata_proxy(self, machine): url = 'http://%(host)s:%(port)s' % {'host': dhcp.METADATA_DEFAULT_IP, 'port': dhcp.METADATA_PORT} cmd = 'curl', '--max-time', METADATA_REQUEST_TIMEOUT, '-D-', url i = 0 CONNECTION_REFUSED_TIMEOUT = METADATA_REQUEST_TIMEOUT // 2 while i <= CONNECTION_REFUSED_TIMEOUT: try: raw_headers = machine.execute(cmd) break except RuntimeError as e: if 'Connection refused' in str(e): time.sleep(METADATA_REQUEST_SLEEP) i += METADATA_REQUEST_SLEEP else: self.fail('metadata proxy unreachable ' 'on %s before timeout' % url) if i > CONNECTION_REFUSED_TIMEOUT: self.fail('Timed out waiting metadata proxy to become available') return raw_headers.splitlines()[0] def test_access_to_metadata_proxy(self): """Test access to the l3-agent metadata proxy. The test creates: * A l3-agent metadata service: * A router (which creates a metadata proxy in the router namespace), * A fake metadata server * A "client" namespace (simulating a vm) with a port on router internal subnet. The test queries from the "client" namespace the metadata proxy on http://169.254.169.254 and asserts that the metadata proxy added the X-Forwarded-For and X-Neutron-Router-Id headers to the request and forwarded the http request to the fake metadata server and the response to the "client" namespace. """ router_info = self.generate_router_info(enable_ha=False) router = self.manage_router(self.agent, router_info) self._create_metadata_fake_server(webob.exc.HTTPOk.code) # Create and configure client namespace router_ip_cidr = self._port_first_ip_cidr(router.internal_ports[0]) br_int = framework.get_ovs_bridge( self.agent.conf.ovs_integration_bridge) machine = self.useFixture( machine_fixtures.FakeMachine( br_int, net_helpers.increment_ip_cidr(router_ip_cidr), router_ip_cidr.partition('/')[0])) # Query metadata proxy firstline = self._query_metadata_proxy(machine) # Check status code self.assertIn(str(webob.exc.HTTPOk.code), firstline.split()) class UnprivilegedUserMetadataL3AgentTestCase(MetadataL3AgentTestCase): """Test metadata proxy with least privileged user. The least privileged user has uid=65534 and is commonly named 'nobody' but not always, that's why we use its uid. 
""" SOCKET_MODE = 0o664 def setUp(self): super(UnprivilegedUserMetadataL3AgentTestCase, self).setUp() self.agent.conf.set_override('metadata_proxy_user', '65534') self.agent.conf.set_override('metadata_proxy_watch_log', False) class UnprivilegedUserGroupMetadataL3AgentTestCase(MetadataL3AgentTestCase): """Test metadata proxy with least privileged user/group. The least privileged user has uid=65534 and is commonly named 'nobody' but not always, that's why we use its uid. Its group has gid=65534 and is commonly named 'nobody' or 'nogroup', that's why we use its gid. """ SOCKET_MODE = 0o666 def setUp(self): super(UnprivilegedUserGroupMetadataL3AgentTestCase, self).setUp() self.agent.conf.set_override('metadata_proxy_user', '65534') self.agent.conf.set_override('metadata_proxy_group', '65534') self.agent.conf.set_override('metadata_proxy_watch_log', False) neutron-8.4.0/neutron/tests/functional/agent/l3/framework.py0000664000567000056710000005520013044372760025347 0ustar jenkinsjenkins00000000000000# Copyright (c) 2014 Red Hat, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import copy import functools import mock import netaddr from oslo_config import cfg from oslo_log import log as logging from oslo_utils import uuidutils import testtools from neutron.agent.common import config as agent_config from neutron.agent.common import ovs_lib from neutron.agent.l3 import agent as neutron_l3_agent from neutron.agent import l3_agent as l3_agent_main from neutron.agent.linux import external_process from neutron.agent.linux import ip_lib from neutron.agent.linux import utils from neutron.common import config as common_config from neutron.common import constants as l3_constants from neutron.common import utils as common_utils from neutron.tests.common import l3_test_common from neutron.tests.common import net_helpers from neutron.tests.functional import base _uuid = uuidutils.generate_uuid def get_ovs_bridge(br_name): return ovs_lib.OVSBridge(br_name) class L3AgentTestFramework(base.BaseSudoTestCase): INTERFACE_DRIVER = 'neutron.agent.linux.interface.OVSInterfaceDriver' def setUp(self): super(L3AgentTestFramework, self).setUp() self.mock_plugin_api = mock.patch( 'neutron.agent.l3.agent.L3PluginApi').start().return_value mock.patch('neutron.agent.rpc.PluginReportStateAPI').start() self.conf = self._configure_agent('agent1') self.agent = neutron_l3_agent.L3NATAgentWithStateReport('agent1', self.conf) def _get_config_opts(self): config = cfg.ConfigOpts() config.register_opts(common_config.core_opts) config.register_opts(common_config.core_cli_opts) logging.register_options(config) agent_config.register_process_monitor_opts(config) return config def _configure_agent(self, host, agent_mode='dvr_snat'): conf = self._get_config_opts() l3_agent_main.register_opts(conf) conf.set_override('interface_driver', self.INTERFACE_DRIVER) br_int = self.useFixture(net_helpers.OVSBridgeFixture()).bridge br_ex = self.useFixture(net_helpers.OVSBridgeFixture()).bridge conf.set_override('ovs_integration_bridge', 
br_int.br_name) conf.set_override('external_network_bridge', br_ex.br_name) temp_dir = self.get_new_temp_dir() get_temp_file_path = functools.partial(self.get_temp_file_path, root=temp_dir) conf.set_override('state_path', temp_dir.path) # NOTE(cbrandily): log_file or log_dir must be set otherwise # metadata_proxy_watch_log has no effect conf.set_override('log_file', get_temp_file_path('log_file')) conf.set_override('metadata_proxy_socket', get_temp_file_path('metadata_proxy')) conf.set_override('ha_confs_path', get_temp_file_path('ha_confs')) conf.set_override('external_pids', get_temp_file_path('external/pids')) conf.set_override('host', host) conf.set_override('agent_mode', agent_mode) return conf def _get_agent_ovs_integration_bridge(self, agent): return get_ovs_bridge(agent.conf.ovs_integration_bridge) def generate_router_info(self, enable_ha, ip_version=4, extra_routes=True, enable_fip=True, enable_snat=True, num_internal_ports=1, dual_stack=False, v6_ext_gw_with_sub=True): if ip_version == 6 and not dual_stack: enable_snat = False enable_fip = False extra_routes = False return l3_test_common.prepare_router_data(ip_version=ip_version, enable_snat=enable_snat, num_internal_ports=( num_internal_ports), enable_floating_ip=enable_fip, enable_ha=enable_ha, extra_routes=extra_routes, dual_stack=dual_stack, v6_ext_gw_with_sub=( v6_ext_gw_with_sub)) def _test_conntrack_disassociate_fip(self, ha): '''Test that conntrack immediately drops stateful connection that uses floating IP once it's disassociated. ''' router_info = self.generate_router_info(enable_ha=ha) router = self.manage_router(self.agent, router_info) port = net_helpers.get_free_namespace_port(l3_constants.PROTO_NAME_TCP, router.ns_name) client_address = '19.4.4.3' server_address = '35.4.0.4' def clean_fips(router): router.router[l3_constants.FLOATINGIP_KEY] = [] clean_fips(router) self._add_fip(router, client_address, fixed_address=server_address) router.process(self.agent) router_ns = ip_lib.IPWrapper(namespace=router.ns_name) netcat = net_helpers.NetcatTester( router.ns_name, router.ns_name, client_address, port, protocol=net_helpers.NetcatTester.TCP) self.addCleanup(netcat.stop_processes) def assert_num_of_conntrack_rules(n): out = router_ns.netns.execute(["conntrack", "-L", "--orig-src", client_address]) self.assertEqual( n, len([line for line in out.strip().split('\n') if line])) if ha: utils.wait_until_true(lambda: router.ha_state == 'master') with self.assert_max_execution_time(100): assert_num_of_conntrack_rules(0) self.assertTrue(netcat.test_connectivity()) assert_num_of_conntrack_rules(1) clean_fips(router) router.process(self.agent) assert_num_of_conntrack_rules(0) with testtools.ExpectedException(RuntimeError): netcat.test_connectivity() def _test_update_floatingip_statuses(self, router_info): router = self.manage_router(self.agent, router_info) rpc = self.agent.plugin_rpc.update_floatingip_statuses self.assertTrue(rpc.called) # Assert that every defined FIP is updated via RPC expected_fips = set([ (fip['id'], l3_constants.FLOATINGIP_STATUS_ACTIVE) for fip in router.router[l3_constants.FLOATINGIP_KEY]]) call = [args[0] for args in rpc.call_args_list][0] actual_fips = set( [(fip_id, status) for fip_id, status in call[2].items()]) self.assertEqual(expected_fips, actual_fips) def _gateway_check(self, gateway_ip, external_device): expected_gateway = gateway_ip ip_vers = netaddr.IPAddress(expected_gateway).version existing_gateway = (external_device.route.get_gateway( ip_version=ip_vers).get('gateway')) 
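        # When the namespace has a default route, route.get_gateway()
        # returns a mapping along the lines of {'gateway': '19.4.4.1'},
        # so existing_gateway ends up holding just the nexthop address.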
self.assertEqual(expected_gateway, existing_gateway) def _assert_ha_device(self, router): def ha_router_dev_name_getter(not_used): return router.get_ha_device_name() self.assertTrue(self.device_exists_with_ips_and_mac( router.router[l3_constants.HA_INTERFACE_KEY], ha_router_dev_name_getter, router.ns_name)) def _assert_gateway(self, router, v6_ext_gw_with_sub=True): external_port = router.get_ex_gw_port() external_device_name = router.get_external_device_name( external_port['id']) external_device = ip_lib.IPDevice(external_device_name, namespace=router.ns_name) for subnet in external_port['subnets']: self._gateway_check(subnet['gateway_ip'], external_device) if not v6_ext_gw_with_sub: self._gateway_check(self.agent.conf.ipv6_gateway, external_device) def _assert_external_device(self, router): external_port = router.get_ex_gw_port() self.assertTrue(self.device_exists_with_ips_and_mac( external_port, router.get_external_device_name, router.ns_name)) def _router_lifecycle(self, enable_ha, ip_version=4, dual_stack=False, v6_ext_gw_with_sub=True, router_info=None): router_info = router_info or self.generate_router_info( enable_ha, ip_version, dual_stack=dual_stack, v6_ext_gw_with_sub=(v6_ext_gw_with_sub)) return_copy = copy.deepcopy(router_info) router = self.manage_router(self.agent, router_info) # Add multiple-IPv6-prefix internal router port slaac = l3_constants.IPV6_SLAAC slaac_mode = {'ra_mode': slaac, 'address_mode': slaac} subnet_modes = [slaac_mode] * 2 self._add_internal_interface_by_subnet(router.router, count=2, ip_version=6, ipv6_subnet_modes=subnet_modes) router.process(self.agent) if enable_ha: port = router.get_ex_gw_port() interface_name = router.get_external_device_name(port['id']) self._assert_no_ip_addresses_on_interface(router.ns_name, interface_name) utils.wait_until_true(lambda: router.ha_state == 'master') # Keepalived notifies of a state transition when it starts, # not when it ends. Thus, we have to wait until keepalived finishes # configuring everything. We verify this by waiting until the last # device has an IP address. device = router.router[l3_constants.INTERFACE_KEY][-1] device_exists = functools.partial( self.device_exists_with_ips_and_mac, device, router.get_internal_device_name, router.ns_name) utils.wait_until_true(device_exists) self.assertTrue(self._namespace_exists(router.ns_name)) utils.wait_until_true( lambda: self._metadata_proxy_exists(self.agent.conf, router)) self._assert_internal_devices(router) self._assert_external_device(router) if not (enable_ha and (ip_version == 6 or dual_stack)): # Note(SridharG): enable the assert_gateway for IPv6 once # keepalived on Ubuntu14.04 (i.e., check-neutron-dsvm-functional # platform) is updated to 1.2.10 (or above). # For more details: https://review.openstack.org/#/c/151284/ self._assert_gateway(router, v6_ext_gw_with_sub) self.assertTrue(self.floating_ips_configured(router)) self._assert_snat_chains(router) self._assert_floating_ip_chains(router) self._assert_iptables_rules_converged(router) self._assert_extra_routes(router) ip_versions = [4, 6] if (ip_version == 6 or dual_stack) else [4] self._assert_onlink_subnet_routes(router, ip_versions) self._assert_metadata_chains(router) # Verify router gateway interface is configured to receive Router Advts # when IPv6 is enabled and no IPv6 gateway is configured. 
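        # accept_ra=2 makes the kernel honour Router Advertisements on the
        # device even though forwarding is enabled in the namespace, e.g.
        # (device name illustrative):
        #
        #     $ sysctl -b net.ipv6.conf.qg-28e5da01-9c.accept_ra
        #     2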
if router.use_ipv6 and not v6_ext_gw_with_sub: if not self.agent.conf.ipv6_gateway: external_port = router.get_ex_gw_port() external_device_name = router.get_external_device_name( external_port['id']) ip_wrapper = ip_lib.IPWrapper(namespace=router.ns_name) ra_state = ip_wrapper.netns.execute(['sysctl', '-b', 'net.ipv6.conf.%s.accept_ra' % external_device_name]) self.assertEqual('2', ra_state) if enable_ha: self._assert_ha_device(router) self.assertTrue(router.keepalived_manager.get_process().active) self._delete_router(self.agent, router.router_id) self._assert_interfaces_deleted_from_ovs() self._assert_router_does_not_exist(router) if enable_ha: self.assertFalse(router.keepalived_manager.get_process().active) return return_copy def manage_router(self, agent, router): self.addCleanup(agent._safe_router_removed, router['id']) agent._process_added_router(router) return agent.router_info[router['id']] def _delete_router(self, agent, router_id): agent._router_removed(router_id) def _add_fip(self, router, fip_address, fixed_address='10.0.0.2', host=None, fixed_ip_address_scope=None): fip = {'id': _uuid(), 'port_id': _uuid(), 'floating_ip_address': fip_address, 'fixed_ip_address': fixed_address, 'host': host, 'fixed_ip_address_scope': fixed_ip_address_scope} router.router[l3_constants.FLOATINGIP_KEY].append(fip) def _add_internal_interface_by_subnet(self, router, count=1, ip_version=4, ipv6_subnet_modes=None, interface_id=None): return l3_test_common.router_append_subnet(router, count, ip_version, ipv6_subnet_modes, interface_id) def _namespace_exists(self, namespace): ip = ip_lib.IPWrapper(namespace=namespace) return ip.netns.exists(namespace) def _metadata_proxy_exists(self, conf, router): pm = external_process.ProcessManager( conf, router.router_id, router.ns_name) return pm.active def device_exists_with_ips_and_mac(self, expected_device, name_getter, namespace): ip_cidrs = common_utils.fixed_ip_cidrs(expected_device['fixed_ips']) return ip_lib.device_exists_with_ips_and_mac( name_getter(expected_device['id']), ip_cidrs, expected_device['mac_address'], namespace) @staticmethod def _port_first_ip_cidr(port): fixed_ip = port['fixed_ips'][0] return common_utils.ip_to_cidr(fixed_ip['ip_address'], fixed_ip['prefixlen']) def get_device_mtu(self, target_device, name_getter, namespace): device = ip_lib.IPDevice(name_getter(target_device), namespace) return device.link.mtu def get_expected_keepalive_configuration(self, router): ha_device_name = router.get_ha_device_name() external_port = router.get_ex_gw_port() ex_port_ipv6 = ip_lib.get_ipv6_lladdr(external_port['mac_address']) external_device_name = router.get_external_device_name( external_port['id']) external_device_cidr = self._port_first_ip_cidr(external_port) internal_port = router.router[l3_constants.INTERFACE_KEY][0] int_port_ipv6 = ip_lib.get_ipv6_lladdr(internal_port['mac_address']) internal_device_name = router.get_internal_device_name( internal_port['id']) internal_device_cidr = self._port_first_ip_cidr(internal_port) floating_ip_cidr = common_utils.ip_to_cidr( router.get_floating_ips()[0]['floating_ip_address']) default_gateway_ip = external_port['subnets'][0].get('gateway_ip') extra_subnet_cidr = external_port['extra_subnets'][0].get('cidr') return """vrrp_instance VR_1 { state BACKUP interface %(ha_device_name)s virtual_router_id 1 priority 50 garp_master_delay 60 nopreempt advert_int 2 track_interface { %(ha_device_name)s } virtual_ipaddress { 169.254.0.1/24 dev %(ha_device_name)s } virtual_ipaddress_excluded { %(floating_ip_cidr)s dev 
%(external_device_name)s %(external_device_cidr)s dev %(external_device_name)s %(internal_device_cidr)s dev %(internal_device_name)s %(ex_port_ipv6)s dev %(external_device_name)s scope link %(int_port_ipv6)s dev %(internal_device_name)s scope link } virtual_routes { 0.0.0.0/0 via %(default_gateway_ip)s dev %(external_device_name)s 8.8.8.0/24 via 19.4.4.4 %(extra_subnet_cidr)s dev %(external_device_name)s scope link } }""" % { 'ha_device_name': ha_device_name, 'external_device_name': external_device_name, 'external_device_cidr': external_device_cidr, 'internal_device_name': internal_device_name, 'internal_device_cidr': internal_device_cidr, 'floating_ip_cidr': floating_ip_cidr, 'default_gateway_ip': default_gateway_ip, 'int_port_ipv6': int_port_ipv6, 'ex_port_ipv6': ex_port_ipv6, 'extra_subnet_cidr': extra_subnet_cidr, } def _get_rule(self, iptables_manager, table, chain, predicate): rules = iptables_manager.get_chain(table, chain) result = next(rule for rule in rules if predicate(rule)) return result def _assert_router_does_not_exist(self, router): # If the namespace assertion succeeds # then the devices and iptable rules have also been deleted, # so there's no need to check that explicitly. self.assertFalse(self._namespace_exists(router.ns_name)) utils.wait_until_true( lambda: not self._metadata_proxy_exists(self.agent.conf, router)) def _assert_snat_chains(self, router): self.assertFalse(router.iptables_manager.is_chain_empty( 'nat', 'snat')) self.assertFalse(router.iptables_manager.is_chain_empty( 'nat', 'POSTROUTING')) def _assert_floating_ip_chains(self, router): self.assertFalse(router.iptables_manager.is_chain_empty( 'nat', 'float-snat')) def _assert_iptables_rules_converged(self, router): # if your code is failing on this line, it means you are not generating # your iptables rules in the same format that iptables-save returns # them. 
run iptables-save to see the format they should be in self.assertFalse(router.iptables_manager.apply()) def _assert_metadata_chains(self, router): metadata_port_filter = lambda rule: ( str(self.agent.conf.metadata_port) in rule.rule) self.assertTrue(self._get_rule(router.iptables_manager, 'nat', 'PREROUTING', metadata_port_filter)) self.assertTrue(self._get_rule(router.iptables_manager, 'filter', 'INPUT', metadata_port_filter)) def _assert_internal_devices(self, router): internal_devices = router.router[l3_constants.INTERFACE_KEY] self.assertTrue(len(internal_devices)) for device in internal_devices: self.assertTrue(self.device_exists_with_ips_and_mac( device, router.get_internal_device_name, router.ns_name)) def _assert_extra_routes(self, router, namespace=None): if namespace is None: namespace = router.ns_name routes = ip_lib.get_routing_table(4, namespace=namespace) routes = [{'nexthop': route['nexthop'], 'destination': route['destination']} for route in routes] for extra_route in router.router['routes']: self.assertIn(extra_route, routes) def _assert_onlink_subnet_routes( self, router, ip_versions, namespace=None): ns_name = namespace or router.ns_name routes = [] for ip_version in ip_versions: _routes = ip_lib.get_routing_table(ip_version, namespace=ns_name) routes.extend(_routes) routes = set(route['destination'] for route in routes) extra_subnets = router.get_ex_gw_port()['extra_subnets'] for extra_subnet in (route['cidr'] for route in extra_subnets): self.assertIn(extra_subnet, routes) def _assert_interfaces_deleted_from_ovs(self): def assert_ovs_bridge_empty(bridge_name): bridge = ovs_lib.OVSBridge(bridge_name) self.assertFalse(bridge.get_port_name_list()) assert_ovs_bridge_empty(self.agent.conf.ovs_integration_bridge) assert_ovs_bridge_empty(self.agent.conf.external_network_bridge) def floating_ips_configured(self, router): floating_ips = router.router[l3_constants.FLOATINGIP_KEY] external_port = router.get_ex_gw_port() return len(floating_ips) and all( ip_lib.device_exists_with_ips_and_mac( router.get_external_device_name(external_port['id']), ['%s/32' % fip['floating_ip_address']], external_port['mac_address'], namespace=router.ns_name) for fip in floating_ips) def fail_ha_router(self, router): device_name = router.get_ha_device_name() ha_device = ip_lib.IPDevice(device_name, router.ha_namespace) ha_device.link.set_down() @classmethod def _get_addresses_on_device(cls, namespace, interface): return [address['cidr'] for address in ip_lib.IPDevice(interface, namespace=namespace).addr.list()] def _assert_no_ip_addresses_on_interface(self, namespace, interface): self.assertEqual( [], self._get_addresses_on_device(namespace, interface)) def _assert_ip_address_on_interface(self, namespace, interface, ip_address): self.assertIn( ip_address, self._get_addresses_on_device(namespace, interface)) def _assert_ping_reply_from_expected_address( self, ping_result, expected_address): ping_results = ping_result.split('\n') self.assertGreater( len(ping_results), 1, "The result from ping should be multiple lines") self.assertIn( expected_address, ping_results[1], ("Expect to see %s in the reply of ping, but failed" % expected_address)) neutron-8.4.0/neutron/tests/functional/agent/l3/test_keepalived_state_change.py0000664000567000056710000001353013044372760031227 0ustar jenkinsjenkins00000000000000# Copyright (c) 2015 Red Hat Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import functools import os import re import eventlet import mock import netaddr from oslo_config import cfg from oslo_config import fixture as fixture_config from oslo_utils import uuidutils from neutron._i18n import _ from neutron.agent.l3 import keepalived_state_change from neutron.agent.linux import ip_lib from neutron.agent.linux import utils from neutron.tests.common import machine_fixtures as mf from neutron.tests.common import net_helpers from neutron.tests.functional import base IPV4_NEIGH_REGEXP = re.compile( r'(?P(\d{1,3}\.){3}\d{1,3}) ' '.*(?P([0-9A-Fa-f]{2}:){5}([0-9A-Fa-f]){2}).*') def get_arp_ip_mac_pairs(device_name, namespace): """Generate (ip, mac) pairs from device's ip neigh. Each neigh entry has following format: 192.168.0.1 lladdr fa:16:3e:01:ba:d3 STALE """ device = ip_lib.IPDevice(device_name, namespace) for entry in device.neigh.show(ip_version=4).splitlines(): match = IPV4_NEIGH_REGEXP.match(entry) if match: yield match.group('ip'), match.group('mac') def has_expected_arp_entry(device_name, namespace, ip, mac): return (ip, mac) in get_arp_ip_mac_pairs(device_name, namespace) class TestKeepalivedStateChange(base.BaseSudoTestCase): def setUp(self): super(TestKeepalivedStateChange, self).setUp() self.conf_fixture = self.useFixture(fixture_config.Config()) self.conf_fixture.register_opt( cfg.StrOpt('metadata_proxy_socket', default='$state_path/metadata_proxy', help=_('Location of Metadata Proxy UNIX domain ' 'socket'))) self.router_id = uuidutils.generate_uuid() self.conf_dir = self.get_default_temp_dir().path self.cidr = '169.254.128.1/24' self.interface_name = 'interface' self.monitor = keepalived_state_change.MonitorDaemon( self.get_temp_file_path('monitor.pid'), self.router_id, 1, 2, 'namespace', self.conf_dir, self.interface_name, self.cidr) mock.patch.object(self.monitor, 'notify_agent').start() self.line = '1: %s inet %s' % (self.interface_name, self.cidr) def test_parse_and_handle_event_wrong_device_completes_without_error(self): self.monitor.parse_and_handle_event( '1: wrong_device inet wrong_cidr') def _get_state(self): with open(os.path.join(self.monitor.conf_dir, 'state')) as state_file: return state_file.read() def test_parse_and_handle_event_writes_to_file(self): self.monitor.parse_and_handle_event('Deleted %s' % self.line) self.assertEqual('backup', self._get_state()) self.monitor.parse_and_handle_event(self.line) self.assertEqual('master', self._get_state()) def test_parse_and_handle_event_fails_writing_state(self): with mock.patch.object( self.monitor, 'write_state_change', side_effect=OSError): self.monitor.parse_and_handle_event(self.line) def test_parse_and_handle_event_fails_notifying_agent(self): with mock.patch.object( self.monitor, 'notify_agent', side_effect=Exception): self.monitor.parse_and_handle_event(self.line) class TestMonitorDaemon(base.BaseSudoTestCase): def setUp(self): super(TestMonitorDaemon, self).setUp() bridge = self.useFixture(net_helpers.OVSBridgeFixture()).bridge self.machines = self.useFixture(mf.PeerMachines(bridge)) self.router, self.peer = self.machines.machines[:2] conf_dir = self.get_default_temp_dir().path monitor = 
keepalived_state_change.MonitorDaemon( self.get_temp_file_path('monitor.pid'), uuidutils.generate_uuid(), 1, 2, self.router.namespace, conf_dir, 'foo-iface', self.machines.ip_cidr ) eventlet.spawn_n(monitor.run, run_as_root=True) monitor_started = functools.partial( lambda mon: mon.monitor is not None, monitor) utils.wait_until_true(monitor_started) self.addCleanup(monitor.monitor.stop) def test_new_fip_sends_garp(self): next_ip_cidr = net_helpers.increment_ip_cidr(self.machines.ip_cidr, 2) expected_ip = str(netaddr.IPNetwork(next_ip_cidr).ip) # Create incomplete ARP entry self.peer.assert_no_ping(expected_ip) has_entry = has_expected_arp_entry( self.peer.port.name, self.peer.namespace, expected_ip, self.router.port.link.address) self.assertFalse(has_entry) self.router.port.addr.add(next_ip_cidr) has_arp_entry_predicate = functools.partial( has_expected_arp_entry, self.peer.port.name, self.peer.namespace, expected_ip, self.router.port.link.address, ) exc = RuntimeError( "No ARP entry in %s namespace containing IP address %s and MAC " "address %s" % ( self.peer.namespace, expected_ip, self.router.port.link.address)) utils.wait_until_true( has_arp_entry_predicate, exception=exc) neutron-8.4.0/neutron/tests/functional/agent/l3/test_dvr_router.py0000664000567000056710000015700213044372760026607 0ustar jenkinsjenkins00000000000000# Copyright (c) 2014 Red Hat, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
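# NOTE: the tests in this module toggle self.agent.conf.agent_mode between
# 'dvr' (compute-node flavour, no SNAT namespace on this host) and
# 'dvr_snat' (network-node flavour, with a snat-<router_id> namespace).
# Most of them follow the same condensed pattern (illustrative sketch):
#
#     self.agent.conf.agent_mode = 'dvr_snat'
#     router_info = self.generate_dvr_router_info(enable_snat=True)
#     router = self.manage_router(self.agent, router_info)
#     # ... assert on namespaces, devices and iptables state ...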
import copy import functools import mock import netaddr import testtools from neutron.agent.l3 import agent as neutron_l3_agent from neutron.agent.l3 import dvr_fip_ns from neutron.agent.l3 import dvr_snat_ns from neutron.agent.l3 import namespaces from neutron.agent.linux import ip_lib from neutron.agent.linux import iptables_manager from neutron.agent.linux import utils from neutron.common import constants as l3_constants from neutron.common import exceptions as n_exc from neutron.extensions import portbindings from neutron.tests.common import l3_test_common from neutron.tests.common import machine_fixtures from neutron.tests.common import net_helpers from neutron.tests.functional.agent.l3 import framework DEVICE_OWNER_COMPUTE = l3_constants.DEVICE_OWNER_COMPUTE_PREFIX + 'fake' class TestDvrRouter(framework.L3AgentTestFramework): def manage_router(self, agent, router): def _safe_fipnamespace_delete_on_ext_net(ext_net_id): try: agent.fipnamespace_delete_on_ext_net(None, ext_net_id) except RuntimeError: pass if router['gw_port']: self.addCleanup( _safe_fipnamespace_delete_on_ext_net, router['gw_port']['network_id']) return super(TestDvrRouter, self).manage_router(agent, router) def test_dvr_update_floatingip_statuses(self): self.agent.conf.agent_mode = 'dvr' self._test_update_floatingip_statuses(self.generate_dvr_router_info()) def test_dvr_router_lifecycle_ha_with_snat_with_fips_nmtu(self): self._dvr_router_lifecycle(enable_ha=True, enable_snat=True, use_port_mtu=True) def test_dvr_router_lifecycle_without_ha_without_snat_with_fips(self): self._dvr_router_lifecycle(enable_ha=False, enable_snat=False) def test_dvr_router_lifecycle_without_ha_with_snat_with_fips(self): self._dvr_router_lifecycle(enable_ha=False, enable_snat=True) def test_dvr_router_lifecycle_ha_with_snat_with_fips(self): self._dvr_router_lifecycle(enable_ha=True, enable_snat=True) def _helper_create_dvr_router_fips_for_ext_network( self, agent_mode, **dvr_router_kwargs): self.agent.conf.agent_mode = agent_mode router_info = self.generate_dvr_router_info(**dvr_router_kwargs) self.mock_plugin_api.get_external_network_id.return_value = ( router_info['_floatingips'][0]['floating_network_id']) router = self.manage_router(self.agent, router_info) fip_ns = router.fip_ns.get_name() return router, fip_ns def _validate_fips_for_external_network(self, router, fip_ns): self.assertTrue(self._namespace_exists(router.ns_name)) self.assertTrue(self._namespace_exists(fip_ns)) self._assert_dvr_floating_ips(router) self._assert_snat_namespace_does_not_exist(router) def test_dvr_gateway_move_does_not_remove_redirect_rules(self): """Test to validate snat redirect rules not cleared with snat move.""" self.agent.conf.agent_mode = 'dvr_snat' router_info = self.generate_dvr_router_info(enable_snat=True) router_info[l3_constants.FLOATINGIP_KEY] = [] router_info[l3_constants.FLOATINGIP_AGENT_INTF_KEY] = [] router1 = self.manage_router(self.agent, router_info) router1.router['gw_port_host'] = "" self.agent._process_updated_router(router1.router) router_updated = self.agent.router_info[router1.router['id']] self.assertTrue(self._namespace_exists(router_updated.ns_name)) ns_ipr = ip_lib.IPRule(namespace=router1.ns_name) ip4_rules_list = ns_ipr.rule.list_rules(l3_constants.IP_VERSION_4) self.assertEqual(5, len(ip4_rules_list)) # IPRule list should have 5 entries. # Three entries from 'default', 'main' and 'local' table. # The remaining 2 is for the two router interfaces(csnat ports). 
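        # i.e. "ip rule show" inside the qrouter namespace would list
        # something like (addresses and table ids illustrative):
        #
        #     0:      from all lookup local
        #     32766:  from all lookup main
        #     32767:  from all lookup default
        #     <prio>: from 35.4.0.0/24 lookup <snat redirect table>
        #     <prio>: from 35.4.1.0/24 lookup <snat redirect table>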
default_rules_list_count = 0 interface_rules_list_count = 0 for ip_rule in ip4_rules_list: tbl_index = ip_rule['table'] if tbl_index in ['local', 'default', 'main']: default_rules_list_count = default_rules_list_count + 1 else: interface_rules_list_count = interface_rules_list_count + 1 self.assertEqual(3, default_rules_list_count) self.assertEqual(2, interface_rules_list_count) def test_dvr_update_gateway_port_with_no_gw_port_in_namespace(self): self.agent.conf.agent_mode = 'dvr' # Create the router with external net router_info = self.generate_dvr_router_info() external_gw_port = router_info['gw_port'] ext_net_id = router_info['_floatingips'][0]['floating_network_id'] self.mock_plugin_api.get_external_network_id.return_value = ext_net_id router = self.manage_router(self.agent, router_info) fg_port = router.fip_ns.agent_gateway_port fg_port_name = router.fip_ns.get_ext_device_name(fg_port['id']) fg_device = ip_lib.IPDevice(fg_port_name, namespace=router.fip_ns.name) # Now validate if the gateway is properly configured. self.assertIn('gateway', fg_device.route.get_gateway()) self._validate_fips_for_external_network( router, router.fip_ns.get_name()) # Now delete the fg- port that was created ext_net_bridge = self.agent.conf.external_network_bridge router.fip_ns.driver.unplug(fg_port_name, bridge=ext_net_bridge, namespace=router.fip_ns.name, prefix=dvr_fip_ns.FIP_EXT_DEV_PREFIX) # Now check if the fg- port is missing. self.assertFalse(fg_device.exists()) # Now change the gateway ip for the router and do an update. router.ex_gw_port = copy.deepcopy(router.ex_gw_port) new_fg_port = copy.deepcopy(fg_port) for subnet in new_fg_port['subnets']: subnet['gateway_ip'] = '19.4.4.2' router.router[l3_constants.FLOATINGIP_AGENT_INTF_KEY] = [new_fg_port] self.assertRaises(n_exc.FloatingIpSetupException, self.manage_router, self.agent, router.router) router = self.manage_router(self.agent, router.router) self.assertTrue(fg_device.exists()) self.assertEqual({'gateway': u'19.4.4.2'}, fg_device.route.get_gateway()) self._validate_fips_for_external_network( router, router.fip_ns.get_name()) self._delete_router(self.agent, router.router_id) self._assert_fip_namespace_deleted(external_gw_port) @mock.patch.object(dvr_fip_ns.FipNamespace, 'subscribe') def test_dvr_process_fips_with_no_gw_port_in_namespace( self, fip_subscribe): self.agent.conf.agent_mode = 'dvr' # Create the router with external net router_info = self.generate_dvr_router_info() external_gw_port = router_info['gw_port'] ext_net_id = router_info['_floatingips'][0]['floating_network_id'] self.mock_plugin_api.get_external_network_id.return_value = ext_net_id # Create the fip namespace up front dvr_fip_ns.FipNamespace(ext_net_id, self.agent.conf, self.agent.driver, self.agent.use_ipv6).create() # Create the router with the fip, this shouldn't allow the # update_gateway_port to be called without the fg- port fip_subscribe.return_value = False # This will raise the exception and will also clear # subscription for the ext_net_id self.assertRaises(n_exc.FloatingIpSetupException, self.manage_router, self.agent, router_info) fip_subscribe.return_value = True # Now update the router again router = self.manage_router(self.agent, router_info) fg_port = router.fip_ns.agent_gateway_port fg_port_name = router.fip_ns.get_ext_device_name(fg_port['id']) fg_device = ip_lib.IPDevice(fg_port_name, namespace=router.fip_ns.name) # Now validate if the gateway is properly configured. 
        self.assertIn('gateway', fg_device.route.get_gateway())
        self._validate_fips_for_external_network(
            router, router.fip_ns.get_name())
        self._delete_router(self.agent, router.router_id)
        self._assert_fip_namespace_deleted(external_gw_port)

    def test_dvr_router_fips_stale_gw_port(self):
        self.agent.conf.agent_mode = 'dvr'
        # Create the router with external net
        dvr_router_kwargs = {'ip_address': '19.4.4.3',
                             'subnet_cidr': '19.4.4.0/24',
                             'gateway_ip': '19.4.4.1',
                             'gateway_mac': 'ca:fe:de:ab:cd:ef'}
        router_info = self.generate_dvr_router_info(**dvr_router_kwargs)
        external_gw_port = router_info['gw_port']
        ext_net_id = router_info['_floatingips'][0]['floating_network_id']
        self.mock_plugin_api.get_external_network_id.return_value = ext_net_id
        # Create the fip namespace up front
        stale_fip_ns = dvr_fip_ns.FipNamespace(ext_net_id,
                                               self.agent.conf,
                                               self.agent.driver,
                                               self.agent.use_ipv6)
        stale_fip_ns.create()
        # Add a stale fg port to the namespace
        fixed_ip = external_gw_port['fixed_ips'][0]
        float_subnet = external_gw_port['subnets'][0]
        fip_gw_port_ip = str(netaddr.IPAddress(fixed_ip['ip_address']) + 10)
        prefixlen = netaddr.IPNetwork(float_subnet['cidr']).prefixlen
        stale_agent_gw_port = {
            'subnets': [{'cidr': float_subnet['cidr'],
                         'gateway_ip': float_subnet['gateway_ip'],
                         'id': fixed_ip['subnet_id']}],
            'network_id': external_gw_port['network_id'],
            'device_owner': l3_constants.DEVICE_OWNER_AGENT_GW,
            'mac_address': 'fa:16:3e:80:8f:89',
            portbindings.HOST_ID: self.agent.conf.host,
            'fixed_ips': [{'subnet_id': fixed_ip['subnet_id'],
                           'ip_address': fip_gw_port_ip,
                           'prefixlen': prefixlen}],
            'id': framework._uuid(),
            'device_id': framework._uuid()}
        stale_fip_ns.create_or_update_gateway_port(stale_agent_gw_port)
        stale_dev_exists = self.device_exists_with_ips_and_mac(
            stale_agent_gw_port,
            stale_fip_ns.get_ext_device_name,
            stale_fip_ns.get_name())
        self.assertTrue(stale_dev_exists)
        # Create the router; this shouldn't allow the duplicate port to stay
        router = self.manage_router(self.agent, router_info)
        # Assert the device no longer exists
        stale_dev_exists = self.device_exists_with_ips_and_mac(
            stale_agent_gw_port,
            stale_fip_ns.get_ext_device_name,
            stale_fip_ns.get_name())
        self.assertFalse(stale_dev_exists)
        # Validate things are looking good and clean up
        self._validate_fips_for_external_network(
            router, router.fip_ns.get_name())
        ext_gateway_port = router_info['gw_port']
        self._delete_router(self.agent, router.router_id)
        self._assert_fip_namespace_deleted(ext_gateway_port)

    def test_dvr_router_gateway_redirect_cleanup_on_agent_restart(self):
        """Test to validate the router namespace gateway redirect rule cleanup.

        This test checks for the non-existence of the gateway redirect
        rules in the router namespace after the agent restarts while the
        gateway is removed for the router.
""" self.agent.conf.agent_mode = 'dvr_snat' router_info = self.generate_dvr_router_info() router1 = self.manage_router(self.agent, router_info) self._assert_snat_namespace_exists(router1) self.assertTrue(self._namespace_exists(router1.ns_name)) restarted_agent = neutron_l3_agent.L3NATAgentWithStateReport( self.agent.host, self.agent.conf) router1.router['gw_port'] = "" router1.router['gw_port_host'] = "" router1.router['external_gateway_info'] = "" restarted_router = self.manage_router(restarted_agent, router1.router) self.assertTrue(self._namespace_exists(restarted_router.ns_name)) ns_ipr = ip_lib.IPRule(namespace=router1.ns_name) ip4_rules_list = ns_ipr.rule.list_rules(l3_constants.IP_VERSION_4) ip6_rules_list = ns_ipr.rule.list_rules(l3_constants.IP_VERSION_6) # Just make sure the basic set of rules are there in the router # namespace self.assertEqual(3, len(ip4_rules_list)) self.assertEqual(2, len(ip6_rules_list)) def test_dvr_unused_snat_ns_deleted_when_agent_restarts_after_move(self): """Test to validate the stale snat namespace delete with snat move. This test validates the stale snat namespace cleanup when the agent restarts after the gateway port has been moved from the agent. """ self.agent.conf.agent_mode = 'dvr_snat' router_info = self.generate_dvr_router_info() router1 = self.manage_router(self.agent, router_info) self._assert_snat_namespace_exists(router1) restarted_agent = neutron_l3_agent.L3NATAgentWithStateReport( self.agent.host, self.agent.conf) router1.router['gw_port_host'] = "my-new-host" restarted_router = self.manage_router(restarted_agent, router1.router) self._assert_snat_namespace_does_not_exist(restarted_router) def test_dvr_router_fips_for_multiple_ext_networks(self): agent_mode = 'dvr' # Create the first router fip with external net1 dvr_router1_kwargs = {'ip_address': '19.4.4.3', 'subnet_cidr': '19.4.4.0/24', 'gateway_ip': '19.4.4.1', 'gateway_mac': 'ca:fe:de:ab:cd:ef'} router1, fip1_ns = ( self._helper_create_dvr_router_fips_for_ext_network( agent_mode, **dvr_router1_kwargs)) # Validate the fip with external net1 self._validate_fips_for_external_network(router1, fip1_ns) # Create the second router fip with external net2 dvr_router2_kwargs = {'ip_address': '19.4.5.3', 'subnet_cidr': '19.4.5.0/24', 'gateway_ip': '19.4.5.1', 'gateway_mac': 'ca:fe:de:ab:cd:fe'} router2, fip2_ns = ( self._helper_create_dvr_router_fips_for_ext_network( agent_mode, **dvr_router2_kwargs)) # Validate the fip with external net2 self._validate_fips_for_external_network(router2, fip2_ns) def _dvr_router_lifecycle(self, enable_ha=False, enable_snat=False, custom_mtu=2000, use_port_mtu=False, ip_version=4, dual_stack=False): '''Test dvr router lifecycle :param enable_ha: sets the ha value for the router. :param enable_snat: the value of enable_snat is used to set the agent_mode. ''' # The value of agent_mode can be dvr, dvr_snat, or legacy. 
        # Since by definition this is a dvr (distributed = true)
        # only dvr and dvr_snat are applicable
        self.agent.conf.agent_mode = 'dvr_snat' if enable_snat else 'dvr'

        # We get the router info particular to a dvr router
        router_info = self.generate_dvr_router_info(
            enable_ha, enable_snat, extra_routes=True)
        if use_port_mtu:
            for key in ('_interfaces', '_snat_router_interfaces',
                        '_floatingip_agent_interfaces'):
                for port in router_info[key]:
                    port['mtu'] = custom_mtu
            router_info['gw_port']['mtu'] = custom_mtu
            router_info['_ha_interface']['mtu'] = custom_mtu
        else:
            self.agent.conf.network_device_mtu = custom_mtu

        # We need to mock the get_agent_gateway_port return value
        # because the whole L3PluginApi is mocked and we need the port
        # gateway_port information before the l3_agent will create it.
        # The port returned needs to have the same information as
        # router_info['gw_port']
        self.mock_plugin_api.get_agent_gateway_port.return_value = router_info[
            'gw_port']

        # We also need to mock the get_external_network_id method to
        # get the correct fip namespace.
        self.mock_plugin_api.get_external_network_id.return_value = (
            router_info['_floatingips'][0]['floating_network_id'])

        # With all that set we can now ask the l3_agent to
        # manage the router (create it, create namespaces,
        # attach interfaces, etc...)
        router = self.manage_router(self.agent, router_info)
        if enable_ha:
            port = router.get_ex_gw_port()
            interface_name = router.get_external_device_name(port['id'])
            self._assert_no_ip_addresses_on_interface(router.ha_namespace,
                                                      interface_name)
            utils.wait_until_true(lambda: router.ha_state == 'master')

            # Keepalived notifies of a state transition when it starts,
            # not when it ends. Thus, we have to wait until keepalived
            # finishes configuring everything. We verify this by waiting
            # until the last device has an IP address.
            device = router.router[l3_constants.INTERFACE_KEY][-1]
            device_exists = functools.partial(
                self.device_exists_with_ips_and_mac,
                device,
                router.get_internal_device_name,
                router.ns_name)
            utils.wait_until_true(device_exists)
            name = router.get_internal_device_name(device['id'])
            self.assertEqual(custom_mtu,
                             ip_lib.IPDevice(name, router.ns_name).link.mtu)

        ext_gateway_port = router_info['gw_port']
        self.assertTrue(self._namespace_exists(router.ns_name))
        utils.wait_until_true(
            lambda: self._metadata_proxy_exists(self.agent.conf, router))
        self._assert_internal_devices(router)
        self._assert_dvr_external_device(router)
        self._assert_dvr_gateway(router)
        self._assert_dvr_floating_ips(router)
        self._assert_snat_chains(router)
        self._assert_floating_ip_chains(router)
        self._assert_metadata_chains(router)
        self._assert_rfp_fpr_mtu(router, custom_mtu)
        if enable_snat:
            ip_versions = [4, 6] if (ip_version == 6 or dual_stack) else [4]
            snat_ns_name = dvr_snat_ns.SnatNamespace.get_snat_ns_name(
                router.router_id)
            self._assert_onlink_subnet_routes(
                router, ip_versions, snat_ns_name)
            self._assert_extra_routes(router, namespace=snat_ns_name)

        # During normal operation, a router-gateway-clear followed by
        # a router delete results in two notifications to the agent. This
        # code flow simulates the exceptional case where the notification of
        # the clearing of the gateway has been missed, so we are checking
        # that the L3 agent is robust enough to handle that case and delete
        # the router correctly.
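        # i.e. delete the router while its gateway is still configured and
        # then assert below that the qrouter, fip and snat namespaces are
        # all gone anyway.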
self._delete_router(self.agent, router.router_id) self._assert_fip_namespace_deleted(ext_gateway_port) self._assert_router_does_not_exist(router) self._assert_snat_namespace_does_not_exist(router) def generate_dvr_router_info(self, enable_ha=False, enable_snat=False, enable_gw=True, agent=None, extra_routes=False, **kwargs): if not agent: agent = self.agent router = l3_test_common.prepare_router_data( enable_snat=enable_snat, enable_floating_ip=True, enable_ha=enable_ha, extra_routes=extra_routes, num_internal_ports=2, enable_gw=enable_gw, **kwargs) internal_ports = router.get(l3_constants.INTERFACE_KEY, []) router['distributed'] = True router['gw_port_host'] = agent.conf.host floating_ip = router['_floatingips'][0] floating_ip['host'] = agent.conf.host if enable_gw: external_gw_port = router['gw_port'] router['gw_port'][portbindings.HOST_ID] = agent.conf.host floating_ip['floating_network_id'] = external_gw_port['network_id'] floating_ip['port_id'] = internal_ports[0]['id'] floating_ip['status'] = 'ACTIVE' self._add_snat_port_info_to_router(router, internal_ports) # FIP has a dependency on external gateway. So we need to create # the snat_port info and fip_agent_gw_port_info irrespective of # the agent type the dvr supports. The namespace creation is # dependent on the agent_type. self._add_fip_agent_gw_port_info_to_router(router, external_gw_port) return router def _add_fip_agent_gw_port_info_to_router(self, router, external_gw_port): # Add fip agent gateway port information to the router_info fip_gw_port_list = router.get( l3_constants.FLOATINGIP_AGENT_INTF_KEY, []) if not fip_gw_port_list and external_gw_port: # Get values from external gateway port fixed_ip = external_gw_port['fixed_ips'][0] float_subnet = external_gw_port['subnets'][0] port_ip = fixed_ip['ip_address'] # Pick an ip address which is not the same as port_ip fip_gw_port_ip = str(netaddr.IPAddress(port_ip) + 5) # Add floatingip agent gateway port info to router prefixlen = netaddr.IPNetwork(float_subnet['cidr']).prefixlen router[l3_constants.FLOATINGIP_AGENT_INTF_KEY] = [ {'subnets': [ {'cidr': float_subnet['cidr'], 'gateway_ip': float_subnet['gateway_ip'], 'id': fixed_ip['subnet_id']}], 'network_id': external_gw_port['network_id'], 'device_owner': l3_constants.DEVICE_OWNER_AGENT_GW, 'mac_address': 'fa:16:3e:80:8d:89', portbindings.HOST_ID: self.agent.conf.host, 'fixed_ips': [{'subnet_id': fixed_ip['subnet_id'], 'ip_address': fip_gw_port_ip, 'prefixlen': prefixlen}], 'id': framework._uuid(), 'device_id': framework._uuid()} ] def _add_snat_port_info_to_router(self, router, internal_ports): # Add snat port information to the router snat_port_list = router.get(l3_constants.SNAT_ROUTER_INTF_KEY, []) if not snat_port_list and internal_ports: router[l3_constants.SNAT_ROUTER_INTF_KEY] = [] for port in internal_ports: # Get values from internal port fixed_ip = port['fixed_ips'][0] snat_subnet = port['subnets'][0] port_ip = fixed_ip['ip_address'] # Pick an ip address which is not the same as port_ip snat_ip = str(netaddr.IPAddress(port_ip) + 5) # Add the info to router as the first snat port # in the list of snat ports prefixlen = netaddr.IPNetwork(snat_subnet['cidr']).prefixlen snat_router_port = { 'subnets': [ {'cidr': snat_subnet['cidr'], 'gateway_ip': snat_subnet['gateway_ip'], 'id': fixed_ip['subnet_id']}], 'network_id': port['network_id'], 'device_owner': l3_constants.DEVICE_OWNER_ROUTER_SNAT, 'mac_address': 'fa:16:3e:80:8d:89', 'fixed_ips': [{'subnet_id': fixed_ip['subnet_id'], 'ip_address': snat_ip, 'prefixlen': 
prefixlen}], 'id': framework._uuid(), 'device_id': framework._uuid()} # Get the address scope if there is any if 'address_scopes' in port: snat_router_port['address_scopes'] = port['address_scopes'] router[l3_constants.SNAT_ROUTER_INTF_KEY].append( snat_router_port) def _assert_dvr_external_device(self, router): external_port = router.get_ex_gw_port() snat_ns_name = dvr_snat_ns.SnatNamespace.get_snat_ns_name( router.router_id) # if the agent is in dvr_snat mode, then we have to check # that the correct ports and ip addresses exist in the # snat_ns_name namespace if self.agent.conf.agent_mode == 'dvr_snat': device_exists = functools.partial( self.device_exists_with_ips_and_mac, external_port, router.get_external_device_name, snat_ns_name) utils.wait_until_true(device_exists) # if the agent is in dvr mode then the snat_ns_name namespace # should not be present at all: elif self.agent.conf.agent_mode == 'dvr': self.assertFalse( self._namespace_exists(snat_ns_name), "namespace %s was found but agent is in dvr mode not dvr_snat" % (str(snat_ns_name)) ) # if the agent is anything else the test is misconfigured # we force a test failure with message else: self.assertTrue(False, " agent not configured for dvr or dvr_snat") def _assert_dvr_gateway(self, router): gateway_expected_in_snat_namespace = ( self.agent.conf.agent_mode == 'dvr_snat' ) if gateway_expected_in_snat_namespace: self._assert_dvr_snat_gateway(router) self._assert_removal_of_already_deleted_gateway_device(router) snat_namespace_should_not_exist = ( self.agent.conf.agent_mode == 'dvr' ) if snat_namespace_should_not_exist: self._assert_snat_namespace_does_not_exist(router) def _assert_dvr_snat_gateway(self, router): namespace = dvr_snat_ns.SnatNamespace.get_snat_ns_name( router.router_id) external_port = router.get_ex_gw_port() external_device_name = router.get_external_device_name( external_port['id']) external_device = ip_lib.IPDevice(external_device_name, namespace=namespace) existing_gateway = ( external_device.route.get_gateway().get('gateway')) expected_gateway = external_port['subnets'][0]['gateway_ip'] self.assertEqual(expected_gateway, existing_gateway) def _assert_removal_of_already_deleted_gateway_device(self, router): namespace = dvr_snat_ns.SnatNamespace.get_snat_ns_name( router.router_id) device = ip_lib.IPDevice("fakedevice", namespace=namespace) # Assert that no exception is thrown for this case self.assertIsNone(router._delete_gateway_device_if_exists( device, "192.168.0.1", 0)) def _assert_snat_namespace_does_not_exist(self, router): namespace = dvr_snat_ns.SnatNamespace.get_snat_ns_name( router.router_id) self.assertFalse(self._namespace_exists(namespace)) def _assert_dvr_floating_ips(self, router): # in the fip namespace: # Check that the fg- (floatingip_agent_gateway) # is created with the ip address of the external gateway port floating_ips = router.router[l3_constants.FLOATINGIP_KEY] self.assertTrue(floating_ips) # We need to fetch the floatingip agent gateway port info # from the router_info floating_agent_gw_port = ( router.router[l3_constants.FLOATINGIP_AGENT_INTF_KEY]) self.assertTrue(floating_agent_gw_port) external_gw_port = floating_agent_gw_port[0] fip_ns = self.agent.get_fip_ns(floating_ips[0]['floating_network_id']) fip_ns_name = fip_ns.get_name() fg_port_created_successfully = ip_lib.device_exists_with_ips_and_mac( fip_ns.get_ext_device_name(external_gw_port['id']), [self._port_first_ip_cidr(external_gw_port)], external_gw_port['mac_address'], namespace=fip_ns_name) 
self.assertTrue(fg_port_created_successfully) # Check fpr-router device has been created device_name = fip_ns.get_int_device_name(router.router_id) fpr_router_device_created_successfully = ip_lib.device_exists( device_name, namespace=fip_ns_name) self.assertTrue(fpr_router_device_created_successfully) # In the router namespace # Check rfp- is created correctly for fip in floating_ips: device_name = fip_ns.get_rtr_ext_device_name(router.router_id) self.assertTrue(ip_lib.device_exists( device_name, namespace=router.ns_name)) # In the router namespace, check the iptables rules are set correctly for fip in floating_ips: floatingip = fip['floating_ip_address'] fixedip = fip['fixed_ip_address'] expected_rules = router.floating_forward_rules(floatingip, fixedip) self._assert_iptables_rules_exist( router.iptables_manager, 'nat', expected_rules) def test_dvr_router_rem_fips_on_restarted_agent(self): self.agent.conf.agent_mode = 'dvr_snat' router_info = self.generate_dvr_router_info() router1 = self.manage_router(self.agent, router_info) fip_ns = router1.fip_ns.get_name() self.assertTrue(self._namespace_exists(fip_ns)) restarted_agent = neutron_l3_agent.L3NATAgentWithStateReport( self.agent.host, self.agent.conf) router1.router[l3_constants.FLOATINGIP_KEY] = [] self.manage_router(restarted_agent, router1.router) self._assert_dvr_snat_gateway(router1) self.assertTrue(self._namespace_exists(fip_ns)) def test_dvr_router_add_fips_on_restarted_agent(self): self.agent.conf.agent_mode = 'dvr' router_info = self.generate_dvr_router_info() router = self.manage_router(self.agent, router_info) floating_ips = router.router[l3_constants.FLOATINGIP_KEY] router_ns = router.ns_name fip_rule_prio_1 = self._get_fixed_ip_rule_priority( router_ns, floating_ips[0]['fixed_ip_address']) restarted_agent = neutron_l3_agent.L3NATAgent( self.agent.host, self.agent.conf) floating_ips[0]['floating_ip_address'] = '21.4.4.2' floating_ips[0]['fixed_ip_address'] = '10.0.0.2' self.manage_router(restarted_agent, router_info) fip_rule_prio_2 = self._get_fixed_ip_rule_priority( router_ns, floating_ips[0]['fixed_ip_address']) self.assertNotEqual(fip_rule_prio_1, fip_rule_prio_2) def _assert_iptables_rules_exist( self, router_iptables_manager, table_name, expected_rules): rules = router_iptables_manager.get_rules_for_table(table_name) for rule in expected_rules: self.assertIn( str(iptables_manager.IptablesRule(rule[0], rule[1])), rules) def test_dvr_router_floating_ip_moved(self): self.agent.conf.agent_mode = 'dvr' router_info = self.generate_dvr_router_info() router = self.manage_router(self.agent, router_info) floating_ips = router.router[l3_constants.FLOATINGIP_KEY] router_ns = router.ns_name fixed_ip = floating_ips[0]['fixed_ip_address'] self.assertTrue(self._fixed_ip_rule_exists(router_ns, fixed_ip)) # Floating IP reassigned to another fixed IP new_fixed_ip = '10.0.0.2' self.assertNotEqual(new_fixed_ip, fixed_ip) floating_ips[0]['fixed_ip_address'] = new_fixed_ip self.agent._process_updated_router(router.router) self.assertFalse(self._fixed_ip_rule_exists(router_ns, fixed_ip)) self.assertTrue(self._fixed_ip_rule_exists(router_ns, new_fixed_ip)) def test_prevent_snat_rule_exist_on_restarted_agent(self): self.agent.conf.agent_mode = 'dvr_snat' router_info = self.generate_dvr_router_info() router = self.manage_router(self.agent, router_info) ext_port = router.get_ex_gw_port() rfp_devicename = router.get_external_device_interface_name(ext_port) prevent_snat_rule = router._prevent_snat_for_internal_traffic_rule( rfp_devicename) 
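        # _prevent_snat_for_internal_traffic_rule() builds the nat-table
        # rule, keyed on the rfp- (router-to-fip) device name, that exempts
        # internal east-west traffic from SNAT; the assertions check the
        # rule exists both before and after an agent restart.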
self._assert_iptables_rules_exist( router.iptables_manager, 'nat', [prevent_snat_rule]) restarted_agent = neutron_l3_agent.L3NATAgentWithStateReport( self.agent.host, self.agent.conf) restarted_router = self.manage_router(restarted_agent, router_info) self._assert_iptables_rules_exist( restarted_router.iptables_manager, 'nat', [prevent_snat_rule]) def _get_fixed_ip_rule_priority(self, namespace, fip): iprule = ip_lib.IPRule(namespace) lines = iprule.rule._as_root([4], ['show']).splitlines() for line in lines: if fip in line: info = iprule.rule._parse_line(4, line) return info['priority'] def _fixed_ip_rule_exists(self, namespace, ip): iprule = ip_lib.IPRule(namespace) lines = iprule.rule._as_root([4], ['show']).splitlines() for line in lines: if ip in line: info = iprule.rule._parse_line(4, line) if info['from'] == ip: return True return False def test_dvr_router_add_internal_network_set_arp_cache(self): # Check that, when the router is set up and there are # existing ports on the uplinked subnet, the ARP # cache is properly populated. self.agent.conf.agent_mode = 'dvr_snat' router_info = l3_test_common.prepare_router_data() router_info['distributed'] = True expected_neighbor = '35.4.1.10' port_data = { 'fixed_ips': [{'ip_address': expected_neighbor}], 'mac_address': 'fa:3e:aa:bb:cc:dd', 'device_owner': DEVICE_OWNER_COMPUTE } self.agent.plugin_rpc.get_ports_by_subnet.return_value = [port_data] router1 = self.manage_router(self.agent, router_info) internal_device = router1.get_internal_device_name( router_info['_interfaces'][0]['id']) neighbors = ip_lib.IPDevice(internal_device, router1.ns_name).neigh self.assertEqual(expected_neighbor, neighbors.show(ip_version=4).split()[0]) def _assert_rfp_fpr_mtu(self, router, expected_mtu=1500): dev_mtu = self.get_device_mtu( router.router_id, router.fip_ns.get_rtr_ext_device_name, router.ns_name) self.assertEqual(expected_mtu, dev_mtu) dev_mtu = self.get_device_mtu( router.router_id, router.fip_ns.get_int_device_name, router.fip_ns.get_name()) self.assertEqual(expected_mtu, dev_mtu) def test_dvr_router_fip_agent_mismatch(self): """Test to validate the floatingip agent mismatch. This test validates the condition where floatingip agent gateway port host mismatches with the agent and so the binding will not be there. """ self.agent.conf.agent_mode = 'dvr' router_info = self.generate_dvr_router_info() floating_ip = router_info['_floatingips'][0] floating_ip['host'] = 'my_new_host' # In this case the floatingip binding is different and so it # should not create the floatingip namespace on the given agent. # This is also like there is no current binding. router1 = self.manage_router(self.agent, router_info) fip_ns = router1.fip_ns.get_name() self.assertTrue(self._namespace_exists(router1.ns_name)) self.assertFalse(self._namespace_exists(fip_ns)) self._assert_snat_namespace_does_not_exist(router1) def test_dvr_router_fip_create_for_migrating_port(self): """Test to validate the floatingip create on port migrate. This test validates the condition where floatingip host mismatches with the agent, but the 'dest_host' variable matches with the agent host, due to port pre-migrate phase. 
""" self.agent.conf.agent_mode = 'dvr' router_info = self.generate_dvr_router_info() floating_ip = router_info['_floatingips'][0] floating_ip['host'] = 'my_new_host' floating_ip['dest_host'] = self.agent.host # Now we have the floatingip 'host' pointing to host that # does not match to the 'agent.host' and the floatingip # 'dest_host' matches with the agent.host in the case # of live migration due to the port_profile update from # nova. router1 = self.manage_router(self.agent, router_info) fip_ns = router1.fip_ns.get_name() self.assertTrue(self._namespace_exists(router1.ns_name)) self.assertTrue(self._namespace_exists(fip_ns)) def test_dvr_router_fip_late_binding(self): """Test to validate the floatingip migration or latebinding. This test validates the condition where floatingip private port changes while migration or when the private port host binding is done later after floatingip association. """ self.agent.conf.agent_mode = 'dvr' router_info = self.generate_dvr_router_info() fip_agent_gw_port = router_info[l3_constants.FLOATINGIP_AGENT_INTF_KEY] # Now let us not pass the FLOATINGIP_AGENT_INTF_KEY, to emulate # that the server did not create the port, since there was no valid # host binding. router_info[l3_constants.FLOATINGIP_AGENT_INTF_KEY] = [] self.mock_plugin_api.get_agent_gateway_port.return_value = ( fip_agent_gw_port[0]) router1 = self.manage_router(self.agent, router_info) fip_ns = router1.fip_ns.get_name() self.assertTrue(self._namespace_exists(router1.ns_name)) self.assertTrue(self._namespace_exists(fip_ns)) self._assert_snat_namespace_does_not_exist(router1) def _assert_snat_namespace_exists(self, router): namespace = dvr_snat_ns.SnatNamespace.get_snat_ns_name( router.router_id) self.assertTrue(self._namespace_exists(namespace)) def _get_dvr_snat_namespace_device_status( self, router, internal_dev_name=None): """Function returns the internal and external device status.""" snat_ns = dvr_snat_ns.SnatNamespace.get_snat_ns_name( router.router_id) external_port = router.get_ex_gw_port() external_device_name = router.get_external_device_name( external_port['id']) qg_device_created_successfully = ip_lib.device_exists( external_device_name, namespace=snat_ns) sg_device_created_successfully = ip_lib.device_exists( internal_dev_name, namespace=snat_ns) return qg_device_created_successfully, sg_device_created_successfully def test_dvr_router_snat_namespace_with_interface_remove(self): """Test to validate the snat namespace with interface remove. This test validates the snat namespace for all the external and internal devices. It also validates if the internal device corresponding to the router interface is removed when the router interface is deleted. """ self.agent.conf.agent_mode = 'dvr_snat' router_info = self.generate_dvr_router_info() snat_internal_port = router_info[l3_constants.SNAT_ROUTER_INTF_KEY] router1 = self.manage_router(self.agent, router_info) csnat_internal_port = ( router1.router[l3_constants.SNAT_ROUTER_INTF_KEY]) # Now save the internal device name to verify later internal_device_name = router1._get_snat_int_device_name( csnat_internal_port[0]['id']) self._assert_snat_namespace_exists(router1) qg_device, sg_device = self._get_dvr_snat_namespace_device_status( router1, internal_dev_name=internal_device_name) self.assertTrue(qg_device) self.assertTrue(sg_device) self.assertEqual(router1.snat_ports, snat_internal_port) # Now let us not pass INTERFACE_KEY, to emulate # the interface has been removed. 
router1.router[l3_constants.INTERFACE_KEY] = [] # Now let us not pass the SNAT_ROUTER_INTF_KEY, to emulate # that the server did not send it, since the interface has been # removed. router1.router[l3_constants.SNAT_ROUTER_INTF_KEY] = [] self.agent._process_updated_router(router1.router) router_updated = self.agent.router_info[router_info['id']] self._assert_snat_namespace_exists(router_updated) qg_device, sg_device = self._get_dvr_snat_namespace_device_status( router_updated, internal_dev_name=internal_device_name) self.assertFalse(sg_device) self.assertTrue(qg_device) def _mocked_dvr_ha_router(self, agent, enable_gw=True): r_info = self.generate_dvr_router_info(enable_ha=True, enable_snat=True, agent=agent, enable_gw=enable_gw) r_snat_ns_name = namespaces.build_ns_name(dvr_snat_ns.SNAT_NS_PREFIX, r_info['id']) mocked_r_snat_ns_name = r_snat_ns_name + '@' + agent.host r_ns_name = namespaces.build_ns_name(namespaces.NS_PREFIX, r_info['id']) mocked_r_ns_name = r_ns_name + '@' + agent.host return r_info, mocked_r_ns_name, mocked_r_snat_ns_name def _setup_dvr_ha_agents(self): self.agent.conf.agent_mode = 'dvr_snat' conf = self._configure_agent('agent2') self.failover_agent = neutron_l3_agent.L3NATAgentWithStateReport( 'agent2', conf) self.failover_agent.conf.agent_mode = 'dvr_snat' def _setup_dvr_ha_bridges(self): br_int_1 = self._get_agent_ovs_integration_bridge(self.agent) br_int_2 = self._get_agent_ovs_integration_bridge(self.failover_agent) veth1, veth2 = self.useFixture(net_helpers.VethFixture()).ports br_int_1.add_port(veth1.name) br_int_2.add_port(veth2.name) def _create_dvr_ha_router(self, agent, enable_gw=True): get_ns_name = mock.patch.object(namespaces.RouterNamespace, '_get_ns_name').start() get_snat_ns_name = mock.patch.object(dvr_snat_ns.SnatNamespace, 'get_snat_ns_name').start() (r_info, mocked_r_ns_name, mocked_r_snat_ns_name) = self._mocked_dvr_ha_router(agent, enable_gw) get_ns_name.return_value = mocked_r_ns_name get_snat_ns_name.return_value = mocked_r_snat_ns_name router = self.manage_router(agent, r_info) return router def _assert_ip_addresses_in_dvr_ha_snat_namespace(self, router): namespace = router.ha_namespace ex_gw_port = router.get_ex_gw_port() snat_ports = router.get_snat_interfaces() if not snat_ports: return snat_port = snat_ports[0] ex_gw_port_name = router.get_external_device_name( ex_gw_port['id']) snat_port_name = router._get_snat_int_device_name( snat_port['id']) ip = ex_gw_port["fixed_ips"][0]['ip_address'] prefix_len = ex_gw_port["fixed_ips"][0]['prefixlen'] ex_gw_port_cidr = ip + "/" + str(prefix_len) ip = snat_port["fixed_ips"][0]['ip_address'] prefix_len = snat_port["fixed_ips"][0]['prefixlen'] snat_port_cidr = ip + "/" + str(prefix_len) self._assert_ip_address_on_interface(namespace, ex_gw_port_name, ex_gw_port_cidr) self._assert_ip_address_on_interface(namespace, snat_port_name, snat_port_cidr) def _assert_no_ip_addresses_in_dvr_ha_snat_namespace(self, router): namespace = router.ha_namespace ex_gw_port = router.get_ex_gw_port() snat_ports = router.get_snat_interfaces() if not snat_ports: return snat_port = snat_ports[0] ex_gw_port_name = router.get_external_device_name( ex_gw_port['id']) snat_port_name = router._get_snat_int_device_name( snat_port['id']) self._assert_no_ip_addresses_on_interface(namespace, snat_port_name) self._assert_no_ip_addresses_on_interface(namespace, ex_gw_port_name) def _test_dvr_ha_router_failover(self, enable_gw): self._setup_dvr_ha_agents() self._setup_dvr_ha_bridges() router1 = self._create_dvr_ha_router(self.agent, 
enable_gw=enable_gw) router2 = self._create_dvr_ha_router(self.failover_agent, enable_gw) utils.wait_until_true(lambda: router1.ha_state == 'master') utils.wait_until_true(lambda: router2.ha_state == 'backup') self._assert_ip_addresses_in_dvr_ha_snat_namespace(router1) self._assert_no_ip_addresses_in_dvr_ha_snat_namespace(router2) self.fail_ha_router(router1) utils.wait_until_true(lambda: router2.ha_state == 'master') utils.wait_until_true(lambda: router1.ha_state == 'backup') self._assert_ip_addresses_in_dvr_ha_snat_namespace(router2) self._assert_no_ip_addresses_in_dvr_ha_snat_namespace(router1) def test_dvr_ha_router_failover_with_gw(self): self._test_dvr_ha_router_failover(enable_gw=True) def test_dvr_ha_router_failover_without_gw(self): self._test_dvr_ha_router_failover(enable_gw=False) def test_dvr_router_static_routes(self): """Test to validate the extra routes on dvr routers.""" self.agent.conf.agent_mode = 'dvr_snat' router_info = self.generate_dvr_router_info(enable_snat=True) router1 = self.manage_router(self.agent, router_info) self.assertTrue(self._namespace_exists(router1.ns_name)) self._assert_snat_namespace_exists(router1) snat_ns_name = dvr_snat_ns.SnatNamespace.get_snat_ns_name( router1.router_id) # Now try to add routes that are suitable for both the # router namespace and the snat namespace. router1.router['routes'] = [{'destination': '8.8.4.0/24', 'nexthop': '35.4.0.20'}] self.agent._process_updated_router(router1.router) router_updated = self.agent.router_info[router_info['id']] self._assert_extra_routes(router_updated, namespace=snat_ns_name) self._assert_extra_routes(router_updated) def test_dvr_router_gateway_update_to_none(self): self.agent.conf.agent_mode = 'dvr_snat' router_info = self.generate_dvr_router_info(enable_snat=True) router = self.manage_router(self.agent, router_info) gw_port = router.get_ex_gw_port() ex_gw_port_name = router.get_external_device_name(gw_port['id']) ex_gw_device = ip_lib.IPDevice(ex_gw_port_name, namespace=router.snat_namespace.name) fg_port = router.fip_ns.agent_gateway_port fg_port_name = router.fip_ns.get_ext_device_name(fg_port['id']) fg_device = ip_lib.IPDevice(fg_port_name, namespace=router.fip_ns.name) self.assertIn('gateway', ex_gw_device.route.get_gateway()) self.assertIn('gateway', fg_device.route.get_gateway()) # Make this copy to make agent think gw_port changed. 
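# (Without the copy, the loop below would mutate the cached port in # place and the agent's gw_port comparison would not detect a change.)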
router.ex_gw_port = copy.deepcopy(router.ex_gw_port) for subnet in gw_port['subnets']: subnet['gateway_ip'] = None new_fg_port = copy.deepcopy(fg_port) for subnet in new_fg_port['subnets']: subnet['gateway_ip'] = None router.router[l3_constants.FLOATINGIP_AGENT_INTF_KEY] = [new_fg_port] router.process(self.agent) self.assertIsNone(ex_gw_device.route.get_gateway()) self.assertIsNone(fg_device.route.get_gateway()) def _assert_fip_namespace_deleted(self, ext_gateway_port): ext_net_id = ext_gateway_port['network_id'] fip_ns = self.agent.get_fip_ns(ext_net_id) fip_ns.unsubscribe = mock.Mock() self.agent.fipnamespace_delete_on_ext_net( self.agent.context, ext_net_id) self._assert_interfaces_deleted_from_ovs() fip_ns_name = fip_ns.get_name() self.assertFalse(self._namespace_exists(fip_ns_name)) self.assertTrue(fip_ns.destroyed) self.assertTrue(fip_ns.unsubscribe.called) def _setup_address_scope(self, internal_address_scope1, internal_address_scope2, gw_address_scope=None): router_info = self.generate_dvr_router_info(enable_snat=True) address_scope1 = { str(l3_constants.IP_VERSION_4): internal_address_scope1} address_scope2 = { str(l3_constants.IP_VERSION_4): internal_address_scope2} if gw_address_scope: router_info['gw_port']['address_scopes'] = { str(l3_constants.IP_VERSION_4): gw_address_scope} router_info[l3_constants.INTERFACE_KEY][0]['address_scopes'] = ( address_scope1) router_info[l3_constants.INTERFACE_KEY][1]['address_scopes'] = ( address_scope2) # Regenerate the SNAT port info so it picks up the address scopes # assigned above router_info[l3_constants.SNAT_ROUTER_INTF_KEY] = [] self._add_snat_port_info_to_router( router_info, router_info[l3_constants.INTERFACE_KEY]) router = self.manage_router(self.agent, router_info) router_ip_cidr1 = self._port_first_ip_cidr(router.internal_ports[0]) router_ip1 = router_ip_cidr1.partition('/')[0] router_ip_cidr2 = self._port_first_ip_cidr(router.internal_ports[1]) router_ip2 = router_ip_cidr2.partition('/')[0] br_int = framework.get_ovs_bridge( self.agent.conf.ovs_integration_bridge) test_machine1 = self.useFixture( machine_fixtures.FakeMachine( br_int, net_helpers.increment_ip_cidr(router_ip_cidr1, 10), router_ip1)) test_machine2 = self.useFixture( machine_fixtures.FakeMachine( br_int, net_helpers.increment_ip_cidr(router_ip_cidr2, 10), router_ip2)) return test_machine1, test_machine2, router def test_connection_from_same_address_scope(self): self.agent.conf.agent_mode = 'dvr_snat' test_machine1, test_machine2, _ = self._setup_address_scope( 'scope1', 'scope1') # Internal networks that are in the same address scope can connect # to each other net_helpers.assert_ping(test_machine1.namespace, test_machine2.ip, 5) net_helpers.assert_ping(test_machine2.namespace, test_machine1.ip, 5) def test_connection_from_diff_address_scope(self): self.agent.conf.agent_mode = 'dvr_snat' test_machine1, test_machine2, _ = self._setup_address_scope( 'scope1', 'scope2') # Internal networks that are not in the same address scope should # not reach each other test_machine1.assert_no_ping(test_machine2.ip) test_machine2.assert_no_ping(test_machine1.ip) @testtools.skip('bug/1543885') def test_fip_connection_for_address_scope(self): self.agent.conf.agent_mode = 'dvr_snat' (machine_same_scope, machine_diff_scope, router) = self._setup_address_scope('scope1', 'scope2', 'scope1') router.router[l3_constants.FLOATINGIP_KEY] = [] fip_same_scope = '19.4.4.10' self._add_fip(router, fip_same_scope, fixed_address=machine_same_scope.ip, host=self.agent.conf.host, fixed_ip_address_scope='scope1') fip_diff_scope = '19.4.4.11'
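# A second floating IP is associated with the machine in the other # address scope, so both the same-scope and cross-scope paths are # exercised below.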
self._add_fip(router, fip_diff_scope, fixed_address=machine_diff_scope.ip, host=self.agent.conf.host, fixed_ip_address_scope='scope2') router.process(self.agent) br_ex = framework.get_ovs_bridge( self.agent.conf.external_network_bridge) src_machine = self.useFixture( machine_fixtures.FakeMachine(br_ex, '19.4.4.12/24')) # Floating IPs should work regardless of the address scope net_helpers.assert_ping(src_machine.namespace, fip_same_scope, 5) net_helpers.assert_ping(src_machine.namespace, fip_diff_scope, 5) def test_direct_route_for_address_scope(self): self.agent.conf.agent_mode = 'dvr_snat' (machine_same_scope, machine_diff_scope, router) = self._setup_address_scope('scope1', 'scope2', 'scope1') gw_port = router.get_ex_gw_port() gw_ip = self._port_first_ip_cidr(gw_port).partition('/')[0] br_ex = framework.get_ovs_bridge( self.agent.conf.external_network_bridge) src_machine = self.useFixture( machine_fixtures.FakeMachine(br_ex, '19.4.4.12/24', gw_ip)) # Internal networks that are in the same address scope as the # external network can route directly to the external network net_helpers.assert_ping( src_machine.namespace, machine_same_scope.ip, 5) # Internal networks that are not in the same address scope as the # external network go through SNAT, so a direct route will not work # here. src_machine.assert_no_ping(machine_diff_scope.ip) def test_dvr_snat_namespace_has_ip_nonlocal_bind_disabled(self): self.agent.conf.agent_mode = 'dvr_snat' router_info = self.generate_dvr_router_info( enable_ha=True, enable_snat=True) router = self.manage_router(self.agent, router_info) try: ip_nonlocal_bind_value = ip_lib.get_ip_nonlocal_bind( router.snat_namespace.name) except RuntimeError as rte: stat_message = 'cannot stat /proc/sys/net/ipv4/ip_nonlocal_bind' if stat_message in str(rte): raise self.skipException( "This kernel doesn't support %s in network namespaces." % ( ip_lib.IP_NONLOCAL_BIND)) raise self.assertEqual(0, ip_nonlocal_bind_value) neutron-8.4.0/neutron/tests/functional/agent/test_l2_ovs_agent.py0000664000567000056710000002743213044372760026463 0ustar jenkinsjenkins00000000000000# Copyright (c) 2015 Red Hat, Inc. # Copyright (c) 2015 SUSE Linux Products GmbH # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License.
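# The tests below share one pattern: start an agent with a set of fake # ports, then poll until the agent reports the expected port state. A # minimal sketch of that flow, using helpers provided by the # base.OVSAgentTestFramework class these tests inherit from: # # self.setup_agent_and_ports(port_dicts=self.create_test_ports()) # self.wait_until_ports_state(self.ports, up=True) # for port in self.ports: # self.agent.int_br.delete_port(port['vif_name']) # self.wait_until_ports_state(self.ports, up=False)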
import time from eventlet.timeout import Timeout from neutron.plugins.ml2.drivers.openvswitch.agent.common import constants from neutron.tests.common import net_helpers from neutron.tests.functional.agent.l2 import base class TestOVSAgent(base.OVSAgentTestFramework): def test_port_creation_and_deletion(self): self.setup_agent_and_ports( port_dicts=self.create_test_ports()) self.wait_until_ports_state(self.ports, up=True) for port in self.ports: self.agent.int_br.delete_port(port['vif_name']) self.wait_until_ports_state(self.ports, up=False) def _check_datapath_type_netdev(self, expected, default=False): if not default: self.config.set_override('datapath_type', expected, "OVS") agent = self.create_agent() self.start_agent(agent) for br_name in (getattr(self, br) for br in ('br_int', 'br_tun', 'br_phys')): actual = self.ovs.db_get_val('Bridge', br_name, 'datapath_type') self.assertEqual(expected, actual) def test_datapath_type_change(self): self._check_datapath_type_netdev('system') self._check_datapath_type_netdev('netdev') def test_datapath_type_netdev(self): self._check_datapath_type_netdev( constants.OVS_DATAPATH_NETDEV) def test_datapath_type_system(self): self._check_datapath_type_netdev( constants.OVS_DATAPATH_SYSTEM) def test_datapath_type_default(self): self._check_datapath_type_netdev( constants.OVS_DATAPATH_SYSTEM, default=True) def test_resync_devices_set_up_after_exception(self): self.setup_agent_and_ports( port_dicts=self.create_test_ports(), trigger_resync=True) self.wait_until_ports_state(self.ports, up=True) def test_reprocess_port_when_ovs_restarts(self): self.setup_agent_and_ports( port_dicts=self.create_test_ports()) self.wait_until_ports_state(self.ports, up=True) self.agent.check_ovs_status.return_value = constants.OVS_RESTARTED # OVS restarted, the agent should reprocess all the ports self.agent.plugin_rpc.update_device_list.reset_mock() self.wait_until_ports_state(self.ports, up=True) def test_resync_dev_up_after_failure(self): self.setup_agent_and_ports( port_dicts=self.create_test_ports(), failed_dev_up=True) # in the RPC mock the first port fails and should # be re-synced expected_ports = self.ports + [self.ports[0]] self.wait_until_ports_state(expected_ports, up=True) def test_resync_dev_down_after_failure(self): self.setup_agent_and_ports( port_dicts=self.create_test_ports(), failed_dev_down=True) self.wait_until_ports_state(self.ports, up=True) for port in self.ports: self.agent.int_br.delete_port(port['vif_name']) # in the RPC mock the first port fails and should # be re-synced expected_ports = self.ports + [self.ports[0]] self.wait_until_ports_state(expected_ports, up=False) def test_ancillary_port_creation_and_deletion(self): external_bridge = self.useFixture( net_helpers.OVSBridgeFixture()).bridge self.setup_agent_and_ports( port_dicts=self.create_test_ports(), ancillary_bridge=external_bridge) self.wait_until_ports_state(self.ports, up=True) for port in self.ports: external_bridge.delete_port(port['vif_name']) self.wait_until_ports_state(self.ports, up=False) def test_resync_ancillary_devices(self): external_bridge = self.useFixture( net_helpers.OVSBridgeFixture()).bridge self.setup_agent_and_ports( port_dicts=self.create_test_ports(), ancillary_bridge=external_bridge, trigger_resync=True) self.wait_until_ports_state(self.ports, up=True) def test_resync_ancillary_dev_up_after_failure(self): external_bridge = self.useFixture( net_helpers.OVSBridgeFixture()).bridge self.setup_agent_and_ports( port_dicts=self.create_test_ports(), 
ancillary_bridge=external_bridge, failed_dev_up=True) # in the RPC mock the first port fails and should # be re-synced expected_ports = self.ports + [self.ports[0]] self.wait_until_ports_state(expected_ports, up=True) def test_resync_ancillary_dev_down_after_failure(self): external_bridge = self.useFixture( net_helpers.OVSBridgeFixture()).bridge self.setup_agent_and_ports( port_dicts=self.create_test_ports(), ancillary_bridge=external_bridge, failed_dev_down=True) self.wait_until_ports_state(self.ports, up=True) for port in self.ports: external_bridge.delete_port(port['vif_name']) # in the RPC mock the first port fails and should # be re-synced expected_ports = self.ports + [self.ports[0]] self.wait_until_ports_state(expected_ports, up=False) def test_port_vlan_tags(self): self.setup_agent_and_ports( port_dicts=self.create_test_ports(), trigger_resync=True) self.wait_until_ports_state(self.ports, up=True) self.assert_vlan_tags(self.ports, self.agent) def _test_assert_bridges_ports_vxlan(self, local_ip=None): agent = self.create_agent(local_ip=local_ip) self.assertTrue(self.ovs.bridge_exists(self.br_int)) self.assertTrue(self.ovs.bridge_exists(self.br_tun)) self.assert_bridge_ports() self.assert_patch_ports(agent) def test_assert_bridges_ports_vxlan_ipv4(self): self._test_assert_bridges_ports_vxlan() def test_assert_bridges_ports_vxlan_ipv6(self): self._test_assert_bridges_ports_vxlan(local_ip='2001:db8:100::1') def test_assert_bridges_ports_no_tunnel(self): self.create_agent(create_tunnels=False) self.assertTrue(self.ovs.bridge_exists(self.br_int)) self.assertFalse(self.ovs.bridge_exists(self.br_tun)) def test_assert_pings_during_br_int_setup_not_lost(self): self.setup_agent_and_ports(port_dicts=self.create_test_ports(), create_tunnels=False) self.wait_until_ports_state(self.ports, up=True) ips = [port['fixed_ips'][0]['ip_address'] for port in self.ports] with net_helpers.async_ping(self.namespace, ips) as done: while not done(): self.agent.setup_integration_br() time.sleep(0.25) def test_assert_pings_during_br_phys_setup_not_lost_in_vlan_to_flat(self): provider_net = self._create_test_network_dict() provider_net['network_type'] = 'flat' self._test_assert_pings_during_br_phys_setup_not_lost(provider_net) def test_assert_pings_during_br_phys_setup_not_lost_in_vlan_to_vlan(self): provider_net = self._create_test_network_dict() provider_net['network_type'] = 'vlan' provider_net['segmentation_id'] = 876 self._test_assert_pings_during_br_phys_setup_not_lost(provider_net) def _test_assert_pings_during_br_phys_setup_not_lost(self, provider_net): # Separate namespace is needed when pinging from one port to another, # otherwise Linux ping uses loopback instead for sending and receiving # ping, hence ignoring flow setup. 
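# The bridge-rebuild tests below rely on net_helpers.async_ping: a ping # keeps running in the background while the bridge is repeatedly set up # again, and done() reports whether the pinger has finished, so traffic # dropped during the rebuild is expected to surface as a ping failure.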
ns_phys = self.useFixture(net_helpers.NamespaceFixture()).name ports = self.create_test_ports(amount=2) port_int = ports[0] port_phys = ports[1] ip_int = port_int['fixed_ips'][0]['ip_address'] ip_phys = port_phys['fixed_ips'][0]['ip_address'] self.setup_agent_and_ports(port_dicts=[port_int], create_tunnels=False, network=provider_net) self.plug_ports_to_phys_br(provider_net, [port_phys], namespace=ns_phys) # The OVS agent doesn't monitor the physical bridges, no notification # is sent when a port is up on a physical bridge, hence waiting only # for the ports connected to br-int self.wait_until_ports_state([port_int], up=True) with net_helpers.async_ping(ns_phys, [ip_int]) as done: while not done(): self.agent.setup_physical_bridges(self.agent.bridge_mappings) time.sleep(0.25) with net_helpers.async_ping(self.namespace, [ip_phys]) as done: while not done(): self.agent.setup_physical_bridges(self.agent.bridge_mappings) time.sleep(0.25) def test_assert_br_int_patch_port_ofports_dont_change(self): # When the integration bridge is setup, it should reuse the existing # patch ports between br-int and br-tun. self.setup_agent_and_ports(port_dicts=[], create_tunnels=True) patch_int_ofport_before = self.agent.patch_int_ofport patch_tun_ofport_before = self.agent.patch_tun_ofport self.setup_agent_and_ports(port_dicts=[], create_tunnels=True) self.assertEqual(patch_int_ofport_before, self.agent.patch_int_ofport) self.assertEqual(patch_tun_ofport_before, self.agent.patch_tun_ofport) def test_assert_br_phys_patch_port_ofports_dont_change(self): # When the integration bridge is setup, it should reuse the existing # patch ports between br-int and br-phys. self.setup_agent_and_ports(port_dicts=[]) patch_int_ofport_before = self.agent.int_ofports['physnet'] patch_phys_ofport_before = self.agent.phys_ofports['physnet'] self.setup_agent_and_ports(port_dicts=[]) self.assertEqual(patch_int_ofport_before, self.agent.int_ofports['physnet']) self.assertEqual(patch_phys_ofport_before, self.agent.phys_ofports['physnet']) def test_noresync_after_port_gone(self): '''This will test the scenario where a port is removed after listing it but before getting vif info about it. ''' self.ports = self.create_test_ports(amount=2) self.agent = self.create_agent(create_tunnels=False) self.network = self._create_test_network_dict() self._plug_ports(self.network, self.ports, self.agent) self.start_agent(self.agent, ports=self.ports, unplug_ports=[self.ports[1]]) self.wait_until_ports_state([self.ports[0]], up=True) self.assertRaises( Timeout, self.wait_until_ports_state, [self.ports[1]], up=True, timeout=10) class TestOVSAgentExtensionConfig(base.OVSAgentTestFramework): def setUp(self): super(TestOVSAgentExtensionConfig, self).setUp() self.config.set_override('extensions', ['qos'], 'agent') self.agent = self.create_agent(create_tunnels=False) def test_report_loaded_extension(self): self.agent._report_state() agent_state = self.agent.state_rpc.report_state.call_args[0][1] self.assertEqual(['qos'], agent_state['configurations']['extensions']) neutron-8.4.0/neutron/tests/functional/agent/__init__.py0000664000567000056710000000000013044372736024562 0ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/tests/functional/agent/test_dhcp_agent.py0000664000567000056710000003703013044372760026170 0ustar jenkinsjenkins00000000000000# Copyright (c) 2015 Red Hat, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import copy import os.path import eventlet import fixtures import mock import netaddr from oslo_config import fixture as fixture_config from oslo_utils import uuidutils from neutron.agent.common import config from neutron.agent.common import ovs_lib from neutron.agent.dhcp import agent from neutron.agent import dhcp_agent from neutron.agent.linux import dhcp from neutron.agent.linux import external_process from neutron.agent.linux import interface from neutron.agent.linux import ip_lib from neutron.agent.linux import utils from neutron.common import constants from neutron.common import utils as common_utils from neutron.tests.common import net_helpers from neutron.tests.functional.agent.linux import helpers from neutron.tests.functional import base class DHCPAgentOVSTestFramework(base.BaseSudoTestCase): _DHCP_PORT_MAC_ADDRESS = netaddr.EUI("24:77:03:7d:00:4c") _DHCP_PORT_MAC_ADDRESS.dialect = netaddr.mac_unix _TENANT_PORT_MAC_ADDRESS = netaddr.EUI("24:77:03:7d:00:3a") _TENANT_PORT_MAC_ADDRESS.dialect = netaddr.mac_unix _IP_ADDRS = { 4: {'addr': '192.168.10.11', 'cidr': '192.168.10.0/24', 'gateway': '192.168.10.1'}, 6: {'addr': '0:0:0:0:0:ffff:c0a8:a0b', 'cidr': '0:0:0:0:0:ffff:c0a8:a00/120', 'gateway': '0:0:0:0:0:ffff:c0a8:a01'}, } def setUp(self): super(DHCPAgentOVSTestFramework, self).setUp() config.setup_logging() self.conf_fixture = self.useFixture(fixture_config.Config()) self.conf = self.conf_fixture.conf dhcp_agent.register_options(self.conf) # NOTE(cbrandily): TempDir fixture creates a folder with 0o700 # permissions but agent dir must be readable by dnsmasq user (nobody) agent_config_dir = self.useFixture(fixtures.TempDir()).path self.useFixture( helpers.RecursivePermDirFixture(agent_config_dir, 0o555)) self.conf.set_override("dhcp_confs", agent_config_dir) self.conf.set_override( 'interface_driver', 'neutron.agent.linux.interface.OVSInterfaceDriver') self.conf.set_override('report_interval', 0, 'AGENT') br_int = self.useFixture(net_helpers.OVSBridgeFixture()).bridge self.conf.set_override('ovs_integration_bridge', br_int.br_name) self.mock_plugin_api = mock.patch( 'neutron.agent.dhcp.agent.DhcpPluginApi').start().return_value mock.patch('neutron.agent.rpc.PluginReportStateAPI').start() self.agent = agent.DhcpAgentWithStateReport('localhost') self.ovs_driver = interface.OVSInterfaceDriver(self.conf) self.conf.set_override('check_child_processes_interval', 1, 'AGENT') def network_dict_for_dhcp(self, dhcp_enabled=True, ip_version=4, prefix_override=None): net_id = uuidutils.generate_uuid() subnet_dict = self.create_subnet_dict( net_id, dhcp_enabled, ip_version, prefix_override) port_dict = self.create_port_dict( net_id, subnet_dict.id, mac_address=str(self._DHCP_PORT_MAC_ADDRESS), ip_version=ip_version) port_dict.device_id = common_utils.get_dhcp_agent_device_id( net_id, self.conf.host) net_dict = self.create_network_dict( net_id, [subnet_dict], [port_dict]) return net_dict def create_subnet_dict(self, net_id, dhcp_enabled=True, ip_version=4, prefix_override=None): cidr = self._IP_ADDRS[ip_version]['cidr'] if prefix_override is not None: cidr = '/'.join((cidr.split('/')[0], 
str(prefix_override))) sn_dict = dhcp.DictModel({ "id": uuidutils.generate_uuid(), "network_id": net_id, "ip_version": ip_version, "cidr": cidr, "gateway_ip": (self. _IP_ADDRS[ip_version]['gateway']), "enable_dhcp": dhcp_enabled, "dns_nameservers": [], "host_routes": [], "ipv6_ra_mode": None, "ipv6_address_mode": None}) if ip_version == 6: sn_dict['ipv6_address_mode'] = constants.DHCPV6_STATEFUL return sn_dict def create_port_dict(self, network_id, subnet_id, mac_address, ip_version=4, ip_address=None): ip_address = (self._IP_ADDRS[ip_version]['addr'] if not ip_address else ip_address) port_dict = dhcp.DictModel({ "id": uuidutils.generate_uuid(), "name": "foo", "mac_address": mac_address, "network_id": network_id, "admin_state_up": True, "device_id": uuidutils.generate_uuid(), "device_owner": "foo", "fixed_ips": [{"subnet_id": subnet_id, "ip_address": ip_address}], }) return port_dict def create_network_dict(self, net_id, subnets=None, ports=None): subnets = [] if not subnets else subnets ports = [] if not ports else ports net_dict = dhcp.NetModel(d={ "id": net_id, "subnets": subnets, "ports": ports, "admin_state_up": True, "tenant_id": uuidutils.generate_uuid(), }) return net_dict def get_interface_name(self, network, port): device_manager = dhcp.DeviceManager(conf=self.conf, plugin=mock.Mock()) return device_manager.get_interface_name(network, port) def configure_dhcp_for_network(self, network, dhcp_enabled=True): self.agent.configure_dhcp_for_network(network) self.addCleanup(self._cleanup_network, network, dhcp_enabled) def _cleanup_network(self, network, dhcp_enabled): self.mock_plugin_api.release_dhcp_port.return_value = None if dhcp_enabled: self.agent.call_driver('disable', network) def assert_dhcp_resources(self, network, dhcp_enabled): ovs = ovs_lib.BaseOVS() port = network.ports[0] iface_name = self.get_interface_name(network, port) self.assertEqual(dhcp_enabled, ovs.port_exists(iface_name)) self.assert_dhcp_namespace(network.namespace, dhcp_enabled) self.assert_dhcp_device(network.namespace, iface_name, dhcp_enabled) def assert_dhcp_namespace(self, namespace, dhcp_enabled): ip = ip_lib.IPWrapper() self.assertEqual(dhcp_enabled, ip.netns.exists(namespace)) def assert_dhcp_device(self, namespace, dhcp_iface_name, dhcp_enabled): dev = ip_lib.IPDevice(dhcp_iface_name, namespace) self.assertEqual(dhcp_enabled, ip_lib.device_exists( dhcp_iface_name, namespace)) if dhcp_enabled: self.assertEqual(self._DHCP_PORT_MAC_ADDRESS, dev.link.address) def _plug_port_for_dhcp_request(self, network, port): namespace = network.namespace vif_name = self.get_interface_name(network.id, port) self.ovs_driver.plug(network.id, port.id, vif_name, port.mac_address, self.conf['ovs_integration_bridge'], namespace=namespace) def _ip_list_for_vif(self, vif_name, namespace): ip_device = ip_lib.IPDevice(vif_name, namespace) return ip_device.addr.list(ip_version=4) def _get_network_port_for_allocation_test(self): network = self.network_dict_for_dhcp() ip_addr = netaddr.IPNetwork(network.subnets[0].cidr)[1] port = self.create_port_dict( network.id, network.subnets[0].id, mac_address=str(self._TENANT_PORT_MAC_ADDRESS), ip_address=str(ip_addr)) return network, port def assert_good_allocation_for_port(self, network, port): vif_name = self.get_interface_name(network.id, port) self._run_dhclient(vif_name, network) predicate = lambda: len( self._ip_list_for_vif(vif_name, network.namespace)) utils.wait_until_true(predicate, 10) ip_list = self._ip_list_for_vif(vif_name, network.namespace) cidr = ip_list[0].get('cidr') 
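# A good lease means dhclient obtained exactly the fixed IP the port # was created with; that is what the comparison below verifies.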
ip_addr = str(netaddr.IPNetwork(cidr).ip) self.assertEqual(port.fixed_ips[0].ip_address, ip_addr) def assert_bad_allocation_for_port(self, network, port): vif_name = self.get_interface_name(network.id, port) self._run_dhclient(vif_name, network) # we need to wait some time (10 seconds is enough) and check # that dhclient has not configured an IP address on the interface eventlet.sleep(10) ip_list = self._ip_list_for_vif(vif_name, network.namespace) self.assertEqual([], ip_list) def _run_dhclient(self, vif_name, network): # NOTE: Before running dhclient we should create a resolv.conf file # in the namespace where dhclient will run to test address # allocation for the port; otherwise dhclient will override the # system /etc/resolv.conf # By default the folder for the dhcp-agent's namespace doesn't exist, # which is why we use AdminDirFixture to create a directory # with admin permissions in /etc/netns/ and touch resolv.conf in it. etc_dir = '/etc/netns/%s' % network.namespace self.useFixture(helpers.AdminDirFixture(etc_dir)) cmd = ['touch', os.path.join(etc_dir, 'resolv.conf')] utils.execute(cmd, run_as_root=True) dhclient_cmd = ['dhclient', '--no-pid', '-d', '-1', vif_name] proc = net_helpers.RootHelperProcess( cmd=dhclient_cmd, namespace=network.namespace) self.addCleanup(proc.wait) self.addCleanup(proc.kill) def _get_metadata_proxy_process(self, network): return external_process.ProcessManager( self.conf, network.id, network.namespace) class DHCPAgentOVSTestCase(DHCPAgentOVSTestFramework): def test_create_subnet_with_dhcp(self): dhcp_enabled = True for version in [4, 6]: network = self.network_dict_for_dhcp( dhcp_enabled, ip_version=version) self.configure_dhcp_for_network(network=network, dhcp_enabled=dhcp_enabled) self.assert_dhcp_resources(network, dhcp_enabled) def test_create_subnet_with_non64_ipv6_cidrs(self): # the agent should not throw exceptions on weird prefixes dhcp_enabled = True version = 6 for i in (0, 1, 41, 81, 121, 127, 128): network = self.network_dict_for_dhcp( dhcp_enabled, ip_version=version, prefix_override=i) self.configure_dhcp_for_network(network=network, dhcp_enabled=dhcp_enabled) self.assertFalse(self.agent.needs_resync_reasons[network.id], msg="prefix size of %s triggered resync" % i) def test_agent_mtu_set_on_interface_driver(self): network = self.network_dict_for_dhcp() network["mtu"] = 789 self.configure_dhcp_for_network(network=network) port = network.ports[0] iface_name = self.get_interface_name(network, port) dev = ip_lib.IPDevice(iface_name, network.namespace) self.assertEqual(789, dev.link.mtu) def test_good_address_allocation(self): network, port = self._get_network_port_for_allocation_test() network.ports.append(port) self.configure_dhcp_for_network(network=network) self._plug_port_for_dhcp_request(network, port) self.assert_good_allocation_for_port(network, port) def test_bad_address_allocation(self): network, port = self._get_network_port_for_allocation_test() network.ports.append(port) self.configure_dhcp_for_network(network=network) bad_mac_address = netaddr.EUI(self._TENANT_PORT_MAC_ADDRESS.value + 1) bad_mac_address.dialect = netaddr.mac_unix port.mac_address = str(bad_mac_address) self._plug_port_for_dhcp_request(network, port) self.assert_bad_allocation_for_port(network, port) def _spawn_network_metadata_proxy(self): network = self.network_dict_for_dhcp() self.conf.set_override('enable_isolated_metadata', True) self.addCleanup(self.agent.disable_isolated_metadata_proxy, network) self.configure_dhcp_for_network(network=network) pm = self._get_metadata_proxy_process(network)
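# Polling with wait_until_true below avoids racing the proxy spawn: the # given exception is raised only if the process has not become active # within the timeout.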
utils.wait_until_true( lambda: pm.active, timeout=5, sleep=0.01, exception=RuntimeError("Metadata proxy didn't spawn")) return (pm, network) def test_metadata_proxy_respawned(self): pm, network = self._spawn_network_metadata_proxy() old_pid = pm.pid utils.execute(['kill', '-9', old_pid], run_as_root=True) utils.wait_until_true( lambda: pm.active and pm.pid != old_pid, timeout=5, sleep=0.1, exception=RuntimeError("Metadata proxy didn't respawn")) def test_stale_metadata_proxy_killed(self): pm, network = self._spawn_network_metadata_proxy() self.conf.set_override('enable_isolated_metadata', False) self.configure_dhcp_for_network(network=network) utils.wait_until_true( lambda: not pm.active, timeout=5, sleep=0.1, exception=RuntimeError("Stale metadata proxy didn't get killed")) def _test_metadata_proxy_spawn_kill_with_subnet_create_delete(self): network = self.network_dict_for_dhcp(ip_version=6) self.configure_dhcp_for_network(network=network) pm = self._get_metadata_proxy_process(network) # A newly created network with ipv6 subnet will not have metadata proxy self.assertFalse(pm.active) new_network = copy.deepcopy(network) dhcp_enabled_ipv4_subnet = self.create_subnet_dict(network.id) new_network.subnets.append(dhcp_enabled_ipv4_subnet) self.mock_plugin_api.get_network_info.return_value = new_network self.agent.refresh_dhcp_helper(network.id) # Metadata proxy should be spawned for the newly added subnet utils.wait_until_true( lambda: pm.active, timeout=5, sleep=0.1, exception=RuntimeError("Metadata proxy didn't spawn")) self.mock_plugin_api.get_network_info.return_value = network self.agent.refresh_dhcp_helper(network.id) # Metadata proxy should be killed because network doesn't need it. utils.wait_until_true( lambda: not pm.active, timeout=5, sleep=0.1, exception=RuntimeError("Metadata proxy didn't get killed")) def test_enable_isolated_metadata_for_subnet_create_delete(self): self.conf.set_override('force_metadata', False) self.conf.set_override('enable_isolated_metadata', True) self._test_metadata_proxy_spawn_kill_with_subnet_create_delete() def test_force_metadata_for_subnet_create_delete(self): self.conf.set_override('force_metadata', True) self.conf.set_override('enable_isolated_metadata', False) self._test_metadata_proxy_spawn_kill_with_subnet_create_delete() neutron-8.4.0/neutron/tests/functional/agent/test_ovs_flows.py0000664000567000056710000003252613044372760026122 0ustar jenkinsjenkins00000000000000# Copyright (c) 2015 Mirantis, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
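# Note on scenarios: testscenarios.multiply_scenarios below crosses the # two of_interface variants ('ofctl' and 'native') with the scenarios # inherited from test_ovs_lib.OVSBridgeTestBase, so each test method in # this file runs once per combination.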
import eventlet import fixtures import mock import testscenarios from oslo_config import cfg from oslo_serialization import jsonutils from oslo_utils import importutils from testtools.content import text_content from neutron.agent.linux import ip_lib from neutron.cmd.sanity import checks from neutron.common import constants as n_const from neutron.plugins.ml2.drivers.openvswitch.agent.common import constants from neutron.plugins.ml2.drivers.openvswitch.agent \ import ovs_neutron_agent as ovsagt from neutron.tests.common import base as common_base from neutron.tests.common import net_helpers from neutron.tests.functional.agent import test_ovs_lib from neutron.tests.functional import base from neutron.tests import tools cfg.CONF.import_group('OVS', 'neutron.plugins.ml2.drivers.openvswitch.agent.' 'common.config') class OVSAgentTestBase(test_ovs_lib.OVSBridgeTestBase, base.BaseSudoTestCase): scenarios = testscenarios.multiply_scenarios([ ('ofctl', {'main_module': ('neutron.plugins.ml2.drivers.openvswitch.' 'agent.openflow.ovs_ofctl.main')}), ('native', {'main_module': ('neutron.plugins.ml2.drivers.openvswitch.' 'agent.openflow.native.main')})], test_ovs_lib.OVSBridgeTestBase.scenarios) def setUp(self): super(OVSAgentTestBase, self).setUp() self.br = self.useFixture(net_helpers.OVSBridgeFixture()).bridge self.of_interface_mod = importutils.import_module(self.main_module) self.br_int_cls = None self.br_tun_cls = None self.br_phys_cls = None self.br_int = None self.init_done = False self.init_done_ev = eventlet.event.Event() self.main_ev = eventlet.event.Event() self.addCleanup(self._kill_main) retry_count = 3 while True: cfg.CONF.set_override('of_listen_port', net_helpers.get_free_namespace_port( n_const.PROTO_NAME_TCP), group='OVS') self.of_interface_mod.init_config() self._main_thread = eventlet.spawn(self._kick_main) # Wait for _kick_main -> of_interface main -> _agent_main # NOTE(yamamoto): This complexity came from how "native" # of_interface runs its openflow controller. "native" # of_interface's main routine blocks while running the # embedded openflow controller. In that case, the agent # rpc_loop runs in another thread. However, for FT we # need to run setUp() and test_xxx() in the same thread. # So I made this run of_interface's main in a separate # thread instead. try: while not self.init_done: self.init_done_ev.wait() break except fixtures.TimeoutException: self._kill_main() retry_count -= 1 if retry_count < 0: raise Exception('port allocation failed') def _kick_main(self): with mock.patch.object(ovsagt, 'main', self._agent_main): self.of_interface_mod.main() def _kill_main(self): self.main_ev.send() self._main_thread.wait() def _agent_main(self, bridge_classes): self.br_int_cls = bridge_classes['br_int'] self.br_phys_cls = bridge_classes['br_phys'] self.br_tun_cls = bridge_classes['br_tun'] self.br_int = self.br_int_cls(self.br.br_name) self.br_int.set_secure_mode() self.br_int.setup_controllers(cfg.CONF) self.br_int.setup_default_table() # signal to setUp() self.init_done = True self.init_done_ev.send() self.main_ev.wait() class ARPSpoofTestCase(OVSAgentTestBase): def setUp(self): # NOTE(kevinbenton): it would be way cooler to use scapy for # these but scapy requires the python process to be running as # root to bind to the ports. 
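# collect_flows_and_ports, registered via addOnException below, attaches # the bridge's flows and VIF details to a failing test's result so flow # problems can be debugged post-mortem.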
self.addOnException(self.collect_flows_and_ports) super(ARPSpoofTestCase, self).setUp() self.skip_without_arp_support() self.src_addr = '192.168.0.1' self.dst_addr = '192.168.0.2' self.src_namespace = self.useFixture( net_helpers.NamespaceFixture()).name self.dst_namespace = self.useFixture( net_helpers.NamespaceFixture()).name self.src_p = self.useFixture( net_helpers.OVSPortFixture(self.br, self.src_namespace)).port self.dst_p = self.useFixture( net_helpers.OVSPortFixture(self.br, self.dst_namespace)).port # wait to add IPs until after the anti-spoof rules are in place to # ensure no ARP exchange happens first def collect_flows_and_ports(self, exc_info): nicevif = lambda x: ['%s=%s' % (k, getattr(x, k)) for k in ['ofport', 'port_name', 'switch', 'vif_id', 'vif_mac']] nicedev = lambda x: ['%s=%s' % (k, getattr(x, k)) for k in ['name', 'namespace']] + x.addr.list() details = {'flows': self.br.dump_all_flows(), 'vifs': map(nicevif, self.br.get_vif_ports()), 'src_ip': self.src_addr, 'dest_ip': self.dst_addr, 'source_port': nicedev(self.src_p), 'dest_port': nicedev(self.dst_p)} self.addDetail('arp-test-state', text_content(jsonutils.dumps(details, indent=5))) @common_base.no_skip_on_missing_deps def skip_without_arp_support(self): if not checks.arp_header_match_supported(): self.skipTest("ARP header matching not supported") def test_arp_spoof_doesnt_block_normal_traffic(self): self._setup_arp_spoof_for_port(self.src_p.name, [self.src_addr]) self._setup_arp_spoof_for_port(self.dst_p.name, [self.dst_addr]) self.src_p.addr.add('%s/24' % self.src_addr) self.dst_p.addr.add('%s/24' % self.dst_addr) net_helpers.assert_ping(self.src_namespace, self.dst_addr, count=2) def test_mac_spoof_blocks_wrong_mac(self): self._setup_arp_spoof_for_port(self.src_p.name, [self.src_addr]) self._setup_arp_spoof_for_port(self.dst_p.name, [self.dst_addr]) self.src_p.addr.add('%s/24' % self.src_addr) self.dst_p.addr.add('%s/24' % self.dst_addr) net_helpers.assert_ping(self.src_namespace, self.dst_addr, count=2) # changing the allowed mac should stop the port from working self._setup_arp_spoof_for_port(self.src_p.name, [self.src_addr], mac='00:11:22:33:44:55') net_helpers.assert_no_ping(self.src_namespace, self.dst_addr, count=2) def test_arp_spoof_doesnt_block_ipv6(self): self.src_addr = '2000::1' self.dst_addr = '2000::2' self._setup_arp_spoof_for_port(self.src_p.name, [self.src_addr]) self._setup_arp_spoof_for_port(self.dst_p.name, [self.dst_addr]) self.src_p.addr.add('%s/64' % self.src_addr) self.dst_p.addr.add('%s/64' % self.dst_addr) # make sure the IPv6 addresses are ready before pinging self.src_p.addr.wait_until_address_ready(self.src_addr) self.dst_p.addr.wait_until_address_ready(self.dst_addr) net_helpers.assert_ping(self.src_namespace, self.dst_addr, count=2) def test_arp_spoof_blocks_response(self): # this will prevent the destination from responding to the ARP # request for its own address self._setup_arp_spoof_for_port(self.dst_p.name, ['192.168.0.3']) self.src_p.addr.add('%s/24' % self.src_addr) self.dst_p.addr.add('%s/24' % self.dst_addr) net_helpers.assert_no_ping(self.src_namespace, self.dst_addr, count=2) def test_arp_spoof_blocks_icmpv6_neigh_advt(self): self.src_addr = '2000::1' self.dst_addr = '2000::2' # this will prevent the destination from responding (i.e., icmpv6 # neighbour advertisement) to the icmpv6 neighbour solicitation # request for its own address (2000::2) as spoofing rules added # below only allow '2000::3'.
self._setup_arp_spoof_for_port(self.dst_p.name, ['2000::3']) self.src_p.addr.add('%s/64' % self.src_addr) self.dst_p.addr.add('%s/64' % self.dst_addr) # make sure the IPv6 addresses are ready before pinging self.src_p.addr.wait_until_address_ready(self.src_addr) self.dst_p.addr.wait_until_address_ready(self.dst_addr) net_helpers.assert_no_ping(self.src_namespace, self.dst_addr, count=2) def test_arp_spoof_blocks_request(self): # this will prevent the source from sending an ARP # request with its own address self._setup_arp_spoof_for_port(self.src_p.name, ['192.168.0.3']) self.src_p.addr.add('%s/24' % self.src_addr) self.dst_p.addr.add('%s/24' % self.dst_addr) ns_ip_wrapper = ip_lib.IPWrapper(self.src_namespace) try: ns_ip_wrapper.netns.execute(['arping', '-I', self.src_p.name, '-c1', self.dst_addr]) tools.fail("arping should have failed. The arp request should " "have been blocked.") except RuntimeError: pass def test_arp_spoof_allowed_address_pairs(self): self._setup_arp_spoof_for_port(self.dst_p.name, ['192.168.0.3', self.dst_addr]) self.src_p.addr.add('%s/24' % self.src_addr) self.dst_p.addr.add('%s/24' % self.dst_addr) net_helpers.assert_ping(self.src_namespace, self.dst_addr, count=2) def test_arp_spoof_icmpv6_neigh_advt_allowed_address_pairs(self): self.src_addr = '2000::1' self.dst_addr = '2000::2' self._setup_arp_spoof_for_port(self.dst_p.name, ['2000::3', self.dst_addr]) self.src_p.addr.add('%s/64' % self.src_addr) self.dst_p.addr.add('%s/64' % self.dst_addr) # make sure the IPv6 addresses are ready before pinging self.src_p.addr.wait_until_address_ready(self.src_addr) self.dst_p.addr.wait_until_address_ready(self.dst_addr) net_helpers.assert_ping(self.src_namespace, self.dst_addr, count=2) def test_arp_spoof_allowed_address_pairs_0cidr(self): self._setup_arp_spoof_for_port(self.dst_p.name, ['9.9.9.9/0', '1.2.3.4']) self.src_p.addr.add('%s/24' % self.src_addr) self.dst_p.addr.add('%s/24' % self.dst_addr) net_helpers.assert_ping(self.src_namespace, self.dst_addr, count=2) def test_arp_spoof_disable_port_security(self): # block first and then disable port security to make sure old rules # are cleared self._setup_arp_spoof_for_port(self.dst_p.name, ['192.168.0.3']) self._setup_arp_spoof_for_port(self.dst_p.name, ['192.168.0.3'], psec=False) self.src_p.addr.add('%s/24' % self.src_addr) self.dst_p.addr.add('%s/24' % self.dst_addr) net_helpers.assert_ping(self.src_namespace, self.dst_addr, count=2) def test_arp_spoof_disable_network_port(self): # block first and then disable port security to make sure old rules # are cleared self._setup_arp_spoof_for_port(self.dst_p.name, ['192.168.0.3']) self._setup_arp_spoof_for_port( self.dst_p.name, ['192.168.0.3'], device_owner=n_const.DEVICE_OWNER_ROUTER_GW) self.src_p.addr.add('%s/24' % self.src_addr) self.dst_p.addr.add('%s/24' % self.dst_addr) net_helpers.assert_ping(self.src_namespace, self.dst_addr, count=2) def _setup_arp_spoof_for_port(self, port, addrs, psec=True, device_owner='nobody', mac=None): vif = next( vif for vif in self.br.get_vif_ports() if vif.port_name == port) ip_addr = addrs.pop() details = {'port_security_enabled': psec, 'fixed_ips': [{'ip_address': ip_addr}], 'device_owner': device_owner, 'allowed_address_pairs': [ dict(ip_address=ip) for ip in addrs]} if mac: vif.vif_mac = mac ovsagt.OVSNeutronAgent.setup_arp_spoofing_protection( self.br_int, vif, details) class CanaryTableTestCase(OVSAgentTestBase): def test_canary_table(self): self.br_int.delete_flows() self.assertEqual(constants.OVS_RESTARTED, 
self.br_int.check_canary_table()) self.br_int.setup_canary_table() self.assertEqual(constants.OVS_NORMAL, self.br_int.check_canary_table()) neutron-8.4.0/neutron/tests/functional/agent/test_firewall.py0000664000567000056710000010413013044372760025675 0ustar jenkinsjenkins00000000000000# Copyright 2015 Intel Corporation. # Copyright 2015 Isaku Yamahata # # Copyright 2015 Red Hat, Inc. # All Rights Reserved. # # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import copy import functools import random import netaddr from oslo_config import cfg from oslo_log import log as logging import testscenarios from neutron.agent import firewall from neutron.agent.linux import iptables_firewall from neutron.agent.linux import openvswitch_firewall from neutron.agent import securitygroups_rpc as sg_cfg from neutron.cmd.sanity import checks from neutron.common import constants from neutron.tests.common import conn_testers from neutron.tests.functional import base LOG = logging.getLogger(__name__) load_tests = testscenarios.load_tests_apply_scenarios reverse_direction = { conn_testers.ConnectionTester.INGRESS: conn_testers.ConnectionTester.EGRESS, conn_testers.ConnectionTester.EGRESS: conn_testers.ConnectionTester.INGRESS} reverse_transport_protocol = { conn_testers.ConnectionTester.TCP: conn_testers.ConnectionTester.UDP, conn_testers.ConnectionTester.UDP: conn_testers.ConnectionTester.TCP} DEVICE_OWNER_COMPUTE = constants.DEVICE_OWNER_COMPUTE_PREFIX + 'fake' VLAN_COUNT = 4096 def skip_if_firewall(firewall_name): def outter(f): @functools.wraps(f) def wrap(self, *args, **kwargs): if self.firewall_name == firewall_name: self.skipTest("This test doesn't use %s firewall" % firewall_name) return f(self, *args, **kwargs) return wrap return outter def _add_rule(sg_rules, base, port_range_min=None, port_range_max=None): rule = copy.copy(base) if port_range_min: rule['port_range_min'] = port_range_min if port_range_max: rule['port_range_max'] = port_range_max sg_rules.append(rule) class BaseFirewallTestCase(base.BaseSudoTestCase): FAKE_SECURITY_GROUP_ID = 'fake_sg_id' MAC_SPOOFED = "fa:16:3e:9a:2f:48" scenarios = [('IptablesFirewallDriver without ipset', {'enable_ipset': False, 'initialize': 'initialize_iptables', 'firewall_name': 'iptables'}), ('IptablesFirewallDriver with ipset', {'enable_ipset': True, 'initialize': 'initialize_iptables', 'firewall_name': 'iptables'}), ('OVS Firewall Driver', {'initialize': 'initialize_ovs', 'firewall_name': 'openvswitch'})] ip_cidr = None vlan_range = set(range(VLAN_COUNT)) def setUp(self): cfg.CONF.register_opts(sg_cfg.security_group_opts, 'SECURITYGROUP') super(BaseFirewallTestCase, self).setUp() self.tester, self.firewall = getattr(self, self.initialize)() if self.firewall_name == "openvswitch": self.assign_vlan_to_peers() self.src_port_desc = self._create_port_description( self.tester.vm_port_id, [self.tester.vm_ip_address], self.tester.vm_mac_address, [self.FAKE_SECURITY_GROUP_ID]) # FIXME(jlibosva): We should consider to call prepare_port_filter with # deferred bridge depending on its 
performance self.firewall.prepare_port_filter(self.src_port_desc) def initialize_iptables(self): cfg.CONF.set_override('enable_ipset', self.enable_ipset, 'SECURITYGROUP') tester = self.useFixture( conn_testers.LinuxBridgeConnectionTester(self.ip_cidr)) firewall_drv = iptables_firewall.IptablesFirewallDriver( namespace=tester.bridge_namespace) return tester, firewall_drv def initialize_ovs(self): # Tests for OVS require kernel >= 4.3 and OVS >= 2.5 if not checks.ovs_conntrack_supported(): self.skipTest("Open vSwitch with conntrack is not installed " "on this machine. To run tests for the OVS/CT firewall, " "please meet the requirements (kernel>=4.3, " "OVS>=2.5). More info at " "https://github.com/openvswitch/ovs/blob/master/" "FAQ.md") tester = self.useFixture( conn_testers.OVSConnectionTester(self.ip_cidr)) firewall_drv = openvswitch_firewall.OVSFirewallDriver(tester.bridge) return tester, firewall_drv def assign_vlan_to_peers(self): vlan = self.get_not_used_vlan() LOG.debug("Using vlan tag %d for this test", vlan) self.tester.set_vm_tag(vlan) self.tester.set_peer_tag(vlan) def get_not_used_vlan(self): port_vlans = self.firewall.int_br.br.ovsdb.db_find( 'Port', ('tag', '!=', '[]'), columns=['tag']).execute() used_vlan_tags = {val['tag'] for val in port_vlans} available_vlans = self.vlan_range - used_vlan_tags return random.choice(list(available_vlans)) @staticmethod def _create_port_description(port_id, ip_addresses, mac_address, sg_ids): return {'admin_state_up': True, 'device': port_id, 'device_owner': DEVICE_OWNER_COMPUTE, 'fixed_ips': ip_addresses, 'mac_address': mac_address, 'port_security_enabled': True, 'security_groups': sg_ids, 'status': 'ACTIVE'} def _apply_security_group_rules(self, sg_id, sg_rules): with self.firewall.defer_apply(): self.firewall.update_security_group_rules(sg_id, sg_rules) self.firewall.update_port_filter(self.src_port_desc) def _apply_security_group_members(self, sg_id, members): with self.firewall.defer_apply(): self.firewall.update_security_group_members(sg_id, members) self.firewall.update_port_filter(self.src_port_desc) class FirewallTestCase(BaseFirewallTestCase): ip_cidr = '192.168.0.1/24' @skip_if_firewall('openvswitch') def test_rule_application_converges(self): sg_rules = [{'ethertype': 'IPv4', 'direction': 'egress'}, {'ethertype': 'IPv6', 'direction': 'egress'}, {'ethertype': 'IPv4', 'direction': 'ingress', 'source_ip_prefix': '0.0.0.0/0', 'protocol': 'icmp'}, {'ethertype': 'IPv6', 'direction': 'ingress', 'source_ip_prefix': '0::0/0', 'protocol': 'ipv6-icmp'}] # make sure port ranges converge on all protocols with and without # port ranges (prevents regression of bug 1502924) for proto in ('tcp', 'udp', 'icmp'): for version in ('IPv4', 'IPv6'): if proto == 'icmp' and version == 'IPv6': proto = 'ipv6-icmp' base = {'ethertype': version, 'direction': 'ingress', 'protocol': proto} sg_rules.append(copy.copy(base)) _add_rule(sg_rules, base, port_range_min=50, port_range_max=50) _add_rule(sg_rules, base, port_range_max=55) _add_rule(sg_rules, base, port_range_min=60, port_range_max=60) _add_rule(sg_rules, base, port_range_max=65) # add some single-host rules to prevent regression of bug 1502917 sg_rules.append({'ethertype': 'IPv4', 'direction': 'ingress', 'source_ip_prefix': '77.77.77.77/32'}) sg_rules.append({'ethertype': 'IPv6', 'direction': 'ingress', 'source_ip_prefix': 'fe80::1/128'}) self.firewall.update_security_group_rules( self.FAKE_SECURITY_GROUP_ID, sg_rules) self.firewall.prepare_port_filter(self.src_port_desc) # after one prepare call, another
apply should be a NOOP self.assertEqual([], self.firewall.iptables._apply()) orig_sg_rules = copy.copy(sg_rules) for proto in ('tcp', 'udp', 'icmp'): for version in ('IPv4', 'IPv6'): if proto == 'icmp' and version == 'IPv6': proto = 'ipv6-icmp' # make sure firewall is in converged state self.firewall.update_security_group_rules( self.FAKE_SECURITY_GROUP_ID, orig_sg_rules) self.firewall.update_port_filter(self.src_port_desc) sg_rules = copy.copy(orig_sg_rules) # remove one rule and add another to make sure it results in # exactly one delete and insert sg_rules.pop(0 if version == 'IPv4' else 1) sg_rules.append({'ethertype': version, 'direction': 'egress', 'protocol': proto}) self.firewall.update_security_group_rules( self.FAKE_SECURITY_GROUP_ID, sg_rules) result = self.firewall.update_port_filter(self.src_port_desc) deletes = [r for r in result if r.startswith('-D ')] creates = [r for r in result if r.startswith('-I ')] self.assertEqual(1, len(deletes)) self.assertEqual(1, len(creates)) # quick sanity check to make sure the insert was for the # correct proto self.assertIn('-p %s' % proto, creates[0]) # another apply should be a NOOP if the right rule was removed # and the new one was inserted in the correct position self.assertEqual([], self.firewall.iptables._apply()) @skip_if_firewall('openvswitch') def test_rule_ordering_correct(self): sg_rules = [ {'ethertype': 'IPv4', 'direction': 'egress', 'protocol': 'tcp', 'port_range_min': i, 'port_range_max': i} for i in range(50, 61) ] self.firewall.update_security_group_rules( self.FAKE_SECURITY_GROUP_ID, sg_rules) self.firewall.prepare_port_filter(self.src_port_desc) self._assert_sg_out_tcp_rules_appear_in_order(sg_rules) # remove a rule and add a new one sg_rules.pop(5) sg_rules.insert(8, {'ethertype': 'IPv4', 'direction': 'egress', 'protocol': 'tcp', 'port_range_min': 400, 'port_range_max': 400}) self.firewall.update_security_group_rules( self.FAKE_SECURITY_GROUP_ID, sg_rules) self.firewall.prepare_port_filter(self.src_port_desc) self._assert_sg_out_tcp_rules_appear_in_order(sg_rules) # reverse all of the rules (requires lots of deletes and inserts) sg_rules = list(reversed(sg_rules)) self.firewall.update_security_group_rules( self.FAKE_SECURITY_GROUP_ID, sg_rules) self.firewall.prepare_port_filter(self.src_port_desc) self._assert_sg_out_tcp_rules_appear_in_order(sg_rules) def _assert_sg_out_tcp_rules_appear_in_order(self, sg_rules): outgoing_rule_pref = '-A %s-o%s' % (self.firewall.iptables.wrap_name, self.src_port_desc['device'][3:13]) rules = [ r for r in self.firewall.iptables.get_rules_for_table('filter') if r.startswith(outgoing_rule_pref) ] # we want to ensure the rules went in in the same order we sent indexes = [rules.index('%s -p tcp -m tcp --dport %s -j RETURN' % (outgoing_rule_pref, i['port_range_min'])) for i in sg_rules] # all indexes should be in order with no unexpected rules in between self.assertEqual(range(indexes[0], indexes[-1] + 1), indexes) def test_ingress_icmp_secgroup(self): # update the sg_group to make ping pass sg_rules = [{'ethertype': constants.IPv4, 'direction': firewall.INGRESS_DIRECTION, 'protocol': constants.PROTO_NAME_ICMP}, {'ethertype': constants.IPv4, 'direction': firewall.EGRESS_DIRECTION}] self.tester.assert_no_connection(protocol=self.tester.ICMP, direction=self.tester.INGRESS) self._apply_security_group_rules(self.FAKE_SECURITY_GROUP_ID, sg_rules) self.tester.assert_connection(protocol=self.tester.ICMP, direction=self.tester.INGRESS) def test_mac_spoofing(self): sg_rules = [{'ethertype': 
constants.IPv4, 'direction': firewall.INGRESS_DIRECTION, 'protocol': constants.PROTO_NAME_ICMP}, {'ethertype': constants.IPv4, 'direction': firewall.EGRESS_DIRECTION}] self._apply_security_group_rules(self.FAKE_SECURITY_GROUP_ID, sg_rules) self.tester.assert_connection(protocol=self.tester.ICMP, direction=self.tester.INGRESS) self.tester.vm_mac_address = self.MAC_SPOOFED self.tester.flush_arp_tables() self.tester.assert_no_connection(protocol=self.tester.ICMP, direction=self.tester.INGRESS) self.tester.assert_no_connection(protocol=self.tester.ICMP, direction=self.tester.EGRESS) @skip_if_firewall('openvswitch') def test_mac_spoofing_works_without_port_security_enabled(self): self.src_port_desc['port_security_enabled'] = False self.firewall.update_port_filter(self.src_port_desc) self.tester.assert_connection(protocol=self.tester.ICMP, direction=self.tester.INGRESS) self.tester.vm_mac_address = self.MAC_SPOOFED self.tester.flush_arp_tables() self.tester.assert_connection(protocol=self.tester.ICMP, direction=self.tester.INGRESS) self.tester.assert_connection(protocol=self.tester.ICMP, direction=self.tester.EGRESS) def test_port_security_enabled_set_to_false(self): self.tester.assert_no_connection(protocol=self.tester.ICMP, direction=self.tester.INGRESS) self.src_port_desc['port_security_enabled'] = False self.firewall.update_port_filter(self.src_port_desc) self.tester.assert_connection(protocol=self.tester.ICMP, direction=self.tester.INGRESS) def test_dhcp_requests_from_vm(self): # DHCPv4 clients send from source port 68 to destination port 67 self.tester.assert_connection(direction=self.tester.EGRESS, protocol=self.tester.UDP, src_port=68, dst_port=67) def test_dhcp_server_forbidden_on_vm(self): self.tester.assert_no_connection(direction=self.tester.EGRESS, protocol=self.tester.UDP, src_port=67, dst_port=68) self.tester.assert_no_connection(direction=self.tester.INGRESS, protocol=self.tester.UDP, src_port=68, dst_port=67) def test_ip_spoofing(self): sg_rules = [{'ethertype': constants.IPv4, 'direction': firewall.INGRESS_DIRECTION, 'protocol': constants.PROTO_NAME_ICMP}] self._apply_security_group_rules(self.FAKE_SECURITY_GROUP_ID, sg_rules) not_allowed_ip = "%s/24" % ( netaddr.IPAddress(self.tester.vm_ip_address) + 1) self.tester.assert_connection(protocol=self.tester.ICMP, direction=self.tester.INGRESS) self.tester.vm_ip_cidr = not_allowed_ip self.tester.assert_no_connection(protocol=self.tester.ICMP, direction=self.tester.INGRESS) self.tester.assert_no_connection(protocol=self.tester.ICMP, direction=self.tester.EGRESS) self.tester.assert_no_connection(protocol=self.tester.UDP, src_port=68, dst_port=67, direction=self.tester.EGRESS) @skip_if_firewall('openvswitch') def test_ip_spoofing_works_without_port_security_enabled(self): self.src_port_desc['port_security_enabled'] = False self.firewall.update_port_filter(self.src_port_desc) sg_rules = [{'ethertype': constants.IPv4, 'direction': firewall.INGRESS_DIRECTION, 'protocol': constants.PROTO_NAME_ICMP}] self._apply_security_group_rules(self.FAKE_SECURITY_GROUP_ID, sg_rules) not_allowed_ip = "%s/24" % ( netaddr.IPAddress(self.tester.vm_ip_address) + 1) self.tester.assert_connection(protocol=self.tester.ICMP, direction=self.tester.INGRESS) self.tester.vm_ip_cidr = not_allowed_ip self.tester.assert_connection(protocol=self.tester.ICMP, direction=self.tester.INGRESS) self.tester.assert_connection(protocol=self.tester.ICMP, direction=self.tester.EGRESS) def test_allowed_address_pairs(self): sg_rules = [{'ethertype': constants.IPv4, 'direction':
firewall.INGRESS_DIRECTION, 'protocol': constants.PROTO_NAME_ICMP}, {'ethertype': constants.IPv4, 'direction': firewall.EGRESS_DIRECTION}] self._apply_security_group_rules(self.FAKE_SECURITY_GROUP_ID, sg_rules) port_mac = self.tester.vm_mac_address allowed_ip = netaddr.IPAddress(self.tester.vm_ip_address) + 1 not_allowed_ip = "%s/24" % (allowed_ip + 1) self.src_port_desc['allowed_address_pairs'] = [ {'mac_address': port_mac, 'ip_address': allowed_ip}] allowed_ip = "%s/24" % allowed_ip self.firewall.update_port_filter(self.src_port_desc) self.tester.assert_connection(protocol=self.tester.ICMP, direction=self.tester.INGRESS) self.tester.vm_ip_cidr = allowed_ip self.tester.assert_connection(protocol=self.tester.ICMP, direction=self.tester.INGRESS) self.tester.vm_ip_cidr = not_allowed_ip self.tester.assert_no_connection(protocol=self.tester.ICMP, direction=self.tester.INGRESS) def test_arp_is_allowed(self): self.tester.assert_connection(protocol=self.tester.ARP, direction=self.tester.EGRESS) self.tester.assert_connection(protocol=self.tester.ARP, direction=self.tester.INGRESS) def _test_rule(self, direction, protocol): sg_rules = [{'ethertype': constants.IPv4, 'direction': direction, 'protocol': protocol}] self._apply_security_group_rules(self.FAKE_SECURITY_GROUP_ID, sg_rules) not_allowed_direction = reverse_direction[direction] not_allowed_protocol = reverse_transport_protocol[protocol] self.tester.assert_connection(protocol=protocol, direction=direction) self.tester.assert_no_connection(protocol=not_allowed_protocol, direction=direction) self.tester.assert_no_connection(protocol=protocol, direction=not_allowed_direction) def test_ingress_tcp_rule(self): self._test_rule(self.tester.INGRESS, self.tester.TCP) def test_next_port_closed(self): # https://bugs.launchpad.net/neutron/+bug/1611991 was caused by wrong # masking in rules which allow traffic to a port with even port number port = 42 for direction in (self.tester.EGRESS, self.tester.INGRESS): sg_rules = [{'ethertype': constants.IPv4, 'direction': direction, 'protocol': constants.PROTO_NAME_TCP, 'source_port_range_min': port, 'source_port_range_max': port}] self._apply_security_group_rules(self.FAKE_SECURITY_GROUP_ID, sg_rules) self.tester.assert_connection(protocol=self.tester.TCP, direction=direction, src_port=port) self.tester.assert_no_connection(protocol=self.tester.TCP, direction=direction, src_port=port + 1) def test_ingress_udp_rule(self): self._test_rule(self.tester.INGRESS, self.tester.UDP) def test_egress_tcp_rule(self): self._test_rule(self.tester.EGRESS, self.tester.TCP) def test_egress_udp_rule(self): self._test_rule(self.tester.EGRESS, self.tester.UDP) def test_connection_with_destination_port_range(self): port_min = 12345 port_max = 12346 sg_rules = [{'ethertype': constants.IPv4, 'direction': firewall.INGRESS_DIRECTION, 'protocol': constants.PROTO_NAME_TCP, 'port_range_min': port_min, 'port_range_max': port_max}] self._apply_security_group_rules(self.FAKE_SECURITY_GROUP_ID, sg_rules) self.tester.assert_connection(protocol=self.tester.TCP, direction=self.tester.INGRESS, dst_port=port_min) self.tester.assert_connection(protocol=self.tester.TCP, direction=self.tester.INGRESS, dst_port=port_max) self.tester.assert_no_connection(protocol=self.tester.TCP, direction=self.tester.INGRESS, dst_port=port_min - 1) self.tester.assert_no_connection(protocol=self.tester.TCP, direction=self.tester.INGRESS, dst_port=port_max + 1) def test_connection_with_source_port_range(self): source_port_min = 12345 source_port_max = 12346 sg_rules = 
[{'ethertype': constants.IPv4, 'direction': firewall.EGRESS_DIRECTION, 'protocol': constants.PROTO_NAME_TCP, 'source_port_range_min': source_port_min, 'source_port_range_max': source_port_max}] self._apply_security_group_rules(self.FAKE_SECURITY_GROUP_ID, sg_rules) self.tester.assert_connection(protocol=self.tester.TCP, direction=self.tester.EGRESS, src_port=source_port_min) self.tester.assert_connection(protocol=self.tester.TCP, direction=self.tester.EGRESS, src_port=source_port_max) self.tester.assert_no_connection(protocol=self.tester.TCP, direction=self.tester.EGRESS, src_port=source_port_min - 1) self.tester.assert_no_connection(protocol=self.tester.TCP, direction=self.tester.EGRESS, src_port=source_port_max + 1) @skip_if_firewall('iptables') def test_established_connection_is_cut(self): port = 12345 sg_rules = [{'ethertype': constants.IPv4, 'direction': firewall.INGRESS_DIRECTION, 'protocol': constants.PROTO_NAME_TCP, 'port_range_min': port, 'port_range_max': port}] connection = {'protocol': self.tester.TCP, 'direction': self.tester.INGRESS, 'dst_port': port} self._apply_security_group_rules(self.FAKE_SECURITY_GROUP_ID, sg_rules) self.tester.establish_connection(**connection) self._apply_security_group_rules(self.FAKE_SECURITY_GROUP_ID, list()) self.tester.assert_no_established_connection(**connection) @skip_if_firewall('openvswitch') def test_preventing_firewall_blink(self): direction = self.tester.INGRESS sg_rules = [{'ethertype': 'IPv4', 'direction': 'ingress', 'protocol': 'tcp'}] self.tester.start_sending_icmp(direction) self._apply_security_group_rules(self.FAKE_SECURITY_GROUP_ID, sg_rules) self._apply_security_group_rules(self.FAKE_SECURITY_GROUP_ID, {}) self._apply_security_group_rules(self.FAKE_SECURITY_GROUP_ID, sg_rules) self.tester.stop_sending_icmp(direction) packets_sent = self.tester.get_sent_icmp_packets(direction) packets_received = self.tester.get_received_icmp_packets(direction) self.assertGreater(packets_sent, 0) self.assertEqual(0, packets_received) def test_remote_security_groups(self): remote_sg_id = 'remote_sg_id' peer_port_desc = self._create_port_description( self.tester.peer_port_id, [self.tester.peer_ip_address], self.tester.peer_mac_address, [remote_sg_id]) vm_sg_members = {'IPv4': [self.tester.peer_ip_address]} peer_sg_rules = [{'ethertype': 'IPv4', 'direction': 'egress', 'protocol': 'icmp'}] self.firewall.update_security_group_rules(remote_sg_id, peer_sg_rules) self.firewall.update_security_group_members(remote_sg_id, vm_sg_members) self.firewall.prepare_port_filter(peer_port_desc) vm_sg_rules = [{'ethertype': 'IPv4', 'direction': 'ingress', 'protocol': 'icmp', 'remote_group_id': remote_sg_id}] self._apply_security_group_rules(self.FAKE_SECURITY_GROUP_ID, vm_sg_rules) self.tester.assert_connection(protocol=self.tester.ICMP, direction=self.tester.INGRESS) self.tester.assert_no_connection(protocol=self.tester.TCP, direction=self.tester.INGRESS) self.tester.assert_no_connection(protocol=self.tester.ICMP, direction=self.tester.EGRESS) def test_related_connection(self): """Test that ICMP net unreachable packets get back When the destination address of IP traffic is not reachable, ICMP packets are returned. These packets are marked as RELATED traffic by conntrack, and this test case validates that such packets are not dropped by the firewall even though ingress ICMP packets are not allowed in this test case. The address 1.2.3.4 used below is outside of the subnet used by the tester object. 
""" # Enable ip forwarding on the interface in order to reply with # destionation net unreachable self.tester._peer.execute([ 'sysctl', '-w', 'net.ipv4.conf.%s.forwarding=1' % self.tester._peer.port.name]) self.tester.set_vm_default_gateway(self.tester.peer_ip_address) vm_sg_rules = [{'ethertype': 'IPv4', 'direction': 'egress', 'protocol': 'icmp'}] self._apply_security_group_rules(self.FAKE_SECURITY_GROUP_ID, vm_sg_rules) self.tester.assert_net_unreachable(self.tester.EGRESS, '1.2.3.4') class FirewallTestCaseIPv6(BaseFirewallTestCase): scenarios = [('OVS Firewall Driver', {'initialize': 'initialize_ovs', 'firewall_name': 'openvswitch'})] ip_cidr = '2001:db8:aaaa::1/64' def test_icmp_from_specific_address(self): sg_rules = [{'ethertype': constants.IPv6, 'direction': firewall.INGRESS_DIRECTION, 'protocol': constants.PROTO_NAME_ICMP, 'source_ip_prefix': self.tester.peer_ip_address}] self.tester.assert_no_connection(protocol=self.tester.ICMP, direction=self.tester.INGRESS) self._apply_security_group_rules(self.FAKE_SECURITY_GROUP_ID, sg_rules) self.tester.assert_connection(protocol=self.tester.ICMP, direction=self.tester.INGRESS) def test_icmp_to_specific_address(self): sg_rules = [{'ethertype': constants.IPv6, 'direction': firewall.EGRESS_DIRECTION, 'protocol': constants.PROTO_NAME_ICMP, 'destination_ip_prefix': self.tester.peer_ip_address}] self.tester.assert_no_connection(protocol=self.tester.ICMP, direction=self.tester.EGRESS) self._apply_security_group_rules(self.FAKE_SECURITY_GROUP_ID, sg_rules) self.tester.assert_connection(protocol=self.tester.ICMP, direction=self.tester.EGRESS) def test_tcp_from_specific_address(self): sg_rules = [{'ethertype': constants.IPv6, 'direction': firewall.INGRESS_DIRECTION, 'protocol': constants.PROTO_NAME_TCP, 'source_ip_prefix': self.tester.peer_ip_address}] self.tester.assert_no_connection(protocol=self.tester.TCP, direction=self.tester.INGRESS) self._apply_security_group_rules(self.FAKE_SECURITY_GROUP_ID, sg_rules) self.tester.assert_connection(protocol=self.tester.TCP, direction=self.tester.INGRESS) self.tester.assert_no_connection(protocol=self.tester.UDP, direction=self.tester.INGRESS) self.tester.assert_no_connection(protocol=self.tester.ICMP, direction=self.tester.INGRESS) def test_tcp_to_specific_address(self): sg_rules = [{'ethertype': constants.IPv6, 'direction': firewall.EGRESS_DIRECTION, 'protocol': constants.PROTO_NAME_TCP, 'destination_ip_prefix': self.tester.peer_ip_address}] self.tester.assert_no_connection(protocol=self.tester.TCP, direction=self.tester.EGRESS) self._apply_security_group_rules(self.FAKE_SECURITY_GROUP_ID, sg_rules) self.tester.assert_connection(protocol=self.tester.TCP, direction=self.tester.EGRESS) self.tester.assert_no_connection(protocol=self.tester.UDP, direction=self.tester.EGRESS) self.tester.assert_no_connection(protocol=self.tester.ICMP, direction=self.tester.EGRESS) def test_udp_from_specific_address(self): sg_rules = [{'ethertype': constants.IPv6, 'direction': firewall.INGRESS_DIRECTION, 'protocol': constants.PROTO_NAME_UDP, 'source_ip_prefix': self.tester.peer_ip_address}] self.tester.assert_no_connection(protocol=self.tester.UDP, direction=self.tester.INGRESS) self._apply_security_group_rules(self.FAKE_SECURITY_GROUP_ID, sg_rules) self.tester.assert_connection(protocol=self.tester.UDP, direction=self.tester.INGRESS) self.tester.assert_no_connection(protocol=self.tester.TCP, direction=self.tester.INGRESS) self.tester.assert_no_connection(protocol=self.tester.ICMP, direction=self.tester.INGRESS) def 
test_udp_to_specific_address(self): sg_rules = [{'ethertype': constants.IPv6, 'direction': firewall.EGRESS_DIRECTION, 'protocol': constants.PROTO_NAME_UDP, 'destination_ip_prefix': self.tester.peer_ip_address}] self.tester.assert_no_connection(protocol=self.tester.UDP, direction=self.tester.EGRESS) self._apply_security_group_rules(self.FAKE_SECURITY_GROUP_ID, sg_rules) self.tester.assert_connection(protocol=self.tester.UDP, direction=self.tester.EGRESS) self.tester.assert_no_connection(protocol=self.tester.TCP, direction=self.tester.EGRESS) self.tester.assert_no_connection(protocol=self.tester.ICMP, direction=self.tester.EGRESS) @skip_if_firewall('openvswitch') def test_ip_spoofing(self): sg_rules = [{'ethertype': constants.IPv6, 'direction': firewall.INGRESS_DIRECTION, 'protocol': constants.PROTO_NAME_ICMP}] self._apply_security_group_rules(self.FAKE_SECURITY_GROUP_ID, sg_rules) not_allowed_ip = "%s/64" % ( netaddr.IPAddress(self.tester.vm_ip_address) + 1) self.tester.assert_connection(protocol=self.tester.ICMP, direction=self.tester.INGRESS) self.tester.vm_ip_cidr = not_allowed_ip self.tester.assert_no_connection(protocol=self.tester.ICMP, direction=self.tester.INGRESS) self.tester.assert_no_connection(protocol=self.tester.ICMP, direction=self.tester.EGRESS) self.tester.assert_no_connection(protocol=self.tester.UDP, src_port=546, dst_port=547, direction=self.tester.EGRESS) neutron-8.4.0/neutron/tests/functional/agent/test_ovs_lib.py0000664000567000056710000003757413044372760025546 0ustar jenkinsjenkins00000000000000# Copyright (c) 2015 Red Hat, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import collections import uuid import mock from neutron.agent.common import ovs_lib from neutron.agent.linux import ip_lib from neutron.tests import base as tests_base from neutron.tests.common import net_helpers from neutron.tests.functional.agent.linux import base class OVSBridgeTestBase(base.BaseOVSLinuxTestCase): # TODO(twilson) So far, only ovsdb-related tests are written. 
It would be # good to also add the openflow-related functions def setUp(self): super(OVSBridgeTestBase, self).setUp() self.ovs = ovs_lib.BaseOVS() self.br = self.useFixture(net_helpers.OVSBridgeFixture()).bridge def create_ovs_port(self, *interface_attrs): # Convert ((a, b), (c, d)) to {a: b, c: d} and add 'type' by default attrs = collections.OrderedDict(interface_attrs) attrs.setdefault('type', 'internal') port_name = tests_base.get_rand_device_name(net_helpers.PORT_PREFIX) return (port_name, self.br.add_port(port_name, *attrs.items())) def create_ovs_vif_port(self, iface_id=None, mac=None, iface_field='iface-id'): if iface_id is None: iface_id = base.get_rand_name() if mac is None: mac = base.get_rand_name() attrs = ('external_ids', {iface_field: iface_id, 'attached-mac': mac}) port_name, ofport = self.create_ovs_port(attrs) return ovs_lib.VifPort(port_name, ofport, iface_id, mac, self.br) class OVSBridgeTestCase(OVSBridgeTestBase): def test_port_lifecycle(self): (port_name, ofport) = self.create_ovs_port(('type', 'internal')) # ofport should always be an integer string with value -1 or > 0. self.assertTrue(int(ofport)) self.assertTrue(int(self.br.get_port_ofport(port_name))) self.assertTrue(self.br.port_exists(port_name)) self.assertEqual(self.br.br_name, self.br.get_bridge_for_iface(port_name)) self.br.delete_port(port_name) self.assertFalse(self.br.port_exists(port_name)) def test_duplicate_port_may_exist_false(self): port_name, ofport = self.create_ovs_port(('type', 'internal')) cmd = self.br.ovsdb.add_port(self.br.br_name, port_name, may_exist=False) self.assertRaises(RuntimeError, cmd.execute, check_error=True) def test_delete_port_if_exists_false(self): cmd = self.br.ovsdb.del_port('nonexistantport', if_exists=False) self.assertRaises(RuntimeError, cmd.execute, check_error=True) def test_replace_port(self): port_name = tests_base.get_rand_device_name(net_helpers.PORT_PREFIX) self.br.replace_port(port_name, ('type', 'internal')) self.assertTrue(self.br.port_exists(port_name)) self.assertEqual('internal', self.br.db_get_val('Interface', port_name, 'type')) self.br.replace_port(port_name, ('type', 'internal'), ('external_ids', {'test': 'test'})) self.assertTrue(self.br.port_exists(port_name)) self.assertEqual('test', self.br.db_get_val('Interface', port_name, 'external_ids')['test']) def test_attribute_lifecycle(self): (port_name, ofport) = self.create_ovs_port() tag = 42 self.ovs.set_db_attribute('Port', port_name, 'tag', tag) self.assertEqual(tag, self.ovs.db_get_val('Port', port_name, 'tag')) self.assertEqual(tag, self.br.get_port_tag_dict()[port_name]) self.ovs.clear_db_attribute('Port', port_name, 'tag') self.assertEqual([], self.ovs.db_get_val('Port', port_name, 'tag')) self.assertEqual([], self.br.get_port_tag_dict()[port_name]) def test_get_bridge_external_bridge_id(self): self.ovs.set_db_attribute('Bridge', self.br.br_name, 'external_ids', {'bridge-id': self.br.br_name}) self.assertEqual( self.br.br_name, self.ovs.get_bridge_external_bridge_id(self.br.br_name)) def test_controller_lifecycle(self): controllers = {'tcp:127.0.0.1:6633', 'tcp:172.17.16.10:55'} self.br.set_controller(controllers) self.assertSetEqual(controllers, set(self.br.get_controller())) self.br.del_controller() self.assertEqual([], self.br.get_controller()) def test_non_index_queries(self): controllers = ['tcp:127.0.0.1:6633'] self.br.set_controller(controllers) cmd = self.br.ovsdb.db_set('Controller', self.br.br_name, ('connection_mode', 'out-of-band')) cmd.execute(check_error=True) 
self.assertEqual('out-of-band', self.br.db_get_val('Controller', self.br.br_name, 'connection_mode')) def test_set_fail_mode_secure(self): self.br.set_secure_mode() self._assert_br_fail_mode(ovs_lib.FAILMODE_SECURE) def test_set_fail_mode_standalone(self): self.br.set_standalone_mode() self._assert_br_fail_mode(ovs_lib.FAILMODE_STANDALONE) def _assert_br_fail_mode(self, fail_mode): self.assertEqual( self.br.db_get_val('Bridge', self.br.br_name, 'fail_mode'), fail_mode) def test_set_protocols(self): self.br.set_protocols('OpenFlow10') self.assertEqual( self.br.db_get_val('Bridge', self.br.br_name, 'protocols'), "OpenFlow10") def test_get_datapath_id(self): brdev = ip_lib.IPDevice(self.br.br_name) dpid = brdev.link.attributes['link/ether'].replace(':', '') self.br.set_db_attribute('Bridge', self.br.br_name, 'datapath_id', dpid) self.assertIn(dpid, self.br.get_datapath_id()) def _test_add_tunnel_port(self, attrs): port_name = tests_base.get_rand_device_name(net_helpers.PORT_PREFIX) self.br.add_tunnel_port(port_name, attrs['remote_ip'], attrs['local_ip']) self.assertEqual('gre', self.ovs.db_get_val('Interface', port_name, 'type')) options = self.ovs.db_get_val('Interface', port_name, 'options') for attr, val in attrs.items(): self.assertEqual(val, options[attr]) def test_add_tunnel_port_ipv4(self): attrs = { 'remote_ip': '192.0.2.1', # RFC 5737 TEST-NET-1 'local_ip': '198.51.100.1', # RFC 5737 TEST-NET-2 } self._test_add_tunnel_port(attrs) def test_add_tunnel_port_ipv6(self): attrs = { 'remote_ip': '2001:db8:200::1', 'local_ip': '2001:db8:100::1', } self._test_add_tunnel_port(attrs) def test_add_patch_port(self): local = tests_base.get_rand_device_name(net_helpers.PORT_PREFIX) peer = 'remotepeer' self.br.add_patch_port(local, peer) self.assertEqual(self.ovs.db_get_val('Interface', local, 'type'), 'patch') options = self.ovs.db_get_val('Interface', local, 'options') self.assertEqual(peer, options['peer']) def test_get_port_name_list(self): # Note that ovs-vsctl's list-ports does not include the port created # with the same name as the bridge ports = {self.create_ovs_port()[0] for i in range(5)} self.assertSetEqual(ports, set(self.br.get_port_name_list())) def test_get_iface_name_list(self): ifaces = {self.create_ovs_port()[0] for i in range(5)} self.assertSetEqual(ifaces, set(self.br.get_iface_name_list())) def test_get_port_stats(self): # Nothing seems to use this function? 
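# NOTE: get_port_stats is expected to surface per-interface counters
# (for example rx_packets/tx_packets); the exact key set varies with
# the OVS version, which is why the assertion below only checks that a
# known subset of keys is present.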
(port_name, ofport) = self.create_ovs_port() stats = set(self.br.get_port_stats(port_name).keys()) self.assertTrue(set(['rx_packets', 'tx_packets']).issubset(stats)) def test_get_vif_ports(self): for i in range(2): self.create_ovs_port() vif_ports = [self.create_ovs_vif_port() for i in range(3)] ports = self.br.get_vif_ports() self.assertEqual(3, len(ports)) self.assertTrue(all([isinstance(x, ovs_lib.VifPort) for x in ports])) self.assertEqual(sorted([x.port_name for x in vif_ports]), sorted([x.port_name for x in ports])) def test_get_vif_ports_with_bond(self): for i in range(2): self.create_ovs_port() vif_ports = [self.create_ovs_vif_port() for i in range(3)] # bond ports don't have records in the Interface table but they do in # the Port table orig = self.br.get_port_name_list new_port_name_list = lambda: orig() + ['bondport'] mock.patch.object(self.br, 'get_port_name_list', new=new_port_name_list).start() ports = self.br.get_vif_ports() self.assertEqual(3, len(ports)) self.assertTrue(all([isinstance(x, ovs_lib.VifPort) for x in ports])) self.assertEqual(sorted([x.port_name for x in vif_ports]), sorted([x.port_name for x in ports])) def test_get_vif_port_set(self): for i in range(2): self.create_ovs_port() vif_ports = [self.create_ovs_vif_port() for i in range(2)] ports = self.br.get_vif_port_set() expected = set([x.vif_id for x in vif_ports]) self.assertEqual(expected, ports) def test_get_vif_port_set_with_missing_port(self): self.create_ovs_port() vif_ports = [self.create_ovs_vif_port()] # return an extra port to make sure the db list ignores it orig = self.br.get_port_name_list new_port_name_list = lambda: orig() + ['anotherport'] mock.patch.object(self.br, 'get_port_name_list', new=new_port_name_list).start() ports = self.br.get_vif_port_set() expected = set([vif_ports[0].vif_id]) self.assertEqual(expected, ports) def test_get_vif_port_set_on_empty_bridge_returns_empty_set(self): # Create a port on self.br self.create_ovs_vif_port() # Create another, empty bridge br_2 = self.useFixture(net_helpers.OVSBridgeFixture()).bridge # Assert that get_vif_port_set on an empty bridge returns an empty set, # and does not return the other bridge's ports. 
self.assertEqual(set(), br_2.get_vif_port_set()) def test_get_ports_attributes(self): port_names = [self.create_ovs_port()[0], self.create_ovs_port()[0]] db_ports = self.br.get_ports_attributes('Interface', columns=['name']) db_ports_names = [p['name'] for p in db_ports] self.assertEqual(sorted(port_names), sorted(db_ports_names)) def test_get_port_tag_dict(self): # Simple case tested in port test_set_get_clear_db_val pass def test_get_vif_port_by_id(self): for i in range(2): self.create_ovs_port() vif_ports = [self.create_ovs_vif_port() for i in range(3)] for vif in vif_ports: self.assertEqual(self.br.get_vif_port_by_id(vif.vif_id).vif_id, vif.vif_id) def test_get_vifs_by_ids(self): for i in range(2): self.create_ovs_port() vif_ports = [self.create_ovs_vif_port() for i in range(3)] by_id = self.br.get_vifs_by_ids([v.vif_id for v in vif_ports]) # convert to str for comparison of VifPorts by_id = {vid: str(vport) for vid, vport in by_id.items()} self.assertEqual({v.vif_id: str(v) for v in vif_ports}, by_id) def test_delete_ports(self): # TODO(twilson) I intensely dislike the current delete_ports function # as the default behavior is really delete_vif_ports(), then it acts # more like a delete_ports() seems like it should if all_ports=True is # passed # Create 2 non-vif ports and 2 vif ports nonvifs = {self.create_ovs_port()[0] for i in range(2)} vifs = {self.create_ovs_vif_port().port_name for i in range(2)} self.assertSetEqual(nonvifs.union(vifs), set(self.br.get_port_name_list())) self.br.delete_ports() self.assertSetEqual(nonvifs, set(self.br.get_port_name_list())) self.br.delete_ports(all_ports=True) self.assertEqual(len(self.br.get_port_name_list()), 0) def test_set_controller_connection_mode(self): controllers = ['tcp:192.0.2.0:6633'] self._set_controllers_connection_mode(controllers) def test_set_multi_controllers_connection_mode(self): controllers = ['tcp:192.0.2.0:6633', 'tcp:192.0.2.1:55'] self._set_controllers_connection_mode(controllers) def _set_controllers_connection_mode(self, controllers): self.br.set_controller(controllers) self.assertEqual(sorted(controllers), sorted(self.br.get_controller())) self.br.set_controllers_connection_mode('out-of-band') self._assert_controllers_connection_mode('out-of-band') self.br.del_controller() self.assertEqual([], self.br.get_controller()) def _assert_controllers_connection_mode(self, connection_mode): controllers = self.br.db_get_val('Bridge', self.br.br_name, 'controller') controllers = [controllers] if isinstance( controllers, uuid.UUID) else controllers for controller in controllers: self.assertEqual(connection_mode, self.br.db_get_val('Controller', controller, 'connection_mode')) def test_egress_bw_limit(self): port_name, _ = self.create_ovs_port() self.br.create_egress_bw_limit_for_port(port_name, 700, 70) max_rate, burst = self.br.get_egress_bw_limit_for_port(port_name) self.assertEqual(700, max_rate) self.assertEqual(70, burst) self.br.delete_egress_bw_limit_for_port(port_name) max_rate, burst = self.br.get_egress_bw_limit_for_port(port_name) self.assertIsNone(max_rate) self.assertIsNone(burst) class OVSLibTestCase(base.BaseOVSLinuxTestCase): def setUp(self): super(OVSLibTestCase, self).setUp() self.ovs = ovs_lib.BaseOVS() def test_bridge_lifecycle_baseovs(self): name = base.get_rand_name(prefix=net_helpers.BR_PREFIX) self.addCleanup(self.ovs.delete_bridge, name) br = self.ovs.add_bridge(name) self.assertEqual(br.br_name, name) self.assertTrue(self.ovs.bridge_exists(name)) self.ovs.delete_bridge(name) 
self.assertFalse(self.ovs.bridge_exists(name)) def test_get_bridges(self): bridges = { self.useFixture(net_helpers.OVSBridgeFixture()).bridge.br_name for i in range(5)} self.assertTrue(set(self.ovs.get_bridges()).issuperset(bridges)) def test_bridge_lifecycle_ovsbridge(self): name = base.get_rand_name(prefix=net_helpers.BR_PREFIX) br = ovs_lib.OVSBridge(name) self.assertEqual(br.br_name, name) # Make sure that instantiating an OVSBridge does not actually create # the bridge self.assertFalse(self.ovs.bridge_exists(name)) self.addCleanup(self.ovs.delete_bridge, name) br.create() self.assertTrue(self.ovs.bridge_exists(name)) br.destroy() self.assertFalse(self.ovs.bridge_exists(name)) neutron-8.4.0/neutron/tests/functional/agent/linux/0000775000567000056710000000000013044373210023606 5ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/tests/functional/agent/linux/simple_daemon.py0000664000567000056710000000343113044372736027011 0ustar jenkinsjenkins00000000000000# Copyright 2014 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import time from oslo_config import cfg from neutron._i18n import _ from neutron.agent.linux import daemon def main(): class SimpleDaemon(daemon.Daemon): """The purpose of this daemon is to serve as an example, and also as a dummy daemon that can be invoked by functional testing; it does nothing but set the pid file and stay detached in the background. """ def run(self): while True: time.sleep(10) opts = [ cfg.StrOpt('uuid', help=_('uuid provided from the command line ' 'so external_process can track us via /proc/' 'cmdline interface.'), required=True), cfg.StrOpt('pid_file', help=_('Location of pid file of this process.'), required=True) ] cfg.CONF.register_cli_opts(opts) # Don't get the default configuration file cfg.CONF(project='neutron', default_config_files=[]) simple_daemon = SimpleDaemon(cfg.CONF.pid_file, uuid=cfg.CONF.uuid) simple_daemon.start() if __name__ == "__main__": main() neutron-8.4.0/neutron/tests/functional/agent/linux/test_tc_lib.py0000664000567000056710000000554613044372760026476 0ustar jenkinsjenkins00000000000000# Copyright (c) 2016 OVH SAS # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
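# The TcCommand helpers exercised in this module manage Linux traffic
# control (tc) settings on a device. As a rough sketch of what a TBF
# bandwidth limit looks like at the CLI level (an illustrative
# assumption, not necessarily the exact commands the library builds):
#
#   tc qdisc add dev test_tap_tbf root tbf rate 1024kbit burst 512kbit latency 50ms
#   tc qdisc show dev test_tap_tbf
#
# The filter-based variant instead attaches an ingress qdisc and a
# policing filter to enforce the limit.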
from oslo_log import log as logging from neutron.agent.linux import ip_lib from neutron.agent.linux import tc_lib from neutron.tests.functional import base as functional_base LOG = logging.getLogger(__name__) TEST_HZ_VALUE = 250 LATENCY = 50 BW_LIMIT = 1024 BURST = 512 BASE_DEV_NAME = "test_tap" class TcLibTestCase(functional_base.BaseSudoTestCase): def create_device(self, name): """Create a tuntap with the specified name. The device is cleaned up at the end of the test. """ ip = ip_lib.IPWrapper() tap_device = ip.add_tuntap(name) self.addCleanup(tap_device.link.delete) tap_device.link.set_up() def test_filters_bandwidth_limit(self): device_name = "%s_filters" % BASE_DEV_NAME self.create_device(device_name) tc = tc_lib.TcCommand(device_name, TEST_HZ_VALUE) tc.set_filters_bw_limit(BW_LIMIT, BURST) bw_limit, burst = tc.get_filters_bw_limits() self.assertEqual(BW_LIMIT, bw_limit) self.assertEqual(BURST, burst) new_bw_limit = BW_LIMIT + 500 new_burst = BURST + 50 tc.update_filters_bw_limit(new_bw_limit, new_burst) bw_limit, burst = tc.get_filters_bw_limits() self.assertEqual(new_bw_limit, bw_limit) self.assertEqual(new_burst, burst) tc.delete_filters_bw_limit() bw_limit, burst = tc.get_filters_bw_limits() self.assertIsNone(bw_limit) self.assertIsNone(burst) def test_tbf_bandwidth_limit(self): device_name = "%s_tbf" % BASE_DEV_NAME self.create_device(device_name) tc = tc_lib.TcCommand(device_name, TEST_HZ_VALUE) tc.set_tbf_bw_limit(BW_LIMIT, BURST, LATENCY) bw_limit, burst = tc.get_tbf_bw_limits() self.assertEqual(BW_LIMIT, bw_limit) self.assertEqual(BURST, burst) new_bw_limit = BW_LIMIT + 500 new_burst = BURST + 50 tc.update_tbf_bw_limit(new_bw_limit, new_burst, LATENCY) bw_limit, burst = tc.get_tbf_bw_limits() self.assertEqual(new_bw_limit, bw_limit) self.assertEqual(new_burst, burst) tc.delete_tbf_bw_limit() bw_limit, burst = tc.get_tbf_bw_limits() self.assertIsNone(bw_limit) self.assertIsNone(burst) neutron-8.4.0/neutron/tests/functional/agent/linux/test_dhcp.py0000664000567000056710000000731413044372760026153 0ustar jenkinsjenkins00000000000000# Copyright (c) 2015 Mirantis, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
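# NOTE: the cleanup assertions below depend on the DHCP DeviceManager
# naming convention of 'tap' + (a prefix of) the port id: the stale
# 'tapfoo_id2'/'tapfoo_id3' devices are expected to be unplugged by
# setup(), leaving only 'tapfoo_port_id' for the freshly created DHCP
# port.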
import mock from oslo_config import cfg from neutron.agent.common import config from neutron.agent.dhcp import config as dhcp_conf from neutron.agent.linux import dhcp from neutron.agent.linux import interface from neutron.agent.linux import ip_lib from neutron.common import config as common_conf from neutron.tests import base as tests_base from neutron.tests.common import net_helpers from neutron.tests.functional import base as functional_base class TestDhcp(functional_base.BaseSudoTestCase): def setUp(self): super(TestDhcp, self).setUp() conf = cfg.ConfigOpts() conf.register_opts(config.INTERFACE_DRIVER_OPTS) conf.register_opts(interface.OPTS) conf.register_opts(common_conf.core_opts) conf.register_opts(dhcp_conf.DHCP_AGENT_OPTS) conf.set_override('interface_driver', 'openvswitch') conf.set_override('host', 'foo_host') self.conf = conf br_int = self.useFixture(net_helpers.OVSBridgeFixture()).bridge self.conf.set_override('ovs_integration_bridge', br_int.br_name) def test_cleanup_stale_devices(self): plugin = mock.MagicMock() dev_mgr = dhcp.DeviceManager(self.conf, plugin) network = { 'id': 'foo_id', 'tenant_id': 'foo_tenant', 'namespace': 'qdhcp-foo_id', 'ports': [], 'subnets': [tests_base.AttributeDict({'id': 'subnet_foo_id', 'enable_dhcp': True, 'ipv6_address_mode': None, 'ipv6_ra_mode': None, 'cidr': '10.0.0.0/24', 'ip_version': 4, 'gateway_ip': '10.0.0.1'})]} dhcp_port = { 'id': 'foo_port_id', 'mac_address': '10:22:33:44:55:67', 'fixed_ips': [tests_base.AttributeDict( {'subnet_id': 'subnet_foo_id', 'ip_address': '10.0.0.1'})] } plugin.create_dhcp_port.return_value = tests_base.AttributeDict( dhcp_port) dev_mgr.driver.plug("foo_id", "foo_id2", "tapfoo_id2", "10:22:33:44:55:68", namespace="qdhcp-foo_id") dev_mgr.driver.plug("foo_id", "foo_id3", "tapfoo_id3", "10:22:33:44:55:69", namespace="qdhcp-foo_id") ipw = ip_lib.IPWrapper(namespace="qdhcp-foo_id") devices = ipw.get_devices(exclude_loopback=True) self.addCleanup(ipw.netns.delete, 'qdhcp-foo_id') self.assertEqual(2, len(devices)) # setting up dhcp for the network dev_mgr.setup(tests_base.AttributeDict(network)) devices = ipw.get_devices(exclude_loopback=True) # only one non-loopback device should remain self.assertEqual(1, len(devices)) self.assertEqual("tapfoo_port_id", devices[0].name) neutron-8.4.0/neutron/tests/functional/agent/linux/helpers.py0000664000567000056710000000552013044372736025640 0ustar jenkinsjenkins00000000000000# Copyright (c) 2014 Red Hat, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
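# NOTE: RecursivePermDirFixture below widens directory permissions by
# OR-ing the requested bits into each ancestor's mode. For example,
# with least_perms=0o555, a directory with mode 0o700 is chmod-ed to
# 0o700 | 0o555 == 0o755; the walk ends once os.path.dirname() stops
# changing the path (i.e. at '/').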
import multiprocessing import os import time import fixtures from neutron.agent.linux import utils from neutron.tests import tools class RecursivePermDirFixture(fixtures.Fixture): """Ensure at least perms permissions on directory and ancestors.""" def __init__(self, directory, perms): super(RecursivePermDirFixture, self).__init__() self.directory = directory self.least_perms = perms def _setUp(self): previous_directory = None current_directory = self.directory while previous_directory != current_directory: perms = os.stat(current_directory).st_mode if perms & self.least_perms != self.least_perms: os.chmod(current_directory, perms | self.least_perms) previous_directory = current_directory current_directory = os.path.dirname(current_directory) class AdminDirFixture(fixtures.Fixture): """Handle directory create/delete with admin permissions required""" def __init__(self, directory): super(AdminDirFixture, self).__init__() self.directory = directory def _setUp(self): # NOTE(cbrandily): Ensure we will not delete a directory existing # before test run during cleanup. if os.path.exists(self.directory): tools.fail('%s already exists' % self.directory) create_cmd = ['mkdir', '-p', self.directory] delete_cmd = ['rm', '-r', self.directory] utils.execute(create_cmd, run_as_root=True) self.addCleanup(utils.execute, delete_cmd, run_as_root=True) class SleepyProcessFixture(fixtures.Fixture): """ Process fixture that performs time.sleep for the given number of seconds. """ def __init__(self, timeout=60): super(SleepyProcessFixture, self).__init__() self.timeout = timeout @staticmethod def yawn(seconds): time.sleep(seconds) def _setUp(self): self.process = multiprocessing.Process(target=self.yawn, args=[self.timeout]) self.process.start() self.addCleanup(self.destroy) def destroy(self): self.process.terminate() @property def pid(self): return self.process.pid neutron-8.4.0/neutron/tests/functional/agent/linux/test_ipset.py0000664000567000056710000000756413044372760026370 0ustar jenkinsjenkins00000000000000# Copyright (c) 2015 Red Hat, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
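# The fixture below wires an ipset set into iptables: an ACCEPT rule
# matching the set is added before a blanket ICMP DROP, so whether a
# ping gets through depends purely on set membership. A rough CLI
# equivalent (illustrative only, not the literal commands IpsetManager
# runs):
#
#   ipset create <set-name> hash:net family inet
#   ipset add <set-name> <source-ip>
#   iptables -A INPUT -p icmp -m set --match-set <set-name> src -j ACCEPT
#   iptables -A INPUT -p icmp -j DROP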
from neutron.agent.linux import ip_lib from neutron.agent.linux import ipset_manager from neutron.agent.linux import iptables_manager from neutron.tests.common import machine_fixtures from neutron.tests.common import net_helpers from neutron.tests.functional.agent.linux import base from neutron.tests.functional import base as functional_base MAX_IPSET_NAME_LENGTH = 28 IPSET_ETHERTYPE = 'IPv4' UNRELATED_IP = '1.1.1.1' class IpsetBase(functional_base.BaseSudoTestCase): def setUp(self): super(IpsetBase, self).setUp() bridge = self.useFixture(net_helpers.VethBridgeFixture()).bridge self.source, self.destination = self.useFixture( machine_fixtures.PeerMachines(bridge)).machines self.ipset_name = base.get_rand_name(MAX_IPSET_NAME_LENGTH, 'set-') self.icmp_accept_rule = ('-p icmp -m set --match-set %s src -j ACCEPT' % self.ipset_name) self.ipset = self._create_ipset_manager_and_set( ip_lib.IPWrapper(self.destination.namespace), self.ipset_name) self.addCleanup(self.ipset._destroy, self.ipset_name) self.dst_iptables = iptables_manager.IptablesManager( namespace=self.destination.namespace) self._add_iptables_ipset_rules() self.addCleanup(self._remove_iptables_ipset_rules) def _create_ipset_manager_and_set(self, dst_ns, set_name): ipset = ipset_manager.IpsetManager( namespace=dst_ns.namespace) ipset._create_set(set_name, IPSET_ETHERTYPE) return ipset def _remove_iptables_ipset_rules(self): self.dst_iptables.ipv4['filter'].remove_rule( 'INPUT', base.ICMP_BLOCK_RULE) self.dst_iptables.ipv4['filter'].remove_rule( 'INPUT', self.icmp_accept_rule) self.dst_iptables.apply() def _add_iptables_ipset_rules(self): self.dst_iptables.ipv4['filter'].add_rule( 'INPUT', self.icmp_accept_rule) self.dst_iptables.ipv4['filter'].add_rule( 'INPUT', base.ICMP_BLOCK_RULE) self.dst_iptables.apply() class IpsetManagerTestCase(IpsetBase): def test_add_member_allows_ping(self): self.source.assert_no_ping(self.destination.ip) self.ipset._add_member_to_set(self.ipset_name, self.source.ip) self.source.assert_ping(self.destination.ip) def test_del_member_denies_ping(self): self.ipset._add_member_to_set(self.ipset_name, self.source.ip) self.source.assert_ping(self.destination.ip) self.ipset._del_member_from_set(self.ipset_name, self.source.ip) self.source.assert_no_ping(self.destination.ip) def test_refresh_ipset_allows_ping(self): self.ipset._refresh_set( self.ipset_name, [UNRELATED_IP], IPSET_ETHERTYPE) self.source.assert_no_ping(self.destination.ip) self.ipset._refresh_set( self.ipset_name, [UNRELATED_IP, self.source.ip], IPSET_ETHERTYPE) self.source.assert_ping(self.destination.ip) self.ipset._refresh_set( self.ipset_name, [self.source.ip, UNRELATED_IP], IPSET_ETHERTYPE) self.source.assert_ping(self.destination.ip) def test_destroy_ipset_set(self): self._remove_iptables_ipset_rules() self.ipset._destroy(self.ipset_name) neutron-8.4.0/neutron/tests/functional/agent/linux/__init__.py0000664000567000056710000000000013044372736025721 0ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/tests/functional/agent/linux/test_async_process.py0000664000567000056710000000546113044372760030111 0ustar jenkinsjenkins00000000000000# Copyright 2013 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import eventlet import six from neutron._i18n import _ from neutron.agent.linux import async_process from neutron.agent.linux import utils from neutron.tests import base class AsyncProcessTestFramework(base.BaseTestCase): def setUp(self): super(AsyncProcessTestFramework, self).setUp() self.test_file_path = self.get_temp_file_path('test_async_process.tmp') self.data = [six.text_type(x) for x in range(4)] with open(self.test_file_path, 'w') as f: f.writelines('%s\n' % item for item in self.data) def _check_stdout(self, proc): # Ensure that all the output from the file is read output = [] while output != self.data: new_output = list(proc.iter_stdout()) if new_output: output += new_output eventlet.sleep(0.01) class TestAsyncProcess(AsyncProcessTestFramework): def _safe_stop(self, proc): try: proc.stop() except async_process.AsyncProcessException: pass def test_stopping_async_process_lifecycle(self): proc = async_process.AsyncProcess(['tail', '-f', self.test_file_path]) self.addCleanup(self._safe_stop, proc) proc.start(block=True) self._check_stdout(proc) proc.stop(block=True) # Ensure that the process and greenthreads have stopped proc._process.wait() self.assertEqual(proc._process.returncode, -9) for watcher in proc._watchers: watcher.wait() def test_async_process_respawns(self): proc = async_process.AsyncProcess(['tail', '-f', self.test_file_path], respawn_interval=0) self.addCleanup(self._safe_stop, proc) proc.start() # Ensure that the same output is read twice self._check_stdout(proc) pid = proc.pid utils.execute(['kill', '-9', pid]) utils.wait_until_true( lambda: proc.is_active() and pid != proc.pid, timeout=5, sleep=0.01, exception=RuntimeError(_("Async process didn't respawn"))) self._check_stdout(proc) neutron-8.4.0/neutron/tests/functional/agent/linux/test_interface.py0000664000567000056710000001013513044372760027170 0ustar jenkinsjenkins00000000000000# Copyright (c) 2015 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from oslo_config import cfg from oslo_utils import uuidutils import testtools from neutron.agent.linux import interface from neutron.agent.linux import ip_lib from neutron.common import exceptions from neutron.common import utils from neutron.tests import base as tests_base from neutron.tests.common import net_helpers from neutron.tests.functional.agent.linux import base class OVSInterfaceDriverTestCase(base.BaseOVSLinuxTestCase): def setUp(self): super(OVSInterfaceDriverTestCase, self).setUp() conf = cfg.ConfigOpts() conf.register_opts(interface.OPTS) self.interface = interface.OVSInterfaceDriver(conf) def test_plug_checks_if_bridge_exists(self): with testtools.ExpectedException(exceptions.BridgeDoesNotExist): self.interface.plug(network_id=42, port_id=71, device_name='not_a_device', mac_address='', bridge='not_a_bridge', namespace='not_a_namespace') def test_plug_succeeds(self): device_name = tests_base.get_rand_name() mac_address = utils.get_random_mac('fa:16:3e:00:00:00'.split(':')) namespace = self.useFixture(net_helpers.NamespaceFixture()).name bridge = self.useFixture(net_helpers.OVSBridgeFixture()).bridge self.assertFalse(bridge.get_port_name_list()) self.interface.plug(network_id=uuidutils.generate_uuid(), port_id=uuidutils.generate_uuid(), device_name=device_name, mac_address=mac_address, bridge=bridge.br_name, namespace=namespace) self.assertIn(device_name, bridge.get_port_name_list()) self.assertTrue(ip_lib.device_exists(device_name, namespace)) def test_plug_with_namespace_sets_mtu_higher_than_bridge(self): device_mtu = 1450 # Create a new OVS bridge ovs_bridge = self.useFixture(net_helpers.OVSBridgeFixture()).bridge self.assertFalse(ovs_bridge.get_port_name_list()) # Add a new linuxbridge port with reduced MTU to OVS bridge lb_bridge = self.useFixture( net_helpers.LinuxBridgeFixture()).bridge lb_bridge_port = self.useFixture( net_helpers.LinuxBridgePortFixture(lb_bridge)) lb_bridge_port.port.link.set_mtu(device_mtu - 1) ovs_bridge.add_port(lb_bridge_port.port.name) # Now plug a device with intended MTU that is higher than for the port # above and validate that its MTU is not reduced to the least MTU on # the bridge device_name = tests_base.get_rand_name() mac_address = utils.get_random_mac('fa:16:3e:00:00:00'.split(':')) namespace = self.useFixture(net_helpers.NamespaceFixture()).name self.interface.plug(network_id=uuidutils.generate_uuid(), port_id=uuidutils.generate_uuid(), device_name=device_name, mac_address=mac_address, bridge=ovs_bridge.br_name, namespace=namespace, mtu=device_mtu) self.assertIn(device_name, ovs_bridge.get_port_name_list()) self.assertTrue(ip_lib.device_exists(device_name, namespace)) self.assertEqual( device_mtu, ip_lib.IPDevice(device_name, namespace=namespace).link.mtu ) neutron-8.4.0/neutron/tests/functional/agent/linux/test_linuxbridge_arp_protect.py0000664000567000056710000001567013044372760032157 0ustar jenkinsjenkins00000000000000# Copyright (c) 2015 Mirantis, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
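# arp_protect, exercised below, installs ebtables rules so that a port
# may only answer ARP for its fixed IPs and allowed address pairs.
# Conceptually the per-port filter looks like this (an illustrative
# sketch, not the literal rules the module generates):
#
#   ebtables -A <port-chain> -p ARP --arp-ip-src <allowed-ip> -j ACCEPT
#   ebtables -A <port-chain> -p ARP -j DROP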
from neutron.common import constants from neutron.common import utils from neutron.plugins.ml2.drivers.linuxbridge.agent import arp_protect from neutron.tests.common import machine_fixtures from neutron.tests.common import net_helpers from neutron.tests.functional import base as functional_base no_arping = net_helpers.assert_no_arping arping = net_helpers.assert_arping class LinuxBridgeARPSpoofTestCase(functional_base.BaseSudoTestCase): def setUp(self): super(LinuxBridgeARPSpoofTestCase, self).setUp() lbfixture = self.useFixture(net_helpers.LinuxBridgeFixture()) self.addCleanup(setattr, arp_protect, 'NAMESPACE', None) arp_protect.NAMESPACE = lbfixture.namespace bridge = lbfixture.bridge self.source, self.destination, self.observer = self.useFixture( machine_fixtures.PeerMachines(bridge, amount=3)).machines self.addCleanup(self._ensure_rules_cleaned) def _ensure_rules_cleaned(self): rules = [r for r in arp_protect.ebtables(['-L']).splitlines() if r and 'Bridge' not in r] self.assertEqual([], rules, 'Test leaked ebtables rules') def _add_arp_protection(self, machine, addresses, extra_port_dict=None): port_dict = {'fixed_ips': [{'ip_address': a} for a in addresses], 'device_owner': 'nobody', 'mac_address': machine.port.link.address} if extra_port_dict: port_dict.update(extra_port_dict) name = net_helpers.VethFixture.get_peer_name(machine.port.name) arp_protect.setup_arp_spoofing_protection(name, port_dict) self.addCleanup(arp_protect.delete_arp_spoofing_protection, [name]) def test_arp_no_protection(self): arping(self.source.namespace, self.destination.ip) arping(self.destination.namespace, self.source.ip) def test_arp_correct_protection(self): self._add_arp_protection(self.source, [self.source.ip]) self._add_arp_protection(self.destination, [self.destination.ip]) arping(self.source.namespace, self.destination.ip) arping(self.destination.namespace, self.source.ip) def test_arp_correct_protection_allowed_address_pairs(self): smac = self.source.port.link.address port = {'mac_address': '00:11:22:33:44:55', 'allowed_address_pairs': [{'mac_address': smac, 'ip_address': self.source.ip}]} # make sure a large number of allowed address pairs works for i in range(100000): port['allowed_address_pairs'].append( {'mac_address': utils.get_random_mac( 'fa:16:3e:00:00:00'.split(':')), 'ip_address': '10.10.10.10'}) self._add_arp_protection(self.source, ['1.2.2.2'], port) self._add_arp_protection(self.destination, [self.destination.ip]) arping(self.source.namespace, self.destination.ip) arping(self.destination.namespace, self.source.ip) def test_arp_fails_incorrect_protection(self): self._add_arp_protection(self.source, ['1.1.1.1']) self._add_arp_protection(self.destination, ['2.2.2.2']) no_arping(self.source.namespace, self.destination.ip) no_arping(self.destination.namespace, self.source.ip) def test_arp_fails_incorrect_mac_protection(self): # a bad mac filter on the source will prevent any traffic from it self._add_arp_protection(self.source, [self.source.ip], {'mac_address': '00:11:22:33:44:55'}) no_arping(self.source.namespace, self.destination.ip) no_arping(self.destination.namespace, self.source.ip) # correcting it should make it work self._add_arp_protection(self.source, [self.source.ip]) arping(self.source.namespace, self.destination.ip) def test_arp_protection_removal(self): self._add_arp_protection(self.source, ['1.1.1.1']) self._add_arp_protection(self.destination, ['2.2.2.2']) no_arping(self.observer.namespace, self.destination.ip) no_arping(self.observer.namespace, self.source.ip) name = 
net_helpers.VethFixture.get_peer_name(self.source.port.name) arp_protect.delete_arp_spoofing_protection([name]) # spoofing should have been removed from source, but not dest arping(self.observer.namespace, self.source.ip) no_arping(self.observer.namespace, self.destination.ip) def test_arp_protection_update(self): self._add_arp_protection(self.source, ['1.1.1.1']) self._add_arp_protection(self.destination, ['2.2.2.2']) no_arping(self.observer.namespace, self.destination.ip) no_arping(self.observer.namespace, self.source.ip) self._add_arp_protection(self.source, ['192.0.0.0/1']) # spoofing should have been updated on source, but not dest arping(self.observer.namespace, self.source.ip) no_arping(self.observer.namespace, self.destination.ip) def test_arp_protection_port_security_disabled(self): self._add_arp_protection(self.source, ['1.1.1.1']) no_arping(self.observer.namespace, self.source.ip) self._add_arp_protection(self.source, ['1.1.1.1'], {'port_security_enabled': False}) arping(self.observer.namespace, self.source.ip) def test_arp_protection_network_owner(self): self._add_arp_protection(self.source, ['1.1.1.1']) no_arping(self.observer.namespace, self.source.ip) self._add_arp_protection(self.source, ['1.1.1.1'], {'device_owner': constants.DEVICE_OWNER_ROUTER_GW}) arping(self.observer.namespace, self.source.ip) def test_arp_protection_dead_reference_removal(self): self._add_arp_protection(self.source, ['1.1.1.1']) self._add_arp_protection(self.destination, ['2.2.2.2']) no_arping(self.observer.namespace, self.destination.ip) no_arping(self.observer.namespace, self.source.ip) name = net_helpers.VethFixture.get_peer_name(self.source.port.name) # This should remove all arp protect rules that aren't source port arp_protect.delete_unreferenced_arp_protection([name]) no_arping(self.observer.namespace, self.source.ip) arping(self.observer.namespace, self.destination.ip) neutron-8.4.0/neutron/tests/functional/agent/linux/test_keepalived.py0000664000567000056710000000762213044372760027350 0ustar jenkinsjenkins00000000000000# Copyright (c) 2014 Red Hat, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
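# NOTE: the 'conflicting pid' tests below cover pid reuse after a hard
# node reset: the pidfile keepalived left behind may now point at an
# unrelated live process. The process manager is expected to notice
# this (e.g. by checking the recorded pid's /proc/<pid>/cmdline against
# the expected command -- an assumption based on how external_process
# tracks daemons) and spawn keepalived anyway rather than treat the
# stale pidfile as proof that it is already running.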
from oslo_config import cfg from neutron._i18n import _ from neutron.agent.linux import external_process from neutron.agent.linux import keepalived from neutron.agent.linux import utils from neutron.tests import base from neutron.tests.functional.agent.linux import helpers from neutron.tests.unit.agent.linux import test_keepalived class KeepalivedManagerTestCase(base.BaseTestCase, test_keepalived.KeepalivedConfBaseMixin): def setUp(self): super(KeepalivedManagerTestCase, self).setUp() cfg.CONF.set_override('check_child_processes_interval', 1, 'AGENT') self.expected_config = self._get_config() self.process_monitor = external_process.ProcessMonitor(cfg.CONF, 'router') self.manager = keepalived.KeepalivedManager( 'router1', self.expected_config, self.process_monitor, conf_path=cfg.CONF.state_path) self.addCleanup(self.manager.disable) def _spawn_keepalived(self, keepalived_manager): keepalived_manager.spawn() process = keepalived_manager.get_process() utils.wait_until_true( lambda: process.active, timeout=5, sleep=0.01, exception=RuntimeError(_("Keepalived didn't spawn"))) return process def test_keepalived_spawn(self): self._spawn_keepalived(self.manager) self.assertEqual(self.expected_config.get_config_str(), self.manager.get_conf_on_disk()) def _test_keepalived_respawns(self, normal_exit=True): process = self._spawn_keepalived(self.manager) pid = process.pid exit_code = '-15' if normal_exit else '-9' # Exit the process, and see that when it comes back # It's indeed a different process utils.execute(['kill', exit_code, pid], run_as_root=True) utils.wait_until_true( lambda: process.active and pid != process.pid, timeout=5, sleep=0.01, exception=RuntimeError(_("Keepalived didn't respawn"))) def test_keepalived_respawns(self): self._test_keepalived_respawns() def test_keepalived_respawn_with_unexpected_exit(self): self._test_keepalived_respawns(False) def _test_keepalived_spawns_conflicting_pid(self, process, pid_file): # Test the situation when keepalived PID file contains PID of an # existing non-keepalived process. This situation can happen e.g. # after hard node reset. spawn_process = helpers.SleepyProcessFixture() self.useFixture(spawn_process) with open(pid_file, "w") as f_pid_file: f_pid_file.write("%s" % spawn_process.pid) self._spawn_keepalived(self.manager) def test_keepalived_spawns_conflicting_pid_base_process(self): process = self.manager.get_process() pid_file = process.get_pid_file_name() self._test_keepalived_spawns_conflicting_pid(process, pid_file) def test_keepalived_spawns_conflicting_pid_vrrp_subprocess(self): process = self.manager.get_process() pid_file = process.get_pid_file_name() self._test_keepalived_spawns_conflicting_pid( process, self.manager.get_vrrp_pid_file_name(pid_file)) neutron-8.4.0/neutron/tests/functional/agent/linux/test_process_monitor.py0000664000567000056710000000731013044372760030456 0ustar jenkinsjenkins00000000000000# Copyright 2014 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
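# These tests drive external_process.ProcessMonitor end to end: child
# daemons are spawned through ProcessManager, registered with the
# monitor, killed, and then expected to reappear within roughly
# check_child_processes_interval seconds because
# check_child_processes_action is set to 'respawn'.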
import os from oslo_config import cfg from six import moves from neutron.agent.linux import external_process from neutron.agent.linux import utils from neutron.tests import base from neutron.tests.functional.agent.linux import simple_daemon UUID_FORMAT = "test-uuid-%d" SERVICE_NAME = "service" class BaseTestProcessMonitor(base.BaseTestCase): def setUp(self): super(BaseTestProcessMonitor, self).setUp() cfg.CONF.set_override('check_child_processes_interval', 1, 'AGENT') self._child_processes = [] self._process_monitor = None self.create_child_processes_manager('respawn') self.addCleanup(self.cleanup_spawned_children) def create_child_processes_manager(self, action): cfg.CONF.set_override('check_child_processes_action', action, 'AGENT') self._process_monitor = self.build_process_monitor() def build_process_monitor(self): return external_process.ProcessMonitor( config=cfg.CONF, resource_type='test') def _make_cmdline_callback(self, uuid): def _cmdline_callback(pidfile): cmdline = ["python", simple_daemon.__file__, "--uuid=%s" % uuid, "--pid_file=%s" % pidfile] return cmdline return _cmdline_callback def spawn_n_children(self, n, service=None): self._child_processes = [] for child_number in moves.range(n): uuid = self._child_uuid(child_number) _callback = self._make_cmdline_callback(uuid) pm = external_process.ProcessManager( conf=cfg.CONF, uuid=uuid, default_cmd_callback=_callback, service=service) pm.enable() self._process_monitor.register(uuid, SERVICE_NAME, pm) self._child_processes.append(pm) @staticmethod def _child_uuid(child_number): return UUID_FORMAT % child_number def _kill_last_child(self): self._child_processes[-1].disable() def wait_for_all_children_respawned(self): def all_children_active(): return all(pm.active for pm in self._child_processes) for pm in self._child_processes: directory = os.path.dirname(pm.get_pid_file_name()) self.assertEqual(0o755, os.stat(directory).st_mode & 0o777) # we need to allow extra time for the check to happen and for the # respawn action to be executed on the dead processes under high # load conditions max_wait_time = ( cfg.CONF.AGENT.check_child_processes_interval + 5) utils.wait_until_true( all_children_active, timeout=max_wait_time, sleep=0.01, exception=RuntimeError('Not all children respawned.')) def cleanup_spawned_children(self): self._process_monitor.stop() for pm in self._child_processes: pm.disable() class TestProcessMonitor(BaseTestProcessMonitor): def test_respawn_handler(self): self.spawn_n_children(2) self._kill_last_child() self.wait_for_all_children_respawned() neutron-8.4.0/neutron/tests/functional/agent/linux/test_utils.py0000664000567000056710000000737613044372760026375 0ustar jenkinsjenkins00000000000000# Copyright 2015 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
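# NOTE: get_cmdline_from_pid and pid_invoked_with_cmdline, tested
# below, are built on /proc/<pid>/cmdline, where the kernel exposes a
# process's argv as NUL-separated strings; the last test in this module
# parses that file by hand for the same reason.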
import functools

import eventlet
import testtools

from neutron.agent.linux import async_process
from neutron.agent.linux import utils
from neutron.tests.functional.agent.linux import test_async_process
from neutron.tests.functional import base as functional_base


class TestPIDHelpers(test_async_process.AsyncProcessTestFramework):
    def test_get_cmdline_from_pid_and_pid_invoked_with_cmdline(self):
        cmd = ['tail', '-f', self.test_file_path]
        proc = async_process.AsyncProcess(cmd)
        proc.start(block=True)
        self.addCleanup(proc.stop)

        pid = proc.pid
        self.assertEqual(cmd, utils.get_cmdline_from_pid(pid))
        self.assertTrue(utils.pid_invoked_with_cmdline(pid, cmd))
        self.assertEqual([], utils.get_cmdline_from_pid(-1))

    def test_wait_until_true_predicate_succeeds(self):
        utils.wait_until_true(lambda: True)

    def test_wait_until_true_predicate_fails(self):
        with testtools.ExpectedException(eventlet.timeout.Timeout):
            utils.wait_until_true(lambda: False, 2)


class TestGetRootHelperChildPid(functional_base.BaseSudoTestCase):
    def _addcleanup_sleep_process(self, parent_pid):
        sleep_pid = utils.execute(
            ['ps', '--ppid', parent_pid, '-o', 'pid=']).strip()
        self.addCleanup(
            utils.execute,
            ['kill', '-9', sleep_pid],
            check_exit_code=False,
            run_as_root=True)

    def test_get_root_helper_child_pid_returns_first_child(self):
        """Test that the first child is returned, not the lowest child pid.

        The test creates the following process tree:

            sudo +
                 |
                 +--rootwrap +
                             |
                             +--bash +
                                     |
                                     +--sleep 100

        and checks that the pid of the 'bash' command is returned.
        """
        def wait_for_sleep_is_spawned(parent_pid):
            proc_tree = utils.execute(
                ['pstree', parent_pid], check_exit_code=False)
            processes = [command.strip() for command in proc_tree.split('---')
                         if command]
            return 'sleep' == processes[-1]

        cmd = ['bash', '-c', '(sleep 100)']
        proc = async_process.AsyncProcess(cmd, run_as_root=True)
        proc.start()

        # Root helpers spawn their child processes asynchronously, and we
        # don't want to use proc.start(block=True) as that uses
        # get_root_helper_child_pid (the method under test) internally.
        sudo_pid = proc._process.pid
        utils.wait_until_true(
            functools.partial(
                wait_for_sleep_is_spawned,
                sudo_pid),
            sleep=0.1)

        child_pid = utils.get_root_helper_child_pid(
            sudo_pid, cmd, run_as_root=True)
        self.assertIsNotNone(
            child_pid,
            "get_root_helper_child_pid is expected to return the pid of the "
            "bash process")
        self._addcleanup_sleep_process(child_pid)
        with open('/proc/%s/cmdline' % child_pid, 'r') as f_proc_cmdline:
            cmdline = f_proc_cmdline.readline().split('\0')[0]
        self.assertIn('bash', cmdline)
neutron-8.4.0/neutron/tests/functional/agent/linux/test_ip_monitor.py0000664000567000056710000000506413044372736027413 0ustar jenkinsjenkins00000000000000# Copyright 2015 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
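# IPMonitor tails address events in a given namespace (roughly, a wrapper
# around 'ip monitor'); each address change shows up as a line on the
# monitor's stdout which IPMonitorEvent.from_text() parses.  A condensed
# sketch of the event flow exercised below (illustrative only):
#
#     line = next(monitor.iter_stdout(block=True))
#     event = ip_monitor.IPMonitorEvent.from_text(line)
#     event.interface, event.added, event.cidr   # fields asserted on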
from neutron.agent.linux import async_process from neutron.agent.linux import ip_monitor from neutron.tests.functional.agent.linux import test_ip_lib class TestIPMonitor(test_ip_lib.IpLibTestFramework): def setUp(self): super(TestIPMonitor, self).setUp() attr = self.generate_device_details() self.device = self.manage_device(attr) self.monitor = ip_monitor.IPMonitor(attr.namespace) self.addCleanup(self._safe_stop_monitor) def _safe_stop_monitor(self): try: self.monitor.stop() except async_process.AsyncProcessException: pass def test_ip_monitor_lifecycle(self): self.assertFalse(self.monitor.is_active()) self.monitor.start() self.assertTrue(self.monitor.is_active()) self.monitor.stop() self.assertFalse(self.monitor.is_active()) def test_ip_monitor_events(self): self.monitor.start() cidr = '169.254.128.1/24' self.device.addr.add(cidr) self._assert_event(expected_name=self.device.name, expected_cidr=cidr, expected_added=True, event=ip_monitor.IPMonitorEvent.from_text( next(self.monitor.iter_stdout(block=True)))) self.device.addr.delete(cidr) self._assert_event(expected_name=self.device.name, expected_cidr=cidr, expected_added=False, event=ip_monitor.IPMonitorEvent.from_text( next(self.monitor.iter_stdout(block=True)))) def _assert_event(self, expected_name, expected_cidr, expected_added, event): self.assertEqual(expected_name, event.interface) self.assertEqual(expected_added, event.added) self.assertEqual(expected_cidr, event.cidr) neutron-8.4.0/neutron/tests/functional/agent/linux/base.py0000664000567000056710000000315313044372760025105 0ustar jenkinsjenkins00000000000000# Copyright 2014 Cisco Systems, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import testscenarios from neutron.tests import base as tests_base from neutron.tests.functional import base MARK_VALUE = '0x1' MARK_MASK = '0xffffffff' ICMP_MARK_RULE = ('-j MARK --set-xmark %(value)s/%(mask)s' % {'value': MARK_VALUE, 'mask': MARK_MASK}) MARKED_BLOCK_RULE = '-m mark --mark %s -j DROP' % MARK_VALUE ICMP_BLOCK_RULE = '-p icmp -j DROP' #TODO(jschwarz): Move these two functions to neutron/tests/common/ get_rand_name = tests_base.get_rand_name # Regarding MRO, it goes BaseOVSLinuxTestCase, WithScenarios, # BaseSudoTestCase, ..., UnitTest, object. setUp is not defined in # WithScenarios, so it will correctly be found in BaseSudoTestCase. class BaseOVSLinuxTestCase(testscenarios.WithScenarios, base.BaseSudoTestCase): scenarios = [ ('vsctl', dict(ovsdb_interface='vsctl')), ('native', dict(ovsdb_interface='native')), ] def setUp(self): super(BaseOVSLinuxTestCase, self).setUp() self.config(group='OVS', ovsdb_interface=self.ovsdb_interface) neutron-8.4.0/neutron/tests/functional/agent/linux/test_iptables.py0000664000567000056710000002005013044372760027030 0ustar jenkinsjenkins00000000000000# Copyright (c) 2014 Red Hat, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import os.path

import testtools

from neutron.agent.linux import iptables_manager
from neutron.agent.linux import utils
from neutron.common import constants
from neutron.tests import base
from neutron.tests.common import machine_fixtures
from neutron.tests.common import net_helpers
from neutron.tests.functional.agent.linux import base as linux_base
from neutron.tests.functional.agent.linux.bin import ipt_binname
from neutron.tests.functional import base as functional_base


class IptablesManagerTestCase(functional_base.BaseSudoTestCase):
    DIRECTION_CHAIN_MAPPER = {'ingress': 'INPUT',
                              'egress': 'OUTPUT'}
    PROTOCOL_BLOCK_RULE = '-p %s -j DROP'
    PROTOCOL_PORT_BLOCK_RULE = ('-p %(protocol)s -m %(protocol)s '
                                '--dport %(port)d -j DROP')

    def setUp(self):
        super(IptablesManagerTestCase, self).setUp()
        bridge = self.useFixture(net_helpers.VethBridgeFixture()).bridge
        self.client, self.server = self.useFixture(
            machine_fixtures.PeerMachines(bridge)).machines
        self.client_fw, self.server_fw = self.create_firewalls()
        # The port is used in an isolated namespace, which precludes the
        # possibility of port conflicts
        self.port = net_helpers.get_free_namespace_port(
            constants.PROTO_NAME_TCP, self.server.namespace)

    def create_firewalls(self):
        client_iptables = iptables_manager.IptablesManager(
            namespace=self.client.namespace)
        server_iptables = iptables_manager.IptablesManager(
            namespace=self.server.namespace)
        return client_iptables, server_iptables

    def filter_add_rule(self, fw_manager, address, direction, protocol,
                        port):
        self._ipv4_filter_execute(fw_manager, 'add_rule', direction,
                                  protocol, port)

    def filter_remove_rule(self, fw_manager, address, direction, protocol,
                           port):
        self._ipv4_filter_execute(fw_manager, 'remove_rule', direction,
                                  protocol, port)

    def _ipv4_filter_execute(self, fw_manager, method, direction, protocol,
                             port):
        chain, rule = self._get_chain_and_rule(direction, protocol, port)
        method = getattr(fw_manager.ipv4['filter'], method)
        method(chain, rule)
        fw_manager.apply()

    def _get_chain_and_rule(self, direction, protocol, port):
        chain = self.DIRECTION_CHAIN_MAPPER[direction]
        if port:
            rule = self.PROTOCOL_PORT_BLOCK_RULE % {'protocol': protocol,
                                                    'port': port}
        else:
            rule = self.PROTOCOL_BLOCK_RULE % protocol
        return chain, rule

    def _test_with_nc(self, fw_manager, direction, port, protocol):
        netcat = net_helpers.NetcatTester(
            self.client.namespace, self.server.namespace,
            self.server.ip, self.port, protocol)
        self.addCleanup(netcat.stop_processes)
        filter_params = 'direction %s, port %s and protocol %s' % (
            direction, port, protocol)
        self.assertTrue(netcat.test_connectivity(),
                        'Failed connectivity check before applying a filter '
                        'with %s' % filter_params)
        # REVISIT(jlibosva): Make sure we have an ASSURED conntrack entry
        # for the given connection
        self.filter_add_rule(
            fw_manager, self.server.ip, direction, protocol, port)
        with testtools.ExpectedException(
                RuntimeError,
                msg='Connectivity check unexpectedly passed after applying '
                    'a filter with %s' % filter_params):
            netcat.test_connectivity()
        self.filter_remove_rule(
            fw_manager, self.server.ip, direction, protocol, port)
        # With TCP, packets sent while the filter was in place will get
        # through once it is removed, so we would read stale data from the
        # socket; with UDP, the server process has died. Respawn the
        # processes so the check runs against clean sockets
        self.assertTrue(netcat.test_connectivity(True),
                        'Failed connectivity check after removing a filter '
                        'with %s' % filter_params)

    def test_icmp(self):
        self.client.assert_ping(self.server.ip)
        self.server_fw.ipv4['filter'].add_rule('INPUT',
                                               linux_base.ICMP_BLOCK_RULE)
        self.server_fw.apply()
        self.client.assert_no_ping(self.server.ip)
        self.server_fw.ipv4['filter'].remove_rule('INPUT',
                                                  linux_base.ICMP_BLOCK_RULE)
        self.server_fw.apply()
        self.client.assert_ping(self.server.ip)

    def test_mangle_icmp(self):
        self.client.assert_ping(self.server.ip)
        self.server_fw.ipv4['mangle'].add_rule('INPUT',
                                               linux_base.ICMP_MARK_RULE)
        self.server_fw.ipv4['filter'].add_rule('INPUT',
                                               linux_base.MARKED_BLOCK_RULE)
        self.server_fw.apply()
        self.client.assert_no_ping(self.server.ip)
        self.server_fw.ipv4['mangle'].remove_rule('INPUT',
                                                  linux_base.ICMP_MARK_RULE)
        self.server_fw.ipv4['filter'].remove_rule(
            'INPUT', linux_base.MARKED_BLOCK_RULE)
        self.server_fw.apply()
        self.client.assert_ping(self.server.ip)

    def test_tcp_input_port(self):
        self._test_with_nc(self.server_fw, 'ingress', self.port,
                           protocol=net_helpers.NetcatTester.TCP)

    def test_tcp_output_port(self):
        self._test_with_nc(self.client_fw, 'egress', self.port,
                           protocol=net_helpers.NetcatTester.TCP)

    def test_tcp_input(self):
        self._test_with_nc(self.server_fw, 'ingress', port=None,
                           protocol=net_helpers.NetcatTester.TCP)

    def test_tcp_output(self):
        self._test_with_nc(self.client_fw, 'egress', port=None,
                           protocol=net_helpers.NetcatTester.TCP)

    def test_udp_input_port(self):
        self._test_with_nc(self.server_fw, 'ingress', self.port,
                           protocol=net_helpers.NetcatTester.UDP)

    def test_udp_output_port(self):
        self._test_with_nc(self.client_fw, 'egress', self.port,
                           protocol=net_helpers.NetcatTester.UDP)

    def test_udp_input(self):
        self._test_with_nc(self.server_fw, 'ingress', port=None,
                           protocol=net_helpers.NetcatTester.UDP)

    def test_udp_output(self):
        self._test_with_nc(self.client_fw, 'egress', port=None,
                           protocol=net_helpers.NetcatTester.UDP)


class IptablesManagerNonRootTestCase(base.BaseTestCase):
    @staticmethod
    def _normalize_module_name(name):
        for suf in ['.pyc', '.pyo']:
            if name.endswith(suf):
                return name[:-len(suf)] + '.py'
        return name

    def _test_binary_name(self, module, *extra_options):
        executable = self._normalize_module_name(module.__file__)
        expected = os.path.basename(executable)[:16]
        observed = utils.execute([executable] + list(extra_options)).rstrip()
        self.assertEqual(expected, observed)

    def test_binary_name(self):
        self._test_binary_name(ipt_binname)

    def test_binary_name_eventlet_spawn(self):
        self._test_binary_name(ipt_binname, 'spawn')
neutron-8.4.0/neutron/tests/functional/agent/linux/bin/0000775000567000056710000000000013044373210024356 5ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/tests/functional/agent/linux/bin/__init__.py0000664000567000056710000000000013044372736026471 0ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/tests/functional/agent/linux/bin/ipt_binname.py0000775000567000056710000000231713044372736027237 0ustar jenkinsjenkins00000000000000#! /usr/bin/env python
# Copyright (C) 2014 VA Linux Systems Japan K.K.
# Copyright (C) 2014 YAMAMOTO Takashi
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from __future__ import print_function import sys import eventlet def print_binary_name(): # NOTE(yamamoto): Don't move this import to module-level. # The aim is to test importing from eventlet non-main thread. # See Bug #1367075 for details. from neutron.agent.linux import iptables_manager print(iptables_manager.binary_name) if __name__ == "__main__": if 'spawn' in sys.argv: eventlet.spawn(print_binary_name).wait() else: print_binary_name() neutron-8.4.0/neutron/tests/functional/agent/linux/test_ovsdb_monitor.py0000664000567000056710000001353113044372760030117 0ustar jenkinsjenkins00000000000000# Copyright 2013 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Tests in this module will be skipped unless: - ovsdb-client is installed - ovsdb-client can be invoked password-less via the configured root helper - sudo testing is enabled (see neutron.tests.functional.base for details) """ from oslo_config import cfg from neutron.agent.common import ovs_lib from neutron.agent.linux import ovsdb_monitor from neutron.agent.linux import utils from neutron.tests.common import net_helpers from neutron.tests.functional.agent.linux import base as linux_base from neutron.tests.functional import base as functional_base class BaseMonitorTest(linux_base.BaseOVSLinuxTestCase): def setUp(self): super(BaseMonitorTest, self).setUp() rootwrap_not_configured = (cfg.CONF.AGENT.root_helper == functional_base.SUDO_CMD) if rootwrap_not_configured: # The monitor tests require a nested invocation that has # to be emulated by double sudo if rootwrap is not # configured. self.config(group='AGENT', root_helper=" ".join([functional_base.SUDO_CMD] * 2)) self._check_test_requirements() # ovsdb-client monitor needs to have a bridge to make any output self.useFixture(net_helpers.OVSBridgeFixture()) def _check_test_requirements(self): self.check_command(['ovsdb-client', 'list-dbs'], 'Exit code: 1', 'password-less sudo not granted for ovsdb-client', run_as_root=True) class TestOvsdbMonitor(BaseMonitorTest): def setUp(self): super(TestOvsdbMonitor, self).setUp() self.monitor = ovsdb_monitor.OvsdbMonitor('Bridge') self.addCleanup(self.monitor.stop) self.monitor.start() def collect_monitor_output(self): output = list(self.monitor.iter_stdout()) if output: # Output[0] is header row with spaces for column separation. # Use 'other_config' as an indication of the table header. 
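            # (The exact set of columns in that header varies with the
            # ovsdb-client version; the assertion below only relies on the
            # 'other_config' column name being present.)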
self.assertIn('other_config', output[0]) return True def test_monitor_generates_initial_output(self): utils.wait_until_true(self.collect_monitor_output, timeout=30) class TestSimpleInterfaceMonitor(BaseMonitorTest): def setUp(self): super(TestSimpleInterfaceMonitor, self).setUp() self.monitor = ovsdb_monitor.SimpleInterfaceMonitor() self.addCleanup(self.monitor.stop) self.monitor.start(block=True, timeout=60) def test_has_updates(self): utils.wait_until_true(lambda: self.monitor.has_updates) # clear the event list self.monitor.get_events() self.useFixture(net_helpers.OVSPortFixture()) # has_updates after port addition should become True utils.wait_until_true(lambda: self.monitor.has_updates is True) def _expected_devices_events(self, devices, state): """Helper to check that events are received for expected devices. :param devices: The list of expected devices. WARNING: This list is modified by this method :param state: The state of the devices (added or removed) """ events = self.monitor.get_events() event_devices = [ (dev['name'], dev['external_ids']) for dev in events.get(state)] for dev in event_devices: if dev[0] in devices: devices.remove(dev[0]) self.assertEqual(dev[1].get('iface-status'), 'active') if not devices: return True def test_get_events(self): utils.wait_until_true(lambda: self.monitor.has_updates) devices = self.monitor.get_events() self.assertTrue(devices.get('added'), 'Initial call should always be true') br = self.useFixture(net_helpers.OVSBridgeFixture()) p1 = self.useFixture(net_helpers.OVSPortFixture(br.bridge)) p2 = self.useFixture(net_helpers.OVSPortFixture(br.bridge)) added_devices = [p1.port.name, p2.port.name] utils.wait_until_true( lambda: self._expected_devices_events(added_devices, 'added')) br.bridge.delete_port(p1.port.name) br.bridge.delete_port(p2.port.name) removed_devices = [p1.port.name, p2.port.name] utils.wait_until_true( lambda: self._expected_devices_events(removed_devices, 'removed')) # restart self.monitor.stop(block=True) self.monitor.start(block=True, timeout=60) devices = self.monitor.get_events() self.assertTrue(devices.get('added'), 'Initial call should always be true') def test_get_events_includes_ofport(self): utils.wait_until_true(lambda: self.monitor.has_updates) self.monitor.get_events() # clear initial events br = self.useFixture(net_helpers.OVSBridgeFixture()) p1 = self.useFixture(net_helpers.OVSPortFixture(br.bridge)) def p1_event_has_ofport(): if not self.monitor.has_updates: return for e in self.monitor.new_events['added']: if (e['name'] == p1.port.name and e['ofport'] != ovs_lib.UNASSIGNED_OFPORT): return True utils.wait_until_true(p1_event_has_ofport) neutron-8.4.0/neutron/tests/functional/agent/linux/test_bridge_lib.py0000664000567000056710000000630313044372760027314 0ustar jenkinsjenkins00000000000000# Copyright (c) 2015 Thales Services SAS # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
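# A condensed sketch of the bridge_lib surface exercised below
# (illustrative only; see the individual tests for the exact contracts):
#
#     bridge_lib.is_bridged_interface('tap0')      # interface enslaved?
#     bridge_lib.get_bridge_names()                # names of all bridges
#     br = bridge_lib.BridgeDevice.get_interface_bridge('tap0')
#     br.get_interfaces()                          # ports on that bridge
#     br.disable_ipv6()                            # returns 0 on success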
from neutron.agent.linux import bridge_lib from neutron.tests.common import net_helpers from neutron.tests.functional import base class BridgeLibTestCase(base.BaseSudoTestCase): def setUp(self): super(BridgeLibTestCase, self).setUp() self.bridge, self.port_fixture = self.create_bridge_port_fixture() def create_bridge_port_fixture(self): bridge = self.useFixture( net_helpers.LinuxBridgeFixture(namespace=None)).bridge port_fixture = self.useFixture( net_helpers.LinuxBridgePortFixture(bridge)) return bridge, port_fixture def test_is_bridged_interface(self): self.assertTrue( bridge_lib.is_bridged_interface(self.port_fixture.br_port.name)) def test_is_not_bridged_interface(self): self.assertFalse( bridge_lib.is_bridged_interface(self.port_fixture.port.name)) def test_get_bridge_names(self): self.assertIn(self.bridge.name, bridge_lib.get_bridge_names()) def test_get_interface_bridged_time(self): port = self.port_fixture.br_port t1 = bridge_lib.get_interface_bridged_time(port) self.bridge.delif(port) self.bridge.addif(port) t2 = bridge_lib.get_interface_bridged_time(port) self.assertIsNotNone(t1) self.assertIsNotNone(t2) self.assertGreater(t2, t1) def test_get_interface_bridge(self): bridge = bridge_lib.BridgeDevice.get_interface_bridge( self.port_fixture.br_port.name) self.assertEqual(self.bridge.name, bridge.name) def test_get_interface_no_bridge(self): bridge = bridge_lib.BridgeDevice.get_interface_bridge( self.port_fixture.port.name) self.assertIsNone(bridge) def test_get_interfaces(self): self.assertEqual( [self.port_fixture.br_port.name], self.bridge.get_interfaces()) def test_get_interfaces_no_bridge(self): bridge = bridge_lib.BridgeDevice('--fake--') self.assertEqual([], bridge.get_interfaces()) def test_disable_ipv6(self): sysfs_path = ("/proc/sys/net/ipv6/conf/%s/disable_ipv6" % self.bridge.name) # first, make sure it's enabled with open(sysfs_path, 'r') as sysfs_disable_ipv6_file: sysfs_disable_ipv6 = sysfs_disable_ipv6_file.read() self.assertEqual("0\n", sysfs_disable_ipv6) self.assertEqual(0, self.bridge.disable_ipv6()) with open(sysfs_path, 'r') as sysfs_disable_ipv6_file: sysfs_disable_ipv6 = sysfs_disable_ipv6_file.read() self.assertEqual("1\n", sysfs_disable_ipv6) neutron-8.4.0/neutron/tests/functional/agent/linux/test_ip_lib.py0000664000567000056710000002001713044372760026466 0ustar jenkinsjenkins00000000000000# Copyright (c) 2014 Red Hat, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
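# These tests run against real devices: manage_device() below creates a
# tuntap in a throwaway namespace, configures it through the interface
# driver and hands it to the assertions.  Roughly (illustrative only;
# the device/namespace names are placeholders):
#
#     ip = ip_lib.IPWrapper(namespace='ns-test')
#     ip.netns.add('ns-test')
#     dev = ip.add_tuntap('tap-test')
#     dev.link.set_address('fa:16:3e:00:00:01')
#     dev.link.set_up()
#     ...
#     dev.link.delete()
#     ip.netns.delete('ns-test')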
import collections import netaddr from oslo_config import cfg from oslo_log import log as logging from oslo_utils import importutils from neutron.agent.common import config from neutron.agent.linux import interface from neutron.agent.linux import ip_lib from neutron.common import utils from neutron.tests.common import net_helpers from neutron.tests.functional.agent.linux import base from neutron.tests.functional import base as functional_base LOG = logging.getLogger(__name__) Device = collections.namedtuple('Device', 'name ip_cidrs mac_address namespace') WRONG_IP = '0.0.0.0' TEST_IP = '240.0.0.1' class IpLibTestFramework(functional_base.BaseSudoTestCase): def setUp(self): super(IpLibTestFramework, self).setUp() self._configure() def _configure(self): config.register_interface_driver_opts_helper(cfg.CONF) cfg.CONF.set_override( 'interface_driver', 'neutron.agent.linux.interface.OVSInterfaceDriver') cfg.CONF.register_opts(interface.OPTS) self.driver = importutils.import_object(cfg.CONF.interface_driver, cfg.CONF) def generate_device_details(self, name=None, ip_cidrs=None, mac_address=None, namespace=None): return Device(name or base.get_rand_name(), ip_cidrs or ["%s/24" % TEST_IP], mac_address or utils.get_random_mac('fa:16:3e:00:00:00'.split(':')), namespace or base.get_rand_name()) def _safe_delete_device(self, device): try: device.link.delete() except RuntimeError: LOG.debug('Could not delete %s, was it already deleted?', device) def manage_device(self, attr): """Create a tuntap with the specified attributes. The device is cleaned up at the end of the test. :param attr: A Device namedtuple :return: A tuntap ip_lib.IPDevice """ ip = ip_lib.IPWrapper(namespace=attr.namespace) if attr.namespace: ip.netns.add(attr.namespace) self.addCleanup(ip.netns.delete, attr.namespace) tap_device = ip.add_tuntap(attr.name) self.addCleanup(self._safe_delete_device, tap_device) tap_device.link.set_address(attr.mac_address) self.driver.init_l3(attr.name, attr.ip_cidrs, namespace=attr.namespace) tap_device.link.set_up() return tap_device class IpLibTestCase(IpLibTestFramework): def test_device_exists(self): attr = self.generate_device_details() self.assertFalse( ip_lib.device_exists(attr.name, namespace=attr.namespace)) device = self.manage_device(attr) self.assertTrue( ip_lib.device_exists(device.name, namespace=attr.namespace)) device.link.delete() self.assertFalse( ip_lib.device_exists(attr.name, namespace=attr.namespace)) def test_ipdevice_exists(self): attr = self.generate_device_details() device = self.manage_device(attr) self.assertTrue(device.exists()) device.link.delete() self.assertFalse(device.exists()) def test_vxlan_exists(self): attr = self.generate_device_details() ip = ip_lib.IPWrapper(namespace=attr.namespace) ip.netns.add(attr.namespace) self.addCleanup(ip.netns.delete, attr.namespace) self.assertFalse(ip_lib.vxlan_in_use(9999, namespace=attr.namespace)) device = ip.add_vxlan(attr.name, 9999) self.addCleanup(self._safe_delete_device, device) self.assertTrue(ip_lib.vxlan_in_use(9999, namespace=attr.namespace)) device.link.delete() self.assertFalse(ip_lib.vxlan_in_use(9999, namespace=attr.namespace)) def test_ipwrapper_get_device_by_ip_None(self): ip_wrapper = ip_lib.IPWrapper(namespace=None) self.assertIsNone(ip_wrapper.get_device_by_ip(ip=None)) def test_ipwrapper_get_device_by_ip(self): attr = self.generate_device_details() self.manage_device(attr) ip_wrapper = ip_lib.IPWrapper(namespace=attr.namespace) self.assertEqual(attr.name, ip_wrapper.get_device_by_ip(TEST_IP).name) 
self.assertIsNone(ip_wrapper.get_device_by_ip(WRONG_IP)) def test_device_exists_with_ips_and_mac(self): attr = self.generate_device_details() device = self.manage_device(attr) self.assertTrue( ip_lib.device_exists_with_ips_and_mac(*attr)) wrong_ip_cidr = '10.0.0.1/8' wrong_mac_address = 'aa:aa:aa:aa:aa:aa' attr = self.generate_device_details(name='wrong_name') self.assertFalse( ip_lib.device_exists_with_ips_and_mac(*attr)) attr = self.generate_device_details(ip_cidrs=[wrong_ip_cidr]) self.assertFalse(ip_lib.device_exists_with_ips_and_mac(*attr)) attr = self.generate_device_details(mac_address=wrong_mac_address) self.assertFalse(ip_lib.device_exists_with_ips_and_mac(*attr)) attr = self.generate_device_details(namespace='wrong_namespace') self.assertFalse(ip_lib.device_exists_with_ips_and_mac(*attr)) device.link.delete() def test_get_routing_table(self): attr = self.generate_device_details() device = self.manage_device(attr) device_ip = attr.ip_cidrs[0].split('/')[0] destination = '8.8.8.0/24' device.route.add_route(destination, device_ip) expected_routes = [{'nexthop': device_ip, 'device': attr.name, 'destination': destination, 'scope': None}, {'nexthop': None, 'device': attr.name, 'destination': str( netaddr.IPNetwork(attr.ip_cidrs[0]).cidr), 'scope': 'link'}] routes = ip_lib.get_routing_table(4, namespace=attr.namespace) self.assertEqual(expected_routes, routes) def _check_for_device_name(self, ip, name, should_exist): exist = any(d for d in ip.get_devices() if d.name == name) self.assertEqual(should_exist, exist) def test_dummy_exists(self): namespace = self.useFixture(net_helpers.NamespaceFixture()) dev_name = base.get_rand_name() device = namespace.ip_wrapper.add_dummy(dev_name) self.addCleanup(self._safe_delete_device, device) self._check_for_device_name(namespace.ip_wrapper, dev_name, True) device.link.delete() self._check_for_device_name(namespace.ip_wrapper, dev_name, False) class TestSetIpNonlocalBind(functional_base.BaseSudoTestCase): def test_assigned_value(self): namespace = self.useFixture(net_helpers.NamespaceFixture()) for expected in (0, 1): try: ip_lib.set_ip_nonlocal_bind(expected, namespace.name) except RuntimeError as rte: stat_message = ( 'cannot stat /proc/sys/net/ipv4/ip_nonlocal_bind') if stat_message in str(rte): raise self.skipException( "This kernel doesn't support %s in network " "namespaces." % ip_lib.IP_NONLOCAL_BIND) raise observed = ip_lib.get_ip_nonlocal_bind(namespace.name) self.assertEqual(expected, observed) neutron-8.4.0/neutron/tests/functional/agent/test_l2_lb_agent.py0000664000567000056710000000435313044372760026246 0ustar jenkinsjenkins00000000000000# All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import mock from oslo_config import cfg import testtools from neutron.plugins.ml2.drivers.linuxbridge.agent import \ linuxbridge_neutron_agent from neutron.tests.functional.agent.linux import test_ip_lib lba = linuxbridge_neutron_agent class LinuxBridgeAgentTests(test_ip_lib.IpLibTestFramework): def setUp(self): super(LinuxBridgeAgentTests, self).setUp() agent_rpc = ('neutron.agent.rpc.PluginApi') mock.patch(agent_rpc).start() mock.patch('neutron.agent.rpc.PluginReportStateAPI').start() cfg.CONF.set_override('enable_vxlan', False, 'VXLAN') def test_validate_interface_mappings(self): mappings = {'physnet1': 'int1', 'physnet2': 'int2'} with testtools.ExpectedException(SystemExit): lba.LinuxBridgeManager({}, mappings) self.manage_device( self.generate_device_details()._replace(namespace=None, name='int1')) with testtools.ExpectedException(SystemExit): lba.LinuxBridgeManager({}, mappings) self.manage_device( self.generate_device_details()._replace(namespace=None, name='int2')) lba.LinuxBridgeManager({}, mappings) def test_validate_bridge_mappings(self): mappings = {'physnet1': 'br-eth1'} with testtools.ExpectedException(SystemExit): lba.LinuxBridgeManager(mappings, {}) self.manage_device( self.generate_device_details()._replace(namespace=None, name='br-eth1')) lba.LinuxBridgeManager(mappings, {}) neutron-8.4.0/neutron/tests/functional/agent/l2/0000775000567000056710000000000013044373210022764 5ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/tests/functional/agent/l2/__init__.py0000664000567000056710000000000013044372736025077 0ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/tests/functional/agent/l2/extensions/0000775000567000056710000000000013044373210025163 5ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/tests/functional/agent/l2/extensions/test_ovs_agent_qos_extension.py0000664000567000056710000001711413044372760033554 0ustar jenkinsjenkins00000000000000# Copyright (c) 2015 Red Hat, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import copy import mock from oslo_utils import uuidutils from neutron.api.rpc.callbacks.consumer import registry as consumer_reg from neutron.api.rpc.callbacks import events from neutron.api.rpc.callbacks import resources from neutron.objects.qos import policy from neutron.objects.qos import rule from neutron.tests.common.agents import l2_extensions from neutron.tests.functional.agent.l2 import base TEST_POLICY_ID1 = "a2d72369-4246-4f19-bd3c-af51ec8d70cd" TEST_POLICY_ID2 = "46ebaec0-0570-43ac-82f6-60d2b03168c5" TEST_BW_LIMIT_RULE_1 = rule.QosBandwidthLimitRule( context=None, qos_policy_id=TEST_POLICY_ID1, id="5f126d84-551a-4dcf-bb01-0e9c0df0c793", max_kbps=1000, max_burst_kbps=10) TEST_BW_LIMIT_RULE_2 = rule.QosBandwidthLimitRule( context=None, qos_policy_id=TEST_POLICY_ID2, id="fa9128d9-44af-49b2-99bb-96548378ad42", max_kbps=900, max_burst_kbps=9) class OVSAgentQoSExtensionTestFramework(base.OVSAgentTestFramework): def setUp(self): super(OVSAgentQoSExtensionTestFramework, self).setUp() self.config.set_override('extensions', ['qos'], 'agent') self._set_pull_mock() self.set_test_qos_rules(TEST_POLICY_ID1, [TEST_BW_LIMIT_RULE_1]) self.set_test_qos_rules(TEST_POLICY_ID2, [TEST_BW_LIMIT_RULE_2]) def _set_pull_mock(self): self.qos_policies = {} def _pull_mock(context, resource_type, resource_id): return self.qos_policies[resource_id] self.pull = mock.patch( 'neutron.api.rpc.handlers.resources_rpc.' 'ResourcesPullRpcApi.pull').start() self.pull.side_effect = _pull_mock def set_test_qos_rules(self, policy_id, policy_rules): """This function sets the policy test rules to be exposed.""" qos_policy = policy.QosPolicy( context=None, tenant_id=uuidutils.generate_uuid(), id=policy_id, name="Test Policy Name", description="This is a policy for testing purposes", shared=False, rules=policy_rules) qos_policy.obj_reset_changes() self.qos_policies[policy_id] = qos_policy def _create_test_port_dict(self, policy_id=None): port_dict = super(OVSAgentQoSExtensionTestFramework, self)._create_test_port_dict() port_dict['qos_policy_id'] = policy_id port_dict['network_qos_policy_id'] = None return port_dict def _get_device_details(self, port, network): dev = super(OVSAgentQoSExtensionTestFramework, self)._get_device_details(port, network) dev['qos_policy_id'] = port['qos_policy_id'] return dev def _assert_bandwidth_limit_rule_is_set(self, port, rule): max_rate, burst = ( self.agent.int_br.get_egress_bw_limit_for_port(port['vif_name'])) self.assertEqual(max_rate, rule.max_kbps) self.assertEqual(burst, rule.max_burst_kbps) def _assert_bandwidth_limit_rule_not_set(self, port): max_rate, burst = ( self.agent.int_br.get_egress_bw_limit_for_port(port['vif_name'])) self.assertIsNone(max_rate) self.assertIsNone(burst) def wait_until_bandwidth_limit_rule_applied(self, port, rule): l2_extensions.wait_until_bandwidth_limit_rule_applied( self.agent.int_br, port['vif_name'], rule) def _create_port_with_qos(self): port_dict = self._create_test_port_dict() port_dict['qos_policy_id'] = TEST_POLICY_ID1 self.setup_agent_and_ports([port_dict]) self.wait_until_ports_state(self.ports, up=True) self.wait_until_bandwidth_limit_rule_applied(port_dict, TEST_BW_LIMIT_RULE_1) return port_dict class TestOVSAgentQosExtension(OVSAgentQoSExtensionTestFramework): def test_port_creation_with_bandwidth_limit(self): """Make sure bandwidth limit rules are set in low level to ports.""" self.setup_agent_and_ports( port_dicts=self.create_test_ports(amount=1, policy_id=TEST_POLICY_ID1)) self.wait_until_ports_state(self.ports, up=True) for port in 
self.ports: self._assert_bandwidth_limit_rule_is_set( port, TEST_BW_LIMIT_RULE_1) def test_port_creation_with_different_bandwidth_limits(self): """Make sure different types of policies end on the right ports.""" port_dicts = self.create_test_ports(amount=3) port_dicts[0]['qos_policy_id'] = TEST_POLICY_ID1 port_dicts[1]['qos_policy_id'] = TEST_POLICY_ID2 self.setup_agent_and_ports(port_dicts) self.wait_until_ports_state(self.ports, up=True) self._assert_bandwidth_limit_rule_is_set(self.ports[0], TEST_BW_LIMIT_RULE_1) self._assert_bandwidth_limit_rule_is_set(self.ports[1], TEST_BW_LIMIT_RULE_2) self._assert_bandwidth_limit_rule_not_set(self.ports[2]) def test_simple_port_policy_update(self): self.setup_agent_and_ports( port_dicts=self.create_test_ports(amount=1, policy_id=TEST_POLICY_ID1)) self.wait_until_ports_state(self.ports, up=True) policy_copy = copy.deepcopy(self.qos_policies[TEST_POLICY_ID1]) policy_copy.rules[0].max_kbps = 500 policy_copy.rules[0].max_burst_kbps = 5 consumer_reg.push(resources.QOS_POLICY, policy_copy, events.UPDATED) self.wait_until_bandwidth_limit_rule_applied(self.ports[0], policy_copy.rules[0]) self._assert_bandwidth_limit_rule_is_set(self.ports[0], policy_copy.rules[0]) def test_port_qos_disassociation(self): """Test that qos_policy_id set to None will remove all qos rules from given port. """ port_dict = self._create_port_with_qos() port_dict['qos_policy_id'] = None self.agent.port_update(None, port=port_dict) self.wait_until_bandwidth_limit_rule_applied(port_dict, None) def test_port_qos_update_policy_id(self): """Test that change of qos policy id on given port refreshes all its rules. """ port_dict = self._create_port_with_qos() port_dict['qos_policy_id'] = TEST_POLICY_ID2 self.agent.port_update(None, port=port_dict) self.wait_until_bandwidth_limit_rule_applied(port_dict, TEST_BW_LIMIT_RULE_2) def test_policy_rule_delete(self): port_dict = self._create_port_with_qos() policy_copy = copy.deepcopy(self.qos_policies[TEST_POLICY_ID1]) policy_copy.rules = list() consumer_reg.push(resources.QOS_POLICY, policy_copy, events.UPDATED) self.wait_until_bandwidth_limit_rule_applied(port_dict, None) neutron-8.4.0/neutron/tests/functional/agent/l2/extensions/__init__.py0000664000567000056710000000000013044372736027276 0ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/tests/functional/agent/l2/base.py0000664000567000056710000004250613044372760024270 0ustar jenkinsjenkins00000000000000# Copyright (c) 2015 Red Hat, Inc. # Copyright (c) 2015 SUSE Linux Products GmbH # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import random import eventlet import mock from oslo_config import cfg from oslo_utils import uuidutils from neutron.agent.common import config as agent_config from neutron.agent.common import ovs_lib from neutron.agent.l2.extensions import manager as ext_manager from neutron.agent.linux import interface from neutron.agent.linux import polling from neutron.agent.linux import utils as agent_utils from neutron.common import config as common_config from neutron.common import constants as n_const from neutron.common import utils from neutron.plugins.common import constants as p_const from neutron.plugins.ml2.drivers.openvswitch.agent.common import config \ as ovs_config from neutron.plugins.ml2.drivers.openvswitch.agent.common import constants from neutron.plugins.ml2.drivers.openvswitch.agent.openflow.ovs_ofctl \ import br_int from neutron.plugins.ml2.drivers.openvswitch.agent.openflow.ovs_ofctl \ import br_phys from neutron.plugins.ml2.drivers.openvswitch.agent.openflow.ovs_ofctl \ import br_tun from neutron.plugins.ml2.drivers.openvswitch.agent import ovs_neutron_agent \ as ovs_agent from neutron.tests.common import net_helpers from neutron.tests.functional.agent.linux import base class OVSAgentTestFramework(base.BaseOVSLinuxTestCase): def setUp(self): super(OVSAgentTestFramework, self).setUp() agent_rpc = ('neutron.plugins.ml2.drivers.openvswitch.agent.' 'ovs_neutron_agent.OVSPluginApi') mock.patch(agent_rpc).start() mock.patch('neutron.agent.rpc.PluginReportStateAPI').start() self.br_int = base.get_rand_name(n_const.DEVICE_NAME_MAX_LEN, prefix='br-int') self.br_tun = base.get_rand_name(n_const.DEVICE_NAME_MAX_LEN, prefix='br-tun') self.br_phys = base.get_rand_name(n_const.DEVICE_NAME_MAX_LEN, prefix='br-phys') patch_name_len = n_const.DEVICE_NAME_MAX_LEN - len("-patch-tun") self.patch_tun = "%s-patch-tun" % self.br_int[patch_name_len:] self.patch_int = "%s-patch-int" % self.br_tun[patch_name_len:] self.ovs = ovs_lib.BaseOVS() self.config = self._configure_agent() self.driver = interface.OVSInterfaceDriver(self.config) self.namespace = self.useFixture(net_helpers.NamespaceFixture()).name def _get_config_opts(self): config = cfg.ConfigOpts() config.register_opts(common_config.core_opts) config.register_opts(interface.OPTS) config.register_opts(ovs_config.ovs_opts, "OVS") config.register_opts(ovs_config.agent_opts, "AGENT") agent_config.register_interface_driver_opts_helper(config) agent_config.register_agent_state_opts_helper(config) ext_manager.register_opts(config) return config def _configure_agent(self): config = self._get_config_opts() config.set_override( 'interface_driver', 'neutron.agent.linux.interface.OVSInterfaceDriver') config.set_override('integration_bridge', self.br_int, "OVS") config.set_override('ovs_integration_bridge', self.br_int) config.set_override('tunnel_bridge', self.br_tun, "OVS") config.set_override('int_peer_patch_port', self.patch_tun, "OVS") config.set_override('tun_peer_patch_port', self.patch_int, "OVS") config.set_override('host', 'ovs-agent') return config def _bridge_classes(self): return { 'br_int': br_int.OVSIntegrationBridge, 'br_phys': br_phys.OVSPhysicalBridge, 'br_tun': br_tun.OVSTunnelBridge } def create_agent(self, create_tunnels=True, ancillary_bridge=None, local_ip='192.168.10.1'): if create_tunnels: tunnel_types = [p_const.TYPE_VXLAN] else: tunnel_types = None bridge_mappings = ['physnet:%s' % self.br_phys] self.config.set_override('tunnel_types', tunnel_types, "AGENT") self.config.set_override('polling_interval', 1, "AGENT") 
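        # (A 1-second polling interval keeps device-change detection fast
        # in these functional tests; deployments normally use the longer
        # config default.)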
        self.config.set_override('prevent_arp_spoofing', False, "AGENT")
        self.config.set_override('local_ip', local_ip, "OVS")
        self.config.set_override('bridge_mappings', bridge_mappings, "OVS")
        # Physical bridges should be created prior to running
        self._bridge_classes()['br_phys'](self.br_phys).create()
        agent = ovs_agent.OVSNeutronAgent(self._bridge_classes(),
                                          self.config)
        self.addCleanup(self.ovs.delete_bridge, self.br_int)
        if tunnel_types:
            self.addCleanup(self.ovs.delete_bridge, self.br_tun)
        self.addCleanup(self.ovs.delete_bridge, self.br_phys)
        agent.sg_agent = mock.Mock()
        agent.ancillary_brs = []
        if ancillary_bridge:
            agent.ancillary_brs.append(ancillary_bridge)
        return agent

    def _mock_get_events(self, agent, polling_manager, ports):
        get_events = polling_manager.get_events
        p_ids = [p['id'] for p in ports]

        def filter_events():
            events = get_events()
            filtered_ports = []
            for dev in events['added']:
                iface_id = agent.int_br.portid_from_external_ids(
                    dev.get('external_ids', []))
                # If the event is not about a port that was created by this
                # test, filter the event out. Since these tests are not run
                # in isolation, processing all the events might make some
                # test fail (e.g. the agent might keep resyncing because it
                # keeps finding not-ready ports that are created by other
                # tests)
                if iface_id in p_ids:
                    filtered_ports.append(dev)
            return {'added': filtered_ports, 'removed': events['removed']}
        polling_manager.get_events = mock.Mock(side_effect=filter_events)

    def start_agent(self, agent, ports=None, unplug_ports=None):
        if unplug_ports is None:
            unplug_ports = []
        if ports is None:
            ports = []
        self.setup_agent_rpc_mocks(agent, unplug_ports)
        polling_manager = polling.InterfacePollingMinimizer()
        self._mock_get_events(agent, polling_manager, ports)
        self.addCleanup(polling_manager.stop)
        polling_manager.start()
        agent_utils.wait_until_true(
            polling_manager._monitor.is_active)
        agent.check_ovs_status = mock.Mock(
            return_value=constants.OVS_NORMAL)
        t = eventlet.spawn(agent.rpc_loop, polling_manager)

        def stop_agent(agent, rpc_loop_thread):
            agent.run_daemon_loop = False
            rpc_loop_thread.wait()

        self.addCleanup(stop_agent, agent, t)
        return polling_manager

    def _create_test_port_dict(self):
        return {'id': uuidutils.generate_uuid(),
                'mac_address': utils.get_random_mac(
                    'fa:16:3e:00:00:00'.split(':')),
                'fixed_ips': [{
                    'ip_address': '10.%d.%d.%d' % (
                        random.randint(3, 254),
                        random.randint(3, 254),
                        random.randint(3, 254))}],
                'vif_name': base.get_rand_name(
                    self.driver.DEV_NAME_LEN, self.driver.DEV_NAME_PREFIX)}

    def _create_test_network_dict(self):
        return {'id': uuidutils.generate_uuid(),
                'tenant_id': uuidutils.generate_uuid()}

    def _plug_ports(self, network, ports, agent, bridge=None,
                    namespace=None):
        if namespace is None:
            namespace = self.namespace
        for port in ports:
            bridge = bridge or agent.int_br
            self.driver.plug(
                network.get('id'), port.get('id'), port.get('vif_name'),
                port.get('mac_address'),
                bridge.br_name, namespace=namespace)
            ip_cidrs = ["%s/8" % (port.get('fixed_ips')[0]['ip_address'])]
            self.driver.init_l3(port.get('vif_name'), ip_cidrs,
                                namespace=namespace)

    def _unplug_ports(self, ports, agent):
        for port in ports:
            self.driver.unplug(
                port.get('vif_name'), agent.int_br.br_name, self.namespace)

    def _get_device_details(self, port, network):
        dev = {'device': port['id'],
               'port_id': port['id'],
               'network_id': network['id'],
               'network_type': network.get('network_type', 'vlan'),
               'physical_network': network.get('physical_network',
                                               'physnet'),
               'segmentation_id': network.get('segmentation_id', 1),
               'fixed_ips': port['fixed_ips'],
'device_owner': 'compute', 'port_security_enabled': True, 'security_groups': ['default'], 'admin_state_up': True} return dev def assert_bridge(self, br, exists=True): self.assertEqual(exists, self.ovs.bridge_exists(br)) def assert_patch_ports(self, agent): def get_peer(port): return agent.int_br.db_get_val( 'Interface', port, 'options', check_error=True) agent_utils.wait_until_true( lambda: get_peer(self.patch_int) == {'peer': self.patch_tun}) agent_utils.wait_until_true( lambda: get_peer(self.patch_tun) == {'peer': self.patch_int}) def assert_bridge_ports(self): for port in [self.patch_tun, self.patch_int]: self.assertTrue(self.ovs.port_exists(port)) def assert_vlan_tags(self, ports, agent): for port in ports: res = agent.int_br.db_get_val('Port', port.get('vif_name'), 'tag') self.assertTrue(res) def _expected_plugin_rpc_call(self, call, expected_devices, is_up=True): """Helper to check expected rpc call are received :param call: The call to check :param expected_devices: The device for which call is expected :param is_up: True if expected_devices are devices that are set up, False if expected_devices are devices that are set down """ if is_up: rpc_devices = [ dev for args in call.call_args_list for dev in args[0][1]] else: rpc_devices = [ dev for args in call.call_args_list for dev in args[0][2]] for dev in rpc_devices: if dev in expected_devices: expected_devices.remove(dev) # reset mock otherwise if the mock is called again the same call param # will be processed again call.reset_mock() return not expected_devices def create_test_ports(self, amount=3, **kwargs): ports = [] for x in range(amount): ports.append(self._create_test_port_dict(**kwargs)) return ports def _mock_update_device(self, context, devices_up, devices_down, agent_id, host=None): dev_up = [] dev_down = [] for port in self.ports: if devices_up and port['id'] in devices_up: dev_up.append(port['id']) if devices_down and port['id'] in devices_down: dev_down.append({'device': port['id'], 'exists': True}) return {'devices_up': dev_up, 'failed_devices_up': [], 'devices_down': dev_down, 'failed_devices_down': []} def setup_agent_rpc_mocks(self, agent, unplug_ports): def mock_device_details(context, devices, agent_id, host=None): details = [] for port in self.ports: if port['id'] in devices: dev = self._get_device_details( port, self.network) details.append(dev) ports_to_unplug = [x for x in unplug_ports if x['id'] in devices] if ports_to_unplug: self._unplug_ports(ports_to_unplug, self.agent) return {'devices': details, 'failed_devices': []} (agent.plugin_rpc.get_devices_details_list_and_failed_devices. 
side_effect) = mock_device_details agent.plugin_rpc.update_device_list.side_effect = ( self._mock_update_device) def _prepare_resync_trigger(self, agent): def mock_device_raise_exception(context, devices_up, devices_down, agent_id, host=None): agent.plugin_rpc.update_device_list.side_effect = ( self._mock_update_device) raise Exception('Exception to trigger resync') self.agent.plugin_rpc.update_device_list.side_effect = ( mock_device_raise_exception) def _prepare_failed_dev_up_trigger(self, agent): def mock_failed_devices_up(context, devices_up, devices_down, agent_id, host=None): failed_devices = [] devices = list(devices_up) # first port fails if self.ports[0]['id'] in devices_up: # reassign side_effect so that next RPC call will succeed agent.plugin_rpc.update_device_list.side_effect = ( self._mock_update_device) devices.remove(self.ports[0]['id']) failed_devices.append(self.ports[0]['id']) return {'devices_up': devices, 'failed_devices_up': failed_devices, 'devices_down': [], 'failed_devices_down': []} self.agent.plugin_rpc.update_device_list.side_effect = ( mock_failed_devices_up) def _prepare_failed_dev_down_trigger(self, agent): def mock_failed_devices_down(context, devices_up, devices_down, agent_id, host=None): # first port fails failed_port_id = self.ports[0]['id'] failed_devices_down = [] dev_down = [ {'device': p['id'], 'exists': True} for p in self.ports if p['id'] in devices_down and ( p['id'] != failed_port_id)] # check if it's the call to set devices down and if the device # that is supposed to fail is in the call then modify the # side_effect so that next RPC call will succeed. if devices_down and failed_port_id in devices_down: agent.plugin_rpc.update_device_list.side_effect = ( self._mock_update_device) failed_devices_down.append(failed_port_id) return {'devices_up': devices_up, 'failed_devices_up': [], 'devices_down': dev_down, 'failed_devices_down': failed_devices_down} self.agent.plugin_rpc.update_device_list.side_effect = ( mock_failed_devices_down) def wait_until_ports_state(self, ports, up, timeout=60): port_ids = [p['id'] for p in ports] agent_utils.wait_until_true( lambda: self._expected_plugin_rpc_call( self.agent.plugin_rpc.update_device_list, port_ids, up), timeout=timeout) def setup_agent_and_ports(self, port_dicts, create_tunnels=True, ancillary_bridge=None, trigger_resync=False, failed_dev_up=False, failed_dev_down=False, network=None): self.ports = port_dicts self.agent = self.create_agent(create_tunnels=create_tunnels, ancillary_bridge=ancillary_bridge) self.polling_manager = self.start_agent(self.agent, ports=self.ports) self.network = network or self._create_test_network_dict() if trigger_resync: self._prepare_resync_trigger(self.agent) elif failed_dev_up: self._prepare_failed_dev_up_trigger(self.agent) elif failed_dev_down: self._prepare_failed_dev_down_trigger(self.agent) self._plug_ports(self.network, self.ports, self.agent, bridge=ancillary_bridge) def plug_ports_to_phys_br(self, network, ports, namespace=None): physical_network = network.get('physical_network', 'physnet') phys_segmentation_id = network.get('segmentation_id', None) network_type = network.get('network_type', 'flat') phys_br = self.agent.phys_brs[physical_network] self._plug_ports(network, ports, self.agent, bridge=phys_br, namespace=namespace) if phys_segmentation_id and network_type == 'vlan': for port in ports: phys_br.set_db_attribute( "Port", port['vif_name'], "tag", phys_segmentation_id) 
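# A typical functional test builds on this framework roughly as follows
# (condensed from the QoS extension tests above; illustrative only):
#
#     class MyAgentTest(OVSAgentTestFramework):
#         def test_ports_come_up(self):
#             self.setup_agent_and_ports(
#                 port_dicts=self.create_test_ports(amount=3))
#             self.wait_until_ports_state(self.ports, up=True)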
neutron-8.4.0/neutron/tests/functional/agent/windows/0000775000567000056710000000000013044373210024141 5ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/tests/functional/agent/windows/__init__.py0000664000567000056710000000000013044372736026254 0ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/tests/functional/agent/windows/test_ip_lib.py0000664000567000056710000000240513044372736027025 0ustar jenkinsjenkins00000000000000# Copyright 2016 Cloudbase Solutions. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron.agent.windows import ip_lib from neutron.tests import base WRONG_IP = '0.0.0.0' TEST_IP = '127.0.0.1' class IpLibTestCase(base.BaseTestCase): def test_ipwrapper_get_device_by_ip_None(self): self.assertIsNone(ip_lib.IPWrapper().get_device_by_ip(WRONG_IP)) def test_ipwrapper_get_device_by_ip(self): ip_dev = ip_lib.IPWrapper().get_device_by_ip(TEST_IP) self.assertEqual('lo', ip_dev.device_name) def test_device_has_ip(self): not_a_device = ip_lib.IPDevice('#!#._not_a_device_bleargh!!@@@') self.assertFalse(not_a_device.device_has_ip(TEST_IP)) neutron-8.4.0/neutron/tests/functional/base.py0000664000567000056710000000574213044372760022656 0ustar jenkinsjenkins00000000000000# Copyright (c) 2014 OpenStack Foundation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import os from oslo_config import cfg from neutron.agent.common import config from neutron.agent.linux import utils from neutron.common import utils as common_utils from neutron.tests import base from neutron.tests.common import base as common_base SUDO_CMD = 'sudo -n' # This is the directory from which infra fetches log files for functional tests DEFAULT_LOG_DIR = '/tmp/dsvm-functional-logs/' class BaseSudoTestCase(base.BaseTestCase): """ Base class for tests requiring invocation of commands via a root helper. This class skips (during setUp) its tests unless sudo is enabled, ie: OS_SUDO_TESTING is set to '1' or 'True' in the test execution environment. This is intended to allow developers to run the functional suite (e.g. tox -e functional) without test failures if sudo invocations are not allowed. Running sudo tests in the upstream gate jobs (*-neutron-dsvm-functional) requires the additional step of setting OS_ROOTWRAP_CMD to the rootwrap command configured by devstack, e.g. sudo /usr/local/bin/neutron-rootwrap /etc/neutron/rootwrap.conf Gate jobs do not allow invocations of sudo without rootwrap to ensure that rootwrap configuration gets as much testing as possible. 
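    A typical local invocation therefore looks something like this
    (illustrative only; adjust the rootwrap command to your environment):

        OS_SUDO_TESTING=1 \
        OS_ROOTWRAP_CMD='sudo /usr/local/bin/neutron-rootwrap /etc/neutron/rootwrap.conf' \
        tox -e functional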
""" def setUp(self): super(BaseSudoTestCase, self).setUp() if not base.bool_from_env('OS_SUDO_TESTING'): self.skipTest('Testing with sudo is not enabled') # Have each test log into its own log file cfg.CONF.set_override('debug', True) common_utils.ensure_dir(DEFAULT_LOG_DIR) log_file = base.sanitize_log_path( os.path.join(DEFAULT_LOG_DIR, "%s.log" % self.id())) cfg.CONF.set_override('log_file', log_file) config.setup_logging() config.register_root_helper(cfg.CONF) self.config(group='AGENT', root_helper=os.environ.get('OS_ROOTWRAP_CMD', SUDO_CMD)) self.config(group='AGENT', root_helper_daemon=os.environ.get( 'OS_ROOTWRAP_DAEMON_CMD')) @common_base.no_skip_on_missing_deps def check_command(self, cmd, error_text, skip_msg, run_as_root=False): try: utils.execute(cmd, run_as_root=run_as_root) except RuntimeError as e: if error_text in str(e): self.skipTest(skip_msg) raise neutron-8.4.0/neutron/tests/functional/requirements.txt0000664000567000056710000000044113044372736024650 0ustar jenkinsjenkins00000000000000# Additional requirements for functional tests # The order of packages is significant, because pip processes them in the order # of appearance. Changing the order has an impact on the overall integration # process, which may cause wedges in the gate later. psutil>=1.1.1,<2.0.0 psycopg2 neutron-8.4.0/neutron/tests/functional/test_server.py0000664000567000056710000002316713044372760024312 0ustar jenkinsjenkins00000000000000# Copyright 2015 Mirantis Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import os import signal import socket import time import traceback import httplib2 import mock from oslo_config import cfg import psutil from neutron.agent.linux import utils from neutron import service from neutron.tests import base from neutron import worker from neutron import wsgi CONF = cfg.CONF # This message will be written to temporary file each time # start method is called. FAKE_START_MSG = "start".encode("utf-8") TARGET_PLUGIN = 'neutron.plugins.ml2.plugin.Ml2Plugin' class TestNeutronServer(base.BaseTestCase): def setUp(self): super(TestNeutronServer, self).setUp() self.service_pid = None self.workers = None self.temp_file = self.get_temp_file_path("test_server.tmp") self.health_checker = self._check_active self.pipein, self.pipeout = os.pipe() self.addCleanup(self._destroy_workers) def _destroy_workers(self): if self.service_pid: # Make sure all processes are stopped os.kill(self.service_pid, signal.SIGKILL) def _start_server(self, callback, workers): """Run a given service. 
:param callback: callback that will start the required service :param workers: number of service workers :returns: list of spawned workers' pids """ self.workers = workers # Fork a new process in which server will be started pid = os.fork() if pid == 0: status = 0 try: callback(workers) except SystemExit as exc: status = exc.code except BaseException: traceback.print_exc() status = 2 # Really exit os._exit(status) self.service_pid = pid # If number of workers is 1 it is assumed that we run # a service in the current process. if self.workers > 1: # Wait at most 10 seconds to spawn workers condition = lambda: self.workers == len(self._get_workers()) utils.wait_until_true( condition, timeout=10, sleep=0.1, exception=RuntimeError( "Failed to start %d workers." % self.workers)) workers = self._get_workers() self.assertEqual(len(workers), self.workers) return workers # Wait for a service to start. utils.wait_until_true(self.health_checker, timeout=10, sleep=0.1, exception=RuntimeError( "Failed to start service.")) return [self.service_pid] def _get_workers(self): """Get the list of processes in which WSGI server is running.""" def safe_ppid(proc): try: return proc.ppid except psutil.NoSuchProcess: return None if self.workers > 1: return [proc.pid for proc in psutil.process_iter() if safe_ppid(proc) == self.service_pid] else: return [proc.pid for proc in psutil.process_iter() if proc.pid == self.service_pid] def _check_active(self): """Dummy service activity check.""" time.sleep(5) return True def _fake_start(self): with open(self.temp_file, 'a') as f: f.write(FAKE_START_MSG) def _test_restart_service_on_sighup(self, service, workers=1): """Test that a service correctly (re)starts on receiving SIGHUP. 1. Start a service with a given number of workers. 2. Send SIGHUP to the service. 3. Wait for workers (if any) to (re)start. """ self._start_server(callback=service, workers=workers) os.kill(self.service_pid, signal.SIGHUP) expected_msg = FAKE_START_MSG * workers * 2 # Wait for temp file to be created and its size reaching the expected # value expected_size = len(expected_msg) condition = lambda: (os.path.isfile(self.temp_file) and os.stat(self.temp_file).st_size == expected_size) utils.wait_until_true( condition, timeout=5, sleep=0.1, exception=RuntimeError( "Timed out waiting for file %(filename)s to be created and " "its size become equal to %(size)s." % {'filename': self.temp_file, 'size': expected_size})) # Verify that start has been called twice for each worker (one for # initial start, and the second one on SIGHUP after children were # terminated). 
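        # With workers=2, for instance, the file should end up holding
        # FAKE_START_MSG four times: two initial starts plus two restarts.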
with open(self.temp_file, 'r') as f: res = f.readline() self.assertEqual(expected_msg, res) class TestWsgiServer(TestNeutronServer): """Tests for neutron.wsgi.Server.""" def setUp(self): super(TestWsgiServer, self).setUp() self.health_checker = self._check_active self.port = None @staticmethod def application(environ, start_response): """A primitive test application.""" response_body = 'Response' status = '200 OK' response_headers = [('Content-Type', 'text/plain'), ('Content-Length', str(len(response_body)))] start_response(status, response_headers) return [response_body] def _check_active(self): """Check a wsgi service is active by making a GET request.""" port = int(os.read(self.pipein, 5)) conn = httplib2.HTTPConnectionWithTimeout("localhost", port) try: conn.request("GET", "/") resp = conn.getresponse() return resp.status == 200 except socket.error: return False def _run_wsgi(self, workers=1): """Start WSGI server with a test application.""" # Mock start method to check that children are started again on # receiving SIGHUP. with mock.patch("neutron.wsgi.WorkerService.start") as start_method: start_method.side_effect = self._fake_start server = wsgi.Server("Test") server.start(self.application, 0, "0.0.0.0", workers=workers) # Memorize a port that was chosen for the service self.port = server.port os.write(self.pipeout, str(self.port)) server.wait() def test_restart_wsgi_on_sighup_multiple_workers(self): self._test_restart_service_on_sighup(service=self._run_wsgi, workers=2) class TestRPCServer(TestNeutronServer): """Tests for neutron RPC server.""" def setUp(self): super(TestRPCServer, self).setUp() self.setup_coreplugin(TARGET_PLUGIN) self._plugin_patcher = mock.patch(TARGET_PLUGIN, autospec=True) self.plugin = self._plugin_patcher.start() self.plugin.return_value.rpc_workers_supported = True def _serve_rpc(self, workers=1): """Start RPC server with a given number of workers.""" # Mock start method to check that children are started again on # receiving SIGHUP. 
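        # The _fake_start side effect appends FAKE_START_MSG to the temp
        # file, so the parent test process can count (re)starts simply by
        # watching the file size.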
with mock.patch("neutron.service.RpcWorker.start") as start_method: with mock.patch( "neutron.manager.NeutronManager.get_plugin" ) as get_plugin: start_method.side_effect = self._fake_start get_plugin.return_value = self.plugin CONF.set_override("rpc_workers", workers) # not interested in state report workers specifically CONF.set_override("rpc_state_report_workers", 0) launcher = service.serve_rpc() launcher.wait() def test_restart_rpc_on_sighup_multiple_workers(self): self._test_restart_service_on_sighup(service=self._serve_rpc, workers=2) class TestPluginWorker(TestNeutronServer): """Ensure that a plugin returning Workers spawns workers""" def setUp(self): super(TestPluginWorker, self).setUp() self.setup_coreplugin(TARGET_PLUGIN) self._plugin_patcher = mock.patch(TARGET_PLUGIN, autospec=True) self.plugin = self._plugin_patcher.start() def _start_plugin(self, workers=1): with mock.patch('neutron.manager.NeutronManager.get_plugin') as gp: gp.return_value = self.plugin launchers = service.start_plugin_workers() for launcher in launchers: launcher.wait() def test_start(self): class FakeWorker(worker.NeutronWorker): def start(self): pass def wait(self): pass def stop(self): pass def reset(self): pass # Make both ABC happy and ensure 'self' is correct FakeWorker.start = self._fake_start workers = [FakeWorker()] self.plugin.return_value.get_workers.return_value = workers self._test_restart_service_on_sighup(service=self._start_plugin, workers=len(workers)) neutron-8.4.0/neutron/tests/functional/api/0000775000567000056710000000000013044373210022122 5ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/tests/functional/api/__init__.py0000664000567000056710000000000013044372736024235 0ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/tests/functional/api/test_policies.py0000664000567000056710000000720413044372736025361 0ustar jenkinsjenkins00000000000000# Copyright (c) 2014 Red Hat, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import os.path from neutron.api import extensions from neutron.api.v2 import attributes from neutron import context from neutron import policy from neutron.tests import base from neutron.tests import tools TEST_PATH = os.path.dirname(os.path.abspath(__file__)) class APIPolicyTestCase(base.BaseTestCase): """ Tests for REST API policy checks. Ideally this would be done against an environment with an instantiated plugin, but there appears to be problems with instantiating a plugin against an sqlite environment and as yet, there is no precedent for running a functional test against an actual database backend. 
""" api_version = "2.0" def setUp(self): super(APIPolicyTestCase, self).setUp() self.useFixture(tools.AttributeMapMemento()) self.extension_path = os.path.abspath(os.path.join( TEST_PATH, "../../../extensions")) policy.reset() def _network_definition(self): return {'name': 'test_network', 'ports': [], 'subnets': [], 'status': 'up', 'admin_state_up': True, 'shared': False, 'tenant_id': 'admin', 'id': 'test_network', 'router:external': True} def _check_external_router_policy(self, context): return policy.check(context, 'get_network', self._network_definition()) def test_premature_loading(self): """ Verifies that loading policies by way of admin context before populating extensions and extending the resource map results in networks with router:external is true being invisible to regular tenants. """ extension_manager = extensions.ExtensionManager(self.extension_path) admin_context = context.get_admin_context() tenant_context = context.Context('test_user', 'test_tenant_id', False) extension_manager.extend_resources(self.api_version, attributes.RESOURCE_ATTRIBUTE_MAP) self.assertTrue(self._check_external_router_policy(admin_context)) self.assertFalse(self._check_external_router_policy(tenant_context)) def test_proper_load_order(self): """ Verifies that loading policies by way of admin context after populating extensions and extending the resource map results in networks with router:external are visible to regular tenants. """ extension_manager = extensions.ExtensionManager(self.extension_path) extension_manager.extend_resources(self.api_version, attributes.RESOURCE_ATTRIBUTE_MAP) admin_context = context.get_admin_context() tenant_context = context.Context('test_user', 'test_tenant_id', False) self.assertTrue(self._check_external_router_policy(admin_context)) self.assertTrue(self._check_external_router_policy(tenant_context)) def tearDown(self): policy.reset() super(APIPolicyTestCase, self).tearDown() neutron-8.4.0/neutron/tests/functional/db/0000775000567000056710000000000013044373210021736 5ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/tests/functional/db/__init__.py0000664000567000056710000000000013044372736024051 0ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/tests/functional/db/test_models.py0000664000567000056710000000232413044372736024647 0ustar jenkinsjenkins00000000000000# Copyright (c) 2014 OpenStack Foundation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import sqlalchemy from neutron.tests import base class TestDBCreation(base.BaseTestCase): """Check database schema can be created without conflicts. For each test case is created a SQLite memory database. 
""" def setUp(self): super(TestDBCreation, self).setUp() self.engine = sqlalchemy.create_engine('sqlite://') def _test_creation(self, module): metadata = module.get_metadata() metadata.create_all(self.engine) def test_head_creation(self): from neutron.db.migration.models import head self._test_creation(head) neutron-8.4.0/neutron/tests/functional/db/test_ipam.py0000664000567000056710000002425613044372760024317 0ustar jenkinsjenkins00000000000000# Copyright 2015 SUSE Linux Products GmbH # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import cfg from oslo_db.sqlalchemy import session import testtools from neutron.api.v2 import attributes from neutron.common import constants from neutron.common import exceptions as n_exc from neutron import context from neutron.db import db_base_plugin_v2 as base_plugin from neutron.db import model_base from neutron.db import models_v2 from neutron.ipam.drivers.neutrondb_ipam import db_models as ipam_models from neutron.tests import base from neutron.tests.common import base as common_base def get_admin_test_context(db_url): """ get_admin_test_context is used to provide a test context. A new session is created using the db url specified """ ctx = context.Context(user_id=None, tenant_id=None, is_admin=True, overwrite=False) facade = session.EngineFacade(db_url, mysql_sql_mode='STRICT_ALL_TABLES') ctx._session = facade.get_session(autocommit=False, expire_on_commit=True) return ctx class IpamTestCase(object): """ Base class for tests that aim to test ip allocation. 
""" def configure_test(self, use_pluggable_ipam=False): model_base.BASEV2.metadata.create_all(self.engine) cfg.CONF.set_override('notify_nova_on_port_status_changes', False) if use_pluggable_ipam: self._turn_on_pluggable_ipam() else: self._turn_off_pluggable_ipam() self.plugin = base_plugin.NeutronDbPluginV2() self.cxt = get_admin_test_context(self.engine.url) self.addCleanup(self.cxt._session.close) self.tenant_id = 'test_tenant' self.network_id = 'test_net_id' self.subnet_id = 'test_sub_id' self.port_id = 'test_p_id' self._create_network() self._create_subnet() def _turn_off_pluggable_ipam(self): cfg.CONF.set_override('ipam_driver', None) self.ip_availability_range = models_v2.IPAvailabilityRange def _turn_on_pluggable_ipam(self): cfg.CONF.set_override('ipam_driver', 'internal') DB_PLUGIN_KLASS = 'neutron.db.db_base_plugin_v2.NeutronDbPluginV2' self.setup_coreplugin(DB_PLUGIN_KLASS) self.ip_availability_range = ipam_models.IpamAvailabilityRange def result_set_to_dicts(self, resultset, keys): dicts = [] for item in resultset: item_dict = dict((x, item[x]) for x in keys) dicts.append(item_dict) return dicts def assert_ip_alloc_matches(self, expected): result_set = self.cxt.session.query(models_v2.IPAllocation).all() keys = ['port_id', 'ip_address', 'subnet_id', 'network_id'] actual = self.result_set_to_dicts(result_set, keys) self.assertEqual(expected, actual) def assert_ip_avail_range_matches(self, expected): result_set = self.cxt.session.query( self.ip_availability_range).all() keys = ['first_ip', 'last_ip'] actual = self.result_set_to_dicts(result_set, keys) self.assertEqual(expected, actual) def assert_ip_alloc_pool_matches(self, expected): result_set = self.cxt.session.query(models_v2.IPAllocationPool).all() keys = ['first_ip', 'last_ip', 'subnet_id'] actual = self.result_set_to_dicts(result_set, keys) self.assertEqual(expected, actual) def _create_network(self): network = {'tenant_id': self.tenant_id, 'id': self.network_id, 'name': 'test-net', 'admin_state_up': True, 'shared': False, 'status': constants.NET_STATUS_ACTIVE} return self.plugin.create_network(self.cxt, {'network': network}) def _create_subnet(self): subnet = {'tenant_id': self.tenant_id, 'id': self.subnet_id, 'name': 'test_sub', 'network_id': self.network_id, 'ip_version': 4, 'cidr': '10.10.10.0/29', 'enable_dhcp': False, 'gateway_ip': '10.10.10.1', 'shared': False, 'allocation_pools': attributes.ATTR_NOT_SPECIFIED, 'dns_nameservers': attributes.ATTR_NOT_SPECIFIED, 'host_routes': attributes.ATTR_NOT_SPECIFIED} return self.plugin.create_subnet(self.cxt, {'subnet': subnet}) def _create_port(self, port_id, fixed_ips=None): port_fixed_ips = (fixed_ips if fixed_ips else attributes.ATTR_NOT_SPECIFIED) port = {'tenant_id': self.tenant_id, 'name': 'test_port', 'id': port_id, 'network_id': self.network_id, 'mac_address': attributes.ATTR_NOT_SPECIFIED, 'admin_state_up': True, 'status': constants.PORT_STATUS_ACTIVE, 'device_id': 'test_dev_id', 'device_owner': 'compute', 'fixed_ips': port_fixed_ips} self.plugin.create_port(self.cxt, {'port': port}) def test_allocate_fixed_ip(self): fixed_ip = [{'ip_address': "10.10.10.3", 'subnet_id': self.subnet_id}] self._create_port(self.port_id, fixed_ip) ip_alloc_expected = [{'port_id': self.port_id, 'ip_address': fixed_ip[0].get('ip_address'), 'subnet_id': self.subnet_id, 'network_id': self.network_id}] ip_avail_ranges_expected = [{'first_ip': '10.10.10.2', 'last_ip': '10.10.10.2'}, {'first_ip': '10.10.10.4', 'last_ip': '10.10.10.6'}] ip_alloc_pool_expected = [{'first_ip': '10.10.10.2', 
'last_ip': '10.10.10.6', 'subnet_id': self.subnet_id}] self.assert_ip_alloc_matches(ip_alloc_expected) self.assert_ip_alloc_pool_matches(ip_alloc_pool_expected) self.assert_ip_avail_range_matches( ip_avail_ranges_expected) def test_allocate_first_available_ip(self): self._create_port(self.port_id) ip_alloc_expected = [{'port_id': self.port_id, 'ip_address': '10.10.10.2', 'subnet_id': self.subnet_id, 'network_id': self.network_id}] ip_avail_ranges_expected = [{'first_ip': '10.10.10.3', 'last_ip': '10.10.10.6'}] ip_alloc_pool_expected = [{'first_ip': '10.10.10.2', 'last_ip': '10.10.10.6', 'subnet_id': self.subnet_id}] self.assert_ip_alloc_matches(ip_alloc_expected) self.assert_ip_alloc_pool_matches(ip_alloc_pool_expected) self.assert_ip_avail_range_matches( ip_avail_ranges_expected) def test_allocate_ip_exausted_pool(self): # available from .2 up to .6 -> 5 for i in range(1, 6): self._create_port(self.port_id + str(i)) ip_avail_ranges_expected = [] ip_alloc_pool_expected = [{'first_ip': '10.10.10.2', 'last_ip': '10.10.10.6', 'subnet_id': self.subnet_id}] self.assert_ip_alloc_pool_matches(ip_alloc_pool_expected) self.assert_ip_avail_range_matches( ip_avail_ranges_expected) # Create another port with testtools.ExpectedException(n_exc.IpAddressGenerationFailure): self._create_port(self.port_id) def test_rebuild_availability_range(self): for i in range(1, 6): self._create_port(self.port_id + str(i)) ip_avail_ranges_expected = [] ip_alloc_pool_expected = [{'first_ip': '10.10.10.2', 'last_ip': '10.10.10.6', 'subnet_id': self.subnet_id}] self.assert_ip_alloc_pool_matches(ip_alloc_pool_expected) self.assert_ip_avail_range_matches( ip_avail_ranges_expected) # Delete some ports, this will free the first two IPs for i in range(1, 3): self.plugin.delete_port(self.cxt, self.port_id + str(i)) # Create another port, this will trigger the rebuilding of the # availability ranges self._create_port(self.port_id) ip_avail_ranges_expected = [{'first_ip': '10.10.10.3', 'last_ip': '10.10.10.3'}] ip_alloc = self.cxt.session.query(models_v2.IPAllocation).all() self.assertEqual(4, len(ip_alloc)) self.assert_ip_alloc_pool_matches(ip_alloc_pool_expected) self.assert_ip_avail_range_matches( ip_avail_ranges_expected) class TestIpamMySql(common_base.MySQLTestCase, base.BaseTestCase, IpamTestCase): def setUp(self): super(TestIpamMySql, self).setUp() self.configure_test() class TestIpamPsql(common_base.PostgreSQLTestCase, base.BaseTestCase, IpamTestCase): def setUp(self): super(TestIpamPsql, self).setUp() self.configure_test() class TestPluggableIpamMySql(common_base.MySQLTestCase, base.BaseTestCase, IpamTestCase): def setUp(self): super(TestPluggableIpamMySql, self).setUp() self.configure_test(use_pluggable_ipam=True) class TestPluggableIpamPsql(common_base.PostgreSQLTestCase, base.BaseTestCase, IpamTestCase): def setUp(self): super(TestPluggableIpamPsql, self).setUp() self.configure_test(use_pluggable_ipam=True) neutron-8.4.0/neutron/tests/functional/db/test_migrations.py0000664000567000056710000003500613044372760025540 0ustar jenkinsjenkins00000000000000# Copyright 2014 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. from alembic import script as alembic_script from contextlib import contextmanager from oslo_config import cfg from oslo_config import fixture as config_fixture from oslo_db.sqlalchemy import test_base from oslo_db.sqlalchemy import test_migrations import six import sqlalchemy from sqlalchemy import event import sqlalchemy.types as types import neutron.db.migration as migration_help from neutron.db.migration.alembic_migrations import external from neutron.db.migration import cli as migration from neutron.db.migration.models import head as head_models from neutron.tests import base as ntest_base from neutron.tests.common import base cfg.CONF.import_opt('core_plugin', 'neutron.common.config') CORE_PLUGIN = 'neutron.plugins.ml2.plugin.Ml2Plugin' class _TestModelsMigrations(test_migrations.ModelsMigrationsSync): '''Test for checking of equality models state and migrations. For the opportunistic testing you need to set up a db named 'openstack_citest' with user 'openstack_citest' and password 'openstack_citest' on localhost. The test will then use that db and user/password combo to run the tests. For PostgreSQL on Ubuntu this can be done with the following commands:: sudo -u postgres psql postgres=# create user openstack_citest with createdb login password 'openstack_citest'; postgres=# create database openstack_citest with owner openstack_citest; For MySQL on Ubuntu this can be done with the following commands:: mysql -u root >create database openstack_citest; >grant all privileges on openstack_citest.* to openstack_citest@localhost identified by 'openstack_citest'; Output is a list that contains information about differences between db and models. Output example:: [('add_table', Table('bat', MetaData(bind=None), Column('info', String(), table=), schema=None)), ('remove_table', Table(u'bar', MetaData(bind=None), Column(u'data', VARCHAR(), table=), schema=None)), ('add_column', None, 'foo', Column('data', Integer(), table=)), ('remove_column', None, 'foo', Column(u'old_data', VARCHAR(), table=None)), [('modify_nullable', None, 'foo', u'x', {'existing_server_default': None, 'existing_type': INTEGER()}, True, False)]] * ``remove_*`` means that there is extra table/column/constraint in db; * ``add_*`` means that it is missing in db; * ``modify_*`` means that on column in db is set wrong type/nullable/server_default. Element contains information: - what should be modified, - schema, - table, - column, - existing correct column parameters, - right value, - wrong value. 
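    When a backend or the openstack_citest credentials are unavailable,
    the corresponding tests are skipped rather than failed, courtesy of
    the oslo.db opportunistic test fixtures.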
''' TIMEOUT_SCALING_FACTOR = 4 def setUp(self): super(_TestModelsMigrations, self).setUp() self.cfg = self.useFixture(config_fixture.Config()) self.cfg.config(core_plugin=CORE_PLUGIN) self.alembic_config = migration.get_neutron_config() self.alembic_config.neutron_config = cfg.CONF # Migration tests can take a long time self.useFixture( ntest_base.Timeout(scaling=self.TIMEOUT_SCALING_FACTOR)) def db_sync(self, engine): cfg.CONF.set_override('connection', engine.url, group='database') migration.do_alembic_command(self.alembic_config, 'upgrade', 'heads') def get_engine(self): return self.engine def get_metadata(self): return head_models.get_metadata() def include_object(self, object_, name, type_, reflected, compare_to): if type_ == 'table' and (name == 'alembic_version' or name in external.TABLES): return False return super(_TestModelsMigrations, self).include_object( object_, name, type_, reflected, compare_to) def filter_metadata_diff(self, diff): return list(filter(self.remove_unrelated_errors, diff)) # TODO(akamyshikova): remove this method as soon as comparison with Variant # will be implemented in oslo.db or alembic def compare_type(self, ctxt, insp_col, meta_col, insp_type, meta_type): if isinstance(meta_type, types.Variant): orig_type = meta_col.type meta_col.type = meta_type.impl try: return self.compare_type(ctxt, insp_col, meta_col, insp_type, meta_type.impl) finally: meta_col.type = orig_type else: ret = super(_TestModelsMigrations, self).compare_type( ctxt, insp_col, meta_col, insp_type, meta_type) if ret is not None: return ret return ctxt.impl.compare_type(insp_col, meta_col) # Remove some difference that are not mistakes just specific of # dialects, etc def remove_unrelated_errors(self, element): insp = sqlalchemy.engine.reflection.Inspector.from_engine( self.get_engine()) dialect = self.get_engine().dialect.name if isinstance(element, tuple): if dialect == 'mysql' and element[0] == 'remove_index': table_name = element[1].table.name for fk in insp.get_foreign_keys(table_name): if fk['name'] == element[1].name: return False cols = [c.name for c in element[1].expressions] for col in cols: if col in insp.get_pk_constraint( table_name)['constrained_columns']: return False else: for modified, _, table, column, _, _, new in element: if modified == 'modify_default' and dialect == 'mysql': constrained = insp.get_pk_constraint(table) if column in constrained['constrained_columns']: return False return True class TestModelsMigrationsMysql(_TestModelsMigrations, base.MySQLTestCase): @contextmanager def _listener(self, engine, listener_func): try: event.listen(engine, 'before_execute', listener_func) yield finally: event.remove(engine, 'before_execute', listener_func) # There is no use to run this against both dialects, so add this test just # for MySQL tests def test_external_tables_not_changed(self): def block_external_tables(conn, clauseelement, multiparams, params): if isinstance(clauseelement, sqlalchemy.sql.selectable.Select): return if (isinstance(clauseelement, six.string_types) and any(name in clauseelement for name in external.TABLES)): self.fail("External table referenced by neutron core " "migration.") if hasattr(clauseelement, 'element'): element = clauseelement.element if (element.name in external.TABLES or (hasattr(clauseelement, 'table') and element.table.name in external.TABLES)): # Table 'nsxv_vdr_dhcp_bindings' was created in liberty, # before NSXV has moved to separate repo. 
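                    # so it is whitelisted here rather than reported as an
                    # external-table reference.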
if ((isinstance(clauseelement, sqlalchemy.sql.ddl.CreateTable) and element.name == 'nsxv_vdr_dhcp_bindings')): return self.fail("External table referenced by neutron core " "migration.") engine = self.get_engine() cfg.CONF.set_override('connection', engine.url, group='database') with engine.begin() as connection: self.alembic_config.attributes['connection'] = connection migration.do_alembic_command(self.alembic_config, 'upgrade', 'kilo') with self._listener(engine, block_external_tables): migration.do_alembic_command(self.alembic_config, 'upgrade', 'heads') def test_branches(self): def check_expand_branch(conn, clauseelement, multiparams, params): if isinstance(clauseelement, migration_help.DROP_OPERATIONS): self.fail("Migration from expand branch contains drop command") def check_contract_branch(conn, clauseelement, multiparams, params): if isinstance(clauseelement, migration_help.CREATION_OPERATIONS): # Skip tables that were created by mistake in contract branch if hasattr(clauseelement, 'element'): element = clauseelement.element if any([ isinstance(element, sqlalchemy.Table) and element.name in ['ml2_geneve_allocations', 'ml2_geneve_endpoints'], isinstance(element, sqlalchemy.Index) and element.table.name == 'ml2_geneve_allocations' ]): return self.fail("Migration from contract branch contains create " "command") engine = self.get_engine() cfg.CONF.set_override('connection', engine.url, group='database') with engine.begin() as connection: self.alembic_config.attributes['connection'] = connection migration.do_alembic_command(self.alembic_config, 'upgrade', 'kilo') with self._listener(engine, check_expand_branch): migration.do_alembic_command( self.alembic_config, 'upgrade', '%s@head' % migration.EXPAND_BRANCH) with self._listener(engine, check_contract_branch): migration.do_alembic_command( self.alembic_config, 'upgrade', '%s@head' % migration.CONTRACT_BRANCH) def test_check_mysql_engine(self): engine = self.get_engine() cfg.CONF.set_override('connection', engine.url, group='database') with engine.begin() as connection: self.alembic_config.attributes['connection'] = connection migration.do_alembic_command(self.alembic_config, 'upgrade', 'heads') insp = sqlalchemy.engine.reflection.Inspector.from_engine(engine) # Test that table creation on MySQL only builds InnoDB tables tables = insp.get_table_names() self.assertTrue(len(tables) > 0, "No tables found. 
Wrong schema?") res = [table for table in tables if insp.get_table_options(table)['mysql_engine'] != 'InnoDB' and table != 'alembic_version'] self.assertEqual(0, len(res), "%s non InnoDB tables created" % res) def _test_has_offline_migrations(self, revision, expected): engine = self.get_engine() cfg.CONF.set_override('connection', engine.url, group='database') migration.do_alembic_command(self.alembic_config, 'upgrade', revision) self.assertEqual(expected, migration.has_offline_migrations(self.alembic_config, 'unused')) def test_has_offline_migrations_pending_contract_scripts(self): self._test_has_offline_migrations('kilo', True) def test_has_offline_migrations_all_heads_upgraded(self): self._test_has_offline_migrations('heads', False) class TestModelsMigrationsPsql(_TestModelsMigrations, base.PostgreSQLTestCase): pass class TestSanityCheck(test_base.DbTestCase): def setUp(self): super(TestSanityCheck, self).setUp() self.alembic_config = migration.get_neutron_config() self.alembic_config.neutron_config = cfg.CONF def test_check_sanity_1df244e556f5(self): ha_router_agent_port_bindings = sqlalchemy.Table( 'ha_router_agent_port_bindings', sqlalchemy.MetaData(), sqlalchemy.Column('port_id', sqlalchemy.String(36)), sqlalchemy.Column('router_id', sqlalchemy.String(36)), sqlalchemy.Column('l3_agent_id', sqlalchemy.String(36))) with self.engine.connect() as conn: ha_router_agent_port_bindings.create(conn) conn.execute(ha_router_agent_port_bindings.insert(), [ {'port_id': '1234', 'router_id': '12345', 'l3_agent_id': '123'}, {'port_id': '12343', 'router_id': '12345', 'l3_agent_id': '123'} ]) script_dir = alembic_script.ScriptDirectory.from_config( self.alembic_config) script = script_dir.get_revision("1df244e556f5").module self.assertRaises(script.DuplicateL3HARouterAgentPortBinding, script.check_sanity, conn) class TestWalkMigrations(test_base.DbTestCase): def setUp(self): super(TestWalkMigrations, self).setUp() self.alembic_config = migration.get_neutron_config() self.alembic_config.neutron_config = cfg.CONF def test_no_downgrade(self): script_dir = alembic_script.ScriptDirectory.from_config( self.alembic_config) versions = [v for v in script_dir.walk_revisions(base='base', head='heads')] failed_revisions = [] for version in versions: if hasattr(version.module, 'downgrade'): failed_revisions.append(version.revision) if failed_revisions: self.fail('Migrations %s have downgrade' % failed_revisions) neutron-8.4.0/neutron/tests/functional/cmd/0000775000567000056710000000000013044373210022114 5ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/tests/functional/cmd/test_linuxbridge_cleanup.py0000664000567000056710000000643613044372760027572 0ustar jenkinsjenkins00000000000000# Copyright (c) 2015 Thales Services SAS # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import fixtures import mock from neutron.agent.linux import ip_lib from neutron.common import constants from neutron.plugins.ml2.drivers.linuxbridge.agent import \ linuxbridge_neutron_agent as lb_agent from neutron.tests.common import config_fixtures from neutron.tests.common import net_helpers from neutron.tests.functional import base from neutron.tests import tools class LinuxbridgeCleanupTest(base.BaseSudoTestCase): def _test_linuxbridge_cleanup(self, bridge_exists, callback): br_fixture = self.useFixture( tools.SafeCleanupFixture( net_helpers.LinuxBridgeFixture( prefix=lb_agent.BRIDGE_NAME_PREFIX))).fixture config = callback(br_fixture) config.update({'VXLAN': {'enable_vxlan': 'False'}}) temp_dir = self.useFixture(fixtures.TempDir()).path conf = self.useFixture(config_fixtures.ConfigFileFixture( base_filename='neutron.conf', config=config, temp_dir=temp_dir)) cmd = 'neutron-linuxbridge-cleanup', '--config-file', conf.filename ip_wrapper = ip_lib.IPWrapper(br_fixture.namespace) ip_wrapper.netns.execute(cmd) self.assertEqual(bridge_exists, ip_lib.device_exists( br_fixture.bridge.name, br_fixture.namespace)) def test_cleanup_empty_bridge(self): def callback(br_fixture): return config_fixtures.ConfigDict() self._test_linuxbridge_cleanup(False, callback) def test_no_cleanup_bridge_with_tap(self): def callback(br_fixture): # TODO(cbrandily): refactor net_helpers to avoid mocking it mock.patch.object( net_helpers, 'VETH0_PREFIX', new_callable=mock.PropertyMock( return_value=constants.TAP_DEVICE_PREFIX + '0')).start() mock.patch.object( net_helpers, 'VETH1_PREFIX', new_callable=mock.PropertyMock( return_value=constants.TAP_DEVICE_PREFIX + '1')).start() self.useFixture( tools.SafeCleanupFixture( net_helpers.LinuxBridgePortFixture( br_fixture.bridge, br_fixture.namespace))) return config_fixtures.ConfigDict() self._test_linuxbridge_cleanup(True, callback) def test_no_cleanup_bridge_in_bridge_mappings(self): def callback(br_fixture): br_name = br_fixture.bridge.name conf = config_fixtures.ConfigDict() conf.update( {'LINUX_BRIDGE': {'bridge_mappings': 'physnet:%s' % br_name}}) return conf self._test_linuxbridge_cleanup(True, callback) neutron-8.4.0/neutron/tests/functional/cmd/__init__.py0000664000567000056710000000000013044372736024227 0ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/tests/functional/cmd/test_netns_cleanup.py0000664000567000056710000000511213044372760026373 0ustar jenkinsjenkins00000000000000# Copyright (c) 2015 Red Hat, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
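# These tests call the cleanup logic in-process via
# netns_cleanup.cleanup_network_namespaces(), with --force set so that
# non-empty namespaces are removed as well.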
import mock from neutron.agent.l3 import agent as l3_agent from neutron.agent.linux import dhcp from neutron.agent.linux import ip_lib from neutron.cmd import netns_cleanup from neutron.tests.common import net_helpers from neutron.tests.functional import base GET_NAMESPACES = 'neutron.agent.linux.ip_lib.IPWrapper.get_namespaces' TEST_INTERFACE_DRIVER = 'neutron.agent.linux.interface.OVSInterfaceDriver' class NetnsCleanupTest(base.BaseSudoTestCase): def setUp(self): super(NetnsCleanupTest, self).setUp() self.get_namespaces_p = mock.patch(GET_NAMESPACES) self.get_namespaces = self.get_namespaces_p.start() def setup_config(self, args=None): if args is None: args = [] # force option enabled to make sure non-empty namespaces are # cleaned up and deleted args.append('--force') self.conf = netns_cleanup.setup_conf() self.conf.set_override('interface_driver', TEST_INTERFACE_DRIVER) self.config_parse(conf=self.conf, args=args) def test_cleanup_network_namespaces_cleans_dhcp_and_l3_namespaces(self): dhcp_namespace = self.useFixture( net_helpers.NamespaceFixture(dhcp.NS_PREFIX)).name l3_namespace = self.useFixture( net_helpers.NamespaceFixture(l3_agent.NS_PREFIX)).name bridge = self.useFixture( net_helpers.VethPortFixture(namespace=dhcp_namespace)).bridge self.useFixture( net_helpers.VethPortFixture(bridge, l3_namespace)) # we scope the get_namespaces to our own ones not to affect other # tests, as otherwise cleanup will kill them all self.get_namespaces.return_value = [l3_namespace, dhcp_namespace] netns_cleanup.cleanup_network_namespaces(self.conf) self.get_namespaces_p.stop() namespaces_now = ip_lib.IPWrapper.get_namespaces() self.assertNotIn(l3_namespace, namespaces_now) self.assertNotIn(dhcp_namespace, namespaces_now) neutron-8.4.0/neutron/tests/tempest/0000775000567000056710000000000013044373210020670 5ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/tests/tempest/config.py0000664000567000056710000000321213044372760022516 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
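# A typical (hypothetical) caller resolves renamed Tempest options through
# safe_get_config_value() below, for example:
#
#     cidr = safe_get_config_value('network', 'project_network_cidr')
#
# which transparently falls back to the legacy 'tenant_network_cidr' name
# on older Tempest releases.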
from oslo_config import cfg from tempest import config CONF = config.CONF NeutronPluginOptions = [ cfg.BoolOpt('specify_floating_ip_address_available', default=True, help='Allow passing an IP Address of the floating ip when ' 'creating the floating ip')] # TODO(amuller): Redo configuration options registration as part of the planned # transition to the Tempest plugin architecture for opt in NeutronPluginOptions: CONF.register_opt(opt, 'neutron_plugin_options') config_opts_translator = { 'project_network_cidr': 'tenant_network_cidr', 'project_network_v6_cidr': 'tenant_network_v6_cidr', 'project_network_mask_bits': 'tenant_network_mask_bits', 'project_network_v6_mask_bits': 'tenant_network_v6_mask_bits'} def safe_get_config_value(group, name): """Safely get Oslo config opts from Tempest, using old and new names.""" conf_group = getattr(CONF, group) try: return getattr(conf_group, name) except cfg.NoSuchOptError: return getattr(conf_group, config_opts_translator[name]) neutron-8.4.0/neutron/tests/tempest/services/0000775000567000056710000000000013044373210022513 5ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/tests/tempest/services/__init__.py0000664000567000056710000000000013044372736024626 0ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/tests/tempest/services/network/0000775000567000056710000000000013044373210024204 5ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/tests/tempest/services/network/json/0000775000567000056710000000000013044373210025155 5ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/tests/tempest/services/network/json/__init__.py0000664000567000056710000000000013044372736027270 0ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/tests/tempest/services/network/json/network_client.py0000664000567000056710000007250413044372760030577 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import time from oslo_serialization import jsonutils as json from six.moves.urllib import parse as urlparse from tempest.lib.common import rest_client as service_client from tempest.lib import exceptions as lib_exc from neutron.tests.tempest import exceptions class NetworkClientJSON(service_client.RestClient): """ Tempest REST client for Neutron. Uses v2 of the Neutron API, since the V1 API has been removed from the code base. 
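    Most CRUD methods are synthesized on the fly by __getattr__ from the
    method name; e.g. a (hypothetical) call to list_networks(name='net1')
    is turned into GET /v2.0/networks?name=net1. Only resources with
    non-standard URIs or payloads get hand-written methods.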
Implements create, delete, update, list and show for the basic Neutron abstractions (networks, sub-networks, routers, ports and floating IP): Implements add/remove interface to router using subnet ID / port ID It also implements list, show, update and reset for OpenStack Networking quotas """ version = '2.0' uri_prefix = "v2.0" def get_uri(self, plural_name): # get service prefix from resource name # The following list represents resource names that do not require # changing underscore to a hyphen hyphen_exceptions = ["service_profiles"] # the following map is used to construct proper URI # for the given neutron resource service_resource_prefix_map = { 'bgp-peers': '', 'bgp-speakers': '', 'networks': '', 'subnets': '', 'subnetpools': '', 'ports': '', 'metering_labels': 'metering', 'metering_label_rules': 'metering', 'policies': 'qos', 'bandwidth_limit_rules': 'qos', 'rule_types': 'qos', 'rbac-policies': '', } service_prefix = service_resource_prefix_map.get( plural_name) if plural_name not in hyphen_exceptions: plural_name = plural_name.replace("_", "-") if service_prefix: uri = '%s/%s/%s' % (self.uri_prefix, service_prefix, plural_name) else: uri = '%s/%s' % (self.uri_prefix, plural_name) return uri def pluralize(self, resource_name): # get plural from map or just add 's' # map from resource name to a plural name # needed only for those which can't be constructed as name + 's' resource_plural_map = { 'security_groups': 'security_groups', 'security_group_rules': 'security_group_rules', 'quotas': 'quotas', 'qos_policy': 'policies', 'rbac_policy': 'rbac_policies', } return resource_plural_map.get(resource_name, resource_name + 's') def _lister(self, plural_name): def _list(**filters): uri = self.get_uri(plural_name) if filters: uri += '?' + urlparse.urlencode(filters, doseq=1) resp, body = self.get(uri) result = {plural_name: self.deserialize_list(body)} self.expected_success(200, resp.status) return service_client.ResponseBody(resp, result) return _list def _deleter(self, resource_name): def _delete(resource_id): plural = self.pluralize(resource_name) uri = '%s/%s' % (self.get_uri(plural), resource_id) resp, body = self.delete(uri) self.expected_success(204, resp.status) return service_client.ResponseBody(resp, body) return _delete def _shower(self, resource_name): def _show(resource_id, **fields): # fields is a dict which key is 'fields' and value is a # list of field's name. An example: # {'fields': ['id', 'name']} plural = self.pluralize(resource_name) uri = '%s/%s' % (self.get_uri(plural), resource_id) if fields: uri += '?' 
+ urlparse.urlencode(fields, doseq=1) resp, body = self.get(uri) body = self.deserialize_single(body) self.expected_success(200, resp.status) return service_client.ResponseBody(resp, body) return _show def _creater(self, resource_name): def _create(**kwargs): plural = self.pluralize(resource_name) uri = self.get_uri(plural) post_data = self.serialize({resource_name: kwargs}) resp, body = self.post(uri, post_data) body = self.deserialize_single(body) self.expected_success(201, resp.status) return service_client.ResponseBody(resp, body) return _create def _updater(self, resource_name): def _update(res_id, **kwargs): plural = self.pluralize(resource_name) uri = '%s/%s' % (self.get_uri(plural), res_id) post_data = self.serialize({resource_name: kwargs}) resp, body = self.put(uri, post_data) body = self.deserialize_single(body) self.expected_success(200, resp.status) return service_client.ResponseBody(resp, body) return _update def __getattr__(self, name): method_prefixes = ["list_", "delete_", "show_", "create_", "update_"] method_functors = [self._lister, self._deleter, self._shower, self._creater, self._updater] for index, prefix in enumerate(method_prefixes): prefix_len = len(prefix) if name[:prefix_len] == prefix: return method_functors[index](name[prefix_len:]) raise AttributeError(name) # Subnetpool methods def create_subnetpool(self, name, **kwargs): subnetpool_data = {'name': name} for arg in kwargs: subnetpool_data[arg] = kwargs[arg] post_data = {'subnetpool': subnetpool_data} body = self.serialize_list(post_data, "subnetpools", "subnetpool") uri = self.get_uri("subnetpools") resp, body = self.post(uri, body) body = {'subnetpool': self.deserialize_list(body)} self.expected_success(201, resp.status) return service_client.ResponseBody(resp, body) def get_subnetpool(self, id): uri = self.get_uri("subnetpools") subnetpool_uri = '%s/%s' % (uri, id) resp, body = self.get(subnetpool_uri) body = {'subnetpool': self.deserialize_list(body)} self.expected_success(200, resp.status) return service_client.ResponseBody(resp, body) def delete_subnetpool(self, id): uri = self.get_uri("subnetpools") subnetpool_uri = '%s/%s' % (uri, id) resp, body = self.delete(subnetpool_uri) self.expected_success(204, resp.status) return service_client.ResponseBody(resp, body) def list_subnetpools(self, **filters): uri = self.get_uri("subnetpools") if filters: uri = '?'.join([uri, urlparse.urlencode(filters)]) resp, body = self.get(uri) body = {'subnetpools': self.deserialize_list(body)} self.expected_success(200, resp.status) return service_client.ResponseBody(resp, body) def update_subnetpool(self, id, **kwargs): subnetpool_data = {} for arg in kwargs: subnetpool_data[arg] = kwargs[arg] post_data = {'subnetpool': subnetpool_data} body = self.serialize_list(post_data, "subnetpools", "subnetpool") uri = self.get_uri("subnetpools") subnetpool_uri = '%s/%s' % (uri, id) resp, body = self.put(subnetpool_uri, body) body = {'subnetpool': self.deserialize_list(body)} self.expected_success(200, resp.status) return service_client.ResponseBody(resp, body) # BGP speaker methods def create_bgp_speaker(self, post_data): body = self.serialize_list(post_data, "bgp-speakers", "bgp-speaker") uri = self.get_uri("bgp-speakers") resp, body = self.post(uri, body) body = {'bgp-speaker': self.deserialize_list(body)} self.expected_success(201, resp.status) return service_client.ResponseBody(resp, body) def get_bgp_speaker(self, id): uri = self.get_uri("bgp-speakers") bgp_speaker_uri = '%s/%s' % (uri, id) resp, body = 
self.get(bgp_speaker_uri) body = {'bgp-speaker': self.deserialize_list(body)} self.expected_success(200, resp.status) return service_client.ResponseBody(resp, body) def get_bgp_speakers(self): uri = self.get_uri("bgp-speakers") resp, body = self.get(uri) body = {'bgp-speakers': self.deserialize_list(body)} self.expected_success(200, resp.status) return service_client.ResponseBody(resp, body) def update_bgp_speaker(self, id, put_data): body = self.serialize_list(put_data, "bgp-speakers", "bgp-speaker") uri = self.get_uri("bgp-speakers") bgp_speaker_uri = '%s/%s' % (uri, id) resp, body = self.put(bgp_speaker_uri, body) body = {'bgp-speaker': self.deserialize_list(body)} self.expected_success(200, resp.status) return service_client.ResponseBody(resp, body) def delete_bgp_speaker(self, id): uri = self.get_uri("bgp-speakers") bgp_speaker_uri = '%s/%s' % (uri, id) resp, body = self.delete(bgp_speaker_uri) self.expected_success(204, resp.status) return service_client.ResponseBody(resp, body) def create_bgp_peer(self, post_data): body = self.serialize_list(post_data, "bgp-peers", "bgp-peer") uri = self.get_uri("bgp-peers") resp, body = self.post(uri, body) body = {'bgp-peer': self.deserialize_list(body)} self.expected_success(201, resp.status) return service_client.ResponseBody(resp, body) def get_bgp_peer(self, id): uri = self.get_uri("bgp-peers") bgp_speaker_uri = '%s/%s' % (uri, id) resp, body = self.get(bgp_speaker_uri) body = {'bgp-peer': self.deserialize_list(body)} self.expected_success(200, resp.status) return service_client.ResponseBody(resp, body) def delete_bgp_peer(self, id): uri = self.get_uri("bgp-peers") bgp_speaker_uri = '%s/%s' % (uri, id) resp, body = self.delete(bgp_speaker_uri) self.expected_success(204, resp.status) return service_client.ResponseBody(resp, body) def add_bgp_peer_with_id(self, bgp_speaker_id, bgp_peer_id): uri = '%s/bgp-speakers/%s/add_bgp_peer' % (self.uri_prefix, bgp_speaker_id) update_body = {"bgp_peer_id": bgp_peer_id} update_body = json.dumps(update_body) resp, body = self.put(uri, update_body) self.expected_success(200, resp.status) body = json.loads(body) return service_client.ResponseBody(resp, body) def remove_bgp_peer_with_id(self, bgp_speaker_id, bgp_peer_id): uri = '%s/bgp-speakers/%s/remove_bgp_peer' % (self.uri_prefix, bgp_speaker_id) update_body = {"bgp_peer_id": bgp_peer_id} update_body = json.dumps(update_body) resp, body = self.put(uri, update_body) self.expected_success(200, resp.status) body = json.loads(body) return service_client.ResponseBody(resp, body) def add_bgp_gateway_network(self, bgp_speaker_id, network_id): uri = '%s/bgp-speakers/%s/add_gateway_network' % (self.uri_prefix, bgp_speaker_id) update_body = {"network_id": network_id} update_body = json.dumps(update_body) resp, body = self.put(uri, update_body) self.expected_success(200, resp.status) body = json.loads(body) return service_client.ResponseBody(resp, body) def remove_bgp_gateway_network(self, bgp_speaker_id, network_id): uri = '%s/bgp-speakers/%s/remove_gateway_network' uri = uri % (self.uri_prefix, bgp_speaker_id) update_body = {"network_id": network_id} update_body = json.dumps(update_body) resp, body = self.put(uri, update_body) self.expected_success(200, resp.status) body = json.loads(body) return service_client.ResponseBody(resp, body) def get_bgp_advertised_routes(self, bgp_speaker_id): base_uri = '%s/bgp-speakers/%s/get_advertised_routes' uri = base_uri % (self.uri_prefix, bgp_speaker_id) resp, body = self.get(uri) body = {'advertised_routes': 
self.deserialize_list(body)} self.expected_success(200, resp.status) return service_client.ResponseBody(resp, body) def get_bgp_router_routes(self, router_id): base_uri = '%s/router-routes/%s' uri = base_uri % (self.uri_prefix, router_id) resp, body = self.get(uri) body = self.deserialize_list(body) self.expected_success(200, resp.status) return service_client.ResponseBody(resp, body) # Common methods that are hard to automate def create_bulk_network(self, names, shared=False): network_list = [{'name': name, 'shared': shared} for name in names] post_data = {'networks': network_list} body = self.serialize_list(post_data, "networks", "network") uri = self.get_uri("networks") resp, body = self.post(uri, body) body = {'networks': self.deserialize_list(body)} self.expected_success(201, resp.status) return service_client.ResponseBody(resp, body) def create_bulk_subnet(self, subnet_list): post_data = {'subnets': subnet_list} body = self.serialize_list(post_data, 'subnets', 'subnet') uri = self.get_uri('subnets') resp, body = self.post(uri, body) body = {'subnets': self.deserialize_list(body)} self.expected_success(201, resp.status) return service_client.ResponseBody(resp, body) def create_bulk_port(self, port_list): post_data = {'ports': port_list} body = self.serialize_list(post_data, 'ports', 'port') uri = self.get_uri('ports') resp, body = self.post(uri, body) body = {'ports': self.deserialize_list(body)} self.expected_success(201, resp.status) return service_client.ResponseBody(resp, body) def wait_for_resource_deletion(self, resource_type, id): """Waits for a resource to be deleted.""" start_time = int(time.time()) while True: if self.is_resource_deleted(resource_type, id): return if int(time.time()) - start_time >= self.build_timeout: raise exceptions.TimeoutException time.sleep(self.build_interval) def is_resource_deleted(self, resource_type, id): method = 'show_' + resource_type try: getattr(self, method)(id) except AttributeError: raise Exception("Unknown resource type %s " % resource_type) except lib_exc.NotFound: return True return False def deserialize_single(self, body): return json.loads(body) def deserialize_list(self, body): res = json.loads(body) # expecting response in form # {'resources': [ res1, res2] } => when pagination disabled # {'resources': [..], 'resources_links': {}} => if pagination enabled for k in res.keys(): if k.endswith("_links"): continue return res[k] def serialize(self, data): return json.dumps(data) def serialize_list(self, data, root=None, item=None): return self.serialize(data) def update_quotas(self, tenant_id, **kwargs): put_body = {'quota': kwargs} body = json.dumps(put_body) uri = '%s/quotas/%s' % (self.uri_prefix, tenant_id) resp, body = self.put(uri, body) self.expected_success(200, resp.status) body = json.loads(body) return service_client.ResponseBody(resp, body['quota']) def reset_quotas(self, tenant_id): uri = '%s/quotas/%s' % (self.uri_prefix, tenant_id) resp, body = self.delete(uri) self.expected_success(204, resp.status) return service_client.ResponseBody(resp, body) def create_router(self, name, admin_state_up=True, **kwargs): post_body = {'router': kwargs} post_body['router']['name'] = name post_body['router']['admin_state_up'] = admin_state_up body = json.dumps(post_body) uri = '%s/routers' % (self.uri_prefix) resp, body = self.post(uri, body) self.expected_success(201, resp.status) body = json.loads(body) return service_client.ResponseBody(resp, body) def _update_router(self, router_id, set_enable_snat, **kwargs): uri = '%s/routers/%s' % 
(self.uri_prefix, router_id) resp, body = self.get(uri) self.expected_success(200, resp.status) body = json.loads(body) update_body = {} update_body['name'] = kwargs.get('name', body['router']['name']) update_body['admin_state_up'] = kwargs.get( 'admin_state_up', body['router']['admin_state_up']) if 'description' in kwargs: update_body['description'] = kwargs['description'] cur_gw_info = body['router']['external_gateway_info'] if cur_gw_info: # TODO(kevinbenton): setting the external gateway info is not # allowed for a regular tenant. If the ability to update is also # merged, a test case for this will need to be added similar to # the SNAT case. cur_gw_info.pop('external_fixed_ips', None) if not set_enable_snat: cur_gw_info.pop('enable_snat', None) update_body['external_gateway_info'] = kwargs.get( 'external_gateway_info', body['router']['external_gateway_info']) if 'distributed' in kwargs: update_body['distributed'] = kwargs['distributed'] update_body = dict(router=update_body) update_body = json.dumps(update_body) resp, body = self.put(uri, update_body) self.expected_success(200, resp.status) body = json.loads(body) return service_client.ResponseBody(resp, body) def update_router(self, router_id, **kwargs): """Update a router leaving enable_snat to its default value.""" # If external_gateway_info contains enable_snat the request will fail # with 404 unless executed with admin client, and therefore we instruct # _update_router to not set this attribute # NOTE(salv-orlando): The above applies as long as Neutron's default # policy is to restrict enable_snat usage to admins only. return self._update_router(router_id, set_enable_snat=False, **kwargs) def update_router_with_snat_gw_info(self, router_id, **kwargs): """Update a router passing also the enable_snat attribute. This method must be execute with admin credentials, otherwise the API call will return a 404 error. 
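        Illustrative usage (net_id stands in for a real external network
        UUID)::

            admin_client.update_router_with_snat_gw_info(
                router_id,
                external_gateway_info={'network_id': net_id,
                                       'enable_snat': False})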
""" return self._update_router(router_id, set_enable_snat=True, **kwargs) def add_router_interface_with_subnet_id(self, router_id, subnet_id): uri = '%s/routers/%s/add_router_interface' % (self.uri_prefix, router_id) update_body = {"subnet_id": subnet_id} update_body = json.dumps(update_body) resp, body = self.put(uri, update_body) self.expected_success(200, resp.status) body = json.loads(body) return service_client.ResponseBody(resp, body) def add_router_interface_with_port_id(self, router_id, port_id): uri = '%s/routers/%s/add_router_interface' % (self.uri_prefix, router_id) update_body = {"port_id": port_id} update_body = json.dumps(update_body) resp, body = self.put(uri, update_body) self.expected_success(200, resp.status) body = json.loads(body) return service_client.ResponseBody(resp, body) def remove_router_interface_with_subnet_id(self, router_id, subnet_id): uri = '%s/routers/%s/remove_router_interface' % (self.uri_prefix, router_id) update_body = {"subnet_id": subnet_id} update_body = json.dumps(update_body) resp, body = self.put(uri, update_body) self.expected_success(200, resp.status) body = json.loads(body) return service_client.ResponseBody(resp, body) def remove_router_interface_with_port_id(self, router_id, port_id): uri = '%s/routers/%s/remove_router_interface' % (self.uri_prefix, router_id) update_body = {"port_id": port_id} update_body = json.dumps(update_body) resp, body = self.put(uri, update_body) self.expected_success(200, resp.status) body = json.loads(body) return service_client.ResponseBody(resp, body) def list_router_interfaces(self, uuid): uri = '%s/ports?device_id=%s' % (self.uri_prefix, uuid) resp, body = self.get(uri) self.expected_success(200, resp.status) body = json.loads(body) return service_client.ResponseBody(resp, body) def update_agent(self, agent_id, agent_info): """ :param agent_info: Agent update information. 
E.g. {"admin_state_up": True} """ uri = '%s/agents/%s' % (self.uri_prefix, agent_id) agent = {"agent": agent_info} body = json.dumps(agent) resp, body = self.put(uri, body) self.expected_success(200, resp.status) body = json.loads(body) return service_client.ResponseBody(resp, body) def list_routers_on_l3_agent(self, agent_id): uri = '%s/agents/%s/l3-routers' % (self.uri_prefix, agent_id) resp, body = self.get(uri) self.expected_success(200, resp.status) body = json.loads(body) return service_client.ResponseBody(resp, body) def list_l3_agents_hosting_router(self, router_id): uri = '%s/routers/%s/l3-agents' % (self.uri_prefix, router_id) resp, body = self.get(uri) self.expected_success(200, resp.status) body = json.loads(body) return service_client.ResponseBody(resp, body) def add_router_to_l3_agent(self, agent_id, router_id): uri = '%s/agents/%s/l3-routers' % (self.uri_prefix, agent_id) post_body = {"router_id": router_id} body = json.dumps(post_body) resp, body = self.post(uri, body) self.expected_success(201, resp.status) body = json.loads(body) return service_client.ResponseBody(resp, body) def remove_router_from_l3_agent(self, agent_id, router_id): uri = '%s/agents/%s/l3-routers/%s' % ( self.uri_prefix, agent_id, router_id) resp, body = self.delete(uri) self.expected_success(204, resp.status) return service_client.ResponseBody(resp, body) def list_dhcp_agent_hosting_network(self, network_id): uri = '%s/networks/%s/dhcp-agents' % (self.uri_prefix, network_id) resp, body = self.get(uri) self.expected_success(200, resp.status) body = json.loads(body) return service_client.ResponseBody(resp, body) def list_networks_hosted_by_one_dhcp_agent(self, agent_id): uri = '%s/agents/%s/dhcp-networks' % (self.uri_prefix, agent_id) resp, body = self.get(uri) self.expected_success(200, resp.status) body = json.loads(body) return service_client.ResponseBody(resp, body) def remove_network_from_dhcp_agent(self, agent_id, network_id): uri = '%s/agents/%s/dhcp-networks/%s' % (self.uri_prefix, agent_id, network_id) resp, body = self.delete(uri) self.expected_success(204, resp.status) return service_client.ResponseBody(resp, body) def update_extra_routes(self, router_id, nexthop, destination): uri = '%s/routers/%s' % (self.uri_prefix, router_id) put_body = { 'router': { 'routes': [{'nexthop': nexthop, "destination": destination}] } } body = json.dumps(put_body) resp, body = self.put(uri, body) self.expected_success(200, resp.status) body = json.loads(body) return service_client.ResponseBody(resp, body) def delete_extra_routes(self, router_id): uri = '%s/routers/%s' % (self.uri_prefix, router_id) null_routes = None put_body = { 'router': { 'routes': null_routes } } body = json.dumps(put_body) resp, body = self.put(uri, body) self.expected_success(200, resp.status) body = json.loads(body) return service_client.ResponseBody(resp, body) def add_dhcp_agent_to_network(self, agent_id, network_id): post_body = {'network_id': network_id} body = json.dumps(post_body) uri = '%s/agents/%s/dhcp-networks' % (self.uri_prefix, agent_id) resp, body = self.post(uri, body) self.expected_success(201, resp.status) body = json.loads(body) return service_client.ResponseBody(resp, body) def list_qos_policies(self, **filters): if filters: uri = '%s/qos/policies?%s' % (self.uri_prefix, urlparse.urlencode(filters)) else: uri = '%s/qos/policies' % self.uri_prefix resp, body = self.get(uri) self.expected_success(200, resp.status) body = json.loads(body) return service_client.ResponseBody(resp, body) def create_qos_policy(self, name,
description, shared, tenant_id=None): uri = '%s/qos/policies' % self.uri_prefix post_data = {'policy': { 'name': name, 'description': description, 'shared': shared }} if tenant_id is not None: post_data['policy']['tenant_id'] = tenant_id resp, body = self.post(uri, self.serialize(post_data)) body = self.deserialize_single(body) self.expected_success(201, resp.status) return service_client.ResponseBody(resp, body) def update_qos_policy(self, policy_id, **kwargs): uri = '%s/qos/policies/%s' % (self.uri_prefix, policy_id) post_data = self.serialize({'policy': kwargs}) resp, body = self.put(uri, post_data) body = self.deserialize_single(body) self.expected_success(200, resp.status) return service_client.ResponseBody(resp, body) def create_bandwidth_limit_rule(self, policy_id, max_kbps, max_burst_kbps): uri = '%s/qos/policies/%s/bandwidth_limit_rules' % ( self.uri_prefix, policy_id) post_data = self.serialize( {'bandwidth_limit_rule': { 'max_kbps': max_kbps, 'max_burst_kbps': max_burst_kbps} }) resp, body = self.post(uri, post_data) self.expected_success(201, resp.status) body = json.loads(body) return service_client.ResponseBody(resp, body) def list_bandwidth_limit_rules(self, policy_id): uri = '%s/qos/policies/%s/bandwidth_limit_rules' % ( self.uri_prefix, policy_id) resp, body = self.get(uri) body = self.deserialize_single(body) self.expected_success(200, resp.status) return service_client.ResponseBody(resp, body) def show_bandwidth_limit_rule(self, policy_id, rule_id): uri = '%s/qos/policies/%s/bandwidth_limit_rules/%s' % ( self.uri_prefix, policy_id, rule_id) resp, body = self.get(uri) body = self.deserialize_single(body) self.expected_success(200, resp.status) return service_client.ResponseBody(resp, body) def update_bandwidth_limit_rule(self, policy_id, rule_id, **kwargs): uri = '%s/qos/policies/%s/bandwidth_limit_rules/%s' % ( self.uri_prefix, policy_id, rule_id) post_data = {'bandwidth_limit_rule': kwargs} resp, body = self.put(uri, json.dumps(post_data)) body = self.deserialize_single(body) self.expected_success(200, resp.status) return service_client.ResponseBody(resp, body) def delete_bandwidth_limit_rule(self, policy_id, rule_id): uri = '%s/qos/policies/%s/bandwidth_limit_rules/%s' % ( self.uri_prefix, policy_id, rule_id) resp, body = self.delete(uri) self.expected_success(204, resp.status) return service_client.ResponseBody(resp, body) def list_qos_rule_types(self): uri = '%s/qos/rule-types' % self.uri_prefix resp, body = self.get(uri) self.expected_success(200, resp.status) body = json.loads(body) return service_client.ResponseBody(resp, body) def get_auto_allocated_topology(self, tenant_id=None): uri = '%s/auto-allocated-topology/%s' % (self.uri_prefix, tenant_id) resp, body = self.get(uri) self.expected_success(200, resp.status) body = json.loads(body) return service_client.ResponseBody(resp, body) neutron-8.4.0/neutron/tests/tempest/services/network/__init__.py0000664000567000056710000000000013044372736026317 0ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/tests/tempest/__init__.py0000664000567000056710000000000013044372736023003 0ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/tests/tempest/common/0000775000567000056710000000000013044373210022160 5ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/tests/tempest/common/__init__.py0000664000567000056710000000000013044372736024273 0ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/tests/tempest/common/tempest_fixtures.py0000664000567000056710000000144613044372736026165 0ustar 
jenkinsjenkins00000000000000# Copyright 2013 IBM Corp. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_concurrency.fixture import lockutils class LockFixture(lockutils.LockFixture): def __init__(self, name): super(LockFixture, self).__init__(name, 'tempest-') neutron-8.4.0/neutron/tests/tempest/README.rst0000664000567000056710000000074613044372760022377 0ustar jenkinsjenkins00000000000000WARNING ======= The files under this path were copied from tempest as part of the move of the api tests, and they will be removed as required over time to minimize the dependency on the tempest testing framework. While it exists, only neutron.tests.api and neutron.tests.retargetable should be importing files from this path. neutron.tests.tempest.config uses the global cfg.CONF instance, and importing it outside of the api tests has the potential to break Neutron's use of cfg.CONF. neutron-8.4.0/neutron/tests/tempest/exceptions.py0000664000567000056710000000171013044372736023436 0ustar jenkinsjenkins00000000000000# Copyright 2012 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from tempest.lib import exceptions TempestException = exceptions.TempestException class InvalidConfiguration(TempestException): message = "Invalid Configuration" class InvalidCredentials(TempestException): message = "Invalid Credentials" class InvalidServiceTag(TempestException): message = "Invalid service tag" neutron-8.4.0/neutron/tests/post_mortem_debug.py0000664000567000056710000001021513044372736023312 0ustar jenkinsjenkins00000000000000# Copyright 2013 Red Hat, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License.
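# Illustrative usage sketch (the test module path below is a placeholder):
# the unit test base class enables this module when the
# OS_POST_MORTEM_DEBUGGER environment variable names an importable debugger
# that exposes post_mortem(), e.g. pdb:
#
#     OS_POST_MORTEM_DEBUGGER=pdb python -m testtools.run \
#         neutron.tests.unit.test_something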
import functools import traceback def get_exception_handler(debugger_name): debugger = _get_debugger(debugger_name) return functools.partial(_exception_handler, debugger) def _get_debugger(debugger_name): try: debugger = __import__(debugger_name) except ImportError: raise ValueError("can't import %s module as a post mortem debugger" % debugger_name) if 'post_mortem' in dir(debugger): return debugger else: raise ValueError("%s is not a supported post mortem debugger" % debugger_name) def _exception_handler(debugger, exc_info): """Exception handler enabling post-mortem debugging. A class extending testtools.TestCase can add this handler in setUp(): self.addOnException( post_mortem_debug.get_exception_handler(debugger_name)) When an exception occurs, the user will be dropped into a debugger session in the execution environment of the failure. Frames associated with the testing framework are excluded so that the post-mortem session for an assertion failure will start at the assertion call (e.g. self.assertTrue) rather than the framework code that raises the failure exception (e.g. the assertTrue method). """ tb = exc_info[2] ignored_traceback = get_ignored_traceback(tb) if ignored_traceback: tb = FilteredTraceback(tb, ignored_traceback) traceback.print_exception(exc_info[0], exc_info[1], tb) debugger.post_mortem(tb) def get_ignored_traceback(tb): """Retrieve the first traceback of an ignored trailing chain. Given an initial traceback, find the first traceback of a trailing chain of tracebacks that should be ignored. The criterion for whether a traceback should be ignored is whether its frame's globals include the __unittest marker variable. This criterion is culled from: unittest.TestResult._is_relevant_tb_level For example: tb.tb_next => tb0.tb_next => tb1.tb_next - If no tracebacks were to be ignored, None would be returned. - If only tb1 was to be ignored, tb1 would be returned. - If tb0 and tb1 were to be ignored, tb0 would be returned. - If only tb or only tb0 were to be ignored, None would be returned, because neither tb nor tb0 would be part of a trailing chain of ignored tracebacks. """ # Turn the traceback chain into a list tb_list = [] while tb: tb_list.append(tb) tb = tb.tb_next # Find all members of an ignored trailing chain ignored_tracebacks = [] for tb in reversed(tb_list): if '__unittest' in tb.tb_frame.f_globals: ignored_tracebacks.append(tb) else: break # Return the first member of the ignored trailing chain if ignored_tracebacks: return ignored_tracebacks[-1] class FilteredTraceback(object): """Wraps a traceback to filter unwanted frames.""" def __init__(self, tb, filtered_traceback): """Constructor. :param tb: The start of the traceback chain to filter. :param filtered_traceback: The first traceback of a trailing chain that is to be filtered. """ self._tb = tb self.tb_lasti = self._tb.tb_lasti self.tb_lineno = self._tb.tb_lineno self.tb_frame = self._tb.tb_frame self._filtered_traceback = filtered_traceback @property def tb_next(self): tb_next = self._tb.tb_next if tb_next and tb_next != self._filtered_traceback: return FilteredTraceback(tb_next, self._filtered_traceback) neutron-8.4.0/neutron/tests/fake_notifier.py0000664000567000056710000000334313044372736022401 0ustar jenkinsjenkins00000000000000# Copyright 2014 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License.
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import collections import functools NOTIFICATIONS = [] def reset(): del NOTIFICATIONS[:] FakeMessage = collections.namedtuple('Message', ['publisher_id', 'priority', 'event_type', 'payload']) class FakeNotifier(object): def __init__(self, transport, publisher_id=None, driver=None, topic=None, serializer=None, retry=None): self.transport = transport self.publisher_id = publisher_id for priority in ('debug', 'info', 'warn', 'error', 'critical'): setattr(self, priority, functools.partial(self._notify, priority=priority.upper())) def prepare(self, publisher_id=None): if publisher_id is None: publisher_id = self.publisher_id return self.__class__(self.transport, publisher_id) def _notify(self, ctxt, event_type, payload, priority): msg = dict(publisher_id=self.publisher_id, priority=priority, event_type=event_type, payload=payload) NOTIFICATIONS.append(msg) neutron-8.4.0/neutron/tests/base.py0000664000567000056710000004005113044372760020504 0ustar jenkinsjenkins00000000000000# Copyright 2010-2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Base test cases for all neutron tests. """ import contextlib import gc import os import os.path import random import weakref import eventlet.timeout import fixtures import mock from oslo_concurrency.fixture import lockutils from oslo_config import cfg from oslo_messaging import conffixture as messaging_conffixture from oslo_utils import excutils from oslo_utils import strutils from oslotest import base import six import testtools from neutron._i18n import _ from neutron.agent.linux import external_process from neutron.api.rpc.callbacks.consumer import registry as rpc_consumer_reg from neutron.callbacks import manager as registry_manager from neutron.callbacks import registry from neutron.common import config from neutron.common import constants from neutron.common import rpc as n_rpc from neutron.db import agentschedulers_db from neutron import manager from neutron import policy from neutron.tests import fake_notifier from neutron.tests import post_mortem_debug from neutron.tests import tools CONF = cfg.CONF CONF.import_opt('state_path', 'neutron.common.config') ROOTDIR = os.path.dirname(__file__) ETCDIR = os.path.join(ROOTDIR, 'etc') def etcdir(*p): return os.path.join(ETCDIR, *p) def fake_use_fatal_exceptions(*args): return True def get_rand_name(max_length=None, prefix='test'): """Return a random string. The string will start with 'prefix' and will be exactly 'max_length' characters long. If 'max_length' is None, then exactly 8 random characters, each hexadecimal, will be added.
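    For example (illustrative): get_rand_name(max_length=12, prefix='net') could return 'net490832157', while get_rand_name() could return 'test7a3f9c2b'.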
If 'max_length' is not greater than len(prefix), ValueError will be raised to indicate the problem. """ if max_length: length = max_length - len(prefix) if length <= 0: raise ValueError("'max_length' must be bigger than 'len(prefix)'.") suffix = ''.join(str(random.randint(0, 9)) for i in range(length)) else: suffix = hex(random.randint(0x10000000, 0x7fffffff))[2:] return prefix + suffix def get_rand_device_name(prefix='test'): return get_rand_name( max_length=constants.DEVICE_NAME_MAX_LEN, prefix=prefix) def bool_from_env(key, strict=False, default=False): value = os.environ.get(key) return strutils.bool_from_string(value, strict=strict, default=default) def sanitize_log_path(path): # Sanitize the string so that its log path is shell friendly return path.replace(' ', '-').replace('(', '_').replace(')', '_') class AttributeDict(dict): """Provide attribute access (dict.key) to dictionary values.""" def __getattr__(self, name): """Allow attribute access for all keys in the dict.""" if name in self: return self[name] raise AttributeError(_("Unknown attribute '%s'.") % name) class DietTestCase(base.BaseTestCase): """Same great taste, less filling. BaseTestCase is responsible for doing lots of plugin-centric setup that not all tests require (or can tolerate). This class provides only functionality that is common across all tests. """ def setUp(self): super(DietTestCase, self).setUp() # FIXME(amuller): this must be called in the Neutron unit tests base # class to initialize the DB connection string. Moving this may cause # non-deterministic failures. See bug #1489098 for more info. config.set_db_defaults() # Configure this first to ensure pm debugging support for setUp() debugger = os.environ.get('OS_POST_MORTEM_DEBUGGER') if debugger: self.addOnException(post_mortem_debug.get_exception_handler( debugger)) # Make sure we see all relevant deprecation warnings when running tests self.useFixture(tools.WarningsFixture()) # NOTE(ihrachys): oslotest already sets stopall for cleanup, but it # does it using six.moves.mock (the library was moved into # unittest.mock in Python 3.4). So until we switch to six.moves.mock # everywhere in unit tests, we can't remove this setup. The base class # is used in third-party projects, so we would need to switch all of # them to six before removing the cleanup callback from here.
self.addCleanup(mock.patch.stopall) self.addOnException(self.check_for_systemexit) self.orig_pid = os.getpid() tools.reset_random_seed() def addOnException(self, handler): def safe_handler(*args, **kwargs): try: return handler(*args, **kwargs) except Exception: with excutils.save_and_reraise_exception(reraise=False) as ctx: self.addDetail('failure in exception handler %s' % handler, testtools.content.TracebackContent( (ctx.type_, ctx.value, ctx.tb), self)) return super(DietTestCase, self).addOnException(safe_handler) def check_for_systemexit(self, exc_info): if isinstance(exc_info[1], SystemExit): if os.getpid() != self.orig_pid: # Subprocess - let it just exit raise # This makes sys.exit(0) still a failure self.force_failure = True @contextlib.contextmanager def assert_max_execution_time(self, max_execution_time=5): with eventlet.timeout.Timeout(max_execution_time, False): yield return self.fail('Execution of this test timed out') def assertOrderedEqual(self, expected, actual): expect_val = self.sort_dict_lists(expected) actual_val = self.sort_dict_lists(actual) self.assertEqual(expect_val, actual_val) def sort_dict_lists(self, dic): for key, value in six.iteritems(dic): if isinstance(value, list): dic[key] = sorted(value) elif isinstance(value, dict): dic[key] = self.sort_dict_lists(value) return dic def assertDictSupersetOf(self, expected_subset, actual_superset): """Checks that actual dict contains the expected dict. After checking that the arguments are of the right type, this checks that each item in expected_subset is in, and matches, what is in actual_superset. Separate tests are done, so that detailed info can be reported upon failure. """ if not isinstance(expected_subset, dict): self.fail("expected_subset (%s) is not an instance of dict" % type(expected_subset)) if not isinstance(actual_superset, dict): self.fail("actual_superset (%s) is not an instance of dict" % type(actual_superset)) for k, v in expected_subset.items(): self.assertIn(k, actual_superset) self.assertEqual(v, actual_superset[k], "Key %(key)s expected: %(exp)r, actual %(act)r" % {'key': k, 'exp': v, 'act': actual_superset[k]}) class ProcessMonitorFixture(fixtures.Fixture): """Test fixture to capture and cleanup any spawn process monitor.""" def _setUp(self): self.old_callable = ( external_process.ProcessMonitor._spawn_checking_thread) p = mock.patch("neutron.agent.linux.external_process.ProcessMonitor." 
"_spawn_checking_thread", new=lambda x: self.record_calls(x)) p.start() self.instances = [] self.addCleanup(self.stop) def stop(self): for instance in self.instances: instance.stop() def record_calls(self, instance): self.old_callable(instance) self.instances.append(instance) class BaseTestCase(DietTestCase): @staticmethod def config_parse(conf=None, args=None): """Create the default configurations.""" # neutron.conf includes rpc_backend which needs to be cleaned up if args is None: args = [] args += ['--config-file', etcdir('neutron.conf')] if conf is None: config.init(args=args) else: conf(args) def setUp(self): super(BaseTestCase, self).setUp() self.useFixture(lockutils.ExternalLockFixture()) cfg.CONF.set_override('state_path', self.get_default_temp_dir().path) self.addCleanup(CONF.reset) self.useFixture(ProcessMonitorFixture()) self.useFixture(fixtures.MonkeyPatch( 'neutron.common.exceptions.NeutronException.use_fatal_exceptions', fake_use_fatal_exceptions)) self.useFixture(fixtures.MonkeyPatch( 'oslo_config.cfg.find_config_files', lambda project=None, prog=None, extension=None: [])) self.setup_rpc_mocks() self.setup_config() self.setup_test_registry_instance() policy.init() self.addCleanup(policy.reset) self.addCleanup(rpc_consumer_reg.clear) def get_new_temp_dir(self): """Create a new temporary directory. :returns fixtures.TempDir """ return self.useFixture(fixtures.TempDir()) def get_default_temp_dir(self): """Create a default temporary directory. Returns the same directory during the whole test case. :returns fixtures.TempDir """ if not hasattr(self, '_temp_dir'): self._temp_dir = self.get_new_temp_dir() return self._temp_dir def get_temp_file_path(self, filename, root=None): """Returns an absolute path for a temporary file. If root is None, the file is created in default temporary directory. It also creates the directory if it's not initialized yet. If root is not None, the file is created inside the directory passed as root= argument. :param filename: filename :type filename: string :param root: temporary directory to create a new file in :type root: fixtures.TempDir :returns absolute file path string """ root = root or self.get_default_temp_dir() return root.join(filename) def setup_rpc_mocks(self): # don't actually start RPC listeners when testing mock.patch( 'neutron.common.rpc.Connection.consume_in_threads', return_value=[]).start() self.useFixture(fixtures.MonkeyPatch( 'oslo_messaging.Notifier', fake_notifier.FakeNotifier)) self.messaging_conf = messaging_conffixture.ConfFixture(CONF) self.messaging_conf.transport_driver = 'fake' # NOTE(russellb) We want all calls to return immediately. self.messaging_conf.response_timeout = 0 self.useFixture(self.messaging_conf) self.addCleanup(n_rpc.clear_extra_exmods) n_rpc.add_extra_exmods('neutron.test') self.addCleanup(n_rpc.cleanup) n_rpc.init(CONF) def setup_test_registry_instance(self): """Give a private copy of the registry to each test.""" self._callback_manager = registry_manager.CallbacksManager() mock.patch.object(registry, '_get_callback_manager', return_value=self._callback_manager).start() def setup_config(self, args=None): """Tests that need a non-default config can override this method.""" self.config_parse(args=args) def config(self, **kw): """Override some configuration values. The keyword arguments are the names of configuration options to override and their values. If a group argument is supplied, the overrides are applied to the specified configuration option group. 
All overrides are automatically cleared at the end of the current test by the fixtures cleanup process. """ group = kw.pop('group', None) for k, v in six.iteritems(kw): CONF.set_override(k, v, group) def setup_coreplugin(self, core_plugin=None): cp = PluginFixture(core_plugin) self.useFixture(cp) self.patched_dhcp_periodic = cp.patched_dhcp_periodic self.patched_default_svc_plugins = cp.patched_default_svc_plugins def setup_notification_driver(self, notification_driver=None): self.addCleanup(fake_notifier.reset) if notification_driver is None: notification_driver = [fake_notifier.__name__] cfg.CONF.set_override("notification_driver", notification_driver) class PluginFixture(fixtures.Fixture): def __init__(self, core_plugin=None): super(PluginFixture, self).__init__() self.core_plugin = core_plugin def _setUp(self): # Do not load default service plugins in the testing framework # as all the mocking involved can cause havoc. self.default_svc_plugins_p = mock.patch( 'neutron.manager.NeutronManager._get_default_service_plugins') self.patched_default_svc_plugins = self.default_svc_plugins_p.start() self.dhcp_periodic_p = mock.patch( 'neutron.db.agentschedulers_db.DhcpAgentSchedulerDbMixin.' 'start_periodic_dhcp_agent_status_check') self.patched_dhcp_periodic = self.dhcp_periodic_p.start() self.agent_health_check_p = mock.patch( 'neutron.db.agentschedulers_db.DhcpAgentSchedulerDbMixin.' 'add_agent_status_check') self.agent_health_check = self.agent_health_check_p.start() # Plugin cleanup should be triggered last so that # test-specific cleanup has a chance to release references. self.addCleanup(self.cleanup_core_plugin) if self.core_plugin is not None: cfg.CONF.set_override('core_plugin', self.core_plugin) def cleanup_core_plugin(self): """Ensure that the core plugin is deallocated.""" nm = manager.NeutronManager if not nm.has_instance(): return # TODO(marun) Fix plugins that do not properly initialize notifiers agentschedulers_db.AgentSchedulerDbMixin.agent_notifiers = {} # Perform a check for deallocation only if explicitly # configured to do so since calling gc.collect() after every # test increases test suite execution time by ~50%. check_plugin_deallocation = ( bool_from_env('OS_CHECK_PLUGIN_DEALLOCATION')) if check_plugin_deallocation: plugin = weakref.ref(nm._instance.plugin) nm.clear_instance() if check_plugin_deallocation: gc.collect() # TODO(marun) Ensure that mocks are deallocated? if plugin() and not isinstance(plugin(), mock.Base): raise AssertionError( 'The plugin for this test was not deallocated.') class Timeout(fixtures.Fixture): """Setup per test timeouts. In order to avoid test deadlocks we support setting up a test timeout parameter read from the environment. In almost all cases where the timeout is reached this means a deadlock. A scaling factor allows extremely long tests to specify they need more time. """ def __init__(self, timeout=None, scaling=1): super(Timeout, self).__init__() if timeout is None: timeout = os.environ.get('OS_TEST_TIMEOUT', 0) try: self.test_timeout = int(timeout) except ValueError: # If timeout value is invalid do not set a timeout. 
self.test_timeout = 0 if scaling >= 1: self.test_timeout *= scaling else: raise ValueError('scaling value must be >= 1') def setUp(self): super(Timeout, self).setUp() if self.test_timeout > 0: self.useFixture(fixtures.Timeout(self.test_timeout, gentle=True)) neutron-8.4.0/neutron/tests/retargetable/0000775000567000056710000000000013044373210021650 5ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/tests/retargetable/rest_fixture.py0000664000567000056710000000460613044372736024767 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you # may not use this file except in compliance with the License. You may # obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. See the License for the specific language governing # permissions and limitations under the License. """ This module defines a client fixture that can be used to target a deployed neutron daemon. The potential for conflict between Tempest configuration and Neutron configuration requires that neutron.tests.tempest imports be isolated in this module for now. """ from tempest.lib import exceptions as tlib_exceptions from neutron.tests import base from neutron.tests.retargetable import client_fixtures from tempest import test as t_test class RestClientFixture(client_fixtures.AbstractClientFixture): """Targets the Neutron API via REST.""" @property def client(self): if not hasattr(self, '_client'): manager = t_test.BaseTestCase.get_client_manager() self._client = manager.network_client return self._client @property def NotFound(self): return tlib_exceptions.NotFound def _cleanup_network(self, id_): try: self.delete_network(id_) except self.NotFound: pass def create_network(self, **kwargs): network = self._create_network(**kwargs) self.addCleanup(self._cleanup_network, network.id) return network def _create_network(self, **kwargs): # Internal method - use create_network() instead body = self.client.create_network(**kwargs) return base.AttributeDict(body['network']) def update_network(self, id_, **kwargs): body = self.client.update_network(id_, **kwargs) return base.AttributeDict(body['network']) def get_network(self, id_, **kwargs): body = self.client.show_network(id_, **kwargs) return base.AttributeDict(body['network']) def get_networks(self, **kwargs): body = self.client.list_networks(**kwargs) return [base.AttributeDict(x) for x in body['networks']] def delete_network(self, id_): self.client.delete_network(id_) neutron-8.4.0/neutron/tests/retargetable/test_example.py0000664000567000056710000000327313044372760024732 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you # may not use this file except in compliance with the License. You may # obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. See the License for the specific language governing # permissions and limitations under the License. 
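# Illustrative invocation (assuming a configured development environment):
#
#     python -m testtools.run neutron.tests.retargetable.test_example
#
# The scenarios come from neutron.tests.retargetable.base.get_scenarios():
# by default each test runs once per configured plugin client fixture, and
# setting OS_TEST_API_WITH_REST=1 selects the tempest REST fixture instead.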
import testtools from neutron.tests import base as tests_base from neutron.tests.retargetable import base class TestExample(base.RetargetableApiTest): """This class is an example of how to write a retargetable api test. See the parent class for details about how the 'client' attribute is configured via testscenarios. """ def test_network_lifecycle(self): net = self.client.create_network(name=tests_base.get_rand_name()) listed_networks = {x.id: x.name for x in self.client.get_networks()} self.assertIn(net.id, listed_networks) self.assertEqual(listed_networks[net.id], net.name, 'Listed network name is not as expected.') updated_name = 'new %s' % net.name updated_net = self.client.update_network(net.id, name=updated_name) self.assertEqual(updated_name, updated_net.name, 'Updated network name is not as expected.') self.client.delete_network(net.id) with testtools.ExpectedException(self.client.NotFound, msg='Network was not deleted'): self.client.get_network(net.id) neutron-8.4.0/neutron/tests/retargetable/__init__.py0000664000567000056710000000000013044372736023763 0ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/tests/retargetable/base.py0000664000567000056710000000542213044372736023153 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you # may not use this file except in compliance with the License. You may # obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. See the License for the specific language governing # permissions and limitations under the License. """ This module defines a base test case that uses testscenarios to parametrize the test methods of subclasses by varying the client fixture used to target the Neutron API. PluginClientFixture targets the Neutron API directly via the plugin api, and will be executed by default. testscenarios will ensure that each test is run against all plugins defined in plugin_configurations. RestClientFixture targets a deployed Neutron daemon, and will be used instead of PluginClientFixture only if OS_TEST_API_WITH_REST is set to 1. Reference: https://pypi.python.org/pypi/testscenarios/ """ import testscenarios from neutron.tests import base as tests_base from neutron.tests.retargetable import client_fixtures from neutron.tests.unit.plugins.ml2 import test_plugin # Each plugin must add a class to plugin_configurations that can configure the # plugin for use with PluginClient. For a given plugin, the setup # used for NeutronDbPluginV2TestCase can usually be reused. See the # configuration classes listed below for examples of this reuse.
# TODO(marun) Discover plugin conf via a metaclass plugin_configurations = [ test_plugin.Ml2ConfFixture(), ] def rest_enabled(): return tests_base.bool_from_env('OS_TEST_API_WITH_REST') def get_plugin_scenarios(): scenarios = [] for conf in plugin_configurations: name = conf.plugin_name class_name = name.rsplit('.', 1)[-1] client = client_fixtures.PluginClientFixture(conf) scenarios.append((class_name, {'client': client})) return scenarios def get_scenarios(): if rest_enabled(): # FIXME(marun) Remove local import once tempest config is safe # to import alongside neutron config from neutron.tests.retargetable import rest_fixture return [('tempest', {'client': rest_fixture.RestClientFixture()})] else: return get_plugin_scenarios() class RetargetableApiTest(testscenarios.WithScenarios, tests_base.BaseTestCase): scenarios = get_scenarios() def setUp(self): super(RetargetableApiTest, self).setUp() if rest_enabled(): raise self.skipException( 'Tempest fixture requirements prevent this test from running') self.useFixture(self.client) neutron-8.4.0/neutron/tests/retargetable/client_fixtures.py0000664000567000056710000000700513044372760025444 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you # may not use this file except in compliance with the License. You may # obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. See the License for the specific language governing # permissions and limitations under the License. """ This module defines client fixtures that can be used to target the Neutron API via different methods. """ import abc import fixtures import six from neutron.common import exceptions as n_exc from neutron import context from neutron import manager from neutron.tests import base from neutron.tests.unit import testlib_api @six.add_metaclass(abc.ABCMeta) class AbstractClientFixture(fixtures.Fixture): """Base class for a client that can interact with the Neutron API in some manner. """ @abc.abstractproperty def NotFound(self): """The exception that indicates a resource could not be found. Tests can use this property to assert for a missing resource in a client-agnostic way.
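        For example (illustrative; 'deleted_id' is a placeholder for the
        id of a removed network):

            with testtools.ExpectedException(client.NotFound):
                client.get_network(deleted_id)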
""" @abc.abstractmethod def create_network(self, **kwargs): pass @abc.abstractmethod def update_network(self, id_, **kwargs): pass @abc.abstractmethod def get_network(self, id_, fields=None): pass @abc.abstractmethod def get_networks(self, filters=None, fields=None, sorts=None, limit=None, marker=None, page_reverse=False): pass @abc.abstractmethod def delete_network(self, id_): pass class PluginClientFixture(AbstractClientFixture): """Targets the Neutron API via the plugin API""" def __init__(self, plugin_conf): super(PluginClientFixture, self).__init__() self.plugin_conf = plugin_conf def _setUp(self): super(PluginClientFixture, self)._setUp() self.useFixture(testlib_api.SqlFixture()) self.useFixture(self.plugin_conf) self.useFixture(base.PluginFixture(self.plugin_conf.plugin_name)) @property def ctx(self): if not hasattr(self, '_ctx'): self._ctx = context.Context('', 'test-tenant') return self._ctx @property def plugin(self): return manager.NeutronManager.get_plugin() @property def NotFound(self): return n_exc.NetworkNotFound def create_network(self, **kwargs): # Supply defaults that are expected to be set by the api # framework kwargs.setdefault('admin_state_up', True) kwargs.setdefault('shared', False) kwargs.setdefault('tenant_id', self.ctx.tenant_id) data = dict(network=kwargs) result = self.plugin.create_network(self.ctx, data) return base.AttributeDict(result) def update_network(self, id_, **kwargs): data = dict(network=kwargs) result = self.plugin.update_network(self.ctx, id_, data) return base.AttributeDict(result) def get_network(self, *args, **kwargs): result = self.plugin.get_network(self.ctx, *args, **kwargs) return base.AttributeDict(result) def get_networks(self, *args, **kwargs): result = self.plugin.get_networks(self.ctx, *args, **kwargs) return [base.AttributeDict(x) for x in result] def delete_network(self, id_): self.plugin.delete_network(self.ctx, id_) neutron-8.4.0/neutron/tests/api/0000775000567000056710000000000013044373210017760 5ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/tests/api/admin/0000775000567000056710000000000013044373210021050 5ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/tests/api/admin/test_routers_dvr.py0000664000567000056710000001067313044372760025057 0ustar jenkinsjenkins00000000000000# Copyright 2015 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from tempest.lib.common.utils import data_utils from tempest import test from neutron.tests.api import base_routers as base class RoutersTestDVR(base.BaseRouterTest): @classmethod def resource_setup(cls): for ext in ['router', 'dvr']: if not test.is_extension_enabled(ext, 'network'): msg = "%s extension not enabled." % ext raise cls.skipException(msg) # The check above will pass if api_extensions=all, which does # not mean DVR extension itself is present. 
# Instead, we have to check whether DVR is actually present by using # admin credentials to create a router with the distributed=True # attribute and checking that the resulting router actually has a # distributed attribute. super(RoutersTestDVR, cls).resource_setup() name = data_utils.rand_name('pretest-check') router = cls.admin_client.create_router(name) if 'distributed' not in router['router']: msg = "'distributed' attribute not found. DVR possibly not enabled" raise cls.skipException(msg) cls.admin_client.delete_router(router['router']['id']) @test.attr(type='smoke') @test.idempotent_id('08a2a0a8-f1e4-4b34-8e30-e522e836c44e') def test_distributed_router_creation(self): """ Test uses administrative credentials to create a DVR (Distributed Virtual Routing) router using the distributed=True attribute. Acceptance The router is created and the "distributed" attribute is set to True """ name = data_utils.rand_name('router') router = self.admin_client.create_router(name, distributed=True) self.addCleanup(self.admin_client.delete_router, router['router']['id']) self.assertTrue(router['router']['distributed']) @test.attr(type='smoke') @test.idempotent_id('8a0a72b4-7290-4677-afeb-b4ffe37bc352') def test_centralized_router_creation(self): """ Test uses administrative credentials to create a CVR (Centralized Virtual Routing) router using the distributed=False attribute. Acceptance The router is created and the "distributed" attribute is set to False, thus making it a "Centralized Virtual Router" as opposed to a "Distributed Virtual Router" """ name = data_utils.rand_name('router') router = self.admin_client.create_router(name, distributed=False) self.addCleanup(self.admin_client.delete_router, router['router']['id']) self.assertFalse(router['router']['distributed']) @test.attr(type='smoke') @test.idempotent_id('acd43596-c1fb-439d-ada8-31ad48ae3c2e') def test_centralized_router_update_to_dvr(self): """ Test uses administrative credentials to create a CVR (Centralized Virtual Routing) router using the distributed=False attribute. Then it will "update" the router's distributed attribute to True Acceptance The router is created and the "distributed" attribute is set to False. Once the router is updated, the distributed attribute will be set to True """ name = data_utils.rand_name('router') # The router needs to be in admin state down in order to be upgraded # to DVR router = self.admin_client.create_router(name, distributed=False, admin_state_up=False) self.addCleanup(self.admin_client.delete_router, router['router']['id']) self.assertFalse(router['router']['distributed']) router = self.admin_client.update_router(router['router']['id'], distributed=True) self.assertTrue(router['router']['distributed']) neutron-8.4.0/neutron/tests/api/admin/__init__.py0000664000567000056710000000000013044372760023160 0ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/tests/api/admin/test_quotas.py0000664000567000056710000000645213044372760024013 0ustar jenkinsjenkins00000000000000# Copyright 2013 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the # License for the specific language governing permissions and limitations # under the License. import six from tempest.lib.common.utils import data_utils from tempest import test from neutron.tests.api import base class QuotasTest(base.BaseAdminNetworkTest): """ Tests the following operations in the Neutron API using the REST client for Neutron: list quotas for tenants who have non-default quota values show quotas for a specified tenant update quotas for a specified tenant reset quotas to default values for a specified tenant v2.0 of the API is assumed. It is also assumed that the per-tenant quota extension API is configured in /etc/neutron/neutron.conf as follows: quota_driver = neutron.db.driver.DbQuotaDriver """ @classmethod def resource_setup(cls): super(QuotasTest, cls).resource_setup() if not test.is_extension_enabled('quotas', 'network'): msg = "quotas extension not enabled." raise cls.skipException(msg) @test.attr(type='gate') @test.idempotent_id('2390f766-836d-40ef-9aeb-e810d78207fb') def test_quotas(self): # Add a tenant to conduct the test test_tenant = data_utils.rand_name('test_tenant_') test_description = data_utils.rand_name('desc_') tenant = self.identity_admin_client.create_tenant( name=test_tenant, description=test_description)['tenant'] tenant_id = tenant['id'] self.addCleanup(self.identity_admin_client.delete_tenant, tenant_id) new_quotas = {'network': 0, 'security_group': 0} # Change quotas for tenant quota_set = self.admin_client.update_quotas(tenant_id, **new_quotas) self.addCleanup(self.admin_client.reset_quotas, tenant_id) for key, value in six.iteritems(new_quotas): self.assertEqual(value, quota_set[key]) # Confirm our tenant is listed among tenants with non default quotas non_default_quotas = self.admin_client.list_quotas() found = False for qs in non_default_quotas['quotas']: if qs['tenant_id'] == tenant_id: found = True self.assertTrue(found) # Confirm from API quotas were changed as requested for tenant quota_set = self.admin_client.show_quotas(tenant_id) quota_set = quota_set['quota'] for key, value in six.iteritems(new_quotas): self.assertEqual(value, quota_set[key]) # Reset quotas to default and confirm self.admin_client.reset_quotas(tenant_id) non_default_quotas = self.admin_client.list_quotas() for q in non_default_quotas['quotas']: self.assertNotEqual(tenant_id, q['tenant_id']) neutron-8.4.0/neutron/tests/api/admin/test_l3_agent_scheduler.py0000664000567000056710000001043513044372760026227 0ustar jenkinsjenkins00000000000000# Copyright 2013 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from tempest.lib.common.utils import data_utils from tempest import test from neutron.tests.api import base from neutron.tests.tempest import exceptions AGENT_TYPE = 'L3 agent' AGENT_MODES = ( 'legacy', 'dvr_snat' ) class L3AgentSchedulerTestJSON(base.BaseAdminNetworkTest): _agent_mode = 'legacy' """ Tests the following operations in the Neutron API using the REST client for Neutron: List routers that the given L3 agent is hosting. 
List L3 agents hosting the given router. Add and Remove Router to L3 agent v2.0 of the Neutron API is assumed. The l3_agent_scheduler extension is required for these tests. """ @classmethod def skip_checks(cls): super(L3AgentSchedulerTestJSON, cls).skip_checks() if not test.is_extension_enabled('l3_agent_scheduler', 'network'): msg = "L3 Agent Scheduler Extension not enabled." raise cls.skipException(msg) @classmethod def resource_setup(cls): super(L3AgentSchedulerTestJSON, cls).resource_setup() body = cls.admin_client.list_agents() agents = body['agents'] for agent in agents: # TODO(armax): falling back on default _agent_mode can be # dropped as soon as Icehouse is dropped. agent_mode = ( agent['configurations'].get('agent_mode', cls._agent_mode)) if agent['agent_type'] == AGENT_TYPE and agent_mode in AGENT_MODES: cls.agent = agent break else: msg = "L3 Agent Scheduler enabled in conf, but L3 Agent not found" raise exceptions.InvalidConfiguration(msg) cls.router = cls.create_router(data_utils.rand_name('router')) # NOTE(armax): If DVR is an available extension, and the created router # is indeed a distributed one, more resources need to be provisioned # in order to bind the router to the L3 agent. # That said, let's preserve the existing test logic, where the extra # query and setup steps are only required if the extension is available # and only if the router's default type is distributed. if test.is_extension_enabled('dvr', 'network'): is_dvr_router = cls.admin_client.show_router( cls.router['id'])['router'].get('distributed', False) if is_dvr_router: cls.network = cls.create_network() cls.create_subnet(cls.network) cls.port = cls.create_port(cls.network) cls.client.add_router_interface_with_port_id( cls.router['id'], cls.port['id']) @test.attr(type='smoke') @test.idempotent_id('b7ce6e89-e837-4ded-9b78-9ed3c9c6a45a') def test_list_routers_on_l3_agent(self): self.admin_client.list_routers_on_l3_agent(self.agent['id']) @test.attr(type='smoke') @test.idempotent_id('9464e5e7-8625-49c3-8fd1-89c52be59d66') def test_add_list_remove_router_on_l3_agent(self): l3_agent_ids = list() self.admin_client.add_router_to_l3_agent( self.agent['id'], self.router['id']) body = ( self.admin_client.list_l3_agents_hosting_router(self.router['id'])) for agent in body['agents']: l3_agent_ids.append(agent['id']) self.assertIn('agent_type', agent) self.assertEqual('L3 agent', agent['agent_type']) self.assertIn(self.agent['id'], l3_agent_ids) body = self.admin_client.remove_router_from_l3_agent( self.agent['id'], self.router['id']) # NOTE(afazekas): The deletion is not asserted, because neutron # is not forbidden to reschedule the router to the same agent neutron-8.4.0/neutron/tests/api/admin/test_shared_network_extension.py0000664000567000056710000005012713044372760027610 0ustar jenkinsjenkins00000000000000# Copyright 2015 Hewlett-Packard Development Company, L.P. # Copyright 2015 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License.
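# Illustrative sketch ('net_id' and 'tenant_id' are placeholders): the
# rbac-policies API exercised in this module shares a network with a
# specific tenant roughly like this:
#
#     admin_client.create_rbac_policy(
#         object_type='network', object_id=net_id,
#         action='access_as_shared', target_tenant=tenant_id)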
import uuid from tempest.lib.common.utils import data_utils from tempest.lib import exceptions as lib_exc from tempest import test import testtools from neutron.tests.api import base class SharedNetworksTest(base.BaseAdminNetworkTest): @classmethod def resource_setup(cls): super(SharedNetworksTest, cls).resource_setup() cls.shared_network = cls.create_shared_network() @test.idempotent_id('6661d219-b96d-4597-ad10-55766123421a') def test_filtering_shared_networks(self): # this test is necessary because the 'shared' column does not actually # exist on networks so the filter function has to translate it into # queries against the RBAC table self.create_network() self._check_shared_correct( self.client.list_networks(shared=True)['networks'], True) self._check_shared_correct( self.admin_client.list_networks(shared=True)['networks'], True) self._check_shared_correct( self.client.list_networks(shared=False)['networks'], False) self._check_shared_correct( self.admin_client.list_networks(shared=False)['networks'], False) def _check_shared_correct(self, items, shared): self.assertNotEmpty(items) self.assertTrue(all(n['shared'] == shared for n in items)) @test.idempotent_id('6661d219-b96d-4597-ad10-51672353421a') def test_filtering_shared_subnets(self): # shared subnets need to be tested because their shared status isn't # visible as a regular API attribute and it's solely dependent on the # parent network reg = self.create_network() priv = self.create_subnet(reg, client=self.client) shared = self.create_subnet(self.shared_network, client=self.admin_client) self.assertIn(shared, self.client.list_subnets(shared=True)['subnets']) self.assertIn(shared, self.admin_client.list_subnets(shared=True)['subnets']) self.assertNotIn(priv, self.client.list_subnets(shared=True)['subnets']) self.assertNotIn(priv, self.admin_client.list_subnets(shared=True)['subnets']) self.assertIn(priv, self.client.list_subnets(shared=False)['subnets']) self.assertIn(priv, self.admin_client.list_subnets(shared=False)['subnets']) self.assertNotIn(shared, self.client.list_subnets(shared=False)['subnets']) self.assertNotIn(shared, self.admin_client.list_subnets(shared=False)['subnets']) @test.idempotent_id('6661d219-b96d-4597-ad10-55766ce4abf7') def test_create_update_shared_network(self): shared_network = self.create_shared_network() net_id = shared_network['id'] self.assertEqual('ACTIVE', shared_network['status']) self.assertIsNotNone(shared_network['id']) self.assertTrue(self.shared_network['shared']) new_name = "New_shared_network" body = self.admin_client.update_network(net_id, name=new_name, admin_state_up=False, shared=False) updated_net = body['network'] self.assertEqual(new_name, updated_net['name']) self.assertFalse(updated_net['shared']) self.assertFalse(updated_net['admin_state_up']) @test.idempotent_id('9c31fabb-0181-464f-9ace-95144fe9ca77') def test_create_port_shared_network_as_non_admin_tenant(self): # create a port as non admin body = self.client.create_port(network_id=self.shared_network['id']) port = body['port'] self.addCleanup(self.admin_client.delete_port, port['id']) # verify the tenant id of admin network and non admin port self.assertNotEqual(self.shared_network['tenant_id'], port['tenant_id']) @test.idempotent_id('3e39c4a6-9caf-4710-88f1-d20073c6dd76') def test_create_bulk_shared_network(self): # Creates 2 networks in one request net_nm = [data_utils.rand_name('network'), data_utils.rand_name('network')] body = self.admin_client.create_bulk_network(net_nm, shared=True) created_networks = body['networks'] for 
net in created_networks: self.addCleanup(self.admin_client.delete_network, net['id']) self.assertIsNotNone(net['id']) self.assertTrue(net['shared']) def _list_shared_networks(self, user): body = user.list_networks(shared=True) networks_list = [net['id'] for net in body['networks']] self.assertIn(self.shared_network['id'], networks_list) self.assertTrue(self.shared_network['shared']) @test.idempotent_id('a064a9fd-e02f-474a-8159-f828cd636a28') def test_list_shared_networks(self): # List the shared networks and confirm that # shared network extension attribute is returned for those networks # that are created as shared self._list_shared_networks(self.admin_client) self._list_shared_networks(self.client) def _show_shared_network(self, user): body = user.show_network(self.shared_network['id']) show_shared_net = body['network'] self.assertEqual(self.shared_network['name'], show_shared_net['name']) self.assertEqual(self.shared_network['id'], show_shared_net['id']) self.assertTrue(show_shared_net['shared']) @test.idempotent_id('e03c92a2-638d-4bfa-b50a-b1f66f087e58') def test_show_shared_networks_attribute(self): # Show a shared network and confirm that # shared network extension attribute is returned. self._show_shared_network(self.admin_client) self._show_shared_network(self.client) class AllowedAddressPairSharedNetworkTest(base.BaseAdminNetworkTest): allowed_address_pairs = [{'ip_address': '1.1.1.1'}] @classmethod def skip_checks(cls): super(AllowedAddressPairSharedNetworkTest, cls).skip_checks() if not test.is_extension_enabled('allowed-address-pairs', 'network'): msg = "Allowed Address Pairs extension not enabled." raise cls.skipException(msg) @classmethod def resource_setup(cls): super(AllowedAddressPairSharedNetworkTest, cls).resource_setup() cls.network = cls.create_shared_network() cls.create_subnet(cls.network, client=cls.admin_client) @test.attr(type='smoke') @test.idempotent_id('86c3529b-1231-40de-803c-ffffffff1fff') def test_create_with_address_pair_blocked_on_other_network(self): with testtools.ExpectedException(lib_exc.Forbidden): self.create_port(self.network, allowed_address_pairs=self.allowed_address_pairs) @test.attr(type='smoke') @test.idempotent_id('86c3529b-1231-40de-803c-ffffffff2fff') def test_update_with_address_pair_blocked_on_other_network(self): port = self.create_port(self.network) with testtools.ExpectedException(lib_exc.Forbidden): self.update_port( port, allowed_address_pairs=self.allowed_address_pairs) class RBACSharedNetworksTest(base.BaseAdminNetworkTest): force_tenant_isolation = True credentials = ['primary', 'alt', 'admin'] @classmethod def resource_setup(cls): super(RBACSharedNetworksTest, cls).resource_setup() if not test.is_extension_enabled('rbac-policies', 'network'): msg = "rbac-policies extension not enabled." 
raise cls.skipException(msg) cls.client2 = cls.alt_manager.network_client def _make_admin_net_and_subnet_shared_to_tenant_id(self, tenant_id): net = self.admin_client.create_network( name=data_utils.rand_name('test-network-'))['network'] self.addCleanup(self.admin_client.delete_network, net['id']) subnet = self.create_subnet(net, client=self.admin_client) # network is shared to first unprivileged client by default pol = self.admin_client.create_rbac_policy( object_type='network', object_id=net['id'], action='access_as_shared', target_tenant=tenant_id )['rbac_policy'] return {'network': net, 'subnet': subnet, 'policy': pol} @test.attr(type='smoke') @test.idempotent_id('86c3529b-1231-40de-803c-bfffffff1eee') def test_create_rbac_policy_with_target_tenant_none(self): with testtools.ExpectedException(lib_exc.BadRequest): self._make_admin_net_and_subnet_shared_to_tenant_id( tenant_id=None) @test.attr(type='smoke') @test.idempotent_id('86c3529b-1231-40de-803c-bfffffff1fff') def test_create_rbac_policy_with_target_tenant_too_long_id(self): with testtools.ExpectedException(lib_exc.BadRequest): target_tenant = '1234' * 100 self._make_admin_net_and_subnet_shared_to_tenant_id( tenant_id=target_tenant) @test.attr(type='smoke') @test.idempotent_id('86c3529b-1231-40de-803c-afffffff1fff') def test_network_only_visible_to_policy_target(self): net = self._make_admin_net_and_subnet_shared_to_tenant_id( self.client.tenant_id)['network'] self.client.show_network(net['id']) with testtools.ExpectedException(lib_exc.NotFound): # client2 has not been granted access self.client2.show_network(net['id']) @test.attr(type='smoke') @test.idempotent_id('86c3529b-1231-40de-803c-afffffff2fff') def test_subnet_on_network_only_visible_to_policy_target(self): sub = self._make_admin_net_and_subnet_shared_to_tenant_id( self.client.tenant_id)['subnet'] self.client.show_subnet(sub['id']) with testtools.ExpectedException(lib_exc.NotFound): # client2 has not been granted access self.client2.show_subnet(sub['id']) @test.attr(type='smoke') @test.idempotent_id('86c3529b-1231-40de-803c-afffffff2eee') def test_policy_target_update(self): res = self._make_admin_net_and_subnet_shared_to_tenant_id( self.client.tenant_id) # change to client2 update_res = self.admin_client.update_rbac_policy( res['policy']['id'], target_tenant=self.client2.tenant_id) self.assertEqual(self.client2.tenant_id, update_res['rbac_policy']['target_tenant']) # make sure everything else stayed the same res['policy'].pop('target_tenant') update_res['rbac_policy'].pop('target_tenant') self.assertEqual(res['policy'], update_res['rbac_policy']) @test.idempotent_id('86c3529b-1231-40de-803c-affefefef321') def test_duplicate_policy_error(self): res = self._make_admin_net_and_subnet_shared_to_tenant_id( self.client.tenant_id) with testtools.ExpectedException(lib_exc.Conflict): self.admin_client.create_rbac_policy( object_type='network', object_id=res['network']['id'], action='access_as_shared', target_tenant=self.client.tenant_id) @test.attr(type='smoke') @test.idempotent_id('86c3529b-1231-40de-803c-afffffff3fff') def test_port_presence_prevents_network_rbac_policy_deletion(self): res = self._make_admin_net_and_subnet_shared_to_tenant_id( self.client.tenant_id) port = self.client.create_port(network_id=res['network']['id'])['port'] # a port on the network should prevent the deletion of a policy # required for it to exist with testtools.ExpectedException(lib_exc.Conflict): self.admin_client.delete_rbac_policy(res['policy']['id']) # a wildcard policy should allow the specific 
policy to be deleted # since it allows the remaining port wild = self.admin_client.create_rbac_policy( object_type='network', object_id=res['network']['id'], action='access_as_shared', target_tenant='*')['rbac_policy'] self.admin_client.delete_rbac_policy(res['policy']['id']) # now that the wildcard is the only remaining policy, it should be # subject to the same restriction with testtools.ExpectedException(lib_exc.Conflict): self.admin_client.delete_rbac_policy(wild['id']) # similarly, we can't update the policy to a different tenant with testtools.ExpectedException(lib_exc.Conflict): self.admin_client.update_rbac_policy( wild['id'], target_tenant=self.client2.tenant_id) self.client.delete_port(port['id']) # anchor is gone, delete should pass self.admin_client.delete_rbac_policy(wild['id']) @test.attr(type='smoke') @test.idempotent_id('86c3529b-1231-40de-803c-beefbeefbeef') def test_tenant_can_delete_port_on_own_network(self): net = self.create_network() # owned by self.client self.client.create_rbac_policy( object_type='network', object_id=net['id'], action='access_as_shared', target_tenant=self.client2.tenant_id) port = self.client2.create_port(network_id=net['id'])['port'] self.client.delete_port(port['id']) @test.attr(type='smoke') @test.idempotent_id('86c3529b-1231-40de-803c-afffffff4fff') def test_regular_client_shares_to_another_regular_client(self): net = self.create_network() # owned by self.client with testtools.ExpectedException(lib_exc.NotFound): self.client2.show_network(net['id']) pol = self.client.create_rbac_policy( object_type='network', object_id=net['id'], action='access_as_shared', target_tenant=self.client2.tenant_id) self.client2.show_network(net['id']) self.assertIn(pol['rbac_policy'], self.client.list_rbac_policies()['rbac_policies']) # ensure that 'client2' can't see the policy sharing the network to it # because the policy belongs to 'client' self.assertNotIn(pol['rbac_policy']['id'], [p['id'] for p in self.client2.list_rbac_policies()['rbac_policies']]) @test.attr(type='smoke') @test.idempotent_id('bf5052b8-b11e-407c-8e43-113447404d3e') def test_filter_fields(self): net = self.create_network() self.client.create_rbac_policy( object_type='network', object_id=net['id'], action='access_as_shared', target_tenant=self.client2.tenant_id) field_args = (('id',), ('id', 'action'), ('object_type', 'object_id'), ('tenant_id', 'target_tenant')) for fields in field_args: res = self.client.list_rbac_policies(fields=fields) self.assertEqual(set(fields), set(res['rbac_policies'][0].keys())) @test.attr(type='smoke') @test.idempotent_id('86c3529b-1231-40de-803c-afffffff5fff') def test_policy_show(self): res = self._make_admin_net_and_subnet_shared_to_tenant_id( self.client.tenant_id) p1 = res['policy'] p2 = self.admin_client.create_rbac_policy( object_type='network', object_id=res['network']['id'], action='access_as_shared', target_tenant='*')['rbac_policy'] self.assertEqual( p1, self.admin_client.show_rbac_policy(p1['id'])['rbac_policy']) self.assertEqual( p2, self.admin_client.show_rbac_policy(p2['id'])['rbac_policy']) @test.attr(type='smoke') @test.idempotent_id('e7bcb1ea-4877-4266-87bb-76f68b421f31') def test_filter_policies(self): net = self.create_network() pol1 = self.client.create_rbac_policy( object_type='network', object_id=net['id'], action='access_as_shared', target_tenant=self.client2.tenant_id)['rbac_policy'] pol2 = self.client.create_rbac_policy( object_type='network', object_id=net['id'], action='access_as_shared', target_tenant=self.client.tenant_id)['rbac_policy']
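# each id filter should match exactly one of the two policies created above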
res1 = self.client.list_rbac_policies(id=pol1['id'])['rbac_policies'] res2 = self.client.list_rbac_policies(id=pol2['id'])['rbac_policies'] self.assertEqual(1, len(res1)) self.assertEqual(1, len(res2)) self.assertEqual(pol1['id'], res1[0]['id']) self.assertEqual(pol2['id'], res2[0]['id']) @test.attr(type='smoke') @test.idempotent_id('86c3529b-1231-40de-803c-afffffff6fff') def test_regular_client_blocked_from_sharing_anothers_network(self): net = self._make_admin_net_and_subnet_shared_to_tenant_id( self.client.tenant_id)['network'] with testtools.ExpectedException(lib_exc.BadRequest): self.client.create_rbac_policy( object_type='network', object_id=net['id'], action='access_as_shared', target_tenant=self.client.tenant_id) @test.attr(type='smoke') @test.idempotent_id('c5f8f785-ce8d-4430-af7e-a236205862fb') def test_rbac_policy_quota(self): if not test.is_extension_enabled('quotas', 'network'): msg = "quotas extension not enabled." raise self.skipException(msg) quota = self.client.show_quotas(self.client.tenant_id)['quota'] max_policies = quota['rbac_policy'] self.assertGreater(max_policies, 0) net = self.client.create_network( name=data_utils.rand_name('test-network-'))['network'] self.addCleanup(self.client.delete_network, net['id']) with testtools.ExpectedException(lib_exc.Conflict): for i in range(0, max_policies + 1): self.admin_client.create_rbac_policy( object_type='network', object_id=net['id'], action='access_as_shared', target_tenant=str(uuid.uuid4()).replace('-', '')) @test.attr(type='smoke') @test.idempotent_id('86c3529b-1231-40de-803c-afffffff7fff') def test_regular_client_blocked_from_sharing_with_wildcard(self): net = self.create_network() with testtools.ExpectedException(lib_exc.Forbidden): self.client.create_rbac_policy( object_type='network', object_id=net['id'], action='access_as_shared', target_tenant='*') # ensure it works on update as well pol = self.client.create_rbac_policy( object_type='network', object_id=net['id'], action='access_as_shared', target_tenant=self.client2.tenant_id) with testtools.ExpectedException(lib_exc.Forbidden): self.client.update_rbac_policy(pol['rbac_policy']['id'], target_tenant='*') @test.attr(type='smoke') @test.idempotent_id('86c3529b-1231-40de-803c-aeeeeeee7fff') def test_filtering_works_with_rbac_records_present(self): resp = self._make_admin_net_and_subnet_shared_to_tenant_id( self.client.tenant_id) net = resp['network']['id'] sub = resp['subnet']['id'] self.admin_client.create_rbac_policy( object_type='network', object_id=net, action='access_as_shared', target_tenant='*') self._assert_shared_object_id_listing_presence('subnets', False, sub) self._assert_shared_object_id_listing_presence('subnets', True, sub) self._assert_shared_object_id_listing_presence('networks', False, net) self._assert_shared_object_id_listing_presence('networks', True, net) def _assert_shared_object_id_listing_presence(self, resource, shared, oid): lister = getattr(self.admin_client, 'list_%s' % resource) objects = [o['id'] for o in lister(shared=shared)[resource]] if shared: self.assertIn(oid, objects) else: self.assertNotIn(oid, objects) neutron-8.4.0/neutron/tests/api/admin/test_agent_management.py0000664000567000056710000000737113044372760025774 0ustar jenkinsjenkins00000000000000# Copyright 2013 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron.tests.tempest.common import tempest_fixtures from tempest import test from neutron.tests.api import base class AgentManagementTestJSON(base.BaseAdminNetworkTest): @classmethod def resource_setup(cls): super(AgentManagementTestJSON, cls).resource_setup() if not test.is_extension_enabled('agent', 'network'): msg = "agent extension not enabled." raise cls.skipException(msg) body = cls.admin_client.list_agents() agents = body['agents'] cls.agent = agents[0] # don't modify this agent cls.dyn_agent = agents[1] @test.attr(type='smoke') @test.idempotent_id('9c80f04d-11f3-44a4-8738-ed2f879b0ff4') def test_list_agent(self): body = self.admin_client.list_agents() agents = body['agents'] # Heartbeats must be excluded from comparison self.agent.pop('heartbeat_timestamp', None) self.agent.pop('configurations', None) for agent in agents: agent.pop('heartbeat_timestamp', None) agent.pop('configurations', None) self.assertIn(self.agent, agents) @test.attr(type=['smoke']) @test.idempotent_id('e335be47-b9a1-46fd-be30-0874c0b751e6') def test_list_agents_non_admin(self): body = self.client.list_agents() self.assertEqual(len(body["agents"]), 0) @test.attr(type='smoke') @test.idempotent_id('869bc8e8-0fda-4a30-9b71-f8a7cf58ca9f') def test_show_agent(self): body = self.admin_client.show_agent(self.agent['id']) agent = body['agent'] self.assertEqual(agent['id'], self.agent['id']) @test.attr(type='smoke') @test.idempotent_id('371dfc5b-55b9-4cb5-ac82-c40eadaac941') def test_update_agent_status(self): origin_status = self.agent['admin_state_up'] # Try to update the 'admin_state_up' to the original # one to avoid the negative effect. agent_status = {'admin_state_up': origin_status} body = self.admin_client.update_agent(agent_id=self.agent['id'], agent_info=agent_status) updated_status = body['agent']['admin_state_up'] self.assertEqual(origin_status, updated_status) @test.attr(type='smoke') @test.idempotent_id('68a94a14-1243-46e6-83bf-157627e31556') def test_update_agent_description(self): self.useFixture(tempest_fixtures.LockFixture('agent_description')) description = 'description for update agent.' agent_description = {'description': description} body = self.admin_client.update_agent(agent_id=self.dyn_agent['id'], agent_info=agent_description) self.addCleanup(self._restore_agent) updated_description = body['agent']['description'] self.assertEqual(updated_description, description) def _restore_agent(self): """ Restore the agent description after update test. """ description = self.dyn_agent['description'] origin_agent = {'description': description} self.admin_client.update_agent(agent_id=self.dyn_agent['id'], agent_info=origin_agent) neutron-8.4.0/neutron/tests/api/admin/test_dhcp_agent_scheduler.py0000664000567000056710000000777513044372760026644 0ustar jenkinsjenkins00000000000000# Copyright 2013 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from tempest import test from neutron.tests.api import base class DHCPAgentSchedulersTestJSON(base.BaseAdminNetworkTest): @classmethod def resource_setup(cls): super(DHCPAgentSchedulersTestJSON, cls).resource_setup() if not test.is_extension_enabled('dhcp_agent_scheduler', 'network'): msg = "dhcp_agent_scheduler extension not enabled." raise cls.skipException(msg) # Create a network and make sure it will be hosted by a # dhcp agent: this is done by creating a regular port cls.network = cls.create_network() cls.subnet = cls.create_subnet(cls.network) cls.cidr = cls.subnet['cidr'] cls.port = cls.create_port(cls.network) @test.attr(type='smoke') @test.idempotent_id('5032b1fe-eb42-4a64-8f3b-6e189d8b5c7d') def test_list_dhcp_agent_hosting_network(self): self.admin_client.list_dhcp_agent_hosting_network( self.network['id']) @test.attr(type='smoke') @test.idempotent_id('30c48f98-e45d-4ffb-841c-b8aad57c7587') def test_list_networks_hosted_by_one_dhcp(self): body = self.admin_client.list_dhcp_agent_hosting_network( self.network['id']) agents = body['agents'] self.assertIsNotNone(agents) agent = agents[0] self.assertTrue(self._check_network_in_dhcp_agent( self.network['id'], agent)) def _check_network_in_dhcp_agent(self, network_id, agent): network_ids = [] body = self.admin_client.list_networks_hosted_by_one_dhcp_agent( agent['id']) networks = body['networks'] for network in networks: network_ids.append(network['id']) return network_id in network_ids @test.attr(type='smoke') @test.idempotent_id('a0856713-6549-470c-a656-e97c8df9a14d') def test_add_remove_network_from_dhcp_agent(self): # The agent is now bound to the network, we can free the port self.client.delete_port(self.port['id']) self.ports.remove(self.port) agent = dict() agent['agent_type'] = None body = self.admin_client.list_agents() agents = body['agents'] for a in agents: if a['agent_type'] == 'DHCP agent': agent = a break self.assertEqual(agent['agent_type'], 'DHCP agent', 'Could not find ' 'DHCP agent in agent list though dhcp_agent_scheduler' ' is enabled.') network = self.create_network() network_id = network['id'] if self._check_network_in_dhcp_agent(network_id, agent): self._remove_network_from_dhcp_agent(network_id, agent) self._add_dhcp_agent_to_network(network_id, agent) else: self._add_dhcp_agent_to_network(network_id, agent) self._remove_network_from_dhcp_agent(network_id, agent) def _remove_network_from_dhcp_agent(self, network_id, agent): self.admin_client.remove_network_from_dhcp_agent( agent_id=agent['id'], network_id=network_id) self.assertFalse(self._check_network_in_dhcp_agent( network_id, agent)) def _add_dhcp_agent_to_network(self, network_id, agent): self.admin_client.add_dhcp_agent_to_network(agent['id'], network_id) self.assertTrue(self._check_network_in_dhcp_agent( network_id, agent)) neutron-8.4.0/neutron/tests/api/admin/test_extension_driver_port_security_admin.py0000664000567000056710000000265013044372760032227 0ustar jenkinsjenkins00000000000000# Copyright 2015 Cisco Systems, Inc. # All Rights Reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from tempest.lib import exceptions as lib_exc from tempest import test from neutron.tests.api import base from neutron.tests.api import base_security_groups as base_security class PortSecurityAdminTests(base_security.BaseSecGroupTest, base.BaseAdminNetworkTest): @test.attr(type=['negative', 'smoke']) @test.idempotent_id('d39a96e2-2dea-4feb-8093-e7ac991ce6f8') @test.requires_ext(extension='port-security', service='network') def test_create_port_security_false_on_shared_network(self): network = self.create_shared_network() self.assertTrue(network['shared']) self.create_subnet(network, client=self.admin_client) self.assertRaises(lib_exc.Forbidden, self.create_port, network, port_security_enabled=False) neutron-8.4.0/neutron/tests/api/admin/test_floating_ips_admin_actions.py0000664000567000056710000000765613044372760030056 0ustar jenkinsjenkins00000000000000# Copyright 2014 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
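# A note on the retry idiom used by test_create_floatingip_with_specified_ip_address
# below: Python's for/else runs the else suite only when the loop finishes
# without hitting ``break``. A minimal sketch (``claim`` and ``Conflict``
# stand in for the real client call and exception):
#
#     for ip in candidates:
#         try:
#             claim(ip)   # may raise Conflict if another test took the IP
#             break       # success, so the else suite is skipped
#         except Conflict:
#             continue    # try the next candidate address
#     else:
#         raise RuntimeError('no candidate address could be claimed')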
from tempest.lib.common.utils import data_utils from tempest.lib import exceptions as lib_exc from tempest import test import testtools from neutron.tests.api import base from neutron.tests.tempest import config CONF = config.CONF class FloatingIPAdminTestJSON(base.BaseAdminNetworkTest): force_tenant_isolation = True credentials = ['primary', 'alt', 'admin'] @classmethod def resource_setup(cls): super(FloatingIPAdminTestJSON, cls).resource_setup() cls.ext_net_id = CONF.network.public_network_id cls.floating_ip = cls.create_floatingip(cls.ext_net_id) cls.alt_client = cls.alt_manager.network_client cls.network = cls.create_network() cls.subnet = cls.create_subnet(cls.network) cls.router = cls.create_router(data_utils.rand_name('router-'), external_network_id=cls.ext_net_id) cls.create_router_interface(cls.router['id'], cls.subnet['id']) cls.port = cls.create_port(cls.network) @test.attr(type=['negative', 'smoke']) @test.idempotent_id('11116ee9-4e99-5b15-b8e1-aa7df92ca589') def test_associate_floating_ip_with_port_from_another_tenant(self): body = self.admin_client.create_floatingip( floating_network_id=self.ext_net_id) floating_ip = body['floatingip'] test_tenant = data_utils.rand_name('test_tenant_') test_description = data_utils.rand_name('desc_') tenant = self.identity_admin_client.create_tenant( name=test_tenant, description=test_description)['tenant'] tenant_id = tenant['id'] self.addCleanup(self.identity_admin_client.delete_tenant, tenant_id) port = self.admin_client.create_port(network_id=self.network['id'], tenant_id=tenant_id) self.addCleanup(self.admin_client.delete_port, port['port']['id']) self.assertRaises(lib_exc.BadRequest, self.admin_client.update_floatingip, floating_ip['id'], port_id=port['port']['id']) @testtools.skipUnless( CONF.neutron_plugin_options.specify_floating_ip_address_available, "Feature for specifying floating IP address is disabled") @test.attr(type='smoke') @test.idempotent_id('332a8ae4-402e-4b98-bb6f-532e5a87b8e0') def test_create_floatingip_with_specified_ip_address(self): # other tests may end up stealing the IP before we can use it # since it's on the external network so we need to retry if it's # in use. for i in range(100): fip = self.get_unused_ip(self.ext_net_id, ip_version=4) try: body = self.admin_client.create_floatingip( floating_network_id=self.ext_net_id, floating_ip_address=fip) break except lib_exc.Conflict: pass else: self.fail("Could not get an unused IP after 100 attempts") created_floating_ip = body['floatingip'] self.addCleanup(self.admin_client.delete_floatingip, created_floating_ip['id']) self.assertIsNotNone(created_floating_ip['id']) self.assertIsNotNone(created_floating_ip['tenant_id']) self.assertEqual(created_floating_ip['floating_ip_address'], fip) neutron-8.4.0/neutron/tests/api/admin/test_external_network_extension.py0000664000567000056710000002333213044372760030164 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
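# The access_as_external workflow exercised below reduces to a single RBAC
# call once a network exists. A sketch using the same client API as these
# tests (net_id and peer_tenant_id are placeholders):
#
#     def share_as_external(client, net_id, peer_tenant_id):
#         """Grant one tenant external access to an existing network."""
#         return client.create_rbac_policy(
#             object_type='network', object_id=net_id,
#             action='access_as_external',
#             target_tenant=peer_tenant_id)['rbac_policy']
#
# Passing target_tenant='*' instead creates the wildcard policy that the
# router:external=True shortcut generates implicitly.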
from oslo_config import cfg from tempest.lib.common.utils import data_utils from tempest.lib import exceptions as lib_exc from tempest import test import testtools from neutron.tests.api import base class ExternalNetworksRBACTestJSON(base.BaseAdminNetworkTest): credentials = ['primary', 'alt', 'admin'] @classmethod def resource_setup(cls): if not test.is_extension_enabled('rbac-policies', 'network'): msg = "rbac-policies extension not enabled." raise cls.skipException(msg) super(ExternalNetworksRBACTestJSON, cls).resource_setup() cls.client2 = cls.alt_manager.network_client def _create_network(self, external=True): post_body = {'name': data_utils.rand_name('network-')} if external: post_body['router:external'] = external body = self.admin_client.create_network(**post_body) network = body['network'] self.addCleanup(self.admin_client.delete_network, network['id']) return network @test.attr(type='smoke') @test.idempotent_id('afd8f1b7-a81e-4629-bca8-a367b3a144bb') def test_regular_client_shares_with_another(self): net = self.create_network() self.client.create_rbac_policy( object_type='network', object_id=net['id'], action='access_as_external', target_tenant=self.client2.tenant_id) body = self.client2.list_networks() networks_list = [n['id'] for n in body['networks']] self.assertIn(net['id'], networks_list) r = self.client2.create_router( data_utils.rand_name('router-'), external_gateway_info={'network_id': net['id']})['router'] self.addCleanup(self.admin_client.delete_router, r['id']) @test.idempotent_id('afd8f1b7-a81e-4629-bca8-a367b3a144bb') def test_regular_client_blocked_from_creating_external_wild_policies(self): net = self.create_network() with testtools.ExpectedException(lib_exc.Forbidden): self.client.create_rbac_policy( object_type='network', object_id=net['id'], action='access_as_external', target_tenant='*') @test.attr(type='smoke') @test.idempotent_id('a2e19f06-48a9-4e4c-b717-08cb2008707d') def test_wildcard_policy_created_from_external_network_api(self): # create external makes wildcard net_id = self._create_network(external=True)['id'] self.assertEqual(1, len(self.admin_client.list_rbac_policies( object_id=net_id, action='access_as_external', target_tenant='*')['rbac_policies'])) # update to non-external clears wildcard self.admin_client.update_network(net_id, **{'router:external': False}) self.assertEqual(0, len(self.admin_client.list_rbac_policies( object_id=net_id, action='access_as_external', target_tenant='*')['rbac_policies'])) # create non-external has no wildcard net_id = self._create_network(external=False)['id'] self.assertEqual(0, len(self.admin_client.list_rbac_policies( object_id=net_id, action='access_as_external', target_tenant='*')['rbac_policies'])) # update to external makes wildcard self.admin_client.update_network(net_id, **{'router:external': True}) self.assertEqual(1, len(self.admin_client.list_rbac_policies( object_id=net_id, action='access_as_external', target_tenant='*')['rbac_policies'])) @test.idempotent_id('a5539002-5bdb-48b5-b124-abcd12347865') def test_external_update_policy_from_wildcard_to_specific_tenant(self): net_id = self._create_network(external=True)['id'] rbac_pol = self.admin_client.list_rbac_policies( object_id=net_id, action='access_as_external', target_tenant='*')['rbac_policies'][0] r = self.client2.create_router( data_utils.rand_name('router-'), external_gateway_info={'network_id': net_id})['router'] self.addCleanup(self.admin_client.delete_router, r['id']) # changing wildcard to specific tenant should be okay since its the # only one 
using the network self.admin_client.update_rbac_policy( rbac_pol['id'], target_tenant=self.client2.tenant_id) @test.idempotent_id('a5539002-5bdb-48b5-b124-e9eedd5975e6') def test_external_conversion_on_policy_create(self): net_id = self._create_network(external=False)['id'] self.admin_client.create_rbac_policy( object_type='network', object_id=net_id, action='access_as_external', target_tenant=self.client2.tenant_id) body = self.admin_client.show_network(net_id)['network'] self.assertTrue(body['router:external']) @test.idempotent_id('01364c50-bfb6-46c4-b44c-edc4564d61cf') def test_policy_allows_tenant_to_allocate_floatingip(self): net = self._create_network(external=False) # share to the admin client so it gets converted to external but # not shared to everyone self.admin_client.create_rbac_policy( object_type='network', object_id=net['id'], action='access_as_external', target_tenant=self.admin_client.tenant_id) self.create_subnet(net, client=self.admin_client, enable_dhcp=False) with testtools.ExpectedException(lib_exc.NotFound): self.client2.create_floatingip( floating_network_id=net['id']) self.admin_client.create_rbac_policy( object_type='network', object_id=net['id'], action='access_as_external', target_tenant=self.client2.tenant_id) self.client2.create_floatingip( floating_network_id=net['id']) @test.idempotent_id('476be1e0-f72e-47dc-9a14-4435926bbe82') def test_policy_allows_tenant_to_attach_ext_gw(self): net = self._create_network(external=False) self.create_subnet(net, client=self.admin_client, enable_dhcp=False) self.admin_client.create_rbac_policy( object_type='network', object_id=net['id'], action='access_as_external', target_tenant=self.client2.tenant_id) r = self.client2.create_router( data_utils.rand_name('router-'), external_gateway_info={'network_id': net['id']})['router'] self.addCleanup(self.admin_client.delete_router, r['id']) @test.idempotent_id('d54decee-4203-4ced-91a2-ea42ca63e154') def test_delete_policies_while_tenant_attached_to_net(self): net = self._create_network(external=False) self.create_subnet(net, client=self.admin_client, enable_dhcp=False) wildcard = self.admin_client.create_rbac_policy( object_type='network', object_id=net['id'], action='access_as_external', target_tenant='*')['rbac_policy'] r = self.client2.create_router( data_utils.rand_name('router-'), external_gateway_info={'network_id': net['id']})['router'] # delete should fail because the wildcard is required for the tenant's # access with testtools.ExpectedException(lib_exc.Conflict): self.admin_client.delete_rbac_policy(wildcard['id']) tenant = self.admin_client.create_rbac_policy( object_type='network', object_id=net['id'], action='access_as_external', target_tenant=self.client2.tenant_id)['rbac_policy'] # now we can delete the policy because the tenant has its own policy # to allow it access self.admin_client.delete_rbac_policy(wildcard['id']) # but now we can't delete the tenant's policy without the wildcard with testtools.ExpectedException(lib_exc.Conflict): self.admin_client.delete_rbac_policy(tenant['id']) wildcard = self.admin_client.create_rbac_policy( object_type='network', object_id=net['id'], action='access_as_external', target_tenant='*')['rbac_policy'] # with the wildcard added back we can delete the tenant's policy self.admin_client.delete_rbac_policy(tenant['id']) self.admin_client.delete_router(r['id']) # now without the tenant attached, the wildcard can be deleted self.admin_client.delete_rbac_policy(wildcard['id']) # finally we ensure that the tenant can't attach to the 
network since # there are no policies allowing it with testtools.ExpectedException(lib_exc.NotFound): self.client2.create_router( data_utils.rand_name('router-'), external_gateway_info={'network_id': net['id']}) @test.idempotent_id('7041cec7-d8fe-4c78-9b04-b51b2fd49dc9') def test_wildcard_policy_delete_blocked_on_default_ext(self): public_net_id = cfg.CONF.network.public_network_id # ensure it is default before so we don't wipe out the policy self.admin_client.update_network(public_net_id, is_default=True) policy = self.admin_client.list_rbac_policies( object_id=public_net_id, action='access_as_external', target_tenant='*')['rbac_policies'][0] with testtools.ExpectedException(lib_exc.Conflict): self.admin_client.delete_rbac_policy(policy['id']) neutron-8.4.0/neutron/tests/api/base_security_groups.py0000664000567000056710000000344013044372760024604 0ustar jenkinsjenkins00000000000000# Copyright 2013 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from tempest.lib.common.utils import data_utils from neutron.tests.api import base class BaseSecGroupTest(base.BaseNetworkTest): @classmethod def resource_setup(cls): super(BaseSecGroupTest, cls).resource_setup() def _create_security_group(self, **kwargs): # Create a security group name = data_utils.rand_name('secgroup-') group_create_body = self.client.create_security_group(name=name, **kwargs) self.addCleanup(self._delete_security_group, group_create_body['security_group']['id']) self.assertEqual(group_create_body['security_group']['name'], name) return group_create_body, name def _delete_security_group(self, secgroup_id): self.client.delete_security_group(secgroup_id) # Asserting that the security group is not found in the list # after deletion list_body = self.client.list_security_groups() secgroup_list = list() for secgroup in list_body['security_groups']: secgroup_list.append(secgroup['id']) self.assertNotIn(secgroup_id, secgroup_list) neutron-8.4.0/neutron/tests/api/test_metering_extensions.py0000664000567000056710000001521213044372760025474 0ustar jenkinsjenkins00000000000000# Copyright (C) 2014 eNovance SAS # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
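# The label/rule pairing used throughout this module takes two calls; the
# rule must reference the id of an existing label. A sketch using the same
# admin client API as below (names and prefixes are placeholders):
#
#     label = admin_client.create_metering_label(
#         name='my-label', description='traffic to meter')['metering_label']
#     rule = admin_client.create_metering_label_rule(
#         remote_ip_prefix='10.0.0.0/24', direction='ingress',
#         metering_label_id=label['id'])['metering_label_rule']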
from tempest.lib.common.utils import data_utils from tempest import test from neutron.tests.api import base class MeteringTestJSON(base.BaseAdminNetworkTest): """ Tests the following operations in the Neutron API using the REST client for Neutron: List, Show, Create, Delete Metering labels List, Show, Create, Delete Metering labels rules """ @classmethod def resource_setup(cls): super(MeteringTestJSON, cls).resource_setup() if not test.is_extension_enabled('metering', 'network'): msg = "metering extension not enabled." raise cls.skipException(msg) description = "metering label created by tempest" name = data_utils.rand_name("metering-label") cls.metering_label = cls.create_metering_label(name, description) remote_ip_prefix = ("10.0.0.0/24" if cls._ip_version == 4 else "fd02::/64") direction = "ingress" cls.metering_label_rule = cls.create_metering_label_rule( remote_ip_prefix, direction, metering_label_id=cls.metering_label['id']) def _delete_metering_label(self, metering_label_id): # Deletes a label and verifies if it is deleted or not self.admin_client.delete_metering_label(metering_label_id) # Asserting that the label is not found in list after deletion labels = self.admin_client.list_metering_labels(id=metering_label_id) self.assertEqual(len(labels['metering_labels']), 0) def _delete_metering_label_rule(self, metering_label_rule_id): # Deletes a rule and verifies if it is deleted or not self.admin_client.delete_metering_label_rule( metering_label_rule_id) # Asserting that the rule is not found in list after deletion rules = (self.admin_client.list_metering_label_rules( id=metering_label_rule_id)) self.assertEqual(len(rules['metering_label_rules']), 0) @test.attr(type='smoke') @test.idempotent_id('e2fb2f8c-45bf-429a-9f17-171c70444612') def test_list_metering_labels(self): # Verify label filtering body = self.admin_client.list_metering_labels(id=33) metering_labels = body['metering_labels'] self.assertEqual(0, len(metering_labels)) @test.attr(type='smoke') @test.idempotent_id('ec8e15ff-95d0-433b-b8a6-b466bddb1e50') def test_create_delete_metering_label_with_filters(self): # Creates a label name = data_utils.rand_name('metering-label-') description = "label created by tempest" body = self.admin_client.create_metering_label(name=name, description=description) metering_label = body['metering_label'] self.addCleanup(self._delete_metering_label, metering_label['id']) # Assert whether created labels are found in labels list or fail # if created labels are not found in labels list labels = (self.admin_client.list_metering_labels( id=metering_label['id'])) self.assertEqual(len(labels['metering_labels']), 1) @test.attr(type='smoke') @test.idempotent_id('30abb445-0eea-472e-bd02-8649f54a5968') def test_show_metering_label(self): # Verifies the details of a label body = self.admin_client.show_metering_label(self.metering_label['id']) metering_label = body['metering_label'] self.assertEqual(self.metering_label['id'], metering_label['id']) self.assertEqual(self.metering_label['tenant_id'], metering_label['tenant_id']) self.assertEqual(self.metering_label['name'], metering_label['name']) self.assertEqual(self.metering_label['description'], metering_label['description']) @test.attr(type='smoke') @test.idempotent_id('cc832399-6681-493b-9d79-0202831a1281') def test_list_metering_label_rules(self): # Verify rule filtering body = self.admin_client.list_metering_label_rules(id=33) metering_label_rules = body['metering_label_rules'] self.assertEqual(0, len(metering_label_rules)) @test.attr(type='smoke') 
@test.idempotent_id('f4d547cd-3aee-408f-bf36-454f8825e045') def test_create_delete_metering_label_rule_with_filters(self): # Creates a rule remote_ip_prefix = ("10.0.1.0/24" if self._ip_version == 4 else "fd03::/64") body = (self.admin_client.create_metering_label_rule( remote_ip_prefix=remote_ip_prefix, direction="ingress", metering_label_id=self.metering_label['id'])) metering_label_rule = body['metering_label_rule'] self.addCleanup(self._delete_metering_label_rule, metering_label_rule['id']) # Assert whether created rules are found in rules list or fail # if created rules are not found in rules list rules = (self.admin_client.list_metering_label_rules( id=metering_label_rule['id'])) self.assertEqual(len(rules['metering_label_rules']), 1) @test.attr(type='smoke') @test.idempotent_id('b7354489-96ea-41f3-9452-bace120fb4a7') def test_show_metering_label_rule(self): # Verifies the details of a rule body = (self.admin_client.show_metering_label_rule( self.metering_label_rule['id'])) metering_label_rule = body['metering_label_rule'] self.assertEqual(self.metering_label_rule['id'], metering_label_rule['id']) self.assertEqual(self.metering_label_rule['remote_ip_prefix'], metering_label_rule['remote_ip_prefix']) self.assertEqual(self.metering_label_rule['direction'], metering_label_rule['direction']) self.assertEqual(self.metering_label_rule['metering_label_id'], metering_label_rule['metering_label_id']) self.assertFalse(metering_label_rule['excluded']) class MeteringIpV6TestJSON(MeteringTestJSON): _ip_version = 6 neutron-8.4.0/neutron/tests/api/test_bgp_speaker_extensions_negative.py0000664000567000056710000001450513044372760030032 0ustar jenkinsjenkins00000000000000# Copyright 2016 Hewlett Packard Enterprise Development Company LP # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
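# The local_as bound checked by the first test below follows from 2-byte
# BGP AS numbers being 16-bit values, so anything above 65535 cannot be a
# valid 2-byte ASN. Sketch of the arithmetic:
#
#     MAX_2BYTE_ASN = 2 ** 16 - 1   # 65535
#     assert 65537 > MAX_2BYTE_ASN  # so local_as='65537' must be rejected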
import netaddr from tempest.lib import exceptions as lib_exc from neutron.tests.api import test_bgp_speaker_extensions as test_base from tempest import test class BgpSpeakerTestJSONNegative(test_base.BgpSpeakerTestJSONBase): """Negative test cases asserting proper behavior of BGP API extension""" @test.attr(type=['negative', 'smoke']) @test.idempotent_id('75e9ee2f-6efd-4320-bff7-ae24741c8b06') def test_create_bgp_speaker_illegal_local_asn(self): self.assertRaises(lib_exc.BadRequest, self.create_bgp_speaker, local_as='65537') @test.attr(type=['negative', 'smoke']) @test.idempotent_id('6742ec2e-382a-4453-8791-13a19b47cd13') def test_create_bgp_speaker_non_admin(self): self.assertRaises(lib_exc.Forbidden, self.client.create_bgp_speaker, {'bgp_speaker': self.default_bgp_speaker_args}) @test.attr(type=['negative', 'smoke']) @test.idempotent_id('33f7aaf0-9786-478b-b2d1-a51086a50eb4') def test_create_bgp_peer_non_admin(self): self.assertRaises(lib_exc.Forbidden, self.client.create_bgp_peer, {'bgp_peer': self.default_bgp_peer_args}) @test.attr(type=['negative', 'smoke']) @test.idempotent_id('39435932-0266-4358-899b-0e9b1e53c3e9') def test_update_bgp_speaker_local_asn(self): bgp_speaker = self.create_bgp_speaker(**self.default_bgp_speaker_args) bgp_speaker_id = bgp_speaker['bgp-speaker']['id'] self.assertRaises(lib_exc.BadRequest, self.update_bgp_speaker, bgp_speaker_id, local_as='4321') @test.idempotent_id('9cc33701-51e5-421f-a5d5-fd7b330e550f') def test_get_advertised_routes_tenant_networks(self): addr_scope1 = self.create_address_scope('my-scope1', ip_version=4) addr_scope2 = self.create_address_scope('my-scope2', ip_version=4) ext_net = self.create_shared_network(**{'router:external': True}) tenant_net1 = self.create_network() tenant_net2 = self.create_network() ext_subnetpool = self.create_subnetpool( 'test-pool-ext', is_admin=True, default_prefixlen=24, address_scope_id=addr_scope1['id'], prefixes=['8.0.0.0/8']) tenant_subnetpool1 = self.create_subnetpool( 'tenant-test-pool', default_prefixlen=25, address_scope_id=addr_scope1['id'], prefixes=['10.10.0.0/16']) tenant_subnetpool2 = self.create_subnetpool( 'tenant-test-pool', default_prefixlen=25, address_scope_id=addr_scope2['id'], prefixes=['11.10.0.0/16']) self.create_subnet({'id': ext_net['id']}, cidr=netaddr.IPNetwork('8.0.0.0/24'), ip_version=4, client=self.admin_client, subnetpool_id=ext_subnetpool['id']) tenant_subnet1 = self.create_subnet( {'id': tenant_net1['id']}, cidr=netaddr.IPNetwork('10.10.0.0/24'), ip_version=4, subnetpool_id=tenant_subnetpool1['id']) tenant_subnet2 = self.create_subnet( {'id': tenant_net2['id']}, cidr=netaddr.IPNetwork('11.10.0.0/24'), ip_version=4, subnetpool_id=tenant_subnetpool2['id']) ext_gw_info = {'network_id': ext_net['id']} router = self.admin_client.create_router( 'my-router', distributed=False, external_gateway_info=ext_gw_info)['router'] self.admin_routers.append(router) self.admin_client.add_router_interface_with_subnet_id( router['id'], tenant_subnet1['id']) self.admin_routerports.append({'router_id': router['id'], 'subnet_id': tenant_subnet1['id']}) self.admin_client.add_router_interface_with_subnet_id( router['id'], tenant_subnet2['id']) self.admin_routerports.append({'router_id': router['id'], 'subnet_id': tenant_subnet2['id']}) bgp_speaker = self.create_bgp_speaker(**self.default_bgp_speaker_args) bgp_speaker_id = bgp_speaker['bgp-speaker']['id'] self.admin_client.add_bgp_gateway_network(bgp_speaker_id, ext_net['id']) routes = self.admin_client.get_bgp_advertised_routes(bgp_speaker_id) 
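# only the tenant subnet sharing the gateway network's address scope
# should be advertised, with the router's external gateway IP as next hop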
self.assertEqual(1, len(routes['advertised_routes'])) self.assertEqual(tenant_subnet1['cidr'], routes['advertised_routes'][0]['destination']) fixed_ip = router['external_gateway_info']['external_fixed_ips'][0] self.assertEqual(fixed_ip['ip_address'], routes['advertised_routes'][0]['next_hop']) neutron-8.4.0/neutron/tests/api/test_networks.py0000664000567000056710000000732313044372760023263 0ustar jenkinsjenkins00000000000000# Copyright 2012 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from tempest import test from neutron.tests.api import base from neutron.tests.tempest import config CONF = config.CONF class NetworksTestJSON(base.BaseNetworkTest): """ Tests the following operations in the Neutron API using the REST client for Neutron: list tenant's networks show a network show a tenant network details v2.0 of the Neutron API is assumed. """ @classmethod def resource_setup(cls): super(NetworksTestJSON, cls).resource_setup() cls.network = cls.create_network() @test.attr(type='smoke') @test.idempotent_id('2bf13842-c93f-4a69-83ed-717d2ec3b44e') def test_show_network(self): # Verify the details of a network body = self.client.show_network(self.network['id']) network = body['network'] fields = ['id', 'name'] if test.is_extension_enabled('net-mtu', 'network'): fields.append('mtu') for key in fields: self.assertEqual(network[key], self.network[key]) @test.attr(type='smoke') @test.idempotent_id('867819bb-c4b6-45f7-acf9-90edcf70aa5e') def test_show_network_fields(self): # Verify specific fields of a network fields = ['id', 'name'] if test.is_extension_enabled('net-mtu', 'network'): fields.append('mtu') body = self.client.show_network(self.network['id'], fields=fields) network = body['network'] self.assertEqual(sorted(network.keys()), sorted(fields)) for field_name in fields: self.assertEqual(network[field_name], self.network[field_name]) @test.attr(type='smoke') @test.idempotent_id('c72c1c0c-2193-4aca-ccc4-b1442640bbbb') def test_create_update_network_description(self): if not test.is_extension_enabled('standard-attr-description', 'network'): msg = "standard-attr-description not enabled." 
raise self.skipException(msg) body = self.create_network(description='d1') self.assertEqual('d1', body['description']) net_id = body['id'] body = self.client.list_networks(id=net_id)['networks'][0] self.assertEqual('d1', body['description']) body = self.client.update_network(body['id'], description='d2') self.assertEqual('d2', body['network']['description']) body = self.client.list_networks(id=net_id)['networks'][0] self.assertEqual('d2', body['description']) @test.attr(type='smoke') @test.idempotent_id('6ae6d24f-9194-4869-9c85-c313cb20e080') def test_list_networks_fields(self): # Verify specific fields of the networks fields = ['id', 'name'] if test.is_extension_enabled('net-mtu', 'network'): fields.append('mtu') body = self.client.list_networks(fields=fields) networks = body['networks'] self.assertNotEmpty(networks, "Network list returned is empty") for network in networks: self.assertEqual(sorted(network.keys()), sorted(fields)) neutron-8.4.0/neutron/tests/api/test_extension_driver_port_security.py0000664000567000056710000001536113044372760027772 0ustar jenkinsjenkins00000000000000# Copyright 2015 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import ddt from tempest.lib import exceptions as lib_exc from tempest import test from neutron.tests.api import base from neutron.tests.api import base_security_groups as base_security FAKE_IP = '10.0.0.1' FAKE_MAC = '00:25:64:e8:19:dd' @ddt.ddt class PortSecTest(base_security.BaseSecGroupTest, base.BaseNetworkTest): @test.attr(type='smoke') @test.idempotent_id('7c338ddf-e64e-4118-bd33-e49a1f2f1495') @test.requires_ext(extension='port-security', service='network') def test_port_sec_default_value(self): # Default port-sec value is True, and the attr of the port will inherit # from the port-sec of the network when it is not specified in the API network = self.create_network() self.assertTrue(network['port_security_enabled']) self.create_subnet(network) port = self.create_port(network) self.assertTrue(port['port_security_enabled']) @test.attr(type='smoke') @test.idempotent_id('e60eafd2-31de-4c38-8106-55447d033b57') @test.requires_ext(extension='port-security', service='network') @ddt.unpack @ddt.data({'port_sec_net': False, 'port_sec_port': True, 'expected': True}, {'port_sec_net': True, 'port_sec_port': False, 'expected': False}) def test_port_sec_specific_value(self, port_sec_net, port_sec_port, expected): network = self.create_network(port_security_enabled=port_sec_net) self.create_subnet(network) port = self.create_port(network, port_security_enabled=port_sec_port) self.assertEqual(network['port_security_enabled'], port_sec_net) self.assertEqual(port['port_security_enabled'], expected) @test.attr(type=['smoke']) @test.idempotent_id('05642059-1bfc-4581-9bc9-aaa5db08dd60') @test.requires_ext(extension='port-security', service='network') def test_create_port_sec_with_security_group(self): network = self.create_network(port_security_enabled=True) self.create_subnet(network) port = self.create_port(network, security_groups=[])
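# an empty security-group list leaves port security at its default of
# True; it is only disabled by passing port_security_enabled=False below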
self.assertTrue(port['port_security_enabled']) self.client.delete_port(port['id']) port = self.create_port(network, security_groups=[], port_security_enabled=False) self.assertFalse(port['port_security_enabled']) self.assertEmpty(port['security_groups']) @test.attr(type=['negative', 'smoke']) @test.idempotent_id('05642059-1bfc-4581-9bc9-aaa5db08dd60') @test.requires_ext(extension='port-security', service='network') def test_port_sec_update_port_failed(self): network = self.create_network() self.create_subnet(network) sec_group_body, sec_group_name = self._create_security_group() port = self.create_port(network) # Exception when set port-sec to False with sec-group defined self.assertRaises(lib_exc.Conflict, self.update_port, port, port_security_enabled=False) port = self.update_port(port, security_groups=[], port_security_enabled=False) self.assertEmpty(port['security_groups']) self.assertFalse(port['port_security_enabled']) port = self.update_port( port, security_groups=[sec_group_body['security_group']['id']], port_security_enabled=True) self.assertNotEmpty(port['security_groups']) self.assertTrue(port['port_security_enabled']) # Remove security group from port before deletion on resource_cleanup self.update_port(port, security_groups=[]) @test.attr(type=['smoke']) @test.idempotent_id('05642059-1bfc-4581-9bc9-aaa5db08dd60') @test.requires_ext(extension='port-security', service='network') def test_port_sec_update_pass(self): network = self.create_network() self.create_subnet(network) sec_group, _ = self._create_security_group() sec_group_id = sec_group['security_group']['id'] port = self.create_port(network, security_groups=[sec_group_id], port_security_enabled=True) self.assertNotEmpty(port['security_groups']) self.assertTrue(port['port_security_enabled']) port = self.update_port(port, security_groups=[]) self.assertEmpty(port['security_groups']) self.assertTrue(port['port_security_enabled']) port = self.update_port(port, security_groups=[sec_group_id]) self.assertNotEmpty(port['security_groups']) port = self.update_port(port, security_groups=[], port_security_enabled=False) self.assertEmpty(port['security_groups']) self.assertFalse(port['port_security_enabled']) @test.attr(type=['smoke']) @test.idempotent_id('2df6114b-b8c3-48a1-96e8-47f08159d35c') @test.requires_ext(extension='port-security', service='network') def test_delete_with_port_sec(self): network = self.create_network(port_security_enabled=True) port = self.create_port(network=network, port_security_enabled=True) self.client.delete_port(port['id']) self.assertTrue(self.client.is_resource_deleted('port', port['id'])) self.client.delete_network(network['id']) self.assertTrue( self.client.is_resource_deleted('network', network['id'])) @test.attr(type=['negative', 'smoke']) @test.idempotent_id('ed93e453-3f8d-495e-8e7e-b0e268c2ebd9') @test.requires_ext(extension='port-security', service='network') @test.requires_ext(extension='allowed-address-pairs', service='network') def test_allow_address_pairs(self): network = self.create_network() self.create_subnet(network) port = self.create_port(network=network, port_security_enabled=False) allowed_address_pairs = [{'ip_address': FAKE_IP, 'mac_address': FAKE_MAC}] # Exception when set address-pairs with port-sec is False self.assertRaises(lib_exc.Conflict, self.update_port, port, allowed_address_pairs=allowed_address_pairs) neutron-8.4.0/neutron/tests/api/test_security_groups.py0000664000567000056710000000501413044372760024650 0ustar jenkinsjenkins00000000000000# Copyright 2013 OpenStack 
Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from tempest.lib.common.utils import data_utils from tempest import test from neutron.tests.api import base_security_groups as base class SecGroupTest(base.BaseSecGroupTest): @classmethod def resource_setup(cls): super(SecGroupTest, cls).resource_setup() if not test.is_extension_enabled('security-group', 'network'): msg = "security-group extension not enabled." raise cls.skipException(msg) @test.attr(type='smoke') @test.idempotent_id('bfd128e5-3c92-44b6-9d66-7fe29d22c802') def test_create_list_update_show_delete_security_group(self): group_create_body, name = self._create_security_group() # List security groups and verify if created group is there in response list_body = self.client.list_security_groups() secgroup_list = list() for secgroup in list_body['security_groups']: secgroup_list.append(secgroup['id']) self.assertIn(group_create_body['security_group']['id'], secgroup_list) # Update the security group new_name = data_utils.rand_name('security-') new_description = data_utils.rand_name('security-description') update_body = self.client.update_security_group( group_create_body['security_group']['id'], name=new_name, description=new_description) # Verify if security group is updated self.assertEqual(update_body['security_group']['name'], new_name) self.assertEqual(update_body['security_group']['description'], new_description) # Show details of the updated security group show_body = self.client.show_security_group( group_create_body['security_group']['id']) self.assertEqual(show_body['security_group']['name'], new_name) self.assertEqual(show_body['security_group']['description'], new_description) neutron-8.4.0/neutron/tests/api/base_routers.py0000664000567000056710000000352013044372760023040 0ustar jenkinsjenkins00000000000000# Copyright 2013 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
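# Cleanup callbacks registered with addCleanup run in LIFO order when the
# test finishes, so the pattern used below (create, then immediately
# schedule the matching delete) tears resources down even if a later
# assertion fails. Sketch (router_client is a placeholder):
#
#     router = router_client.create_router('r1')['router']
#     self.addCleanup(router_client.delete_router, router['id'])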
from neutron.tests.api import base class BaseRouterTest(base.BaseAdminNetworkTest): # NOTE(salv-orlando): This class inherits from BaseAdminNetworkTest # as some router operations, such as enabling or disabling SNAT # require admin credentials by default def _cleanup_router(self, router): self.delete_router(router) self.routers.remove(router) def _create_router(self, name, admin_state_up=False, external_network_id=None, enable_snat=None): # associate a cleanup with created routers to avoid quota limits router = self.create_router(name, admin_state_up, external_network_id, enable_snat) self.addCleanup(self._cleanup_router, router) return router def _delete_router(self, router_id, network_client=None): client = network_client or self.client client.delete_router(router_id) # Asserting that the router is not found in the list # after deletion list_body = self.client.list_routers() routers_list = list() for router in list_body['routers']: routers_list.append(router['id']) self.assertNotIn(router_id, routers_list) neutron-8.4.0/neutron/tests/api/clients.py0000664000567000056710000000513013044372760022003 0ustar jenkinsjenkins00000000000000# Copyright 2012 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from tempest import manager from tempest.services.identity.v2.json.tenants_client import \ TenantsClient from neutron.tests.tempest import config from neutron.tests.tempest.services.network.json.network_client import \ NetworkClientJSON CONF = config.CONF class Manager(manager.Manager): """ Top level manager for OpenStack tempest clients """ default_params = { 'disable_ssl_certificate_validation': CONF.identity.disable_ssl_certificate_validation, 'ca_certs': CONF.identity.ca_certificates_file, 'trace_requests': CONF.debug.trace_requests } # NOTE: Tempest uses timeout values of compute API if project specific # timeout values don't exist. 
neutron-8.4.0/neutron/tests/api/clients.py0000664000567000056710000000513013044372760022003 0ustar jenkinsjenkins00000000000000# Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from tempest import manager
from tempest.services.identity.v2.json.tenants_client import \
    TenantsClient

from neutron.tests.tempest import config
from neutron.tests.tempest.services.network.json.network_client import \
    NetworkClientJSON

CONF = config.CONF


class Manager(manager.Manager):

    """
    Top level manager for OpenStack tempest clients
    """

    default_params = {
        'disable_ssl_certificate_validation':
            CONF.identity.disable_ssl_certificate_validation,
        'ca_certs': CONF.identity.ca_certificates_file,
        'trace_requests': CONF.debug.trace_requests
    }

    # NOTE: Tempest uses timeout values of compute API if project specific
    # timeout values don't exist.
    default_params_with_timeout_values = {
        'build_interval': CONF.compute.build_interval,
        'build_timeout': CONF.compute.build_timeout
    }
    default_params_with_timeout_values.update(default_params)

    def __init__(self, credentials=None, service=None):
        super(Manager, self).__init__(credentials=credentials)
        self._set_identity_clients()
        self.network_client = NetworkClientJSON(
            self.auth_provider,
            CONF.network.catalog_type,
            CONF.network.region or CONF.identity.region,
            endpoint_type=CONF.network.endpoint_type,
            build_interval=CONF.network.build_interval,
            build_timeout=CONF.network.build_timeout,
            **self.default_params)

    def _set_identity_clients(self):
        params = {
            'service': CONF.identity.catalog_type,
            'region': CONF.identity.region
        }
        params.update(self.default_params_with_timeout_values)

        params_v2_admin = params.copy()
        params_v2_admin['endpoint_type'] = CONF.identity.v2_admin_endpoint_type
        # Client uses admin endpoint type of Keystone API v2
        self.tenants_client = TenantsClient(self.auth_provider,
                                            **params_v2_admin)
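# Illustrative sketch (not from the neutron tree): how the Manager above is
# typically consumed. The credential-loading call in the usage comment is an
# assumption about the local tempest version and configuration.
def build_network_client(credentials):
    """Wire up a NetworkClientJSON exactly as Manager.__init__ does."""
    mgr = Manager(credentials=credentials)
    return mgr.network_client

# Possible usage, assuming tempest's credentials_factory is available:
#     from tempest.common import credentials_factory
#     client = build_network_client(
#         credentials_factory.get_configured_admin_credentials())
#     nets = client.list_networks()['networks']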
neutron-8.4.0/neutron/tests/api/test_auto_allocated_topology.py0000664000567000056710000001027513044372760026323 0ustar jenkinsjenkins00000000000000# Copyright 2016 IBM
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from oslo_config import cfg
from tempest import test

from neutron.tests.api import base


class TestAutoAllocatedTopology(base.BaseAdminNetworkTest):

    """
    NOTE: This test may eventually migrate to Tempest.

    Tests the Get-Me-A-Network operation in the Neutron API
    using the REST client for Neutron.
    """

    @classmethod
    def skip_checks(cls):
        super(TestAutoAllocatedTopology, cls).skip_checks()
        if not test.is_extension_enabled('auto-allocated-topology',
                                         'network'):
            raise cls.skipException("auto-allocated-topology extension not "
                                    "enabled")

    @classmethod
    def resource_setup(cls):
        super(TestAutoAllocatedTopology, cls).resource_setup()
        # The deployment must contain a default subnetpool
        body = cls.client.list_subnetpools(is_default=True)
        # The deployment may contain one or two default subnetpools:
        # one ipv4 pool, or one ipv6 pool, or one of each.
        # This run-time dependency should be revisited if the test is
        # moved over to tempest.
        cls.num_subnetpools = len(body['subnetpools'])
        if cls.num_subnetpools == 0:
            raise cls.skipException("No default subnetpool")
        # Ensure the public external network is the default external network
        public_net_id = cfg.CONF.network.public_network_id
        cls.admin_client.update_network(public_net_id, is_default=True)

    def _count_topology_resources(self):
        '''Count the resources whose names begin with 'auto_allocated_'.'''

        def _count(resources):
            return len([resource['id'] for resource in resources
                        if resource['name'].startswith('auto_allocated_')])

        networks = _count(self.client.list_networks()['networks'])
        subnets = _count(self.client.list_subnets()['subnets'])
        routers = _count(self.client.list_routers()['routers'])
        return networks, subnets, routers

    def _add_topology_cleanup(self, client):
        '''Add the auto-allocated resources to the cleanup lists.'''
        body = client.list_routers(name='auto_allocated_router')
        self.routers.extend(body['routers'])
        body = client.list_subnets(name='auto_allocated_subnet_v4')
        self.subnets.extend(body['subnets'])
        body = client.list_subnets(name='auto_allocated_subnet_v6')
        self.subnets.extend(body['subnets'])
        body = client.list_networks(name='auto_allocated_network')
        self.networks.extend(body['networks'])

    @test.attr(type='smoke')
    @test.idempotent_id('64bc0b02-cee4-11e5-9f3c-080027605a2b')
    def test_get_allocated_net_topology_as_tenant(self):
        resources_before = self._count_topology_resources()
        self.assertEqual((0, 0, 0), resources_before)
        body = self.client.get_auto_allocated_topology()
        topology = body['auto_allocated_topology']
        self.assertIsNotNone(topology)
        self._add_topology_cleanup(self.client)
        network_id1 = topology['id']
        self.assertIsNotNone(network_id1)
        resources_after1 = self._count_topology_resources()
        # One network, one subnet per default subnetpool, and one router
        self.assertEqual((1, self.num_subnetpools, 1), resources_after1)
        body = self.client.get_auto_allocated_topology()
        topology = body['auto_allocated_topology']
        network_id2 = topology['id']
        resources_after2 = self._count_topology_resources()
        # After the initial GET, the API should be idempotent
        self.assertEqual(network_id1, network_id2)
        self.assertEqual(resources_after1, resources_after2)
neutron-8.4.0/neutron/tests/api/__init__.py0000664000567000056710000000000013044372760022070 0ustar jenkinsjenkins00000000000000
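# Illustrative sketch (not from the neutron tree): the Get-Me-A-Network call
# is idempotent, so callers can treat it as "get or create". `client` is
# assumed to expose get_auto_allocated_topology() like the test client above.
def get_or_create_default_topology(client):
    topology = client.get_auto_allocated_topology()['auto_allocated_topology']
    # A second call returns the same network id; the smoke test above
    # asserts exactly this property.
    return topology['id']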
neutron-8.4.0/neutron/tests/api/test_floating_ips_negative.py0000664000567000056710000000563513044372760025747 0ustar jenkinsjenkins00000000000000# Copyright 2014 Hewlett-Packard Development Company, L.P.
# Copyright 2014 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from tempest.lib.common.utils import data_utils
from tempest.lib import exceptions as lib_exc
from tempest import test

from neutron.tests.api import base
from neutron.tests.tempest import config

CONF = config.CONF


class FloatingIPNegativeTestJSON(base.BaseNetworkTest):

    @classmethod
    def resource_setup(cls):
        super(FloatingIPNegativeTestJSON, cls).resource_setup()
        if not test.is_extension_enabled('router', 'network'):
            msg = "router extension not enabled."
            raise cls.skipException(msg)
        cls.ext_net_id = CONF.network.public_network_id
        # Create a network with a subnet connected to a router.
        cls.network = cls.create_network()
        cls.subnet = cls.create_subnet(cls.network)
        cls.router = cls.create_router(data_utils.rand_name('router'))
        cls.create_router_interface(cls.router['id'], cls.subnet['id'])
        cls.port = cls.create_port(cls.network)

    @test.attr(type=['negative', 'smoke'])
    @test.idempotent_id('0b5b8797-6de7-4191-905c-a48b888eb429')
    def test_associate_floatingip_with_port_with_floatingip(self):
        net = self.create_network()
        subnet = self.create_subnet(net)
        r = self.create_router('test')
        self.create_router_interface(r['id'], subnet['id'])
        self.client.update_router(
            r['id'],
            external_gateway_info={
                'network_id': self.ext_net_id})
        self.addCleanup(self.client.update_router, self.router['id'],
                        external_gateway_info={})
        port = self.create_port(net)
        body1 = self.client.create_floatingip(
            floating_network_id=self.ext_net_id)
        floating_ip1 = body1['floatingip']
        self.addCleanup(self.client.delete_floatingip, floating_ip1['id'])
        body2 = self.client.create_floatingip(
            floating_network_id=self.ext_net_id)
        floating_ip2 = body2['floatingip']
        self.addCleanup(self.client.delete_floatingip, floating_ip2['id'])
        self.client.update_floatingip(floating_ip1['id'],
                                      port_id=port['id'])
        self.assertRaises(lib_exc.Conflict, self.client.update_floatingip,
                          floating_ip2['id'], port_id=port['id'])
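# Illustrative sketch (not from the neutron tree): the 409 Conflict behaviour
# asserted above, as a caller would see it. `client` is assumed to expose the
# floating-ip methods used in the test; lib_exc is tempest.lib's exceptions.
from tempest.lib import exceptions as lib_exc


def reassociate_floating_ip(client, fip_id, port_id):
    """Move a floating IP to a port, surfacing the Conflict the API raises
    when the port already carries another floating IP."""
    try:
        client.update_floatingip(fip_id, port_id=port_id)
    except lib_exc.Conflict:
        # A caller must first disassociate the existing floating IP
        # (port_id=None) before retrying.
        raise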
neutron-8.4.0/neutron/tests/api/test_bgp_speaker_extensions.py0000664000567000056710000003347413044372760026146 0ustar jenkinsjenkins00000000000000# Copyright 2016 Hewlett Packard Enterprise Development Company LP
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import netaddr
from tempest import config
from tempest.lib import exceptions as lib_exc
from tempest import test
import testtools

from neutron.tests.api import base
from neutron.tests.tempest.common import tempest_fixtures as fixtures

CONF = config.CONF


class BgpSpeakerTestJSONBase(base.BaseAdminNetworkTest):

    default_bgp_speaker_args = {'local_as': '1234',
                                'ip_version': 4,
                                'name': 'my-bgp-speaker',
                                'advertise_floating_ip_host_routes': True,
                                'advertise_tenant_networks': True}
    default_bgp_peer_args = {'remote_as': '4321',
                             'name': 'my-bgp-peer',
                             'peer_ip': '192.168.1.1',
                             'auth_type': 'md5', 'password': 'my-secret'}

    @classmethod
    def resource_setup(cls):
        super(BgpSpeakerTestJSONBase, cls).resource_setup()
        if not test.is_extension_enabled('bgp_speaker', 'network'):
            msg = "BGP Speaker extension is not enabled."
            raise cls.skipException(msg)
        cls.admin_routerports = []
        cls.admin_floatingips = []
        cls.admin_routers = []
        cls.ext_net_id = CONF.network.public_network_id

    @classmethod
    def resource_cleanup(cls):
        for floatingip in cls.admin_floatingips:
            cls._try_delete_resource(cls.admin_client.delete_floatingip,
                                     floatingip['id'])
        for routerport in cls.admin_routerports:
            cls._try_delete_resource(
                cls.admin_client.remove_router_interface_with_subnet_id,
                routerport['router_id'], routerport['subnet_id'])
        for router in cls.admin_routers:
            cls._try_delete_resource(cls.admin_client.delete_router,
                                     router['id'])
        super(BgpSpeakerTestJSONBase, cls).resource_cleanup()

    def create_bgp_speaker(self, auto_delete=True, **args):
        data = {'bgp_speaker': args}
        bgp_speaker = self.admin_client.create_bgp_speaker(data)
        bgp_speaker_id = bgp_speaker['bgp-speaker']['id']
        if auto_delete:
            self.addCleanup(self.delete_bgp_speaker, bgp_speaker_id)
        return bgp_speaker

    def create_bgp_peer(self, **args):
        bgp_peer = self.admin_client.create_bgp_peer({'bgp_peer': args})
        bgp_peer_id = bgp_peer['bgp-peer']['id']
        self.addCleanup(self.delete_bgp_peer, bgp_peer_id)
        return bgp_peer

    def update_bgp_speaker(self, id, **args):
        data = {'bgp_speaker': args}
        return self.admin_client.update_bgp_speaker(id, data)

    def delete_bgp_speaker(self, id):
        return self.admin_client.delete_bgp_speaker(id)

    def get_bgp_speaker(self, id):
        return self.admin_client.get_bgp_speaker(id)

    def create_bgp_speaker_and_peer(self):
        bgp_speaker = self.create_bgp_speaker(**self.default_bgp_speaker_args)
        bgp_peer = self.create_bgp_peer(**self.default_bgp_peer_args)
        return (bgp_speaker, bgp_peer)

    def delete_bgp_peer(self, id):
        return self.admin_client.delete_bgp_peer(id)

    def add_bgp_peer(self, bgp_speaker_id, bgp_peer_id):
        return self.admin_client.add_bgp_peer_with_id(bgp_speaker_id,
                                                      bgp_peer_id)

    def remove_bgp_peer(self, bgp_speaker_id, bgp_peer_id):
        return self.admin_client.remove_bgp_peer_with_id(bgp_speaker_id,
                                                         bgp_peer_id)

    def delete_address_scope(self, id):
        return self.admin_client.delete_address_scope(id)


class BgpSpeakerTestJSON(BgpSpeakerTestJSONBase):

    """
    Tests the following operations in the Neutron API using the REST client
    for Neutron:

        Create bgp-speaker
        Delete bgp-speaker
        Create bgp-peer
        Update bgp-peer
        Delete bgp-peer
    """

    @test.idempotent_id('df259771-7104-4ffa-b77f-bd183600d7f9')
    def test_delete_bgp_speaker(self):
        bgp_speaker = self.create_bgp_speaker(auto_delete=False,
                                              **self.default_bgp_speaker_args)
        bgp_speaker_id = bgp_speaker['bgp-speaker']['id']
        self.delete_bgp_speaker(bgp_speaker_id)
        self.assertRaises(lib_exc.NotFound, self.get_bgp_speaker,
                          bgp_speaker_id)

    @test.idempotent_id('81d9dc45-19f8-4c6e-88b8-401d965cd1b0')
    def test_create_bgp_peer(self):
        self.create_bgp_peer(**self.default_bgp_peer_args)

    @test.idempotent_id('6ade0319-1ee2-493c-ac4b-5eb230ff3a77')
    def test_add_bgp_peer(self):
        bgp_speaker, bgp_peer = self.create_bgp_speaker_and_peer()
        bgp_speaker_id = bgp_speaker['bgp-speaker']['id']
        bgp_peer_id = bgp_peer['bgp-peer']['id']

        self.add_bgp_peer(bgp_speaker_id, bgp_peer_id)
        bgp_speaker = self.admin_client.get_bgp_speaker(bgp_speaker_id)
        bgp_peers_list = bgp_speaker['bgp-speaker']['peers']
        self.assertEqual(1, len(bgp_peers_list))
        self.assertTrue(bgp_peer_id in bgp_peers_list)

    @test.idempotent_id('f9737708-1d79-440b-8350-779f97d882ee')
    def test_remove_bgp_peer(self):
        bgp_peer = self.create_bgp_peer(**self.default_bgp_peer_args)
        bgp_peer_id = bgp_peer['bgp-peer']['id']
        bgp_speaker = self.create_bgp_speaker(**self.default_bgp_speaker_args)
        bgp_speaker_id = bgp_speaker['bgp-speaker']['id']
        self.add_bgp_peer(bgp_speaker_id, bgp_peer_id)
        bgp_speaker = self.admin_client.get_bgp_speaker(bgp_speaker_id)
        bgp_peers_list = bgp_speaker['bgp-speaker']['peers']
        self.assertTrue(bgp_peer_id in bgp_peers_list)
        bgp_speaker = self.remove_bgp_peer(bgp_speaker_id, bgp_peer_id)
        bgp_speaker = self.admin_client.get_bgp_speaker(bgp_speaker_id)
        bgp_peers_list = bgp_speaker['bgp-speaker']['peers']
        self.assertTrue(not bgp_peers_list)

    @testtools.skip('bug/1553374')
    @test.idempotent_id('23c8eb37-d10d-4f43-b2e7-6542cb6a4405')
    def test_add_gateway_network(self):
        self.useFixture(fixtures.LockFixture('gateway_network_binding'))
        bgp_speaker = self.create_bgp_speaker(**self.default_bgp_speaker_args)
        bgp_speaker_id = bgp_speaker['bgp-speaker']['id']
        self.admin_client.add_bgp_gateway_network(bgp_speaker_id,
                                                  self.ext_net_id)
        bgp_speaker = self.admin_client.get_bgp_speaker(bgp_speaker_id)
        network_list = bgp_speaker['bgp-speaker']['networks']
        self.assertEqual(1, len(network_list))
        self.assertTrue(self.ext_net_id in network_list)

    @testtools.skip('bug/1553374')
    @test.idempotent_id('6cfc7137-0d99-4a3d-826c-9d1a3a1767b0')
    def test_remove_gateway_network(self):
        self.useFixture(fixtures.LockFixture('gateway_network_binding'))
        bgp_speaker = self.create_bgp_speaker(**self.default_bgp_speaker_args)
        bgp_speaker_id = bgp_speaker['bgp-speaker']['id']
        self.admin_client.add_bgp_gateway_network(bgp_speaker_id,
                                                  self.ext_net_id)
        bgp_speaker = self.admin_client.get_bgp_speaker(bgp_speaker_id)
        networks = bgp_speaker['bgp-speaker']['networks']

        self.assertTrue(self.ext_net_id in networks)
        self.admin_client.remove_bgp_gateway_network(bgp_speaker_id,
                                                     self.ext_net_id)
        bgp_speaker = self.admin_client.get_bgp_speaker(bgp_speaker_id)
        network_list = bgp_speaker['bgp-speaker']['networks']
        self.assertTrue(not network_list)

    @testtools.skip('bug/1553374')
    @test.idempotent_id('5bef22ad-5e70-4f7b-937a-dc1944642996')
    def test_get_advertised_routes_null_address_scope(self):
        self.useFixture(fixtures.LockFixture('gateway_network_binding'))
        bgp_speaker = self.create_bgp_speaker(**self.default_bgp_speaker_args)
        bgp_speaker_id = bgp_speaker['bgp-speaker']['id']
        self.admin_client.add_bgp_gateway_network(bgp_speaker_id,
                                                  self.ext_net_id)
        routes = self.admin_client.get_bgp_advertised_routes(bgp_speaker_id)
        self.assertEqual(0, len(routes['advertised_routes']))

    @testtools.skip('bug/1553374')
    @test.idempotent_id('cae9cdb1-ad65-423c-9604-d4cd0073616e')
    def test_get_advertised_routes_floating_ips(self):
        self.useFixture(fixtures.LockFixture('gateway_network_binding'))
        bgp_speaker = self.create_bgp_speaker(**self.default_bgp_speaker_args)
        bgp_speaker_id = bgp_speaker['bgp-speaker']['id']
        self.admin_client.add_bgp_gateway_network(bgp_speaker_id,
                                                  self.ext_net_id)
        tenant_net = self.create_network()
        tenant_subnet = self.create_subnet(tenant_net)
        ext_gw_info = {'network_id': self.ext_net_id}
        router = self.admin_client.create_router(
            'my-router',
            external_gateway_info=ext_gw_info,
            admin_state_up=True,
            distributed=False)
        self.admin_routers.append(router['router'])
        self.admin_client.add_router_interface_with_subnet_id(
            router['router']['id'],
            tenant_subnet['id'])
        self.admin_routerports.append({'router_id': router['router']['id'],
                                       'subnet_id': tenant_subnet['id']})
        tenant_port = self.create_port(tenant_net)
        floatingip = self.create_floatingip(self.ext_net_id)
        self.admin_floatingips.append(floatingip)
        self.client.update_floatingip(floatingip['id'],
                                      port_id=tenant_port['id'])
        routes = self.admin_client.get_bgp_advertised_routes(bgp_speaker_id)
        self.assertEqual(1, len(routes['advertised_routes']))
        self.assertEqual(floatingip['floating_ip_address'] + '/32',
                         routes['advertised_routes'][0]['destination'])

    @testtools.skip('bug/1553374')
    @test.idempotent_id('c9ad566e-fe8f-4559-8303-bbad9062a30c')
    def test_get_advertised_routes_tenant_networks(self):
        self.useFixture(fixtures.LockFixture('gateway_network_binding'))
        addr_scope = self.create_address_scope('my-scope', ip_version=4)
        ext_net = self.create_shared_network(**{'router:external': True})
        tenant_net = self.create_network()
        ext_subnetpool = self.create_subnetpool(
            'test-pool-ext',
            is_admin=True,
            default_prefixlen=24,
            address_scope_id=addr_scope['id'],
            prefixes=['8.0.0.0/8'])
        tenant_subnetpool = self.create_subnetpool(
            'tenant-test-pool',
            default_prefixlen=25,
            address_scope_id=addr_scope['id'],
            prefixes=['10.10.0.0/16'])
        self.create_subnet({'id': ext_net['id']},
                           cidr=netaddr.IPNetwork('8.0.0.0/24'),
                           ip_version=4,
                           client=self.admin_client,
                           subnetpool_id=ext_subnetpool['id'])
        tenant_subnet = self.create_subnet(
            {'id': tenant_net['id']},
            cidr=netaddr.IPNetwork('10.10.0.0/24'),
            ip_version=4,
            subnetpool_id=tenant_subnetpool['id'])
        ext_gw_info = {'network_id': ext_net['id']}
        router = self.admin_client.create_router(
            'my-router',
            external_gateway_info=ext_gw_info,
            distributed=False)['router']
        self.admin_routers.append(router)
        self.admin_client.add_router_interface_with_subnet_id(
            router['id'],
            tenant_subnet['id'])
        self.admin_routerports.append({'router_id': router['id'],
                                       'subnet_id': tenant_subnet['id']})
        bgp_speaker = self.create_bgp_speaker(**self.default_bgp_speaker_args)
        bgp_speaker_id = bgp_speaker['bgp-speaker']['id']
        self.admin_client.add_bgp_gateway_network(bgp_speaker_id,
                                                  ext_net['id'])
        routes = self.admin_client.get_bgp_advertised_routes(bgp_speaker_id)
        self.assertEqual(1, len(routes['advertised_routes']))
        self.assertEqual(tenant_subnet['cidr'],
                         routes['advertised_routes'][0]['destination'])
        fixed_ip = router['external_gateway_info']['external_fixed_ips'][0]
        self.assertEqual(fixed_ip['ip_address'],
                         routes['advertised_routes'][0]['next_hop'])
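# Illustrative sketch (not from the neutron tree): the speaker/peer workflow
# the tests above walk through, condensed. `admin` is assumed to expose the
# same bgp speaker/peer calls used by BgpSpeakerTestJSONBase; payload shapes
# mirror its create_bgp_speaker/create_bgp_peer helpers.
def bgp_speaker_with_peer(admin):
    speaker = admin.create_bgp_speaker(
        {'bgp_speaker': {'local_as': '1234', 'ip_version': 4,
                         'name': 'example-speaker'}})['bgp-speaker']
    peer = admin.create_bgp_peer(
        {'bgp_peer': {'remote_as': '4321', 'name': 'example-peer',
                      'peer_ip': '192.168.1.1',
                      'auth_type': 'md5',
                      'password': 'example-secret'}})['bgp-peer']
    # Peering is a separate association step, as in test_add_bgp_peer.
    admin.add_bgp_peer_with_id(speaker['id'], peer['id'])
    return speaker, peer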
neutron-8.4.0/neutron/tests/api/test_qos.py0000664000567000056710000010752013044372760022201 0ustar jenkinsjenkins00000000000000# Copyright 2015 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from tempest.lib.common.utils import data_utils
from tempest.lib import exceptions
from tempest import test
import testtools

from neutron.services.qos import qos_consts
from neutron.tests.api import base


class QosTestJSON(base.BaseAdminNetworkTest):
    @classmethod
    def resource_setup(cls):
        super(QosTestJSON, cls).resource_setup()
        if not test.is_extension_enabled('qos', 'network'):
            msg = "qos extension not enabled."
            raise cls.skipException(msg)

    @test.attr(type='smoke')
    @test.idempotent_id('108fbdf7-3463-4e47-9871-d07f3dcf5bbb')
    def test_create_policy(self):
        policy = self.create_qos_policy(name='test-policy',
                                        description='test policy desc1',
                                        shared=False)

        # Test 'show policy'
        retrieved_policy = self.admin_client.show_qos_policy(policy['id'])
        retrieved_policy = retrieved_policy['policy']
        self.assertEqual('test-policy', retrieved_policy['name'])
        self.assertEqual('test policy desc1', retrieved_policy['description'])
        self.assertFalse(retrieved_policy['shared'])

        # Test 'list policies'
        policies = self.admin_client.list_qos_policies()['policies']
        policies_ids = [p['id'] for p in policies]
        self.assertIn(policy['id'], policies_ids)

    @test.attr(type='smoke')
    @test.idempotent_id('f8d20e92-f06d-4805-b54f-230f77715815')
    def test_list_policy_filter_by_name(self):
        self.create_qos_policy(name='test', description='test policy',
                               shared=False)
        self.create_qos_policy(name='test2', description='test policy',
                               shared=False)

        policies = (self.admin_client.
                    list_qos_policies(name='test')['policies'])
        self.assertEqual(1, len(policies))

        retrieved_policy = policies[0]
        self.assertEqual('test', retrieved_policy['name'])

    @test.attr(type='smoke')
    @test.idempotent_id('8e88a54b-f0b2-4b7d-b061-a15d93c2c7d6')
    def test_policy_update(self):
        policy = self.create_qos_policy(name='test-policy',
                                        description='',
                                        shared=False)
        self.admin_client.update_qos_policy(policy['id'],
                                            description='test policy desc2',
                                            shared=True)

        retrieved_policy = self.admin_client.show_qos_policy(policy['id'])
        retrieved_policy = retrieved_policy['policy']
        self.assertEqual('test policy desc2', retrieved_policy['description'])
        self.assertTrue(retrieved_policy['shared'])
        self.assertEqual([], retrieved_policy['rules'])

    @test.idempotent_id('6e880e0f-bbfc-4e54-87c6-680f90e1b618')
    def test_policy_update_forbidden_for_regular_tenants_own_policy(self):
        policy = self.create_qos_policy(name='test-policy',
                                        description='',
                                        shared=False,
                                        tenant_id=self.client.tenant_id)
        self.assertRaises(
            exceptions.Forbidden,
            self.client.update_qos_policy,
            policy['id'], description='test policy')

    @test.idempotent_id('4ecfd7e7-47b6-4702-be38-be9235901a87')
    def test_policy_update_forbidden_for_regular_tenants_foreign_policy(self):
        policy = self.create_qos_policy(name='test-policy',
                                        description='',
                                        shared=False,
                                        tenant_id=self.admin_client.tenant_id)
        self.assertRaises(
            exceptions.NotFound,
            self.client.update_qos_policy,
            policy['id'], description='test policy')

    @test.idempotent_id('ee263db4-009a-4641-83e5-d0e83506ba4c')
    def test_shared_policy_update(self):
        policy = self.create_qos_policy(name='test-policy',
                                        description='',
                                        shared=True)

        self.admin_client.update_qos_policy(policy['id'],
                                            description='test policy desc2')
        retrieved_policy = self.admin_client.show_qos_policy(policy['id'])
        retrieved_policy = retrieved_policy['policy']
        self.assertTrue(retrieved_policy['shared'])

        self.admin_client.update_qos_policy(policy['id'], shared=False)
        retrieved_policy = self.admin_client.show_qos_policy(policy['id'])
        retrieved_policy = retrieved_policy['policy']
        self.assertFalse(retrieved_policy['shared'])

    @test.attr(type='smoke')
    @test.idempotent_id('1cb42653-54bd-4a9a-b888-c55e18199201')
    def test_delete_policy(self):
        policy = self.admin_client.create_qos_policy(
            'test-policy', 'desc', True)['policy']

        retrieved_policy = self.admin_client.show_qos_policy(policy['id'])
        retrieved_policy = retrieved_policy['policy']
        self.assertEqual('test-policy', retrieved_policy['name'])

        self.admin_client.delete_qos_policy(policy['id'])
        self.assertRaises(exceptions.NotFound,
                          self.admin_client.show_qos_policy, policy['id'])

    @test.attr(type='smoke')
    @test.idempotent_id('cf776f77-8d3d-49f2-8572-12d6a1557224')
    def test_list_admin_rule_types(self):
        self._test_list_rule_types(self.admin_client)

    @test.attr(type='smoke')
    @test.idempotent_id('49c8ea35-83a9-453a-bd23-239cf3b13929')
    def test_list_regular_rule_types(self):
        self._test_list_rule_types(self.client)

    def _test_list_rule_types(self, client):
        # List supported rule types
        # TODO(QoS): since in gate we run both ovs and linuxbridge ml2
        # drivers, and since the Linux Bridge ml2 driver does not have QoS
        # support yet, the ml2 plugin reports that no rule types are
        # supported. Once linuxbridge receives support for QoS, the list of
        # expected rule types will change.
        #
        # In theory, we could make the test conditional on which ml2 drivers
        # are enabled in gate (or more specifically, on which supported qos
        # rules are claimed by the core plugin), but that option doesn't seem
        # to be available through the tempest.lib framework.
        expected_rule_types = []
        expected_rule_details = ['type']

        rule_types = client.list_qos_rule_types()
        actual_list_rule_types = rule_types['rule_types']
        actual_rule_types = [rule['type'] for rule in actual_list_rule_types]

        # Verify that only required fields present in rule details
        for rule in actual_list_rule_types:
            self.assertEqual(tuple(rule.keys()), tuple(expected_rule_details))

        # Verify if expected rules are present in the actual rules list
        for rule in expected_rule_types:
            self.assertIn(rule, actual_rule_types)

    def _disassociate_network(self, client, network_id):
        client.update_network(network_id, qos_policy_id=None)
        updated_network = self.admin_client.show_network(network_id)
        self.assertIsNone(updated_network['network']['qos_policy_id'])

    @test.attr(type='smoke')
    @test.idempotent_id('65b9ef75-1911-406a-bbdb-ca1d68d528b0')
    def test_policy_association_with_admin_network(self):
        policy = self.create_qos_policy(name='test-policy',
                                        description='test policy',
                                        shared=False)
        network = self.create_shared_network('test network',
                                             qos_policy_id=policy['id'])

        retrieved_network = self.admin_client.show_network(network['id'])
        self.assertEqual(
            policy['id'], retrieved_network['network']['qos_policy_id'])

        self._disassociate_network(self.admin_client, network['id'])

    @test.attr(type='smoke')
    @test.idempotent_id('1738de5d-0476-4163-9022-5e1b548c208e')
    def test_policy_association_with_tenant_network(self):
        policy = self.create_qos_policy(name='test-policy',
                                        description='test policy',
                                        shared=True)
        network = self.create_network('test network',
                                      qos_policy_id=policy['id'])

        retrieved_network = self.admin_client.show_network(network['id'])
        self.assertEqual(
            policy['id'], retrieved_network['network']['qos_policy_id'])

        self._disassociate_network(self.client, network['id'])

    @test.attr(type='smoke')
    @test.idempotent_id('9efe63d0-836f-4cc2-b00c-468e63aa614e')
    def test_policy_association_with_network_nonexistent_policy(self):
        self.assertRaises(
            exceptions.NotFound,
            self.create_network,
            'test network',
            qos_policy_id='9efe63d0-836f-4cc2-b00c-468e63aa614e')

    @test.attr(type='smoke')
    @test.idempotent_id('1aa55a79-324f-47d9-a076-894a8fc2448b')
    def test_policy_association_with_network_non_shared_policy(self):
        policy = self.create_qos_policy(name='test-policy',
                                        description='test policy',
                                        shared=False)
        self.assertRaises(
            exceptions.NotFound,
            self.create_network,
            'test network', qos_policy_id=policy['id'])
    @test.attr(type='smoke')
    @test.idempotent_id('09a9392c-1359-4cbb-989f-fb768e5834a8')
    def test_policy_update_association_with_admin_network(self):
        policy = self.create_qos_policy(name='test-policy',
                                        description='test policy',
                                        shared=False)
        network = self.create_shared_network('test network')

        retrieved_network = self.admin_client.show_network(network['id'])
        self.assertIsNone(retrieved_network['network']['qos_policy_id'])

        self.admin_client.update_network(network['id'],
                                         qos_policy_id=policy['id'])
        retrieved_network = self.admin_client.show_network(network['id'])
        self.assertEqual(
            policy['id'], retrieved_network['network']['qos_policy_id'])

        self._disassociate_network(self.admin_client, network['id'])

    def _disassociate_port(self, port_id):
        self.client.update_port(port_id, qos_policy_id=None)
        updated_port = self.admin_client.show_port(port_id)
        self.assertIsNone(updated_port['port']['qos_policy_id'])

    @test.attr(type='smoke')
    @test.idempotent_id('98fcd95e-84cf-4746-860e-44692e674f2e')
    def test_policy_association_with_port_shared_policy(self):
        policy = self.create_qos_policy(name='test-policy',
                                        description='test policy',
                                        shared=True)
        network = self.create_shared_network('test network')
        port = self.create_port(network, qos_policy_id=policy['id'])

        retrieved_port = self.admin_client.show_port(port['id'])
        self.assertEqual(
            policy['id'], retrieved_port['port']['qos_policy_id'])

        self._disassociate_port(port['id'])

    @test.attr(type='smoke')
    @test.idempotent_id('49e02f5a-e1dd-41d5-9855-cfa37f2d195e')
    def test_policy_association_with_port_nonexistent_policy(self):
        network = self.create_shared_network('test network')
        self.assertRaises(
            exceptions.NotFound,
            self.create_port,
            network,
            qos_policy_id='49e02f5a-e1dd-41d5-9855-cfa37f2d195e')

    @test.attr(type='smoke')
    @test.idempotent_id('f53d961c-9fe5-4422-8b66-7add972c6031')
    def test_policy_association_with_port_non_shared_policy(self):
        policy = self.create_qos_policy(name='test-policy',
                                        description='test policy',
                                        shared=False)
        network = self.create_shared_network('test network')
        self.assertRaises(
            exceptions.NotFound,
            self.create_port,
            network, qos_policy_id=policy['id'])

    @test.attr(type='smoke')
    @test.idempotent_id('f8163237-fba9-4db5-9526-bad6d2343c76')
    def test_policy_update_association_with_port_shared_policy(self):
        policy = self.create_qos_policy(name='test-policy',
                                        description='test policy',
                                        shared=True)
        network = self.create_shared_network('test network')
        port = self.create_port(network)

        retrieved_port = self.admin_client.show_port(port['id'])
        self.assertIsNone(retrieved_port['port']['qos_policy_id'])

        self.client.update_port(port['id'], qos_policy_id=policy['id'])
        retrieved_port = self.admin_client.show_port(port['id'])
        self.assertEqual(
            policy['id'], retrieved_port['port']['qos_policy_id'])

        self._disassociate_port(port['id'])

    @test.attr(type='smoke')
    @test.idempotent_id('18163237-8ba9-4db5-9525-bad6d2343c75')
    def test_delete_not_allowed_if_policy_in_use_by_network(self):
        policy = self.create_qos_policy(name='test-policy',
                                        description='test policy',
                                        shared=True)
        network = self.create_shared_network(
            'test network', qos_policy_id=policy['id'])
        self.assertRaises(
            exceptions.Conflict,
            self.admin_client.delete_qos_policy, policy['id'])

        self._disassociate_network(self.admin_client, network['id'])
        self.admin_client.delete_qos_policy(policy['id'])

    @test.attr(type='smoke')
    @test.idempotent_id('24153230-84a9-4dd5-9525-bad6d2343c75')
    def test_delete_not_allowed_if_policy_in_use_by_port(self):
        policy = self.create_qos_policy(name='test-policy',
                                        description='test policy',
                                        shared=True)
        network = self.create_shared_network('test network')
        port = self.create_port(network, qos_policy_id=policy['id'])
        self.assertRaises(
            exceptions.Conflict,
            self.admin_client.delete_qos_policy, policy['id'])

        self._disassociate_port(port['id'])
        self.admin_client.delete_qos_policy(policy['id'])

    @test.attr(type='smoke')
    @test.idempotent_id('a2a5849b-dd06-4b18-9664-0b6828a1fc27')
    def test_qos_policy_delete_with_rules(self):
        policy = self.create_qos_policy(name='test-policy',
                                        description='test policy',
                                        shared=False)
        self.admin_client.create_bandwidth_limit_rule(
            policy['id'], 200, 1337)['bandwidth_limit_rule']

        self.admin_client.delete_qos_policy(policy['id'])

        with testtools.ExpectedException(exceptions.NotFound):
            self.admin_client.show_qos_policy(policy['id'])

    @test.idempotent_id('fb384bde-a973-41c3-a542-6f77a092155f')
    def test_get_policy_that_is_shared(self):
        policy = self.create_qos_policy(
            name='test-policy-shared',
            description='shared policy',
            shared=True,
            tenant_id=self.admin_client.tenant_id)
        obtained_policy = self.client.show_qos_policy(policy['id'])['policy']
        self.assertEqual(obtained_policy, policy)


class QosBandwidthLimitRuleTestJSON(base.BaseAdminNetworkTest):
    @classmethod
    def resource_setup(cls):
        super(QosBandwidthLimitRuleTestJSON, cls).resource_setup()
        if not test.is_extension_enabled('qos', 'network'):
            msg = "qos extension not enabled."
            raise cls.skipException(msg)

    @test.attr(type='smoke')
    @test.idempotent_id('8a59b00b-3e9c-4787-92f8-93a5cdf5e378')
    def test_rule_create(self):
        policy = self.create_qos_policy(name='test-policy',
                                        description='test policy',
                                        shared=False)
        rule = self.create_qos_bandwidth_limit_rule(policy_id=policy['id'],
                                                    max_kbps=200,
                                                    max_burst_kbps=1337)

        # Test 'show rule'
        retrieved_rule = self.admin_client.show_bandwidth_limit_rule(
            policy['id'], rule['id'])
        retrieved_rule = retrieved_rule['bandwidth_limit_rule']
        self.assertEqual(rule['id'], retrieved_rule['id'])
        self.assertEqual(200, retrieved_rule['max_kbps'])
        self.assertEqual(1337, retrieved_rule['max_burst_kbps'])

        # Test 'list rules'
        rules = self.admin_client.list_bandwidth_limit_rules(policy['id'])
        rules = rules['bandwidth_limit_rules']
        rules_ids = [r['id'] for r in rules]
        self.assertIn(rule['id'], rules_ids)

        # Test 'show policy'
        retrieved_policy = self.admin_client.show_qos_policy(policy['id'])
        policy_rules = retrieved_policy['policy']['rules']
        self.assertEqual(1, len(policy_rules))
        self.assertEqual(rule['id'], policy_rules[0]['id'])
        self.assertEqual(qos_consts.RULE_TYPE_BANDWIDTH_LIMIT,
                         policy_rules[0]['type'])

    @test.attr(type='smoke')
    @test.idempotent_id('8a59b00b-ab01-4787-92f8-93a5cdf5e378')
    def test_rule_create_fail_for_the_same_type(self):
        policy = self.create_qos_policy(name='test-policy',
                                        description='test policy',
                                        shared=False)
        self.create_qos_bandwidth_limit_rule(policy_id=policy['id'],
                                             max_kbps=200,
                                             max_burst_kbps=1337)

        self.assertRaises(exceptions.Conflict,
                          self.create_qos_bandwidth_limit_rule,
                          policy_id=policy['id'],
                          max_kbps=201, max_burst_kbps=1338)

    @test.attr(type='smoke')
    @test.idempotent_id('149a6988-2568-47d2-931e-2dbc858943b3')
    def test_rule_update(self):
        policy = self.create_qos_policy(name='test-policy',
                                        description='test policy',
                                        shared=False)
        rule = self.create_qos_bandwidth_limit_rule(policy_id=policy['id'],
                                                    max_kbps=1,
                                                    max_burst_kbps=1)

        self.admin_client.update_bandwidth_limit_rule(policy['id'],
                                                      rule['id'],
                                                      max_kbps=200,
                                                      max_burst_kbps=1337)

        retrieved_policy = self.admin_client.show_bandwidth_limit_rule(
            policy['id'], rule['id'])
        retrieved_policy = retrieved_policy['bandwidth_limit_rule']
        self.assertEqual(200, retrieved_policy['max_kbps'])
        self.assertEqual(1337, retrieved_policy['max_burst_kbps'])

    @test.attr(type='smoke')
    @test.idempotent_id('67ee6efd-7b33-4a68-927d-275b4f8ba958')
    def test_rule_delete(self):
        policy = self.create_qos_policy(name='test-policy',
                                        description='test policy',
                                        shared=False)
        rule = self.admin_client.create_bandwidth_limit_rule(
            policy['id'], 200, 1337)['bandwidth_limit_rule']

        retrieved_policy = self.admin_client.show_bandwidth_limit_rule(
            policy['id'], rule['id'])
        retrieved_policy = retrieved_policy['bandwidth_limit_rule']
        self.assertEqual(rule['id'], retrieved_policy['id'])

        self.admin_client.delete_bandwidth_limit_rule(policy['id'],
                                                      rule['id'])
        self.assertRaises(exceptions.NotFound,
                          self.admin_client.show_bandwidth_limit_rule,
                          policy['id'], rule['id'])

    @test.attr(type='smoke')
    @test.idempotent_id('f211222c-5808-46cb-a961-983bbab6b852')
    def test_rule_create_rule_nonexistent_policy(self):
        self.assertRaises(
            exceptions.NotFound,
            self.create_qos_bandwidth_limit_rule,
            'policy', 200, 1337)

    @test.attr(type='smoke')
    @test.idempotent_id('eed8e2a6-22da-421b-89b9-935a2c1a1b50')
    def test_policy_create_forbidden_for_regular_tenants(self):
        self.assertRaises(
            exceptions.Forbidden,
            self.client.create_qos_policy,
            'test-policy', 'test policy', False)

    @test.attr(type='smoke')
    @test.idempotent_id('a4a2e7ad-786f-4927-a85a-e545a93bd274')
    def test_rule_create_forbidden_for_regular_tenants(self):
        self.assertRaises(
            exceptions.Forbidden,
            self.client.create_bandwidth_limit_rule,
            'policy', 1, 2)

    @test.idempotent_id('1bfc55d9-6fd8-4293-ab3a-b1d69bf7cd2e')
    def test_rule_update_forbidden_for_regular_tenants_own_policy(self):
        policy = self.create_qos_policy(name='test-policy',
                                        description='test policy',
                                        shared=False,
                                        tenant_id=self.client.tenant_id)
        rule = self.create_qos_bandwidth_limit_rule(policy_id=policy['id'],
                                                    max_kbps=1,
                                                    max_burst_kbps=1)
        self.assertRaises(
            exceptions.NotFound,
            self.client.update_bandwidth_limit_rule,
            policy['id'], rule['id'], max_kbps=2, max_burst_kbps=4)

    @test.idempotent_id('9a607936-4b6f-4c2f-ad21-bd5b3d4fc91f')
    def test_rule_update_forbidden_for_regular_tenants_foreign_policy(self):
        policy = self.create_qos_policy(name='test-policy',
                                        description='test policy',
                                        shared=False,
                                        tenant_id=self.admin_client.tenant_id)
        rule = self.create_qos_bandwidth_limit_rule(policy_id=policy['id'],
                                                    max_kbps=1,
                                                    max_burst_kbps=1)
        self.assertRaises(
            exceptions.NotFound,
            self.client.update_bandwidth_limit_rule,
            policy['id'], rule['id'], max_kbps=2, max_burst_kbps=4)

    @test.attr(type='smoke')
    @test.idempotent_id('ce0bd0c2-54d9-4e29-85f1-cfb36ac3ebe2')
    def test_get_rules_by_policy(self):
        policy1 = self.create_qos_policy(name='test-policy1',
                                         description='test policy1',
                                         shared=False)
        rule1 = self.create_qos_bandwidth_limit_rule(policy_id=policy1['id'],
                                                     max_kbps=200,
                                                     max_burst_kbps=1337)

        policy2 = self.create_qos_policy(name='test-policy2',
                                         description='test policy2',
                                         shared=False)
        rule2 = self.create_qos_bandwidth_limit_rule(policy_id=policy2['id'],
                                                     max_kbps=5000,
                                                     max_burst_kbps=2523)

        # Test 'list rules'
        rules = self.admin_client.list_bandwidth_limit_rules(policy1['id'])
        rules = rules['bandwidth_limit_rules']
        rules_ids = [r['id'] for r in rules]
        self.assertIn(rule1['id'], rules_ids)
        self.assertNotIn(rule2['id'], rules_ids)


class RbacSharedQosPoliciesTest(base.BaseAdminNetworkTest):

    force_tenant_isolation = True
    credentials = ['primary', 'alt', 'admin']

    @classmethod
    def resource_setup(cls):
        super(RbacSharedQosPoliciesTest, cls).resource_setup()
        if not test.is_extension_enabled('qos', 'network'):
            msg = "qos extension not enabled."
            raise cls.skipException(msg)
        cls.client2 = cls.alt_manager.network_client

    def _create_qos_policy(self, tenant_id=None):
        args = {'name': data_utils.rand_name('test-policy'),
                'description': 'test policy',
                'shared': False,
                'tenant_id': tenant_id}
        qos_policy = self.admin_client.create_qos_policy(**args)['policy']
        self.addCleanup(self.admin_client.delete_qos_policy, qos_policy['id'])

        return qos_policy

    def _make_admin_policy_shared_to_tenant_id(self, tenant_id):
        policy = self._create_qos_policy()
        rbac_policy = self.admin_client.create_rbac_policy(
            object_type='qos_policy',
            object_id=policy['id'],
            action='access_as_shared',
            target_tenant=tenant_id,
        )['rbac_policy']

        return {'policy': policy, 'rbac_policy': rbac_policy}

    def _create_network(self, qos_policy_id, client, should_cleanup=True):
        net = client.create_network(
            name=data_utils.rand_name('test-network'),
            qos_policy_id=qos_policy_id)['network']
        if should_cleanup:
            self.addCleanup(client.delete_network, net['id'])

        return net

    @test.idempotent_id('b9dcf582-d3b3-11e5-950a-54ee756c66df')
    def test_policy_sharing_with_wildcard(self):
        qos_pol = self.create_qos_policy(
            name=data_utils.rand_name('test-policy'),
            description='test-shared-policy', shared=False)
        self.assertNotIn(qos_pol, self.client2.list_qos_policies()['policies'])

        # test update shared False -> True
        self.admin_client.update_qos_policy(qos_pol['id'], shared=True)
        qos_pol['shared'] = True
        self.client2.show_qos_policy(qos_pol['id'])
        rbac_pol = {'target_tenant': '*',
                    'tenant_id': self.admin_client.tenant_id,
                    'object_type': 'qos_policy',
                    'object_id': qos_pol['id'],
                    'action': 'access_as_shared'}

        rbac_policies = self.admin_client.list_rbac_policies()['rbac_policies']
        rbac_policies = [r for r in rbac_policies if r.pop('id')]
        self.assertIn(rbac_pol, rbac_policies)

        # update shared True -> False should fail because the policy is bound
        # to a network
        net = self._create_network(qos_pol['id'], self.admin_client, False)
        with testtools.ExpectedException(exceptions.Conflict):
            self.admin_client.update_qos_policy(qos_pol['id'], shared=False)

        # delete the network, and update shared True -> False should pass now
        self.admin_client.delete_network(net['id'])
        self.admin_client.update_qos_policy(qos_pol['id'], shared=False)
        qos_pol['shared'] = False
        self.assertNotIn(qos_pol, self.client2.list_qos_policies()['policies'])

    def _create_net_bound_qos_rbacs(self):
        res = self._make_admin_policy_shared_to_tenant_id(
            self.client.tenant_id)
        qos_policy, rbac_for_client_tenant = res['policy'], res['rbac_policy']

        # add a wildcard rbac rule - now the policy is globally shared
        rbac_wildcard = self.admin_client.create_rbac_policy(
            object_type='qos_policy',
            object_id=qos_policy['id'],
            action='access_as_shared',
            target_tenant='*',
        )['rbac_policy']

        # tenant1 now uses qos policy for net
        self._create_network(qos_policy['id'], self.client)

        return rbac_for_client_tenant, rbac_wildcard

    @test.idempotent_id('328b1f70-d424-11e5-a57f-54ee756c66df')
    def test_net_bound_shared_policy_wildcard_and_tenant_id_wild_remove(self):
        client_rbac, wildcard_rbac = self._create_net_bound_qos_rbacs()
        # globally unshare the qos-policy; the specific share should remain
        self.admin_client.delete_rbac_policy(wildcard_rbac['id'])
        self.client.list_rbac_policies(id=client_rbac['id'])

    @test.idempotent_id('328b1f70-d424-11e5-a57f-54ee756c66df')
    def test_net_bound_shared_policy_wildcard_and_tenant_id_wild_remains(self):
        client_rbac, wildcard_rbac = self._create_net_bound_qos_rbacs()
        # remove the client_rbac policy; the wildcard share should remain
        self.admin_client.delete_rbac_policy(client_rbac['id'])
        self.client.list_rbac_policies(id=wildcard_rbac['id'])

    @test.idempotent_id('2ace9adc-da6e-11e5-aafe-54ee756c66df')
    def test_policy_sharing_with_wildcard_and_tenant_id(self):
        res = self._make_admin_policy_shared_to_tenant_id(
            self.client.tenant_id)
        qos_policy, rbac = res['policy'], res['rbac_policy']
        qos_pol = self.client.show_qos_policy(qos_policy['id'])['policy']
        self.assertTrue(qos_pol['shared'])
        with testtools.ExpectedException(exceptions.NotFound):
            self.client2.show_qos_policy(qos_policy['id'])

        # make the qos-policy globally shared
        self.admin_client.update_qos_policy(qos_policy['id'], shared=True)
        qos_pol = self.client2.show_qos_policy(qos_policy['id'])['policy']
        self.assertTrue(qos_pol['shared'])

        # globally unshare the qos-policy, the specific share should remain
        self.admin_client.update_qos_policy(qos_policy['id'], shared=False)
        self.client.show_qos_policy(qos_policy['id'])
        with testtools.ExpectedException(exceptions.NotFound):
            self.client2.show_qos_policy(qos_policy['id'])
        self.assertIn(rbac,
                      self.admin_client.list_rbac_policies()['rbac_policies'])

    @test.idempotent_id('9f85c76a-a350-11e5-8ae5-54ee756c66df')
    def test_policy_target_update(self):
        res = self._make_admin_policy_shared_to_tenant_id(
            self.client.tenant_id)
        # change to client2
        update_res = self.admin_client.update_rbac_policy(
            res['rbac_policy']['id'], target_tenant=self.client2.tenant_id)
        self.assertEqual(self.client2.tenant_id,
                         update_res['rbac_policy']['target_tenant'])
        # make sure everything else stayed the same
        res['rbac_policy'].pop('target_tenant')
        update_res['rbac_policy'].pop('target_tenant')
        self.assertEqual(res['rbac_policy'], update_res['rbac_policy'])

    @test.idempotent_id('a9b39f46-a350-11e5-97c7-54ee756c66df')
    def test_network_presence_prevents_policy_rbac_policy_deletion(self):
        res = self._make_admin_policy_shared_to_tenant_id(
            self.client2.tenant_id)
        qos_policy_id = res['policy']['id']
        self._create_network(qos_policy_id, self.client2)
        # a network with shared qos-policy should prevent the deletion of an
        # rbac-policy required for it to be shared
        with testtools.ExpectedException(exceptions.Conflict):
            self.admin_client.delete_rbac_policy(res['rbac_policy']['id'])

        # a wildcard policy should allow the specific policy to be deleted
        # since it allows the remaining port
        wild = self.admin_client.create_rbac_policy(
            object_type='qos_policy', object_id=res['policy']['id'],
            action='access_as_shared', target_tenant='*')['rbac_policy']
        self.admin_client.delete_rbac_policy(res['rbac_policy']['id'])

        # now that wildcard is the only remaining, it should be subjected to
        # the same restriction
        with testtools.ExpectedException(exceptions.Conflict):
            self.admin_client.delete_rbac_policy(wild['id'])

        # we can't update the policy to a different tenant
        with testtools.ExpectedException(exceptions.Conflict):
            self.admin_client.update_rbac_policy(
                wild['id'], target_tenant=self.client2.tenant_id)

    @test.idempotent_id('b0fe87e8-a350-11e5-9f08-54ee756c66df')
    def test_regular_client_shares_to_another_regular_client(self):
        # owned by self.admin_client
        policy = self._create_qos_policy()
        with testtools.ExpectedException(exceptions.NotFound):
            self.client.show_qos_policy(policy['id'])
        rbac_policy = self.admin_client.create_rbac_policy(
            object_type='qos_policy', object_id=policy['id'],
            action='access_as_shared',
            target_tenant=self.client.tenant_id)['rbac_policy']
        self.client.show_qos_policy(policy['id'])

        self.assertIn(rbac_policy,
                      self.admin_client.list_rbac_policies()['rbac_policies'])
        # ensure that 'client2' can't see the rbac-policy sharing the
        # qos-policy to it because the rbac-policy belongs to 'client'
        self.assertNotIn(rbac_policy['id'], [p['id'] for p in
                         self.client2.list_rbac_policies()['rbac_policies']])

    @test.idempotent_id('ba88d0ca-a350-11e5-a06f-54ee756c66df')
    def test_filter_fields(self):
        policy = self._create_qos_policy()
        self.admin_client.create_rbac_policy(
            object_type='qos_policy', object_id=policy['id'],
            action='access_as_shared', target_tenant=self.client2.tenant_id)
        field_args = (('id',), ('id', 'action'), ('object_type', 'object_id'),
                      ('tenant_id', 'target_tenant'))
        for fields in field_args:
            res = self.admin_client.list_rbac_policies(fields=fields)
            self.assertEqual(set(fields), set(res['rbac_policies'][0].keys()))

    @test.idempotent_id('c10d993a-a350-11e5-9c7a-54ee756c66df')
    def test_rbac_policy_show(self):
        res = self._make_admin_policy_shared_to_tenant_id(
            self.client.tenant_id)
        p1 = res['rbac_policy']
        p2 = self.admin_client.create_rbac_policy(
            object_type='qos_policy', object_id=res['policy']['id'],
            action='access_as_shared',
            target_tenant='*')['rbac_policy']

        self.assertEqual(
            p1, self.admin_client.show_rbac_policy(p1['id'])['rbac_policy'])
        self.assertEqual(
            p2, self.admin_client.show_rbac_policy(p2['id'])['rbac_policy'])

    @test.idempotent_id('c7496f86-a350-11e5-b380-54ee756c66df')
    def test_filter_rbac_policies(self):
        policy = self._create_qos_policy()
        rbac_pol1 = self.admin_client.create_rbac_policy(
            object_type='qos_policy', object_id=policy['id'],
            action='access_as_shared',
            target_tenant=self.client2.tenant_id)['rbac_policy']
        rbac_pol2 = self.admin_client.create_rbac_policy(
            object_type='qos_policy', object_id=policy['id'],
            action='access_as_shared',
            target_tenant=self.admin_client.tenant_id)['rbac_policy']
        res1 = self.admin_client.list_rbac_policies(id=rbac_pol1['id'])[
            'rbac_policies']
        res2 = self.admin_client.list_rbac_policies(id=rbac_pol2['id'])[
            'rbac_policies']
        self.assertEqual(1, len(res1))
        self.assertEqual(1, len(res2))
        self.assertEqual(rbac_pol1['id'], res1[0]['id'])
        self.assertEqual(rbac_pol2['id'], res2[0]['id'])

    @test.idempotent_id('cd7d755a-a350-11e5-a344-54ee756c66df')
    def test_regular_client_blocked_from_sharing_anothers_policy(self):
        qos_policy = self._make_admin_policy_shared_to_tenant_id(
            self.client.tenant_id)['policy']
        with testtools.ExpectedException(exceptions.BadRequest):
            self.client.create_rbac_policy(
                object_type='qos_policy', object_id=qos_policy['id'],
                action='access_as_shared',
                target_tenant=self.client2.tenant_id)

        # make sure the rbac-policy is invisible to the tenant for which it's
        # being shared
        self.assertFalse(self.client.list_rbac_policies()['rbac_policies'])
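# Illustrative sketch (not from the neutron tree): the typical QoS flow the
# tests above cover - create a policy, attach a bandwidth-limit rule, bind it
# to a port, and unbind before deleting (deleting a bound policy returns
# 409 Conflict). `admin` is assumed to expose the admin client calls used in
# QosTestJSON and QosBandwidthLimitRuleTestJSON.
def apply_bandwidth_limit(admin, port_id, max_kbps=200, max_burst_kbps=1337):
    policy = admin.create_qos_policy('example-policy', 'example',
                                     True)['policy']
    admin.create_bandwidth_limit_rule(policy['id'], max_kbps, max_burst_kbps)
    admin.update_port(port_id, qos_policy_id=policy['id'])
    return policy['id']


def remove_bandwidth_limit(admin, port_id, policy_id):
    admin.update_port(port_id, qos_policy_id=None)  # unbind first
    admin.delete_qos_policy(policy_id)              # now deletion succeeds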
neutron-8.4.0/neutron/tests/api/test_flavors_extensions.py0000664000567000056710000001572113044372760025333 0ustar jenkinsjenkins00000000000000# Copyright 2015 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from tempest.lib import exceptions as lib_exc
from tempest import test

from neutron.tests.api import base


class TestFlavorsJson(base.BaseAdminNetworkTest):

    """
    Tests the following operations in the Neutron API using the REST client
    for Neutron:

        List, Show, Create, Update, Delete Flavors
        List, Show, Create, Update, Delete service profiles
    """

    @classmethod
    def resource_setup(cls):
        super(TestFlavorsJson, cls).resource_setup()
        if not test.is_extension_enabled('flavors', 'network'):
            msg = "flavors extension not enabled."
            raise cls.skipException(msg)

        # Use the flavors service type, as we know this is loaded
        service_type = "FLAVORS"
        description_flavor = "flavor is created by tempest"
        name_flavor = "Best flavor created by tempest"

        # The check above will pass if api_extensions=all, which does
        # not mean the flavors extension itself is present.
        try:
            cls.flavor = cls.create_flavor(name_flavor, description_flavor,
                                           service_type)
        except lib_exc.NotFound:
            msg = "flavors plugin not enabled."
            raise cls.skipException(msg)

        description_sp = "service profile created by tempest"
        # Drivers are supported, as is an empty driver field. Use an
        # empty field for now since otherwise the driver is validated against
        # the servicetype configuration, which may differ in test scenarios.
        driver = ""
        metainfo = '{"data": "value"}'
        cls.service_profile = cls.create_service_profile(
            description=description_sp, metainfo=metainfo, driver=driver)

    def _delete_service_profile(self, service_profile_id):
        # Deletes a service profile and verifies if it is deleted or not
        self.admin_client.delete_service_profile(service_profile_id)
        # Asserting that service profile is not found in list after deletion
        labels = self.admin_client.list_service_profiles(
            id=service_profile_id)
        self.assertEqual(len(labels['service_profiles']), 0)

    @test.attr(type='smoke')
    @test.idempotent_id('ec8e15ff-95d0-433b-b8a6-b466bddb1e50')
    def test_create_update_delete_service_profile(self):
        # Creates a service profile
        description = "service_profile created by tempest"
        driver = ""
        metainfo = '{"data": "value"}'
        body = self.admin_client.create_service_profile(
            description=description, driver=driver, metainfo=metainfo)
        service_profile = body['service_profile']
        # Updates a service profile
        self.admin_client.update_service_profile(service_profile['id'],
                                                 enabled=False)
        self.assertTrue(service_profile['enabled'])
        # Deletes a service profile
        self.addCleanup(self._delete_service_profile,
                        service_profile['id'])
        # Assert whether created service profiles are found in service profile
        # lists or fail if created service profiles are not found in service
        # profiles list
        labels = (self.admin_client.list_service_profiles(
            id=service_profile['id']))
        self.assertEqual(len(labels['service_profiles']), 1)

    @test.attr(type='smoke')
    @test.idempotent_id('ec8e15ff-95d0-433b-b8a6-b466bddb1e50')
    def test_create_update_delete_flavor(self):
        # Creates a flavor
        description = "flavor created by tempest"
        service = "FLAVORS"
        name = "Best flavor created by tempest"
        body = self.admin_client.create_flavor(name=name,
                                               service_type=service,
                                               description=description)
        flavor = body['flavor']
        # Updates a flavor
        self.admin_client.update_flavor(flavor['id'], enabled=False)
        self.assertTrue(flavor['enabled'])
        # Deletes a flavor
        self.addCleanup(self._delete_flavor, flavor['id'])
        # Assert whether created flavors are found in flavor lists or fail
        # if created flavors are not found in flavors list
        labels = (self.admin_client.list_flavors(id=flavor['id']))
        self.assertEqual(len(labels['flavors']), 1)

    @test.attr(type='smoke')
    @test.idempotent_id('30abb445-0eea-472e-bd02-8649f54a5968')
    def test_show_service_profile(self):
        # Verifies the details of a service profile
        body = self.admin_client.show_service_profile(
            self.service_profile['id'])
        service_profile = body['service_profile']
        self.assertEqual(self.service_profile['id'], service_profile['id'])
        self.assertEqual(self.service_profile['description'],
                         service_profile['description'])
        self.assertEqual(self.service_profile['metainfo'],
                         service_profile['metainfo'])
        self.assertTrue(service_profile['enabled'])

    @test.attr(type='smoke')
    @test.idempotent_id('30abb445-0eea-472e-bd02-8649f54a5968')
    def test_show_flavor(self):
        # Verifies the details of a flavor
        body = self.admin_client.show_flavor(self.flavor['id'])
        flavor = body['flavor']
        self.assertEqual(self.flavor['id'], flavor['id'])
        self.assertEqual(self.flavor['description'], flavor['description'])
        self.assertEqual(self.flavor['name'], flavor['name'])
        self.assertTrue(flavor['enabled'])

    @test.attr(type='smoke')
    @test.idempotent_id('e2fb2f8c-45bf-429a-9f17-171c70444612')
    def test_list_flavors(self):
        # Verify flavor lists
        body = self.admin_client.list_flavors(id=33)
        flavors = body['flavors']
        self.assertEqual(0, len(flavors))

    @test.attr(type='smoke')
    @test.idempotent_id('e2fb2f8c-45bf-429a-9f17-171c70444612')
    def test_list_service_profiles(self):
        # Verify service profiles lists
        body = self.admin_client.list_service_profiles(id=33)
        service_profiles = body['service_profiles']
        self.assertEqual(0, len(service_profiles))

    def _delete_flavor(self, flavor_id):
        # Deletes a flavor and verifies if it is deleted or not
        self.admin_client.delete_flavor(flavor_id)
        # Asserting that the flavor is not found in list after deletion
        labels = self.admin_client.list_flavors(id=flavor_id)
        self.assertEqual(len(labels['flavors']), 0)


class TestFlavorsIpV6TestJSON(TestFlavorsJson):
    _ip_version = 6
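# Illustrative sketch (not from the neutron tree): the flavor/service-profile
# creation pairing used throughout TestFlavorsJson, condensed into one
# helper. `admin` is assumed to expose the same admin client methods the
# tests use; the empty driver mirrors the validation note above.
def make_flavor_and_profile(admin):
    flavor = admin.create_flavor(name='example-flavor',
                                 service_type='FLAVORS',
                                 description='created for '
                                             'illustration')['flavor']
    profile = admin.create_service_profile(
        description='created for illustration',
        driver='',  # empty driver avoids servicetype validation, as above
        metainfo='{"data": "value"}')['service_profile']
    return flavor, profile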
neutron-8.4.0/neutron/tests/api/test_allowed_address_pair.py0000664000567000056710000001300113044372760025544 0ustar jenkinsjenkins00000000000000# Copyright 2014 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import netaddr
from tempest import test

from neutron.tests.api import base
from neutron.tests.tempest import config

CONF = config.CONF


class AllowedAddressPairTestJSON(base.BaseNetworkTest):

    """
    Tests the Neutron Allowed Address Pair API extension using the Tempest
    REST client. The following API operations are tested with this extension:

        create port
        list ports
        update port
        show port

    v2.0 of the Neutron API is assumed. It is also assumed that the following
    options are defined in the [network-feature-enabled] section of
    etc/tempest.conf

        api_extensions
    """

    @classmethod
    def resource_setup(cls):
        super(AllowedAddressPairTestJSON, cls).resource_setup()
        if not test.is_extension_enabled('allowed-address-pairs', 'network'):
            msg = "Allowed Address Pairs extension not enabled."
            raise cls.skipException(msg)
        cls.network = cls.create_network()
        cls.create_subnet(cls.network)
        port = cls.create_port(cls.network)
        cls.ip_address = port['fixed_ips'][0]['ip_address']
        cls.mac_address = port['mac_address']

    @test.attr(type='smoke')
    @test.idempotent_id('86c3529b-1231-40de-803c-00e40882f043')
    def test_create_list_port_with_address_pair(self):
        # Create port with allowed address pair attribute
        allowed_address_pairs = [{'ip_address': self.ip_address,
                                  'mac_address': self.mac_address}]
        body = self.client.create_port(
            network_id=self.network['id'],
            allowed_address_pairs=allowed_address_pairs)
        port_id = body['port']['id']
        self.addCleanup(self.client.delete_port, port_id)

        # Confirm port was created with allowed address pair attribute
        body = self.client.list_ports()
        ports = body['ports']
        port = [p for p in ports if p['id'] == port_id]
        msg = 'Created port not found in list of ports returned by Neutron'
        self.assertTrue(port, msg)
        self._confirm_allowed_address_pair(port[0], self.ip_address)

    @test.attr(type='smoke')
    def _update_port_with_address(self, address, mac_address=None, **kwargs):
        # Create a port without allowed address pair
        body = self.client.create_port(network_id=self.network['id'])
        port_id = body['port']['id']
        self.addCleanup(self.client.delete_port, port_id)
        if mac_address is None:
            mac_address = self.mac_address

        # Update allowed address pair attribute of port
        allowed_address_pairs = [{'ip_address': address,
                                  'mac_address': mac_address}]
        if kwargs:
            allowed_address_pairs.append(kwargs['allowed_address_pairs'])
        body = self.client.update_port(
            port_id, allowed_address_pairs=allowed_address_pairs)
        allowed_address_pair = body['port']['allowed_address_pairs']
        self.assertEqual(allowed_address_pair, allowed_address_pairs)

    @test.attr(type='smoke')
    @test.idempotent_id('9599b337-272c-47fd-b3cf-509414414ac4')
    def test_update_port_with_address_pair(self):
        # Update port with allowed address pair
        self._update_port_with_address(self.ip_address)

    @test.attr(type='smoke')
    @test.idempotent_id('4d6d178f-34f6-4bff-a01c-0a2f8fe909e4')
    def test_update_port_with_cidr_address_pair(self):
        # Update allowed address pair with cidr
        cidr = str(
            netaddr.IPNetwork(config.safe_get_config_value(
                'network', 'project_network_cidr')))
        self._update_port_with_address(cidr)

    @test.attr(type='smoke')
    @test.idempotent_id('b3f20091-6cd5-472b-8487-3516137df933')
    def test_update_port_with_multiple_ip_mac_address_pair(self):
        # Create an ip_address and mac_address through port create
        resp = self.client.create_port(network_id=self.network['id'])
        newportid = resp['port']['id']
        self.addCleanup(self.client.delete_port, newportid)
        ipaddress = resp['port']['fixed_ips'][0]['ip_address']
        macaddress = resp['port']['mac_address']

        # Update allowed address pair port with multiple ip and mac
        allowed_address_pairs = {'ip_address': ipaddress,
                                 'mac_address': macaddress}
        self._update_port_with_address(
            self.ip_address, self.mac_address,
            allowed_address_pairs=allowed_address_pairs)

    def _confirm_allowed_address_pair(self, port, ip):
        msg = 'Port allowed address pairs should not be empty'
        self.assertTrue(port['allowed_address_pairs'], msg)
        ip_address = port['allowed_address_pairs'][0]['ip_address']
        mac_address = port['allowed_address_pairs'][0]['mac_address']
        self.assertEqual(ip_address, ip)
        self.assertEqual(mac_address, self.mac_address)


class AllowedAddressPairIpV6TestJSON(AllowedAddressPairTestJSON):
    _ip_version = 6
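# Illustrative sketch (not from the neutron tree): allowed-address-pairs is
# just a port attribute, so permitting an extra IP/MAC on an existing port is
# a single update_port call. `client` is assumed to expose the port methods
# used in the tests above.
def permit_address_on_port(client, port_id, ip_address, mac_address):
    pairs = [{'ip_address': ip_address, 'mac_address': mac_address}]
    body = client.update_port(port_id, allowed_address_pairs=pairs)
    return body['port']['allowed_address_pairs']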
jenkinsjenkins00000000000000# Copyright 2013 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from tempest.lib import exceptions as lib_exc from tempest import test from neutron.tests.api import base_security_groups as base from neutron.tests.tempest import config CONF = config.CONF class NegativeSecGroupTest(base.BaseSecGroupTest): @classmethod def resource_setup(cls): super(NegativeSecGroupTest, cls).resource_setup() if not test.is_extension_enabled('security-group', 'network'): msg = "security-group extension not enabled." raise cls.skipException(msg) @test.attr(type=['negative', 'gate']) @test.idempotent_id('0d9c7791-f2ad-4e2f-ac73-abf2373b0d2d') def test_create_security_group_rule_with_invalid_ports(self): group_create_body, _ = self._create_security_group() # Create rule for tcp protocol with invalid ports states = [(-16, 80, 'Invalid value for port -16'), (80, 79, 'port_range_min must be <= port_range_max'), (80, 65536, 'Invalid value for port 65536'), (None, 6, 'port_range_min must be <= port_range_max'), (-16, 65536, 'Invalid value for port')] for pmin, pmax, msg in states: ex = self.assertRaises( lib_exc.BadRequest, self.client.create_security_group_rule, security_group_id=group_create_body['security_group']['id'], protocol='tcp', port_range_min=pmin, port_range_max=pmax, direction='ingress', ethertype=self.ethertype) self.assertIn(msg, str(ex)) # Create rule for icmp protocol with invalid ports states = [(1, 256, 'Invalid value for ICMP code'), (-1, 25, 'Invalid value'), (None, 6, 'ICMP type (port-range-min) is missing'), (300, 1, 'Invalid value for ICMP type')] for pmin, pmax, msg in states: ex = self.assertRaises( lib_exc.BadRequest, self.client.create_security_group_rule, security_group_id=group_create_body['security_group']['id'], protocol='icmp', port_range_min=pmin, port_range_max=pmax, direction='ingress', ethertype=self.ethertype) self.assertIn(msg, str(ex)) @test.attr(type=['negative', 'smoke']) @test.idempotent_id('55100aa8-b24f-333c-0bef-64eefd85f15c') def test_update_default_security_group_name(self): sg_list = self.client.list_security_groups(name='default') sg = sg_list['security_groups'][0] self.assertRaises(lib_exc.Conflict, self.client.update_security_group, sg['id'], name='test') class NegativeSecGroupIPv6Test(NegativeSecGroupTest): _ip_version = 6 neutron-8.4.0/neutron/tests/api/test_dhcp_ipv6.py0000664000567000056710000000745513044372760023277 0ustar jenkinsjenkins00000000000000# Copyright 2014 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. import netaddr from tempest.lib import exceptions as lib_exc from tempest import test from neutron.common import constants from neutron.tests.api import base from neutron.tests.tempest import config CONF = config.CONF class NetworksTestDHCPv6(base.BaseNetworkTest): _ip_version = 6 @classmethod def skip_checks(cls): msg = None if not CONF.network_feature_enabled.ipv6: msg = "IPv6 is not enabled" elif not CONF.network_feature_enabled.ipv6_subnet_attributes: msg = "DHCPv6 attributes are not enabled." if msg: raise cls.skipException(msg) @classmethod def resource_setup(cls): super(NetworksTestDHCPv6, cls).resource_setup() cls.network = cls.create_network() def _remove_from_list_by_index(self, things_list, elem): for index, i in enumerate(things_list): if i['id'] == elem['id']: break del things_list[index] def _clean_network(self): body = self.client.list_ports() ports = body['ports'] for port in ports: if (port['device_owner'].startswith( constants.DEVICE_OWNER_ROUTER_INTF) and port['device_id'] in [r['id'] for r in self.routers]): self.client.remove_router_interface_with_port_id( port['device_id'], port['id'] ) else: if port['id'] in [p['id'] for p in self.ports]: self.client.delete_port(port['id']) self._remove_from_list_by_index(self.ports, port) body = self.client.list_subnets() subnets = body['subnets'] for subnet in subnets: if subnet['id'] in [s['id'] for s in self.subnets]: self.client.delete_subnet(subnet['id']) self._remove_from_list_by_index(self.subnets, subnet) body = self.client.list_routers() routers = body['routers'] for router in routers: if router['id'] in [r['id'] for r in self.routers]: self.client.delete_router(router['id']) self._remove_from_list_by_index(self.routers, router) @test.idempotent_id('98244d88-d990-4570-91d4-6b25d70d08af') def test_dhcp_stateful_fixedips_outrange(self): """When port gets IP address from fixed IP range it shall be checked if it's from subnets range. """ kwargs = {'ipv6_ra_mode': 'dhcpv6-stateful', 'ipv6_address_mode': 'dhcpv6-stateful'} subnet = self.create_subnet(self.network, **kwargs) ip_range = netaddr.IPRange(subnet["allocation_pools"][0]["start"], subnet["allocation_pools"][0]["end"]) for i in range(1, 3): ip = netaddr.IPAddress(ip_range.last + i).format() self.assertRaises(lib_exc.BadRequest, self.create_port, self.network, fixed_ips=[{'subnet_id': subnet['id'], 'ip_address': ip}]) def tearDown(self): self._clean_network() super(NetworksTestDHCPv6, self).tearDown() neutron-8.4.0/neutron/tests/api/test_address_scopes.py0000664000567000056710000001146313044372760024410 0ustar jenkinsjenkins00000000000000# Copyright (c) 2015 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
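# A minimal standalone sketch (not part of the test suite; the pool bounds
# are assumed, illustrative values rather than data from a real subnet) of
# how test_dhcp_stateful_fixedips_outrange above derives addresses just past
# the end of a subnet's allocation pool with netaddr.
import netaddr

pool = {'start': '2001:db8::2', 'end': '2001:db8::ff'}
ip_range = netaddr.IPRange(pool['start'], pool['end'])
# ip_range.last is the pool's end address as an integer; stepping past it
# yields addresses the subnet cannot allocate, which is why the test expects
# 400 BadRequest when they are requested as fixed_ips.
out_of_range = [netaddr.IPAddress(ip_range.last + i, 6).format()
                for i in range(1, 3)]
assert out_of_range == ['2001:db8::100', '2001:db8::101']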
from tempest.lib.common.utils import data_utils from tempest.lib import exceptions as lib_exc from tempest import test from neutron.tests.api import base ADDRESS_SCOPE_NAME = 'smoke-address-scope' class AddressScopeTestBase(base.BaseAdminNetworkTest): @classmethod def resource_setup(cls): super(AddressScopeTestBase, cls).resource_setup() if not test.is_extension_enabled('address-scope', 'network'): msg = "address-scope extension not enabled." raise cls.skipException(msg) def _create_address_scope(self, is_admin=False, **kwargs): name = data_utils.rand_name(ADDRESS_SCOPE_NAME) return self.create_address_scope(name=name, is_admin=is_admin, **kwargs) def _test_update_address_scope_helper(self, is_admin=False, shared=None): address_scope = self._create_address_scope(is_admin=is_admin, ip_version=4) if is_admin: client = self.admin_client else: client = self.client kwargs = {'name': 'new_name'} if shared is not None: kwargs['shared'] = shared client.update_address_scope(address_scope['id'], **kwargs) body = client.show_address_scope(address_scope['id']) address_scope = body['address_scope'] self.assertEqual('new_name', address_scope['name']) return address_scope class AddressScopeTest(AddressScopeTestBase): @test.attr(type='smoke') @test.idempotent_id('045f9294-8b1a-4848-b6a8-edf1b41e9d06') def test_tenant_create_list_address_scope(self): address_scope = self._create_address_scope(ip_version=4) body = self.client.list_address_scopes() returned_address_scopes = body['address_scopes'] self.assertIn(address_scope['id'], [a_s['id'] for a_s in returned_address_scopes], "Created address scope id should be in the list") self.assertIn(address_scope['name'], [a_s['name'] for a_s in returned_address_scopes], "Created address scope name should be in the list") @test.attr(type='smoke') @test.idempotent_id('85e0326b-4c75-4b92-bd6e-7c7de6aaf05c') def test_show_address_scope(self): address_scope = self._create_address_scope(ip_version=4) body = self.client.show_address_scope( address_scope['id']) returned_address_scope = body['address_scope'] self.assertEqual(address_scope['id'], returned_address_scope['id']) self.assertEqual(address_scope['name'], returned_address_scope['name']) self.assertFalse(returned_address_scope['shared']) @test.attr(type='smoke') @test.idempotent_id('85a259b2-ace6-4e32-9657-a9a392b452aa') def test_tenant_update_address_scope(self): self._test_update_address_scope_helper() @test.attr(type='smoke') @test.idempotent_id('22b3b600-72a8-4b60-bc94-0f29dd6271df') def test_delete_address_scope(self): address_scope = self._create_address_scope(ip_version=4) self.client.delete_address_scope(address_scope['id']) self.assertRaises(lib_exc.NotFound, self.client.show_address_scope, address_scope['id']) @test.attr(type='smoke') @test.idempotent_id('5a06c287-8036-4d04-9d78-def8e06d43df') def test_admin_create_shared_address_scope(self): address_scope = self._create_address_scope(is_admin=True, shared=True, ip_version=4) body = self.admin_client.show_address_scope( address_scope['id']) returned_address_scope = body['address_scope'] self.assertEqual(address_scope['name'], returned_address_scope['name']) self.assertTrue(returned_address_scope['shared']) @test.attr(type='smoke') @test.idempotent_id('e9e1ccdd-9ccd-4076-9503-71820529508b') def test_admin_update_shared_address_scope(self): address_scope = self._test_update_address_scope_helper(is_admin=True, shared=True) self.assertTrue(address_scope['shared']) 
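# A minimal offline sketch of the update semantics exercised by
# _test_update_address_scope_helper above: the server applies only the keys
# present in the update body and leaves every other attribute untouched. The
# dict-based store is an assumption standing in for the Neutron backend; it
# is not real Neutron code.
def update_address_scope(store, scope_id, **changes):
    # Merge only the supplied attributes, as a PUT with a partial body does.
    scope = dict(store[scope_id])
    scope.update(changes)
    store[scope_id] = scope
    return {'address_scope': scope}

store = {'a1': {'id': 'a1', 'name': 'smoke-address-scope',
                'ip_version': 4, 'shared': False}}
body = update_address_scope(store, 'a1', name='new_name')
assert body['address_scope']['name'] == 'new_name'
assert body['address_scope']['shared'] is False  # unspecified keys survive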
neutron-8.4.0/neutron/tests/api/test_subnetpools_negative.py0000664000567000056710000003247013044372760025647 0ustar jenkinsjenkins00000000000000# Copyright 2015 Hewlett-Packard Development Company, L.P. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import uuid import netaddr from tempest.lib.common.utils import data_utils from tempest.lib import exceptions as lib_exc from tempest import test from neutron.tests.api import test_subnetpools SUBNETPOOL_NAME = 'smoke-subnetpool' class SubnetPoolsNegativeTestJSON(test_subnetpools.SubnetPoolsTestBase): smaller_prefix = u'10.11.12.0/26' @test.attr(type=['negative', 'smoke']) @test.idempotent_id('0212a042-603a-4f46-99e0-e37de9374d30') def test_get_non_existent_subnetpool(self): non_exist_id = data_utils.rand_name('subnetpool') self.assertRaises(lib_exc.NotFound, self.client.show_subnetpool, non_exist_id) @test.attr(type=['negative', 'smoke']) @test.idempotent_id('dc9336e5-f28f-4658-a0b0-cc79e607007d') def test_tenant_get_not_shared_admin_subnetpool(self): created_subnetpool = self._create_subnetpool(is_admin=True) # None-shared admin subnetpool cannot be retrieved by tenant user. self.assertRaises(lib_exc.NotFound, self.client.show_subnetpool, created_subnetpool['id']) @test.attr(type=['negative', 'smoke']) @test.idempotent_id('5e1f2f86-d81a-498c-82ed-32a49f4dc4d3') def test_delete_non_existent_subnetpool(self): non_exist_id = data_utils.rand_name('subnetpool') self.assertRaises(lib_exc.NotFound, self.client.delete_subnetpool, non_exist_id) @test.attr(type=['negative', 'smoke']) @test.idempotent_id('d1143fe2-212b-4e23-a308-d18f7d8d78d6') def test_tenant_create_shared_subnetpool(self): # 'shared' subnetpool can only be created by admin. self.assertRaises(lib_exc.Forbidden, self._create_subnetpool, is_admin=False, shared=True) @test.attr(type=['negative', 'smoke']) @test.idempotent_id('4be84d30-60ca-4bd3-8512-db5b36ce1378') def test_update_non_existent_subnetpool(self): non_exist_id = data_utils.rand_name('subnetpool') self.assertRaises(lib_exc.NotFound, self.client.update_subnetpool, non_exist_id, name='foo-name') @test.attr(type=['negative', 'smoke']) @test.idempotent_id('e6cd6d87-6173-45dd-bf04-c18ea7ec7537') def test_update_subnetpool_not_modifiable_shared(self): # 'shared' attributes can be specified during creation. # But this attribute is not modifiable after creation. 
created_subnetpool = self._create_subnetpool(is_admin=True) pool_id = created_subnetpool['id'] self.assertRaises(lib_exc.BadRequest, self.client.update_subnetpool, pool_id, shared=True) @test.attr(type=['negative', 'smoke']) @test.idempotent_id('62f7c43b-bff1-4def-8bb7-4754b840aaad') def test_update_subnetpool_prefixes_shrink(self): # Shrinking the current subnetpool prefixes is not supported created_subnetpool = self._create_subnetpool() self.assertRaises(lib_exc.BadRequest, self.client.update_subnetpool, created_subnetpool['id'], prefixes=[self.smaller_prefix]) @test.attr(type=['negative', 'smoke']) @test.idempotent_id('fc011824-153e-4469-97ad-9808eb88cae1') def test_create_subnet_different_pools_same_network(self): network = self.create_network(network_name='smoke-network') created_subnetpool = self._create_subnetpool( is_admin=True, prefixes=['192.168.0.0/16']) subnet = self.create_subnet( network, cidr=netaddr.IPNetwork('10.10.10.0/24'), ip_version=4, gateway=None, client=self.admin_client) # add the subnet created by admin to the cleanup because # base.py doesn't delete it using the admin client self.addCleanup(self.admin_client.delete_subnet, subnet['id']) self.assertRaises(lib_exc.BadRequest, self.create_subnet, network, ip_version=4, subnetpool_id=created_subnetpool['id'], client=self.admin_client) @test.attr(type=['negative', 'smoke']) @test.idempotent_id('9589e332-638e-476e-81bd-013d964aa3cb') @test.requires_ext(extension='address-scope', service='network') def test_create_subnetpool_associate_invalid_address_scope(self): self.assertRaises(lib_exc.BadRequest, self._create_subnetpool, address_scope_id='foo-addr-scope') @test.attr(type=['negative', 'smoke']) @test.idempotent_id('3b6c5942-485d-4964-a560-55608af020b5') @test.requires_ext(extension='address-scope', service='network') def test_create_subnetpool_associate_non_exist_address_scope(self): self.assertRaises(lib_exc.NotFound, self._create_subnetpool, address_scope_id=str(uuid.uuid4())) @test.attr(type=['negative', 'smoke']) @test.idempotent_id('2dfb4269-8657-485a-a053-b022e911456e') @test.requires_ext(extension='address-scope', service='network') def test_create_subnetpool_associate_address_scope_prefix_intersect(self): address_scope = self.create_address_scope( name=data_utils.rand_name('smoke-address-scope'), ip_version=4) addr_scope_id = address_scope['id'] self._create_subnetpool(address_scope_id=addr_scope_id) subnetpool_data = {'name': 'foo-subnetpool', 'prefixes': [u'10.11.12.13/24'], 'min_prefixlen': '29', 'address_scope_id': addr_scope_id} self.assertRaises(lib_exc.Conflict, self._create_subnetpool, **subnetpool_data) @test.attr(type=['negative', 'smoke']) @test.idempotent_id('83a19a13-5384-42e2-b579-43fc69c80914') @test.requires_ext(extension='address-scope', service='network') def test_create_sp_associate_address_scope_multiple_prefix_intersect(self): address_scope = self.create_address_scope( name=data_utils.rand_name('smoke-address-scope'), ip_version=4) addr_scope_id = address_scope['id'] self._create_subnetpool(prefixes=[u'20.0.0.0/18', u'30.0.0.0/18'], address_scope_id=addr_scope_id) prefixes = [u'40.0.0.0/18', u'50.0.0.0/18', u'30.0.0.0/12'] subnetpool_data = {'name': 'foo-subnetpool', 'prefixes': prefixes, 'min_prefixlen': '29', 'address_scope_id': addr_scope_id} self.assertRaises(lib_exc.Conflict, self._create_subnetpool, **subnetpool_data) @test.attr(type=['negative', 'smoke']) @test.idempotent_id('f06d8e7b-908b-4e94-b570-8156be6a4bf1') @test.requires_ext(extension='address-scope', service='network')
def test_create_subnetpool_associate_address_scope_of_other_owner(self): address_scope = self.create_address_scope( name=data_utils.rand_name('smoke-address-scope'), is_admin=True, ip_version=4) self.assertRaises(lib_exc.NotFound, self._create_subnetpool, address_scope_id=address_scope['id']) @test.attr(type=['negative', 'smoke']) @test.idempotent_id('3396ec6c-cb80-4ebe-b897-84e904580bdf') @test.requires_ext(extension='address-scope', service='network') def test_tenant_create_subnetpool_associate_shared_address_scope(self): address_scope = self.create_address_scope( name=data_utils.rand_name('smoke-address-scope'), is_admin=True, shared=True, ip_version=4) self.assertRaises(lib_exc.BadRequest, self._create_subnetpool, address_scope_id=address_scope['id']) @test.attr(type='smoke') @test.idempotent_id('6d3d9ad5-32d4-4d63-aa00-8c62f73e2881') @test.requires_ext(extension='address-scope', service='network') def test_update_subnetpool_associate_address_scope_of_other_owner(self): address_scope = self.create_address_scope( name=data_utils.rand_name('smoke-address-scope'), is_admin=True, ip_version=4) address_scope_id = address_scope['id'] created_subnetpool = self._create_subnetpool(self.client) self.assertRaises(lib_exc.NotFound, self.client.update_subnetpool, created_subnetpool['id'], address_scope_id=address_scope_id) def _test_update_subnetpool_prefix_intersect_helper( self, pool_1_prefixes, pool_2_prefixes, pool_1_updated_prefixes): # create two subnet pools associating to an address scope. # Updating the first subnet pool with the prefix intersecting # with the second one should be a failure address_scope = self.create_address_scope( name=data_utils.rand_name('smoke-address-scope'), ip_version=4) addr_scope_id = address_scope['id'] pool_values = {'address_scope_id': addr_scope_id, 'prefixes': pool_1_prefixes} created_subnetpool_1 = self._create_subnetpool(**pool_values) pool_id_1 = created_subnetpool_1['id'] pool_values = {'address_scope_id': addr_scope_id, 'prefixes': pool_2_prefixes} self._create_subnetpool(**pool_values) # now update the pool_id_1 with the prefix intersecting with # pool_id_2 self.assertRaises(lib_exc.Conflict, self.client.update_subnetpool, pool_id_1, prefixes=pool_1_updated_prefixes) @test.attr(type=['negative', 'smoke']) @test.idempotent_id('96006292-7214-40e0-a471-153fb76e6b31') @test.requires_ext(extension='address-scope', service='network') def test_update_subnetpool_prefix_intersect(self): pool_1_prefix = [u'20.0.0.0/18'] pool_2_prefix = [u'20.10.0.0/24'] pool_1_updated_prefix = [u'20.0.0.0/12'] self._test_update_subnetpool_prefix_intersect_helper( pool_1_prefix, pool_2_prefix, pool_1_updated_prefix) @test.attr(type=['negative', 'smoke']) @test.idempotent_id('4d3f8a79-c530-4e59-9acf-6c05968adbfe') @test.requires_ext(extension='address-scope', service='network') def test_update_subnetpool_multiple_prefix_intersect(self): pool_1_prefixes = [u'20.0.0.0/18', u'30.0.0.0/18'] pool_2_prefixes = [u'20.10.0.0/24', u'40.0.0.0/18', '50.0.0.0/18'] pool_1_updated_prefixes = [u'20.0.0.0/18', u'30.0.0.0/18', u'50.0.0.0/12'] self._test_update_subnetpool_prefix_intersect_helper( pool_1_prefixes, pool_2_prefixes, pool_1_updated_prefixes) @test.attr(type=['negative', 'smoke']) @test.idempotent_id('7438e49e-1351-45d8-937b-892059fb97f5') @test.requires_ext(extension='address-scope', service='network') def test_tenant_update_sp_prefix_associated_with_shared_addr_scope(self): address_scope = self.create_address_scope( name=data_utils.rand_name('smoke-address-scope'), is_admin=True, 
shared=True, ip_version=4) addr_scope_id = address_scope['id'] pool_values = {'prefixes': [u'20.0.0.0/18', u'30.0.0.0/18']} created_subnetpool = self._create_subnetpool(**pool_values) pool_id = created_subnetpool['id'] # associate the subnetpool to the address scope as an admin self.admin_client.update_subnetpool(pool_id, address_scope_id=addr_scope_id) body = self.admin_client.show_subnetpool(pool_id) self.assertEqual(addr_scope_id, body['subnetpool']['address_scope_id']) # updating the subnetpool prefix by the tenant user should fail # since the tenant is not the owner of address scope update_prefixes = [u'20.0.0.0/18', u'30.0.0.0/18', u'40.0.0.0/18'] self.assertRaises(lib_exc.BadRequest, self.client.update_subnetpool, pool_id, prefixes=update_prefixes) # admin can update the prefixes self.admin_client.update_subnetpool(pool_id, prefixes=update_prefixes) body = self.admin_client.show_subnetpool(pool_id) self.assertEqual(update_prefixes, body['subnetpool']['prefixes']) @test.attr(type='smoke') @test.idempotent_id('648fee7d-a909-4ced-bad3-3a169444c0a8') @test.requires_ext(extension='address-scope', service='network') def test_update_subnetpool_associate_address_scope_wrong_ip_version(self): address_scope = self.create_address_scope( name=data_utils.rand_name('smoke-address-scope'), ip_version=6) created_subnetpool = self._create_subnetpool() self.assertRaises(lib_exc.BadRequest, self.client.update_subnetpool, created_subnetpool['id'], address_scope_id=address_scope['id']) neutron-8.4.0/neutron/tests/api/test_subnetpools.py0000664000567000056710000003716313044372760023771 0ustar jenkinsjenkins00000000000000# Copyright 2015 Hewlett-Packard Development Company, L.P. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
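# A minimal offline sketch of the prefix-overlap rule behind the Conflict
# assertions in the negative tests above: two subnetpools associated with the
# same address scope must not have intersecting prefixes. The helper below is
# an assumption used to demonstrate the CIDR math with netaddr; Neutron
# performs the real check server-side.
import netaddr

def prefixes_intersect(prefixes_a, prefixes_b):
    # IPSet expands each prefix list into an address set, so overlap
    # detection reduces to set intersection.
    return bool(netaddr.IPSet(prefixes_a) & netaddr.IPSet(prefixes_b))

# 30.0.0.0/12 spans 30.0.0.0-30.15.255.255, so it overlaps 30.0.0.0/18.
assert prefixes_intersect(['20.0.0.0/18', '30.0.0.0/18'], ['30.0.0.0/12'])
assert not prefixes_intersect(['40.0.0.0/18'], ['50.0.0.0/18'])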
from tempest.lib.common.utils import data_utils from tempest import test from neutron.tests.api import base SUBNETPOOL_NAME = 'smoke-subnetpool' SUBNET_NAME = 'smoke-subnet' class SubnetPoolsTestBase(base.BaseAdminNetworkTest): @classmethod def resource_setup(cls): super(SubnetPoolsTestBase, cls).resource_setup() min_prefixlen = '29' prefixes = [u'10.11.12.0/24'] cls._subnetpool_data = {'prefixes': prefixes, 'min_prefixlen': min_prefixlen} def _create_subnetpool(self, is_admin=False, **kwargs): if 'name' not in kwargs: name = data_utils.rand_name(SUBNETPOOL_NAME) else: name = kwargs.pop('name') if 'prefixes' not in kwargs: kwargs['prefixes'] = self._subnetpool_data['prefixes'] if 'min_prefixlen' not in kwargs: kwargs['min_prefixlen'] = self._subnetpool_data['min_prefixlen'] return self.create_subnetpool(name=name, is_admin=is_admin, **kwargs) class SubnetPoolsTest(SubnetPoolsTestBase): min_prefixlen = '28' max_prefixlen = '31' _ip_version = 4 subnet_cidr = u'10.11.12.0/31' new_prefix = u'10.11.15.0/24' larger_prefix = u'10.11.0.0/16' """ Tests the following operations in the Neutron API using the REST client for Neutron: create a subnetpool for a tenant list tenant's subnetpools show a tenant subnetpool details subnetpool update delete a subnetpool All subnetpool tests are run once with ipv4 and once with ipv6. v2.0 of the Neutron API is assumed. """ def _new_subnetpool_attributes(self): new_name = data_utils.rand_name(SUBNETPOOL_NAME) return {'name': new_name, 'min_prefixlen': self.min_prefixlen, 'max_prefixlen': self.max_prefixlen} def _check_equality_updated_subnetpool(self, expected_values, updated_pool): self.assertEqual(expected_values['name'], updated_pool['name']) self.assertEqual(expected_values['min_prefixlen'], updated_pool['min_prefixlen']) self.assertEqual(expected_values['max_prefixlen'], updated_pool['max_prefixlen']) # expected_values may not contains all subnetpool values if 'prefixes' in expected_values: self.assertEqual(expected_values['prefixes'], updated_pool['prefixes']) @test.attr(type='smoke') @test.idempotent_id('6e1781ec-b45b-4042-aebe-f485c022996e') def test_create_list_subnetpool(self): created_subnetpool = self._create_subnetpool() body = self.client.list_subnetpools() subnetpools = body['subnetpools'] self.assertIn(created_subnetpool['id'], [sp['id'] for sp in subnetpools], "Created subnetpool id should be in the list") self.assertIn(created_subnetpool['name'], [sp['name'] for sp in subnetpools], "Created subnetpool name should be in the list") @test.attr(type='smoke') @test.idempotent_id('c72c1c0c-2193-4aca-ddd4-b1442640bbbb') def test_create_update_subnetpool_description(self): if not test.is_extension_enabled('standard-attr-description', 'network'): msg = "standard-attr-description not enabled." 
raise self.skipException(msg) body = self._create_subnetpool(description='d1') self.assertEqual('d1', body['description']) sub_id = body['id'] body = filter(lambda x: x['id'] == sub_id, self.client.list_subnetpools()['subnetpools'])[0] self.assertEqual('d1', body['description']) body = self.client.update_subnetpool(sub_id, description='d2') self.assertEqual('d2', body['subnetpool']['description']) body = filter(lambda x: x['id'] == sub_id, self.client.list_subnetpools()['subnetpools'])[0] self.assertEqual('d2', body['description']) @test.attr(type='smoke') @test.idempotent_id('741d08c2-1e3f-42be-99c7-0ea93c5b728c') def test_get_subnetpool(self): created_subnetpool = self._create_subnetpool() prefixlen = self._subnetpool_data['min_prefixlen'] body = self.client.show_subnetpool(created_subnetpool['id']) subnetpool = body['subnetpool'] self.assertEqual(created_subnetpool['name'], subnetpool['name']) self.assertEqual(created_subnetpool['id'], subnetpool['id']) self.assertEqual(prefixlen, subnetpool['min_prefixlen']) self.assertEqual(prefixlen, subnetpool['default_prefixlen']) self.assertFalse(subnetpool['shared']) @test.attr(type='smoke') @test.idempotent_id('764f1b93-1c4a-4513-9e7b-6c2fc5e9270c') def test_tenant_update_subnetpool(self): created_subnetpool = self._create_subnetpool() pool_id = created_subnetpool['id'] subnetpool_data = self._new_subnetpool_attributes() self.client.update_subnetpool(created_subnetpool['id'], **subnetpool_data) body = self.client.show_subnetpool(pool_id) subnetpool = body['subnetpool'] self._check_equality_updated_subnetpool(subnetpool_data, subnetpool) self.assertFalse(subnetpool['shared']) @test.attr(type='smoke') @test.idempotent_id('4b496082-c992-4319-90be-d4a7ce646290') def test_update_subnetpool_prefixes_append(self): # We can append new prefixes to subnetpool create_subnetpool = self._create_subnetpool() pool_id = create_subnetpool['id'] old_prefixes = self._subnetpool_data['prefixes'] new_prefixes = old_prefixes[:] new_prefixes.append(self.new_prefix) subnetpool_data = {'prefixes': new_prefixes} self.client.update_subnetpool(pool_id, **subnetpool_data) body = self.client.show_subnetpool(pool_id) prefixes = body['subnetpool']['prefixes'] self.assertIn(self.new_prefix, prefixes) self.assertIn(old_prefixes[0], prefixes) @test.attr(type='smoke') @test.idempotent_id('2cae5d6a-9d32-42d8-8067-f13970ae13bb') def test_update_subnetpool_prefixes_extend(self): # We can extend current subnetpool prefixes created_subnetpool = self._create_subnetpool() pool_id = created_subnetpool['id'] old_prefixes = self._subnetpool_data['prefixes'] subnetpool_data = {'prefixes': [self.larger_prefix]} self.client.update_subnetpool(pool_id, **subnetpool_data) body = self.client.show_subnetpool(pool_id) prefixes = body['subnetpool']['prefixes'] self.assertIn(self.larger_prefix, prefixes) self.assertNotIn(old_prefixes[0], prefixes) @test.attr(type='smoke') @test.idempotent_id('d70c6c35-913b-4f24-909f-14cd0d29b2d2') def test_admin_create_shared_subnetpool(self): created_subnetpool = self._create_subnetpool(is_admin=True, shared=True) pool_id = created_subnetpool['id'] # Shared subnetpool can be retrieved by tenant user. 
body = self.client.show_subnetpool(pool_id) subnetpool = body['subnetpool'] self.assertEqual(created_subnetpool['name'], subnetpool['name']) self.assertTrue(subnetpool['shared']) def _create_subnet_from_pool(self, subnet_values=None, pool_values=None): if pool_values is None: pool_values = {} created_subnetpool = self._create_subnetpool(**pool_values) pool_id = created_subnetpool['id'] subnet_name = data_utils.rand_name(SUBNETPOOL_NAME) network = self.create_network() subnet_kwargs = {'name': subnet_name, 'subnetpool_id': pool_id} if subnet_values: subnet_kwargs.update(subnet_values) # not creating the subnet using the base.create_subnet because # that function needs to be enhanced to support subnet_create when # prefixlen and subnetpool_id is specified. body = self.client.create_subnet( network_id=network['id'], ip_version=self._ip_version, **subnet_kwargs) subnet = body['subnet'] return pool_id, subnet @test.attr(type='smoke') @test.idempotent_id('1362ed7d-3089-42eb-b3a5-d6cb8398ee77') def test_create_subnet_from_pool_with_prefixlen(self): subnet_values = {"prefixlen": self.max_prefixlen} pool_id, subnet = self._create_subnet_from_pool( subnet_values=subnet_values) cidr = str(subnet['cidr']) self.assertEqual(pool_id, subnet['subnetpool_id']) self.assertTrue(cidr.endswith(str(self.max_prefixlen))) @test.attr(type='smoke') @test.idempotent_id('86b86189-9789-4582-9c3b-7e2bfe5735ee') def test_create_subnet_from_pool_with_subnet_cidr(self): subnet_values = {"cidr": self.subnet_cidr} pool_id, subnet = self._create_subnet_from_pool( subnet_values=subnet_values) cidr = str(subnet['cidr']) self.assertEqual(pool_id, subnet['subnetpool_id']) self.assertEqual(cidr, self.subnet_cidr) @test.attr(type='smoke') @test.idempotent_id('83f76e3a-9c40-40c2-a015-b7c5242178d8') def test_create_subnet_from_pool_with_default_prefixlen(self): # If neither cidr nor prefixlen is specified, # subnet will use subnetpool default_prefixlen for cidr. 
pool_id, subnet = self._create_subnet_from_pool() cidr = str(subnet['cidr']) self.assertEqual(pool_id, subnet['subnetpool_id']) prefixlen = self._subnetpool_data['min_prefixlen'] self.assertTrue(cidr.endswith(str(prefixlen))) @test.attr(type='smoke') @test.idempotent_id('a64af292-ec52-4bde-b654-a6984acaf477') def test_create_subnet_from_pool_with_quota(self): pool_values = {'default_quota': 4} subnet_values = {"prefixlen": self.max_prefixlen} pool_id, subnet = self._create_subnet_from_pool( subnet_values=subnet_values, pool_values=pool_values) cidr = str(subnet['cidr']) self.assertEqual(pool_id, subnet['subnetpool_id']) self.assertTrue(cidr.endswith(str(self.max_prefixlen))) @test.attr(type='smoke') @test.idempotent_id('49b44c64-1619-4b29-b527-ffc3c3115dc4') @test.requires_ext(extension='address-scope', service='network') def test_create_subnetpool_associate_address_scope(self): address_scope = self.create_address_scope( name=data_utils.rand_name('smoke-address-scope'), ip_version=self._ip_version) created_subnetpool = self._create_subnetpool( address_scope_id=address_scope['id']) body = self.client.show_subnetpool(created_subnetpool['id']) self.assertEqual(address_scope['id'], body['subnetpool']['address_scope_id']) @test.attr(type='smoke') @test.idempotent_id('910b6393-db24-4f6f-87dc-b36892ad6c8c') @test.requires_ext(extension='address-scope', service='network') def test_update_subnetpool_associate_address_scope(self): address_scope = self.create_address_scope( name=data_utils.rand_name('smoke-address-scope'), ip_version=self._ip_version) created_subnetpool = self._create_subnetpool() pool_id = created_subnetpool['id'] body = self.client.show_subnetpool(pool_id) self.assertIsNone(body['subnetpool']['address_scope_id']) self.client.update_subnetpool(pool_id, address_scope_id=address_scope['id']) body = self.client.show_subnetpool(pool_id) self.assertEqual(address_scope['id'], body['subnetpool']['address_scope_id']) @test.attr(type='smoke') @test.idempotent_id('18302e80-46a3-4563-82ac-ccd1dd57f652') @test.requires_ext(extension='address-scope', service='network') def test_update_subnetpool_associate_another_address_scope(self): address_scope = self.create_address_scope( name=data_utils.rand_name('smoke-address-scope'), ip_version=self._ip_version) another_address_scope = self.create_address_scope( name=data_utils.rand_name('smoke-address-scope'), ip_version=self._ip_version) created_subnetpool = self._create_subnetpool( address_scope_id=address_scope['id']) pool_id = created_subnetpool['id'] body = self.client.show_subnetpool(pool_id) self.assertEqual(address_scope['id'], body['subnetpool']['address_scope_id']) self.client.update_subnetpool( pool_id, address_scope_id=another_address_scope['id']) body = self.client.show_subnetpool(pool_id) self.assertEqual(another_address_scope['id'], body['subnetpool']['address_scope_id']) @test.attr(type='smoke') @test.idempotent_id('f8970048-e41b-42d6-934b-a1297b07706a') @test.requires_ext(extension='address-scope', service='network') def test_update_subnetpool_disassociate_address_scope(self): address_scope = self.create_address_scope( name=data_utils.rand_name('smoke-address-scope'), ip_version=self._ip_version) created_subnetpool = self._create_subnetpool( address_scope_id=address_scope['id']) pool_id = created_subnetpool['id'] body = self.client.show_subnetpool(pool_id) self.assertEqual(address_scope['id'], body['subnetpool']['address_scope_id']) self.client.update_subnetpool(pool_id, address_scope_id=None) body = self.client.show_subnetpool(pool_id) 
self.assertIsNone(body['subnetpool']['address_scope_id']) class SubnetPoolsTestV6(SubnetPoolsTest): min_prefixlen = '48' max_prefixlen = '64' _ip_version = 6 subnet_cidr = '2001:db8:3::/64' new_prefix = u'2001:db8:5::/64' larger_prefix = u'2001:db8::/32' @classmethod def resource_setup(cls): super(SubnetPoolsTestV6, cls).resource_setup() min_prefixlen = '64' prefixes = [u'2001:db8:3::/48'] cls._subnetpool_data = {'min_prefixlen': min_prefixlen, 'prefixes': prefixes} @test.attr(type='smoke') @test.idempotent_id('f62d73dc-cf6f-4879-b94b-dab53982bf3b') def test_create_dual_stack_subnets_from_subnetpools(self): pool_id_v6, subnet_v6 = self._create_subnet_from_pool() pool_values_v4 = {'prefixes': ['192.168.0.0/16'], 'min_prefixlen': 21, 'max_prefixlen': 32} create_v4_subnetpool = self._create_subnetpool(**pool_values_v4) pool_id_v4 = create_v4_subnetpool['id'] subnet_v4 = self.client.create_subnet( network_id=subnet_v6['network_id'], ip_version=4, subnetpool_id=pool_id_v4)['subnet'] self.assertEqual(subnet_v4['network_id'], subnet_v6['network_id']) neutron-8.4.0/neutron/tests/api/test_address_scopes_negative.py0000664000567000056710000001045713044372760026274 0ustar jenkinsjenkins00000000000000# Copyright (c) 2015 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from tempest.lib.common.utils import data_utils from tempest.lib import exceptions as lib_exc from tempest import test from neutron.tests.api import test_address_scopes class AddressScopeTestNegative(test_address_scopes.AddressScopeTestBase): @test.attr(type=['negative', 'smoke']) @test.idempotent_id('9c92ec34-0c50-4104-aa47-9ce98d5088df') def test_tenant_create_shared_address_scope(self): self.assertRaises(lib_exc.Forbidden, self._create_address_scope, shared=True, ip_version=4) @test.attr(type=['negative', 'smoke']) @test.idempotent_id('a857b61e-bf53-4fab-b21a-b0daaf81b5bd') def test_tenant_update_address_scope_shared_true(self): self.assertRaises(lib_exc.Forbidden, self._test_update_address_scope_helper, shared=True) @test.attr(type=['negative', 'smoke']) @test.idempotent_id('a859ef2f-9c76-4e2e-ba0f-e0339a489e8c') def test_tenant_update_address_scope_shared_false(self): self.assertRaises(lib_exc.Forbidden, self._test_update_address_scope_helper, shared=False) @test.attr(type=['negative', 'smoke']) @test.idempotent_id('9b6dd7ad-cabb-4f55-bd5e-e61176ef41f6') def test_get_non_existent_address_scope(self): non_exist_id = data_utils.rand_name('address_scope') self.assertRaises(lib_exc.NotFound, self.client.show_address_scope, non_exist_id) @test.attr(type=['negative', 'smoke']) @test.idempotent_id('ef213552-f2da-487d-bf4a-e1705d115ff1') def test_tenant_get_not_shared_admin_address_scope(self): address_scope = self._create_address_scope(is_admin=True, ip_version=4) # None-shared admin address scope cannot be retrieved by tenant user. 
self.assertRaises(lib_exc.NotFound, self.client.show_address_scope, address_scope['id']) @test.attr(type=['negative', 'smoke']) @test.idempotent_id('5c25dc6a-1e92-467a-9cc7-cda74b6003db') def test_delete_non_existent_address_scope(self): non_exist_id = data_utils.rand_name('address_scope') self.assertRaises(lib_exc.NotFound, self.client.delete_address_scope, non_exist_id) @test.attr(type=['negative', 'smoke']) @test.idempotent_id('47c25dc5-e886-4a84-88c3-ac5031969661') def test_update_non_existent_address_scope(self): non_exist_id = data_utils.rand_name('address_scope') self.assertRaises(lib_exc.NotFound, self.client.update_address_scope, non_exist_id, name='foo-name') @test.attr(type=['negative', 'smoke']) @test.idempotent_id('702d0515-82cb-4207-b0d9-703336e54665') def test_update_shared_address_scope_to_unshare(self): address_scope = self._create_address_scope(is_admin=True, shared=True, ip_version=4) self.assertRaises(lib_exc.BadRequest, self.admin_client.update_address_scope, address_scope['id'], name='new-name', shared=False) @test.attr(type=['negative', 'smoke']) @test.idempotent_id('1e471e5c-6f9c-437a-9257-fd9bc4b6f0fb') def test_delete_address_scope_associated_with_subnetpool(self): address_scope = self._create_address_scope(ip_version=4) prefixes = [u'10.11.12.0/24'] subnetpool_data = { 'name': 'foo-subnetpool', 'min_prefixlen': '29', 'prefixes': prefixes, 'address_scope_id': address_scope['id']} self.create_subnetpool(**subnetpool_data) self.assertRaises(lib_exc.Conflict, self.client.delete_address_scope, address_scope['id']) neutron-8.4.0/neutron/tests/api/base.py0000664000567000056710000004346013044372760021264 0ustar jenkinsjenkins00000000000000# Copyright 2012 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import netaddr from tempest.lib.common.utils import data_utils from tempest.lib import exceptions as lib_exc from tempest import test from neutron.tests.api import clients from neutron.tests.tempest import config from neutron.tests.tempest import exceptions CONF = config.CONF class BaseNetworkTest(test.BaseTestCase): """ Base class for the Neutron tests that use the Tempest Neutron REST client Per the Neutron API Guide, API v1.x was removed from the source code tree (docs.openstack.org/api/openstack-network/2.0/content/Overview-d1e71.html) Therefore, v2.x of the Neutron API is assumed. It is also assumed that the following options are defined in the [network] section of etc/tempest.conf: project_network_cidr with a block of cidr's from which smaller blocks can be allocated for tenant networks project_network_mask_bits with the mask bits to be used to partition the block defined by tenant-network_cidr Finally, it is assumed that the following option is defined in the [service_available] section of etc/tempest.conf neutron as True """ force_tenant_isolation = False credentials = ['primary'] # Default to ipv4. 
_ip_version = 4 @classmethod def get_client_manager(cls, credential_type=None, roles=None, force_new=None): manager = test.BaseTestCase.get_client_manager( credential_type=credential_type, roles=roles, force_new=force_new) # Neutron uses a different clients manager than the one in Tempest return clients.Manager(manager.credentials) @classmethod def skip_checks(cls): super(BaseNetworkTest, cls).skip_checks() if not CONF.service_available.neutron: raise cls.skipException("Neutron support is required") if cls._ip_version == 6 and not CONF.network_feature_enabled.ipv6: raise cls.skipException("IPv6 Tests are disabled.") @classmethod def setup_credentials(cls): # Create no network resources for these tests. cls.set_network_resources() super(BaseNetworkTest, cls).setup_credentials() @classmethod def setup_clients(cls): super(BaseNetworkTest, cls).setup_clients() cls.client = cls.os.network_client @classmethod def resource_setup(cls): super(BaseNetworkTest, cls).resource_setup() cls.networks = [] cls.shared_networks = [] cls.subnets = [] cls.ports = [] cls.routers = [] cls.floating_ips = [] cls.metering_labels = [] cls.service_profiles = [] cls.flavors = [] cls.metering_label_rules = [] cls.qos_rules = [] cls.qos_policies = [] cls.ethertype = "IPv" + str(cls._ip_version) cls.address_scopes = [] cls.admin_address_scopes = [] cls.subnetpools = [] cls.admin_subnetpools = [] @classmethod def resource_cleanup(cls): if CONF.service_available.neutron: # Clean up QoS rules for qos_rule in cls.qos_rules: cls._try_delete_resource(cls.admin_client.delete_qos_rule, qos_rule['id']) # Clean up QoS policies for qos_policy in cls.qos_policies: cls._try_delete_resource(cls.admin_client.delete_qos_policy, qos_policy['id']) # Clean up floating IPs for floating_ip in cls.floating_ips: cls._try_delete_resource(cls.client.delete_floatingip, floating_ip['id']) # Clean up routers for router in cls.routers: cls._try_delete_resource(cls.delete_router, router) # Clean up metering label rules for metering_label_rule in cls.metering_label_rules: cls._try_delete_resource( cls.admin_client.delete_metering_label_rule, metering_label_rule['id']) # Clean up metering labels for metering_label in cls.metering_labels: cls._try_delete_resource( cls.admin_client.delete_metering_label, metering_label['id']) # Clean up flavors for flavor in cls.flavors: cls._try_delete_resource( cls.admin_client.delete_flavor, flavor['id']) # Clean up service profiles for service_profile in cls.service_profiles: cls._try_delete_resource( cls.admin_client.delete_service_profile, service_profile['id']) # Clean up ports for port in cls.ports: cls._try_delete_resource(cls.client.delete_port, port['id']) # Clean up subnets for subnet in cls.subnets: cls._try_delete_resource(cls.client.delete_subnet, subnet['id']) # Clean up networks for network in cls.networks: cls._try_delete_resource(cls.client.delete_network, network['id']) # Clean up shared networks for network in cls.shared_networks: cls._try_delete_resource(cls.admin_client.delete_network, network['id']) for subnetpool in cls.subnetpools: cls._try_delete_resource(cls.client.delete_subnetpool, subnetpool['id']) for subnetpool in cls.admin_subnetpools: cls._try_delete_resource(cls.admin_client.delete_subnetpool, subnetpool['id']) for address_scope in cls.address_scopes: cls._try_delete_resource(cls.client.delete_address_scope, address_scope['id']) for address_scope in cls.admin_address_scopes: cls._try_delete_resource( cls.admin_client.delete_address_scope, address_scope['id']) super(BaseNetworkTest,
cls).resource_cleanup() @classmethod def _try_delete_resource(cls, delete_callable, *args, **kwargs): """Cleanup resources in case of test-failure Some resources are explicitly deleted by the test. If the test failed to delete a resource, this method will execute the appropriate delete methods. Otherwise, the method ignores NotFound exceptions thrown for resources that were correctly deleted by the test. :param delete_callable: delete method :param args: arguments for delete method :param kwargs: keyword arguments for delete method """ try: delete_callable(*args, **kwargs) # if resource is not found, this means it was deleted in the test except lib_exc.NotFound: pass @classmethod def create_network(cls, network_name=None, **kwargs): """Wrapper utility that returns a test network.""" network_name = network_name or data_utils.rand_name('test-network-') body = cls.client.create_network(name=network_name, **kwargs) network = body['network'] cls.networks.append(network) return network @classmethod def create_shared_network(cls, network_name=None, **post_body): network_name = network_name or data_utils.rand_name('sharednetwork-') post_body.update({'name': network_name, 'shared': True}) body = cls.admin_client.create_network(**post_body) network = body['network'] cls.shared_networks.append(network) return network @classmethod def create_subnet(cls, network, gateway='', cidr=None, mask_bits=None, ip_version=None, client=None, **kwargs): """Wrapper utility that returns a test subnet.""" # allow tests to use admin client if not client: client = cls.client # The cidr and mask_bits depend on the ip version. ip_version = ip_version if ip_version is not None else cls._ip_version gateway_not_set = gateway == '' if ip_version == 4: cidr = cidr or netaddr.IPNetwork( config.safe_get_config_value( 'network', 'project_network_cidr')) mask_bits = ( mask_bits or config.safe_get_config_value( 'network', 'project_network_mask_bits')) elif ip_version == 6: cidr = ( cidr or netaddr.IPNetwork( config.safe_get_config_value( 'network', 'project_network_v6_cidr'))) mask_bits = ( mask_bits or config.safe_get_config_value( 'network', 'project_network_v6_mask_bits')) # Find a cidr that is not in use yet and create a subnet with it for subnet_cidr in cidr.subnet(mask_bits): if gateway_not_set: gateway_ip = str(netaddr.IPAddress(subnet_cidr) + 1) else: gateway_ip = gateway try: body = client.create_subnet( network_id=network['id'], cidr=str(subnet_cidr), ip_version=ip_version, gateway_ip=gateway_ip, **kwargs) break except lib_exc.BadRequest as e: is_overlapping_cidr = 'overlaps with another subnet' in str(e) if not is_overlapping_cidr: raise else: message = 'Available CIDR for subnet creation could not be found' raise ValueError(message) subnet = body['subnet'] cls.subnets.append(subnet) return subnet @classmethod def create_port(cls, network, **kwargs): """Wrapper utility that returns a test port.""" body = cls.client.create_port(network_id=network['id'], **kwargs) port = body['port'] cls.ports.append(port) return port @classmethod def update_port(cls, port, **kwargs): """Wrapper utility that updates a test port.""" body = cls.client.update_port(port['id'], **kwargs) return body['port'] @classmethod def create_router(cls, router_name=None, admin_state_up=False, external_network_id=None, enable_snat=None, **kwargs): ext_gw_info = {} if external_network_id: ext_gw_info['network_id'] = external_network_id if enable_snat: ext_gw_info['enable_snat'] = enable_snat body = cls.client.create_router( router_name, 
external_gateway_info=ext_gw_info, admin_state_up=admin_state_up, **kwargs) router = body['router'] cls.routers.append(router) return router @classmethod def create_floatingip(cls, external_network_id): """Wrapper utility that returns a test floating IP.""" body = cls.client.create_floatingip( floating_network_id=external_network_id) fip = body['floatingip'] cls.floating_ips.append(fip) return fip @classmethod def create_router_interface(cls, router_id, subnet_id): """Wrapper utility that returns a router interface.""" interface = cls.client.add_router_interface_with_subnet_id( router_id, subnet_id) return interface @classmethod def create_qos_policy(cls, name, description, shared, tenant_id=None): """Wrapper utility that returns a test QoS policy.""" body = cls.admin_client.create_qos_policy( name, description, shared, tenant_id) qos_policy = body['policy'] cls.qos_policies.append(qos_policy) return qos_policy @classmethod def create_qos_bandwidth_limit_rule(cls, policy_id, max_kbps, max_burst_kbps): """Wrapper utility that returns a test QoS bandwidth limit rule.""" body = cls.admin_client.create_bandwidth_limit_rule( policy_id, max_kbps, max_burst_kbps) qos_rule = body['bandwidth_limit_rule'] cls.qos_rules.append(qos_rule) return qos_rule @classmethod def delete_router(cls, router): body = cls.client.list_router_interfaces(router['id']) interfaces = body['ports'] for i in interfaces: try: cls.client.remove_router_interface_with_subnet_id( router['id'], i['fixed_ips'][0]['subnet_id']) except lib_exc.NotFound: pass cls.client.delete_router(router['id']) @classmethod def create_address_scope(cls, name, is_admin=False, **kwargs): if is_admin: body = cls.admin_client.create_address_scope(name=name, **kwargs) cls.admin_address_scopes.append(body['address_scope']) else: body = cls.client.create_address_scope(name=name, **kwargs) cls.address_scopes.append(body['address_scope']) return body['address_scope'] @classmethod def create_subnetpool(cls, name, is_admin=False, **kwargs): if is_admin: body = cls.admin_client.create_subnetpool(name, **kwargs) cls.admin_subnetpools.append(body['subnetpool']) else: body = cls.client.create_subnetpool(name, **kwargs) cls.subnetpools.append(body['subnetpool']) return body['subnetpool'] class BaseAdminNetworkTest(BaseNetworkTest): credentials = ['primary', 'admin'] @classmethod def setup_clients(cls): super(BaseAdminNetworkTest, cls).setup_clients() cls.admin_client = cls.os_adm.network_client cls.identity_admin_client = cls.os_adm.tenants_client @classmethod def create_metering_label(cls, name, description): """Wrapper utility that returns a test metering label.""" body = cls.admin_client.create_metering_label( description=description, name=data_utils.rand_name("metering-label")) metering_label = body['metering_label'] cls.metering_labels.append(metering_label) return metering_label @classmethod def create_metering_label_rule(cls, remote_ip_prefix, direction, metering_label_id): """Wrapper utility that returns a test metering label rule.""" body = cls.admin_client.create_metering_label_rule( remote_ip_prefix=remote_ip_prefix, direction=direction, metering_label_id=metering_label_id) metering_label_rule = body['metering_label_rule'] cls.metering_label_rules.append(metering_label_rule) return metering_label_rule @classmethod def create_flavor(cls, name, description, service_type): """Wrapper utility that returns a test flavor.""" body = cls.admin_client.create_flavor( description=description, service_type=service_type, name=name) flavor = body['flavor'] 
cls.flavors.append(flavor) return flavor @classmethod def create_service_profile(cls, description, metainfo, driver): """Wrapper utility that returns a test service profile.""" body = cls.admin_client.create_service_profile( driver=driver, metainfo=metainfo, description=description) service_profile = body['service_profile'] cls.service_profiles.append(service_profile) return service_profile @classmethod def get_unused_ip(cls, net_id, ip_version=None): """Get an unused IP address from an allocation pool of the net""" body = cls.admin_client.list_ports(network_id=net_id) ports = body['ports'] used_ips = [] for port in ports: used_ips.extend( [fixed_ip['ip_address'] for fixed_ip in port['fixed_ips']]) body = cls.admin_client.list_subnets(network_id=net_id) subnets = body['subnets'] for subnet in subnets: if ip_version and subnet['ip_version'] != ip_version: continue cidr = subnet['cidr'] allocation_pools = subnet['allocation_pools'] iterators = [] if allocation_pools: for allocation_pool in allocation_pools: iterators.append(netaddr.iter_iprange( allocation_pool['start'], allocation_pool['end'])) else: net = netaddr.IPNetwork(cidr) def _iterip(): for ip in net: if ip not in (net.network, net.broadcast): yield ip iterators.append(iter(_iterip())) for iterator in iterators: for ip in iterator: if str(ip) not in used_ips: return str(ip) message = ( "net(%s) has no usable IP address in allocation pools" % net_id) raise exceptions.InvalidConfiguration(message) neutron-8.4.0/neutron/tests/api/test_routers.py0000664000567000056710000002705413044372760023105 0ustar jenkinsjenkins00000000000000# Copyright 2013 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import netaddr import six from tempest.lib.common.utils import data_utils from tempest import test from neutron.tests.api import base_routers as base from neutron.tests.tempest import config CONF = config.CONF class RoutersTest(base.BaseRouterTest): @classmethod def skip_checks(cls): super(RoutersTest, cls).skip_checks() if not test.is_extension_enabled('router', 'network'): msg = "router extension not enabled."
raise self.skipException(msg) body = self.create_router(description='d1', router_name='test') self.assertEqual('d1', body['description']) body = self.client.show_router(body['id'])['router'] self.assertEqual('d1', body['description']) body = self.client.update_router(body['id'], description='d2') self.assertEqual('d2', body['router']['description']) body = self.client.show_router(body['router']['id'])['router'] self.assertEqual('d2', body['description']) @test.idempotent_id('847257cc-6afd-4154-b8fb-af49f5670ce8') @test.requires_ext(extension='ext-gw-mode', service='network') @test.attr(type='smoke') def test_create_router_with_default_snat_value(self): # Create a router with default snat rule name = data_utils.rand_name('router') router = self._create_router( name, external_network_id=CONF.network.public_network_id) self._verify_router_gateway( router['id'], {'network_id': CONF.network.public_network_id, 'enable_snat': True}) @test.idempotent_id('ea74068d-09e9-4fd7-8995-9b6a1ace920f') @test.requires_ext(extension='ext-gw-mode', service='network') @test.attr(type='smoke') def test_create_router_with_snat_explicit(self): name = data_utils.rand_name('snat-router') # Create a router enabling snat attributes enable_snat_states = [False, True] for enable_snat in enable_snat_states: external_gateway_info = { 'network_id': CONF.network.public_network_id, 'enable_snat': enable_snat} create_body = self.admin_client.create_router( name, external_gateway_info=external_gateway_info) self.addCleanup(self.admin_client.delete_router, create_body['router']['id']) # Verify snat attributes after router creation self._verify_router_gateway(create_body['router']['id'], exp_ext_gw_info=external_gateway_info) def _verify_router_gateway(self, router_id, exp_ext_gw_info=None): show_body = self.admin_client.show_router(router_id) actual_ext_gw_info = show_body['router']['external_gateway_info'] if exp_ext_gw_info is None: self.assertIsNone(actual_ext_gw_info) return # Verify only keys passed in exp_ext_gw_info for k, v in six.iteritems(exp_ext_gw_info): self.assertEqual(v, actual_ext_gw_info[k]) def _verify_gateway_port(self, router_id): list_body = self.admin_client.list_ports( network_id=CONF.network.public_network_id, device_id=router_id) self.assertEqual(len(list_body['ports']), 1) gw_port = list_body['ports'][0] fixed_ips = gw_port['fixed_ips'] self.assertGreaterEqual(len(fixed_ips), 1) public_net_body = self.admin_client.show_network( CONF.network.public_network_id) public_subnet_id = public_net_body['network']['subnets'][0] self.assertIn(public_subnet_id, [x['subnet_id'] for x in fixed_ips]) @test.idempotent_id('b386c111-3b21-466d-880c-5e72b01e1a33') @test.requires_ext(extension='ext-gw-mode', service='network') @test.attr(type='smoke') def test_update_router_set_gateway_with_snat_explicit(self): router = self._create_router(data_utils.rand_name('router-')) self.admin_client.update_router_with_snat_gw_info( router['id'], external_gateway_info={ 'network_id': CONF.network.public_network_id, 'enable_snat': True}) self._verify_router_gateway( router['id'], {'network_id': CONF.network.public_network_id, 'enable_snat': True}) self._verify_gateway_port(router['id']) @test.idempotent_id('96536bc7-8262-4fb2-9967-5c46940fa279') @test.requires_ext(extension='ext-gw-mode', service='network') @test.attr(type='smoke') def test_update_router_set_gateway_without_snat(self): router = self._create_router(data_utils.rand_name('router-')) self.admin_client.update_router_with_snat_gw_info( router['id'], external_gateway_info={ 
'network_id': CONF.network.public_network_id, 'enable_snat': False}) self._verify_router_gateway( router['id'], {'network_id': CONF.network.public_network_id, 'enable_snat': False}) self._verify_gateway_port(router['id']) @test.idempotent_id('f2faf994-97f4-410b-a831-9bc977b64374') @test.requires_ext(extension='ext-gw-mode', service='network') @test.attr(type='smoke') def test_update_router_reset_gateway_without_snat(self): router = self._create_router( data_utils.rand_name('router-'), external_network_id=CONF.network.public_network_id) self.admin_client.update_router_with_snat_gw_info( router['id'], external_gateway_info={ 'network_id': CONF.network.public_network_id, 'enable_snat': False}) self._verify_router_gateway( router['id'], {'network_id': CONF.network.public_network_id, 'enable_snat': False}) self._verify_gateway_port(router['id']) @test.idempotent_id('c86ac3a8-50bd-4b00-a6b8-62af84a0765c') @test.requires_ext(extension='extraroute', service='network') @test.attr(type='smoke') def test_update_extra_route(self): self.network = self.create_network() self.name = self.network['name'] self.subnet = self.create_subnet(self.network) # Add router interface with subnet id self.router = self._create_router( data_utils.rand_name('router-'), True) self.create_router_interface(self.router['id'], self.subnet['id']) self.addCleanup( self._delete_extra_routes, self.router['id']) # Update router extra route, second ip of the range is # used as next hop cidr = netaddr.IPNetwork(self.subnet['cidr']) next_hop = str(cidr[2]) destination = str(self.subnet['cidr']) extra_route = self.client.update_extra_routes(self.router['id'], next_hop, destination) self.assertEqual(1, len(extra_route['router']['routes'])) self.assertEqual(destination, extra_route['router']['routes'][0]['destination']) self.assertEqual(next_hop, extra_route['router']['routes'][0]['nexthop']) show_body = self.client.show_router(self.router['id']) self.assertEqual(destination, show_body['router']['routes'][0]['destination']) self.assertEqual(next_hop, show_body['router']['routes'][0]['nexthop']) def _delete_extra_routes(self, router_id): self.client.delete_extra_routes(router_id) @test.attr(type='smoke') @test.idempotent_id('01f185d1-d1a6-4cf9-abf7-e0e1384c169c') def test_network_attached_with_two_routers(self): network = self.create_network(data_utils.rand_name('network1')) self.create_subnet(network) port1 = self.create_port(network) port2 = self.create_port(network) router1 = self._create_router(data_utils.rand_name('router1')) router2 = self._create_router(data_utils.rand_name('router2')) self.client.add_router_interface_with_port_id( router1['id'], port1['id']) self.client.add_router_interface_with_port_id( router2['id'], port2['id']) self.addCleanup(self.client.remove_router_interface_with_port_id, router1['id'], port1['id']) self.addCleanup(self.client.remove_router_interface_with_port_id, router2['id'], port2['id']) body = self.client.show_port(port1['id']) port_show1 = body['port'] body = self.client.show_port(port2['id']) port_show2 = body['port'] self.assertEqual(port_show1['network_id'], network['id']) self.assertEqual(port_show2['network_id'], network['id']) self.assertEqual(port_show1['device_id'], router1['id']) self.assertEqual(port_show2['device_id'], router2['id']) class RoutersIpV6Test(RoutersTest): _ip_version = 6 class DvrRoutersTest(base.BaseRouterTest): @classmethod def skip_checks(cls): super(DvrRoutersTest, cls).skip_checks() if not test.is_extension_enabled('dvr', 'network'): msg = "DVR extension not enabled." 
raise cls.skipException(msg) @test.attr(type='smoke') @test.idempotent_id('141297aa-3424-455d-aa8d-f2d95731e00a') def test_create_distributed_router(self): name = data_utils.rand_name('router') create_body = self.admin_client.create_router( name, distributed=True) self.addCleanup(self._delete_router, create_body['router']['id'], self.admin_client) self.assertTrue(create_body['router']['distributed']) @test.attr(type='smoke') @test.idempotent_id('644d7a4a-01a1-4b68-bb8d-0c0042cb1729') def test_convert_centralized_router(self): router = self._create_router(data_utils.rand_name('router')) self.assertNotIn('distributed', router) update_body = self.admin_client.update_router(router['id'], distributed=True) self.assertTrue(update_body['router']['distributed']) show_body = self.admin_client.show_router(router['id']) self.assertTrue(show_body['router']['distributed']) show_body = self.client.show_router(router['id']) self.assertNotIn('distributed', show_body['router']) neutron-8.4.0/neutron/tests/api/test_network_ip_availability.py0000664000567000056710000001603113044372760026316 0ustar jenkinsjenkins00000000000000# Copyright 2016 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import netaddr from tempest.lib.common.utils import data_utils from tempest.lib import exceptions as lib_exc from tempest import test from neutron.tests.api import base from neutron.tests.tempest import config from neutron_lib import constants as lib_constants CONF = config.CONF # 3 IP addresses are taken from every total for IPv4 these are reserved DEFAULT_IP4_RESERVED = 3 # 2 IP addresses are taken from every total for IPv6 these are reserved # I assume the reason for having one less than IPv4 is it does not have # broadcast address DEFAULT_IP6_RESERVED = 2 DELETE_TIMEOUT = 10 DELETE_SLEEP = 2 class NetworksIpAvailabilityTest(base.BaseAdminNetworkTest): """ Tests the following operations in the Neutron API using the REST client for Neutron: test total and used ips for net create test total and ips for net after subnet create test total and used ips for net after subnet and port create """ @classmethod def skip_checks(cls): super(NetworksIpAvailabilityTest, cls).skip_checks() if not test.is_extension_enabled('network-ip-availability', 'network'): msg = "network-ip-availability extension not enabled." 
        raise cls.skipException(msg)

    def _get_used_ips(self, network, net_availability):
        if network:
            for availability in net_availability['network_ip_availabilities']:
                if availability['network_id'] == network['id']:
                    return availability['used_ips']

    def _cleanUp_port(self, port_id):
        # Delete the port, tolerating the race where it was already removed
        try:
            self.client.delete_port(port_id)
        # if the port is not found, it was already deleted by the test
        except lib_exc.NotFound:
            pass

    def _assert_total_and_used_ips(self, expected_used, expected_total,
                                   network, net_availability):
        if network:
            for availability in net_availability['network_ip_availabilities']:
                if availability['network_id'] == network['id']:
                    self.assertEqual(expected_total,
                                     availability['total_ips'])
                    self.assertEqual(expected_used,
                                     availability['used_ips'])

    def _create_subnet(self, network, ip_version):
        if ip_version == lib_constants.IP_VERSION_4:
            cidr = netaddr.IPNetwork('20.0.0.0/24')
            mask_bits = config.safe_get_config_value(
                'network', 'project_network_mask_bits')
        elif ip_version == lib_constants.IP_VERSION_6:
            cidr = netaddr.IPNetwork('20:db8::/64')
            mask_bits = config.safe_get_config_value(
                'network', 'project_network_v6_mask_bits')
        # use next() rather than the Python 2-only .next() generator method
        subnet_cidr = next(cidr.subnet(mask_bits))
        prefix_len = subnet_cidr.prefixlen
        subnet = self.create_subnet(network,
                                    cidr=subnet_cidr,
                                    enable_dhcp=False,
                                    mask_bits=mask_bits,
                                    ip_version=ip_version)
        return subnet, prefix_len


def calc_total_ips(prefix, ip_version):
    # Calculate the total number of usable IPs for a prefix after
    # subtracting the reserved addresses.
    if ip_version == lib_constants.IP_VERSION_4:
        total_ips = 2 ** (32 - prefix) - DEFAULT_IP4_RESERVED
    elif ip_version == lib_constants.IP_VERSION_6:
        total_ips = 2 ** (128 - prefix) - DEFAULT_IP6_RESERVED
    return total_ips


class NetworksIpAvailabilityIPv4Test(NetworksIpAvailabilityTest):

    @test.attr(type='smoke')
    @test.idempotent_id('0f33cc8c-1bf6-47d1-9ce1-010618240599')
    def test_admin_network_availability_before_subnet(self):
        net_name = data_utils.rand_name('network-')
        network = self.create_network(network_name=net_name)
        self.addCleanup(self.client.delete_network, network['id'])
        net_availability = self.admin_client.list_network_ip_availabilities()
        self._assert_total_and_used_ips(0, 0, network, net_availability)

    @test.attr(type='smoke')
    @test.idempotent_id('3aecd3b2-16ed-4b87-a54a-91d7b3c2986b')
    def test_net_ip_availability_after_subnet_and_ports(self):
        net_name = data_utils.rand_name('network-')
        network = self.create_network(network_name=net_name)
        self.addCleanup(self.client.delete_network, network['id'])
        subnet, prefix = self._create_subnet(network, self._ip_version)
        self.addCleanup(self.client.delete_subnet, subnet['id'])
        body = self.admin_client.list_network_ip_availabilities()
        used_ip = self._get_used_ips(network, body)
        port1 = self.client.create_port(network_id=network['id'])
        self.addCleanup(self.client.delete_port, port1['port']['id'])
        port2 = self.client.create_port(network_id=network['id'])
        self.addCleanup(self.client.delete_port, port2['port']['id'])
        net_availability = self.admin_client.list_network_ip_availabilities()
        self._assert_total_and_used_ips(
            used_ip + 2,
            calc_total_ips(prefix, self._ip_version),
            network, net_availability)

    @test.attr(type='smoke')
    @test.idempotent_id('9f11254d-757b-492e-b14b-f52144e4ee7b')
    def test_net_ip_availability_after_port_delete(self):
        net_name = data_utils.rand_name('network-')
        network = self.create_network(network_name=net_name)
        self.addCleanup(self.client.delete_network, network['id'])
        subnet, prefix = self._create_subnet(network, self._ip_version)
        self.addCleanup(self.client.delete_subnet,
subnet['id']) port = self.client.create_port(network_id=network['id']) self.addCleanup(self._cleanUp_port, port['port']['id']) net_availability = self.admin_client.list_network_ip_availabilities() used_ip = self._get_used_ips(network, net_availability) self.client.delete_port(port['port']['id']) def get_net_availability(): availabilities = self.admin_client.list_network_ip_availabilities() used_ip_after_port_delete = self._get_used_ips(network, availabilities) return used_ip - 1 == used_ip_after_port_delete self.assertTrue( test.call_until_true( get_net_availability, DELETE_TIMEOUT, DELETE_SLEEP), msg="IP address did not become available after port delete") class NetworksIpAvailabilityIPv6Test(NetworksIpAvailabilityIPv4Test): _ip_version = lib_constants.IP_VERSION_6 neutron-8.4.0/neutron/tests/api/test_floating_ips.py0000664000567000056710000000524613044372760024067 0ustar jenkinsjenkins00000000000000# Copyright 2013 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from tempest.lib.common.utils import data_utils from tempest import test from neutron.tests.api import base from neutron.tests.tempest import config CONF = config.CONF class FloatingIPTestJSON(base.BaseNetworkTest): @classmethod def resource_setup(cls): super(FloatingIPTestJSON, cls).resource_setup() if not test.is_extension_enabled('router', 'network'): msg = "router extension not enabled." raise cls.skipException(msg) cls.ext_net_id = CONF.network.public_network_id # Create network, subnet, router and add interface cls.network = cls.create_network() cls.subnet = cls.create_subnet(cls.network) cls.router = cls.create_router(data_utils.rand_name('router-'), external_network_id=cls.ext_net_id) cls.create_router_interface(cls.router['id'], cls.subnet['id']) cls.port = list() # Create two ports one each for Creation and Updating of floatingIP for i in range(2): cls.create_port(cls.network) @test.attr(type='smoke') @test.idempotent_id('c72c1c0c-2193-4aca-eeee-b1442641ffff') def test_create_update_floatingip_description(self): if not test.is_extension_enabled('standard-attr-description', 'network'): msg = "standard-attr-description not enabled." raise self.skipException(msg) body = self.client.create_floatingip( floating_network_id=self.ext_net_id, port_id=self.ports[0]['id'], description='d1' )['floatingip'] self.assertEqual('d1', body['description']) body = self.client.show_floatingip(body['id'])['floatingip'] self.assertEqual('d1', body['description']) body = self.client.update_floatingip(body['id'], description='d2') self.assertEqual('d2', body['floatingip']['description']) body = self.client.show_floatingip(body['floatingip']['id']) self.assertEqual('d2', body['floatingip']['description']) neutron-8.4.0/neutron/tests/api/test_extra_dhcp_options.py0000664000567000056710000000766013044372760025307 0ustar jenkinsjenkins00000000000000# Copyright 2013 OpenStack Foundation # All Rights Reserved. 
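# Illustrative sketch (not part of the original sources): the reserved-
# address arithmetic behind calc_total_ips() in the preceding
# test_network_ip_availability module. The reserved counts mirror
# DEFAULT_IP4_RESERVED and DEFAULT_IP6_RESERVED; IPv6 reserves one address
# fewer, presumably because it has no broadcast address.
#
#     def usable_ips(prefixlen, ip_version=4):
#         bits = 32 if ip_version == 4 else 128
#         reserved = 3 if ip_version == 4 else 2
#         return 2 ** (bits - prefixlen) - reserved
#
#     usable_ips(24)                # 253 addresses in an IPv4 /24
#     usable_ips(64, ip_version=6)  # 2**64 - 2 addresses in an IPv6 /64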
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from tempest.lib.common.utils import data_utils from tempest import test from neutron.tests.api import base class ExtraDHCPOptionsTestJSON(base.BaseNetworkTest): """ Tests the following operations with the Extra DHCP Options Neutron API extension: port create port list port show port update v2.0 of the Neutron API is assumed. It is also assumed that the Extra DHCP Options extension is enabled in the [network-feature-enabled] section of etc/tempest.conf """ @classmethod def resource_setup(cls): super(ExtraDHCPOptionsTestJSON, cls).resource_setup() if not test.is_extension_enabled('extra_dhcp_opt', 'network'): msg = "Extra DHCP Options extension not enabled." raise cls.skipException(msg) cls.network = cls.create_network() cls.subnet = cls.create_subnet(cls.network) cls.port = cls.create_port(cls.network) cls.ip_tftp = ('123.123.123.123' if cls._ip_version == 4 else '2015::dead') cls.ip_server = ('123.123.123.45' if cls._ip_version == 4 else '2015::badd') cls.extra_dhcp_opts = [ {'opt_value': 'pxelinux.0', 'opt_name': 'bootfile-name'}, {'opt_value': cls.ip_tftp, 'opt_name': 'tftp-server'}, {'opt_value': cls.ip_server, 'opt_name': 'server-ip-address'} ] @test.attr(type='smoke') @test.idempotent_id('d2c17063-3767-4a24-be4f-a23dbfa133c9') def test_create_list_port_with_extra_dhcp_options(self): # Create a port with Extra DHCP Options body = self.client.create_port( network_id=self.network['id'], extra_dhcp_opts=self.extra_dhcp_opts) port_id = body['port']['id'] self.addCleanup(self.client.delete_port, port_id) # Confirm port created has Extra DHCP Options body = self.client.list_ports() ports = body['ports'] port = [p for p in ports if p['id'] == port_id] self.assertTrue(port) self._confirm_extra_dhcp_options(port[0], self.extra_dhcp_opts) @test.attr(type='smoke') @test.idempotent_id('9a6aebf4-86ee-4f47-b07a-7f7232c55607') def test_update_show_port_with_extra_dhcp_options(self): # Update port with extra dhcp options name = data_utils.rand_name('new-port-name') body = self.client.update_port( self.port['id'], name=name, extra_dhcp_opts=self.extra_dhcp_opts) # Confirm extra dhcp options were added to the port body = self.client.show_port(self.port['id']) self._confirm_extra_dhcp_options(body['port'], self.extra_dhcp_opts) def _confirm_extra_dhcp_options(self, port, extra_dhcp_opts): retrieved = port['extra_dhcp_opts'] self.assertEqual(len(retrieved), len(extra_dhcp_opts)) for retrieved_option in retrieved: for option in extra_dhcp_opts: if (retrieved_option['opt_value'] == option['opt_value'] and retrieved_option['opt_name'] == option['opt_name']): break else: self.fail('Extra DHCP option not found in port %s' % str(retrieved_option)) class ExtraDHCPOptionsIpV6TestJSON(ExtraDHCPOptionsTestJSON): _ip_version = 6 neutron-8.4.0/neutron/tests/api/test_timestamp.py0000664000567000056710000001744013044372760023413 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the 
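# Illustrative sketch (not part of the original sources): the floating IP
# description test in the preceding module reduces to three REST calls
# against the v2.0 floatingips resource. The payload shapes below are
# assumptions inferred from the client calls, with placeholder UUIDs:
#
#   POST /v2.0/floatingips
#        {"floatingip": {"floating_network_id": "<ext-net-id>",
#                        "port_id": "<port-id>",
#                        "description": "d1"}}
#   PUT  /v2.0/floatingips/<fip-id>
#        {"floatingip": {"description": "d2"}}
#   GET  /v2.0/floatingips/<fip-id>
#        -> {"floatingip": {"description": "d2", ...}}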
License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import copy from tempest.lib.common.utils import data_utils from tempest import test from neutron.tests.api import base class TestTimeStamp(base.BaseAdminNetworkTest): ## attributes for subnetpool min_prefixlen = '28' max_prefixlen = '31' _ip_version = 4 subnet_cidr = '10.11.12.0/31' new_prefix = '10.11.15.0/24' larger_prefix = '10.11.0.0/16' @classmethod def skip_checks(cls): super(TestTimeStamp, cls).skip_checks() if not test.is_extension_enabled('timestamp_core', 'network'): raise cls.skipException("timestamp_core extension not enabled") @classmethod def resource_setup(cls): super(TestTimeStamp, cls).resource_setup() prefixes = ['10.11.12.0/24'] cls._subnetpool_data = {'min_prefixlen': '29', 'prefixes': prefixes} def _create_subnetpool(self, is_admin=False, **kwargs): name = data_utils.rand_name('subnetpool-') subnetpool_data = copy.deepcopy(self._subnetpool_data) for key in subnetpool_data.keys(): kwargs[key] = subnetpool_data[key] return self.create_subnetpool(name=name, is_admin=is_admin, **kwargs) @test.idempotent_id('462be770-b310-4df9-9c42-773217e4c8b1') def test_create_network_with_timestamp(self): network = self.create_network() # Verifies body contains timestamp fields self.assertIsNotNone(network['created_at']) self.assertIsNotNone(network['updated_at']) @test.idempotent_id('4db5417a-e11c-474d-a361-af00ebef57c5') def test_update_network_with_timestamp(self): network = self.create_network() origin_updated_at = network['updated_at'] update_body = {'name': network['name'] + 'new'} body = self.admin_client.update_network(network['id'], **update_body) updated_network = body['network'] new_updated_at = updated_network['updated_at'] self.assertEqual(network['created_at'], updated_network['created_at']) # Verify that origin_updated_at is not same with new_updated_at self.assertIsNot(origin_updated_at, new_updated_at) @test.idempotent_id('2ac50ab2-7ebd-4e27-b3ce-a9e399faaea2') def test_show_networks_attribute_with_timestamp(self): network = self.create_network() body = self.client.show_network(network['id']) show_net = body['network'] # verify the timestamp from creation and showed is same self.assertEqual(network['created_at'], show_net['created_at']) self.assertEqual(network['updated_at'], show_net['updated_at']) @test.idempotent_id('8ee55186-454f-4b97-9f9f-eb2772ee891c') def test_create_subnet_with_timestamp(self): network = self.create_network() subnet = self.create_subnet(network) # Verifies body contains timestamp fields self.assertIsNotNone(subnet['created_at']) self.assertIsNotNone(subnet['updated_at']) @test.idempotent_id('a490215a-6f4c-4af9-9a4c-57c41f1c4fa1') def test_update_subnet_with_timestamp(self): network = self.create_network() subnet = self.create_subnet(network) origin_updated_at = subnet['updated_at'] update_body = {'name': subnet['name'] + 'new'} body = self.admin_client.update_subnet(subnet['id'], **update_body) updated_subnet = body['subnet'] new_updated_at = updated_subnet['updated_at'] self.assertEqual(subnet['created_at'], updated_subnet['created_at']) # Verify that origin_updated_at is not same with new_updated_at self.assertIsNot(origin_updated_at, 
new_updated_at) @test.idempotent_id('1836a086-e7cf-4141-bf57-0cfe79e8051e') def test_show_subnet_attribute_with_timestamp(self): network = self.create_network() subnet = self.create_subnet(network) body = self.client.show_subnet(subnet['id']) show_subnet = body['subnet'] # verify the timestamp from creation and showed is same self.assertEqual(subnet['created_at'], show_subnet['created_at']) self.assertEqual(subnet['updated_at'], show_subnet['updated_at']) @test.idempotent_id('e2450a7b-d84f-4600-a093-45e78597bbac') def test_create_port_with_timestamp(self): network = self.create_network() port = self.create_port(network) # Verifies body contains timestamp fields self.assertIsNotNone(port['created_at']) self.assertIsNotNone(port['updated_at']) @test.idempotent_id('4241e0d3-54b4-46ce-a9a7-093fc764161b') def test_update_port_with_timestamp(self): network = self.create_network() port = self.create_port(network) origin_updated_at = port['updated_at'] update_body = {'name': port['name'] + 'new'} body = self.admin_client.update_port(port['id'], **update_body) updated_port = body['port'] new_updated_at = updated_port['updated_at'] self.assertEqual(port['created_at'], updated_port['created_at']) # Verify that origin_updated_at is not same with new_updated_at self.assertIsNot(origin_updated_at, new_updated_at) @test.idempotent_id('584c6723-40b6-4f26-81dd-f508f9d9fb51') def test_show_port_attribute_with_timestamp(self): network = self.create_network() port = self.create_port(network) body = self.client.show_port(port['id']) show_port = body['port'] # verify the timestamp from creation and showed is same self.assertEqual(port['created_at'], show_port['created_at']) self.assertEqual(port['updated_at'], show_port['updated_at']) @test.idempotent_id('87a8b196-4b90-44f0-b7f3-d2057d7d658e') def test_create_subnetpool_with_timestamp(self): sp = self._create_subnetpool() # Verifies body contains timestamp fields self.assertIsNotNone(sp['created_at']) self.assertIsNotNone(sp['updated_at']) @test.idempotent_id('d48c7578-c3d2-4f9b-a7a1-be2008c770a0') def test_update_subnetpool_with_timestamp(self): sp = self._create_subnetpool() origin_updated_at = sp['updated_at'] update_body = {'name': sp['name'] + 'new', 'min_prefixlen': self.min_prefixlen, 'max_prefixlen': self.max_prefixlen} body = self.client.update_subnetpool(sp['id'], **update_body) updated_sp = body['subnetpool'] new_updated_at = updated_sp['updated_at'] self.assertEqual(sp['created_at'], updated_sp['created_at']) # Verify that origin_updated_at is not same with new_updated_at self.assertIsNot(origin_updated_at, new_updated_at) @test.idempotent_id('1d3970e6-bcf7-46cd-b7d7-0807759c73b4') def test_show_subnetpool_attribute_with_timestamp(self): sp = self._create_subnetpool() body = self.client.show_subnetpool(sp['id']) show_sp = body['subnetpool'] # verify the timestamp from creation and showed is same self.assertEqual(sp['created_at'], show_sp['created_at']) self.assertEqual(sp['updated_at'], show_sp['updated_at']) neutron-8.4.0/neutron/tests/api/requirements.txt0000664000567000056710000000044213044372760023255 0ustar jenkinsjenkins00000000000000# Additional requirements for api tests # The order of packages is significant, because pip processes them in the order # of appearance. Changing the order has an impact on the overall integration # process, which may cause wedges in the gate later. 
tempest>=10.0.0,<12.1.0 # Apache-2.0 neutron-8.4.0/neutron/tests/api/test_service_type_management.py0000664000567000056710000000231313044372760026276 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from tempest import test from neutron.tests.api import base class ServiceTypeManagementTest(base.BaseNetworkTest): @classmethod def resource_setup(cls): super(ServiceTypeManagementTest, cls).resource_setup() if not test.is_extension_enabled('service-type', 'network'): msg = "Neutron Service Type Management not enabled." raise cls.skipException(msg) @test.attr(type='smoke') @test.idempotent_id('2cbbeea9-f010-40f6-8df5-4eaa0c918ea6') def test_service_provider_list(self): body = self.client.list_service_providers() self.assertIsInstance(body['service_providers'], list) neutron-8.4.0/neutron/tests/api/test_routers_negative.py0000664000567000056710000000331113044372760024765 0ustar jenkinsjenkins00000000000000# Copyright 2013 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from tempest.lib.common.utils import data_utils from tempest.lib import exceptions as lib_exc from tempest import test import testtools from neutron.tests.api import base_routers as base class DvrRoutersNegativeTest(base.BaseRouterTest): @classmethod def skip_checks(cls): super(DvrRoutersNegativeTest, cls).skip_checks() if not test.is_extension_enabled('dvr', 'network'): msg = "DVR extension not enabled." raise cls.skipException(msg) @classmethod def resource_setup(cls): super(DvrRoutersNegativeTest, cls).resource_setup() cls.router = cls.create_router(data_utils.rand_name('router')) cls.network = cls.create_network() cls.subnet = cls.create_subnet(cls.network) @test.attr(type=['negative', 'smoke']) @test.idempotent_id('4990b055-8fc7-48ab-bba7-aa28beaad0b9') def test_router_create_tenant_distributed_returns_forbidden(self): with testtools.ExpectedException(lib_exc.Forbidden): self.create_router( data_utils.rand_name('router'), distributed=True) neutron-8.4.0/neutron/tests/api/test_ports.py0000664000567000056710000000342213044372760022552 0ustar jenkinsjenkins00000000000000# Copyright 2014 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from tempest import test from neutron.tests.api import base class PortsTestJSON(base.BaseNetworkTest): @classmethod def resource_setup(cls): super(PortsTestJSON, cls).resource_setup() cls.network = cls.create_network() @test.attr(type='smoke') @test.idempotent_id('c72c1c0c-2193-4aca-bbb4-b1442640bbbb') def test_create_update_port_description(self): if not test.is_extension_enabled('standard-attr-description', 'network'): msg = "standard-attr-description not enabled." raise self.skipException(msg) body = self.create_port(self.network, description='d1') self.assertEqual('d1', body['description']) body = self.client.list_ports(id=body['id'])['ports'][0] self.assertEqual('d1', body['description']) body = self.client.update_port(body['id'], description='d2') self.assertEqual('d2', body['port']['description']) body = self.client.list_ports(id=body['port']['id'])['ports'][0] self.assertEqual('d2', body['description']) neutron-8.4.0/neutron/notifiers/0000775000567000056710000000000013044373210020047 5ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/notifiers/nova.py0000664000567000056710000002366613044372760021412 0ustar jenkinsjenkins00000000000000# Copyright (c) 2014 OpenStack Foundation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from keystoneauth1 import loading as ks_loading from novaclient import client as nova_client from novaclient import exceptions as nova_exceptions from oslo_config import cfg from oslo_log import log as logging from oslo_utils import uuidutils from sqlalchemy.orm import attributes as sql_attr from neutron._i18n import _LE, _LI, _LW from neutron.common import constants from neutron.common import exceptions as exc from neutron import context from neutron import manager from neutron.notifiers import batch_notifier LOG = logging.getLogger(__name__) VIF_UNPLUGGED = 'network-vif-unplugged' VIF_PLUGGED = 'network-vif-plugged' VIF_DELETED = 'network-vif-deleted' NEUTRON_NOVA_EVENT_STATUS_MAP = {constants.PORT_STATUS_ACTIVE: 'completed', constants.PORT_STATUS_ERROR: 'failed', constants.PORT_STATUS_DOWN: 'completed'} NOVA_API_VERSION = "2" class Notifier(object): def __init__(self): # FIXME(jamielennox): A notifier is being created for each Controller # and each Notifier is handling it's own auth. That means that we are # authenticating the exact same thing len(controllers) times. This # should be an easy thing to optimize. # FIXME(kevinbenton): remove this comment and the one above once the # switch to pecan is complete since only one notifier is constructed # in the pecan notification hook. 
auth = ks_loading.load_auth_from_conf_options(cfg.CONF, 'nova') session = ks_loading.load_session_from_conf_options( cfg.CONF, 'nova', auth=auth) extensions = [ ext for ext in nova_client.discover_extensions(NOVA_API_VERSION) if ext.name == "server_external_events"] self.nclient = nova_client.Client( NOVA_API_VERSION, session=session, region_name=cfg.CONF.nova.region_name, endpoint_type=cfg.CONF.nova.endpoint_type, extensions=extensions) self.batch_notifier = batch_notifier.BatchNotifier( cfg.CONF.send_events_interval, self.send_events) def _is_compute_port(self, port): try: if (port['device_id'] and uuidutils.is_uuid_like(port['device_id']) and port['device_owner'].startswith( constants.DEVICE_OWNER_COMPUTE_PREFIX)): return True except (KeyError, AttributeError): pass return False def _get_network_changed_event(self, device_id): return {'name': 'network-changed', 'server_uuid': device_id} def _get_port_delete_event(self, port): return {'server_uuid': port['device_id'], 'name': VIF_DELETED, 'tag': port['id']} @property def _plugin(self): # NOTE(arosen): this cannot be set in __init__ currently since # this class is initialized at the same time as NeutronManager() # which is decorated with synchronized() if not hasattr(self, '_plugin_ref'): self._plugin_ref = manager.NeutronManager.get_plugin() return self._plugin_ref def send_network_change(self, action, original_obj, returned_obj): """Called when a network change is made that nova cares about. :param action: the event that occurred. :param original_obj: the previous value of resource before action. :param returned_obj: the body returned to client as result of action. """ if not cfg.CONF.notify_nova_on_port_data_changes: return # When neutron re-assigns floating ip from an original instance # port to a new instance port without disassociate it first, an # event should be sent for original instance, that will make nova # know original instance's info, and update database for it. if (action == 'update_floatingip' and returned_obj['floatingip'].get('port_id') and original_obj.get('port_id')): disassociate_returned_obj = {'floatingip': {'port_id': None}} event = self.create_port_changed_event(action, original_obj, disassociate_returned_obj) self.batch_notifier.queue_event(event) event = self.create_port_changed_event(action, original_obj, returned_obj) self.batch_notifier.queue_event(event) def create_port_changed_event(self, action, original_obj, returned_obj): port = None if action in ['update_port', 'delete_port']: port = returned_obj['port'] elif action in ['update_floatingip', 'create_floatingip', 'delete_floatingip']: # NOTE(arosen) if we are associating a floatingip the # port_id is in the returned_obj. Otherwise on disassociate # it's in the original_object port_id = (returned_obj['floatingip'].get('port_id') or original_obj.get('port_id')) if port_id is None: return ctx = context.get_admin_context() try: port = self._plugin.get_port(ctx, port_id) except exc.PortNotFound: LOG.debug("Port %s was deleted, no need to send any " "notification", port_id) return if port and self._is_compute_port(port): if action == 'delete_port': return self._get_port_delete_event(port) else: return self._get_network_changed_event(port['device_id']) def record_port_status_changed(self, port, current_port_status, previous_port_status, initiator): """Determine if nova needs to be notified due to port status change. """ # clear out previous _notify_event port._notify_event = None # If there is no device_id set there is nothing we can do here. 
if not port.device_id: LOG.debug("device_id is not set on port yet.") return if not port.id: LOG.warning(_LW("Port ID not set! Nova will not be notified of " "port status change.")) return # We only want to notify about nova ports. if not self._is_compute_port(port): return # We notify nova when a vif is unplugged which only occurs when # the status goes from ACTIVE to DOWN. if (previous_port_status == constants.PORT_STATUS_ACTIVE and current_port_status == constants.PORT_STATUS_DOWN): event_name = VIF_UNPLUGGED # We only notify nova when a vif is plugged which only occurs # when the status goes from: # NO_VALUE/DOWN/BUILD -> ACTIVE/ERROR. elif (previous_port_status in [sql_attr.NO_VALUE, constants.PORT_STATUS_DOWN, constants.PORT_STATUS_BUILD] and current_port_status in [constants.PORT_STATUS_ACTIVE, constants.PORT_STATUS_ERROR]): event_name = VIF_PLUGGED # All the remaining state transitions are of no interest to nova else: LOG.debug("Ignoring state change previous_port_status: " "%(pre_status)s current_port_status: %(cur_status)s" " port_id %(id)s", {'pre_status': previous_port_status, 'cur_status': current_port_status, 'id': port.id}) return port._notify_event = ( {'server_uuid': port.device_id, 'name': event_name, 'status': NEUTRON_NOVA_EVENT_STATUS_MAP.get(current_port_status), 'tag': port.id}) def send_port_status(self, mapper, connection, port): event = getattr(port, "_notify_event", None) self.batch_notifier.queue_event(event) port._notify_event = None def send_events(self, batched_events): LOG.debug("Sending events: %s", batched_events) try: response = self.nclient.server_external_events.create( batched_events) except nova_exceptions.NotFound: LOG.debug("Nova returned NotFound for event: %s", batched_events) except Exception: LOG.exception(_LE("Failed to notify nova on events: %s"), batched_events) else: if not isinstance(response, list): LOG.error(_LE("Error response returned from nova: %s"), response) return response_error = False for event in response: try: code = event['code'] except KeyError: response_error = True continue if code != 200: LOG.warning(_LW("Nova event: %s returned with failed " "status"), event) else: LOG.info(_LI("Nova event response: %s"), event) if response_error: LOG.error(_LE("Error response returned from nova: %s"), response) neutron-8.4.0/neutron/notifiers/__init__.py0000664000567000056710000000000013044372736022162 0ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/notifiers/batch_notifier.py0000664000567000056710000000444113044372736023420 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import eventlet class BatchNotifier(object): def __init__(self, batch_interval, callback): self.pending_events = [] self._waiting_to_send = False self.callback = callback self.batch_interval = batch_interval def queue_event(self, event): """Called to queue sending an event with the next batch of events. Sending events individually, as they occur, has been problematic as it can result in a flood of sends. 
Previously, there was a loopingcall thread that would send batched events on a periodic interval. However, maintaining a persistent thread in the loopingcall was also problematic. This replaces the loopingcall with a mechanism that creates a short-lived thread on demand when the first event is queued. That thread will sleep once for the same batch_duration to allow other events to queue up in pending_events and then will send them when it wakes. If a thread is already alive and waiting, this call will simply queue the event and return leaving it up to the thread to send it. :param event: the event that occurred. """ if not event: return self.pending_events.append(event) if self._waiting_to_send: return self._waiting_to_send = True def last_out_sends(): eventlet.sleep(self.batch_interval) self._waiting_to_send = False self._notify() eventlet.spawn_n(last_out_sends) def _notify(self): if not self.pending_events: return batched_events = self.pending_events self.pending_events = [] self.callback(batched_events) neutron-8.4.0/neutron/cmd/0000775000567000056710000000000013044373210016610 5ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/cmd/netns_cleanup.py0000664000567000056710000001502313044372760022032 0ustar jenkinsjenkins00000000000000# Copyright (c) 2012 OpenStack Foundation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import itertools import re import time from oslo_config import cfg from oslo_log import log as logging from oslo_utils import importutils from neutron._i18n import _, _LE from neutron.agent.common import config as agent_config from neutron.agent.common import ovs_lib from neutron.agent.dhcp import config as dhcp_config from neutron.agent.l3 import agent as l3_agent from neutron.agent.l3 import dvr from neutron.agent.l3 import dvr_fip_ns from neutron.agent.linux import dhcp from neutron.agent.linux import external_process from neutron.agent.linux import interface from neutron.agent.linux import ip_lib from neutron.api.v2 import attributes from neutron.common import config LOG = logging.getLogger(__name__) LB_NS_PREFIX = 'qlbaas-' NS_PREFIXES = { 'dhcp': [dhcp.NS_PREFIX], 'l3': [l3_agent.NS_PREFIX, dvr.SNAT_NS_PREFIX, dvr_fip_ns.FIP_NS_PREFIX], 'lbaas': [LB_NS_PREFIX], } class FakeDhcpPlugin(object): """Fake RPC plugin to bypass any RPC calls.""" def __getattribute__(self, name): def fake_method(*args): pass return fake_method def setup_conf(): """Setup the cfg for the clean up utility. Use separate setup_conf for the utility because there are many options from the main config that do not apply during clean-up. 
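    A typical invocation, assuming the utility is shipped as the
    neutron-netns-cleanup console script (the config file paths are
    illustrative only):

        neutron-netns-cleanup --config-file /etc/neutron/neutron.conf \
            --config-file /etc/neutron/dhcp_agent.ini \
            --agent-type dhcp --force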
""" cli_opts = [ cfg.BoolOpt('force', default=False, help=_('Delete the namespace by removing all devices.')), cfg.StrOpt('agent-type', choices=['dhcp', 'l3', 'lbaas'], help=_('Cleanup resources of a specific agent type only.')), ] conf = cfg.CONF conf.register_cli_opts(cli_opts) agent_config.register_interface_driver_opts_helper(conf) conf.register_opts(dhcp_config.DHCP_AGENT_OPTS) conf.register_opts(dhcp_config.DHCP_OPTS) conf.register_opts(dhcp_config.DNSMASQ_OPTS) conf.register_opts(interface.OPTS) return conf def _get_dhcp_process_monitor(config): return external_process.ProcessMonitor(config=config, resource_type='dhcp') def kill_dhcp(conf, namespace): """Disable DHCP for a network if DHCP is still active.""" network_id = namespace.replace(dhcp.NS_PREFIX, '') dhcp_driver = importutils.import_object( conf.dhcp_driver, conf=conf, process_monitor=_get_dhcp_process_monitor(conf), network=dhcp.NetModel({'id': network_id}), plugin=FakeDhcpPlugin()) if dhcp_driver.active: dhcp_driver.disable() def eligible_for_deletion(conf, namespace, force=False): """Determine whether a namespace is eligible for deletion. Eligibility is determined by having only the lo device or if force is passed as a parameter. """ if conf.agent_type: prefixes = NS_PREFIXES.get(conf.agent_type) else: prefixes = itertools.chain(*NS_PREFIXES.values()) ns_mangling_pattern = '(%s%s)' % ('|'.join(prefixes), attributes.UUID_PATTERN) # filter out namespaces without UUID as the name if not re.match(ns_mangling_pattern, namespace): return False ip = ip_lib.IPWrapper(namespace=namespace) return force or ip.namespace_is_empty() def unplug_device(conf, device): orig_log_fail_as_error = device.get_log_fail_as_error() device.set_log_fail_as_error(False) try: device.link.delete() except RuntimeError: device.set_log_fail_as_error(orig_log_fail_as_error) # Maybe the device is OVS port, so try to delete ovs = ovs_lib.BaseOVS() bridge_name = ovs.get_bridge_for_iface(device.name) if bridge_name: bridge = ovs_lib.OVSBridge(bridge_name) bridge.delete_port(device.name) else: LOG.debug('Unable to find bridge for device: %s', device.name) finally: device.set_log_fail_as_error(orig_log_fail_as_error) def destroy_namespace(conf, namespace, force=False): """Destroy a given namespace. If force is True, then dhcp (if it exists) will be disabled and all devices will be forcibly removed. """ try: ip = ip_lib.IPWrapper(namespace=namespace) if force: kill_dhcp(conf, namespace) # NOTE: The dhcp driver will remove the namespace if is it empty, # so a second check is required here. if ip.netns.exists(namespace): for device in ip.get_devices(exclude_loopback=True): unplug_device(conf, device) ip.garbage_collect_namespace() except Exception: LOG.exception(_LE('Error unable to destroy namespace: %s'), namespace) def cleanup_network_namespaces(conf): # Identify namespaces that are candidates for deletion. candidates = [ns for ns in ip_lib.IPWrapper.get_namespaces() if eligible_for_deletion(conf, ns, conf.force)] if candidates: time.sleep(2) for namespace in candidates: destroy_namespace(conf, namespace, conf.force) def main(): """Main method for cleaning up network namespaces. This method will make two passes checking for namespaces to delete. The process will identify candidates, sleep, and call garbage collect. The garbage collection will re-verify that the namespace meets the criteria for deletion (ie it is empty). 
The period of sleep and the 2nd pass allow time for the namespace state to settle, so that the check prior deletion will re-confirm the namespace is empty. The utility is designed to clean-up after the forced or unexpected termination of Neutron agents. The --force flag should only be used as part of the cleanup of a devstack installation as it will blindly purge namespaces and their devices. This option also kills any lingering DHCP instances. """ conf = setup_conf() conf() config.setup_logging() cleanup_network_namespaces(conf) neutron-8.4.0/neutron/cmd/ipset_cleanup.py0000664000567000056710000000730213044372760022030 0ustar jenkinsjenkins00000000000000# Copyright (c) 2015 OpenStack Foundation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import cfg from oslo_log import log as logging from neutron._i18n import _, _LE, _LI from neutron.agent.linux import ipset_manager from neutron.agent.linux import utils from neutron.common import config LOG = logging.getLogger(__name__) def setup_conf(): """Setup the cfg for the clean up utility. Use separate setup_conf for the utility because there are many options from the main config that do not apply during clean-up. """ cli_opts = [ cfg.BoolOpt('allsets', default=False, help=_('Destroy all IPsets.')), cfg.BoolOpt('force', default=False, help=_('Destroy IPsets even if there is an iptables ' 'reference.')), cfg.StrOpt('prefix', default=ipset_manager.NET_PREFIX, help=_('String prefix used to match IPset names.')), ] conf = cfg.CONF conf.register_cli_opts(cli_opts) return conf def remove_iptables_reference(ipset): # Remove any iptables reference to this IPset cmd = ['iptables-save'] if 'IPv4' in ipset else ['ip6tables-save'] iptables_save = utils.execute(cmd, run_as_root=True) if ipset in iptables_save: cmd = ['iptables'] if 'IPv4' in ipset else ['ip6tables'] LOG.info(_LI("Removing iptables rule for IPset: %s"), ipset) for rule in iptables_save.splitlines(): if '--match-set %s ' % ipset in rule and rule.startswith('-A'): # change to delete params = rule.split() params[0] = '-D' try: utils.execute(cmd + params, run_as_root=True) except Exception: LOG.exception(_LE('Error, unable to remove iptables rule ' 'for IPset: %s'), ipset) def destroy_ipset(conf, ipset): # If there is an iptables reference and we don't remove it, the # IPset removal will fail below if conf.force: remove_iptables_reference(ipset) LOG.info(_LI("Destroying IPset: %s"), ipset) cmd = ['ipset', 'destroy', ipset] try: utils.execute(cmd, run_as_root=True) except Exception: LOG.exception(_LE('Error, unable to destroy IPset: %s'), ipset) def cleanup_ipsets(conf): # Identify ipsets for destruction. LOG.info(_LI("Destroying IPsets with prefix: %s"), conf.prefix) cmd = ['ipset', '-L', '-n'] ipsets = utils.execute(cmd, run_as_root=True) for ipset in ipsets.split('\n'): if conf.allsets or ipset.startswith(conf.prefix): destroy_ipset(conf, ipset) LOG.info(_LI("IPset cleanup completed successfully")) def main(): """Main method for cleaning up IPsets. 
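    As an illustration of the --force path, a saved rule that references
    an IPset, such as the made-up example

        -A neutron-openvswi-i0123456 -m set --match-set NIPv4example src \
            -j RETURN

    is removed by re-executing it with its '-A' replaced by '-D' before
    the set itself is destroyed.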
The utility is designed to clean-up after the forced or unexpected termination of Neutron agents. The --allsets flag should only be used as part of the cleanup of a devstack installation as it will blindly destroy all IPsets. """ conf = setup_conf() conf() config.setup_logging() cleanup_ipsets(conf) neutron-8.4.0/neutron/cmd/__init__.py0000664000567000056710000000210713044372736020735 0ustar jenkinsjenkins00000000000000# All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import logging as sys_logging from oslo_reports import guru_meditation_report as gmr from neutron import version # During the call to gmr.TextGuruMeditation.setup_autorun(), Guru Meditation # Report tries to start logging. Set a handler here to accommodate this. logger = sys_logging.getLogger(None) if not logger.handlers: logger.addHandler(sys_logging.StreamHandler()) _version_string = version.version_info.release_string() gmr.TextGuruMeditation.setup_autorun(version=_version_string) neutron-8.4.0/neutron/cmd/pd_notify.py0000664000567000056710000000247213044372760021173 0ustar jenkinsjenkins00000000000000# Copyright (c) 2015 Cisco Systems. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import os import signal import sys from neutron.common import utils def main(): """Expected arguments: sys.argv[1] - The add/update/delete operation performed by the PD agent sys.argv[2] - The file where the new prefix should be written sys.argv[3] - The process ID of the L3 agent to be notified of this change """ operation = sys.argv[1] prefix_fname = sys.argv[2] agent_pid = sys.argv[3] prefix = os.getenv('PREFIX1', "::") if operation == "add" or operation == "update": utils.replace_file(prefix_fname, "%s/64" % prefix) elif operation == "delete": utils.replace_file(prefix_fname, "::/64") os.kill(int(agent_pid), signal.SIGUSR1) neutron-8.4.0/neutron/cmd/linuxbridge_cleanup.py0000664000567000056710000000505513044372760023223 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
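# Illustrative sketch (not part of the original sources): the pd_notify
# helper above is invoked by the prefix-delegation client as a
# notification hook. A hypothetical manual run, assuming the script is
# installed as the neutron-pd-notify console script and the L3 agent runs
# as PID 1234:
#
#   PREFIX1=2001:db8:1:: neutron-pd-notify add /tmp/pd_prefix 1234
#
# writes "2001:db8:1::/64" into /tmp/pd_prefix and sends SIGUSR1 to the
# agent; a "delete" operation writes "::/64" instead.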
import sys

from oslo_config import cfg
from oslo_log import log as logging

from neutron._i18n import _LE, _LI
from neutron.common import config
from neutron.common import utils as n_utils
from neutron.plugins.ml2.drivers.linuxbridge.agent \
    import linuxbridge_neutron_agent


LOG = logging.getLogger(__name__)


def remove_empty_bridges():
    try:
        interface_mappings = n_utils.parse_mappings(
            cfg.CONF.LINUX_BRIDGE.physical_interface_mappings)
    except ValueError as e:
        LOG.error(_LE("Parsing physical_interface_mappings failed: %s."), e)
        sys.exit(1)
    LOG.info(_LI("Interface mappings: %s."), interface_mappings)

    try:
        bridge_mappings = n_utils.parse_mappings(
            cfg.CONF.LINUX_BRIDGE.bridge_mappings)
    except ValueError as e:
        LOG.error(_LE("Parsing bridge_mappings failed: %s."), e)
        sys.exit(1)
    LOG.info(_LI("Bridge mappings: %s."), bridge_mappings)

    lb_manager = linuxbridge_neutron_agent.LinuxBridgeManager(
        bridge_mappings, interface_mappings)

    bridge_names = lb_manager.get_deletable_bridges()
    for bridge_name in bridge_names:
        if lb_manager.get_tap_devices_count(bridge_name):
            continue

        try:
            lb_manager.delete_bridge(bridge_name)
            LOG.info(_LI("Linux bridge %s deleted"), bridge_name)
        except RuntimeError:
            LOG.exception(_LE("Linux bridge %s delete failed"), bridge_name)
    LOG.info(_LI("Linux bridge cleanup completed successfully"))


def main():
    """Main method for cleaning up empty linux bridges.

    This tool deletes every empty linux bridge managed by the linuxbridge
    agent (brq* linux bridges), except the ones defined by deployers via
    the bridge_mappings option in the LINUX_BRIDGE section.

    This tool should not be called during an instance create, migrate,
    etc. as it can delete a linux bridge about to be used by nova.
    """
    cfg.CONF(sys.argv[1:])
    config.setup_logging()
    remove_empty_bridges()
neutron-8.4.0/neutron/cmd/ovs_cleanup.py0000664000567000056710000000702613044372760021516 0ustar jenkinsjenkins00000000000000# Copyright (c) 2012 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from oslo_config import cfg
from oslo_log import log as logging

from neutron._i18n import _, _LI
from neutron.agent.common import config as agent_config
from neutron.agent.common import ovs_lib
from neutron.agent.l3 import config as l3_config
from neutron.agent.linux import interface
from neutron.agent.linux import ip_lib
from neutron.common import config


LOG = logging.getLogger(__name__)


def setup_conf():
    """Setup the cfg for the clean up utility.

    Use separate setup_conf for the utility because there are many options
    from the main config that do not apply during clean-up.
    """
    opts = [
        cfg.BoolOpt('ovs_all_ports',
                    default=False,
                    help=_('True to delete all ports on all the OpenvSwitch '
                           'bridges.
False to delete ports created by ' 'Neutron on integration and external network ' 'bridges.')) ] conf = cfg.CONF conf.register_cli_opts(opts) conf.register_opts(l3_config.OPTS) conf.register_opts(interface.OPTS) agent_config.register_interface_driver_opts_helper(conf) return conf def collect_neutron_ports(bridges): """Collect ports created by Neutron from OVS.""" ports = [] for bridge in bridges: ovs = ovs_lib.OVSBridge(bridge) ports += [port.port_name for port in ovs.get_vif_ports()] return ports def delete_neutron_ports(ports): """Delete non-internal ports created by Neutron Non-internal OVS ports need to be removed manually. """ for port in ports: device = ip_lib.IPDevice(port) if device.exists(): device.link.delete() LOG.info(_LI("Deleting port: %s"), port) def main(): """Main method for cleaning up OVS bridges. The utility cleans up the integration bridges used by Neutron. """ conf = setup_conf() conf() config.setup_logging() configuration_bridges = set([conf.ovs_integration_bridge, conf.external_network_bridge]) ovs = ovs_lib.BaseOVS() ovs_bridges = set(ovs.get_bridges()) available_configuration_bridges = configuration_bridges & ovs_bridges if conf.ovs_all_ports: bridges = ovs_bridges else: bridges = available_configuration_bridges # Collect existing ports created by Neutron on configuration bridges. # After deleting ports from OVS bridges, we cannot determine which # ports were created by Neutron, so port information is collected now. ports = collect_neutron_ports(available_configuration_bridges) for bridge in bridges: LOG.info(_LI("Cleaning bridge: %s"), bridge) ovs = ovs_lib.OVSBridge(bridge) ovs.delete_ports(all_ports=conf.ovs_all_ports) # Remove remaining ports created by Neutron (usually veth pair) delete_neutron_ports(ports) LOG.info(_LI("OVS cleanup completed successfully")) neutron-8.4.0/neutron/cmd/sanity/0000775000567000056710000000000013044373210020117 5ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/cmd/sanity/__init__.py0000664000567000056710000000000013044372736022232 0ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/cmd/sanity/checks.py0000664000567000056710000003316313044372760021750 0ustar jenkinsjenkins00000000000000# Copyright (c) 2014 OpenStack Foundation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
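# Illustrative sketch (not part of the original sources): the ovs_cleanup
# utility that precedes this module is normally run once while the node
# boots, before the agents start. An example invocation (the config file
# paths are illustrative):
#
#   neutron-ovs-cleanup --config-file /etc/neutron/neutron.conf \
#       --config-file /etc/neutron/l3_agent.ini --ovs_all_ports
#
# Without --ovs_all_ports only Neutron-created ports on the integration
# and external network bridges are deleted; with it, every port on every
# OVS bridge is removed.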
import re import shutil import tempfile import netaddr from oslo_config import cfg from oslo_log import log as logging from oslo_utils import uuidutils from neutron._i18n import _LE from neutron.agent.common import ovs_lib from neutron.agent.l3 import ha_router from neutron.agent.l3 import namespaces from neutron.agent.linux import external_process from neutron.agent.linux import ip_lib from neutron.agent.linux import ip_link_support from neutron.agent.linux import keepalived from neutron.agent.linux import utils as agent_utils from neutron.common import constants as n_consts from neutron.common import utils from neutron.plugins.common import constants as const from neutron.plugins.ml2.drivers.openvswitch.agent.common \ import constants as ovs_const LOG = logging.getLogger(__name__) MINIMUM_DNSMASQ_VERSION = 2.67 MINIMUM_DIBBLER_VERSION = '1.0.1' def ovs_vxlan_supported(from_ip='192.0.2.1', to_ip='192.0.2.2'): name = "vxlantest-" + utils.get_random_string(6) with ovs_lib.OVSBridge(name) as br: port = br.add_tunnel_port(from_ip, to_ip, const.TYPE_VXLAN) return port != ovs_lib.INVALID_OFPORT def ovs_geneve_supported(from_ip='192.0.2.3', to_ip='192.0.2.4'): name = "genevetest-" + utils.get_random_string(6) with ovs_lib.OVSBridge(name) as br: port = br.add_tunnel_port(from_ip, to_ip, const.TYPE_GENEVE) return port != ovs_lib.INVALID_OFPORT def iproute2_vxlan_supported(): ip = ip_lib.IPWrapper() name = "vxlantest-" + utils.get_random_string(4) port = ip.add_vxlan(name, 3000) ip.del_veth(name) return name == port.name def patch_supported(): seed = utils.get_random_string(6) name = "patchtest-" + seed peer_name = "peertest0-" + seed patch_name = "peertest1-" + seed with ovs_lib.OVSBridge(name) as br: port = br.add_patch_port(patch_name, peer_name) return port != ovs_lib.INVALID_OFPORT def nova_notify_supported(): try: import neutron.notifiers.nova # noqa since unused return True except ImportError: return False def ofctl_arg_supported(cmd, **kwargs): """Verify if ovs-ofctl binary supports cmd with **kwargs. :param cmd: ovs-ofctl command to use for test. :param **kwargs: arguments to test with the command. :returns: a boolean if the supplied arguments are supported. """ br_name = 'br-test-%s' % utils.get_random_string(6) with ovs_lib.OVSBridge(br_name) as test_br: full_args = ["ovs-ofctl", cmd, test_br.br_name, ovs_lib._build_flow_expr_str(kwargs, cmd.split('-')[0])] try: agent_utils.execute(full_args, run_as_root=True) except RuntimeError as e: LOG.debug("Exception while checking supported feature via " "command %s. 
Exception: %s", full_args, e) return False except Exception: LOG.exception(_LE("Unexpected exception while checking supported" " feature via command: %s"), full_args) return False else: return True def arp_responder_supported(): mac = netaddr.EUI('dead:1234:beef', dialect=netaddr.mac_unix) ip = netaddr.IPAddress('240.0.0.1') actions = ovs_const.ARP_RESPONDER_ACTIONS % {'mac': mac, 'ip': ip} return ofctl_arg_supported(cmd='add-flow', table=21, priority=1, proto='arp', dl_vlan=42, nw_dst='%s' % ip, actions=actions) def arp_header_match_supported(): return ofctl_arg_supported(cmd='add-flow', table=24, priority=1, proto='arp', arp_op='0x2', arp_spa='1.1.1.1', actions="NORMAL") def icmpv6_header_match_supported(): return ofctl_arg_supported(cmd='add-flow', table=ovs_const.ARP_SPOOF_TABLE, priority=1, dl_type=n_consts.ETHERTYPE_IPV6, nw_proto=n_consts.PROTO_NUM_IPV6_ICMP, icmp_type=n_consts.ICMPV6_TYPE_NA, nd_target='fdf8:f53b:82e4::10', actions="NORMAL") def vf_management_supported(): is_supported = True required_caps = ( ip_link_support.IpLinkConstants.IP_LINK_CAPABILITY_STATE, ip_link_support.IpLinkConstants.IP_LINK_CAPABILITY_SPOOFCHK, ip_link_support.IpLinkConstants.IP_LINK_CAPABILITY_RATE) try: vf_section = ip_link_support.IpLinkSupport.get_vf_mgmt_section() for cap in required_caps: if not ip_link_support.IpLinkSupport.vf_mgmt_capability_supported( vf_section, cap): is_supported = False LOG.debug("ip link command does not support " "vf capability '%(cap)s'", cap) except ip_link_support.UnsupportedIpLinkCommand: LOG.exception(_LE("Unexpected exception while checking supported " "ip link command")) return False return is_supported def netns_read_requires_helper(): ipw = ip_lib.IPWrapper() nsname = "netnsreadtest-" + uuidutils.generate_uuid() ipw.netns.add(nsname) try: # read without root_helper. if exists, not required. ipw_nohelp = ip_lib.IPWrapper() exists = ipw_nohelp.netns.exists(nsname) finally: ipw.netns.delete(nsname) return not exists def get_minimal_dnsmasq_version_supported(): return MINIMUM_DNSMASQ_VERSION def dnsmasq_version_supported(): try: cmd = ['dnsmasq', '--version'] env = {'LC_ALL': 'C'} out = agent_utils.execute(cmd, addl_env=env) m = re.search(r"version (\d+\.\d+)", out) ver = float(m.group(1)) if m else 0 if ver < MINIMUM_DNSMASQ_VERSION: return False except (OSError, RuntimeError, IndexError, ValueError) as e: LOG.debug("Exception while checking minimal dnsmasq version. " "Exception: %s", e) return False return True class KeepalivedIPv6Test(object): def __init__(self, ha_port, gw_port, gw_vip, default_gw): self.ha_port = ha_port self.gw_port = gw_port self.gw_vip = gw_vip self.default_gw = default_gw self.manager = None self.config = None self.config_path = None self.nsname = "keepalivedtest-" + uuidutils.generate_uuid() self.pm = external_process.ProcessMonitor(cfg.CONF, 'router') self.orig_interval = cfg.CONF.AGENT.check_child_processes_interval def configure(self): config = keepalived.KeepalivedConf() instance1 = keepalived.KeepalivedInstance('MASTER', self.ha_port, 1, ['169.254.192.0/18'], advert_int=5) instance1.track_interfaces.append(self.ha_port) # Configure keepalived with an IPv6 address (gw_vip) on gw_port. vip_addr1 = keepalived.KeepalivedVipAddress(self.gw_vip, self.gw_port) instance1.vips.append(vip_addr1) # Configure keepalived with an IPv6 default route on gw_port. 
gateway_route = keepalived.KeepalivedVirtualRoute(n_consts.IPv6_ANY, self.default_gw, self.gw_port) instance1.virtual_routes.gateway_routes = [gateway_route] config.add_instance(instance1) self.config = config def start_keepalived_process(self): # Disable process monitoring for Keepalived process. cfg.CONF.set_override('check_child_processes_interval', 0, 'AGENT') # Create a temp directory to store keepalived configuration. self.config_path = tempfile.mkdtemp() # Instantiate keepalived manager with the IPv6 configuration. self.manager = keepalived.KeepalivedManager('router1', self.config, namespace=self.nsname, process_monitor=self.pm, conf_path=self.config_path) self.manager.spawn() def verify_ipv6_address_assignment(self, gw_dev): process = self.manager.get_process() agent_utils.wait_until_true(lambda: process.active) def _gw_vip_assigned(): iface_ip = gw_dev.addr.list(ip_version=6, scope='global') if iface_ip: return self.gw_vip == iface_ip[0]['cidr'] agent_utils.wait_until_true(_gw_vip_assigned) def __enter__(self): ip_lib.IPWrapper().netns.add(self.nsname) return self def __exit__(self, exc_type, exc_value, exc_tb): self.pm.stop() if self.manager: self.manager.disable() if self.config_path: shutil.rmtree(self.config_path, ignore_errors=True) ip_lib.IPWrapper().netns.delete(self.nsname) cfg.CONF.set_override('check_child_processes_interval', self.orig_interval, 'AGENT') def keepalived_ipv6_supported(): """Check if keepalived supports IPv6 functionality. Validation is done as follows. 1. Create a namespace. 2. Create OVS bridge with two ports (ha_port and gw_port) 3. Move the ovs ports to the namespace. 4. Spawn keepalived process inside the namespace with IPv6 configuration. 5. Verify if IPv6 address is assigned to gw_port. 6. Verify if IPv6 default route is configured by keepalived. """ random_str = utils.get_random_string(6) br_name = "ka-test-" + random_str ha_port = ha_router.HA_DEV_PREFIX + random_str gw_port = namespaces.INTERNAL_DEV_PREFIX + random_str gw_vip = 'fdf8:f53b:82e4::10/64' expected_default_gw = 'fe80:f816::1' with ovs_lib.OVSBridge(br_name) as br: with KeepalivedIPv6Test(ha_port, gw_port, gw_vip, expected_default_gw) as ka: br.add_port(ha_port, ('type', 'internal')) br.add_port(gw_port, ('type', 'internal')) ha_dev = ip_lib.IPDevice(ha_port) gw_dev = ip_lib.IPDevice(gw_port) ha_dev.link.set_netns(ka.nsname) gw_dev.link.set_netns(ka.nsname) ha_dev.link.set_up() gw_dev.link.set_up() ka.configure() ka.start_keepalived_process() ka.verify_ipv6_address_assignment(gw_dev) default_gw = gw_dev.route.get_gateway(ip_version=6) if default_gw: default_gw = default_gw['gateway'] return expected_default_gw == default_gw def ovsdb_native_supported(): # Running the test should ensure we are configured for OVSDB native try: ovs = ovs_lib.BaseOVS() ovs.get_bridges() return True except ImportError as ex: LOG.error(_LE("Failed to import required modules. Ensure that the " "python-openvswitch package is installed. 
Error: %s"), ex) except Exception: LOG.exception(_LE("Unexpected exception occurred.")) return False def ovs_conntrack_supported(): random_str = utils.get_random_string(6) br_name = "ovs-test-" + random_str with ovs_lib.OVSBridge(br_name) as br: try: br.set_protocols(["OpenFlow%d" % i for i in range(10, 15)]) except RuntimeError as e: LOG.debug("Exception while checking ovs conntrack support: %s", e) return False return ofctl_arg_supported(cmd='add-flow', ct_state='+trk', actions='drop') def ebtables_supported(): try: cmd = ['ebtables', '--version'] agent_utils.execute(cmd) return True except (OSError, RuntimeError, IndexError, ValueError) as e: LOG.debug("Exception while checking for installed ebtables. " "Exception: %s", e) return False def ipset_supported(): try: cmd = ['ipset', '--version'] agent_utils.execute(cmd) return True except (OSError, RuntimeError, IndexError, ValueError) as e: LOG.debug("Exception while checking for installed ipset. " "Exception: %s", e) return False def ip6tables_supported(): try: cmd = ['ip6tables', '--version'] agent_utils.execute(cmd) return True except (OSError, RuntimeError, IndexError, ValueError) as e: LOG.debug("Exception while checking for installed ip6tables. " "Exception: %s", e) return False def get_minimal_dibbler_version_supported(): return MINIMUM_DIBBLER_VERSION def dibbler_version_supported(): try: cmd = ['dibbler-client', 'help'] out = agent_utils.execute(cmd) return '-w' in out except (OSError, RuntimeError, IndexError, ValueError) as e: LOG.debug("Exception while checking minimal dibbler version. " "Exception: %s", e) return False neutron-8.4.0/neutron/cmd/keepalived_state_change.py0000664000567000056710000000130713044372736024015 0ustar jenkinsjenkins00000000000000# Copyright (c) 2015 Red Hat Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron.agent.l3 import keepalived_state_change def main(): keepalived_state_change.main() neutron-8.4.0/neutron/cmd/sanity_check.py0000664000567000056710000003130313044372760021637 0ustar jenkinsjenkins00000000000000# Copyright (c) 2014 OpenStack Foundation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import sys from oslo_config import cfg from oslo_log import log as logging from neutron._i18n import _, _LE, _LW from neutron.agent import dhcp_agent from neutron.cmd.sanity import checks from neutron.common import config from neutron.db import l3_hamode_db LOG = logging.getLogger(__name__) def setup_conf(): cfg.CONF.import_group('AGENT', 'neutron.plugins.ml2.drivers.openvswitch.' 
'agent.common.config') cfg.CONF.import_group('OVS', 'neutron.plugins.ml2.drivers.openvswitch.' 'agent.common.config') cfg.CONF.import_group('VXLAN', 'neutron.plugins.ml2.drivers.linuxbridge.' 'agent.common.config') cfg.CONF.import_group('ml2', 'neutron.plugins.ml2.config') cfg.CONF.import_group('ml2_sriov', 'neutron.plugins.ml2.drivers.mech_sriov.mech_driver.' 'mech_driver') cfg.CONF.import_group('SECURITYGROUP', 'neutron.agent.securitygroups_rpc') dhcp_agent.register_options(cfg.CONF) cfg.CONF.register_opts(l3_hamode_db.L3_HA_OPTS) class BoolOptCallback(cfg.BoolOpt): def __init__(self, name, callback, **kwargs): if 'default' not in kwargs: kwargs['default'] = False self.callback = callback super(BoolOptCallback, self).__init__(name, **kwargs) def check_ovs_vxlan(): result = checks.ovs_vxlan_supported() if not result: LOG.error(_LE('Check for Open vSwitch VXLAN support failed. ' 'Please ensure that the version of openvswitch ' 'being used has VXLAN support.')) return result def check_ovs_geneve(): result = checks.ovs_geneve_supported() if not result: LOG.error(_LE('Check for Open vSwitch Geneve support failed. ' 'Please ensure that the version of openvswitch ' 'and kernel being used has Geneve support.')) return result def check_iproute2_vxlan(): result = checks.iproute2_vxlan_supported() if not result: LOG.error(_LE('Check for iproute2 VXLAN support failed. Please ensure ' 'that iproute2 has VXLAN support.')) return result def check_ovs_patch(): result = checks.patch_supported() if not result: LOG.error(_LE('Check for Open vSwitch patch port support failed. ' 'Please ensure that the version of openvswitch ' 'being used has patch port support or disable features ' 'requiring patch ports (gre/vxlan, etc.).')) return result def check_read_netns(): required = checks.netns_read_requires_helper() if not required and cfg.CONF.AGENT.use_helper_for_ns_read: LOG.warning(_LW("The user that is executing neutron can read the " "namespaces without using the root_helper. Disable " "the use_helper_for_ns_read option to avoid a " "performance impact.")) # Don't fail because nothing is actually broken. Just not optimal. result = True elif required and not cfg.CONF.AGENT.use_helper_for_ns_read: LOG.error(_LE("The user that is executing neutron does not have " "permissions to read the namespaces. Enable the " "use_helper_for_ns_read configuration option.")) result = False else: # everything is configured appropriately result = True return result # NOTE(ihrachyshka): since the minimal version is currently capped due to # missing hwaddr matching in dnsmasq < 2.67, a better version of the check # would actually start a dnsmasq server and issue a DHCP request using an # IPv6 DHCP client. def check_dnsmasq_version(): result = checks.dnsmasq_version_supported() if not result: LOG.error(_LE('The installed version of dnsmasq is too old. ' 'Please update to at least version %s.'), checks.get_minimal_dnsmasq_version_supported()) return result def check_keepalived_ipv6_support(): result = checks.keepalived_ipv6_supported() if not result: LOG.error(_LE('The installed version of keepalived does not support ' 'IPv6. Please update to at least version 1.2.10 for ' 'IPv6 support.')) return result def check_dibbler_version(): result = checks.dibbler_version_supported() if not result: LOG.error(_LE('The installed version of dibbler-client is too old. 
' 'Please update to at least version %s.'), checks.get_minimal_dibbler_version_supported()) return result def check_nova_notify(): result = checks.nova_notify_supported() if not result: LOG.error(_LE('Nova notifications are enabled, but novaclient is not ' 'installed. Either disable nova notifications or ' 'install python-novaclient.')) return result def check_arp_responder(): result = checks.arp_responder_supported() if not result: LOG.error(_LE('Check for Open vSwitch ARP responder support failed. ' 'Please ensure that the version of openvswitch ' 'being used has support for ARP flows.')) return result def check_arp_header_match(): result = checks.arp_header_match_supported() if not result: LOG.error(_LE('Check for Open vSwitch support of ARP header matching ' 'failed. ARP spoofing suppression will not work. A ' 'newer version of OVS is required.')) return result def check_icmpv6_header_match(): result = checks.icmpv6_header_match_supported() if not result: LOG.error(_LE('Check for Open vSwitch support of ICMPv6 header ' 'matching failed. ICMPv6 Neighbor Advt spoofing (part ' 'of arp spoofing) suppression will not work. A newer ' 'version of OVS is required.')) return result def check_vf_management(): result = checks.vf_management_supported() if not result: LOG.error(_LE('Check for VF management support failed. ' 'Please ensure that the version of ip link ' 'being used has VF support.')) return result def check_ovsdb_native(): cfg.CONF.set_override('ovsdb_interface', 'native', group='OVS') result = checks.ovsdb_native_supported() if not result: LOG.error(_LE('Check for native OVSDB support failed.')) return result def check_ovs_conntrack(): result = checks.ovs_conntrack_supported() if not result: LOG.error(_LE('Check for Open vSwitch conntrack support ' 'failed. OVS/CT firewall will not work. A newer ' 'version of OVS (2.5+) and linux kernel (4.3+) are ' 'required. See ' 'https://github.com/openvswitch/ovs/blob/master/FAQ.md ' 'for more information.')) return result def check_ebtables(): result = checks.ebtables_supported() if not result: LOG.error(_LE('Cannot run ebtables. Please ensure that it ' 'is installed.')) return result def check_ipset(): result = checks.ipset_supported() if not result: LOG.error(_LE('Cannot run ipset. Please ensure that it ' 'is installed.')) return result def check_ip6tables(): result = checks.ip6tables_supported() if not result: LOG.error(_LE('Cannot run ip6tables. 
Please ensure that it ' 'is installed.')) return result # Define CLI opts to test specific features, with a callback for the test OPTS = [ BoolOptCallback('ovs_vxlan', check_ovs_vxlan, default=False, help=_('Check for OVS vxlan support')), BoolOptCallback('ovs_geneve', check_ovs_geneve, default=False, help=_('Check for OVS Geneve support')), BoolOptCallback('iproute2_vxlan', check_iproute2_vxlan, default=False, help=_('Check for iproute2 vxlan support')), BoolOptCallback('ovs_patch', check_ovs_patch, default=False, help=_('Check for patch port support')), BoolOptCallback('nova_notify', check_nova_notify, help=_('Check for nova notification support')), BoolOptCallback('arp_responder', check_arp_responder, help=_('Check for ARP responder support')), BoolOptCallback('arp_header_match', check_arp_header_match, help=_('Check for ARP header match support')), BoolOptCallback('icmpv6_header_match', check_icmpv6_header_match, help=_('Check for ICMPv6 header match support')), BoolOptCallback('vf_management', check_vf_management, help=_('Check for VF management support')), BoolOptCallback('read_netns', check_read_netns, help=_('Check netns permission settings')), BoolOptCallback('dnsmasq_version', check_dnsmasq_version, help=_('Check minimal dnsmasq version')), BoolOptCallback('ovsdb_native', check_ovsdb_native, help=_('Check ovsdb native interface support')), BoolOptCallback('ovs_conntrack', check_ovs_conntrack, help=_('Check ovs conntrack support')), BoolOptCallback('ebtables_installed', check_ebtables, help=_('Check ebtables installation')), BoolOptCallback('keepalived_ipv6_support', check_keepalived_ipv6_support, help=_('Check keepalived IPv6 support')), BoolOptCallback('dibbler_version', check_dibbler_version, help=_('Check minimal dibbler version')), BoolOptCallback('ipset_installed', check_ipset, help=_('Check ipset installation')), BoolOptCallback('ip6tables_installed', check_ip6tables, help=_('Check ip6tables installation')), ] def enable_tests_from_config(): """If a test can depend on configuration, use this function to set the appropriate CLI option to enable that test. It will then be possible to run all necessary tests, just by passing in the appropriate configs. 
""" cfg.CONF.set_default('vf_management', True) if 'vxlan' in cfg.CONF.AGENT.tunnel_types: cfg.CONF.set_default('ovs_vxlan', True) if 'geneve' in cfg.CONF.AGENT.tunnel_types: cfg.CONF.set_default('ovs_geneve', True) if ('vxlan' in cfg.CONF.ml2.type_drivers or cfg.CONF.VXLAN.enable_vxlan): cfg.CONF.set_default('iproute2_vxlan', True) if cfg.CONF.AGENT.tunnel_types: cfg.CONF.set_default('ovs_patch', True) if not cfg.CONF.OVS.use_veth_interconnection: cfg.CONF.set_default('ovs_patch', True) if (cfg.CONF.notify_nova_on_port_status_changes or cfg.CONF.notify_nova_on_port_data_changes): cfg.CONF.set_default('nova_notify', True) if cfg.CONF.AGENT.arp_responder: cfg.CONF.set_default('arp_responder', True) if cfg.CONF.AGENT.prevent_arp_spoofing: cfg.CONF.set_default('arp_header_match', True) cfg.CONF.set_default('icmpv6_header_match', True) if not cfg.CONF.AGENT.use_helper_for_ns_read: cfg.CONF.set_default('read_netns', True) if cfg.CONF.dhcp_driver == 'neutron.agent.linux.dhcp.Dnsmasq': cfg.CONF.set_default('dnsmasq_version', True) if cfg.CONF.OVS.ovsdb_interface == 'native': cfg.CONF.set_default('ovsdb_native', True) if cfg.CONF.l3_ha: cfg.CONF.set_default('keepalived_ipv6_support', True) if cfg.CONF.SECURITYGROUP.enable_ipset: cfg.CONF.set_default('ipset_installed', True) if cfg.CONF.SECURITYGROUP.enable_security_group: cfg.CONF.set_default('ip6tables_installed', True) def all_tests_passed(): return all(opt.callback() for opt in OPTS if cfg.CONF.get(opt.name)) def main(): setup_conf() cfg.CONF.register_cli_opts(OPTS) cfg.CONF.set_override('use_stderr', True) config.setup_logging() config.init(sys.argv[1:], default_config_files=[]) if cfg.CONF.config_file: enable_tests_from_config() return 0 if all_tests_passed() else 1 neutron-8.4.0/neutron/cmd/eventlet/0000775000567000056710000000000013044373210020436 5ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/cmd/eventlet/plugins/0000775000567000056710000000000013044373210022117 5ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/cmd/eventlet/plugins/sriov_nic_neutron_agent.py0000664000567000056710000000127313044372736027433 0ustar jenkinsjenkins00000000000000# All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import neutron.plugins.ml2.drivers.mech_sriov.agent.sriov_nic_agent \ as agent_main def main(): agent_main.main() neutron-8.4.0/neutron/cmd/eventlet/plugins/__init__.py0000664000567000056710000000000013044372736024232 0ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/cmd/eventlet/plugins/ovs_neutron_agent.py0000664000567000056710000000132113044372736026241 0ustar jenkinsjenkins00000000000000# Copyright (c) 2015 Cloudbase Solutions. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import neutron.plugins.ml2.drivers.openvswitch.agent.main as agent_main def main(): agent_main.main() neutron-8.4.0/neutron/cmd/eventlet/plugins/linuxbridge_neutron_agent.py0000664000567000056710000000131013044372736027744 0ustar jenkinsjenkins00000000000000# All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import \ neutron.plugins.ml2.drivers.linuxbridge.agent.linuxbridge_neutron_agent \ as agent_main def main(): agent_main.main() neutron-8.4.0/neutron/cmd/eventlet/plugins/macvtap_neutron_agent.py0000664000567000056710000000130013044372736027062 0ustar jenkinsjenkins00000000000000# All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron.plugins.ml2.drivers.macvtap.agent import ( macvtap_neutron_agent as agent_main) def main(): agent_main.main() neutron-8.4.0/neutron/cmd/eventlet/services/0000775000567000056710000000000013044373210022261 5ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/cmd/eventlet/services/__init__.py0000664000567000056710000000000013044372736024374 0ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/cmd/eventlet/services/metering_agent.py0000664000567000056710000000124113044372736025635 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron.services.metering.agents import metering_agent def main(): metering_agent.main() neutron-8.4.0/neutron/cmd/eventlet/__init__.py0000664000567000056710000000120613044372736022562 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron.common import eventlet_utils eventlet_utils.monkey_patch() neutron-8.4.0/neutron/cmd/eventlet/server/0000775000567000056710000000000013044373210021744 5ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/cmd/eventlet/server/__init__.py0000664000567000056710000000204213044372736024067 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import cfg from neutron import server from neutron.server import rpc_eventlet from neutron.server import wsgi_eventlet from neutron.server import wsgi_pecan def main(): server.boot_server(_main_neutron_server) def _main_neutron_server(): if cfg.CONF.web_framework == 'legacy': wsgi_eventlet.eventlet_wsgi_server() else: wsgi_pecan.pecan_wsgi_server() def main_rpc_eventlet(): server.boot_server(rpc_eventlet.eventlet_rpc_server) neutron-8.4.0/neutron/cmd/eventlet/usage_audit.py0000664000567000056710000000337513044372760023323 0ustar jenkinsjenkins00000000000000# Copyright (c) 2012 New Dream Network, LLC (DreamHost) # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Cron script to generate usage notifications for networks, ports and subnets. 
""" import sys from neutron.common import config from neutron.common import rpc as n_rpc from neutron import context from neutron import manager from neutron.plugins.common import constants def main(): config.init(sys.argv[1:]) config.setup_logging() cxt = context.get_admin_context() plugin = manager.NeutronManager.get_plugin() l3_plugin = manager.NeutronManager.get_service_plugins().get( constants.L3_ROUTER_NAT) notifier = n_rpc.get_notifier('network') for network in plugin.get_networks(cxt): notifier.info(cxt, 'network.exists', {'network': network}) for subnet in plugin.get_subnets(cxt): notifier.info(cxt, 'subnet.exists', {'subnet': subnet}) for port in plugin.get_ports(cxt): notifier.info(cxt, 'port.exists', {'port': port}) for router in l3_plugin.get_routers(cxt): notifier.info(cxt, 'router.exists', {'router': router}) for floatingip in l3_plugin.get_floatingips(cxt): notifier.info(cxt, 'floatingip.exists', {'floatingip': floatingip}) neutron-8.4.0/neutron/cmd/eventlet/agents/0000775000567000056710000000000013044373210021717 5ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/cmd/eventlet/agents/l3.py0000664000567000056710000000120213044372736022616 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron.agent import l3_agent def main(): l3_agent.main() neutron-8.4.0/neutron/cmd/eventlet/agents/metadata_proxy.py0000664000567000056710000000123113044372736025323 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron.agent.metadata import namespace_proxy def main(): namespace_proxy.main() neutron-8.4.0/neutron/cmd/eventlet/agents/dhcp.py0000664000567000056710000000120613044372736023222 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron.agent import dhcp_agent def main(): dhcp_agent.main() neutron-8.4.0/neutron/cmd/eventlet/agents/bgp_dragent.py0000664000567000056710000000127213044372760024560 0ustar jenkinsjenkins00000000000000# Copyright 2016 Huawei Technologies India Pvt. Ltd. 
# # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. from neutron.services.bgp.agent import entry as bgp_dragent def main(): bgp_dragent.main() neutron-8.4.0/neutron/cmd/eventlet/agents/__init__.py0000664000567000056710000000000013044372736024032 0ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron/cmd/eventlet/agents/metadata.py0000664000567000056710000000121613044372736024065 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron.agent import metadata_agent def main(): metadata_agent.main() neutron-8.4.0/.pylintrc0000664000567000056710000000642413044372760016237 0ustar jenkinsjenkins00000000000000# The format of this file isn't really documented; just use --generate-rcfile [MASTER] # Add to the black list. It should be a base name, not a # path. You may set this option multiple times. # # Note the 'openstack' below is intended to match only # neutron.openstack.common. If we ever have another 'openstack' # dirname, then we'll need to expand the ignore features in pylint :/ ignore=.git,tests,openstack [MESSAGES CONTROL] # NOTE(gus): This is a long list. 
A number of these are important and # should be re-enabled once the offending code is fixed (or marked # with a local disable) disable= # "F" Fatal errors that prevent further processing import-error, # "I" Informational noise locally-disabled, # "E" Error for important programming issues (likely bugs) access-member-before-definition, no-member, no-method-argument, no-self-argument, # "W" Warnings for stylistic problems or minor programming issues abstract-method, arguments-differ, attribute-defined-outside-init, bad-builtin, bad-indentation, broad-except, dangerous-default-value, deprecated-lambda, expression-not-assigned, fixme, global-statement, no-init, non-parent-init-called, not-callable, protected-access, redefined-builtin, redefined-outer-name, signature-differs, star-args, super-init-not-called, super-on-old-class, unpacking-non-sequence, unused-argument, unused-import, unused-variable, # TODO(dougwig) - disable nonstandard-exception while we have neutron_lib shims nonstandard-exception, # "C" Coding convention violations bad-continuation, invalid-name, missing-docstring, superfluous-parens, # "R" Refactor recommendations abstract-class-little-used, abstract-class-not-used, duplicate-code, interface-not-implemented, no-self-use, too-few-public-methods, too-many-ancestors, too-many-arguments, too-many-branches, too-many-instance-attributes, too-many-lines, too-many-locals, too-many-public-methods, too-many-return-statements, too-many-statements [BASIC] # Variable names can be 1 to 31 characters long, with lowercase and underscores variable-rgx=[a-z_][a-z0-9_]{0,30}$ # Argument names can be 2 to 31 characters long, with lowercase and underscores argument-rgx=[a-z_][a-z0-9_]{1,30}$ # Method names should be at least 3 characters long # and be lowercased with underscores method-rgx=([a-z_][a-z0-9_]{2,}|setUp|tearDown)$ # Module names matching neutron-* are ok (files in bin/) module-rgx=(([a-z_][a-z0-9_]*)|([A-Z][a-zA-Z0-9]+)|(neutron-[a-z0-9_-]+))$ # Don't require docstrings on tests. no-docstring-rgx=((__.*__)|([tT]est.*)|setUp|tearDown)$ [FORMAT] # Maximum number of characters on a single line. max-line-length=79 [VARIABLES] # List of additional names supposed to be defined in builtins. Remember that # you should avoid defining new builtins when possible. # _ is used by our localization additional-builtins=_ [CLASSES] # List of interface methods to ignore, separated by a comma. 
ignore-iface-methods= [IMPORTS] # Deprecated modules which should not be used, separated by a comma deprecated-modules= # should use openstack.common.jsonutils json [TYPECHECK] # List of module names for which member attributes should not be checked ignored-modules=six.moves,_MovedItems [REPORTS] # Tells whether to display a full report or only the messages reports=no neutron-8.4.0/devstack/0000775000567000056710000000000013044373210016157 5ustar jenkinsjenkins00000000000000neutron-8.4.0/devstack/plugin.sh0000664000567000056710000000345513044372760020031 0ustar jenkinsjenkins00000000000000LIBDIR=$DEST/neutron/devstack/lib source $LIBDIR/bgp source $LIBDIR/flavors source $LIBDIR/l2_agent source $LIBDIR/l2_agent_sriovnicswitch source $LIBDIR/ml2 source $LIBDIR/qos if [[ "$1" == "stack" ]]; then case "$2" in install) if is_service_enabled q-flavors; then configure_flavors fi if is_service_enabled q-qos; then configure_qos fi if is_service_enabled q-bgp; then configure_bgp fi ;; post-config) if is_service_enabled q-agt; then configure_l2_agent fi if is_service_enabled q-bgp && is_service_enabled q-bgp-agt; then configure_bgp_dragent fi #Note: sriov agent should run with OVS or linux bridge agent #because they are the mechanisms that bind the DHCP and router ports. #Currently devstack lacks the option to run two agents on the same node. #Therefore we create new service, q-sriov-agt, and the q-agt should be OVS #or linux bridge. if is_service_enabled q-sriov-agt; then configure_$Q_PLUGIN configure_l2_agent configure_l2_agent_sriovnicswitch fi ;; extra) if is_service_enabled q-sriov-agt; then start_l2_agent_sriov fi if is_service_enabled q-bgp && is_service_enabled q-bgp-agt; then start_bgp_dragent fi ;; esac elif [[ "$1" == "unstack" ]]; then if is_service_enabled q-sriov-agt; then stop_l2_agent_sriov fi if is_service_enabled q-bgp && is_service_enabled q-bgp-agt; then stop_bgp_dragent fi fi neutron-8.4.0/devstack/settings0000664000567000056710000000054513044372760017757 0ustar jenkinsjenkins00000000000000L2_AGENT_EXTENSIONS=${L2_AGENT_EXTENSIONS:-} #BGP binary and config information AGENT_BGP_BINARY=${AGENT_BGP_BINARY:-"$NEUTRON_BIN_DIR/neutron-bgp-dragent"} Q_BGP_DRAGENT_CONF_FILE=${Q_BGP_DRAGENT_CONF_FILE:-"$NEUTRON_CONF_DIR/bgp_dragent.ini"} BGP_ROUTER_ID=${BGP_ROUTER_ID:-} RYU_BGP_SPEAKER_DRIVER="neutron.services.bgp.driver.ryu.driver.RyuBgpDriver" neutron-8.4.0/devstack/lib/0000775000567000056710000000000013044373210016725 5ustar jenkinsjenkins00000000000000neutron-8.4.0/devstack/lib/l2_agent0000664000567000056710000000063613044372736020364 0ustar jenkinsjenkins00000000000000function plugin_agent_add_l2_agent_extension { local l2_agent_extension=$1 if [[ -z "$L2_AGENT_EXTENSIONS" ]]; then L2_AGENT_EXTENSIONS=$l2_agent_extension elif [[ ! ,${L2_AGENT_EXTENSIONS}, =~ ,${l2_agent_extension}, ]]; then L2_AGENT_EXTENSIONS+=",$l2_agent_extension" fi } function configure_l2_agent { iniset /$Q_PLUGIN_CONF_FILE agent extensions "$L2_AGENT_EXTENSIONS" } neutron-8.4.0/devstack/lib/ovs0000664000567000056710000000647713044372760017506 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. OVS_REPO=${OVS_REPO:-https://github.com/openvswitch/ovs.git} OVS_REPO_NAME=$(basename ${OVS_REPO} | cut -f1 -d'.') OVS_BRANCH=${OVS_BRANCH:-origin/master} # Functions # load_module() - Load the module given by argument using modprobe and die # on failure # - the optional fatal argument says whether the function should # exit if the module can't be loaded function load_module { local module=$1 local fatal=$2 if [ "$(trueorfalse True fatal)" == "True" ]; then sudo modprobe $module || (dmesg && die $LINENO "FAILED TO LOAD $module") else sudo modprobe $module || (echo "FAILED TO LOAD $module" && dmesg) fi } # compile_ovs() - Compile OVS from source and load needed modules. # Accepts three parameters: # - if the first one is True, modules are built and installed. # - second optional parameter defines prefix for ovs compilation # - third optional parameter defines localstatedir for ovs single machine runtime # Env variables OVS_REPO_NAME, OVS_REPO and OVS_BRANCH must be set function compile_ovs { local _pwd=$PWD local build_modules=${1:-True} local prefix=$2 local localstatedir=$3 if [ -n "$prefix" ]; then prefix="--prefix=$prefix" fi if [ -n "$localstatedir" ]; then localstatedir="--localstatedir=$localstatedir" fi cd $DEST if [ ! -d $OVS_REPO_NAME ] ; then git clone $OVS_REPO cd $OVS_REPO_NAME git checkout $OVS_BRANCH else cd $OVS_REPO_NAME fi # TODO: Can you create package list files like you can inside devstack? install_package autoconf automake libtool gcc patch make if is_fedora ; then # is_fedora covers Fedora, RHEL, CentOS, etc... install_package kernel-devel fi if [ ! -f configure ] ; then ./boot.sh fi if [ ! -f config.status ] || [ configure -nt config.status ] ; then if [[ "$build_modules" == "True" ]]; then ./configure $prefix $localstatedir --with-linux=/lib/modules/$(uname -r)/build else ./configure $prefix $localstatedir fi fi make -j$[$(nproc) + 1] sudo make install if [[ "$build_modules" == "True" ]]; then sudo make INSTALL_MOD_DIR=kernel/net/openvswitch modules_install sudo modprobe -r vport_geneve sudo modprobe -r openvswitch fi load_module openvswitch load_module vport-geneve False dmesg | tail cd $_pwd } # start_new_ovs() - removes old ovs database, creates a new one and starts ovs function start_new_ovs () { rm -f /etc/openvswitch/conf.db /etc/openvswitch/.conf.db~lock~ sudo /usr/share/openvswitch/scripts/ovs-ctl start } neutron-8.4.0/devstack/lib/l2_agent_sriovnicswitch0000775000567000056710000000153213044372736023511 0ustar jenkinsjenkins00000000000000SRIOV_AGENT_CONF="${Q_PLUGIN_CONF_PATH}/sriov_agent.ini" SRIOV_AGENT_BINARY="${NEUTRON_BIN_DIR}/neutron-sriov-nic-agent" function configure_l2_agent_sriovnicswitch { if [[ -n "$PHYSICAL_NETWORK" ]] && [[ -n "$PHYSICAL_INTERFACE" ]]; then PHYSICAL_DEVICE_MAPPINGS=$PHYSICAL_NETWORK:$PHYSICAL_INTERFACE fi if [[ -n "$PHYSICAL_DEVICE_MAPPINGS" ]]; then iniset /$SRIOV_AGENT_CONF sriov_nic physical_device_mappings $PHYSICAL_DEVICE_MAPPINGS fi iniset /$SRIOV_AGENT_CONF securitygroup firewall_driver neutron.agent.firewall.NoopFirewallDriver iniset /$SRIOV_AGENT_CONF agent extensions "$L2_AGENT_EXTENSIONS" } function start_l2_agent_sriov { run_process q-sriov-agt "$SRIOV_AGENT_BINARY --config-file $NEUTRON_CONF --config-file /$SRIOV_AGENT_CONF" } function stop_l2_agent_sriov { stop_process q-sriov-agt } neutron-8.4.0/devstack/lib/ml2_drivers/0000775000567000056710000000000013044373210021155 5ustar 
jenkinsjenkins00000000000000neutron-8.4.0/devstack/lib/ml2_drivers/sriovnicswitch0000775000567000056710000000006013044372736024171 0ustar jenkinsjenkins00000000000000function configure_ml2_sriovnicswitch { : } neutron-8.4.0/devstack/lib/flavors0000664000567000056710000000024013044372736020334 0ustar jenkinsjenkins00000000000000# Neutron flavors plugin # ---------------------- FLAVORS_PLUGIN=flavors function configure_flavors { _neutron_service_plugin_class_add $FLAVORS_PLUGIN } neutron-8.4.0/devstack/lib/bgp0000664000567000056710000000156713044372760017442 0ustar jenkinsjenkins00000000000000function configure_bgp_service_plugin { _neutron_service_plugin_class_add "bgp" } function configure_bgp { configure_bgp_service_plugin } function configure_bgp_dragent { cp $NEUTRON_DIR/etc/bgp_dragent.ini.sample $Q_BGP_DRAGENT_CONF_FILE iniset $Q_BGP_DRAGENT_CONF_FILE DEFAULT verbose True iniset $Q_BGP_DRAGENT_CONF_FILE DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL if [ -n "$BGP_ROUTER_ID" ]; then iniset $Q_BGP_DRAGENT_CONF_FILE BGP bgp_router_id $BGP_ROUTER_ID fi if [ -z "$BGP_SPEAKER_DRIVER" ]; then BGP_SPEAKER_DRIVER=$RYU_BGP_SPEAKER_DRIVER fi iniset $Q_BGP_DRAGENT_CONF_FILE BGP bgp_speaker_driver $BGP_SPEAKER_DRIVER } function start_bgp_dragent { run_process q-bgp-agt "$AGENT_BGP_BINARY --config-file $NEUTRON_CONF --config-file /$Q_BGP_DRAGENT_CONF_FILE" } function stop_bgp_dragent { stop_process q-bgp-agt } neutron-8.4.0/devstack/lib/qos0000664000567000056710000000054613044372736017473 0ustar jenkinsjenkins00000000000000function configure_qos_service_plugin { _neutron_service_plugin_class_add "qos" } function configure_qos_core_plugin { configure_qos_$Q_PLUGIN } function configure_qos_l2_agent { plugin_agent_add_l2_agent_extension "qos" } function configure_qos { configure_qos_service_plugin configure_qos_core_plugin configure_qos_l2_agent } neutron-8.4.0/devstack/lib/ml20000664000567000056710000000137613044372736017365 0ustar jenkinsjenkins00000000000000source $LIBDIR/ml2_drivers/sriovnicswitch function enable_ml2_extension_driver { local extension_driver=$1 if [[ -z "$Q_ML2_PLUGIN_EXT_DRIVERS" ]]; then Q_ML2_PLUGIN_EXT_DRIVERS=$extension_driver elif [[ ! 
,${Q_ML2_PLUGIN_EXT_DRIVERS}, =~ ,${extension_driver}, ]]; then Q_ML2_PLUGIN_EXT_DRIVERS+=",$extension_driver" fi } function configure_qos_ml2 { enable_ml2_extension_driver "qos" } function configure_ml2 { OIFS=$IFS; IFS=","; mechanism_drivers_array=($Q_ML2_PLUGIN_MECHANISM_DRIVERS); IFS=$OIFS; for mechanism_driver in "${mechanism_drivers_array[@]}"; do if [ "$(type -t configure_ml2_$mechanism_driver)" = function ]; then configure_ml2_$mechanism_driver fi done }neutron-8.4.0/neutron.egg-info/0000775000567000056710000000000013044373210017537 5ustar jenkinsjenkins00000000000000neutron-8.4.0/neutron.egg-info/requires.txt0000664000567000056710000000161613044373206022150 0ustar jenkinsjenkins00000000000000pbr>=1.6 Paste PasteDeploy>=1.5.0 Routes!=2.0,!=2.1,!=2.3.0,>=1.12.3 debtcollector>=1.2.0 eventlet!=0.18.3,>=0.18.2 pecan>=1.0.0 greenlet>=0.3.2 httplib2>=0.7.5 requests!=2.9.0,>=2.8.1 Jinja2>=2.8 keystonemiddleware!=4.1.0,>=4.0.0 netaddr!=0.7.16,>=0.7.12 netifaces>=0.10.4 neutron-lib>=0.0.1 python-neutronclient!=4.1.0,>=2.6.0 retrying!=1.3.0,>=1.2.3 ryu>=3.30 SQLAlchemy<1.1.0,>=1.0.10 WebOb>=1.2.3 keystoneauth1>=2.1.0 alembic>=0.8.0 six>=1.9.0 stevedore>=1.5.0 oslo.concurrency>=3.7.1 oslo.config>=3.7.0 oslo.context>=0.2.0 oslo.db>=4.1.0 oslo.i18n>=2.1.0 oslo.log>=1.14.0 oslo.messaging>=4.0.0 oslo.middleware>=3.0.0 oslo.policy>=0.5.0 oslo.reports>=0.6.0 oslo.rootwrap>=2.0.0 oslo.serialization>=1.10.0 oslo.service>=1.0.0 oslo.utils>=3.5.0 oslo.versionedobjects>=1.5.0 ovs>=2.4.0 python-novaclient!=2.33.0,>=2.29.0 python-designateclient>=1.5.0 [:(python_version!='2.7')] Routes!=2.0,!=2.3.0,>=1.12.3 neutron-8.4.0/neutron.egg-info/SOURCES.txt0000664000567000056710000017274013044373210021436 0ustar jenkinsjenkins00000000000000.coveragerc .mailmap .pylintrc .testr.conf AUTHORS CONTRIBUTING.rst ChangeLog HACKING.rst LICENSE MANIFEST.in README.rst TESTING.rst babel.cfg openstack-common.conf requirements.txt run_tests.sh setup.cfg setup.py test-requirements.txt tox.ini bin/neutron-rootwrap-xen-dom0 devstack/plugin.sh devstack/settings devstack/lib/bgp devstack/lib/flavors devstack/lib/l2_agent devstack/lib/l2_agent_sriovnicswitch devstack/lib/ml2 devstack/lib/ovs devstack/lib/qos devstack/lib/ml2_drivers/sriovnicswitch doc/Makefile doc/pom.xml doc/source/conf.py doc/source/index.rst doc/source/dashboards/index.rst doc/source/devref/address_scopes.rst doc/source/devref/alembic_migrations.rst doc/source/devref/api_extensions.rst doc/source/devref/api_layer.rst doc/source/devref/callbacks.rst doc/source/devref/client_command_extensions.rst doc/source/devref/contribute.rst doc/source/devref/db_layer.rst doc/source/devref/development.environment.rst doc/source/devref/dns_order.rst doc/source/devref/effective_neutron.rst doc/source/devref/external_dns_integration.rst doc/source/devref/fullstack_testing.rst doc/source/devref/i18n.rst doc/source/devref/index.rst doc/source/devref/instrumentation.rst doc/source/devref/l2_agent_extensions.rst doc/source/devref/l2_agents.rst doc/source/devref/layer3.rst doc/source/devref/linuxbridge_agent.rst doc/source/devref/ml2_ext_manager.rst doc/source/devref/network_ip_availability.rst doc/source/devref/neutron_api.rst doc/source/devref/openvswitch_agent.rst doc/source/devref/openvswitch_firewall.rst doc/source/devref/oslo-incubator.rst doc/source/devref/ovs_vhostuser.rst doc/source/devref/plugin-api.rst doc/source/devref/policy.rst doc/source/devref/quality_of_service.rst doc/source/devref/quota.rst doc/source/devref/rpc_api.rst doc/source/devref/rpc_callbacks.rst 
doc/source/devref/security_group_api.rst doc/source/devref/service_extensions.rst doc/source/devref/services_and_agents.rst doc/source/devref/sriov_nic_agent.rst doc/source/devref/tag.rst doc/source/devref/template_model_sync_test.rst doc/source/devref/testing_coverage.rst doc/source/devref/upgrade.rst doc/source/devref/images/fullstack_multinode_simulation.png doc/source/devref/images/under-the-hood-scenario-1-ovs-compute.png doc/source/devref/images/under-the-hood-scenario-1-ovs-netns.png doc/source/devref/images/under-the-hood-scenario-1-ovs-network.png doc/source/policies/blueprints.rst doc/source/policies/bugs.rst doc/source/policies/code-reviews.rst doc/source/policies/contributor-onboarding.rst doc/source/policies/gate-failure-triage.rst doc/source/policies/index.rst doc/source/policies/neutron-teams.rst doc/source/policies/thirdparty-ci.rst doc/source/stadium/index.rst doc/source/stadium/sub_project_guidelines.rst doc/source/stadium/sub_projects.rst etc/README.txt etc/api-paste.ini etc/policy.json etc/rootwrap.conf etc/neutron/plugins/ml2/.placeholder etc/neutron/rootwrap.d/debug.filters etc/neutron/rootwrap.d/dhcp.filters etc/neutron/rootwrap.d/dibbler.filters etc/neutron/rootwrap.d/ebtables.filters etc/neutron/rootwrap.d/ipset-firewall.filters etc/neutron/rootwrap.d/iptables-firewall.filters etc/neutron/rootwrap.d/l3.filters etc/neutron/rootwrap.d/linuxbridge-plugin.filters etc/neutron/rootwrap.d/openvswitch-plugin.filters etc/oslo-config-generator/bgp_dragent.ini etc/oslo-config-generator/dhcp_agent.ini etc/oslo-config-generator/l3_agent.ini etc/oslo-config-generator/linuxbridge_agent.ini etc/oslo-config-generator/macvtap_agent.ini etc/oslo-config-generator/metadata_agent.ini etc/oslo-config-generator/metering_agent.ini etc/oslo-config-generator/ml2_conf.ini etc/oslo-config-generator/ml2_conf_sriov.ini etc/oslo-config-generator/neutron.conf etc/oslo-config-generator/openvswitch_agent.ini etc/oslo-config-generator/sriov_agent.ini neutron/__init__.py neutron/_i18n.py neutron/auth.py neutron/context.py neutron/i18n.py neutron/manager.py neutron/neutron_plugin_base_v2.py neutron/opts.py neutron/policy.py neutron/service.py neutron/version.py neutron/worker.py neutron/wsgi.py neutron.egg-info/PKG-INFO neutron.egg-info/SOURCES.txt neutron.egg-info/dependency_links.txt neutron.egg-info/entry_points.txt neutron.egg-info/not-zip-safe neutron.egg-info/pbr.json neutron.egg-info/requires.txt neutron.egg-info/top_level.txt neutron/agent/__init__.py neutron/agent/dhcp_agent.py neutron/agent/firewall.py neutron/agent/l3_agent.py neutron/agent/metadata_agent.py neutron/agent/rpc.py neutron/agent/securitygroups_rpc.py neutron/agent/common/__init__.py neutron/agent/common/base_polling.py neutron/agent/common/config.py neutron/agent/common/ip_lib.py neutron/agent/common/ovs_lib.py neutron/agent/common/polling.py neutron/agent/common/utils.py neutron/agent/dhcp/__init__.py neutron/agent/dhcp/agent.py neutron/agent/dhcp/config.py neutron/agent/l2/__init__.py neutron/agent/l2/agent_extension.py neutron/agent/l2/extensions/__init__.py neutron/agent/l2/extensions/manager.py neutron/agent/l2/extensions/qos.py neutron/agent/l3/__init__.py neutron/agent/l3/agent.py neutron/agent/l3/config.py neutron/agent/l3/dvr.py neutron/agent/l3/dvr_edge_ha_router.py neutron/agent/l3/dvr_edge_router.py neutron/agent/l3/dvr_fip_ns.py neutron/agent/l3/dvr_local_router.py neutron/agent/l3/dvr_router_base.py neutron/agent/l3/dvr_snat_ns.py neutron/agent/l3/fip_rule_priority_allocator.py neutron/agent/l3/ha.py 
neutron/agent/l3/ha_router.py neutron/agent/l3/item_allocator.py neutron/agent/l3/keepalived_state_change.py neutron/agent/l3/legacy_router.py neutron/agent/l3/link_local_allocator.py neutron/agent/l3/namespace_manager.py neutron/agent/l3/namespaces.py neutron/agent/l3/router_info.py neutron/agent/l3/router_processing_queue.py neutron/agent/linux/__init__.py neutron/agent/linux/async_process.py neutron/agent/linux/bridge_lib.py neutron/agent/linux/daemon.py neutron/agent/linux/dhcp.py neutron/agent/linux/dibbler.py neutron/agent/linux/external_process.py neutron/agent/linux/interface.py neutron/agent/linux/ip_conntrack.py neutron/agent/linux/ip_lib.py neutron/agent/linux/ip_link_support.py neutron/agent/linux/ip_monitor.py neutron/agent/linux/ipset_manager.py neutron/agent/linux/iptables_comments.py neutron/agent/linux/iptables_firewall.py neutron/agent/linux/iptables_manager.py neutron/agent/linux/keepalived.py neutron/agent/linux/ovsdb_monitor.py neutron/agent/linux/pd.py neutron/agent/linux/pd_driver.py neutron/agent/linux/polling.py neutron/agent/linux/ra.py neutron/agent/linux/tc_lib.py neutron/agent/linux/utils.py neutron/agent/linux/openvswitch_firewall/__init__.py neutron/agent/linux/openvswitch_firewall/constants.py neutron/agent/linux/openvswitch_firewall/firewall.py neutron/agent/linux/openvswitch_firewall/rules.py neutron/agent/metadata/__init__.py neutron/agent/metadata/agent.py neutron/agent/metadata/config.py neutron/agent/metadata/driver.py neutron/agent/metadata/namespace_proxy.py neutron/agent/ovsdb/__init__.py neutron/agent/ovsdb/api.py neutron/agent/ovsdb/impl_idl.py neutron/agent/ovsdb/impl_vsctl.py neutron/agent/ovsdb/native/__init__.py neutron/agent/ovsdb/native/commands.py neutron/agent/ovsdb/native/connection.py neutron/agent/ovsdb/native/helpers.py neutron/agent/ovsdb/native/idlutils.py neutron/agent/windows/__init__.py neutron/agent/windows/ip_lib.py neutron/agent/windows/polling.py neutron/agent/windows/utils.py neutron/api/__init__.py neutron/api/api_common.py neutron/api/extensions.py neutron/api/versions.py neutron/api/rpc/__init__.py neutron/api/rpc/agentnotifiers/__init__.py neutron/api/rpc/agentnotifiers/bgp_dr_rpc_agent_api.py neutron/api/rpc/agentnotifiers/dhcp_rpc_agent_api.py neutron/api/rpc/agentnotifiers/l3_rpc_agent_api.py neutron/api/rpc/agentnotifiers/metering_rpc_agent_api.py neutron/api/rpc/callbacks/__init__.py neutron/api/rpc/callbacks/events.py neutron/api/rpc/callbacks/exceptions.py neutron/api/rpc/callbacks/resource_manager.py neutron/api/rpc/callbacks/resources.py neutron/api/rpc/callbacks/version_manager.py neutron/api/rpc/callbacks/consumer/__init__.py neutron/api/rpc/callbacks/consumer/registry.py neutron/api/rpc/callbacks/producer/__init__.py neutron/api/rpc/callbacks/producer/registry.py neutron/api/rpc/handlers/__init__.py neutron/api/rpc/handlers/bgp_speaker_rpc.py neutron/api/rpc/handlers/dhcp_rpc.py neutron/api/rpc/handlers/dvr_rpc.py neutron/api/rpc/handlers/l3_rpc.py neutron/api/rpc/handlers/metadata_rpc.py neutron/api/rpc/handlers/resources_rpc.py neutron/api/rpc/handlers/securitygroups_rpc.py neutron/api/v2/__init__.py neutron/api/v2/attributes.py neutron/api/v2/base.py neutron/api/v2/resource.py neutron/api/v2/resource_helper.py neutron/api/v2/router.py neutron/api/views/__init__.py neutron/api/views/versions.py neutron/callbacks/__init__.py neutron/callbacks/events.py neutron/callbacks/exceptions.py neutron/callbacks/manager.py neutron/callbacks/registry.py neutron/callbacks/resources.py neutron/cmd/__init__.py 
neutron/cmd/ipset_cleanup.py neutron/cmd/keepalived_state_change.py neutron/cmd/linuxbridge_cleanup.py neutron/cmd/netns_cleanup.py neutron/cmd/ovs_cleanup.py neutron/cmd/pd_notify.py neutron/cmd/sanity_check.py neutron/cmd/eventlet/__init__.py neutron/cmd/eventlet/usage_audit.py neutron/cmd/eventlet/agents/__init__.py neutron/cmd/eventlet/agents/bgp_dragent.py neutron/cmd/eventlet/agents/dhcp.py neutron/cmd/eventlet/agents/l3.py neutron/cmd/eventlet/agents/metadata.py neutron/cmd/eventlet/agents/metadata_proxy.py neutron/cmd/eventlet/plugins/__init__.py neutron/cmd/eventlet/plugins/linuxbridge_neutron_agent.py neutron/cmd/eventlet/plugins/macvtap_neutron_agent.py neutron/cmd/eventlet/plugins/ovs_neutron_agent.py neutron/cmd/eventlet/plugins/sriov_nic_neutron_agent.py neutron/cmd/eventlet/server/__init__.py neutron/cmd/eventlet/services/__init__.py neutron/cmd/eventlet/services/metering_agent.py neutron/cmd/sanity/__init__.py neutron/cmd/sanity/checks.py neutron/common/__init__.py neutron/common/_deprecate.py neutron/common/config.py neutron/common/constants.py neutron/common/eventlet_utils.py neutron/common/exceptions.py neutron/common/ipv6_utils.py neutron/common/rpc.py neutron/common/test_lib.py neutron/common/topics.py neutron/common/utils.py neutron/core_extensions/__init__.py neutron/core_extensions/base.py neutron/core_extensions/qos.py neutron/db/__init__.py neutron/db/address_scope_db.py neutron/db/agents_db.py neutron/db/agentschedulers_db.py neutron/db/allowedaddresspairs_db.py neutron/db/api.py neutron/db/bgp_db.py neutron/db/bgp_dragentscheduler_db.py neutron/db/common_db_mixin.py neutron/db/db_base_plugin_common.py neutron/db/db_base_plugin_v2.py neutron/db/dns_db.py neutron/db/dvr_mac_db.py neutron/db/external_net_db.py neutron/db/extradhcpopt_db.py neutron/db/extraroute_db.py neutron/db/flavors_db.py neutron/db/ipam_backend_mixin.py neutron/db/ipam_non_pluggable_backend.py neutron/db/ipam_pluggable_backend.py neutron/db/l3_agentschedulers_db.py neutron/db/l3_attrs_db.py neutron/db/l3_db.py neutron/db/l3_dvr_db.py neutron/db/l3_dvr_ha_scheduler_db.py neutron/db/l3_dvrscheduler_db.py neutron/db/l3_gwmode_db.py neutron/db/l3_hamode_db.py neutron/db/l3_hascheduler_db.py neutron/db/model_base.py neutron/db/models_v2.py neutron/db/netmtu_db.py neutron/db/network_ip_availability_db.py neutron/db/portbindings_base.py neutron/db/portbindings_db.py neutron/db/portsecurity_db.py neutron/db/portsecurity_db_common.py neutron/db/quota_db.py neutron/db/rbac_db_mixin.py neutron/db/rbac_db_models.py neutron/db/securitygroups_db.py neutron/db/securitygroups_rpc_base.py neutron/db/servicetype_db.py neutron/db/sqlalchemytypes.py neutron/db/sqlalchemyutils.py neutron/db/standardattrdescription_db.py neutron/db/tag_db.py neutron/db/vlantransparent_db.py neutron/db/availability_zone/__init__.py neutron/db/availability_zone/network.py neutron/db/availability_zone/router.py neutron/db/metering/__init__.py neutron/db/metering/metering_db.py neutron/db/metering/metering_rpc.py neutron/db/migration/README neutron/db/migration/__init__.py neutron/db/migration/alembic.ini neutron/db/migration/autogen.py neutron/db/migration/cli.py neutron/db/migration/connection.py neutron/db/migration/alembic_migrations/__init__.py neutron/db/migration/alembic_migrations/agent_init_ops.py neutron/db/migration/alembic_migrations/brocade_init_ops.py neutron/db/migration/alembic_migrations/cisco_init_ops.py neutron/db/migration/alembic_migrations/core_init_ops.py neutron/db/migration/alembic_migrations/dvr_init_opts.py 
neutron/db/migration/alembic_migrations/env.py neutron/db/migration/alembic_migrations/external.py neutron/db/migration/alembic_migrations/firewall_init_ops.py neutron/db/migration/alembic_migrations/l3_init_ops.py neutron/db/migration/alembic_migrations/lb_init_ops.py neutron/db/migration/alembic_migrations/loadbalancer_init_ops.py neutron/db/migration/alembic_migrations/metering_init_ops.py neutron/db/migration/alembic_migrations/ml2_init_ops.py neutron/db/migration/alembic_migrations/nec_init_ops.py neutron/db/migration/alembic_migrations/nsxv_initial_opts.py neutron/db/migration/alembic_migrations/nuage_init_opts.py neutron/db/migration/alembic_migrations/other_extensions_init_ops.py neutron/db/migration/alembic_migrations/other_plugins_init_ops.py neutron/db/migration/alembic_migrations/ovs_init_ops.py neutron/db/migration/alembic_migrations/portsec_init_ops.py neutron/db/migration/alembic_migrations/script.py.mako neutron/db/migration/alembic_migrations/secgroup_init_ops.py neutron/db/migration/alembic_migrations/vmware_init_ops.py neutron/db/migration/alembic_migrations/vpn_init_ops.py neutron/db/migration/alembic_migrations/versions/CONTRACT_HEAD neutron/db/migration/alembic_migrations/versions/EXPAND_HEAD neutron/db/migration/alembic_migrations/versions/README neutron/db/migration/alembic_migrations/versions/kilo_initial.py neutron/db/migration/alembic_migrations/versions/liberty/contract/11926bcfe72d_add_geneve_ml2_type_driver.py neutron/db/migration/alembic_migrations/versions/liberty/contract/2a16083502f3_metaplugin_removal.py neutron/db/migration/alembic_migrations/versions/liberty/contract/2e5352a0ad4d_add_missing_foreign_keys.py neutron/db/migration/alembic_migrations/versions/liberty/contract/30018084ec99_initial.py neutron/db/migration/alembic_migrations/versions/liberty/contract/4af11ca47297_drop_cisco_monolithic_tables.py neutron/db/migration/alembic_migrations/versions/liberty/contract/4ffceebfada_rbac_network.py neutron/db/migration/alembic_migrations/versions/liberty/contract/5498d17be016_drop_legacy_ovs_and_lb.py neutron/db/migration/alembic_migrations/versions/liberty/expand/1b4c6e320f79_address_scope_support_in_subnetpool.py neutron/db/migration/alembic_migrations/versions/liberty/expand/1c844d1677f7_dns_nameservers_order.py neutron/db/migration/alembic_migrations/versions/liberty/expand/26c371498592_subnetpool_hash.py neutron/db/migration/alembic_migrations/versions/liberty/expand/31337ec0ffee_flavors.py neutron/db/migration/alembic_migrations/versions/liberty/expand/34af2b5c5a59_add_dns_name_to_port.py neutron/db/migration/alembic_migrations/versions/liberty/expand/354db87e3225_nsxv_vdr_metadata.py neutron/db/migration/alembic_migrations/versions/liberty/expand/45f955889773_quota_usage.py neutron/db/migration/alembic_migrations/versions/liberty/expand/48153cb5f051_qos_db_changes.py neutron/db/migration/alembic_migrations/versions/liberty/expand/52c5312f6baf_address_scopes.py neutron/db/migration/alembic_migrations/versions/liberty/expand/599c6a226151_neutrodb_ipam.py neutron/db/migration/alembic_migrations/versions/liberty/expand/8675309a5c4f_rbac_network.py neutron/db/migration/alembic_migrations/versions/liberty/expand/9859ac9c136_quota_reservations.py neutron/db/migration/alembic_migrations/versions/mitaka/contract/1b294093239c_remove_embrane_plugin.py neutron/db/migration/alembic_migrations/versions/mitaka/contract/2b4c2465d44b_dvr_sheduling_refactoring.py neutron/db/migration/alembic_migrations/versions/mitaka/contract/4ffceebfcdc_standard_desc.py 
neutron/db/migration/alembic_migrations/versions/mitaka/contract/5ffceebfada_rbac_network_external.py neutron/db/migration/alembic_migrations/versions/mitaka/contract/8a6d8bdae39_migrate_neutron_resources_table.py neutron/db/migration/alembic_migrations/versions/mitaka/contract/c6c112992c9_rbac_qos_policy.py neutron/db/migration/alembic_migrations/versions/mitaka/contract/e3278ee65050_drop_nec_plugin_tables.py neutron/db/migration/alembic_migrations/versions/mitaka/expand/0e66c5227a8a_add_desc_to_standard_attr.py neutron/db/migration/alembic_migrations/versions/mitaka/expand/13cfb89f881a_add_is_default_to_subnetpool.py neutron/db/migration/alembic_migrations/versions/mitaka/expand/15be73214821_add_bgp_model_data.py neutron/db/migration/alembic_migrations/versions/mitaka/expand/15e43b934f81_rbac_qos_policy.py neutron/db/migration/alembic_migrations/versions/mitaka/expand/19f26505c74f_auto_allocated_topology.py neutron/db/migration/alembic_migrations/versions/mitaka/expand/1df244e556f5_add_unique_ha_router_agent_port_bindings.py neutron/db/migration/alembic_migrations/versions/mitaka/expand/2f9e956e7532_tag_support.py neutron/db/migration/alembic_migrations/versions/mitaka/expand/31ed664953e6_add_resource_versions_row_to_agent_table.py neutron/db/migration/alembic_migrations/versions/mitaka/expand/32e5974ada25_add_neutron_resources_table.py neutron/db/migration/alembic_migrations/versions/mitaka/expand/3894bccad37f_add_timestamp_to_base_resources.py neutron/db/migration/alembic_migrations/versions/mitaka/expand/59cb5b6cf4d_availability_zone.py neutron/db/migration/alembic_migrations/versions/mitaka/expand/659bf3d90664_add_attributes_to_support_external_dns_integration.py neutron/db/migration/alembic_migrations/versions/mitaka/expand/b4caf27aae4_add_bgp_dragent_model_data.py neutron/db/migration/alembic_migrations/versions/mitaka/expand/c3a73f615e4_add_ip_version_to_address_scope.py neutron/db/migration/alembic_migrations/versions/mitaka/expand/dce3ec7a25c9_router_az.py neutron/db/migration/alembic_migrations/versions/mitaka/expand/ec7fcfbf72ee_network_az.py neutron/db/migration/models/__init__.py neutron/db/migration/models/head.py neutron/db/qos/__init__.py neutron/db/qos/api.py neutron/db/qos/models.py neutron/db/quota/__init__.py neutron/db/quota/api.py neutron/db/quota/driver.py neutron/db/quota/models.py neutron/debug/README neutron/debug/__init__.py neutron/debug/commands.py neutron/debug/debug_agent.py neutron/debug/shell.py neutron/extensions/__init__.py neutron/extensions/address_scope.py neutron/extensions/agent.py neutron/extensions/allowedaddresspairs.py neutron/extensions/auto_allocated_topology.py neutron/extensions/availability_zone.py neutron/extensions/bgp.py neutron/extensions/bgp_dragentscheduler.py neutron/extensions/default_subnetpools.py neutron/extensions/dhcpagentscheduler.py neutron/extensions/dns.py neutron/extensions/dvr.py neutron/extensions/external_net.py neutron/extensions/extra_dhcp_opt.py neutron/extensions/extraroute.py neutron/extensions/flavors.py neutron/extensions/l3.py neutron/extensions/l3_ext_gw_mode.py neutron/extensions/l3_ext_ha_mode.py neutron/extensions/l3agentscheduler.py neutron/extensions/metering.py neutron/extensions/multiprovidernet.py neutron/extensions/netmtu.py neutron/extensions/network_availability_zone.py neutron/extensions/network_ip_availability.py neutron/extensions/portbindings.py neutron/extensions/portsecurity.py neutron/extensions/providernet.py neutron/extensions/qos.py neutron/extensions/quotasv2.py neutron/extensions/rbac.py 
neutron/extensions/router_availability_zone.py neutron/extensions/routerservicetype.py neutron/extensions/securitygroup.py neutron/extensions/servicetype.py neutron/extensions/standardattrdescription.py neutron/extensions/subnetallocation.py neutron/extensions/tag.py neutron/extensions/timestamp_core.py neutron/extensions/vlantransparent.py neutron/hacking/__init__.py neutron/hacking/checks.py neutron/ipam/__init__.py neutron/ipam/driver.py neutron/ipam/exceptions.py neutron/ipam/requests.py neutron/ipam/subnet_alloc.py neutron/ipam/utils.py neutron/ipam/drivers/__init__.py neutron/ipam/drivers/neutrondb_ipam/__init__.py neutron/ipam/drivers/neutrondb_ipam/db_api.py neutron/ipam/drivers/neutrondb_ipam/db_models.py neutron/ipam/drivers/neutrondb_ipam/driver.py neutron/locale/de/LC_MESSAGES/neutron.po neutron/locale/es/LC_MESSAGES/neutron.po neutron/locale/fr/LC_MESSAGES/neutron.po neutron/locale/it/LC_MESSAGES/neutron.po neutron/locale/ja/LC_MESSAGES/neutron.po neutron/locale/ko_KR/LC_MESSAGES/neutron-log-error.po neutron/locale/ko_KR/LC_MESSAGES/neutron-log-info.po neutron/locale/ko_KR/LC_MESSAGES/neutron-log-warning.po neutron/locale/ko_KR/LC_MESSAGES/neutron.po neutron/locale/pt_BR/LC_MESSAGES/neutron.po neutron/locale/ru/LC_MESSAGES/neutron.po neutron/locale/tr_TR/LC_MESSAGES/neutron-log-error.po neutron/locale/tr_TR/LC_MESSAGES/neutron-log-info.po neutron/locale/tr_TR/LC_MESSAGES/neutron-log-warning.po neutron/locale/tr_TR/LC_MESSAGES/neutron.po neutron/locale/zh_CN/LC_MESSAGES/neutron.po neutron/locale/zh_TW/LC_MESSAGES/neutron.po neutron/notifiers/__init__.py neutron/notifiers/batch_notifier.py neutron/notifiers/nova.py neutron/objects/__init__.py neutron/objects/base.py neutron/objects/common_types.py neutron/objects/rbac_db.py neutron/objects/db/__init__.py neutron/objects/db/api.py neutron/objects/qos/__init__.py neutron/objects/qos/policy.py neutron/objects/qos/rule.py neutron/objects/qos/rule_type.py neutron/openstack/__init__.py neutron/openstack/common/__init__.py neutron/openstack/common/cache/__init__.py neutron/openstack/common/cache/backends.py neutron/openstack/common/cache/cache.py neutron/openstack/common/cache/_backends/__init__.py neutron/openstack/common/cache/_backends/memory.py neutron/pecan_wsgi/__init__.py neutron/pecan_wsgi/app.py neutron/pecan_wsgi/constants.py neutron/pecan_wsgi/startup.py neutron/pecan_wsgi/controllers/__init__.py neutron/pecan_wsgi/controllers/extensions.py neutron/pecan_wsgi/controllers/quota.py neutron/pecan_wsgi/controllers/resource.py neutron/pecan_wsgi/controllers/root.py neutron/pecan_wsgi/controllers/router.py neutron/pecan_wsgi/controllers/utils.py neutron/pecan_wsgi/hooks/__init__.py neutron/pecan_wsgi/hooks/body_validation.py neutron/pecan_wsgi/hooks/context.py neutron/pecan_wsgi/hooks/notifier.py neutron/pecan_wsgi/hooks/ownership_validation.py neutron/pecan_wsgi/hooks/policy_enforcement.py neutron/pecan_wsgi/hooks/quota_enforcement.py neutron/pecan_wsgi/hooks/translation.py neutron/plugins/__init__.py neutron/plugins/common/__init__.py neutron/plugins/common/constants.py neutron/plugins/common/utils.py neutron/plugins/hyperv/__init__.py neutron/plugins/hyperv/agent/__init__.py neutron/plugins/hyperv/agent/security_groups_driver.py neutron/plugins/ml2/README neutron/plugins/ml2/__init__.py neutron/plugins/ml2/config.py neutron/plugins/ml2/db.py neutron/plugins/ml2/driver_api.py neutron/plugins/ml2/driver_context.py neutron/plugins/ml2/managers.py neutron/plugins/ml2/models.py neutron/plugins/ml2/plugin.py 
neutron/plugins/ml2/rpc.py neutron/plugins/ml2/common/__init__.py neutron/plugins/ml2/common/exceptions.py neutron/plugins/ml2/drivers/__init__.py neutron/plugins/ml2/drivers/helpers.py neutron/plugins/ml2/drivers/mech_agent.py neutron/plugins/ml2/drivers/type_flat.py neutron/plugins/ml2/drivers/type_geneve.py neutron/plugins/ml2/drivers/type_gre.py neutron/plugins/ml2/drivers/type_local.py neutron/plugins/ml2/drivers/type_tunnel.py neutron/plugins/ml2/drivers/type_vlan.py neutron/plugins/ml2/drivers/type_vxlan.py neutron/plugins/ml2/drivers/agent/__init__.py neutron/plugins/ml2/drivers/agent/_agent_manager_base.py neutron/plugins/ml2/drivers/agent/_common_agent.py neutron/plugins/ml2/drivers/agent/config.py neutron/plugins/ml2/drivers/l2pop/README neutron/plugins/ml2/drivers/l2pop/__init__.py neutron/plugins/ml2/drivers/l2pop/config.py neutron/plugins/ml2/drivers/l2pop/db.py neutron/plugins/ml2/drivers/l2pop/mech_driver.py neutron/plugins/ml2/drivers/l2pop/rpc.py neutron/plugins/ml2/drivers/l2pop/rpc_manager/__init__.py neutron/plugins/ml2/drivers/l2pop/rpc_manager/l2population_rpc.py neutron/plugins/ml2/drivers/linuxbridge/__init__.py neutron/plugins/ml2/drivers/linuxbridge/agent/__init__.py neutron/plugins/ml2/drivers/linuxbridge/agent/arp_protect.py neutron/plugins/ml2/drivers/linuxbridge/agent/linuxbridge_neutron_agent.py neutron/plugins/ml2/drivers/linuxbridge/agent/common/__init__.py neutron/plugins/ml2/drivers/linuxbridge/agent/common/config.py neutron/plugins/ml2/drivers/linuxbridge/agent/common/constants.py neutron/plugins/ml2/drivers/linuxbridge/agent/extension_drivers/__init__.py neutron/plugins/ml2/drivers/linuxbridge/agent/extension_drivers/qos_driver.py neutron/plugins/ml2/drivers/linuxbridge/mech_driver/__init__.py neutron/plugins/ml2/drivers/linuxbridge/mech_driver/mech_linuxbridge.py neutron/plugins/ml2/drivers/macvtap/__init__.py neutron/plugins/ml2/drivers/macvtap/macvtap_common.py neutron/plugins/ml2/drivers/macvtap/agent/__init__.py neutron/plugins/ml2/drivers/macvtap/agent/config.py neutron/plugins/ml2/drivers/macvtap/agent/macvtap_neutron_agent.py neutron/plugins/ml2/drivers/macvtap/mech_driver/__init__.py neutron/plugins/ml2/drivers/macvtap/mech_driver/mech_macvtap.py neutron/plugins/ml2/drivers/mech_sriov/__init__.py neutron/plugins/ml2/drivers/mech_sriov/agent/__init__.py neutron/plugins/ml2/drivers/mech_sriov/agent/eswitch_manager.py neutron/plugins/ml2/drivers/mech_sriov/agent/pci_lib.py neutron/plugins/ml2/drivers/mech_sriov/agent/sriov_nic_agent.py neutron/plugins/ml2/drivers/mech_sriov/agent/common/__init__.py neutron/plugins/ml2/drivers/mech_sriov/agent/common/config.py neutron/plugins/ml2/drivers/mech_sriov/agent/common/exceptions.py neutron/plugins/ml2/drivers/mech_sriov/agent/extension_drivers/__init__.py neutron/plugins/ml2/drivers/mech_sriov/agent/extension_drivers/qos_driver.py neutron/plugins/ml2/drivers/mech_sriov/mech_driver/__init__.py neutron/plugins/ml2/drivers/mech_sriov/mech_driver/exceptions.py neutron/plugins/ml2/drivers/mech_sriov/mech_driver/mech_driver.py neutron/plugins/ml2/drivers/openvswitch/__init__.py neutron/plugins/ml2/drivers/openvswitch/agent/__init__.py neutron/plugins/ml2/drivers/openvswitch/agent/main.py neutron/plugins/ml2/drivers/openvswitch/agent/ovs_agent_extension_api.py neutron/plugins/ml2/drivers/openvswitch/agent/ovs_dvr_neutron_agent.py neutron/plugins/ml2/drivers/openvswitch/agent/ovs_neutron_agent.py neutron/plugins/ml2/drivers/openvswitch/agent/common/__init__.py 
neutron/plugins/ml2/drivers/openvswitch/agent/common/config.py neutron/plugins/ml2/drivers/openvswitch/agent/common/constants.py neutron/plugins/ml2/drivers/openvswitch/agent/extension_drivers/__init__.py neutron/plugins/ml2/drivers/openvswitch/agent/extension_drivers/qos_driver.py neutron/plugins/ml2/drivers/openvswitch/agent/openflow/__init__.py neutron/plugins/ml2/drivers/openvswitch/agent/openflow/br_cookie.py neutron/plugins/ml2/drivers/openvswitch/agent/openflow/native/__init__.py neutron/plugins/ml2/drivers/openvswitch/agent/openflow/native/br_dvr_process.py neutron/plugins/ml2/drivers/openvswitch/agent/openflow/native/br_int.py neutron/plugins/ml2/drivers/openvswitch/agent/openflow/native/br_phys.py neutron/plugins/ml2/drivers/openvswitch/agent/openflow/native/br_tun.py neutron/plugins/ml2/drivers/openvswitch/agent/openflow/native/main.py neutron/plugins/ml2/drivers/openvswitch/agent/openflow/native/ofswitch.py neutron/plugins/ml2/drivers/openvswitch/agent/openflow/native/ovs_bridge.py neutron/plugins/ml2/drivers/openvswitch/agent/openflow/native/ovs_ryuapp.py neutron/plugins/ml2/drivers/openvswitch/agent/openflow/ovs_ofctl/__init__.py neutron/plugins/ml2/drivers/openvswitch/agent/openflow/ovs_ofctl/br_dvr_process.py neutron/plugins/ml2/drivers/openvswitch/agent/openflow/ovs_ofctl/br_int.py neutron/plugins/ml2/drivers/openvswitch/agent/openflow/ovs_ofctl/br_phys.py neutron/plugins/ml2/drivers/openvswitch/agent/openflow/ovs_ofctl/br_tun.py neutron/plugins/ml2/drivers/openvswitch/agent/openflow/ovs_ofctl/main.py neutron/plugins/ml2/drivers/openvswitch/agent/openflow/ovs_ofctl/ofswitch.py neutron/plugins/ml2/drivers/openvswitch/agent/openflow/ovs_ofctl/ovs_bridge.py neutron/plugins/ml2/drivers/openvswitch/agent/xenapi/README neutron/plugins/ml2/drivers/openvswitch/agent/xenapi/contrib/build-rpm.sh neutron/plugins/ml2/drivers/openvswitch/agent/xenapi/contrib/rpmbuild/SPECS/openstack-quantum-xen-plugins.spec neutron/plugins/ml2/drivers/openvswitch/agent/xenapi/etc/xapi.d/plugins/netwrap neutron/plugins/ml2/drivers/openvswitch/mech_driver/__init__.py neutron/plugins/ml2/drivers/openvswitch/mech_driver/mech_openvswitch.py neutron/plugins/ml2/extensions/__init__.py neutron/plugins/ml2/extensions/dns_integration.py neutron/plugins/ml2/extensions/port_security.py neutron/plugins/ml2/extensions/qos.py neutron/quota/__init__.py neutron/quota/resource.py neutron/quota/resource_registry.py neutron/scheduler/__init__.py neutron/scheduler/base_resource_filter.py neutron/scheduler/base_scheduler.py neutron/scheduler/dhcp_agent_scheduler.py neutron/scheduler/l3_agent_scheduler.py neutron/server/__init__.py neutron/server/rpc_eventlet.py neutron/server/wsgi_eventlet.py neutron/server/wsgi_pecan.py neutron/services/__init__.py neutron/services/provider_configuration.py neutron/services/service_base.py neutron/services/auto_allocate/__init__.py neutron/services/auto_allocate/db.py neutron/services/auto_allocate/exceptions.py neutron/services/auto_allocate/models.py neutron/services/auto_allocate/plugin.py neutron/services/bgp/__init__.py neutron/services/bgp/bgp_plugin.py neutron/services/bgp/agent/__init__.py neutron/services/bgp/agent/bgp_dragent.py neutron/services/bgp/agent/config.py neutron/services/bgp/agent/entry.py neutron/services/bgp/common/__init__.py neutron/services/bgp/common/constants.py neutron/services/bgp/common/opts.py neutron/services/bgp/driver/__init__.py neutron/services/bgp/driver/base.py neutron/services/bgp/driver/exceptions.py neutron/services/bgp/driver/utils.py 
neutron/services/bgp/driver/ryu/__init__.py neutron/services/bgp/driver/ryu/driver.py neutron/services/bgp/scheduler/__init__.py neutron/services/bgp/scheduler/bgp_dragent_scheduler.py neutron/services/externaldns/__init__.py neutron/services/externaldns/driver.py neutron/services/externaldns/drivers/__init__.py neutron/services/externaldns/drivers/designate/__init__.py neutron/services/externaldns/drivers/designate/driver.py neutron/services/firewall/__init__.py neutron/services/firewall/agents/__init__.py neutron/services/firewall/agents/firewall_agent_api.py neutron/services/firewall/agents/l3reference/__init__.py neutron/services/firewall/agents/l3reference/firewall_l3_agent.py neutron/services/flavors/__init__.py neutron/services/flavors/flavors_plugin.py neutron/services/l3_router/README neutron/services/l3_router/__init__.py neutron/services/l3_router/l3_router_plugin.py neutron/services/metering/__init__.py neutron/services/metering/metering_plugin.py neutron/services/metering/agents/__init__.py neutron/services/metering/agents/metering_agent.py neutron/services/metering/drivers/__init__.py neutron/services/metering/drivers/abstract_driver.py neutron/services/metering/drivers/iptables/__init__.py neutron/services/metering/drivers/iptables/iptables_driver.py neutron/services/metering/drivers/noop/__init__.py neutron/services/metering/drivers/noop/noop_driver.py neutron/services/network_ip_availability/__init__.py neutron/services/network_ip_availability/plugin.py neutron/services/qos/__init__.py neutron/services/qos/qos_consts.py neutron/services/qos/qos_plugin.py neutron/services/qos/notification_drivers/__init__.py neutron/services/qos/notification_drivers/manager.py neutron/services/qos/notification_drivers/message_queue.py neutron/services/qos/notification_drivers/qos_base.py neutron/services/rbac/__init__.py neutron/services/tag/__init__.py neutron/services/tag/tag_plugin.py neutron/services/timestamp/__init__.py neutron/services/timestamp/timestamp_db.py neutron/services/timestamp/timestamp_plugin.py neutron/tests/__init__.py neutron/tests/base.py neutron/tests/fake_notifier.py neutron/tests/post_mortem_debug.py neutron/tests/tools.py neutron/tests/api/__init__.py neutron/tests/api/base.py neutron/tests/api/base_routers.py neutron/tests/api/base_security_groups.py neutron/tests/api/clients.py neutron/tests/api/requirements.txt neutron/tests/api/test_address_scopes.py neutron/tests/api/test_address_scopes_negative.py neutron/tests/api/test_allowed_address_pair.py neutron/tests/api/test_auto_allocated_topology.py neutron/tests/api/test_bgp_speaker_extensions.py neutron/tests/api/test_bgp_speaker_extensions_negative.py neutron/tests/api/test_dhcp_ipv6.py neutron/tests/api/test_extension_driver_port_security.py neutron/tests/api/test_extra_dhcp_options.py neutron/tests/api/test_flavors_extensions.py neutron/tests/api/test_floating_ips.py neutron/tests/api/test_floating_ips_negative.py neutron/tests/api/test_metering_extensions.py neutron/tests/api/test_network_ip_availability.py neutron/tests/api/test_networks.py neutron/tests/api/test_ports.py neutron/tests/api/test_qos.py neutron/tests/api/test_routers.py neutron/tests/api/test_routers_negative.py neutron/tests/api/test_security_groups.py neutron/tests/api/test_security_groups_negative.py neutron/tests/api/test_service_type_management.py neutron/tests/api/test_subnetpools.py neutron/tests/api/test_subnetpools_negative.py neutron/tests/api/test_timestamp.py neutron/tests/api/admin/__init__.py 
neutron/tests/api/admin/test_agent_management.py neutron/tests/api/admin/test_dhcp_agent_scheduler.py neutron/tests/api/admin/test_extension_driver_port_security_admin.py neutron/tests/api/admin/test_external_network_extension.py neutron/tests/api/admin/test_floating_ips_admin_actions.py neutron/tests/api/admin/test_l3_agent_scheduler.py neutron/tests/api/admin/test_quotas.py neutron/tests/api/admin/test_routers_dvr.py neutron/tests/api/admin/test_shared_network_extension.py neutron/tests/common/__init__.py neutron/tests/common/base.py neutron/tests/common/config_fixtures.py neutron/tests/common/conn_testers.py neutron/tests/common/helpers.py neutron/tests/common/l3_test_common.py neutron/tests/common/machine_fixtures.py neutron/tests/common/net_helpers.py neutron/tests/common/agents/__init__.py neutron/tests/common/agents/l2_extensions.py neutron/tests/common/agents/l3_agent.py neutron/tests/common/agents/ovs_agent.py neutron/tests/contrib/README neutron/tests/contrib/functional-testing.filters neutron/tests/contrib/gate_hook.sh neutron/tests/contrib/post_test_hook.sh neutron/tests/etc/api-paste.ini.test neutron/tests/etc/neutron.conf neutron/tests/etc/neutron_test.conf neutron/tests/etc/neutron_test2.conf.example neutron/tests/etc/policy.json neutron/tests/fullstack/README neutron/tests/fullstack/__init__.py neutron/tests/fullstack/base.py neutron/tests/fullstack/test_connectivity.py neutron/tests/fullstack/test_l3_agent.py neutron/tests/fullstack/test_qos.py neutron/tests/fullstack/resources/__init__.py neutron/tests/fullstack/resources/client.py neutron/tests/fullstack/resources/config.py neutron/tests/fullstack/resources/environment.py neutron/tests/fullstack/resources/machine.py neutron/tests/fullstack/resources/process.py neutron/tests/functional/__init__.py neutron/tests/functional/base.py neutron/tests/functional/requirements.txt neutron/tests/functional/test_server.py neutron/tests/functional/test_service.py neutron/tests/functional/agent/__init__.py neutron/tests/functional/agent/test_dhcp_agent.py neutron/tests/functional/agent/test_firewall.py neutron/tests/functional/agent/test_l2_lb_agent.py neutron/tests/functional/agent/test_l2_ovs_agent.py neutron/tests/functional/agent/test_ovs_flows.py neutron/tests/functional/agent/test_ovs_lib.py neutron/tests/functional/agent/l2/__init__.py neutron/tests/functional/agent/l2/base.py neutron/tests/functional/agent/l2/extensions/__init__.py neutron/tests/functional/agent/l2/extensions/test_ovs_agent_qos_extension.py neutron/tests/functional/agent/l3/__init__.py neutron/tests/functional/agent/l3/framework.py neutron/tests/functional/agent/l3/test_dvr_router.py neutron/tests/functional/agent/l3/test_ha_router.py neutron/tests/functional/agent/l3/test_keepalived_state_change.py neutron/tests/functional/agent/l3/test_legacy_router.py neutron/tests/functional/agent/l3/test_metadata_proxy.py neutron/tests/functional/agent/l3/test_namespace_manager.py neutron/tests/functional/agent/linux/__init__.py neutron/tests/functional/agent/linux/base.py neutron/tests/functional/agent/linux/helpers.py neutron/tests/functional/agent/linux/simple_daemon.py neutron/tests/functional/agent/linux/test_async_process.py neutron/tests/functional/agent/linux/test_bridge_lib.py neutron/tests/functional/agent/linux/test_dhcp.py neutron/tests/functional/agent/linux/test_interface.py neutron/tests/functional/agent/linux/test_ip_lib.py neutron/tests/functional/agent/linux/test_ip_monitor.py neutron/tests/functional/agent/linux/test_ipset.py 
neutron/tests/functional/agent/linux/test_iptables.py neutron/tests/functional/agent/linux/test_keepalived.py neutron/tests/functional/agent/linux/test_linuxbridge_arp_protect.py neutron/tests/functional/agent/linux/test_ovsdb_monitor.py neutron/tests/functional/agent/linux/test_process_monitor.py neutron/tests/functional/agent/linux/test_tc_lib.py neutron/tests/functional/agent/linux/test_utils.py neutron/tests/functional/agent/linux/bin/__init__.py neutron/tests/functional/agent/linux/bin/ipt_binname.py neutron/tests/functional/agent/ovsdb/__init__.py neutron/tests/functional/agent/ovsdb/test_impl_idl.py neutron/tests/functional/agent/windows/__init__.py neutron/tests/functional/agent/windows/test_ip_lib.py neutron/tests/functional/api/__init__.py neutron/tests/functional/api/test_policies.py neutron/tests/functional/cmd/__init__.py neutron/tests/functional/cmd/test_linuxbridge_cleanup.py neutron/tests/functional/cmd/test_netns_cleanup.py neutron/tests/functional/common/__init__.py neutron/tests/functional/common/test_utils.py neutron/tests/functional/db/__init__.py neutron/tests/functional/db/test_ipam.py neutron/tests/functional/db/test_migrations.py neutron/tests/functional/db/test_models.py neutron/tests/functional/pecan_wsgi/__init__.py neutron/tests/functional/pecan_wsgi/config.py neutron/tests/functional/pecan_wsgi/test_controllers.py neutron/tests/functional/pecan_wsgi/test_functional.py neutron/tests/functional/pecan_wsgi/test_hooks.py neutron/tests/functional/pecan_wsgi/utils.py neutron/tests/functional/plugins/__init__.py neutron/tests/functional/plugins/ml2/__init__.py neutron/tests/functional/plugins/ml2/test_plugin.py neutron/tests/functional/plugins/ml2/drivers/__init__.py neutron/tests/functional/plugins/ml2/drivers/macvtap/__init__.py neutron/tests/functional/plugins/ml2/drivers/macvtap/agent/__init__.py neutron/tests/functional/plugins/ml2/drivers/macvtap/agent/test_macvtap_neutron_agent.py neutron/tests/functional/sanity/__init__.py neutron/tests/functional/sanity/test_sanity.py neutron/tests/functional/scheduler/__init__.py neutron/tests/functional/scheduler/test_dhcp_agent_scheduler.py neutron/tests/functional/scheduler/test_l3_agent_scheduler.py neutron/tests/functional/services/__init__.py neutron/tests/functional/services/bgp/__init__.py neutron/tests/functional/services/bgp/scheduler/__init__.py neutron/tests/functional/services/bgp/scheduler/test_bgp_dragent_scheduler.py neutron/tests/functional/services/l3_router/__init__.py neutron/tests/functional/services/l3_router/test_l3_dvr_ha_router_plugin.py neutron/tests/functional/services/l3_router/test_l3_dvr_router_plugin.py neutron/tests/retargetable/__init__.py neutron/tests/retargetable/base.py neutron/tests/retargetable/client_fixtures.py neutron/tests/retargetable/rest_fixture.py neutron/tests/retargetable/test_example.py neutron/tests/tempest/README.rst neutron/tests/tempest/__init__.py neutron/tests/tempest/config.py neutron/tests/tempest/exceptions.py neutron/tests/tempest/common/__init__.py neutron/tests/tempest/common/tempest_fixtures.py neutron/tests/tempest/services/__init__.py neutron/tests/tempest/services/network/__init__.py neutron/tests/tempest/services/network/json/__init__.py neutron/tests/tempest/services/network/json/network_client.py neutron/tests/unit/__init__.py neutron/tests/unit/_test_extension_portbindings.py neutron/tests/unit/dummy_plugin.py neutron/tests/unit/extension_stubs.py neutron/tests/unit/test_auth.py neutron/tests/unit/test_context.py neutron/tests/unit/test_manager.py 
neutron/tests/unit/test_policy.py neutron/tests/unit/test_service.py neutron/tests/unit/test_wsgi.py neutron/tests/unit/testlib_api.py neutron/tests/unit/agent/__init__.py neutron/tests/unit/agent/test_rpc.py neutron/tests/unit/agent/test_securitygroups_rpc.py neutron/tests/unit/agent/common/__init__.py neutron/tests/unit/agent/common/test_config.py neutron/tests/unit/agent/common/test_ovs_lib.py neutron/tests/unit/agent/common/test_polling.py neutron/tests/unit/agent/common/test_utils.py neutron/tests/unit/agent/dhcp/__init__.py neutron/tests/unit/agent/dhcp/test_agent.py neutron/tests/unit/agent/l2/__init__.py neutron/tests/unit/agent/l2/extensions/__init__.py neutron/tests/unit/agent/l2/extensions/test_manager.py neutron/tests/unit/agent/l2/extensions/test_qos.py neutron/tests/unit/agent/l3/__init__.py neutron/tests/unit/agent/l3/test_agent.py neutron/tests/unit/agent/l3/test_dvr_fip_ns.py neutron/tests/unit/agent/l3/test_dvr_local_router.py neutron/tests/unit/agent/l3/test_fip_rule_priority_allocator.py neutron/tests/unit/agent/l3/test_ha_router.py neutron/tests/unit/agent/l3/test_item_allocator.py neutron/tests/unit/agent/l3/test_legacy_router.py neutron/tests/unit/agent/l3/test_link_local_allocator.py neutron/tests/unit/agent/l3/test_namespace_manager.py neutron/tests/unit/agent/l3/test_router_info.py neutron/tests/unit/agent/l3/test_router_processing_queue.py neutron/tests/unit/agent/linux/__init__.py neutron/tests/unit/agent/linux/failing_process.py neutron/tests/unit/agent/linux/test_async_process.py neutron/tests/unit/agent/linux/test_bridge_lib.py neutron/tests/unit/agent/linux/test_daemon.py neutron/tests/unit/agent/linux/test_dhcp.py neutron/tests/unit/agent/linux/test_external_process.py neutron/tests/unit/agent/linux/test_interface.py neutron/tests/unit/agent/linux/test_ip_conntrack.py neutron/tests/unit/agent/linux/test_ip_lib.py neutron/tests/unit/agent/linux/test_ip_link_support.py neutron/tests/unit/agent/linux/test_ip_monitor.py neutron/tests/unit/agent/linux/test_ipset_manager.py neutron/tests/unit/agent/linux/test_iptables_firewall.py neutron/tests/unit/agent/linux/test_iptables_manager.py neutron/tests/unit/agent/linux/test_keepalived.py neutron/tests/unit/agent/linux/test_ovsdb_monitor.py neutron/tests/unit/agent/linux/test_pd.py neutron/tests/unit/agent/linux/test_polling.py neutron/tests/unit/agent/linux/test_tc_lib.py neutron/tests/unit/agent/linux/test_utils.py neutron/tests/unit/agent/linux/openvswitch_firewall/__init__.py neutron/tests/unit/agent/linux/openvswitch_firewall/test_firewall.py neutron/tests/unit/agent/linux/openvswitch_firewall/test_rules.py neutron/tests/unit/agent/metadata/__init__.py neutron/tests/unit/agent/metadata/test_agent.py neutron/tests/unit/agent/metadata/test_driver.py neutron/tests/unit/agent/metadata/test_namespace_proxy.py neutron/tests/unit/agent/ovsdb/__init__.py neutron/tests/unit/agent/ovsdb/test_impl_idl.py neutron/tests/unit/agent/ovsdb/native/__init__.py neutron/tests/unit/agent/ovsdb/native/test_helpers.py neutron/tests/unit/agent/windows/__init__.py neutron/tests/unit/agent/windows/test_ip_lib.py neutron/tests/unit/api/__init__.py neutron/tests/unit/api/test_api_common.py neutron/tests/unit/api/test_extensions.py neutron/tests/unit/api/rpc/__init__.py neutron/tests/unit/api/rpc/agentnotifiers/__init__.py neutron/tests/unit/api/rpc/agentnotifiers/test_bgp_dr_rpc_agent_api.py neutron/tests/unit/api/rpc/agentnotifiers/test_dhcp_rpc_agent_api.py neutron/tests/unit/api/rpc/agentnotifiers/test_l3_rpc_agent_api.py 
neutron/tests/unit/api/rpc/callbacks/__init__.py neutron/tests/unit/api/rpc/callbacks/test_resource_manager.py neutron/tests/unit/api/rpc/callbacks/test_resources.py neutron/tests/unit/api/rpc/callbacks/test_version_manager.py neutron/tests/unit/api/rpc/callbacks/consumer/__init__.py neutron/tests/unit/api/rpc/callbacks/consumer/test_registry.py neutron/tests/unit/api/rpc/callbacks/producer/__init__.py neutron/tests/unit/api/rpc/callbacks/producer/test_registry.py neutron/tests/unit/api/rpc/handlers/__init__.py neutron/tests/unit/api/rpc/handlers/test_bgp_speaker_rpc.py neutron/tests/unit/api/rpc/handlers/test_dhcp_rpc.py neutron/tests/unit/api/rpc/handlers/test_dvr_rpc.py neutron/tests/unit/api/rpc/handlers/test_l3_rpc.py neutron/tests/unit/api/rpc/handlers/test_resources_rpc.py neutron/tests/unit/api/rpc/handlers/test_securitygroups_rpc.py neutron/tests/unit/api/v2/__init__.py neutron/tests/unit/api/v2/test_attributes.py neutron/tests/unit/api/v2/test_base.py neutron/tests/unit/api/v2/test_resource.py neutron/tests/unit/callbacks/__init__.py neutron/tests/unit/callbacks/test_manager.py neutron/tests/unit/cmd/__init__.py neutron/tests/unit/cmd/test_netns_cleanup.py neutron/tests/unit/cmd/test_ovs_cleanup.py neutron/tests/unit/cmd/test_sanity_check.py neutron/tests/unit/cmd/server/__init__.py neutron/tests/unit/common/__init__.py neutron/tests/unit/common/test_ipv6_utils.py neutron/tests/unit/common/test_rpc.py neutron/tests/unit/common/test_utils.py neutron/tests/unit/core_extensions/__init__.py neutron/tests/unit/core_extensions/test_qos.py neutron/tests/unit/db/__init__.py neutron/tests/unit/db/test_agents_db.py neutron/tests/unit/db/test_agentschedulers_db.py neutron/tests/unit/db/test_allowedaddresspairs_db.py neutron/tests/unit/db/test_api.py neutron/tests/unit/db/test_bgp_db.py neutron/tests/unit/db/test_bgp_dragentscheduler_db.py neutron/tests/unit/db/test_common_db_mixin.py neutron/tests/unit/db/test_db_base_plugin_common.py neutron/tests/unit/db/test_db_base_plugin_v2.py neutron/tests/unit/db/test_dvr_mac_db.py neutron/tests/unit/db/test_ipam_backend_mixin.py neutron/tests/unit/db/test_ipam_non_pluggable_backend.py neutron/tests/unit/db/test_ipam_pluggable_backend.py neutron/tests/unit/db/test_l3_db.py neutron/tests/unit/db/test_l3_dvr_db.py neutron/tests/unit/db/test_l3_hamode_db.py neutron/tests/unit/db/test_migration.py neutron/tests/unit/db/test_portsecurity_db.py neutron/tests/unit/db/test_portsecurity_db_common.py neutron/tests/unit/db/test_securitygroups_db.py neutron/tests/unit/db/test_sqlalchemytypes.py neutron/tests/unit/db/metering/__init__.py neutron/tests/unit/db/metering/test_metering_db.py neutron/tests/unit/db/quota/__init__.py neutron/tests/unit/db/quota/test_api.py neutron/tests/unit/db/quota/test_driver.py neutron/tests/unit/debug/__init__.py neutron/tests/unit/debug/test_commands.py neutron/tests/unit/extensions/__init__.py neutron/tests/unit/extensions/base.py neutron/tests/unit/extensions/extendedattribute.py neutron/tests/unit/extensions/extensionattribute.py neutron/tests/unit/extensions/foxinsocks.py neutron/tests/unit/extensions/test_address_scope.py neutron/tests/unit/extensions/test_agent.py neutron/tests/unit/extensions/test_availability_zone.py neutron/tests/unit/extensions/test_bgp_dragentscheduler.py neutron/tests/unit/extensions/test_default_subnetpools.py neutron/tests/unit/extensions/test_dns.py neutron/tests/unit/extensions/test_external_net.py neutron/tests/unit/extensions/test_extra_dhcp_opt.py neutron/tests/unit/extensions/test_extraroute.py 
neutron/tests/unit/extensions/test_flavors.py neutron/tests/unit/extensions/test_l3.py neutron/tests/unit/extensions/test_l3_ext_gw_mode.py neutron/tests/unit/extensions/test_netmtu.py neutron/tests/unit/extensions/test_network_ip_availability.py neutron/tests/unit/extensions/test_portsecurity.py neutron/tests/unit/extensions/test_providernet.py neutron/tests/unit/extensions/test_quotasv2.py neutron/tests/unit/extensions/test_router_availability_zone.py neutron/tests/unit/extensions/test_securitygroup.py neutron/tests/unit/extensions/test_servicetype.py neutron/tests/unit/extensions/test_tag.py neutron/tests/unit/extensions/test_timestamp_core.py neutron/tests/unit/extensions/test_vlantransparent.py neutron/tests/unit/extensions/v2attributes.py neutron/tests/unit/hacking/__init__.py neutron/tests/unit/hacking/test_checks.py neutron/tests/unit/ipam/__init__.py neutron/tests/unit/ipam/fake_driver.py neutron/tests/unit/ipam/test_requests.py neutron/tests/unit/ipam/test_subnet_alloc.py neutron/tests/unit/ipam/test_utils.py neutron/tests/unit/ipam/drivers/__init__.py neutron/tests/unit/ipam/drivers/neutrondb_ipam/__init__.py neutron/tests/unit/ipam/drivers/neutrondb_ipam/test_db_api.py neutron/tests/unit/ipam/drivers/neutrondb_ipam/test_driver.py neutron/tests/unit/notifiers/__init__.py neutron/tests/unit/notifiers/test_batch_notifier.py neutron/tests/unit/notifiers/test_nova.py neutron/tests/unit/objects/__init__.py neutron/tests/unit/objects/test_base.py neutron/tests/unit/objects/test_common_types.py neutron/tests/unit/objects/test_objects.py neutron/tests/unit/objects/test_rbac_db.py neutron/tests/unit/objects/qos/__init__.py neutron/tests/unit/objects/qos/test_policy.py neutron/tests/unit/objects/qos/test_rule.py neutron/tests/unit/objects/qos/test_rule_type.py neutron/tests/unit/plugins/__init__.py neutron/tests/unit/plugins/common/__init__.py neutron/tests/unit/plugins/common/test_utils.py neutron/tests/unit/plugins/ml2/__init__.py neutron/tests/unit/plugins/ml2/_test_mech_agent.py neutron/tests/unit/plugins/ml2/base.py neutron/tests/unit/plugins/ml2/test_agent_scheduler.py neutron/tests/unit/plugins/ml2/test_db.py neutron/tests/unit/plugins/ml2/test_driver_context.py neutron/tests/unit/plugins/ml2/test_ext_portsecurity.py neutron/tests/unit/plugins/ml2/test_extension_driver_api.py neutron/tests/unit/plugins/ml2/test_managers.py neutron/tests/unit/plugins/ml2/test_plugin.py neutron/tests/unit/plugins/ml2/test_port_binding.py neutron/tests/unit/plugins/ml2/test_rpc.py neutron/tests/unit/plugins/ml2/test_security_group.py neutron/tests/unit/plugins/ml2/test_tracked_resources.py neutron/tests/unit/plugins/ml2/drivers/__init__.py neutron/tests/unit/plugins/ml2/drivers/base_type_tunnel.py neutron/tests/unit/plugins/ml2/drivers/ext_test.py neutron/tests/unit/plugins/ml2/drivers/mech_fake_agent.py neutron/tests/unit/plugins/ml2/drivers/mechanism_logger.py neutron/tests/unit/plugins/ml2/drivers/mechanism_test.py neutron/tests/unit/plugins/ml2/drivers/test_helpers.py neutron/tests/unit/plugins/ml2/drivers/test_type_flat.py neutron/tests/unit/plugins/ml2/drivers/test_type_geneve.py neutron/tests/unit/plugins/ml2/drivers/test_type_gre.py neutron/tests/unit/plugins/ml2/drivers/test_type_local.py neutron/tests/unit/plugins/ml2/drivers/test_type_vlan.py neutron/tests/unit/plugins/ml2/drivers/test_type_vxlan.py neutron/tests/unit/plugins/ml2/drivers/agent/__init__.py neutron/tests/unit/plugins/ml2/drivers/agent/test__agent_manager_base.py 
neutron/tests/unit/plugins/ml2/drivers/agent/test__common_agent.py neutron/tests/unit/plugins/ml2/drivers/l2pop/__init__.py neutron/tests/unit/plugins/ml2/drivers/l2pop/test_db.py neutron/tests/unit/plugins/ml2/drivers/l2pop/test_mech_driver.py neutron/tests/unit/plugins/ml2/drivers/l2pop/rpc_manager/__init__.py neutron/tests/unit/plugins/ml2/drivers/l2pop/rpc_manager/l2population_rpc_base.py neutron/tests/unit/plugins/ml2/drivers/l2pop/rpc_manager/test_l2population_rpc.py neutron/tests/unit/plugins/ml2/drivers/linuxbridge/__init__.py neutron/tests/unit/plugins/ml2/drivers/linuxbridge/agent/__init__.py neutron/tests/unit/plugins/ml2/drivers/linuxbridge/agent/test_linuxbridge_neutron_agent.py neutron/tests/unit/plugins/ml2/drivers/linuxbridge/agent/extension_drivers/__init__.py neutron/tests/unit/plugins/ml2/drivers/linuxbridge/agent/extension_drivers/test_qos_driver.py neutron/tests/unit/plugins/ml2/drivers/linuxbridge/mech_driver/__init__.py neutron/tests/unit/plugins/ml2/drivers/linuxbridge/mech_driver/test_mech_linuxbridge.py neutron/tests/unit/plugins/ml2/drivers/macvtap/__init__.py neutron/tests/unit/plugins/ml2/drivers/macvtap/test_macvtap_common.py neutron/tests/unit/plugins/ml2/drivers/macvtap/agent/__init__.py neutron/tests/unit/plugins/ml2/drivers/macvtap/agent/test_macvtap_neutron_agent.py neutron/tests/unit/plugins/ml2/drivers/macvtap/mech_driver/__init__.py neutron/tests/unit/plugins/ml2/drivers/macvtap/mech_driver/test_mech_macvtap.py neutron/tests/unit/plugins/ml2/drivers/mech_sriov/__init__.py neutron/tests/unit/plugins/ml2/drivers/mech_sriov/agent/__init__.py neutron/tests/unit/plugins/ml2/drivers/mech_sriov/agent/test_eswitch_manager.py neutron/tests/unit/plugins/ml2/drivers/mech_sriov/agent/test_pci_lib.py neutron/tests/unit/plugins/ml2/drivers/mech_sriov/agent/test_sriov_nic_agent.py neutron/tests/unit/plugins/ml2/drivers/mech_sriov/agent/common/__init__.py neutron/tests/unit/plugins/ml2/drivers/mech_sriov/agent/common/test_config.py neutron/tests/unit/plugins/ml2/drivers/mech_sriov/agent/extension_drivers/__init__.py neutron/tests/unit/plugins/ml2/drivers/mech_sriov/agent/extension_drivers/test_qos_driver.py neutron/tests/unit/plugins/ml2/drivers/mech_sriov/mech_driver/__init__.py neutron/tests/unit/plugins/ml2/drivers/mech_sriov/mech_driver/test_mech_sriov_nic_switch.py neutron/tests/unit/plugins/ml2/drivers/openvswitch/__init__.py neutron/tests/unit/plugins/ml2/drivers/openvswitch/agent/__init__.py neutron/tests/unit/plugins/ml2/drivers/openvswitch/agent/fake_oflib.py neutron/tests/unit/plugins/ml2/drivers/openvswitch/agent/ovs_test_base.py neutron/tests/unit/plugins/ml2/drivers/openvswitch/agent/test_ovs_agent_extension_api.py neutron/tests/unit/plugins/ml2/drivers/openvswitch/agent/test_ovs_neutron_agent.py neutron/tests/unit/plugins/ml2/drivers/openvswitch/agent/test_ovs_tunnel.py neutron/tests/unit/plugins/ml2/drivers/openvswitch/agent/extension_drivers/__init__.py neutron/tests/unit/plugins/ml2/drivers/openvswitch/agent/extension_drivers/test_qos_driver.py neutron/tests/unit/plugins/ml2/drivers/openvswitch/agent/openflow/__init__.py neutron/tests/unit/plugins/ml2/drivers/openvswitch/agent/openflow/test_br_cookie.py neutron/tests/unit/plugins/ml2/drivers/openvswitch/agent/openflow/native/__init__.py neutron/tests/unit/plugins/ml2/drivers/openvswitch/agent/openflow/native/ovs_bridge_test_base.py neutron/tests/unit/plugins/ml2/drivers/openvswitch/agent/openflow/native/test_br_int.py 
neutron/tests/unit/plugins/ml2/drivers/openvswitch/agent/openflow/native/test_br_phys.py neutron/tests/unit/plugins/ml2/drivers/openvswitch/agent/openflow/native/test_br_tun.py neutron/tests/unit/plugins/ml2/drivers/openvswitch/agent/openflow/native/test_ovs_bridge.py neutron/tests/unit/plugins/ml2/drivers/openvswitch/agent/openflow/ovs_ofctl/__init__.py neutron/tests/unit/plugins/ml2/drivers/openvswitch/agent/openflow/ovs_ofctl/ovs_bridge_test_base.py neutron/tests/unit/plugins/ml2/drivers/openvswitch/agent/openflow/ovs_ofctl/test_br_int.py neutron/tests/unit/plugins/ml2/drivers/openvswitch/agent/openflow/ovs_ofctl/test_br_phys.py neutron/tests/unit/plugins/ml2/drivers/openvswitch/agent/openflow/ovs_ofctl/test_br_tun.py neutron/tests/unit/plugins/ml2/drivers/openvswitch/mech_driver/__init__.py neutron/tests/unit/plugins/ml2/drivers/openvswitch/mech_driver/test_mech_openvswitch.py neutron/tests/unit/plugins/ml2/extensions/__init__.py neutron/tests/unit/plugins/ml2/extensions/fake_extension.py neutron/tests/unit/plugins/ml2/extensions/test_dns_integration.py neutron/tests/unit/plugins/ml2/extensions/test_port_security.py neutron/tests/unit/quota/__init__.py neutron/tests/unit/quota/test_resource.py neutron/tests/unit/quota/test_resource_registry.py neutron/tests/unit/scheduler/__init__.py neutron/tests/unit/scheduler/test_dhcp_agent_scheduler.py neutron/tests/unit/scheduler/test_l3_agent_scheduler.py neutron/tests/unit/services/__init__.py neutron/tests/unit/services/test_provider_configuration.py neutron/tests/unit/services/auto_allocate/__init__.py neutron/tests/unit/services/auto_allocate/test_db.py neutron/tests/unit/services/bgp/__init__.py neutron/tests/unit/services/bgp/agent/__init__.py neutron/tests/unit/services/bgp/agent/test_bgp_dragent.py neutron/tests/unit/services/bgp/driver/__init__.py neutron/tests/unit/services/bgp/driver/test_utils.py neutron/tests/unit/services/bgp/driver/ryu/__init__.py neutron/tests/unit/services/bgp/driver/ryu/test_driver.py neutron/tests/unit/services/bgp/scheduler/__init__.py neutron/tests/unit/services/bgp/scheduler/test_bgp_dragent_scheduler.py neutron/tests/unit/services/metering/__init__.py neutron/tests/unit/services/metering/test_metering_plugin.py neutron/tests/unit/services/metering/agents/__init__.py neutron/tests/unit/services/metering/agents/test_metering_agent.py neutron/tests/unit/services/metering/drivers/__init__.py neutron/tests/unit/services/metering/drivers/test_iptables.py neutron/tests/unit/services/qos/__init__.py neutron/tests/unit/services/qos/base.py neutron/tests/unit/services/qos/test_qos_plugin.py neutron/tests/unit/services/qos/notification_drivers/__init__.py neutron/tests/unit/services/qos/notification_drivers/dummy.py neutron/tests/unit/services/qos/notification_drivers/test_manager.py neutron/tests/unit/services/qos/notification_drivers/test_message_queue.py neutron/tests/unit/tests/__init__.py neutron/tests/unit/tests/test_base.py neutron/tests/unit/tests/test_post_mortem_debug.py neutron/tests/unit/tests/test_tools.py neutron/tests/unit/tests/common/__init__.py neutron/tests/unit/tests/common/test_net_helpers.py neutron/tests/unit/tests/example/README neutron/tests/unit/tests/example/__init__.py neutron/tests/unit/tests/example/dir/__init__.py neutron/tests/unit/tests/example/dir/example_module.py neutron/tests/var/ca.crt neutron/tests/var/certandkey.pem neutron/tests/var/certificate.crt neutron/tests/var/privatekey.key rally-jobs/README.rst rally-jobs/neutron-neutron.yaml rally-jobs/extra/README.rst 
rally-jobs/plugins/README.rst rally-jobs/plugins/__init__.py releasenotes/notes/.placeholder releasenotes/notes/1500-default-mtu-b0d6e4ab193b62a4.yaml releasenotes/notes/1500-default-segment-mtu-54e2cf6aea9602d5.yaml releasenotes/notes/QoS-for-linuxbridge-agent-bdb13515aac4e555.yaml releasenotes/notes/access_as_external_rbac-455dc74b9fa22761.yaml releasenotes/notes/add-availability-zone-4440cf00be7c54ba.yaml releasenotes/notes/add-designate-driver-ssl-options-169c299c96f2aff0.yaml releasenotes/notes/add-get-me-a-network-56321aeef5389001.yaml releasenotes/notes/add-integration-with-external-dns-f56ec8a4993b1fc4.yaml releasenotes/notes/add-ip-protocols-in-sg-60467a073e771aee.yaml releasenotes/notes/add-port-rebinding-chance-33178b9abacf5804.yaml releasenotes/notes/add-rbac-qos-8b1154ee756c66df.yaml releasenotes/notes/add-standard-attr-descriptions-1ba0d7a454c3fd8f.yaml releasenotes/notes/add-tags-to-core-resources-b05330a129900609.yaml releasenotes/notes/add-timestamp-fields-f9ab949fc88f05f6.yaml releasenotes/notes/advertise_mtu_by_default-d8b0b056a74517b8.yaml releasenotes/notes/advertisement-intervals-for-radvd-configurable-6d85b5fdd97a2742.yaml releasenotes/notes/allow-non-admins-to-define-external-extra-routes-0d541fc356a5c546.yaml releasenotes/notes/bgp-support-ef361825ca63f28b.yaml releasenotes/notes/clear-allowed-address-pairs-with-none-4757bcca78076c9e.yaml releasenotes/notes/config-file-generation-2eafc6602d57178e.yaml releasenotes/notes/config-wsgi-pool-size-a4c06753b79fee6d.yaml releasenotes/notes/correlate-address-scope-with-network-ea16e16b0154ac21.yaml releasenotes/notes/default-local-dns-a1c3fa1451f228fa.yaml releasenotes/notes/default-subnetpool-semantics-1cdc5cdde2be88c2.yaml releasenotes/notes/deprecate-force_gateway_on_subnet-376855c4e66f4e11.yaml releasenotes/notes/deprecate-network-device-mtu-59b78264c9974808.yaml releasenotes/notes/deprecate-router_id-34aca9ea5ee9e789.yaml releasenotes/notes/deprecate_max_fixed_ips_per_port-5e80518cbf25cfd6.yaml releasenotes/notes/deprecated-driver-e368e0befc9bee4c.yaml releasenotes/notes/direct-physical-vnic-878d15bdb758b70e.yaml releasenotes/notes/dvr-ha-support-cc67e84d9380cd0b.yaml releasenotes/notes/dvr-ovs-agent-6052a8d60fddde22.yaml releasenotes/notes/dvr-support-live-migration-b818b12bd9cbb518.yaml releasenotes/notes/end-to-end-mtu-00345fc4282cb8fb.yaml releasenotes/notes/fail-on-missing-extensions-bc332124b780875b.yaml releasenotes/notes/firewall_driver_not_needed_on_server-4159669ad834dea6.yaml releasenotes/notes/hyperv-neutron-agent-decomposition-ae6a052aeb48c6ac.yaml releasenotes/notes/keepalived-state-change-server-threads-9ed775e7533dd1a0.yaml releasenotes/notes/l3ha-agent-server-dependency-1fcb775328ac4502.yaml releasenotes/notes/linuxbridge-agent-extensions-66bdf9feee25ef99.yaml releasenotes/notes/linuxbridge_vxlan_arp_responder-e9ea91552e1b62a7.yaml releasenotes/notes/macvtap-l2-agent-2b551d8ec341196d.yaml releasenotes/notes/macvtap_assigned_vf_check-f4d07660ffd82a24.yaml releasenotes/notes/mtu-selection-and-advertisement-ab29f9ec43140224.yaml releasenotes/notes/network_ip_availability-d64bd7032b3c15ee.yaml releasenotes/notes/new-vif-type-for-pf-passthrough-33ec560b9b5d246f.yaml releasenotes/notes/oslo-messaging-notifier-queue-d94677076a1db261.yaml releasenotes/notes/oslo-reports-166a169037bf64f2.yaml releasenotes/notes/ovs-ct-firewall-driver-52a70a6a16d06f59.yaml releasenotes/notes/ovs-ipv6-tunnel-endpoints-f41b4954a04c43f6.yaml releasenotes/notes/rm-notify-entry-points-aa442134a780469a.yaml 
releasenotes/notes/segment_mtu_to_global_physnet_mtu-9cee5ff09557edeb.yaml releasenotes/notes/sending-garp-for-l3-ha-c118871833ad8743.yaml releasenotes/notes/set-of-default-qos-burst-value-0790773703fa08fc.yaml releasenotes/notes/sriov-agent-num-vf-0-0c06424247e7efe0.yaml releasenotes/notes/sriov_allow_use_many_nics_for_one_physnet-3570aa67a60ce6c4.yaml releasenotes/notes/sriov_show_l2_agent_extensions-ca852e155a529e99.yaml releasenotes/notes/use-keystoneauth-24f309566001a16b.yaml releasenotes/source/README.rst releasenotes/source/conf.py releasenotes/source/index.rst releasenotes/source/liberty.rst releasenotes/source/unreleased.rst releasenotes/source/_static/.placeholder releasenotes/source/_templates/.placeholder tools/abandon_old_reviews.sh tools/check_unit_test_structure.sh tools/coding-checks.sh tools/configure_for_func_testing.sh tools/deploy_rootwrap.sh tools/generate_config_file_samples.sh tools/install_venv.py tools/install_venv_common.py tools/milestone-review-dash.py tools/misc-sanity-checks.sh tools/ostestr_compat_shim.sh tools/pecan_server.sh tools/split.sh tools/tox_install.sh tools/with_venv.sh
neutron-8.4.0/neutron.egg-info/dependency_links.txt0000664000567000056710000000000113044373206023612 0ustar jenkinsjenkins00000000000000
neutron-8.4.0/neutron.egg-info/not-zip-safe0000664000567000056710000000000113044373170021772 0ustar jenkinsjenkins00000000000000
neutron-8.4.0/neutron.egg-info/entry_points.txt0000664000567000056710000001676213044373206023046 0ustar jenkinsjenkins00000000000000[console_scripts]
neutron-bgp-dragent = neutron.cmd.eventlet.agents.bgp_dragent:main
neutron-db-manage = neutron.db.migration.cli:main
neutron-debug = neutron.debug.shell:main
neutron-dhcp-agent = neutron.cmd.eventlet.agents.dhcp:main
neutron-ipset-cleanup = neutron.cmd.ipset_cleanup:main
neutron-keepalived-state-change = neutron.cmd.keepalived_state_change:main
neutron-l3-agent = neutron.cmd.eventlet.agents.l3:main
neutron-linuxbridge-agent = neutron.cmd.eventlet.plugins.linuxbridge_neutron_agent:main
neutron-linuxbridge-cleanup = neutron.cmd.linuxbridge_cleanup:main
neutron-macvtap-agent = neutron.cmd.eventlet.plugins.macvtap_neutron_agent:main
neutron-metadata-agent = neutron.cmd.eventlet.agents.metadata:main
neutron-metering-agent = neutron.cmd.eventlet.services.metering_agent:main
neutron-netns-cleanup = neutron.cmd.netns_cleanup:main
neutron-ns-metadata-proxy = neutron.cmd.eventlet.agents.metadata_proxy:main
neutron-openvswitch-agent = neutron.cmd.eventlet.plugins.ovs_neutron_agent:main
neutron-ovs-cleanup = neutron.cmd.ovs_cleanup:main
neutron-pd-notify = neutron.cmd.pd_notify:main
neutron-rootwrap = oslo_rootwrap.cmd:main
neutron-rootwrap-daemon = oslo_rootwrap.cmd:daemon
neutron-rpc-server = neutron.cmd.eventlet.server:main_rpc_eventlet
neutron-sanity-check = neutron.cmd.sanity_check:main
neutron-server = neutron.cmd.eventlet.server:main
neutron-sriov-nic-agent = neutron.cmd.eventlet.plugins.sriov_nic_neutron_agent:main
neutron-usage-audit = neutron.cmd.eventlet.usage_audit:main
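The [console_scripts] block above is standard setuptools entry-point
configuration: each name on the left becomes an executable wrapper that
imports and calls the function on the right.  A minimal sketch (not part
of the tree) of the lookup such a wrapper performs, using the
neutron-server mapping above::

    # Sketch only: resolve a console_scripts entry point the way the
    # generated bin/neutron-server wrapper does (assumes neutron is
    # installed in the current environment).
    import pkg_resources

    main = pkg_resources.load_entry_point(
        'neutron', 'console_scripts', 'neutron-server')
    # Prints "neutron.cmd.eventlet.server main", matching the mapping
    # above; actually calling main() would start a real API server.
    print(main.__module__, main.__name__)
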
[neutron.agent.firewall_drivers]
iptables = neutron.agent.linux.iptables_firewall:IptablesFirewallDriver
iptables_hybrid = neutron.agent.linux.iptables_firewall:OVSHybridIptablesFirewallDriver
noop = neutron.agent.firewall:NoopFirewallDriver
openvswitch = neutron.agent.linux.openvswitch_firewall:OVSFirewallDriver

[neutron.agent.l2.extensions]
qos = neutron.agent.l2.extensions.qos:QosAgentExtension

[neutron.agent.linux.pd_drivers]
dibbler = neutron.agent.linux.dibbler:PDDibbler

[neutron.core_plugins]
ml2 = neutron.plugins.ml2.plugin:Ml2Plugin

[neutron.db.alembic_migrations]
neutron = neutron.db.migration:alembic_migrations

[neutron.interface_drivers]
ivs = neutron.agent.linux.interface:IVSInterfaceDriver
linuxbridge = neutron.agent.linux.interface:BridgeInterfaceDriver
null = neutron.agent.linux.interface:NullDriver
openvswitch = neutron.agent.linux.interface:OVSInterfaceDriver

[neutron.ipam_drivers]
fake = neutron.tests.unit.ipam.fake_driver:FakeDriver
internal = neutron.ipam.drivers.neutrondb_ipam.driver:NeutronDbPool

[neutron.ml2.extension_drivers]
dns = neutron.plugins.ml2.extensions.dns_integration:DNSExtensionDriverML2
port_security = neutron.plugins.ml2.extensions.port_security:PortSecurityExtensionDriver
qos = neutron.plugins.ml2.extensions.qos:QosExtensionDriver
test = neutron.tests.unit.plugins.ml2.drivers.ext_test:TestExtensionDriver
testdb = neutron.tests.unit.plugins.ml2.drivers.ext_test:TestDBExtensionDriver

[neutron.ml2.mechanism_drivers]
fake_agent = neutron.tests.unit.plugins.ml2.drivers.mech_fake_agent:FakeAgentMechanismDriver
l2population = neutron.plugins.ml2.drivers.l2pop.mech_driver:L2populationMechanismDriver
linuxbridge = neutron.plugins.ml2.drivers.linuxbridge.mech_driver.mech_linuxbridge:LinuxbridgeMechanismDriver
logger = neutron.tests.unit.plugins.ml2.drivers.mechanism_logger:LoggerMechanismDriver
macvtap = neutron.plugins.ml2.drivers.macvtap.mech_driver.mech_macvtap:MacvtapMechanismDriver
openvswitch = neutron.plugins.ml2.drivers.openvswitch.mech_driver.mech_openvswitch:OpenvswitchMechanismDriver
sriovnicswitch = neutron.plugins.ml2.drivers.mech_sriov.mech_driver.mech_driver:SriovNicSwitchMechanismDriver
test = neutron.tests.unit.plugins.ml2.drivers.mechanism_test:TestMechanismDriver

[neutron.ml2.type_drivers]
flat = neutron.plugins.ml2.drivers.type_flat:FlatTypeDriver
geneve = neutron.plugins.ml2.drivers.type_geneve:GeneveTypeDriver
gre = neutron.plugins.ml2.drivers.type_gre:GreTypeDriver
local = neutron.plugins.ml2.drivers.type_local:LocalTypeDriver
vlan = neutron.plugins.ml2.drivers.type_vlan:VlanTypeDriver
vxlan = neutron.plugins.ml2.drivers.type_vxlan:VxlanTypeDriver
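The [neutron.ml2.*] groups above are driver namespaces: the ML2 plugin
looks up the short names configured in ml2_conf.ini (for example
mechanism_drivers = openvswitch) against these entry points.  A minimal
sketch of that lookup, assuming stevedore is available as it is in any
Neutron environment::

    # Sketch only: load one ML2 mechanism driver class by entry-point name.
    from stevedore import driver

    mgr = driver.DriverManager(
        namespace='neutron.ml2.mechanism_drivers',
        name='openvswitch',
        invoke_on_load=False)  # import the class, do not instantiate it
    # mgr.driver is the OpenvswitchMechanismDriver class from the mapping.
    print(mgr.driver)
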
neutron.services.vpn.plugin.VPNDriverPlugin = neutron_vpnaas.services.vpn.plugin:VPNDriverPlugin qos = neutron.services.qos.qos_plugin:QoSPlugin router = neutron.services.l3_router.l3_router_plugin:L3RouterPlugin tag = neutron.services.tag.tag_plugin:TagPlugin timestamp_core = neutron.services.timestamp.timestamp_plugin:TimeStampPlugin vpnaas = neutron_vpnaas.services.vpn.plugin:VPNDriverPlugin [neutron.services.external_dns_drivers] designate = neutron.services.externaldns.drivers.designate.driver:Designate [oslo.config.opts] neutron = neutron.opts:list_opts neutron.agent = neutron.opts:list_agent_opts neutron.base.agent = neutron.opts:list_base_agent_opts neutron.bgp.agent = neutron.services.bgp.common.opts:list_bgp_agent_opts neutron.db = neutron.opts:list_db_opts neutron.dhcp.agent = neutron.opts:list_dhcp_agent_opts neutron.extensions = neutron.opts:list_extension_opts neutron.l3.agent = neutron.opts:list_l3_agent_opts neutron.metadata.agent = neutron.opts:list_metadata_agent_opts neutron.metering.agent = neutron.opts:list_metering_agent_opts neutron.ml2 = neutron.opts:list_ml2_conf_opts neutron.ml2.linuxbridge.agent = neutron.opts:list_linux_bridge_opts neutron.ml2.macvtap.agent = neutron.opts:list_macvtap_opts neutron.ml2.ovs.agent = neutron.opts:list_ovs_opts neutron.ml2.sriov = neutron.opts:list_ml2_conf_sriov_opts neutron.ml2.sriov.agent = neutron.opts:list_sriov_agent_opts neutron.qos = neutron.opts:list_qos_opts nova.auth = neutron.opts:list_auth_opts [oslo.config.opts.defaults] neutron = neutron.common.config:set_cors_middleware_defaults [oslo.messaging.notify.drivers] neutron.openstack.common.notifier.log_notifier = oslo_messaging.notify._impl_log:LogDriver neutron.openstack.common.notifier.no_op_notifier = oslo_messaging.notify._impl_noop:NoOpDriver neutron.openstack.common.notifier.rpc_notifier = oslo_messaging.notify.messaging:MessagingDriver neutron.openstack.common.notifier.rpc_notifier2 = oslo_messaging.notify.messaging:MessagingV2Driver neutron.openstack.common.notifier.test_notifier = oslo_messaging.notify._impl_test:TestDriver neutron-8.4.0/neutron.egg-info/PKG-INFO0000664000567000056710000000407013044373206020642 0ustar jenkinsjenkins00000000000000Metadata-Version: 1.1 Name: neutron Version: 8.4.0 Summary: OpenStack Networking Home-page: http://www.openstack.org/ Author: OpenStack Author-email: openstack-dev@lists.openstack.org License: UNKNOWN Description: Welcome! ======== You have come across a cloud computing network fabric controller. It has identified itself as "Neutron." It aims to tame your (cloud) networking! External Resources: =================== The homepage for Neutron is: http://launchpad.net/neutron. Use this site for asking for help, and filing bugs. Code is available on git.openstack.org at . The latest and most in-depth documentation on how to use Neutron is available at: . This includes: Neutron Administrator Guide http://docs.openstack.org/admin-guide-cloud/networking.html Networking Guide http://docs.openstack.org/networking-guide/ Neutron API Reference: http://docs.openstack.org/api/openstack-network/2.0/content/ Current Neutron developer documentation is available at: http://wiki.openstack.org/NeutronDevelopment For help on usage and hacking of Neutron, please send mail to . For information on how to contribute to Neutron, please see the contents of the CONTRIBUTING.rst file. 
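The entry_points.txt metadata above is what ties the package together: each neutron-* console script and every pluggable driver group (neutron.core_plugins, neutron.ml2.mechanism_drivers, neutron.service_plugins, and so on) is resolved by short alias through setuptools entry points. As an illustrative, hedged sketch (not code shipped in this tree), an alias from the listing can be turned into its implementing class with the standard pkg_resources API:

    # Sketch only: resolve a Neutron entry-point alias to its class.
    # The group/alias names ('neutron.core_plugins', 'ml2') are taken
    # verbatim from the entry_points.txt listing above.
    import pkg_resources

    def load_entry_point(group, name):
        """Return the first object registered under group/name."""
        for ep in pkg_resources.iter_entry_points(group, name):
            return ep.load()
        raise RuntimeError('%s not found in group %s' % (name, group))

    if __name__ == '__main__':
        plugin_cls = load_entry_point('neutron.core_plugins', 'ml2')
        print(plugin_cls)  # -> neutron.plugins.ml2.plugin.Ml2Plugin

This is the same lookup that stevedore performs under the hood, which is why configuration options such as core_plugin can be set to a short alias instead of a full import path.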
Platform: UNKNOWN Classifier: Environment :: OpenStack Classifier: Intended Audience :: Information Technology Classifier: Intended Audience :: System Administrators Classifier: License :: OSI Approved :: Apache Software License Classifier: Operating System :: POSIX :: Linux Classifier: Programming Language :: Python Classifier: Programming Language :: Python :: 2 Classifier: Programming Language :: Python :: 2.7 Classifier: Programming Language :: Python :: 3 Classifier: Programming Language :: Python :: 3.4 neutron-8.4.0/neutron.egg-info/top_level.txt0000664000567000056710000000001013044373206022265 0ustar jenkinsjenkins00000000000000neutron neutron-8.4.0/neutron.egg-info/pbr.json0000664000567000056710000000005613044373206021223 0ustar jenkinsjenkins00000000000000{"is_release": true, "git_version": "6f3869d"}neutron-8.4.0/README.rst0000664000567000056710000000211513044372760016052 0ustar jenkinsjenkins00000000000000Welcome! ======== You have come across a cloud computing network fabric controller. It has identified itself as "Neutron." It aims to tame your (cloud) networking! External Resources: =================== The homepage for Neutron is: http://launchpad.net/neutron. Use this site for asking for help, and filing bugs. Code is available on git.openstack.org at . The latest and most in-depth documentation on how to use Neutron is available at: . This includes: Neutron Administrator Guide http://docs.openstack.org/admin-guide-cloud/networking.html Networking Guide http://docs.openstack.org/networking-guide/ Neutron API Reference: http://docs.openstack.org/api/openstack-network/2.0/content/ Current Neutron developer documentation is available at: http://wiki.openstack.org/NeutronDevelopment For help on usage and hacking of Neutron, please send mail to . For information on how to contribute to Neutron, please see the contents of the CONTRIBUTING.rst file. neutron-8.4.0/CONTRIBUTING.rst0000664000567000056710000000106013044372760017022 0ustar jenkinsjenkins00000000000000If you would like to contribute to the development of OpenStack, you must follow the steps documented at: http://docs.openstack.org/infra/manual/developers.html#development-workflow Once those steps have been completed, changes to OpenStack should be submitted for review via the Gerrit tool, following the workflow documented at: http://docs.openstack.org/infra/manual/developers.html#development-workflow Pull requests submitted through GitHub will be ignored. Bugs should be filed on Launchpad, not GitHub: https://bugs.launchpad.net/neutron neutron-8.4.0/babel.cfg0000664000567000056710000000002113044372736016106 0ustar jenkinsjenkins00000000000000[python: **.py] neutron-8.4.0/tools/0000775000567000056710000000000013044373210015513 5ustar jenkinsjenkins00000000000000neutron-8.4.0/tools/install_venv.py0000664000567000056710000000464713044372736020620 0ustar jenkinsjenkins00000000000000#!/usr/bin/env python # Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # # Copyright 2010 OpenStack Foundation. # Copyright 2013 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Installation script for Neutron's development virtualenv """ from __future__ import print_function import os import sys import install_venv_common as install_venv def print_help(): help = """ Neutron development environment setup is complete. Neutron development uses virtualenv to track and manage Python dependencies while in development and testing. To activate the Neutron virtualenv for the extent of your current shell session you can run: $ source .venv/bin/activate Or, if you prefer, you can run commands in the virtualenv on a case by case basis by running: $ tools/with_venv.sh Also, make test will automatically use the virtualenv. """ print(help) def main(argv): if 'tools_path' in os.environ: root = os.environ['tools_path'] else: root = os.path.dirname(os.path.dirname(os.path.realpath(__file__))) if 'venv' in os.environ: venv = os.environ['venv'] else: venv = os.path.join(root, '.venv') pip_requires = os.path.join(root, 'requirements.txt') test_requires = os.path.join(root, 'test-requirements.txt') py_version = "python%s.%s" % (sys.version_info[0], sys.version_info[1]) project = 'Neutron' install = install_venv.InstallVenv(root, venv, pip_requires, test_requires, py_version, project) options = install.parse_args(argv) install.check_python_version() install.check_dependencies() install.create_virtualenv(no_site_packages=options.no_site_packages) install.install_dependencies() print_help() if __name__ == '__main__': main(sys.argv) neutron-8.4.0/tools/with_venv.sh0000775000567000056710000000153513044372736020103 0ustar jenkinsjenkins00000000000000#!/usr/bin/env bash # Copyright 2011 OpenStack Foundation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. tools_path=${tools_path:-$(dirname $0)} venv_path=${venv_path:-${tools_path}} venv_dir=${venv_name:-/../.venv} TOOLS=${tools_path} VENV=${venv:-${venv_path}/${venv_dir}} source $VENV/bin/activate && "$@" neutron-8.4.0/tools/milestone-review-dash.py0000775000567000056710000001113113044372760022311 0ustar jenkinsjenkins00000000000000#!/usr/bin/env python # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
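tools/with_venv.sh above is a thin wrapper: it activates the virtualenv that install_venv.py created and then execs whatever command it was given. For readers who prefer to see the behavior spelled out, here is a hedged Python equivalent (an illustration, not a file in this tree); activation boils down to exporting VIRTUAL_ENV and putting the venv's bin directory first on PATH:

    # Sketch only: what 'tools/with_venv.sh <cmd>' effectively does.
    import os
    import subprocess
    import sys

    def run_in_venv(venv_dir, cmd):
        env = dict(os.environ)
        env['VIRTUAL_ENV'] = os.path.abspath(venv_dir)
        # Prepending bin/ makes the venv's python/pip shadow the system ones.
        env['PATH'] = (os.path.join(env['VIRTUAL_ENV'], 'bin') +
                       os.pathsep + env.get('PATH', ''))
        return subprocess.call(cmd, env=env)

    if __name__ == '__main__':
        sys.exit(run_in_venv('.venv', sys.argv[1:] or ['python', '--version']))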
import argparse import sys from launchpadlib.launchpad import Launchpad def is_milestone_valid(project, name): milestone_names = [] for s in project.active_milestones: milestone_names.append(s.name) if name == s.name: return True print("No active milestone found") print("List of active milestones %s" % milestone_names) return False def _search_task(project, **kwargs): bugs = project.searchTasks(**kwargs) if not bugs: return gerrit_query = "(" for b in bugs: gerrit_query += ("message:%d OR " % b.bug.id) gerrit_query = gerrit_query[:-4] gerrit_query += ")\n\n" return gerrit_query def get_approved_rfe_query(project): return _search_task(project, **{'tags': ['rfe-approved']}) def get_critical_bugs_query(project): return _search_task(project, **{'status': ["In Progress"], 'importance': ["Critical"]}) def get_high_bugs_query(project): return _search_task(project, **{'status': ["In Progress"], 'importance': ["High"]}) def get_specs_query(project, milestone): query = "(" for s in project.valid_specifications: if s.milestone is not None: if s.milestone.name == milestone: query += ("topic:bp/%s OR " % s.name) if query == "(": # no blueprint was found return query = query[:-4] query += ")\n" return query def write_section(f, section_name, query): print(section_name) if query: f.write("[section \"") f.write(section_name) f.write("\"]\n") f.write("query = ") f.write(query) print(query) else: print("No result found\n") def write_queries_for_project(f, project, milestone): query = get_approved_rfe_query(project) section_name = "Approved RFE %s" % project.name write_section(f, section_name, query) query = get_critical_bugs_query(project) section_name = "Critical Bugs %s" % project.name write_section(f, section_name, query) query = get_high_bugs_query(project) section_name = "High Bugs %s" % project.name write_section(f, section_name, query) query = get_specs_query(project, milestone) section_name = "Blueprints %s" % project.name write_section(f, section_name, query) parser = argparse.ArgumentParser( description='Create dashboard for critical/high bugs, approved rfe and' ' blueprints. A .dash file will be created in the current' ' folder that you can serve as input for gerrit-dash-creator.' 
' The output of the script can be used to query Gerrit' ' directly.') parser.add_argument('milestone', type=str, help='The release milestone') parser.add_argument('-o', '--output', type=str, help='Output file') args = parser.parse_args() milestone = args.milestone if args.output: file_name = args.output else: file_name = milestone + '.dash' cachedir = "~/.launchpadlib/cache/" launchpad = Launchpad.login_anonymously('just testing', 'production', cachedir, version="devel") neutron = launchpad.projects['neutron'] neutron_client = launchpad.projects['python-neutronclient'] if not is_milestone_valid(neutron, milestone): sys.exit() with open(file_name, 'w') as f: title = "[dashboard]\ntitle = Neutron %s Review Inbox\n" % milestone f.write(title) f.write("description = Review Inbox\n") f.write("foreach = (project:openstack/neutron OR " "project:openstack/python-neutronclient OR " "project:openstack/neutron-specs OR " "project:openstack/neutron-fwaas OR " "project:openstack/neutron-lbaas OR " "project:openstack/neutron-vpnaas) status:open NOT owner:self " "NOT label:Workflow<=-1 " "NOT label:Code-Review>=-2,self branch:master\n") f.write("\n") print("Querying Launchpad, this might take a while...") write_queries_for_project(f, neutron, milestone) write_queries_for_project(f, neutron_client, milestone) neutron-8.4.0/tools/deploy_rootwrap.sh0000775000567000056710000000356013044372736021323 0ustar jenkinsjenkins00000000000000#!/usr/bin/env bash # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. set -eu if [ "$#" -ne 3 ]; then >&2 echo "Usage: $0 /path/to/neutron /path/to/target/etc /path/to/target/bin Deploy Neutron's rootwrap configuration. Warning: Any existing rootwrap files at the specified etc path will be removed by this script. Optional: set OS_SUDO_TESTING=1 to deploy the filters required by Neutron's functional testing suite." exit 1 fi OS_SUDO_TESTING=${OS_SUDO_TESTING:-0} neutron_path=$1 target_etc_path=$2 target_bin_path=$3 src_conf_path=${neutron_path}/etc src_conf=${src_conf_path}/rootwrap.conf src_rootwrap_path=${src_conf_path}/neutron/rootwrap.d dst_conf_path=${target_etc_path}/neutron dst_conf=${dst_conf_path}/rootwrap.conf dst_rootwrap_path=${dst_conf_path}/rootwrap.d if [[ -d "$dst_rootwrap_path" ]]; then rm -rf ${dst_rootwrap_path} fi mkdir -p -m 755 ${dst_rootwrap_path} cp -p ${src_rootwrap_path}/* ${dst_rootwrap_path}/ cp -p ${src_conf} ${dst_conf} sed -i "s:^filters_path=.*$:filters_path=${dst_rootwrap_path}:" ${dst_conf} sed -i "s:^\(exec_dirs=.*\)$:\1,${target_bin_path}:" ${dst_conf} if [[ "$OS_SUDO_TESTING" = "1" ]]; then sed -i 's/use_syslog=False/use_syslog=True/g' ${dst_conf} sed -i 's/syslog_log_level=ERROR/syslog_log_level=DEBUG/g' ${dst_conf} cp -p ${neutron_path}/neutron/tests/contrib/functional-testing.filters \ ${dst_rootwrap_path}/ fi neutron-8.4.0/tools/coding-checks.sh0000775000567000056710000000242513044372736020572 0ustar jenkinsjenkins00000000000000#!/bin/sh set -eu usage () { echo "Usage: $0 [OPTION]..." 
echo "Run Neutron's coding check(s)" echo "" echo " -Y, --pylint [] Run pylint check on the entire neutron module or just files changed in basecommit (e.g. HEAD~1)" echo " -h, --help Print this usage message" echo exit 0 } process_options () { i=1 while [ $i -le $# ]; do eval opt=\$$i case $opt in -h|--help) usage;; -Y|--pylint) pylint=1;; *) scriptargs="$scriptargs $opt" esac i=$((i+1)) done } run_pylint () { local target="${scriptargs:-all}" if [ "$target" = "all" ]; then files="neutron" else case "$target" in *HEAD~[0-9]*) files=$(git diff --diff-filter=AM --name-only $target -- "*.py");; *) echo "$target is an unrecognized basecommit"; exit 1;; esac fi echo "Running pylint..." echo "You can speed this up by running it on 'HEAD~[0-9]' (e.g. HEAD~1, this change only)..." if [ -n "${files}" ]; then pylint --rcfile=.pylintrc --output-format=colorized ${files} else echo "No python changes in this commit, pylint check not required." exit 0 fi } scriptargs= pylint=1 process_options $@ if [ $pylint -eq 1 ]; then run_pylint exit 0 fi neutron-8.4.0/tools/configure_for_func_testing.sh0000775000567000056710000002141513044372760023465 0ustar jenkinsjenkins00000000000000#!/usr/bin/env bash # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. set -e # Control variable used to determine whether to execute this script # directly or allow the gate_hook to import. IS_GATE=${IS_GATE:-False} USE_CONSTRAINT_ENV=${USE_CONSTRAINT_ENV:-True} if [[ "$IS_GATE" != "True" ]] && [[ "$#" -lt 1 ]]; then >&2 echo "Usage: $0 /path/to/devstack [-i] Configure a host to run Neutron's functional test suite. -i Install Neutron's package dependencies. By default, it is assumed that devstack has already been used to deploy neutron to the target host and that package dependencies need not be installed. Warning: This script relies on devstack to perform extensive modification to the underlying host. It is recommended that it be invoked only on a throw-away VM." exit 1 fi # Skip the first argument OPTIND=2 while getopts ":i" opt; do case $opt in i) INSTALL_BASE_DEPENDENCIES=True ;; esac done # Default to environment variables to permit the gate_hook to override # when sourcing. VENV=${VENV:-dsvm-functional} DEVSTACK_PATH=${DEVSTACK_PATH:-$1} PROJECT_NAME=${PROJECT_NAME:-neutron} REPO_BASE=${GATE_DEST:-$(cd $(dirname "$0")/../.. && pwd)} INSTALL_MYSQL_ONLY=${INSTALL_MYSQL_ONLY:-False} # The gate should automatically install dependencies. INSTALL_BASE_DEPENDENCIES=${INSTALL_BASE_DEPENDENCIES:-$IS_GATE} if [ ! -f "$DEVSTACK_PATH/stack.sh" ]; then >&2 echo "Unable to find devstack at '$DEVSTACK_PATH'. Please verify that the specified path points to a valid devstack repo." exit 1 fi set -x function _init { # Subsequently-called devstack functions depend on the following variables. HOST_IP=127.0.0.1 FILES=$DEVSTACK_PATH/files TOP_DIR=$DEVSTACK_PATH source $DEVSTACK_PATH/stackrc # Allow the gate to override values set by stackrc. 
DEST=${GATE_DEST:-$DEST}
    STACK_USER=${GATE_STACK_USER:-$STACK_USER}
}

function _install_base_deps {
    echo_summary "Installing base dependencies"

    INSTALL_TESTONLY_PACKAGES=True
    PACKAGES=$(get_packages general,neutron,q-agt,q-l3)
    # Do not install 'python-' prefixed packages other than
    # python-dev*. Neutron's functional testing relies on deployment
    # to a tox env so there is no point in installing python
    # dependencies system-wide.
    PACKAGES=$(echo $PACKAGES | perl -pe 's|python-(?!dev)[^ ]*||g')
    install_package $PACKAGES
}

function _install_rpc_backend {
    echo_summary "Installing rabbitmq"

    RABBIT_USERID=${RABBIT_USERID:-stackrabbit}
    RABBIT_HOST=${RABBIT_HOST:-$SERVICE_HOST}
    RABBIT_PASSWORD=${RABBIT_PASSWORD:-secretrabbit}

    source $DEVSTACK_PATH/lib/rpc_backend

    enable_service rabbit
    install_rpc_backend
    restart_rpc_backend
}

# _install_databases [install_pg]
function _install_databases {
    local install_pg=${1:-True}

    echo_summary "Installing databases"

    # Avoid attempting to configure the db if it appears to already
    # have run.  The setup as currently defined is not idempotent.
    if mysql openstack_citest > /dev/null 2>&1 < /dev/null; then
        echo_summary "DB config appears to be complete, skipping."
        return 0
    fi

    MYSQL_PASSWORD=${MYSQL_PASSWORD:-secretmysql}
    DATABASE_PASSWORD=${DATABASE_PASSWORD:-secretdatabase}

    source $DEVSTACK_PATH/lib/database

    enable_service mysql
    initialize_database_backends
    install_database
    configure_database_mysql

    if [[ "$install_pg" == "True" ]]; then
        enable_service postgresql
        initialize_database_backends
        install_database
        configure_database_postgresql
    fi

    # Set up the 'openstack_citest' user and database in each backend
    tmp_dir=$(mktemp -d)
    trap "rm -rf $tmp_dir" EXIT

    cat << EOF > $tmp_dir/mysql.sql
CREATE DATABASE openstack_citest;
CREATE USER 'openstack_citest'@'localhost' IDENTIFIED BY 'openstack_citest';
CREATE USER 'openstack_citest' IDENTIFIED BY 'openstack_citest';
GRANT ALL PRIVILEGES ON *.* TO 'openstack_citest'@'localhost';
GRANT ALL PRIVILEGES ON *.* TO 'openstack_citest';
FLUSH PRIVILEGES;
EOF
    /usr/bin/mysql -u root < $tmp_dir/mysql.sql

    if [[ "$install_pg" == "True" ]]; then
        cat << EOF > $tmp_dir/postgresql.sql
CREATE USER openstack_citest WITH CREATEDB LOGIN PASSWORD 'openstack_citest';
CREATE DATABASE openstack_citest WITH OWNER openstack_citest;
EOF
        # User/group postgres needs to be given access to tmp_dir
        setfacl -m g:postgres:rwx $tmp_dir
        sudo -u postgres /usr/bin/psql --file=$tmp_dir/postgresql.sql
    fi
}

function _install_agent_deps {
    echo_summary "Installing agent dependencies"

    source $DEVSTACK_PATH/lib/neutron-legacy

    ENABLED_SERVICES=q-agt,q-dhcp,q-l3
    install_neutron_agent_packages
}

# Set up the rootwrap sudoers for neutron to target the rootwrap
# configuration deployed in the venv.
function _install_rootwrap_sudoers {
    echo_summary "Installing rootwrap sudoers file"

    PROJECT_VENV=$REPO_BASE/$PROJECT_NAME/.tox/$VENV
    ROOTWRAP_SUDOER_CMD="$PROJECT_VENV/bin/neutron-rootwrap $PROJECT_VENV/etc/neutron/rootwrap.conf *"
    ROOTWRAP_DAEMON_SUDOER_CMD="$PROJECT_VENV/bin/neutron-rootwrap-daemon $PROJECT_VENV/etc/neutron/rootwrap.conf"
    TEMPFILE=$(mktemp)
    cat << EOF > $TEMPFILE
# A bug in oslo.rootwrap [1] prevents commands executed with 'ip netns
# exec' from being automatically qualified with a prefix from
# rootwrap's configured exec_dirs.  To work around this problem, add
# the venv bin path to a user-specific secure_path.
# # While it might seem preferable to set a command-specific # secure_path, this would only ensure the correct path for 'ip netns # exec' and the command targeted for execution in the namespace would # not inherit the path. # # 1: https://bugs.launchpad.net/oslo.rootwrap/+bug/1417331 # Defaults:$STACK_USER secure_path="/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:$PROJECT_VENV/bin" $STACK_USER ALL=(root) NOPASSWD: $ROOTWRAP_SUDOER_CMD $STACK_USER ALL=(root) NOPASSWD: $ROOTWRAP_DAEMON_SUDOER_CMD EOF chmod 0440 $TEMPFILE sudo chown root:root $TEMPFILE # Name the functional testing rootwrap to ensure that it will be # loaded after the devstack rootwrap (50_stack_sh if present) so # that the functional testing secure_path (a superset of what # devstack expects) will not be overwritten. sudo mv $TEMPFILE /etc/sudoers.d/60-neutron-func-test-rootwrap } function _install_post_devstack { echo_summary "Performing post-devstack installation" _install_databases _install_rootwrap_sudoers if is_ubuntu; then install_package isc-dhcp-client install_package netcat-openbsd elif is_fedora; then install_package dhclient else exit_distro_not_supported "installing dhclient package" fi # Installing python-openvswitch from packages is a stop-gap while # python-openvswitch remains unavailable from pypi. This also # requires that sitepackages=True be set in tox.ini to allow the # venv to use the installed package. Once python-openvswitch # becomes available on pypi, this will no longer be required. # # NOTE: the package name 'python-openvswitch' is common across # supported distros. install_package python-openvswitch } function _configure_iptables_rules { # For linuxbridge agent fullstack tests we need to add special rules to # iptables for connection of agents to rabbitmq: CHAIN_NAME="openstack-INPUT" sudo iptables -n --list $CHAIN_NAME 1> /dev/null 2>&1 || CHAIN_NAME="INPUT" sudo iptables -I $CHAIN_NAME -s 240.0.0.0/8 -p tcp -m tcp -d 240.0.0.0/8 --dport 5672 -j ACCEPT } function configure_host_for_func_testing { echo_summary "Configuring host for functional testing" if [[ "$INSTALL_BASE_DEPENDENCIES" == "True" ]]; then # Installing of the following can be achieved via devstack by # installing neutron, so their installation is conditional to # minimize the work to do on a devstack-configured host. _install_base_deps _install_agent_deps _install_rpc_backend fi _install_post_devstack } _init if [[ "$IS_GATE" != "True" ]]; then if [[ "$INSTALL_MYSQL_ONLY" == "True" ]]; then _install_databases nopg else configure_host_for_func_testing fi fi if [[ "$VENV" =~ "dsvm-fullstack" ]]; then _configure_iptables_rules fi neutron-8.4.0/tools/check_unit_test_structure.sh0000775000567000056710000000437413044372736023371 0ustar jenkinsjenkins00000000000000#!/usr/bin/env bash # This script identifies the unit test modules that do not correspond # directly with a module in the code tree. See TESTING.rst for the # intended structure. neutron_path=$(cd "$(dirname "$0")/.." && pwd) base_test_path=neutron/tests/unit test_path=$neutron_path/$base_test_path test_files=$(find ${test_path} -iname 'test_*.py') ignore_regexes=( # The following test is required for oslo.versionedobjects "^objects/test_objects.py$" # The following open source plugin tests are not actually unit # tests and are ignored pending their relocation to the functional # test tree. 
"^plugins/ml2/drivers/mech_sriov/mech_driver/test_mech_sriov_nic_switch.py$" "^plugins/ml2/test_security_group.py$" "^plugins/ml2/test_port_binding.py$" "^plugins/ml2/test_extension_driver_api.py$" "^plugins/ml2/test_ext_portsecurity.py$" "^plugins/ml2/test_agent_scheduler.py$" "^plugins/ml2/test_tracked_resources.py$" "^plugins/ml2/drivers/openvswitch/agent/test_agent_scheduler.py$" "^plugins/ml2/drivers/openvswitch/agent/test_ovs_tunnel.py$" ) error_count=0 ignore_count=0 total_count=0 for test_file in ${test_files[@]}; do relative_path=${test_file#$test_path/} expected_path=$(dirname $neutron_path/neutron/$relative_path) test_filename=$(basename "$test_file") expected_filename=${test_filename#test_} # Module filename (e.g. foo/bar.py -> foo/test_bar.py) filename=$expected_path/$expected_filename # Package dir (e.g. foo/ -> test_foo.py) package_dir=${filename%.py} if [ ! -f "$filename" ] && [ ! -d "$package_dir" ]; then for ignore_regex in ${ignore_regexes[@]}; do if [[ "$relative_path" =~ $ignore_regex ]]; then ((ignore_count++)) continue 2 fi done echo "Unexpected test file: $base_test_path/$relative_path" ((error_count++)) fi ((total_count++)) done if [ "$ignore_count" -ne 0 ]; then echo "$ignore_count unmatched test modules were ignored" fi if [ "$error_count" -eq 0 ]; then echo 'Success! All test modules match targets in the code tree.' exit 0 else echo "Failure! $error_count of $total_count test modules do not match targets in the code tree." exit 1 fi neutron-8.4.0/tools/pecan_server.sh0000775000567000056710000000246413044372760020545 0ustar jenkinsjenkins00000000000000#!/bin/bash # Copyright (c) 2015 Mirantis, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # A script useful to develop changes to the codebase. It launches the pecan # API server and will reload it whenever the code changes if inotifywait is # installed. inotifywait --help >/dev/null 2>&1 if [[ $? -ne 1 ]]; then USE_INOTIFY=0 else USE_INOTIFY=1 fi DIR=$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )/../ source "$DIR/.tox/py27/bin/activate" COMMAND="python -c 'from neutron.cmd.eventlet import server; server.main_wsgi_pecan()'" function cleanup() { kill $PID exit 0 } if [[ $USE_INOTIFY -eq 1 ]]; then trap cleanup INT while true; do eval "$COMMAND &" PID=$! inotifywait -e modify -r $DIR/neutron/ kill $PID done else eval $COMMAND fi neutron-8.4.0/tools/split.sh0000775000567000056710000000566413044372736017234 0ustar jenkinsjenkins00000000000000#!/bin/sh # # This script has been shamelessly copied and tweaked from original copy: # # https://github.com/openstack/oslo-incubator/blob/master/tools/graduate.sh # # Use this script to export a Neutron module to a separate git repo. # # You can call this script Call script like so: # # ./split.sh # # The file should be a text file like the one below: # # /path/to/file/file1 # /path/to/file/file2 # ... 
# /path/to/file/fileN
#
# Such a list can be generated with a command like this:
#
# find $path -type f  # path is the base dir you want to list files for

set -e

if [ $# -lt 2 ]; then
    echo "Usage $0 <file_list> <project_name>"
    exit 1
fi

set -x

file_list_path="$1"
project_name="$2"

files_to_keep=$(cat $file_list_path)

# Build the grep pattern for ignoring files that we want to keep
keep_pattern="\($(echo $files_to_keep | sed -e 's/^/\^/' -e 's/ /\\|\^/g')\)"
# Prune all other files in every commit
pruner="git ls-files | grep -v \"$keep_pattern\" | git update-index --force-remove --stdin; git ls-files > /dev/stderr"

# Find all first commits with listed files and find a subset of them that
# predates all others
roots=""
for file in $files_to_keep; do
    file_root=$(git rev-list --reverse HEAD -- $file | head -n1)
    fail=0
    new_roots=""
    for root in $roots; do
        if git merge-base --is-ancestor $root $file_root; then
            fail=1
            break
        elif ! git merge-base --is-ancestor $file_root $root; then
            new_roots="$new_roots $root"
        fi
    done
    if [ $fail -ne 1 ]; then
        roots="$new_roots $file_root"
    fi
done

# Purge all parents for those commits
set_roots="
if [ 1 -eq 0 $(for root in $roots; do echo " -o \"\$GIT_COMMIT\" = '$root' "; done) ]; then
    echo '';
else
    cat;
fi"

# Enhance git_commit_non_empty_tree to skip merges with:
# a) either two equal parents (commit that was about to land got purged as well
#    as all commits on mainline);
# b) or with second parent being an ancestor to the first one (just as with a)
#    but when there are some commits on mainline).
# In both cases drop second parent and let git_commit_non_empty_tree to decide
# if commit worth doing (most likely not).
skip_empty=$(cat << \EOF
if [ $# = 5 ] && git merge-base --is-ancestor $5 $3; then
    git_commit_non_empty_tree $1 -p $3
else
    git_commit_non_empty_tree "$@"
fi
EOF
)

# Filter out commits for unrelated files
echo "Pruning commits for unrelated files..."
git filter-branch \
    --index-filter "$pruner" \
    --parent-filter "$set_roots" \
    --commit-filter "$skip_empty" \
    --tag-name-filter cat \
    -- --all

# Generate the new .gitreview file
echo "Generating new .gitreview file..."
cat > .gitreview <
# Port 29418
#
# Note: due to gerrit bug somewhere, this double posts messages. :(

# first purge all the reviews that are more than 4w old and blocked by a core -2

if [ "$1" = "--dry-run" ]; then
    echo "Enabling dry run mode"
    DRY_RUN=1
else
    DRY_RUN=0
fi

set -o errexit

function abandon_review {
    local gitid=$1
    shift
    local msg=$@
    # echo ssh review.openstack.org gerrit review $gitid --abandon --message \"$msg\"
    if [ $DRY_RUN -eq 1 ]; then
        echo "Would abandon $gitid"
    else
        echo "Abandoning $gitid"
        ssh review.openstack.org gerrit review $gitid --abandon --message \"$msg\"
    fi
}

PROJECTS="(project:openstack/neutron OR project:openstack/neutron-fwaas OR \
project:openstack/neutron-lbaas OR project:openstack/neutron-vpnaas OR \
project:openstack/python-neutronclient OR project:openstack/neutron-specs)"

blocked_reviews=$(ssh review.openstack.org "gerrit query --current-patch-set --format json $PROJECTS status:open age:4w label:Code-Review<=-2" | jq .currentPatchSet.revision | grep -v null | sed 's/"//g')

blocked_msg=$(cat <<EOF

This review is > 4 weeks without comment and currently blocked by a
core reviewer with a -2. We are abandoning this for now.

Feel free to reactivate the review by pressing the restore button and
contacting the reviewer with the -2 on this review to ensure you
address their concerns.
EOF ) # For testing, put in a git rev of something you own and uncomment # blocked_reviews="b6c4218ae4d75b86c33fa3d37c27bc23b46b6f0f" for review in $blocked_reviews; do # echo ssh review.openstack.org gerrit review $review --abandon --message \"$msg\" echo "Blocked review $review" abandon_review $review $blocked_msg done # then purge all the reviews that are > 4w with no changes and Jenkins has -1ed failing_reviews=$(ssh review.openstack.org "gerrit query --current-patch-set --format json $PROJECTS status:open age:4w NOT label:Verified>=1,jenkins" | jq .currentPatchSet.revision | grep -v null | sed 's/"//g') failing_msg=$(cat < 4 weeks without comment, and failed Jenkins the last time it was checked. We are abandoning this for now. Feel free to reactivate the review by pressing the restore button and leaving a 'recheck' comment to get fresh test results. EOF ) for review in $failing_reviews; do echo "Failing review $review" abandon_review $review $failing_msg done neutron-8.4.0/tools/install_venv_common.py0000664000567000056710000001350713044372736022163 0ustar jenkinsjenkins00000000000000# Copyright 2013 OpenStack Foundation # Copyright 2013 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Provides methods needed by installation script for OpenStack development virtual environments. Since this script is used to bootstrap a virtualenv from the system's Python environment, it should be kept strictly compatible with Python 2.6. Synced in from openstack-common """ from __future__ import print_function import optparse import os import subprocess import sys class InstallVenv(object): def __init__(self, root, venv, requirements, test_requirements, py_version, project): self.root = root self.venv = venv self.requirements = requirements self.test_requirements = test_requirements self.py_version = py_version self.project = project def die(self, message, *args): print(message % args, file=sys.stderr) sys.exit(1) def check_python_version(self): if sys.version_info < (2, 6): self.die("Need Python Version >= 2.6") def run_command_with_code(self, cmd, redirect_output=True, check_exit_code=True): """Runs a command in an out-of-process shell. Returns the output of that command. Working directory is self.root. 
""" if redirect_output: stdout = subprocess.PIPE else: stdout = None proc = subprocess.Popen(cmd, cwd=self.root, stdout=stdout) output = proc.communicate()[0] if check_exit_code and proc.returncode != 0: self.die('Command "%s" failed.\n%s', ' '.join(cmd), output) return (output, proc.returncode) def run_command(self, cmd, redirect_output=True, check_exit_code=True): return self.run_command_with_code(cmd, redirect_output, check_exit_code)[0] def get_distro(self): if (os.path.exists('/etc/fedora-release') or os.path.exists('/etc/redhat-release')): return Fedora( self.root, self.venv, self.requirements, self.test_requirements, self.py_version, self.project) else: return Distro( self.root, self.venv, self.requirements, self.test_requirements, self.py_version, self.project) def check_dependencies(self): self.get_distro().install_virtualenv() def create_virtualenv(self, no_site_packages=True): """Creates the virtual environment and installs PIP. Creates the virtual environment and installs PIP only into the virtual environment. """ if not os.path.isdir(self.venv): print('Creating venv...', end=' ') if no_site_packages: self.run_command(['virtualenv', '-q', '--no-site-packages', self.venv]) else: self.run_command(['virtualenv', '-q', self.venv]) print('done.') else: print("venv already exists...") pass def pip_install(self, *args): self.run_command(['tools/with_venv.sh', 'pip', 'install', '--upgrade'] + list(args), redirect_output=False) def install_dependencies(self): print('Installing dependencies with pip (this can take a while)...') # First things first, make sure our venv has the latest pip and # setuptools and pbr self.pip_install('pip>=1.4') self.pip_install('setuptools') self.pip_install('pbr') self.pip_install('-r', self.requirements, '-r', self.test_requirements) def parse_args(self, argv): """Parses command-line arguments.""" parser = optparse.OptionParser() parser.add_option('-n', '--no-site-packages', action='store_true', help="Do not inherit packages from global Python " "install.") return parser.parse_args(argv[1:])[0] class Distro(InstallVenv): def check_cmd(self, cmd): return bool(self.run_command(['which', cmd], check_exit_code=False).strip()) def install_virtualenv(self): if self.check_cmd('virtualenv'): return if self.check_cmd('easy_install'): print('Installing virtualenv via easy_install...', end=' ') if self.run_command(['easy_install', 'virtualenv']): print('Succeeded') return else: print('Failed') self.die('ERROR: virtualenv not found.\n\n%s development' ' requires virtualenv, please install it using your' ' favorite package management tool' % self.project) class Fedora(Distro): """This covers all Fedora-based distributions. Includes: Fedora, RHEL, CentOS, Scientific Linux """ def check_pkg(self, pkg): return self.run_command_with_code(['rpm', '-q', pkg], check_exit_code=False)[1] == 0 def install_virtualenv(self): if self.check_cmd('virtualenv'): return if not self.check_pkg('python-virtualenv'): self.die("Please install 'python-virtualenv'.") super(Fedora, self).install_virtualenv() neutron-8.4.0/tools/misc-sanity-checks.sh0000775000567000056710000000441213044372760021562 0ustar jenkinsjenkins00000000000000#! /bin/sh # Copyright (C) 2014 VA Linux Systems Japan K.K. # Copyright (C) 2014 YAMAMOTO Takashi # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. TMPDIR=`mktemp -d /tmp/${0##*/}.XXXXXX` || exit 1 export TMPDIR trap "rm -rf $TMPDIR" EXIT FAILURES=$TMPDIR/failures check_no_symlinks_allowed () { # Symlinks break the package build process, so ensure that they # do not slip in, except hidden symlinks. if [ $(find . -type l ! -path '*/.*' | wc -l) -ge 1 ]; then echo "Symlinks are not allowed!" >>$FAILURES fi } check_pot_files_errors () { # The job neutron-propose-translation-update does not update from # transifex since our po files contain duplicate entries where # obsolete entries duplicate normal entries. Prevent obsolete # entries to slip in find neutron -type f -regex '.*\.pot?' \ -print0|xargs -0 -n 1 msgfmt --check-format \ -o /dev/null if [ "$?" -ne 0 ]; then echo "PO files syntax is not correct!" >>$FAILURES fi } check_identical_policy_files () { # For unit tests, we maintain their own policy.json file to make test suite # independent of whether it's executed from the neutron source tree or from # site-packages installation path. We don't want two copies of the same # file to diverge, so checking that they are identical diff etc/policy.json neutron/tests/etc/policy.json 2>&1 > /dev/null if [ "$?" -ne 0 ]; then echo "policy.json files must be identical!" >>$FAILURES fi } # Add your checks here... check_no_symlinks_allowed check_pot_files_errors check_identical_policy_files # Fail, if there are emitted failures if [ -f $FAILURES ]; then cat $FAILURES exit 1 fi neutron-8.4.0/tools/generate_config_file_samples.sh0000775000567000056710000000144013044372736023727 0ustar jenkinsjenkins00000000000000#!/bin/sh # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. set -e GEN_CMD=oslo-config-generator if ! type "$GEN_CMD" > /dev/null; then echo "ERROR: $GEN_CMD not installed on the system." exit 1 fi for file in `ls etc/oslo-config-generator/*`; do $GEN_CMD --config-file=$file done set -x neutron-8.4.0/test-requirements.txt0000664000567000056710000000155213044372760020630 0ustar jenkinsjenkins00000000000000# The order of packages is significant, because pip processes them in the order # of appearance. Changing the order has an impact on the overall integration # process, which may cause wedges in the gate later. 
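The check_identical_policy_files sanity check in tools/misc-sanity-checks.sh above guards against the two copies of policy.json drifting apart. The same invariant can be expressed in a few lines of Python with the stdlib filecmp module -- a hedged sketch for illustration only (the gate runs the shell version):

    # Sketch only: the "policy files must be identical" invariant.
    import filecmp
    import sys

    def check_identical(path_a, path_b):
        # shallow=False forces a byte-by-byte comparison, not just stat().
        if not filecmp.cmp(path_a, path_b, shallow=False):
            print('%s and %s must be identical!' % (path_a, path_b))
            return 1
        return 0

    if __name__ == '__main__':
        sys.exit(check_identical('etc/policy.json',
                                 'neutron/tests/etc/policy.json'))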
hacking<0.11,>=0.10.0 cliff!=1.16.0,!=1.17.0,>=1.15.0 # Apache-2.0 coverage>=3.6 # Apache-2.0 fixtures<2.0,>=1.3.1 # Apache-2.0/BSD mock>=1.2 # BSD python-subunit>=0.0.18 # Apache-2.0/BSD requests-mock>=0.7.0 # Apache-2.0 sphinx!=1.2.0,!=1.3b1,<1.3,>=1.1.2 # BSD oslosphinx!=3.4.0,>=2.5.0 # Apache-2.0 testrepository>=0.0.18 # Apache-2.0/BSD testtools>=1.4.0 # MIT testresources>=0.2.4 # Apache-2.0/BSD testscenarios>=0.4 # Apache-2.0/BSD WebTest>=2.0 # MIT oslotest>=1.10.0 # Apache-2.0 os-testr>=0.4.1 # Apache-2.0 ddt>=1.0.1 # MIT pylint==1.4.5 # GNU GPL v2 reno>=0.1.1 # Apache2 # Needed to run DB commands in virtualenvs PyMySQL!=0.7.7,>=0.6.2 # MIT License neutron-8.4.0/setup.py0000664000567000056710000000200413044372736016075 0ustar jenkinsjenkins00000000000000# Copyright (c) 2013 Hewlett-Packard Development Company, L.P. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. # THIS FILE IS MANAGED BY THE GLOBAL REQUIREMENTS REPO - DO NOT EDIT import setuptools # In python < 2.7.4, a lazy loading of package `pbr` will break # setuptools if some other modules registered functions in `atexit`. # solution from: http://bugs.python.org/issue15881#msg170215 try: import multiprocessing # noqa except ImportError: pass setuptools.setup( setup_requires=['pbr>=1.8'], pbr=True) neutron-8.4.0/releasenotes/0000775000567000056710000000000013044373210017044 5ustar jenkinsjenkins00000000000000neutron-8.4.0/releasenotes/source/0000775000567000056710000000000013044373210020344 5ustar jenkinsjenkins00000000000000neutron-8.4.0/releasenotes/source/liberty.rst0000664000567000056710000000022213044372736022560 0ustar jenkinsjenkins00000000000000============================== Liberty Series Release Notes ============================== .. release-notes:: :branch: origin/stable/liberty neutron-8.4.0/releasenotes/source/_templates/0000775000567000056710000000000013044373210022501 5ustar jenkinsjenkins00000000000000neutron-8.4.0/releasenotes/source/_templates/.placeholder0000664000567000056710000000000013044372736024766 0ustar jenkinsjenkins00000000000000neutron-8.4.0/releasenotes/source/README.rst0000664000567000056710000000076513044372760022054 0ustar jenkinsjenkins00000000000000=========================== Neutron Release Notes Howto =========================== Release notes are a new feature for documenting new features in OpenStack projects. Background on the process, tooling, and methodology is documented in a `mailing list post by Doug Hellman `_. For information on how to create release notes, please consult the `Release Notes documentation `_. neutron-8.4.0/releasenotes/source/index.rst0000664000567000056710000000021513044372760022214 0ustar jenkinsjenkins00000000000000======================= Neutron Release Notes ======================= .. 
toctree:: :maxdepth: 1 README.rst liberty unreleased neutron-8.4.0/releasenotes/source/conf.py0000664000567000056710000002160713044372760021662 0ustar jenkinsjenkins00000000000000# -*- coding: utf-8 -*- # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. # Neutron Release Notes documentation build configuration file, created by # sphinx-quickstart on Tue Nov 3 17:40:50 2015. # # This file is execfile()d with the current directory set to its # containing dir. # # Note that not all possible configuration values are present in this # autogenerated file. # # All configuration values have a default; values that are commented out # serve to show the default. # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. # sys.path.insert(0, os.path.abspath('.')) # -- General configuration ------------------------------------------------ # If your documentation needs a minimal Sphinx version, state it here. # needs_sphinx = '1.0' # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom # ones. extensions = [ 'oslosphinx', 'reno.sphinxext', ] # Add any paths that contain templates here, relative to this directory. templates_path = ['_templates'] # The suffix of source filenames. source_suffix = '.rst' # The encoding of source files. # source_encoding = 'utf-8-sig' # The master toctree document. master_doc = 'index' # General information about the project. project = u'Neutron Release Notes' copyright = u'2015, Neutron Developers' # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the # built documents. # # The short X.Y version. from neutron.version import version_info as neutron_version # The full version, including alpha/beta/rc tags. release = neutron_version.version_string_with_vcs() # The short X.Y version. version = neutron_version.canonical_version_string() # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. # language = None # There are two options for replacing |today|: either, you set today to some # non-false value, then it is used: # today = '' # Else, today_fmt is used as the format for a strftime call. # today_fmt = '%B %d, %Y' # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. exclude_patterns = [] # The reST default role (used for this markup: `text`) to use for all # documents. # default_role = None # If true, '()' will be appended to :func: etc. cross-reference text. # add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). # add_module_names = True # If true, sectionauthor and moduleauthor directives will be shown in the # output. 
They are ignored by default. # show_authors = False # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'sphinx' # A list of ignored prefixes for module index sorting. # modindex_common_prefix = [] # If true, keep warnings as "system message" paragraphs in the built documents. # keep_warnings = False # -- Options for HTML output ---------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. html_theme = 'default' # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. # html_theme_options = {} # Add any paths that contain custom themes here, relative to this directory. # html_theme_path = [] # The name for this set of Sphinx documents. If None, it defaults to # " v documentation". # html_title = None # A shorter title for the navigation bar. Default is the same as html_title. # html_short_title = None # The name of an image file (relative to this directory) to place at the top # of the sidebar. # html_logo = None # The name of an image file (within the static path) to use as favicon of the # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 # pixels large. # html_favicon = None # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". html_static_path = ['_static'] # Add any extra paths that contain custom files (such as robots.txt or # .htaccess) here, relative to this directory. These files are copied # directly to the root of the documentation. # html_extra_path = [] # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, # using the given strftime format. # html_last_updated_fmt = '%b %d, %Y' # If true, SmartyPants will be used to convert quotes and dashes to # typographically correct entities. # html_use_smartypants = True # Custom sidebar templates, maps document names to template names. # html_sidebars = {} # Additional templates that should be rendered to pages, maps page names to # template names. # html_additional_pages = {} # If false, no module index is generated. # html_domain_indices = True # If false, no index is generated. # html_use_index = True # If true, the index is split into individual pages for each letter. # html_split_index = False # If true, links to the reST sources are added to the pages. # html_show_sourcelink = True # If true, "Created using Sphinx" is shown in the HTML footer. Default is True. # html_show_sphinx = True # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. # html_show_copyright = True # If true, an OpenSearch description file will be output, and all pages will # contain a tag referring to it. The value of this option must be the # base URL from which the finished HTML is served. # html_use_opensearch = '' # This is the file name suffix for HTML files (e.g. ".xhtml"). # html_file_suffix = None # Output file base name for HTML help builder. htmlhelp_basename = 'NeutronReleaseNotesdoc' # -- Options for LaTeX output --------------------------------------------- latex_elements = { # The paper size ('letterpaper' or 'a4paper'). # 'papersize': 'letterpaper', # The font size ('10pt', '11pt' or '12pt'). # 'pointsize': '10pt', # Additional stuff for the LaTeX preamble. 
# 'preamble': '', } # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, # author, documentclass [howto, manual, or own class]). latex_documents = [ ('index', 'NeutronReleaseNotes.tex', u'Neutron Release Notes Documentation', u'Neutron Developers', 'manual'), ] # The name of an image file (relative to this directory) to place at the top of # the title page. # latex_logo = None # For "manual" documents, if this is true, then toplevel headings are parts, # not chapters. # latex_use_parts = False # If true, show page references after internal links. # latex_show_pagerefs = False # If true, show URL addresses after external links. # latex_show_urls = False # Documents to append as an appendix to all manuals. # latex_appendices = [] # If false, no module index is generated. # latex_domain_indices = True # -- Options for manual page output --------------------------------------- # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). man_pages = [ ('index', 'neutronreleasenotes', u'Neutron Release Notes Documentation', [u'Neutron Developers'], 1) ] # If true, show URL addresses after external links. # man_show_urls = False # -- Options for Texinfo output ------------------------------------------- # Grouping the document tree into Texinfo files. List of tuples # (source start file, target name, title, author, # dir menu entry, description, category) texinfo_documents = [ ('index', 'NeutronReleaseNotes', u'Neutron Release Notes Documentation', u'Neutron Developers', 'NeutronReleaseNotes', 'One line description of project.', 'Miscellaneous'), ] # Documents to append as an appendix to all manuals. # texinfo_appendices = [] # If false, no module index is generated. # texinfo_domain_indices = True # How to display URL addresses: 'footnote', 'no', or 'inline'. # texinfo_show_urls = 'footnote' # If true, do not generate a @detailmenu in the "Top" node's menu. # texinfo_no_detailmenu = False neutron-8.4.0/releasenotes/source/_static/0000775000567000056710000000000013044373210021772 5ustar jenkinsjenkins00000000000000neutron-8.4.0/releasenotes/source/_static/.placeholder0000664000567000056710000000000013044372736024257 0ustar jenkinsjenkins00000000000000neutron-8.4.0/releasenotes/source/unreleased.rst0000664000567000056710000000015613044372736023243 0ustar jenkinsjenkins00000000000000============================= Current Series Release Notes ============================= .. release-notes:: neutron-8.4.0/releasenotes/notes/0000775000567000056710000000000013044373210020174 5ustar jenkinsjenkins00000000000000neutron-8.4.0/releasenotes/notes/correlate-address-scope-with-network-ea16e16b0154ac21.yaml0000664000567000056710000000016213044372736032436 0ustar jenkinsjenkins00000000000000--- features: - Add derived attributes to the network to tell users which address scopes the network is in. neutron-8.4.0/releasenotes/notes/deprecated-driver-e368e0befc9bee4c.yaml0000664000567000056710000000057313044372736027216 0ustar jenkinsjenkins00000000000000--- prelude: > OFAgent is decomposed and deprecated in the Mitaka cycle. other: - The Openflow Agent(OFAgent) mechanism driver is decomposed completely from neutron tree in the Mitaka. The OFAgent driver and its agent also are deprecated in favor of OpenvSwitch mechanism driver with "native" of_interface in the Mitaka and will be removed in the next release. 
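The OFAgent note above concerns ML2 mechanism drivers -- the plug points listed under [neutron.ml2.mechanism_drivers] in the entry_points.txt section earlier. To make the moving part concrete, here is a hedged sketch of the minimal shape such a driver takes (illustrative only; real drivers like the in-tree Open vSwitch one also implement port binding and the pre/post-commit hooks):

    # Sketch only: skeleton of an ML2 mechanism driver.
    from neutron.plugins.ml2 import driver_api as api

    class MinimalMechanismDriver(api.MechanismDriver):
        """Does nothing: every network/port event hook inherits a no-op."""

        def initialize(self):
            # Called once when ML2 loads the driver; one-time setup goes here.
            pass

A third-party driver like this would be registered under its own alias in the neutron.ml2.mechanism_drivers entry-point group and then enabled through the mechanism_drivers option in the ML2 configuration.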
neutron-8.4.0/releasenotes/notes/deprecate-force_gateway_on_subnet-376855c4e66f4e11.yaml0000664000567000056710000000017013044372736032007 0ustar jenkinsjenkins00000000000000
---
deprecations:
  - The 'force_gateway_on_subnet' option is deprecated and will be removed
    in the 'Newton' cycle.
neutron-8.4.0/releasenotes/notes/direct-physical-vnic-878d15bdb758b70e.yaml0000664000567000056710000000027313044372736027347 0ustar jenkinsjenkins00000000000000
---
prelude: >
    Add new VNIC type for SR-IOV physical functions.
features:
  - Neutron now supports creation of ports for exposing physical functions
    as network devices to guests.
neutron-8.4.0/releasenotes/notes/add-timestamp-fields-f9ab949fc88f05f6.yaml0000664000567000056710000000057113044372760027416 0ustar jenkinsjenkins00000000000000
---
prelude: >
    Timestamp fields are now added to neutron core resources.
features:
  - Add timestamp fields 'created_at' and 'updated_at' to neutron core
    resources like network, subnet, port and subnetpool.
  - Add support for querying these resources with 'changed-since', which
    returns the resources changed after a specific time string like
    YYYY-MM-DDTHH:MM:SS.
neutron-8.4.0/releasenotes/notes/add-get-me-a-network-56321aeef5389001.yaml0000664000567000056710000000052313044372760026753 0ustar jenkinsjenkins00000000000000
---
prelude: >
    The "get-me-a-network" feature simplifies the process for launching an
    instance with basic network connectivity (via an externally connected
    private tenant network).
features:
  - Once Nova takes advantage of this feature, a user can launch an
    instance without explicitly provisioning network resources.
neutron-8.4.0/releasenotes/notes/firewall_driver_not_needed_on_server-4159669ad834dea6.yaml0000664000567000056710000000072113044372736032673 0ustar jenkinsjenkins00000000000000
---
prelude: >
    The Neutron server no longer needs to be configured with a firewall
    driver, and it can support mixed environments of hybrid iptables
    firewalls and the pure OVS firewall.
features:
  - The Neutron server now learns the appropriate firewall wiring behavior
    from each OVS agent, so it no longer needs to be configured with the
    firewall_driver. This means it also supports multiple agents with
    different types of firewalls.
neutron-8.4.0/releasenotes/notes/1500-default-segment-mtu-54e2cf6aea9602d5.yaml0000664000567000056710000000174113044372736027646 0ustar jenkinsjenkins00000000000000
---
prelude: >
    The ML2 plug-in supports calculating the MTU for networks that are
    realized as flat or VLAN networks, by consulting the 'segment_mtu'
    option. Prior to Mitaka, 'segment_mtu' defaulted to 0, which disabled
    this feature. This created slightly confusing API results when querying
    Neutron networks, since the plugins that support the MTU API extension
    would return networks with the MTU equal to zero. Networks with an MTU
    of zero make little sense, since nothing could ever be transmitted.
    In Mitaka, 'segment_mtu' now defaults to 1500, the standard MTU for
    Ethernet networks, in order to improve the "out of box" experience for
    typical deployments.
features:
  - In Mitaka, queries to the Networking API for network objects will now
    return network objects that contain a sane MTU value.
neutron-8.4.0/releasenotes/notes/dvr-ovs-agent-6052a8d60fddde22.yaml
---
prelude: >
    An OVS agent configured to run in DVR mode will fail to start if it cannot get proper DVR configuration values from the server on start-up. The agent will no longer fall back to non-DVR mode, since that may lead to inconsistency in a DVR-enabled cluster, as the Neutron server does not distinguish between DVR and non-DVR OVS agents.

neutron-8.4.0/releasenotes/notes/set-of-default-qos-burst-value-0790773703fa08fc.yaml
---
prelude: >
    By default, the QoS driver for the Open vSwitch and Linuxbridge agents calculates the burst value as 80% of the available bandwidth.
fixes:
  - Fixes bug 1572670.

neutron-8.4.0/releasenotes/notes/add-rbac-qos-8b1154ee756c66df.yaml
---
prelude: >
    RBAC support for QoS policies.
features:
  - Neutron now supports sharing of QoS policies between a subset of tenants.

neutron-8.4.0/releasenotes/notes/fail-on-missing-extensions-bc332124b780875b.yaml
---
fixes:
  - The server will fail to start if any of the declared required extensions, as needed by the core and service plugins, are not properly configured.

neutron-8.4.0/releasenotes/notes/new-vif-type-for-pf-passthrough-33ec560b9b5d246f.yaml
---
features:
  - The SriovNicSwitchMechanismDriver driver now exposes a new VIF type 'hostdev_physical' for ports with vnic type 'direct-physical' (used for SR-IOV PF passthrough). This enables Nova to provision PFs as Neutron ports.

neutron-8.4.0/releasenotes/notes/macvtap-l2-agent-2b551d8ec341196d.yaml
---
prelude: >
    Add the MacVtap ML2 driver and L2 agent as a new vswitch choice.
features:
  - Libvirt qemu/kvm instances can now be attached via MacVtap in bridge mode to a network. VLAN and FLAT attachments are supported. Attachments other than compute are not supported.
issues:
  - To ensure that any kind of migration works between all compute nodes, make sure that the same physical_interface_mappings is configured on each MacVtap compute node. Having different mappings could cause live migration to fail (if the configured physical network interface does not exist on the target host), or even worse, result in an instance being placed on the wrong physical network (if the physical network interface exists on the target host but is used by another physical network or not used at all by OpenStack). Such an instance no longer has access to its configured networks; it then has layer 2 connectivity to either another OpenStack network or one of the host's networks. A configuration sketch follows this note.

neutron-8.4.0/releasenotes/notes/oslo-reports-166a169037bf64f2.yaml
---
prelude: >
    Neutron is integrated with the Guru Meditation Reports library.
features:
  - Neutron services respond to the SIGUSR2 signal by dumping valuable debug information to standard error output.
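A sketch of the physical_interface_mappings consistency requirement from the MacVtap note above. The interface name eth1 is purely illustrative; the point is that the identical mapping line must appear on every MacVtap compute node, and the [macvtap] section name is assumed from the agent's sample configuration.

# macvtap_agent.ini -- must be identical on all MacVtap compute nodes,
# otherwise live migration can fail or land an instance on the wrong
# physical network.
[macvtap]
physical_interface_mappings = physnet1:eth1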
neutron-8.4.0/releasenotes/notes/network_ip_availability-d64bd7032b3c15ee.yaml
---
prelude: >
    Neutron now provides network IP availability information.
features:
  - A new API endpoint, /v2.0/network-ip-availabilities, allows an admin to quickly get counts of used_ips and total_ips for one or more networks. The new endpoint allows filtering by network_id, network_name, tenant_id, and ip_version. The response returns network and nested subnet data that includes used and total IPs.

neutron-8.4.0/releasenotes/notes/linuxbridge-agent-extensions-66bdf9feee25ef99.yaml
---
prelude: >
    The Linuxbridge agent now supports L2 agent extensions.
features:
  - The Linuxbridge agent can now be extended by third parties using a pluggable mechanism.
fixes:
  - Partially closes bug 1468803.

neutron-8.4.0/releasenotes/notes/add-port-rebinding-chance-33178b9abacf5804.yaml
---
prelude: >
    ML2: ports can now recover from the binding failed state.
features:
  - Ports that failed to bind when an L2 agent was offline can now recover after the agent is back online.

neutron-8.4.0/releasenotes/notes/default-local-dns-a1c3fa1451f228fa.yaml
---
fixes:
  - Prior to Mitaka, name resolution in instances required specifying DNS resolvers via the 'dnsmasq_dns_servers' option in the DHCP agent configuration file or via neutron subnet options. In this case, the data plane must provide connectivity between instances and upstream DNS resolvers. Omitting both of these methods causes the dnsmasq service to offer the IP address on which it resides to instances for name resolution. However, the static dnsmasq '--no-resolv' process argument prevents name resolution via dnsmasq, leaving instances without name resolution. Mitaka introduces the 'dnsmasq_local_resolv' option, with a default value of False to preserve backward compatibility, which enables the dnsmasq service to provide name resolution for instances via DNS resolvers on the host running the DHCP agent. In this case, the data plane must provide connectivity between the host and upstream DNS resolvers rather than between the instances and upstream DNS resolvers. Specifying DNS resolvers via the 'dnsmasq_dns_servers' option in the DHCP agent configuration overrides the 'dnsmasq_local_resolv' option for all subnets using the DHCP agent. A configuration sketch follows this group of notes.

neutron-8.4.0/releasenotes/notes/l3ha-agent-server-dependency-1fcb775328ac4502.yaml
---
upgrade:
  - The server notifies L3 HA agents when an HA router interface port status becomes active; the L3 HA agents then spawn the keepalived process. Consequently, the server has to be restarted before the L3 agents during an upgrade.

neutron-8.4.0/releasenotes/notes/dvr-support-live-migration-b818b12bd9cbb518.yaml
---
prelude: >
    Improve DVR's resiliency during Nova VM live migration events.
fixes:
  - Create DVR router namespaces proactively on the destination node during live migration events. This helps minimize packet loss to floating IP traffic.
issues:
  - More synchronization between Nova and Neutron is needed to properly handle live migration failures on either side. For instance, if a live migration is reverted or canceled, some dangling Neutron resources may be left on the destination host.
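A sketch of the DHCP agent name-resolution knobs described in the 'dnsmasq_local_resolv' note above; both option names come straight from that note, while the resolver addresses are illustrative.

# dhcp_agent.ini
[DEFAULT]
# Forward instance DNS queries to the resolvers configured on the host
# running the DHCP agent (new in Mitaka; default is False).
dnsmasq_local_resolv = True
# If set, this list overrides dnsmasq_local_resolv for all subnets
# served by this agent (addresses below are illustrative).
# dnsmasq_dns_servers = 203.0.113.8,203.0.113.9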
neutron-8.4.0/releasenotes/notes/use-keystoneauth-24f309566001a16b.yaml
---
upgrade:
  - Neutron depends on keystoneauth instead of keystoneclient.
features:
  - Neutron can interact with keystone v3.

neutron-8.4.0/releasenotes/notes/QoS-for-linuxbridge-agent-bdb13515aac4e555.yaml
---
prelude: >
    The LinuxBridge agent now supports QoS bandwidth limiting.
features:
  - The LinuxBridge agent can now configure basic bandwidth-limiting QoS rules set for ports and networks. This introduces two new config options for the LinuxBridge agent. The first is 'kernel_hz', the value of the host kernel HZ setting, which is necessary for proper calculation of the minimum burst value in the tbf qdisc setting. The second is 'tbf_latency', the latency to be configured in the tc-tbf setting. Details about this option can be found in the `tc-tbf manual `_.

neutron-8.4.0/releasenotes/notes/segment_mtu_to_global_physnet_mtu-9cee5ff09557edeb.yaml
---
deprecations:
  - The 'segment_mtu' option of the ML2 configuration has been deprecated and replaced with the 'global_physnet_mtu' option in the main Neutron configuration. This option is meant to be used by all plugins for an operator to reference their physical network's MTU, regardless of the backend plugin. Plugins should access this config option via the 'get_deployment_physnet_mtu' method added to neutron.plugins.common.utils to avoid being broken by any potential renames in the future.

neutron-8.4.0/releasenotes/notes/add-ip-protocols-in-sg-60467a073e771aee.yaml
---
prelude: >
    Add popular IP protocols to the security group code. End users can specify protocol names instead of protocol numbers in both the RESTful API and the python-neutronclient CLI.
upgrade:
  - Add popular IP protocols to the security group code.

neutron-8.4.0/releasenotes/notes/sending-garp-for-l3-ha-c118871833ad8743.yaml
---
issues:
  - In kernels < 3.19, net.ipv4.ip_nonlocal_bind was not a per-namespace kernel option. L3 HA sets this option to zero to avoid sending gratuitous ARPs for IP addresses that were removed while processing. If gratuitous ARPs are sent for such addresses anyway, they might populate the ARP caches of peer machines with the wrong MAC address.
fixes:
  - Versions of keepalived < 1.2.20 don't send gratuitous ARPs when the keepalived process receives a SIGHUP signal, and fixed versions are not packaged in some Linux distributions such as RHEL, CentOS, or Ubuntu Xenial. Not sending gratuitous ARPs may leave peer ARP caches with stale information about floating IP addresses until the entry is invalidated. Neutron now sends gratuitous ARPs for all new IP addresses that appear on non-HA interfaces in the router namespace, which simulates the behavior of newer versions of keepalived.

neutron-8.4.0/releasenotes/notes/linuxbridge_vxlan_arp_responder-e9ea91552e1b62a7.yaml
---
upgrade:
  - When using ML2 and the Linux Bridge agent, the default value for the ARP responder under L2Population has changed. The responder is now disabled to improve compatibility with the allowed-address-pair extension and to match the default behavior of the ML2 OVS agent. The logical network will now utilize traditional flood-and-learn through the overlay. When upgrading, existing vxlan devices will retain their old setup and be unimpacted by changes to this flag. To apply this to older devices created with the Liberty agent, the vxlan device must be removed and then the Mitaka agent restarted. The agent will recreate the vxlan devices with the current settings upon restart. To maintain pre-Mitaka behavior, enable the arp_responder in the Linux Bridge agent VXLAN config file prior to starting the updated agent, as sketched below.
fixes:
  - The Linuxbridge agent now supports the ability to toggle the local ARP responder when L2Population is enabled. This ensures compatibility with the allowed-address-pairs extension. Closes bug 1445089.
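A sketch of restoring the pre-Mitaka ARP responder behavior described in the upgrade note above; only do this if you do not rely on the allowed-address-pairs extension, and treat the [vxlan] section name as an assumption taken from the agent's sample configuration.

# linuxbridge_agent.ini
[vxlan]
l2_population = True
# Re-enable the local ARP responder (disabled by default since Mitaka)
arp_responder = True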
neutron-8.4.0/releasenotes/notes/advertisement-intervals-for-radvd-configurable-6d85b5fdd97a2742.yaml
---
fixes:
  - Prior to Mitaka, the settings that control the frequency of router advertisements transmitted by the radvd daemon were not adjustable. Larger deployments may wish to decrease the frequency with which radvd sends multicast traffic. The 'min_rtr_adv_interval' and 'max_rtr_adv_interval' settings in the L3 agent configuration file map directly to 'MinRtrAdvInterval' and 'MaxRtrAdvInterval' in the generated radvd.conf file. Consult the manpage for radvd.conf for more detailed information.

neutron-8.4.0/releasenotes/notes/add-integration-with-external-dns-f56ec8a4993b1fc4.yaml
---
prelude: >
    Support integration with an external DNS service.
features:
  - Floating IPs can have dns_name and dns_domain attributes associated with them.
  - Ports can have a dns_name attribute associated with them. The network where a port is created can have a dns_domain associated with it.
  - Floating IPs and ports will be published in an external DNS service if they have dns_name and dns_domain attributes associated with them.
  - The reference driver integrates neutron with designate.
  - Drivers for other DNSaaS services can be implemented.
  - The driver is configured in the default section of neutron.conf using the parameter 'external_dns_driver'.

neutron-8.4.0/releasenotes/notes/add-availability-zone-4440cf00be7c54ba.yaml
---
prelude: >
    DHCP and L3 agent scheduling is availability zone aware.
features:
  - A DHCP agent is assigned to an availability zone; the network will be hosted by the DHCP agent with the availability zone specified by the user.
  - An L3 agent is assigned to an availability zone; the router will be hosted by the L3 agent with the availability zone specified by the user. This supports the use of availability zones with HA routers. DVR isn't supported yet because the L3HA and DVR integration isn't finished.
other:
  - Please read the `OpenStack Networking Guide `_.
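A sketch of tagging agents with an availability zone, per the note above; 'az1' is an illustrative zone name and the [AGENT] section name is assumed from the agent sample configurations.

# l3_agent.ini (and similarly dhcp_agent.ini)
[AGENT]
availability_zone = az1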
neutron-8.4.0/releasenotes/notes/macvtap_assigned_vf_check-f4d07660ffd82a24.yaml
---
fixes:
  - Fix the SR-IOV agent macvtap assigned VF check when the linux kernel is < 3.13.

neutron-8.4.0/releasenotes/notes/access_as_external_rbac-455dc74b9fa22761.yaml
---
prelude: >
    External networks can now be controlled using the RBAC framework that was added in Liberty. This allows networks to be made available to specific tenants (as opposed to all tenants) to be used as an external gateway for routers and floating IPs.
features:
  - External networks can now be controlled using the RBAC framework that was added in Liberty. This allows networks to be made available to specific tenants (as opposed to all tenants) to be used as an external gateway for routers and floating IPs. By default this feature will also allow regular tenants to make their networks available as external networks to other individual tenants (or even themselves), but they are prevented from using the wildcard to share to all tenants. This behavior can be adjusted via policy.json by the operator if desired.

neutron-8.4.0/releasenotes/notes/sriov_allow_use_many_nics_for_one_physnet-3570aa67a60ce6c4.yaml
---
prelude: >
    Several NICs per physical network can be used with SR-IOV.
fixes:
  - The 'physical_device_mappings' setting of the sriov_nic configuration can now accept more than one NIC per physical network. For example, if 'physnet2' is connected to enp1s0f0 and enp1s0f1, 'physnet2:enp1s0f0,physnet2:enp1s0f1' will be a valid option.

neutron-8.4.0/releasenotes/notes/advertise_mtu_by_default-d8b0b056a74517b8.yaml
---
features:
  - By default, the DHCP agent provides a network MTU value to instances using the corresponding DHCP option if the core plugin calculates the value. For the ML2 plugin, the calculation mechanism is enabled by setting the [ml2] path_mtu option to a value greater than zero.
upgrade:
  - To disable, use [DEFAULT] advertise_mtu = False.
other:
  - For overlay networks managed by the ML2 core plugin, the calculation algorithm subtracts the overlay protocol overhead from the value of [ml2] path_mtu. The DHCP agent provides the resulting (smaller) MTU to instances using overlay networks.
  - The [DEFAULT] advertise_mtu option must contain a consistent value on all hosts running the DHCP agent.
  - Typical networks can use [ml2] path_mtu = 1500.

neutron-8.4.0/releasenotes/notes/add-designate-driver-ssl-options-169c299c96f2aff0.yaml
---
prelude: >
    Add options to the designate external DNS driver of neutron for SSL-based connections. This makes it possible to use neutron with designate in scenarios where endpoints are SSL based. Users can choose to skip cert validation or specify a path to a valid cert in the [designate] section of the neutron.conf file.
features:
  - Two new options are added to the `[designate]` section to support SSL.
  - The first option, `insecure`, allows skipping SSL validation when creating a keystone session to initiate a designate client. The default value is False, which means always verify the connection.
  - The second option, `ca_cert`, allows setting the path to a valid cert file. The default is None.
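A sketch of the two SSL options introduced in the designate note above; both option names come from the note itself, while the certificate path is illustrative.

# neutron.conf
[designate]
# Skip SSL certificate validation entirely (not recommended)...
insecure = False
# ...or trust a specific CA bundle instead (path is illustrative).
ca_cert = /etc/ssl/certs/designate-ca.pem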
neutron-8.4.0/releasenotes/notes/end-to-end-mtu-00345fc4282cb8fb.yaml
---
features:
  - Use the value of the network 'mtu' attribute for the MTU of virtual network interfaces such as veth pairs, patch ports, and tap devices involving a particular network.
  - Enable end-to-end support for arbitrary MTUs, including jumbo frames, between instances and provider networks by moving MTU disparities between flat or VLAN networks and overlay networks from layer-2 devices to layer-3 devices that support path MTU discovery (PMTUD).
upgrade:
  - Does not change the MTU for existing virtual network interfaces.
  - Actions that create virtual network interfaces on an existing network whose 'mtu' attribute contains a value greater than zero could cause issues for network traffic traversing existing and new virtual network interfaces.
fixes:
  - Explicitly configure the MTU of virtual network interfaces rather than using default values or incorrect values that do not account for overlay protocol overhead.

neutron-8.4.0/releasenotes/notes/deprecate-network-device-mtu-59b78264c9974808.yaml
---
deprecations:
  - The 'network_device_mtu' option is deprecated and will be removed in the 'Newton' cycle. Please use the system-wide segment_mtu setting, which the agents will take into account when wiring VIFs.

neutron-8.4.0/releasenotes/notes/config-wsgi-pool-size-a4c06753b79fee6d.yaml
---
prelude: >
    Support configuration of the greenthreads pool for WSGI.
other:
  - Operators may want to tune the ``max_overflow`` and ``wsgi_default_pool_size`` configuration options according to the investigations outlined in this `mailing list post `_. The default value of ``wsgi_default_pool_size`` inherits from that of oslo.config, which is currently 100. This is a change in default from the previous Neutron-specific value of 1000. A configuration sketch follows this group of notes.

neutron-8.4.0/releasenotes/notes/rm-notify-entry-points-aa442134a780469a.yaml
---
prelude: >
    The oslo.messaging.notify.drivers entry points are deprecated.
other:
  - The oslo.messaging.notify.drivers entry points that were left in tree for backward compatibility with Icehouse are deprecated and will be removed after liberty-eol. Configure notifications using the oslo_messaging configuration options in neutron.conf.

neutron-8.4.0/releasenotes/notes/bgp-support-ef361825ca63f28b.yaml
---
prelude: >
    Announcement of tenant prefixes and host routes for floating IPs via BGP is supported.
features:
  - Announcement of tenant subnets via BGP using the centralized Neutron router gateway port as the next-hop.
  - Announcement of floating IP host routes via BGP using the centralized Neutron router gateway port as the next-hop.
  - Announcement of floating IP host routes via BGP using the floating IP agent gateway as the next-hop when the floating IP is associated through a distributed router.
issues:
  - When using DVR, if a floating IP is associated to a fixed IP, direct access to the fixed IP is not possible when traffic is sent from outside of a Neutron tenant network (north-south traffic). Traffic sent between tenant networks (east-west traffic) is not affected. When using a distributed router, the floating IP will mask the fixed IP, making it inaccessible, even though the tenant subnet is being announced as accessible through the centralized SNAT router. In such a case, traffic sent to the instance should be directed to the floating IP. This is a limitation of the Neutron L3 agent when using DVR and will be addressed in a future release.
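A sketch of the tuning knobs from the WSGI greenthreads note above. The wsgi_default_pool_size value shown is the default stated in the note; the max_overflow value and its [database] section placement are assumptions to verify for your deployment.

# neutron.conf
[DEFAULT]
# Default since Mitaka (was 1000 before)
wsgi_default_pool_size = 100

[database]
# Illustrative value; tune per the mailing list discussion cited above
max_overflow = 50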
neutron-8.4.0/releasenotes/notes/sriov_show_l2_agent_extensions-ca852e155a529e99.yaml
---
fixes:
  - Loaded agent extensions of the SR-IOV agent are now shown in the agent state API.

neutron-8.4.0/releasenotes/notes/keepalived-state-change-server-threads-9ed775e7533dd1a0.yaml
---
upgrade:
  - A new option ``ha_keepalived_state_change_server_threads`` has been added to configure the number of concurrent threads spawned for keepalived server connection requests. Higher values increase the CPU load on the agent nodes. The default value is half of the number of CPUs present on the node. This allows operators to tune the number of threads to suit their environment. With more threads, simultaneous requests for multiple HA router state changes can be handled faster.

neutron-8.4.0/releasenotes/notes/sriov-agent-num-vf-0-0c06424247e7efe0.yaml
---
fixes:
  - Allow the SR-IOV agent to run with 0 VFs.

neutron-8.4.0/releasenotes/notes/hyperv-neutron-agent-decomposition-ae6a052aeb48c6ac.yaml
---
upgrade:
  - The Hyper-V Neutron agent has been fully decomposed from Neutron. The `neutron.plugins.hyperv.agent.security_groups_driver.HyperVSecurityGroupsDriver` firewall driver has been deprecated and will be removed in the 'O' cycle. Update the `neutron_hyperv_agent.conf` files on the Hyper-V nodes to use `hyperv.neutron.security_groups_driver.HyperVSecurityGroupsDriver`, which is the networking_hyperv security groups driver.

neutron-8.4.0/releasenotes/notes/deprecate_max_fixed_ips_per_port-5e80518cbf25cfd6.yaml
---
prelude: >
    max_fixed_ips_per_port has been deprecated and will be removed in the Newton or Ocata cycle, depending on when all identified use cases of the option are satisfied via another quota system.
deprecations:
  - max_fixed_ips_per_port has been deprecated and will be removed in the Newton or Ocata cycle, depending on when all identified use cases of the option are satisfied via another quota system. If you depend on this configuration option to stop tenants from consuming IP addresses, please leave a comment on the `bug report `_.

neutron-8.4.0/releasenotes/notes/allow-non-admins-to-define-external-extra-routes-0d541fc356a5c546.yaml
---
features:
  - Allow non-admin users to define "external" extra-routes.
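A sketch of the new option from the keepalived state-change upgrade note above; the value 4 is arbitrary and illustrative (the default is half the node's CPU count), and the [DEFAULT] placement is an assumption to verify against the generated sample config.

# l3_agent.ini
[DEFAULT]
ha_keepalived_state_change_server_threads = 4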
neutron-8.4.0/releasenotes/notes/mtu-selection-and-advertisement-ab29f9ec43140224.yaml
---
prelude: >
    Support for MTU selection and advertisement.
features:
  - When advertise_mtu is set in the config, Neutron supports advertising the LinkMTU using Router Advertisements.
other:
  - For details, please read `Blueprint mtu-selection-and-advertisement `_.

neutron-8.4.0/releasenotes/notes/add-standard-attr-descriptions-1ba0d7a454c3fd8f.yaml
---
prelude: >
    Add a description field to security group rules, networks, ports, routers, floating IPs, and subnet pools.
features:
  - Security group rules, networks, ports, routers, floating IPs, and subnet pools may now contain an optional description, which allows users to easily store details about entities.

neutron-8.4.0/releasenotes/notes/ovs-ct-firewall-driver-52a70a6a16d06f59.yaml
---
features:
  - A new security groups firewall driver is introduced. It is based on OpenFlow and uses connection tracking.
issues:
  - The OVS firewall driver doesn't work well with other features that use OpenFlow.
other:
  - The OVS firewall driver requires OVS 2.5 or higher with linux kernel 4.3 or higher. More info at the `OVS github page `_.

neutron-8.4.0/releasenotes/notes/add-tags-to-core-resources-b05330a129900609.yaml
---
prelude: >
    Add a tag mechanism for network resources.
features:
  - Users can set tags on their network resources.
  - Networks can be filtered by tags. The supported filters are 'tags', 'tags-any', 'not-tags' and 'not-tags-any'.

neutron-8.4.0/releasenotes/notes/.placeholder

neutron-8.4.0/releasenotes/notes/default-subnetpool-semantics-1cdc5cdde2be88c2.yaml
---
features:
  - The subnet API now includes a new use_default_subnetpool attribute. This attribute can be specified on creating a subnet in lieu of a subnetpool_id; the two are mutually exclusive. If it is specified as True, the default subnet pool for the requested ip_version will be looked up and used. If no default exists, an error will be returned.
deprecations:
  - The default_subnet_pools option is now deprecated and will be removed in the Newton release. The same functionality is now provided by setting the is_default attribute on subnetpools to True using the API or client.
fixes:
  - Before Mitaka, when a default subnetpool was defined in the configuration, a request to create a subnet would fall back to using it if no specific subnet pool was specified. This behavior broke the semantics of subnet create calls in this scenario and is now considered an API bug. This bug has been fixed so that there is no automatic fallback with the presence of a default subnet pool. Workflows which depended on the old fallback behavior will have to be modified to set the new use_default_subnetpool attribute when creating a subnet.

neutron-8.4.0/releasenotes/notes/config-file-generation-2eafc6602d57178e.yaml
---
prelude: >
    Core configuration files are automatically generated.
features:
  - Neutron no longer includes static example configuration files. Instead, use tools/generate_config_file_samples.sh to generate them. The files are generated with a .sample extension.
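A sketch of opting in to the OpenFlow/conntrack firewall from the OVS firewall note above; the [securitygroup] section and the 'openvswitch' driver alias follow the usual sample configuration and should be verified, and the stated OVS >= 2.5 / kernel >= 4.3 requirements apply.

# openvswitch_agent.ini
[securitygroup]
firewall_driver = openvswitch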
neutron-8.4.0/releasenotes/notes/ovs-ipv6-tunnel-endpoints-f41b4954a04c43f6.yaml
---
prelude: >
    Support for IPv6 addresses as tunnel endpoints in OVS.
features:
  - The local_ip value in ml2_conf.ini can now be set to an IPv6 address configured on the system.
other:
  - Requires OVS 2.5 or higher with linux kernel 4.3 or higher. More info at the `OVS github page `_.

neutron-8.4.0/releasenotes/notes/oslo-messaging-notifier-queue-d94677076a1db261.yaml
---
features:
  - The RPC and notification queues have been separated into different queues. Specify the transport_url to be used for notifications within the [oslo_messaging_notifications] section of the configuration file. If no transport_url is specified in [oslo_messaging_notifications], the transport_url used for RPC will be used.

neutron-8.4.0/releasenotes/notes/dvr-ha-support-cc67e84d9380cd0b.yaml
---
prelude: >
    High availability (HA) of the SNAT service is supported for distributed virtual routers (DVRs).
features:
  - High availability support for SNAT services on distributed virtual routers. Routers can now be created with the flags distributed=True and ha=True. The created routers will provide distributed virtual routing as well as SNAT high availability on the L3 agents configured for dvr_snat mode.
issues:
  - Only the creation of dvr/ha routers is currently supported. Upgrading other types of routers to dvr/ha routers is not supported in this release.

neutron-8.4.0/releasenotes/notes/1500-default-mtu-b0d6e4ab193b62a4.yaml
---
prelude: >
    The ML2 plug-in supports calculating the MTU for instances using overlay networks by subtracting the overlay protocol overhead from the value of 'path_mtu', ideally the physical (underlying) network MTU, and providing the smaller value to instances via DHCP. Prior to Mitaka, 'path_mtu' defaulted to 0, which disabled this feature. In Mitaka, 'path_mtu' defaults to 1500, a typical MTU for physical networks, to improve the "out of box" experience for typical deployments.
features:
  - In Mitaka, the combination of 'path_mtu' defaulting to 1500 and 'advertise_mtu' defaulting to True provides an MTU value, accounting for any overlay protocol overhead on the network, to instances using DHCP. For example, an instance attaching to a VXLAN network receives a 1450 MTU from DHCP, accounting for 50 bytes of overhead from the VXLAN overlay protocol when using IPv4 endpoints.
issues:
  - The combination of 'path_mtu' and 'advertise_mtu' only adjusts the MTU for instances rather than all virtual network components between instances and provider/public networks. In particular, setting 'path_mtu' to a value greater than 1500 can cause packet loss even if the physical network supports it. Also, the calculation does not consider additional overhead from IPv6 endpoints.
upgrade:
  - Operators using the ML2 plug-in with 'path_mtu' defaulting to 0 may need to perform a database migration to update the MTU for existing networks and possibly disable existing workarounds for MTU problems, such as increasing the physical network MTU to 1550.
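A sketch making the Mitaka MTU defaults from the notes above explicit; both values are the stated defaults, shown here only so overlay MTU calculation and DHCP advertisement are visibly enabled.

# ml2_conf.ini
[ml2]
# Overlay overhead is subtracted from this (e.g. VXLAN/IPv4: 1500 - 50 = 1450)
path_mtu = 1500

# neutron.conf
[DEFAULT]
advertise_mtu = True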
neutron-8.4.0/releasenotes/notes/clear-allowed-address-pairs-with-none-4757bcca78076c9e.yaml
---
prelude: >
    Allowed address pairs can now be cleared by passing None in addition to an empty list. This makes it possible to use the --action=clear option with the neutron client, e.g. 'neutron port-update <port> --allowed-address-pairs action=clear'.
fixes:
  - Fixes bug 1537734.

neutron-8.4.0/releasenotes/notes/deprecate-router_id-34aca9ea5ee9e789.yaml
---
upgrade:
  - The router_id option is deprecated and will be removed in the 'N' cycle.

neutron-8.4.0/requirements.txt
# The order of packages is significant, because pip processes them in the order
# of appearance. Changing the order has an impact on the overall integration
# process, which may cause wedges in the gate later.
pbr>=1.6  # Apache-2.0
Paste  # MIT
PasteDeploy>=1.5.0  # MIT
Routes!=2.0,!=2.1,!=2.3.0,>=1.12.3;python_version=='2.7'  # MIT
Routes!=2.0,!=2.3.0,>=1.12.3;python_version!='2.7'  # MIT
debtcollector>=1.2.0  # Apache-2.0
eventlet!=0.18.3,>=0.18.2  # MIT
pecan>=1.0.0  # BSD
greenlet>=0.3.2  # MIT
httplib2>=0.7.5  # MIT
requests!=2.9.0,>=2.8.1  # Apache-2.0
Jinja2>=2.8  # BSD License (3 clause)
keystonemiddleware!=4.1.0,>=4.0.0  # Apache-2.0
netaddr!=0.7.16,>=0.7.12  # BSD
netifaces>=0.10.4  # MIT
neutron-lib>=0.0.1  # Apache-2.0
python-neutronclient!=4.1.0,>=2.6.0  # Apache-2.0
retrying!=1.3.0,>=1.2.3  # Apache-2.0
ryu>=3.30  # Apache-2.0
SQLAlchemy<1.1.0,>=1.0.10  # MIT
WebOb>=1.2.3  # MIT
keystoneauth1>=2.1.0  # Apache-2.0
alembic>=0.8.0  # MIT
six>=1.9.0  # MIT
stevedore>=1.5.0  # Apache-2.0
oslo.concurrency>=3.7.1  # Apache-2.0
oslo.config>=3.7.0  # Apache-2.0
oslo.context>=0.2.0  # Apache-2.0
oslo.db>=4.1.0  # Apache-2.0
oslo.i18n>=2.1.0  # Apache-2.0
oslo.log>=1.14.0  # Apache-2.0
oslo.messaging>=4.0.0  # Apache-2.0
oslo.middleware>=3.0.0  # Apache-2.0
oslo.policy>=0.5.0  # Apache-2.0
oslo.reports>=0.6.0  # Apache-2.0
oslo.rootwrap>=2.0.0  # Apache-2.0
oslo.serialization>=1.10.0  # Apache-2.0
oslo.service>=1.0.0  # Apache-2.0
oslo.utils>=3.5.0  # Apache-2.0
oslo.versionedobjects>=1.5.0  # Apache-2.0
ovs>=2.4.0;python_version=='2.7'  # Apache-2.0
python-novaclient!=2.33.0,>=2.29.0  # Apache-2.0
python-designateclient>=1.5.0  # Apache-2.0

neutron-8.4.0/.coveragerc
[run]
branch = True
source = neutron
omit = neutron/tests/*,neutron/openstack/*

[report]
ignore_errors = True

neutron-8.4.0/LICENSE
Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 1. Definitions. "License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. "Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. "Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity.
For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. "You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. "Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. "Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. "Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). "Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. "Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." "Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. 2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. 3. Grant of Patent License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. 
If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. 4. Redistribution. You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: (a) You must give any other recipients of the Work or Derivative Works a copy of this License; and (b) You must cause any modified files to carry prominent notices stating that You changed the files; and (c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and (d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. 5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. 6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. 7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. 
You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. 8. Limitation of Liability. In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. 9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability.

neutron-8.4.0/tox.ini
[tox]
envlist = docs,py34,py27,pep8
minversion = 2.3.1
skipsdist = True

[testenv]
setenv = VIRTUAL_ENV={envdir}
passenv = TRACE_FAILONLY GENERATE_HASHES http_proxy HTTP_PROXY https_proxy HTTPS_PROXY no_proxy NO_PROXY
usedevelop = True
install_command = {toxinidir}/tools/tox_install.sh {env:UPPER_CONSTRAINTS_FILE:https://git.openstack.org/cgit/openstack/requirements/plain/upper-constraints.txt?h=stable/mitaka} {opts} {packages}
deps = -r{toxinidir}/requirements.txt
       -r{toxinidir}/test-requirements.txt
whitelist_externals = sh
commands = {toxinidir}/tools/ostestr_compat_shim.sh {posargs}
# there is also secret magic in ostestr which lets you run in a fail only
# mode. To do this define the TRACE_FAILONLY environment variable.
[testenv:api]
basepython = python2.7
passenv = {[testenv]passenv} TEMPEST_CONFIG_DIR
setenv = {[testenv]setenv}
         OS_TEST_PATH=./neutron/tests/api
         TEMPEST_CONFIG_DIR={env:TEMPEST_CONFIG_DIR:/opt/stack/tempest/etc}
         OS_TEST_API_WITH_REST=1
deps = {[testenv]deps}
       -r{toxinidir}/neutron/tests/api/requirements.txt

[testenv:common]
# Fake job to define environment variables shared between dsvm/non-dsvm jobs
setenv = OS_TEST_TIMEOUT=180
commands = false

[testenv:dsvm]
# Fake job to define environment variables shared between dsvm jobs
setenv = OS_SUDO_TESTING=1
         OS_ROOTWRAP_CMD=sudo {envdir}/bin/neutron-rootwrap {envdir}/etc/neutron/rootwrap.conf
         OS_ROOTWRAP_DAEMON_CMD=sudo {envdir}/bin/neutron-rootwrap-daemon {envdir}/etc/neutron/rootwrap.conf
         OS_FAIL_ON_MISSING_DEPS=1
commands = false

[testenv:functional]
basepython = python2.7
setenv = {[testenv]setenv}
         {[testenv:common]setenv}
         OS_TEST_PATH=./neutron/tests/functional
deps = {[testenv]deps}
       -r{toxinidir}/neutron/tests/functional/requirements.txt

[testenv:functional-py34]
basepython = python3.4
setenv = {[testenv:functional]setenv}
deps = {[testenv:functional]deps}

[testenv:dsvm-functional]
basepython = python2.7
setenv = {[testenv:functional]setenv}
         {[testenv:dsvm]setenv}
sitepackages=True
deps = {[testenv:functional]deps}
commands =
  {toxinidir}/tools/deploy_rootwrap.sh {toxinidir} {envdir}/etc {envdir}/bin
  {toxinidir}/tools/ostestr_compat_shim.sh {posargs}

[testenv:dsvm-functional-py34]
basepython = python3.4
setenv = {[testenv:dsvm-functional]setenv}
sitepackages={[testenv:dsvm-functional]sitepackages}
deps = {[testenv:dsvm-functional]deps}
commands =
  {toxinidir}/tools/deploy_rootwrap.sh {toxinidir} {envdir}/etc {envdir}/bin
  {toxinidir}/tools/ostestr_compat_shim.sh {posargs}

[testenv:dsvm-fullstack]
setenv = {[testenv]setenv}
         {[testenv:common]setenv}
         {[testenv:dsvm]setenv}
         # workaround for DB teardown lock contention (bug/1541742)
         OS_TEST_TIMEOUT=600
         OS_TEST_PATH=./neutron/tests/fullstack
sitepackages=True
deps = {[testenv:functional]deps}

[testenv:releasenotes]
# TODO(ihrachys): remove once infra supports constraints for this target
install_command = {toxinidir}/tools/tox_install.sh unconstrained {opts} {packages}
commands = sphinx-build -a -E -W -d releasenotes/build/doctrees -b html releasenotes/source releasenotes/build/html

[testenv:pep8]
basepython = python2.7
deps = {[testenv]deps}
commands=
  # If it is easier to add a check via a shell script, consider adding it in this file
  sh ./tools/misc-sanity-checks.sh
  {toxinidir}/tools/check_unit_test_structure.sh
  # Checks for coding and style guidelines
  flake8
  sh ./tools/coding-checks.sh --pylint '{posargs}'
  neutron-db-manage --config-file neutron/tests/etc/neutron.conf check_migration
  {[testenv:genconfig]commands}
whitelist_externals = sh
                      bash

[testenv:cover]
# TODO(ihrachys): remove once infra supports constraints for this target
install_command = {toxinidir}/tools/tox_install.sh unconstrained {opts} {packages}
basepython = python2.7
commands =
  python setup.py testr --coverage --testr-args='{posargs}'
  coverage report

[testenv:venv]
# TODO(ihrachys): remove once infra supports constraints for this target
install_command = {toxinidir}/tools/tox_install.sh unconstrained {opts} {packages}
commands = {posargs}

[testenv:docs]
commands = sphinx-build -W -b html doc/source doc/build/html
[flake8]
# E125 continuation line does not distinguish itself from next logical line
# E126 continuation line over-indented for hanging indent
# E128 continuation line under-indented for visual indent
# E129 visually indented line with same indent as next logical line
# E265 block comment should start with '# '
# H404 multi line docstring should start with a summary
# H405 multi line docstring summary not separated with an empty line
ignore = E125,E126,E128,E129,E265,H404,H405
show-source = true
builtins = _
# neutron/tests/tempest needs to be excluded so long as it continues
# to be copied directly from tempest, since tempest and neutron do not
# share a flake8 configuration.
exclude = ./.*,build,dist,neutron/openstack/common/*,neutron/tests/tempest

[hacking]
import_exceptions = neutron.i18n, neutron._i18n
local-check-factory = neutron.hacking.checks.factory

[testenv:genconfig]
commands = {toxinidir}/tools/generate_config_file_samples.sh

neutron-8.4.0/bin/neutron-rootwrap-xen-dom0
#!/usr/bin/env python

# Copyright (c) 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""Neutron root wrapper for dom0.

Executes networking commands in dom0. The XenAPI plugin is responsible
for determining whether a command is safe to execute.
"""

from __future__ import print_function

from six.moves import configparser as ConfigParser
from oslo_serialization import jsonutils as json

import os
import select
import sys

import XenAPI


RC_UNAUTHORIZED = 99
RC_NOCOMMAND = 98
RC_BADCONFIG = 97
RC_XENAPI_ERROR = 96


def parse_args():
    # Split arguments, require at least a command
    exec_name = sys.argv.pop(0)  # argv[0] required; path to conf file
    if len(sys.argv) < 2:
        sys.stderr.write("%s: No command specified" % exec_name)
        sys.exit(RC_NOCOMMAND)

    config_file = sys.argv.pop(0)
    user_args = sys.argv[:]

    return exec_name, config_file, user_args


def _xenapi_section_name(config):
    sections = [sect for sect in config.sections()
                if sect.lower() == "xenapi"]
    if len(sections) == 1:
        return sections[0]

    sys.stderr.write("Multiple [xenapi] sections or no [xenapi] section "
                     "found!")
    sys.exit(RC_BADCONFIG)


def load_configuration(exec_name, config_file):
    config = ConfigParser.RawConfigParser()
    config.read(config_file)
    try:
        exec_dirs = config.get("DEFAULT", "exec_dirs").split(",")
        filters_path = config.get("DEFAULT", "filters_path").split(",")
        section = _xenapi_section_name(config)
        url = config.get(section, "xenapi_connection_url")
        username = config.get(section, "xenapi_connection_username")
        password = config.get(section, "xenapi_connection_password")
    except ConfigParser.Error:
        sys.stderr.write("%s: Incorrect configuration file: %s" %
                         (exec_name, config_file))
        sys.exit(RC_BADCONFIG)
    if not url or not password:
        msg = ("%s: Must specify xenapi_connection_url, "
               "xenapi_connection_username (optionally), and "
               "xenapi_connection_password in %s") % (exec_name, config_file)
        sys.stderr.write(msg)
        sys.exit(RC_BADCONFIG)
    return dict(
        filters_path=filters_path,
        url=url,
        username=username,
        password=password,
        exec_dirs=exec_dirs,
    )
def filter_command(exec_name, filters_path, user_args, exec_dirs):
    # Add ../ to sys.path to allow running from branch
    possible_topdir = os.path.normpath(os.path.join(os.path.abspath(exec_name),
                                                    os.pardir, os.pardir))
    if os.path.exists(os.path.join(possible_topdir, "neutron", "__init__.py")):
        sys.path.insert(0, possible_topdir)

    from oslo_rootwrap import wrapper

    # Execute command if it matches any of the loaded filters
    filters = wrapper.load_filters(filters_path)
    filter_match = wrapper.match_filter(
        filters, user_args, exec_dirs=exec_dirs)
    if not filter_match:
        sys.stderr.write("Unauthorized command: %s" % ' '.join(user_args))
        sys.exit(RC_UNAUTHORIZED)


def run_command(url, username, password, user_args, cmd_input):
    try:
        session = XenAPI.Session(url)
        session.login_with_password(username, password)
        try:
            host = session.xenapi.session.get_this_host(session.handle)
            # Delegate execution to the 'netwrap' XenAPI plugin in dom0
            result = session.xenapi.host.call_plugin(
                host, 'netwrap', 'run_command',
                {'cmd': json.dumps(user_args),
                 'cmd_input': json.dumps(cmd_input)})
            result_dict = json.loads(result)
            returncode = result_dict.get('returncode')
            captured_stdout = result_dict.get('out')
            captured_stderr = result_dict.get('err')
            sys.stdout.write(captured_stdout)
            sys.stderr.write(captured_stderr)
            sys.exit(returncode)
        finally:
            session.xenapi.session.logout()
    except Exception as e:
        sys.stderr.write("Failed to execute command in Dom0, %s" % e)
        sys.exit(RC_XENAPI_ERROR)


def main():
    exec_name, config_file, user_args = parse_args()
    config = load_configuration(exec_name, config_file)
    filter_command(exec_name, config['filters_path'], user_args,
                   config['exec_dirs'])
    # If data is available on the standard input, we need to pass it to the
    # command executed in dom0
    cmd_input = None
    if select.select([sys.stdin], [], [], 0.0)[0]:
        cmd_input = "".join(sys.stdin)
    return run_command(config['url'], config['username'], config['password'],
                       user_args, cmd_input)


if __name__ == '__main__':
    main()

neutron-8.4.0/ChangeLog
CHANGES
=======

8.4.0
-----

* Check for unbound ports in L3 RPC handler
* Only send string values to OVSDB other_config column
* XenAPI: add support for conntrack with XenServer
* Support alembic 0.8.9 in test_autogen_process_directives
* Truncate IPDevice's name to interface max size
* ovs agent, native ARP response: set Eth src/dst
* OVS agent: configure both OF10 and OF13
* ovs-agent: Catch exceptions in agent_main_wrapper
* Unplug external device when delete snat namespace
* Check for l3 agents count before router update
* Solve unexpected NoneType returned by _get_routers_can_schedule
* Ignore gre0 and gretap0 devices in netns cleanup script
* Allow more time for DB migration tests
* Don't depend on translated strings for error check
* callbacks: Make the value of FLOATING_IP match with api resource
* Parse the output of ip route more robustly
* Add unit tests for ip_lib.get_routing_table
* Add L3 HA test with linux bridge
* ovs-agent: Close ryu app on all exceptions
* Reduce IP link show calls for SR-IOV scan loop
* gate_hook: Add a no-op rally case
* Use subqueries for rbac_entries and subnets<->network
* DVR: Fix race condition in creation of fip gateway
* Delete default route if no gateway in external net
* Correctly print --limit value passed via API
* Skip larger than /64 subnets in DHCP agent
* Delete conntrack when remote ipset member removed
* SRIOV: don't block report_state with device count
* Use revision to discard stale DHCP updates
* Changing arping command execute to accept 1 as extra OK code
* l3-ha: Send gratuitous ARP when new floating IP is added
* DVR: remove misleading error log
* Fix "failed unplugging ha interface" error when deleting router
* Avoid trace in _notify_l3_agent_ha_port_update
* LinuxBridge: Pass host into get_devices_details_list
* Ensure there are fdb_entries before iterating
* Update metadata proxy when subnet add/delete
* Check if namespace exists before getting devices
* ovsfw: Add a dl_type match for action=ct flows
* ovsfw: fix troublesome port_rule_masking
* Check for ha port to become ACTIVE
* l2pop fdb flows for HA router ports
* Get rid of double-join to rbac_entries without filter

8.3.0
-----

* ml2 lb: do not program arp responder when unused
* Add 169.254.169.254 when enable force_metadata
* Move class properties to instances for dhcp tests
* New option for num_threads for state change server
* DVR: Pings to floatingip returns with fixed-ip on same network
* Install dibbler.filters rootwrap file
* ml2: allow retry on retriabable db error by precommit
* Check for StaleData errors in retry decorator
* Support for MultipleExceptions in db_api decorators
* Convert multiple exception types in the API
* Fixes KeyError while updating bgp peer
* fix port address allocation for auto-addr subnet
* Pass not IPDevice but port_name into OVSBridge's add_port()
* Fix wrong HA router state
* Imported Translations from Zanata
* Allow SR-IOV agent to start when number of vf is 0
* Implement check_vlan_transparency to return True in SR-IOV mech driver
* Updated from global requirements
* Check target_tenant when create rbac policy
* DVR: Cleanup the stale snat redirect rules in router namespace
* Add flush command to iproute in ip_lib
* DVR: SNAT redirect rules should be removed only on Gateway clear
* Fix internal server error during updating QoS rule
* Fixes port device_id/device_owner change in failed operation
* Refer to correct global_physnet_mtu option instead of segment_mtu
* L3 DVR: use fanout when sending dvr arp table update
* Suppresses a warning when no agents are configured
* Allow auto-addressed ips deletion on port update
* Set secure fail mode for physical bridges
* Imported Translations from Zanata
* Pass timeout in milliseconds to timer_wait
* Fix code that's trying to read from a stale DB object
* Handle deleted ports when creating a list of fdb entries
* L3 agent: check router namespace existence before delete
* BGP: exclude legacy fip in DVR fip host routes query
* DVR: Clean stale snat-ns by checking its existence when agent restarts
* Partial revert "DVR: Fix issue of SNAT rule for DVR with floating ip"
* Restore old assert_ping behavior
* Fix for check_vlan_transparency on mech drivers not called
* DVR: Fix issue of SNAT rule for DVR with floating ip
* Wait for vswitchd to add interfaces in native ovsdb
* ovsdb: Don't let block() wait indefinitely
* Filter HA router without HA port bindings after race conditions
* Don't load DNS integration in l3_router_plugin
* Added test cases for DVR L3 schedulers
* Don't use exponential back-off for report_state

8.2.0
-----

* Move state reporting initialization to after worker forking
* Fix broken URL in Mitaka Neutron release note
* Avoid duplicate ipset processing for security groups
* Macvtap: Allow noop alias as FW driver
* Fix of ping usage in net_helpers.async_ping()
* Lower ML2 message severity
* Fix OVSBridge.set_protocols arg
* Allow tox to be run with python 3
* Fix misuse of assertTrue in L3 DVR test case
* Fix update of shared QoS policy
value if not given by user * Check compatibility when auto schedule ha routers * Imported Translations from Zanata * DVR: handle floating IP reassociation on the same host * Not auto schedule router when sync routers from agent * Remove unhelpful test of oslo.service ServiceLauncher * Revert "Fix _get_id_for" * When deleting floating IP catch PortNotFound * Fix unicode bug for password-authenticated BGP peer * DVR: Ensure that only one fg device can exist at a time in fip ns * Allow min_l3_agents_per_router to equal one * Skip INVALID and UNASSIGNED ofport in vlan restore * sriov: Fix macvtap vf interface regex pattern * Fix _get_id_for * OVS: UnboundLocalError on switch timeout fixed * Cap tempest to < 12.1.0 * Imported Translations from Zanata * Imported Translations from Zanata * tests: clean up designate client session mock on test exit * ovs: set device MTU after it's moved into a namespace * Fix designate dns driver for SSL based endpoints * add PROTO_NUM_IPV6_ICMP for _validate_port_range * Update ml2 delete_subnet to deallocate via ipam * Fixed help messages for path_mtu and global_physnet_mtus options * DVR: Fix allowed_address_pair port binding with delayed fip * After a migration clean up the floating ip on the source host * Updated from global requirements * OVS Mech: Set hybrid plug based on agent config * Refactor the rpc callback version discovery mechanism 8.1.2 ----- * Revert "DVR: Clear SNAT namespace when agent restarts after router move" * qos: Add API test for shared policy * Fix get_free_namespace_port to actually avoid used ports * Force "out-of-band" controller connection mode * Fix validation of floating-ip association * DVR: Fix check multiprefix when delete ipv4 router interface * Pass ha_router_port flag for _snat_router_interfaces ports * Fixed help messages for path_mtu and global_physnet_mtus options * Fix help message for external_network_bridge * Fix bgp-speaker-network-remove error * Make deepcopy of update body in API layer * OVS: compare names when checking devices both added and deleted * Adopt to config_dir option being a list and not a string * [qos] section is missing from neutron.conf * fix wrong default value of qos.notification_drivers * neutron-db-manage: revision: fail for --autogenerate and branch * Fix invalid mock name in test_ovs_neutron_agent * Fix keepalived functional tests * Avoid testing oslo.service library internals * Only load timestamp service plugin in timestamp tests * Fetch router port subnets in bulk * Correct the interval between two reports * Insert validation with request params for HTTP DELETE * Make auto allocate plugin safe for unit/func tests * Check for existence of snat port before deleting it * Define localstatedir for ovs compilation * De-dup user-defined SG rules before iptables call * Change get_root_helper_child_pid to stop when it finds cmd * Add a missing address-scope extension check * Skip firewall blink test for ovs-fw * Fix regexp for ss output * fullstack: Use noop firewall * Always call ipam driver on subnet update * DVR: Ensure fpr and rfp devices are configured correctly * DVR: Use existing IPDevice to add address on FIP VETH * Migration for qospolicyrbacs has hardcoded InnoDB * Call ext_manager.delete_port on port removal * Improve handle port_update and port_delete events in ovs qos agent 8.1.1 ----- * DVR: Use IPDevice class consistently * Fix Windows IPDevice.device_has_ip racefulness * Avoid L3 agent termination without server * Fix for 'ofport' query retries during neutron agent start * DVR: Moving 
router from dvr_snat node removes the qrouters * OVS: Add support for IPv6 addresses as tunnel endpoints * DVR: Fix TypeError in arp update with allowed_address_pairs * DVR: Handle unbound allowed_address_pair port with FIP * Fix SR-IOV binding when two NICs mapped to one physnet * Enforce UUID of port/subnet ID for router interfaces * Restart dsnmasq on any network subnet change * Fix broken Tempest conf options in API tests * Fix update target tenant RBAC external path * Add semaphore to ML2 create_port db operation * Imported Translations from Zanata * Fix test failure against latest oslo.* from master * Add exponential back-off RPC client * Use correct session in update_allocation_pools * Preserve backward compatibility with OVS hybrid plugging * DVR: Clear SNAT namespace when agent restarts after router move * Don't log warning for missing resource_versions * Updated from global requirements * IPtables firewall prevent ICMPv6 spoofing 8.1.0 ----- * Revert "Improve performance of ensure_namespace" * OVS: Add mac spoofing filtering to flows * Imported Translations from Zanata * Support interface drivers that don't support mtu parameter for plug_new * LinuxBridge agent's QoS driver bw limit for egress traffic * Imported Translations from Zanata * Updated from global requirements * Clear DVR MAC on last agent deletion from host * Add an option for WSGI pool size * Don't disconnect br-int from phys br if connected * Fix deprecation warning for external_network_bridge * Add ALLOCATING state to routers * Cleanup stale OVS flows for physical bridges * Notify resource_versions from agents only when needed * ADDRESS_SCOPE_MARK_IDS should not be global for L3 agent * Wrap all update/delete l3_rpc handlers with retries * Values for [ml2]/physical_network_mtus should not be unique * Use new DB context when checking if agent is online during rescheduling * ovsfw: Load vlan tag from other_config * Imported Translations from Zanata * firewall: don't warn about a driver that does not accept bridge * Add uselist=True to subnet rbac_entries relationship * Iptables firewall prevent IP spoofed DHCP requests * Fix race conditions in IP availability API tests * Switched from fixtures to mock to mock out starting RPC consumers * Imported Translations from Zanata * Fix zuul_cloner errors during tox job setup * Return oslo_config Opts to config generator * Refactor and fix dummy process fixture * Switches metering agent to stateless iptables * Remove obsolete keepalived PID files before start * Add IPAllocation object to session info to stop GC * Ensure metadata agent doesn't use SSL for UNIX socket * DVR: Increase the link-local address pair range * SG protocol validation to allow numbers or names * L3 agent: match format used by iptables * Use right class method in IP availability tests * Make L3 HA interface creation concurrency safe * ovsfw: Remove vlan tag before injecting packets to port * Imported Translations from Zanata * test_network_ip_availability: Skip IPv6 tests when configured so * Retry updating agents table in case of deadlock * Allow to use several nics for physnet with SR-IOV 8.0.0 ----- * Support Routes==2.3 * port security: gracefully handle resources with no bindings * Ignore exception when deleting linux bridge if doesn't exist * Don't delete br-int to br-tun patch on startup * Constraint requirements using mitaka upper-constraints.txt file * functional: Update ref used from ovs branch-2.5 * Imported Translations from Zanata * DVR: rebind port if ofport changes * ovs-fw: Mark conntrack 
entries invalid if no rule is matched * l3: Send notify on router_create when ext gw is specified * Imported Translations from Zanata * Linux Bridge: Add mac spoofing filtering to ebtables * Imported Translations from Zanata * api tests: Check correct extensions 8.0.0.0rc2 ---------- * Imported Translations from Zanata * Fix setting peer to bridge interfaces * Updated from global requirements * Skip fullstack L3 HA test * Imported Translations from Zanata * Fix reference to uninitialized iptables manager * Move db query to fetch down bindings under try/except * Close XenAPI sessions in neutron-rootwrap-xen-dom0 * Watch for 'new' events in ovsdb monitor for ofport * conn_testers: Bump timeout for ICMPv6 echo tests * Removes host file contents from DHCP agent logs * Imported Translations from Zanata * Imported Translations from Zanata * Ovs agent can't start on Windows because of validate_local_ip * Imported Translations from Zanata * Accept icmpv6 as protocol of SG rule for backward compatibility * Outerjoin to networks for port ownership filter * Imported Translations from Zanata * Update devstack plugin for dependent packages * Remove test_external_network_visibility * Update .gitreview for stable/mitaka 8.0.0.0rc1 ---------- * tests: register all objects before validating their hash versions * Prevent all primary keys in Neutron OVOs from being updated * De-dup conntrack deletions before running them * Imported Translations from Zanata * Fix auto_allocated_topology migration with PostgreSQL * Fix add_is_default_to_subnetpool migration * Add custom SQLAlchemy type for MACAddress * Improve release notes for dvr fixes * Add custom SQLAlchemy type for CIDR * Add custom SQLAlchemy type for IP addresses * Fixes "OVS Agent doesn't start on Windows" * RBAC: Fix port query and deletion for network owner * DVR: Agent side change for live migration with floatingip * DVR:Pro-active router creation with live migration * Reset RNG seed with current time and pid for each test started * Create a hook in base object to modify the fields before DB operations * SG PRECOMMIT_CREATE should be triggered only once * Extend dicts with original model in create/update * Imported Translations from Zanata * Update network object in DHCP agent with router interface changes * Block delete_(network|subnet) transactioned calls * Imported Translations from Zanata * ADD API tests for network ip availability * Pecan: Allow unauthenticated version listing * L3HA: Do not wrap create/delete in transaction * Add metrics notifier to Pecan * Fix latest doc errors that crept in * Add remote vs local FAQ for callbacks * Revise deputy instructions to include deprecation warnings * Add deprecation tag * register the config generator default hook with the right name * Stops update_network handling updates it shouldn't * Fix PUT tag failure * Remove unused pngmath Sphinx extension * fullstack: increase test timeout * DHCP: Downgrade 'network has been deleted' logs * Fix the context passed to get_subnets in _validate_routes * Add reno for deprecation of max_fixed_ips_per_port * ML2: Downgrade 'no bound segment' warning * Delete 118~ API tests from Neutron * Using LOG.warning replace LOG.warn * policies: add an official 'l3-bgp' bug tag * Check tap bridge timestamps to detect local changes * Remove unused Tempest AdminManager * Construct exceptions before passing to retryrequest * Copy tempest.common.tempest_fixtures in to Neutron * Queries for DVR-aware floating IP next-hop lookups * Adds unit tests for external DNS integration * 
Fixes external DNS driver failure with Python 3.4 * Updates external DNS service if IP address changes * Add logging statements to help debug L3 sync * Only clear dns_name when user specifies parameter * Catch DB reference errors in binding DVR ports * Add BGP Callback and agent RPC notifcation implementations * Set DEFAULT_NETWORK_MTU to 1500 and use it * Downgrade network not found log in DHCP RPC * Downgrade "device not found" log message * Add global_physnet_mtu and deprecate segment_mtu * Ensures DNS_DRIVER is loaded before it is checked * Add Nova notifier hook calls to pecan * Add fip nat rules even if router disables shared snat * Add timestamp changed-since for core resources * Security groups: ensure correct ID is passed to exception * Pecan routing for agent schedulers * Use testscenarios for OVS flow tests * Tag the alembic migration revisions for Mitaka * Remove unused -constraints tox targets * constraints: fixed typo in tox_install.sh * security-groups: Add ipv6 support to ovs firewall * Fix tempest lib import in API tests * Delay description association proxy construction * Release notes: fix broken release notes * Fix API test for external subnet visibility * Release notes: prelude items should not have a - (aka bullet) * Use floating IP to connect different address scopes * Add a description field to all standard resources * Add timestamp for neutron core resources * Skip racey BGP tests * Continue the fwaas decoupling and cleanup * Remove obsolete todo * Nit: Occurances of OpenStack * Make all tox targets constrained * reno: Fix bad yaml in reno that ruins html output * Mock out database access for QoS policy object interface tests * Fix branch order when upgrading to alembic milestone * Fix pecan collection->plugin map for extensions * Autogenerate macvtap agent config file * Updates to Bug Deputy section of Bugs DevRef * hacking: remove oslo.* import check * devref: added details on the new l2 agent API mechanism * Revert "Revert "Functional test for address scope"" * Correct Pecan extensions test 8.0.0.0b3 --------- * Add Queries For BGP Route Lookups * Fix docs tox target for local runs * Improve logging for port binding * Allow auto-allocate's dry-run API call to work for regular users * Make OVS agent tunnel configuration logging less scary * make/update_subnetpool_dict call _dict_extend * Check if plugin supports starting rpc listeners * Make run_ofctl check for socket error * unbreak unit test caused by c5fa665de3173f3ad82cc3e7624b5968bc52c08d * Add filter for resource tag * Add tag mechanism for network resources * Make API framework more flexible for various extensions * Moved CORS middleware configuration into oslo-config-generator * Objects DB api: added composite key to handle multiple primary key * IP Availability: remove unused imports * BGP: remove unnecessary configuration setting * Add support for QoS for LinuxBridge agent * RPC Callback rolling upgrades reporting, and integration * Set veth_mtu default to 9000 * Provide dry-run flag to validate deployment requirements * Use network RBAC feature for external access * Deprecate network_device_mtu * Catch DBDuplicateEntry errors in RBAC code * DVR:Remove unwanted check in _get_dvr_service_port_hostid * Make agent interface plugging utilize network MTU * Removed static reference to LinuxBridge in logging * Add API extension for reporting IP availability usage statistics * Updated from global requirements * Filter HA routers without HA interface and state * Translations: use neutron._18n instead of 
neutron.18n * Collect details on ARP spoof functional failures * Revert "Functional test for address scope" * Remove effectively empty directories * Added agent specific API support to L2 extensions * Qos policy RBAC DB setup and migration * macvtap: Macvtap L2 Agent * ML2: Increase segment_mtu from 0 to 1500 bytes * Switch to using in-tree tempest lib * Catch DBReferenceError in HA router race conditions * Catch PortNotFound after HA router race condition * Change the exception type from ValueError to IpamValueInvalid * Fix test_get_device_id() failure on OSX * Make __table_args__ declarative in RBACColumns * Fix tox -e docs * Override addOnException to catch exceptions * BGP Dynamic Routing: introduce BgpDriver * Update default gateway in the fip namespace after subnet-update * Update docstring in test/tools.py * Pecan: filter items need type conversion * Pecan: use reservations in quota enforcement hook * Add use_default_subnetpool to subnet create requests * Ensure DVR unit tests use '/tmp' directory * API test for get-me-network * ovs-fw: Enhance port ranges with masks * Fix sanity check --no* BoolOpts * Correlate address scope with network * Fix generate_records_for_existing in migrations * Revert "tests: Collect info on failure of conn_tester" * Updated from global requirements * Revert the unused code for address scope * Deprecate 'force_gateway_on_subnet' configuration option * Fix 'TypeError: format requires a mapping' in OVS agent * Allow non-admins to define "external" extra-routes * Don't assume simplejson in pecan exception catch * IPAM: add missing translation * Functional test for address scope * deprecated: Raise message * Allow address pairs to be cleared with None * Document the ability to load service plugins at startup * .testr.conf: revert workaround of testtools bug * Add fullstack resources for linuxbridge agent * Pecan: get rid of member action hook * Pecan: replace dashes with underscores on controller lookup * Fix for adding gateway with IP outside subnet * Allow other extensions to extend Securitygroup resources * Adopt Grafana to plot Neutron Failure Rates * BGP Dynamic Routing: introduce BgpDrAgent * Add missing character * stadium: revise the introduction to the document * stadium: Add a guideline about contributor overlap * Security group: use correct logging format * Update devstack hooks to work with pecan jobs * Fix typo error for wrong msg format when CallbackFailure * Stop using non-existent method of Mock * Fix GROUP BY usage for PostgreSQL in migrations * Add bug tag for auto allocated topology * macvtap: ML2 mech driver for macvtap network attachments * Don't disable Nagle algorithm in HttpProtocol * Preserve subnet_create behavior in presence of subnet pools * Open vSwitch conntrack based firewall driver * Add VLAN tag info to port before applying SG initial setup * QOS: get rid of warnings for unit tests * Remove NEC plugin tables * DHCP: release DHCP port if not enough memory * Cleanup unused conf variables * Deprecate ARP spoofing protection option * tests: Use constants for icmp and arp in conn_testers * Add to the neutron bug deputy directions * L3 agent: log traceback on floating ip setup failure * Add the rebinding chance in _bind_port_if_needed * Pecan: implement DHCP notifications in NotifierHook * Pecan: Always associate plugins with resource * Remove deprecation warnings * Get rid of UnionModel for RBAC * Add necessary executable permission * Updated from global requirements * Add precommit_XXX event for security group and rules * Give 
the qos extension a friendly name * tests: Collect info on failure of conn_tester * Address masking issue during auto-allocation failure * Fix typo 'indepedent' in alembic_migration.rst * BGP Dynamic Routing: introduce BgpDrScheduler model * macvtap: Common functions and constants * Fix typo of dnsmasq * add arp_responder flag to linuxbridge agent * Switch "dsvm-functional:" into same pattern as constraints * Add BGP Dynamic Routing DB Model and Basic CRUD * fullstack: Gracefully stop neutron-server process * Remove VPN installation plumbing * Remove vpnaas tests from the Neutron API tree * Make netns_cleanup to purge resources of selected agent only * Add extension requirement in port-security api test * ML2: delete_port on deadlock during binding * Start using neutron-lib for shared constants and exceptions * Remove fwaas tests from the Neutron API tree * Remove office-hours from Polcies docs index * Add the ability to load a set of service plugins on startup * ML2: Configure path_mtu to default to 1500 bytes * Support MTU advertisement using IPv6 RAs * Pecan: wrap PUT response with resource name * Pecan: Controller and test refactor * stadium: Add a guideline related to project scope * stadium: Propose kuryr as an independent project * stadium: Separate proprietary interface projects * stadium: Add python-neutron-pd-driver * stadium: Group lbaas repos together * Remove PTL office hours * Bring back dvr routers autoscheduling * Fix getting agent id in linuxbridge agent * RPC Callback rolling upgrades logic * OVO common enum class for IPv6 modes * Move check_dvr_serviceable_ports_on_host() to dvr scheduler * L3: enable plugin to decide if subnet is mandatory * Implement 'get-me-a-network' API building block * Test helpers to facilitate testing BGP dynamic routing * Fix logging error for Guru Meditation Report * HA for DVR - Neutron Server side code changes * IP_LIB: fix indentations * idlutils: add in missing translations * sub_project_guidelines: Document the procedure to make a branch EOL * sub_project_guidelines: Remove "add tarball to launchpad" step * sub_project_guidelines: Update after direct-release default * Only restrict gateway_ip change for router ports * Make add_tap_interface resillient to removal * Updated from global requirements * Remove flavors from the list of extensions provided by core plugin * Log warning message if get_subnet_for_dvr fails * devstack: Fix check for blank prefix arg * ML2: Call _dict_extend in create_(net|port) ops * Pecan: add tenant_id to quota resource * Prevent binding IPv6 addresses to Neutron interfaces * Moving Common Agent into separate module * Add hacking check for assertEqual HTTP code * Pecan: Fix association of plugins with resources * Add missing periods * Postpone heavy policy check for ports to later * LB agent: Downgrade network not available message * Imported Translations from Zanata * Call Ryu's clean up function when ovs_neutron_agent.main terminates * Protect 'show' and 'index' with Retry decorator * Update related router when subnetpool change scope * Only ensure admin state on ports that exist * stadium: Update list of Neutron sub-projects * ML2: Update help text for path_mtu * Correct dev documentation for has_offline_migrations command * Reno note regarding OVS DVR agent failure on startup * Fix regression in routers auto scheduling logic * Compile OVS for functional tests * Trigger dhcp port_update for new auto_address subnets * Correction of spelling * Get tempest via zuul-cloner if needed and it is available * Fix typo in 
SecurityGroup HTTP error message * DHCP: fix regression with DNS nameservers * Add address scopes support to the L3 agent * Get rid of marshall_fdb_entries * Correct insufficient name for external process in manager log * Fix port relationship for DVRPortBinding * Fix params order in assertEqual * Address i18n related deprecation warnings * nova-notifier: Change warning to debug * Warn about a gotcha in the sub-project process * ML2: update port's status to DOWN if its binding info has changed * Fix remove_router_from_l3_agent for 'dvr' mode agent * DHCP: add in missing space at the end of the line * Fix bug when enable configuration named dnsmasq_base_log_dir * DVR: avoid race on dvr serviceable port deletion * Remove bindings of DVR routers to L3 agents on compute nodes * Only prevent l3 port deletion if router exists * Unmarshall portinfo on update_fdb_entries calls * Remove dead method delete_dvr_port_binding * SR-IOV: Agent remove listen to network delete event * Use a thinner try/except in _build_cmdline_callback * Fail if required extensions are missing * Add UniqueConstraint in L3HARouterAgentPortBinding * Delete Tempest fork, import from tempest and tempest_lib * Add relationship between port and floating ip * Update translation setup for neutron subprojects * Fix required extensions mix-up * Uniquely identify tunnel interfaces for fullstack tests * DVR: Remove unwanted call to _get_routers while arp update * lb: ml2-agt: Separate AgentLoop from LinuxBridge specific impl * item allocator should return same value for same key * Set default value for dnsmasq_local_resolv to False * Improve autonested_transaction * Rename confusing dvr_deletens_if_no_port * Bump AgentExtRpcCallback version to 1.1 * Raise RetryRequest on policy parent not found * create_object should not add an ID if not present in the DB model * Add generated port id to port dict * Updated from global requirements * Support api_workers option when using pecan * Elevate context for router lookups during floating IP association * Update alembic migration documentation * Add separate transport for notifications * Neutron review tool use message instead of topics for bugs * Increase default IPv6 router advertisement interval * Updated from global requirements * l3_db: Check dns-integration extension * Add dns_db to models/head.py * devref: Fix a typo in i18n.rst * Replace exit() by sys.exit() * Add missing index entry for external dns integration * l2pop rpc: Add a unit test for dualstacked port * Pecan: remove deprecated warning * RPC worker support for pecan server * Don't decide web_framework before config parse * Remove unwanted NOTE from dvr_local_router * DVR: Fix Duplicate IPtables rule detected warning message in l3agent * Make advertisement intervals for radvd configurable * Fix module's import order * neutron-db-manage: add has_offline_migrations command * Add popular IP protocols for security group * Decorate methods in ExtensionDescriptor with abstractmethod * Updated from global requirements * Remove obsolete plugin stuff * External DNS driver reference implementation * Move helper methods to create resorces to test_base * db_api: handle db objects that don't have 'id' as primary key * Introduce new queries to return DVR routers for a host * Refactor remove_router_interface() for DVR * sriov-mech: Introduce a new VIF type for PF vnic type * Ensure that tunnels are fully reset on ovs restart * OVS agent should fail if it can't get DVR mac address * Python3: Fix using dictionary keys() as list * Add 
network_update RPC into SR-IOV agent * Add L3 Notifications To Enable BGP Dynamic Routing * Fix check in _validate_ip_address() to fail on OSX * Remove floatingip address only when the address has been configured * Use tools_path/venv environment variables in install_venv * fix _validate_shared_update for dvr router ports * Rename new_network to new_network_id * DVR: Add action specific functions for arp_entry_update * Fixed qos devstack service name (should be q-qos) for -plus gate hook * bump the min tox version to 2.3.1 * Updated from global requirements 8.0.0.0b2 --------- * Add more log when dhcp agent sync_state * Imported Translations from Zanata * Fix docstring for check_dvr_serviceable_ports_on_host * Fixes typos Openstack -> OpenStack * Add tests for RPC methods/classes * OVS agent set max number of attempts to sync failed devices * Don't sync all devices when some fail * Make Neutron attempt to advertise MTUs by default * Optimize get_ports_on_host_by_subnet() dvr rpc handler * Do not remove router from dvr_snat agents on dvr port deletion * Make object creation methods in l3_hamode_db atomic * Remove dead method _get_router_ids * DVR: Optimize getting arp entry info * Add support for neutron-full and api jobs using pecan to gate_hook * Updated from global requirements * Move L2populationDbMixin to module-level functions * Fix L3 HA with IPv6 * Fix the duplicated references * Add opnfv tag to the list of auto-complete tags * Refactor router delete processing * Revert "Change function call order in ovs_neutron_agent." * Remove LinuxBridge manager get_local_ip_device arg * devref: added more details on rolling upgrade for notifications * Filter by device_owner instead of iterating by all subnet ports * Make security_groups_provider_updated work with Kilo agents * Introduce new query to return all hosts for DVR router * fix get_ha_sync_data_for_host for non-dvr agent * dhcp: handle advertise_mtu=True when plugin does not set mtu values * Retry port create/update on duplicate db records * doc: Update Ryu Ishimoto's IRC nick * Make neutron pecan server an option instead of binary * DVR: when updating port's fixed_ips, update arp * Fix Linux bridge test_report_state_revived failure on OSX * Prevent PD subnets with incorrect IPv6 modes * Added Keystone and RequestID headers to CORS middleware * Unify exceptions for assign router to dvr agent * Unify using assertIsInstance * HACKING: update HACKING.rst file to include latest changes * devstack: use stevedore entry point for flavor service plugin * Do not prohibit VXLAN over IPv6 * Updated from global requirements * tests: stop validating neutronclient in neutron-debug tests * Remove 'validate' key in 'type:dict_or_nodata' type * ML2: verify if required extension drivers are loaded * Add --dry-run mode to code review abandon tool * Fix typo in test path in Testing.rst * Fix floatingip status for an HA router * Fix URLs for pep8, and unit tests jobs * Static routes not added to qrouter namespace for DVR * Pass environment variables of proxy to tox * Pecan: fix quota management * Pecan: Fixes and tests for the policy enforcement hook * gate_hook: add support for dsvm-plus job type * Scope get_tenant_quotas by tenant_id * Add 'ovs' to requirements.txt * Fix params order in assertEqual * Use admin context when removing DVR router on vm port deletion * eliminate retries inside of _ensure_default_security_group * Register RA and PD config options in l3-agent * Provide kwargs for callback abort * Pecan controller loads service plugins * Make 
sure datapath_type is updated on bridges changed * Log INFO message when setting admin state up flag to False for OVS port * Fix regression with unbound ports and l2pop * L3 agent: paginate sync routers task * Remove duplicate for check_ports_exist_on_l3agent * ML2: Simplified boolean variable check * Pecan: Streamline request body processing * make floating IP specification test robust to races * Fix get_subnet_for_dvr() to return correct gateway mac * Updated from global requirements * Ensure agent binding modules are loaded * portbindings: use constants for extension keys * Add README with links on how to create release notes * Ensure that decomposed plugins do not break * LBaaS tests code removal * Make neutron-debug command follow cliff command convention * Rename _get_vm_port_hostid in dvr to reflect the right functionality * DVR: Rename dvr_vmarp_table_update * Remove unnecessary argument in limit manage * remove openstack-common.conf * Move notifications before DB retry decorator * Create a routing table manager * Fix uuid passing in disable_isolated_metadata_proxy * Fix incorrect classmethod declaration * Add unit test cases for linuxbridge agent when prevent_arp_spoofing is True * Adopt oslotest BaseTestCase as a base class for DietTestCase * Use oslo.utils.reflection extract the class name * Utils: Add missing translation to exception * Unify assertEqual for empty usages * SR-IOV: Fix macvtap assigned vf check when kernel < 3.13 * Delete metadata_proxy for network if it is not needed * Remove references to model_base through models_v2 * Allow get_unused_ip method to skip v6 and fix iter * Revert "Revert "Revert "Remove TEMPEST_CONFIG_DIR in the api tox env""" * Fix meter label rule creation * Remove l2pop _get_port_infos method * Remove L2populationDbMixin parent * devstack: don't enable qos service with the plugin * Add test for Neutron object versions * SR-IOV agent: display loaded extensions * Imported Translations from Zanata * Allow tox to pass more arguments to ostestr command * Add systemd notification after reporting initial state * Avoid duplicating tenant check when creating resources * Add extension_manager and support for extensions in linuxbridge agent * Fix API tests * Rule, member updates are missed with enhanced rpc * radvd prefix configuration for DHCPV6_Stateful RA * DVR: Rename dvr_update_router_addvm function * Support rootwrap sysctl and conntrack commands for non-l3 nodes * Remove openstack.common._i18n from Neutron * Kilo initial migration * Check missed ip6tables utility * Keep py3.X compatibility for urllib * Updated from global requirements * Misspelling in message * Mitigate restriction for fixed ips per dhcp port * dhcp: Default to using local DNS resolution * Fixing the deprecated library function * Remove unused variable use_call in ovs-agent * Wrong usage of "an" * Wrong usage of "a" * Trival: Remove unused logging import * Allow to control to use constraint env for functional jobs * DVR: optimize check_ports_exist_on_l3_agent() * Don't call add_ha_port inside a transaction * Call _allocate_vr_id outside of transaction * Change log level from error to warning * Fix Security-rule's port should not set to 0 when Protocol is TCP/UDP * Add constant to L3 extension for floating ips * dibbler: fix import order * Add address scope to floating IPs in RPC response to L3 agent * Add firewall blink + remote SG functional tests * Add test cases to testing firewall drivers * Ignore non rules related qos_policy changes * Remove check on dhcp enabled subnets 
while scheduling dvr * Run functional gate jobs in a constrained environment * update docstring for get_ports_on_host_by_subnet * Correct state_path option's help string * Updated from global requirements * Restore _validate_subnet/uuid_list not to break subproject gates * Delete test_restart_l3_agent_on_sighup * DVR: Remove get_port call from dvr_update_router_addvm * DVR:Fix _notify_l3_agent_new_port for proper arp update * Add tests that constrain db query count * Don't raise if polling manager is running when stopped * Add abstractmethod to FirewallDriver abstract class * Add a link of availability zone document into releasenote * Corrected wrong ethertype exception message * Misspelling in message * Use the constant HOST_ID instead of 'binding:host_id' * Force L3 agent to resync router it could not configure * Provide pointer for ML2 extension manager to effective guide * Add notes on loading strategies for ORM relationships * Enable Guru Meditation Reports for other refarch agents * Updated from global requirements * Catch known exceptions during deleting last HA router * Add new troubleshooting bugs tag * Add to deprecate OFAgent in release note * Refactor the subnetpools API tests * Clean up code for bug1511311 * Kill the vrrp orphan process when (re)spawn keepalived * reject leading '0's in IPv4 addr to avoid ambiguity * Remove duplicated code in attribute.py * QOS: add in missing translation * Separate the command for replace_port to delete and add * Fix comparison of Variant and other type in test_model_sync * Add check that list of agents is not empty in _get_enabled_agents * Remove unused parameter from _update_router_db method * Use a joined relationship for AZ info on routers * Cleanup all the release notes * Improve tox to show coverage report on same window * Tune _get_candidates for faster scheduling in dvr * Updating devref for networking-onos project * Use a joined relationship for AZ info on networks * Correct return values for bridge sysctl calls * Batch db segment retrieval * Separate rbac calculation from _make_network_dict * Add explicit address family to AddressScope * DVR: handle dvr serviceable port's host change * Adding a VNIC type for physical functions * Add functional test for availability_zone support * OVS: Reorder table-id constants * Deprecated tox -downloadcache option removed * API: _validate_ip_address should not raise an exception * Removing adv svcs dependencies on neutron * Return availability_zone_hints as list when net-create * Decompose OFAgent mechanism driver from neutron tree completely * Ignore possible suffix in iproute commands * Add option for nova endpoint type * Force service provider relationships to load * Add linuxbridge job to the dashboard * ML2: Add tests to validate quota usage tracking * Updated from global requirements * Add explanations and examples to TESTING.rst * Added CORS support to Neutron * L3 DB: set get_assoc_data to be an internal method * ovs_vhostuser: fix vhostuser_socket_dir typo * fix call which is only specific to enhanced_rpc * select router with subnet's gateway_ip for floatingip * Refactor OVS-agent tunnel config validate * Make keepalived add_vip idempotent * Fix timestamp in RBAC extension * Document relationship between ways of documenting new stuff * lb: Correct String formatting to get rid of logged ValueError * Skip keepalived_respawns test * Add release note covering keystoneauth and v3 * Pull project out of request in addition to tenant * Don't emit confusing error in netns-cleanup * Add address 
scope to ports in RPC response to L3 agent * Updated from global requirements * Avoid full_sync in l3_agent for router updates * move usage_audit to cmd/eventlet package * Use keystoneauth instead of keystoneclient * Deprecate _ builtin translation function * Use _ from neutron._i18n * Using substitution for Python String * Tox: Remove fullstack env, keep only dsvm-fullstack * Fix some inconsistency in docstrings * Set timetable for removal of oslo.messaging.notify.drivers * Delete stale neutron-server manual * Final decomposition of the nuage plugin * Final decomposition of Brocade vendor code * Trivial typo fix in LinuxBridge dashboard * Add a script to create review dashboard for a milestone * Remove Neutron core static example configuration files - addition * test_migrations: Avoid returning a filter object for python3 * Cleanup veth-pairs in default netns for functional tests * Reuse constants defined in attributes * Add availability_zone support for router * Fix default RBAC policy quota * Moved fullstack test doc content to TESTING.rst * Allow multiple imports for both ways of doing i18n * [policy] Clarify bug deputy does not require core bit * Run NOT NULL alterations before foreign key adds * Do not autoreschedule routers if l3 agent is back online * Add instrumentation devref, Part I * Updated from global requirements * Hyper-V: remove driver from the neutron tree * Fix typo in Docstring * Remove lbaas cruft from neutron gate_hook * Make port binding message on dead agents clear * Notify about port create/update unconditionally * HACKING: fix edge case with log hints * I18n related guideline for subprojects * Optimize "open" method with context manager * L3: add missing space to log message * Revert "Revert "OVS agent reacts to events instead of polling"" * XenAPI: Fix netwrap to support security group * Move i18n to _i18n, as per oslo_i18n guidelines * Clean up FIP namespace in DVR functional tests * devref: Rolling upgrade mechanism for rpc-callbacks * Remove version from setup.cfg * DVR:don't reschedule the l3 agent running on compute node 8.0.0.0b1 --------- * Add native of_interface fullstack tests * Disallow updating SG rule direction in RESOURCE_ATTRIBUTE_MAP * l3_db: it updates port attribute without L2 plugin * In port_dead, handle case when port already deleted * Change check_ports_exist_on_l3agent to pass the subnet_ids * lb: avoid doing nova VIF work plumbing tap to qbr * Remove Neutron core static example configuration files * Update 'Contributing Extensions' devref for Mitaka * HACKING: align the underline text and header * Imported Translations from Zanata * Remove transparent VLAN support from base plugin * Automatically generate neutron core configuration files * Support Unicode request_id on Python 3 * Stop using deprecated timeutils.total_seconds() * Correct unwatch_log to support python <= 2.7.5 * Move a note to bridge_lib * Add Guru Meditation Reports support to Neutron services * Fix alignment in message and remove unused module * Update toctree of neutron document * Don't drop ARP table jump during OVS rewiring * Remove useless lb-agent remove_empty_bridges * Delete HA network when last HA router is deleted * Change instances of Openstack to OpenStack * force releasenotes warnings to be treated as errors * Add availability_zone support for network * fix some misspellings * Freescale ML2 driver code complete decomposition * Add Incomplete state to list of acceptable states for RFE bugs * Fix typo for OVSDB * Clarify how we milestones are assigned * Support 
for IPv6 RDNSS Option in Router Advts * tox: pass TEMPEST_CONFIG_DIR envvar into api target environment * Wait for the watch process in test case * Add UnionModel support to filter query generator * Minor doc fix in alembic_migrations.rst * Some minor misspellings in comment block * Optimize router delete execution * Deprecate l3-agent router_id option * Make Neutron resources reference standard attr table * devref: add upgrade strategy page * Remove duplicate deprecation messages for quota_items option * Log error instead of exception trace * Refactor OVS-agent init-method * neutron-db-manage: mark several options as deprecated * ovs: Make interface name hashing algorithm common and extend it * Check gateway ip when update subnet * Use diffs for iptables restore instead of all rules * IPAM: add in missing exception translations * Remove BigSwitch plugin and driver * Add Access Control bug tag * Add index entry to vhost documentation * Make fullstack test_connectivity tests more forgiving * Fix get_subnet_ids_on_router in dvr scheduler * Remove misplaced copyright attribution * Fix misspelled word in docstring * neutron-db-manage: expose alembic 'heads' command * Reorganize and improve l3_agent functional tests * Make sure we return unicode strings for process output * Use compare-and-swap for IpamAvailabilityRange * Replace neutron-specific LengthStrOpt with StrOpt * Fix use of fields argument in get_rbac_policies * Updated from global requirements * Fix dashboard graphite URLs * Fix Neutron flavor framework * Keep reading stdout/stderr until after kill * Updated from global requirements * Fix the end point test for client * IPAM: fix 'enable-dhcp' with internal driver * Update HA router state if agent is not active * Send 50% less debug information when executing cmd * Fix alignment in message * Datapath on L2pop only for agents with tunneling-ip * Add hosted agents list to dhcp agent scheduler * Add vhost-user support via ovs capabilities/datapath_type * Remove deprecated use_namespaces option * Resync L3, DHCP and OVS/LB agents upon revival * Updated from global requirements * Add networking-infoblox sub-project * Firewall: fix typo * Fix the indentation issue * Elaborate how priorities are assigned to blueprints * Don't add default route to HA router if there is no gateway ip * Add a better description for notification_driver * Use DEVICE_OWNER_* for 'network:*' constants * Add the missing arg of RetryRequest exception in _lock_subnetpool * Update networking-powervm sub-project docs * Remove unused delete_dvr_mac_address method * Add fullstack testing doc content * Fix releasenotes/../unreleased.rst * Avoid race condition for reserved DHCP ports * Revert "Move dhcp_lease_duration into DHCP agent config options list" * sub_projects.rst: Update midonet functionalities * Switch to using neutron.common.utils:replace_file() * Trivial fix in ml2 conf * Remove the useless l3plugin check in l3_rpc.py * Fix some reST field lists in docstrings * Use DEVICE_OWNER_COMPUTE constant everywhere * Fix broken references in doc * Skip bindings with agent_id=None * Updated from global requirements * Use admin context when requesting floating ip's router info * Cleanup dhcp namespace upon dhcp setup * Use SIGUSR1 to notify l3 agent of changing prefix file * Last sync from oslo-incubator * Remove SysV init script for neutron-server * Refactor test_server functional tests * Undeprecate force_gateway_on_subnet option * Move dhcp_lease_duration into DHCP agent config options list * Add transaction for 
setting agent_id in L3HARouterAgentPortBinding * Check missed IPSet utility using neutron-sanity-check * Change the repos from stackforge to OpenStack * Revert "Revert "Remove TEMPEST_CONFIG_DIR in the api tox env"" * Require tox >= 2.0 * Use assertFalse(observed) instead of assertEqual(False, observed) * Fix heading markers for better docment toc view * Clarify that RFE bug reports should not have an importance set * Remove TEMPEST_CONFIG_DIR in the api tox env * Revert "Remove TEMPEST_CONFIG_DIR in the api tox env" * Lower l2pop "isn't bound to any segement" log to debug * DVR: remove redundant check * Disable IPv6 on bridge devices in LinuxBridgeManager * More graceful ovs-agent restart * sriov: add extensions option to configuration file * Fix dvr_local_router.floating_ip_added_dist failure after agent restart * Don't use duplicate filter names for functional testing * Replace get_all_neutron_bridges by get_deletable_bridges * Revert "OVS agent reacts to events instead of polling" * configure_for_func_testing.sh: Fix arguments for get_packages * Add call to pluggable IPAM from ml2 delete_subnet * Add "unreleased" release notes page * Final decomposition of opendaylight driver * Adding security-groups unittests * Don't snat traffic between fixed IPs behind same router * Remove MidonetInterfaceDriver * Update internal snat port prefix for multiple IPv6 subnets * Use get_interface_bridge instead of get_bridge_for_tap_device * Move LinuxBridge related features to bridge_lib * Reduce duplicated code in test_linuxbridge_neutron_agent * Document the neutron-release team * Updated from global requirements * Trivial fix in l3 agent * IPAM: make max fixed IP validations DRY * Fix misuse of log marker functions in neutron * More instructions for neutron-db-manage revision --autogenerate * Add in missing spaces at end of line * Do not use log hints for exceptions * Fix notification driver package * Adding a function prefix before parenthesis * Make command log in neutron utils.execute() a single line * move import to top and rename to make more readable * Move update_fip_statuses to Router class * Replace subnetpool config options with admin-only API * Add new config option for IPv6 Prefix Delegation * Correction and clarification to subproject stable guidelines * Make '*' the default ml2 flat_networks configuration * Add PyPI link for networking-calico * Deprecate new= argument from create_connection * OVS agent reacts to events instead of polling * Remove default=None for configuration bindings * Log hints should only be used for log messages * Add reno for release notes management * Add a note about the Neutron Bugs team in Launchpad * Update deprecated messages * Switch to using neutron.common.utils:replace_file() * Change function call order in ovs_neutron_agent * Ensure to decode bytes or fail * Optimize delete_csnat_router_interface_ports db query * Make string representation of DictModel generic * Add IRC part for effective neutron * PortOpt cleanups * Fix QoS VALID_RULE_TYPES location in devref * Docs: clarify that AnySubnetRequest is optional * Update neutron-debug to use stevedore aliases * Fix incorrect passing port dict in pluggable IPAM * Per-branch HEAD files for conflict management * Replace internal oslo_policy mock with public fixture * sub_project_guidelines.rst: Clarify stable branch creation for subprojects * Use a more pythonic string construction * Add ops tag to bugs policy * IPSet Manager: make code more pythonic * Imported Translations from Zanata * Remove deprecated 
nova_* options * Fixed a bunch of typos throughout Neutron * Decompose ML2 mechanism driver for Mellanox * Add text for deprecated parameter * Clarify with example mentioning gratuitous whitespace changes * Removes the use of mutables as default args * Decompose ML2 mechanism driver for OVSvApp * Fix usage of mutable object as default value * Make the Neutron Stadium documentation toplevel * Add notes about stable merge requirements for sub-projects * Fix incorrect capitilization of PyPI * Updated from global requirements * Log end of router updates for PD and delete branches * Don't update metadata_proxy if metadata is not enabled * Imported Translations from Zanata * DHCP agent: log when reloading allocations for a new VM port * Update specs backlog directory * Log the exception in linuxbridge_neutron_agent as exception * Replace utils.exec for IpNeighComm LinuxBridge drv * Formatting exception messages * Optimize get_bridge_for_tap_device * Optimize interface_exists_on_bridge * Correct indentation in linuxbridge_neutron_agent * Use oslo_config new type PortOpt for port options * Updated from global requirements * DVR: only notify needed agents on new VM port creation * Ensure l3 agent receives notification about added router * Imported Translations from Zanata * Support migrating of legacy routers to HA and back * Use string formatting instead of string replace * Delete fipnamespace when external net removed on DVR * Better tolerate deleted OVS ports in OVS agent * Remove GBP as a Neutron sub-project * get_device_by_ip: don't fail if device was deleted * Allow to specify branch for creating new migration * Mark for removal deadcode in neutron.common.utils * Adds base in-tree functional testing of the dhcp agent (OVS) * Fix _restore_local_vlan_map race * DVR: notify specific agent when deleting floating ip * Move test_extend_port_dict_no_port_security to where it belongs to * Fix the latest glitches that broke docs generation * Add effective note on DB exception to be aware of * Minor improvement in port_bound operation * Introduce an API test for specified floating ip address * Clarify what gerrit repositories can target neutron-specs * Fix error code when L3 HA + DVR router is created or updated * Spawn dedicated rpc workers for state reports queue * Fix l2pop regression * Remove deprecated sriov agent_required option * Remove deprecated namespace deletion options * Deepcopy port dict in dhcp rpc handler * Don't remove ip addresses if not master * Include alembic versions directory to the package * Fix formatting of hyperlinks provided in the office-hours doc * Remove IBM SDN-VE left-overs * Remove the port-forwarding sub-project from the list * Set security group provider rule for icmpv6 RA in DVR * Properly handle segmentation_id in OVS agent * ovs: remove several unneeded object attributes from setup_rpc() * Set ip_nonlocal_bind in namespace if it exists * Remove SUPPORTED_AGENT_TYPES for l2pop * DVR: Notify specific agent when update floatingip * Move some projects url from cgit/stackforge to cgit/openstack * Remove non-existent enable_tunneling conf from fullstack * Update notes about the Neutron teams * Validate ethertype for icmp protocols * Refactor _populate_ports_for_subnets for testability * Split the FIP Namespace delete in L3 agent for DVR * Add stevedore aliases for interface_driver configuration * Register oslo_service.wsgi options correctly * ovs_neutron_agent: display loaded extensions * Improvements to the blueprint management process * Add a note to ban agents from 
connecting to the DB * Revert "DVR: Notify specific agent when update floatingip" * Imported Translations from Zanata * Fix DVR downgrade exception / error code * Fix AttributeError on port_bound for missing ports * The exception type is wrong and makes the except block not work * Fix rendering * DVR: Notify specific agent when update floatingip * Do not try to delete a veth from a nonexistent namespace * Do not accept abbreviated CIDRs * Spelling and grammar corrections * Cross link sub-project release processes * Lower the log level for the message about concurrent port delete * Updated from global requirements * Update RFE documentation to clarify when the tag is not appropriate * Cache the ARP entries in L3 Agent for DVR * Revert "Make OVS interface name hashing algorithm common and extend it" * Enable specific extra_dhcp_opt to be left blank * Python 3: skip test_json_with_utf8 on Py3 * test_create_network_segment_allocation_fails: Assert the status * The first word of the error message should be capitalized * Create ipset set_name_exists() method * Add section for code review in effective neutron * Add -constraints sections for base CI jobs * Python 3: make post_test_hook work with more tox targets * Remove useless code in L3 HA unit tests * Move retries out of ML2 plugin * Include external bridge deprecation warning in string * Tweak RFE guidelines * Fix link in devref guide * Add ml2 extension drivers examples * Improve performance of ensure_namespace * Kill conntrackd state on HA routers FIP disassociation 7.0.0 ----- * Mock oslo policy HTTPCheck instead of urllib * Avoid DuplicateOptError in functional tests * Make test_server work with older versions of oslo.service * Always send status update for processed floating ips * Fix inconsistency in DHCPv6 hosts and options generation * L3 agent: use run_immediately parameter to sync after start * test_db_base_plugin_v2: Skip a few tests on some platforms * Fix error returned when an HA router is updated to DVR * Remove disable_service from DBs configuration * Replaced deprecated isotime() function * DVR: notify specific agent when creating floating ip * Fix the bug of "Spelling error of a word" * Fix iptables modules references in rule generation * Remove the embrane plugin * Fix functional test_server tests * Add deadlock warning to 'effective neutron' * Quick optimization to avoid a query if no ports have fixed ips * Add OpenFixture and get rid of 'open' mocks * Use assertTrue(observed) instead of assertEqual(True, observed) * Imported Translations from Zanata * QoS SR-IOV: allow to reset vf rate when VF is assigend to VM * Add track_quota_usage conf into neutron.conf * Only lock in set_members on mutating operations * Add pointers to access Neutron test coverage details * Consume ConfigurableMiddleware from oslo_middleware * Remove excessive fallback iptables ACCEPT rules * Consume sslutils and wsgi modules from oslo.service * test_create_router_gateway_fails fixes * Code refactor for generating integer in testcase * Effective: avoid mocking open() if you can * Cleaned up remaining incorrect usage for LOG.exception * Remove usage of WritableLogger from oslo_log * Fixed multiple py34 gate issues * Removed release_port_fixed_ip dead code * Validate local_ip for linuxbridge-agent * Removed neutronclient option from metadata agent * Adding headers to the devref docs 7.0.0.0rc2 ---------- * DHCP: protect against case when device name is None * Add testresources used by oslo.db fixture * Add the functional-py34 and dsvm-functional-py34 
targets to tox.ini * Improvements to the RFE management process * Mock oslo policy HTTPCheck instead of urllib * Add py34 tags to the list of official tags * Updated from global requirements * Fix rule generation for single and all host rules * Fix iptables comments for bare jump rules * Add another patch scoping bullet point to effective_neutron * Removed a pile of debtcollector removals from neutron.context * L3 Agent support for routers with HA and DVR * Python 3: add classifiers * Adding Effective tips for plugin development * Add networking-bgpvpn lieutenants * Update gate dashboard URLs * Add some test guidelines to 'effective neutron' * Fix capitalization nit in patch 230218 * DHCP: protect against case when device name is None * Execute ipset command using check_exit_code * Add note in database section of 'effective neutron' * Correct MAC representation to match iptables output * Add note about negative feedback to 'effective neutron' * Add a note about agent/server compat to 'effective neutron' * Add a patch scope section to 'effective neutron' * Add a logging guideline to 'effective neutron' * Fix missing parent start() call in RpcWorker * Remove OneConvergence plugin from the source tree * Use assertIsNone(observed) instead of assertEqual(None, observed) * Document self.assertEqual(expected, observed) pattern * Move gateway processing out of init_router_port * Use assertIn and assertNotIn * Deprecate max_fixed_ips_per_port * Don't register agents for QoS l2pop fullstack test * The option force_metadata=True breaks the dhcp agent * Updated from global requirements * Do not log an error when deleting a linuxbridge does not exist * The option force_metadata=True breaks the dhcp agent * Decomposition phase2 for MidoNet plugin * Updated from global requirements * Changes in Neutron defect management * Tag the alembic migration revisions for Liberty * /common/utils.py py34 incompatibility * Just call set-manager if connecting fails * Fixes 'ovs-agent cannot start on Windows because root_helper opt is not found' * Use format to convert ints to strings * Fixes 'ovs-agent fails to start on Windows beacause of SIGHUP' * usage_audit: Fix usage_audit to work with ML2 * Pecan: Fix quota enforcement * metadata: don't crash proxy on non-unicode user data * Do not log an error when deleting a linuxbridge does not exist * /common/utils.py py34 incompatibility * Remove debtcollector.removals tagged ensure_dir * Consume service plugins queues in RPC workers * Imported Translations from Zanata * Add more commit msg tips to 'effective neutron' * Remove local variables from IPDevice.exists * Add availability_zone support base * Pecan: Fix quota enforcement * metadata: don't crash proxy on non-unicode user data * Add neutron-linuxbridge-cleanup util * Effective Neutron: add link to low-hanging-fruit bugs * Effective Neutron: add link to logging guidelines * Add IPDevice.exists() method * Simplify L3 HA scheduler tests * Python 3: fix invalid operation on dict_items objects * Use format to convert ints to strings * Add periodic agents health check * Imported Translations from Zanata * Fix db error when running python34 Unit tests * Remove OpenContrail plugin from the source tree * Correct cisco_ml2_apic_contracts.router_id length * Remove is_ha property from the router * Remove log decorator deprecated in Liberty * Deprecate branchless migration chains from neutron-db-manage * Support new mitaka directory with revisions * Fix the bug of "Error spelling of 'accomodate'" * Just call set-manager if 
connecting fails * Check idl.run() return value before blocking * Use separate queue for agent state reports * Remove remaining uses of load_admin_roles flag in tests * Make OVS interface name hashing algorithm common and extend it * Simplify extension processing * Fix URL target problem * Add devref for alembic milestone tagging * Add compatibility with iproute2 >= 4.0 * Tag the alembic migration revisions for Liberty * api test: Skip address-scope tests when the extension is not enabled * Check idl.run() return value before blocking * Check supported subnet CIDR * Remove zombie pecan hook * Adding trailing underscores to devref links * Python 3: use "open" instead of "file" * Imported Translations from Zanata * Handle empty bridge case in OVSBridge.get_ports_attributes * Devref for authorization policies enforcement * Fixing traces of "Replace prt variable by port" * Kill HEADS file 7.0.0.0rc1 ---------- * Don't write DHCP opts for SLAAC entries * Cleanup of Translations * Cleanup of Translations * Move ConfigDict and ConfigFileFixture to neutron.tests.common * Turn device not found errors in to exceptions * Fix quota usage tracker for security group rules * Update default branch in .gitreview to stable/liberty * SimpleInterfaceMonitor: get rid of self.data_received flag * Fixes 'ovs-agent fails to start on Windows beacause of SIGHUP' * Forbid more than one branch point in alembic dependency chains * Fix quota usage tracker for security group rules * Fixes 'ovs-agent cannot start on Windows because root_helper opt is not found' * Imported Translations from Zanata * Fix a few nits with the dashboard pages * Open Mitaka development * Fix the broken link in devref docs * Eliminate autoaddress check for DNS integration * Only get host data for floating ips on DVR routers * Add neutron subproject & stable branch gerrit review links * Link dashboards into generated documentation * Add neutron/master review link to dashboard/index.html * Create dashboard page with gate jobs statistics * ml2: don't consider drivers with no bind_port for qos supported rule types * Adds configurable agent type * Imported Translations from Zanata * Updated from global requirements * Relax service module check on service providers * Get rid of ConfigParser code in ProviderConfiguration * Rename check pipeline dashboards * tests: don't validate respawn as part of ovsdb monitor functional test * ovsdb monitor: get rid of custom _read_stdout/_read_stderr methods * Change ignore-errors to ignore_errors * Change router unbinding logic to be consistent with data model * delete_port: ensure quota usage is marked as dirty * Fix hostname roaming for ml2 tunnel endpoints * Execute ipset command using check_exit_code * Refactoring devstack script * Fix adding tap failure if bridge mapping is not provided * SubnetPoolsTest: Skip IPv6 tests appropriately * Remove an invalid comment * Fixes SNAT port not found for internal port * Don't write DHCP opts for SLAAC entries * Simplify join to rbac_entries for subnets * Update _TestModelMigration * Add --verbose to subset of cmds in neutron-db-manage * Use pecan controllers for routing * test_networks: Stop assuming net-mtu extension * Imported Translations from Zanata * Add skeleton to 'Effective Neutron' devref * Introduce kill_signal parameter to AsynProcess.stop() * Remove early yields in _iter_hosts in dhcp agent * Optimize if statement in dvr_local_router.py * Re-adds VIF_TYPE_VHOST_USER to portbindings extension * Introduce a separate RPC server * Fix log statement to log 
* Remove pecan branch reference from .gitreview file
* ipam: Prevent none from being passed to delete
* Remove restriction of adding constraints to expand
* Delete unused file tests/unit/database_stubs.py
* No network devices on network attached qos policies
* Revert "Revert "Pecan WSGI: prevent plugins from opening AMQP connections""
* Use tempest-lib's token_client
* Revert "Pecan WSGI: prevent plugins from opening AMQP connections"
* Add constraint target to tox.ini
* Fix establishing UDP connection
* ovsdb: Fix a few docstring
* Remove requirements.txt for the ofagent mechanism driver
* Always return iterables in L3 get_candidates
* Remove plural param to QUOTAS.count
* Return version info on version controller
* Log exception.msg before exception.message
* Fix pecan policy enforcement for GET requests
* Add missing resource discriminator in update resp
* Fix missing check for admin/adv_service
* Clarify and add a TODO in the controller
* Set expected HTTP codes for create and delete
* Add basic bulk support to collection controller
* Prevent full sync in dhcp_agent when possible
* Remove duplicated API server
* Add QoS fullstack test
* QoS agent extension and driver refactoring
* Add IPv6 Address Resolution protection
* Revert "AsyncProcess: try to kill tender"
* Remove out-of-tree vendor AGENT_TYPE_* constant
* func: Don't use private method of AsyncProcess
* Remove unused ovs_lib method reset_bridge
* Fix TypeError caused by delete_agent_gateway_port()
* sub_project_guidelines: Add richer documentation
* Fix typo: Large Ops, not Large Opts
* Fix query in get_l3_agent_with_min_routers
* Do not specify host for l2population topics
* Add utility function for checking trusted port
* Fix typo in error message in NetcatTester
* docstring fix
* AsyncProcess: try to kill tender
* Enable servicing lbaasV2 vip by DVR
* Switch scheduler drivers to load based schedulers
* Fix BadRequest error on add_router_interface for DVR
* Fix missing value types for log message
* Tweak test_keepalived_respawns test logic
* Reservations: Don't count usage if resource is unlimited
* Restore reservations in API controller
* ovs: don't use ARP responder for IPv6 addresses
* Install sriov-agent.ini on 'setup.py install'
* Configure gw_iface for RAs only in Master HA Router
* Remove useless log from periodic_sync_routers_task
* Replace is_this_snat_host validation with internal function
* Revert "Remove address scopes from supported extensions"
* Add l2pop support to full stack tests
* Add tunneling support to full stack tests
* Remove an unused DVR function
* Handle ObjectDeletedError when deleting network ports/subnets
* OVSAgentTestFramework: Remove _bind_ports
* Descheduling DVR routers when ports are unbound from VM
* Updated from global requirements
* Reduce the chance of random check/gate test failures
* Allow passing arbitrary ip route parameters to add/delete_route
* Make ip address optional to add_route and delete_route
* Add list routes
* Fix dvr update for subnet attach multi subnets
* Make ip rule comparison more robust
* Remove hack for discovery novaclients extension
* Check ICMP codes in range [0,255]
* Remove address scopes from supported extensions
* Add test to check that correct functions is used in expand/contract
* SR-IOV: devstack support for SR-IOV agent
* Fix test_external_tables_not_changed
* Delete gateway conntrack state when remove external gateway
* Updated from global requirements
* Add non-model index names to autogen exclude filters
* Implement expand/contract autogenerate extension
* Cleanup the fip agent gateway port delete routines
* Add RPC command and delete if last FIP on Agent
* Delete FIP agent gateway port with external gw port
* Remove ebtables_driver/manager dead code
* Stop device_owner from being set to 'network:*'
* Add oslo rootwrap daemon logging during functional tests
* ovs agent resync may miss port remove event
* tests: disable process monitor before managers
* Retry metadata request on connection refused error
* Add ability to use custom config in DHCP-agent
* Improve DB operations for quota reservation
* Qos SR-IOV: Refactor extension delete to get mac and pci slot
* Adds support to provide the csum option for the OVS tunnels
* Delete the useless variable agent_host
* Handle process disappearing before we ask for its PPID
* Allow only GET on Root controller
* OVS agent: handle deleted ports on each rpc_loop iteration
* Final decomposition of Cisco plugin
* Remove Cisco Meta and N1KV monolithic plugins
* Workaround test stream corruption issue
* Fix RBAC filter query for negative case
* Updated from global requirements
* Remove _extract_roles method from neutron.policy
* Fixed functional test that validates graceful ovs agent restart
* _bind_devices query only existing ports
* Stop logging deadlock tracebacks
* Don't log exceptions in GW update on router create
* Remove an unnecessary extension check for rbac
* OVS agent: flush firewall rules for all deleted ports at once
* Enable most unit tests for py34 job
* Changed filter field to router_id
* Fix a wrong condition for the _purge_metering_info function
* Don't log deadlock or retry exceptions in L3 DB
* Make sure service providers can be loaded correctly
* sriov: update port state even if ip link fails
* Retain logs for functional test cases

7.0.0.0b3
---------

* Don't setup ARP protection on OVS for network ports
* Don't setup ARP protection on LB for network ports
* Add support for PluginWorker and Process creation notification
* Implement external physical bridge mapping in linuxbridge
* Avoid DB errors when deleting network's ports and subnets
* Better message on allowed address pairs error
* Add info to debug test_keepalived_respawns gate failure
* Enable to update external network subnet's gateway-ip
* Make Neutron service flavor save service_type
* Add tenant_id to flavor service profiles attributes
* Remove implicit registration of *-aas service providers
* Rename 'newapi' to 'pecan_wsgi'
* Catch errors on 'port not found' while deleting subnet
* Process user iptables rules before INVALID
* OVS-agent: Introduce Ryu based OpenFlow implementation
* Deprecate external_network_bridge option in L3 agent
* Do not track active reservations
* Deprecate --service option for neutron-db-manage
* Add constraint target to tox.ini
* DHCP agent: allow using gateway IPs instead of uniquely allocated
* Resolve issue where router can't be removed from L3-agent in dvr mode
* OVS agent add functional tests of OVS status
* check_changed_vlans doesn't need registered_ports as param
* [rpc] pull: removed a hack to avoid object backport triggered
* Enable py34 tests for pluggable ipam backend
* test_migrations: Remove unnecessary midonetclient mocks
* Updated from global requirements
* Fix import path in neutron-sanity-check for ml2_sriov opts
* Decentralize the management of service providers
* Remove requirements.txt for decomposed plugins/drivers
* Linuxbridge-agent: fix bridge deletion
* Correct neutron-ns-metadata-proxy command when watch_log is False
* Split SR-IOV configuration file into driver and agent pieces
* Python 3: use a hash to sort dictionaries
* Implement TODO for version listing
* Fix hooks for dealing with member actions
* Fixed filters for functional tests
* Fix usage of netaddr '.broadcast'
* Add lieutenants contact for networking-calico
* Adding networking-calico to sub_projects document
* Fix locale problem in execute()
* Remove duplicated codes in two test cases
* Fixes wrong neutron Hyper-V Agent name in constants
* Updated from global requirements
* Improve python code for missing suggestion
* Fix misnomer on network attribute
* Refactor IpRouteCommand to allow using it without a device
* Revert "Add support for unaddressed port"
* Improve logging upon failure in iptables functional tests
* handle gw_info outside of the db transaction on router creation
* Remove ml2 resource extension success logging
* Replace "prt" variable by "port"
* Add optional file permission argument to replace_file()
* Fixed the typo in the doc string of the class SubnetPoolReader
* Add flows to tunnel bridge with proper cookie
* Add lieutenants contact for networking-onos
* Adding networking-onos to sub_projects document
* Add policy and policy rule belongs check
* Base on SqlTestCase to init db tables correctly
* Stops patching an object method which could be gone at cleanup
* Add enable_new_agents to neutron server
* Document prefix delegation testing issues
* Fix Prefix delegation router deletion key error
* Add Geneve type driver support to ML2
* Fix DVR log strings in agent
* devref: Add sub-project release notes
* Process update_network in the openvswitch agent
* Removing the SDN-VE monolithic plugin
* [neutron-db-manage] Introduce contract and expand commands
* Fix DBDuplicateEntry when creating port with fixed_ips on PD subnet
* Update template for ModelMigrationSync test
* Fix py34 No sql_connection parameter is established error
* Switch to using os-testr's copy of subunit2html
* Add a functional test to validate dvr snat namespace
* Add snat ports cache to dvr router
* DHCP agent: add 'bridged' property to interface driver
* SR-IOV: deprecate agent_required option
* SimpleInterfaceMonitor handle case when ofport is an empty set
* Make delete-vlan-bridge and delete-vlan functions clear
* Run py34 tests with testr
* Use directly neutron.common.constants constants in l3_dvr_db
* Make a couple of methods private
* Add IPv6 Prefix Delegation compatibility to ipam_pluggable_backend
* Validate router admin_state_up on upgrade to distributed
* Fix AttributeError in _clean_updated_sg_member_conntrack_entries()
* PLUMgrid plugin decomposition part II
* Quota enforcement: remove locks on _dirty_tenants
* L3 agent changes and reference implementation for IPv6 PD
* Decomposition phase2 of NEC plugin
* Allow py34 to run tests individually
* Add dns_label processing for Ports
* Remove out-of-tree vendor VIF_TYPE_* constants
* Move in-tree vendor AGENT_TYPE_* constants
* devref: added guidelines on how to maintain sub-projects
* Stop logging STDOUT and STDERR on every shell out
* Defer freeing of conntrack zone ids until allocation fails
* Update the URLs to the Cloud Admin Guide
* Remove redundant logging statements from RootWrapDaemonHelper
* Rationalize neutron logs to help in troubleshooting router issues
* Move db agent schedulers test to a more appropriate place
* OVS agent don't hard code tunnel bridge name
* Make models_v2 explicitly import rbac_db_models
* Make NeutronDbObjectDuplicateEntry exception more verbose
* Add empty policy rule to get_rule_type action
* test_ovs_neutron_agent: Fix test_cleanup_stale_flows_iter_0
* Support dhcp metadata service for all networks
* Move docstring to FakeMachineBase
* Update rootwrap.conf to add /usr/local/sbin
* Remove the ML2 Nuage driver code
* Template for ModelMigrationTest for external repos
* Only mark metadata packets on internal interfaces
* Python 3: do not do "assertFalse(filter(...))"
* ip_lib: support creating Linux dummy interface
* Graceful OVS restart for DVR
* DHCP agent: clarify logic of setup_dhcp_port
* Add config option to specify ovs datapath
* Python 3: fix test_ovs_tunnel
* Python 3: use __code__ instead of func_code
* IPv6 display suitable message when MTU is invalid on iface
* Update oslo messaging configuration section for fullstack
* Imported Translations from Transifex
* QoS: fix get bandwidth limit rules to filter them per policy
* Neutron RBAC API and network support
* Fixed broken link in neutron-server's documents
* Used namedtuple for ReservationInfo
* Move in-tree vendor VIF_TYPE_* constants
* Remove VIF_TYPES constant
* Added initial devstack plugin
* Fix qos api-tests after policy changes
* fullstack: use migration scripts to create db schema
* Only validate local_ip if using tunneling
* qos: Delete bw limit rule when policy is deleted
* Do not query reservations table when counting resources
* Add support for unaddressed port
* Sync FK constraints in db models with migration scripts
* Add EnvironmentDescription, pass it down
* Dropped release name from migration branch labels
* Split DRIVER_TABLES in external.py
* DVR: make sure snat portion is always scheduled when needed
* neutron-db-manage: sync HEADS file with 'current' output
* Fix _ensure_default_security_group logic
* Add missing tenant_id validation in RESOURCE_ATTRIBUTE_MAP
* Graceful ovs-agent restart
* l2pop: check port mac in pre-commit to stop change
* Adding Ale Omniswitch to sub_projects document
* Add high-level functional/integration DVR tests
* Add a fullstack fake VM, basic connectivity test
* Final decomposition of ML2 Cisco UCSM driver
* Fix query in get_reservations_for_resources
* Move tests for non pluggable ipam backend
* fullstack: Skip NotFound in safe_client cleanup
* Fix tenant access to qos policies
* Rename args for alembic 0.8.0
* Update sub projects git urls
* Stop using quota reservations on base controller
* Final decomposition of ML2 Nexus Driver
* manual add/remove router for dvr_snat agent
* DVR: fix router rescheduling on agent side
* Python 3: fix test_utils
* lb: stop handling Havana device updates
* quota: synchronize resync and count with other dirty_tenants code
* Add logging to debug oslo.messaging failure
* Setup firewall filters only for required ports
* Updated from global requirements
* Quota enforcement: python3 compatibility
* Devref for quotas
* Reservations support
* Fix .gitreview to not point at a branch
* Don't fatal error during initialization for missing service providers
* NSX: Move DB models as part of core vendor decomposition
* doc: Improve table rendering using multi-row cells
* Rename function '_update_port_down'
* Redundant tests removed from ovs-lib unit tests:
* Add network to SubnetContext
* Unskip firewall test
* NSX plugin: Moving away plugin extensions
* Get rid of exception converter in db/api.py
* Python 3: encode or decode i/o data of Popen.communicate()
* Updated from global requirements
* Use a conntrack zone per port in OVS
* Fix some issues around tempest in fullstack testing doc
* Add lieutenants contact for kuryr
* Add dashboard folder and graphite dashboard to doc
* lieutenants: Add Neutron infra lieutenants
* DVR: do not reschedule router for down agents on compute nodes
* Replace internal calls of create_{network, subnet, port}
* ml2: Remove a redundant assignment in _bind_port_level
* ml2: _commit_port_binding: Don't use None to mean False
* Minor typo fix
* l3: not use L2 plugin _get_subnet unnecessarily
* l3_db: not use L2 plugin _get_port unnecessarily
* Break down _bind_port_if_needed in ML2
* Pecan WSGI: prevent plugins from opening AMQP connections
* Remove 'action' argument from _handle_fip_nat_rules()
* Remove vmware plugin from neutron (etc part)
* Setup reference service providers for API test runs
* [neutron-db-manage] check_migration: validate labels
* Python 3: fix neutron.tests.unit.api.test_extensions
* Add configurable options for HA networks
* Add test that checks external tables are not changed
* [neutron-db-manage] remove old HEAD file when updating for branches
* Remove unneeded shebangs
* Python 3: hmac requires bytes key/msg
* Python 3: encode unicode response bodies
* Support for independent alembic branches in sub-projects
* Remove bigswitch mech_driver entry point definition
* Updated from global requirements
* Python 3: specify a bytes to an argument for a format type 's' of struct.pack()
* Preserve DVR FIP rule priority over Agent restarts
* Treat sphinx warnings as errors
* Distributed router can not add routes
* Update fullstack multinode simulation image
* Fix docs job
* Improve callback registry devref documentation and usability
* Final decomposition of the ML2 NCS driver
* Fix update_subnet for prefix delegation
* The unnecessary value "sgids" was deleted
* Fix DVR interface delete by port when gateway is set
* Skip FwaaS test that is failing due to race condition
* Destroy ipset when the corresponding rule is removed
* Python 3: compare response.body to bytes in namespace_proxy test
* Forbid attaching rules if policy isn't accessible
* DVR: fix router rescheduling on server side
* Fix the low level OVS driver to really do egress
* SR-IOV: Add Agent QoS driver to support bandwidth limit
* Pass the extension driver exception to plugin
* Update documentation according to last QoS/OvS changes
* OVS agent functional test for policy rule delete
* Add Kuryr to sub_projects.rst
* Clean up test_dvr_router_rem_fips_on_restarted_agent
* Fix _update_subnet_allocation_pools returning empty list
* devref: update quality_of_service
* Replace 'import json' with oslo_serialization
* SR-IOV: Convert max rate from kbps to Mbps
* Add testing coverage .rst, missing test infrastructure to-dos
* Python 3: encode unicode response bodies
* Update port functional tests for qos agent
* Neutron-Ironic integration patch
* DVR: fix router scheduling
* TESTING.rst love
* Removed configuration option for qos agent driver selection
* Add delete_port api to agent extension manager
* Functional test for QoS policy bandwidth rule update
* Support delegation of bind_port to networking-odl backend driver
* Use oslo.log library instead of system logging module
* resources_rpc: fixed singleton behavior for ResourcesPullRpcApi
* Add thread locks on port routines for qos ext
* Avoid dhcp_release for ipv6 addresses
* SR-IOV: fixed singleton behavior for ESwitchManager
* Validate local_ip for OVS tunnel
* Imported Translations from Transifex
* db_base_plugin_v2: Avoid creating another session
* Consistent layout and headings for devref
* Use DeferredOVSBridge in setup_default_table
* Fix get_objects to allow filtering
* QoS core extension: fixed dict extension when QoS policy is unset
* OVS agent QoS extension functional test for bandwidth limit rules
* Propagate notifications to agent consumers callbacks
* Add rpc agent api and callbacks to resources_rpc
* neutron.api.rpc.callbacks interface rework
* Moved l2/agent_extensions_manager into l2/extensions/manager.py
* Moved extensions/qos_agent.py into extensions/qos.py
* Introduce base interface for core resource extensions
* Do not delete fip namespace during l3 dvr agent resync
* Introduce ItemAllocator class
* Validate updated allocation pool before using it
* Remove quotes from subshell call in tools/split.sh
* Don't claim Linux Bridge ml2 driver supports bandwidth limit QoS rules
* Clean up QoS rules first, then QoS policies
* Pass the extension driver exception to plugin
* Remove a few obsolete options from midonet.ini example
* Rename a test method in test_policy.py
* Revert "Add extension callbacks support for networks"
* Updated quality_of_service devref doc to reflect reality
* Broadcast service port's arp in DVR
* usage_audit: Fix usage_audit to work with ML2
* Revert "Remove VPN from API tests"
* Enable VPN plugin for API test
* Validate interface_mappings on Linux bridge init
* Initialize ancillary_port_info dict as blank in OVS agent
* Enable fullstack multinode tests, add L3 HA test exemplar
* SR-IOV: Update eswitch manager to support rate
* Follow up with some cleanup for agent qos_driver
* Gracefully handle duplicate rule creation
* Fix: Skip rescheduling networks if no DHCP agents available
* DB, IPAM & RPC changes for IPv6 Prefix Delegation
* Python 3: convert dict_keys object to list
* Python 3: do not compare int and NoneType
* Remove VPN from API tests
* Fix typos in neutron code
* "FakeV4Subnet" class be inherited by following class
* Update OVS driver to work with objects
* Python 3: fix test_ovs_tunnel
* _get_dvr_sync_data: Return a list, rather than dict_values for python3
* Fixing ICMP type and code validation
* Support subnetpool association to an address scope
* Add API tests for non-accessible policies
* Gracefully handle fetching nonexistent rule
* use single transaction to update qos policy association
* Replaces reduce with six.moves.reduce for py 2/3 compatibility
* Add oslo db retry decorator to the RPC handlers
* Python 3: Fix test_security_groups_db
* Replace to_dict() calls with a function decorator
* Add DNS and DHCP log into dhcp agent
* Install arp spoofing protection flow after setting port tag
* Move 1c844d1677f7 expand migration to appropriate branch
* Fix ipset can't be destroyed when last rule is deleted
* Guarantee there is only one bandwidth limit rule per policy
* Cleaned up some TODO comments for feature/qos that do not apply anymore
* L2 agent extension manager: read extensions list from config file
* objects.qos.policy: forbid deletion when attached to a port or a network
* Remove handle_network/handle_subnet from l2 agent extensions
* Move away nested transaction from _ensure_default_security_group
* Moved QOS_POLICY_ID into qos_consts.py
* Introduce get_ports_attributes in OVSBridge
* Added missing [qos] section into neutron.conf
* Enable rule delete test
* objects: consolidate single transaction checks into test_base
* objects.qos.policy: provide rules field, not type specific
* Unite qos_rules and qos_*_rules tables
* Switch controller to actually call the plugins
* Add extensions listing to the controller
* Add placeholder for notifier hook
* Add hook for policy enforcement
* Add quota enforcement hook
* Add ownership validation hook
* Add attribute population hook
* Add resource/plugin identification hook
* Add hook to create a context from the headers
* Add hook to translate exceptions into HTTP codes
* Add startup hook after pecan init for plugins
* Add keystone middleware wrapper to pecan app
* Fix accessing shared policies, add assoc tests
* qos: forbid creating rules when there is no access to policy
* Initial pecan structure
* Remove unnecessary executable permission
* NSX: Rename default_interface_name option
* Arista Drivers decomposition part II
* Python 3: pass bytes to base64.encode{string,bytes}
* Python3: pass bytes to binascii.crc32
* Fix order of calls in update_port
* Check that VXLAN is not in use in LB VXLAN check
* Initialize port_info dict as blank in OVS agent
* Ensure non-overlapping cidrs in subnetpools with galera
* SR-IOV: update pci lib to support rate limit
* SR-IOV: Fix SR-IOV agent to run ip link commands as root
* QosPolicy: made shared field required and with default value = False
* Python 3: Use '//' instead of '/'
* Prevent update alloc pool over existing gateway ip
* Moved out cisco n1kv mech driver and db models
* Updated from global requirements

7.0.0.0b2
---------

* sriov: implement spoofchecking configuration
* [qos] ovs: removed TODO for getting integration bridge from arguments
* Fixes a typo phys_brs in place of phys_br
* Update dhcp agent cache for network:dhcp ports
* Keep dns nameserver order consistency
* Extend vxlan_group option to allow a range of group addresses
* Load the QoS notification driver from the configuration file
* Add pluggable backend driver for QoS Service notification
* Enable resource usage tracking for reference plugins
* Add plural names for quota resources
* Introduce usage data tracking for Neutron
* Create packages for quota modules
* Python 3: fix test_attributes
* Add FUJITSU vendor plugin in sub_projects
* Python 3: fix test_dhcp
* test_db_base_plugin_v2: Improve DBReferenceError generation
* Fix a microsecond format of isoformat()
* Add update tests for policies and rules
* Updated from global requirements
* Python 3: fix test_context
* Fix KeyError: 'L3_ROUTER_NAT' in l3 scheduler functional test
* Introduce mechanism to determine supported qos rule types for a plugin
* Cleanup IPAM tests
* get_info: request object backport only if desired version is different
* rpc.callbacks.registry: validate that callback provider is registered
* rpc.callbacks.registry: validate type of callback result
* Add UT for agent_extensions_manager
* Don't set tenant_id for rule objects
* Fix dhcp autoschedule test assertion logic
* Fix inconsistency of if/return logic in attributes.py
* Imported Translations from Transifex
* [neutron-db-manage] revision: properly bootstrap a new branch
* Add DB support for resource usage tracking
* QoS: Remove type attribute from QoS rules
* Don't enforce qos ml2 extension driver
* ml2: added qos_profile_id to get_device_details payload
* Add versioned object serialize/deserialize for resources RPC
* policy: made attach_* and detach_* methods more robust
* Decompose Apic ML2 mechanism driver
* Remove duplicate DHCP agent registration in unit test
* Python 3: do not index dict_values objects
* L2 agent RPC add new RPC calls
* Add Cathy Zhang as networking-sfc Lieutenant
* Add error message when migrate from distributed router to centralized
* Avoid printing log options multiple times
* Support qos rules and fields parameters in GET requests
* Pass context when deleting bandwidth limit rule
* Add Pluggable IPAM Backend Part 2
* Create fip on subnet id
* Python 3: fix neutron.tests.unit.agent.dhcp.test_agent
* Updated from global requirements
* Update port bindings for master router
* [qos] cleanup _find_object from neutron.db.api
* Revert "Mute neutron.callbacks notification logs."
* qos: kill get_namespace() from service plugin
* Base infrastructure for QoS API tests
* Metaplugin removal
* Remove line number of link and useless link
* Disable port creation when invalid MAC address is provided
* Fix handling of port-range-min 0 in secgroup RPC and agent
* Fix a property comment in metadata_agent files
* Add address scope API tests
* Python 3: enable more tests
* Add new ovs DB API to inquire interfaces name list in a bridge
* Tweak wording for project inclusion process
* Define fullstack router/network/subnet management fixture
* Fix race condition by using lock on enable_radvd
* Fix note in devref/contribute.rst
* ensure_dir: move under neutron.common.utils
* Add conntrack-tool to manage security groups
* Adding a cleanup for 'qlbaas-' namespaces in netns_cleanup
* Bug-Fix for unexpected DHCP agent redundant
* Remove deprecated OVS and LB plugin DB tables
* ovs_lib: Fix native implementation of db_list
* Stop use of oslo_utils.timeutils.strtime()
* Fix gateway port could not retrieve for subnet
* Port help text for dvr_base_mac from neutron.conf
* Add documentation for SRIOV NIC agent (previously missing)
* Python 3: fix neutron.tests.unit.agent.linux.test_async_process
* Adds garp_master_repeat and garp_master_refresh to keepalived.conf
* Added functional tests for L3 schedulers
* Always use BridgeDevice to manage linuxbridges
* Update OVS Agent to work with Agent Extension Mgr
* Instantiate qos agent driver
* objects.rule: enable database tests for QosRule
* Handle qos_policy on network/port create/update
* Updated from global requirements
* Validate that context exists
* neutron-db-manage: fix check_migration for branch-less migration directories
* Use only the lower 16 bits of iptables mark for marking
* Python 3: fix test_provider_configuration
* Add address_scope_db to neutron/models/head.py
* OVS agent factor out port stats processing
* Python3: Do not compare NoneType and integers
* Use oslo_log.helpers.log_method_call
* Unplug the VIF if dhcp port is deleted
* Python 3: Wrap map() in a list call
* Devref documentation for client command extension support
* Alter unit test to match bug and cleanup ext logic
* Allow overriding of the neutron endpoint URL in metadata agent
* Allow passing table argument to construct IpRouteCommand
* Make external_gateway_nat_rules easier to understand
* Remove perform_snat_action indirection
* Flavor Framework implementation
* Add breakages in public API of devref
* objects.qos.policy: support per type rule lists as synthetic fields
* Network RBAC DB setup and legacy migration
* [devref] db_layer: expand on how new migration scripts look like
* Add oslo db retry decorator to non-CRUD actions
* QoS Service devref
* Implement QoS plugin
* Add oslo db retry decorator to non-CRUD actions
* Change prefix for namespace fixture
* Imported Translations from Transifex
* OVS-agent: Fix a docstring typo
* Python 3: do not use types.ClassType
* Create dvr base class and stop passing around snat_ports
* Add qos section to ovs agent config
* Mute neutron.callbacks notification logs
* Small fixes in test_qos_agent UT
* Add unit tests and fixes for OVS Agent QoS Extension Driver
* Correct two spelling mistakes in Neutron devrefs
* Improve check_migration command error message
* Avoid using logging in signal handler
* Galera multi-writers compliant sync_allocations
* Fix SR-IOV mechanism driver tests directory
* Switch to the oslo_utils.fileutils
* Fix a regression in a recent IPAM change
* Fix update_port_postcommit and port not found with DVR
* Tighten exception handler for import_object
* Updated from global requirements
* bugs: Update info about current bug czar
* Add another Lieutenant contact for Dragonflow
* [neutron-db-manage] support separate migration branches
* Add OVS QoS extension agent driver
* Disable python3 tests failing due to Routes < 2.0
* Fix typo of 'receive' in test_dhcp_ipv6.py
* Fix typo 'adress'
* Add sub-project lieutenant for networking-midonet
* Lower log level for extending network/subnet/port
* Cleanup unused method get_plugin_version
* Remove db-access semaphore in ML2
* Moving out cisco n1kv extensions
* Remove self.snat_ports, a dvr thing, from router base class
* Include comment in DHCP ip6tables rules
* Qos Agent Extension
* Fixed L3 agent manual scheduling for HA routers
* Ensure floating IPs only use IPv4 addresses
* Implement QoS policy detach from port and network
* Add API stub for QoS support rule_type resource
* Lower log level of errors caused by user requests to INFO
* Reject router-interface-add with a port which doesn't have any addresses
* Fix bug that resources in attr_map may point to same object
* Updated sub_projects.rst for networking-vsphere
* Imported Translations from Transifex
* Enforce specific order for firewall.(un)filtered_ports and devices
* objects.base: fixed object.delete()
* objects.qos.policy: fixed get_*_policy and attach_* methods
* objects.base: reset changes after getting objects from database
* BaseObjectTestCase: rename test_class into _test_class
* Cleanup rule models and objects
* objects.qos: fixed create and update for QosBandwidthLimitRule
* Use _is_this_snat_host and remove _get_gw_port_host
* Move more snat code to dvr class that does snat
* Add constants for vhost-user vif
* get_vif_ports: ignore non-Interface ports
* Add Pluggable IPAM Backend Part 1
* Fix duplicate entry catch for allowed address pairs
* Fix failures introduced by the new version of mock
* Arista ML2 driver should ignore non-vlan networks
* Ensure that update_fip_statuses gets called
* Make IPAM more pythonic
* Move DVR related method to proper class
* Introduce connection testers module
* Allow IPAM backend switch
* Correct fcntl.flock use in Pidfile.unlock
* Move update_security_group_on_port to SecurityGroupDbMixin
* Python 3: Fix a TypeError in policy.py
* In Arista ML2 driver Reconfigure VLAN on VM migration
* Add sub-project lieutenant for networking-plumgrid
* Fix issues with allocation pool generation for ::/64 cidr
* Add extra subnet route to ha router
* Remove lingering traces of q_
* Make sure path_prefix is set during unit tests
* Add IP_ANY dict to ease choosing between IPv4 and IPv6 "any" address
* Python3: cast the result of zip() to list
* Track allocation_pools in SubnetRequest
* Add ARP spoofing protection for LinuxBridge agent
* COMMON_PREFIXES cleanup - patch 5/5
* List up necessary files for thirdparty-ci.rst
* Refactor init_l3 to separate router port use case
* Devref for out-of-tree plugin/driver contribution
* Python3: do not add dict_values objects
* portsecurity_db_common: Access db columns in a consistent way
* Python 3: do not index dict_keys objects
* Remove unneeded OS_TEST_DBAPI_ADMIN_CONNECTION
* Update DVR agent to use get_vifs_by_id
* DVR: cleanup stale floating ip namespaces
* COMMON_PREFIXES cleanup - patch 1/5
* Fall back on empty path if prefix is missing
* Refactor IpRuleCommand to take more arguments
* objects.qos: added unit tests for QosPolicy neutron object
* objects.base: avoid db access if object does not have changes
* Start documenting potential API breakages in devref:neutron_api
* QoS extension fixes
* Install more-specific ICMPv6 rule in DVR routers
* devref: document API status for neutron.openstack.common.*
* Python3: do not use urllib.urlencode
* AgentExtensionsManager and AgentCoreResourceExtension
* Generic Resources RPC
* DVR: remove unused method
* Generic rpc callback mechanism which could be reused
* Update dhcp host portbinding on failover
* OVS native DBListcommand if_exists support
* Introduce the AFTER_READ callback for ports and networks
* Collapse create_subnet into single method
* Downgrade log level for gone port on status update
* Add extension callbacks support for networks
* [qos] policy: add methods to interact with policy bindings
* Support Basic Address Scope CRUD as extensions
* First QoS versioned objects, ever
* Add bandwidth_limit rule type constant
* Use EXT_TO_SERVICE_MAPPING instead of ALLOWED_SERVICES
* Change the half of the bridge name used for ports
* Fix log traces induced by retry decorator
* Remove unused linux bridge agent configuration options
* Add bandwidth_limit rules as sub-collection of qos policy
* QoS: db models and migration rules
* Add Create/Destroy API to OVS QoS BW Limiting
* Fixing indentation and typo in comments
* docs: link quality of service doc stub to devref index
* Update PLUMgrid plugin information
* Improve fixture usage
* Move pylint dep from tox.ini to test-requirements
* Disable pylint job
* Remove bridge cleanup call
* Move windows requirements to requirements.txt
* Adds base in-tree functional testing of the ovs_neutron_agent
* fix spelling mistakes
* Register extraroute extension
* Increase ping count on ARP spoof test
* Read vif port information in bulk
* Do not mock arping in L3AgentTestFramework functional tests
* Fix Consolidate sriov agent and driver code
* Remove failing SafeFixture tests
* QoS service plugin stub
* Create the QoS API extension stub
* Switch to oslo.service
* Revert "Removed test_lib module"
* Don't access mock's attribute directly especially when it's not needed
* Fix subnet updating failure on valid allocation pools
* Add documentation for Linux Bridge (previously missing)
* Add parent_id to _item calling from _handle_action
* Add logging of agent heartbeats
* populate port security default into network
* Revert "Fix 'router_gateway' port status can't be updated"
* RootHelperProcess: kill can consume signal number
* Move NetcatTester to common/net_helpers
* Make '_create_router' function handle Boolean kwargs correctly
* ip_lib: Add flush() command to IpNeigh to clean arp cache
* Refactor NetcatTester class
* Use REST rather than ReST
* lb-agent: handle security group updates in main loop
* Add a double-mock guard to the base test case
* Remove duplicated mock patch of ip_lib
* Consolidate sriov agent and driver code
* Restructure agent code in preparation for decomp
* Fix ip_lib get_gateway for default gateway on an iface
* fixing typo in gerrit query link in third party policies doc
* Use last address in v6 allocation pool generation
* Extend SubnetRequestFactory to access subnet dict
* Remove duplicated call to setup_coreplugin
* Remove double mock of dhcp agent periodic check
* Remove double fanout mock
* Remove double callback manager mocks
* Remove ensure_dirs double-patch
* Decompose _save_subnet
* Fix tenant-id in Arista ML2 driver to support HA router
* Log OVS agent configuration mismatch
* Avoid env variable duplication in tox.ini
* Skip ARP protection if 0.0.0.0/0 in addr pairs
* linuxbridge: clean up README file
* Fix tox errors in thirdparty-ci docs
* Removed test_lib module
* Updated from global requirements
* Define SafeFixture base fixture
* Remove quantum untracked files from .gitignore
* Context class should initialise its own data
* Abstract sync_allocations
* ovsdb: attempt to enable connection_uri for native impl on startup
* Just use {0,1,2} rather sys.std*.fileno()
* Make Daemon pidfile arg optional
* Different approach to indicate failure on SystemExit
* Move third-party CI policy under docs/policies
* Remove lbaas API tests, which are now in the lbaas repo

7.0.0.0b1
---------

* Only create one netaddr.IPNetwork object
* Provide work around for 0.0.0.0/0 ::/0 for ipset
* Fix >80 char lines that pep8 failed to detect
* Deprecate "router_delete_namespaces" and "dhcp_delete_namespaces"
* Make DHCPv6 out of bounds API test deterministic
* Don't process network_delete events on OVS agent
* dhcp fails if extra_dhcp_opts for stateless subnet enabled
* Revert "Fix subnet creation failure on IPv6 valid gateway"
* Support oslo_db 1.12
* Python 3: do not use itertools.izip
* Override opportunistic database tests to PyMySQL
* Extend default setenv instead of replacing it in tox.ini
* Fix FloatingIP Namespace creation in DVR for Late Binding
* Cleanup get_plugin_name() from the tree
* Bulk move methods to ipam_backend_mixin.py
* NSXv: update ini file to support dhcp_lease_time
* Use sets to calculate added/original/removed ips
* Add IPset cleanup script
* Optimize ipset usage in IptablesFirewallDriver
* Python3: do not set Request.body to a text string
* Prepare for full stack CI job
* Fix callback registry notification for security group rule
* Python3: do not use __builtin__
* Ease debugging alembic by passing proper scripts path in alembic.ini
* Use string exception casting everywhere
* l3 agent: do router cleanup for unknown routers
* Switch to oslo_utils.uuidutils
* Fix subnet creation failure on IPv6 valid gateway
* Decompose _create_subnet_from_pool
* Move _delete_port
* Decompose create_port and save_subnet
* Retry port status update on StaleDataError
* Allow setting Agents description to None
* Fix RPC version to be a string
* Decompose DVR CSNAT L3 Agent from Compute Node L3 Agent
* cleanup openstack-common.conf and sync updated files
* Fix l3 agent to not create already deleted router
* Python3: do not use '+' on dict_items objects
* Disable keepalived process in keepalived func test
* Python3: do not use im_self/im_func/func_closure
* Add request factory for pluggable IPAM
* Python3: use dict.keys() instead of dict.iterkeys()
* NSX QoS ext: RXTX factor can be decimal
* Move _add_auto_addrs_on_network_ports
* DHCP agent: Set an "ipxe" tag to work with Ironic
* Add sanity_check for keepalived ipv6 support
* Remove _check_ip_in_allocation_pool
* Precision networking-bagpipe-l2 subproject
* Don't delete DVR namespace if there are still ports on this node
* Updated from global requirements
* Fixed the only sphinx warning in docs
* Fix SR-IOV mech driver to set port status to down when agent is required
* read_hosts_file_leases shouldn't parse stateless IPv6
* Fix 'router_gateway' port status can't be updated
* Update version for Liberty

7.0.0a0
-------

* Add networking-sfc to the list of affiliated Neutron projects
* Minor improvements to sub_projects document
* Python 3: do not use cmp(), nor sorted(..., cmp=...)
* Move get_interface_by_ip from LinuxBridge class to ip_lib
* Add policy files specific to NSX plugins
* Fix cisco_csr_identifier_map.ipsec_site_conn_id
* fix rootwrap debug filter for ping all
* Refactor rpc_loop() in ovs neutron agent
* Add deadlock retry to API and ML2 RPC port update
* ovsdb: session.rpc never initialized
* Remove duplicated debug logging around locking
* Refactor scan_ports() and update_ancillary_ports() in OVS Neutron Agent
* Python3: do not change the size of a dict while iterating over it
* Refactor TestRpcWorker and TestWorkerService
* Juno_initial migration
* docs: added job to well known tox envlist
* API Extensions: inherit from the ExtensionDescriptor
* Remove fossilized remains
* Refactor update_port in db_base_plugin_v2
* Refactor _update_ips_for_port
* Python 3: use dict.values instead of dict.itervalues
* Put output of docs job into doc/build/html
* Remove get_namespace from API extensions
* Ensure no "db" related functional/fullstack tests are skipped in the gate
* Use PyMySQL in MySQL related functional/fullstack tests
* Skip rescheduling networks if no DHCP agents available
* Reflect project moves from stackforge to openstack
* VMWare NSXv: Add distributed URL locking to ini
* Revert "Revert "Add VIF_DELETED notification event to Nova""
* Decompose db_base_plugin_v2.py with changes
* Remove duplicate tunnel id check in sync_allocations
* Remove meaningless no_delete from L3 test
* Revert "Revert "Set default of api_workers to number of CPUs""
* OVSNeutronAgent pass the config as parameter
* Refactor _update_subnet_allocation_pools
* Stop sending gratuitous arp when ip version is 6
* Set .gitreview defaultbranch to feature/pecan
* Fix Enum usage in 589f9237ca0e_cisco_n1kv_ml2_driver_tables
* Imported Translations from Transifex
* power grab
* Change ensure_dir to not check directory exists first
* Document existence of br-tun and br-int in the OVS agent
* Correct indentation in neutron.api.v2.attributes
* Python3: replace 'unicode' with 'six.text_type'
* Fullstack testing devref follow up
* Moving out the cisco n1kv section to stackforge
* Ensure no "agent" functional tests are skipped in the gate
* Remove useless pass from methods in type_tunnel.py
* Make Vlantransparent extension inherit from ExtensionDescriptor
* Actually allow to pass TRACE_FAILONLY to ostestr
* Switch to os-testr to control testr
* Introduce functions using arping executable
* Revert "Defer segment lookup in NetworkContext object"
* Added networking-plumgrid in plugin requirements
* Switch from MySQL-python to PyMySQL
* Context: Remove logic for read_deleted and deprecate it
* Python 3: use next() instead of iterator.next()
* Consume oslo.policy
* policy: cleanup deprecation code to handle old extension:xxx rules
* Fix a regression in "Separate ovs-ofctl using code as a driver" change
* Break Pinger class to functions
* Handle SIGHUP: neutron-server (multiprocess) and metadata agent
* Allow update_port_status to take network param
* Make pep8 job succeed when /etc/neutron/neutron.conf is not installed
* Add a comment on _check_update_has_security_groups
* Change defaultbranch in .gitreview
* Enable all deprecation warnings for test runs
* Remove get_admin_roles and associated logic
* Add documentation for VXLAN Tunnels
* Defer segment lookup in NetworkContext object
* Fix typos in docs
* Fixes bulk insertion of data to ml2_port_binding
* Add Neutron PTL Office Hours
* Python3: Enable all working tests in tox.ini
* Add get_events to OVSDB monitor
* Update ipset members when corresponding sg member is empty
* Send 'security_groups_member_updated' when port changes
* Remove full stack log noise
* ML2: Remove TYPE_MULTI_SEGMENT
* L3 agent should do report state before full sync at start
* Clean only floating-ip related connection states
* Refactor awkward logic in setup_dhcp_port
* Add a "light" base test class for DB tests
* Make _val_to_py and _py_to_val not private
* Decompose db_base_plugin_v2.py part 2
* Fix typo in test class name
* Start linuxbridge neutron agent using a launcher
* Handle SIGHUP in ovs neutron agent
* test_ovs_neutron_agent: Remove useless ofport=10 arguments
* test_l3: Don't assume the order of subnets
* Python 3: do not index a dict_values object
* versionutils: switch from incubator version to oslo.log
* Run RootHelperProcess always as root
* Changes in rally-jobs/README.rst
* Add more API tests for port-security extension:
* Decompose the NCS ML2 Mechanism Driver
* test_db_base_plugin_v2: Don't assume the order of fixed_ips
* pylint: enable `duplicate-key` check
* Remove reference to non-existent fullstack fixture
* Enhance utils.ensure_dir to be resilient to concurrent workers
* Use a single method to remove an address with its conntrack state
* Decompose db_base_plugin_v2.py
* Add sub-project lieutenants
* Fix confusing parameters names
* Extra indent in test_ovs_neutron_agent
* Make MockFixedIntervalLoopingCall class as a helper class
* Revert "Add VIF_DELETED notification event to Nova"
* Wrap ML2 delete_port with db retry decorator
* Remove extra indent in testcases
* Check for 'removed' in port_info before reference
* Catch broad exception in methods used in FixedIntervalLoopingCall
* Add devref that explains fullstack testing and its direction
* Remove get_dhcp_port RPC method
* Refactor type_tunnel/gre/vxlan to reduce duplicate code
* Imported Translations from Transifex
* Update rootwrap.conf to add /usr/local/bin
* Add route to metadata IP by default
* Python3: use six.iteritems() instead of dict.iteritems()
* Modify ipset functional tests to pass on older machines
* Add a non-mixin function for model queries
* Implement IPAM Driver loader
* Remove comment about hash seed in tox.ini
* Refactor mlnx mechanism driver to support infiniband only
* Remove unused _uuid function alias from test_iptables.py
* test_ovs_neutron_agent: Remove unnecessary mocking
* Refactor type_gre.vxlan tests to reduce duplicate code
* Removed duplicate keys in dicts in test
* Don't update floating IP status if no change
* Don't delete port from bridge on delete_port event
* Enable random hash seeds
* Fix formatting of core-reviewers doc
* Get completely rid of contextlib.nested
* Fix indentation errors in tests
* Improve test_set_members_deleting_less_than_5
* Rename test_periodoc_resync_helper to test_periodic_resync_helper
* Sort _get_new/deleted_set_ips responses in unittests
* Ensure netfilter is enabled for bridges
* Cleanup stale metadata processes on l3 agent sync
* Imported Translations from Transifex
* Fix ovs agent restore local_vlan_map failed
* Use correct time delta function
* Do not assume order of security group rules
* ML2: Incorrect commented cisco mechanism driver name
* py34: don't run any tests except unit tests
* Move full-stack logs post-tests
* Fix PYTHONHASHSEED bugs in test_security_groups_rpc
* Addressing follow up comments for OVS_LIB fail_mode setting API
* Move pool dispose() before os.fork
* Add RFE submission guidelines
* Switch to dictionary for iptables find
* Process port IP requests before subnet requests
* Remove time formatting in agent clock error
* Persist DHCP leases to a local database
* Flesh out the new RFE process and set deadlines for its use
* Do not assume order of dictionary elements in init_l3
* Introduce the Lieutenant system into Neutron
* Isolate use of fixed_ips[0] to avoid confusion
* Use the correct name for the "Repository Creator's Guide"
* Do not assume order of convert_kvp_list_to_dict method responses
* Do not assume order of iptables_firewall method responses
* Do not assume order of get_sync_data_metering response elements
* OVS-agent: Remove optional flags from br_tun.deferred() method
* OVS_LIB support API for setting fail mode 'standalone'
* Remove hack for sending gratuitous arp from fip ns
* Force order of dhcp.needs_resync_reasons dictionary elements
* Remove use of contextlib.nested (api-tests)
* Use os._exit after forking
* test_fork_error: Fix incorrect test mock
* Skip external tables for neutron-db-manage --autogenerate
* Fix a typo in _schedule_network method
* Ensure non-overlapping cidrs in subnetpools without galera
* Add callback prior to deleting a subnet
* OVS-agent: Separate ovs-ofctl using code as a driver
* Imported Translations from Transifex
* Remove unnecessary brackets
* Ensure mac address added to iptables is always in unix format
* Remove use of contextlib.nested
* Adding loadbalancerv2 device owner constant to neutron constants
* Python 3: use six.string_types instead of basestring
* Fix minor errors in the Vyatta L3 Plugin:
* Remove middleware oslo-incubator module
* Match order of iptables arguments to iptables-save
* fix DHCP port changed when dhcp-agent restart
* VMware NSXV: update configuration file
* IPAM reference driver
* Python 3: Use six.moves.range
* ovs-agent: prevent ARP requests with faked IP addresses
* Use convenience method from db api to create nested transaction
* Remove an unused Context class
* Use namespace names in NetcatTester
* Optimize IptablesManager._find_last_entry
* Take Daemon stdin/stdout/stderr args as file objects
* Support for concurrent full-stack tests
* OVS-DVR: Suppress a confusing error log about csnat port
* OVS-DVR: Improve an error log about csnat port
* Replace ci.o.o links with docs.o.o/infra
* Refactor initialize() of sriov mech driver
* Centralized register_OVS_agent in tests
* Don't pass namespace name in disable_isolated_metadata_proxy
* Add client id option support to dhcp agent
* Remove use of contextlib.nested
* Allow updating port 'binding:host_id' to be None
* Block subnet create when a network hosts subnets allocated from different pools
* Fix neutron tests
* Allow unit tests to be run independently
* SystemExit is ok for child processes
* When disabling dhcp, delete fixed ip properly
* Update build hooks
* Append @randtoken to L3 agent namespaces in full stack tests
* Add VIF_DELETED notification event to Nova
* setup port filters when sg rules change
* tests: don't allow oslo.config to autodiscover config files
* mlnx MD: mlnx_direct removal
* l2pop UT: Reduce code duplication in migration tests
* Add unit tests for ML2 DVR port binding and fix PortContext inconsistencies
* Make it clear the rfe tag is lower-case
* Remove H305 from tox.ini pep8 ignore list
* Allow users to run 'tox -epy34'
* Deprecate quota_items, register resources upon REST initialization
* Support BP:ipv6-router in Neutron HA Router
* Catch ObjectDeletedError and skip port or subnet removal
* Randomize tunnel id query to avoid contention
* Remove skip of service-type management API test
* Imported Translations from Transifex
* Add capability to wait for IPv6 address in ip_lib
* Remove from BridgeDevice homemade execute in namespace
* remove router interface on Arista L3 plugin fails
* Extenuate register_dhcp_agent code duplication in tests
* Fix typos related to IPv6 use-cases
* Refactor checks for device existence
* Updated from global requirements
* Check for missing network in _bind_devices
* Add missed actions into policy.json
* Reuse caller's session in ML2 DB methods
* ARP spoofing patch: Data structures for rules
* Limit router gw ports' stateful fixed IPs to one per address family
* VMWare NSXv: Metadata for distributed router
* VMware: update supported plugins
* Allow to define enable_snat default value
* Update the specs process for Liberty
* changes log level to debug for help calls
* Remove use of contextlib.nested
* Fix fetching prevent_arp_spoofing from cfg in neutron-sanity-check
* VMware: add in router types for NSXv
* Reduce prefix and suffix length in ipsets
* Add port-security extension API test cases
* Add test for security groups
* Use iptables zone to separate different ip_conntrack
* Fix dhcp _test_sync_state_helper asserting calls wrong
* Updated from global requirements
* Enhance configure_for_func_testing.sh for *aaS use
* Add IP version support to all ip_lib code
* Imported Translations from Transifex
* Get all interfaces for get_snat_sync_interfaces
* OVS-agent: Ignore IPv6 addresses for ARP spoofing prevention
* Remove un-used keys in keepalived tests
* Deprecate config-based Quota Driver
* Clarify stackforge/vmware-nsx is for VMware NSX suite
* Updated from global requirements
* l3 agent: fix grammar in router info not found warning
* Finally let L3 and DHCP agents cleanup namespaces by default
* Context: is_admin==True implies is_advsvc=True
* Fix port creation verification of the port-security extension
* Add some tests for floating ips
* Add notes about official sub-projects
* Updated ovsvapp_agent.ini in neutron
* Don't use iterator in search for tunnel type
* Remove is_active property from SimpleInterfaceMonitor
* Updated from global requirements
* Disembowel register_l3_agent code duplication in tests
* Ensure mocks for lla allocator _write in test_agent
* Fix _device_to_port_id for non-tap devices
* Imported Translations from Transifex
* Rename delete_gateway method name
* Drop use of 'oslo' namespace package
* Remove 'IP' from device exception message
* Add icmpv6 to sg_supported_protocols
* Suppress exception when trying to remove non existing device in SNAT redirect

2015.1.0
--------

* Run radvd as root
* Add devices to update in RPC call security_groups_provider_updated
* Run radvd as root
* Support multiple IPv6 prefixes on internal router ports for an HA Router
* Not creating HA router when not enough l3 agents
* Eliminate extra queries used to retrieve gw_ports
* Don't update port with host id of None
* fix l3-agent restart with last runtime fip for dvr
* Refactoring to adhere to coding convention
* Replace unnecessary call to get_sync_routers
* Move test_get_user_allocation*returns_none test to a proper class
* Replace BaseLinuxTestCase by BaseSudoTestCase
* Remove RecursivePermDirFixture useless cleanup
* Utilities for building/parsing netns names to facilitate testing
* Fix MismatchError to nondeterministic order for list of controllers
* Add missing interface to populate subnets method
* Don't resync on DHCP agent setup failure
* Refactor socket ssl wrapping
* Don't resync on DHCP agent setup failure
* Replace BaseIPVethTestCase by FakeMachine
* Return exception when attempting to add duplicate VIP
* Imported Translations from Transifex
* Allow plugin to specify router_id
* Neutron to Drop Router Advts from VM ports
* Fix L3 agent functional tests random failures
* Mock report_state during L3 agent functional tests
* Remove backward compatibility for check_is_admin

2015.1.0rc2
-----------

* Add weak reference test for callback manager
* Spawn RADVD only in the master HA router
* tests: confirm that _output_hosts_file does not log too often
* Double functional testing timeout to 180s
* Restrict subnet create/update to avoid DHCP resync
* Only update MTU in update code for MTU
* Restrict subnet create/update to avoid DHCP resync
* Make sure OVS restarts when Exception occurred
* Updated from global requirements
* Remove dependency on weak reference for registry callbacks
* Ensure metadata network works with DVR
* Change callbacks logging from INFO to DEBUG
* Fix DVR functional tests resources leak
* Create bridges in ovsdb monitor functional tests
* Refactor RESOURCE_ATTRIBUTE_MAP cleanup
* remove metadata_proxy_local filters for rootwrap
* Add use_slave DB api support
* Fix incorrect query for user ip allocations
* Fix typo acomplished => accomplished
* OOP naming cleanup in l3_dvr_db
* ARP spoofing patch: Low level ebtables integration
* Fix test discovery for api and functional paths
* Block allowed address pairs on other tenants' net
* tests: confirm that _output_hosts_file does not log too often
* Fix super cleanUp for fullstack ProcessFixture
* Add security groups events
* Block subnet create with mismatched IP versions
* Remove neutron.tests.common.agents package
* L3 DB: Defer port DB subnet lookups
* lb-agent: ensure tap mtu is the same as physical device
* Only update MTU in update code for MTU
* Revive BaseLinuxTestCase._create_namespace
* Defer creation of router JSON in get_routers RPC
* ovs_lib: Fix a race between get_port_tag_dict and port removal
* Correct inconsistent enable_snat management
* _create_subnet_from_implicit_pool assumes external network extension
* Log caught exceptions while deleting a router
* Define FakeMachine helper for functional/fullstack tests
* Replace custom method call logger with oslo.log helper
* ML2: Change port status only when it's bound to the host
* Release Import of Translations from Transifex
* Simplify keepalived.virtual_routes
* l2pop UT: Simplify migration tests
* l2pop UT: Expire cached db objects before reusing a session
* Correct typo for matching non-dict ovsdb rows
* Fixes race condition and boosts the scheduling performance
* Register ibm-db-alembic import for DB2 migrations
* Fixes race condition and boosts the scheduling performance
* ML2: Change port status only when it's bound to the host
* Remove double queries in l3 DB get methods
* Strip unnecessary overrides in extraroute_db mixin
* Set loading strategy to joined for Routerport/Port
* Avoid double-hopping deletes for security group rules
* Set IPset hash type to 'net' instead of 'ip'
* Revert "Add ipset element and hashsize tunables"
* Set IPset hash type to 'net' instead of 'ip'
* Update .gitreview to point to stable/kilo
* Add Kilo release milestone
* Quota model: use HasTenantId mixin
* Clarify the init logic for the ML2 plugin
* Deal with TODO related to Security Groups RPC API's classes
* Add Kilo release milestone
* Add some more comments to models/frozen.py
* IPv6 SLAAC subnet create should update ports on net
* Two api tests for 'firewall insertion mode' feature
* OVS_LIB API addition - change bridge controller connection-mode
* Imported Translations from Transifex
* Drop the ovs_lib compat layer as per TODO note
* Removed ml2_conf_odl.ini config file
* IPv6 SLAAC subnet create should update ports on net
* Use 'port' instead of 'ports' to reference port from IPAllocation
* Enhance OVSDB Transaction timeout configuration
* Added config variable for External Network type in ML2
* Update decomp progress chart
* Provide details for configure multiple DHCP agents
* Stop running L3 functional tests with both OVSDB interfaces
* Fix formatting errors in TESTING.rst
* Pass correct port ID back to RPC caller
* Fix intermittent ipset_manager test failure
* Fix mock return settings in test_full_uuids_skip_port_id_lookup
* Add full-stack test
* create_resource should return maximum length str
* Add clock sync error detection on agent registration
* Log RPC initialization in L3 service plugin and ML2
* Add block name to switch config options for MLX plug-ins
* Fix the ImportErrors in l3 and dhcp scheduler functional tests
* Removed jsonrpclib dependency
* Additions to TESTING.rst
* Handle race condition on subnet-delete
* Move values for network_type to plugins.common.constants.py
* allow OVSDB connection schema to be configurable
* Add OVSDB connection as a parameter to the transaction
* l3_rpc: Fix a comment typo
* Fix native OVSDB db_get handling for UUID columns
* Move iptables and ipset config registration into modules
* Kill hostname validation for subnet:dns_nameservers
* Adds DVR functional test for multi-external networks
* context: reuse base oslo.context class for to_dict()
* Fix routerid constraint migration
* Synced versionutils from oslo-incubator
* Removed ml2_conf_odl.ini config file
* Router is not unscheduled when the last port is deleted
* Remove L3 report_state logging
* Double functional testing timeout to 180s
* Non-json body on POST 500's
* OVSDB python binding should use row.delete() to remove rows
* Revert connection option post full-stack tests
* Handle SIGHUP in dhcp and l3 agents
* Sync service from oslo-incubator
* Imported Translations from Transifex

2015.1.0rc1
-----------

* Add logging to dangling port to ml2 delete_subnet
* Avoid synchronizing session when deleting networkdhcpagentbinding
* Update L3 Agent Scheduler API tests
* Revert "IPv6 SLAAC subnet create should update ports on net"
* Add missing config parameters in neutron.conf
* Moving VLAN Transparency support from core to extension
* Re-use context session in ML2 DB get_port_binding_host
* Consider all address scopes in init_l3
* Improves the description string for the config parameter metadata_workers
* Fix intermittent UT failures in test_utils
* OOP cleanup: start protected method names with underscore
* Enhance TESTING.rst
* Remove check for bash usage
* Return from check_ports_exist_on_l3agent if no subnet found
* Open Liberty development
* Remove duplicated l3 router scheduler test cases
* Remove tests from HA routers test framework
* linuxbridge UT: Fix a regression of the recent ip_lib change
* Fix dynamic arp populate error for dvr routers
* Reorganize plugin test modules
* Reorganize unit test tree
* Add ipset element and hashsize tunables
* Allow metadata proxy running with nobody user/group
* Skip example retargetable functional test
* Prepare for unit test reorg
* Remove orphaned nuage unit test module
* Add API tests for subnet-create with subnetpool
* Refactoring cleanup for L3 agent callbacks
* Imported Translations from Transifex
* Support multiple IPv6 prefixes on internal router ports
* Fix functional test using local timeout value
* Add index for port
* Always run dnsmasq as root
* Move network MTU from core REST API to extension API
* Refactoring of L3 agent notifications for router
* Fix docstring for l3_dvr_db.dvr_vmarp_table_update
* Treat all negative quota values as -1
* Router test enhancements
* ovs_neutron_agent: Remove a redundant assignment of ovs_status
* Move orphaned api test - deux
* IPv6 SLAAC subnet create should update ports on net
* Add API tests for Neutron DVR extension
* Add missing neutron/tests/unit/agent/common/__init__.py
* Allow metadata proxy to log with nobody user/group
* Move orphaned api test
* Implement default subnet pool configuration settings
* Define bridge/port fixtures for OVS/LinuxBridge/Veth backends
* Update core reviewer responsibilities
* Remove "Arguments dropped when creating context" logging
* Some cleanup in L3 HA code
* Fix reference to non-existent setup_dvr_flows_on_integ_tun_br
* Modify a different agent in test_update_agent_description
* Move API tests to neutron.test.api
* Simple subnetpool allocation quotas
* Subnet allocation from a subnet pool
* Simplify retargetable test framework
* Increase max attempts to 2 for pings on ARP tests
* Revert "Add ipset element and hashsize tunables"
* Add API tests for subnetpool allocation
* Handle no ofport in get_vif_port_to_ofport_map
* Update .coveragerc after the removal of Cisco Nexus monolithic plugin
* Make floatingip reachable from the same network
* Fix functional configure script
* Enable ARP spoofing prevention by default
* Support IPv6 Router
* Move final remnants of router processing to router classes
* Only call get_engine().pool.dispose if _FACADE
* Stop using deprecated DEFAULT group for lock_path
* tests: don't rely on configuration files outside tests directory
* Set floating IP port status to "N/A"
* Add simple ARP spoofing protection
* Imported Translations from Transifex
* Add tests for the l3 agent namespaces manager
* Make L3 agent honor periodic_interval setting
* Handle non-index lookups in native OVSDB backend
* Fix error raising in security groups method
* Update NEC plugin decomposition status
* Auto-update gateway port after subnet-create
* Allow update of ext gateway IP's w/out port delete
* Support Dual-Stack Gateway Ports on Neutron Routers
* Remove auto deletion of routers in unit tests
* No allocation needed for specific IPv6 SLAAC addr assignment
* Remove neutron.tests.sub_base
* Fix test case for DHCP agent interface restart
* Store and log correct exception info
* Test to verify shared attribute of network
* Enable Process Monitor by default
* Reload DHCP interface when its port is updated
* Don't eagerly load ranges from IPAllocationPool
* Revert "Fix validation of physical network name for flat nets"
* Enable services on agents with admin_state_up False
* Simplify base test cases
* Send only one rule in queue on rule create/delete
* Add full-stack tests framework
* Stop any spawned ProcessMonitor at test cleanup
* Add missing DeferredOVSBridge export
* Use router state in get_ha_device_name and ha_network_added
* Added note about removing bridge from mappings
* Add language around re-proposing specs for new releases
* Follow up patch for Validate when DVR enabled, l2_pop is also enabled
* Fix displaying of devref for TestModelsMigrations
* Use 1/0 as booleans for DB2
* Remove allow_overlap from subnetpools API
* If configured, set the MTU for fpr/rfp interfaces
* Add L3 router plugin shim for Brocade MLX
* Moves ovs_lib to agent/common
* OVS agent support on Hyper-V
* No IPv6 SLAAC addrs for create router intf without fixed_ips
* Cisco UCS Manager ML2 Mechanism Driver
* Cisco Nexus1000V ML2 Mechanism Driver
* Rename/move/remove HaRouter methods
* lb-agent: use 'replace' instead of 'add' with 'bridge fdb'
* Add some useful notes in devref/db_layer.rst
* Fix a usage error of joinedload + filter in l3 scheduler
* Move process_ha_router_added/removed from HA agent to router
* Ml2 Mechanism Driver for OVSvApp Solution
* Add eventlet monkey_patch helper
* Move create_dvr_fip_interfaces into DVR
* Deprecate use_namespaces option
* Add the default_ipv6_subnet_pool config option
* Fix common misspellings
* Fix port status not being updated properly
* Fix handling of before/after notifications in linuxbridge agent
* Move external port processing to router classes
* Expose ha_state per router to agent binding via API
* Decouple L3 and service plugins during DVR router migration
* Transform BaseLinuxTestCase methods in helpers
* Remove downgrade from existing migrations
* Fix minor nits in _notify_l3_agent_new_port()
* Drop support for SQL Schema Downgrades
* VMWare NSXv: Metadata default gateway param
* Imported Translations from Transifex
* Move README.odl into opendaylight directory
* Fix missing spaces in strings split across lines
* Fix typos in neutron/db/migration
* Remove unnecessary 'IN vs ==' sql query branches
* Fix intermittent failure in TestNetworksFailover UT
* Fixes floating IP regression with multiple routers
* Add no_delete flag to UT router context manager
* Updated from global requirements
* Send notification to controller about HA router state change
* Fix usage of 'default' parameter in 1955efc66455 migration
* Move metadata proxy shared options to neutron.conf
* Reuse nova batch notifier
* Allow plugin to specify security-group rules ids upon creation
* Add native OVSDB implementation of OVSDB API
* Break coupling between ML2 and L3 during delete operation
* Fix validation of physical network name for flat nets
* Validate when DVR enabled, l2_pop is also enabled
* Fix create_security_group_rule_bulk_native to return all created rules

2015.1.0b3
----------

* Prepare Base(OVS)LinuxTestCase transformation in helpers
* Improve DVR scale performance
* Remove redundant unit tests from OVS DVR Agent
* Hyper-V Agent decomposition
* Enable to apply policies to resources with special plural
* Add a missing mock in DHCPAgentWeightSchedulerTestCase
* Basic subnetpool CRUD
* Enable to specify context on POST requests during unittests
* Fix a usage error of joinedload + filter in dhcp scheduler
* Allow to request metadata proxy only from internal interfaces
* Remove unused L3 HA RPC method
* Replace keepalived notifier bash script with Python ip monitor
* Add sanity check for OVSDB native support
* Fix metering agent failure when chain missing
* Fix minor decomp progress chart issues
* Adding VLAN Transparency support for ML2 along with REST API changes
* DHCP Service LoadBalancing Scheduler
* Make DHCP tests cleanup neutron manager reference
* Include IPv6 SLAAC addresses implicitly for port update
* Api tests to cover network mtu attribute
* Run more Rally benchmark on every patch
* Fix DBDuplicateError handling in _ensure_default_security_group
* Add ML2 VLAN mechanism driver for Brocade MLX and ICX switches
* Include IPv6 SLAAC addresses implicitly for port create
* Don't delete HA router primary VIP on agent restarts
* Introduce External IPAM Interface
* Expose Rest Api access to mtu attributes
* Advertise mtu over dhcp
* Add MTU selection to ML2
* IBM SDN-VE Plugin decomposition
* Brocade Vyatta vrouter shim plugin for vendor decomposition
* Fix spelling error in neutron.conf
* OVS DVR UT: Remove an inappropriate str() conversion
* Handle DBDuplicateError exception properly when creating default sg
* Imported Translations from Transifex
* Schedule net to a DHCP agt on subnet create
* Revert "Set default of api_workers to number of CPUs"
* Add portsecurity extension support
* Revert "fix check_ports_exist_on_l3agent in no subnet case"
* Move Unix domain socket helpers to a common place
* Move mlnx agent to be under ml2/drivers/mlnx
* iptables firewall: add framework for iptables firewall functional test
* Adding a cleanup for 'fip-' and 'snat-' namespaces in netns_cleanup
* replaces enumeration method used to get a list of interfaces
* Remove unneeded DVRAgentRpcApiMixin from OVSDVRNeutronAgent
* Prevent updating mac address of bound port
* Update api tests from tempest
* Set TEMPEST_CONFIG_DIR in the api tox env
* Remove vendor entry point
* Add a netns-cleanup functional test
* Reduce db calls count in get_devices_details_list
* Move internal port processing to router classes
* Brocade vendor code decomposition from neutron repo
* Refactor _remove_unused_security_group_info
* Add MTU selection & advertisement settings to Neutron config
* ML2 cisco_nexus MD: sync config and models with vendor repo
* fix check_ports_exist_on_l3agent in no subnet case
* Fix netns-cleanup broken by ProcessMonitor refactor
* Improve validation of remove_router_interface
* Set default of api_workers to number of CPUs
* Refactor retry mechanism used in some DB operations
* Revert "Revert "Remove port from ovsdb after its deletion""
* Add rootwrap daemon mode support
* Break coupling between ML2 and L3 during create/update operations
* Fix incorrect comments
* Start metadata agent without trying to connect db
* Remove router binding with router-interface-delete
* Remove dead code
* Update contribute.rst with Big Switch decomp
* Migrate to oslo.log
* Fix l3_agentschedulers_db for consistency of code
* Return 404 when executing net-list-on-dhcp-agent with invalid agent_id
* ofagent: Update after networking-ofagent release
* Use common agent.linux.utils.ensure_dir method
* Stop using RPC namespace to unbreak rolling upgrades
* Add Mellanox decomposition progress to chart
* Arista L3 Service Plugin decomposition
* Fix pylint issue with type VS isinstance in event_observers
* Raise QuotaResourceUnknown in the quota engine
* utils.execute: Add a debug-level log to record stdin
* Imported Translations from Transifex
* contribute.rst: Use consistent tags
* Add README and requirements.txt for VMware plugins
* Fix non-existent self.local_subnets in DvrRouter class
* Added oslo.log dependency
* Don't notify dead DHCP agent of removed networks
* Prevent calling waitall() inside a GreenPool's greenthread
* Added check for emptiness where in_ is being used
* Improve performance of _get_security_group_member_ips
* NEC plugin code split
* Imported Translations from Transifex
* Change linux/ip_lib code to better handle address families
* portsecurity_db: Fix a usage of is_attr_set
* ofagent: Have a thin driver module
* Don't start transaction during floating IP delete
* linuxbridge UT: Mock get_interface_by_ip
* linuxbridge UT: Do not create the same instance in each case
* In Arista ML2 delete tenant without any resources
* Initial copy of api tests from tempest
* Fix tempest api testing
* Use an existing function in process monitor tests
* Fix dhcp config dir removed too soon
* FIP debug messages
* Add process monitor to keepalived
* Fix wrong log output in neutron/neutron/agent/linux/dhcp.py
* [contribute.rst] Current status of Freescale Codebase
* portsecurity_db: Use is_attr_set instead of a home-grown equivalent
* Imported Translations from Transifex
* Imported Translations from Transifex
* Updated from global requirements
* Add script to copy neutron api tests from tempest
* ofagent: kill the left over after decomposition
* Use accessors instead of private attributes for Ml2 plugin
* Remove 'free' exclusions from pylint
* Refactor the ProcessMonitor API
* Networking OVS-DPDK plugin decomposition
* Fix DB2 upgrade problem for Remove Hyper-V plugin
* Big Switch Networks code split
* Reduce code duplication and fix argument order in test_wsgi
* Replace IPv4 and IPv6 default addresses with constants
* VMware NSX: Update decomposition progress table
* Updated from global requirements
* Vendor decomposition to move CSR1000v support to the networking-cisco repo
* Move Neutron Policy pages into the tree
* Adding DB model changes for Nuage Plugin post decomposition
* Add ability to run pylint check on modified files only
* Fix test tautology for DVR
* Decompose the VMware plugin
* Remove references to 0.0.0.0/0 in iptable rules
* Updated from global requirements
* Change metadata driver unit tests to use monitored spawn
* Decouple L3 and VPN service plugins during router operations
* Move _set_subnet_arp_info to dvr_router
* Refactor DVR _arp_entry methods
* Refactor management of namespaces in the L3 Agent
* Raise error upon deleting subnet with router ports
* Imported Translations from Transifex
* OVS UT: Remove useless return_value for setup_integration_br
* Introduce ip address monitor
* Add cisco decomposition progress to chart
* oslo: sync all modules that depend on incubator log module
* test_metadata_agent: don't check implementation details
* Progress chart for MidoNet
* Extend test coverage for iptables_firewall.py
* Default the Linuxbridge agent to enabling VXLAN
* Remove HyperVNeutronPlugin
* ml2 plugin: use attributes.{NETWORK, SUBNET, PORT} consistently
* ml2 extension driver: more tests, fix data argument inconsistency
* Use oslo_config choices support
* Metaplugin decomposition
* ofagent: Vendor code decomposition
* contribute.rst: Fill in the current status of ofagent
* Missing entry points for cisco apic topology agents
* Prevent direct port-delete of FIP Agent GW and CSNAT
* PLUMgrid plugin decomposition
* Improve structure of Vendor Decomposition progress chart
* Removing a router twice from the same agent shouldn't cause an error
* Simplify prepare_veth_pairs in functional tests
* Add a functional test for iptables_manager.binary_name
* Add InvalidIpForNetwork and InvalidIpForSubnet exception
* ovs_neutron_agent should exit gracefully
* Ensure tests run under python2.7
* Validate string length at API level
* Capture progress chart for vendor decomposition
* Fixes formatting errors in devref documentation
* Imported Translations from Transifex
* Fix retrieval of shared firewall_policies
* Password config options should be marked secret
* Check whether sudo is enabled in BaseSudoTestCase.setUpClass
* Revert "Remove port from ovsdb after its deletion"
* Add filter for provider network attributes in ML2
* tests: initialize policy in BaseTestCase
* policy: don't hack around oslo.config path search algorithm
* Make listing security groups faster
* Allow AsyncProcess to block on process start and stop
* Don't mock plugin in L3SchedulerTestBaseMixin
* Adds migration script for Hyper-V Plugin tables
* Make del_fdb_flow() idempotent
* Update default tox envlist to match voting gate checks
* Added a policy for retrieving the agent hosting a load balancer
* Avoid ObjectDeletedError while accessing deleted binding
* Correct db functional tests to support oslo.db 1.50
* Avoid DetachedInstanceError after session rollback
* Always fill UDP checksums in DHCP replies
* remove unused code in metadata agent code
* Move pylint checks to pep8 testenv
* Change L3 agent AdvancedService class to be non-singleton
* Passes the plugin context variable in the ML2 Extension Driver API
* devref: added guidelines to maintain service entry points
* VMware NSXv: Added router-type to database model
* Remove discover from test requirements
* Add callbacks-based system to Neutron
* Refactor Pinger class
* Create/Delete FIP Agent gateway port only if DVR Routers
* Move the assignment of existing_floating_ips before try block
* Fix misspelled words in neutron
* Ensure arping always exits
* Updated from global requirements
* wsgi: remove explicit monkey_patch() call
* If providers exist in neutron.conf, don't look in services conf
* test_ovs_dvr_neutron_agent: Use consistent variable names
* Nuage core plugin decomposition
* devref: consider sphinx warnings as failures
* devref: don't link to nonlocal images
* devref: fixed class name for test_migrations autodocumentation
* devref: updated documentation for oslo-incubator
* devref: updated documentation for advanced services
* Avoid fetching network in _commit_port_binding
* VMware: Router Type Extension Support
* OVS UT: Change misleading constants in veth tests
* test_l2population: Use a fake mech driver instead of ofagent
* l2population_rpc: Make fdb_add_tun/fdb_remove_tun more flexible
* Make nova notifier work with sessions
* Fix parameters in exception handling
* adopt namespace-less oslo imports
* Do not run neutron-ns-metadata-proxy as root on dhcp agent
* Move Floating IP processing to Router classes
* Updated from global requirements
* Improve exception handling in _process_router_update()
* Cisco Nexus ML2 Vendor decomposition
* Remove versioning import of novaclient
* Remove remaining uses of passing root_helper
* Remove root_helper arg from sanity checks
* Enable pylint unnecessary-pass
* Enable pylint no-value-for-parameter
* Enable pylint bad-super-call
* Enable 'free' pylint checks
* Remove reference to self.services_sync
* Fix type of exception in ml2 l2pop
* VMware NSXv: Add configuration options to nsx.ini
* Mock link local allocator write so UT doesn't write a file
* VMWare NSXv: Add configuration params to nsx.ini
* Remove error logs for a common situation (non created ipsets)
* Default route missing for IPv6 subnets in HA Router
* Unify logic that determines liveliness of DHCP agent
* fix for _get_external_device_interface_name trace
* ML2: remove underscore from public method
* Fix static strings with labels for DVR
* Get rid of rpc to fetch fip agent port on agent
* Combining sec groups member update RPC calls
* VMWare NSXv: id fields should be nullable
* Check if routing rule exists before adding
* Remove root_helper arg from DHCP agent
* Remove root_helper arg from AsyncProcess
* Remove root_helper arg from linuxbridge
* Remove root_helper arg from SecurityGroupAgentRpc
* Moved several services into neutron.cmd.eventlet
* Monkey patch all the code inside neutron/cmd/eventlet/..
* tests: monkey patch stdlib before importing other modules
* Don't monkey patch netns_cleanup
* Remove root_helper arg from IpsetManager
* Revert "Add the rebinding chance in _bind_port_if_needed"
* Remove root_helper arg from IptablesManager
* Remove root_helper arg from external_process
* Add a functional test that checks HA router is configured on a restarted agent
* Update midonet plugin requirements
* Stop using passed root_helper in ip_lib
* OVS UT: Fix some confusions between local vlan id and segmentation id
* Un-break tox for unit tests
* Fix FIP agent gw port delete based on external net
* Skip DBDuplicateEntry exception in security group creation
* Hyper-V: Fixes security groups issue
* Fix the api job
* Setup br-tun in secure fail mode to avoid broadcast storms
* Delete qg device during DVR-SNAT router deletion
* Automate host configuration for functional testing
* ML2: Hierarchical port binding
* ML2: DB changes for hierarchical port binding
* Remove RPC dependency to create FIP agent gw port
* Fix typo in bash tool
* Remove remaining root_helper args from plugins
* Fix usage of drop_constraint in 2a1ee2fb59e0 migration
* Fix index name in downgrade 26b54cf9024d migration
* Remove root_helper arg from linux interface
* Remove root_helper arg from L3 Agent
* OVS DVR: Remove dead code
* Updated from global requirements
* Fix AttributeError exception for API's test_network_lifecycle
* Remove root_helper arg for ovs_lib
* Raise timeout for test_conntrack_disassociate_fip
* Cleanup in keepalived tests
* Add run_as_root option to utils.execute
* Revert "monkey patch stdlib before importing other modules"
* Remove unused RPC methods from l3_rpc
* Tweak mocking logic for L3 plugin tests
* Move NCS mech driver to its new home
* Added policy for lbaas v2 agent extension resource
* keepalived: use sh instead of bash for notifier scripts
* Refactor to facilitate DVR scale performance
* hacking: also catch 'import oslo.*' imports
* Moved hacking unit test into proper location
* Stale VXLAN and GRE tunnel port/flow deletion
* Use ovsdb-api neutral column/value mappings
* Prepare to functionally test OVSDB interfaces
* NEC: Merge NEC plugin models into single module
* Remove remaining do_delete from unit tests
* Typos fixed
* Scope state reporting rpc api using a messaging namespace
* Remove use of keepalived 'vrrp_sync_group' as it is unused
* Scope dvr rpc api using a messaging namespace
* Updated from global requirements
* Remove port from ovsdb after its deletion
* Add index on tenant_id
* Remove deprecated DEFAULT.root_helper
* Provide routes for neighbor IPv4 subnets
* OVS DVR: Use a right mac address value to compose a flow
* Refactor radvd control in the l3-agent
* monkey patch stdlib before importing other modules
* Don't crash when adding duplicate gre allocation
* Fix lack of device ownership enforcement for DVR routers
* Search in /sys/class/net for network devices
* Adopt rpc_api devref to new oslo_messaging namespace
* Fix minor nits with the devref's contribute section
* Remove VPN specific exception
* Correctly mock-out 'ip route...' calls in IPv6 test
* Cleanup dead code for dnsmasq
* Add mtu attributes to network model
* Add the rebinding chance in _bind_port_if_needed
* Add vlan_transparent attribute to network model
* Check conntrack rule cleanup on floating IP disassociate
* l2-pop shouldn't notify agent about inactive ports
* Drop devstack-gate files from Neutron repo
* Use weak ref to avoid deleting fip namespace through agent
* Move DVR floating ip methods to dvr_router
* Provide more details about testing strategies
* Add section for DevStack Integration Strategies to the DevRef
* VMware: consolidate NSX models
* Restore and fix vmware unit tests
* Move extra routes processing to router classes
* oslo: migrate to namespace-less import paths

2015.1.0b2
----------

* Fix breakage in all service repo unit tests, due to duplicate imports of exts
* Log entry when no Floating IP interface present
* Refactor logging in loop to only log debug messages once
* Nuke a useless lambda wrapper and call to iterkeys (review feedback)
* Nuke remaining service config and extensions from main repo
* Pass '--dhcp-authoritative' option to dnsmasq
* Imported Translations from Transifex
* ml2: Simplify _process_provider_create
* Fix extra-dhcp-opt on stateless dhcpv6 subnet
* Updated from global requirements
* ML2: Use same port binding logic for DVR ports as non-DVR ports
* Improve robustness of network failover
* Decrease rpc timeout after agent receives SIGTERM
* Configures RADVD to send managed flag in RA for DHCP_STATEFUL
* Make prevent_l3_port_deletion handle missing port
* Backout 152195, which doesn't check the same token that it saves
* NSX DB models split, part 3 (and final)
* NSX DB models split, part 2
* Discriminate loaded extensions using their alias
* Refactor ml2 manager
* Extension moving tweaks, exceptions and extension path fix
* Log tenant ports if subnet could not be deleted
* Fixing several misspellings in neutron
* NSX DB models split, part 1
* Imported Translations from Transifex
* Enable super-on-old-class pylint check
* fixes error logging to use the right exception parent class
* Drop bw compat module for OpenDaylight
* Don't pass the port down to the floating ip processing
* Move agent.linux.utils tests to proper location
* Drop deprecated namespace for oslo.rootwrap
* Encapsulate DVR Fip namespace
* Move ha router functionality from the agent to ha_router
* Remove duplicate logging of attribute validation errors
* Add requirements.txt file for OpenDaylight Mech Driver
* Mechanisms to move extensions and config into service repos
* Fix flake exclude matching of .*
* Hardening unittest, make resilient to address assignment order
* Allow to request metadata proxy only with redirection
* Remove unused mocks
* Thinning Arista ML2 driver from neutron tree
* Allow port mac_address to be modified
* Removed redundant statement from l3agentscheduler
* Implements the ProcessMonitor in the l3_agent
* Add option to remove networks from dead DHCP agents
* Thin MLNX ML2 mechanism driver and agent
* Fixing a log message in Arista L3 Service Plugin
* Not assign dynamic IPv6 address on dhcp interface
* Default security group table
* Support Extra DHCP Options for IPv4 and IPv6
* Refactor _convert_sgr_to_iptables_rules in iptables_firewall
* Do not check twice IP allocations for auto-address subnets
* Make the interface driver available to the router classes
* Make agent config available to the router classes
* Updated from global requirements
* Drop bin/neutron-rootwrap
* Refactor iptables rule expansion for the non ipset case
* Set locale before check dnsmasq version
* Freescale FWaaS Plugin: Update to setup.cfg
* Allow 'max_l3_agents_per_router' to be set to '0'
* test_agent_scheduler: Fix a misleading variable name
* Fix AttributeError when using DVRServerRpcApi
* Add abandon script from nova
* Add missing Connection.close() method
* Deleting HA router with attached port causes DB inconsistencies
* Refactor the ProcessMonitor _exit_handler to ProcessMonitor
* TestL2PopulationRpcTestCase: Stop loading linuxbridge mech driver
* Return 404 when executing router-list-on-l3-agent with invalid agent_id
* VLAN support for DVR
* Fixes Hyper-V agent root_helper issue
* Ensure ofports are converted to string before trying to use join()
* Add coverage for extra routes extension
* Add address family to 'ip rule' calls
* Add OVSDB abstract API
* Add functional tests for IptablesManager using tcp/udp
* dhcp: move dnsmasq version check to sanity_check
* Use DVRServerRpcApi instead of a mixin
* Scope secgroup rpc api using a messaging namespace
* Add and use SecurityGroupAgentRpc
* hyperv: drop useless messaging.Target on HyperVSecurityAgent
* tests: don't spread fixtures.TempDir throughout test cases
* Extract l2pop/DVR controller logic to common method
* Imported Translations from Transifex
* attributes: Additional IP address validation
* Mention networking_odl in README.odl
* Updated from global requirements
* Overload correctly BASEV2.__table_args__
* Add notes on how to deal with stable branches
* Configure IPv6 LLADDR only on master L3 HA instance
* Do not duplicate message consumers
* Add index on db "allocated" columns
* pep8: cleaned up excludes
* Remove check_i18n tox target
* Implements ProcessMonitor in the dhcp_agent
* Functional test IPAM DB operation
* If router is HA, get current_cidrs from keepalived object
* Move process monitor settings to neutron.conf AGENT section
* Drop SecurityGroupServerRpcApiMixin
* sriovnicagent: drop usage of SecurityGroupServerRpcApiMixin
* sriovnicagent: untangle SecurityGroupAgentRpcMixin
* mlnx: drop usage of SecurityGroupServerRpcApiMixin
* mlnx: untangle SecurityGroupAgentRpcMixin
* linuxbridge: drop usage of SecurityGroupServerRpcApiMixin
* linuxbridge: untangle SecurityGroupAgentRpcMixin
* Use db constraint to ensure mac address uniqueness
* Ignore 404 error and lower a warning log to info
* Reorganize OVSDB API
* Use proper capitalization for OVS table names
* Move shared metadata driver related config options
* Remove useless constant from l3 agent module
* Added test_dvr_router_lifecycle to cover dvr
* Imported Translations from Transifex
* Use constants from networking_odl project
* Initialize dist_fip_count after agent restart
* Fixes Multiple External Networks issue with DVR
* Replace FLOATING_IP_CIDR_SUFFIX constant with utils
* tests: drop usage of SecurityGroupServerRpcApiMixin
* ovs: drop usage of SecurityGroupServerRpcApiMixin
* oneconvergence: drop usage of SecurityGroupServerRpcApiMixin
* ofagent: drop usage of SecurityGroupServerRpcApiMixin
* nec: drop usage of SecurityGroupServerRpcApiMixin
* hyperv: drop usage of SecurityGroupServerRpcApiMixin
* bigswitch: drop usage of SecurityGroupServerRpcApiMixin
* Create SecurityGroupServerRpcApi and add some docs
* Improve agent-based flat/vlan ml2 port binding failure logging
* ml2: remove stale _filter_nets_l3 in get_networks
* drop unused test rootwrap filter file
* Updated from global requirements
* SIGHUP keepalived if L3 agent restarts
* Update _cur names to _current in iptables_firewall.py
* Added comments, and refactored _add_rule_by_security_group
* Improve test coverage of dhcp agent scheduling
* Imported Translations from Transifex
* tools/split.sh: Tweak commit message
* Switch to using abc in the retargetable client
* common_db_mixin.py: simplify CommonDbMixin
* Fixes blocking of VRF config in Arista L3 Plugin
* Drop _test_rootwrap_exec test
* Fix pylint unbalanced-tuple-unpacking warning
* Corrected singulars/plurals in iptables_firewall.py
* Create DvrRouter and HaRouter as a sub-class of Router
* Remove unused self.sync_progress attribute
* DHCP agent restructuring
* Move Monkey patch back to being as early as possible
* Fix outstanding failures with Neutron API job
* Disable unbalanced-tuple-unpacking
* Revert "Change transaction isolation so retry logic could work properly"
* Change transaction isolation so retry logic could work properly
* Updated from global requirements
* Refactor the _get_external_device_interface_name method
* Refactor of floating ip processing in L3 Agent
* ML2: Driver API changes for hierarchical port binding
* Fix some assertEqual argument orders
* Don't log a warning if an iptables chain doesn't exist
* Migrate to oslo.concurrency
* Replace missing space in error message
* Clarify misleading iptables comment
* Fix missing spaces in error messages
* make delete_router send delete_port to core_plugin
* VMWare-NSXv: VMWare NSXv extensions
* Dropped fixture module
* base.py: Improve exception handling
* Correct _test_delete_ports_by_device_id_second_call_failure
* Add ovsdb-related functional tests
* VMWare-NSXv: VMWare NSXv configuration file
* Imported Translations from Transifex
* Create arping helper in ip_lib
* Initial thin ML2 mechanism driver
* Enable adding new tag with options
* Call on dhcp-agent DhcpLocalProcess.restart() breaks dhcp
* Fixes shared networks in Arista ML2 driver
* Move agent cleanup scripts to cmd module
* Fix IP allocation for multiple slaac subnets
* tests: don't restore stopped mock that is set in setUp()
* misc-sanity-checks.sh: Some cleanups
* Log iptables rules in a readable format
* Remove main alias for bw compat with vpn agent
* Midonet plugin decomposition
* Fix topic for provider security group update
* Specify prefix length for IPv6 subnets
* Service split: cleaned up setup.cfg
* VMWare NSXv DB model bugfix
* Speed up initial L3 full sync time
* hacking: enable H238 (old style class declaration, use new style)
* hacking: enable W292 (no newline at end of file)
* Update hacking to 0.10
* Use "if dict.get(key):" instead of "if key in dict and dict[key]:"
* Rename qexception->nexception
* Fix AttributeError on check_foreign_keys in functional job
* Catch StaleDataError in update_device_down
* Code improvement in type_vxlan.py and type_gre.py files
* Ensure config directory created before updating leases
* Allow IptablesManager to manage mangle table
* Fix IPv6 Subnet Slaac Check
* Imported Translations from Transifex
* Move non-bridge-related OVSBridge methods to BaseOVS
* Move metadata agent entry to its own file
* Run only one instance of Nuage sync cycle at a time
* Updated from global requirements
* Scope metadata rpc api using a messaging namespace
* Provide doc string pointers for the dhcp agent rpc api
* Remove DBDuplicateEntry columns check
* Limit permission change
* Break out config and entry point out of l3/agent file
* Validate legacy router services before migration
* Clarify dnsmasq version check failure message
* Update comment about metadata_proxy_shared_secret config
* Remove redundant tunnel ids from ovs agent
* Add index generation for IPv6 rules for DVR
* Correct l3-agent iptables rule for metadata proxy
* Fix UT for L2pop test_get_agent_ports_no_data()
* Move postcommit ops out of transaction for bulk
* Reset policies after RESOURCE_ATTRIBUTE_MAP is populated
* Remove SELECT FOR UPDATE from delete_network and delete_subnet
* Bump minimal dnsmasq version to 2.67
* Make L3 HA VIPs ordering consistent in keepalived.conf
* Add Process class helper to manage processes with namespace
* Make lb mechanism driver use enable_security_group flag
* Catch PortNotFound and SubnetNotFound during network_delete
* HA for DVR - schema migration and change
* Revert "Revert "Add metadata proxy L3 agent driver""
* moving vxlan module check to sanity checks and making practical
* Drop functional/contrib directory
* refactor l3-agent to include dvr.py
* Validate IPv6 subnet while associating to Router
* VMWare-NSXv: VMWare NSXv database models
* Deal with PEP-0476 certificate chaining checking
* Reduce duplicate code in test_iptables_manager
* Add support for retargetable functional api testing
* print error when no match mapping found in check_segment_for_agent
* Tweak gate hooks scripts to handle both functional and api jobs
* Replace mention of nose with nose2 in devref
* Skip adding ips from non dhcp enabled subnets to hosts file
* Add developer documentation for plugins/drivers contributions
* Deletes floating agent gw port on disassociate
* Add help text for 'host' parameter in neutron.conf file
* Updated keystone_admin conf section to reflect changes in middleware
* Removed spurious check for ip version
* Ensure test_metaplugin handles random hashseeds
* Ignore non-existent ports during OVS intf list
* [apic ml2] Bind ports regardless of the owner
* Improve unit test coverage for Ml2 db.py
* Delete the console scripts for lbaas and vpnaas
* Confusing message deleting default security group
* Enable the "not-callable" pylint check
* ovs_dvr: Use lazy logging interpolation
* Add a constant for router interface device owners
* Stale VXLAN & GRE tunnel endpoint deletion from DB
* Add support for flat networks in SRIOV Mechanism Driver
* Retry on unassigned ofport instead of treating it as a failure
* VMware: fix security group check on port create
* Eventlet green threads not released back to pool
* Don't unnecessarily loop through all ports/interfaces
* Set type=internal as part of port creation
* Fix DVR flow problems for IPv6 subnet
* Allow to specify IP address of floating ip
* Do not count dvr agents while creating HA ports
* csr1kv_hd_driver: Improve exception handling
* Remove _delete_port_security_group_bindings from delete_port
* Remove useless parameter from l3_dvr_db.py
* Clean-up sanity checks done via shell scripts
* Do not run neutron-ns-metadata-proxy as root on L3 agent
* Correct invalid indentation in is_dvr_serviced
* Add validation for the dvr router l3agent binding
* Fixes spelling error
* get_binary_name should return strings without spaces
* validate L3 HA min/max _l3_agents_per_router
* Enable pylint checks for "anomalous" string escapes
* Tighten dnsmasq version regex
* Remove unnecessary regex grouping
* Combine author_tag and log_translation_hint regexes
* ML2 UT: Fix incorrect mock return value
* ipv6: set OtherConfig flag for DHCPv6 stateless subnets
* PLUMgrid plugin: Fix for delete subnet with admin context
* brocade: Use lazy logging interpolation
* linuxbridge: Use lazy logging interpolation
* embrane: Use lazy logging interpolation
* bigswitch: Use lazy logging interpolation
* Use lazy logging interpolation
* Cisco: logging incorrectly called with (fmt, arg) tuple
* ml2: remove superfluous %s in LOG.debug() format
* Fix typo'd format parameter in midonet_lib.py
* Update L3 agent drivers singletons to look at new agent
* Prevent symlinks to be added to the tree
* Copy the contrib directory instead of moving it
* Revert "Add metadata proxy L3 agent driver"
* Scope dhcp rpc api using a messaging namespace
* Validate local_ip for Linuxbridge agent
* Allow setting a tenant router's external IP
* Remove NSX 'service' plugin
* Imported Translations from Transifex
* Move DB TestModelsMigrations from unit to functional
* tests: drop unit tests that only check default configuration values
* Backward compatibility for advanced services
* Update heal_script for alembic 0.7.1

2015.1.0b1
----------

* Add metadata proxy L3 agent driver
* Updated from global requirements
* Move contrib directory to base test directory
* Add OVS status and fix OVS crash
* Option for root_helper when checking namespace
* Cleanup req_format in test_api_v2_resource
* Imported Translations from Transifex
* Cisco: unsupported format character in log format
* Correct arguments to logging function
* Support 'alive' filter for get_agents() in agents_db
* Minor lbaasv2 things from the feature branch, needed in neutron
* Advanced services support in neutron-db-manage
* Remove locking from network and subnet delete op
* Removed unused iso8601 dependency
* Avoid unnecessary explicit str() conversion around exceptions
* Add functional test for l3-agent metadata proxy
* Remove mlnx plugin
* Set timeout for functional job
* Enable test_migration
* Fix neutron hang for IPv6 allocation pool update
* tests: initialize admin context after super().setUp call
* Improve performance of get_active_networks_info
* Fixed test test_update_port_security_off_address_pairs
* openvswitch/ofagent: Remove OVS.enable_tunneling option
* Imported Translations from Transifex
* Remove unused dependencies
* Generate testr_results.html for neutron functional job
* L3 Agent restructure - observer hierarchy
* Replace non-ovs_lib calls of run_vsctl with library functions
* Don't restore stopped mock that is initialized in setUp()
* Separate wait_until to standalone function
* Imported Translations from Transifex
* Mock up time.sleep to avoid unnecessary wait in test_ovs_tunnel
* Catch duplicate errors scheduling SNAT service
* Fix for KeyError: 'gw_port_host' on l3_agent
* Migrate to oslo.context
* Have L3 agent catch the correct exception
* Not nova but neutron
* Remove broad exception catch from periodic_sync_routers_task
* Fix race condition in ProcessMonitor
* Updated from global requirements
* Refactor process_router method in L3 agent
* Switch to using subunit-trace from tempest-lib
* Move classes out of l3_agent.py
* Prettify tox output for functional tests
* Services split, pass 2
* Fix IPv6 RA security group rule for DVR
* Imported Translations from Transifex
* ofa_test_base: Fix NoSuchOptError in UT
* Add lbaasv2 extension to Neutron for REST refactor
* Remove TODO for H404
* Update rpc_api docs with example version update
* Auto allocate gateway_ip even for SLAAC subnets
* Updated from global requirements
* Split services code out of Neutron, pass 1
* Use comments rather than no-op string statements
* Fix AttributeError during startup of ovs agent in DVR mode
* Enforce log hints
* Disallow log hints in LOG.debug
* Reduce code duplication in test_linux_dhcp
* Print version info at start
* Enforce log hints in ofagent and oneconvergence
* Make sudo check in ip_lib.IpNetnsCommand.execute optional
* Move set_override('root_helper', ...) to base functional class
* Imported Translations from Transifex
* IpsetManager refactoring
* Update i18n translation for NEC plugin log msg's
* return the dict of port when no sec-group involved
* Imported Translations from Transifex
* Update i18n translation for IBM plugin log msg's
* Workflow documentation is now in infra-manual
* tox.ini: Prevent casual addition of bash dependency
* Updated from global requirements
* Remove RpcCallback class
* Convert several uses of RpcCallback
* Fix up an old RpcProxy assumption
* Remove RpcProxy class
* Cleanup recent generalization in post mortem debugger
* radvd: pass -m syslog to avoid thread lock for radvd 2.0+
* Get rid of py26 references: OrderedDict, httplib, xml testing
* Imported Translations from Transifex
* Fix enable_metadata_network flag
* Fix program name in --version output
* Enforce log hints in opencontrail
* Update i18n translation for Metaplugin plugin
* Update i18n translation for Brocade plugin log msg's
* Update i18n translation for Nuage plugin
* Update i18n translation for Embrane plugin
* Enforce log hints in neutron.plugins.plumgrid
* Remove ovs-vsctl call from OVSInterfaceDriver
* Update i18n translation for Midonet plugin
* Enforce log hints in neutron.plugins.sriovnicagent
* Enforce log hints in neutron.plugins.hyperv
* Imported Translations from Transifex
* Drop RpcProxy usage from DhcpAgentNotifyAPI
* Updated the README.rst
* Fix base test class for functional api testing
* Use oslo function for parsing bool from env var
* Don't block on rpc calls in unit tests
* Refactor test_migration
* Strip square brackets from IPv6 addresses
* Update i18n translation for BigSwitch plugin log msg's
* Imported Translations from Transifex
* pretty_tox.sh: Portability improvement
* iptables_manager: Fix get_binary_name for eventlet
* test_dhcp_agent: Fix no-op tests
* Drop old code from SecurityGroupAgentRpcApiMixin
* Drop RpcProxy usage from ml2 AgentNotifierApi
* Update i18n translation for Mellanox plugin and agent log msg's
* Drop RpcProxy usage from L3AgentNotifyAPI
* Simplify L3 HA unit test structure
* Update i18n translation for VMware NSX plugin log msg's
* Alter execute_alembic_command() to not assume all commands
* hacking: Check if correct log markers are used
* Fix hostname validation for nameservers
* Removed python2.6 rootwrap filters
* Imported Translations from Transifex
* MeteringPluginRpc: Fix crash in periodic_task
* Enable undefined-loop-variable pylint check
* Remove unused variables from get_devices_details_list
* Change description of default security group
* Fix incorrect exception order in _execute_request
* Migrate to oslo.i18n
* Migrate to oslo.middleware
* Remove unused xml constants
* Check metadata iptables chains during functional test
* Drop RpcProxy usage from MeteringAgentNotifyAPI
* Drop RpcProxy usage from l2population code
* Drop RpcProxy usage from cisco apic ml2 plugin
* Drop RpcProxy usage from oneconvergence plugin
* Synced processutils and periodic_task modules
* Migrate to oslo.utils
* Fix floating-ips in error state in dvr mode
* Reject trailing whitespaces in IP address
* Imported Translations from Transifex
* CSCO:Tenants not to access unshared n/w profiles
* Drop sudo requirement from a unit test
* Remove Python 2.6 classifier
* Update i18n translation for Cisco plugins and cfg agent log msg's
* Remove ryu plugin
* Imported Translations from Transifex
* Drop RpcProxy usage from nec plugin
* Drop RpcProxy usage from mlnx plugin
* Drop RpcProxy usage from ibm plugin
* Drop RpcProxy usage from hyperv plugin
* Drop RpcProxy usage from cisco.l3
* Drop RpcProxy usage from cisco.cfg_agent
* Drop RpcProxy usage from brocade plugin
* Update rally-jobs files
* Test HA router failover
* Imported Translations from Transifex
* Update i18n translation for linuxbridge log msg's
* Update i18n translation for openvswitch log msg's
* Update i18n translation for ML2 plugin log msg's
* Updated from global requirements
* Imported Translations from Transifex
* Enforce log hints in neutron.services
* Enforce log hints in neutron.services.metering
* Fix metadata proxy start problem for v6-v4 network
* Fix AttributeError in RPC code for DVR
* Drop RpcProxy usage from bigswitch plugin
* Drop RpcProxy usage from VPNaaS code
* Drop RpcProxy usage from metering_agent
* Fix context.elevated
* Tighten up try/except block around rpc call
* Implement migration of legacy routers to distributed
* run_tests.sh OS X script fixes
* Eliminate unnecessary indirection in L3 agent
* Show progress output while running unit tests
* Drop RpcProxy usage from LBaaS code
* Enforce log hints in neutron.services.loadbalancer
* Enforce log hints in neutron.services.firewall
* Enforce log hints in neutron.services.l3_router
* enable H401 hacking check
* enable H237 check
* Updated from global requirements
* Check for default sec-group made case insensitive
* Update i18n translation for neutron.server/scheduler log msg's
* Update i18n translation for neutron.notifiers log msg's
* Update i18n translation for neutron.common/debug log msg's
* Imported Translations from Transifex
* ofagent: Remove obsolete bridge_mappings (plugin side)
* Delete FIP namespace when last VM is deleted
* Fix a race condition adding a security group rule
* Drop RpcProxy usage from FWaaS code
* Drop RpcProxy usage from neutron.agent.rpc.PluginApi
* Fix a copy/pasted test mistake
* Drop test code copied from nova
* Drop several uses of RpcCallback
* Add some basic rpc api docs
* Drop RpcCallback usage from DhcpRpcCallback
* Drop RpcProxy usage from PluginReportStateAPI
* Fix hostname regex pattern
* Catch NoResultFound in _get_policy_profile_by_name
* Validate loadbalancing method when updating a pool
* Update i18n translation for neutron.api log msg's
* Catch DBReferenceError exception during binding a router
* Enable default SNAT from networks connected to a router indirectly
* Imported Translations from Transifex
* BSN: Optimistic locking strategy for consistency
* BSN: include missing data in floating IP call
* ofagent: Remove obsolete bridge_mappings (agent side)
* NSX: Validate gateway device list against DB
* Drop RpcProxy usage from MetadataPluginApi
* Drop usage of RpcProxy from L3PluginApi
* Prevent an iteration through ports on IPv6 slaac
* Use a string multiplier instead of 59 repetitions
* Convert all incoming protocol numbers to string
* Updated from global requirements
* Correct raw table regex in test_security_groups_rpc
* BSN: Add network to ext_gw_info sent to backend
* BSN: Set inconsistency record on delete failure
* Fix PYTHONHASHSEED bugs in test_security_groups_rpc
* Subnet delete for IPv6 SLAAC should not require prior port disassoc
* Fix client side versions in dhcp rpc API
* Drop usage of RpcProxy from DhcpPluginApi
* linuxbridge-agent: make vxlan unicast check more efficient
* Moved out common testcases from test_type_vxlan.py
* Update i18n translation for neutron.extension log msg's
* Update i18n translation for neutron.db log msg's
* Update i18n translation for neutron.cmd log msg's
* Update i18n translation for neutron.agents log msg's
* enable F812 check for flake8
* enable F811 check for flake8
* Decrease policy logging verbosity
* Support pudb as a different post mortem debugger
* Cleanup and refactor methods in unit/test_security_groups_rpc
* switch to oslo.serialization
* Add rootwrap filters for ofagent
* Updated policy module from oslo-incubator
* Resolving some spelling mistakes
* Fix for FIPs duplicated across hosts for DVR
* Drop neutron.common.rpc.MessagingTimeout
* Remove neutron.common.rpc.RemoteError
* Remove neutron.common.rpc.RPCException
* Remove useless return
* Cisco VPNaaS and L3 router plugin integration
* Fix missing allowed command in openvswitch xenapi agent
* fix event_send for re-assign floating ip
* Remove openvswitch core plugin entry point
* rootwrap config files reference deleted quantum binaries
* Fix L3 HA network creation to allow user to create router
* Update default value for agent_required attribute
* SRIOV: Fix Wrong Product ID for Intel NIC example
* Imported Translations from Transifex
* Updated from global requirements
* Purge use of "PRED and A or B" poor-mans-ternary
* Include call to delete_subnet from delete_network at DB level
* Use correct base class for unit tests for ML2 drivers
* Replace "nova" entries in iptables_manager with "neutron"
* Drop and recreate FK if adding new PK to routerl3bindings
* Imported Translations from Transifex
* Remove duplicate ensure_remove_chain method in iptables_manager
* ML2: fix file permissions
* Fix sneaky copypaste typo in ovs agent scheduler test
* Make L2 DVR Agent start successfully without an active neutron server
* Detect if iproute2 supports SR-IOV commands
* Use stop() method on MessageHandlingServer
* Rename constant to a more appropriate name
* Big Switch: Fix SSL version on get_server_cert
* Check for concurrent port binding deletion before binding the port
* Imported Translations from Transifex
* Batch ports from security groups RPC handler
* Fix incorrect int/tuple comparison during binary search
* Big Switch: Send notification after port update
* Allow to add router interface to IPv6 SLAAC network
* ML2 Cisco Nexus MD - not overwriting existing config
* Reorder operations in (l3_dvr) update floating ip
* Use RPC instead of neutron client in metadata agent
* Add assertion to test_page_reverse method
* Adds an option to enable broadcast replies to Dnsmasq
* Add advsvc role to neutron policy file
* NSX: allow multiple networks with same vlan on different phy_net
* NSX: Fix foreign key constraint delete provider network
* Imported Translations from Transifex
* Fix 'Length too long' error in neutron-dsvm-functional tests
* Remove use_namespaces from RouterInfo Property
* Fix handling of CIDR in allowed address pairs
* Updated from global requirements
* Remove XML support
* enable F402 check for flake8
* enable E713 in pep8 tests
* NEC plugin: Allow to apply Packet filter on OFC router interface
* _update_router_db: don't hold open transactions
* Big Switch: Switch to TLSv1 in server manager
* Only resync DHCP for a particular network when there is a failure
* Validate network config (vlan)
* Validate local_ip for OVS agent is actual ip address
* Imported Translations from Transifex
* Hyper-V: Remove useless use of "else" clause on for loop
* Enable no-name-in-module pylint check
* Move disabling of metadata and ipv6_ra to _destroy_router_namespace
* Updated from global requirements
* Adds macvtap support
* Remove duplicate import of constants module
* Switch run-time import to using importutils.import_module
* Enable assignment-from-no-return pylint check
* tox.ini: Avoid using bash where unnecessary
* l2population_rpc: docstring improvements
* Fix race condition on processing DVR floating IPs
* neutron-db-manage finds the config file automatically
* Ensure test_agent_manager handles random hashseeds
* Ensure ofagent unit tests handle random hashseeds
* Moves the HA resource creations outside of transaction
* Modify docstring on send_delete_port_request in N1kv plugin
* Empty files should not contain copyright or license
* Remove superfluous except/re-raise
* Remove single occurrence of lost-exception warning
* Schema enhancement to support MultiSegment Network
* Remove redundant initialization and check from DVR RPC mixin
* Improve performance of security group DB query
* Optimize query in _select_dhcp_ips_for_network_ids
* Updated cache module and its dependencies
* Updated service.py and its dependencies
* Updated fileutils and its dependencies
* Cisco N1kv: Fix update network profile for add tenants
* DB: Only ask for MAC instead of entire port
* Only fetch port_id from SG binding table
* NSX: Make conn_idle_timeout configurable
* nsx plugin: keep old priority when reconnecting bad connection
* l3_agent: avoid name conflict with context
* Guard against concurrent port removal in DVR
* Refactor l2_pop code to pass mac/ip info more readably
* Fix KeyError in dhcp_rpc when plugin.port_update raises an exception
* Refactor _make_subnet_dict to avoid issuing unnecessary queries
* openvswitch: Remove no longer used options
* VPNaaS Cisco unit test clean-up
* Call DVR VMARP notify outside of transaction

2014.2
------

* remove E251 exemption from pep8 check
* Race for l2pop when ports go up/down on same host
* Catch exceptions in router rescheduler
* Minor: remove unnecessary intermediate variable
* Handle unused set_context in L3NatTestCaseMixin.floatingip_with_assoc
* Use EUI64 for IPv6 SLAAC when subnet is specified
* Arista L3 Ops is success if it is successful on one peer
* Add unique constraints in IPAvailabilityRange
* Remove two sets that are not referenced
* Update VPN logging to use new i18n functions
* mock.assert_called_once() is not a valid method
* Check for VPN Objects when deleting interfaces
* Compare subnet length as well when deleting DHCP entry
* Add pylint tox environment and disable all existing warnings
* Updated from global requirements
* update the relative path of api_extensions_path
* Reduce security group db calls to neutron server
* Ignore top-level hidden dirs/files by default
* Remove some duplicate unit tests
* NSX: drop support to deprecated dist-router extension
* Execute udevadm on other linux installs
* Avoid constructing a RouterInfo object to get namespace name
* Drop sslutils and versionutils modules
* Imported Translations from Transifex
* Remove an argument that is never used
* Refactor _process_routers to handle a single router
* Add Juno release milestone
* Add database relationship between router and ports
* Fix L2 agent does not remove unused ipset set

2014.2.rc2
----------

* Add Juno release milestone
* Add database relationship between router and ports
* Disable PUT for IPv6 subnet attributes
* Skip IPv6 Tests in the OpenContrail plugin
* Remove all_routers argument from _process_routers
* update ml2_migration to reflect optional methods
* Disable PUT for IPv6 subnet attributes
* Do not assume order of lvm.tun_ofports set elements
* Skip IPv6 Tests in the OpenContrail plugin
* Removed kombu from requirements
* Updated from global requirements
* Imported Translations from Transifex
* Imported Translations from Transifex
* Remove two sets that are not referenced
* Forbid update of HA property of routers
* Forbid update of HA property of routers
* Teach DHCP Agent about DVR router interfaces
* Updated from global requirements
* Allow reading a tenant router's external IP
* Raise exception if ipv6 prefix is inappropriate for address mode
* Retry getting the list of service plugins
* Add missing methods to NoopFirewallDriver
* Don't fail when trying to unbind a router
* Modify the ProcessMonitor class to have one less config parameter
* Big Switch: Don't clear hash before sync
* Remove sslutils from openstack.common
* Divide _cleanup_namespaces for easy extensibility
* L3 Agent should generate ns_name in a single place
* Add comments to iptables rules to help debugging
* nit : missing a "%s" in a log message
* L3 agent should always use a unique CONF object
* Iterate over same port_id if more than one exists
* Fix setup of Neutron core plugin in VPNaaS UT

2014.2.rc1
----------

* remove openvswitch plugin
* Fix pid file location to avoid I->J changes that break metadata
* Don't fail when trying to unbind a router
* remove linuxbridge plugin
* Allow reading a tenant router's external IP
* Fix sleep function call
* Add admin tenant name to nova notifier
* ML2: move L3 cleanup out of network transaction
* Open Kilo development
* ML2 Cisco Nexus MD: Fix UT to send one create vlan message
* Implement ModelsMigrationsSync test from oslo.db
* Imported Translations from Transifex
* Update migration scripts to support DB2
* Do not assume order of report list elements
* Disallow unsharing used firewall policy
* Imported Translations from Transifex
* Add missing methods to NoopFirewallDriver
* Raise exception if ipv6 prefix is inappropriate for address mode
* Fix broken port query in Extraroute test case
* Revert "Cleanup floatingips also on router delete"
* fix dvr snat bindings for external-gw-clear
* Fix quota limit range validator
* Remove default dictionary from function def
* Fix KeyError when getting secgroup info for ports
* Create DHCP port for IPv6 subnet
* Deletes floating ip related connection states
* Do not lookup l3-agent for floating IP if host=None, dvr issue
* Remove RPC notification from transaction in create/update port
* Do not assume order of body and tags elements
* Remove the translation tag for debug level logs in vmware plugin
* Retry getting the list of service plugins
* Fix entrypoint of OneConvergencePlugin plugin
* Forbid regular users to reset admin-only attrs to default values
* Finish small unit test refactor of API v2 tests
* Security groups: prevent race for default security group creation
* Stop admin using other tenants unshared rules
* Eliminate OrderedDict from test_api_v2.py
* Mock out all RPC calls with a fixture
* Add logging for enforced policy rules
* Imported Translations from Transifex
* Remove unnecessary _make_port function in BSN UTs
* ofagent: Drop log level of tenant-triggerable events
* Set vif_details to reflect enable_security_group
* Use dict_extend_functions to populate provider network attributes
* Fix foreign key constraint error on ml2_dvr_port_bindings
* Some clean up of code I'm preparing to modify
* Indicate the begin and end of the sync process to EOS
* DVR to delete router namespaces for service ports
* Do not assume order of device_ids set elements
* Fix 500 error on retrieving metadata by invalid URI
* Only setup dhcp interface if dhcp is not active on network
* HA routers master state now distributed amongst agents
* Rework and enable VPNaaS UT for Cisco CSR REST
* Update URL of Ryu official site in ofagent README files
* Set dsvm-functional job to use system packages
* Delete a broken subnet delete unit test
* Fix to delete user and group association in Nuage Plugin
* Deletes FIP agent gw port when last VM is deleted
* Delete DB records instead of tables to speed up UT
* Stop exception log in Big Switch unit tests
* Separate Configuration from Freescale SDN ML2 mechanism Driver
* NSX plugin: set VNIC_TYPE port binding attribute
* Access correct key for template name
* ofagent: Ignore unknown l2pop entry removals
* Neutron metering does not check overlap ip range
* Rename workers to api_workers and simplify code
* Fix DVR to service DHCP Ports
* Tunnel ID range validation for VXLAN/GRE networks
* Remove @author(s) from copyright statements
* BSN: Add context to backend request for debugging
* Don't create unused ipset chain
* Imported Translations from Transifex
* Avoid an extra database query in schedule_snat_router
* Add HA support to the l3 agent
* Stop ignoring 400 errors returned by ODL
* Fix a test_db_plugin unit test side_effect usage
* Imported Translations from Transifex
* Fix KeyError on missing gw_port_host for L3 agent in DVR mode
* Stop using intersphinx
* Updated from global requirements
* Cisco N1kv: Remove vmnetwork delete REST call on last port delete
* Remove the Cisco Nexus monolithic plugin
* L3 Metering label as shared
* Check for ports in subnet before deleting it from Nuage VSD
* ofagent: Fix a possible crash in arp responder
* Add a new scheduler for the l3 HA
* Add functional testing to ipset_manager
* Properly handle empty before/after notifications in l2pop code
* Remove logic for conditional migrations
* Make Juno migrations config independent
* Introduce havana initial state
* Adds ipset support for Security Groups
* Refactor l3_agent.process_router_floating_ip_addresses
* Cleanup floatingips also on router delete
* use TRUE in SQL for boolean var
* Remove faulty .assert_has_calls([])
* Fail on None before iteration attempt
* Imported Translations from Transifex
* ofagent: Remove broken XenAPI support
* Passing admin tenant name to EOS
* Fix for floating ip association and deletion
* BSN: Allow concurrent reads to consistency DB
* Remove useless check in _rpc_update_firewall
* Use renamed _fail_second_call() in cisco nexus tests
* Add L3 VRRP HA base classes
* Allow DHCPv6 reply from server to client
* Don't allow user to set firewall rule with port and no protocol
* Added TAP_DEVICE_PREFIX info to common/constants
* Fix comments in api.rpc.handlers
* ofagent: Clean up logging
* UTs: Disable auto deletion of ports/subnets/nets
* Remove second call to get_subnets in delete_subnet
* Changes to support FWaaS in a DVR based environment
* Imported Translations from Transifex
* Remove hints from schedule_router
* Call unbind_snat_servicenode from schedule router
* NSX: Correct allowed_address_pair return value on create_port
* Add the unit tests for ml2.rpc module
* Neutron should not use the neutronclient utils module for import_class
* Add unit-test assert to check dict is superset of dict
* Pythonified sanity_check.all_tests_passed
* Removed direct access to MessagingServer
* Remove subnet_id from check_ports_exist_on_l3agent
* Add requests_mock to test-requirements.txt
* Removed kombu from requirements
* Fix metadata agent's auth info caching
caching * Throw exception instances instead of classes * Add scheduler unit tests to enable bug fixes and refactoring * Fix AttributeError when setting external gateway on DVR router * Stop tracking connections in DVR FIP Namespace * Fixes formatting for debug output in neutron/agent/l3_agent.py * Avoid testing code duplication which introduced testing bugs * Supply missing cisco_cfg_agent.ini file * Reset IPv6 detection flag after IPv6 tests * Remove unused arg to config.setup_logging() * Updated from global requirements * Revert "Skip functional l3 agent test" 2014.2.b3 --------- * Fix leftover Timeout effecting most eventlet calls * shared policy shouldn't have unshared rules * ofagent: Remove @author tags and update copyright notices * Work toward Python 3.4 support and testing * Cleanup rename of get_compute_ports_on_host_by_subnet * Revert "Cisco DFA ML2 Mechanism Driver" * Refactor security group rpc call * Avoid auto-scheduling for distributed routers * Fix interface IP address for DVR with gateway * BSN: Bind external ports in ML2 driver * Remove SELECT FOR UPDATE use in delete_firewall * Big Switch: Retry on 503 errors from backend * Remove absolute path in KillFilter for metadata-proxy * Implements sync mechanism between Neutron and Nuage VSD * ofagent: Implement physical_interface_mappings * ofagent: Enable local arp responder for TYPE_LOCAL * ofagent: Enable local arp responder for TYPE_FLAT * Implements ProcessMonitor to watch over external processes * Skip functional l3 agent test * ofagent: Local arp responder for VLAN * Prevent SystemExits when running tests * Big Switch: Separate L3 functions into L3 service * Apic drivers enhancements (second approach): Topology * Big Switch: Bind IVS ports in ML2 driver * Add functional test for IptablesManager * Clarify message when no probes are cleared * Remove reference to cisco_cfg_agent.ini from setup.cfg again * Fix a bug in Mellanox plugin RPC caused by secgroup RPC refactoring * Don't spawn metadata-proxy for non-isolated nets * l2pop: Allow network types overridable * ML2: Fix release of network segments to allocation pools * Fix a recent ipv6 UT regression * Imported Translations from Transifex * Add endpoint_type parameter to MetaInterfaceDriver * Remove chain for correct router during update_routers() * ofagent: Enable local arp responder for local VMs * ofagent: merge br-tun into br-int * Apic drivers enhancements (second approach): Sync * Apic drivers enhancements (second approach): L3 refactor * ML2 Type Driver refactor part 2 * Adds router service plugin for CSR1kv * Introduces a keepalived manager for HA * Support for extensions in ML2 * Cisco DFA ML2 Mechanism Driver * Improve some plugins help strings * Provide a quick way to run flake8 * Apic drivers enhancements (second approach): L2 refactor * Make SecurityGroupsRpcCallback a separate callback class * Subnets with prefix length 0 are invalid * Adding mechanism driver in ML2 plugin for Nuage Networks * Fix state_path in tests * Add functional test for l3_agent * remove explicit include of the ovs plugin * NSX: log request body to NSX as debug * Datacenter moid should not be tuple * Remove ovs dependency in embrane plugin * Layer 3 service plugin to support hardware based routing * Remove binding:profile update from Mellanox ML2 MD * Remove old policies from policy.json * Apic drivers enhancements (second approach): Backend * Make DvrServerRpcCallback a separate callback class * Make DhcpRpcCallback a separate callback class * Adding support of DNS nameserver and 
* Block downgrade from icehouse to havana
* Use lockutils module for tox functional env
* Do not use auto_schedule_routers to add router to agent
* Fix func job hook script permission problems
* Check for IPv6 file before reading
* Remove SELECT FOR UPDATE use in update_firewall
* Fix l3 agent scheduling logic to avoid unwanted failures
* Fix InvalidRequestError in auto_schedule_routers
* Fix incorrect number of args to string format
* Add support for provider-network extension in nuage Plugin
* Make L3RpcCallback a separate callback class
* Cisco VPN with in-band CSR (interim solution)
* Inline "for val in [ref]" statements
* Minor refactoring for add_router_to_l3_agent
* Predictable iptables chains output order
* Prefer "val !=/== ref" over "val (not) in [ref]" in conditions
* Heal script: Drop fks before operating on columns
* Fixed template of IPsecSiteConnectionNotFound message
* Fix DVR to service LBaaS VIP Ports
* Refactor test_type_gre/vxlan to reduce duplicate code
* Fix heal_script for MySQL specifics
* Make log level in linux.utils.execute configurable
* Imported Translations from Transifex
* Networks are not scheduled to DHCP agents for Cisco N1KV plugin
* ext-gw update on dvr router improperly handled by l3-agent
* metering driver default value is different in code and config file
* Fix for floatingip-delete not removing fip_gw port
* Increase the default poll duration for Cisco n1kv
* Fix IpNetnsCommand to execute without root_wrapper when no netns
* Increase ovsdb_monitor.SimpleInterfaceMonitor start timeout
* Change autogenerate to be unconditional
* Remove status initialization from plugin's create_firewall
* Set firewall state to CREATED when dealing with DVR
* Add template attr. for subnet, router create in Nuage plugin
* Implement ip_lib.device_exists_with_ip_mac
* Add _store_ip_allocation method
* Updated from global requirements
* Refactor plugin setup helpers out of test.base
* Raise proper exception in case duplicate ipv6 address is allocated
* Do not explicitly set mysql_engine
* Fixes Hyper-V agent issue on Hyper-V 2008 R2
* Removing sorted() function from assertEqual()
* Add hook scripts for the functional infra job
* ML2 Type driver refactor part 1
* Minor refactoring of auto_schedule_routers
* Add ipv6 forwarding for router namespaces
* Refresh rpc_backend values in unit tests to those from oslo.messaging
* Add unit tests covering single operations to ODL
* One Convergence: Skip all tests with 'v6' in name
* VPNaaS: Enable UT cases with newer oslo.messaging
* Do not log WARN messages about lack of L3 agents for DVR routers
* Add specific docs build option to tox
* Fix policy rules for adding and removing router interfaces
* Refactor type_tunnel/gre/vxlan to reduce duplicate code
* Join tables in query for down L3 agents
* Rename range to avoid shadowing the builtin
* Fixes Hyper-V issue due to ML2 RPC versioning
* A10 Networks LBaaS v1 Driver
* Assign Cisco nw profile to multi-tenants in single request
* Remove unused network parameter from _allocate_ips_for_port
* corrects the typos in l3_router_plugin's comments
* Support Stateful and Stateless DHCPv6 by dnsmasq
* Implements securitygroup extension for nuage plugin
* Fix bigswitch setup.cfg lines
* Arista Layer 3 Service Plugin
* Add config for visibility of cisco-policy-profile
* Ensure ip6tables are used only if ipv6 is enabled in kernel
* Remove invalid or useless initialization in test_type_vxlan
* Fix migration set_length_of_description_field_metering
* Set InnoDB engine for all existing tables
* Use oslo.db create_engine instead of SQLAlchemy
* Big Switch: Check for 'id' in port before lookup
* Reorder operations in create_vip
* Send HTTP exceptions in the format expected by neutronclient
* Change nexus_dict to accept port lists
* Update DVR Binding when router_id changes
* Imported Translations from Transifex
* Remove auto-generation of db schema from models at startup
* Cisco N1kv plugin to send subtype on network profile creation
* Implement namespace cleanup for new DVR namespaces
* Fix config option names in ml2_conf_sriov.ini
* NSX: Avoid floating IP status reset
* correct getLoggers to use __name__ in code
* Skip FWaaS config mismatch check if RPC method is unsupported
* NSX: lift restriction on DVR update
* Updated from global requirements
* Use jsonutils instead of stdlib json
* Remove INACTIVE status from FWaaS
* Ignore http_proxy while connecting to test WSGI server
* Fix interface add for dvr with gateway
* l2pop: get_agent_ports: Don't yield (None, {})
* ML2: Make get_device_details report mac address as well
* Delete DVR namespaces on node after removing last VM
* Fix PortNotFound error during update_device_up for DVR
* Option to remove routers from dead l3 agents
* Remove SELECT FOR UPDATE use in ML2 tunnel driver add_endpoint
* Fix KeyError during sync_routers
* Fix PortNotFound exception during sync_routers
* VPNaaS: Cisco fix validation for GW IP
* Raise NotImplementedError instead of NotImplemented
* Imported Translations from Transifex
* Fix duplicate function: test_getattr_unallowed_attr
* Preserve link local IP allocations for DVR fip ns across restart
* Fix 404 error fetching metadata when using DVR
* Raise exception for network delete with subnets presents
* SecurityGroupRuleExists should point out rule id instead of group id
* Opencontrail plug-in implementation for core resources
* Do not assume order of new_peers list elements
* Make plugin and l3plugin available as mixin's properties
* Use call to report state when ovs_agent starts up
* add auth token to context
* Fixes an issue with FIP re-association
* NSX: unify the two distributed routing extensions
* NSX: fix wording for configuration option
* MLNX Agent: ensure removed ports get treated on resyncs
* Add delete operations for the ODL MechanismDriver
* Predictable field and filter ordering
* Fixing neutron-db-manage with some options other than upgrade/downgrade
* Removes extra indents from TestSubresourcePlugin
* ofagent: Upgrade note about firewall_driver
* Return port context from _bind_port_if_needed
* MLNX Agent: Process port_update notifications in the main agent loop
* Fix session's InvalidRequestError because of nested rollback
* Remove unneeded device_owner field from l2pop tuple
* ofagent: Remove network_delete method
* Do not assume order of parameters in OVSBridge.add_flow call
* Fix to throw correct error code for bad attribute
* Improve external gateway update handling
* Do not assume order of pci slot list
* DeferredBridge to allow add_tunnel_port passthru
* Enabled Cisco ML2 driver to use new upstream ncclient
* Fix to enable L2pop to serve DVR
* Remove duplicated check for router connect to external net
* ofagent: Add a missing normalized_port_name
* Return 403 instead of 404 on attr policy failures
* Proper validation for inserting firewall rule
* Imported Translations from Transifex
* Ensure assertion matches dict iter order in test
* Fix 500 error during router-update for dvr routers
* Simple refactor to stop passing around an unused parameter
* Make _build_uri_path output predictable
* Radware: When a pip is needed, reuse the Port
* Remove redundant topic from rpc calls
* l3_db: refactor L3_NAT_DB_mixin
* OVS flows apply concurrently using a deferred OVSBridge
* Do not assume order of network_uuid's
* Big Switch: Only update hash header on success
* ofagent: Stop monitoring ovsdb for port changes
* ofagent: Desupport ancillary bridges
* Add a tox test environment for random hashseed testing
* OFAgent: Implement arp responder
* Updated from global requirements
* Do not assume order of quotas dictionary elements
* Move Cisco VPN RESTapi URI strings to constants
* Remove ignored do_request timeout argument
* Move from Python logging to Openstack logging
* Imported Translations from Transifex
* NSX: remove duplicate call to set_auth_cookie()
* NSX: Correct default timeout params
* Remove reference to cisco_cfg_agent.ini from setup.cfg
* Exit Firewall Agent if config is invalid
* Fix spelling mistakes
* Fix DB Duplicate error when scheduling distributed routers
* Imported Translations from Transifex
* Make ML2 ensure_dvr_port_binding more robust
* centralized router is incorrectly scheduled
* Fix-DVR Gateway clear doesn't delete csnat port
* Fix spelling in get_plugin_interface docstring
* Use storage engine when creating tables in migrations
* Removed configobj from test requirements
* Implement Midonet Juno Network Api calls
* Add missing ml2 plugin to migration 1fcfc149aca4
* Replace nullable from primary keys in tz_network_bindings with default
* Use correct section for log message if interface_driver import fails
* Make sure that gateway is in CIDR range by default
* test_l3_plugin: L3AgentDbInteTestCase L3AgentDbSepTestCase fails
* Add L3 Scheduler Changes for Distributed Routers
* Pass filters in arrays in get_agent_gw_ports_exist_for_network
* Do not schedule network when creating reserved DHCP port
* Check that router info is set before calling _update_arp_entry
* Move ARP responder test to sanity command
* neutron.conf does not have the definition of firewall quotas
* Fix wrong order of tables in downgrade
* Fix deprecated opt in haproxy driver
* Race condition of L3-agent to add/remove routers
* Replaced the strings with respective constants
* Make dvr_vmarp_table_update call conditional to dvr extension
* ofagent: Update a comment in port_bound
* Updated from global requirements
* Set promote_secondaries when creating namespaces
* Functional tests work fine with random PYTHONHASHSEED
* Call config_parse in base test setup
* ML2 additions to support DVR
* Make test_l3_agent._prepare_router_data a module function
* Remove redundant code in tests/unit/test_l3_agent
* Fix ML2 Plugin binding:profile update
* Set python hash seed to 0 in tox.ini
* Add definition for new VIF type
* Configuration agent for Cisco devices
* Handle bool correctly during _extend_extra_router_dict
* Encapsulate some port properties in the PortContext
* Changes to remove the use of mapping tables from Nuage plugin
* Updated from global requirements
* Log exceptions inside spawned functions
* Correct misspelled variable name
* Avoid RequestURITooLong exception in metadata agent
* Move loadbalancer vip port creation outside of transaction
* Define some abstract methods in VpnDriver class
* ML2 mechanism driver for SR-IOV capable NIC based switching, Part 2
* Modify L3 Agent for Distributed Routers
* Audited attribute for policy update not changing
* OFAgent: Share codes of l2-population in OVS agent

2014.2.b2
---------

* This patch changes the name of directory from mech_arista to arista
* ML2 mechanism driver for SR-IOV capable NIC based switching, Part 1
* Add rule for updating network's router:external attribute
* L2 Agent-side additions to support DVR
* Imported Translations from Transifex
* NSX: fix router ports port_security_enabled=False
* Add partial specs support in ML2 for multiprovider extension
* Add partial specs support in ML2 for gre/vxlan provider networks
* Set nullable=False on tenant_id in apic_contracts table
* call security_groups_member_updated in port_update
* The default value of quota_firewall_rule should not be -1
* Correct LOG.debug use
* Fix incorrect downgrade
* Fix spelling mistake in the log message
* Imported Translations from Transifex
* Support Router Advertisement Daemon (radvd) for IPv6
* Move plugin.delete_port call out of transaction
* Add partial specs support in ML2 for vlan provider networks
* ML2: Update a comment after the recent bind_port change
* NSX: fix validation logic on network gateway connect
* Initialize RpcProxy objects correctly
* Fix DVR regression for ofagent
* RPC additions to support DVR
* no quota for allowed address pair
* Allow to import _LC, _LE, _LI and _LW functions directly
* L2 Model additions to support DVR
* Fixed audit notifications for dhcp-agent-network
* Make readme reference git.openstack.org not github
* Fix enums usage for postgres in migrations
* Return a tuple of None's instead of one None
* Fix a log typo in ML2 manager.bind_port()
* Big Switch: Remove consistency hash on full sync
* VPNaaS: Separate validation for Cisco impl
* VPNaaS: separate out validation logic for ref impl
* VMWare: don't notify on disassociate_floatingips()
* Add L3 Extension for Distributed Routers
* VPNaaS Cisco REST client enhance CSR create
* Bump hacking to version 0.9.2
* Log methods using rpc communication
* Fixes port update failure when device ID is not updated
* Support Quota extension in MidoNet plugin
* NSX: Remove unneeded call to _ensure_default_security_group
* Use auth_token from keystonemiddleware
* update vsm credential correctly
* Shamelessly removing commented print line
* L3 agent prefers RPC messages over full sync
* Dnsmasq config files syntax issue when dhcp_domain is empty
* Database healing migration
* Fix incorrect default parameter in migration
* Use method's logger in log decorator
* Fixed audit notifications for l3-agent-router ops
* Expand arp_responder help text
* Send network name and uuid to subnet create
* Cisco: Fix test cases which make incorrect create requests
* ML2: Bind ports outside transactions
* Freeze models for healing migration
* NSX: Optionally not enforce nat rule match length check
* ofagent: Handle device name prefixes other than "tap"
* Add -s option for neutron metering rules
* Security groups extension for PLUMgrid plugin
* Missing max_routes in neutron.conf
* Clear entries in Cisco N1KV specific tables on rollback
* Allow unsharing a network used as gateway/floatingip
* Change all occurrences of no_delete to do_delete
* Split up metering test case into plugin + test case
* Use integer server_default value for multicast_ip_index
* Validate expected parameters in add/remove router interfaces
* Revert "VPNaaS REST Client UT Broken"
* Mock out tunnel_sync in test to avoid sleeping
* Add 'server_default' parameter
* Add BSN plugin to agent migration script
* Move _convert_to_nsx_transport_zones into nsx_utils
* Extract CommonDBMixin to a separate file
* Remove dead helper function from test_l3_plugin
* Added support for NOS version 4.1.0, 5.0.0 and greater
* Remove reference to setuptools_git
* NSX: neutron router-interface-add should clear security-groups
* Refactor 'if false do nothing' logic in l3 scheduler db
* Imported Translations from Transifex
* Add a gate-specific tox env for functional tests
* NSX: remove unnecessary checks on network delete
* Bump min required version for dnsmasq to 2.63
* Add CONTRIBUTING.rst
* Do not mark device as processed if it wasn't
* Fix 'server_default' parameter usage in models
* Fix missing migration default value
* Add a link to a blog post by RedHat that discusses GRE tunnels in OVS
* Updated from global requirements
* VPNaaS REST Client UT Broken
* Avoid notifying while inside transaction opened in delete_port()
* sync periodic_task fix from incubator
* Omit mode keyword when spawning dnsmasq with some ipv6 subnets
* Fixed spelling mistake in securitygroups_rpc
* OVS agent: fix a comment on CANARY_TABLE
* ofagent: Fix an argument mismatch bug in commit 9d13ea88
* Fix UnboundLocalError raised during L3 router sync task
* Updated from global requirements
* Fix isinstance assertions
* Imported Translations from Transifex
* Allow setting a rootwrap cmd for functional tests
* Fix OVSBridge.get_port_ofport to handle empty output
* Ignore variable column widths in ovsdb functional tests
* Add configurable http_timeout parameter for Cisco N1K
* NSX: fix indentations
* BSN: Remove db lock and add missing contexts
* NSX: properly handle floating ip status
* Updated from global requirements
* Fix example for running individual tests
* Stop the dhcp-agent process when dnsmasq version is not determined
* Switch to using oslo.db
* Replace occurrences of 'test_tenant' with 'test-tenant' in tests
* lb-agent: ensure removed devices get treated on resyncs
* Imported Translations from Transifex
* Add sanity check for nova notification support
* changes ovs agent to get bridges via ovs_lib
* Use correct MAX_LEN constant in agent functional tests
* remove unsupported middleware
* Fix re-creation of the pool directory
* Add config for performance gate job
* Use patch ports to interconnect integration/physical bridges
* Exit rpc_loop when SIGTERM is received in ovs-agent
* LBaaS new object model logging no-op driver
* ofagent: Use port desc to monitor ports on br-int
* Fixed dhcp & gateway ip conflict in PLUMgrid plugin
* Introduce bulk calls for get device details
* validate flat networks physical name
* Remove __init__ method from TunnelCallback mixin
* OVS agent: Correct bridge setup ordering
* Revert "Revert "ovs-agent: Ensure integration bridge is created""
* Imported Translations from Transifex
* Synced log module and its dependencies from oslo-incubator
* Pass newly created router to _update_router_gw_info
* don't ignore rules that are already enforced
* Updated neutron.conf to reflect new RPC options
* Moved rpc_compat.py code back into rpc.py
* Updated from global requirements
* Updated from global requirements
* ofagent: move main module from ryu repository
* Don't convert numeric protocol values to int
* Imported Translations from Transifex
* Revert "Check NVP router's status before deploying a service"
* Remove the useless vim modelines
* Imported Translations from Transifex
* Changing the poll_duration parameter type to int
* Add test cases for plugins/ml2/plugin.py
* Removed local modification in incubator code
* Removed 'rpc' and 'notifier' incubator modules
* Removed create_rpc_dispatcher methods
* Use openstack.common.lockutils module for locks in tox functional tests
* Pass serializer to oslo.messaging Notifier
* Fix auto_schedule_networks to resist DBDuplicateEntry
* Imported Translations from Transifex
* Control active number of REST calls from Cisco N1kv plugin to VSM
* Revert "ovs-agent: Ensure integration bridge is created"
* ValueError should use '%' instead of ','
* NSX: return 400 if dscp set for trusted queue
* NSX sync cache: add a flag to skip item deletion
* NSX: propagate network name updates to backend
* Renamed argument for create_consumer[s]
* Renamed consume_in_thread -> consume_in_threads
* Renamed start_rpc_listener -> start_rpc_listeners
* Port to oslo.messaging
* Imported Translations from Transifex
* Pass 'top' to remove_rule so that rule matching succeeds
* Big Switch: Stop watchdog on interval of 0
* Remove old quantum scripts
* Move _filter_non_model_columns method to CommonDbMixin
* Updated from global requirements
* Ignore emacs checkpoint files
* Big Switch: Lock consistency table for REST calls
* Check port value when creating firewall rule with icmp protocol
* Improve docstring for OVSNeutronAgent constructor
* Big Switch ML2: sync detection in port-update
* Imported Translations from Transifex
* Remove SELECT FOR UPDATE use in ML2 type driver release_segment
* Add vlan type driver unittests
* Make sure we call BaseTestCase.setUp() first
* Don't explicitly call .stop() on mock.patch objects
* Don't instantiate RPC clients on import
* Configure agents using neutron.common.config.init (formerly .parse)
* linuxbridge-agent: process port updates in the main loop
* Notify systemd when starting Neutron server
* Ensure entries in dnsmasq belong to a subnet using DHCP
* Added missing core_plugins symbolic names
* Trigger provider security group update for RA
* NSX: revert queue extension name change
* Fix pool statistics for LBaaS Haproxy driver
* Don't use root_helper when it's not needed
* Introduced rpc_compat.create_connection()
* Copy-paste RPC Service class for backwards compatibility
* Introduce RpcCallback class
* Fix opt helpstring for dhcp_lease_duration
* Consistently use jsonutils instead of specific implementation
* Imported Translations from Transifex
* Adding static routes data for members
* remove pep8 E122 exemption and correct style
* Change default netpartition behavior in nuage plugin
* Add 'ip rule ...' support to ip_lib
* Add missing keyword raise to get_profile_binding function
* Add logging for NSX status sync cache

2014.2.b1
---------

* Big Switch: Remove unnecessary initialization code
* Big Switch: Import DB module in unit test
* When l2-pop ON, clean stale ports in table0 br-tun
* remove E112 hacking exemption and fix errors
* Updated from global requirements
* Allowed address pair: Removing check for overlap with fixed ips
* NeutronManager: Remove explicit check of the existence of an attribute
* Fix invalid IPv6 address used in FakeV6 variables
* Improve vxlan type driver initialization performance
* Floatingip extension support for nuage plugin
* ovs-agent: Ensure integration bridge is created
* Brocade mechanism driver depends on the brocade plugin templates
* Brocade mechanism driver should be derived from ML2 plugin base class
* changes ovs agent_id init to use hostname instead of mac
* multiprovidernet: fix a comment
* Imported Translations from Transifex
* Fix race condition with firewall deletion
* extensions: remove 'check_env' method
* Check the validation of 'delay' and 'timeout'
* Control update, delete for cisco-network-profile
* Ensure routing key is specified in the address for a direct producer
* Support Subnets that are configured by external RAs
* Refactor code in update_subnet, splitting into individual methods
* Make allocation_pools attribute of subnet updateable by PUT
* Monkey patch threading module as early as possible
* Introduced transition RPC exception types
* Added RpcProxy class
* ofagent: Fix VLAN usage for TYPE_FLAT and TYPE_VLAN
* Big Switch: Catch exceptions in watchdog thread
* Use import from six.moves to import the queue module
* Start an unstarted patch in the hyperv unit tests
* Imported Translations from Transifex
* Fix NVP FWaaS occurs error when deleting a shared rule
* Check NVP router's status before deploying a service
* Add an option to turn off DF for GRE and VXLAN tunnels
* Increase default metadata_workers, backlog to 4096
* Big Switch: Add missing data to topology sync
* Replace XML with JSON for N1kv REST calls
* Big Switch: Call correct method in watchdog
* Freescale SDN Mechanism Driver for ML2 Plugin
* OVS Agent: limit veth names to 15 chars
* Added note to neutron.conf
* Return no active network if the agent has not been learnt yet
* Sync service module from oslo-incubator
* ovs, ofagent: Remove dead code
* Default to setting secure mode on the integration bridge
* Cisco APIC Layer 3 Service plugin
* Allow neutron-sanity-check to check OVS patch port support
* Remove run-time version checking for openvswitch features
* Add flat type driver unittests
* Changed DictModel to dict with attribute access
* Pass object to policy when finding fields to strip
* Allow L3 base to handle extensions on router creation
* Refactor some router-related methods
* Add local type driver unittests
* add engine parameter for offline migrations
* Check DB scheme prior to migration to Ml2
* Removes unnecessary Embrane module-level mocks
* Improve module-level mocks in midonet tests
* Big Switch: fix capabilities retrieval code
* Improve iptables_manager _modify_rules() method
* NSX: bump http_timeout to 30 seconds
* Log firewall status on delete in case of status inconsistency
* BSN: Set hash header to empty instead of False
* Neutron does not follow the RFC 3442 spec for DHCP
* LBaaS add missing rootwrap filter for route
* Radware LBaaS driver is able to flip to a secondary backend node
* NSX: fix invalid docstring
* NSX: fix tenant_id passed as security_profile_id
* NSX: Fix request_id in api_client to increment
* Improve usage of MagicMocks in ML2 and L3 tests
* Improve readability of MagicMock use in RYU test
* Remove function replacement with mock patch
* Remove unnecessary MagicMocks in cisco unit tests
* Handle errors from run_ofctl() when dumping flows
* Sync periodic_task from oslo-incubator
* Added missing plugin .ini files to setup.cfg
* Imported Translations from Transifex
* Make linux.utils.execute log error on return codes
* FWaaS plugin doesn't need to handle firewall rule del ops
* Reprogram flows when ovs-vswitchd restarts
* Revert "fix openvswitch requirement check"
* Updated from global requirements
* Fix KeyError exception while updating dhcp port
* NSX: fix bug for flat provider network
* Disallow regular user to update firewall's shared attribute
* Support 'infinite' dhcp_lease_duration
* l2-pop : removing a TODO for the delete port use case
* NEC plugin: Bump L3RPC callback version to 1.1
* Synced jsonutils from oslo-incubator
* Imported Translations from Transifex
* fix openvswitch requirement check
* NSX: replace strong references to the plugin with weakref ones
* Fixes bugs for requests sent to SDN-VE controller
* Install SNAT rules for ipv4 only
* Imported Translations from Transifex
* Add NVP advanced service check before deleting a router
* Disallow 'timeout' in health_monitor to be negative
* Remove redundant default=None for config options
* Fix for multiple misspelled words
* Use list copy for events in nova notifier
* Extraroute extension support for nuage plugin
* OFAgent: Fixing lost vlan ids on interfaces
* Set onlink routes for all subnets on an external network
* Cisco APIC ML2 mechanism driver, part 2
* Remove all mostly untranslated PO files
* remove token from notifier middleware
* NSX: get rid of the last Nicira/NVP bits
* Metadata agent caches networks for routers
* Common decorator for caching methods
* Make pid file locking non-blocking
* Allowed Addresspairs: Removing check for overlap with fixed ips
* Do not defer IPTables apply in firewall path
* Metaclass Python 3.x Compatibility
* Fix non-existent 'assert' calls to mocks
* Log iptables rules when they fail to apply
* Remove hard dependency on novaclient
* Provide way to reserve dhcp port during failovers
* Imported Translations from Transifex
* Implement local ARP responder onto OVS agent
* Fix typos in ovs_neutron_agent.py
* Allow vlan type usage for OpenDaylight ml2
* NSX: do not raise on missing router during migration step
* NSX: fix error when creating VM ports on subnets without dhcp
* NSX: allow net-migration only in combined mode
* OFAgent: Avoid processing ports which are not yet ready
* Add missing translation support
* Reorg table ml2_port_bindings when db migration
* Remove unused parameter
* NSX: Do a single query for all gateway devices
* Add mailmap entry
* Add 'secret' property for 'connection' option
* NSX: Do not extend fault map for network gateway ext
* Ensure tenant owns devices when creating a gateway
* Corrected the syntax of port_update call to NVSD agent
* Fix some typos in neutron/db and IBM SDN-VE plugin
* Fix issubclass() hook behavior in PluginInterface
* Imported Translations from Transifex
* LBaaS VIP doesn't work after delete and re-add
* OVS lib defer apply doesn't handle concurrency
* Big Switch: Don't use MagicMocks unnecessarily
* Make plugin deallocation check optional
* Restore GARP by default for floating IPs
* Ensure core plugin deallocation after every test
* Updated from global requirements
* Big Switch: Check source_address attribute exists
* Revert "Big Switch: Check source_address attribute exists"
* ML2 VxlanTypeDriver: Synchronize of VxlanAllocation table
* Start ping listener also for postgresql
* ofagent: Add a missing push_vlan action
* NSX: ensure that no LSN is created on external networks
* Make VPNaaS 'InUse' exception more clear
* Remove explicit dependency on amqplib
* Revert "Disable debug messages when running unit tests"
* eswitch_neutron_agent: Whitespace fixes in comments
* Upgrade failure for DB2 at ml2_binding_vif_details
* Remove duplicate module-rgx line in .pylintrc
* Disable debug messages when running unit tests
* Perform policy checks only once on list responses
* Allow DHCPv6 solicit from VM
* Fix importing module in test_netscaler_driver
* Record and log reason for dhcp agent resync
* Big Switch: Check source_address attribute exists
* L3 RPC loop could delete a router on concurrent update
* Adding tenant-id while creating Radware ADC service
* Fix H302 violations
* Fix H302 violations in plugins package
* Fix H302 violations in unit tests
* Imported Translations from Transifex
* lbaas on a network without gateway
* Optimize querying for security groups
* NSX: pass the right argument during metadata setup
* Improve help strings for radware LbaaS driver
* Fix network profile subtype validation in N1kv plugin
* Performance improvement of router routes operations
* Add support to dynamically upload drivers in PLUMgrid plugin
* Imported Translations from Transifex
* Reference new get_engine() method from wsgi.py
* Allow test_l3_agent unit test to run individually
* tests/unit: refactor reading neutron.conf.test
* Don't print duplicate messages on SystemExit
* Unit test cases for quota_db.py
* Cisco VPN device driver - support IPSec connection updates
* OVS and OF Agents: Create updated_ports attribute before setup_rpc
* Imported Translations from Transifex
* Updated from global requirements
* Synced jsonutils from oslo-incubator
* Imported Translations from Transifex
* NSX: fix migration for networks without a subnet
* Allow ML2 plugin test cases to be run independently
* Removed signing_dir from neutron.conf
* Add physical_network to binding:vif_details dictionary
* Database exception causes UnboundLocalError in linuxbridge-agent
* Wrong key router.interface reported by ceilometer
* Imported Translations from Transifex
* NSX: fix API payloads for dhcp/metadata setup
* Improve ODL ML2 Exception Handling
* NSX: change api mapping for Service Cluster to Edge Cluster
* Fix protocol value for SG IPV6 RA rule
* Cisco APIC ML2 mechanism driver, part 1
* LBaaS: remove orphan haproxy instances on agent start
* Fixed floating IP logic in PLUMgrid plugin
* Segregate the VSM calls from database calls in N1kv plugin
* NSX: add nsx switch lookup to dhcp and metadata operations
* Use set_gateway from ip_lib
* Fix incorrect usage of sa.String() type
* Re-submit "ML2 plugin should not delete ports on subnet deletion"
* LBaaS: Set correct nullable parameter for agent_id
* Vmware: Set correct nullable for lsn_id, nsx_port_id
* IBM: set secret=True on passwd config field
* Restore ability to run functional tests with run_tests.sh
* Fix H302 violations in extensions package
* Sync db code from oslo-incubator
* Imported Translations from Transifex
* Remove List events API from Cisco N1kv Neutron
* NSX: Fix fake_api_client to raise NotFound
* Replace loopingcall in notifier with a delayed send
* ip-lib : use "ip neigh replace" instead of "ip neigh add"
* Add 2-leg configuration to Radware LBaaS Driver
* Fix H302 violations in db package and services
* Cisco: Set correct nullable for switch_ip, instance_id, vlan_id
* Ml2: Set correct nullable for admin_state_up
* Drop service* tables only if they exist
* Updated from global requirements
* Make help texts more descriptive in Metaplugin
* ML2 Cisco Nexus MD: Improve Unit Test Coverage
* Fix migration that breaks Grenade jobs
* Fix incorrect change of Enum type
* allow delete_port to work when there are multiple floating ips
* Add nova_ca_certificates_file option to neutron
* gw_port should be set as lazy='join'
* netaddr<=0.7.10 raises ValueError instead of AddrFormatError
* Imported Translations from Transifex
* netaddr<=0.7.10 raises ValueError instead of AddrFormatError
* Validate IPv6 modes in API when IP version is 4
* Add 'ip neigh' to ip_lib
* OFAgent: Improve handling of security group updates
* OFAgent: Process port_update notifications in the main agent loop
* NSX: sync thread catches wrong exceptions on not found
* Notifier: Catch NotFound error from nova
* Switch over to FixedIntervalLoopingCall
* Check if bridge exists and make sure it's UP in ensure_bridge
* Validate CIDR given as ip-prefix in security-group-rule-create
* Support enhancements to Cisco CSR VPN REST APIs
* Fix uninitialized variable reference
* Nuage Plugin: Delete router requires precommit checks
* Delete DHCP port without DHCP server on a net node
* Improved quota error message
* Remove device_exists in LinuxBridgeManager
* Add support for multiple RPC workers under Metaplugin
* Security Group rule validation for ICMP rules
* Fix Metering doesn't respect the l3 agent binding
* DHCP agent should check interface is UP before adding route
* Remove workaround for bug #1219530
* Fix LBaaS Haproxy occurs error if no member is added
* Add functional tests to verify ovs_lib VXLAN detection
* Add nova_api_insecure flag to neutron
* Allow combined certificate/key files for SSL
* Verify ML2 type driver exists before calling del
* Fix dangling patches in Cisco and Midonet tests
* Make default nova_url use a version
* ML2 Cisco Nexus MD: Remove unnecessary Cisco nexus DB
* NSX plugin: fix get_gateway_devices
* Exclude .ropeproject from flake8 checks
* Register LBaaS resources to quotas engine
* Remove mock.patch.stop from tests that inherit from BaseTestCase
* Reschedule router if new external gateway is on other network
* Update ensure()/reconnect() to catch MessagingError
* Properly apply column default in migration pool_monitor_status
* Remove "reuse_existing" from setup method in dhcp.py
* Enable flake8 E711 and E712 checking
* Fixes Hyper-V agent security groups disabling
* Fixes Hyper-V agent security group ICMP rules
* Fix typo in ml2 configuration file
* Edge firewall: improve exception handling
* Edge driver: Improve exception handling
* Fix typo in comment
* NSX: Fix KeyError in sync if nsx_router_id not found
* VMware: log backend port creation in the right place
* Revert "Hide ipv6 subnet API attributes"
* BigSwitch: Create router ports synchronously
* NSX: ensure dhcp port is setup on metadata network
* Hide ipv6 subnet API attributes
* Set correct columns' length
* Enforce required config params for ODL driver
* Add L2 Agent side handling for non consistent security_group settings
* BSN: Remove module-level ref to httplib method
* BigSwitch: Stop HTTP patch before overriding
* Typographical correction of Arista ML2 help
* Fix wrong section name "security_group" in sample config files
* Set the log level to debug for loading extensions
* Updated from global requirements
* set api.extensions logging to ERROR in unit tests
* Add common base class for agent functional tests
* Remove RPC to plugin when dhcp sets default route
* Imported Translations from Transifex
* Add missing comma in nsx router mappings migration
* OFAgent: Avoid re-wiring ports unnecessarily
* BigSwitch: Improves server manager UT coverage
* BigSwitch: Don't import portbindings_db until use
* lb-agent: fix get_interfaces_on_bridge returning None
* Clean out namespaces even if we don't delete namespaces
* Call policy.init() once per API request
* ofa_neutron_agent: Fix _phys_br_block_untranslated_traffic
* Don't emit log for missing attribute check policy
* Sync service and systemd modules from oslo-incubator
* Imported Translations from Transifex
* Move bash whitelisting to pep8 testenv
* Fix test MAC addresses to be valid
* ML2: ODL driver sets port status
* Add a note that rpc_workers option is experimental
* Fix Jenkins translation jobs
* Redundant SG rule create calls in unit tests
* Set ns_name in RouterInfo as attribute
* Replace HTTPSConnection in NEC plugin
* ignore build directory for pep8
* Imported Translations from Transifex
* Delete routers that are requested but not reported as active
* Explicitly import state_path opt in tests.base
* fixes tests using called_once_ without assert
* Remove invalid copyright headers under API module
* update doc string - correct typo
* Revert changes removing OVSBridge return
* fixes broken neutron-netns-cleanup
* Remove duplicated tests for check_ovs_vxlan_version
* Permit ICMPv6 RAs only from known routers
* Return 409 for second firewall creation
* OFA agent: use hexadecimal IP address in tunnel port name
* Fixing Arista CLI command
* use floatingip's ID as key instead of itself
* Use a temp dir for CONF.state_path
* Use os.uname() instead of calling uname in subprocess
* Enable hacking H301 check
* Stop using portbindings_db in BSN ML2 driver
* NSX: Fix pagination support
* Removing vim header lines
* Fix function parsing the kernel version
* Updated from global requirements

2014.1.rc1
----------

* Restore NOT NULL constraint lost by earlier migrations
* BigSwitch: Semaphore on port status update
* Remove last parts of Quantum compatibility shim
* Imported Translations from Transifex
* Fix quota_health_monitor opt name in neutron.conf
* Add missing DB migrations for BSN ML2 plugin
* Only send notifications on uuid device_id's
* Add Icehouse no-op migration
* Add support for https requests on nova metadata
* Delete disassociated floating ips on external network deletion
* Imported Translations from Transifex
* Invoke _process_l3_create within plugin session
* Invalid ovs-agent test case - test_fdb_add_flows
* Add missing parameters for port creation
* Move test_ovs_lib to tests/unit/agent/linux
* Update BigSwitch Name to its correct name
* Cancelling thread start while unit tests running
* Delete duplicate external devices in router namespace
* Deals with fails in update_*_postcommit ops
* ML2 Cisco Nexus MD: Support portchannel interfaces
* Changed the message line of RouterInUse class
* UT: do not hide an original error in test resource ctxtmgr
* BigSwitch: Move attr ref after error check
* Fix namespace exist() method
* Make dnsmasq aware of all names
* Open Juno development
* Prevent cross plugging router ports from other tenants
* Adds OVS_HYBRID_PLUG flag to portbindings
* Disable XML tests on Py26
* Subnets should be set as lazy='join'
* nec plugin: allow to delete resource with ERROR status
* Synced rpc and gettextutils modules from oslo-incubator
* Import request_id middleware bug fix from oslo
* Add unit test for add_vxlan in test_linux_ip_lib
* Start using oslosphinx theme for docs
* Migrate data from cap_port_filter to vif_details
* Imported Translations from Transifex
* Include cisco plugin in migration plugins with ovs
* ML2 Cisco Nexus MD: Remove workaround for bug 1276395
* Fixed TypeError when creating MlnxException
* Replace a usage of the deprecated root_helper option
* Cisco VPN driver correct reporting for admin state chg
* Add script to migrate ovs or lb db to ml2 db
* Correct OVS VXLAN version check
* LBaaS: make device driver decide whether to deploy instance
* NSX plugin: return 400 for invalid gw certificate
* Imported Translations from Transifex
* Remove extra space in help string
* Add enable_security_group to BigSwitch and OneConvergence ini files
* Add nec plugin to allowed address pairs migration
* Imported Translations from Transifex
* Fix segment allocation tables in Cisco N1kv plugin
* Updated from global requirements
* NEC plugin: Rename quantum_id column to neutron_id
* Log received pool.status
* NEC plugin: Allow to add prefix to OFC REST URL
* NEC plugin: Remove a colon from binding:profile key due to XML problem
* rename ACTIVE_PENDING to ACTIVE_PENDING_STATUSES
* VPNaaS support for VPN service admin state change and reporting
* Use save_and_reraise_exception when reraise exception
* Return meaningful error message on pool creation error
* Don't set priority when calling mod_flow
* Avoid creating FixedIntervalLoopingCall in agent UT
* Imported Translations from Transifex
* Big Switch Plugin: No REST port delete on net del
* Add enable_security_group option
* Get rid of additional db contention on fetching VIP
* Fix typo in lbaas agent exception message
* De-duplicate unit tests for ports in Big Switch
* ML2: Remove validate_port_binding() and unbind_port()
* Imported Translations from Transifex
* Fix duplicate name of NVP LBaaS objs not allowed on vShield Edge
* tests/unit: clean up notification driver
* Use different name for the same constraint
* Add a semaphore to some ML2 operations
* Log dnsmasq host file generation
* add HEAD sentinel file that contains migration revision
* Added config value help text in ns metadata proxy
* Fix usage of save_and_reraise_exception
* Cisco VPN device driver post-merge cleanup
* Fixes the Hyper-V agent individual ports metrics
* Sync excutils from oslo
* BigSwitch ML2: Include bound_segment in port
* NEC plugin: Honor Retry-After response from OFC
* Add update binding:profile with physical_network
* return false or true according to binding result
* Enable to select an RPC handling plugin under Metaplugin
* Ensure to count firewalls in target tenant
* Mock agent RPC for FWaaS tests to delete DB objs
* Allow CIDRs with non-zero masked portions
* Cisco plugin fails with ParseError no elem found
* Cisco Nexus: maximum recursion error in ConnectionContext.__del__
* Don't use root to list namespaces
* Fixes Hyper-V agent security groups enable issue
* ML2 BigSwitch: Don't modify parent context
* Advanced Services documentation
* LBaaS: small cleanup in agent device driver interface
* Change report_interval from 4 to 30, agent_down_time from 9 to 75
* Stop removing ip allocations on port delete
* Imported Translations from Transifex
* Ignore PortNotFound exceptions on lockless delete
* Show neutron API request body with debug enabled
* Add session persistence support for NVP advanced LBaaS
* Fix misleading error message about failed dhcp notifications
* NSX: Fix router-interface-delete returns 404 when router not in nsx
* Fix _validate_mac_address method
* BigSwitch: Watchdog thread start after servers
* Calculate stateless IPv6 address
* Create new IPv6 attributes for Subnets
* Remove individual cfg.CONF.resets from tests
* BigSwitch: Sync workaround for port del deadlock
* NSX: Ensure gateway devices are usable after upgrade
* Correctly inherit __table_args__ from parent class
* Process ICMP type for iptables firewall
* Imported Translations from Transifex
* Added missing l3_update call in update_network
* ML2 plugin involves in agent_scheduler migration
* Imported Translations from Transifex
* Avoid long transaction in plugin.delete_ports()
* cisco: Do not change supported_extension_aliases directly
* Fix KeyError except on router_info in FW Agent
* NSX: remove last of unneeded quantum references
* NSX: fix intermittent UT failure on vshield test_router_create
* Bugfix and refactoring for ovs_lib flow methods
* Send fdb remove message when a port is migrated
* Imported Translations from Transifex
* Send network-changed notifications to nova
* Notify nova when ports are ready
* Skip radware failing test for now
* NSX: Propagate name updates for security profiles
* Fix in admin_state_up check function
* NSX: lower the severity of messages about VIF's on external networks
* Kill 'Skipping unknown group key: firewall_driver' log trace
* Imported Translations from Transifex
* API layer documentation
* BigSwitch: Use eventlet.sleep in watchdog
* Embrane LBaaS Driver
* BigSwitch: Widen range of HTTPExceptions caught
* Fix ml2 & nec plugins for allowedaddresspairs tests
* Fix unittest failure in radware lbaas driver
* Removes calls to mock.patch.stopall in unit tests
* Stop mock patches by default in base test class
* Query for port before calling l3plugin.disassociate_floatingips()
* Optimize floating IP status update
* NSX: Allow multiple references to same gw device
* VPNaaS Device Driver for Cisco CSR
* Updated from global requirements
* BigSwitch: Fix certificate file helper functions
* Create agents table when ML2 core_plugin is used
* Fix usage of sqlalchemy type Integer
* Fixing lost vlan ids on interfaces
* Fix bug:range() is not same in py3.x and py2.x
* Call target plugin out of DB transaction in the Metaplugin
* NSX: Sync do not pass around model object
* NSX: Make replication mode configurable
* Updated from global requirements
* Fix ml2 db migration of subnetroutes table
* Imported Translations from Transifex
* After bulk create send DHCP notification
* Fix lack of extended port's attributes in Metaplugin
* Add missing ondelete option to Cisco N1kv tables
* Migration support for Mellanox Neutron plugin
* Imported Translations from Transifex
* Imported Translations from Transifex
* Updated from global requirements
* Add support for tenant-provided NSX gateways devices
* NSX: fix nonsensical log trace on update port
* BigSwitch: Fix rest call in consistency watchdog
* BigSwitch: Fix cfg.Error format in exception
* BigSwitch: Fix error for server config check
* Fixed Spelling error in Readme
* Adds state reporting to SDN-VE agent
* Fix unittest failure in radware lbaas driver
* Log configuration values for OFA agent
* NSX: Add ability to retry on 503's returned by the controller
* Cisco Neutron plugin fails DB migration
* Floatingip_status migration not including Embrane's plugin
* One Convergence Neutron Plugin l3 ext support
* Nuage plugin was missed in floatingip_status db migration script
* ML2 Cisco Nexus MD: VM migration support
* Drop old nvp extension file
* Makes the Extension loader behavior predictable
* One Convergence Neutron Plugin Implementation
* NEC plugin: delete old OFC ID mapping tables
* Imported Translations from Transifex
* Fix typo in migration script
* Enhance GET networks performance of metaplugin
* Adds the missing migration for gw_ext_mode
* BigSwitch: Add SSL Certificate Validation
* BigSwitch: Auto re-sync on backend inconsistencies
* VPNaaS Service Driver for Cisco CSR

2014.1.b3
---------

* Updated from global requirements
* Add OpenDaylight ML2 MechanismDriver
* Replaces network:* strings by constants
* Check vxlan enablement via modinfo
* Do fip_status migration only for l3-capable plugins
* Fix race condition in update_floatingip_statuses
* Implementation of Mechanism driver for Brocade VDX cluster of switches
* NSX: passing wrong security_group id mapping to nsx backend
* Avoid unnecessarily checking the existence of a device
* Refactor netns.execute so that it is not necessary to check namespace
* Minor refactoring for Hyper-V utils and tests
* Adds Hyper-V Security Groups implementation
* Rename migration lb_stats_needs_bigint to match revision number
* Imported Translations from Transifex
* NVP LBaaS: check for association before deleting health monitor
* Different class names for VPNaaS migrations
* ML2: database needs to be initialized after drivers loaded
* replace rest of q_exc to n_exc in code base
* Adds multiple RPC worker processes to neutron server
* NEC plugin: PFC packet filter support
* Fix NVP/Nicira nits
* Remove unused method update_fixed_ip_lease_expiration
* NSX: nicira_models should import model_base directly
* NSX: make sync backend run more often
* Embrane Plugin fails alembic migrations
* Implement Mellanox ML2 MechanismDriver
* Use database session from the context in N1kv plugin
* Delete subnet fails if assoc port has IPs from another subnet
* Remove nvplib and move utility methods into nsxlib
* BigSwitch: Add address pair support to plugin
* Remove unused 'as e' in exception blocks
* Remove vim line from db migration template
* Imported Translations from Transifex
* Support advanced NVP IPsec VPN Service
* Improves Arista's ML2 driver's sync performance
* Fix NVP FWaaS errors when creating firewall without policy
* Remove call to addCleanup(cfg.CONF.reset)
* nec plugin: Avoid long transaction in delete_ports
* Avoid using "raise" to reraise with modified exception
* Imported Translations from Transifex
* Implement OpenFlow Agent mechanism driver
* Finish off rebranding of the Nicira NVP plugin
* Log configuration values for OVS agent
* BigSwitch: Asynchronous rest calls for port create
* Introduce status for floating IPs
* BigSwitch: Add agent to support neutron sec groups
* N1kv: Fixes fields argument not None
* Adds the new IBM SDN-VE plugin
* Imported Translations from Transifex
* Nuage Networks Plugin
* Fixes spelling error Closes-Bug: #1284257
* Openvswitch update_port should return updated port info
* Updated from global requirements
* Remove unused variable
* Change firewall to DOWN when admin state down
* ovs-agent: use hexadecimal IP address in tunnel port name
* NSX: add missing space 'routeron'
* Imported Translations from Transifex
* Fix DetachedInstanceError for Agent instance
* Update License Headers to replace Nicira with VMware
* Renaming plugin-specific exceptions to match NSX
* Imported Translations from Transifex
* DB Mappings for NSX security groups
* NSX: port status must reflect fabric, not link status
* Typo/grammar fixes for the example neutron config file
* NSX: Pass NSX uuid when plugging l2 gw attachment
* stats table needs columns to be bigint
* Remove import extension dep from db migration
* Fix get_vif_port_by_id to only return relevant ports
* Developer documentation
* Fix NSX migration path
* ML2 mechanism driver access to binding details
* Add user-supplied arguments in log_handler
* Imported Translations from Transifex
* NSX: Fix newly created port's status should be DOWN
* BigSwitch: Stop using external locks
* Rename/refactoring of NVP api client to NSX
* Remove pyudev dependency
* Rename DB models and related resources for VMware NSX plugin
* Lower log level of errors due to user requests to INFO
* Include proper Content-Type in the HTTP response headers
* LBaaS: check for associations before deleting health monitor
* l2-population/lb/vxlan : ip neigh add command failed
* l2-population : send flooding entries when the last port goes down
* tests/service: consolidate setUp/tearDown logic
* Ensure ovsdb-client is stopped when OVS agent dies
* NSX: Fix status sync with correct mappings
* Support Port Binding Extension in Cisco N1kv plugin
* change Openstack to OpenStack in neutron
* ML2 binding:profile port attribute
* Rename/remove Nicira NVP references from VMware NSX unit tests
* Fix webob.exc.HTTPForbidden parameter miss
* Sync oslo cache with oslo-incubator
* Change tenant network type usage for IB Fabric
* options: consolidate options definitions
* Replace binding:capabilities with binding:vif_details
* Make sure dnsmasq can distinguish IPv6 address from MAC address
* Rename Neutron core/service plugins for VMware NSX
* Make metaplugin be used with a router service plugin
* Fix wrap target in iptables_manager
* BigSwitch: Fix tenant_id for shared net requests
* BigSwitch: Use backend floating IP endpoint
* Updated from global requirements
* Imported Translations from Transifex
* Raise max header size to accommodate large tokens
* NSX: get_port_status passed wrong id for network
* Imported Translations from Transifex
* Reset API naming scheme for VMware NSX plugin
* remove pointless test TestN1kvNonDbTest
* Rename Security Groups related methods for VMware NSX plugin
* Rename L2 Switch/Gateway related methods for VMware NSX plugin
* Rename Router related methods for VMware NSX plugin
* Plugins should call __init__ of db_base_plugin for db.configure
* Fixes Tempest XML test failures for Cisco N1kv plugin
* Fixes broken documentation hyperlinks
* Use "!=" instead of "is not" when comparing two values
* ML2/vxlan/test: remove unnecessary self.addCleanup(cfg.CONF.reset)
* Fix test_db_plugin.test_delete_port
* Handle race condition in OFC port deletion
* Imported Translations from Transifex
* Adds https support for metadata agent
* Fix VPN agent does not handle multiple connections per vpn service
* Don't require passing in port_security=False if security_groups present
* wsgi.run_server no longer used
* Use different context for each API request in unit tests
* Sync minimum requirements
* Implements an LBaaS driver for NetScaler devices
* vshield task manager: abort tasks in stop() on termination
* Copy cache package from oslo-incubator
* BigSwitch: Move config and REST to diff modules
* Implements provider network support in PLUMgrid plugin
* Should specify expect_errors=False for success response
* Fix unshortened IPv6 address caused DHCP crash
* Add support to request vnic type on port
* tests/unit: Initialize core plugin in TestL3GwModeMixin
* Revert "Skip a test for nicira service plugin"
* Improve unit test coverage for Cisco plugin model code
* Imported Translations from Transifex
* Fix class name typo in test_db_rpc_base
* Embrane Tempest Compliance
* ipt_mgr.ipv6 written in the wrong ipt_mgr.ipv4
* Update help message of flag 'enable_isolated_metadata'
* Imported Translations from Transifex
* Fix invalid facilities documented in rootwrap.conf
* Reset the policy after loading extensions
* Fix typo in service_drivers.ipsec
* Validate rule uuids provided for update_policy
* Add update from agent to plugin on device up
* Remove dependent module py3kcompat
* Delete duplicate internal devices in router namespace
* Use six.StringIO/BytesIO instead of StringIO.StringIO
* Parse JSON in ovs_lib.get_vif_port_by_id
* Imported Translations from Transifex
* Skip a test for nicira service plugin
* Remove DEBUG:....nsx_cluster:Attribute is empty or null
* Fix request timeout errors during calls to NSX controller
* remove unused imports
* L3 agent fetches the external network id once
* Avoid processing ports which are not yet ready
* Ensure that session is rolled back on bulk creates
* Add DB mappings with NSX logical routers
* Use save_and_reraise_exception when reraise exception
* nec plugin: Compare OFS datapath_id as hex int
* Use six.moves.urllib.parse instead of urlparse
* Rename Queue related methods for VMware NSX plugin
* Lowercase OVS sample config section headers
* Add DB mappings with NSX logical switches
* NSX: Fix possible deadlock in sync code
* Raise an error from ovs_lib list operations
* Add additional unit tests for the ML2 plugin
* Fix ValueError in ip_lib.IpRouteCommand.get_gateway()
* Imported Translations from Transifex
* Fix log-related tracebacks in nsx plugin
* add router_id to response for CRU on fw/vip objs
* Move db migration of ml2 security groups to havana
* Sync latest oslo.db code into neutron
* Add support for router scheduling in Cisco N1kv Plugin
* Imported Translations from Transifex
* Add migration support from agent to NSX dhcp/metadata services
* Validate multicast ip range in Cisco N1kv Plugin
* NSX plugin: fix floatingip re-association
* Re-enable lazy translation
* Do not append to messages with +
* Remove psutil dependency
* Remove legacy quantum config path
* LBaaS: move agent based driver files into a separate dir
* mailmap: update .mailmap
* Fix binding:host_id is set to None when port update
* Return request-id in API response
* Skip extra logging when devices is empty
* Add extraroute_db support for Cisco N1kv Plugin
* Improve handling of security group updates
* ML2 plugin cannot raise NoResultFound exception
* Fix typo in rootwrap files: neuton -> neutron
* Imported Translations from Transifex
* Prepare for multiple cisco ML2 mech drivers
* ML2 Cisco Nexus MD: Create pre/post DB event handlers
* Support building wheels (PEP-427)
* NVP plugin:fix delete sec group when backend is out of sync
* Use oslo.rootwrap library instead of local copy
* Fix misspellings in neutron
* Remove unnecessary call to get_dhcp_port from DeviceManager
* Refactor to remove _recycle_ip
* Allow multiple DNS forwarders for dnsmasq
* Fix passing keystone token to neutronclient instance
* Don't document non-existing flag '--hide-elapsed'
* Fix race condition in network scheduling to dhcp agent
* add quota support for ryu plugin
* Imported Translations from Transifex
* Enables BigSwitch/Restproxy ML2 VLAN driver
* Add and update subnet properties in Cisco N1kv plugin
* Fix error message typo
* Configure floating IPs addresses after NAT rules
* Add an explicit tox job for functional tests
* improve UT coverage for nicira_db operations
* Avoid re-wiring ports unnecessarily
* Process port_update notifications in the main agent loop
* Base ML2 bulk support on the loaded drivers
* Imported Translations from Transifex
* Removes an incorrect and unnecessary return
* Reassign IP to vlan interface when deleting a VLAN bridge
* Imported Translations from Transifex
* Change metadata-agent to have a configurable backlog
* Sync with commit-id: 9d529dd324d234d7aeaa3e6b4d3ab961f177e2ed
* Remove unused RPC calls from n1kv plugin code
* Change metadata-agent to spawn multiple workers
* Extending quota support for neutron LBaaS entities
* Tweak nvp/nsx version validation logic for router operations
* Simplify ip allocation/recycling to relieve db pressure
* Remove unused code
* Reduce severity of log messages in validation methods
* Disallow non-admin users from updating net's shared attribute
* Fix error while connecting to busy NSX L2 Gateway
* Remove extra network scheduling from vmware nsx plugin
* L3 Agent restart causes network outage
* Remove garbage in vim header
* Enable hacking H233 rule
* Rename nvp_cluster for VMware NSX plugin
* Minimize the cost of checking for api worker exit
* Remove and recreate interface if already exists

2014.1.b2
---------

* Use an independent iptables lock per namespace
* Report proper error message in PLUMgrid Plugin
* Fix interprocess locks for run_tests.sh
* Clean up ML2 Manager
* Expunge session contents between plugin requests
* Remove release_lease from the DHCP driver interface
* VMware NSX: add sanity checks for NSX cluster backend
* Update RPC code from oslo
* Fix the migration adding a UC to agents table
* Configure plugins by name
* Fix negative unit test for sec group rules
* NVP: Add LOG.exception to see why router was not created
* Add binding:host_id when creating port for probe
* Fix race condition in delete_port method. Fix update_port method
* Use information from the dnsmasq hosts file to call dhcp_release
* Fix pip install failure due to missing nvp.ini file
* Imported Translations from Transifex
* Imported Translations from Transifex
* Make timeout for ovs-vsctl configurable
* Remove extra whitespace
* Fix extension description and remove unused exception
* Fix mistake in usage of drop_constraint parameters
* Fix race condition on ml2 delete and update port methods
* Fix Migration 50e86cb2637a and 38335592a0dc
* L3 Agent can handle many external networks
* Update lockutils and fixture in openstack.common
* Add test to port_security to test with security_groups
* LBaaS: handle NotFound exceptions in update_status callback
* VMware NSX: Fix db integrity error on dhcp port operations
* Use base.BaseTestCase in NVP config test
* Remove plugin_name_v2 and extension_manager in test_config
* Enables quota extension on BigSwitch plugin
* Add security groups tables for ML2 plugin via migration
* Rename nicira configuration elements to match new naming structure
* Fix race in get_network(s) in OVS plugin
* Imported Translations from Transifex
* Fix empty network deletion in db_base_plugin for postgresql
* Remove unused imports
* nicira: fix db integrity error during port deletion
* Rename check_nvp_config utility tool
* Remove redundant code
* Remove dupl. for get_resources in adv. services
* Start of new developer documentation
* Fix NoSuchOptError in lbaas agent test
* Corrects broken format strings in check_i18n.py
* [ML2] l2-pop MD handle multi create/delete ports
* Dnsmasq uses all agent IPs as nameservers
* Imported Translations from Transifex
* BigSwitch: Fixes floating IP backend updates
* neutron-rootwrap-xen-dom0 handles data from stdin
* Remove FWaaS Noop driver as default and move to unit tests dir
* Send DHCP notifications regardless of agent status
* Mock looping_call in metadata agent tests
* Imported Translations from Transifex
* Change default eswitchd port to avoid conflict
* Midonet plugin: Fix source NAT
* Add support for NSX/NVP Metadata services
* Update the descriptions for the log cfg opts
* Add VXLAN example to ovs_neutron_plugin.ini
* Imported Translations from Transifex
* ml2/type_gre: Adds missing clear_db to test_type_gre.py
* ml2: gre, vxlan type driver can leak segment_id
* NVP: propagate net-gw update to backend
* Imported Translations from Transifex
* Nicira: Fix core_plugin path and update default values in README
* Include lswitch id in NSX plugin port mappings
* Imported Translations from Transifex
* Revert "move rpc_setup to the last step of __init__"
* extra_dhcp_opt add checks for empty strings
* LBaaS: synchronize haproxy deploy/undeploy_instance methods
* NVP plugin: Do backend router delete outside of db transaction
* NVP plugin: Avoid timeouts if creating routers in parallel
* Updates tox.ini to use new features
* LBaaS: fix handling pending create/update members and health monitors
* Add X-Tenant-ID to metadata request
* Do not trigger agent notification if bindings do not change
* fix --excluded option of meter-label-rule-create not working
* move rpc_setup to the last step of __init__
* Updated from global requirements
* Sync global requirements to pin sphinx to sphinx>=1.1.2,<1.2
* Update common network type consts to same origin
* Remove start index 0 in range()
* LBaaS: unify haproxy-on-host plugin driver and agent
* change variable name from plugin to agent
* Imported Translations from Transifex
* Add post-mortem debug option for tests
* validate if the router has external gateway interface set
* Remove root_helper config from plugin ini
* Fix a race condition in agents status update code
* Add LeastRouters Scheduler to Neutron L3 Agent
* Imported Translations from Transifex
* Imported Translations from Transifex
* Remove dead code _arp_spoofing_rule()
* Add fwaas_driver.ini to setup.cfg
* Switch to using spawn to properly treat errors during sync_state
* Fix a typo in log exception in the metering agent
* Sync rpc fix from oslo-incubator
* Do not concatenate localized strings
* Imported Translations from Transifex
* Removed erroneous config file comment
* Fix str2dict and dict2str's incorrect behavior
* Improve unit test coverage for Cisco plugin common code
* Change to improve dhcp-agent sync_state
* Fix downgrade in migration
* Sync dhcp_agent.ini with the code
* Imported Translations from Transifex
* Handle failures on update_dhcp_port
* Handle exceptions on create_dhcp_port

2014.1.b1
---------

* Imported Translations from Transifex
* Add vpnaas and debug filters to setup.cfg
* Fix misspellings
* Fix bad call in port_update in linuxbridge agent
* atomically setup ovs ports
* Adds id in update_floatingip API in PLUMgrid plugin driver
* Sync Log Levels from OSLO
* update error msg for invalid state to update vpn resources
* Add missing quota flags in the config file sample
* Imported Translations from Transifex
* Fix unable to add allow all IPv4/6 security group rule
* Add request timeout handling for Mellanox Neutron Agent
* Revert "ML2 plugin should not delete ports on subnet deletion"
* Improve OVS agent logging for profiling
* l3_agent: make process_router more robust
* Fixes missing method in Hyper-V Utils (Metering)
* Fix metering iptables driver not reading root_helper param
* Updates .gitignore
* Stop logging unnecessary warning on context create
* Avoid loading policy when processing rpc requests
* Improve unit test coverage for Cisco plugin base code
* Pass in certain ICMPv6 types by default
* Ensure NVP API connection port is always an integer
* Mocking ryu plugin notifier in ryu plugin test
* Rebind security groups only when they're updated
* Fix format errors seen in rpc logging
* Add test_handle_router_snat_rules_add_rules
* Rebind allowed address pairs only if they changed
* Enforce unique constraint on neutron pool members
* Send only one agent notification on port update
* Fix showing nonexistent NetworkGateway throwing 500 instead of 404
* Imported Translations from Transifex
* Update Zhenguo Niu's mailmap
* Improve unit test coverage for Cisco plugin nexus code
* Preserve floating ips when initializing l3 gateway interface
* FWaaS can't run in an operating system without namespace feature
* Imported Translations from Transifex
* metaplugin: use correct parameter to call neutron client
* Replace stubout with fixtures
* Imported Translations from Transifex
* Imported Translations from Transifex
* Mock the udevadm in the TunnelTestWithMTU test
* Avoid dhcp agent race condition on subnet and network delete
* Sync openstack.common.local from oslo
* Imported Translations from Transifex
* ML2 plugin should not delete ports on subnet deletion
* Add state reporting to the metadata agent
* Move MidonetInterfaceDriver and use mm-ctl
* Do not add DHCP info to subnet if DHCP is disabled
* Handle IPAddressGenerationFailure during get_dhcp_port
* Add request-id to log messages
* Imported Translations from Transifex
* Enable polling minimization
* Add configurable ovsdb monitor respawn interval
* Ensure get_pid_to_kill works with rootwrap script
* Adds tests, fixes Radware LBaaS driver as a result
* Optionally delete namespaces when they are no longer needed
* Call _destroy_metadata_proxy from _destroy_router_namespaces
* Added check on plugin.supported_extension_aliases
* Cisco nexus plugin fails to untrunk vlan if other hosts are using vlan
* Catch PortNotFound exception during get_dhcp_port
* Reduce the severity of dhcp related log traces
* MidoNet: Added support for the admin_state_up flag
* Fix OVS agent reclaims local VLAN
* Replace mox in unit tests with mock
* LBaaS: fix reported binary name of a loadbalancer agent
* Apply six for metaclass
* NVP plugin: fix connectivity to fip from internal nw
* Imported Translations from Transifex
* Add support for NSX/NVP DHCP services
* Fix downgrade in migration
* Imported Translations from Transifex
* Add log statements for policy check failures
* Lower severity of log trace for DB integrity error
* Adds delete of an extra_dhcp_opt on a port
* Round-robin SVI switch selection fails on Cisco Nexus plugin
* Tune up report and downtime intervals for l2 agent
* Fix DB integrity issues when using postgres
* Move Loadbalancer Noop driver to the unit tests
* Removes unused nvp plugin config param
* Midonet to support port association at floating IP creation
* Arista ML2 mech driver cleanup and integration with portbindings
* Fix MeteringLabel model to not clear router's tenant id on deletion
* Fix downgrade in migration
* Fix sqlalchemy DateTime type usage
* Linux device name can have '@' or ':' characters
* Remove the warning for Scheduling Network
* Do not run "ovs-ofctl add-flow" with an invalid in_port
* Replace a non-existing exception
* Fix random unit-test failure for NVP advanced plugin
* Updated from global requirements
* Cleanup HACKING.rst
* Remove confusing comment and code for LBaaS
* Don't shadow str
* ExtraRoute: fix _get_extra_routes_by_router_id()
* remove repeated network type definition in cisco plugin
* Refactor configuring of floating ips on a router
* Remove database section from plugin.ini
* Fix import log_handler error with publish_errors set
* DHCP agent scheduler support for BigSwitch plugin
* Fix segment range in N1KV test to remove overlap
* Fix query error on dhcp release port for postgresql
* sync log from oslo
* Imported Translations from Transifex
* Use correct device_manager member in dhcp driver
* LBaaS UT: use constants vs magic numbers for http error codes
* Modified configuration group name to lowercase
* Avoid dhcp agent race condition on subnet and network delete
* Ensure OVS plugin is loaded in OVS plugin test
* Remove deprecated fields in keystone auth middleware
* Fix error while creating l2 gateway services in nvp
* Fix update_device_up method of linuxbridge plugin
* LBaaS: Fix incorrect pool status change
* Imported Translations from Transifex
* NVP: Correct NVP router port mac to match neutron
* Updated from global requirements
* Removing workflows from the Radware driver code
* LBaaS: when returning VIP include session_persistence even if None
* Imported Translations from Transifex
* change assertEquals to assertEqual
* Fix TypeError: kill doesn't make sense
* Update latest OSLO
* Revert back to 'call' for agent reports
* Imported Translations from Transifex
* Imported Translations from Transifex
* Fixing the syntax error in the XML Serializer
* Raise VipExists exception in case Vip is created or updated for a pool that already has a Vip
* Imported Translations from Transifex
* NVP metadata access - create elevated context once
* Fix race condition in dhcp agent
* adding parameter to configure QueuePool in SQLAlchemy
* Fix issues with db pooling
* use the fact that empty sequences are false
* Ensure that lockfiles are defined in a common place
* Imported Translations from Transifex
* Fix typo in policy.json and checks in nicira plugin
* Fix DB query returning ready devices in LoadBalancerCallbacks
* Imported Translations from Transifex
* Load all the necessary database tables when running cisco plugin
* Fix haproxy cfg unit test
* fix misplaced paren in log statement for l3-scheduler
* Imported Translations from Transifex
* Add bulking support for Cisco plugin
* Validate protocol when creating VIP
* Allow tests in TestDhcpAgentEventHandler to run independently
* Add scheduling support for the Brocade plugin
* Imported Translations from Transifex
* Synchronize QuantumManager.get_instance() method
* Imported Translations from Transifex
* Imported Translations from Transifex
* Pin SQLAlchemy to 0.7.x
* Improve test coverage for quantum wsgi module
* Adds delete-orphan to database deletion
* Imported Translations from Transifex
* Do not disable propagate on root logger
* NVP metadata access - create elevated context once
* Registers root_helper option for test_iptables_firewall
* Resolves ryu plugin unittest errors
* Set fake rpc implementation in test_lb_quantum_agent
* Ensure DB pooling code works with newer eventlet versions
* Imported Translations from Transifex
* Sync latest Oslo components for updated copyright
* drop rfc.sh
* Replace "OpenStack LLC" with "OpenStack Foundation"
* sync Oslo Grizzly stable branch with Quantum
* First havana commit
* Ensure port get works when NVP mapping not stored in Quantum DB
* remove references to netstack in setup.py
* Imported Translations from Transifex
* port_security migration does not migrate data
* Adds Grizzly migration revision
* Switch to final 1.1.0 oslo.config release
* Fix detection of deleted networks in DHCP agent
* Add l3 db migration for plugins which did not support it in folsom
* Updates latest OSLO changes
* Set fake rpc backend impl for TestLinuxBridgeAgent
* Imported Translations from Transifex
* Update oslo rpc libraries
* Sets default MySQL engine to InnoDB
* Solve branch in migration path
* Fixes Hyper-V agent issue with mixed network types
* Imported Translations from Transifex
* missing - in --config-file
* Fix typo
* Log the configuration options for metadata-proxy and agent
* Imported Translations from Transifex
* NVP plugin: return 409 if wrong router interface info on remove
* Imported Translations from Transifex
* Ensure metadata access network does not prevent router deletion
* Filter out router ports without IPs when gathering router sync data
* Do not delete subnets with IPs on router interfaces
* Update to Quantum Client 2.2.0
* Add explicit egress rules to nvp security profile
* Update tox.ini to support RHEL 6.x
* Fix exception typo
* Disable secgroup extension when Noop Firewall driver is used
* Wrap quota controller with resource.Resource
* Allow probe-create to specify device_owner
* Enable handling the report_state RPC call in Brocade Plugin
* Imported Translations from Transifex
* Create quantum client for each api request in metadata agent
* Lock tables for update on allocation/deletion
* NVP plugin: configure metadata network only if overlapping IPs are enabled
* Show default configuration Quotas
* add ns-metadata-proxy rootwrap filters to dhcp.filters
* isolated network metadata does not work with nvp plugin
* Imported Translations from Transifex
* Load quota resources dynamically
* Notify creation or deletion of dhcp port for security group
* fix mismatched kwargs for a few calls to NvpPluginException
* Populate default explicit allow rules for egress
* Switch to oslo.config
* Moved the configuration variables
* Make run_tests.sh pep8 conf match tox
* Fix syntax error in credential.py and missing __init__.py
* Imported Translations from Transifex
* Add common test base class to hold common things
* fix incorrect pathname
* Prevent DoS through XML entity expansion
* Delete DATABASE option checkup testcases
* Fixes linuxbridge agent going down due to tap device deletion timing issue
* Rename source_(group_id/ip_prefix) to remote_(group_id/ip_prefix)
* Imported Translations from Transifex
* Setup device alias by device flavor information
* L3 port delete prevention: do not raise if no IP on port
* Pin pep8 to 1.3.3
* Avoid sending names longer than 40 characters to NVP
* move cisco-specific extensions to Cisco extensions directory
* Add UT for LBaaS HAProxy driver
* Include health monitors expected codes upper bound into HAProxy config
* Allow DHCP and L3 agents to choose if they should report state
* Imported Translations from Transifex
* Enable HA proxy to work with fedora
* Prevent exception with VIP deletion
* Change the default l3_agent_manager to L3NATAgent
* Imported Translations from Transifex
* NEC plugin support for dhcp network and router scheduling
* enable linuxbridge for agent scheduler
* Move network schedule to first port creation
* Imported Translations from Transifex
* Host route to metadata server with Bigswitch/Floodlight Plugin
* Incorrect argument in calling post_json
* fix update_port to get tenant_id from db rather than request
* Ensure max length of iptables chain name w/o prefix is up to 11 chars
* Cisco plugin support for creating ports without instances
* mock quantum.agent.common.config.setup_logging
* Imported Translations from Transifex
* Add initial testr support
* Replace direct tempfile usage with a fixture
* Set fake rpc implementation in metaplugin test configuration
* Enabled add gateway to refrain from checking exit code
* Add stats reporting to HAProxy namespace driver
* Add session persistence support to LBaaS HAProxy driver
* Remove deprecated assertEquals alias
* LBaaS Agent Reference Implementation
* Imported Translations from Transifex
* create a Quantum port to reserve VIP address
* NVP plugin support for dhcp network scheduling
* Bump python-quantumclient version to 2.1.2
* Add scheduling feature based on agent management extension
* Remove compat cfg wrapper
* NVP Router: Do not perform SNAT on E-W traffic
* Enable multiple L3 GW services on NVP plugin
* Fix retrieval of shared networks
* Imported Translations from Transifex
* Remove network type validation from provider networks extension
* Fix NVP plugin not notifying metadata access network to DHCP agent
* Limit amount of fixed ips per port
* Fetch all pages when listing NVP NAT Rules
* Unpin PasteDeploy dependency version
* Make sure all db accesses use subtransactions
* Use testtools instead of unittest or unittest2
* Port update with existing ip_address only causes exception
* Enables packetfilter ext in NEC plugin based on its driver config
* Set default api_extensions_path for NEC plugin
* Fixes import reorder nits
* Imported Translations from Transifex
* Latest common updates
* Limit chain name to 28 characters
* Add midonet to setup.py
* Add password secret to brocade plugin
* Use db model hook to filter external network
* Add default state_path to quantum.conf
* Imported Translations from Transifex
* Imported Translations from Transifex
* refactor LoadBalancerPluginDbTestCase setUp()
* Imported Translations from Transifex
* Remove external_id and security group proxy code
* Add pagination parameters for extension extraroute
* Imported Translations from Transifex
* Provide a default api_extensions_path for nvp_plugin
* AttributeError: No such RPC function 'report_state'
* Add pagination support for xml
* Sync latest install_venv_common.py with oslo
* Imported Translations from Transifex
* Add check-nvp-config utility
* Close file descriptors when executing sub-processes
* Add support for Quantum Security Groups for Ryu plugin
* Resolve branches in db migration scripts to G-3 release
* Add Quantum support for NVP Layer-2 gateways
* Implement MidoNet Quantum Plugin
* Routing table configuration support on L3
* Correct permissions on quantum-hyperv-agent
* Raising error if invalid attribute passed in
* Support Port Binding Extension in BigSwitch plugin
* Exit if DHCP agent interface_driver is not defined
* Supporting pagination in api v2.0
* Update latest OSLO files
* Modify dhcp agent for agent management extension
* Imported Translations from Transifex
* Metadata support for NVP plugin
* Add routed-service-insertion
* plugin/nec: Make sure resources on OFC are globally unique
* Fix SG interface to reflect the reality
* Add unit test for ryu-agent
* Agent management extension
* Need to pass port['port'] to _get_tenant_id_for_create()
* Improve error handling when nvp and quantum are out of sync
* Decouple helper functions from L3NatDBTestCase
* Imported Translations from Transifex
* Add Migration for nvp-qos extension
* Use oslo-config-2013.1b3
* Shorten the DHCP default resync_interval
* Add nvp qos extension
* Imported Translations from Transifex
* Unable to update port as non-admin nvp plugin
* Update nvplib to use HTTP constants
* Rename admin_status_up to admin_state_up
* Fixed a typo in the loadbalancer test case
* Allow nicira plugin to handle multiple NVP API versions
* Imported Translations from Transifex
* L3 API support for BigSwitch-FloodLight Plugin
* Add an update option to run_tests.sh
* Avoid extra query when overlapping IPs are disabled
* Allow tests from test_dhcp_agent to run independently
* Imported Translations from Transifex
* Mark password config options with secret
* Adds Brocade Plugin implementation
* Add support for extended attributes for extension resources
* Imported Translations from Transifex
* Support iptables-based security group in NEC plugin
* Persist updated expiration time
* Support advanced validation of dictionaries in the API
* Synchronize code from oslo
* Add check for subnet update with conflicting gateway and allocation_pools
* Alembic migration script for Loadbalancing service
* Fix NVP L3 gateway ports admin_state_down on creation
* Remove cfg option default value and check if missing
* Remove duplicated option state_path from netns cleanup
* only destroy single namespace if router_id is set
* Use assertEqual instead of assertTrue
* Imported Translations from Transifex
* Move auth_token configurations to quantum.conf
* L3 API support for nicira plugin
* Unused methods in quantum.wsgi clean up
* Add firewall_driver option to linuxbridge_conf.ini
* Adds API parameters to quantum.api.extension.ResourceExtension
* fix grammar in NetworkInUse exception
* Imported Translations from Transifex
* PLUMgrid quantum plugin
* Implements quantum security groups support on OVS plugin
* Sync latest cfg from oslo-incubator
* Improvements to API validation logic
* Imported Translations from Transifex
* add non-routed subnet metadata support
* Imported Translations from Transifex
* Enable OVS and NETNS utilities to perform logging
* Add unit tests for Open vSwitch Quantum plugin
* Add NVP Security group support
* Fix import error in ryu-agent
* Imported Translations from Transifex
* Bad translation from network types to nvp transport types
* Update .coveragerc
* Register root_helper in test_debug_commands and test_dhcp_agent
* Adds xml support for quantum v2 API
* Allow tools/install_venv_common.py to be run from within the source directory
* Cisco plugin cleanup follow up commit
* Be smarter when figuring out broadcast address
* Use policy_file parameter in quantum.policy
* Imported Translations from Transifex
* Define root_helper variable under the [AGENT] section
* Fixes rest of "not in" usage
* Updated to latest oslo-version code
* Imported Translations from Transifex
* Imported Translations from Transifex
* Imported Translations from Transifex
* Resetting session persistence for a VIP
* Improve data access method of ryu-agent
* Fixes 'not in' operator usage
* Imported Translations from Transifex
* Adds support of TCP protocol for LBaaS VIPs
* Sync latest cfg from oslo-incubator
* Remove redundant key list generation in Cisco plugin
* Fixes if statement inefficiency in quantum.agent.linux.interface
* Imported Translations from Transifex
* Postgresql ENUM type requires a name: fix exceptions in NVP Plugin
* correct spelling of Notify in classname
* Disable dhcp_domain distribution when dhcp_domain is empty
* Make protocol and ethertype case insensitive for security groups
* Fix branch in db migration scripts
* Finish adding help strings to all config options in Quantum code
* Add NVP port security implementation
* Imported Translations from Transifex
* Set default lock_path in state_path
* Use install_venv_common.py from oslo
* Make get_security_groups() return security group rules
* Fix OVSQuantumAgent.port_update if not admin_state_up
* Clean up test_extensions.py imports
* Fixes import order errors
* OVS cleanup utility removes veth pairs
* Revert "Reqd. core_plugin for plugin agents & show cfg opts loaded."
* Reqd. core_plugin for plugin agents & show cfg opts loaded
* Ensure that correct root helper is used
* Fix InvalidContentType can't be raised because of error in constructor
* OVS: update status according to admin_state_up
* Cisco plugin cleanup
* Improving code reuse with loadbalancer entity deletion
* Fix database reconnection
* Fixes per-tenant quota not working
* Adds port security api extension and base class
* LinuxBridge: set port status as 'DOWN' on creation
* LinuxBridge: update status according to admin_state_up
* Use babel to generate translation file
* LBaaS plugin returns unnecessary information for PING and TCP health monitors
* Make all extension contract classes inherit from extensions.ExtensionDescriptor
* get_security_group() now returns rules
* set allocation_pool_id nullable=False
* make IPv6 unit test work on systems with eth0
* Support Port Binding Extension in NEC plugin
* Enable NEC OpenFlow plugin to use per-tenant quota
* Enhance wsgi to listen on ipv6 address
* Fix i18n messages
* Update Oslo rpc
* Enforces generic sqlalchemy types in migrations
* Remove redundant code
* Removes redundant code in quantum.api.api_common
* Fix i18n messages in quantum.api.api_common
* Completes unittest coverage of quantum.api.api_common
* Enable test_agent_ovs_cleanup to be run alone
* Fix i18n messages for cisco plugin
* Provide atomic database access for ports in linuxbridge plugin
* Add help strings to config file options in Quantum code
* Document that code is on github now in README
* Config lockutils to use a temp path for tests
* Fix downgrade revision to make db migration linear
* Send notification on router interface create/delete
* More unittests for quantum.api.v2.base
* Fixes inefficiency in quantum.api.v2.base._filters
* Refactor hyperv plugin and agent
* Update Oslo rpc module
* Provide atomic database access for nvp plugin
* _validate_security_groups_on_port was not validating external_ids
* Update WebOb version to >=1.2
* Ensure that agents also set control_exchange
* Add a common test case for Port Binding Extension
* Fix line endings from CRLF to LF
* Fixes import order nits
* Fix ATTR_NOT_SPECIFIED comparison errors
* Add migration for network bindings in NVP plugin
* NEC OpenFlow plugin supports L3 agent RPC
* Update latest OSLO
* Catch up RPC context fixes on NEC OpenFlow plugin
* ensure all enums in loadbalancer models have names
* Adding multi switch support to the Cisco Nexus plugin
* Name the securitygrouprules.direction enum
* Adds support for deploying Quantum on Windows
* Adds a Hyper-V Quantum plugin
* Add exception validation for subnet used
* Remove accessing cfg.CONF.DATABASE in nec-agent
* Inform a client if Quantum provides port filtering feature
* Remove unused imports in the plugins package
* DHCP agent unable to access port when restarting
* Remove unused imports in unit tests
* Use default_notification_level when notifying
* Latest OSLO updates
* NvpPluginException mixes err_msg and err_desc
* Fixes i18n messages in nvp plugin
* Optimize if/else logic in quantum.api.v2.base.prepare_request_body()
* Fixes quantum.api.v2.base._filters to be more intuitive
* Fix for loadbalancer vips list
* rename port attribute variable to SECURITYGROUPS from SECURITYGROUP
* Remove relative imports from NVP plugin
* Port to argparse based cfg
* Fix database configuration of ryu-agent
* Pass X-Forwarded-For header to Nova
* The change implements LBaaS CRUD SQLAlchemy operations
* Iptables security group implementation for LinuxBridge
* Update the migration template's default kwargs
* add migration support for lb security groups
* Fix import for quantum-db-manage
* Allow nvp_api to load balance requests
* API extension and DB support for service types
* Add migration support to Quantum
* Remove some unused imports
* Undo change to require WebOb 1.2.3, instead, require only >=1.0.8
* Add common support for database configuration
* Fixup import syntax error in unit test
* Enable the user to enforce validity of the gateway IP
* Add comment to indicate bridge names' length
* refactor QuotaV2 import to match other exts
* change xxx_metadata_agent() into xxx_metadata_proxy()
* Fix the replacement placeholder in string
* Ensure that exception prints UUID and not pointer
* .gitignore cleanup
* Fixes i18n message for nec plugin
* Fixes i18n message for ryu plugin
* Remove unused imports in debug package
* sql_dbpool_enabled not passed to configured_db nvp_plugin
* Enable tenants to set non-owned ext network as router gateway
* Upgrade WebOb to 1.2.3
* Logging module cleanup
* Remove unused imports in common package
* Remove unused imports in rootwrap package
* Remove unused imports in db package
* Remove unused imports in api package
* Provider network implementation for NVP plugin
* Remove unused imports in agent package
* Set default core_plugin to None
* Ensure that exception prints correct text
* Cleans up bulk_body generation in quantum.api.v2.base.prepare_request_body()
* Exceptions cleanup
* Readjust try/catch block in quantum.api.v2.base.create()
* Ensures that the dnsmasq configuration file flag is always set
* Ensure allocation pools are deleted from database
* Raise InvalidInput directly instead of catching it
* Ensure bulk creations have quota validations
* Correct exception output for subnet deletion when port is used
* Update the configuration help for the OVS cleanup utility
* Implementing string representation for model classes
* Provide "atomic" database access for networks
* Add OVS cleanup utility
* Removes redundant code in quantum.api.v2.base.create()
* Add eventlet db_pool use for mysql
* Clean up executable modules
* Fixes import order nits
* Fix log message for unreferenced variable
* The patch introduces an API extension for LBaaS service
* Fix pep8 issues
* Add tox artifacts to .gitignore
* Correct i18n messages for bigswitch plugin
* dhcp_agent.ini, l3_agent.ini: update dhcp/l3_agent.ini
* Make patch-tun and patch-int configurable
* Update test_router_list to validate the router returned
* Fixed: security group port binding should be automatically deleted on delete_port
* Add restproxy.ini to config_path in setup.py
* Replaces assertEquals with assertEqual
* Completes coverage of quantum.api.v2.resource
* Fixed: unit tests using SQLite do not check foreign keys
* dhcp.filters needs ovs_vsctl permission
* Correct i18n message for nicira plugin
* Correct i18n message for metaplugin
* add parent/sub-resource support into Quantum API framework
* plugins/ryu: l3 agent rpc for Ryu plugin is broken
* plugins/ryu: Fixes context exception in Ryu plugin
* DRY for network() and subnet() in test_db_plugin.py
* Adds validity checks for ethertype and protocol
* Add script for checking i18n message
* Update eventlet monkey patch flags
* Remove unnecessary port deletion
* Support to reset dnsname_servers and host_routes to empty
* Prevent unnecessary database read by l3 agent
* Correct i18n message for linuxbridge plugin
* Add router testcases that are missing in L3NatDBTestCase
* Releasing resources of context manager functions if exceptions occur
* Drop duplicated port_id check in remove_router_interface()
* Returns more appropriate error when address pool is exhausted
* Add VIF binding extensions
* Sort router testcases as group for L3NatDBTestCase
* Refactor resources listing testcase for test_db_plugin.py
* l3 agent rpc
* Fix rootwrap cfg for src installed metadata proxy
* Add metadata_agent.ini to config_path in setup.py
* add state_path sample back to l3_agent.ini file
* plugin/ryu: make live-migration work with Ryu plugin
* Remove __init__.py from bin/ and tools/
* Removes unused code in quantum.common
* Fixes import order nits
* update state_path default to be the same value
* Use /usr/bin/ for the metadata proxy in l3.filters
* prevent deletion of router interface if it is needed by a floating ip
* Completes coverage of quantum.agent.linux.utils
* Fixes Rpc related exception in NVP plugin
* make the DHCP agent use a unique queue name
* Fixes Context exception in BigSwitch/FloodLight Plugin
* fix remap of floating-ip within l3-agent polling interval
* Completes coverage of quantum.agent.rpc.py
* Completes coverage of quantum.agent.netns_cleanup.py
* add metadata proxy support for Quantum Networks
* Make signing dir a subdir in /var/lib/quantum
* Use openstack.common.logging in NEC OpenFlow plugin
* Correct i18n message for api and db module
* Fixes router gateway update succeeding with existing floatingip association
* Fixes order of route entries
* fix cisco plugin db model to not override count methods
* Use auth_token middleware in keystoneclient
* Fixes pep8 nit
* Make sure we can update when there is no gateway port linked to it
* Fix syntax error in nvplib
* Removes quantum.tests.test_api_v2._uuid()
* Add filters for quantum-debug
* Removing unnecessary setUp()/tearDown() in SecurityGroupsTestCase
* Fix exception when security group rule already exists
* Don't force run_tests.sh pep8 only to use -N
* Correct i18n message for ovs plugin
* Replaces uuid.uuid4 with uuidutils.generate_uuid()
* Correct i18n message
* Removes _validate_boolean()
* Removes quantum.common.utils.str_uuid()
* Refactors quantum.api.v2.attributes.py
* Updates tearDown() to release instance objects
* pass static to argv to quantum-debug config parser
* Improve openvswitch and linuxbridge agents' parsing of mappings
* Move extension.py into quantum/api
* Ensure that the expiration time for leased IP is updated correctly
* Fix context problem
* bug 1057844: improve floating-ip association checks
* fix broken logic of only using hasattr to check for get_x_counts
* Prevent router being deleted if it is used by a floating IP
* Updates clear_db() to unregister models and close session
* The change allows loading several service plugins along with core plugin
* fix incorrect kwarg param name for region with l3-agent
* All egress traffic allowed by default should be implied
* Fix unit test test_router_list with wrong fake return value
* Delete floating port and floatingip in the same transaction
* Completes unittest coverage of quantum.api.v2.attributes.py
* Use DB count to get resource counts
* plugin/ryu, linux/interface: remove ryu specific interface driver
* Allow NVP plugin to use per-tenant quota extension
* Revert "Put gw_port into router dict result."
* Ensure that deleted gateway IP address is recycled correctly
* Ensure that fixed port IP address is in valid allocation range
* RESTProxy Plugin for Floodlight and BigSwitch
* Ensure that mac address is set to namespace side veth end
* plugin/ryu: update for ryu update
* plugin/ryu: add tunnel support
* Adds tests for attribute._validate_uuid
* Adds tests to attribute.convert_to_int
* Adds tests for attributes.is_attr_set
* Adds test scripts for _validate_string
* Adds test scripts for _validate_range
* Part of the patch set that enables VMs to use libvirt's bridge type
* Remove qpid configuration variables no longer supported
* Removing unused code for Cisco Quantum Plugin V1
* Add QUANTUM_ prefix for env used by quantum-debug
* Make tox.ini run pep8 checks on bin
* Explicitly include versioninfo in tarball
* Adds test scripts for _validate_values
* Clean up quantum.api.v2.validators
* Add indication when quantum server has started
* Import lockutils and fileutils from openstack-common
* Update latest openstack-common code
* Clean up executable modules
* Remove nova code from Quantum Cisco Plugin
* Use isinstance for _validate_boolean
* Fixes convert_to_boolean logic
* Updated openstack-common setup and version code
* Validate L3 inputs
* Treat case when pid is None
* Fix openssl zombies
* Ensure that the anyjson version is correct
* Add eventlet_backdoor and threadgroup from openstack-common
* Add loopingcall from openstack-common
* Added service from openstack-common
* Sync latest notifier changes from openstack-common
* Update KillFilter to handle 'deleted' exe's
* Pep8 fixes for quantum master
* Use _validate_uuid in quantum.plugins.nec.extensions.packetfilter.py
* Use is_uuid_like in quantum.extensions.securitygroup.py
* Removes regex validation of UUIDs in dhcp_agent
* Use uuidutils.is_uuid_like in quantum.extensions.l3
* Implements _validate_uuid
* Use uuidutils for uuid validation
* Drop lxml dependency
* Testcase of listing collection shouldn't depend on default order of db query
* Add uuidutils module
* Log loaded extension messages as INFO not WARNING
* db_base_plugin_v2.QuantumDbPluginV2.create_port clean-up
* Clean-up comments in quantum/db/l3_db.py
* Import order clean-up
* let metaplugin work with plugin which has no l3 extension support
* Ensure that HTTP 400 codes are returned for invalid input
* Use openstack common log to do logging
* Put gw_port into router dict result
* Add check for cidr overlapping for adding external gateway
* Fix unnecessary logging messages during tests
* support 'send_arp_for_ha' option in l3_agent
* pin sqlalchemy to 0.7
* Remove unused metaplugin agents
* Get subnets of router interfaces with an elevated context
* Support external network in probe-create
* remove unused modules for linuxbridge/ovs plugin agent
* Chmod agent/linux/iptables_manager.py
* Quantum Security Groups API
* Make create_floatingip support transaction
* Update policies
* Notify about router and floating IP usages
* Fix exception when port status is updated with linux bridge plugin
* Call iptables without absolute path
* Delete the child object by setting the parent's attribute to None
* Add unit tests for the ovs quantum agent
* Add MTU support to Linux bridge
* Correct Intended Audience
* Add OpenStack trove classifier for PyPI
* use object directly instead of the foreign key to update master db object
* Remove database access from agents
* Fix database clear when table does not exist
* IP subnet validation fixes
* Update default base database to be V2
* Update common
* add test for create subnet with default gateway and conflicting allocation pool
* Logging indicates when service starts and terminates
* Ensures port is not created when database exception occurs
* Improve unit test times
* Add control_exchange option to common/config.py
* Treat invalid namespace call
* get_network in nvp plugin didn't return subnet information
* tests/unit/ryu/test_ryu_db: db failure
* correct nvplib to update device_id
* Update rpc and notifier libs from openstack.common
* Add quantum-usage-audit
* Fix filters default value in get_networks
* l3_nat_agent was renamed to l3_agent and this was missed
* Update vif driver of Ryu plugin
* Support for several HA RabbitMQ servers
* Correct the error message in the class NoNetworkAvailable
* Fix flag name for l3 agent external network id
* clean notification options in quantum.conf
* Add log setting options into quantum.conf
* Warn about use of overlapping ips in config file
* Do global CIDR check if overlapping IPs disabled
* Fix rootwrap filter for dnsmasq when no namespace is used
* Add common popen support to the cisco plugin
* Use sqlite db on file for unit tests
* Uses a common subprocess popen function
* remove default value of local_ip in OVS agent
* Remove a function that is not used
* add rootwrap filter for 'route', used by l3-agent
* l3-agent: move check if ext-net bridge exists within daemon loop
* Add catch-all try/catch within rpc_loop in ovs plugin agent
* Fix OVS and LB plugins' VLAN allocation table synchronization
* ZMQ fixes for Quantum from openstack-common
* Restore SIGPIPE default action for subprocesses
* Fix for flat network creation in Cisco plugin
* Removes test description that is no longer valid
* Modified code Pyflakes warning
* Fix deadlock of Metaplugin
* remove unittest section from nec plugin README file
* remove unittest section from ryu plugin README file
* Fix for DB error in the Cisco plugin
* correct the misnamed phy_brs to phys_brs
* NVP plugin missing dhcp rpc callbacks
* make README point to real v2 API spec
* README file changes for Cisco plugin
* fix for nested rootwrap checks with 'ip netns exec'
* always push down metadata rules for router, not just if gateway exists
* Removed eval of unchecked strings
* Update NVP plugin to Quantum v2
* ovs-lib: make db_get_map return empty dict on error
* Update l3-agent.ini with missing configuration flags
* Sync a change to rpc from openstack-common
* Fix for failing network operations in Cisco plugin
* add missing files from setup.py
* Add quantum-nec-agent to bin directory
* remove unneeded shebang line in quantum debug
* make rootwrap filters path consistent with other openstack projects
* Bump version to 2013.1, open Grizzly
* Fix lack of L3 support of NEC OpenFlow plugin
* Add a new interface driver OVSVethInterfaceDriver
* Ensure that l3 agent does not crash on restart
* make subnets attribute of a network read-only
* Exclude openstack-common from pep8 test
* Ensures that the Linux Bridge Plugin runs with L3 agent
* Remove an external port when an error occurs during FIP creation
* Remove the exception handler since it makes no sense
* Add enable_tunneling openvswitch configuration variable
* Create .mailmap file
* Update default policy for add/remove router interface to admin_or_owner
* Add periodic resync check to DHCP agent
* Update metaplugin with l3 extension update
* Add DHCP RPC API support to NEC OpenFlow plugin
* Remove an external interface when router-gateway is removed
* openvswitch plugin does not remove inbound unicast flow in br-tun
* Remove default name for DHCP port
* Added policy checks for add interface and remove interface
* allow multiple l3-agents to run, each with one external gateway net
* Floating-ip and ext-gateway ports should prevent net deletion
* fix generation of exception for mismatched floating ip tenant-ids
* Give better error to client on server 500 error
* Change 422 error to 400 error
* Add IP version check for IP address fields
* Policies for external networks
* Add IP commands to rootwrap filter for OVS agent
* Modified code Pyflakes warning
* Modified code Pyflakes warning
* Modified code Pyflakes warning
* Modified code Pyflakes warning
* Modified code Pyflakes warning
* Modified code Pyflakes warning
* Modified code Pyflakes warning
* Modified code Pyflakes warning
* Modified code Pyflakes warning
* Fix broken L3 support of Ryu plugin
* check subnet overlapping when adding interface to router
* add local network type and use by default for tenant networks
* Fix data passed to policy engine on update
* remove incorrect mock assert_called in unit tests
* Fix dhcp agent rpc exception handling
* Add missing include for logging when log_config is used
* Modified code Pyflakes warning
* Modified code pyflakes warning
* Improve error message when flat network already exists
* Lower webob dep from v1.2.0 to v1.0.8
* Allocation pool creation should check if gateway is in subnet
* Make sure floating IPs + gateways must be on external nets
* restart dnsmasq when subnet cidr set changes
* suppress dhcp router opt for subnets with null gw
* add rootwrap filters to wrap ip netns exec
* Implements agent for Quantum Networking testing
* Quantum dhcp crashes if no networks exist
* Update with latest code from openstack-common (stable/folsom)
* Fixes undefined variable 'network_type' in OVS agent
* Create utility to clean-up netns
* Fix lack of L3 support of Ryu plugin
* Ensure that port update sets correct tag in OVS
* ovs_lib unable to parse return when port == -1
* L3: make use of namespaces by agent configurable
* Fix error in rule for metadata server dnat
* Fix programming error of ryu-plugin
* Ensure network delete is handled by OVS agent
* Implement L3 support in Metaplugin
* Fixes agent problem with RPC
* netns commands should always run in the root ns
* Add lease expiration management to ip recycling
* misc L3 fixes
* expose openvswitch GRE tunnel_id via provider API
* Do not transfer ips if there aren't any
* prevent invalid deletion of ports used by L3 devices
* Modified code PEP8 warning
* Implementation of 2nd phase of provider extension for openvswitch
* Mangle network namespace name used by dhcp_agent
* Update rootwrap; track changes in nova/cinder
* remove policy check for host_routes in update_port
* Ensure proper validation for l3 API attributes
* Cisco nexus sub-plugin update_network fix
* Fix dhcp option distribution by dnsmasq
* fix bug where network owned resources block delete
* Plugin aware extensions should also be reset at each test setup
* Ensure network connectivity for linuxbridge flat network
* Execute unit tests for Cisco plugin with Quantum tests
* prevent OVS + LB plugins from clearing device_id and device_owner
* updated outdated comments in base v2 plugin class
* clear db._ENGINE for each plugin init in Metaplugin
* Enable tox to run OVS plugin unit tests
* Allow tox to run plugin specific unit tests
* fixes cisco nexus plugin delete network issue
* Fix Metainterface driver with namespace
* Add lease expiration script support for dnsmasq
* Remove 'verbose' API capability
* PEP8 issues fixed
* removed some unused global variables
* Update TESTING file
* Typo fix in quantum: existant => existent
* Add DHCP RPC API support to Ryu plugin
* Run core unit tests for each plugin
* OVS plugin tunnel bridges never learn
* Add nosehtmloutput as a test dependency
* fix typo in OVS plugin from recent bugfix
* enable router deletion logic in l3-agent
* Enable users to list subnets on shared networks
* Fix IP allocation on shared networks ports
* Move metaplugin test to common test directory
* Enable DHCP agent to work with plugin when L2 agents use DB polling
* fix associating a floating IP during floating IP creation
* Ensure that LB agent does not terminate if interface already exists in bridge
* Treat exceptions when invoking ovs-vsctl
* Remove v1.0 and v1.1 API from version info
* Get OVS port details from port ID
* Fix undefined variables
* Fixing unit test failures in Cisco plugin
* fix netns delete so that it works when a ns is set
* Linuxbridge support for L3 agent
* Fix exception message for bulk create failure
* quantum l3 + floating IP support
* Add missing conversion specifiers in exception messages
* Use a common constant for the port/network 'status' value
* Remove unused variable
* Log message missing parameter causes exception
* Update README for v2 API
* Fix flavor extension based on new attribute extension spec
* Update the Nicira NVP plugin to support the v2 Quantum API
* Enhancements to Cisco v2 meta-plugin
* Add model support for DHCP lease expiration
* Trivial openvswitch plugin cleanup
* Convert DHCP from polling to RPC
* Add quota per-tenant
* Reset device owner when port on agent is down
* Allow extra config files in unit tests
* Fix visual indentation for PEP8 conformance
* Updates pip requirements
* NEC OpenFlow plugin support
* Enables Cisco NXOS to configure multiple ports. Implements blueprint cisco-nxos-enables-multiple-ports
* Implementation of second phase of provider extension
* deal with parent_id not in target
* remove old gflags config code
* convert query string according to attr map
* Add device_owner attribute to port
* implementation for bug 1008180
* Fix bulk create operations and make them atomic
* Make sure that there's a way of creating a subnet without a gateway
* Update latest openstack files
* improve test_db_plugin so it can be leveraged by extension tests
* Adds the 'public network' concept to Quantum
* RPC support for OVS Plugin and Agent
* Initial implementation of MetaPlugin
* Make dhcp agent configurable for namespace
* Linux Agent improvements for L3
* In some cases device check causes an exception
* normalize the json output of show a given extension
* move the correct veth into the netns for the LB
* linux bridge fixes following v1 code removal
* fixes typo in ensure_namespace
* Remove v1 code from quantum-server
* Add netns to support overlapping address ranges
* dhcp-agent: Ryu plugin support for dhcp agent
* fix missing deallocation of gateway ip
* RPC support for Linux Bridge Plugin and Agent
* Implementation of bp per-net-dhcp-enable
* Enhance Base MAC validation
* Use function registration for policy checks
* Exempt openstack-common from pep8 check
* Make 4th octet of mac_range configurable
* Replace openvswitch plugin's VlanMap with vlan_ids DB table
* Remove unused properties
* Notification for network/subnet/port create/delete/update. blueprint quantum-notifications
* Make the plugin for test_db_plugin configurable
* update DHCP agent to work with linuxbridge plug-in
* ryu/plugin, agent: unbreak 610017c460b85e1b7d11327d050972bb03fcc0c3
* Add classmethod decorator to class methods of providervlan ext
* Only delete VLAN information after Quantum network is deleted
* Make quantum pipeline configurable from quantum.conf
* ovs_quantum_plugin should use reconnect_interval in common conf
* add name into port and subnet
* Update openvswitch tunnel unittest
* Enable agents and plugins to use the same configuration file
* Fix linuxbridge agent tests
* Update openstack-common files
* Initial V2 implementation of provider extension
* Implements data-driven views and extended attributes
* Add v2 API support for the Cisco plugin. Blueprint cisco-plugin-v2-api-support
* Enhance V2 validations to work better for integers and booleans
* Refactor the test cases so that all the test cases are under one test class
* Add quota features into quantum. Blueprint quantum-api-quotas
* Assume that subclass validates value of UUID
* fix bug lp:1025526, update iniparser.py to accept empty value
* Ensures policy file is reloaded only if updated
* Provide way to specify id in models_v2
* Add validity checks to Quantum v2 resources
* Avoid removal of attributes used by policy engine
* Raise proper exception if policy file does not exist
* Introduce files from openstack common
* Ensures API v2 router does not load plugin twice
* ovs-agent exception non-existent ports
* Ryu plugin support for v2 Quantum API
* Add option sql_max_retries for database connection
* Enable quantum agents to work with global cfg.CONF
* Create DHCP agent tap device from port ID
* Fix some syntax errors
* fix bug lp:1019230, update rpc from openstack-common
* Fix v2 API policy checks when keystone is in use
* implement dhcp agent for quantum
* Corrects imported modules in Cisco and Ryu according to latest nova packages
* Validate that network_id in port/subnet POST belongs to the same tenant
* Verify CIDR overlaps among networks' subnets
* Address problems with foreign keys with subnet and network deletion
* Add 'allocation_pools' to Quantum v2 API subnets
* Delete IP allocation range for subnet when deleting subnet
* Fix linux bridge plugin to be consistent with naming rules
* v2 support for the linux bridge plugin
* OVS plugin support for v2 Quantum API
* Check if interface exists in bridge prior to adding
* Ensure that subnet_id is on correct network
* Use setuptools git plugin for file inclusion
* Cisco's unplug_iface refers to non-existing exception
* Implement IP address allocation
* Enable user to configure base mac address
* Bug #1012418 - quantum agent for OVS does not install properly on Xen XCP
* Add simple file logging to ovs_quantum_agent
* Fixing pep8 warning messages Bug #1017805
* Network deletion and subnet creation bug fixes bug 1017395
* Remove paste configuration details to a separate file. blueprint use-common-cfg
* Bug 1015953 - linuxbridge_quantum_agent device_exists() is buggy
* Reorder imports by full module path
* Added iptables_manager (based on openstack/linux_net.py). This module will be the base library to implement security groups and generic firewall. It is an independent iptables module, made to be easy to package if used by agents and also inside quantum
* Unit test and Readme changes related to cisco plugin
* Implements the blueprint use-common-cfg for the quantum service. More specifically uses global CONF for the quantum.conf file
* Ensure unique mac address allocation. This is the first part of bug 1008029
* Add authZ through incorporation of policy checks
* Fix additional pep8 issues on Jenkins bug 1014644
* removed "runthis" and other unused functions from utils.py
* Linux bridge agents did not work with common linux utils bug 1014286
* Added vlan range management for OVS plugin
* Bug #1013967 - Quantum is breaking on tests with pep8 1.3
* Remove wrong base class for l2network_models after v2.0 API
* Cisco cli cannot find argument action_prefix
* Use openstack.common.exception
* Remove unused functions in common/utils.py
* API v2: improve validation of post/put, rename a few attributes
* Bug #1000406 - Return value of shell commands is not checked by plugins
* Fix python2.4 incompatibility
* Add API v2 support
* Binaries should report versions
* Fix up test running to match jenkins expectation
* Add build_sphinx options
* Remove unused imports
* Quantum should use openstack.common.jsonutils
* Remove hardcoded version for pep8 from tools/test-requires
* AuthN support for Quantum
* fix bug lp:1007557, remove unused functions in utils.py
* Add common dir for shared agent code, add OVS lib
* Bug #1007153
* Register enable_tunneling as bool opt
* Quantum should use openstack.common.importutils
* PEP8 fixes
* Bug #1002605
* Automatically determine Quantum version from source
* Fix linux bridge section name Bug #1006684
* Remove the reference to non-existing exception by linuxbridgeplugin
* bug #1006281
* Parse linuxbridge plugins using openstack.common.cfg
* Bug #1004584
* fix some pylint warnings
* fix errors in database test cases
* Log the exception so app loading issues can be debugged
* remove unneeded import from OVS agent that breaks 2.4 compat
* blueprint man-support and fix documentation build bug 995283
* Fix print error for linux bridge bindings bug 1001941
* Add HACKING.rst to tarball generation bug 1001220
* fall back to `ip link` when `ip tuntap` unavailable bug 989868
* Cisco plugin CLI call to quantumclient CLI
* Calling Super method from QuantumPortAwareScheduler.__init__
* OVS plugin: add tunnel ips to central database
* Include AUTHORS in release package
* blueprint database-common bug 995438
* bug 996163
* Bug #994758
* Change Resource.__call__() to not leak internal errors
* Let OVSQuantumTunnelAgent sync with database
* Cleaned up log usage
* blueprint agent-db-ha bug 985470 bug 985646
* Update codebase for HACKING compliance
* Make sample quantum.conf compliant with docs
* Make ovs Interface option set properly
* Removed simplejson from pip-requires
* Remove dependency on python-quantumclient
* Add sphinx to the test build deps
* Add HACKING.rst coding style doc
* return 404 for invalid api version request
* fix issue with OVS plugin VLAN allocation after a quantum-server restart
* bug 963152: add a few missing files to sdist tarball
* API docs: fix typo for network delete
* Open Folsom
* Bug #956559 VIF driver and scheduler for UCS plugin are broken since the flag configuration mechanism in nova is changed. Fixing that and also fixing some property names, along with changes to how the quantum client code is invoked
* plugin/ryu/agent: unbreak a06b316cb47369ef4a2c522f5240fa3f7f529135
* Fix path to python-quantumclient
* Split out pip requires and aligned tox file
* ryu/nova: catch up d1888a3359345acffd8d0845c137eefd88072112
* Add root_helper to quantum agents
* Fix missing files in sdist package [bug 954906]
* Fix for bug 921743 Response codes for create ops in API v1.0 not compliant with spec
* bug 954538 Fix for the cisco unit tests
* check connection in Listener. Refer to Bug #943031
* fixed incorrect duplicate title
* Fixed incorrect title for example 3.10
* Downgraded required version of WebOb to 1.0.8
* Bug #949261 Removing nova drivers for Linux Bridge Plugin
* Remove outdated content from OVS plugin README, point to website instead
* add git commit date / sha1 to sphinx html docs
* more files missing in sdist tarball
* make sure pip-requires is included in setup.py sdist
* Introducing the tenant ownership checks in the Cisco plugin, changes are almost identical to those in Bug#942713
* Fix some plugins that don't check that nets + ports are owned by tenant
* remove pep8 and strict lxml version from setup.py
* plugin: introduce ryu plugin
* bug 934459: pip no longer supports -E
* Fix bug 940732 stack.sh can't match sql_connection string
* Return appropriate error for invalid-port state in create port API
* blueprint quantum-ovs-tunnel-agent
* Initial commit: nvp plugin
* unittests: setup FLAGS.state_path properly: bug 938637
* Cleanup the source distribution
* Fix ovs config file location
* blueprint quantum-linux-bridge-plugin
* Remove quantum CLI console script
* Bug 925372: remove deprecated webob attributes (and also specify stable webob version in pip-requires)
* bug 923510: avoid querying all ports for non-detail GET Network call
* Make tox config work
* Pin versions to standard versions
* bp/api-filters This changeset implements filters for core Quantum API and provides unit tests
* Split out quantum.client and quantum.common
* Quantum was missing dependency on lxml
* bp/api-error-codes Restructured API error codes for Quantum API v1.1. This changeset provides the following changes: - Only standard HTTP errors for Quantum API v1.1 - Customized fault response body formatting according to API version - Changes to unit tests to deal with version specific status codes
* blueprint ovs-portstats
* Add support for dealing with 501 errors (notimplemented)
* Improved VlanMap
* moving batch config out of quantum-server repo
* bug 920299: remove duplicate + outdated README
* Getting ready for the client split
* Removed erroneous print from setup.py
* Fixes setup scripts for quantum plugins
* Base version.py on glance
* fix mysql port in sql_connection example
* Make the quantum top-level a namespace package
* Add __init__.py from plugin to be copied on setup scripts
* Fix lp bug 897882
* PEP8 quantum cleanup
* Install a good version of pip in the venv
* Rename .quantum-venv to .venv
* Updating Cisco README with instructions on installing the patched ncclient library
* Remove plugin pip-requires
* blueprint refactor-readme-to-manual
* Bug #890028
* Implementation of the BP services-insertion-wrapper inside the Cisco Plugin
* blueprint operational-status-ovs-plugin
* bug 903580: remove invalid extensions path from quantum.conf
* Fix for bug 902175
* Readme Fix
* blueprint api-framework-essex
* Fix for bug 900277
* Fix for bug 900316
* Modified the Readme for Unit Test Execution Instructions
* Bug 900093 Remove unused function in db/api.py
* bug #891246: Fix paths in agent Makefile
* Second round of packaging changes
* Bug 891705 Fix to change reference to the Quantum CLI from within the Cisco extensions' CLI module
* Correcting the plugins classpath in the Quantum README
* The relative path for the "ucs_inventory.ini" file has been fixed
* bug #891267: for XS, grab iface-id from XAPI directly if needed
* Changes to make pip-based tests work with jenkins
* Fix for bug 890498
* Fix for bug 888811
* Fixing find_config_file after packaging changes
* Added timeout flag to ovs-vsctl to avoid infinite waiting
* Add quantum.exceptions path to configured ext paths
* Fix for Bug #888820 - pip-requires file support for plugins
* Fixing Cisco plugin after update_* change
* Fix for bug 888207
* Fix for bug 877525
* Bug #875995: Quantum README fixes
* Change version numbers to be compatible with debian packaging
* Make the openvswitch plugin tests work again
* Switch over to update_{net,port} instead of rename_net and set_port_state
* Added try import to quantum-server and quantum-cli
* Bug 887706
* Blueprint authentication-for-quantum
* blueprint quantum-packaging
* Moved the initialization of the blade state so that the interfaces which are configured outside of Quantum are also initialized in the blade state
* fix minor double-serialization bug in client.py
* bug #863635: remove vestigial cheetah import from bin/cli
* Change the ovs plugin create_*() calls to take the kwargs param
* Changing the log messages so that they are always identified by their sub-package of origin and can be filtered on that basis
* Add .gitreview config file for gerrit
* New tests are being added to the Diablo code (Cisco L2-Network plugin), along with some fixes where the tests were failing
* Add the ability to specify multiple extension directories
* Add code-coverage support to run_tests.sh (lp860160)
* Change port/net create calls to take an additional kwargs param
* ovs plugin: Remove reference to set_external_ids.sh
* fix pep8 issues in Cisco plugin
* Remove hack for figuring out the vif interface identifier (lp859864)

2011.3
------

* Update openvswitch plugin README
* Update openvswitch plugin README
* Get output from run_tests
* Add rfc.sh to help with gerrit workflow
* merge tyler's unit tests for cisco plugin changes lp845140
* merge salv's no-cheetah CLI branch lp 842190
* Addressing Dan's comment on output generator
* merge sumit's branch for lp837752
* merge salv's branch for bug834013
* merge salv's branch for keystone token on client bug838006
* merge rohit's db test branch: lp838318
* merge salv fix for bug 841982, fix minor pep8 violation
* merge salv fix for bug834008
* Changes to address Salvatore's review comments, removed unnecessary imports, and changed a debug message
* changing key names to conform to api specs
* Merging latest from lp:quantum
* Merging lp:~salvatore-orlando/quantum/quantum-api-auth
* Implementing Dan's suggestion concerning fixing the bug in db api rather than FakePlugin
* Fixing bad indent
* syncing diverged branches
* merging from lp:quantum
* merging from lp:quantum
* Updating CLI to not use Cheetah anymore. Now using a mechanism based on Python built-in templates
* Fixing the bug in FakePlugin
* made general exception handling messages consistent, removed LOG pylint errors, cleanup in tests
* Create operation now generates a response with status code 202
* restoring correct default pipeline
* Merging from lp:quantum
* Add information about quantum dependency for nova
* merge salv's branch to remove dummy plugin
* Changing communication between the UCSM driver and the UCSM to HTTPS
* Adding CLI usage examples to the README
* Adding client-side support for Keystone integration
* Keystone-integrated pipeline should not be default in quantum.conf
* Removing class DummyDataPlugin
* Removed redundant configuration, and added more comments in the configuration files
* Updating the README file
* Merging Shweta's test cases for multiport resource
* Adding Multinic tests
* Typo fix in README
* Merging Sumit's changes including fixes for multinic support, and CLI module for working with extensions
* More fixes for multi-nic support
* Fixed a bug with plug_interface
* Merging from Cisco branch
* Changes to incorporate earlier review comments, also for multiport resource
* adding quantum database unit test cases
* Merging changes from Ying's branch (new multiport resource)
* add multiport and exception handling
* add multiport resource
* Merging from lp:quantum
* Avoiding deserializing body multiple times with several parameters
* merge cisco consolidated plugin changes
* Test on param_value changes as follows:
* Merging lp:~salvatore-orlando/quantum/bug834449
* Merging Ying's changes (minor)
* fix print statements in novatenant and portprofile
* merge trunk
* Minor refactoring
* Changes to l2network_plugin for create_ports and pylint fixes to cli.py
* Modified CLI to handle both core and extensions CLI
* merge trunk
* lp835216 client lib was not passing in kwargs when creating exceptions
* lp834694 fix integrity error when deleting network with unattached ports. Add unit test
* Minor fix in delete_port
* merging changes from cisco consolidated branch
* Fixes to support multinic
* Merging fixes from Sumit's branch for the extension API version number and to the UCS inventory to associate VIF-ID with ports
* Merging from the Cisco branch
* adding new api methods using just port_id
* Fixing the extensions URL to 1.0 and pep8 error
* bug fixes to handle multinic
* Merging Shweta's fix for extensions' test cases (clean up was not happening completely)
* Adding Network and Port clean up functions for portprofile unit tests
* Merging from lp:quantum
* Merging Shweta's fixes in the tests for key name changes in the Core API
* make CLI show_port command display interface-id, add additional test case
* merge salvatore's new cli code
* Dictionary key value changes in test_extension
* Merging lp:quantum, resolving conflict
* merge two pep8 branches
* Merging Ying's pep8 fixes
* fix pep8 issues
* Merging quantum trunk
* fix pep8 warnings
* Updating common/extensions.py in order not to instantiate a QuantumManager when retrieving plugin
* Cleaning pep8
* Merging lp:~danwent/quantum/lp834491 Fixing Bug #834491: api alignment merge broke ovs plugin (Critical)
* Addressing comments from Dan
* Merging from quantum
* merge cisco extensions branch
* lp834491: change plugin to work with API code after the API alignment merge
* Merging Shweta's fixes to the test cases for the extensions
* Added Extension & ucs driver test changes and fixes
* Merging from Sumit's branch, changes to VIF-driver and Scheduler; extension action names have been changed in response to Salvatore's review comments in the extensions branch review
* Syncing with Cisco extensions branch
* Merging changes from Sumit's branch
* Changes qos description to string; changes extension API names for get_host and get_instance_port
* Merging Ying's branch
* change get_host and get_instance_port function names
* Cleaning (removing) unused code, hooray! Fixes for extension tests
* Correctly sorting all imports for the Nexus Driver and Unit Test
* Fixed the Unit Test for Nexus Driver
* add cisco_faults under l2network package
* move faults/exceptions to l2network package, remove unnecessary fault definitions, change the portprofile action api's method, fix import order and other comment issues
* Merging from Sumit's branch, import ordering related changes
* Changing the order of imports (to satisfy convention)
* Merging the Cisco branch
* Updating README according to Somik's comment
* Finishing cli work. Fixing bug with XML deserialization
* Completing Unit Tests
* Merging lp:~salvatore-orlando/quantum/quantum-api-alignment
* Configuration of multiple VLANs on the same Nexus Switch Interfaces
* Adding unit test for rename_network
* Added logging to syslog or a file specified at the command line, removed plugin direct mode, fixed unit tests to reflect changes in cli code, fixed pep8 errors
* Merging from Sumit's branch
* Fixed some bugs with credential and qos resources; also fixed l2network_single_blade
* Merging Rohit's changes
* helper function to get creds based on name
* integration with l2network_plugin.py
* fixing relative import in nexus_db.py
* putting in db support for creds and qos
* merge latest quantum branch and resolve conflicts
* Merging lp:~asomya/quantum/lp833163 Fix for Bug #833163: Pep8 violations in recent packaging changes that were merged into trunk (Critical)
* Addressing Somik's comment
* Templated output for CLI completed!
* PEP8 fixes for setup.py
* delete quantum/common/test_lib.py to prepare for quantum merge
* Made changes according to reviewer's comments. Add additional information on extension test in README
* Merging changes from Sumit's branch
* Merging lp:~cisco-openstack/quantum/802dot1qbh-vifdriver-scheduler
* Merging lp:~cisco-openstack/quantum/l2network-plugin-persistence
* Fixed a bug in the initialization of the UCS inventory; fixed another bug in deleting a port
* Noticed some pep8 errors, fixed them
* Merging lp:quantum
* Changes to incorporate reviewer's comments. Also changed client.py to handle extension URLs
* Review Changes
* remove unnecessary code and sync faults and exception handling
* Code changed based on reviews; pep8 passed, pylint 9.10
* merging with lp:quantum
* merging from lp:quantum
* Fixes based on review comments
* Addressing comments from Ziad and Somik
* merge lp:~bgh/quantum/lp837174
* Fix unit test printing (lp837174)
* Fixing issue in view builders concerning attachment identifiers
* Code clean up as per reviewer's request; documentation strings, unused code, etc
* Rewording of the README file to clarify the use of the SSH port
* clean up code and fix some comments
* clean code and fix some comments
* Merging from Sumit's latest branch - Fixed loading of Nexus DB tables; moved imports to l2network_db.py; Refactoring of code to generalize inventory handling (enhancement)
* Fixed loading of Nexus DB tables; moved imports to l2network_db.py, changes discussed & approved by Rohit
* Making Keystone version configurable
* Accidentally took quantum.conf out of branch. Now back in
* Merging lp:~raxnetworking/quantum/bug827272
* Merging branch: lp:~danwent/quantum/test-refactor
* Removing "excess" file
* Missed adding a file earlier, fixed a small issue
* Refactoring of code to generalize inventory handling (enhancement)
* Merging UCS inventory state initialization fix from Sumit's branch
* Fixes an issue with loading the UCS inventory when a dynamic nic has been used outside of Quantum
* Removed obsolete instructions from README
* Changes to reflect the new features (multi-blade, multi-chassis support)
* Changes to support calls from VIF Driver and Scheduler
* Pep8, pylint fixes
* fixing pep8 error
* adding helper function for port binding model
* UCS inventory persistence and pep8/pylint fixes
* UCS persistence fixes
* added new columns to models for ucs plugin multi-blade support, updated methods in ucs_db for newly added columns, changed column dynamic_vnic_id in port binding table to blade_intf_dn, updated tests to handle new column name
* Merging rohit's UCS persistence support
* UCS plugin persistence
* Persistence support for UCS plugin network
* adding utility functions to create dictionaries
* Merging changes from Rohit's branch
* Merging changes from cisco extensions
* added ucs plugin related exceptions in cisco_exceptions.py, added ucs plugin persistence related modules - ucs_models.py and ucs_db.py, added ucs db related unit tests in test_database.py, fixed formatting in l2network_models.py and test_database.py
* Adding some error checks
* Reduced excessive logging
* Several fixes to initial version
* fixing the test_database.py tests
* pylint and pep8 fixes
* Change profile-id
* merged Shweta's branch for ext test. Minor fix for review comments
* Review Changes
* merged Shweta's ext test branch
* Initial commit with lots of changes
* Moved the conf file under the cisco directory
* Moved the conf file under the cisco directory
* Updated conf file
* Adding Extension API unit tests
* Syncing with lp:quantum
* Code refactored, made changes as per reviewer's suggestions
* sync up with l2network exception handling for extension
* merged Cisco branch's latest changes
* Adding changes from Sumit's latest merge
* merge with lp:~cisco-openstack/quantum/l2network-plugin-extensions
* replace exception handler by using cisco_exceptions
* Raising exceptions in extension resources handling (where missing). Changing exception name to QosNotFound
* Changing exception name to QosNotFound
* Merging from Cisco branch
* Raising exceptions in extension resources handling (where missing)
* Merging fixes to client side exception handling. Thanks lp:tylesmit!
* Merging fixes and changes to the batch-config script. Thanks lp:danwent!
* Adding the Nexus support to the Persistence Framework. Modification of the Nexus Unit Case to run with the Persistence Framework. pep8 passed, pylint 8.81/10
* added nexus exception in cisco_exceptions.py, added log to methods in l2network_db.py, added nexus_db.py and nexus_models.py - persistence modules for nexus plugin
* add plugins.ini back
* add all conf/*.ini back
* merge with ying's branch
* merging with Ying's extension branch
* remove ying's test ciscoplugin
* remove all configuration files
* remove cisco_demo and test_scripts directory, which were used by our local tests
* Removed concatenation per review comments
* change the configuration files to the default values
* pylint and pep8 fix
* merging with ~cisco-openstack/quantum/l2network-plugin-extensions
* fix pylint issues
* Making keystone integration optional in quantum configuration
* Merging bug fix for Bug 821733. Thanks lp:salvatore-orlando!
* Fixing typo
* Making the client raise the appropriate exception if needed. Also increasing the pylint score to above 8
* pep8 error fixed for l2network_db.py
* Merging Sumit's branch with plugin support for Credentials, QoS, NovaTenant resources. Also merging latest from lp:~cisco-openstack/quantum/l2network-plugin-persistence
* Merging from Sumit's branch, VIF-driver and Quantum-aware scheduler
* Removed extra spaces to satisfy pep8
* VIF driver for 802.1qbh and Quantum aware scheduler
* fix some pylint issues
* Pylint and pep8 fixes
* Changes to support credentials, qos, and novatenant extensions
* Removing unused error response codes
* Merging lp:~asomya/quantum/lp824145 Fix for Bug #824145: Adding a setup script for quantum
* merge trunk, pep8 fixes, adapting CLI to API v1.0, fixing wsgi to avoid failure with extensions
* Fixed indentation and changed file comments
* add extension change to ying's branch
* merge trunk
* Pulling in changes from lp:quantum
* Merging Cisco's contribution to Quantum. Thanks to various folks at Cisco Systems, Quantum will have plugins to integrate with Cisco UCS blade servers using 802.1Qbh, the Cisco Nexus family of switches, and the ability for a Quantum plugin to have multiple switches/devices within a single Quantum plugin
* Merging Shweta's change to fix a function call in the test code
* Adding the changed UCS Driver function names in test_ucs_driver
* Santhosh/Deepak | Fixed an issue where collection actions for PUT and DELETE methods in resource extension were routing to update and delete action of the resource
* Merging from Sumit's branch: pylint fixes and incorporating review comments
* Changes to README file and merging Shweta's changes
* Merging Shweta's test changes, also README file
* Changes to test structure. Adding pylint corrections
* Fixes to the README file per earlier review comments. Also removed main from one of the modules
* Merging from Cisco branch
* Merging from lp:quantum
* Pulling changes from Cisco branch
* Pylint fixes
* exit unit tests if tests are invoked specifying a particular test
* Merging Nexus pylint changes and other enhancements from Edgar
* pep8 passed, pylint 8.83
* Merging Rohit's changes
* Partial commit
* Moved test_database.py to plugins/cisco/tests/unit/, edited test_database.py to be able to run like other tests; pylint for cisco/db folder - 8.85/10, pylint for cisco/tests/unit/test_database.py - 8.42/10, pep8 done
* Adding a new file with all the XML snippets to make code easier to read; moving the Nexus SSH server port to the configuration file; removing main functions; making some changes based on Dan and Salvatore reviews
* Changes in the README file to incorporate Somik's comments
* pylint changes - pylint score for cisco/db folder - 8.27/10; pep8 checks done
* Removing extra testing function on Nexus Driver
* Merging plugin and tests' changes
* Fixes to the tests which were breaking, including fixes to the test cases
* Pulling in changes from Rohit's branch
* Pulling in changes from Shweta's branch
* Removed main from modules as per review comments
* updated README file to include persistence framework setup instructions; updated db api.py unset_attachment method to return port; moved db_conn.ini into cisco/conf/ with other configuration files; updated l2network_plugin_configuration.py to get db config; cleaned up l2network_db.py - removed config parser code as using cisco config parser; updated l2network_db.py to raise specific exceptions in error cases; updated create_vlanid method in l2network_db.py to not raise exception if vlan rows exist; updated portprofile and portprofile_binding methods to include tenant_id as an argument; added cisco/db/test_database.py containing unit tests for quantum and l2network_plugin tables; edited get_pp_binding method in l2network_db.py to return empty list when no results found; pep8 checks done
* Adding Persistence unit test
* Fixed bugs while testing
* pep8 errors fixed
* Merging rohit's changes
* Changes to support persistence framework
* Merging: lp:~danwent/quantum/client-lib
* Merging: lp:~tylesmit/quantum/api-client-fix-serialization Adding automatic serialization to all requests by moving it to do_request
* First, trivial, implementation of authN+authZ
* fixes from rohit's branch
* from rohit's branch
* Adding more templates. More tests
* - Added new tables VlanID to generate ids and maintain usage of vlans - Added wrapper functions to get next unused vlan, populate vlans, release vlans, getall vlans, isused vlan and delete vlan - Added portid instead of networkid for portprofile binding table - Changed wrapper methods and test cases for portprofile binding to use portid
* Adding missing files to branch
* Simplifying condition
* Fixing missing 'output' variable @ line 243 (syntax error)
* Adding automatic serialization to all requests by moving it to do_request
* added network and port models similar to quantum with following changes: - InnoDB as storage engine to allow foreign key constraints - joinedLoad operation on the queries to make use of relation between Network and Port. Moved out the network and port code to make l2network contain vlanbinding, portprofile and portprofile bindings
* Authentication with Keystone. auth_token middleware tweaked and imported in Quantum tree. Developing authorization middleware
* Introducing cheetah. Updating list_nets in CLI. Writing unit tests for list_nets. Stubbing out with FakeConnection now
* I'm too tired
* Stubout work in progress
* Merging quantum extensions framework into trunk. Thanks rajaram vinkesh, deepak & santhosh for the great work!
* - added network and port models into the l2network plugin instead of using quantum models - added api methods for network and ports - restructured code to use the l2network network and port - added l2network base class for other tables to inherit - added support for l2network plugin model objects to behave like dictionary (gets rid of code to convert objects into dictionaries) - added foreign key constraints to l2network plugin model attributes representing columns - added attributes to represent relation between models in l2network plugin - added joinedload only to network and port (need to do for others) - added InnoDB as the storage medium in base table for imposing foreign keys - updated l2network test cases to handle foreign key constraints
* lp Bug #824145: Adding a setup script for quantum
* skeleton for cli unit tests
* merge trunk
* Removing exceptions as well (previously only API faults were removed)
* Merged quantum trunk
* adding renamed client-lib tests
* Tiny change to the README file, instructions on how to get ncclient
* - Adding setup script
* Adding db connection and l2network plugin database modules
* update CLI to use show instead of list for calls that do not return a list
* rename client_lib unit tests so they are run by ./run_tests.sh, update tests to handle name changes
* force batch_config.py to use json, as XML has issues (see bug: 798262)
* update batch_config.py to use new client lib, hooray for deleting code
* Changed to default plugin class name
* Rajaram/Vinkesh | Added examples of scoping extension alias in request and action extension
* Added tests directory to list of modules in the README file
* Added "tests" directory to the list of modules in the README file
* Adding the required build for Nexus support
* Merging changes addressing Bug #802772. Thanks lp:danwent!
* Merging bugfix for Bug 822890 - Added License file for Quantum code distribution
* Fixed typo in README
* README file updates (pointer to Nova Cactus branch), and numerous other edits based on Mark's template
* L2 Network Plugin Framework merge
* Incorporated changes in response to review comments from Ram
* Adding Apache Version 2.0 license file. This is the official license agreement under which Quantum code is available to the Open Source community
* Making a check for the presence of the UCS/Nexus plugin (earlier it was not done in certain cases). With this change, if the UCS/Nexus plugins are not enabled, the core API tests can be run even on Ubuntu (and RHEL without the requirement of any specific network hardware)
* Merging test cases from Shweta's branch, and further modified README file
* Merging the test framework from Shweta's branch
* decluttering _parse_request_params method for QuantumController
* Fixing detail action for port collection. Adding PortIsDown exception. Adding unit tests for detail actions and PortIsDown. PEP8 fixes
* Adding Unit Test Cases Now
* Adding Cisco Unit Tests
* minor enhancements to quantum client-lib
* RHEL limitation updated
* Adding support for expressing format through Content-Type header. Adding action detail for port resource (Member & Collection)
* Changes to enhance L2 network plugin framework
* undo unintentional formatting change in run_tests.sh
* remove unneeded __init__
* refactoring testing code to support plugin tests
* Added QuantumPluginBase as the base class for the l2network_plugin
* Generalized and put placeholders
* another merge
* pep8 cleanup, restore defaults
* Added info about ssh conf required for nexus switch
* merge
* remove unneeded tests from ovs_quantum_plugin
* Nexus plugin classpath was incorrect, fixed it
* Edits to reflect conf changes, made it easier to follow
* merge heckj's pip-requires fixes
* Fixed issue with creating new port profiles (one configuration parameter got left out during the migration to the new configuration scheme). Also fixed a bug in the calculation of the profile id
* Fixes the broken call to second level of plugins. Renaming will work now
* updates to pip-requires for CI
* Loading of device-specific plugins and drivers is done dynamically by setting configuration. All configuration is driven through configuration files placed in the conf directory. Each .ini conf file contains info on the configuration. README file updated to reflect all the changes. Fixed issue with delete_network deleting the network even when attachments were present. Fixed issue with port id generation
* Deepak/Vinkesh | Fixed show action in extension controller to return 404, added example to include namespace in a request extension
* Merged quantum trunk
* Santhosh/Vinkesh | Added extension_stubs file
* Removing extra file in Nexus Driver
* Removing extra file in Nexus Driver
* Relabelling API version to 1.0!
* Cosmetic changes to unit tests for client library. Pep8 fixes
* Removed quantum/plugins/cisco/db/ and quantum/cisco_extensions since these will be merged separately
* Adding conf directory for configuration files
* Fixed pep8 error
* Merging changes
* Merging changes from lp:quantum
* Fixed an issue selecting the right port interface and also properly switching off the Nexus Interface
* Completing API spec alignment. Unit tests aligned with changes in the API spec
* Applying fix for bug #814518 Merging from lp:~salvatore-orlando/quantum/bug814518
* Adding controller and view builder for attachment resource
* Merging the port profile client name fix
* Earlier fix resulted in a different issue (profile client name was also being used as profile name, hence breaking)
* Truncated the port profile client name length to 16 characters (ucsm expects max 17 chars)
* Merging fix for Bug 818321
* Merging approved OVS plugin configuration change branch. Thanks lp:danwent!
* Merging the brand new Quantum-client-library feature
* Requests now send the Content-Type in the HTTP request
* fix broken flush in db.network_destroy, pep8 fixes
* req/res alignment complete. Status code alignment ALMOST complete (need to sort out 200 vs 202 for create ops)
* Vinkesh | Changed import order according to pep8 recommendations
* Including a flag to activate the NX-OS driver. Updating the README documentation
* merging branch for bug802772, which this branch is stacked on top of
* WIP. Still need to align APIs for interface plug/unplug
* Fixing pep8 errors
* Adding the Nexus OS driver based on the new PlugIn structure
* fix incorrect handling of duplicate network name, add exception for duplicate network name, and add unit test to confirm detection
* WIP
* Merging lp:quantum updates
* Fixing syntax issue. I had a 2.7+ style dict comprehension, so I made it 2.6 friendly
* Removing a debugging line
* pep8 fix
* Fixing API behaviour for throwing 400 error on invalid body. Adding unit test for creating a port without request body
* make ovs plugin pay attention to port state
* persistence of l2network & ucs plugins using mysql - db_conn.ini - configuration details of making a connection to the database - db_test_plugin.py - contains abstraction methods for storing database values in a dict and unit test cases for DB testing - l2network_db.py - db methods for l2network models - l2network_models.py - class definitions for the l2 network tables - ucs_db.py - db methods for ucs models - ucs_models.py - class definition for the ucs tables; dynamic loading of the 2nd layer plugin db's based on passed arguments; Create, Delete, Get, Getall, Update database methods at Quantum, L2Network and Ucs; unit test cases for create, delete, getall and update operations for L2Network and Ucs plugins; pep8 checks done; branch based off revision 34 plugin-framework
* Vinkesh/Santhosh | Moved the stub classes in test_extensions to a separate file extension_stubs
* Merged from trunk
* bug802772 update exception handling in OVS plugin to use API exceptions
* merged the latest changes from plugin-framework branch - revision 39; conforming to the new cisco plugin directory structure and moving all db related modules into cisco/db folder; updated db_test_plugin.py - added import of cisco constants module - added LOG.getLogger for logging component name - updated import module paths for l2network_models/db and ucs_models/db to use the new directory structure - updated (rearranged) imports section to obey openstack alphabetical placement convention; updated db_conn.ini - updated database name from cisco_naas to quantum_l2network; unit test cases ran successfully and pep8 checks done again
* removing a few additional lines that aren't needed once we don't calculate port count
* Adding a tests directory, this can be used for plugin-specific test cases
* also remove line that computes portcount, as it is unneeded now that we don't return it
* Including copyright info
* merge branch to fix bug817826
* For the modules to get added, missed in the earlier checkin
* remove PortCount attribute of network object, as it is not in the spec and was causing us to hit bug 818321 (note: this commit does not fix the underlying problem with xml deserialization, it just makes sure we don't hit it with the existing API code)
* Changed the directory structure to a more organized one. Fixed the imports to reflect the new structure
* Merging the latest changes from lp:quantum
* change default integration bridge from br100 to br-int to reflect new default for OVS vif-plugging in nova Diablo-3 release
* fix bug 817826 and similar error in batch_config.py
* persistence of l2network & ucs plugins using mysql - db_conn.ini - configuration details of making a connection to the database - db_test_plugin.py - contains abstraction methods for storing database values in a dict and unit test cases for DB testing - l2network_db.py - db methods for l2network models - l2network_models.py - class definitions for the l2 network tables - ucs_db.py - db methods for ucs models - ucs_models.py - class definition for the ucs tables; dynamic loading of the 2nd layer plugin db's based on passed arguments; Create, Delete, Get, Getall, Update database methods at Quantum, L2Network and Ucs; unit test cases for create, delete, getall and update operations for L2Network and Ucs plugins; pep8 checks done; branch based off revision 34 plugin-framework
* merge Salvatore's api branch with fixes for tests. Tweaking branch to remove unwanted bin/quantum.py as part of merge
* Merging in main repo updates
* Updating to fix some SSL issues
* Removing extra quantum.py file from source control; removing unused import from quantum/api/__init__.py
* Apply fix for bug #817813 Merging lp:~danwent/quantum/bug817813
* Apply fix for bug #814012 Merging lp:~danwent/quantum/bug814012
* Apply fix for bug #814517 merging lp:~tylesmit/quantum/quantum-bug-814517
* bug 817813: default provider in plugins.ini accidentally changed. Changing it back to FakePlugin
* Changed the param name "network-name" to "net-name" since the Quantum service expects the latter
* Removing some legacy code from the unit tests
* Adding unit tests to cover the client library
* Changing the CLI to use the new client library
* Adding refactored API Client
* pep8 fixes
* fix bug 814012, add unit tests for it
* Resolving Bug 814517 which caused XML to have extra whitespace
* Vinkesh/Santhosh | Removed loading extensions from 'contrib' and fixed an indentation bug while loading extensions
* Santhosh/Rajaram | modified extensions section in README
* Rajaram/Santhosh | Added logging to the PluginAwareExtensionManager failures
* Rajaram/Santhosh | Added plugin interface in foxinsox and Updated README
* Rajaram/Santhosh | quantum manager loads plugin only once, even though both extension middleware and APIRouter call it
* Santhosh/Rajaram | latest merge from quantum and made extensions use options to load plugin
* Apply fix for bug #797419 merging lp:~salvatore-orlando/quantum/bug797419
* Re-fixing issues with XML deserialization (changes got lost in merges with trunk). Adapting assertions in unit tests merged from trunk to reflect changes in the API due to RFE requested by Erik Carlin
* Rajaram/Vinkesh | Plugins advertise which extensions they support
* Merging branch lp:~salvatore-orlando/quantum/bug802892 Fixing bug #802892
* Merging branch lp:~netstack/quantum/quantum-unit-tests
* Fixing silly pep8 error
* doh
* Restoring quantum_plugin_base to previous state. Will discuss in the future whether to allow the API layer to pass options to plugins upon initialization
* Vinkesh/Santhosh | Added tests to check the member and collection custom actions of ResourceExtensions
* Vinkesh/Deepak | Moved plugin related checks in ExtensionManager code to PluginAwareExtensionManager
* Deepak/Vinkesh | Added a base abstract class which can be inherited by PluginInterface class which defines the contract expected by extension
* Vinkesh/Deepak | Added doc and small refactoring
* Unit tests for API completed; fixed pep8 errors
* Add TESTING document: description and policies for quantum tests
* Adding more unit tests
* Deepak/Santhosh | ExtensionManager verifies that plugin implements the interface expected by the extension
* Santhosh/Deepak | Made supports_extension method optional for plugin, plugin will be loaded only once
* Merged from quantum trunk
* Santhosh/Deepak | Load extensions supported by plugin
* add extension code in (last push did not include this directory)
* add api extensions (including portprofiles resources and associate/disassociate actions)
* Changes to support port-profile extension. Fixed an error in the README file
* Very initial version of the nxos driver ... let's call it ver 0.0.1!
* Removing code related to functional tests
* Porting shell script get-vif.sh to python module get-vif.py for cisco ucsm module
* Required for recognizing the "cisco" package. Missed in the initial checkin
* Applying fix for bug #804237 from branch lp:~salvatore-orlando/quantum/bug804237
* minor pep8 fix
* Changed some credentials (does not affect functionality)
* This file is not required
* Initial checkin for the L2-Network Plugin with all the associated modules and artifacts
* Rajaram/Santosh | misc readability improvements to extension tests
* Santosh/Rajaram | added extension test to show header extensibility
* Rajaram/Vinkesh | Added tests to confirm extensions can edit previously uneditable field
* removing pep8 errors
* Added more unit tests for API. Starting work on functional tests, importing code from Glance
* Now REALLY using in-memory db
* Adapted plugin infrastructure to allow API to pass options to plugins. Now using in-memory sqlite db for tests on FakePlugin. teardown() now 'resets' the in-memory db. Adding unit tests for APIs
* Fixing error introduced in find_config
* Removing excess debug line
* Fixing syntax errors in db/models.py
* Temporary commit
* Now loading plugin before setting up routes. Passing same plugin instance to API controllers
* Adding unit test. Applying pep8 fixes
* Starting implementation of unit tests. Fixing minor bugs with FakePlugin
* Removing static data for FakePlugin
* - Unit tests will use FakePlugin - FakePlugin adapted to db API with sqlite - db Models updated to inherit from generic Quantum Base model (provides utility functions and capabilities for treating db objects as dicts - see nova.db.models.NovaBase) - functional tests commented out temporarily. Will un-comment when code for starting actual service is in place
* Adding Routes>=1.12.3 to tools/pip-requires
* Work in progress - just starting
* ...and again!
* I hope I get the commit right now
* removing "quantum" folder as well from etc
* removing api-paste.ini
* Addressing comments from Somik
* Merging dan wendlandt's bugfixes for Bug #800466 and improvements that enable Quantum to seamlessly run on KVM!
* fix pep8 introduced by trunk merge
* A small start on unit tests: mostly a proof of concept that contains a test for api/ports.py
* Added some more plugin agnostic tests (attachment and negative tests) and some pep8 fixes
* merge
* more pep8 goodness
* Fixing bug #798262
* refactor batch_config, allow multiple attaches with the empty string
* Merge: bzr merge lp:~bgh/quantum/bugfixes
* Fix cut and paste error in api_unplug_iface
* Fixing bug #798261
* no-commit
* Santhosh/Vinkesh | Added extensions framework
* merge and pep8 cleanup
* Merging latest changes from parent repo - lp:network-service. Parent repo had approved merge proposal for merging lp:~santhom/network-service/quantum_testing_framework, which has now been merged into lp:network-service
* Merging pep8 and functional test related changes from the lp:~santhom/network-service/quantum_testing_framework branch
* add example to usage string for batch_config.py
* Bug fixes and clean-up, including supporting libvirt
* Fix typo in mysql package check
* Fix typo in mysql package check
* Adding support for 'detail' action on networks objects
* README fixes
* Santhosh/Deepak | Fixed the import issue and config.load_paste_app issue
* Santhosh/Vinkesh | Fixed all the pep8 violations. Modified the 'req' to 'request' across all the services and wsgi so that it's consistent with other projects
* Santhosh/Vinkesh | Added the testing framework. Moved the smoketest to tests/functional
* merged remote README changes
* Fix cli.py from last merge when it got overwritten
* Fixing pep8 errors; removing excess debug lines
* Add dependencies to README and fix whitespace
* Fix merge indentation errors
* Merged Brad's ovsplugin code
* pep8 changes for quantum-framework code pieces
* Update Quantum README file with instructions to launch the service and get going
* Updated quantum_plugin_base with return type dataformats as well as exceptions
* Added a basic README file and updated Quantum plugin base class with appropriate exceptions
* Initial commit of exceptions that are raised by a quantum plugin
* Make the wording a little clearer
* Remove -a option from examples (it no longer exists)
* Make the API the default
* Address Dan's review comments
* Make the manager a little smarter about finding its config file
* Fix another TODO: remove main function from manager
* Fix detail_net and list_ports commands
* Remove get_all_interfaces and fix detail_network commands
* Initial version of openvswitch plugin
* Merged changes from Salvatore's branch - quantum-api-workinprogress
* Removed spurious methods from quantum_base_plugin class
* Updated the sample plugins to be compliant with the new QuantumBase class
* Update readme with quantum specific instructions
* Address some of the remaining TODOs and general cleanup
* Add headers
* Initial cut of openvswitch plugin
* Add database models/functions for ports and networks
* Print the command list in the help
* Whitespace fixes
* Added api functions for the interface commands
* Initial rework of cli to use the WS api
* Copy over miniclient from testscripts and port tests.py to use unittest
* Adding ports.py to source control
* pep8 fixes (1st batch)
* First working version of Quantum API
* Adding views/networks.py to bzr
* Adding serialization/deserialization for network resources. Adding fake plugin
* networks api with final URL structure. No serialization yet
* Implementing interface with plugin
* adapting wsgi files
* Work in progress on network API
* Adding first files for quantum API
* Minor fixes: indentation in bin/quantum and fix import in config.py
* Adding api paste configuration file
* Removing .pydevproject from version control
* Branching from quantum-framework
* Adding flags.py to infrastructure code
* Move plugin configuration to plugins.ini - a config file
* 1) Created a DummyDataPlugin in SamplePlugin module
* merged salvatore's changes to local branch
* 1) Added a bare-bones framework for quantum plugins. 2) Created demo quantum plugin that conforms to QuantumPluginBase Abstract class specification. 3) Demonstrated plugin registration and invocation using the demo plugin called "QuantumEchoPlugin" 4) Created the initial file structure for a quantum CLI 5) Seeded the utils module that will contain frequently used Quantum utilities. 6) Modified the manager module to initialize and register the quantum plugin defined in a configuration file. I have hard-coded the path to plugin for now but this will move to a quantum.conf file
* Fixing pep8 errors
* adding .bzrignore to prevent checking in pyc files and that sort of stuff
* Pushing initial started code based on Glance project and infrastructure work done by the melange team
* Merging in Shweta's fixes from the review by Sumit
* Minor Fix in ucs tests
* Fixing issues discussed in merge prop. The UCS Inventory clears the DB on teardown. The multiblade tests now check to see if a port exists in the db before deleting it. It checks to make sure the UCSInventory is set in the config
* Adding UCS inventory tests
* Merging in latest changes from lp:quantum
* Merging in Shweta's test changes
* Adding Ucs db tests
* Removing excess imports
* Fixing pep8 errors and pushing pylint score up to 8.57
* Fix for bug/893663 Making Cisco CLI usable from installed packages
* Bug 903684: functions defined twice in utils.py
* blueprint api-operational-status
* Adds sqlalchemy support for ovs_quantum_plugin
* bug 903581: remove etc/quantum.conf.sample as it is invalid
* Fixing bug/903829 Making setup_server.py not try to install quantum.conf.sample
* Removing a couple extra lines
* Adding some tests, fixing some bugs, and making the tearDown correctly remove PortProfiles
* Adding author information
* Removing a negative test until I can figure out how to implement it
* Removing some negative tests until I can figure out how to implement them
* Updating tests
* Fixing port-related calls
* Adding tests
* Tweaking other multiblade tests
* Updating multiblade create_network test
* Starting making multi_blade model return data
* Adding initial multi blade test file from Shubhangi